From f0ddf302b2d0136da9f529d05895c731e3d1a6f2 Mon Sep 17 00:00:00 2001 From: Dmitry Kurtaev Date: Mon, 14 Jan 2019 09:55:44 +0300 Subject: [PATCH] Move Inference Engine to new API --- modules/dnn/perf/perf_net.cpp | 13 +- modules/dnn/src/dnn.cpp | 74 ++++++++-- modules/dnn/src/layers/batch_norm_layer.cpp | 9 ++ modules/dnn/src/layers/blank_layer.cpp | 6 + modules/dnn/src/layers/concat_layer.cpp | 9 ++ modules/dnn/src/layers/convolution_layer.cpp | 102 +++++++++---- modules/dnn/src/layers/crop_layer.cpp | 15 +- .../dnn/src/layers/detection_output_layer.cpp | 20 +++ modules/dnn/src/layers/elementwise_layers.cpp | 74 ++++++++++ modules/dnn/src/layers/eltwise_layer.cpp | 25 +++- modules/dnn/src/layers/flatten_layer.cpp | 13 +- .../dnn/src/layers/fully_connected_layer.cpp | 13 ++ modules/dnn/src/layers/lrn_layer.cpp | 12 ++ modules/dnn/src/layers/mvn_layer.cpp | 8 + .../dnn/src/layers/normalize_bbox_layer.cpp | 44 ++++++ modules/dnn/src/layers/permute_layer.cpp | 6 + modules/dnn/src/layers/pooling_layer.cpp | 43 ++++++ modules/dnn/src/layers/prior_box_layer.cpp | 53 +++++++ modules/dnn/src/layers/proposal_layer.cpp | 23 +++ modules/dnn/src/layers/reorg_layer.cpp | 6 + modules/dnn/src/layers/reshape_layer.cpp | 24 ++- modules/dnn/src/layers/resize_layer.cpp | 41 ++++++ modules/dnn/src/layers/scale_layer.cpp | 24 +++ modules/dnn/src/layers/slice_layer.cpp | 19 ++- modules/dnn/src/layers/softmax_layer.cpp | 8 + modules/dnn/src/op_inf_engine.cpp | 137 +++++++++++++++++- modules/dnn/src/op_inf_engine.hpp | 66 ++++++++- modules/dnn/test/test_backends.cpp | 4 +- modules/dnn/test/test_darknet_importer.cpp | 2 +- modules/dnn/test/test_halide_layers.cpp | 3 +- modules/dnn/test/test_layers.cpp | 4 - modules/dnn/test/test_onnx_importer.cpp | 10 +- modules/dnn/test/test_tf_importer.cpp | 13 +- modules/dnn/test/test_torch_importer.cpp | 9 +- 34 files changed, 852 insertions(+), 80 deletions(-) diff --git a/modules/dnn/perf/perf_net.cpp b/modules/dnn/perf/perf_net.cpp index cc95cc58ae..d06689a7fb 100644 --- a/modules/dnn/perf/perf_net.cpp +++ b/modules/dnn/perf/perf_net.cpp @@ -157,8 +157,7 @@ PERF_TEST_P_(DNNTestNetwork, MobileNet_SSD_v2_TensorFlow) PERF_TEST_P_(DNNTestNetwork, DenseNet_121) { - if (backend == DNN_BACKEND_HALIDE || - (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD))) + if (backend == DNN_BACKEND_HALIDE) throw SkipTestException(""); processNet("dnn/DenseNet_121.caffemodel", "dnn/DenseNet_121.prototxt", "", Mat(cv::Size(224, 224), CV_32FC3)); @@ -211,8 +210,7 @@ PERF_TEST_P_(DNNTestNetwork, Inception_v2_SSD_TensorFlow) PERF_TEST_P_(DNNTestNetwork, YOLOv3) { - if (backend == DNN_BACKEND_HALIDE || - (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)) + if (backend == DNN_BACKEND_HALIDE) throw SkipTestException(""); Mat sample = imread(findDataFile("dnn/dog416.png", false)); Mat inp; @@ -222,8 +220,11 @@ PERF_TEST_P_(DNNTestNetwork, YOLOv3) PERF_TEST_P_(DNNTestNetwork, EAST_text_detection) { - if (backend == DNN_BACKEND_HALIDE || - (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)) + if (backend == DNN_BACKEND_HALIDE +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000 + || (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) +#endif + ) throw SkipTestException(""); processNet("dnn/frozen_east_text_detection.pb", "", "", Mat(cv::Size(320, 320), CV_32FC3)); } diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp index b6eff6c3b5..b83630a67f 100644 
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -701,12 +701,6 @@ struct DataLayer : public Layer
    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
    {
#ifdef HAVE_INF_ENGINE
-        InferenceEngine::LayerParams lp;
-        lp.name = name;
-        lp.type = "ScaleShift";
-        lp.precision = InferenceEngine::Precision::FP32;
-        std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
-
        CV_CheckEQ(inputsData.size(), (size_t)1, "");
        CV_CheckEQ(inputsData[0].dims, 4, "");
        const size_t numChannels = inputsData[0].size[1];
@@ -717,7 +711,6 @@ struct DataLayer : public Layer
                           {numChannels});
        weights->allocate();
        weights->set(std::vector<float>(numChannels, scaleFactors[0]));
-        ieLayer->_weights = weights;

        // Mean subtraction
        auto biases = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
@@ -729,8 +722,21 @@ struct DataLayer : public Layer
            biasesVec[i] = -means[0][i] * scaleFactors[0];
        }
        biases->set(biasesVec);
-        ieLayer->_biases = biases;

+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::ScaleShiftLayer ieLayer(name);
+        ieLayer.setWeights(weights);
+        ieLayer.setBiases(biases);
+#else
+        InferenceEngine::LayerParams lp;
+        lp.name = name;
+        lp.type = "ScaleShift";
+        lp.precision = InferenceEngine::Precision::FP32;
+        std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
+
+        ieLayer->_weights = weights;
+        ieLayer->_biases = biases;
+#endif
        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif  // HAVE_INF_ENGINE
        return Ptr<BackendNode>();
@@ -1451,7 +1457,11 @@ struct Net::Impl
                    if (layerNet != ieInpNode->net)
                    {
                        // layerNet is empty or nodes are from different graphs.
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+                        ieInpNode->net->addOutput(ieInpNode->layer.getName());
+#else
                        ieInpNode->net->addOutput(ieInpNode->layer->name);
+#endif
                    }
                }
            }
@@ -1527,7 +1537,7 @@ struct Net::Impl

        // Build Inference Engine networks from sets of layers that support this
        // backend. Split a whole model on several Inference Engine networks if
-        // some of layers is not implemented.
+        // some of layers are not implemented.

        // Set of all input and output blobs wrappers for current network.
        std::map<LayerPin, Ptr<BackendWrapper> > netBlobsWrappers;
@@ -1543,7 +1553,7 @@ struct Net::Impl
            {
                addInfEngineNetOutputs(ld);
                net = Ptr<InfEngineBackendNet>();
-                netBlobsWrappers.clear();
+                netBlobsWrappers.clear();  // Not used with the R5 release, but we don't wrap it in an #ifdef.
                layer->preferableTarget = DNN_TARGET_CPU;
                continue;
            }
@@ -1561,12 +1571,13 @@ struct Net::Impl
                    if (ieInpNode->net != net)
                    {
                        net = Ptr<InfEngineBackendNet>();
-                        netBlobsWrappers.clear();
+                        netBlobsWrappers.clear();  // Not used with the R5 release, but we don't wrap it in an #ifdef.
                        break;
                    }
                }
            }

+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5)
            // The same blobs wrappers cannot be shared between two Inference Engine
            // networks because of explicit references between layers and blobs.
            // So we need to rewrap all the external blobs.
@@ -1583,6 +1594,7 @@ struct Net::Impl
                    ld.inputBlobsWrappers[i] = it->second;
            }
            netBlobsWrappers[LayerPin(ld.id, 0)] = ld.outputBlobsWrappers[0];
+#endif  // IE < R5

            Ptr<BackendNode> node;
            if (!net.empty())
@@ -1613,6 +1625,40 @@ struct Net::Impl
            CV_Assert(!ieNode.empty());
            ieNode->net = net;

+            // Convert weights to FP16 for specific targets.
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+            if ((preferableTarget == DNN_TARGET_OPENCL_FP16 ||
+                 preferableTarget == DNN_TARGET_MYRIAD ||
+                 preferableTarget == DNN_TARGET_FPGA) && !fused)
+            {
+                auto& blobs = ieNode->layer.getConstantData();
+                if (blobs.empty())
+                {
+                    // In case of a non-weightable layer we have to specify
+                    // its precision by adding a dummy blob.
+                    auto blob = InferenceEngine::make_shared_blob<int16_t>(
+                                    InferenceEngine::Precision::FP16,
+                                    InferenceEngine::Layout::C, {1});
+                    blob->allocate();
+                    blobs[""] = blob;
+                }
+                else
+                {
+                    for (auto& it : blobs)
+                        it.second = convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(it.second));
+                }
+            }
+
+            if (!fused)
+                net->addLayer(ieNode->layer);
+
+            net->connect(ld.inputBlobsWrappers, ld.outputBlobsWrappers, ieNode->layer.getName());
+            net->addBlobs(ld.inputBlobsWrappers);
+            net->addBlobs(ld.outputBlobsWrappers);
+            addInfEngineNetOutputs(ld);
+
+#else // IE >= R5
+
            auto weightableLayer = std::dynamic_pointer_cast<InferenceEngine::WeightableLayer>(ieNode->layer);
            if ((preferableTarget == DNN_TARGET_OPENCL_FP16 ||
                preferableTarget == DNN_TARGET_MYRIAD ||
@@ -1650,10 +1696,10 @@ struct Net::Impl
            if (!fused)
                net->addLayer(ieNode->layer);
            addInfEngineNetOutputs(ld);
+#endif // IE >= R5
        }

        // Initialize all networks.
-        std::set<Ptr<InfEngineBackendNet> > initializedNets;
        for (MapIdToLayerData::reverse_iterator it = layers.rbegin(); it != layers.rend(); ++it)
        {
            LayerData &ld = it->second;
@@ -2546,7 +2592,11 @@ Net Net::readFromModelOptimizer(const String& xml, const String& bin)
    Net cvNet;
    cvNet.setInputsNames(inputsNames);

+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+    Ptr<InfEngineBackendNode> backendNode(new InfEngineBackendNode(InferenceEngine::Builder::Layer("")));
+#else
    Ptr<InfEngineBackendNode> backendNode(new InfEngineBackendNode(0));
+#endif
    backendNode->net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet(ieNet));
    for (auto& it : ieNet.getOutputsInfo())
    {
diff --git a/modules/dnn/src/layers/batch_norm_layer.cpp b/modules/dnn/src/layers/batch_norm_layer.cpp
index 9a1707a3e8..522d0229ba 100644
--- a/modules/dnn/src/layers/batch_norm_layer.cpp
+++ b/modules/dnn/src/layers/batch_norm_layer.cpp
@@ -349,6 +349,14 @@ public:
    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
    {
#ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::ScaleShiftLayer ieLayer(name);
+
+        const size_t numChannels = weights_.total();
+        ieLayer.setWeights(wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C));
+        ieLayer.setBiases(wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C));
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#else
        InferenceEngine::LayerParams lp;
        lp.name = name;
        lp.type = "ScaleShift";
@@ -360,6 +368,7 @@ public:
        ieLayer->_biases = wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C);

        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
#endif  // HAVE_INF_ENGINE
        return Ptr<BackendNode>();
    }
diff --git a/modules/dnn/src/layers/blank_layer.cpp b/modules/dnn/src/layers/blank_layer.cpp
index 1eb149b3d1..9f8590bea7 100644
--- a/modules/dnn/src/layers/blank_layer.cpp
+++ b/modules/dnn/src/layers/blank_layer.cpp
@@ -110,6 +110,11 @@ public:
    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
    {
#ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        InferenceEngine::Builder::SplitLayer ieLayer(name);
+        ieLayer.setOutputPorts({InferenceEngine::Port()});
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#else
        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
        CV_Assert(!input->dims.empty());

@@ -123,6 +128,7 @@ public:
ieLayer->params["out_sizes"] = format("%d", (int)input->dims[0]); #endif return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } diff --git a/modules/dnn/src/layers/concat_layer.cpp b/modules/dnn/src/layers/concat_layer.cpp index 6bad580334..90def743a4 100644 --- a/modules/dnn/src/layers/concat_layer.cpp +++ b/modules/dnn/src/layers/concat_layer.cpp @@ -301,6 +301,14 @@ public: virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); + + InferenceEngine::Builder::ConcatLayer ieLayer(name); + ieLayer.setAxis(clamp(axis, input->dims.size())); + ieLayer.setInputPorts(std::vector(inputs.size())); + return Ptr(new InfEngineBackendNode(ieLayer)); +#else InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); InferenceEngine::LayerParams lp; lp.name = name; @@ -309,6 +317,7 @@ public: std::shared_ptr ieLayer(new InferenceEngine::ConcatLayer(lp)); ieLayer->_axis = clamp(axis, input->dims.size()); return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp index 5dbd5ba895..c54db528cc 100644 --- a/modules/dnn/src/layers/convolution_layer.cpp +++ b/modules/dnn/src/layers/convolution_layer.cpp @@ -451,6 +451,54 @@ public: const int inpGroupCn = blobs[0].size[1]; const int group = inpCn / inpGroupCn; + auto ieWeights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW); + if (newWeightAndBias) + { + if (weightsMat.isContinuous()) + { + Mat fusedWeights = weightsMat.reshape(1, blobs[0].dims, blobs[0].size); + ieWeights = wrapToInfEngineBlob(fusedWeights, InferenceEngine::Layout::OIHW); + } + else + { + ieWeights = InferenceEngine::make_shared_blob( + InferenceEngine::Precision::FP32, InferenceEngine::Layout::OIHW, + ieWeights->dims()); + ieWeights->allocate(); + + Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, outCn); + Mat fusedWeights = weightsMat.colRange(0, newWeights.cols); + fusedWeights.copyTo(newWeights); + } + } + InferenceEngine::Blob::Ptr ieBiases; + if (hasBias() || fusedBias) + { + Mat biasesMat({outCn}, CV_32F, &biasvec[0]); + ieBiases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C); + } + +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::ConvolutionLayer ieLayer(name); + + ieLayer.setKernel({kernel.height, kernel.width}); + ieLayer.setStrides({stride.height, stride.width}); + ieLayer.setDilation({dilation.height, dilation.width}); + ieLayer.setPaddingsBegin({pad.height, pad.width}); + ieLayer.setPaddingsEnd({pad.height, pad.width}); + ieLayer.setGroup(group); + ieLayer.setOutDepth(outCn); + + ieLayer.setWeights(ieWeights); + if (ieBiases) + ieLayer.setBiases(ieBiases); + + InferenceEngine::Builder::Layer l = ieLayer; + if (!padMode.empty()) + l.getParameters()["auto_pad"] = padMode == "VALID" ? 
std::string("valid") : std::string("same_upper"); + + return Ptr(new InfEngineBackendNode(l)); +#else InferenceEngine::LayerParams lp; lp.name = name; lp.type = "Convolution"; @@ -487,32 +535,11 @@ public: ieLayer->_out_depth = outCn; ieLayer->_group = group; - ieLayer->_weights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW); - if (newWeightAndBias) - { - if (weightsMat.isContinuous()) - { - Mat fusedWeights = weightsMat.reshape(1, blobs[0].dims, blobs[0].size); - ieLayer->_weights = wrapToInfEngineBlob(fusedWeights, InferenceEngine::Layout::OIHW); - } - else - { - ieLayer->_weights = InferenceEngine::make_shared_blob( - InferenceEngine::Precision::FP32, InferenceEngine::Layout::OIHW, - ieLayer->_weights->dims()); - ieLayer->_weights->allocate(); - - Mat newWeights = infEngineBlobToMat(ieLayer->_weights).reshape(1, outCn); - Mat fusedWeights = weightsMat.colRange(0, newWeights.cols); - fusedWeights.copyTo(newWeights); - } - } - if (hasBias() || fusedBias) - { - Mat biasesMat({outCn}, CV_32F, &biasvec[0]); - ieLayer->_biases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C); - } + ieLayer->_weights = ieWeights; + if (ieBiases) + ieLayer->_biases = ieBiases; return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } @@ -1123,6 +1150,9 @@ public: #ifdef HAVE_INF_ENGINE if (backendId == DNN_BACKEND_INFERENCE_ENGINE) { + if (INF_ENGINE_RELEASE == 2018050000 && (adjustPad.height || adjustPad.width)) + return false; + const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW layout const int group = numOutput / outGroupCn; if (group != 1) @@ -1677,6 +1707,27 @@ public: virtual Ptr initInfEngine(const std::vector > &) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW layout + const int group = numOutput / outGroupCn; + + InferenceEngine::Builder::DeconvolutionLayer ieLayer(name); + + ieLayer.setKernel({kernel.height, kernel.width}); + ieLayer.setStrides({stride.height, stride.width}); + ieLayer.setDilation({dilation.height, dilation.width}); + ieLayer.setPaddingsBegin({pad.height, pad.width}); + ieLayer.setPaddingsEnd({pad.height, pad.width}); + ieLayer.setGroup(group); + ieLayer.setOutDepth(numOutput); + + ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW)); + if (hasBias()) + { + ieLayer.setBiases(wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C)); + } + return Ptr(new InfEngineBackendNode(ieLayer)); +#else const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW layout const int group = numOutput / outGroupCn; @@ -1716,6 +1767,7 @@ public: ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C); } return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } diff --git a/modules/dnn/src/layers/crop_layer.cpp b/modules/dnn/src/layers/crop_layer.cpp index 32cdbbaa00..c7cd99c9aa 100644 --- a/modules/dnn/src/layers/crop_layer.cpp +++ b/modules/dnn/src/layers/crop_layer.cpp @@ -67,8 +67,12 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { - return backendId == DNN_BACKEND_OPENCV || - (backendId == DNN_BACKEND_INFERENCE_ENGINE && crop_ranges.size() == 4); +#ifdef HAVE_INF_ENGINE + if (backendId == DNN_BACKEND_INFERENCE_ENGINE) + return INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5) && crop_ranges.size() == 4; + else +#endif + return backendId == 
DNN_BACKEND_OPENCV; } bool getMemoryShapes(const std::vector &inputs, @@ -145,9 +149,10 @@ public: input(&crop_ranges[0]).copyTo(outputs[0]); } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5) InferenceEngine::LayerParams lp; lp.name = name; lp.type = "Crop"; @@ -181,9 +186,11 @@ public: ieLayer->dim.push_back(crop_ranges[3].end - crop_ranges[3].start); #endif return Ptr(new InfEngineBackendNode(ieLayer)); -#endif // HAVE_INF_ENGINE +#else return Ptr(); +#endif // IE < R5 } +#endif std::vector crop_ranges; }; diff --git a/modules/dnn/src/layers/detection_output_layer.cpp b/modules/dnn/src/layers/detection_output_layer.cpp index 4c341d74dc..0226cf26a5 100644 --- a/modules/dnn/src/layers/detection_output_layer.cpp +++ b/modules/dnn/src/layers/detection_output_layer.cpp @@ -939,6 +939,25 @@ public: virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::DetectionOutputLayer ieLayer(name); + + ieLayer.setNumClasses(_numClasses); + ieLayer.setShareLocation(_shareLocation); + ieLayer.setBackgroudLabelId(_backgroundLabelId); + ieLayer.setNMSThreshold(_nmsThreshold); + ieLayer.setTopK(_topK); + ieLayer.setKeepTopK(_keepTopK); + ieLayer.setConfidenceThreshold(_confidenceThreshold); + ieLayer.setVariantEncodedInTarget(_varianceEncodedInTarget); + ieLayer.setCodeType("caffe.PriorBoxParameter." + _codeType); + ieLayer.setInputPorts(std::vector(3)); + + InferenceEngine::Builder::Layer l = ieLayer; + l.getParameters()["eta"] = std::string("1.0"); + + return Ptr(new InfEngineBackendNode(l)); +#else InferenceEngine::LayerParams lp; lp.name = name; lp.type = "DetectionOutput"; @@ -956,6 +975,7 @@ public: ieLayer->params["variance_encoded_in_target"] = _varianceEncodedInTarget ? "1" : "0"; ieLayer->params["code_type"] = "caffe.PriorBoxParameter." 
+ _codeType; return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } diff --git a/modules/dnn/src/layers/elementwise_layers.cpp b/modules/dnn/src/layers/elementwise_layers.cpp index dea7c6c0d6..b2e0621d6d 100644 --- a/modules/dnn/src/layers/elementwise_layers.cpp +++ b/modules/dnn/src/layers/elementwise_layers.cpp @@ -152,10 +152,16 @@ public: virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::Layer ieLayer = func.initInfEngineBuilderAPI(); + ieLayer.setName(this->name); + return Ptr(new InfEngineBackendNode(ieLayer)); +#else InferenceEngine::LayerParams lp; lp.name = this->name; lp.precision = InferenceEngine::Precision::FP32; return Ptr(new InfEngineBackendNode(func.initInfEngine(lp))); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } @@ -345,6 +351,12 @@ struct ReLUFunctor #endif // HAVE_HALIDE #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::Layer initInfEngineBuilderAPI() + { + return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(slope); + } +#else InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp) { lp.type = "ReLU"; @@ -353,6 +365,7 @@ struct ReLUFunctor ieLayer->params["negative_slope"] = format("%f", slope); return ieLayer; } +#endif #endif // HAVE_INF_ENGINE bool tryFuse(Ptr&) { return false; } @@ -452,6 +465,12 @@ struct ReLU6Functor #endif // HAVE_HALIDE #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::Layer initInfEngineBuilderAPI() + { + return InferenceEngine::Builder::ClampLayer("").setMinValue(minValue).setMaxValue(maxValue); + } +#else InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp) { lp.type = "Clamp"; @@ -462,6 +481,7 @@ struct ReLU6Functor ieLayer->params["max"] = format("%f", maxValue); return ieLayer; } +#endif #endif // HAVE_INF_ENGINE bool tryFuse(Ptr&) { return false; } @@ -530,12 +550,19 @@ struct TanHFunctor #endif // HAVE_HALIDE #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::Layer initInfEngineBuilderAPI() + { + return InferenceEngine::Builder::TanHLayer(""); + } +#else InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp) { lp.type = "TanH"; std::shared_ptr ieLayer(new InferenceEngine::CNNLayer(lp)); return ieLayer; } +#endif #endif // HAVE_INF_ENGINE bool tryFuse(Ptr&) { return false; } @@ -604,12 +631,19 @@ struct SigmoidFunctor #endif // HAVE_HALIDE #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::Layer initInfEngineBuilderAPI() + { + return InferenceEngine::Builder::SigmoidLayer(""); + } +#else InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp) { lp.type = "Sigmoid"; std::shared_ptr ieLayer(new InferenceEngine::CNNLayer(lp)); return ieLayer; } +#endif #endif // HAVE_INF_ENGINE bool tryFuse(Ptr&) { return false; } @@ -680,11 +714,18 @@ struct ELUFunctor #endif // HAVE_HALIDE #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::Layer initInfEngineBuilderAPI() + { + return InferenceEngine::Builder::ELULayer(""); + } +#else InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp) { lp.type = "ELU"; return InferenceEngine::CNNLayerPtr(new InferenceEngine::CNNLayer(lp)); } +#endif #endif // 
HAVE_INF_ENGINE bool tryFuse(Ptr&) { return false; } @@ -753,6 +794,12 @@ struct AbsValFunctor #endif // HAVE_HALIDE #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::Layer initInfEngineBuilderAPI() + { + return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(-1); + } +#else InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp) { lp.type = "ReLU"; @@ -761,6 +808,7 @@ struct AbsValFunctor ieLayer->params["negative_slope"] = "-1.0"; return ieLayer; } +#endif #endif // HAVE_INF_ENGINE bool tryFuse(Ptr&) { return false; } @@ -808,11 +856,18 @@ struct BNLLFunctor #endif // HAVE_HALIDE #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::Layer initInfEngineBuilderAPI() + { + CV_Error(Error::StsNotImplemented, ""); + } +#else InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp) { CV_Error(Error::StsNotImplemented, "BNLL"); return InferenceEngine::CNNLayerPtr(); } +#endif #endif // HAVE_INF_ENGINE bool tryFuse(Ptr&) { return false; } @@ -917,6 +972,14 @@ struct PowerFunctor #endif // HAVE_HALIDE #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::Layer initInfEngineBuilderAPI() + { + return InferenceEngine::Builder::PowerLayer("").setPower(power) + .setScale(scale) + .setShift(shift); + } +#else InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp) { if (power == 1.0f && scale == 1.0f && shift == 0.0f) @@ -936,6 +999,7 @@ struct PowerFunctor return ieLayer; } } +#endif #endif // HAVE_INF_ENGINE bool tryFuse(Ptr& top) @@ -1067,6 +1131,15 @@ struct ChannelsPReLUFunctor #endif // HAVE_HALIDE #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::Layer initInfEngineBuilderAPI() + { + InferenceEngine::Builder::PReLULayer ieLayer(""); + const size_t numChannels = scale.total(); + ieLayer.setWeights(wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C)); + return ieLayer; + } +#else InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp) { lp.type = "PReLU"; @@ -1075,6 +1148,7 @@ struct ChannelsPReLUFunctor ieLayer->_weights = wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C); return ieLayer; } +#endif #endif // HAVE_INF_ENGINE bool tryFuse(Ptr&) { return false; } diff --git a/modules/dnn/src/layers/eltwise_layer.cpp b/modules/dnn/src/layers/eltwise_layer.cpp index c038eb19e0..18925010d0 100644 --- a/modules/dnn/src/layers/eltwise_layer.cpp +++ b/modules/dnn/src/layers/eltwise_layer.cpp @@ -99,7 +99,7 @@ public: return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || (backendId == DNN_BACKEND_INFERENCE_ENGINE && - (preferableTarget != DNN_TARGET_MYRIAD || coeffs.empty())); + (preferableTarget != DNN_TARGET_OPENCL || coeffs.empty())); } bool getMemoryShapes(const std::vector &inputs, @@ -420,9 +420,29 @@ public: return Ptr(); } - virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE + virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::EltwiseLayer ieLayer(name); + + ieLayer.setInputPorts(std::vector(inputs.size())); + + if (op == SUM) + ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::SUM); + else if (op == PROD) + 
ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MUL); + else if (op == MAX) + ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MAX); + else + CV_Error(Error::StsNotImplemented, "Unsupported eltwise operation"); + + InferenceEngine::Builder::Layer l = ieLayer; + if (!coeffs.empty()) + l.getParameters()["coeff"] = coeffs; + + return Ptr(new InfEngineBackendNode(l)); +#else InferenceEngine::LayerParams lp; lp.name = name; lp.type = "Eltwise"; @@ -438,6 +458,7 @@ public: else CV_Error(Error::StsNotImplemented, "Unsupported eltwise operation"); return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } diff --git a/modules/dnn/src/layers/flatten_layer.cpp b/modules/dnn/src/layers/flatten_layer.cpp index e3382f2d53..3a704dca81 100644 --- a/modules/dnn/src/layers/flatten_layer.cpp +++ b/modules/dnn/src/layers/flatten_layer.cpp @@ -152,9 +152,19 @@ public: } } - virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE + virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::Layer ieLayer(name); + ieLayer.setName(name); + ieLayer.setType("Flatten"); + ieLayer.getParameters()["axis"] = _startAxis; + ieLayer.getParameters()["end_axis"] = _endAxis; + ieLayer.setInputPorts(std::vector(1)); + ieLayer.setOutputPorts(std::vector(1)); + return Ptr(new InfEngineBackendNode(ieLayer)); +#else InferenceEngine::LayerParams lp; lp.name = name; lp.type = "Flatten"; @@ -163,6 +173,7 @@ public: ieLayer->params["axis"] = format("%d", _startAxis); ieLayer->params["end_axis"] = format("%d", _endAxis); return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } diff --git a/modules/dnn/src/layers/fully_connected_layer.cpp b/modules/dnn/src/layers/fully_connected_layer.cpp index 78d3e809b5..3a71a872fe 100644 --- a/modules/dnn/src/layers/fully_connected_layer.cpp +++ b/modules/dnn/src/layers/fully_connected_layer.cpp @@ -442,6 +442,18 @@ public: virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::FullyConnectedLayer ieLayer(name); + + const int outNum = blobs[0].size[0]; + ieLayer.setOutputNum(outNum); + + ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW)); + if (blobs.size() > 1) + ieLayer.setBiases(wrapToInfEngineBlob(blobs[1], {(size_t)outNum}, InferenceEngine::Layout::C)); + + return Ptr(new InfEngineBackendNode(ieLayer)); +#else InferenceEngine::LayerParams lp; lp.name = name; lp.type = "FullyConnected"; @@ -456,6 +468,7 @@ public: if (blobs.size() > 1) ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {(size_t)ieLayer->_out_num}, InferenceEngine::Layout::C); return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } diff --git a/modules/dnn/src/layers/lrn_layer.cpp b/modules/dnn/src/layers/lrn_layer.cpp index edaa212d64..f34faff338 100644 --- a/modules/dnn/src/layers/lrn_layer.cpp +++ b/modules/dnn/src/layers/lrn_layer.cpp @@ -382,6 +382,17 @@ public: virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::NormLayer ieLayer(name); + ieLayer.setSize(size); + ieLayer.setAlpha(alpha); + ieLayer.setBeta(beta); + 
ieLayer.setAcrossMaps(type == CHANNEL_NRM); + + InferenceEngine::Builder::Layer l = ieLayer; + l.getParameters()["k"] = bias; + return Ptr(new InfEngineBackendNode(l)); +#else InferenceEngine::LayerParams lp; lp.name = name; lp.type = "Norm"; @@ -394,6 +405,7 @@ public: ieLayer->_alpha = alpha; ieLayer->_isAcrossMaps = (type == CHANNEL_NRM); return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } diff --git a/modules/dnn/src/layers/mvn_layer.cpp b/modules/dnn/src/layers/mvn_layer.cpp index 93dd5f05f6..772902ca01 100644 --- a/modules/dnn/src/layers/mvn_layer.cpp +++ b/modules/dnn/src/layers/mvn_layer.cpp @@ -371,6 +371,13 @@ public: virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::MVNLayer ieLayer(name); + ieLayer.setAcrossChannels(acrossChannels); + ieLayer.setNormalize(normVariance); + ieLayer.setEpsilon(eps); + return Ptr(new InfEngineBackendNode(ieLayer)); +#else InferenceEngine::LayerParams lp; lp.name = name; lp.type = "MVN"; @@ -380,6 +387,7 @@ public: ieLayer->params["normalize_variance"] = normVariance ? "1" : "0"; ieLayer->params["eps"] = format("%f", eps); return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } diff --git a/modules/dnn/src/layers/normalize_bbox_layer.cpp b/modules/dnn/src/layers/normalize_bbox_layer.cpp index b3ca64f24a..4766f1704e 100644 --- a/modules/dnn/src/layers/normalize_bbox_layer.cpp +++ b/modules/dnn/src/layers/normalize_bbox_layer.cpp @@ -264,6 +264,49 @@ public: virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); + if (input->dims.size() == 4) + { + InferenceEngine::Builder::NormalizeLayer ieLayer(name); + + ieLayer.setChannelShared(false); + ieLayer.setAcrossMaps(acrossSpatial); + ieLayer.setEpsilon(epsilon); + + InferenceEngine::Builder::Layer l = ieLayer; + const int numChannels = input->dims[2]; // NOTE: input->dims are reversed (whcn) + if (blobs.empty()) + { + auto weights = InferenceEngine::make_shared_blob(InferenceEngine::Precision::FP32, + InferenceEngine::Layout::C, + {(size_t)numChannels}); + weights->allocate(); + std::vector ones(numChannels, 1); + weights->set(ones); + l.addConstantData("weights", weights); + l.getParameters()["channel_shared"] = false; + } + else + { + CV_Assert(numChannels == blobs[0].total()); + l.addConstantData("weights", wrapToInfEngineBlob(blobs[0], {(size_t)numChannels}, InferenceEngine::Layout::C)); + l.getParameters()["channel_shared"] = blobs[0].total() == 1; + } + l.getParameters()["across_spatial"] = acrossSpatial; + return Ptr(new InfEngineBackendNode(l)); + } + else + { + InferenceEngine::Builder::GRNLayer ieLayer(name); + ieLayer.setBeta(epsilon); + + InferenceEngine::Builder::Layer l = ieLayer; + l.getParameters()["bias"] = epsilon; + + return Ptr(new InfEngineBackendNode(l)); + } +#else InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); InferenceEngine::LayerParams lp; @@ -307,6 +350,7 @@ public: ieLayer->params["bias"] = format("%f", epsilon); return Ptr(new InfEngineBackendNode(ieLayer)); } +#endif #endif // HAVE_INF_ENGINE return Ptr(); } diff --git a/modules/dnn/src/layers/permute_layer.cpp b/modules/dnn/src/layers/permute_layer.cpp index 00796d52b8..9e529216e9 100644 --- a/modules/dnn/src/layers/permute_layer.cpp +++ 
b/modules/dnn/src/layers/permute_layer.cpp @@ -373,6 +373,11 @@ public: virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::PermuteLayer ieLayer(name); + ieLayer.setOrder(_order); + return Ptr(new InfEngineBackendNode(ieLayer)); +#else InferenceEngine::LayerParams lp; lp.name = name; lp.type = "Permute"; @@ -385,6 +390,7 @@ public: ieLayer->params["order"] += format(",%d", _order[i]); return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp index bf17415561..79b5e3f7d2 100644 --- a/modules/dnn/src/layers/pooling_layer.cpp +++ b/modules/dnn/src/layers/pooling_layer.cpp @@ -257,6 +257,48 @@ public: virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + if (type == MAX || type == AVE) + { + InferenceEngine::Builder::PoolingLayer ieLayer(name); + ieLayer.setKernel({kernel.height, kernel.width}); + ieLayer.setStrides({stride.height, stride.width}); + ieLayer.setPaddingsBegin({pad_t, pad_l}); + ieLayer.setPaddingsEnd({pad_b, pad_r}); + ieLayer.setPoolingType(type == MAX ? + InferenceEngine::Builder::PoolingLayer::PoolingType::MAX : + InferenceEngine::Builder::PoolingLayer::PoolingType::AVG); + ieLayer.setRoundingType(ceilMode ? + InferenceEngine::Builder::PoolingLayer::RoundingType::CEIL : + InferenceEngine::Builder::PoolingLayer::RoundingType::FLOOR); + ieLayer.setExcludePad(type == AVE && padMode == "SAME"); + + InferenceEngine::Builder::Layer l = ieLayer; + if (!padMode.empty()) + l.getParameters()["auto_pad"] = padMode == "VALID" ? 
std::string("valid") : std::string("same_upper"); + return Ptr(new InfEngineBackendNode(l)); + } + else if (type == ROI) + { + InferenceEngine::Builder::ROIPoolingLayer ieLayer(name); + ieLayer.setSpatialScale(spatialScale); + ieLayer.setPooled({pooledSize.height, pooledSize.width}); + ieLayer.setInputPorts(std::vector(2)); + return Ptr(new InfEngineBackendNode(ieLayer)); + } + else if (type == PSROI) + { + InferenceEngine::Builder::PSROIPoolingLayer ieLayer(name); + ieLayer.setSpatialScale(spatialScale); + ieLayer.setOutputDim(psRoiOutChannels); + ieLayer.setGroupSize(pooledSize.width); + ieLayer.setInputPorts(std::vector(2)); + return Ptr(new InfEngineBackendNode(ieLayer)); + } + else + CV_Error(Error::StsNotImplemented, "Unsupported pooling type"); + return Ptr(); +#else InferenceEngine::LayerParams lp; lp.name = name; lp.precision = InferenceEngine::Precision::FP32; @@ -315,6 +357,7 @@ public: CV_Error(Error::StsNotImplemented, "Unsupported pooling type"); return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } diff --git a/modules/dnn/src/layers/prior_box_layer.cpp b/modules/dnn/src/layers/prior_box_layer.cpp index 44f18ec5c0..458d667cbc 100644 --- a/modules/dnn/src/layers/prior_box_layer.cpp +++ b/modules/dnn/src/layers/prior_box_layer.cpp @@ -483,6 +483,58 @@ public: virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + if (_explicitSizes) + { + InferenceEngine::Builder::PriorBoxClusteredLayer ieLayer(name); + + CV_Assert(_stepX == _stepY); + ieLayer.setStep(_stepX); + + CV_CheckEQ(_offsetsX.size(), (size_t)1, ""); CV_CheckEQ(_offsetsY.size(), (size_t)1, ""); CV_CheckEQ(_offsetsX[0], _offsetsY[0], ""); + ieLayer.setOffset(_offsetsX[0]); + + ieLayer.setClip(_clip); + ieLayer.setFlip(false); // We already flipped aspect ratios. + + InferenceEngine::Builder::Layer l = ieLayer; + + CV_Assert_N(!_boxWidths.empty(), !_boxHeights.empty(), !_variance.empty()); + CV_Assert(_boxWidths.size() == _boxHeights.size()); + l.getParameters()["width"] = _boxWidths; + l.getParameters()["height"] = _boxHeights; + l.getParameters()["variance"] = _variance; + return Ptr(new InfEngineBackendNode(l)); + } + else + { + InferenceEngine::Builder::PriorBoxLayer ieLayer(name); + + CV_Assert(!_explicitSizes); + + ieLayer.setMinSize(_minSize); + if (_maxSize > 0) + ieLayer.setMaxSize(_maxSize); + + CV_Assert(_stepX == _stepY); + ieLayer.setStep(_stepX); + + CV_CheckEQ(_offsetsX.size(), (size_t)1, ""); CV_CheckEQ(_offsetsY.size(), (size_t)1, ""); CV_CheckEQ(_offsetsX[0], _offsetsY[0], ""); + ieLayer.setOffset(_offsetsX[0]); + + ieLayer.setClip(_clip); + ieLayer.setFlip(false); // We already flipped aspect ratios. + + InferenceEngine::Builder::Layer l = ieLayer; + if (!_aspectRatios.empty()) + { + l.getParameters()["aspect_ratio"] = _aspectRatios; + } + CV_Assert(!_variance.empty()); + l.getParameters()["variance"] = _variance; + return Ptr(new InfEngineBackendNode(l)); + } +#else InferenceEngine::LayerParams lp; lp.name = name; lp.type = _explicitSizes ? 
"PriorBoxClustered" : "PriorBox"; @@ -538,6 +590,7 @@ public: ieLayer->params["offset"] = format("%f", _offsetsX[0]); return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } diff --git a/modules/dnn/src/layers/proposal_layer.cpp b/modules/dnn/src/layers/proposal_layer.cpp index f559ee40e2..6514ed3a5c 100644 --- a/modules/dnn/src/layers/proposal_layer.cpp +++ b/modules/dnn/src/layers/proposal_layer.cpp @@ -328,6 +328,28 @@ public: virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::ProposalLayer ieLayer(name); + + ieLayer.setBaseSize(baseSize); + ieLayer.setFeatStride(featStride); + ieLayer.setMinSize(16); + ieLayer.setNMSThresh(nmsThreshold); + ieLayer.setPostNMSTopN(keepTopAfterNMS); + ieLayer.setPreNMSTopN(keepTopBeforeNMS); + + std::vector scalesVec(scales.size()); + for (int i = 0; i < scales.size(); ++i) + scalesVec[i] = scales.get(i); + ieLayer.setScale(scalesVec); + + std::vector ratiosVec(ratios.size()); + for (int i = 0; i < ratios.size(); ++i) + ratiosVec[i] = ratios.get(i); + ieLayer.setRatio(ratiosVec); + + return Ptr(new InfEngineBackendNode(ieLayer)); +#else InferenceEngine::LayerParams lp; lp.name = name; lp.type = "Proposal"; @@ -353,6 +375,7 @@ public: ieLayer->params["scale"] += format(",%f", scales.get(i)); } return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } diff --git a/modules/dnn/src/layers/reorg_layer.cpp b/modules/dnn/src/layers/reorg_layer.cpp index a98f690e65..3e42db5de1 100644 --- a/modules/dnn/src/layers/reorg_layer.cpp +++ b/modules/dnn/src/layers/reorg_layer.cpp @@ -181,6 +181,11 @@ public: virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::ReorgYoloLayer ieLayer(name); + ieLayer.setStride(reorgStride); + return Ptr(new InfEngineBackendNode(ieLayer)); +#else InferenceEngine::LayerParams lp; lp.name = name; lp.type = "ReorgYolo"; @@ -188,6 +193,7 @@ public: std::shared_ptr ieLayer(new InferenceEngine::CNNLayer(lp)); ieLayer->params["stride"] = format("%d", reorgStride); return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } diff --git a/modules/dnn/src/layers/reshape_layer.cpp b/modules/dnn/src/layers/reshape_layer.cpp index 4109802a66..d6290456fa 100644 --- a/modules/dnn/src/layers/reshape_layer.cpp +++ b/modules/dnn/src/layers/reshape_layer.cpp @@ -203,6 +203,17 @@ public: return true; } + void finalize(InputArrayOfArrays, OutputArrayOfArrays outputs_arr) CV_OVERRIDE + { + std::vector outputs; + outputs_arr.getMatVector(outputs); + + CV_Assert(!outputs.empty()); + outShapes.resize(outputs.size()); + for (int i = 0; i < outputs.size(); ++i) + outShapes[i] = shape(outputs[i]); + } + bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals) { std::vector inputs; @@ -218,8 +229,7 @@ public: void *dst_handle = outputs[i].handle(ACCESS_WRITE); if (src_handle != dst_handle) { - MatShape outShape = shape(outputs[i]); - UMat umat = srcBlob.reshape(1, (int)outShape.size(), &outShape[0]); + UMat umat = srcBlob.reshape(1, (int)outShapes[i].size(), &outShapes[i][0]); umat.copyTo(outputs[i]); } } @@ -250,6 +260,12 @@ public: virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + 
InferenceEngine::Builder::ReshapeLayer ieLayer(name); + CV_Assert(outShapes.size() == 1); + ieLayer.setDims(outShapes[0]); + return Ptr(new InfEngineBackendNode(ieLayer)); +#else InferenceEngine::LayerParams lp; lp.name = name; lp.type = "Reshape"; @@ -265,9 +281,13 @@ public: ieLayer->shape = std::vector(shapeSrc->dims.rbegin(), shapeSrc->dims.rend()); } return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } + +private: + std::vector outShapes; }; Ptr ReshapeLayer::create(const LayerParams& params) diff --git a/modules/dnn/src/layers/resize_layer.cpp b/modules/dnn/src/layers/resize_layer.cpp index 6aa32150b6..03d806ad2c 100644 --- a/modules/dnn/src/layers/resize_layer.cpp +++ b/modules/dnn/src/layers/resize_layer.cpp @@ -163,6 +163,33 @@ public: virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::Layer ieLayer(name); + ieLayer.setName(name); + if (interpolation == "nearest") + { + ieLayer.setType("Resample"); + ieLayer.getParameters()["type"] = std::string("caffe.ResampleParameter.NEAREST"); + ieLayer.getParameters()["antialias"] = false; + if (scaleWidth != scaleHeight) + CV_Error(Error::StsNotImplemented, "resample with sw != sh"); + ieLayer.getParameters()["factor"] = 1.0 / scaleWidth; + } + else if (interpolation == "bilinear") + { + ieLayer.setType("Interp"); + ieLayer.getParameters()["pad_beg"] = 0; + ieLayer.getParameters()["pad_end"] = 0; + ieLayer.getParameters()["align_corners"] = false; + } + else + CV_Error(Error::StsNotImplemented, "Unsupported interpolation: " + interpolation); + ieLayer.getParameters()["width"] = outWidth; + ieLayer.getParameters()["height"] = outHeight; + ieLayer.setInputPorts(std::vector(1)); + ieLayer.setOutputPorts(std::vector(1)); + return Ptr(new InfEngineBackendNode(ieLayer)); +#else InferenceEngine::LayerParams lp; lp.name = name; lp.precision = InferenceEngine::Precision::FP32; @@ -187,6 +214,7 @@ public: ieLayer->params["width"] = cv::format("%d", outWidth); ieLayer->params["height"] = cv::format("%d", outHeight); return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } @@ -247,6 +275,18 @@ public: virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::Layer ieLayer(name); + ieLayer.setName(name); + ieLayer.setType("Interp"); + ieLayer.getParameters()["pad_beg"] = 0; + ieLayer.getParameters()["pad_end"] = 0; + ieLayer.getParameters()["width"] = outWidth; + ieLayer.getParameters()["height"] = outHeight; + ieLayer.setInputPorts(std::vector(1)); + ieLayer.setOutputPorts(std::vector(1)); + return Ptr(new InfEngineBackendNode(ieLayer)); +#else InferenceEngine::LayerParams lp; lp.name = name; lp.type = "Interp"; @@ -256,6 +296,7 @@ public: ieLayer->params["pad_beg"] = "0"; ieLayer->params["pad_end"] = "0"; return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } diff --git a/modules/dnn/src/layers/scale_layer.cpp b/modules/dnn/src/layers/scale_layer.cpp index b217632584..a11fd379a2 100644 --- a/modules/dnn/src/layers/scale_layer.cpp +++ b/modules/dnn/src/layers/scale_layer.cpp @@ -197,6 +197,29 @@ public: virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::ScaleShiftLayer ieLayer(name); + + 
CV_Assert(!blobs.empty()); + const size_t numChannels = blobs[0].total(); + if (hasWeights) + { + ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C)); + } + else + { + auto weights = InferenceEngine::make_shared_blob(InferenceEngine::Precision::FP32, + {numChannels}); + weights->allocate(); + + std::vector ones(numChannels, 1); + weights->set(ones); + ieLayer.setWeights(weights); + } + if (hasBias) + ieLayer.setBiases(wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C)); + return Ptr(new InfEngineBackendNode(ieLayer)); +#else InferenceEngine::LayerParams lp; lp.name = name; lp.type = "ScaleShift"; @@ -223,6 +246,7 @@ public: ieLayer->_biases = wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C); return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } diff --git a/modules/dnn/src/layers/slice_layer.cpp b/modules/dnn/src/layers/slice_layer.cpp index 66f9aea440..0821979376 100644 --- a/modules/dnn/src/layers/slice_layer.cpp +++ b/modules/dnn/src/layers/slice_layer.cpp @@ -110,8 +110,15 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { - return backendId == DNN_BACKEND_OPENCV || - (backendId == DNN_BACKEND_INFERENCE_ENGINE && sliceRanges.size() == 1 && sliceRanges[0].size() == 4); +#ifdef HAVE_INF_ENGINE + if (backendId == DNN_BACKEND_INFERENCE_ENGINE) + { + return INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5) && + sliceRanges.size() == 1 && sliceRanges[0].size() == 4; + } + else +#endif + return backendId == DNN_BACKEND_OPENCV; } bool getMemoryShapes(const std::vector &inputs, @@ -254,9 +261,10 @@ public: } } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5) InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); InferenceEngine::LayerParams lp; lp.name = name; @@ -286,10 +294,11 @@ public: ieLayer->dim.push_back(sliceRanges[0][i].end - sliceRanges[0][i].start); } return Ptr(new InfEngineBackendNode(ieLayer)); - -#endif // HAVE_INF_ENGINE +#else return Ptr(); +#endif // IE < R5 } +#endif }; Ptr SliceLayer::create(const LayerParams& params) diff --git a/modules/dnn/src/layers/softmax_layer.cpp b/modules/dnn/src/layers/softmax_layer.cpp index 5d440c0c96..7bd2d0c20a 100644 --- a/modules/dnn/src/layers/softmax_layer.cpp +++ b/modules/dnn/src/layers/softmax_layer.cpp @@ -312,6 +312,13 @@ public: virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); + + InferenceEngine::Builder::SoftMaxLayer ieLayer(name); + ieLayer.setAxis(clamp(axisRaw, input->dims.size())); + return Ptr(new InfEngineBackendNode(ieLayer)); +#else InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); InferenceEngine::LayerParams lp; @@ -321,6 +328,7 @@ public: std::shared_ptr ieLayer(new InferenceEngine::SoftMaxLayer(lp)); ieLayer->axis = clamp(axisRaw, input->dims.size()); return Ptr(new InfEngineBackendNode(ieLayer)); +#endif #endif // HAVE_INF_ENGINE return Ptr(); } diff --git a/modules/dnn/src/op_inf_engine.cpp b/modules/dnn/src/op_inf_engine.cpp index 8bab885014..658ffd0a2e 100644 --- a/modules/dnn/src/op_inf_engine.cpp +++ b/modules/dnn/src/op_inf_engine.cpp @@ -18,6 +18,10 @@ namespace cv { namespace dnn { #ifdef HAVE_INF_ENGINE +#if 
INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) +InfEngineBackendNode::InfEngineBackendNode(const InferenceEngine::Builder::Layer& _layer) + : BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(_layer) {} +#else InfEngineBackendNode::InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& _layer) : BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(_layer) {} @@ -40,6 +44,7 @@ void InfEngineBackendNode::connect(std::vector >& inputs, layer->outData[0] = dataPtr; dataPtr->creatorLayer = InferenceEngine::CNNLayerWeakPtr(layer); } +#endif static std::vector > infEngineWrappers(const std::vector >& ptrs) @@ -54,6 +59,129 @@ infEngineWrappers(const std::vector >& ptrs) return wrappers; } +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + +InfEngineBackendNet::InfEngineBackendNet() : netBuilder("") +{ + hasNetOwner = false; + targetDevice = InferenceEngine::TargetDevice::eCPU; +} + +InfEngineBackendNet::InfEngineBackendNet(InferenceEngine::CNNNetwork& net) : netBuilder(""), cnn(net) +{ + hasNetOwner = true; + targetDevice = InferenceEngine::TargetDevice::eCPU; +} + +void InfEngineBackendNet::connect(const std::vector >& inputs, + const std::vector >& outputs, + const std::string& layerName) +{ + std::vector > inpWrappers = infEngineWrappers(inputs); + std::map::iterator it = layers.find(layerName); + CV_Assert(it != layers.end()); + + const int layerId = it->second; + for (int i = 0; i < inpWrappers.size(); ++i) + { + const auto& inp = inpWrappers[i]; + const std::string& inpName = inp->dataPtr->name; + int inpId; + it = layers.find(inpName); + if (it == layers.end()) + { + InferenceEngine::Builder::InputLayer inpLayer(inpName); + + std::vector shape(inp->blob->dims()); + std::reverse(shape.begin(), shape.end()); + + inpLayer.setPort(InferenceEngine::Port(shape)); + inpId = netBuilder.addLayer(inpLayer); + + layers.insert({inpName, inpId}); + } + else + inpId = it->second; + + netBuilder.connect(inpId, {layerId, i}); + unconnectedLayersIds.erase(inpId); + } + CV_Assert(!outputs.empty()); + InferenceEngine::DataPtr dataPtr = infEngineDataNode(outputs[0]); + dataPtr->name = layerName; +} + +void InfEngineBackendNet::init(int targetId) +{ + if (!hasNetOwner) + { + CV_Assert(!unconnectedLayersIds.empty()); + for (int id : unconnectedLayersIds) + { + InferenceEngine::Builder::OutputLayer outLayer("myconv1"); + netBuilder.addLayer({id}, outLayer); + } + cnn = InferenceEngine::CNNNetwork(InferenceEngine::Builder::convertToICNNNetwork(netBuilder.build())); + } + + switch (targetId) + { + case DNN_TARGET_CPU: + targetDevice = InferenceEngine::TargetDevice::eCPU; + break; + case DNN_TARGET_OPENCL: case DNN_TARGET_OPENCL_FP16: + targetDevice = InferenceEngine::TargetDevice::eGPU; + break; + case DNN_TARGET_MYRIAD: + targetDevice = InferenceEngine::TargetDevice::eMYRIAD; + break; + case DNN_TARGET_FPGA: + targetDevice = InferenceEngine::TargetDevice::eFPGA; + break; + default: + CV_Error(Error::StsError, format("Unknown target identifier: %d", targetId)); + } + + for (const auto& name : requestedOutputs) + { + cnn.addOutput(name); + } + + for (const auto& it : cnn.getInputsInfo()) + { + const std::string& name = it.first; + auto blobIt = allBlobs.find(name); + CV_Assert(blobIt != allBlobs.end()); + inpBlobs[name] = blobIt->second; + it.second->setPrecision(blobIt->second->precision()); + } + for (const auto& it : cnn.getOutputsInfo()) + { + const std::string& name = it.first; + auto blobIt = allBlobs.find(name); + CV_Assert(blobIt != allBlobs.end()); + outBlobs[name] = blobIt->second; + 
it.second->setPrecision(blobIt->second->precision());  // Should always be FP32
+    }
+
+    initPlugin(cnn);
+}
+
+void InfEngineBackendNet::addLayer(const InferenceEngine::Builder::Layer& layer)
+{
+    int id = netBuilder.addLayer(layer);
+    const std::string& layerName = layer.getName();
+    CV_Assert(layers.insert({layerName, id}).second);
+    unconnectedLayersIds.insert(id);
+}
+
+void InfEngineBackendNet::addOutput(const std::string& name)
+{
+    requestedOutputs.push_back(name);
+}
+
+#endif // IE >= R5
+
 static InferenceEngine::Layout estimateLayout(const Mat& m)
 {
     if (m.dims == 4)
@@ -148,6 +276,7 @@ void InfEngineBackendWrapper::setHostDirty()
 {
 }

+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5)
 InfEngineBackendNet::InfEngineBackendNet()
 {
     targetDevice = InferenceEngine::TargetDevice::eCPU;
@@ -491,6 +620,8 @@ void InfEngineBackendNet::init(int targetId)
     initPlugin(*this);
 }

+#endif // IE < R5
+
 static std::map<InferenceEngine::TargetDevice, InferenceEngine::InferenceEnginePluginPtr> sharedPlugins;

 void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
@@ -566,7 +697,11 @@ void InfEngineBackendNet::addBlobs(const std::vector<Ptr<BackendWrapper> >& ptrs)
     auto wrappers = infEngineWrappers(ptrs);
     for (const auto& wrapper : wrappers)
     {
-        allBlobs.insert({wrapper->dataPtr->name, wrapper->blob});
+        std::string name = wrapper->dataPtr->name;
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+        name = name.empty() ? "id1" : name;  // TODO: drop the magic input name.
+#endif
+        allBlobs.insert({name, wrapper->blob});
     }
 }

diff --git a/modules/dnn/src/op_inf_engine.hpp b/modules/dnn/src/op_inf_engine.hpp
index a1144b4b53..122de51659 100644
--- a/modules/dnn/src/op_inf_engine.hpp
+++ b/modules/dnn/src/op_inf_engine.hpp
@@ -35,6 +35,11 @@
 #define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
 #define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
+#define INF_ENGINE_VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) < ((ver) / 10000))
+
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+#include <ie_builders.hpp>
+#endif

 #endif  // HAVE_INF_ENGINE

@@ -42,6 +47,7 @@ namespace cv { namespace dnn {

 #ifdef HAVE_INF_ENGINE

+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5)
 class InfEngineBackendNet : public InferenceEngine::ICNNNetwork
 {
 public:
@@ -146,17 +152,75 @@ private:
     void initPlugin(InferenceEngine::ICNNNetwork& net);
 };

+#else // IE < R5
+
+class InfEngineBackendNet
+{
+public:
+    InfEngineBackendNet();
+
+    InfEngineBackendNet(InferenceEngine::CNNNetwork& net);
+
+    void addLayer(const InferenceEngine::Builder::Layer& layer);
+
+    void addOutput(const std::string& name);
+
+    void connect(const std::vector<Ptr<BackendWrapper> >& inputs,
+                 const std::vector<Ptr<BackendWrapper> >& outputs,
+                 const std::string& layerName);
+
+    bool isInitialized();
+
+    void init(int targetId);
+
+    void forward();
+
+    void initPlugin(InferenceEngine::ICNNNetwork& net);
+
+    void addBlobs(const std::vector<Ptr<BackendWrapper> >& ptrs);
+
+private:
+    InferenceEngine::Builder::Network netBuilder;
+
+    InferenceEngine::InferenceEnginePluginPtr enginePtr;
+    InferenceEngine::InferencePlugin plugin;
+    InferenceEngine::ExecutableNetwork netExec;
+    InferenceEngine::InferRequest infRequest;
+    InferenceEngine::BlobMap allBlobs;
+    InferenceEngine::BlobMap inpBlobs;
+    InferenceEngine::BlobMap outBlobs;
+    InferenceEngine::TargetDevice targetDevice;
+
+    InferenceEngine::CNNNetwork cnn;
+    bool hasNetOwner;
+
+    std::map<std::string, int> layers;
+    std::vector<std::string> requestedOutputs;
+
+    std::set<int> unconnectedLayersIds;
+};
+#endif // IE < R5
+
 class InfEngineBackendNode : public BackendNode
 {
 public:
+#if
INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InfEngineBackendNode(const InferenceEngine::Builder::Layer& layer); +#else InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& layer); +#endif void connect(std::vector >& inputs, std::vector >& outputs); - InferenceEngine::CNNLayerPtr layer; // Inference Engine network object that allows to obtain the outputs of this layer. +#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::Layer layer; Ptr net; +#else + InferenceEngine::CNNLayerPtr layer; + Ptr net; +#endif }; class InfEngineBackendWrapper : public BackendWrapper diff --git a/modules/dnn/test/test_backends.cpp b/modules/dnn/test/test_backends.cpp index 75591e14e6..1d97cfc088 100644 --- a/modules/dnn/test/test_backends.cpp +++ b/modules/dnn/test/test_backends.cpp @@ -180,7 +180,7 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_v2_TensorFlow) throw SkipTestException(""); Mat sample = imread(findDataFile("dnn/street.png", false)); Mat inp = blobFromImage(sample, 1.0f, Size(300, 300), Scalar(), false); - float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.013 : 0.0; + float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.013 : 2e-5; float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.062 : 0.0; processNet("dnn/ssd_mobilenet_v2_coco_2018_03_29.pb", "dnn/ssd_mobilenet_v2_coco_2018_03_29.pbtxt", inp, "detection_out", "", l1, lInf, 0.25); @@ -288,7 +288,7 @@ TEST_P(DNNTestNetwork, FastNeuralStyle_eccv16) Mat inp = blobFromImage(img, 1.0, Size(320, 240), Scalar(103.939, 116.779, 123.68), false, false); // Output image has values in range [-143.526, 148.539]. float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.3 : 4e-5; - float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 7.0 : 2e-3; + float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 
diff --git a/modules/dnn/test/test_backends.cpp b/modules/dnn/test/test_backends.cpp
index 75591e14e6..1d97cfc088 100644
--- a/modules/dnn/test/test_backends.cpp
+++ b/modules/dnn/test/test_backends.cpp
@@ -180,7 +180,7 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_v2_TensorFlow)
         throw SkipTestException("");
     Mat sample = imread(findDataFile("dnn/street.png", false));
     Mat inp = blobFromImage(sample, 1.0f, Size(300, 300), Scalar(), false);
-    float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.013 : 0.0;
+    float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.013 : 2e-5;
     float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.062 : 0.0;
     processNet("dnn/ssd_mobilenet_v2_coco_2018_03_29.pb", "dnn/ssd_mobilenet_v2_coco_2018_03_29.pbtxt",
                inp, "detection_out", "", l1, lInf, 0.25);
@@ -288,7 +288,7 @@ TEST_P(DNNTestNetwork, FastNeuralStyle_eccv16)
     Mat inp = blobFromImage(img, 1.0, Size(320, 240), Scalar(103.939, 116.779, 123.68), false, false);
     // Output image has values in range [-143.526, 148.539].
     float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.3 : 4e-5;
-    float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 7.0 : 2e-3;
+    float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 7.28 : 2e-3;
     processNet("dnn/fast_neural_style_eccv16_starry_night.t7", "", inp, "", "", l1, lInf);
 }
diff --git a/modules/dnn/test/test_darknet_importer.cpp b/modules/dnn/test/test_darknet_importer.cpp
index 5d41b4b916..d7c14f2714 100644
--- a/modules/dnn/test/test_darknet_importer.cpp
+++ b/modules/dnn/test/test_darknet_importer.cpp
@@ -306,7 +306,7 @@ TEST_P(Test_Darknet_nets, TinyYoloVoc)
     // batch size 1
     testDarknetModel(config_file, weights_file, ref.rowRange(0, 2), scoreDiff, iouDiff);
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018040000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_MYRIAD)
 #endif
     // batch size 2
diff --git a/modules/dnn/test/test_halide_layers.cpp b/modules/dnn/test/test_halide_layers.cpp
index 468953fe7e..9cbfb0c402 100644
--- a/modules/dnn/test/test_halide_layers.cpp
+++ b/modules/dnn/test/test_halide_layers.cpp
@@ -163,7 +163,7 @@ TEST_P(Deconvolution, Accuracy)
     bool hasBias = get<6>(GetParam());
     Backend backendId = get<0>(get<7>(GetParam()));
     Target targetId = get<1>(get<7>(GetParam()));
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_CPU &&
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && (targetId == DNN_TARGET_CPU || targetId == DNN_TARGET_MYRIAD) &&
         dilation.width == 2 && dilation.height == 2)
         throw SkipTestException("");
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
@@ -466,6 +466,7 @@ void testInPlaceActivation(LayerParams& lp, Backend backendId, Target targetId)
     pool.set("stride_w", 2);
     pool.set("stride_h", 2);
     pool.type = "Pooling";
+    pool.name = "ave_pool";
 
     Net net;
     int poolId = net.addLayer(pool.name, pool.type, pool);
diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp
index 4ccefd28a9..62e625f03c 100644
--- a/modules/dnn/test/test_layers.cpp
+++ b/modules/dnn/test/test_layers.cpp
@@ -295,10 +295,6 @@ TEST_P(Test_Caffe_layers, Eltwise)
 {
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
         throw SkipTestException("");
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL)
-        throw SkipTestException("Test is disabled for OpenVINO 2018R5");
-#endif
     testLayerUsingCaffeModels("layer_eltwise");
 }
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index deccbfb0eb..acdd66631c 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -351,6 +351,10 @@ TEST_P(Test_ONNX_nets, LResNet100E_IR)
         l1 = 0.009;
         lInf = 0.035;
     }
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_CPU) {
+        l1 = 4.5e-5;
+        lInf = 1.9e-4;
+    }
     testONNXModels("LResNet100E_IR", pb, l1, lInf);
 }
 
@@ -366,6 +370,10 @@ TEST_P(Test_ONNX_nets, Emotion_ferplus)
         l1 = 0.021;
         lInf = 0.034;
     }
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_CPU || target == DNN_TARGET_OPENCL)) {
+        l1 = 2.4e-4;
+        lInf = 6e-4;
+    }
     testONNXModels("emotion_ferplus", pb, l1, lInf);
 }
 
@@ -389,7 +397,7 @@ TEST_P(Test_ONNX_nets, Inception_v1)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        throw SkipTestException("");
+        throw SkipTestException("Test is disabled for OpenVINO 2018R5");
 #endif
     testONNXModels("inception_v1", pb);
 }
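The test guards above compare INF_ENGINE_RELEASE against full release numbers (e.g. 2018050000 for 2018R5), while the INF_ENGINE_VER_MAJOR_* macros added in op_inf_engine.hpp divide by 10000 first so that patch-level digits are ignored. A self-contained sketch of that arithmetic; the two comparison macros are copied from the patch, and the literals and main() wrapper are only for illustration:

#include <cstdio>

#define INF_ENGINE_RELEASE 2018050000          // pretend we build against 2018R5
#define INF_ENGINE_RELEASE_2018R5 2018050000
#define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) < ((ver) / 10000))

int main()
{
    // 2018050000 / 10000 == 201805, so only the year and major release are compared.
    std::printf("GE(2018R5): %d\n", INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5));  // prints 1
    std::printf("LT(2018R5): %d\n", INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5));  // prints 0
    return 0;
}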
diff --git a/modules/dnn/test/test_tf_importer.cpp b/modules/dnn/test/test_tf_importer.cpp
index ce4997cd4e..cbf9782a44 100644
--- a/modules/dnn/test/test_tf_importer.cpp
+++ b/modules/dnn/test/test_tf_importer.cpp
@@ -351,8 +351,8 @@ TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD)
     Mat out = net.forward();
 
     Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/ssd_mobilenet_v1_coco_2017_11_17.detection_out.npy"));
-    float scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 7e-3 : 1e-5;
-    float iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0098 : 1e-3;
+    float scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 7e-3 : 1.5e-5;
+    float iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.012 : 1e-3;
     normAssertDetections(ref, out, "", 0.3, scoreDiff, iouDiff);
 }
 
@@ -366,6 +366,7 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
         (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
         throw SkipTestException("");
 
+    double scoresDiff = backend == DNN_BACKEND_INFERENCE_ENGINE ? 2.9e-5 : 1e-5;
     for (int i = 0; i < 2; ++i)
     {
         std::string proto = findDataFile("dnn/" + names[i] + ".pbtxt", false);
@@ -381,7 +382,7 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
         Mat out = net.forward();
 
         Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/" + names[i] + ".detection_out.npy"));
-        normAssertDetections(ref, out, names[i].c_str(), 0.3);
+        normAssertDetections(ref, out, names[i].c_str(), 0.3, scoresDiff);
     }
 }
 
@@ -406,7 +407,7 @@ TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD_PPN)
     net.setInput(blob);
     Mat out = net.forward();
 
-    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.011 : default_l1;
+    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.011 : 1.1e-5;
     double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.021 : default_lInf;
     normAssertDetections(ref, out, "", 0.4, scoreDiff, iouDiff);
 }
@@ -568,10 +569,6 @@ TEST_P(Test_TensorFlow_layers, slice)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
         (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
         throw SkipTestException("");
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        throw SkipTestException("");
-#endif
     runTensorFlowNet("slice_4d");
 }
diff --git a/modules/dnn/test/test_torch_importer.cpp b/modules/dnn/test/test_torch_importer.cpp
index c63cf26e45..046bd65b86 100644
--- a/modules/dnn/test/test_torch_importer.cpp
+++ b/modules/dnn/test/test_torch_importer.cpp
@@ -260,6 +260,11 @@ TEST_P(Test_Torch_layers, run_paralel)
 TEST_P(Test_Torch_layers, net_residual)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL ||
+                                                    target == DNN_TARGET_OPENCL_FP16))
+        throw SkipTestException("Test is disabled for OpenVINO 2018R5");
+#endif
     runTorchNet("net_residual", "", false, true);
 }
 
@@ -390,10 +395,6 @@ TEST_P(Test_Torch_nets, ENet_accuracy)
 // -model models/instance_norm/feathers.t7
 TEST_P(Test_Torch_nets, FastNeuralStyle_accuracy)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        throw SkipTestException("");
-#endif
     checkBackend();
     std::string models[] = {"dnn/fast_neural_style_eccv16_starry_night.t7",
                             "dnn/fast_neural_style_instance_norm_feathers.t7"};