From eba696a41e3c3bd7e35c408cd74a4ec7656b76f9 Mon Sep 17 00:00:00 2001 From: Dmitry Kurtaev Date: Fri, 14 Jun 2019 18:17:02 +0300 Subject: [PATCH] Merge pull request #14792 from dkurt:dnn_ie_min_version_r5 * Remove Inference Engine 2018R3 and 2018R4 * Fix 2018R5 --- .../dnn/include/opencv2/dnn/all_layers.hpp | 5 +- modules/dnn/src/dnn.cpp | 97 ----- modules/dnn/src/layers/batch_norm_layer.cpp | 19 +- modules/dnn/src/layers/blank_layer.cpp | 21 +- modules/dnn/src/layers/concat_layer.cpp | 16 +- modules/dnn/src/layers/convolution_layer.cpp | 100 +---- modules/dnn/src/layers/crop_layer.cpp | 205 --------- .../dnn/src/layers/detection_output_layer.cpp | 25 +- modules/dnn/src/layers/elementwise_layers.cpp | 114 +---- modules/dnn/src/layers/eltwise_layer.cpp | 23 +- modules/dnn/src/layers/flatten_layer.cpp | 16 +- .../dnn/src/layers/fully_connected_layer.cpp | 22 +- modules/dnn/src/layers/lrn_layer.cpp | 21 +- modules/dnn/src/layers/mvn_layer.cpp | 21 +- .../dnn/src/layers/normalize_bbox_layer.cpp | 51 +-- modules/dnn/src/layers/padding_layer.cpp | 5 +- modules/dnn/src/layers/permute_layer.cpp | 20 +- modules/dnn/src/layers/pooling_layer.cpp | 66 +-- modules/dnn/src/layers/prior_box_layer.cpp | 63 +-- modules/dnn/src/layers/proposal_layer.cpp | 33 +- modules/dnn/src/layers/reorg_layer.cpp | 15 +- modules/dnn/src/layers/reshape_layer.cpp | 23 +- modules/dnn/src/layers/resize_layer.cpp | 47 +-- modules/dnn/src/layers/scale_layer.cpp | 34 +- modules/dnn/src/layers/slice_layer.cpp | 136 ++++-- modules/dnn/src/layers/softmax_layer.cpp | 17 +- modules/dnn/src/op_inf_engine.cpp | 391 +----------------- modules/dnn/src/op_inf_engine.hpp | 121 ------ modules/dnn/test/test_backends.cpp | 7 +- modules/dnn/test/test_darknet_importer.cpp | 8 - modules/dnn/test/test_halide_layers.cpp | 14 +- modules/dnn/test/test_layers.cpp | 12 +- modules/dnn/test/test_onnx_importer.cpp | 24 +- modules/dnn/test/test_tf_importer.cpp | 24 +- modules/dnn/test/test_torch_importer.cpp | 8 +- 35 files changed, 216 insertions(+), 1608 deletions(-) delete mode 100644 modules/dnn/src/layers/crop_layer.cpp diff --git a/modules/dnn/include/opencv2/dnn/all_layers.hpp b/modules/dnn/include/opencv2/dnn/all_layers.hpp index c0ed2e028c..868a8f06d6 100644 --- a/modules/dnn/include/opencv2/dnn/all_layers.hpp +++ b/modules/dnn/include/opencv2/dnn/all_layers.hpp @@ -492,10 +492,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN class CV_EXPORTS CropLayer : public Layer { public: - int startAxis; - std::vector offset; - - static Ptr create(const LayerParams ¶ms); + static Ptr create(const LayerParams ¶ms); }; class CV_EXPORTS EltwiseLayer : public Layer diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp index cdda56c1ed..d32c26f9db 100644 --- a/modules/dnn/src/dnn.cpp +++ b/modules/dnn/src/dnn.cpp @@ -729,20 +729,9 @@ struct DataLayer : public Layer } biases->set(biasesVec); -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name); addConstantData("weights", weights, ieLayer); addConstantData("biases", biases, ieLayer); -#else - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "ScaleShift"; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::ScaleShiftLayer(lp)); - - ieLayer->_weights = weights; - ieLayer->_biases = biases; -#endif return Ptr(new InfEngineBackendNode(ieLayer)); #endif // HAVE_INF_ENGINE return Ptr(); @@ -1459,11 +1448,7 @@ struct Net::Impl if (layerNet != ieInpNode->net) { // layerNet 
is empty or nodes are from different graphs. -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) ieInpNode->net->addOutput(ieInpNode->layer.getName()); -#else - ieInpNode->net->addOutput(ieInpNode->layer->name); -#endif } } } @@ -1579,25 +1564,6 @@ struct Net::Impl } } -#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5) - // The same blobs wrappers cannot be shared between two Inference Engine - // networks because of explicit references between layers and blobs. - // So we need to rewrap all the external blobs. - for (int i = 0; i < ld.inputBlobsId.size(); ++i) - { - LayerPin inPin = ld.inputBlobsId[i]; - auto it = netBlobsWrappers.find(inPin); - if (it == netBlobsWrappers.end()) - { - ld.inputBlobsWrappers[i] = InfEngineBackendWrapper::create(ld.inputBlobsWrappers[i]); - netBlobsWrappers[inPin] = ld.inputBlobsWrappers[i]; - } - else - ld.inputBlobsWrappers[i] = it->second; - } - netBlobsWrappers[LayerPin(ld.id, 0)] = ld.outputBlobsWrappers[0]; -#endif // IE < R5 - Ptr node; if (!net.empty()) { @@ -1628,7 +1594,6 @@ struct Net::Impl ieNode->net = net; // Convert weights in FP16 for specific targets. -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) if ((preferableTarget == DNN_TARGET_OPENCL_FP16 || preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_FPGA) && !fused) @@ -1670,47 +1635,6 @@ struct Net::Impl net->addBlobs(ld.inputBlobsWrappers); net->addBlobs(ld.outputBlobsWrappers); addInfEngineNetOutputs(ld); - -#else // IE >= R5 - - auto weightableLayer = std::dynamic_pointer_cast(ieNode->layer); - if ((preferableTarget == DNN_TARGET_OPENCL_FP16 || - preferableTarget == DNN_TARGET_MYRIAD || - preferableTarget == DNN_TARGET_FPGA) && !fused) - { - ieNode->layer->precision = InferenceEngine::Precision::FP16; - if (weightableLayer) - { - if (weightableLayer->_weights) - weightableLayer->_weights = convertFp16(weightableLayer->_weights); - if (weightableLayer->_biases) - weightableLayer->_biases = convertFp16(weightableLayer->_biases); - } - else - { - for (const auto& weights : {"weights", "biases"}) - { - auto it = ieNode->layer->blobs.find(weights); - if (it != ieNode->layer->blobs.end()) - it->second = convertFp16(it->second); - } - } - } - if (weightableLayer) - { - if (weightableLayer->_weights) - weightableLayer->blobs["weights"] = weightableLayer->_weights; - if (weightableLayer->_biases) - weightableLayer->blobs["biases"] = weightableLayer->_biases; - } - ieNode->connect(ld.inputBlobsWrappers, ld.outputBlobsWrappers); - net->addBlobs(ld.inputBlobsWrappers); - net->addBlobs(ld.outputBlobsWrappers); - - if (!fused) - net->addLayer(ieNode->layer); - addInfEngineNetOutputs(ld); -#endif // IE >= R5 } // Initialize all networks. @@ -1732,23 +1656,6 @@ struct Net::Impl if (!ieNode->net->isInitialized()) { -#if INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2018R4) - // For networks which is built in runtime we need to specify a - // version of it's hyperparameters. 
- std::string versionTrigger = "" - "" - "" - "" - "" - "1" - "" - "" - "" - "" - ""; - InferenceEngine::CNNNetReader reader; - reader.ReadNetwork(versionTrigger.data(), versionTrigger.size()); -#endif ieNode->net->init(preferableTarget); ld.skip = false; } @@ -2617,11 +2524,7 @@ Net Net::readFromModelOptimizer(const String& xml, const String& bin) Net cvNet; cvNet.setInputsNames(inputsNames); -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) Ptr backendNode(new InfEngineBackendNode(InferenceEngine::Builder::Layer(""))); -#else - Ptr backendNode(new InfEngineBackendNode(0)); -#endif backendNode->net = Ptr(new InfEngineBackendNet(ieNet)); for (auto& it : ieNet.getOutputsInfo()) { diff --git a/modules/dnn/src/layers/batch_norm_layer.cpp b/modules/dnn/src/layers/batch_norm_layer.cpp index b2fd75aef1..a40260884d 100644 --- a/modules/dnn/src/layers/batch_norm_layer.cpp +++ b/modules/dnn/src/layers/batch_norm_layer.cpp @@ -351,31 +351,16 @@ public: } #endif // HAVE_HALIDE +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name); const size_t numChannels = weights_.total(); addConstantData("weights", wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C), ieLayer); addConstantData("biases", wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C), ieLayer); return Ptr(new InfEngineBackendNode(ieLayer)); -#else - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "ScaleShift"; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::ScaleShiftLayer(lp)); - - const size_t numChannels = weights_.total(); - ieLayer->_weights = wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C); - ieLayer->_biases = wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C); - - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE virtual int64 getFLOPS(const std::vector &inputs, const std::vector &outputs) const CV_OVERRIDE diff --git a/modules/dnn/src/layers/blank_layer.cpp b/modules/dnn/src/layers/blank_layer.cpp index 7907047067..88654623ac 100644 --- a/modules/dnn/src/layers/blank_layer.cpp +++ b/modules/dnn/src/layers/blank_layer.cpp @@ -107,12 +107,12 @@ public: inputs[i].copyTo(outputs[i]); } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); CV_Assert(!input->dims.empty()); -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::Layer ieLayer(name); ieLayer.setName(name); if (preferableTarget == DNN_TARGET_MYRIAD) @@ -122,7 +122,7 @@ public: else { ieLayer.setType("Split"); - ieLayer.getParameters()["axis"] = (size_t)0; + ieLayer.getParameters()["axis"] = input->dims.size() - 1; ieLayer.getParameters()["out_sizes"] = input->dims[0]; } std::vector shape(input->dims); @@ -130,21 +130,8 @@ public: ieLayer.setInputPorts({InferenceEngine::Port(shape)}); ieLayer.setOutputPorts(std::vector(1)); return Ptr(new InfEngineBackendNode(ieLayer)); -#else - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "Split"; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::SplitLayer(lp)); -#if 
INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3) - ieLayer->params["axis"] = format("%d", (int)input->dims.size() - 1); - ieLayer->params["out_sizes"] = format("%d", (int)input->dims[0]); -#endif - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE }; Ptr BlankLayer::create(const LayerParams& params) diff --git a/modules/dnn/src/layers/concat_layer.cpp b/modules/dnn/src/layers/concat_layer.cpp index 90def743a4..d7bef15f1c 100644 --- a/modules/dnn/src/layers/concat_layer.cpp +++ b/modules/dnn/src/layers/concat_layer.cpp @@ -298,29 +298,17 @@ public: return Ptr(); } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); InferenceEngine::Builder::ConcatLayer ieLayer(name); ieLayer.setAxis(clamp(axis, input->dims.size())); ieLayer.setInputPorts(std::vector(inputs.size())); return Ptr(new InfEngineBackendNode(ieLayer)); -#else - InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "Concat"; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::ConcatLayer(lp)); - ieLayer->_axis = clamp(axis, input->dims.size()); - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE }; Ptr ConcatLayer::create(const LayerParams& params) diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp index 77778849d6..ec17d14879 100644 --- a/modules/dnn/src/layers/convolution_layer.cpp +++ b/modules/dnn/src/layers/convolution_layer.cpp @@ -252,8 +252,7 @@ public: { if (kernel_size.size() == 3) return preferableTarget == DNN_TARGET_CPU; - return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R4) || - (preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height); + return (preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height); } else #endif @@ -460,9 +459,9 @@ public: return Ptr(); } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector > &inputs) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); CV_Assert(input->dims.size() == 4 || input->dims.size() == 5); @@ -501,7 +500,6 @@ public: ieBiases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C); } -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::ConvolutionLayer ieLayer(name); ieLayer.setKernel(kernel_size); @@ -521,51 +519,8 @@ public: l.getParameters()["auto_pad"] = padMode == "VALID" ? 
std::string("valid") : std::string("same_upper"); return Ptr(new InfEngineBackendNode(l)); -#else - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "Convolution"; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::ConvolutionLayer(lp)); - -#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3) - ieLayer->_kernel.insert(InferenceEngine::X_AXIS, kernel.width); - ieLayer->_kernel.insert(InferenceEngine::Y_AXIS, kernel.height); - ieLayer->_stride.insert(InferenceEngine::X_AXIS, stride.width); - ieLayer->_stride.insert(InferenceEngine::Y_AXIS, stride.height); - ieLayer->_padding.insert(InferenceEngine::X_AXIS, pad.width); - ieLayer->_padding.insert(InferenceEngine::Y_AXIS, pad.height); - ieLayer->_pads_end.insert(InferenceEngine::X_AXIS, pad.width); - ieLayer->_pads_end.insert(InferenceEngine::Y_AXIS, pad.height); - ieLayer->_dilation.insert(InferenceEngine::X_AXIS, dilation.width); - ieLayer->_dilation.insert(InferenceEngine::Y_AXIS, dilation.height); - ieLayer->params["output"] = format("%d", outCn); - ieLayer->params["kernel"] = format("%d,%d,%d,%d", outCn, inpGroupCn, kernel.height, kernel.width); - ieLayer->params["pads_begin"] = format("%d,%d", pad.height, pad.width); - ieLayer->params["pads_end"] = format("%d,%d", pad.height, pad.width); - ieLayer->params["strides"] = format("%d,%d", stride.height, stride.width); - ieLayer->params["dilations"] = format("%d,%d", dilation.height, dilation.width); -#else - ieLayer->_kernel_x = kernel.width; - ieLayer->_kernel_y = kernel.height; - ieLayer->_stride_x = stride.width; - ieLayer->_stride_y = stride.height; - ieLayer->_padding_x = pad.width; - ieLayer->_padding_y = pad.height; - ieLayer->_dilation_x = dilation.width; - ieLayer->_dilation_y = dilation.height; -#endif - ieLayer->_out_depth = outCn; - ieLayer->_group = group; - - ieLayer->_weights = ieWeights; - if (ieBiases) - ieLayer->_biases = ieBiases; - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE class ParallelConv : public cv::ParallelLoopBody { @@ -1184,7 +1139,7 @@ public: if (kernel_size.size() == 3) CV_Error(Error::StsNotImplemented, "Unsupported deconvolution3D layer"); - if (INF_ENGINE_RELEASE >= 2018050000 && (adjustPad.height || adjustPad.width)) + if (adjustPad.height || adjustPad.width) { if (padMode.empty()) { @@ -1793,9 +1748,9 @@ public: return Ptr(); } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector > &) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE auto ieWeights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW); if (fusedWeights) { @@ -1809,7 +1764,6 @@ public: transpose(weightsMat, newWeights); } -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW layout const int group = numOutput / outGroupCn; @@ -1837,50 +1791,8 @@ public: if (hasBias()) addConstantData("biases", wrapToInfEngineBlob(biasesMat, {(size_t)numOutput}, InferenceEngine::Layout::C), l); return Ptr(new InfEngineBackendNode(l)); -#else - const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW layout - const int group = numOutput / outGroupCn; - - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "Deconvolution"; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::DeconvolutionLayer(lp)); - -#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3) - ieLayer->_kernel.insert(InferenceEngine::X_AXIS, kernel.width); - 
ieLayer->_kernel.insert(InferenceEngine::Y_AXIS, kernel.height); - ieLayer->_stride.insert(InferenceEngine::X_AXIS, stride.width); - ieLayer->_stride.insert(InferenceEngine::Y_AXIS, stride.height); - ieLayer->_padding.insert(InferenceEngine::X_AXIS, pad.width); - ieLayer->_padding.insert(InferenceEngine::Y_AXIS, pad.height); - ieLayer->_pads_end.insert(InferenceEngine::X_AXIS, pad.width); - ieLayer->_pads_end.insert(InferenceEngine::Y_AXIS, pad.height); - ieLayer->_dilation.insert(InferenceEngine::X_AXIS, dilation.width); - ieLayer->_dilation.insert(InferenceEngine::Y_AXIS, dilation.height); -#else - ieLayer->_kernel_x = kernel.width; - ieLayer->_kernel_y = kernel.height; - ieLayer->_stride_x = stride.width; - ieLayer->_stride_y = stride.height; - ieLayer->_padding_x = pad.width; - ieLayer->_padding_y = pad.height; - ieLayer->_dilation_x = dilation.width; - ieLayer->_dilation_y = dilation.height; -#endif - ieLayer->_out_depth = numOutput; - ieLayer->_group = group; - - ieLayer->_weights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW); - if (hasBias()) - { - ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C); - } - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE virtual int64 getFLOPS(const std::vector &inputs, const std::vector &outputs) const CV_OVERRIDE diff --git a/modules/dnn/src/layers/crop_layer.cpp b/modules/dnn/src/layers/crop_layer.cpp deleted file mode 100644 index c7cd99c9aa..0000000000 --- a/modules/dnn/src/layers/crop_layer.cpp +++ /dev/null @@ -1,205 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2013, OpenCV Foundation, all rights reserved. -// Copyright (C) 2017, Intel Corporation, all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. 
-// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#include "../precomp.hpp" -#include "../op_inf_engine.hpp" -#include "layers_common.hpp" - -namespace cv -{ -namespace dnn -{ - -class CropLayerImpl CV_FINAL : public CropLayer -{ -public: - CropLayerImpl(const LayerParams& params) - { - setParamsFrom(params); - startAxis = params.get("axis", 2); - const DictValue *paramOffset = params.ptr("offset"); - - if (paramOffset) - { - for (int i = 0; i < paramOffset->size(); i++) - offset.push_back(paramOffset->get(i)); - } - } - - virtual bool supportBackend(int backendId) CV_OVERRIDE - { -#ifdef HAVE_INF_ENGINE - if (backendId == DNN_BACKEND_INFERENCE_ENGINE) - return INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5) && crop_ranges.size() == 4; - else -#endif - return backendId == DNN_BACKEND_OPENCV; - } - - bool getMemoryShapes(const std::vector &inputs, - const int requiredOutputs, - std::vector &outputs, - std::vector &internals) const CV_OVERRIDE - { - CV_Assert(inputs.size() == 2); - - MatShape dstShape = inputs[0]; - int start = clamp(startAxis, dstShape); - for (int i = start; i < dstShape.size(); i++) - { - dstShape[i] = inputs[1][i]; - } - - outputs.resize(1, dstShape); - - return false; - } - - void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE - { - std::vector inputs; - inputs_arr.getMatVector(inputs); - CV_Assert(2 == inputs.size()); - - const Mat &inpBlob = inputs[0]; - const Mat &inpSzBlob = inputs[1]; - - int dims = inpBlob.dims; - int start_axis = clamp(startAxis, dims); - - std::vector offset_final(dims, 0); - if (offset.size() == 1) - { - for (int i = start_axis; i < dims; i++) - offset_final[i] = offset[0]; - } - else if (offset.size() > 1) - { - if ((int)offset.size() != dims - start_axis) - CV_Error(Error::StsBadArg, "number of offset values specified must be " - "equal to the number of dimensions following axis."); - - for (int i = start_axis; i < dims; i++) - offset_final[i] = offset[i - start_axis]; - } - - crop_ranges.resize(dims); - for (int i = 0; i < start_axis; i++) - { - crop_ranges[i] = Range(0, inpBlob.size[i]); - } - for (int i = start_axis; i < dims; i++) - { - if (offset_final[i] < 0 || offset_final[i] + inpSzBlob.size[i] > inpBlob.size[i]) - CV_Error(Error::StsBadArg, "invalid crop parameters or blob sizes"); - - crop_ranges[i] = Range(offset_final[i], offset_final[i] + inpSzBlob.size[i]); - } - } - - void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE - { - CV_TRACE_FUNCTION(); - CV_TRACE_ARG_VALUE(name, "name", name.c_str()); - - std::vector inputs, outputs; - inputs_arr.getMatVector(inputs); - outputs_arr.getMatVector(outputs); - - Mat &input = inputs[0]; - input(&crop_ranges[0]).copyTo(outputs[0]); - } - -#ifdef HAVE_INF_ENGINE - virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE - { -#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5) - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "Crop"; - lp.precision = 
InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::CropLayer(lp)); - - CV_Assert(crop_ranges.size() == 4); - -#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3) - for (int i = 0; i < 4; ++i) - { - ieLayer->axis.push_back(i); - ieLayer->offset.push_back(crop_ranges[i].start); - ieLayer->dim.push_back(crop_ranges[i].end - crop_ranges[i].start); - } -#else - ieLayer->axis.push_back(0); // batch - ieLayer->offset.push_back(crop_ranges[0].start); - ieLayer->dim.push_back(crop_ranges[0].end - crop_ranges[0].start); - - ieLayer->axis.push_back(1); // channels - ieLayer->offset.push_back(crop_ranges[1].start); - ieLayer->dim.push_back(crop_ranges[1].end - crop_ranges[1].start); - - ieLayer->axis.push_back(3); // height - ieLayer->offset.push_back(crop_ranges[2].start); - ieLayer->dim.push_back(crop_ranges[2].end - crop_ranges[2].start); - - ieLayer->axis.push_back(2); // width - ieLayer->offset.push_back(crop_ranges[3].start); - ieLayer->dim.push_back(crop_ranges[3].end - crop_ranges[3].start); -#endif - return Ptr(new InfEngineBackendNode(ieLayer)); -#else - return Ptr(); -#endif // IE < R5 - } -#endif - - std::vector crop_ranges; -}; - - -Ptr CropLayer::create(const LayerParams& params) -{ - return Ptr(new CropLayerImpl(params)); -} - -} -} diff --git a/modules/dnn/src/layers/detection_output_layer.cpp b/modules/dnn/src/layers/detection_output_layer.cpp index 043be0e7b7..fd2bf49794 100644 --- a/modules/dnn/src/layers/detection_output_layer.cpp +++ b/modules/dnn/src/layers/detection_output_layer.cpp @@ -918,10 +918,9 @@ public: } } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::DetectionOutputLayer ieLayer(name); ieLayer.setNumClasses(_numClasses); @@ -939,28 +938,8 @@ public: l.getParameters()["eta"] = std::string("1.0"); return Ptr(new InfEngineBackendNode(l)); -#else - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "DetectionOutput"; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::CNNLayer(lp)); - - ieLayer->params["num_classes"] = format("%d", _numClasses); - ieLayer->params["share_location"] = _shareLocation ? "1" : "0"; - ieLayer->params["background_label_id"] = format("%d", _backgroundLabelId); - ieLayer->params["nms_threshold"] = format("%f", _nmsThreshold); - ieLayer->params["top_k"] = format("%d", _topK); - ieLayer->params["keep_top_k"] = format("%d", _keepTopK); - ieLayer->params["eta"] = "1.0"; - ieLayer->params["confidence_threshold"] = format("%f", _confidenceThreshold); - ieLayer->params["variance_encoded_in_target"] = _varianceEncodedInTarget ? "1" : "0"; - ieLayer->params["code_type"] = "caffe.PriorBoxParameter." 
+ _codeType; - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE }; float util::caffe_box_overlap(const util::NormalizedBBox& a, const util::NormalizedBBox& b) diff --git a/modules/dnn/src/layers/elementwise_layers.cpp b/modules/dnn/src/layers/elementwise_layers.cpp index 86ac26f3cc..762afdd3d0 100644 --- a/modules/dnn/src/layers/elementwise_layers.cpp +++ b/modules/dnn/src/layers/elementwise_layers.cpp @@ -149,22 +149,14 @@ public: return Ptr(); } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::Layer ieLayer = func.initInfEngineBuilderAPI(); ieLayer.setName(this->name); return Ptr(new InfEngineBackendNode(ieLayer)); -#else - InferenceEngine::LayerParams lp; - lp.name = this->name; - lp.precision = InferenceEngine::Precision::FP32; - return Ptr(new InfEngineBackendNode(func.initInfEngine(lp))); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE virtual bool tryFuse(Ptr& top) CV_OVERRIDE { @@ -354,21 +346,10 @@ struct ReLUFunctor #endif // HAVE_HALIDE #ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(slope); } -#else - InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp) - { - lp.type = "ReLU"; - std::shared_ptr ieLayer(new InferenceEngine::ReLULayer(lp)); - ieLayer->negative_slope = slope; - ieLayer->params["negative_slope"] = format("%f", slope); - return ieLayer; - } -#endif #endif // HAVE_INF_ENGINE bool tryFuse(Ptr&) { return false; } @@ -468,23 +449,10 @@ struct ReLU6Functor #endif // HAVE_HALIDE #ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { return InferenceEngine::Builder::ClampLayer("").setMinValue(minValue).setMaxValue(maxValue); } -#else - InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp) - { - lp.type = "Clamp"; - std::shared_ptr ieLayer(new InferenceEngine::ClampLayer(lp)); - ieLayer->min_value = minValue; - ieLayer->max_value = maxValue; - ieLayer->params["min"] = format("%f", minValue); - ieLayer->params["max"] = format("%f", maxValue); - return ieLayer; - } -#endif #endif // HAVE_INF_ENGINE bool tryFuse(Ptr&) { return false; } @@ -553,19 +521,10 @@ struct TanHFunctor #endif // HAVE_HALIDE #ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { return InferenceEngine::Builder::TanHLayer(""); } -#else - InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp) - { - lp.type = "TanH"; - std::shared_ptr ieLayer(new InferenceEngine::CNNLayer(lp)); - return ieLayer; - } -#endif #endif // HAVE_INF_ENGINE bool tryFuse(Ptr&) { return false; } @@ -634,19 +593,10 @@ struct SigmoidFunctor #endif // HAVE_HALIDE #ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { return InferenceEngine::Builder::SigmoidLayer(""); } -#else - InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp) - { - lp.type = "Sigmoid"; - std::shared_ptr ieLayer(new InferenceEngine::CNNLayer(lp)); - return ieLayer; - } -#endif #endif // HAVE_INF_ENGINE bool tryFuse(Ptr&) { 
return false; } @@ -717,18 +667,10 @@ struct ELUFunctor #endif // HAVE_HALIDE #ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { return InferenceEngine::Builder::ELULayer(""); } -#else - InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp) - { - lp.type = "ELU"; - return InferenceEngine::CNNLayerPtr(new InferenceEngine::CNNLayer(lp)); - } -#endif #endif // HAVE_INF_ENGINE bool tryFuse(Ptr&) { return false; } @@ -800,21 +742,10 @@ struct AbsValFunctor #endif // HAVE_HALIDE #ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(-1); } -#else - InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp) - { - lp.type = "ReLU"; - std::shared_ptr ieLayer(new InferenceEngine::ReLULayer(lp)); - ieLayer->negative_slope = -1; - ieLayer->params["negative_slope"] = "-1.0"; - return ieLayer; - } -#endif #endif // HAVE_INF_ENGINE bool tryFuse(Ptr&) { return false; } @@ -862,18 +793,10 @@ struct BNLLFunctor #endif // HAVE_HALIDE #ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { CV_Error(Error::StsNotImplemented, ""); } -#else - InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp) - { - CV_Error(Error::StsNotImplemented, "BNLL"); - return InferenceEngine::CNNLayerPtr(); - } -#endif #endif // HAVE_INF_ENGINE bool tryFuse(Ptr&) { return false; } @@ -978,34 +901,12 @@ struct PowerFunctor #endif // HAVE_HALIDE #ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { return InferenceEngine::Builder::PowerLayer("").setPower(power) .setScale(scale) .setShift(shift); } -#else - InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp) - { - if (power == 1.0f && scale == 1.0f && shift == 0.0f) - { - // It looks like there is a bug in Inference Engine for DNN_TARGET_OPENCL and DNN_TARGET_OPENCL_FP16 - // if power layer do nothing so we replace it to Identity. 
- lp.type = "Split"; - return std::shared_ptr(new InferenceEngine::SplitLayer(lp)); - } - else - { - lp.type = "Power"; - std::shared_ptr ieLayer(new InferenceEngine::PowerLayer(lp)); - ieLayer->power = power; - ieLayer->scale = scale; - ieLayer->offset = shift; - return ieLayer; - } - } -#endif #endif // HAVE_INF_ENGINE bool tryFuse(Ptr& top) @@ -1137,7 +1038,6 @@ struct ChannelsPReLUFunctor #endif // HAVE_HALIDE #ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { InferenceEngine::Builder::Layer l = InferenceEngine::Builder::PReLULayer(""); @@ -1145,16 +1045,6 @@ struct ChannelsPReLUFunctor addConstantData("weights", wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C), l); return l; } -#else - InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp) - { - lp.type = "PReLU"; - std::shared_ptr ieLayer(new InferenceEngine::PReLULayer(lp)); - const size_t numChannels = scale.total(); - ieLayer->_weights = wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C); - return ieLayer; - } -#endif #endif // HAVE_INF_ENGINE bool tryFuse(Ptr&) { return false; } diff --git a/modules/dnn/src/layers/eltwise_layer.cpp b/modules/dnn/src/layers/eltwise_layer.cpp index 6458902fc4..f332c44ede 100644 --- a/modules/dnn/src/layers/eltwise_layer.cpp +++ b/modules/dnn/src/layers/eltwise_layer.cpp @@ -420,10 +420,9 @@ public: return Ptr(); } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::EltwiseLayer ieLayer(name); ieLayer.setInputPorts(std::vector(inputs.size())); @@ -442,26 +441,8 @@ public: l.getParameters()["coeff"] = coeffs; return Ptr(new InfEngineBackendNode(l)); -#else - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "Eltwise"; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::EltwiseLayer(lp)); - ieLayer->coeff = coeffs; - if (op == SUM) - ieLayer->_operation = InferenceEngine::EltwiseLayer::Sum; - else if (op == PROD) - ieLayer->_operation = InferenceEngine::EltwiseLayer::Prod; - else if (op == MAX) - ieLayer->_operation = InferenceEngine::EltwiseLayer::Max; - else - CV_Error(Error::StsNotImplemented, "Unsupported eltwise operation"); - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE virtual int64 getFLOPS(const std::vector &inputs, const std::vector &outputs) const CV_OVERRIDE diff --git a/modules/dnn/src/layers/flatten_layer.cpp b/modules/dnn/src/layers/flatten_layer.cpp index 72f67529fa..f1250e7e3e 100644 --- a/modules/dnn/src/layers/flatten_layer.cpp +++ b/modules/dnn/src/layers/flatten_layer.cpp @@ -162,10 +162,9 @@ public: } } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::Layer ieLayer(name); ieLayer.setName(name); ieLayer.setType("Flatten"); @@ -174,19 +173,8 @@ public: ieLayer.setInputPorts(std::vector(1)); ieLayer.setOutputPorts(std::vector(1)); return Ptr(new InfEngineBackendNode(ieLayer)); -#else - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "Flatten"; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::CNNLayer(lp)); - ieLayer->params["axis"] = 
format("%d", _startAxis); - ieLayer->params["end_axis"] = format("%d", _endAxis); - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE int _startAxis; int _endAxis; diff --git a/modules/dnn/src/layers/fully_connected_layer.cpp b/modules/dnn/src/layers/fully_connected_layer.cpp index dcfa7d1dac..2e08ed6d7e 100644 --- a/modules/dnn/src/layers/fully_connected_layer.cpp +++ b/modules/dnn/src/layers/fully_connected_layer.cpp @@ -439,10 +439,9 @@ public: return Ptr(); } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::FullyConnectedLayer ieLayer(name); const int outNum = blobs[0].size[0]; @@ -454,25 +453,8 @@ public: addConstantData("biases", wrapToInfEngineBlob(blobs[1], {(size_t)outNum}, InferenceEngine::Layout::C), l); return Ptr(new InfEngineBackendNode(l)); -#else - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "FullyConnected"; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::FullyConnectedLayer(lp)); - - ieLayer->_out_num = blobs[0].size[0]; -#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3) - ieLayer->params["out-size"] = format("%d", blobs[0].size[0]); -#endif - ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW); - if (blobs.size() > 1) - ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {(size_t)ieLayer->_out_num}, InferenceEngine::Layout::C); - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE virtual int64 getFLOPS(const std::vector &inputs, const std::vector &outputs) const CV_OVERRIDE diff --git a/modules/dnn/src/layers/lrn_layer.cpp b/modules/dnn/src/layers/lrn_layer.cpp index 7c85fd36cc..de63f23cd6 100644 --- a/modules/dnn/src/layers/lrn_layer.cpp +++ b/modules/dnn/src/layers/lrn_layer.cpp @@ -379,13 +379,13 @@ public: #endif // HAVE_HALIDE } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE float alphaSize = alpha; if (!normBySize) alphaSize *= (type == SPATIAL_NRM ? 
size*size : size); -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) + InferenceEngine::Builder::NormLayer ieLayer(name); ieLayer.setSize(size); ieLayer.setAlpha(alphaSize); @@ -395,23 +395,8 @@ public: InferenceEngine::Builder::Layer l = ieLayer; l.getParameters()["k"] = bias; return Ptr(new InfEngineBackendNode(l)); -#else - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "Norm"; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::NormLayer(lp)); - - ieLayer->_size = size; - ieLayer->_k = (int)bias; - ieLayer->_beta = beta; - ieLayer->_alpha = alphaSize; - ieLayer->_isAcrossMaps = (type == CHANNEL_NRM); - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE virtual int64 getFLOPS(const std::vector &inputs, const std::vector &outputs) const CV_OVERRIDE diff --git a/modules/dnn/src/layers/mvn_layer.cpp b/modules/dnn/src/layers/mvn_layer.cpp index ca7e7112c5..9141c110a2 100644 --- a/modules/dnn/src/layers/mvn_layer.cpp +++ b/modules/dnn/src/layers/mvn_layer.cpp @@ -118,11 +118,7 @@ public: { #ifdef HAVE_INF_ENGINE if (backendId == DNN_BACKEND_INFERENCE_ENGINE) -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) return !zeroDev && eps <= 1e-7f; -#else - return !zeroDev && (preferableTarget == DNN_TARGET_CPU || eps <= 1e-7f); -#endif else #endif // HAVE_INF_ENGINE return backendId == DNN_BACKEND_OPENCV; @@ -369,29 +365,16 @@ public: } } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::MVNLayer ieLayer(name); ieLayer.setAcrossChannels(acrossChannels); ieLayer.setNormalize(normVariance); ieLayer.setEpsilon(eps); return Ptr(new InfEngineBackendNode(ieLayer)); -#else - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "MVN"; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::MVNLayer(lp)); - ieLayer->params["across_channels"] = acrossChannels ? "1" : "0"; - ieLayer->params["normalize_variance"] = normVariance ? 
"1" : "0"; - ieLayer->params["eps"] = format("%f", eps); - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE virtual int64 getFLOPS(const std::vector &inputs, const std::vector &outputs) const CV_OVERRIDE diff --git a/modules/dnn/src/layers/normalize_bbox_layer.cpp b/modules/dnn/src/layers/normalize_bbox_layer.cpp index 65640b6905..09fac59078 100644 --- a/modules/dnn/src/layers/normalize_bbox_layer.cpp +++ b/modules/dnn/src/layers/normalize_bbox_layer.cpp @@ -257,10 +257,9 @@ public: } } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); if (input->dims.size() == 4) { @@ -304,54 +303,8 @@ public: return Ptr(new InfEngineBackendNode(l)); } -#else - InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); - - InferenceEngine::LayerParams lp; - lp.name = name; - lp.precision = InferenceEngine::Precision::FP32; - - if (input->dims.size() == 4) - { - const int numChannels = input->dims[2]; // NOTE: input->dims are reversed (whcn) - - lp.type = "Normalize"; - std::shared_ptr ieLayer(new InferenceEngine::CNNLayer(lp)); - if (blobs.empty()) - { - auto weights = InferenceEngine::make_shared_blob(InferenceEngine::Precision::FP32, - InferenceEngine::Layout::C, - {(size_t)numChannels}); - weights->allocate(); - std::vector ones(numChannels, 1); - weights->set(ones); - ieLayer->blobs["weights"] = weights; - ieLayer->params["channel_shared"] = "0"; - } - else - { - CV_Assert(numChannels == blobs[0].total()); - ieLayer->blobs["weights"] = wrapToInfEngineBlob(blobs[0], {(size_t)numChannels}, InferenceEngine::Layout::C); - ieLayer->params["channel_shared"] = blobs[0].total() == 1 ? "1" : "0"; - } - ieLayer->params["eps"] = format("%f", epsilon); - ieLayer->params["across_spatial"] = acrossSpatial ? "1" : "0"; - return Ptr(new InfEngineBackendNode(ieLayer)); - } - else - { - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "GRN"; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::CNNLayer(lp)); - ieLayer->params["bias"] = format("%f", epsilon); - return Ptr(new InfEngineBackendNode(ieLayer)); - } -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE private: int startAxis, endAxis; diff --git a/modules/dnn/src/layers/padding_layer.cpp b/modules/dnn/src/layers/padding_layer.cpp index 89a05e6a4d..cffb84d692 100644 --- a/modules/dnn/src/layers/padding_layer.cpp +++ b/modules/dnn/src/layers/padding_layer.cpp @@ -182,9 +182,9 @@ public: return Ptr(); } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE InferenceEngine::Builder::Layer ieLayer(name); ieLayer.setName(name); ieLayer.setType("Pad"); @@ -204,9 +204,8 @@ public: ieLayer.setInputPorts(std::vector(1)); ieLayer.setOutputPorts(std::vector(1)); return Ptr(new InfEngineBackendNode(ieLayer)); -#endif - return Ptr(); } +#endif private: std::vector > paddings; // Pairs pad before, pad after. 
diff --git a/modules/dnn/src/layers/permute_layer.cpp b/modules/dnn/src/layers/permute_layer.cpp index 9e529216e9..009ceeb1d5 100644 --- a/modules/dnn/src/layers/permute_layer.cpp +++ b/modules/dnn/src/layers/permute_layer.cpp @@ -370,30 +370,14 @@ public: } } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::PermuteLayer ieLayer(name); ieLayer.setOrder(_order); return Ptr(new InfEngineBackendNode(ieLayer)); -#else - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "Permute"; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::CNNLayer(lp)); - - CV_Assert(!_order.empty()); - ieLayer->params["order"] = format("%d", _order[0]); - for (int i = 1; i < _order.size(); ++i) - ieLayer->params["order"] += format(",%d", _order[i]); - - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE size_t _count; std::vector _order; diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp index db5c84978a..e50d292f27 100644 --- a/modules/dnn/src/layers/pooling_layer.cpp +++ b/modules/dnn/src/layers/pooling_layer.cpp @@ -278,10 +278,9 @@ public: return Ptr(); } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) if (type == MAX || type == AVE) { InferenceEngine::Builder::PoolingLayer ieLayer(name); @@ -324,69 +323,8 @@ public: else CV_Error(Error::StsNotImplemented, "Unsupported pooling type"); return Ptr(); -#else - InferenceEngine::LayerParams lp; - lp.name = name; - lp.precision = InferenceEngine::Precision::FP32; - - std::shared_ptr ieLayer; - if (type == MAX || type == AVE) - { - lp.type = "Pooling"; - InferenceEngine::PoolingLayer* poolLayer = new InferenceEngine::PoolingLayer(lp); -#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3) - poolLayer->_kernel.insert(InferenceEngine::X_AXIS, kernel.width); - poolLayer->_kernel.insert(InferenceEngine::Y_AXIS, kernel.height); - poolLayer->_stride.insert(InferenceEngine::X_AXIS, stride.width); - poolLayer->_stride.insert(InferenceEngine::Y_AXIS, stride.height); - poolLayer->_padding.insert(InferenceEngine::X_AXIS, pad_l); - poolLayer->_padding.insert(InferenceEngine::Y_AXIS, pad_t); - poolLayer->_pads_end.insert(InferenceEngine::X_AXIS, pad_r); - poolLayer->_pads_end.insert(InferenceEngine::Y_AXIS, pad_b); - poolLayer->params["kernel"] = format("%d,%d", kernel.height, kernel.width); - poolLayer->params["pads_begin"] = format("%d,%d", pad_t, pad_l); - poolLayer->params["pads_end"] = format("%d,%d", pad_b, pad_r); - poolLayer->params["strides"] = format("%d,%d", stride.height, stride.width); -#else - poolLayer->_kernel_x = kernel.width; - poolLayer->_kernel_y = kernel.height; - poolLayer->_stride_x = stride.width; - poolLayer->_stride_y = stride.height; - poolLayer->_padding_x = pad_l; - poolLayer->_padding_y = pad_t; - poolLayer->params["pad-r"] = format("%d", pad_r); - poolLayer->params["pad-b"] = format("%d", pad_b); -#endif - poolLayer->_exclude_pad = type == AVE && padMode == "SAME"; - poolLayer->params["rounding-type"] = ceilMode ? "ceil" : "floor"; - poolLayer->_type = type == MAX ? 
InferenceEngine::PoolingLayer::PoolType::MAX : - InferenceEngine::PoolingLayer::PoolType::AVG; - ieLayer = std::shared_ptr(poolLayer); - } - else if (type == ROI) - { - lp.type = "ROIPooling"; - ieLayer = std::shared_ptr(new InferenceEngine::CNNLayer(lp)); - ieLayer->params["pooled_w"] = format("%d", pooledSize.width); - ieLayer->params["pooled_h"] = format("%d", pooledSize.height); - ieLayer->params["spatial_scale"] = format("%f", spatialScale); - } - else if (type == PSROI) - { - lp.type = "PSROIPooling"; - ieLayer = std::shared_ptr(new InferenceEngine::CNNLayer(lp)); - ieLayer->params["output_dim"] = format("%d", psRoiOutChannels); - ieLayer->params["group_size"] = format("%d", pooledSize.width); - ieLayer->params["spatial_scale"] = format("%f", spatialScale); - } - else - CV_Error(Error::StsNotImplemented, "Unsupported pooling type"); - - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE class PoolingInvoker : public ParallelLoopBody diff --git a/modules/dnn/src/layers/prior_box_layer.cpp b/modules/dnn/src/layers/prior_box_layer.cpp index b2907b7b8b..dc949d0fba 100644 --- a/modules/dnn/src/layers/prior_box_layer.cpp +++ b/modules/dnn/src/layers/prior_box_layer.cpp @@ -480,10 +480,9 @@ public: } } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) if (_explicitSizes) { InferenceEngine::Builder::PriorBoxClusteredLayer ieLayer(name); @@ -541,66 +540,8 @@ public: l.getParameters()["variance"] = _variance; return Ptr(new InfEngineBackendNode(l)); } -#else - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = _explicitSizes ? "PriorBoxClustered" : "PriorBox"; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::CNNLayer(lp)); - - if (_explicitSizes) - { - CV_Assert(!_boxWidths.empty()); CV_Assert(!_boxHeights.empty()); - CV_Assert(_boxWidths.size() == _boxHeights.size()); - ieLayer->params["width"] = format("%f", _boxWidths[0]); - ieLayer->params["height"] = format("%f", _boxHeights[0]); - for (int i = 1; i < _boxWidths.size(); ++i) - { - ieLayer->params["width"] += format(",%f", _boxWidths[i]); - ieLayer->params["height"] += format(",%f", _boxHeights[i]); - } - } - else - { - ieLayer->params["min_size"] = format("%f", _minSize); - ieLayer->params["max_size"] = _maxSize > 0 ? format("%f", _maxSize) : ""; - - if (!_aspectRatios.empty()) - { - ieLayer->params["aspect_ratio"] = format("%f", _aspectRatios[0]); - for (int i = 1; i < _aspectRatios.size(); ++i) - ieLayer->params["aspect_ratio"] += format(",%f", _aspectRatios[i]); - } - } - - ieLayer->params["flip"] = "0"; // We already flipped aspect ratios. - ieLayer->params["clip"] = _clip ? 
"1" : "0"; - - CV_Assert(!_variance.empty()); - ieLayer->params["variance"] = format("%f", _variance[0]); - for (int i = 1; i < _variance.size(); ++i) - ieLayer->params["variance"] += format(",%f", _variance[i]); - - if (_stepX == _stepY) - { - ieLayer->params["step"] = format("%f", _stepX); - ieLayer->params["step_h"] = "0.0"; - ieLayer->params["step_w"] = "0.0"; - } - else - { - ieLayer->params["step"] = "0.0"; - ieLayer->params["step_h"] = format("%f", _stepY); - ieLayer->params["step_w"] = format("%f", _stepX); - } - CV_CheckEQ(_offsetsX.size(), (size_t)1, ""); CV_CheckEQ(_offsetsY.size(), (size_t)1, ""); CV_CheckEQ(_offsetsX[0], _offsetsY[0], ""); - ieLayer->params["offset"] = format("%f", _offsetsX[0]); - - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE virtual int64 getFLOPS(const std::vector &inputs, const std::vector &outputs) const CV_OVERRIDE diff --git a/modules/dnn/src/layers/proposal_layer.cpp b/modules/dnn/src/layers/proposal_layer.cpp index 836fc1831b..5f5c029bfd 100644 --- a/modules/dnn/src/layers/proposal_layer.cpp +++ b/modules/dnn/src/layers/proposal_layer.cpp @@ -322,10 +322,9 @@ public: layerOutputs[0].col(2).copyTo(dst); } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::ProposalLayer ieLayer(name); ieLayer.setBaseSize(baseSize); @@ -346,36 +345,8 @@ public: ieLayer.setRatio(ratiosVec); return Ptr(new InfEngineBackendNode(ieLayer)); -#else - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "Proposal"; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::CNNLayer(lp)); - - ieLayer->params["base_size"] = format("%d", baseSize); - ieLayer->params["feat_stride"] = format("%d", featStride); - ieLayer->params["min_size"] = "16"; - ieLayer->params["nms_thresh"] = format("%f", nmsThreshold); - ieLayer->params["post_nms_topn"] = format("%d", keepTopAfterNMS); - ieLayer->params["pre_nms_topn"] = format("%d", keepTopBeforeNMS); - if (ratios.size()) - { - ieLayer->params["ratio"] = format("%f", ratios.get(0)); - for (int i = 1; i < ratios.size(); ++i) - ieLayer->params["ratio"] += format(",%f", ratios.get(i)); - } - if (scales.size()) - { - ieLayer->params["scale"] = format("%f", scales.get(0)); - for (int i = 1; i < scales.size(); ++i) - ieLayer->params["scale"] += format(",%f", scales.get(i)); - } - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE private: // A first half of channels are background scores. We need only a second one. 
diff --git a/modules/dnn/src/layers/reorg_layer.cpp b/modules/dnn/src/layers/reorg_layer.cpp index 3e42db5de1..659a79521a 100644 --- a/modules/dnn/src/layers/reorg_layer.cpp +++ b/modules/dnn/src/layers/reorg_layer.cpp @@ -178,25 +178,14 @@ public: permute->forward(inputs, outputs, internals_arr); } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::ReorgYoloLayer ieLayer(name); ieLayer.setStride(reorgStride); return Ptr(new InfEngineBackendNode(ieLayer)); -#else - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "ReorgYolo"; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::CNNLayer(lp)); - ieLayer->params["stride"] = format("%d", reorgStride); - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE virtual int64 getFLOPS(const std::vector &inputs, const std::vector &outputs) const CV_OVERRIDE diff --git a/modules/dnn/src/layers/reshape_layer.cpp b/modules/dnn/src/layers/reshape_layer.cpp index d6290456fa..2b57a734a0 100644 --- a/modules/dnn/src/layers/reshape_layer.cpp +++ b/modules/dnn/src/layers/reshape_layer.cpp @@ -257,34 +257,15 @@ public: } } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::ReshapeLayer ieLayer(name); CV_Assert(outShapes.size() == 1); ieLayer.setDims(outShapes[0]); return Ptr(new InfEngineBackendNode(ieLayer)); -#else - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "Reshape"; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::ReshapeLayer(lp)); - if (!newShapeDesc.empty()) - ieLayer->shape = newShapeDesc; - else - { - CV_Assert(inputs.size() == 2); - InferenceEngine::DataPtr shapeSrc = infEngineDataNode(inputs[1]); - // NOTE: shapeSrc->dims are reversed - ieLayer->shape = std::vector(shapeSrc->dims.rbegin(), shapeSrc->dims.rend()); - } - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE private: std::vector outShapes; diff --git a/modules/dnn/src/layers/resize_layer.cpp b/modules/dnn/src/layers/resize_layer.cpp index b890f9c9cd..339f2b7932 100644 --- a/modules/dnn/src/layers/resize_layer.cpp +++ b/modules/dnn/src/layers/resize_layer.cpp @@ -55,7 +55,7 @@ public: if (backendId == DNN_BACKEND_INFERENCE_ENGINE) { return (interpolation == "nearest" && scaleWidth == scaleHeight) || - (interpolation == "bilinear" && INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R4)); + (interpolation == "bilinear"); } #endif return backendId == DNN_BACKEND_OPENCV; @@ -162,7 +162,6 @@ public: virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::Layer ieLayer(name); ieLayer.setName(name); if (interpolation == "nearest") @@ -188,32 +187,6 @@ public: ieLayer.setInputPorts(std::vector(1)); ieLayer.setOutputPorts(std::vector(1)); return Ptr(new InfEngineBackendNode(ieLayer)); -#else - InferenceEngine::LayerParams lp; - lp.name = name; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer; - if (interpolation == "nearest") - { - lp.type = "Resample"; - ieLayer = std::shared_ptr(new 
InferenceEngine::CNNLayer(lp)); - ieLayer->params["type"] = "caffe.ResampleParameter.NEAREST"; - ieLayer->params["antialias"] = "0"; - } - else if (interpolation == "bilinear") - { - lp.type = "Interp"; - ieLayer = std::shared_ptr(new InferenceEngine::CNNLayer(lp)); - ieLayer->params["pad_beg"] = "0"; - ieLayer->params["pad_end"] = "0"; - ieLayer->params["align_corners"] = "0"; - } - else - CV_Error(Error::StsNotImplemented, "Unsupported interpolation: " + interpolation); - ieLayer->params["width"] = cv::format("%d", outWidth); - ieLayer->params["height"] = cv::format("%d", outHeight); - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif #endif // HAVE_INF_ENGINE return Ptr(); } @@ -271,10 +244,9 @@ public: scaleWidth = (outWidth > 1) ? (static_cast(inpWidth - 1) / (outWidth - 1)) : 0.f; } +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::Layer ieLayer(name); ieLayer.setName(name); ieLayer.setType("Interp"); @@ -285,20 +257,9 @@ public: ieLayer.setInputPorts(std::vector(1)); ieLayer.setOutputPorts(std::vector(1)); return Ptr(new InfEngineBackendNode(ieLayer)); -#else - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "Interp"; - lp.precision = InferenceEngine::Precision::FP32; - - std::shared_ptr ieLayer(new InferenceEngine::CNNLayer(lp)); - ieLayer->params["pad_beg"] = "0"; - ieLayer->params["pad_end"] = "0"; - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE + }; Ptr InterpLayer::create(const LayerParams& params) diff --git a/modules/dnn/src/layers/scale_layer.cpp b/modules/dnn/src/layers/scale_layer.cpp index d911905d36..5e22519c39 100644 --- a/modules/dnn/src/layers/scale_layer.cpp +++ b/modules/dnn/src/layers/scale_layer.cpp @@ -194,10 +194,9 @@ public: } #endif // HAVE_HALIDE +#ifdef HAVE_INF_ENGINE virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE -#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ScaleShiftLayer(name); CV_Assert(!blobs.empty()); @@ -219,37 +218,8 @@ public: if (hasBias) addConstantData("biases", wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C), l); return Ptr(new InfEngineBackendNode(l)); -#else - InferenceEngine::LayerParams lp; - lp.name = name; - lp.type = "ScaleShift"; - lp.precision = InferenceEngine::Precision::FP32; - std::shared_ptr ieLayer(new InferenceEngine::ScaleShiftLayer(lp)); - - CV_Assert(!blobs.empty()); - const size_t numChannels = blobs[0].total(); - if (hasWeights) - { - ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C); - } - else - { - auto weights = InferenceEngine::make_shared_blob(InferenceEngine::Precision::FP32, - {numChannels}); - weights->allocate(); - - std::vector ones(numChannels, 1); - weights->set(ones); - ieLayer->_weights = weights; - } - if (hasBias) - ieLayer->_biases = wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C); - - return Ptr(new InfEngineBackendNode(ieLayer)); -#endif -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_INF_ENGINE void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE { diff --git a/modules/dnn/src/layers/slice_layer.cpp b/modules/dnn/src/layers/slice_layer.cpp index 051098c744..73d6a301ae 100644 --- a/modules/dnn/src/layers/slice_layer.cpp +++ 
diff --git a/modules/dnn/src/layers/slice_layer.cpp b/modules/dnn/src/layers/slice_layer.cpp
index 051098c744..73d6a301ae 100644
--- a/modules/dnn/src/layers/slice_layer.cpp
+++ b/modules/dnn/src/layers/slice_layer.cpp
@@ -54,7 +54,7 @@ namespace cv
 namespace dnn
 {
 
-class SliceLayerImpl CV_FINAL : public SliceLayer
+class SliceLayerImpl : public SliceLayer
 {
 public:
     SliceLayerImpl(const LayerParams& params)
@@ -112,6 +112,9 @@ public:
     {
         return backendId == DNN_BACKEND_OPENCV ||
                (backendId == DNN_BACKEND_INFERENCE_ENGINE &&
+#ifdef HAVE_INF_ENGINE
+                INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
+#endif
                 sliceRanges.size() == 1 && sliceRanges[0].size() == 4);
     }
 
@@ -256,23 +259,24 @@ public:
     }
 
 #ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
-        CV_Assert(sliceRanges.size() == 1);
+        CV_Assert_N(sliceRanges.size() == 1, inputs.size() <= 2);
 
         std::vector<int> axes, offsets, dims;
         int from, to, step;
         int numDims = sliceRanges[0].size();
         if (preferableTarget == DNN_TARGET_MYRIAD)
         {
-            from = 1;
+            from = axis;
             to = numDims;
             step = 1;
         }
         else
         {
             from = numDims - 1;
-            to = -1;
+            to = axis - 1;
             step = -1;
         }
         for (int i = from; i != to; i += step)
@@ -282,11 +286,6 @@ public:
             dims.push_back(sliceRanges[0][i].size());
         }
 
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
-        std::vector<size_t> outShape(numDims);
-        for (int i = 0; i < numDims; ++i)
-            outShape[numDims - 1 - i] = sliceRanges[0][i].size();
-
         InferenceEngine::Builder::Layer ieLayer(name);
         ieLayer.setName(name);
         ieLayer.setType("Crop");
@@ -295,30 +294,106 @@ public:
         ieLayer.getParameters()["offset"] = offsets;
         ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(2));
         ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
-        ieLayer.getInputPorts()[1].setParameter("type", "weights");
 
-        // Fake blob which will be moved to inputs (as weights).
-        auto shapeSource = InferenceEngine::make_shared_blob<float>(
-                               InferenceEngine::Precision::FP32,
-                               InferenceEngine::Layout::ANY, outShape);
-        shapeSource->allocate();
-        addConstantData("weights", shapeSource, ieLayer);
+        if (inputs.size() != 2)
+        {
+            std::vector<size_t> outShape(numDims);
+            for (int i = 0; i < numDims; ++i)
+                outShape[numDims - 1 - i] = sliceRanges[0][i].size();
+
+            ieLayer.getInputPorts()[1].setParameter("type", "weights");
+
+            // Fake blob which will be moved to inputs (as weights).
+            auto shapeSource = InferenceEngine::make_shared_blob<float>(
+                                   InferenceEngine::Precision::FP32,
+                                   InferenceEngine::Layout::ANY, outShape);
+            shapeSource->allocate();
+            addConstantData("weights", shapeSource, ieLayer);
+        }
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-#else
-        InferenceEngine::LayerParams lp;
-        lp.name = name;
-        lp.type = "Crop";
-        lp.precision = InferenceEngine::Precision::FP32;
-        std::shared_ptr<InferenceEngine::CropLayer> ieLayer(new InferenceEngine::CropLayer(lp));
-        ieLayer->axis = axes;
-        ieLayer->offset = offsets;
-        ieLayer->dim = dims;
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-#endif // IE < R5
-        return Ptr<BackendNode>();
     }
 #endif
+#endif
+};
+
+class CropLayerImpl CV_FINAL : public SliceLayerImpl
+{
+public:
+    CropLayerImpl(const LayerParams& params) : SliceLayerImpl(LayerParams())
+    {
+        setParamsFrom(params);
+        axis = params.get<int>("axis", 2);
+        const DictValue *paramOffset = params.ptr("offset");
+
+        if (paramOffset)
+        {
+            for (int i = 0; i < paramOffset->size(); i++)
+                offset.push_back(paramOffset->get<int>(i));
+        }
+    }
+
+    bool getMemoryShapes(const std::vector<MatShape> &inputs,
+                         const int requiredOutputs,
+                         std::vector<MatShape> &outputs,
+                         std::vector<MatShape> &internals) const CV_OVERRIDE
+    {
+        CV_Assert(inputs.size() == 2);
+
+        MatShape dstShape = inputs[0];
+        int start = clamp(axis, dstShape);
+        for (int i = start; i < dstShape.size(); i++)
+        {
+            dstShape[i] = inputs[1][i];
+        }
+        outputs.resize(1, dstShape);
+        return false;
+    }
+
+    void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
+    {
+        std::vector<Mat> inputs;
+        inputs_arr.getMatVector(inputs);
+        CV_Assert(2 == inputs.size());
+
+        const Mat &inpBlob = inputs[0];
+        const Mat &inpSzBlob = inputs[1];
+
+        int dims = inpBlob.dims;
+        int start_axis = clamp(axis, dims);
+
+        std::vector<int> offset_final(dims, 0);
+        if (offset.size() == 1)
+        {
+            for (int i = start_axis; i < dims; i++)
+                offset_final[i] = offset[0];
+        }
+        else if (offset.size() > 1)
+        {
+            if ((int)offset.size() != dims - start_axis)
+                CV_Error(Error::StsBadArg, "number of offset values specified must be "
+                                           "equal to the number of dimensions following axis.");
+
+            for (int i = start_axis; i < dims; i++)
+                offset_final[i] = offset[i - start_axis];
+        }
+
+        sliceRanges.resize(1);
+        sliceRanges[0].resize(dims);
+        for (int i = 0; i < start_axis; i++)
+        {
+            sliceRanges[0][i] = Range(0, inpBlob.size[i]);
+        }
+        for (int i = start_axis; i < dims; i++)
+        {
+            if (offset_final[i] < 0 || offset_final[i] + inpSzBlob.size[i] > inpBlob.size[i])
+                CV_Error(Error::StsBadArg, "invalid crop parameters or blob sizes");
+
+            sliceRanges[0][i] = Range(offset_final[i], offset_final[i] + inpSzBlob.size[i]);
+        }
+    }
+
+private:
+    std::vector<int> offset;
 };
 
 Ptr<SliceLayer> SliceLayer::create(const LayerParams& params)
@@ -326,5 +401,10 @@ Ptr<SliceLayer> SliceLayer::create(const LayerParams& params)
     return Ptr<SliceLayer>(new SliceLayerImpl(params));
 }
 
+Ptr<Layer> CropLayer::create(const LayerParams& params)
+{
+    return Ptr<Layer>(new CropLayerImpl(params));
+}
+
 }
 }
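The axis bookkeeping in the slice initInfEngine() hunk is easy to misread: Myriad expects the cropped axes in ascending order starting at axis, all other targets in descending order down to axis. A standalone sketch of that loop (Range here is a simplified stand-in for cv::Range):

    #include <vector>

    struct Range
    {
        int start, end;
        int size() const { return end - start; }
    };

    // Plays the role of the axes/offsets/dims computation above;
    // `ranges` corresponds to sliceRanges[0].
    static void cropParams(const std::vector<Range>& ranges, int axis, bool isMyriad,
                           std::vector<int>& axes, std::vector<int>& offsets,
                           std::vector<int>& dims)
    {
        const int numDims = (int)ranges.size();
        const int from = isMyriad ? axis : numDims - 1;
        const int to   = isMyriad ? numDims : axis - 1;
        const int step = isMyriad ? 1 : -1;
        for (int i = from; i != to; i += step)
        {
            axes.push_back(i);
            offsets.push_back(ranges[i].start);
            dims.push_back(ranges[i].size());
        }
    }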
diff --git a/modules/dnn/src/layers/softmax_layer.cpp b/modules/dnn/src/layers/softmax_layer.cpp
index 7bd2d0c20a..0c19f01889 100644
--- a/modules/dnn/src/layers/softmax_layer.cpp
+++ b/modules/dnn/src/layers/softmax_layer.cpp
@@ -309,29 +309,16 @@ public:
         return Ptr<BackendNode>();
     }
 
+#ifdef HAVE_INF_ENGINE
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
     {
-#ifdef HAVE_INF_ENGINE
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
 
         InferenceEngine::Builder::SoftMaxLayer ieLayer(name);
         ieLayer.setAxis(clamp(axisRaw, input->dims.size()));
 
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-#else
-        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-
-        InferenceEngine::LayerParams lp;
-        lp.name = name;
-        lp.type = "SoftMax";
-        lp.precision = InferenceEngine::Precision::FP32;
-        std::shared_ptr<InferenceEngine::SoftMaxLayer> ieLayer(new InferenceEngine::SoftMaxLayer(lp));
-        ieLayer->axis = clamp(axisRaw, input->dims.size());
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-#endif
-#endif  // HAVE_INF_ENGINE
-        return Ptr<BackendNode>();
     }
+#endif  // HAVE_INF_ENGINE
 
     int64 getFLOPS(const std::vector<MatShape> &inputs,
                    const std::vector<MatShape> &outputs) const CV_OVERRIDE
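clamp(axisRaw, input->dims.size()) above resolves a possibly negative softmax axis against the blob rank. The helper behaves like this standalone version (clampAxis is a hypothetical name; the real helper is an OpenCV dnn internal):

    #include <cassert>

    // Maps ax in [-dims, dims-1] to [0, dims-1]; negative values count
    // from the last dimension, e.g. clampAxis(-1, 4) == 3.
    static int clampAxis(int ax, int dims)
    {
        const int a = ax < 0 ? ax + dims : ax;
        assert(0 <= a && a < dims);
        return a;
    }

    int main()
    {
        return clampAxis(-1, 4) == 3 ? 0 : 1;
    }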
diff --git a/modules/dnn/src/op_inf_engine.cpp b/modules/dnn/src/op_inf_engine.cpp
index 0883f53484..a24273338e 100644
--- a/modules/dnn/src/op_inf_engine.cpp
+++ b/modules/dnn/src/op_inf_engine.cpp
@@ -26,33 +26,8 @@ namespace cv { namespace dnn {
 // we can use some predefined name.
 static std::string kDefaultInpLayerName = "empty_inp_layer_name";
 
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
 InfEngineBackendNode::InfEngineBackendNode(const InferenceEngine::Builder::Layer& _layer)
     : BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(_layer) {}
-#else
-InfEngineBackendNode::InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& _layer)
-    : BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(_layer) {}
-
-void InfEngineBackendNode::connect(std::vector<Ptr<BackendWrapper> >& inputs,
-                                   std::vector<Ptr<BackendWrapper> >& outputs)
-{
-    layer->insData.resize(inputs.size());
-    for (int i = 0; i < inputs.size(); ++i)
-    {
-        InferenceEngine::DataPtr dataPtr = infEngineDataNode(inputs[i]);
-        layer->insData[i] = InferenceEngine::DataWeakPtr(dataPtr);
-        dataPtr->inputTo[layer->name] = layer;
-    }
-
-    CV_Assert(!outputs.empty());
-
-    layer->outData.resize(1);
-    InferenceEngine::DataPtr dataPtr = infEngineDataNode(outputs[0]);
-    dataPtr->name = layer->name;
-    layer->outData[0] = dataPtr;
-    dataPtr->creatorLayer = InferenceEngine::CNNLayerWeakPtr(layer);
-}
-#endif
 
 static std::vector<Ptr<InfEngineBackendWrapper> >
 infEngineWrappers(const std::vector<Ptr<BackendWrapper> >& ptrs)
@@ -67,8 +42,6 @@ infEngineWrappers(const std::vector<Ptr<BackendWrapper> >& ptrs)
     return wrappers;
 }
 
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
-
 InfEngineBackendNet::InfEngineBackendNet() : netBuilder("")
 {
     hasNetOwner = false;
@@ -238,8 +211,6 @@ void InfEngineBackendNet::addOutput(const std::string& name)
     requestedOutputs.push_back(name);
 }
 
-#endif  // IE >= R5
-
 static InferenceEngine::Layout estimateLayout(const Mat& m)
 {
     if (m.dims == 4)
@@ -352,350 +323,6 @@ void InfEngineBackendWrapper::setHostDirty()
 }
 
-#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5)
-InfEngineBackendNet::InfEngineBackendNet()
-{
-    targetDevice = InferenceEngine::TargetDevice::eCPU;
-    precision = InferenceEngine::Precision::FP32;
-    hasNetOwner = false;
-}
-
-InfEngineBackendNet::InfEngineBackendNet(InferenceEngine::CNNNetwork& net)
-{
-    targetDevice = InferenceEngine::TargetDevice::eCPU;
-    precision = InferenceEngine::Precision::FP32;
-    inputs = net.getInputsInfo();
-    outputs = net.getOutputsInfo();
-    layers.resize(net.layerCount());  // A hack to execute InfEngineBackendNet::layerCount correctly.
-    netOwner = net;
-    hasNetOwner = true;
-}
-
-void InfEngineBackendNet::Release() noexcept
-{
-    layers.clear();
-    inputs.clear();
-    outputs.clear();
-}
-
-void InfEngineBackendNet::setPrecision(InferenceEngine::Precision p) noexcept
-{
-    precision = p;
-}
-
-InferenceEngine::Precision InfEngineBackendNet::getPrecision() noexcept
-{
-    return hasNetOwner ? netOwner.getPrecision() : precision;
-}
-
-InferenceEngine::Precision InfEngineBackendNet::getPrecision() const noexcept
-{
-    return hasNetOwner ? netOwner.getPrecision() : precision;
-}
-
-// Assume that outputs of network is unconnected blobs.
-void InfEngineBackendNet::getOutputsInfo(InferenceEngine::OutputsDataMap &outputs_) noexcept
-{
-    const_cast<const InfEngineBackendNet*>(this)->getOutputsInfo(outputs_);
-}
-void InfEngineBackendNet::getOutputsInfo(InferenceEngine::OutputsDataMap &outputs_) const noexcept
-{
-    outputs_ = outputs;
-}
-
-// Returns input references that aren't connected to internal outputs.
-void InfEngineBackendNet::getInputsInfo(InferenceEngine::InputsDataMap &inputs_) noexcept
-{
-    const_cast<const InfEngineBackendNet*>(this)->getInputsInfo(inputs_);
-}
-
-// Returns input references that aren't connected to internal outputs.
-void InfEngineBackendNet::getInputsInfo(InferenceEngine::InputsDataMap &inputs_) const noexcept
-{
-    inputs_ = inputs;
-}
-
-InferenceEngine::InputInfo::Ptr InfEngineBackendNet::getInput(const std::string &inputName) noexcept
-{
-    return const_cast<const InfEngineBackendNet*>(this)->getInput(inputName);
-}
-
-InferenceEngine::InputInfo::Ptr InfEngineBackendNet::getInput(const std::string &inputName) const noexcept
-{
-    const auto& it = inputs.find(inputName);
-    CV_Assert(it != inputs.end());
-    return it->second;
-}
-
-void InfEngineBackendNet::getName(char*, size_t) noexcept
-{
-}
-
-void InfEngineBackendNet::getName(char*, size_t) const noexcept
-{
-}
-
-const std::string& InfEngineBackendNet::getName() const noexcept
-{
-    return name;
-}
-
-InferenceEngine::StatusCode InfEngineBackendNet::serialize(const std::string&, const std::string&, InferenceEngine::ResponseDesc*) const noexcept
-{
-    CV_Error(Error::StsNotImplemented, "");
-    return InferenceEngine::StatusCode::OK;
-}
-
-size_t InfEngineBackendNet::layerCount() noexcept
-{
-    return const_cast<const InfEngineBackendNet*>(this)->layerCount();
-}
-
-size_t InfEngineBackendNet::layerCount() const noexcept
-{
-    return layers.size();
-}
-
-InferenceEngine::DataPtr& InfEngineBackendNet::getData(const char *dname) noexcept
-{
-    CV_Error(Error::StsNotImplemented, "");
-    return outputs.begin()->second;  // Just return something.
-}
-
-void InfEngineBackendNet::addLayer(const InferenceEngine::CNNLayerPtr &layer) noexcept
-{
-    layers.push_back(layer);
-    inputs.clear();
-    outputs.clear();
-}
-
-InferenceEngine::StatusCode
-InfEngineBackendNet::addOutput(const std::string &layerName, size_t outputIndex,
-                               InferenceEngine::ResponseDesc *resp) noexcept
-{
-    for (const auto& l : layers)
-    {
-        for (const InferenceEngine::DataPtr& out : l->outData)
-        {
-            if (out->name == layerName)
-            {
-                outputs[out->name] = out;
-                return InferenceEngine::StatusCode::OK;
-            }
-        }
-    }
-    CV_Error(Error::StsObjectNotFound, "Cannot find a layer " + layerName);
-    return InferenceEngine::StatusCode::OK;
-}
-
-InferenceEngine::StatusCode
-InfEngineBackendNet::getLayerByName(const char *layerName, InferenceEngine::CNNLayerPtr &out,
-                                    InferenceEngine::ResponseDesc *resp) noexcept
-{
-    return const_cast<const InfEngineBackendNet*>(this)->getLayerByName(layerName, out, resp);
-}
-
-InferenceEngine::StatusCode InfEngineBackendNet::getLayerByName(const char *layerName,
-                                                                InferenceEngine::CNNLayerPtr &out,
-                                                                InferenceEngine::ResponseDesc *resp) const noexcept
-{
-    for (auto& l : layers)
-    {
-        if (l->name == layerName)
-        {
-            out = l;
-            return InferenceEngine::StatusCode::OK;
-        }
-    }
-    CV_Error(Error::StsObjectNotFound, cv::format("Cannot find a layer %s", layerName));
-    return InferenceEngine::StatusCode::NOT_FOUND;
-}
-
-void InfEngineBackendNet::setTargetDevice(InferenceEngine::TargetDevice device) noexcept
-{
-    if (device != InferenceEngine::TargetDevice::eCPU &&
-        device != InferenceEngine::TargetDevice::eGPU &&
-        device != InferenceEngine::TargetDevice::eMYRIAD &&
-        device != InferenceEngine::TargetDevice::eFPGA)
-        CV_Error(Error::StsNotImplemented, "");
-    targetDevice = device;
-}
-
-InferenceEngine::TargetDevice InfEngineBackendNet::getTargetDevice() noexcept
-{
-    return const_cast<const InfEngineBackendNet*>(this)->getTargetDevice();
-}
-
-InferenceEngine::TargetDevice InfEngineBackendNet::getTargetDevice() const noexcept
-{
-    return targetDevice == InferenceEngine::TargetDevice::eFPGA ?
-           InferenceEngine::TargetDevice::eHETERO : targetDevice;
-}
-
-InferenceEngine::StatusCode InfEngineBackendNet::setBatchSize(const size_t) noexcept
-{
-    CV_Error(Error::StsNotImplemented, "");
-    return InferenceEngine::StatusCode::OK;
-}
-
-InferenceEngine::StatusCode InfEngineBackendNet::setBatchSize(size_t size, InferenceEngine::ResponseDesc *responseDesc) noexcept
-{
-    CV_Error(Error::StsNotImplemented, "");
-    return InferenceEngine::StatusCode::OK;
-}
-
-size_t InfEngineBackendNet::getBatchSize() const noexcept
-{
-    size_t batchSize = 0;
-    for (const auto& inp : inputs)
-    {
-        CV_Assert(inp.second);
-        std::vector<size_t> dims = inp.second->getDims();
-        CV_Assert(!dims.empty());
-        if (batchSize != 0)
-            CV_Assert(batchSize == dims.back());
-        else
-            batchSize = dims.back();
-    }
-    return batchSize;
-}
-
-InferenceEngine::StatusCode InfEngineBackendNet::AddExtension(const InferenceEngine::IShapeInferExtensionPtr &extension, InferenceEngine::ResponseDesc *resp) noexcept
-{
-    CV_Error(Error::StsNotImplemented, "");
-    return InferenceEngine::StatusCode::OK;
-}
-
-InferenceEngine::StatusCode InfEngineBackendNet::reshape(const InferenceEngine::ICNNNetwork::InputShapes &inputShapes, InferenceEngine::ResponseDesc *resp) noexcept
-{
-    CV_Error(Error::StsNotImplemented, "");
-    return InferenceEngine::StatusCode::OK;
-}
-
-void InfEngineBackendNet::init(int targetId)
-{
-    if (inputs.empty())
-    {
-        // Collect all external input blobs.
-        inputs.clear();
-        std::map<std::string, InferenceEngine::DataPtr> internalOutputs;
-        for (const auto& l : layers)
-        {
-            for (const InferenceEngine::DataWeakPtr& ptr : l->insData)
-            {
-                InferenceEngine::DataPtr inp(ptr);
-                if (internalOutputs.find(inp->name) == internalOutputs.end())
-                {
-                    InferenceEngine::InputInfo::Ptr inpInfo(new InferenceEngine::InputInfo());
-                    inpInfo->setInputData(inp);
-                    if (inputs.find(inp->name) == inputs.end())
-                        inputs[inp->name] = inpInfo;
-                }
-            }
-            for (const InferenceEngine::DataPtr& out : l->outData)
-            {
-                // TODO: Replace to uniqueness assertion.
-                if (internalOutputs.find(out->name) == internalOutputs.end())
-                    internalOutputs[out->name] = out;
-            }
-        }
-        CV_Assert(!inputs.empty());
-
-#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
-        for (const auto& inp : inputs)
-        {
-            InferenceEngine::LayerParams lp;
-            lp.name = inp.first;
-            lp.type = "Input";
-            lp.precision = InferenceEngine::Precision::FP32;
-            std::shared_ptr<InferenceEngine::CNNLayer> inpLayer(new InferenceEngine::CNNLayer(lp));
-
-            layers.push_back(inpLayer);
-
-            InferenceEngine::DataPtr dataPtr = inp.second->getInputData();
-            // TODO: remove precision dependency (see setInput.normalization tests)
-            if (dataPtr->precision == InferenceEngine::Precision::FP32)
-            {
-                inpLayer->outData.assign(1, dataPtr);
-                dataPtr->creatorLayer = InferenceEngine::CNNLayerWeakPtr(inpLayer);
-            }
-        }
-#endif
-    }
-
-    if (outputs.empty())
-    {
-        // Add all unconnected blobs to output blobs.
-        InferenceEngine::OutputsDataMap unconnectedOuts;
-        for (const auto& l : layers)
-        {
-            if (l->type == "Input")
-                continue;
-            // Add all outputs.
-            for (const InferenceEngine::DataPtr& out : l->outData)
-            {
-                // TODO: Replace to uniqueness assertion.
-                if (unconnectedOuts.find(out->name) == unconnectedOuts.end())
-                    unconnectedOuts[out->name] = out;
-            }
-            // Remove internally connected outputs.
-            for (const InferenceEngine::DataWeakPtr& inp : l->insData)
-            {
-                unconnectedOuts.erase(InferenceEngine::DataPtr(inp)->name);
-            }
-        }
-        CV_Assert(!unconnectedOuts.empty());
-
-        for (auto it = unconnectedOuts.begin(); it != unconnectedOuts.end(); ++it)
-        {
-            outputs[it->first] = it->second;
-        }
-    }
-
-    // Set up input blobs.
-    inpBlobs.clear();
-    for (const auto& it : inputs)
-    {
-        CV_Assert(allBlobs.find(it.first) != allBlobs.end());
-        inpBlobs[it.first] = allBlobs[it.first];
-        it.second->setPrecision(inpBlobs[it.first]->precision());
-    }
-
-    // Set up output blobs.
-    outBlobs.clear();
-    for (const auto& it : outputs)
-    {
-        CV_Assert(allBlobs.find(it.first) != allBlobs.end());
-        outBlobs[it.first] = allBlobs[it.first];
-    }
-
-    switch (targetId)
-    {
-    case DNN_TARGET_CPU: setTargetDevice(InferenceEngine::TargetDevice::eCPU); break;
-    case DNN_TARGET_OPENCL_FP16:
-        setPrecision(InferenceEngine::Precision::FP16);
-        /* Falls through. */
-    case DNN_TARGET_OPENCL: setTargetDevice(InferenceEngine::TargetDevice::eGPU); break;
-    case DNN_TARGET_MYRIAD:
-    {
-        setPrecision(InferenceEngine::Precision::FP16);
-        setTargetDevice(InferenceEngine::TargetDevice::eMYRIAD); break;
-    }
-    case DNN_TARGET_FPGA:
-    {
-        setPrecision(InferenceEngine::Precision::FP16);
-        setTargetDevice(InferenceEngine::TargetDevice::eFPGA); break;
-    }
-    default:
-        CV_Error(Error::StsError, format("Unknown target identifier: %d", targetId));
-    }
-
-    if (!isInitialized())
-        initPlugin(*this);
-}
-
-#endif  // IE < R5
-
 static std::map<std::string, InferenceEngine::InferenceEnginePluginPtr>& getSharedPlugins()
 {
     static std::map<std::string, InferenceEngine::InferenceEnginePluginPtr> sharedPlugins;
@@ -703,7 +330,7 @@ static std::map<std::string, InferenceEngine::InferenceEnginePluginPtr>& getSharedPlugins()
-#endif  // IE >= 2018R5
+#endif  // !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
 
 void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
 {
@@ -834,9 +462,7 @@ void InfEngineBackendNet::addBlobs(const std::vector<Ptr<BackendWrapper> >& ptrs)
     for (const auto& wrapper : wrappers)
     {
         std::string name = wrapper->dataPtr->name;
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
         name = name.empty() ? kDefaultInpLayerName : name;
-#endif
         allBlobs.insert({name, wrapper->blob});
     }
 }
@@ -993,11 +619,7 @@ bool InfEngineBackendLayer::getMemoryShapes(const std::vector<MatShape> &inputs,
                                             std::vector<MatShape> &outputs,
                                             std::vector<MatShape> &internals) const
 {
-#if INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2018R3)
-    InferenceEngine::ICNNNetwork::InputShapes inShapes = const_cast<InferenceEngine::CNNNetwork&>(t_net).getInputShapes();
-#else
     InferenceEngine::ICNNNetwork::InputShapes inShapes = t_net.getInputShapes();
-#endif
     InferenceEngine::ICNNNetwork::InputShapes::iterator itr;
     bool equal_flag = true;
     size_t i = 0;
@@ -1044,7 +666,6 @@ InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob)
     return halfs;
 }
 
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
 void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data,
                      InferenceEngine::Builder::Layer& l)
 {
@@ -1054,7 +675,6 @@ void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data,
     l.addConstantData(name, data);
 #endif
 }
-#endif
 
 #endif  // HAVE_INF_ENGINE
 
@@ -1103,7 +723,7 @@ static std::string getInferenceEngineVPUType_()
 {
 #if defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
     param_vpu_type = OPENCV_DNN_IE_VPU_TYPE_DEFAULT;
-#elif INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+#else
     CV_LOG_INFO(NULL, "OpenCV-DNN: running Inference Engine VPU autodetection: Myriad2/X. In case of other accelerator types specify 'OPENCV_DNN_IE_VPU_TYPE' parameter");
     try {
         bool isMyriadX_ = detectMyriadX_();
@@ -1121,9 +741,6 @@ static std::string getInferenceEngineVPUType_()
         CV_LOG_WARNING(NULL, "OpenCV-DNN: Failed Inference Engine VPU autodetection. Specify 'OPENCV_DNN_IE_VPU_TYPE' parameter.");
         param_vpu_type.clear();
     }
-#else
-    CV_LOG_WARNING(NULL, "OpenCV-DNN: VPU auto-detection is not implemented. Consider specifying VPU type via 'OPENCV_DNN_IE_VPU_TYPE' parameter");
-    param_vpu_type = CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2;
 #endif
     }
     CV_LOG_INFO(NULL, "OpenCV-DNN: Inference Engine VPU type='" << param_vpu_type << "'");
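getSharedPlugins() survives the cleanup above: Inference Engine plugins are costly to load, so one instance per device key is cached in a function-local static map for the lifetime of the process. A simplified sketch of that caching shape (Plugin is a placeholder for InferenceEngine::InferenceEnginePluginPtr, and the real code also serializes access with a mutex):

    #include <map>
    #include <string>

    struct Plugin {};  // placeholder for the real plugin handle

    static Plugin& getPluginFor(const std::string& device)
    {
        // One shared instance per device key, created on first use.
        static std::map<std::string, Plugin> sharedPlugins;
        return sharedPlugins[device];
    }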
diff --git a/modules/dnn/src/op_inf_engine.hpp b/modules/dnn/src/op_inf_engine.hpp
index 44ffd5e4ac..37bc5cc587 100644
--- a/modules/dnn/src/op_inf_engine.hpp
+++ b/modules/dnn/src/op_inf_engine.hpp
@@ -19,8 +19,6 @@
 
 #ifdef HAVE_INF_ENGINE
 
-#define INF_ENGINE_RELEASE_2018R3 2018030000
-#define INF_ENGINE_RELEASE_2018R4 2018040000
 #define INF_ENGINE_RELEASE_2018R5 2018050000
 #define INF_ENGINE_RELEASE_2019R1 2019010000
 
@@ -46,9 +44,7 @@
 
 #include <inference_engine.hpp>
 
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
 #include <ie_builders.hpp>
-#endif
 
 #if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
 #pragma GCC visibility pop
@@ -64,111 +60,6 @@ namespace cv { namespace dnn {
 
 #ifdef HAVE_INF_ENGINE
 
-#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5)
-class InfEngineBackendNet : public InferenceEngine::ICNNNetwork
-{
-public:
-    InfEngineBackendNet();
-
-    InfEngineBackendNet(InferenceEngine::CNNNetwork& net);
-
-    virtual void Release() noexcept CV_OVERRIDE;
-
-    void setPrecision(InferenceEngine::Precision p) noexcept;
-
-    virtual InferenceEngine::Precision getPrecision() noexcept;
-
-    virtual InferenceEngine::Precision getPrecision() const noexcept;
-
-    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) noexcept /*CV_OVERRIDE*/;
-
-    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) const noexcept /*CV_OVERRIDE*/;
-
-    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) noexcept /*CV_OVERRIDE*/;
-
-    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) const noexcept /*CV_OVERRIDE*/;
-
-    virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) noexcept;
-
-    virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) const noexcept;
-
-    virtual InferenceEngine::StatusCode serialize(const std::string &xmlPath, const std::string &binPath, InferenceEngine::ResponseDesc* resp) const noexcept;
-
-    virtual void getName(char *pName, size_t len) noexcept;
-
-    virtual void getName(char *pName, size_t len) const noexcept;
-
-    virtual const std::string& getName() const noexcept;
-
-    virtual size_t layerCount() noexcept;
-
-    virtual size_t layerCount() const noexcept;
-
-    virtual InferenceEngine::DataPtr& getData(const char *dname) noexcept CV_OVERRIDE;
-
-    virtual void addLayer(const InferenceEngine::CNNLayerPtr &layer) noexcept CV_OVERRIDE;
-
-    virtual InferenceEngine::StatusCode addOutput(const std::string &layerName,
-                                                  size_t outputIndex = 0,
-                                                  InferenceEngine::ResponseDesc *resp = nullptr) noexcept;
-
-    virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
-                                                       InferenceEngine::CNNLayerPtr &out,
-                                                       InferenceEngine::ResponseDesc *resp) noexcept;
-
-    virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
-                                                       InferenceEngine::CNNLayerPtr &out,
-                                                       InferenceEngine::ResponseDesc *resp) const noexcept;
-
-    virtual void setTargetDevice(InferenceEngine::TargetDevice device) noexcept CV_OVERRIDE;
-
-    virtual InferenceEngine::TargetDevice getTargetDevice() noexcept;
-
-    virtual InferenceEngine::TargetDevice getTargetDevice() const noexcept;
-
-    virtual InferenceEngine::StatusCode setBatchSize(const size_t size) noexcept CV_OVERRIDE;
-
-    virtual InferenceEngine::StatusCode setBatchSize(size_t size, InferenceEngine::ResponseDesc* responseDesc) noexcept;
-
-    virtual size_t getBatchSize() const noexcept CV_OVERRIDE;
-
-    virtual InferenceEngine::StatusCode AddExtension(const InferenceEngine::IShapeInferExtensionPtr& extension, InferenceEngine::ResponseDesc* resp) noexcept CV_OVERRIDE;
-    virtual InferenceEngine::StatusCode reshape(const InputShapes& inputShapes, InferenceEngine::ResponseDesc* resp) noexcept CV_OVERRIDE;
-
-    void init(int targetId);
-
-    void addBlobs(const std::vector<Ptr<BackendWrapper> >& wrappers);
-
-    void forward();
-
-    bool isInitialized();
-
-private:
-    std::vector<InferenceEngine::CNNLayerPtr> layers;
-    InferenceEngine::InputsDataMap inputs;
-    InferenceEngine::OutputsDataMap outputs;
-    InferenceEngine::BlobMap inpBlobs;
-    InferenceEngine::BlobMap outBlobs;
-    InferenceEngine::BlobMap allBlobs;
-    InferenceEngine::TargetDevice targetDevice;
-    InferenceEngine::Precision precision;
-    InferenceEngine::InferenceEnginePluginPtr enginePtr;
-    InferenceEngine::InferencePlugin plugin;
-    InferenceEngine::ExecutableNetwork netExec;
-    InferenceEngine::InferRequest infRequest;
-    // In case of models from Model Optimizer we need to manage their lifetime.
-    InferenceEngine::CNNNetwork netOwner;
-    // There is no way to check if netOwner is initialized or not so we use
-    // a separate flag to determine if the model has been loaded from IR.
-    bool hasNetOwner;
-
-    std::string name;
-
-    void initPlugin(InferenceEngine::ICNNNetwork& net);
-};
-
-#else  // IE < R5
-
 class InfEngineBackendNet
 {
 public:
@@ -226,28 +117,18 @@ private:
     std::set<int> unconnectedLayersIds;
 };
 
-#endif  // IE < R5
 
 class InfEngineBackendNode : public BackendNode
 {
 public:
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
     InfEngineBackendNode(const InferenceEngine::Builder::Layer& layer);
-#else
-    InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& layer);
-#endif
 
     void connect(std::vector<Ptr<BackendWrapper> >& inputs,
                  std::vector<Ptr<BackendWrapper> >& outputs);
 
     // Inference Engine network object that allows to obtain the outputs of this layer.
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
     InferenceEngine::Builder::Layer layer;
     Ptr<InfEngineBackendNet> net;
-#else
-    InferenceEngine::CNNLayerPtr layer;
-    Ptr<InfEngineBackendNet> net;
-#endif
 };
 
 class InfEngineBackendWrapper : public BackendWrapper
@@ -282,9 +163,7 @@ Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
 // Allocates memory for a new blob.
 InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
 
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
 void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data,
                      InferenceEngine::Builder::Layer& l);
-#endif
 
 // This is a fake class to run networks from Model Optimizer. Objects of that
 // class simulate responses of layers are imported by OpenCV and supported by
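With the 2018R3/R4 constants gone, only 2018R5 and 2019R1 remain. The constants encode a release as a decimal integer, so the version gates used throughout this patch reduce to integer comparisons. The macro bodies below are reproduced as an assumption (this hunk only shows the constants; the comparison macros live elsewhere in op_inf_engine.hpp):

    #define INF_ENGINE_RELEASE_2018R5 2018050000
    #define INF_ENGINE_RELEASE_2019R1 2019010000

    // Normally set by the build system to the detected IE version:
    #define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2019R1

    #define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
    #define INF_ENGINE_VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) <  ((ver) / 10000))

    #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
    // Builder-API code path: the only path left after this patch.
    #endif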
diff --git a/modules/dnn/test/test_backends.cpp b/modules/dnn/test/test_backends.cpp
index 50d44c7f10..17a37a44da 100644
--- a/modules/dnn/test/test_backends.cpp
+++ b/modules/dnn/test/test_backends.cpp
@@ -291,6 +291,7 @@ TEST_P(DNNTestNetwork, OpenPose_pose_mpi)
         && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
         throw SkipTestException("Test is disabled for OpenVINO <= 2018R5 + MyriadX target");
 #endif
+    // output range: [-0.001, 0.97]
     const float l1 = (target == DNN_TARGET_MYRIAD) ? 0.012 : 0.0;
     const float lInf = (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.16 : 0.0;
@@ -309,6 +310,7 @@ TEST_P(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages)
         && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
         throw SkipTestException("Test is disabled for OpenVINO <= 2018R5 + MyriadX target");
 #endif
+    // The same .caffemodel but modified .prototxt
     // See https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/pose/poseParameters.cpp
     processNet("dnn/openpose_pose_mpi.caffemodel", "dnn/openpose_pose_mpi_faster_4_stages.prototxt",
@@ -322,9 +324,6 @@ TEST_P(DNNTestNetwork, OpenFace)
 #if INF_ENGINE_VER_MAJOR_EQ(2018050000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
         throw SkipTestException("Test is disabled for Myriad targets");
-#elif INF_ENGINE_VER_MAJOR_EQ(2018030000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
-        throw SkipTestException("Test has been fixed in OpenVINO 2018R4");
 #endif
 #endif
     if (backend == DNN_BACKEND_HALIDE)
@@ -407,7 +406,9 @@ TEST_P(DNNTestNetwork, FastNeuralStyle_eccv16)
     float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.4 : 4e-5;
     float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 7.45 : 2e-3;
     processNet("dnn/fast_neural_style_eccv16_starry_night.t7", "", inp, "", "", l1, lInf);
+#if defined(HAVE_INF_ENGINE) && INF_ENGINE_RELEASE >= 2019010000
     expectNoFallbacksFromIE(net);
+#endif
 }
 
 INSTANTIATE_TEST_CASE_P(/*nothing*/, DNNTestNetwork, dnnBackendsAndTargets(true, true, false));
diff --git a/modules/dnn/test/test_darknet_importer.cpp b/modules/dnn/test/test_darknet_importer.cpp
index 683c22e691..e70e21141f 100644
--- a/modules/dnn/test/test_darknet_importer.cpp
+++ b/modules/dnn/test/test_darknet_importer.cpp
@@ -332,10 +332,6 @@ TEST_P(Test_Darknet_nets, TinyYoloVoc)
         testDarknetModel(config_file, weights_file, ref.rowRange(0, 2), scoreDiff, iouDiff);
     }
 
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018040000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        throw SkipTestException("Test with 'batch size 2' is disabled for Myriad target (fixed in 2018R5)");
-#endif
     {
         SCOPED_TRACE("batch size 2");
         testDarknetModel(config_file, weights_file, ref, scoreDiff, iouDiff);
@@ -389,10 +385,6 @@ INSTANTIATE_TEST_CASE_P(/**/, Test_Darknet_nets, dnnBackendsAndTargets());
 
 TEST_P(Test_Darknet_layers, shortcut)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018040000
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_CPU)
-        throw SkipTestException("Test is enabled starts from OpenVINO 2018R4");
-#endif
     testDarknetLayer("shortcut");
 }
diff --git a/modules/dnn/test/test_halide_layers.cpp b/modules/dnn/test/test_halide_layers.cpp
index 6950ad0731..ff10842052 100644
--- a/modules/dnn/test/test_halide_layers.cpp
+++ b/modules/dnn/test/test_halide_layers.cpp
@@ -159,12 +159,6 @@ TEST_P(Deconvolution, Accuracy)
     Backend backendId = get<0>(get<7>(GetParam()));
     Target targetId = get<1>(get<7>(GetParam()));
 
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018040000)
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_CPU
-        && hasBias && group != 1)
-        throw SkipTestException("Test is disabled for OpenVINO 2018R4");
-#endif
-
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD
         && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
@@ -278,19 +272,13 @@ TEST_P(AvePooling, Accuracy)
     Backend backendId = get<0>(get<4>(GetParam()));
     Target targetId = get<1>(get<4>(GetParam()));
 
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2018050000)
+#if defined(INF_ENGINE_RELEASE)
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD
         && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
         && kernel == Size(1, 1) && (stride == Size(1, 1) || stride == Size(2, 2)))
         throw SkipTestException("Test is disabled for MyriadX target");
 #endif
 
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2018040000)
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD &&
-        stride == Size(3, 2) && kernel == Size(3, 3) && outSize != Size(1, 1))
-        throw SkipTestException("Test is fixed in OpenVINO 2018R4");
-#endif
-
     const int inWidth = (outSize.width - 1) * stride.width + kernel.width;
     const int inHeight = (outSize.height - 1) * stride.height + kernel.height;
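The AvePooling hunk above keeps the input-size arithmetic intact: for unpadded pooling the test derives the input extent from the desired output as in = (out - 1) * stride + kernel. A quick standalone check:

    #include <iostream>

    int main()
    {
        const int out = 3, stride = 2, kernel = 3;
        std::cout << (out - 1) * stride + kernel << "\n";  // prints 7
        return 0;
    }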
diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp
index 6ce89e3033..8812714bff 100644
--- a/modules/dnn/test/test_layers.cpp
+++ b/modules/dnn/test/test_layers.cpp
@@ -141,10 +141,8 @@ TEST_P(Test_Caffe_layers, Convolution)
 
 TEST_P(Test_Caffe_layers, DeConvolution)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_CPU)
-        throw SkipTestException("Test is disabled for OpenVINO 2018R4");
-#endif
+        throw SkipTestException("Test is disabled for DLIE/CPU");
     testLayerUsingCaffeModels("layer_deconvolution", true, false);
 }
 
@@ -254,8 +252,7 @@ TEST_P(Test_Caffe_layers, Fused_Concat)
 #if defined(INF_ENGINE_RELEASE)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE
-        && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16
-            || (INF_ENGINE_RELEASE < 2018040000 && target == DNN_TARGET_CPU))
+        && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
     )
         throw SkipTestException("Test is disabled for DLIE");
 #endif
@@ -1045,11 +1042,6 @@ TEST_P(Test_DLDT_two_inputs_3dim, as_IR)
     int secondInpType = get<1>(GetParam());
     Target targetId = get<2>(GetParam());
 
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018040000
-    if (secondInpType == CV_8U)
-        throw SkipTestException("Test is enabled starts from OpenVINO 2018R4");
-#endif
-
     std::string suffix = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? "_fp16" : "";
     Net net = readNet(_tf("net_two_inputs" + suffix + ".xml"), _tf("net_two_inputs.bin"));
     std::vector<int> inpSize = get<3>(GetParam());
"_fp16" : ""; Net net = readNet(_tf("net_two_inputs" + suffix + ".xml"), _tf("net_two_inputs.bin")); std::vector inpSize = get<3>(GetParam()); diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp index 6b6affc107..e132bf6923 100644 --- a/modules/dnn/test/test_onnx_importer.cpp +++ b/modules/dnn/test/test_onnx_importer.cpp @@ -86,6 +86,9 @@ TEST_P(Test_ONNX_layers, Convolution) TEST_P(Test_ONNX_layers, Convolution3D) { +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000) + throw SkipTestException("Test is enabled starts from 2019R1"); +#endif if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU) throw SkipTestException("Only DLIE backend on CPU is supported"); testONNXModels("conv3d"); @@ -94,7 +97,7 @@ TEST_P(Test_ONNX_layers, Convolution3D) TEST_P(Test_ONNX_layers, Two_convolution) { -#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2018050000) +#if defined(INF_ENGINE_RELEASE) if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X ) @@ -150,6 +153,9 @@ TEST_P(Test_ONNX_layers, AveragePooling) TEST_P(Test_ONNX_layers, MaxPooling3D) { +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000) + throw SkipTestException("Test is enabled starts from 2019R1"); +#endif if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU) throw SkipTestException("Only DLIE backend on CPU is supported"); testONNXModels("max_pool3d"); @@ -157,6 +163,9 @@ TEST_P(Test_ONNX_layers, MaxPooling3D) TEST_P(Test_ONNX_layers, AvePooling3D) { +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000) + throw SkipTestException("Test is enabled starts from 2019R1"); +#endif if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU) throw SkipTestException("Only DLIE backend on CPU is supported"); testONNXModels("ave_pool3d"); @@ -195,14 +204,18 @@ TEST_P(Test_ONNX_layers, Constant) #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000) if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X) - throw SkipTestException("Test is disabled for OpenVINO <= 2018R5 + MyriadX target"); + throw SkipTestException("Test is disabled for OpenVINO <= 2018R5 + MyriadX target"); #endif testONNXModels("constant"); } TEST_P(Test_ONNX_layers, Padding) { +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000) + testONNXModels("padding", npy, 0, 0, false, false); +#else testONNXModels("padding"); +#endif } TEST_P(Test_ONNX_layers, Resize) @@ -247,7 +260,11 @@ TEST_P(Test_ONNX_layers, Reshape) TEST_P(Test_ONNX_layers, Slice) { +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000) + testONNXModels("slice", npy, 0, 0, false, false); +#else testONNXModels("slice"); +#endif } TEST_P(Test_ONNX_layers, Softmax) @@ -504,6 +521,9 @@ TEST_P(Test_ONNX_nets, Shufflenet) TEST_P(Test_ONNX_nets, Resnet34_kinetics) { +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000) + throw SkipTestException("Test is enabled starts from 2019R1"); +#endif if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU) throw SkipTestException("Only DLIE backend on CPU is supported"); diff --git a/modules/dnn/test/test_tf_importer.cpp b/modules/dnn/test/test_tf_importer.cpp index c495af8cfe..dcb84c3ae9 100644 --- a/modules/dnn/test/test_tf_importer.cpp +++ 
diff --git a/modules/dnn/test/test_tf_importer.cpp b/modules/dnn/test/test_tf_importer.cpp
index c495af8cfe..dcb84c3ae9 100644
--- a/modules/dnn/test/test_tf_importer.cpp
+++ b/modules/dnn/test/test_tf_importer.cpp
@@ -133,6 +133,9 @@ TEST_P(Test_TensorFlow_layers, conv)
 
 TEST_P(Test_TensorFlow_layers, Convolution3D)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
+    throw SkipTestException("Test is enabled starts from 2019R1");
+#endif
     if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU)
         throw SkipTestException("Only DLIE backend on CPU is supported");
     runTensorFlowNet("conv3d");
@@ -229,6 +232,9 @@ TEST_P(Test_TensorFlow_layers, ave_pool_same)
 
 TEST_P(Test_TensorFlow_layers, MaxPooling3D)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
+    throw SkipTestException("Test is enabled starts from 2019R1");
+#endif
     if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU)
         throw SkipTestException("Only DLIE backend on CPU is supported");
     runTensorFlowNet("max_pool3d");
@@ -236,6 +242,9 @@ TEST_P(Test_TensorFlow_layers, MaxPooling3D)
 
 TEST_P(Test_TensorFlow_layers, AvePooling3D)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
+    throw SkipTestException("Test is enabled starts from 2019R1");
+#endif
     if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU)
         throw SkipTestException("Only DLIE backend on CPU is supported");
     runTensorFlowNet("ave_pool3d");
@@ -337,10 +346,15 @@ class Test_TensorFlow_nets : public DNNTestLayer {};
 
 TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
 {
 #if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
-        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
-    )
-        throw SkipTestException("Test is disabled for MyriadX");
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+    {
+#if INF_ENGINE_VER_MAJOR_GE(2019010000)
+        if (getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
+            throw SkipTestException("Test is disabled for MyriadX");
+#else
+        throw SkipTestException("Test is disabled for Myriad");
+#endif
+    }
 #endif
 
     checkBackend();
@@ -364,7 +378,9 @@ TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
     double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0043 : default_l1;
     double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.037 : default_lInf;
     normAssertDetections(ref, out, "", 0.2, scoreDiff, iouDiff);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2019010000
     expectNoFallbacksFromIE(net);
+#endif
 }
 
 TEST_P(Test_TensorFlow_nets, Inception_v2_SSD)
diff --git a/modules/dnn/test/test_torch_importer.cpp b/modules/dnn/test/test_torch_importer.cpp
index f417a45d7e..6aaec8e1a7 100644
--- a/modules/dnn/test/test_torch_importer.cpp
+++ b/modules/dnn/test/test_torch_importer.cpp
@@ -136,10 +136,8 @@ TEST_P(Test_Torch_layers, run_reshape_change_batch_size)
 
 TEST_P(Test_Torch_layers, run_reshape)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        throw SkipTestException("Test is disabled for OpenVINO 2018R4");
-#endif
+        throw SkipTestException("Test is disabled for Myriad targets");
     runTorchNet("net_reshape_batch");
     runTorchNet("net_reshape_channels", "", false, true);
 }
@@ -220,10 +218,6 @@ TEST_P(Test_Torch_layers, net_conv_gemm_lrn)
 
 TEST_P(Test_Torch_layers, net_inception_block)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018030000
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        throw SkipTestException("");
-#endif
     runTorchNet("net_inception_block", "", false, true);
 }