Merge pull request #14792 from dkurt:dnn_ie_min_version_r5

* Remove Inference Engine 2018R3 and 2018R4

* Fix 2018R5
pull/14747/head
Authored by Dmitry Kurtaev (6 years ago), committed by Alexander Alekhin
parent 54a14c877c
commit eba696a41e
Changed files (number of changed lines in parentheses):
  1. modules/dnn/include/opencv2/dnn/all_layers.hpp (5)
  2. modules/dnn/src/dnn.cpp (97)
  3. modules/dnn/src/layers/batch_norm_layer.cpp (19)
  4. modules/dnn/src/layers/blank_layer.cpp (21)
  5. modules/dnn/src/layers/concat_layer.cpp (16)
  6. modules/dnn/src/layers/convolution_layer.cpp (100)
  7. modules/dnn/src/layers/crop_layer.cpp (205)
  8. modules/dnn/src/layers/detection_output_layer.cpp (25)
  9. modules/dnn/src/layers/elementwise_layers.cpp (114)
  10. modules/dnn/src/layers/eltwise_layer.cpp (23)
  11. modules/dnn/src/layers/flatten_layer.cpp (16)
  12. modules/dnn/src/layers/fully_connected_layer.cpp (22)
  13. modules/dnn/src/layers/lrn_layer.cpp (21)
  14. modules/dnn/src/layers/mvn_layer.cpp (21)
  15. modules/dnn/src/layers/normalize_bbox_layer.cpp (51)
  16. modules/dnn/src/layers/padding_layer.cpp (5)
  17. modules/dnn/src/layers/permute_layer.cpp (20)
  18. modules/dnn/src/layers/pooling_layer.cpp (66)
  19. modules/dnn/src/layers/prior_box_layer.cpp (63)
  20. modules/dnn/src/layers/proposal_layer.cpp (33)
  21. modules/dnn/src/layers/reorg_layer.cpp (15)
  22. modules/dnn/src/layers/reshape_layer.cpp (23)
  23. modules/dnn/src/layers/resize_layer.cpp (47)
  24. modules/dnn/src/layers/scale_layer.cpp (34)
  25. modules/dnn/src/layers/slice_layer.cpp (136)
  26. modules/dnn/src/layers/softmax_layer.cpp (17)
  27. modules/dnn/src/op_inf_engine.cpp (391)
  28. modules/dnn/src/op_inf_engine.hpp (121)
  29. modules/dnn/test/test_backends.cpp (7)
  30. modules/dnn/test/test_darknet_importer.cpp (8)
  31. modules/dnn/test/test_halide_layers.cpp (14)
  32. modules/dnn/test/test_layers.cpp (12)
  33. modules/dnn/test/test_onnx_importer.cpp (24)
  34. modules/dnn/test/test_tf_importer.cpp (24)
  35. modules/dnn/test/test_torch_importer.cpp (8)
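Every hunk below applies the same simplification: branches written for Inference Engine releases older than 2018R5 are deleted, and only the 2018R5 Builder API branch is kept. A rough before/after sketch of that gate, using the version macros from modules/dnn/src/op_inf_engine.hpp (illustrative only, condensed from the concat_layer.cpp hunk, not literal patch text):

// Before this patch a typical initInfEngine() carried both code paths:
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
    InferenceEngine::Builder::ConcatLayer ieLayer(name);            // Builder API, 2018R5+
    ieLayer.setAxis(clamp(axis, input->dims.size()));
    return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#else
    InferenceEngine::LayerParams lp;                                 // legacy CNNLayer API
    lp.name = name;
    lp.type = "Concat";
    lp.precision = InferenceEngine::Precision::FP32;
    std::shared_ptr<InferenceEngine::ConcatLayer> ieLayer(new InferenceEngine::ConcatLayer(lp));
    ieLayer->_axis = clamp(axis, input->dims.size());
    return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
// After this patch only the Builder-API branch remains: 2018R5
// (INF_ENGINE_RELEASE >= 2018050000, cf. the deconvolution hunk below) is now
// the minimum supported release, so the #else branches are dead code and removed.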

@ -492,10 +492,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
class CV_EXPORTS CropLayer : public Layer
{
public:
int startAxis;
std::vector<int> offset;
static Ptr<CropLayer> create(const LayerParams &params);
static Ptr<Layer> create(const LayerParams &params);
};
class CV_EXPORTS EltwiseLayer : public Layer

@ -729,20 +729,9 @@ struct DataLayer : public Layer
}
biases->set(biasesVec);
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
addConstantData("weights", weights, ieLayer);
addConstantData("biases", biases, ieLayer);
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "ScaleShift";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
ieLayer->_weights = weights;
ieLayer->_biases = biases;
#endif
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
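// For reference (illustration only, not part of the patch): the surviving
// 2018R5 path above relies on two helpers declared in op_inf_engine.hpp,
// wrapToInfEngineBlob() and addConstantData(), used in this pattern
// (weightsMat/biasMat are hypothetical per-channel cv::Mat's):
//
//     const size_t numChannels = weightsMat.total();
//     InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
//     addConstantData("weights", wrapToInfEngineBlob(weightsMat, {numChannels}, InferenceEngine::Layout::C), ieLayer);
//     addConstantData("biases",  wrapToInfEngineBlob(biasMat,    {numChannels}, InferenceEngine::Layout::C), ieLayer);
//     return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));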
@ -1459,11 +1448,7 @@ struct Net::Impl
if (layerNet != ieInpNode->net)
{
// layerNet is empty or nodes are from different graphs.
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
ieInpNode->net->addOutput(ieInpNode->layer.getName());
#else
ieInpNode->net->addOutput(ieInpNode->layer->name);
#endif
}
}
}
@ -1579,25 +1564,6 @@ struct Net::Impl
}
}
#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5)
// The same blobs wrappers cannot be shared between two Inference Engine
// networks because of explicit references between layers and blobs.
// So we need to rewrap all the external blobs.
for (int i = 0; i < ld.inputBlobsId.size(); ++i)
{
LayerPin inPin = ld.inputBlobsId[i];
auto it = netBlobsWrappers.find(inPin);
if (it == netBlobsWrappers.end())
{
ld.inputBlobsWrappers[i] = InfEngineBackendWrapper::create(ld.inputBlobsWrappers[i]);
netBlobsWrappers[inPin] = ld.inputBlobsWrappers[i];
}
else
ld.inputBlobsWrappers[i] = it->second;
}
netBlobsWrappers[LayerPin(ld.id, 0)] = ld.outputBlobsWrappers[0];
#endif // IE < R5
Ptr<BackendNode> node;
if (!net.empty())
{
@ -1628,7 +1594,6 @@ struct Net::Impl
ieNode->net = net;
// Convert weights in FP16 for specific targets.
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
if ((preferableTarget == DNN_TARGET_OPENCL_FP16 ||
preferableTarget == DNN_TARGET_MYRIAD ||
preferableTarget == DNN_TARGET_FPGA) && !fused)
@ -1670,47 +1635,6 @@ struct Net::Impl
net->addBlobs(ld.inputBlobsWrappers);
net->addBlobs(ld.outputBlobsWrappers);
addInfEngineNetOutputs(ld);
#else // IE >= R5
auto weightableLayer = std::dynamic_pointer_cast<InferenceEngine::WeightableLayer>(ieNode->layer);
if ((preferableTarget == DNN_TARGET_OPENCL_FP16 ||
preferableTarget == DNN_TARGET_MYRIAD ||
preferableTarget == DNN_TARGET_FPGA) && !fused)
{
ieNode->layer->precision = InferenceEngine::Precision::FP16;
if (weightableLayer)
{
if (weightableLayer->_weights)
weightableLayer->_weights = convertFp16(weightableLayer->_weights);
if (weightableLayer->_biases)
weightableLayer->_biases = convertFp16(weightableLayer->_biases);
}
else
{
for (const auto& weights : {"weights", "biases"})
{
auto it = ieNode->layer->blobs.find(weights);
if (it != ieNode->layer->blobs.end())
it->second = convertFp16(it->second);
}
}
}
if (weightableLayer)
{
if (weightableLayer->_weights)
weightableLayer->blobs["weights"] = weightableLayer->_weights;
if (weightableLayer->_biases)
weightableLayer->blobs["biases"] = weightableLayer->_biases;
}
ieNode->connect(ld.inputBlobsWrappers, ld.outputBlobsWrappers);
net->addBlobs(ld.inputBlobsWrappers);
net->addBlobs(ld.outputBlobsWrappers);
if (!fused)
net->addLayer(ieNode->layer);
addInfEngineNetOutputs(ld);
#endif // IE >= R5
}
// Initialize all networks.
@ -1732,23 +1656,6 @@ struct Net::Impl
if (!ieNode->net->isInitialized())
{
#if INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2018R4)
// For networks which is built in runtime we need to specify a
// version of it's hyperparameters.
std::string versionTrigger = "<net name=\"TestInput\" version=\"3\" batch=\"1\">"
"<layers>"
"<layer name=\"data\" type=\"Input\" precision=\"FP32\" id=\"0\">"
"<output>"
"<port id=\"0\">"
"<dim>1</dim>"
"</port>"
"</output>"
"</layer>"
"</layers>"
"</net>";
InferenceEngine::CNNNetReader reader;
reader.ReadNetwork(versionTrigger.data(), versionTrigger.size());
#endif
ieNode->net->init(preferableTarget);
ld.skip = false;
}
@ -2617,11 +2524,7 @@ Net Net::readFromModelOptimizer(const String& xml, const String& bin)
Net cvNet;
cvNet.setInputsNames(inputsNames);
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
Ptr<InfEngineBackendNode> backendNode(new InfEngineBackendNode(InferenceEngine::Builder::Layer("")));
#else
Ptr<InfEngineBackendNode> backendNode(new InfEngineBackendNode(0));
#endif
backendNode->net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet(ieNet));
for (auto& it : ieNet.getOutputsInfo())
{

@ -351,31 +351,16 @@ public:
}
#endif // HAVE_HALIDE
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
const size_t numChannels = weights_.total();
addConstantData("weights", wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
addConstantData("biases", wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "ScaleShift";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
const size_t numChannels = weights_.total();
ieLayer->_weights = wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C);
ieLayer->_biases = wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE

@ -107,12 +107,12 @@ public:
inputs[i].copyTo(outputs[i]);
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
CV_Assert(!input->dims.empty());
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
if (preferableTarget == DNN_TARGET_MYRIAD)
@ -122,7 +122,7 @@ public:
else
{
ieLayer.setType("Split");
ieLayer.getParameters()["axis"] = (size_t)0;
ieLayer.getParameters()["axis"] = input->dims.size() - 1;
ieLayer.getParameters()["out_sizes"] = input->dims[0];
}
std::vector<size_t> shape(input->dims);
@ -130,21 +130,8 @@ public:
ieLayer.setInputPorts({InferenceEngine::Port(shape)});
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "Split";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::SplitLayer> ieLayer(new InferenceEngine::SplitLayer(lp));
#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
ieLayer->params["axis"] = format("%d", (int)input->dims.size() - 1);
ieLayer->params["out_sizes"] = format("%d", (int)input->dims[0]);
#endif
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
};
Ptr<Layer> BlankLayer::create(const LayerParams& params)

@ -298,29 +298,17 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
InferenceEngine::Builder::ConcatLayer ieLayer(name);
ieLayer.setAxis(clamp(axis, input->dims.size()));
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#else
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "Concat";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::ConcatLayer> ieLayer(new InferenceEngine::ConcatLayer(lp));
ieLayer->_axis = clamp(axis, input->dims.size());
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
};
Ptr<ConcatLayer> ConcatLayer::create(const LayerParams& params)

@ -252,8 +252,7 @@ public:
{
if (kernel_size.size() == 3)
return preferableTarget == DNN_TARGET_CPU;
return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R4) ||
(preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height);
return (preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height);
}
else
#endif
@ -460,9 +459,9 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
CV_Assert(input->dims.size() == 4 || input->dims.size() == 5);
@ -501,7 +500,6 @@ public:
ieBiases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C);
}
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::ConvolutionLayer ieLayer(name);
ieLayer.setKernel(kernel_size);
@ -521,51 +519,8 @@ public:
l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
return Ptr<BackendNode>(new InfEngineBackendNode(l));
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "Convolution";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::ConvolutionLayer> ieLayer(new InferenceEngine::ConvolutionLayer(lp));
#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
ieLayer->_kernel.insert(InferenceEngine::X_AXIS, kernel.width);
ieLayer->_kernel.insert(InferenceEngine::Y_AXIS, kernel.height);
ieLayer->_stride.insert(InferenceEngine::X_AXIS, stride.width);
ieLayer->_stride.insert(InferenceEngine::Y_AXIS, stride.height);
ieLayer->_padding.insert(InferenceEngine::X_AXIS, pad.width);
ieLayer->_padding.insert(InferenceEngine::Y_AXIS, pad.height);
ieLayer->_pads_end.insert(InferenceEngine::X_AXIS, pad.width);
ieLayer->_pads_end.insert(InferenceEngine::Y_AXIS, pad.height);
ieLayer->_dilation.insert(InferenceEngine::X_AXIS, dilation.width);
ieLayer->_dilation.insert(InferenceEngine::Y_AXIS, dilation.height);
ieLayer->params["output"] = format("%d", outCn);
ieLayer->params["kernel"] = format("%d,%d,%d,%d", outCn, inpGroupCn, kernel.height, kernel.width);
ieLayer->params["pads_begin"] = format("%d,%d", pad.height, pad.width);
ieLayer->params["pads_end"] = format("%d,%d", pad.height, pad.width);
ieLayer->params["strides"] = format("%d,%d", stride.height, stride.width);
ieLayer->params["dilations"] = format("%d,%d", dilation.height, dilation.width);
#else
ieLayer->_kernel_x = kernel.width;
ieLayer->_kernel_y = kernel.height;
ieLayer->_stride_x = stride.width;
ieLayer->_stride_y = stride.height;
ieLayer->_padding_x = pad.width;
ieLayer->_padding_y = pad.height;
ieLayer->_dilation_x = dilation.width;
ieLayer->_dilation_y = dilation.height;
#endif
ieLayer->_out_depth = outCn;
ieLayer->_group = group;
ieLayer->_weights = ieWeights;
if (ieBiases)
ieLayer->_biases = ieBiases;
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
class ParallelConv : public cv::ParallelLoopBody
{
@ -1184,7 +1139,7 @@ public:
if (kernel_size.size() == 3)
CV_Error(Error::StsNotImplemented, "Unsupported deconvolution3D layer");
if (INF_ENGINE_RELEASE >= 2018050000 && (adjustPad.height || adjustPad.width))
if (adjustPad.height || adjustPad.width)
{
if (padMode.empty())
{
@ -1793,9 +1748,9 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
auto ieWeights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW);
if (fusedWeights)
{
@ -1809,7 +1764,6 @@ public:
transpose(weightsMat, newWeights);
}
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW layout
const int group = numOutput / outGroupCn;
@ -1837,50 +1791,8 @@ public:
if (hasBias())
addConstantData("biases", wrapToInfEngineBlob(biasesMat, {(size_t)numOutput}, InferenceEngine::Layout::C), l);
return Ptr<BackendNode>(new InfEngineBackendNode(l));
#else
const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW layout
const int group = numOutput / outGroupCn;
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "Deconvolution";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::DeconvolutionLayer> ieLayer(new InferenceEngine::DeconvolutionLayer(lp));
#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
ieLayer->_kernel.insert(InferenceEngine::X_AXIS, kernel.width);
ieLayer->_kernel.insert(InferenceEngine::Y_AXIS, kernel.height);
ieLayer->_stride.insert(InferenceEngine::X_AXIS, stride.width);
ieLayer->_stride.insert(InferenceEngine::Y_AXIS, stride.height);
ieLayer->_padding.insert(InferenceEngine::X_AXIS, pad.width);
ieLayer->_padding.insert(InferenceEngine::Y_AXIS, pad.height);
ieLayer->_pads_end.insert(InferenceEngine::X_AXIS, pad.width);
ieLayer->_pads_end.insert(InferenceEngine::Y_AXIS, pad.height);
ieLayer->_dilation.insert(InferenceEngine::X_AXIS, dilation.width);
ieLayer->_dilation.insert(InferenceEngine::Y_AXIS, dilation.height);
#else
ieLayer->_kernel_x = kernel.width;
ieLayer->_kernel_y = kernel.height;
ieLayer->_stride_x = stride.width;
ieLayer->_stride_y = stride.height;
ieLayer->_padding_x = pad.width;
ieLayer->_padding_y = pad.height;
ieLayer->_dilation_x = dilation.width;
ieLayer->_dilation_y = dilation.height;
#endif
ieLayer->_out_depth = numOutput;
ieLayer->_group = group;
ieLayer->_weights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW);
if (hasBias())
{
ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C);
}
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE

@ -1,205 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "../op_inf_engine.hpp"
#include "layers_common.hpp"
namespace cv
{
namespace dnn
{
class CropLayerImpl CV_FINAL : public CropLayer
{
public:
CropLayerImpl(const LayerParams& params)
{
setParamsFrom(params);
startAxis = params.get<int>("axis", 2);
const DictValue *paramOffset = params.ptr("offset");
if (paramOffset)
{
for (int i = 0; i < paramOffset->size(); i++)
offset.push_back(paramOffset->get<int>(i));
}
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
return INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5) && crop_ranges.size() == 4;
else
#endif
return backendId == DNN_BACKEND_OPENCV;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() == 2);
MatShape dstShape = inputs[0];
int start = clamp(startAxis, dstShape);
for (int i = start; i < dstShape.size(); i++)
{
dstShape[i] = inputs[1][i];
}
outputs.resize(1, dstShape);
return false;
}
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat> inputs;
inputs_arr.getMatVector(inputs);
CV_Assert(2 == inputs.size());
const Mat &inpBlob = inputs[0];
const Mat &inpSzBlob = inputs[1];
int dims = inpBlob.dims;
int start_axis = clamp(startAxis, dims);
std::vector<int> offset_final(dims, 0);
if (offset.size() == 1)
{
for (int i = start_axis; i < dims; i++)
offset_final[i] = offset[0];
}
else if (offset.size() > 1)
{
if ((int)offset.size() != dims - start_axis)
CV_Error(Error::StsBadArg, "number of offset values specified must be "
"equal to the number of dimensions following axis.");
for (int i = start_axis; i < dims; i++)
offset_final[i] = offset[i - start_axis];
}
crop_ranges.resize(dims);
for (int i = 0; i < start_axis; i++)
{
crop_ranges[i] = Range(0, inpBlob.size[i]);
}
for (int i = start_axis; i < dims; i++)
{
if (offset_final[i] < 0 || offset_final[i] + inpSzBlob.size[i] > inpBlob.size[i])
CV_Error(Error::StsBadArg, "invalid crop parameters or blob sizes");
crop_ranges[i] = Range(offset_final[i], offset_final[i] + inpSzBlob.size[i]);
}
}
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
std::vector<Mat> inputs, outputs;
inputs_arr.getMatVector(inputs);
outputs_arr.getMatVector(outputs);
Mat &input = inputs[0];
input(&crop_ranges[0]).copyTo(outputs[0]);
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "Crop";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::CropLayer> ieLayer(new InferenceEngine::CropLayer(lp));
CV_Assert(crop_ranges.size() == 4);
#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
for (int i = 0; i < 4; ++i)
{
ieLayer->axis.push_back(i);
ieLayer->offset.push_back(crop_ranges[i].start);
ieLayer->dim.push_back(crop_ranges[i].end - crop_ranges[i].start);
}
#else
ieLayer->axis.push_back(0); // batch
ieLayer->offset.push_back(crop_ranges[0].start);
ieLayer->dim.push_back(crop_ranges[0].end - crop_ranges[0].start);
ieLayer->axis.push_back(1); // channels
ieLayer->offset.push_back(crop_ranges[1].start);
ieLayer->dim.push_back(crop_ranges[1].end - crop_ranges[1].start);
ieLayer->axis.push_back(3); // height
ieLayer->offset.push_back(crop_ranges[2].start);
ieLayer->dim.push_back(crop_ranges[2].end - crop_ranges[2].start);
ieLayer->axis.push_back(2); // width
ieLayer->offset.push_back(crop_ranges[3].start);
ieLayer->dim.push_back(crop_ranges[3].end - crop_ranges[3].start);
#endif
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#else
return Ptr<BackendNode>();
#endif // IE < R5
}
#endif
std::vector<Range> crop_ranges;
};
Ptr<CropLayer> CropLayer::create(const LayerParams& params)
{
return Ptr<CropLayer>(new CropLayerImpl(params));
}
}
}

@ -918,10 +918,9 @@ public:
}
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::DetectionOutputLayer ieLayer(name);
ieLayer.setNumClasses(_numClasses);
@ -939,28 +938,8 @@ public:
l.getParameters()["eta"] = std::string("1.0");
return Ptr<BackendNode>(new InfEngineBackendNode(l));
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "DetectionOutput";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
ieLayer->params["num_classes"] = format("%d", _numClasses);
ieLayer->params["share_location"] = _shareLocation ? "1" : "0";
ieLayer->params["background_label_id"] = format("%d", _backgroundLabelId);
ieLayer->params["nms_threshold"] = format("%f", _nmsThreshold);
ieLayer->params["top_k"] = format("%d", _topK);
ieLayer->params["keep_top_k"] = format("%d", _keepTopK);
ieLayer->params["eta"] = "1.0";
ieLayer->params["confidence_threshold"] = format("%f", _confidenceThreshold);
ieLayer->params["variance_encoded_in_target"] = _varianceEncodedInTarget ? "1" : "0";
ieLayer->params["code_type"] = "caffe.PriorBoxParameter." + _codeType;
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
};
float util::caffe_box_overlap(const util::NormalizedBBox& a, const util::NormalizedBBox& b)

@ -149,22 +149,14 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::Layer ieLayer = func.initInfEngineBuilderAPI();
ieLayer.setName(this->name);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#else
InferenceEngine::LayerParams lp;
lp.name = this->name;
lp.precision = InferenceEngine::Precision::FP32;
return Ptr<BackendNode>(new InfEngineBackendNode(func.initInfEngine(lp)));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
virtual bool tryFuse(Ptr<dnn::Layer>& top) CV_OVERRIDE
{
@ -354,21 +346,10 @@ struct ReLUFunctor
#endif // HAVE_HALIDE
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(slope);
}
#else
InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
{
lp.type = "ReLU";
std::shared_ptr<InferenceEngine::ReLULayer> ieLayer(new InferenceEngine::ReLULayer(lp));
ieLayer->negative_slope = slope;
ieLayer->params["negative_slope"] = format("%f", slope);
return ieLayer;
}
#endif
#endif // HAVE_INF_ENGINE
bool tryFuse(Ptr<dnn::Layer>&) { return false; }
@ -468,23 +449,10 @@ struct ReLU6Functor
#endif // HAVE_HALIDE
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::ClampLayer("").setMinValue(minValue).setMaxValue(maxValue);
}
#else
InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
{
lp.type = "Clamp";
std::shared_ptr<InferenceEngine::ClampLayer> ieLayer(new InferenceEngine::ClampLayer(lp));
ieLayer->min_value = minValue;
ieLayer->max_value = maxValue;
ieLayer->params["min"] = format("%f", minValue);
ieLayer->params["max"] = format("%f", maxValue);
return ieLayer;
}
#endif
#endif // HAVE_INF_ENGINE
bool tryFuse(Ptr<dnn::Layer>&) { return false; }
@ -553,19 +521,10 @@ struct TanHFunctor
#endif // HAVE_HALIDE
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::TanHLayer("");
}
#else
InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
{
lp.type = "TanH";
std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
return ieLayer;
}
#endif
#endif // HAVE_INF_ENGINE
bool tryFuse(Ptr<dnn::Layer>&) { return false; }
@ -634,19 +593,10 @@ struct SigmoidFunctor
#endif // HAVE_HALIDE
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::SigmoidLayer("");
}
#else
InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
{
lp.type = "Sigmoid";
std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
return ieLayer;
}
#endif
#endif // HAVE_INF_ENGINE
bool tryFuse(Ptr<dnn::Layer>&) { return false; }
@ -717,18 +667,10 @@ struct ELUFunctor
#endif // HAVE_HALIDE
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::ELULayer("");
}
#else
InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
{
lp.type = "ELU";
return InferenceEngine::CNNLayerPtr(new InferenceEngine::CNNLayer(lp));
}
#endif
#endif // HAVE_INF_ENGINE
bool tryFuse(Ptr<dnn::Layer>&) { return false; }
@ -800,21 +742,10 @@ struct AbsValFunctor
#endif // HAVE_HALIDE
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(-1);
}
#else
InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
{
lp.type = "ReLU";
std::shared_ptr<InferenceEngine::ReLULayer> ieLayer(new InferenceEngine::ReLULayer(lp));
ieLayer->negative_slope = -1;
ieLayer->params["negative_slope"] = "-1.0";
return ieLayer;
}
#endif
#endif // HAVE_INF_ENGINE
bool tryFuse(Ptr<dnn::Layer>&) { return false; }
@ -862,18 +793,10 @@ struct BNLLFunctor
#endif // HAVE_HALIDE
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
CV_Error(Error::StsNotImplemented, "");
}
#else
InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
{
CV_Error(Error::StsNotImplemented, "BNLL");
return InferenceEngine::CNNLayerPtr();
}
#endif
#endif // HAVE_INF_ENGINE
bool tryFuse(Ptr<dnn::Layer>&) { return false; }
@ -978,34 +901,12 @@ struct PowerFunctor
#endif // HAVE_HALIDE
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
return InferenceEngine::Builder::PowerLayer("").setPower(power)
.setScale(scale)
.setShift(shift);
}
#else
InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
{
if (power == 1.0f && scale == 1.0f && shift == 0.0f)
{
// It looks like there is a bug in Inference Engine for DNN_TARGET_OPENCL and DNN_TARGET_OPENCL_FP16
// if power layer do nothing so we replace it to Identity.
lp.type = "Split";
return std::shared_ptr<InferenceEngine::SplitLayer>(new InferenceEngine::SplitLayer(lp));
}
else
{
lp.type = "Power";
std::shared_ptr<InferenceEngine::PowerLayer> ieLayer(new InferenceEngine::PowerLayer(lp));
ieLayer->power = power;
ieLayer->scale = scale;
ieLayer->offset = shift;
return ieLayer;
}
}
#endif
#endif // HAVE_INF_ENGINE
bool tryFuse(Ptr<dnn::Layer>& top)
@ -1137,7 +1038,6 @@ struct ChannelsPReLUFunctor
#endif // HAVE_HALIDE
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
{
InferenceEngine::Builder::Layer l = InferenceEngine::Builder::PReLULayer("");
@ -1145,16 +1045,6 @@ struct ChannelsPReLUFunctor
addConstantData("weights", wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C), l);
return l;
}
#else
InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
{
lp.type = "PReLU";
std::shared_ptr<InferenceEngine::PReLULayer> ieLayer(new InferenceEngine::PReLULayer(lp));
const size_t numChannels = scale.total();
ieLayer->_weights = wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C);
return ieLayer;
}
#endif
#endif // HAVE_INF_ENGINE
bool tryFuse(Ptr<dnn::Layer>&) { return false; }

@ -420,10 +420,9 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::EltwiseLayer ieLayer(name);
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
@ -442,26 +441,8 @@ public:
l.getParameters()["coeff"] = coeffs;
return Ptr<BackendNode>(new InfEngineBackendNode(l));
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "Eltwise";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::EltwiseLayer> ieLayer(new InferenceEngine::EltwiseLayer(lp));
ieLayer->coeff = coeffs;
if (op == SUM)
ieLayer->_operation = InferenceEngine::EltwiseLayer::Sum;
else if (op == PROD)
ieLayer->_operation = InferenceEngine::EltwiseLayer::Prod;
else if (op == MAX)
ieLayer->_operation = InferenceEngine::EltwiseLayer::Max;
else
CV_Error(Error::StsNotImplemented, "Unsupported eltwise operation");
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE

@ -162,10 +162,9 @@ public:
}
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
ieLayer.setType("Flatten");
@ -174,19 +173,8 @@ public:
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "Flatten";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
ieLayer->params["axis"] = format("%d", _startAxis);
ieLayer->params["end_axis"] = format("%d", _endAxis);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
int _startAxis;
int _endAxis;

@ -439,10 +439,9 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::FullyConnectedLayer ieLayer(name);
const int outNum = blobs[0].size[0];
@ -454,25 +453,8 @@ public:
addConstantData("biases", wrapToInfEngineBlob(blobs[1], {(size_t)outNum}, InferenceEngine::Layout::C), l);
return Ptr<BackendNode>(new InfEngineBackendNode(l));
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "FullyConnected";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::FullyConnectedLayer> ieLayer(new InferenceEngine::FullyConnectedLayer(lp));
ieLayer->_out_num = blobs[0].size[0];
#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
ieLayer->params["out-size"] = format("%d", blobs[0].size[0]);
#endif
ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW);
if (blobs.size() > 1)
ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {(size_t)ieLayer->_out_num}, InferenceEngine::Layout::C);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE

@ -379,13 +379,13 @@ public:
#endif // HAVE_HALIDE
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
float alphaSize = alpha;
if (!normBySize)
alphaSize *= (type == SPATIAL_NRM ? size*size : size);
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::NormLayer ieLayer(name);
ieLayer.setSize(size);
ieLayer.setAlpha(alphaSize);
@ -395,23 +395,8 @@ public:
InferenceEngine::Builder::Layer l = ieLayer;
l.getParameters()["k"] = bias;
return Ptr<BackendNode>(new InfEngineBackendNode(l));
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "Norm";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::NormLayer> ieLayer(new InferenceEngine::NormLayer(lp));
ieLayer->_size = size;
ieLayer->_k = (int)bias;
ieLayer->_beta = beta;
ieLayer->_alpha = alphaSize;
ieLayer->_isAcrossMaps = (type == CHANNEL_NRM);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE

@ -118,11 +118,7 @@ public:
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
return !zeroDev && eps <= 1e-7f;
#else
return !zeroDev && (preferableTarget == DNN_TARGET_CPU || eps <= 1e-7f);
#endif
else
#endif // HAVE_INF_ENGINE
return backendId == DNN_BACKEND_OPENCV;
@ -369,29 +365,16 @@ public:
}
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::MVNLayer ieLayer(name);
ieLayer.setAcrossChannels(acrossChannels);
ieLayer.setNormalize(normVariance);
ieLayer.setEpsilon(eps);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "MVN";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::MVNLayer> ieLayer(new InferenceEngine::MVNLayer(lp));
ieLayer->params["across_channels"] = acrossChannels ? "1" : "0";
ieLayer->params["normalize_variance"] = normVariance ? "1" : "0";
ieLayer->params["eps"] = format("%f", eps);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE

@ -257,10 +257,9 @@ public:
}
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
if (input->dims.size() == 4)
{
@ -304,54 +303,8 @@ public:
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
#else
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
InferenceEngine::LayerParams lp;
lp.name = name;
lp.precision = InferenceEngine::Precision::FP32;
if (input->dims.size() == 4)
{
const int numChannels = input->dims[2]; // NOTE: input->dims are reversed (whcn)
lp.type = "Normalize";
std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
if (blobs.empty())
{
auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
InferenceEngine::Layout::C,
{(size_t)numChannels});
weights->allocate();
std::vector<float> ones(numChannels, 1);
weights->set(ones);
ieLayer->blobs["weights"] = weights;
ieLayer->params["channel_shared"] = "0";
}
else
{
CV_Assert(numChannels == blobs[0].total());
ieLayer->blobs["weights"] = wrapToInfEngineBlob(blobs[0], {(size_t)numChannels}, InferenceEngine::Layout::C);
ieLayer->params["channel_shared"] = blobs[0].total() == 1 ? "1" : "0";
}
ieLayer->params["eps"] = format("%f", epsilon);
ieLayer->params["across_spatial"] = acrossSpatial ? "1" : "0";
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
else
{
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "GRN";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
ieLayer->params["bias"] = format("%f", epsilon);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
private:
int startAxis, endAxis;

@ -182,9 +182,9 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
ieLayer.setType("Pad");
@ -204,9 +204,8 @@ public:
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
return Ptr<BackendNode>();
}
#endif
private:
std::vector<std::pair<int, int> > paddings; // Pairs pad before, pad after.

@ -370,30 +370,14 @@ public:
}
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::PermuteLayer ieLayer(name);
ieLayer.setOrder(_order);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "Permute";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
CV_Assert(!_order.empty());
ieLayer->params["order"] = format("%d", _order[0]);
for (int i = 1; i < _order.size(); ++i)
ieLayer->params["order"] += format(",%d", _order[i]);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
size_t _count;
std::vector<size_t> _order;

@ -278,10 +278,9 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
if (type == MAX || type == AVE)
{
InferenceEngine::Builder::PoolingLayer ieLayer(name);
@ -324,69 +323,8 @@ public:
else
CV_Error(Error::StsNotImplemented, "Unsupported pooling type");
return Ptr<BackendNode>();
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::CNNLayer> ieLayer;
if (type == MAX || type == AVE)
{
lp.type = "Pooling";
InferenceEngine::PoolingLayer* poolLayer = new InferenceEngine::PoolingLayer(lp);
#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
poolLayer->_kernel.insert(InferenceEngine::X_AXIS, kernel.width);
poolLayer->_kernel.insert(InferenceEngine::Y_AXIS, kernel.height);
poolLayer->_stride.insert(InferenceEngine::X_AXIS, stride.width);
poolLayer->_stride.insert(InferenceEngine::Y_AXIS, stride.height);
poolLayer->_padding.insert(InferenceEngine::X_AXIS, pad_l);
poolLayer->_padding.insert(InferenceEngine::Y_AXIS, pad_t);
poolLayer->_pads_end.insert(InferenceEngine::X_AXIS, pad_r);
poolLayer->_pads_end.insert(InferenceEngine::Y_AXIS, pad_b);
poolLayer->params["kernel"] = format("%d,%d", kernel.height, kernel.width);
poolLayer->params["pads_begin"] = format("%d,%d", pad_t, pad_l);
poolLayer->params["pads_end"] = format("%d,%d", pad_b, pad_r);
poolLayer->params["strides"] = format("%d,%d", stride.height, stride.width);
#else
poolLayer->_kernel_x = kernel.width;
poolLayer->_kernel_y = kernel.height;
poolLayer->_stride_x = stride.width;
poolLayer->_stride_y = stride.height;
poolLayer->_padding_x = pad_l;
poolLayer->_padding_y = pad_t;
poolLayer->params["pad-r"] = format("%d", pad_r);
poolLayer->params["pad-b"] = format("%d", pad_b);
#endif
poolLayer->_exclude_pad = type == AVE && padMode == "SAME";
poolLayer->params["rounding-type"] = ceilMode ? "ceil" : "floor";
poolLayer->_type = type == MAX ? InferenceEngine::PoolingLayer::PoolType::MAX :
InferenceEngine::PoolingLayer::PoolType::AVG;
ieLayer = std::shared_ptr<InferenceEngine::CNNLayer>(poolLayer);
}
else if (type == ROI)
{
lp.type = "ROIPooling";
ieLayer = std::shared_ptr<InferenceEngine::CNNLayer>(new InferenceEngine::CNNLayer(lp));
ieLayer->params["pooled_w"] = format("%d", pooledSize.width);
ieLayer->params["pooled_h"] = format("%d", pooledSize.height);
ieLayer->params["spatial_scale"] = format("%f", spatialScale);
}
else if (type == PSROI)
{
lp.type = "PSROIPooling";
ieLayer = std::shared_ptr<InferenceEngine::CNNLayer>(new InferenceEngine::CNNLayer(lp));
ieLayer->params["output_dim"] = format("%d", psRoiOutChannels);
ieLayer->params["group_size"] = format("%d", pooledSize.width);
ieLayer->params["spatial_scale"] = format("%f", spatialScale);
}
else
CV_Error(Error::StsNotImplemented, "Unsupported pooling type");
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
class PoolingInvoker : public ParallelLoopBody

@ -480,10 +480,9 @@ public:
}
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
if (_explicitSizes)
{
InferenceEngine::Builder::PriorBoxClusteredLayer ieLayer(name);
@ -541,66 +540,8 @@ public:
l.getParameters()["variance"] = _variance;
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = _explicitSizes ? "PriorBoxClustered" : "PriorBox";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
if (_explicitSizes)
{
CV_Assert(!_boxWidths.empty()); CV_Assert(!_boxHeights.empty());
CV_Assert(_boxWidths.size() == _boxHeights.size());
ieLayer->params["width"] = format("%f", _boxWidths[0]);
ieLayer->params["height"] = format("%f", _boxHeights[0]);
for (int i = 1; i < _boxWidths.size(); ++i)
{
ieLayer->params["width"] += format(",%f", _boxWidths[i]);
ieLayer->params["height"] += format(",%f", _boxHeights[i]);
}
}
else
{
ieLayer->params["min_size"] = format("%f", _minSize);
ieLayer->params["max_size"] = _maxSize > 0 ? format("%f", _maxSize) : "";
if (!_aspectRatios.empty())
{
ieLayer->params["aspect_ratio"] = format("%f", _aspectRatios[0]);
for (int i = 1; i < _aspectRatios.size(); ++i)
ieLayer->params["aspect_ratio"] += format(",%f", _aspectRatios[i]);
}
}
ieLayer->params["flip"] = "0"; // We already flipped aspect ratios.
ieLayer->params["clip"] = _clip ? "1" : "0";
CV_Assert(!_variance.empty());
ieLayer->params["variance"] = format("%f", _variance[0]);
for (int i = 1; i < _variance.size(); ++i)
ieLayer->params["variance"] += format(",%f", _variance[i]);
if (_stepX == _stepY)
{
ieLayer->params["step"] = format("%f", _stepX);
ieLayer->params["step_h"] = "0.0";
ieLayer->params["step_w"] = "0.0";
}
else
{
ieLayer->params["step"] = "0.0";
ieLayer->params["step_h"] = format("%f", _stepY);
ieLayer->params["step_w"] = format("%f", _stepX);
}
CV_CheckEQ(_offsetsX.size(), (size_t)1, ""); CV_CheckEQ(_offsetsY.size(), (size_t)1, ""); CV_CheckEQ(_offsetsX[0], _offsetsY[0], "");
ieLayer->params["offset"] = format("%f", _offsetsX[0]);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE

@ -322,10 +322,9 @@ public:
layerOutputs[0].col(2).copyTo(dst);
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::ProposalLayer ieLayer(name);
ieLayer.setBaseSize(baseSize);
@ -346,36 +345,8 @@ public:
ieLayer.setRatio(ratiosVec);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "Proposal";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
ieLayer->params["base_size"] = format("%d", baseSize);
ieLayer->params["feat_stride"] = format("%d", featStride);
ieLayer->params["min_size"] = "16";
ieLayer->params["nms_thresh"] = format("%f", nmsThreshold);
ieLayer->params["post_nms_topn"] = format("%d", keepTopAfterNMS);
ieLayer->params["pre_nms_topn"] = format("%d", keepTopBeforeNMS);
if (ratios.size())
{
ieLayer->params["ratio"] = format("%f", ratios.get<float>(0));
for (int i = 1; i < ratios.size(); ++i)
ieLayer->params["ratio"] += format(",%f", ratios.get<float>(i));
}
if (scales.size())
{
ieLayer->params["scale"] = format("%f", scales.get<float>(0));
for (int i = 1; i < scales.size(); ++i)
ieLayer->params["scale"] += format(",%f", scales.get<float>(i));
}
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
private:
// A first half of channels are background scores. We need only a second one.

@ -178,25 +178,14 @@ public:
permute->forward(inputs, outputs, internals_arr);
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::ReorgYoloLayer ieLayer(name);
ieLayer.setStride(reorgStride);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "ReorgYolo";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
ieLayer->params["stride"] = format("%d", reorgStride);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE

@ -257,34 +257,15 @@ public:
}
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::ReshapeLayer ieLayer(name);
CV_Assert(outShapes.size() == 1);
ieLayer.setDims(outShapes[0]);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "Reshape";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::ReshapeLayer> ieLayer(new InferenceEngine::ReshapeLayer(lp));
if (!newShapeDesc.empty())
ieLayer->shape = newShapeDesc;
else
{
CV_Assert(inputs.size() == 2);
InferenceEngine::DataPtr shapeSrc = infEngineDataNode(inputs[1]);
// NOTE: shapeSrc->dims are reversed
ieLayer->shape = std::vector<int>(shapeSrc->dims.rbegin(), shapeSrc->dims.rend());
}
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
private:
std::vector<MatShape> outShapes;

@ -55,7 +55,7 @@ public:
if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
{
return (interpolation == "nearest" && scaleWidth == scaleHeight) ||
(interpolation == "bilinear" && INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R4));
(interpolation == "bilinear");
}
#endif
return backendId == DNN_BACKEND_OPENCV;
@ -162,7 +162,6 @@ public:
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
if (interpolation == "nearest")
@ -188,32 +187,6 @@ public:
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::CNNLayer> ieLayer;
if (interpolation == "nearest")
{
lp.type = "Resample";
ieLayer = std::shared_ptr<InferenceEngine::CNNLayer>(new InferenceEngine::CNNLayer(lp));
ieLayer->params["type"] = "caffe.ResampleParameter.NEAREST";
ieLayer->params["antialias"] = "0";
}
else if (interpolation == "bilinear")
{
lp.type = "Interp";
ieLayer = std::shared_ptr<InferenceEngine::CNNLayer>(new InferenceEngine::CNNLayer(lp));
ieLayer->params["pad_beg"] = "0";
ieLayer->params["pad_end"] = "0";
ieLayer->params["align_corners"] = "0";
}
else
CV_Error(Error::StsNotImplemented, "Unsupported interpolation: " + interpolation);
ieLayer->params["width"] = cv::format("%d", outWidth);
ieLayer->params["height"] = cv::format("%d", outHeight);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
@ -271,10 +244,9 @@ public:
scaleWidth = (outWidth > 1) ? (static_cast<float>(inpWidth - 1) / (outWidth - 1)) : 0.f;
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
ieLayer.setType("Interp");
@ -285,20 +257,9 @@ public:
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "Interp";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
ieLayer->params["pad_beg"] = "0";
ieLayer->params["pad_end"] = "0";
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
};
Ptr<Layer> InterpLayer::create(const LayerParams& params)

@ -194,10 +194,9 @@ public:
}
#endif // HAVE_HALIDE
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ScaleShiftLayer(name);
CV_Assert(!blobs.empty());
@ -219,37 +218,8 @@ public:
if (hasBias)
addConstantData("biases", wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C), l);
return Ptr<BackendNode>(new InfEngineBackendNode(l));
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "ScaleShift";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
CV_Assert(!blobs.empty());
const size_t numChannels = blobs[0].total();
if (hasWeights)
{
ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C);
}
else
{
auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
{numChannels});
weights->allocate();
std::vector<float> ones(numChannels, 1);
weights->set(ones);
ieLayer->_weights = weights;
}
if (hasBias)
ieLayer->_biases = wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
{

@ -54,7 +54,7 @@ namespace cv
namespace dnn
{
class SliceLayerImpl CV_FINAL : public SliceLayer
class SliceLayerImpl : public SliceLayer
{
public:
SliceLayerImpl(const LayerParams& params)
@ -112,6 +112,9 @@ public:
{
return backendId == DNN_BACKEND_OPENCV ||
(backendId == DNN_BACKEND_INFERENCE_ENGINE &&
#ifdef HAVE_INF_ENGINE
INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
#endif
sliceRanges.size() == 1 && sliceRanges[0].size() == 4);
}
@ -256,23 +259,24 @@ public:
}
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
CV_Assert(sliceRanges.size() == 1);
CV_Assert_N(sliceRanges.size() == 1, inputs.size() <= 2);
std::vector<size_t> axes, offsets, dims;
int from, to, step;
int numDims = sliceRanges[0].size();
if (preferableTarget == DNN_TARGET_MYRIAD)
{
from = 1;
from = axis;
to = numDims;
step = 1;
}
else
{
from = numDims - 1;
to = -1;
to = axis - 1;
step = -1;
}
for (int i = from; i != to; i += step)
@ -282,11 +286,6 @@ public:
dims.push_back(sliceRanges[0][i].size());
}
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
std::vector<size_t> outShape(numDims);
for (int i = 0; i < numDims; ++i)
outShape[numDims - 1 - i] = sliceRanges[0][i].size();
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
ieLayer.setType("Crop");
@ -295,30 +294,106 @@ public:
ieLayer.getParameters()["offset"] = offsets;
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(2));
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
ieLayer.getInputPorts()[1].setParameter("type", "weights");
// Fake blob which will be moved to inputs (as weights).
auto shapeSource = InferenceEngine::make_shared_blob<float>(
InferenceEngine::Precision::FP32,
InferenceEngine::Layout::ANY, outShape);
shapeSource->allocate();
addConstantData("weights", shapeSource, ieLayer);
if (inputs.size() != 2)
{
std::vector<size_t> outShape(numDims);
for (int i = 0; i < numDims; ++i)
outShape[numDims - 1 - i] = sliceRanges[0][i].size();
ieLayer.getInputPorts()[1].setParameter("type", "weights");
// Fake blob which will be moved to inputs (as weights).
auto shapeSource = InferenceEngine::make_shared_blob<float>(
InferenceEngine::Precision::FP32,
InferenceEngine::Layout::ANY, outShape);
shapeSource->allocate();
addConstantData("weights", shapeSource, ieLayer);
}
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#else
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "Crop";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::CropLayer> ieLayer(new InferenceEngine::CropLayer(lp));
ieLayer->axis = axes;
ieLayer->offset = offsets;
ieLayer->dim = dims;
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif // IE < R5
return Ptr<BackendNode>();
}
#endif
#endif
};
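Standalone sketch, not part of the patch: what the Builder path in initInfEngine above computes from sliceRanges. The axes/offsets pushes are elided by the hunk and are assumed to mirror the dims push; the axis value and the shapes below are made up for illustration.

#include <cstdio>
#include <vector>

struct SampleRange { int start, end; int size() const { return end - start; } };

int main()
{
    // Crop a 1x3x20x20 region out of a 1x3x24x24 blob, offset (0, 0, 2, 2).
    std::vector<SampleRange> ranges = { {0, 1}, {0, 3}, {2, 22}, {2, 22} };
    const int numDims = (int)ranges.size();
    const int axis = 0;            // illustrative value
    const bool isMyriad = false;   // DNN_TARGET_MYRIAD flips the traversal order

    int from, to, step;
    if (isMyriad) { from = axis;        to = numDims;  step = 1;  }
    else          { from = numDims - 1; to = axis - 1; step = -1; }

    std::vector<size_t> axes, offsets, dims;
    for (int i = from; i != to; i += step)
    {
        axes.push_back(i);
        offsets.push_back(ranges[i].start);
        dims.push_back(ranges[i].size());
    }

    // Shape of the fake "weights" input, written back-to-front as in the hunk.
    std::vector<size_t> outShape(numDims);
    for (int i = 0; i < numDims; ++i)
        outShape[numDims - 1 - i] = ranges[i].size();

    for (size_t i = 0; i < axes.size(); ++i)
        std::printf("axis=%zu offset=%zu dim=%zu\n", axes[i], offsets[i], dims[i]);
    for (int i = 0; i < numDims; ++i)
        std::printf("outShape[%d]=%zu\n", i, outShape[i]);
    return 0;
}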
class CropLayerImpl CV_FINAL : public SliceLayerImpl
{
public:
CropLayerImpl(const LayerParams& params) : SliceLayerImpl(LayerParams())
{
setParamsFrom(params);
axis = params.get<int>("axis", 2);
const DictValue *paramOffset = params.ptr("offset");
if (paramOffset)
{
for (int i = 0; i < paramOffset->size(); i++)
offset.push_back(paramOffset->get<int>(i));
}
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() == 2);
MatShape dstShape = inputs[0];
int start = clamp(axis, dstShape);
for (int i = start; i < dstShape.size(); i++)
{
dstShape[i] = inputs[1][i];
}
outputs.resize(1, dstShape);
return false;
}
void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
{
std::vector<Mat> inputs;
inputs_arr.getMatVector(inputs);
CV_Assert(2 == inputs.size());
const Mat &inpBlob = inputs[0];
const Mat &inpSzBlob = inputs[1];
int dims = inpBlob.dims;
int start_axis = clamp(axis, dims);
std::vector<int> offset_final(dims, 0);
if (offset.size() == 1)
{
for (int i = start_axis; i < dims; i++)
offset_final[i] = offset[0];
}
else if (offset.size() > 1)
{
if ((int)offset.size() != dims - start_axis)
CV_Error(Error::StsBadArg, "number of offset values specified must be "
"equal to the number of dimensions following axis.");
for (int i = start_axis; i < dims; i++)
offset_final[i] = offset[i - start_axis];
}
sliceRanges.resize(1);
sliceRanges[0].resize(dims);
for (int i = 0; i < start_axis; i++)
{
sliceRanges[0][i] = Range(0, inpBlob.size[i]);
}
for (int i = start_axis; i < dims; i++)
{
if (offset_final[i] < 0 || offset_final[i] + inpSzBlob.size[i] > inpBlob.size[i])
CV_Error(Error::StsBadArg, "invalid crop parameters or blob sizes");
sliceRanges[0][i] = Range(offset_final[i], offset_final[i] + inpSzBlob.size[i]);
}
}
private:
std::vector<int> offset;
};
Ptr<SliceLayer> SliceLayer::create(const LayerParams& params)
@ -326,5 +401,10 @@ Ptr<SliceLayer> SliceLayer::create(const LayerParams& params)
return Ptr<SliceLayer>(new SliceLayerImpl(params));
}
Ptr<Layer> CropLayer::create(const LayerParams& params)
{
return Ptr<Layer>(new CropLayerImpl(params));
}
}
}
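Standalone sketch, not part of the patch, of the arithmetic in CropLayerImpl::finalize() above: the second input only supplies target sizes, and 'offset' is either broadcast to all cropped axes or given per-axis. clampAxis() stands in for the dnn clamp() helper on the assumption that it only maps negative axes to dims + axis; the shapes are made up.

#include <cstdio>
#include <stdexcept>
#include <vector>

static int clampAxis(int axis, int dims)
{
    return axis < 0 ? axis + dims : axis;
}

int main()
{
    std::vector<int> inpShape = {1, 3, 24, 24};   // data blob
    std::vector<int> sizShape = {1, 3, 20, 20};   // reference blob (sizes only)
    std::vector<int> offset   = {2};              // single value -> broadcast
    const int axis = 2;                           // Caffe Crop default

    const int dims = (int)inpShape.size();
    const int start_axis = clampAxis(axis, dims);

    std::vector<int> offset_final(dims, 0);
    if (offset.size() == 1)
        for (int i = start_axis; i < dims; i++)
            offset_final[i] = offset[0];
    else if ((int)offset.size() == dims - start_axis)
        for (int i = start_axis; i < dims; i++)
            offset_final[i] = offset[i - start_axis];
    else if (!offset.empty())
        throw std::runtime_error("offset count must match the axes after 'axis'");

    // Ranges kept from the first input: full range before 'axis',
    // [offset, offset + size) on the cropped axes.
    for (int i = 0; i < dims; i++)
    {
        int begin = (i < start_axis) ? 0 : offset_final[i];
        int end   = (i < start_axis) ? inpShape[i] : offset_final[i] + sizShape[i];
        if (begin < 0 || end > inpShape[i])
            throw std::runtime_error("invalid crop parameters or blob sizes");
        std::printf("dim %d: [%d, %d)\n", i, begin, end);
    }
    return 0;
}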

@ -309,29 +309,16 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
InferenceEngine::Builder::SoftMaxLayer ieLayer(name);
ieLayer.setAxis(clamp(axisRaw, input->dims.size()));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#else
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
InferenceEngine::LayerParams lp;
lp.name = name;
lp.type = "SoftMax";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::SoftMaxLayer> ieLayer(new InferenceEngine::SoftMaxLayer(lp));
ieLayer->axis = clamp(axisRaw, input->dims.size());
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif // HAVE_INF_ENGINE
return Ptr<BackendNode>();
}
#endif // HAVE_INF_ENGINE
int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE

@ -26,33 +26,8 @@ namespace cv { namespace dnn {
// we can use some predefined name.
static std::string kDefaultInpLayerName = "empty_inp_layer_name";
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InfEngineBackendNode::InfEngineBackendNode(const InferenceEngine::Builder::Layer& _layer)
: BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(_layer) {}
#else
InfEngineBackendNode::InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& _layer)
: BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(_layer) {}
void InfEngineBackendNode::connect(std::vector<Ptr<BackendWrapper> >& inputs,
std::vector<Ptr<BackendWrapper> >& outputs)
{
layer->insData.resize(inputs.size());
for (int i = 0; i < inputs.size(); ++i)
{
InferenceEngine::DataPtr dataPtr = infEngineDataNode(inputs[i]);
layer->insData[i] = InferenceEngine::DataWeakPtr(dataPtr);
dataPtr->inputTo[layer->name] = layer;
}
CV_Assert(!outputs.empty());
layer->outData.resize(1);
InferenceEngine::DataPtr dataPtr = infEngineDataNode(outputs[0]);
dataPtr->name = layer->name;
layer->outData[0] = dataPtr;
dataPtr->creatorLayer = InferenceEngine::CNNLayerWeakPtr(layer);
}
#endif
static std::vector<Ptr<InfEngineBackendWrapper> >
infEngineWrappers(const std::vector<Ptr<BackendWrapper> >& ptrs)
@ -67,8 +42,6 @@ infEngineWrappers(const std::vector<Ptr<BackendWrapper> >& ptrs)
return wrappers;
}
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InfEngineBackendNet::InfEngineBackendNet() : netBuilder("")
{
hasNetOwner = false;
@ -238,8 +211,6 @@ void InfEngineBackendNet::addOutput(const std::string& name)
requestedOutputs.push_back(name);
}
#endif // IE >= R5
static InferenceEngine::Layout estimateLayout(const Mat& m)
{
if (m.dims == 4)
@ -352,350 +323,6 @@ void InfEngineBackendWrapper::setHostDirty()
}
#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5)
InfEngineBackendNet::InfEngineBackendNet()
{
targetDevice = InferenceEngine::TargetDevice::eCPU;
precision = InferenceEngine::Precision::FP32;
hasNetOwner = false;
}
InfEngineBackendNet::InfEngineBackendNet(InferenceEngine::CNNNetwork& net)
{
targetDevice = InferenceEngine::TargetDevice::eCPU;
precision = InferenceEngine::Precision::FP32;
inputs = net.getInputsInfo();
outputs = net.getOutputsInfo();
layers.resize(net.layerCount()); // A hack to execute InfEngineBackendNet::layerCount correctly.
netOwner = net;
hasNetOwner = true;
}
void InfEngineBackendNet::Release() noexcept
{
layers.clear();
inputs.clear();
outputs.clear();
}
void InfEngineBackendNet::setPrecision(InferenceEngine::Precision p) noexcept
{
precision = p;
}
InferenceEngine::Precision InfEngineBackendNet::getPrecision() noexcept
{
return hasNetOwner ? netOwner.getPrecision() : precision;
}
InferenceEngine::Precision InfEngineBackendNet::getPrecision() const noexcept
{
return hasNetOwner ? netOwner.getPrecision() : precision;
}
// Assume that the network's outputs are unconnected blobs.
void InfEngineBackendNet::getOutputsInfo(InferenceEngine::OutputsDataMap &outputs_) noexcept
{
const_cast<const InfEngineBackendNet*>(this)->getOutputsInfo(outputs_);
}
void InfEngineBackendNet::getOutputsInfo(InferenceEngine::OutputsDataMap &outputs_) const noexcept
{
outputs_ = outputs;
}
// Returns input references that aren't connected to internal outputs.
void InfEngineBackendNet::getInputsInfo(InferenceEngine::InputsDataMap &inputs_) noexcept
{
const_cast<const InfEngineBackendNet*>(this)->getInputsInfo(inputs_);
}
// Returns input references that aren't connected to internal outputs.
void InfEngineBackendNet::getInputsInfo(InferenceEngine::InputsDataMap &inputs_) const noexcept
{
inputs_ = inputs;
}
InferenceEngine::InputInfo::Ptr InfEngineBackendNet::getInput(const std::string &inputName) noexcept
{
return const_cast<const InfEngineBackendNet*>(this)->getInput(inputName);
}
InferenceEngine::InputInfo::Ptr InfEngineBackendNet::getInput(const std::string &inputName) const noexcept
{
const auto& it = inputs.find(inputName);
CV_Assert(it != inputs.end());
return it->second;
}
void InfEngineBackendNet::getName(char*, size_t) noexcept
{
}
void InfEngineBackendNet::getName(char*, size_t) const noexcept
{
}
const std::string& InfEngineBackendNet::getName() const noexcept
{
return name;
}
InferenceEngine::StatusCode InfEngineBackendNet::serialize(const std::string&, const std::string&, InferenceEngine::ResponseDesc*) const noexcept
{
CV_Error(Error::StsNotImplemented, "");
return InferenceEngine::StatusCode::OK;
}
size_t InfEngineBackendNet::layerCount() noexcept
{
return const_cast<const InfEngineBackendNet*>(this)->layerCount();
}
size_t InfEngineBackendNet::layerCount() const noexcept
{
return layers.size();
}
InferenceEngine::DataPtr& InfEngineBackendNet::getData(const char *dname) noexcept
{
CV_Error(Error::StsNotImplemented, "");
return outputs.begin()->second; // Just return something.
}
void InfEngineBackendNet::addLayer(const InferenceEngine::CNNLayerPtr &layer) noexcept
{
layers.push_back(layer);
inputs.clear();
outputs.clear();
}
InferenceEngine::StatusCode
InfEngineBackendNet::addOutput(const std::string &layerName, size_t outputIndex,
InferenceEngine::ResponseDesc *resp) noexcept
{
for (const auto& l : layers)
{
for (const InferenceEngine::DataPtr& out : l->outData)
{
if (out->name == layerName)
{
outputs[out->name] = out;
return InferenceEngine::StatusCode::OK;
}
}
}
CV_Error(Error::StsObjectNotFound, "Cannot find a layer " + layerName);
return InferenceEngine::StatusCode::OK;
}
InferenceEngine::StatusCode
InfEngineBackendNet::getLayerByName(const char *layerName, InferenceEngine::CNNLayerPtr &out,
InferenceEngine::ResponseDesc *resp) noexcept
{
return const_cast<const InfEngineBackendNet*>(this)->getLayerByName(layerName, out, resp);
}
InferenceEngine::StatusCode InfEngineBackendNet::getLayerByName(const char *layerName,
InferenceEngine::CNNLayerPtr &out,
InferenceEngine::ResponseDesc *resp) const noexcept
{
for (auto& l : layers)
{
if (l->name == layerName)
{
out = l;
return InferenceEngine::StatusCode::OK;
}
}
CV_Error(Error::StsObjectNotFound, cv::format("Cannot find a layer %s", layerName));
return InferenceEngine::StatusCode::NOT_FOUND;
}
void InfEngineBackendNet::setTargetDevice(InferenceEngine::TargetDevice device) noexcept
{
if (device != InferenceEngine::TargetDevice::eCPU &&
device != InferenceEngine::TargetDevice::eGPU &&
device != InferenceEngine::TargetDevice::eMYRIAD &&
device != InferenceEngine::TargetDevice::eFPGA)
CV_Error(Error::StsNotImplemented, "");
targetDevice = device;
}
InferenceEngine::TargetDevice InfEngineBackendNet::getTargetDevice() noexcept
{
return const_cast<const InfEngineBackendNet*>(this)->getTargetDevice();
}
InferenceEngine::TargetDevice InfEngineBackendNet::getTargetDevice() const noexcept
{
return targetDevice == InferenceEngine::TargetDevice::eFPGA ?
InferenceEngine::TargetDevice::eHETERO : targetDevice;
}
InferenceEngine::StatusCode InfEngineBackendNet::setBatchSize(const size_t) noexcept
{
CV_Error(Error::StsNotImplemented, "");
return InferenceEngine::StatusCode::OK;
}
InferenceEngine::StatusCode InfEngineBackendNet::setBatchSize(size_t size, InferenceEngine::ResponseDesc *responseDesc) noexcept
{
CV_Error(Error::StsNotImplemented, "");
return InferenceEngine::StatusCode::OK;
}
size_t InfEngineBackendNet::getBatchSize() const noexcept
{
size_t batchSize = 0;
for (const auto& inp : inputs)
{
CV_Assert(inp.second);
std::vector<size_t> dims = inp.second->getDims();
CV_Assert(!dims.empty());
if (batchSize != 0)
CV_Assert(batchSize == dims.back());
else
batchSize = dims.back();
}
return batchSize;
}
InferenceEngine::StatusCode InfEngineBackendNet::AddExtension(const InferenceEngine::IShapeInferExtensionPtr &extension, InferenceEngine::ResponseDesc *resp) noexcept
{
CV_Error(Error::StsNotImplemented, "");
return InferenceEngine::StatusCode::OK;
}
InferenceEngine::StatusCode InfEngineBackendNet::reshape(const InferenceEngine::ICNNNetwork::InputShapes &inputShapes, InferenceEngine::ResponseDesc *resp) noexcept
{
CV_Error(Error::StsNotImplemented, "");
return InferenceEngine::StatusCode::OK;
}
void InfEngineBackendNet::init(int targetId)
{
if (inputs.empty())
{
// Collect all external input blobs.
inputs.clear();
std::map<std::string, InferenceEngine::DataPtr> internalOutputs;
for (const auto& l : layers)
{
for (const InferenceEngine::DataWeakPtr& ptr : l->insData)
{
InferenceEngine::DataPtr inp(ptr);
if (internalOutputs.find(inp->name) == internalOutputs.end())
{
InferenceEngine::InputInfo::Ptr inpInfo(new InferenceEngine::InputInfo());
inpInfo->setInputData(inp);
if (inputs.find(inp->name) == inputs.end())
inputs[inp->name] = inpInfo;
}
}
for (const InferenceEngine::DataPtr& out : l->outData)
{
// TODO: Replace with a uniqueness assertion.
if (internalOutputs.find(out->name) == internalOutputs.end())
internalOutputs[out->name] = out;
}
}
CV_Assert(!inputs.empty());
#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
for (const auto& inp : inputs)
{
InferenceEngine::LayerParams lp;
lp.name = inp.first;
lp.type = "Input";
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::CNNLayer> inpLayer(new InferenceEngine::CNNLayer(lp));
layers.push_back(inpLayer);
InferenceEngine::DataPtr dataPtr = inp.second->getInputData();
// TODO: remove precision dependency (see setInput.normalization tests)
if (dataPtr->precision == InferenceEngine::Precision::FP32)
{
inpLayer->outData.assign(1, dataPtr);
dataPtr->creatorLayer = InferenceEngine::CNNLayerWeakPtr(inpLayer);
}
}
#endif
}
if (outputs.empty())
{
// Add all unconnected blobs to output blobs.
InferenceEngine::OutputsDataMap unconnectedOuts;
for (const auto& l : layers)
{
if (l->type == "Input")
continue;
// Add all outputs.
for (const InferenceEngine::DataPtr& out : l->outData)
{
// TODO: Replace with a uniqueness assertion.
if (unconnectedOuts.find(out->name) == unconnectedOuts.end())
unconnectedOuts[out->name] = out;
}
// Remove internally connected outputs.
for (const InferenceEngine::DataWeakPtr& inp : l->insData)
{
unconnectedOuts.erase(InferenceEngine::DataPtr(inp)->name);
}
}
CV_Assert(!unconnectedOuts.empty());
for (auto it = unconnectedOuts.begin(); it != unconnectedOuts.end(); ++it)
{
outputs[it->first] = it->second;
}
}
// Set up input blobs.
inpBlobs.clear();
for (const auto& it : inputs)
{
CV_Assert(allBlobs.find(it.first) != allBlobs.end());
inpBlobs[it.first] = allBlobs[it.first];
it.second->setPrecision(inpBlobs[it.first]->precision());
}
// Set up output blobs.
outBlobs.clear();
for (const auto& it : outputs)
{
CV_Assert(allBlobs.find(it.first) != allBlobs.end());
outBlobs[it.first] = allBlobs[it.first];
}
switch (targetId)
{
case DNN_TARGET_CPU: setTargetDevice(InferenceEngine::TargetDevice::eCPU); break;
case DNN_TARGET_OPENCL_FP16:
setPrecision(InferenceEngine::Precision::FP16);
/* Falls through. */
case DNN_TARGET_OPENCL: setTargetDevice(InferenceEngine::TargetDevice::eGPU); break;
case DNN_TARGET_MYRIAD:
{
setPrecision(InferenceEngine::Precision::FP16);
setTargetDevice(InferenceEngine::TargetDevice::eMYRIAD); break;
}
case DNN_TARGET_FPGA:
{
setPrecision(InferenceEngine::Precision::FP16);
setTargetDevice(InferenceEngine::TargetDevice::eFPGA); break;
}
default:
CV_Error(Error::StsError, format("Unknown target identifier: %d", targetId));
}
if (!isInitialized())
initPlugin(*this);
}
#endif // IE < R5
static std::map<InferenceEngine::TargetDevice, InferenceEngine::InferenceEnginePluginPtr>& getSharedPlugins()
{
static std::map<InferenceEngine::TargetDevice, InferenceEngine::InferenceEnginePluginPtr> sharedPlugins;
@ -703,7 +330,7 @@ static std::map<InferenceEngine::TargetDevice, InferenceEngine::InferenceEngineP
}
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5) && !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
#if !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
static bool detectMyriadX_()
{
InferenceEngine::Builder::Network builder("");
@ -724,6 +351,7 @@ static bool detectMyriadX_()
}
builder.addLayer({InferenceEngine::PortInfo(clampId)}, InferenceEngine::Builder::OutputLayer());
#else
InferenceEngine::idx_t clampId = builder.addLayer({inpId}, InferenceEngine::Builder::ClampLayer());
builder.addLayer({InferenceEngine::PortInfo(clampId)},
InferenceEngine::Builder::OutputLayer().setPort(InferenceEngine::Port({},
@ -757,7 +385,7 @@ static bool detectMyriadX_()
}
return true;
}
#endif // >= 2018R5
#endif // !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
{
@ -834,9 +462,7 @@ void InfEngineBackendNet::addBlobs(const std::vector<Ptr<BackendWrapper> >& ptrs
for (const auto& wrapper : wrappers)
{
std::string name = wrapper->dataPtr->name;
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
name = name.empty() ? kDefaultInpLayerName : name;
#endif
allBlobs.insert({name, wrapper->blob});
}
}
@ -993,11 +619,7 @@ bool InfEngineBackendLayer::getMemoryShapes(const std::vector<MatShape> &inputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
#if INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2018R3)
InferenceEngine::ICNNNetwork::InputShapes inShapes = const_cast<InferenceEngine::CNNNetwork&>(t_net).getInputShapes();
#else
InferenceEngine::ICNNNetwork::InputShapes inShapes = t_net.getInputShapes();
#endif
InferenceEngine::ICNNNetwork::InputShapes::iterator itr;
bool equal_flag = true;
size_t i = 0;
@ -1044,7 +666,6 @@ InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob)
return halfs;
}
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data,
InferenceEngine::Builder::Layer& l)
{
@ -1054,7 +675,6 @@ void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data,
l.addConstantData(name, data);
#endif
}
#endif
#endif // HAVE_INF_ENGINE
@ -1103,7 +723,7 @@ static std::string getInferenceEngineVPUType_()
{
#if defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
param_vpu_type = OPENCV_DNN_IE_VPU_TYPE_DEFAULT;
#elif INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
#else
CV_LOG_INFO(NULL, "OpenCV-DNN: running Inference Engine VPU autodetection: Myriad2/X. In case of other accelerator types specify 'OPENCV_DNN_IE_VPU_TYPE' parameter");
try {
bool isMyriadX_ = detectMyriadX_();
@ -1121,9 +741,6 @@ static std::string getInferenceEngineVPUType_()
CV_LOG_WARNING(NULL, "OpenCV-DNN: Failed Inference Engine VPU autodetection. Specify 'OPENCV_DNN_IE_VPU_TYPE' parameter.");
param_vpu_type.clear();
}
#else
CV_LOG_WARNING(NULL, "OpenCV-DNN: VPU auto-detection is not implemented. Consider specifying VPU type via 'OPENCV_DNN_IE_VPU_TYPE' parameter");
param_vpu_type = CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2;
#endif
}
CV_LOG_INFO(NULL, "OpenCV-DNN: Inference Engine VPU type='" << param_vpu_type << "'");

@ -19,8 +19,6 @@
#ifdef HAVE_INF_ENGINE
#define INF_ENGINE_RELEASE_2018R3 2018030000
#define INF_ENGINE_RELEASE_2018R4 2018040000
#define INF_ENGINE_RELEASE_2018R5 2018050000
#define INF_ENGINE_RELEASE_2019R1 2019010000
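The INF_ENGINE_VER_MAJOR_* checks used throughout this patch compare against the release constants kept above (YYYYRR0000 numbering: 2018R5 -> 2018050000, 2019R1 -> 2019010000). The self-contained sketch below illustrates how such comparisons behave; the divide-by-10000 macro bodies are an assumption for the example, not a quote of the header.

#include <cstdio>

#define INF_ENGINE_RELEASE_2018R5 2018050000
#define INF_ENGINE_RELEASE_2019R1 2019010000

// Pretend the build targets 2019R1.
#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2019R1

#define VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
#define VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) <  ((ver) / 10000))

int main()
{
    std::printf(">= 2018R5: %d\n", VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)); // prints 1
    std::printf("<  2019R1: %d\n", VER_MAJOR_LT(INF_ENGINE_RELEASE_2019R1)); // prints 0
    return 0;
}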
@ -46,9 +44,7 @@
#include <inference_engine.hpp>
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
#include <ie_builders.hpp>
#endif
#if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
#pragma GCC visibility pop
@ -64,111 +60,6 @@ namespace cv { namespace dnn {
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5)
class InfEngineBackendNet : public InferenceEngine::ICNNNetwork
{
public:
InfEngineBackendNet();
InfEngineBackendNet(InferenceEngine::CNNNetwork& net);
virtual void Release() noexcept CV_OVERRIDE;
void setPrecision(InferenceEngine::Precision p) noexcept;
virtual InferenceEngine::Precision getPrecision() noexcept;
virtual InferenceEngine::Precision getPrecision() const noexcept;
virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) noexcept /*CV_OVERRIDE*/;
virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) const noexcept /*CV_OVERRIDE*/;
virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) noexcept /*CV_OVERRIDE*/;
virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) const noexcept /*CV_OVERRIDE*/;
virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) noexcept;
virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) const noexcept;
virtual InferenceEngine::StatusCode serialize(const std::string &xmlPath, const std::string &binPath, InferenceEngine::ResponseDesc* resp) const noexcept;
virtual void getName(char *pName, size_t len) noexcept;
virtual void getName(char *pName, size_t len) const noexcept;
virtual const std::string& getName() const noexcept;
virtual size_t layerCount() noexcept;
virtual size_t layerCount() const noexcept;
virtual InferenceEngine::DataPtr& getData(const char *dname) noexcept CV_OVERRIDE;
virtual void addLayer(const InferenceEngine::CNNLayerPtr &layer) noexcept CV_OVERRIDE;
virtual InferenceEngine::StatusCode addOutput(const std::string &layerName,
size_t outputIndex = 0,
InferenceEngine::ResponseDesc *resp = nullptr) noexcept;
virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
InferenceEngine::CNNLayerPtr &out,
InferenceEngine::ResponseDesc *resp) noexcept;
virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
InferenceEngine::CNNLayerPtr &out,
InferenceEngine::ResponseDesc *resp) const noexcept;
virtual void setTargetDevice(InferenceEngine::TargetDevice device) noexcept CV_OVERRIDE;
virtual InferenceEngine::TargetDevice getTargetDevice() noexcept;
virtual InferenceEngine::TargetDevice getTargetDevice() const noexcept;
virtual InferenceEngine::StatusCode setBatchSize(const size_t size) noexcept CV_OVERRIDE;
virtual InferenceEngine::StatusCode setBatchSize(size_t size, InferenceEngine::ResponseDesc* responseDesc) noexcept;
virtual size_t getBatchSize() const noexcept CV_OVERRIDE;
virtual InferenceEngine::StatusCode AddExtension(const InferenceEngine::IShapeInferExtensionPtr& extension, InferenceEngine::ResponseDesc* resp) noexcept CV_OVERRIDE;
virtual InferenceEngine::StatusCode reshape(const InputShapes& inputShapes, InferenceEngine::ResponseDesc* resp) noexcept CV_OVERRIDE;
void init(int targetId);
void addBlobs(const std::vector<Ptr<BackendWrapper> >& wrappers);
void forward();
bool isInitialized();
private:
std::vector<InferenceEngine::CNNLayerPtr> layers;
InferenceEngine::InputsDataMap inputs;
InferenceEngine::OutputsDataMap outputs;
InferenceEngine::BlobMap inpBlobs;
InferenceEngine::BlobMap outBlobs;
InferenceEngine::BlobMap allBlobs;
InferenceEngine::TargetDevice targetDevice;
InferenceEngine::Precision precision;
InferenceEngine::InferenceEnginePluginPtr enginePtr;
InferenceEngine::InferencePlugin plugin;
InferenceEngine::ExecutableNetwork netExec;
InferenceEngine::InferRequest infRequest;
// In case of models from Model Optimizer we need to manage their lifetime.
InferenceEngine::CNNNetwork netOwner;
// There is no way to check if netOwner is initialized or not so we use
// a separate flag to determine if the model has been loaded from IR.
bool hasNetOwner;
std::string name;
void initPlugin(InferenceEngine::ICNNNetwork& net);
};
#else // IE < R5
class InfEngineBackendNet
{
public:
@ -226,28 +117,18 @@ private:
std::set<int> unconnectedLayersIds;
};
#endif // IE < R5
class InfEngineBackendNode : public BackendNode
{
public:
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InfEngineBackendNode(const InferenceEngine::Builder::Layer& layer);
#else
InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& layer);
#endif
void connect(std::vector<Ptr<BackendWrapper> >& inputs,
std::vector<Ptr<BackendWrapper> >& outputs);
// Inference Engine network object that allows obtaining the outputs of this layer.
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::Layer layer;
Ptr<InfEngineBackendNet> net;
#else
InferenceEngine::CNNLayerPtr layer;
Ptr<InfEngineBackendNet> net;
#endif
};
class InfEngineBackendWrapper : public BackendWrapper
@ -282,9 +163,7 @@ Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
// Allocates memory for a new blob.
InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data, InferenceEngine::Builder::Layer& l);
#endif
// This is a fake class to run networks from Model Optimizer. Objects of that
// class simulate responses of layers that are imported by OpenCV and supported by

@ -291,6 +291,7 @@ TEST_P(DNNTestNetwork, OpenPose_pose_mpi)
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for OpenVINO <= 2018R5 + MyriadX target");
#endif
// output range: [-0.001, 0.97]
const float l1 = (target == DNN_TARGET_MYRIAD) ? 0.012 : 0.0;
const float lInf = (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.16 : 0.0;
@ -309,6 +310,7 @@ TEST_P(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages)
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for OpenVINO <= 2018R5 + MyriadX target");
#endif
// The same .caffemodel but modified .prototxt
// See https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/pose/poseParameters.cpp
processNet("dnn/openpose_pose_mpi.caffemodel", "dnn/openpose_pose_mpi_faster_4_stages.prototxt",
@ -322,9 +324,6 @@ TEST_P(DNNTestNetwork, OpenFace)
#if INF_ENGINE_VER_MAJOR_EQ(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is disabled for Myriad targets");
#elif INF_ENGINE_VER_MAJOR_EQ(2018030000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
throw SkipTestException("Test has been fixed in OpenVINO 2018R4");
#endif
#endif
if (backend == DNN_BACKEND_HALIDE)
@ -407,7 +406,9 @@ TEST_P(DNNTestNetwork, FastNeuralStyle_eccv16)
float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.4 : 4e-5;
float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 7.45 : 2e-3;
processNet("dnn/fast_neural_style_eccv16_starry_night.t7", "", inp, "", "", l1, lInf);
#if defined(HAVE_INF_ENGINE) && INF_ENGINE_RELEASE >= 2019010000
expectNoFallbacksFromIE(net);
#endif
}
INSTANTIATE_TEST_CASE_P(/*nothing*/, DNNTestNetwork, dnnBackendsAndTargets(true, true, false));

@ -332,10 +332,6 @@ TEST_P(Test_Darknet_nets, TinyYoloVoc)
testDarknetModel(config_file, weights_file, ref.rowRange(0, 2), scoreDiff, iouDiff);
}
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test with 'batch size 2' is disabled for Myriad target (fixed in 2018R5)");
#endif
{
SCOPED_TRACE("batch size 2");
testDarknetModel(config_file, weights_file, ref, scoreDiff, iouDiff);
@ -389,10 +385,6 @@ INSTANTIATE_TEST_CASE_P(/**/, Test_Darknet_nets, dnnBackendsAndTargets());
TEST_P(Test_Darknet_layers, shortcut)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018040000
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_CPU)
throw SkipTestException("Test is enabled starts from OpenVINO 2018R4");
#endif
testDarknetLayer("shortcut");
}

@ -159,12 +159,6 @@ TEST_P(Deconvolution, Accuracy)
Backend backendId = get<0>(get<7>(GetParam()));
Target targetId = get<1>(get<7>(GetParam()));
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018040000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_CPU
&& hasBias && group != 1)
throw SkipTestException("Test is disabled for OpenVINO 2018R4");
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
@ -278,19 +272,13 @@ TEST_P(AvePooling, Accuracy)
Backend backendId = get<0>(get<4>(GetParam()));
Target targetId = get<1>(get<4>(GetParam()));
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2018050000)
#if defined(INF_ENGINE_RELEASE)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
&& kernel == Size(1, 1) && (stride == Size(1, 1) || stride == Size(2, 2)))
throw SkipTestException("Test is disabled for MyriadX target");
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2018040000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD &&
stride == Size(3, 2) && kernel == Size(3, 3) && outSize != Size(1, 1))
throw SkipTestException("Test is fixed in OpenVINO 2018R4");
#endif
const int inWidth = (outSize.width - 1) * stride.width + kernel.width;
const int inHeight = (outSize.height - 1) * stride.height + kernel.height;

@ -141,10 +141,8 @@ TEST_P(Test_Caffe_layers, Convolution)
TEST_P(Test_Caffe_layers, DeConvolution)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_CPU)
throw SkipTestException("Test is disabled for OpenVINO 2018R4");
#endif
throw SkipTestException("Test is disabled for DLIE/CPU");
testLayerUsingCaffeModels("layer_deconvolution", true, false);
}
@ -254,8 +252,7 @@ TEST_P(Test_Caffe_layers, Fused_Concat)
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE
&& (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16
|| (INF_ENGINE_RELEASE < 2018040000 && target == DNN_TARGET_CPU))
&& (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
)
throw SkipTestException("Test is disabled for DLIE");
#endif
@ -1045,11 +1042,6 @@ TEST_P(Test_DLDT_two_inputs_3dim, as_IR)
int secondInpType = get<1>(GetParam());
Target targetId = get<2>(GetParam());
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018040000
if (secondInpType == CV_8U)
throw SkipTestException("Test is enabled starts from OpenVINO 2018R4");
#endif
std::string suffix = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? "_fp16" : "";
Net net = readNet(_tf("net_two_inputs" + suffix + ".xml"), _tf("net_two_inputs.bin"));
std::vector<int> inpSize = get<3>(GetParam());

@ -86,6 +86,9 @@ TEST_P(Test_ONNX_layers, Convolution)
TEST_P(Test_ONNX_layers, Convolution3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
throw SkipTestException("Test is enabled starts from 2019R1");
#endif
if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU)
throw SkipTestException("Only DLIE backend on CPU is supported");
testONNXModels("conv3d");
@ -94,7 +97,7 @@ TEST_P(Test_ONNX_layers, Convolution3D)
TEST_P(Test_ONNX_layers, Two_convolution)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2018050000)
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
@ -150,6 +153,9 @@ TEST_P(Test_ONNX_layers, AveragePooling)
TEST_P(Test_ONNX_layers, MaxPooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
throw SkipTestException("Test is enabled starts from 2019R1");
#endif
if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU)
throw SkipTestException("Only DLIE backend on CPU is supported");
testONNXModels("max_pool3d");
@ -157,6 +163,9 @@ TEST_P(Test_ONNX_layers, MaxPooling3D)
TEST_P(Test_ONNX_layers, AvePooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
throw SkipTestException("Test is enabled starts from 2019R1");
#endif
if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU)
throw SkipTestException("Only DLIE backend on CPU is supported");
testONNXModels("ave_pool3d");
@ -195,14 +204,18 @@ TEST_P(Test_ONNX_layers, Constant)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for OpenVINO <= 2018R5 + MyriadX target");
throw SkipTestException("Test is disabled for OpenVINO <= 2018R5 + MyriadX target");
#endif
testONNXModels("constant");
}
TEST_P(Test_ONNX_layers, Padding)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
testONNXModels("padding", npy, 0, 0, false, false);
#else
testONNXModels("padding");
#endif
}
TEST_P(Test_ONNX_layers, Resize)
@ -247,7 +260,11 @@ TEST_P(Test_ONNX_layers, Reshape)
TEST_P(Test_ONNX_layers, Slice)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
testONNXModels("slice", npy, 0, 0, false, false);
#else
testONNXModels("slice");
#endif
}
TEST_P(Test_ONNX_layers, Softmax)
@ -504,6 +521,9 @@ TEST_P(Test_ONNX_nets, Shufflenet)
TEST_P(Test_ONNX_nets, Resnet34_kinetics)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
throw SkipTestException("Test is enabled starts from 2019R1");
#endif
if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU)
throw SkipTestException("Only DLIE backend on CPU is supported");

@ -133,6 +133,9 @@ TEST_P(Test_TensorFlow_layers, conv)
TEST_P(Test_TensorFlow_layers, Convolution3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
throw SkipTestException("Test is enabled starts from 2019R1");
#endif
if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU)
throw SkipTestException("Only DLIE backend on CPU is supported");
runTensorFlowNet("conv3d");
@ -229,6 +232,9 @@ TEST_P(Test_TensorFlow_layers, ave_pool_same)
TEST_P(Test_TensorFlow_layers, MaxPooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
throw SkipTestException("Test is enabled starts from 2019R1");
#endif
if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU)
throw SkipTestException("Only DLIE backend on CPU is supported");
runTensorFlowNet("max_pool3d");
@ -236,6 +242,9 @@ TEST_P(Test_TensorFlow_layers, MaxPooling3D)
TEST_P(Test_TensorFlow_layers, AvePooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
throw SkipTestException("Test is enabled starts from 2019R1");
#endif
if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU)
throw SkipTestException("Only DLIE backend on CPU is supported");
runTensorFlowNet("ave_pool3d");
@ -337,10 +346,15 @@ class Test_TensorFlow_nets : public DNNTestLayer {};
TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
{
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX");
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
{
#if INF_ENGINE_VER_MAJOR_GE(2019010000)
if (getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
throw SkipTestException("Test is disabled for MyriadX");
#else
throw SkipTestException("Test is disabled for Myriad");
#endif
}
#endif
checkBackend();
@ -364,7 +378,9 @@ TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0043 : default_l1;
double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.037 : default_lInf;
normAssertDetections(ref, out, "", 0.2, scoreDiff, iouDiff);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2019010000
expectNoFallbacksFromIE(net);
#endif
}
TEST_P(Test_TensorFlow_nets, Inception_v2_SSD)

@ -136,10 +136,8 @@ TEST_P(Test_Torch_layers, run_reshape_change_batch_size)
TEST_P(Test_Torch_layers, run_reshape)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is disabled for OpenVINO 2018R4");
#endif
throw SkipTestException("Test is disabled for Myriad targets");
runTorchNet("net_reshape_batch");
runTorchNet("net_reshape_channels", "", false, true);
}
@ -220,10 +218,6 @@ TEST_P(Test_Torch_layers, net_conv_gemm_lrn)
TEST_P(Test_Torch_layers, net_inception_block)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018030000
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("");
#endif
runTorchNet("net_inception_block", "", false, true);
}
