Merge pull request #24196 from dkurt:ov_backend_cleanups

Use ngraph::Output in OpenVINO backend wrapper #24196

### Pull Request Readiness Checklist

resolves https://github.com/opencv/opencv/issues/24102

* Use `ngraph::Output<ngraph::Node>` instead of `std::shared_ptr<ngraph::Node>` as the backend wrapper. This gives direct access to multi-output nodes (see the sketch after this list): 588ddf1b18/modules/dnn/src/net_openvino.cpp (L501-L504)
* All layers are customizable with OpenVINO >= 2022.1. The nGraph reference code used for default layer implementations no longer requires the CPU plugin either (this can be checked by commenting out the CPU plugin in `/opt/intel/openvino/runtime/lib/intel64/plugins.xml`).
* Correct inference when only intermediate blobs are requested.
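
For background, here is a minimal sketch (not part of the patch, assuming only the standard nGraph API) of what the new wrapper type buys: `std::shared_ptr<ngraph::Node>` can only name a whole node, so the old code emulated output-port selection with a Concat-as-Identity workaround, whereas `ngraph::Output<ngraph::Node>` names one specific output port.

```cpp
#include <cassert>
#include <ngraph/ngraph.hpp>

// Hypothetical helper, for illustration only: wrap a single output port of a
// possibly multi-output node. ngraph::Output<ngraph::Node> stores both the
// producing node and the port index, so a consumer can read, say, the second
// output of a TopK node directly, with no Identity/Concat workaround.
ngraph::Output<ngraph::Node> pickPort(const std::shared_ptr<ngraph::Node>& node,
                                      size_t oid)
{
    assert(oid < node->get_output_size());
    return node->output(oid);  // a (node, port) pair, not just the node
}
```

This is the same pattern now used in `net_openvino.cpp`, where the old `ngraph::op::Concat` workaround is replaced by `ngraph_input_node->output(oid)`.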


See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
      Patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
Dmitry Kurtaev committed via GitHub (commit 178fdbbda8, parent 2c53e3f53d)
26 changed files (lines changed):

1. modules/dnn/src/ie_ngraph.cpp (21)
2. modules/dnn/src/ie_ngraph.hpp (6)
3. modules/dnn/src/layers/batch_norm_layer.cpp (2)
4. modules/dnn/src/layers/blank_layer.cpp (2)
5. modules/dnn/src/layers/concat_layer.cpp (4)
6. modules/dnn/src/layers/convolution_layer.cpp (8)
7. modules/dnn/src/layers/crop_and_resize_layer.cpp (2)
8. modules/dnn/src/layers/elementwise_layers.cpp (28)
9. modules/dnn/src/layers/eltwise_layer.cpp (15)
10. modules/dnn/src/layers/flatten_layer.cpp (2)
11. modules/dnn/src/layers/fully_connected_layer.cpp (2)
12. modules/dnn/src/layers/lrn_layer.cpp (2)
13. modules/dnn/src/layers/max_unpooling_layer.cpp (4)
14. modules/dnn/src/layers/mvn_layer.cpp (2)
15. modules/dnn/src/layers/nary_eltwise_layers.cpp (6)
16. modules/dnn/src/layers/normalize_bbox_layer.cpp (8)
17. modules/dnn/src/layers/pooling_layer.cpp (2)
18. modules/dnn/src/layers/proposal_layer.cpp (4)
19. modules/dnn/src/layers/region_layer.cpp (4)
20. modules/dnn/src/layers/resize_layer.cpp (2)
21. modules/dnn/src/layers/scale_layer.cpp (18)
22. modules/dnn/src/layers/slice_layer.cpp (2)
23. modules/dnn/src/layers/softmax_layer.cpp (2)
24. modules/dnn/src/net_openvino.cpp (63)
25. modules/dnn/test/test_halide_layers.cpp (11)
26. modules/dnn/test/test_tflite_importer.cpp (9)

modules/dnn/src/ie_ngraph.cpp:

@@ -383,11 +383,17 @@ public:
 #endif // OpenVINO >= 2022.1

-InfEngineNgraphNode::InfEngineNgraphNode(std::shared_ptr<ngraph::Node>&& _node)
-    : BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), node(std::move(_node)) {}
+InfEngineNgraphNode::InfEngineNgraphNode(ngraph::Output<ngraph::Node>&& _node)
+    : BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), node(std::move(_node)) {
+    CV_Assert(node.get_node());
+    CV_Assert(node.get_node_shared_ptr());
+}

-InfEngineNgraphNode::InfEngineNgraphNode(const std::shared_ptr<ngraph::Node>& _node)
-    : BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), node(_node) {}
+InfEngineNgraphNode::InfEngineNgraphNode(const ngraph::Output<ngraph::Node>& _node)
+    : BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), node(_node) {
+    CV_Assert(node.get_node());
+    CV_Assert(node.get_node_shared_ptr());
+}

 InfEngineNgraphNode::InfEngineNgraphNode(const std::vector<Ptr<BackendNode> >& nodes,
                                          Ptr<Layer>& cvLayer_, std::vector<Mat*>& inputs,

@@ -420,7 +426,7 @@ InfEngineNgraphNode::InfEngineNgraphNode(const std::vector<Ptr<BackendNode> >& n
 }

 void InfEngineNgraphNode::setName(const std::string& name) {
-    node->set_friendly_name(name);
+    node.get_node()->set_friendly_name(name);
 }

 InfEngineNgraphNet::InfEngineNgraphNet(detail::NetImplBase& netImpl)

@@ -441,8 +447,7 @@ InfEngineNgraphNet::InfEngineNgraphNet(detail::NetImplBase& netImpl, InferenceEn
 void InfEngineNgraphNet::addOutput(const Ptr<InfEngineNgraphNode>& node)
 {
     CV_Assert(node);
-    CV_Assert(node->node);
-    const std::string& name = node->node->get_friendly_name();
+    const std::string& name = node->node.get_node()->get_friendly_name();
     requestedOutputs.insert({name, node.get()});
 }

@@ -458,7 +463,7 @@ void InfEngineNgraphNet::createNet(Target targetId) {
         CV_Assert(output_node_it->second);
         auto out = std::make_shared<ngraph::op::Result>(output_node_it->second->node);
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
-        out->set_friendly_name(output_node_it->first + (output_node_it->second->node->get_output_size() == 1 ? "" : ".0"));
+        out->set_friendly_name(output_node_it->first + (output_node_it->second->node.get_node()->get_output_size() == 1 ? "" : ".0"));
 #endif
         outs.push_back(out);
     }

modules/dnn/src/ie_ngraph.hpp:

@@ -93,13 +93,13 @@ public:
                         std::vector<Mat*>& inputs, std::vector<Mat>& outputs,
                         std::vector<Mat>& internals);

-    InfEngineNgraphNode(std::shared_ptr<ngraph::Node>&& _node);
-    InfEngineNgraphNode(const std::shared_ptr<ngraph::Node>& _node);
+    InfEngineNgraphNode(ngraph::Output<ngraph::Node>&& _node);
+    InfEngineNgraphNode(const ngraph::Output<ngraph::Node>& _node);

    void setName(const std::string& name);

    // Inference Engine network object that allows to obtain the outputs of this layer.
-    std::shared_ptr<ngraph::Node> node;
+    ngraph::Output<ngraph::Node> node;
    Ptr<InfEngineNgraphNet> net;
    Ptr<dnn::Layer> cvLayer;
 };

modules/dnn/src/layers/batch_norm_layer.cpp:

@@ -457,7 +457,7 @@ public:
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         auto ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        std::vector<size_t> shape(ieInpNode->get_shape().size(), 1);
+        std::vector<size_t> shape(ieInpNode.get_shape().size(), 1);
         shape[1] = weights_.total();
         auto weight = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), weights_.data);
         auto bias = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), bias_.data);

modules/dnn/src/layers/blank_layer.cpp:

@@ -148,7 +148,7 @@ public:
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
+        auto ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
         ngraph::OutputVector inp{ieInpNode};
         auto blank = std::make_shared<ngraph::op::Concat>(inp, 0);
         return Ptr<BackendNode>(new InfEngineNgraphNode(blank));

modules/dnn/src/layers/concat_layer.cpp:

@@ -392,7 +392,7 @@ public:
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        const int numDims = nodes[0].dynamicCast<InfEngineNgraphNode>()->node->get_shape().size();
+        const int numDims = nodes[0].dynamicCast<InfEngineNgraphNode>()->node.get_shape().size();
         const int cAxis = normalize_axis(axis, numDims);
         std::vector<size_t> maxDims(numDims, 0);

@@ -403,7 +403,7 @@ public:
             auto inp = nodes[i].dynamicCast<InfEngineNgraphNode>()->node;
             inp_nodes.push_back(inp);

-            std::vector<size_t> inpShape = inp->get_shape();
+            std::vector<size_t> inpShape = inp.get_shape();
             for (int i = 0; i < numDims; ++i)
                 maxDims[i] = std::max(maxDims[i], inpShape[i]);
         }

modules/dnn/src/layers/convolution_layer.cpp:

@@ -822,13 +822,13 @@ public:
         CV_Assert(!blobs.empty());
         CV_Assert_N(inputs.size() >= 1, nodes.size() >= 1);
         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        std::vector<size_t> dims = ieInpNode->get_shape();
+        std::vector<size_t> dims = ieInpNode.get_shape();
         CV_Check(dims.size(), dims.size() >= 3 && dims.size() <= 5, "");
-        std::shared_ptr<ngraph::Node> ieWeights = nodes.size() > 1 ? nodes[1].dynamicCast<InfEngineNgraphNode>()->node : nullptr;
+        ngraph::Output<ngraph::Node> ieWeights;
         if (nodes.size() > 1)
-            CV_Assert(ieWeights);  // dynamic_cast should not fail
+            ieWeights = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;
         const int inpCn = dims[1];
-        const int inpGroupCn = nodes.size() > 1 ? ieWeights->get_shape()[1] : blobs[0].size[1];
+        const int inpGroupCn = nodes.size() > 1 ? ieWeights.get_shape()[1] : blobs[0].size[1];
         const int group = inpCn / inpGroupCn;
         std::vector<size_t> kernel_shape;

modules/dnn/src/layers/crop_and_resize_layer.cpp:

@@ -133,7 +133,7 @@ public:
         auto input = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
         auto rois = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;

-        auto rois_shape = rois->get_shape();
+        auto rois_shape = rois.get_shape();
         std::vector<int64_t> dims(rois_shape.begin(), rois_shape.end()), offsets(4, 0);
         offsets[3] = 2;
         dims[3] = 7;

modules/dnn/src/layers/elementwise_layers.cpp:

@@ -490,7 +490,7 @@ struct ReLUFunctor : public BaseFunctor
 #endif

 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         if (slope) {
             auto param = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1}, &slope);

@@ -674,7 +674,7 @@ struct ReLU6Functor : public BaseFunctor
 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         return std::make_shared<ngraph::op::Clamp>(node, minValue, maxValue);
     }

@@ -796,7 +796,7 @@ struct BaseDefaultFunctor : public BaseFunctor
 #endif // HAVE_CANN

 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         CV_Error(Error::StsNotImplemented, "");
     }

@@ -929,7 +929,7 @@ struct TanHFunctor : public BaseDefaultFunctor<TanHFunctor>
 #endif // HAVE_CANN

 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         return std::make_shared<ngraph::op::Tanh>(node);
     }

@@ -998,7 +998,7 @@ struct SwishFunctor : public BaseDefaultFunctor<SwishFunctor>
 #endif // HAVE_CANN

 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         auto sigmoid = std::make_shared<ngraph::op::Sigmoid>(node);
         return std::make_shared<ngraph::op::v1::Multiply>(node, sigmoid);

@@ -1074,7 +1074,7 @@ struct MishFunctor : public BaseDefaultFunctor<MishFunctor>
 #endif // HAVE_CANN

 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         float one = 1.0f;
         auto constant = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1}, &one);

@@ -1157,7 +1157,7 @@ struct SigmoidFunctor : public BaseDefaultFunctor<SigmoidFunctor>
 #endif // HAVE_CANN

 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         return std::make_shared<ngraph::op::Sigmoid>(node);
     }

@@ -1237,7 +1237,7 @@ struct ELUFunctor : public BaseDefaultFunctor<ELUFunctor>
 #endif // HAVE_CANN

 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         return std::make_shared<ngraph::op::Elu>(node, alpha);
     }

@@ -1307,7 +1307,7 @@ struct AbsValFunctor : public BaseDefaultFunctor<AbsValFunctor>
 #endif // HAVE_CANN

 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         float coeff = -0.999999f;
         // float coeff = preferableTarget == DNN_TARGET_MYRIAD ? -0.999f : -0.999999f;

@@ -1603,7 +1603,7 @@ struct SqrtFunctor : public BaseDefaultFunctor<SqrtFunctor>
 #endif // HAVE_HALIDE

 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         return std::make_shared<ngraph::op::v0::Sqrt>(node);
     }

@@ -2329,7 +2329,7 @@ struct PowerFunctor : public BaseFunctor
 #endif // HAVE_CANN

 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         auto scale_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
                                                                  ngraph::Shape{1}, &scale);

@@ -2439,7 +2439,7 @@ struct ExpFunctor : public BaseDefaultFunctor<ExpFunctor>
 #endif // HAVE_HALIDE

 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         auto scale_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
                                                                  ngraph::Shape{1}, &normScale);

@@ -2598,7 +2598,7 @@ struct ChannelsPReLUFunctor : public BaseFunctor
 #endif // HAVE_CANN

 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
    {
         const size_t numChannels = scale.total();
         auto slope = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{numChannels}, scale.data);

@@ -2678,7 +2678,7 @@ struct PReLUFunctor : public ChannelsPReLUFunctor
     }

 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         auto shape = getShape<size_t>(scale);
         auto slope = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, shape, scale.ptr<float>());

modules/dnn/src/layers/eltwise_layer.cpp:

@@ -896,12 +896,14 @@ public:
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
+        CV_Assert(nodes.size() >= 2);
         auto curr_node = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
         if (!coeffs.empty()) {
             auto coeff = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1}, &coeffs[0]);
             curr_node = std::make_shared<ngraph::op::v1::Multiply>(curr_node, coeff, ngraph::op::AutoBroadcastType::NUMPY);
         }

+        std::shared_ptr<ngraph::Node> res;
         for (size_t i = 1; i < nodes.size(); i++)
         {
             auto next_node = nodes[i].dynamicCast<InfEngineNgraphNode>()->node;

@@ -910,15 +912,16 @@ public:
                 next_node = std::make_shared<ngraph::op::v1::Multiply>(next_node, coeff, ngraph::op::AutoBroadcastType::NUMPY);
             }
             switch (op) {
-                case SUM:  curr_node = std::make_shared<ngraph::op::v1::Add>(curr_node, next_node); break;
-                case PROD: curr_node = std::make_shared<ngraph::op::v1::Multiply>(curr_node, next_node); break;
-                case DIV:  curr_node = std::make_shared<ngraph::op::v1::Divide>(curr_node, next_node); break;
-                case MAX:  curr_node = std::make_shared<ngraph::op::v1::Maximum>(curr_node, next_node); break;
-                case MIN:  curr_node = std::make_shared<ngraph::op::v1::Minimum>(curr_node, next_node); break;
+                case SUM:  res = std::make_shared<ngraph::op::v1::Add>(curr_node, next_node); break;
+                case PROD: res = std::make_shared<ngraph::op::v1::Multiply>(curr_node, next_node); break;
+                case DIV:  res = std::make_shared<ngraph::op::v1::Divide>(curr_node, next_node); break;
+                case MAX:  res = std::make_shared<ngraph::op::v1::Maximum>(curr_node, next_node); break;
+                case MIN:  res = std::make_shared<ngraph::op::v1::Minimum>(curr_node, next_node); break;
                 default: CV_Error(Error::StsNotImplemented, "Unsupported eltwise operation");
             }
+            curr_node = res;
         }
-        return Ptr<BackendNode>(new InfEngineNgraphNode(curr_node));
+        return Ptr<BackendNode>(new InfEngineNgraphNode(res));
     }
 #endif // HAVE_DNN_NGRAPH

modules/dnn/src/layers/flatten_layer.cpp:

@@ -209,7 +209,7 @@ public:
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        std::vector<size_t> dims = ieInpNode->get_shape();
+        std::vector<size_t> dims = ieInpNode.get_shape();

         int numAxes = dims.size();
         int startAxis = normalize_axis(_startAxis, numAxes);

modules/dnn/src/layers/fully_connected_layer.cpp:

@@ -803,7 +803,7 @@ public:
         }
         else
         {
-            std::vector<int> shape(1 + normalize_axis(axis, ieInpNode->get_shape().size()), 0);
+            std::vector<int> shape(1 + normalize_axis(axis, ieInpNode.get_shape().size()), 0);
             shape[shape.size() - 1] = -1;
             auto inp = std::make_shared<ngraph::op::v1::Reshape>(
                 ieInpNode,

modules/dnn/src/layers/lrn_layer.cpp:

@@ -480,7 +480,7 @@ public:
         if (type != SPATIAL_NRM) {
             axes = {1};
         } else {
-            axes.resize(ieInpNode->get_shape().size() - 2);
+            axes.resize(ieInpNode.get_shape().size() - 2);
             std::iota(axes.begin(), axes.end(), 2);
         }
         auto ngraph_axes = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{axes.size()}, axes.data());

modules/dnn/src/layers/max_unpooling_layer.cpp:

@@ -194,7 +194,7 @@ public:
         std::vector<MatShape> inpShapes(nodes.size());
         std::vector<MatShape> outShapes, internals;
         for (int i = 0; i < nodes.size(); ++i) {
-            std::vector<size_t> shape = nodes[i].dynamicCast<InfEngineNgraphNode>()->node->get_shape();
+            std::vector<size_t> shape = nodes[i].dynamicCast<InfEngineNgraphNode>()->node.get_shape();
             inpShapes[i] = std::vector<int>(shape.begin(), shape.end());
         }
         getMemoryShapes(inpShapes, 1, outShapes, internals);

@@ -213,7 +213,7 @@ public:
             std::make_shared<ngraph::op::Constant>(ngraph::element::i32, ngraph::Shape{1}, &newShape),
             true
         );
-        if (indices->get_element_type() != ngraph::element::i32 && indices->get_element_type() != ngraph::element::i64) {
+        if (indices.get_element_type() != ngraph::element::i32 && indices.get_element_type() != ngraph::element::i64) {
            indices = std::make_shared<ngraph::op::Convert>(indices, ngraph::element::i64);
         }

modules/dnn/src/layers/mvn_layer.cpp:

@@ -390,7 +390,7 @@ public:
         auto mvn = std::make_shared<ngraph::op::MVN>(ieInpNode, acrossChannels, normVariance, eps);
 #else
         int64_t start_axis = acrossChannels ? 1 : 2;
-        std::vector<int64_t> axes_v(ieInpNode->get_shape().size() - start_axis);
+        std::vector<int64_t> axes_v(ieInpNode.get_shape().size() - start_axis);
         std::iota(axes_v.begin(), axes_v.end(), start_axis);
         auto axes = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{axes_v.size()}, axes_v.data());
         auto mvn = std::make_shared<ngraph::op::v6::MVN>(ieInpNode, axes, normVariance, eps, ngraph::op::MVNEpsMode::INSIDE_SQRT);

modules/dnn/src/layers/nary_eltwise_layers.cpp:

@@ -900,12 +900,12 @@ public:
         auto& inp0 = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
         auto& inp1 = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;

-        if (inp0->get_element_type() != inp1->get_element_type()) {
+        if (inp0.get_element_type() != inp1.get_element_type()) {
             auto dtype = preferableTarget == DNN_TARGET_OPENCL_FP16 || preferableTarget == DNN_TARGET_MYRIAD ?
                          ngraph::element::f16 : ngraph::element::f32;
-            if (inp0->get_element_type() != dtype)
+            if (inp0.get_element_type() != dtype)
                 inp0 = std::make_shared<ngraph::op::v0::Convert>(inp0, dtype);
-            if (inp1->get_element_type() != dtype)
+            if (inp1.get_element_type() != dtype)
                 inp1 = std::make_shared<ngraph::op::v0::Convert>(inp1, dtype);
         }

modules/dnn/src/layers/normalize_bbox_layer.cpp:

@@ -273,21 +273,21 @@ public:
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        const size_t batch = ieInpNode->get_shape()[0];
-        const size_t numChannels = ieInpNode->get_shape()[1];
+        const size_t batch = ieInpNode.get_shape()[0];
+        const size_t numChannels = ieInpNode.get_shape()[1];

         std::vector<int64_t> axes_data;
         if (!acrossSpatial) {
             axes_data.push_back(1);
         } else {
-            axes_data.resize(ieInpNode->get_shape().size() - 1);
+            axes_data.resize(ieInpNode.get_shape().size() - 1);
             std::iota(axes_data.begin(), axes_data.end(), 1);
         }
         auto axes = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{axes_data.size()}, axes_data);
         auto norm = std::make_shared<ngraph::op::v0::NormalizeL2>(ieInpNode, axes, epsilon, ngraph::op::EpsMode::ADD);

         CV_Assert(blobs.empty() || numChannels == blobs[0].total());
-        std::vector<size_t> shape(ieInpNode->get_shape().size(), 1);
+        std::vector<size_t> shape(ieInpNode.get_shape().size(), 1);
         shape[0] = blobs.empty() ? 1 : batch;
         shape[1] = numChannels;
         if (!blobs.empty())

modules/dnn/src/layers/pooling_layer.cpp:

@@ -601,7 +601,7 @@ public:
             return Ptr<BackendNode>(new InfEngineNgraphNode(ave_pool));
         }
         else if (type == SUM) {
-            ngraph::Shape inpShape = ieInpNode->get_shape();
+            ngraph::Shape inpShape = ieInpNode.get_shape();
             CV_Assert(inpShape.size() == 2 + kernel_size.size());
             std::vector<int64_t> axes;
             for (size_t i = 0; i < kernel_size.size(); i++)

modules/dnn/src/layers/proposal_layer.cpp:

@@ -366,10 +366,10 @@ public:
         auto& class_logits = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;
         auto& image_shape = nodes[2].dynamicCast<InfEngineNgraphNode>()->node;

-        CV_Assert_N(image_shape->get_shape().size() == 2, image_shape->get_shape().front() == 1);
+        CV_Assert_N(image_shape.get_shape().size() == 2, image_shape.get_shape().front() == 1);
         auto shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
                                                             ngraph::Shape{1},
-                                                            std::vector<int64_t>{(int64_t)image_shape->get_shape().back()});
+                                                            std::vector<int64_t>{(int64_t)image_shape.get_shape().back()});
         auto reshape = std::make_shared<ngraph::op::v1::Reshape>(image_shape, shape, true);

         auto proposal = std::make_shared<ngraph::op::Proposal>(class_probs, class_logits, reshape, attr);

modules/dnn/src/layers/region_layer.cpp:

@@ -466,7 +466,7 @@ public:
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         auto& input = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        auto parent_shape = input->get_shape();
+        auto parent_shape = input.get_shape();
         int64_t b = parent_shape[0];
         int64_t h = parent_shape[1];
         int64_t w = parent_shape[2];

@@ -567,7 +567,7 @@ public:
         int hNorm, wNorm;
         if (nodes.size() > 1)
         {
-            auto node_1_shape = nodes[1].dynamicCast<InfEngineNgraphNode>()->node->get_shape();
+            auto node_1_shape = nodes[1].dynamicCast<InfEngineNgraphNode>()->node.get_shape();
             hNorm = node_1_shape[2];
             wNorm = node_1_shape[3];
         }

modules/dnn/src/layers/resize_layer.cpp:

@@ -443,7 +443,7 @@ public:
             std::vector<int64_t> shape = {outHeight, outWidth};
             auto out_shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{2}, shape.data());

-            auto& input_shape = ieInpNode->get_shape();
+            auto& input_shape = ieInpNode.get_shape();
             CV_Assert_N(input_shape[2] != 0, input_shape[3] != 0);
             std::vector<float> scales = {static_cast<float>(outHeight) / input_shape[2], static_cast<float>(outWidth) / input_shape[3]};
             auto scales_shape = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{2}, scales.data());

modules/dnn/src/layers/scale_layer.cpp:

@@ -331,34 +331,36 @@ public:
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         auto ieInpNode0 = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        auto ieInpNode1 = nodes.size() > 1 ? nodes[1].dynamicCast<InfEngineNgraphNode>()->node : nullptr;
+        ngraph::Output<ngraph::Node> ieInpNode1;
+        if (nodes.size() > 1)
+            ieInpNode1 = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;

         size_t numChannels = 1;
         if (blobs.empty())
-            for (const size_t& dim : ieInpNode1->get_shape())
+            for (const size_t& dim : ieInpNode1.get_shape())
                 numChannels *= dim;
         else
             numChannels = blobs[0].total();

-        std::vector<size_t> shape(ieInpNode0->get_shape().size(), 1);
+        std::vector<size_t> shape(ieInpNode0.get_shape().size(), 1);
         int cAxis = normalize_axis(axis, shape.size());
         shape[cAxis] = numChannels;

-        auto node = ieInpNode0;
+        std::shared_ptr<ngraph::Node> node;
         if (hasWeights)
         {
-            auto weight = blobs.empty() ? ieInpNode1 :
+            ngraph::Output<ngraph::Node> weight = blobs.empty() ? ieInpNode1 :
                           std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), blobs[0].data);
 #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2021_2)
-            node = std::make_shared<ngraph::op::v1::Multiply>(node, weight, ngraph::op::AutoBroadcastType::NUMPY);
+            node = std::make_shared<ngraph::op::v1::Multiply>(ieInpNode0, weight, ngraph::op::AutoBroadcastType::NUMPY);
 #else
-            node = std::make_shared<ngraph::op::v0::Multiply>(node, weight, ngraph::op::AutoBroadcastType::NUMPY);
+            node = std::make_shared<ngraph::op::v0::Multiply>(ieInpNode0, weight, ngraph::op::AutoBroadcastType::NUMPY);
 #endif
         }
         if (hasBias || !hasWeights)
         {
-            std::shared_ptr<ngraph::Node> bias;
+            ngraph::Output<ngraph::Node> bias;
             if (hasBias)
             {
                 bias = blobs.empty() ? ieInpNode1 :

modules/dnn/src/layers/slice_layer.cpp:

@@ -759,7 +759,7 @@ public:
     {
         CV_Assert_N(nodes.size() <= 2);
         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        CV_Assert(finalSliceRanges[0].size() == ieInpNode->get_shape().size());
+        CV_Assert(finalSliceRanges[0].size() == ieInpNode.get_shape().size());

         std::vector<int64_t> offsets, dims;
         for (int i = 0; i < finalSliceRanges[0].size(); ++i)

modules/dnn/src/layers/softmax_layer.cpp:

@@ -385,7 +385,7 @@ public:
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        int axis = normalize_axis(axisRaw, ieInpNode->get_shape().size());
+        int axis = normalize_axis(axisRaw, ieInpNode.get_shape().size());
         auto softmax = std::make_shared<ngraph::op::v1::Softmax>(ieInpNode, axis);
         if (logSoftMax)
             return Ptr<BackendNode>(new InfEngineNgraphNode(std::make_shared<ngraph::op::v0::Log>(softmax)));

modules/dnn/src/net_openvino.cpp:

@@ -321,8 +321,10 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
         return;
     }

+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2022_1)
     bool supportsCPUFallback = !isArmComputePlugin() && (preferableTarget == DNN_TARGET_CPU ||
                                                          openvino::checkTarget(DNN_TARGET_CPU));
+#endif

     // Build Inference Engine networks from sets of layers that support this
     // backend. Split a whole model on several Inference Engine networks if

@@ -341,6 +343,10 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
         bool fused = ld.skip;
         Ptr<Layer> layer = ld.layerInstance;

+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+        if (ld.id == 0)
+            continue;
+#else
         if (!fused && !layer->supportBackend(preferableBackend))
         {
             CV_LOG_DEBUG(NULL, "DNN/IE: NOT supported!");

@@ -355,17 +361,6 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
             }
         }

-            // TODO: fix these workarounds
-            if (preferableTarget == DNN_TARGET_MYRIAD ||
-                preferableTarget == DNN_TARGET_HDDL ||
-                preferableTarget == DNN_TARGET_OPENCL ||
-                preferableTarget == DNN_TARGET_OPENCL_FP16)
-                customizable &= ld.type != "Concat";
-
-            if (preferableTarget == DNN_TARGET_OPENCL ||
-                preferableTarget == DNN_TARGET_OPENCL_FP16)
-                customizable &= ld.type != "Power";
-
             if (preferableTarget == DNN_TARGET_OPENCL)
                 customizable &= ld.type != "Eltwise";

@@ -390,6 +385,7 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
                 continue;
             }
         }
+#endif

         ld.skip = true;  // Initially skip all Inference Engine supported layers.

         // Create a new network if one of inputs from different Inference Engine graph.

@@ -478,7 +474,7 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
             int oid = ld.inputBlobsId[i].oid;

             auto ieInpNode = inputNodes[i].dynamicCast<InfEngineNgraphNode>();
-            const auto& ngraph_input_node = ieInpNode->node;
+            const auto& ngraph_input_node = ieInpNode->node.get_node_shared_ptr();
             CV_LOG_DEBUG(NULL, "DNN/IE: bind output port " << lid << ":" << oid << " (" << ngraph_input_node->get_friendly_name() << ":" << ngraph_input_node->get_type_info().name << ")");

             if ((oid == 0 && ngraph_input_node->get_output_size() == 1) || lid == 0)

@@ -498,10 +494,7 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
             }
             CV_CheckLT((size_t)oid, ngraph_input_node->get_output_size(), "");
 #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2020_4)
-            // FIXIT refactor ".initNgraph()" API to use Output<Node>
-            // WA: use Concat to emulate Identity operation with requested output port
-            auto oid_node = std::make_shared<ngraph::op::Concat>(ngraph::OutputVector { ngraph_input_node->output(oid) }, 0);
-            inputNodes[i] = Ptr<BackendNode>(new InfEngineNgraphNode(oid_node));
+            inputNodes[i] = new InfEngineNgraphNode(ngraph_input_node->output(oid));
 #elif INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2020_3)
             inputNodes[i] = Ptr<BackendNode>(new InfEngineNgraphNode(ieInpNode->node->get_output_as_single_output_node(oid)));
 #else

@@ -556,6 +549,36 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
         addNgraphOutputs(ld);
     }

+    // User may choose to return only intermediate blobs but not network's result (see Test_TFLite.max_unpooling).
+    // Such layers should not be skipped when forwardLayer is called.
+    // Also, perform a sanity check that there is no double inferred networks (a single skip=false per unique net instance).
+    std::set<Ptr<InfEngineNgraphNet>> uniqueNets;
+    if (!blobsToKeep_.empty())
+    {
+        LayerPin latestLayerPin = getLatestLayerPin(blobsToKeep_);
+        for (MapIdToLayerData::iterator it = layers.begin(); it != layers.end(); ++it)
+        {
+            LayerData& ld = it->second;
+            auto iter = ld.backendNodes.find(preferableBackend);
+            if (iter == ld.backendNodes.end())
+                continue;
+
+            Ptr<BackendNode>& node = iter->second;
+            if (node.empty())
+                continue;
+
+            Ptr<InfEngineNgraphNode> ieNode = node.dynamicCast<InfEngineNgraphNode>();
+            if (ieNode.empty())
+                continue;
+
+            if (ld.id == latestLayerPin.lid) {
+                ld.skip = false;
+                uniqueNets.insert(ieNode->net);
+                break;
+            }
+        }
+    }
+
     // Initialize all networks.
     for (MapIdToLayerData::reverse_iterator it = layers.rbegin(); it != layers.rend(); ++it)
     {

@@ -578,9 +601,15 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
         {
             ieNode->net->addOutput(ieNode);
             ieNode->net->createNet((Target)preferableTarget);
-            ld.skip = false;
+            if (uniqueNets.find(ieNode->net) == uniqueNets.end()) {
+                ld.skip = false;
+                uniqueNets.insert(ieNode->net);
+            }
         }
     }
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+    CV_Assert(uniqueNets.size() == 1);
+#endif
 }

modules/dnn/test/test_halide_layers.cpp:

@@ -425,6 +425,13 @@ TEST_P(FullyConnected, Accuracy)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
     }
 #endif
+    // https://github.com/openvinotoolkit/openvino/issues/19436
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_OPENCL_FP16 && batch == 16)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2023000000)
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_OPENCL && batch == 16)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
+#endif

     Mat weights(outChannels, inChannels * inSize.height * inSize.width, CV_32F);
     randu(weights, -1.0f, 1.0f);

@@ -454,11 +461,13 @@ TEST_P(FullyConnected, Accuracy)
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_OPENCL_FP16)
     {
         l1 = 0.01;
+        if (INF_ENGINE_VER_MAJOR_GE(2023000000))
+            lInf = 0.016;
     }
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_OPENCL)
     {
         l1 = 5e-3;
-        lInf = 7e-3;
+        lInf = INF_ENGINE_VER_MAJOR_GE(2023000000) ? 0.016 : 7e-3;
     }
 #endif
     if (targetId == DNN_TARGET_CUDA_FP16)

modules/dnn/test/test_tflite_importer.cpp:

@@ -157,14 +157,7 @@ TEST_P(Test_TFLite, max_unpooling)
     net.setInput(input);

     std::vector<std::vector<Mat> > outs;
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
-        // TODO: seems like a bug with a retrieving intermediate tensors
-        net.forward(outs, {"conv2d_transpose_4", "p_re_lu_1", "max_pooling_with_argmax2d", "conv2d_86", "max_unpooling2d_2"});
-        outs.erase(outs.begin());
-    }
-    else {
-        net.forward(outs, {"p_re_lu_1", "max_pooling_with_argmax2d", "conv2d_86", "max_unpooling2d_2"});
-    }
+    net.forward(outs, {"p_re_lu_1", "max_pooling_with_argmax2d", "conv2d_86", "max_unpooling2d_2"});
     ASSERT_EQ(outs.size(), 4);
     ASSERT_EQ(outs[0].size(), 1);
