diff --git a/modules/dnn/src/ie_ngraph.cpp b/modules/dnn/src/ie_ngraph.cpp
index 140d4b0d2f..f9341febb5 100644
--- a/modules/dnn/src/ie_ngraph.cpp
+++ b/modules/dnn/src/ie_ngraph.cpp
@@ -383,11 +383,17 @@ public:
 #endif  // OpenVINO >= 2022.1
 
-InfEngineNgraphNode::InfEngineNgraphNode(std::shared_ptr<ngraph::Node>&& _node)
-    : BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), node(std::move(_node)) {}
+InfEngineNgraphNode::InfEngineNgraphNode(ngraph::Output<ngraph::Node>&& _node)
+    : BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), node(std::move(_node)) {
+    CV_Assert(node.get_node());
+    CV_Assert(node.get_node_shared_ptr());
+}
 
-InfEngineNgraphNode::InfEngineNgraphNode(const std::shared_ptr<ngraph::Node>& _node)
-    : BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), node(_node) {}
+InfEngineNgraphNode::InfEngineNgraphNode(const ngraph::Output<ngraph::Node>& _node)
+    : BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), node(_node) {
+    CV_Assert(node.get_node());
+    CV_Assert(node.get_node_shared_ptr());
+}
 
 InfEngineNgraphNode::InfEngineNgraphNode(const std::vector<Ptr<BackendNode> >& nodes,
                                          Ptr<Layer>& cvLayer_, std::vector<Mat*>& inputs,
@@ -420,7 +426,7 @@ InfEngineNgraphNode::InfEngineNgraphNode(const std::vector<Ptr<BackendNode> >& n
 }
 
 void InfEngineNgraphNode::setName(const std::string& name) {
-    node->set_friendly_name(name);
+    node.get_node()->set_friendly_name(name);
 }
 
 InfEngineNgraphNet::InfEngineNgraphNet(detail::NetImplBase& netImpl)
@@ -441,8 +447,7 @@ InfEngineNgraphNet::InfEngineNgraphNet(detail::NetImplBase& netImpl, InferenceEn
 void InfEngineNgraphNet::addOutput(const Ptr<InfEngineNgraphNode>& node)
 {
     CV_Assert(node);
-    CV_Assert(node->node);
-    const std::string& name = node->node->get_friendly_name();
+    const std::string& name = node->node.get_node()->get_friendly_name();
     requestedOutputs.insert({name, node.get()});
 }
 
@@ -458,7 +463,7 @@ void InfEngineNgraphNet::createNet(Target targetId) {
             CV_Assert(output_node_it->second);
             auto out = std::make_shared<ngraph::op::Result>(output_node_it->second->node);
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
-            out->set_friendly_name(output_node_it->first + (output_node_it->second->node->get_output_size() == 1 ? "" : ".0"));
+            out->set_friendly_name(output_node_it->first + (output_node_it->second->node.get_node()->get_output_size() == 1 ? "" : ".0"));
 #endif
             outs.push_back(out);
         }
diff --git a/modules/dnn/src/ie_ngraph.hpp b/modules/dnn/src/ie_ngraph.hpp
index 7bb0ac09df..cc8f53ca5c 100644
--- a/modules/dnn/src/ie_ngraph.hpp
+++ b/modules/dnn/src/ie_ngraph.hpp
@@ -93,13 +93,13 @@ public:
                         std::vector<Mat*>& inputs, std::vector<Mat>& outputs,
                         std::vector<Mat>& internals);
-    InfEngineNgraphNode(std::shared_ptr<ngraph::Node>&& _node);
-    InfEngineNgraphNode(const std::shared_ptr<ngraph::Node>& _node);
+    InfEngineNgraphNode(ngraph::Output<ngraph::Node>&& _node);
+    InfEngineNgraphNode(const ngraph::Output<ngraph::Node>& _node);
 
     void setName(const std::string& name);
 
     // Inference Engine network object that allows to obtain the outputs of this layer.
-    std::shared_ptr<ngraph::Node> node;
+    ngraph::Output<ngraph::Node> node;
     Ptr<InfEngineNgraphNet> net;
     Ptr<dnn::Layer> cvLayer;
 };
diff --git a/modules/dnn/src/layers/batch_norm_layer.cpp b/modules/dnn/src/layers/batch_norm_layer.cpp
index b90ee934ef..1d95096e60 100644
--- a/modules/dnn/src/layers/batch_norm_layer.cpp
+++ b/modules/dnn/src/layers/batch_norm_layer.cpp
@@ -457,7 +457,7 @@ public:
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         auto ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        std::vector<size_t> shape(ieInpNode->get_shape().size(), 1);
+        std::vector<size_t> shape(ieInpNode.get_shape().size(), 1);
         shape[1] = weights_.total();
         auto weight = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), weights_.data);
         auto bias = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), bias_.data);
diff --git a/modules/dnn/src/layers/blank_layer.cpp b/modules/dnn/src/layers/blank_layer.cpp
index 3095e2d6c9..16de23b15e 100644
--- a/modules/dnn/src/layers/blank_layer.cpp
+++ b/modules/dnn/src/layers/blank_layer.cpp
@@ -148,7 +148,7 @@ public:
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
+        auto ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
         ngraph::OutputVector inp{ieInpNode};
         auto blank = std::make_shared<ngraph::op::Concat>(inp, 0);
         return Ptr<BackendNode>(new InfEngineNgraphNode(blank));
diff --git a/modules/dnn/src/layers/concat_layer.cpp b/modules/dnn/src/layers/concat_layer.cpp
index 6bd3dcdea5..a5af16f32e 100644
--- a/modules/dnn/src/layers/concat_layer.cpp
+++ b/modules/dnn/src/layers/concat_layer.cpp
@@ -392,7 +392,7 @@ public:
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        const int numDims = nodes[0].dynamicCast<InfEngineNgraphNode>()->node->get_shape().size();
+        const int numDims = nodes[0].dynamicCast<InfEngineNgraphNode>()->node.get_shape().size();
         const int cAxis = normalize_axis(axis, numDims);
         std::vector<size_t> maxDims(numDims, 0);
 
@@ -403,7 +403,7 @@ public:
             auto inp = nodes[i].dynamicCast<InfEngineNgraphNode>()->node;
             inp_nodes.push_back(inp);
 
-            std::vector<size_t> inpShape = inp->get_shape();
+            std::vector<size_t> inpShape = inp.get_shape();
             for (int i = 0; i < numDims; ++i)
                 maxDims[i] = std::max(maxDims[i], inpShape[i]);
         }
diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp
index 0488dc462d..d6e0aba1c6 100644
--- a/modules/dnn/src/layers/convolution_layer.cpp
+++ b/modules/dnn/src/layers/convolution_layer.cpp
@@ -822,13 +822,13 @@ public:
         CV_Assert(!blobs.empty());
         CV_Assert_N(inputs.size() >= 1, nodes.size() >= 1);
         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        std::vector<size_t> dims = ieInpNode->get_shape();
+        std::vector<size_t> dims = ieInpNode.get_shape();
         CV_Check(dims.size(), dims.size() >= 3 && dims.size() <= 5, "");
-        std::shared_ptr<ngraph::Node> ieWeights = nodes.size() > 1 ? nodes[1].dynamicCast<InfEngineNgraphNode>()->node : nullptr;
+        ngraph::Output<ngraph::Node> ieWeights;
         if (nodes.size() > 1)
-            CV_Assert(ieWeights);  // dynamic_cast should not fail
+            ieWeights = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;
         const int inpCn = dims[1];
-        const int inpGroupCn = nodes.size() > 1 ? ieWeights->get_shape()[1] : blobs[0].size[1];
+        const int inpGroupCn = nodes.size() > 1 ? ieWeights.get_shape()[1] : blobs[0].size[1];
         const int group = inpCn / inpGroupCn;
 
         std::vector<size_t> kernel_shape;
diff --git a/modules/dnn/src/layers/crop_and_resize_layer.cpp b/modules/dnn/src/layers/crop_and_resize_layer.cpp
index eb8822870f..a6f58f8983 100644
--- a/modules/dnn/src/layers/crop_and_resize_layer.cpp
+++ b/modules/dnn/src/layers/crop_and_resize_layer.cpp
@@ -133,7 +133,7 @@ public:
         auto input = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
         auto rois = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;
 
-        auto rois_shape = rois->get_shape();
+        auto rois_shape = rois.get_shape();
         std::vector<int64_t> dims(rois_shape.begin(), rois_shape.end()), offsets(4, 0);
         offsets[3] = 2;
         dims[3] = 7;
diff --git a/modules/dnn/src/layers/elementwise_layers.cpp b/modules/dnn/src/layers/elementwise_layers.cpp
index 3bcd53f95c..4247511879 100644
--- a/modules/dnn/src/layers/elementwise_layers.cpp
+++ b/modules/dnn/src/layers/elementwise_layers.cpp
@@ -490,7 +490,7 @@ struct ReLUFunctor : public BaseFunctor
 #endif
 
 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         if (slope) {
             auto param = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1}, &slope);
@@ -674,7 +674,7 @@ struct ReLU6Functor : public BaseFunctor
 
 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         return std::make_shared<ngraph::op::Clamp>(node, minValue, maxValue);
     }
@@ -796,7 +796,7 @@ struct BaseDefaultFunctor : public BaseFunctor
 #endif // HAVE_CANN
 
 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         CV_Error(Error::StsNotImplemented, "");
     }
@@ -929,7 +929,7 @@ struct TanHFunctor : public BaseDefaultFunctor<TanHFunctor>
 #endif // HAVE_CANN
 
 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
    {
        return std::make_shared<ngraph::op::Tanh>(node);
    }
@@ -998,7 +998,7 @@ struct SwishFunctor : public BaseDefaultFunctor<SwishFunctor>
 #endif // HAVE_CANN
 
 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         auto sigmoid = std::make_shared<ngraph::op::Sigmoid>(node);
         return std::make_shared<ngraph::op::v1::Multiply>(node, sigmoid);
@@ -1074,7 +1074,7 @@ struct MishFunctor : public BaseDefaultFunctor<MishFunctor>
 #endif // HAVE_CANN
 
 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         float one = 1.0f;
         auto constant = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1}, &one);
@@ -1157,7 +1157,7 @@ struct SigmoidFunctor : public BaseDefaultFunctor<SigmoidFunctor>
 #endif // HAVE_CANN
 
 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         return std::make_shared<ngraph::op::Sigmoid>(node);
     }
@@ -1237,7 +1237,7 @@ struct ELUFunctor : public BaseDefaultFunctor<ELUFunctor>
 #endif // HAVE_CANN
 
 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         return std::make_shared<ngraph::op::Elu>(node, alpha);
     }
@@ -1307,7 +1307,7 @@ struct AbsValFunctor : public BaseDefaultFunctor<AbsValFunctor>
 #endif // HAVE_CANN
 
 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         float coeff = -0.999999f;
         // float coeff = preferableTarget == DNN_TARGET_MYRIAD ? -0.999f : -0.999999f;
@@ -1603,7 +1603,7 @@ struct SqrtFunctor : public BaseDefaultFunctor<SqrtFunctor>
 #endif // HAVE_HALIDE
 
 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         return std::make_shared<ngraph::op::Sqrt>(node);
     }
@@ -2329,7 +2329,7 @@ struct PowerFunctor : public BaseFunctor
 #endif // HAVE_CANN
 
 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         auto scale_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
                                                                  ngraph::Shape{1}, &scale);
@@ -2439,7 +2439,7 @@ struct ExpFunctor : public BaseDefaultFunctor<ExpFunctor>
 #endif // HAVE_HALIDE
 
 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
    {
        auto scale_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
                                                                 ngraph::Shape{1}, &normScale);
@@ -2598,7 +2598,7 @@ struct ChannelsPReLUFunctor : public BaseFunctor
 #endif // HAVE_CANN
 
 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         const size_t numChannels = scale.total();
         auto slope = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{numChannels}, scale.data);
@@ -2678,7 +2678,7 @@ struct PReLUFunctor : public ChannelsPReLUFunctor
     }
 
 #ifdef HAVE_DNN_NGRAPH
-    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const ngraph::Output<ngraph::Node>& node)
     {
         auto shape = getShape<size_t>(scale);
         auto slope = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, shape, scale.ptr<float>());
diff --git a/modules/dnn/src/layers/eltwise_layer.cpp b/modules/dnn/src/layers/eltwise_layer.cpp
index 8ed1b799eb..49b3c02de3 100644
--- a/modules/dnn/src/layers/eltwise_layer.cpp
+++ b/modules/dnn/src/layers/eltwise_layer.cpp
@@ -896,12 +896,14 @@ public:
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
+        CV_Assert(nodes.size() >= 2);
         auto curr_node = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
         if (!coeffs.empty()) {
             auto coeff = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1}, &coeffs[0]);
             curr_node = std::make_shared<ngraph::op::v1::Multiply>(curr_node, coeff, ngraph::op::AutoBroadcastType::NUMPY);
         }
 
+        std::shared_ptr<ngraph::Node> res;
         for (size_t i = 1; i < nodes.size(); i++)
         {
             auto next_node = nodes[i].dynamicCast<InfEngineNgraphNode>()->node;
@@ -910,15 +912,16 @@ public:
                 next_node = std::make_shared<ngraph::op::v1::Multiply>(next_node, coeff, ngraph::op::AutoBroadcastType::NUMPY);
             }
             switch (op) {
-                case SUM:  curr_node = std::make_shared<ngraph::op::v1::Add>(curr_node, next_node); break;
-                case PROD: curr_node = std::make_shared<ngraph::op::v1::Multiply>(curr_node, next_node); break;
-                case DIV:  curr_node = std::make_shared<ngraph::op::v1::Divide>(curr_node, next_node); break;
-                case MAX:  curr_node = std::make_shared<ngraph::op::v1::Maximum>(curr_node, next_node); break;
-                case MIN:  curr_node = std::make_shared<ngraph::op::v1::Minimum>(curr_node, next_node); break;
+                case SUM:  res = std::make_shared<ngraph::op::v1::Add>(curr_node, next_node); break;
+                case PROD: res = std::make_shared<ngraph::op::v1::Multiply>(curr_node, next_node); break;
+                case DIV:  res = std::make_shared<ngraph::op::v1::Divide>(curr_node, next_node); break;
+                case MAX:  res = std::make_shared<ngraph::op::v1::Maximum>(curr_node, next_node); break;
+                case MIN:  res = std::make_shared<ngraph::op::v1::Minimum>(curr_node, next_node); break;
                 default: CV_Error(Error::StsNotImplemented, "Unsupported eltwise operation");
             }
+            curr_node = res;
         }
-        return Ptr<BackendNode>(new InfEngineNgraphNode(curr_node));
+        return Ptr<BackendNode>(new InfEngineNgraphNode(res));
     }
 #endif  // HAVE_DNN_NGRAPH
diff --git a/modules/dnn/src/layers/flatten_layer.cpp b/modules/dnn/src/layers/flatten_layer.cpp
index 6a502af7e9..9ff3bec38b 100644
--- a/modules/dnn/src/layers/flatten_layer.cpp
+++ b/modules/dnn/src/layers/flatten_layer.cpp
@@ -209,7 +209,7 @@ public:
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        std::vector<size_t> dims = ieInpNode->get_shape();
+        std::vector<size_t> dims = ieInpNode.get_shape();
 
         int numAxes = dims.size();
         int startAxis = normalize_axis(_startAxis, numAxes);
diff --git a/modules/dnn/src/layers/fully_connected_layer.cpp b/modules/dnn/src/layers/fully_connected_layer.cpp
index 9cdb31023c..f03af7c1fb 100644
--- a/modules/dnn/src/layers/fully_connected_layer.cpp
+++ b/modules/dnn/src/layers/fully_connected_layer.cpp
@@ -803,7 +803,7 @@ public:
         }
         else
         {
-            std::vector<int> shape(1 + normalize_axis(axis, ieInpNode->get_shape().size()), 0);
+            std::vector<int> shape(1 + normalize_axis(axis, ieInpNode.get_shape().size()), 0);
             shape[shape.size() - 1] = -1;
             auto inp = std::make_shared<ngraph::op::v1::Reshape>(
                 ieInpNode,
diff --git a/modules/dnn/src/layers/lrn_layer.cpp b/modules/dnn/src/layers/lrn_layer.cpp
index 61c2224e36..f8de64cb32 100644
--- a/modules/dnn/src/layers/lrn_layer.cpp
+++ b/modules/dnn/src/layers/lrn_layer.cpp
@@ -480,7 +480,7 @@ public:
         if (type != SPATIAL_NRM) {
             axes = {1};
         } else {
-            axes.resize(ieInpNode->get_shape().size() - 2);
+            axes.resize(ieInpNode.get_shape().size() - 2);
             std::iota(axes.begin(), axes.end(), 2);
         }
         auto ngraph_axes = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{axes.size()}, axes.data());
diff --git a/modules/dnn/src/layers/max_unpooling_layer.cpp b/modules/dnn/src/layers/max_unpooling_layer.cpp
index 6a599408e1..7ed6c64ae8 100644
--- a/modules/dnn/src/layers/max_unpooling_layer.cpp
+++ b/modules/dnn/src/layers/max_unpooling_layer.cpp
@@ -194,7 +194,7 @@ public:
         std::vector<MatShape> inpShapes(nodes.size());
         std::vector<MatShape> outShapes, internals;
         for (int i = 0; i < nodes.size(); ++i) {
-            std::vector<size_t> shape = nodes[i].dynamicCast<InfEngineNgraphNode>()->node->get_shape();
+            std::vector<size_t> shape = nodes[i].dynamicCast<InfEngineNgraphNode>()->node.get_shape();
             inpShapes[i] = std::vector<int>(shape.begin(), shape.end());
         }
         getMemoryShapes(inpShapes, 1, outShapes, internals);
@@ -213,7 +213,7 @@ public:
             std::make_shared<ngraph::op::Constant>(ngraph::element::i32, ngraph::Shape{1}, &newShape),
             true
         );
-        if (indices->get_element_type() != ngraph::element::i32 && indices->get_element_type() != ngraph::element::i64) {
+        if (indices.get_element_type() != ngraph::element::i32 && indices.get_element_type() != ngraph::element::i64) {
             indices = std::make_shared<ngraph::op::Convert>(indices, ngraph::element::i64);
         }
diff --git a/modules/dnn/src/layers/mvn_layer.cpp b/modules/dnn/src/layers/mvn_layer.cpp
index dc23656b7a..aae53fa327 100644
--- a/modules/dnn/src/layers/mvn_layer.cpp
+++ b/modules/dnn/src/layers/mvn_layer.cpp
@@ -390,7 +390,7 @@ public:
         auto mvn = std::make_shared<ngraph::op::MVN>(ieInpNode, acrossChannels, normVariance, eps);
 #else
         int64_t start_axis = acrossChannels ? 1 : 2;
-        std::vector<int64_t> axes_v(ieInpNode->get_shape().size() - start_axis);
+        std::vector<int64_t> axes_v(ieInpNode.get_shape().size() - start_axis);
         std::iota(axes_v.begin(), axes_v.end(), start_axis);
         auto axes = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{axes_v.size()}, axes_v.data());
         auto mvn = std::make_shared<ngraph::op::v6::MVN>(ieInpNode, axes, normVariance, eps, ngraph::op::MVNEpsMode::INSIDE_SQRT);
diff --git a/modules/dnn/src/layers/nary_eltwise_layers.cpp b/modules/dnn/src/layers/nary_eltwise_layers.cpp
index fadbf58244..8572eee995 100644
--- a/modules/dnn/src/layers/nary_eltwise_layers.cpp
+++ b/modules/dnn/src/layers/nary_eltwise_layers.cpp
@@ -900,12 +900,12 @@ public:
         auto& inp0 = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
         auto& inp1 = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;
 
-        if (inp0->get_element_type() != inp1->get_element_type()) {
+        if (inp0.get_element_type() != inp1.get_element_type()) {
             auto dtype = preferableTarget == DNN_TARGET_OPENCL_FP16 || preferableTarget == DNN_TARGET_MYRIAD ?
                          ngraph::element::f16 : ngraph::element::f32;
-            if (inp0->get_element_type() != dtype)
+            if (inp0.get_element_type() != dtype)
                 inp0 = std::make_shared<ngraph::op::Convert>(inp0, dtype);
-            if (inp1->get_element_type() != dtype)
+            if (inp1.get_element_type() != dtype)
                 inp1 = std::make_shared<ngraph::op::Convert>(inp1, dtype);
         }
diff --git a/modules/dnn/src/layers/normalize_bbox_layer.cpp b/modules/dnn/src/layers/normalize_bbox_layer.cpp
index f0ad6e6f61..431eeab82d 100644
--- a/modules/dnn/src/layers/normalize_bbox_layer.cpp
+++ b/modules/dnn/src/layers/normalize_bbox_layer.cpp
@@ -273,21 +273,21 @@ public:
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        const size_t batch = ieInpNode->get_shape()[0];
-        const size_t numChannels = ieInpNode->get_shape()[1];
+        const size_t batch = ieInpNode.get_shape()[0];
+        const size_t numChannels = ieInpNode.get_shape()[1];
 
         std::vector<int64_t> axes_data;
         if (!acrossSpatial) {
             axes_data.push_back(1);
         } else {
-            axes_data.resize(ieInpNode->get_shape().size() - 1);
+            axes_data.resize(ieInpNode.get_shape().size() - 1);
             std::iota(axes_data.begin(), axes_data.end(), 1);
         }
         auto axes = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{axes_data.size()}, axes_data);
         auto norm = std::make_shared<ngraph::op::NormalizeL2>(ieInpNode, axes, epsilon, ngraph::op::EpsMode::ADD);
 
         CV_Assert(blobs.empty() || numChannels == blobs[0].total());
-        std::vector<int64_t> shape(ieInpNode->get_shape().size(), 1);
+        std::vector<int64_t> shape(ieInpNode.get_shape().size(), 1);
         shape[0] = blobs.empty() ? 1 : batch;
         shape[1] = numChannels;
         if (!blobs.empty())
diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp
index 1337657127..a75382d8a5 100644
--- a/modules/dnn/src/layers/pooling_layer.cpp
+++ b/modules/dnn/src/layers/pooling_layer.cpp
@@ -601,7 +601,7 @@ public:
             return Ptr<BackendNode>(new InfEngineNgraphNode(ave_pool));
         }
         else if (type == SUM) {
-            ngraph::Shape inpShape = ieInpNode->get_shape();
+            ngraph::Shape inpShape = ieInpNode.get_shape();
             CV_Assert(inpShape.size() == 2 + kernel_size.size());
             std::vector<int64_t> axes;
             for (size_t i = 0; i < kernel_size.size(); i++)
diff --git a/modules/dnn/src/layers/proposal_layer.cpp b/modules/dnn/src/layers/proposal_layer.cpp
index e9edcf1547..2f2a33cc6f 100644
--- a/modules/dnn/src/layers/proposal_layer.cpp
+++ b/modules/dnn/src/layers/proposal_layer.cpp
@@ -366,10 +366,10 @@ public:
         auto& class_logits = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;
         auto& image_shape = nodes[2].dynamicCast<InfEngineNgraphNode>()->node;
 
-        CV_Assert_N(image_shape->get_shape().size() == 2, image_shape->get_shape().front() == 1);
+        CV_Assert_N(image_shape.get_shape().size() == 2, image_shape.get_shape().front() == 1);
         auto shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{1},
-                                                            std::vector<int64_t>{(int64_t)image_shape->get_shape().back()});
+                                                            std::vector<int64_t>{(int64_t)image_shape.get_shape().back()});
         auto reshape = std::make_shared<ngraph::op::v1::Reshape>(image_shape, shape, true);
 
         auto proposal = std::make_shared<ngraph::op::Proposal>(class_probs, class_logits, reshape, attr);
diff --git a/modules/dnn/src/layers/region_layer.cpp b/modules/dnn/src/layers/region_layer.cpp
index 7ab8cdd93f..49952b4c83 100644
--- a/modules/dnn/src/layers/region_layer.cpp
+++ b/modules/dnn/src/layers/region_layer.cpp
@@ -466,7 +466,7 @@ public:
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         auto& input = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        auto parent_shape = input->get_shape();
+        auto parent_shape = input.get_shape();
         int64_t b = parent_shape[0];
         int64_t h = parent_shape[1];
         int64_t w = parent_shape[2];
@@ -567,7 +567,7 @@ public:
             int hNorm, wNorm;
             if (nodes.size() > 1)
             {
-                auto node_1_shape = nodes[1].dynamicCast<InfEngineNgraphNode>()->node->get_shape();
+                auto node_1_shape = nodes[1].dynamicCast<InfEngineNgraphNode>()->node.get_shape();
                 hNorm = node_1_shape[2];
                 wNorm = node_1_shape[3];
             }
diff --git a/modules/dnn/src/layers/resize_layer.cpp b/modules/dnn/src/layers/resize_layer.cpp
index 607adb8aa1..fe27748319 100644
--- a/modules/dnn/src/layers/resize_layer.cpp
+++ b/modules/dnn/src/layers/resize_layer.cpp
@@ -443,7 +443,7 @@ public:
         std::vector<int64_t> shape = {outHeight, outWidth};
         auto out_shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{2}, shape.data());
 
-        auto& input_shape = ieInpNode->get_shape();
+        auto& input_shape = ieInpNode.get_shape();
         CV_Assert_N(input_shape[2] != 0, input_shape[3] != 0);
         std::vector<float> scales = {static_cast<float>(outHeight) / input_shape[2], static_cast<float>(outWidth) / input_shape[3]};
         auto scales_shape = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{2}, scales.data());
diff --git a/modules/dnn/src/layers/scale_layer.cpp b/modules/dnn/src/layers/scale_layer.cpp
index 5338ab2215..2a4e1a05d5 100644
--- a/modules/dnn/src/layers/scale_layer.cpp
+++ b/modules/dnn/src/layers/scale_layer.cpp
@@ -331,34 +331,36 @@ public:
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         auto ieInpNode0 = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        auto ieInpNode1 = nodes.size() > 1 ? nodes[1].dynamicCast<InfEngineNgraphNode>()->node : nullptr;
+        ngraph::Output<ngraph::Node> ieInpNode1;
+        if (nodes.size() > 1)
+            ieInpNode1 = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;
 
         size_t numChannels = 1;
         if (blobs.empty())
-            for (const size_t& dim : ieInpNode1->get_shape())
+            for (const size_t& dim : ieInpNode1.get_shape())
                 numChannels *= dim;
         else
             numChannels = blobs[0].total();
 
-        std::vector<size_t> shape(ieInpNode0->get_shape().size(), 1);
+        std::vector<size_t> shape(ieInpNode0.get_shape().size(), 1);
         int cAxis = normalize_axis(axis, shape.size());
         shape[cAxis] = numChannels;
 
-        auto node = ieInpNode0;
+        std::shared_ptr<ngraph::Node> node;
         if (hasWeights)
         {
-            auto weight = blobs.empty() ? ieInpNode1 :
+            ngraph::Output<ngraph::Node> weight = blobs.empty() ? ieInpNode1 :
                           std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), blobs[0].data);
 
 #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2021_2)
-            node = std::make_shared<ngraph::op::v1::Multiply>(node, weight, ngraph::op::AutoBroadcastType::NUMPY);
+            node = std::make_shared<ngraph::op::v1::Multiply>(ieInpNode0, weight, ngraph::op::AutoBroadcastType::NUMPY);
 #else
-            node = std::make_shared<ngraph::op::v0::Multiply>(node, weight, ngraph::op::AutoBroadcastType::NUMPY);
+            node = std::make_shared<ngraph::op::v0::Multiply>(ieInpNode0, weight, ngraph::op::AutoBroadcastType::NUMPY);
 #endif
         }
         if (hasBias || !hasWeights)
         {
-            std::shared_ptr<ngraph::Node> bias;
+            ngraph::Output<ngraph::Node> bias;
             if (hasBias)
             {
                 bias = blobs.empty() ? ieInpNode1 :
diff --git a/modules/dnn/src/layers/slice_layer.cpp b/modules/dnn/src/layers/slice_layer.cpp
index d3675e23a5..c44d18182e 100644
--- a/modules/dnn/src/layers/slice_layer.cpp
+++ b/modules/dnn/src/layers/slice_layer.cpp
@@ -759,7 +759,7 @@ public:
     {
         CV_Assert_N(nodes.size() <= 2);
         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        CV_Assert(finalSliceRanges[0].size() == ieInpNode->get_shape().size());
+        CV_Assert(finalSliceRanges[0].size() == ieInpNode.get_shape().size());
 
         std::vector<int64_t> offsets, dims;
         for (int i = 0; i < finalSliceRanges[0].size(); ++i)
diff --git a/modules/dnn/src/layers/softmax_layer.cpp b/modules/dnn/src/layers/softmax_layer.cpp
index b74f2b6791..faab6a565f 100644
--- a/modules/dnn/src/layers/softmax_layer.cpp
+++ b/modules/dnn/src/layers/softmax_layer.cpp
@@ -385,7 +385,7 @@ public:
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        int axis = normalize_axis(axisRaw, ieInpNode->get_shape().size());
+        int axis = normalize_axis(axisRaw, ieInpNode.get_shape().size());
         auto softmax = std::make_shared<ngraph::op::v1::Softmax>(ieInpNode, axis);
         if (logSoftMax)
             return Ptr<BackendNode>(new InfEngineNgraphNode(std::make_shared<ngraph::op::Log>(softmax)));
diff --git a/modules/dnn/src/net_openvino.cpp b/modules/dnn/src/net_openvino.cpp
index e974ce34a3..4d08edeaaa 100644
--- a/modules/dnn/src/net_openvino.cpp
+++ b/modules/dnn/src/net_openvino.cpp
@@ -321,8 +321,10 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
         return;
     }
 
+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2022_1)
     bool supportsCPUFallback = !isArmComputePlugin() && (preferableTarget == DNN_TARGET_CPU ||
                                                          openvino::checkTarget(DNN_TARGET_CPU));
+#endif
 
     // Build Inference Engine networks from sets of layers that support this
     // backend. Split a whole model on several Inference Engine networks if
@@ -341,6 +343,10 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
         bool fused = ld.skip;
         Ptr<Layer> layer = ld.layerInstance;
 
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+        if (ld.id == 0)
+            continue;
+#else
         if (!fused && !layer->supportBackend(preferableBackend))
         {
             CV_LOG_DEBUG(NULL, "DNN/IE: NOT supported!");
@@ -355,17 +361,6 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
             }
         }
 
-        // TODO: fix these workarounds
-        if (preferableTarget == DNN_TARGET_MYRIAD ||
-            preferableTarget == DNN_TARGET_HDDL ||
-            preferableTarget == DNN_TARGET_OPENCL ||
-            preferableTarget == DNN_TARGET_OPENCL_FP16)
-            customizable &= ld.type != "Concat";
-
-        if (preferableTarget == DNN_TARGET_OPENCL ||
-            preferableTarget == DNN_TARGET_OPENCL_FP16)
-            customizable &= ld.type != "Power";
-
         if (preferableTarget == DNN_TARGET_OPENCL)
             customizable &= ld.type != "Eltwise";
@@ -390,6 +385,7 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
                 continue;
             }
         }
+#endif
 
         ld.skip = true;  // Initially skip all Inference Engine supported layers.
 
         // Create a new network if one of inputs from different Inference Engine graph.
@@ -478,7 +474,7 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
             int oid = ld.inputBlobsId[i].oid;
 
             auto ieInpNode = inputNodes[i].dynamicCast<InfEngineNgraphNode>();
-            const auto& ngraph_input_node = ieInpNode->node;
+            const auto& ngraph_input_node = ieInpNode->node.get_node_shared_ptr();
             CV_LOG_DEBUG(NULL, "DNN/IE: bind output port " << lid << ":" << oid << " (" << ngraph_input_node->get_friendly_name() << ":" << ngraph_input_node->get_type_info().name << ")");
             if ((oid == 0 && ngraph_input_node->get_output_size() == 1) || lid == 0)
@@ -498,10 +494,7 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
             }
             CV_CheckLT((size_t)oid, ngraph_input_node->get_output_size(), "");
 #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2020_4)
-            // FIXIT refactor ".initNgraph()" API to use Output<Node>
-            // WA: use Concat to emulate Identity operation with requested output port
-            auto oid_node = std::make_shared<ngraph::op::Concat>(ngraph::OutputVector { ngraph_input_node->output(oid) }, 0);
-            inputNodes[i] = Ptr<BackendNode>(new InfEngineNgraphNode(oid_node));
+            inputNodes[i] = new InfEngineNgraphNode(ngraph_input_node->output(oid));
 #elif INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2020_3)
             inputNodes[i] = Ptr<BackendNode>(new InfEngineNgraphNode(ieInpNode->node->get_output_as_single_output_node(oid)));
 #else
@@ -556,6 +549,36 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
             addNgraphOutputs(ld);
         }
 
+    // User may choose to return only intermediate blobs but not network's result (see Test_TFLite.max_unpooling)
+    // Such layers should not be skipped when forwardLayer is called.
+    // Also, perform a sanity check that there is no double inferred networks (a single skip=false per unique net instance)
+    std::set<Ptr<InfEngineNgraphNet> > uniqueNets;
+    if (!blobsToKeep_.empty())
+    {
+        LayerPin latestLayerPin = getLatestLayerPin(blobsToKeep_);
+        for (MapIdToLayerData::iterator it = layers.begin(); it != layers.end(); ++it)
+        {
+            LayerData& ld = it->second;
+            auto iter = ld.backendNodes.find(preferableBackend);
+            if (iter == ld.backendNodes.end())
+                continue;
+
+            Ptr<BackendNode>& node = iter->second;
+            if (node.empty())
+                continue;
+
+            Ptr<InfEngineNgraphNode> ieNode = node.dynamicCast<InfEngineNgraphNode>();
+            if (ieNode.empty())
+                continue;
+
+            if (ld.id == latestLayerPin.lid) {
+                ld.skip = false;
+                uniqueNets.insert(ieNode->net);
+                break;
+            }
+        }
+    }
+
     // Initialize all networks.
     for (MapIdToLayerData::reverse_iterator it = layers.rbegin(); it != layers.rend(); ++it)
     {
@@ -578,9 +601,15 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
         {
             ieNode->net->addOutput(ieNode);
             ieNode->net->createNet((Target)preferableTarget);
-            ld.skip = false;
+            if (uniqueNets.find(ieNode->net) == uniqueNets.end()) {
+                ld.skip = false;
+                uniqueNets.insert(ieNode->net);
+            }
         }
     }
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+    CV_Assert(uniqueNets.size() == 1);
+#endif
 }
diff --git a/modules/dnn/test/test_halide_layers.cpp b/modules/dnn/test/test_halide_layers.cpp
index 3629f720fb..12e62c754a 100644
--- a/modules/dnn/test/test_halide_layers.cpp
+++ b/modules/dnn/test/test_halide_layers.cpp
@@ -425,6 +425,13 @@ TEST_P(FullyConnected, Accuracy)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
     }
 #endif
+    // https://github.com/openvinotoolkit/openvino/issues/19436
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_OPENCL_FP16 && batch == 16)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2023000000)
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_OPENCL && batch == 16)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
+#endif
 
     Mat weights(outChannels, inChannels * inSize.height * inSize.width, CV_32F);
     randu(weights, -1.0f, 1.0f);
@@ -454,11 +461,13 @@ TEST_P(FullyConnected, Accuracy)
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_OPENCL_FP16)
     {
         l1 = 0.01;
+        if (INF_ENGINE_VER_MAJOR_GE(2023000000))
+            lInf = 0.016;
     }
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_OPENCL)
     {
         l1 = 5e-3;
-        lInf = 7e-3;
+        lInf = INF_ENGINE_VER_MAJOR_GE(2023000000) ? 0.016 : 7e-3;
     }
 #endif
     if (targetId == DNN_TARGET_CUDA_FP16)
diff --git a/modules/dnn/test/test_tflite_importer.cpp b/modules/dnn/test/test_tflite_importer.cpp
index beb586f126..4f3a8b4a96 100644
--- a/modules/dnn/test/test_tflite_importer.cpp
+++ b/modules/dnn/test/test_tflite_importer.cpp
@@ -157,14 +157,7 @@ TEST_P(Test_TFLite, max_unpooling)
     net.setInput(input);
 
     std::vector<std::vector<Mat> > outs;
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
-        // TODO: seems like a bug with a retrieving intermediate tensors
-        net.forward(outs, {"conv2d_transpose_4", "p_re_lu_1", "max_pooling_with_argmax2d", "conv2d_86", "max_unpooling2d_2"});
-        outs.erase(outs.begin());
-    }
-    else {
-        net.forward(outs, {"p_re_lu_1", "max_pooling_with_argmax2d", "conv2d_86", "max_unpooling2d_2"});
-    }
+    net.forward(outs, {"p_re_lu_1", "max_pooling_with_argmax2d", "conv2d_86", "max_unpooling2d_2"});
 
     ASSERT_EQ(outs.size(), 4);
     ASSERT_EQ(outs[0].size(), 1);