diff --git a/modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp b/modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp
index cda2f9bfad..4a7e9e0786 100644
--- a/modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp
+++ b/modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp
@@ -49,6 +49,8 @@ CV_EXPORTS_W void resetMyriadDevice();
 #define CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2 "Myriad2"
 /// Intel(R) Neural Compute Stick 2, NCS2 (USB 03e7:2485), MyriadX (https://software.intel.com/ru-ru/neural-compute-stick)
 #define CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X "MyriadX"
+#define CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE "ARM_COMPUTE"
+#define CV_DNN_INFERENCE_ENGINE_CPU_TYPE_X86 "X86"
 
 
 /** @brief Returns Inference Engine VPU type.
@@ -57,6 +59,11 @@ CV_EXPORTS_W void resetMyriadDevice();
  */
 CV_EXPORTS_W cv::String getInferenceEngineVPUType();
 
+/** @brief Returns Inference Engine CPU type.
+ *
+ * Specify OpenVINO plugin: CPU or ARM.
+ */
+CV_EXPORTS_W cv::String getInferenceEngineCPUType();
 
 CV__DNN_EXPERIMENTAL_NS_END
 }}  // namespace
diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index 34222b9547..45be6eb97c 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -1286,17 +1286,19 @@ struct Net::Impl : public detail::NetImplBase
         CV_Assert(preferableBackend != DNN_BACKEND_HALIDE ||
                   preferableTarget == DNN_TARGET_CPU ||
                   preferableTarget == DNN_TARGET_OPENCL);
+#ifdef HAVE_INF_ENGINE
         if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
             preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
             CV_Assert(
-                  preferableTarget == DNN_TARGET_CPU ||
+                  (preferableTarget == DNN_TARGET_CPU && (!isArmComputePlugin() || preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)) ||
                   preferableTarget == DNN_TARGET_OPENCL ||
                   preferableTarget == DNN_TARGET_OPENCL_FP16 ||
                   preferableTarget == DNN_TARGET_MYRIAD ||
                   preferableTarget == DNN_TARGET_FPGA
             );
         }
+#endif
         if (!netWasAllocated || this->blobsToKeep != blobsToKeep_)
         {
             if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
@@ -1972,8 +1974,8 @@ struct Net::Impl : public detail::NetImplBase
             return;
         }
 
-        bool supportsCPUFallback = preferableTarget == DNN_TARGET_CPU ||
-                                   BackendRegistry::checkIETarget(DNN_TARGET_CPU);
+        bool supportsCPUFallback = !isArmComputePlugin() && (preferableTarget == DNN_TARGET_CPU ||
+                                   BackendRegistry::checkIETarget(DNN_TARGET_CPU));
 
         // Build Inference Engine networks from sets of layers that support this
         // backend. Split a whole model on several Inference Engine networks if
diff --git a/modules/dnn/src/layers/batch_norm_layer.cpp b/modules/dnn/src/layers/batch_norm_layer.cpp
index 2624d3c53e..27c3db6c44 100644
--- a/modules/dnn/src/layers/batch_norm_layer.cpp
+++ b/modules/dnn/src/layers/batch_norm_layer.cpp
@@ -382,7 +382,11 @@ public:
         shape[1] = weights_.total();
         auto weight = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), weights_.data);
         auto bias = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), bias_.data);
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2021_2)
+        auto scale_node = std::make_shared<ngraph::op::v1::Multiply>(ieInpNode, weight, ngraph::op::AutoBroadcastType::NUMPY);
+#else
         auto scale_node = std::make_shared<ngraph::op::v0::Multiply>(ieInpNode, weight, ngraph::op::AutoBroadcastType::NUMPY);
+#endif
         auto scale_shift = std::make_shared<ngraph::op::v1::Add>(scale_node, bias, ngraph::op::AutoBroadcastType::NUMPY);
         return Ptr<BackendNode>(new InfEngineNgraphNode(scale_shift));
     }
diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp
index ef1dc8f29a..eeb9f73f5d 100644
--- a/modules/dnn/src/layers/convolution_layer.cpp
+++ b/modules/dnn/src/layers/convolution_layer.cpp
@@ -273,10 +273,13 @@ public:
 #ifdef HAVE_INF_ENGINE
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
-            if (ksize == 1)
+            bool isArmTarget = preferableTarget == DNN_TARGET_CPU && isArmComputePlugin();
+            if (isArmTarget && blobs.empty())
                 return false;
+            if (ksize == 1)
+                return isArmTarget;
             if (ksize == 3)
-                return preferableTarget == DNN_TARGET_CPU;
+                return preferableTarget != DNN_TARGET_MYRIAD && !isArmTarget;
             if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || preferableTarget != DNN_TARGET_MYRIAD) && blobs.empty())
                 return false;
             return (preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height);
@@ -578,7 +581,7 @@ public:
         CV_Assert_N(inputs.size() >= 1, nodes.size() >= 1);
         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
         std::vector<size_t> dims = ieInpNode->get_shape();
-        CV_Assert(dims.size() == 4 || dims.size() == 5);
+        CV_Check(dims.size(), dims.size() >= 3 && dims.size() <= 5, "");
         std::shared_ptr<ngraph::Node> ieWeights = nodes.size() > 1 ? nodes[1].dynamicCast<InfEngineNgraphNode>()->node : nullptr;
         if (nodes.size() > 1)
             CV_Assert(ieWeights);  // dynamic_cast should not fail
@@ -616,7 +619,7 @@ public:
         else
         {
             auto shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
-                             ngraph::Shape{kernel_shape.size()}, kernel_shape.data());
+                             ngraph::Shape{kernel_shape.size()}, std::vector<int64_t>(kernel_shape.begin(), kernel_shape.end()));
             ieWeights  = std::make_shared<ngraph::op::v1::Reshape>(ieWeights, shape, true);
         }
 
@@ -651,7 +654,7 @@ public:
         if (nodes.size() == 3)
         {
             auto bias_shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
-                              ngraph::Shape{shape.size()}, shape.data());
+                              ngraph::Shape{shape.size()}, std::vector<int64_t>(shape.begin(), shape.end()));
             bias = std::make_shared<ngraph::op::v1::Reshape>(nodes[2].dynamicCast<InfEngineNgraphNode>()->node, bias_shape, true);
         }
         else
diff --git a/modules/dnn/src/layers/elementwise_layers.cpp b/modules/dnn/src/layers/elementwise_layers.cpp
index d47e08886c..e6cf714bff 100644
--- a/modules/dnn/src/layers/elementwise_layers.cpp
+++ b/modules/dnn/src/layers/elementwise_layers.cpp
@@ -1164,11 +1164,15 @@ struct PowerFunctor : public BaseFunctor
                                                                  ngraph::Shape{1}, &scale);
         auto shift_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
                                                                  ngraph::Shape{1}, &shift);
-        auto power_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
-                                                                 ngraph::Shape{1}, &power);
 
         auto mul = std::make_shared<ngraph::op::v1::Multiply>(scale_node, node, ngraph::op::AutoBroadcastType::NUMPY);
         auto scale_shift = std::make_shared<ngraph::op::v1::Add>(mul, shift_node, ngraph::op::AutoBroadcastType::NUMPY);
+
+        if (power == 1)
+            return scale_shift;
+
+        auto power_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+                                                                 ngraph::Shape{1}, &power);
         return std::make_shared<ngraph::op::v1::Power>(scale_shift, power_node, ngraph::op::AutoBroadcastType::NUMPY);
     }
 #endif  // HAVE_DNN_NGRAPH
diff --git a/modules/dnn/src/layers/normalize_bbox_layer.cpp b/modules/dnn/src/layers/normalize_bbox_layer.cpp
index 5def78f221..cdaa87bde5 100644
--- a/modules/dnn/src/layers/normalize_bbox_layer.cpp
+++ b/modules/dnn/src/layers/normalize_bbox_layer.cpp
@@ -324,8 +324,8 @@ public:
         if (!acrossSpatial) {
             axes_data.push_back(1);
         } else {
-            axes_data.resize(ieInpNode->get_shape().size());
-            std::iota(axes_data.begin(), axes_data.end(), 0);
+            axes_data.resize(ieInpNode->get_shape().size() - 1);
+            std::iota(axes_data.begin(), axes_data.end(), 1);
         }
         auto axes = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{axes_data.size()}, axes_data);
         auto norm = std::make_shared<ngraph::op::NormalizeL2>(ieInpNode, axes, epsilon, ngraph::op::EpsMode::ADD);
@@ -334,19 +334,18 @@ public:
         std::vector<size_t> shape(ieInpNode->get_shape().size(), 1);
         shape[0] = blobs.empty() ? 1 : batch;
         shape[1] = numChannels;
-        std::shared_ptr<ngraph::Node> weight;
-        if (blobs.empty())
+        if (!blobs.empty())
         {
-            std::vector<float> ones(numChannels, 1);
-            weight = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), ones.data());
-        }
-        else
-        {
-            weight = std::make_shared<ngraph::op::Constant>(
+            auto weight = std::make_shared<ngraph::op::Constant>(
                                  ngraph::element::f32, ngraph::Shape(shape), blobs[0].data);
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2021_2)
+            auto mul = std::make_shared<ngraph::op::v1::Multiply>(norm, weight, ngraph::op::AutoBroadcastType::NUMPY);
+#else
+            auto mul = std::make_shared<ngraph::op::v0::Multiply>(norm, weight, ngraph::op::AutoBroadcastType::NUMPY);
+#endif
+            return Ptr<BackendNode>(new InfEngineNgraphNode(mul));
         }
-        auto mul = std::make_shared<ngraph::op::v0::Multiply>(norm, weight, ngraph::op::AutoBroadcastType::NUMPY);
-        return Ptr<BackendNode>(new InfEngineNgraphNode(mul));
+        return Ptr<BackendNode>(new InfEngineNgraphNode(norm));
     }
 #endif  // HAVE_DNN_NGRAPH
diff --git a/modules/dnn/src/layers/padding_layer.cpp b/modules/dnn/src/layers/padding_layer.cpp
index b6e1874be0..af3dacdd7a 100644
--- a/modules/dnn/src/layers/padding_layer.cpp
+++ b/modules/dnn/src/layers/padding_layer.cpp
@@ -97,9 +97,12 @@ public:
     {
 #ifdef HAVE_INF_ENGINE
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-            return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
-                   (preferableTarget != DNN_TARGET_MYRIAD ||
-                    (dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0));
+        {
+            if (INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) && preferableTarget == DNN_TARGET_MYRIAD)
+                return dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0;
+
+            return (dstRanges.size() <= 4 || !isArmComputePlugin());
+        }
 #endif
         return backendId == DNN_BACKEND_OPENCV ||
                (backendId == DNN_BACKEND_HALIDE && haveHalide() && dstRanges.size() == 4);
diff --git a/modules/dnn/src/layers/permute_layer.cpp b/modules/dnn/src/layers/permute_layer.cpp
index e3129556ba..cb05e4ca52 100644
--- a/modules/dnn/src/layers/permute_layer.cpp
+++ b/modules/dnn/src/layers/permute_layer.cpp
@@ -105,6 +105,10 @@ public:
 
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && preferableTarget == DNN_TARGET_CPU)
+            return _order.size() <= 4 || !isArmComputePlugin();
+#endif
         return backendId == DNN_BACKEND_OPENCV ||
                ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
     }
diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp
index ac25bf4dae..e79aa367d3 100644
--- a/modules/dnn/src/layers/pooling_layer.cpp
+++ b/modules/dnn/src/layers/pooling_layer.cpp
@@ -205,7 +205,9 @@ public:
 #endif
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
-            return !computeMaxIdx && type != STOCHASTIC && kernel_size.size() > 1;
+#ifdef HAVE_DNN_NGRAPH
+            return !computeMaxIdx && type != STOCHASTIC && kernel_size.size() > 1 && (kernel_size.size() != 3 || !isArmComputePlugin());
+#endif
         }
         else if (backendId == DNN_BACKEND_OPENCV)
         {
diff --git a/modules/dnn/src/layers/region_layer.cpp b/modules/dnn/src/layers/region_layer.cpp
index c0ba4b2ccf..4a8cb724d6 100644
--- a/modules/dnn/src/layers/region_layer.cpp
+++ b/modules/dnn/src/layers/region_layer.cpp
@@ -393,8 +393,10 @@ public:
             std::vector<float> mask(anchors, 1);
             region = std::make_shared<ngraph::op::RegionYolo>(tr_input, coords, classes, anchors, useSoftmax, mask, 1, 3, anchors_vec);
 
+            auto tr_shape = tr_input->get_shape();
             auto shape_as_inp = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
-                                                                       ngraph::Shape{tr_input->get_shape().size()}, tr_input->get_shape().data());
+                                                                       ngraph::Shape{tr_shape.size()},
+                                                                       std::vector<int64_t>(tr_shape.begin(), tr_shape.end()));
             region = std::make_shared<ngraph::op::v1::Reshape>(region, shape_as_inp, true);
 
             new_axes = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{4}, std::vector<int64_t>{0, 2, 3, 1});
@@ -540,7 +542,7 @@ public:
             result = std::make_shared<ngraph::op::Transpose>(result, tr_axes);
         if (b > 1)
         {
-            std::vector<size_t> sizes = {(size_t)b, result->get_shape()[0] / b, result->get_shape()[1]};
+            std::vector<int64_t> sizes{b, static_cast<int64_t>(result->get_shape()[0]) / b, static_cast<int64_t>(result->get_shape()[1])};
             auto shape_node = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{sizes.size()}, sizes.data());
             result = std::make_shared<ngraph::op::v1::Reshape>(result, shape_node, true);
         }
diff --git a/modules/dnn/src/layers/scale_layer.cpp b/modules/dnn/src/layers/scale_layer.cpp
index 058140235b..e8a01672ad 100644
--- a/modules/dnn/src/layers/scale_layer.cpp
+++ b/modules/dnn/src/layers/scale_layer.cpp
@@ -249,7 +249,11 @@ public:
             auto weight = blobs.empty() ? ieInpNode1 :
                           std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), blobs[0].data);
 
-            node = std::make_shared<ngraph::op::v0::Multiply>(node, weight, ngraph::op::AutoBroadcastType::NUMPY);
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2021_2)
+            node = std::make_shared<ngraph::op::v1::Multiply>(node, weight, ngraph::op::AutoBroadcastType::NUMPY);
+#else
+            node = std::make_shared<ngraph::op::v0::Multiply>(node, weight, ngraph::op::AutoBroadcastType::NUMPY);
+#endif
         }
         if (hasBias || !hasWeights)
         {
diff --git a/modules/dnn/src/op_inf_engine.cpp b/modules/dnn/src/op_inf_engine.cpp
index 43fb5999d9..41783d10b5 100644
--- a/modules/dnn/src/op_inf_engine.cpp
+++ b/modules/dnn/src/op_inf_engine.cpp
@@ -651,6 +651,22 @@ InferenceEngine::Core& getCore(const std::string& id)
 }
 #endif
 
+static bool detectArmPlugin_()
+{
+    InferenceEngine::Core& ie = getCore("CPU");
+    const std::vector<std::string> devices = ie.GetAvailableDevices();
+    for (std::vector<std::string>::const_iterator i = devices.begin(); i != devices.end(); ++i)
+    {
+        if (i->find("CPU") != std::string::npos)
+        {
+            const std::string name = ie.GetMetric(*i, METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();
+            CV_LOG_INFO(NULL, "CPU plugin: " << name);
+            return name.find("arm_compute::NEON") != std::string::npos;
+        }
+    }
+    return false;
+}
+
 #if !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
 static bool detectMyriadX_()
 {
@@ -1162,6 +1178,12 @@ bool isMyriadX()
     return myriadX;
 }
 
+bool isArmComputePlugin()
+{
+    static bool armPlugin = getInferenceEngineCPUType() == CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE;
+    return armPlugin;
+}
+
 static std::string getInferenceEngineVPUType_()
 {
     static std::string param_vpu_type = utils::getConfigurationParameterString("OPENCV_DNN_IE_VPU_TYPE", "");
@@ -1199,6 +1221,14 @@ cv::String getInferenceEngineVPUType()
     return vpu_type;
 }
 
+cv::String getInferenceEngineCPUType()
+{
+    static cv::String cpu_type = detectArmPlugin_() ?
+                                 CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE :
+                                 CV_DNN_INFERENCE_ENGINE_CPU_TYPE_X86;
+    return cpu_type;
+}
+
 #else  // HAVE_INF_ENGINE
 
 cv::String getInferenceEngineBackendType()
 {
     CV_Error(Error::StsNotImplemented, "This OpenCV build doesn't include InferenceEngine support");
 }
@@ -1214,6 +1244,11 @@ cv::String getInferenceEngineVPUType()
 {
     CV_Error(Error::StsNotImplemented, "This OpenCV build doesn't include InferenceEngine support");
 }
+
+cv::String getInferenceEngineCPUType()
+{
+    CV_Error(Error::StsNotImplemented, "This OpenCV build doesn't include InferenceEngine support");
+}
 #endif  // HAVE_INF_ENGINE
diff --git a/modules/dnn/src/op_inf_engine.hpp b/modules/dnn/src/op_inf_engine.hpp
index f29af3e0b1..25844710b9 100644
--- a/modules/dnn/src/op_inf_engine.hpp
+++ b/modules/dnn/src/op_inf_engine.hpp
@@ -254,6 +254,8 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
 
 bool isMyriadX();
 
+bool isArmComputePlugin();
+
 CV__DNN_EXPERIMENTAL_NS_END
 
 InferenceEngine::Core& getCore(const std::string& id);
diff --git a/modules/dnn/test/test_common.hpp b/modules/dnn/test/test_common.hpp
index aa7e49537b..e98fbf4f91 100644
--- a/modules/dnn/test/test_common.hpp
+++ b/modules/dnn/test/test_common.hpp
@@ -35,6 +35,7 @@
 #define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2     "dnn_skip_ie_myriad2"
 #define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X     "dnn_skip_ie_myriadx"
 #define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD       CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2, CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X
+#define CV_TEST_TAG_DNN_SKIP_IE_ARM_CPU      "dnn_skip_ie_arm_cpu"
 
 #ifdef HAVE_INF_ENGINE
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index eb63aa085a..ad28cad61a 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -144,6 +144,10 @@ TEST_P(Test_ONNX_layers, Convolution_variable_weight_bias)
         backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) && target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
 
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU &&
+        getInferenceEngineCPUType() == CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_ARM_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+
     String basename = "conv_variable_wb";
     Net net = readNetFromONNX(_tf("models/" + basename + ".onnx"));
     ASSERT_FALSE(net.empty());
@@ -717,6 +721,8 @@ TEST_P(Test_ONNX_layers, Conv1d_variable_weight_bias)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
     {
         if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+        if (target == DNN_TARGET_CPU && getInferenceEngineCPUType() == CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE)
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_ARM_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
     }
     String basename = "conv1d_variable_wb";
     Net net = readNetFromONNX(_tf("models/" + basename + ".onnx"));
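Usage sketch (reviewer note, not part of the patch): the snippet below shows how an application might consume the new public API once this change lands. It assumes an OpenCV build with Inference Engine support; the model file names are placeholders.

    #include <opencv2/dnn.hpp>
    #include <opencv2/dnn/utils/inference_engine.hpp>
    #include <iostream>

    int main()
    {
        // getInferenceEngineCPUType() reports "ARM_COMPUTE" when DNN_TARGET_CPU is
        // served by the arm_compute::NEON-based OpenVINO ARM plugin, "X86" otherwise.
        std::cout << "IE CPU type: " << cv::dnn::getInferenceEngineCPUType() << std::endl;

        cv::dnn::Net net = cv::dnn::readNet("model.xml", "model.bin");  // placeholder model files
        net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
        net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
        return 0;
    }

Internally, layers consult the cached isArmComputePlugin() helper added in op_inf_engine.cpp to reject configurations the ARM plugin does not cover (for example, permutations of more than 4 dimensions), while the tests tag such cases with CV_TEST_TAG_DNN_SKIP_IE_ARM_CPU.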