From 4a39b12a78c0f54c1c61167c835961b94f58c5a5 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Fri, 28 Feb 2020 16:46:22 +0300 Subject: [PATCH 1/6] imgcodecs(jpeg): drop unnecessary code - standard huffman tables are handled by modern libjpeg-turbo --- modules/imgcodecs/src/grfmt_jpeg.cpp | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/modules/imgcodecs/src/grfmt_jpeg.cpp b/modules/imgcodecs/src/grfmt_jpeg.cpp index 040a8edbb4..ba5dab41df 100644 --- a/modules/imgcodecs/src/grfmt_jpeg.cpp +++ b/modules/imgcodecs/src/grfmt_jpeg.cpp @@ -75,6 +75,17 @@ extern "C" { #include "jpeglib.h" } +#ifndef CV_MANUAL_JPEG_STD_HUFF_TABLES + #if defined(LIBJPEG_TURBO_VERSION_NUMBER) && LIBJPEG_TURBO_VERSION_NUMBER >= 1003090 + #define CV_MANUAL_JPEG_STD_HUFF_TABLES 0 // libjpeg-turbo handles standard huffman tables itself (jstdhuff.c) + #else + #define CV_MANUAL_JPEG_STD_HUFF_TABLES 1 + #endif +#endif +#if CV_MANUAL_JPEG_STD_HUFF_TABLES == 0 + #undef CV_MANUAL_JPEG_STD_HUFF_TABLES +#endif + namespace cv { @@ -252,6 +263,7 @@ bool JpegDecoder::readHeader() return result; } +#ifdef CV_MANUAL_JPEG_STD_HUFF_TABLES /*************************************************************************** * following code is for supporting MJPEG image files * based on a message of Laurent Pinchart on the video4linux mailing list @@ -385,6 +397,7 @@ int my_jpeg_load_dht (struct jpeg_decompress_struct *info, unsigned char *dht, * end of code for supportting MJPEG image files * based on a message of Laurent Pinchart on the video4linux mailing list ***************************************************************************/ +#endif // CV_MANUAL_JPEG_STD_HUFF_TABLES bool JpegDecoder::readData( Mat& img ) { @@ -400,6 +413,7 @@ bool JpegDecoder::readData( Mat& img ) if( setjmp( jerr->setjmp_buffer ) == 0 ) { +#ifdef CV_MANUAL_JPEG_STD_HUFF_TABLES /* check if this is a mjpeg image format */ if ( cinfo->ac_huff_tbl_ptrs[0] == NULL && cinfo->ac_huff_tbl_ptrs[1] == NULL && @@ -413,6 +427,7 @@ bool JpegDecoder::readData( Mat& img ) cinfo->ac_huff_tbl_ptrs, cinfo->dc_huff_tbl_ptrs ); } +#endif if( color ) { From 124bf8339f4cd56a53593e384f572cf837143b87 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Tue, 3 Mar 2020 08:01:44 +0000 Subject: [PATCH 2/6] dnn(IE): use HAVE_DNN_IE_NN_BUILDER_2019 for NN Builder API code - CMake option: OPENCV_DNN_IE_NN_BUILDER_2019 --- modules/dnn/CMakeLists.txt | 6 + modules/dnn/src/dnn.cpp | 38 ++++- modules/dnn/src/ie_ngraph.cpp | 144 +++++++++++++++++- modules/dnn/src/layers/batch_norm_layer.cpp | 4 +- modules/dnn/src/layers/blank_layer.cpp | 4 +- modules/dnn/src/layers/concat_layer.cpp | 4 +- modules/dnn/src/layers/const_layer.cpp | 7 +- modules/dnn/src/layers/convolution_layer.cpp | 13 +- .../dnn/src/layers/detection_output_layer.cpp | 4 +- modules/dnn/src/layers/elementwise_layers.cpp | 52 ++++--- modules/dnn/src/layers/eltwise_layer.cpp | 4 +- modules/dnn/src/layers/flatten_layer.cpp | 5 +- .../dnn/src/layers/fully_connected_layer.cpp | 4 +- modules/dnn/src/layers/lrn_layer.cpp | 4 +- modules/dnn/src/layers/mvn_layer.cpp | 15 +- .../dnn/src/layers/normalize_bbox_layer.cpp | 4 +- modules/dnn/src/layers/padding_layer.cpp | 2 +- modules/dnn/src/layers/permute_layer.cpp | 4 +- modules/dnn/src/layers/pooling_layer.cpp | 32 ++-- modules/dnn/src/layers/prior_box_layer.cpp | 4 +- modules/dnn/src/layers/proposal_layer.cpp | 4 +- modules/dnn/src/layers/reorg_layer.cpp | 4 +- modules/dnn/src/layers/reshape_layer.cpp | 4 +- modules/dnn/src/layers/resize_layer.cpp | 8 +- 
modules/dnn/src/layers/scale_layer.cpp | 4 +- modules/dnn/src/layers/slice_layer.cpp | 17 ++- modules/dnn/src/layers/softmax_layer.cpp | 4 +- modules/dnn/src/op_inf_engine.cpp | 71 +++++---- modules/dnn/src/op_inf_engine.hpp | 16 +- modules/dnn/test/test_common.impl.hpp | 5 +- modules/dnn/test/test_misc.cpp | 4 + modules/ts/src/ts_tags.cpp | 2 + 32 files changed, 351 insertions(+), 146 deletions(-) diff --git a/modules/dnn/CMakeLists.txt b/modules/dnn/CMakeLists.txt index 4fb5e6c8e7..c7e07c5543 100644 --- a/modules/dnn/CMakeLists.txt +++ b/modules/dnn/CMakeLists.txt @@ -92,9 +92,15 @@ endif() set(dnn_runtime_libs "") if(INF_ENGINE_TARGET) + ocv_option(OPENCV_DNN_IE_NN_BUILDER_2019 "Build with Inference Engine NN Builder API support" ON) + if(OPENCV_DNN_IE_NN_BUILDER_2019) + message(STATUS "DNN: Enabling Inference Engine NN Builder API support") + add_definitions(-DHAVE_DNN_IE_NN_BUILDER_2019=1) + endif() list(APPEND dnn_runtime_libs ${INF_ENGINE_TARGET}) endif() if(HAVE_NGRAPH) + message(STATUS "DNN: Enabling Inference Engine nGraph API support") add_definitions(-DHAVE_DNN_NGRAPH) list(APPEND dnn_runtime_libs ngraph::ngraph) endif() diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp index e6baa53b4f..67ca61d9bb 100644 --- a/modules/dnn/src/dnn.cpp +++ b/modules/dnn/src/dnn.cpp @@ -162,30 +162,40 @@ private: #ifdef HAVE_INF_ENGINE if (checkIETarget(DNN_TARGET_CPU)) { +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_CPU)); +#endif #ifdef HAVE_DNN_NGRAPH backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_CPU)); #endif } if (checkIETarget(DNN_TARGET_MYRIAD)) { +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_MYRIAD)); +#endif #ifdef HAVE_DNN_NGRAPH backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_MYRIAD)); #endif } +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 if (checkIETarget(DNN_TARGET_FPGA)) backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_FPGA)); +#endif #ifdef HAVE_OPENCL if (cv::ocl::useOpenCL() && ocl::Device::getDefault().isIntel()) { if (checkIETarget(DNN_TARGET_OPENCL)) { +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_OPENCL)); +#endif #ifdef HAVE_DNN_NGRAPH backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_OPENCL)); #endif } if (checkIETarget(DNN_TARGET_OPENCL_FP16)) { +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_OPENCL_FP16)); +#endif #ifdef HAVE_DNN_NGRAPH backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_OPENCL_FP16)); #endif @@ -761,7 +771,7 @@ struct DataLayer : public Layer } } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { CV_CheckEQ(inputsData.size(), (size_t)1, ""); @@ -793,7 +803,7 @@ struct DataLayer : public Layer addConstantData("biases", biases, ieLayer); return Ptr(new InfEngineBackendNode(ieLayer)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 std::vector outNames; std::vector shapes; @@ -1051,10 +1061,10 @@ static Ptr wrapMat(int backendId, int targetId, cv::Mat& m) } else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) { -#ifdef HAVE_INF_ENGINE +#ifdef 
HAVE_DNN_IE_NN_BUILDER_2019 return Ptr(new InfEngineBackendWrapper(targetId, m)); #else - CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine API support"); + CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support"); #endif } else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) @@ -1463,10 +1473,10 @@ struct Net::Impl initHalideBackend(); else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) { -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 initInfEngineBackend(blobsToKeep_); #else - CV_Assert(false && "This OpenCV version is built without Inference Engine API support"); + CV_Assert(false && "This OpenCV version is built without Inference Engine NN Builder API support"); #endif } else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) @@ -1536,7 +1546,7 @@ struct Net::Impl } } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 // Before launching Inference Engine graph we need to specify output blobs. // This function requests output blobs based on inputs references of // layers from default backend or layers from different graphs. @@ -1841,7 +1851,7 @@ struct Net::Impl } } } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH @@ -3074,8 +3084,12 @@ struct Net::Impl CV_Assert(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH); if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) { +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 Ptr wrapper = ld.outputBlobsWrappers[pin.oid].dynamicCast(); return std::move(wrapper->futureMat); +#else + CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support"); +#endif } else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) { @@ -3167,9 +3181,13 @@ Net Net::Impl::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNe else #endif { +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 Ptr backendNodeNN(new InfEngineBackendNode(InferenceEngine::Builder::Layer(""))); backendNodeNN->net = Ptr(new InfEngineBackendNet(ieNet)); backendNode = backendNodeNN; +#else + CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support"); +#endif } for (auto& it : ieNet.getOutputsInfo()) { @@ -3195,6 +3213,7 @@ Net Net::Impl::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNe else #endif { +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 Ptr cvLayer(new InfEngineBackendLayer(ieNet)); InferenceEngine::CNNLayerPtr ieLayer = ieNet.getLayerByName(it.first.c_str()); @@ -3205,6 +3224,9 @@ Net Net::Impl::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNe ld.layerInstance = cvLayer; ld.backendNodes[DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019] = backendNode; +#else + CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support"); +#endif } for (int i = 0; i < inputsNames.size(); ++i) diff --git a/modules/dnn/src/ie_ngraph.cpp b/modules/dnn/src/ie_ngraph.cpp index cdb0305dc9..55ba9377a0 100644 --- a/modules/dnn/src/ie_ngraph.cpp +++ b/modules/dnn/src/ie_ngraph.cpp @@ -25,8 +25,8 @@ namespace cv { namespace dnn { // For networks with input layer which has an empty name, IE generates a name id[some_number]. // OpenCV lets users use an empty input name and to prevent unexpected naming, // we can use some predefined name. 
-static std::string kDefaultInpLayerName = "empty_inp_layer_name"; -static constexpr const char* kOpenCVLayersType = "OpenCVLayer"; +static std::string kDefaultInpLayerName = "opencv_ngraph_empty_inp_layer_name"; +static constexpr const char* kOpenCVLayersType = "opencv_ngraph_layer"; static std::string shapesToStr(const std::vector& mats) { @@ -77,7 +77,6 @@ public: return type_info; } - NgraphCustomOp() {}; NgraphCustomOp(const ngraph::NodeVector& inputs, const std::map& params = {}): Op(inputs), params(params) @@ -85,6 +84,11 @@ public: constructor_validate_and_infer_types(); } + ~NgraphCustomOp() + { + // nothing + } + void validate_and_infer_types() override { std::vector > shapes; @@ -116,6 +120,136 @@ private: std::map params; }; + +class InfEngineNgraphCustomLayer : public InferenceEngine::ILayerExecImpl +{ +public: + explicit InfEngineNgraphCustomLayer(const InferenceEngine::CNNLayer& layer) : cnnLayer(layer) + { + std::istringstream iss(layer.GetParamAsString("impl")); + size_t ptr; + iss >> ptr; + cvLayer = (Layer*)ptr; + + std::vector > shapes; + strToShapes(layer.GetParamAsString("internals"), shapes); + internals.resize(shapes.size()); + for (int i = 0; i < shapes.size(); ++i) + internals[i].create(std::vector(shapes[i].begin(), shapes[i].end()), CV_32F); + } + + ~InfEngineNgraphCustomLayer() + { + // nothing + } + + virtual InferenceEngine::StatusCode execute(std::vector& inputs, + std::vector& outputs, + InferenceEngine::ResponseDesc *resp) noexcept + { + std::vector inpMats, outMats; + infEngineBlobsToMats(inputs, inpMats); + infEngineBlobsToMats(outputs, outMats); + + try + { + cvLayer->forward(inpMats, outMats, internals); + return InferenceEngine::StatusCode::OK; + } + catch (...) + { + return InferenceEngine::StatusCode::GENERAL_ERROR; + } + } + + virtual InferenceEngine::StatusCode + getSupportedConfigurations(std::vector& conf, + InferenceEngine::ResponseDesc* resp) noexcept + { + std::vector inDataConfig; + std::vector outDataConfig; + for (auto& it : cnnLayer.insData) + { + InferenceEngine::DataConfig conf; + conf.desc = it.lock()->getTensorDesc(); + inDataConfig.push_back(conf); + } + + for (auto& it : cnnLayer.outData) + { + InferenceEngine::DataConfig conf; + conf.desc = it->getTensorDesc(); + outDataConfig.push_back(conf); + } + + InferenceEngine::LayerConfig layerConfig; + layerConfig.inConfs = inDataConfig; + layerConfig.outConfs = outDataConfig; + + conf.push_back(layerConfig); + return InferenceEngine::StatusCode::OK; + } + + InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config, + InferenceEngine::ResponseDesc *resp) noexcept + { + return InferenceEngine::StatusCode::OK; + } + +private: + InferenceEngine::CNNLayer cnnLayer; + dnn::Layer* cvLayer; + std::vector internals; +}; + + +class InfEngineNgraphCustomLayerFactory : public InferenceEngine::ILayerImplFactory { +public: + explicit InfEngineNgraphCustomLayerFactory(const InferenceEngine::CNNLayer* layer) : cnnLayer(*layer) + { + // nothing + } + + InferenceEngine::StatusCode + getImplementations(std::vector& impls, + InferenceEngine::ResponseDesc* resp) noexcept override + { + impls.push_back(std::make_shared(cnnLayer)); + return InferenceEngine::StatusCode::OK; + } + +private: + InferenceEngine::CNNLayer cnnLayer; +}; + + +class InfEngineNgraphExtension : public InferenceEngine::IExtension +{ +public: + virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {} + virtual void Unload() noexcept {} + virtual void Release() noexcept {} + virtual void GetVersion(const 
InferenceEngine::Version*&) const noexcept {} + + virtual InferenceEngine::StatusCode getPrimitiveTypes(char**&, unsigned int&, + InferenceEngine::ResponseDesc*) noexcept + { + return InferenceEngine::StatusCode::OK; + } + + InferenceEngine::StatusCode getFactoryFor(InferenceEngine::ILayerImplFactory*& factory, + const InferenceEngine::CNNLayer* cnnLayer, + InferenceEngine::ResponseDesc* resp) noexcept + { + if (cnnLayer->type != kOpenCVLayersType) + return InferenceEngine::StatusCode::NOT_IMPLEMENTED; + factory = new InfEngineNgraphCustomLayerFactory(cnnLayer); + return InferenceEngine::StatusCode::OK; + } +}; + + + InfEngineNgraphNode::InfEngineNgraphNode(std::shared_ptr&& _node) : BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), node(std::move(_node)) {} @@ -423,11 +557,11 @@ void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net) // OpenCV fallbacks as extensions. try { - ie.AddExtension(std::make_shared(), "CPU"); + ie.AddExtension(std::make_shared(), "CPU"); } catch(const std::exception& e) { - CV_LOG_INFO(NULL, "DNN-IE: Can't register OpenCV custom layers extension: " << e.what()); + CV_LOG_INFO(NULL, "DNN-IE: Can't register OpenCV custom layers nGraph extension: " << e.what()); } #ifndef _WIN32 // Limit the number of CPU threads. diff --git a/modules/dnn/src/layers/batch_norm_layer.cpp b/modules/dnn/src/layers/batch_norm_layer.cpp index 7766a49a33..109f141352 100644 --- a/modules/dnn/src/layers/batch_norm_layer.cpp +++ b/modules/dnn/src/layers/batch_norm_layer.cpp @@ -354,7 +354,7 @@ public: } #endif // HAVE_HALIDE -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name); @@ -363,7 +363,7 @@ public: addConstantData("biases", wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C), ieLayer); return Ptr(new InfEngineBackendNode(ieLayer)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, const std::vector >& nodes) CV_OVERRIDE diff --git a/modules/dnn/src/layers/blank_layer.cpp b/modules/dnn/src/layers/blank_layer.cpp index 0163a2a37d..bda5f6171d 100644 --- a/modules/dnn/src/layers/blank_layer.cpp +++ b/modules/dnn/src/layers/blank_layer.cpp @@ -108,7 +108,7 @@ public: inputs[i].copyTo(outputs[i]); } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); @@ -131,7 +131,7 @@ public: ieLayer.setOutputPorts(std::vector(1)); return Ptr(new InfEngineBackendNode(ieLayer)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH diff --git a/modules/dnn/src/layers/concat_layer.cpp b/modules/dnn/src/layers/concat_layer.cpp index bb19bbdf97..c3ca1a4afd 100644 --- a/modules/dnn/src/layers/concat_layer.cpp +++ b/modules/dnn/src/layers/concat_layer.cpp @@ -300,7 +300,7 @@ public: return Ptr(); } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); @@ -310,7 +310,7 @@ public: ieLayer.setInputPorts(std::vector(inputs.size())); return Ptr(new InfEngineBackendNode(ieLayer)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH diff --git a/modules/dnn/src/layers/const_layer.cpp 
b/modules/dnn/src/layers/const_layer.cpp index 5de45252a2..bc23064bee 100644 --- a/modules/dnn/src/layers/const_layer.cpp +++ b/modules/dnn/src/layers/const_layer.cpp @@ -68,14 +68,14 @@ public: blobs[0].copyTo(outputs[0]); } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { InferenceEngine::Builder::ConstLayer ieLayer(name); ieLayer.setData(wrapToInfEngineBlob(blobs[0])); return Ptr(new InfEngineBackendNode(ieLayer)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH @@ -87,7 +87,8 @@ public: blobs[0].data); return Ptr(new InfEngineNgraphNode(node)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_NGRAPH + }; Ptr ConstLayer::create(const LayerParams& params) diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp index 6bb8994a3c..7b75a77fdc 100644 --- a/modules/dnn/src/layers/convolution_layer.cpp +++ b/modules/dnn/src/layers/convolution_layer.cpp @@ -467,7 +467,7 @@ public: return Ptr(); } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector > &inputs) CV_OVERRIDE { InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); @@ -528,7 +528,7 @@ public: return Ptr(new InfEngineBackendNode(l)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector > &inputs, @@ -1328,6 +1328,7 @@ public: return group == 1; } +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) { if (kernel_size.size() == 3 && preferableTarget != DNN_TARGET_CPU) { @@ -1371,9 +1372,11 @@ public: return std::accumulate(dilations.begin(), dilations.end(), 1, std::multiplies()) == 1; return true; } - else +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #endif // HAVE_INF_ENGINE + { return kernel_size.size() == 2 && (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE); + } } bool getMemoryShapes(const std::vector &inputs, @@ -1952,7 +1955,7 @@ public: return Ptr(); } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector > &) CV_OVERRIDE { InferenceEngine::Layout layout = blobs[0].dims == 5? 
InferenceEngine::Layout::NCDHW : @@ -2007,7 +2010,7 @@ public: addConstantData("biases", wrapToInfEngineBlob(biasesMat, {(size_t)numOutput}, InferenceEngine::Layout::C), l); return Ptr(new InfEngineBackendNode(l)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH diff --git a/modules/dnn/src/layers/detection_output_layer.cpp b/modules/dnn/src/layers/detection_output_layer.cpp index d391e01e4e..d32357b04e 100644 --- a/modules/dnn/src/layers/detection_output_layer.cpp +++ b/modules/dnn/src/layers/detection_output_layer.cpp @@ -924,7 +924,7 @@ public: } } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { InferenceEngine::Builder::DetectionOutputLayer ieLayer(name); @@ -946,7 +946,7 @@ public: return Ptr(new InfEngineBackendNode(l)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH diff --git a/modules/dnn/src/layers/elementwise_layers.cpp b/modules/dnn/src/layers/elementwise_layers.cpp index 42b277838b..776053e2b0 100644 --- a/modules/dnn/src/layers/elementwise_layers.cpp +++ b/modules/dnn/src/layers/elementwise_layers.cpp @@ -156,14 +156,14 @@ public: return Ptr(); } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { InferenceEngine::Builder::Layer ieLayer = func.initInfEngineBuilderAPI(); ieLayer.setName(this->name); return Ptr(new InfEngineBackendNode(ieLayer)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, const std::vector >& nodes) CV_OVERRIDE @@ -272,9 +272,11 @@ struct ReLUFunctor : public BaseFunctor bool supportBackend(int backendId, int) { -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) return slope >= 0 || !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1); +#endif +#ifdef HAVE_DNN_NGRAPH if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) return true; #endif @@ -371,12 +373,12 @@ struct ReLUFunctor : public BaseFunctor } #endif // HAVE_HALIDE -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(slope); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) @@ -481,12 +483,12 @@ struct ReLU6Functor : public BaseFunctor } #endif // HAVE_HALIDE -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { return InferenceEngine::Builder::ClampLayer("").setMinValue(minValue).setMaxValue(maxValue); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) @@ -556,12 +558,12 @@ struct TanHFunctor : public BaseFunctor } #endif // HAVE_HALIDE -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { return InferenceEngine::Builder::TanHLayer(""); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) @@ -631,12 +633,12 @@ struct SwishFunctor : public BaseFunctor } #endif // HAVE_HALIDE -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 
InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { CV_Error(Error::StsNotImplemented, ""); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) @@ -707,12 +709,12 @@ struct MishFunctor : public BaseFunctor } #endif // HAVE_HALIDE -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { CV_Error(Error::StsNotImplemented, ""); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) @@ -788,12 +790,12 @@ struct SigmoidFunctor : public BaseFunctor } #endif // HAVE_HALIDE -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { return InferenceEngine::Builder::SigmoidLayer(""); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) @@ -863,12 +865,12 @@ struct ELUFunctor : public BaseFunctor } #endif // HAVE_HALIDE -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { return InferenceEngine::Builder::ELULayer(""); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) @@ -941,12 +943,12 @@ struct AbsValFunctor : public BaseFunctor } #endif // HAVE_HALIDE -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(-0.999999f); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) @@ -1020,12 +1022,12 @@ struct BNLLFunctor : public BaseFunctor } #endif // HAVE_HALIDE -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { CV_Error(Error::StsNotImplemented, ""); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) @@ -1138,14 +1140,14 @@ struct PowerFunctor : public BaseFunctor } #endif // HAVE_HALIDE -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { return InferenceEngine::Builder::PowerLayer("").setPower(power) .setScale(scale) .setShift(shift); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) @@ -1290,7 +1292,7 @@ struct ChannelsPReLUFunctor : public BaseFunctor } #endif // HAVE_HALIDE -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 InferenceEngine::Builder::Layer initInfEngineBuilderAPI() { InferenceEngine::Builder::Layer l = InferenceEngine::Builder::PReLULayer(""); @@ -1298,7 +1300,7 @@ struct ChannelsPReLUFunctor : public BaseFunctor addConstantData("weights", wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C), l); return l; } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH std::shared_ptr initNgraphAPI(const std::shared_ptr& node) diff --git a/modules/dnn/src/layers/eltwise_layer.cpp b/modules/dnn/src/layers/eltwise_layer.cpp index 7851596d86..77e1a1171a 100644 --- a/modules/dnn/src/layers/eltwise_layer.cpp 
+++ b/modules/dnn/src/layers/eltwise_layer.cpp @@ -659,7 +659,7 @@ public: return Ptr(); } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { InferenceEngine::Builder::EltwiseLayer ieLayer(name); @@ -683,7 +683,7 @@ public: return Ptr(new InfEngineBackendNode(l)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH diff --git a/modules/dnn/src/layers/flatten_layer.cpp b/modules/dnn/src/layers/flatten_layer.cpp index b4b259928f..f3434a5751 100644 --- a/modules/dnn/src/layers/flatten_layer.cpp +++ b/modules/dnn/src/layers/flatten_layer.cpp @@ -164,7 +164,7 @@ public: } } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { InferenceEngine::Builder::Layer ieLayer(name); @@ -176,7 +176,7 @@ public: ieLayer.setOutputPorts(std::vector(1)); return Ptr(new InfEngineBackendNode(ieLayer)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, @@ -204,7 +204,6 @@ virtual Ptr initNgraph(const std::vector >& inp return Ptr(new InfEngineNgraphNode(reshape)); } #endif // HAVE_DNN_NGRAPH - // HAVE_INF_ENGINE int _startAxis; int _endAxis; diff --git a/modules/dnn/src/layers/fully_connected_layer.cpp b/modules/dnn/src/layers/fully_connected_layer.cpp index 065062dc99..2114d42e5b 100644 --- a/modules/dnn/src/layers/fully_connected_layer.cpp +++ b/modules/dnn/src/layers/fully_connected_layer.cpp @@ -444,7 +444,7 @@ public: return Ptr(); } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { InferenceEngine::Builder::FullyConnectedLayer ieLayer(name); @@ -459,7 +459,7 @@ public: return Ptr(new InfEngineBackendNode(l)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH diff --git a/modules/dnn/src/layers/lrn_layer.cpp b/modules/dnn/src/layers/lrn_layer.cpp index 5926d9053f..434ba5ccbe 100644 --- a/modules/dnn/src/layers/lrn_layer.cpp +++ b/modules/dnn/src/layers/lrn_layer.cpp @@ -385,7 +385,7 @@ public: #endif // HAVE_HALIDE } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { float alphaSize = alpha; @@ -402,7 +402,7 @@ public: l.getParameters()["k"] = bias; return Ptr(new InfEngineBackendNode(l)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, const std::vector >& nodes) CV_OVERRIDE diff --git a/modules/dnn/src/layers/mvn_layer.cpp b/modules/dnn/src/layers/mvn_layer.cpp index 94434957b1..386446e18f 100644 --- a/modules/dnn/src/layers/mvn_layer.cpp +++ b/modules/dnn/src/layers/mvn_layer.cpp @@ -118,14 +118,17 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) return !zeroDev && (preferableTarget != DNN_TARGET_MYRIAD || eps <= 1e-7f); - else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) +#endif +#ifdef HAVE_DNN_NGRAPH + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) return true; - else -#endif // HAVE_INF_ENGINE +#endif + { return backendId == DNN_BACKEND_OPENCV; + } } #ifdef HAVE_OPENCL @@ -375,7 +378,7 @@ public: } } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const 
std::vector >&) CV_OVERRIDE { InferenceEngine::Builder::MVNLayer ieLayer(name); @@ -384,7 +387,7 @@ public: ieLayer.setEpsilon(eps); return Ptr(new InfEngineBackendNode(ieLayer)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, diff --git a/modules/dnn/src/layers/normalize_bbox_layer.cpp b/modules/dnn/src/layers/normalize_bbox_layer.cpp index 6c1d381feb..b546a96101 100644 --- a/modules/dnn/src/layers/normalize_bbox_layer.cpp +++ b/modules/dnn/src/layers/normalize_bbox_layer.cpp @@ -261,7 +261,7 @@ public: } } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); @@ -310,7 +310,7 @@ public: return Ptr(new InfEngineBackendNode(l)); } } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, diff --git a/modules/dnn/src/layers/padding_layer.cpp b/modules/dnn/src/layers/padding_layer.cpp index f25a0873ba..b6e1874be0 100644 --- a/modules/dnn/src/layers/padding_layer.cpp +++ b/modules/dnn/src/layers/padding_layer.cpp @@ -184,7 +184,7 @@ public: return Ptr(); } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { InferenceEngine::Builder::Layer ieLayer(name); diff --git a/modules/dnn/src/layers/permute_layer.cpp b/modules/dnn/src/layers/permute_layer.cpp index 43abc5697a..1931a01b5f 100644 --- a/modules/dnn/src/layers/permute_layer.cpp +++ b/modules/dnn/src/layers/permute_layer.cpp @@ -371,14 +371,14 @@ public: } } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { InferenceEngine::Builder::PermuteLayer ieLayer(name); ieLayer.setOrder(_order); return Ptr(new InfEngineBackendNode(ieLayer)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp index 491a909f56..83f3df0522 100644 --- a/modules/dnn/src/layers/pooling_layer.cpp +++ b/modules/dnn/src/layers/pooling_layer.cpp @@ -174,15 +174,15 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) { if (computeMaxIdx) return false; -#ifdef HAVE_INF_ENGINE if (kernel_size.size() == 3) return preferableTarget == DNN_TARGET_CPU; if (preferableTarget == DNN_TARGET_MYRIAD) { -#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1) +#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1) if (type == MAX && (pad_l == 1 && pad_t == 1) && stride == Size(2, 2) ) { return !isMyriadX(); } @@ -191,18 +191,24 @@ public: } else return type != STOCHASTIC; -#else - return false; -#endif } - else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) { +#endif + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + { return !computeMaxIdx && type != STOCHASTIC; } - else - return (kernel_size.size() == 3 && backendId == DNN_BACKEND_OPENCV && preferableTarget == DNN_TARGET_CPU) || - ((kernel_size.empty() || kernel_size.size() == 2) && (backendId == DNN_BACKEND_OPENCV || - (backendId == DNN_BACKEND_HALIDE && haveHalide() && - (type == MAX || (type == AVE && !pad_t && !pad_l && !pad_b && !pad_r))))); + 
else if (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE) + { + if (kernel_size.size() == 3) + return (backendId == DNN_BACKEND_OPENCV && preferableTarget == DNN_TARGET_CPU); + if (kernel_size.empty() || kernel_size.size() == 2) + return backendId == DNN_BACKEND_OPENCV || + (backendId == DNN_BACKEND_HALIDE && haveHalide() && + (type == MAX || (type == AVE && !pad_t && !pad_l && !pad_b && !pad_r))); + else + return false; + } + return false; } #ifdef HAVE_OPENCL @@ -301,7 +307,7 @@ public: return Ptr(); } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { if (type == MAX || type == AVE) @@ -347,7 +353,7 @@ public: CV_Error(Error::StsNotImplemented, "Unsupported pooling type"); return Ptr(); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 diff --git a/modules/dnn/src/layers/prior_box_layer.cpp b/modules/dnn/src/layers/prior_box_layer.cpp index c38e61329b..dbd5b0426f 100644 --- a/modules/dnn/src/layers/prior_box_layer.cpp +++ b/modules/dnn/src/layers/prior_box_layer.cpp @@ -494,7 +494,7 @@ public: } } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { if (_explicitSizes) @@ -554,7 +554,7 @@ public: return Ptr(new InfEngineBackendNode(l)); } } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, const std::vector >& nodes) CV_OVERRIDE diff --git a/modules/dnn/src/layers/proposal_layer.cpp b/modules/dnn/src/layers/proposal_layer.cpp index e0930e8abb..2420dbf579 100644 --- a/modules/dnn/src/layers/proposal_layer.cpp +++ b/modules/dnn/src/layers/proposal_layer.cpp @@ -327,7 +327,7 @@ public: layerOutputs[0].col(2).copyTo(dst); } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { InferenceEngine::Builder::ProposalLayer ieLayer(name); @@ -351,7 +351,7 @@ public: return Ptr(new InfEngineBackendNode(ieLayer)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH diff --git a/modules/dnn/src/layers/reorg_layer.cpp b/modules/dnn/src/layers/reorg_layer.cpp index 94f49e63bb..d6fafa664f 100644 --- a/modules/dnn/src/layers/reorg_layer.cpp +++ b/modules/dnn/src/layers/reorg_layer.cpp @@ -185,14 +185,14 @@ public: permute->forward(inputs, outputs, internals_arr); } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { InferenceEngine::Builder::ReorgYoloLayer ieLayer(name); ieLayer.setStride(reorgStride); return Ptr(new InfEngineBackendNode(ieLayer)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector > &inputs, diff --git a/modules/dnn/src/layers/reshape_layer.cpp b/modules/dnn/src/layers/reshape_layer.cpp index 14d3ac5fef..a85a4e4a2f 100644 --- a/modules/dnn/src/layers/reshape_layer.cpp +++ b/modules/dnn/src/layers/reshape_layer.cpp @@ -260,7 +260,7 @@ public: } } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { InferenceEngine::Builder::ReshapeLayer ieLayer(name); @@ -268,7 +268,7 @@ public: ieLayer.setDims(outShapes[0]); return Ptr(new InfEngineBackendNode(ieLayer)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, 
diff --git a/modules/dnn/src/layers/resize_layer.cpp b/modules/dnn/src/layers/resize_layer.cpp index a628056082..c86fa7f717 100644 --- a/modules/dnn/src/layers/resize_layer.cpp +++ b/modules/dnn/src/layers/resize_layer.cpp @@ -56,8 +56,7 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { #ifdef HAVE_INF_ENGINE - if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || - backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) { return (interpolation == "nearest" && scaleWidth == scaleHeight) || (interpolation == "bilinear"); @@ -162,9 +161,9 @@ public: CV_Error(Error::StsNotImplemented, "Unknown interpolation: " + interpolation); } +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { -#ifdef HAVE_INF_ENGINE InferenceEngine::Builder::Layer ieLayer(name); ieLayer.setName(name); if (interpolation == "nearest") @@ -190,9 +189,8 @@ public: ieLayer.setInputPorts(std::vector(1)); ieLayer.setOutputPorts(std::vector(1)); return Ptr(new InfEngineBackendNode(ieLayer)); -#endif // HAVE_INF_ENGINE - return Ptr(); } +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH diff --git a/modules/dnn/src/layers/scale_layer.cpp b/modules/dnn/src/layers/scale_layer.cpp index ea2d117901..1f19b20955 100644 --- a/modules/dnn/src/layers/scale_layer.cpp +++ b/modules/dnn/src/layers/scale_layer.cpp @@ -197,7 +197,7 @@ public: } #endif // HAVE_HALIDE -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >&) CV_OVERRIDE { InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ScaleShiftLayer(name); @@ -223,7 +223,7 @@ public: addConstantData("biases", wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C), l); return Ptr(new InfEngineBackendNode(l)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH diff --git a/modules/dnn/src/layers/slice_layer.cpp b/modules/dnn/src/layers/slice_layer.cpp index 662ade8f14..6de7e934a5 100644 --- a/modules/dnn/src/layers/slice_layer.cpp +++ b/modules/dnn/src/layers/slice_layer.cpp @@ -113,13 +113,16 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { - return backendId == DNN_BACKEND_OPENCV || - (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && sliceRanges.size() == 1) || - (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && -#ifdef HAVE_INF_ENGINE - INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) && +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) + return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) && + sliceRanges.size() == 1 && sliceRanges[0].size() == 4; #endif - sliceRanges.size() == 1 && sliceRanges[0].size() == 4); +#ifdef HAVE_DNN_NGRAPH + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + return sliceRanges.size() == 1; +#endif + return backendId == DNN_BACKEND_OPENCV; } bool getMemoryShapes(const std::vector &inputs, @@ -263,7 +266,7 @@ public: } } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { diff --git a/modules/dnn/src/layers/softmax_layer.cpp b/modules/dnn/src/layers/softmax_layer.cpp index d7ffef0bbf..a0e2b42020 100644 --- a/modules/dnn/src/layers/softmax_layer.cpp +++ b/modules/dnn/src/layers/softmax_layer.cpp @@ -312,7 +312,7 @@ public: 
return Ptr(); } -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 virtual Ptr initInfEngine(const std::vector >& inputs) CV_OVERRIDE { InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]); @@ -322,7 +322,7 @@ public: return Ptr(new InfEngineBackendNode(ieLayer)); } -#endif // HAVE_INF_ENGINE +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector >& inputs, diff --git a/modules/dnn/src/op_inf_engine.cpp b/modules/dnn/src/op_inf_engine.cpp index a5319e1a63..cea4f3fb77 100644 --- a/modules/dnn/src/op_inf_engine.cpp +++ b/modules/dnn/src/op_inf_engine.cpp @@ -42,8 +42,8 @@ Backend& getInferenceEngineBackendTypeParam() { static Backend param = parseInferenceEngineBackendType( utils::getConfigurationParameterString("OPENCV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019_TYPE", -#ifdef HAVE_NGRAPH - CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API // future: CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH +#ifndef HAVE_DNN_IE_NN_BUILDER_2019 + CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH #else CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API #endif @@ -69,6 +69,36 @@ cv::String setInferenceEngineBackendType(const cv::String& newBackendType) CV__DNN_EXPERIMENTAL_NS_END + +Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob) +{ + // NOTE: Inference Engine sizes are reversed. + std::vector dims = blob->getTensorDesc().getDims(); + std::vector size(dims.begin(), dims.end()); + auto precision = blob->getTensorDesc().getPrecision(); + + int type = -1; + switch (precision) + { + case InferenceEngine::Precision::FP32: type = CV_32F; break; + case InferenceEngine::Precision::U8: type = CV_8U; break; + default: + CV_Error(Error::StsNotImplemented, "Unsupported blob precision"); + } + return Mat(size, type, (void*)blob->buffer()); +} + +void infEngineBlobsToMats(const std::vector& blobs, + std::vector& mats) +{ + mats.resize(blobs.size()); + for (int i = 0; i < blobs.size(); ++i) + mats[i] = infEngineBlobToMat(blobs[i]); +} + + +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 + // For networks with input layer which has an empty name, IE generates a name id[some_number]. // OpenCV lets users use an empty input name and to prevent unexpected naming, // we can use some predefined name. @@ -556,6 +586,8 @@ void InfEngineBackendWrapper::setHostDirty() } +#endif // HAVE_DNN_IE_NN_BUILDER_2019 + #if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1) static std::map& getSharedPlugins() { @@ -686,6 +718,9 @@ static bool detectMyriadX_() } #endif // !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT) + +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 + void InfEngineBackendNet::initPlugin(InferenceEngine::CNNNetwork& net) { CV_Assert(!isInitialized()); @@ -984,32 +1019,6 @@ void InfEngineBackendNet::forward(const std::vector >& outBl } } -Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob) -{ - // NOTE: Inference Engine sizes are reversed. 
- std::vector dims = blob->getTensorDesc().getDims(); - std::vector size(dims.begin(), dims.end()); - auto precision = blob->getTensorDesc().getPrecision(); - - int type = -1; - switch (precision) - { - case InferenceEngine::Precision::FP32: type = CV_32F; break; - case InferenceEngine::Precision::U8: type = CV_8U; break; - default: - CV_Error(Error::StsNotImplemented, "Unsupported blob precision"); - } - return Mat(size, type, (void*)blob->buffer()); -} - -void infEngineBlobsToMats(const std::vector& blobs, - std::vector& mats) -{ - mats.resize(blobs.size()); - for (int i = 0; i < blobs.size(); ++i) - mats[i] = infEngineBlobToMat(blobs[i]); -} - bool InfEngineBackendLayer::getMemoryShapes(const std::vector &inputs, const int requiredOutputs, std::vector &outputs, @@ -1076,6 +1085,8 @@ void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data, #endif } +#endif // HAVE_DNN_IE_NN_BUILDER_2019 + #endif // HAVE_INF_ENGINE bool haveInfEngine() @@ -1091,11 +1102,13 @@ void forwardInfEngine(const std::vector >& outBlobsWrappers, Ptr& node, bool isAsync) { CV_Assert(haveInfEngine()); -#ifdef HAVE_INF_ENGINE +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 CV_Assert(!node.empty()); Ptr ieNode = node.dynamicCast(); CV_Assert(!ieNode.empty()); ieNode->net->forward(outBlobsWrappers, isAsync); +#else + CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support"); #endif // HAVE_INF_ENGINE } diff --git a/modules/dnn/src/op_inf_engine.hpp b/modules/dnn/src/op_inf_engine.hpp index 7cf8a324aa..5b37c4dbf0 100644 --- a/modules/dnn/src/op_inf_engine.hpp +++ b/modules/dnn/src/op_inf_engine.hpp @@ -41,6 +41,7 @@ #pragma GCC diagnostic ignored "-Wsuggest-override" #endif +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 //#define INFERENCE_ENGINE_DEPRECATED // turn off deprecation warnings from IE //there is no way to suppress warnings from IE only at this moment, so we are forced to suppress warnings globally #if defined(__GNUC__) @@ -49,6 +50,7 @@ #ifdef _MSC_VER #pragma warning(disable: 4996) // was declared deprecated #endif +#endif // HAVE_DNN_IE_NN_BUILDER_2019 #if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_1) #pragma GCC visibility push(default) @@ -74,6 +76,13 @@ namespace cv { namespace dnn { Backend& getInferenceEngineBackendTypeParam(); +Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob); + +void infEngineBlobsToMats(const std::vector& blobs, + std::vector& mats); + +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 + class InfEngineBackendNet { public: @@ -180,11 +189,6 @@ InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector& ptr); -Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob); - -void infEngineBlobsToMats(const std::vector& blobs, - std::vector& mats); - // Convert Inference Engine blob with FP32 precision to FP16 precision. // Allocates memory for a new blob. 
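// (Illustrative usage, not part of the patch; 'fp32blob' is a hypothetical existing FP32 blob:
//      InferenceEngine::Blob::Ptr fp16blob = convertFp16(fp32blob);
//  the source blob is left untouched, and the returned blob owns the newly allocated FP16 data.)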
InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob); @@ -232,6 +236,8 @@ public: InferenceEngine::ResponseDesc* resp) noexcept; }; +#endif // HAVE_DNN_IE_NN_BUILDER_2019 + CV__DNN_EXPERIMENTAL_NS_BEGIN diff --git a/modules/dnn/test/test_common.impl.hpp b/modules/dnn/test/test_common.impl.hpp index c74e293a43..ee5c1e958b 100644 --- a/modules/dnn/test/test_common.impl.hpp +++ b/modules/dnn/test/test_common.impl.hpp @@ -371,7 +371,10 @@ void initDNNTests() #ifdef HAVE_DNN_NGRAPH CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, #endif - CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 + CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, +#endif + "" ); #endif registerGlobalSkipTag( diff --git a/modules/dnn/test/test_misc.cpp b/modules/dnn/test/test_misc.cpp index 10929b29bc..9fc9fcadbd 100644 --- a/modules/dnn/test/test_misc.cpp +++ b/modules/dnn/test/test_misc.cpp @@ -130,14 +130,18 @@ void test_readNet_IE_do_not_call_setInput(Backend backendId) EXPECT_TRUE(res.empty()) << res.size; } +#ifdef HAVE_DNN_IE_NN_BUILDER_2019 TEST(readNet, do_not_call_setInput_IE_NN_BUILDER_2019) { test_readNet_IE_do_not_call_setInput(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019); } +#endif +#ifdef HAVE_DNN_NGRAPH TEST(readNet, do_not_call_setInput_IE_NGRAPH) { test_readNet_IE_do_not_call_setInput(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH); } +#endif #endif // HAVE_INF_ENGINE typedef testing::TestWithParam > dump; diff --git a/modules/ts/src/ts_tags.cpp b/modules/ts/src/ts_tags.cpp index 906f0d74c9..8bed1b739f 100644 --- a/modules/ts/src/ts_tags.cpp +++ b/modules/ts/src/ts_tags.cpp @@ -62,6 +62,8 @@ static std::vector& getTestTagsSkipList() void registerGlobalSkipTag(const std::string& skipTag) { + if (skipTag.empty()) + return; // do nothing std::vector& skipTags = getTestTagsSkipList(); for (size_t i = 0; i < skipTags.size(); ++i) { From ad16c243cacda5cb4b6aa8901669167cfc192afc Mon Sep 17 00:00:00 2001 From: Jan Solanti Date: Tue, 3 Mar 2020 14:16:32 +0200 Subject: [PATCH 3/6] core(ocl): Don't query image formats when none exist clGetSupportedImageFormats returns CL_INVALID_VALUE if called with num_entries 0 and a non-NULL image_formats pointer so let's not do that. 
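For reference, a minimal sketch of the guarded two-call pattern this change adopts (illustrative only, not part of the diff below; supportsImageFormat and 'wanted' are made-up names, not OpenCV code):

    #include <vector>
    #include <CL/cl.h>

    // Check whether 'context' supports 2D images with format 'wanted'.
    static bool supportsImageFormat(cl_context context, const cl_image_format& wanted)
    {
        // First call: query only the count; image_formats stays NULL, because
        // num_entries == 0 with a non-NULL buffer yields CL_INVALID_VALUE.
        cl_uint numFormats = 0;
        cl_int err = clGetSupportedImageFormats(context, CL_MEM_READ_WRITE,
                                                CL_MEM_OBJECT_IMAGE2D,
                                                0, NULL, &numFormats);
        if (err != CL_SUCCESS || numFormats == 0)
            return false;  // nothing to compare against
        // Second call: safe now that the count is known to be non-zero.
        std::vector<cl_image_format> formats(numFormats);
        err = clGetSupportedImageFormats(context, CL_MEM_READ_WRITE,
                                         CL_MEM_OBJECT_IMAGE2D,
                                         numFormats, formats.data(), NULL);
        for (cl_uint i = 0; err == CL_SUCCESS && i < numFormats; ++i)
            if (formats[i].image_channel_order == wanted.image_channel_order &&
                formats[i].image_channel_data_type == wanted.image_channel_data_type)
                return true;
        return false;
    }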
--- modules/core/src/ocl.cpp | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/modules/core/src/ocl.cpp b/modules/core/src/ocl.cpp index dbebf02f7f..52533484a0 100644 --- a/modules/core/src/ocl.cpp +++ b/modules/core/src/ocl.cpp @@ -6445,16 +6445,19 @@ struct Image2D::Impl CL_MEM_OBJECT_IMAGE2D, numFormats, NULL, &numFormats); CV_OCL_DBG_CHECK_RESULT(err, "clGetSupportedImageFormats(CL_MEM_OBJECT_IMAGE2D, NULL)"); - AutoBuffer formats(numFormats); - err = clGetSupportedImageFormats(context, CL_MEM_READ_WRITE, - CL_MEM_OBJECT_IMAGE2D, numFormats, - formats.data(), NULL); - CV_OCL_DBG_CHECK_RESULT(err, "clGetSupportedImageFormats(CL_MEM_OBJECT_IMAGE2D, formats)"); - for (cl_uint i = 0; i < numFormats; ++i) - { - if (!memcmp(&formats[i], &format, sizeof(format))) + if (numFormats > 0) + { + AutoBuffer formats(numFormats); + err = clGetSupportedImageFormats(context, CL_MEM_READ_WRITE, + CL_MEM_OBJECT_IMAGE2D, numFormats, + formats.data(), NULL); + CV_OCL_DBG_CHECK_RESULT(err, "clGetSupportedImageFormats(CL_MEM_OBJECT_IMAGE2D, formats)"); + for (cl_uint i = 0; i < numFormats; ++i) { - return true; + if (!memcmp(&formats[i], &format, sizeof(format))) + { + return true; + } } } return false; From 9e332dc5fbfabca99fc98a4e663500bea0df2cc6 Mon Sep 17 00:00:00 2001 From: Dmitry Kurtaev Date: Thu, 5 Mar 2020 23:53:50 +0300 Subject: [PATCH 4/6] Broadcasting from ONNX --- modules/dnn/src/layers/scale_layer.cpp | 51 ++++++++++------ modules/dnn/src/onnx/onnx_importer.cpp | 81 +++++++++++++++++++------ modules/dnn/test/test_onnx_importer.cpp | 41 ++++++------- 3 files changed, 111 insertions(+), 62 deletions(-) diff --git a/modules/dnn/src/layers/scale_layer.cpp b/modules/dnn/src/layers/scale_layer.cpp index ea2d117901..7453c38a1c 100644 --- a/modules/dnn/src/layers/scale_layer.cpp +++ b/modules/dnn/src/layers/scale_layer.cpp @@ -46,14 +46,14 @@ public: { std::vector inputs; inputs_arr.getMatVector(inputs); - hasWeights = blobs.size() == 2 || (blobs.size() == 1 && !hasBias); + hasWeights = blobs.size() == 2 || (blobs.size() <= 1 && !hasBias); CV_Assert((inputs.size() == 2 && blobs.empty()) || blobs.size() == (int)hasWeights + (int)hasBias); } virtual bool supportBackend(int backendId) CV_OVERRIDE { return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || - (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && axis == 1) || + (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && axis == 1 && !blobs.empty()) || (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && axis > 0); } @@ -78,10 +78,9 @@ public: Mat &outBlob = outputs[0]; // There is a mode when we multiply a first blob by a second one // instead of trainable weights. - Mat weights = blobs.empty() ? inputs[1] : (hasWeights ? blobs[0] : Mat()); - Mat bias = hasBias ? blobs.back().reshape(1, 1) : Mat(); - if (!weights.empty()) - weights = weights.reshape(1, 1); + Mat weights = hasWeights ? (blobs.empty() ? inputs[1] : blobs[0]).reshape(1, 1) : Mat();; + Mat bias = hasBias ? (blobs.empty() ? inputs[1] : blobs.back()).reshape(1, 1) : Mat(); + MatShape inpShape = shape(inpBlob); const int numWeights = !weights.empty() ? 
             weights.total() : bias.total();
         CV_Assert(numWeights != 0);
@@ -229,28 +228,40 @@ public:
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        CV_Assert(!blobs.empty());
-        const size_t numChannels = blobs[0].total();
-        auto ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
+        auto ieInpNode0 = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
+        auto ieInpNode1 = nodes.size() > 1 ? nodes[1].dynamicCast<InfEngineNgraphNode>()->node : nullptr;
+
+        size_t numChannels = 1;
+        if (blobs.empty())
+            for (const size_t& dim : ieInpNode1->get_shape())
+                numChannels *= dim;
+        else
+            numChannels = blobs[0].total();
 
-        std::vector<size_t> shape(ieInpNode->get_shape().size(), 1);
+        std::vector<size_t> shape(ieInpNode0->get_shape().size(), 1);
         int cAxis = clamp(axis, shape.size());
         shape[cAxis] = numChannels;
 
-        auto node = ieInpNode;
+        auto node = ieInpNode0;
         if (hasWeights)
         {
-            auto weight = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
-                                                                 ngraph::Shape(shape), blobs[0].data);
+            auto weight = blobs.empty() ? ieInpNode1 :
+                          std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), blobs[0].data);
+
             node = std::make_shared<ngraph::op::v1::Multiply>(node, weight, ngraph::op::AutoBroadcastType::NUMPY);
         }
         if (hasBias || !hasWeights)
         {
-            auto bias = hasBias ?
-                        std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
-                                                               ngraph::Shape(shape), blobs.back().data) :
-                        std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
-                                                               ngraph::Shape(shape), std::vector<float>(numChannels, 0).data());
+            std::shared_ptr<ngraph::Node> bias;
+            if (hasBias)
+            {
+                bias = blobs.empty() ? ieInpNode1 :
+                       std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+                                                              ngraph::Shape(shape), blobs.back().data);
+            }
+            else
+                bias = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+                                                              ngraph::Shape(shape), std::vector<float>(numChannels, 0).data());
             node = std::make_shared<ngraph::op::v1::Add>(node, bias, ngraph::op::AutoBroadcastType::NUMPY);
         }
         return Ptr<BackendNode>(new InfEngineNgraphNode(node));
@@ -259,8 +270,8 @@ public:
 
     void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
     {
-        scale = hasWeights ? blobs[0] : Mat();
-        shift = hasBias ? blobs.back() : Mat();
+        scale = (hasWeights && !blobs.empty()) ? blobs[0] : Mat();
+        shift = (hasBias && !blobs.empty()) ? blobs.back() : Mat();
     }
 
     virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp
index 3d7e33a37f..0ca909597e 100644
--- a/modules/dnn/src/onnx/onnx_importer.cpp
+++ b/modules/dnn/src/onnx/onnx_importer.cpp
@@ -427,24 +427,57 @@ void ONNXImporter::populateNet(Net dstNet)
             }
             layerParams.type = "Slice";
         }
-        else if (layer_type == "Add" || layer_type == "Sum")
+        else if (layer_type == "Add" || layer_type == "Sum" || layer_type == "Sub")
         {
+            bool isSub = layer_type == "Sub";
+            CV_CheckEQ(node_proto.input_size(), 2, "");
             if (layer_id.find(node_proto.input(1)) == layer_id.end())
             {
                 Mat blob = getBlob(node_proto, constBlobs, 1);
                 blob = blob.reshape(1, 1);
                 if (blob.total() == 1) {
                     layerParams.type = "Power";
-                    layerParams.set("shift", blob.at<float>(0));
+                    layerParams.set("shift", (isSub ? -1 : 1) * blob.at<float>(0));
                 }
                 else {
                     layerParams.type = "Scale";
                     layerParams.set("bias_term", true);
-                    layerParams.blobs.push_back(blob);
+                    layerParams.blobs.push_back((isSub ? -1 : 1) * blob);
                 }
             }
-            else {
+            else if (outShapes[node_proto.input(0)] == outShapes[node_proto.input(1)])
+            {
                 layerParams.type = "Eltwise";
+                if (isSub)
+                {
+                    static float subCoeffs[] = {1.f, -1.f};
+                    layerParams.set("coeff", DictValue::arrayReal<float*>(subCoeffs, 2));
+                }
+            }
+            else
+            {
+                if (isSub)
+                {
+                    LayerParams powerParams;
+                    powerParams.name = layerParams.name + "/neg";
+                    powerParams.type = "Power";
+                    powerParams.set("scale", -1);
+
+                    //Create Power layer
+                    int id = dstNet.addLayer(powerParams.name, powerParams.type, powerParams);
+                    //Connect to input
+                    layerId = layer_id.find(node_proto.input(1));
+                    CV_Assert(layerId != layer_id.end());
+                    dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
+                    //Add shape
+                    layer_id.insert(std::make_pair(powerParams.name, LayerInfo(id, 0)));
+                    outShapes[powerParams.name] = outShapes[node_proto.input(1)];
+
+                    //Replace input to Power
+                    node_proto.set_input(1, powerParams.name);
+                }
+                layerParams.type = "Scale";
+                layerParams.set("bias_term", true);
             }
         }
         else if (layer_type == "Max")
@@ -452,19 +485,6 @@ void ONNXImporter::populateNet(Net dstNet)
             layerParams.type = "Eltwise";
             layerParams.set("operation", "max");
         }
-        else if (layer_type == "Sub")
-        {
-            Mat blob = getBlob(node_proto, constBlobs, 1);
-            if (blob.total() == 1) {
-                layerParams.type = "Power";
-                layerParams.set("shift", -blob.at<float>(0));
-            }
-            else {
-                layerParams.type = "Scale";
-                layerParams.set("has_bias", true);
-                layerParams.blobs.push_back(-1.0f * blob.reshape(1, 1));
-            }
-        }
         else if (layer_type == "Neg")
         {
             layerParams.type = "Power";
@@ -643,10 +663,35 @@ void ONNXImporter::populateNet(Net dstNet)
                     layerParams.type = "Scale";
                 }
             }
-            else {
+            else if (outShapes[node_proto.input(0)] == outShapes[node_proto.input(1)])
+            {
                 layerParams.type = "Eltwise";
                 layerParams.set("operation", isDiv ? "div" : "prod");
             }
+            else
+            {
+                if (isDiv)
+                {
+                    LayerParams powerParams;
+                    powerParams.name = layerParams.name + "/inv";
+                    powerParams.type = "Power";
+                    powerParams.set("power", -1);
+
+                    //Create Power layer
+                    int id = dstNet.addLayer(powerParams.name, powerParams.type, powerParams);
+                    //Connect to input
+                    layerId = layer_id.find(node_proto.input(1));
+                    CV_Assert(layerId != layer_id.end());
+                    dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, 0);
+                    //Add shape
+                    layer_id.insert(std::make_pair(powerParams.name, LayerInfo(id, 0)));
+                    outShapes[powerParams.name] = outShapes[node_proto.input(1)];
+
+                    //Replace input to Power
+                    node_proto.set_input(1, powerParams.name);
+                }
+                layerParams.type = "Scale";
+            }
 
             if (!haveVariables)
             {
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index 2838a72ea7..f284eed45b 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -32,29 +32,33 @@ public:
     void testONNXModels(const String& basename, const Extension ext = npy,
                         const double l1 = 0, const float lInf = 0, const bool useSoftmax = false,
-                        bool checkNoFallbacks = true)
+                        bool checkNoFallbacks = true, int numInps = 1)
     {
         String onnxmodel = _tf("models/" + basename + ".onnx", required);
-        Mat inp, ref;
+        std::vector<Mat> inps(numInps);
+        Mat ref;
         if (ext == npy) {
-            inp = blobFromNPY(_tf("data/input_" + basename + ".npy"));
+            for (int i = 0; i < numInps; ++i)
+                inps[i] = blobFromNPY(_tf("data/input_" + basename + (numInps > 1 ? format("_%d", i) : "") + ".npy"));
             ref = blobFromNPY(_tf("data/output_" + basename + ".npy"));
         }
         else if (ext == pb) {
-            inp = readTensorFromONNX(_tf("data/input_" + basename + ".pb"));
+            for (int i = 0; i < numInps; ++i)
+                inps[i] = readTensorFromONNX(_tf("data/input_" + basename + (numInps > 1 ? format("_%d", i) : "") + ".pb"));
             ref = readTensorFromONNX(_tf("data/output_" + basename + ".pb"));
         }
         else
             CV_Error(Error::StsUnsupportedFormat, "Unsupported extension");
 
-        checkBackend(&inp, &ref);
+        checkBackend(&inps[0], &ref);
         Net net = readNetFromONNX(onnxmodel);
         ASSERT_FALSE(net.empty());
 
         net.setPreferableBackend(backend);
         net.setPreferableTarget(target);
 
-        net.setInput(inp);
+        for (int i = 0; i < numInps; ++i)
+            net.setInput(inps[i], numInps > 1 ? format("%d", i) : "");
         Mat out = net.forward("");
 
         if (useSoftmax)
@@ -328,25 +332,14 @@ TEST_P(Test_ONNX_layers, ResizeUnfused)
 
 TEST_P(Test_ONNX_layers, MultyInputs)
 {
-    const String model = _tf("models/multy_inputs.onnx");
-
-    Net net = readNetFromONNX(model);
-    ASSERT_FALSE(net.empty());
-
-    net.setPreferableBackend(backend);
-    net.setPreferableTarget(target);
-
-    Mat inp1 = blobFromNPY(_tf("data/input_multy_inputs_0.npy"));
-    Mat inp2 = blobFromNPY(_tf("data/input_multy_inputs_1.npy"));
-    Mat ref = blobFromNPY(_tf("data/output_multy_inputs.npy"));
-    checkBackend(&inp1, &ref);
-
-    net.setInput(inp1, "0");
-    net.setInput(inp2, "1");
-    Mat out = net.forward();
+    testONNXModels("multy_inputs", npy, 0, 0, false, true, 2);
+}
 
-    normAssert(ref, out, "", default_l1, default_lInf);
-    expectNoFallbacksFromIE(net);
+TEST_P(Test_ONNX_layers, Broadcast)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    testONNXModels("channel_broadcast", npy, 0, 0, false, true, 2);
 }
 
 TEST_P(Test_ONNX_layers, Div)

From 880d2afb67e7afe0d9e0c3a169b3c592bac1e4fb Mon Sep 17 00:00:00 2001
From: Manoj Gupta
Date: Thu, 5 Mar 2020 14:26:35 -0800
Subject: [PATCH 5/6] Fix building with ToT libc++

ToT libc++ (LLVM) no longer includes <sstream> as part of <ostream>,
which breaks building opencv. Include the <sstream> header explicitly
to fix this.
---
 modules/core/include/opencv2/core/cvstd.inl.hpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/modules/core/include/opencv2/core/cvstd.inl.hpp b/modules/core/include/opencv2/core/cvstd.inl.hpp
index ed37cacb30..36c83e285e 100644
--- a/modules/core/include/opencv2/core/cvstd.inl.hpp
+++ b/modules/core/include/opencv2/core/cvstd.inl.hpp
@@ -46,6 +46,7 @@
 
 #include <complex>
 #include <ostream>
+#include <sstream>
 
 //! @cond IGNORED

From 34530da66e9e5d9fdba091b11ceef16ae267ae0c Mon Sep 17 00:00:00 2001
From: Alexander Alekhin
Date: Fri, 6 Mar 2020 18:01:55 +0000
Subject: [PATCH 6/6] core: fix coverity issues

---
 modules/core/src/minmax.cpp |   1 +
 modules/core/src/norm.cpp   | 136 ++++++++++++++++++++----------------
 2 files changed, 76 insertions(+), 61 deletions(-)

diff --git a/modules/core/src/minmax.cpp b/modules/core/src/minmax.cpp
index b4e5e4632a..43785d839b 100644
--- a/modules/core/src/minmax.cpp
+++ b/modules/core/src/minmax.cpp
@@ -1089,6 +1089,7 @@ bool ocl_minMaxIdx( InputArray _src, double* minVal, double* maxVal, int* minLoc
             getMinMaxRes<double>
         };
 
+        CV_Assert(ddepth <= CV_64F);
         getMinMaxResFunc func = functab[ddepth];
 
         int locTemp[2];
diff --git a/modules/core/src/norm.cpp b/modules/core/src/norm.cpp
index 148c25a9a3..9aaed8e980 100644
--- a/modules/core/src/norm.cpp
+++ b/modules/core/src/norm.cpp
@@ -710,51 +710,58 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
     result;
     result.d = 0;
     NAryMatIterator it(arrays, ptrs);
-    int j, total = (int)it.size, blockSize = total, intSumBlockSize = 0, count = 0;
-    bool blockSum = (normType == NORM_L1 && depth <= CV_16S) ||
-            ((normType == NORM_L2 || normType == NORM_L2SQR) && depth <= CV_8S);
-    int isum = 0;
-    int *ibuf = &result.i;
-    size_t esz = 0;
-
-    if( blockSum )
-    {
-        intSumBlockSize = (normType == NORM_L1 && depth <= CV_8S ? (1 << 23) : (1 << 15))/cn;
-        blockSize = std::min(blockSize, intSumBlockSize);
-        ibuf = &isum;
-        esz = src.elemSize();
-    }
+    CV_CheckLT((size_t)it.size, (size_t)INT_MAX, "");
 
-    for( size_t i = 0; i < it.nplanes; i++, ++it )
+    if ((normType == NORM_L1 && depth <= CV_16S) ||
+        ((normType == NORM_L2 || normType == NORM_L2SQR) && depth <= CV_8S))
     {
-        for( j = 0; j < total; j += blockSize )
+        // special case to handle "integer" overflow in accumulator
+        const size_t esz = src.elemSize();
+        const int total = (int)it.size;
+        const int intSumBlockSize = (normType == NORM_L1 && depth <= CV_8S ? (1 << 23) : (1 << 15))/cn;
+        const int blockSize = std::min(total, intSumBlockSize);
+        int isum = 0;
+        int count = 0;
+
+        for (size_t i = 0; i < it.nplanes; i++, ++it)
         {
-            int bsz = std::min(total - j, blockSize);
-            func( ptrs[0], ptrs[1], (uchar*)ibuf, bsz, cn );
-            count += bsz;
-            if( blockSum && (count + blockSize >= intSumBlockSize || (i+1 >= it.nplanes && j+bsz >= total)) )
+            for (int j = 0; j < total; j += blockSize)
             {
-                result.d += isum;
-                isum = 0;
-                count = 0;
+                int bsz = std::min(total - j, blockSize);
+                func(ptrs[0], ptrs[1], (uchar*)&isum, bsz, cn);
+                count += bsz;
+                if (count + blockSize >= intSumBlockSize || (i+1 >= it.nplanes && j+bsz >= total))
+                {
+                    result.d += isum;
+                    isum = 0;
+                    count = 0;
+                }
+                ptrs[0] += bsz*esz;
+                if (ptrs[1])
+                    ptrs[1] += bsz;
             }
-            ptrs[0] += bsz*esz;
-            if( ptrs[1] )
-                ptrs[1] += bsz;
+        }
+    }
+    else
+    {
+        // generic implementation
+        for (size_t i = 0; i < it.nplanes; i++, ++it)
+        {
+            func(ptrs[0], ptrs[1], (uchar*)&result, (int)it.size, cn);
         }
     }
 
     if( normType == NORM_INF )
    {
         if( depth == CV_64F )
-            ;
+            return result.d;
         else if( depth == CV_32F )
-            result.d = result.f;
+            return result.f;
         else
-            result.d = result.i;
+            return result.i;
     }
     else if( normType == NORM_L2 )
-        result.d = std::sqrt(result.d);
+        return std::sqrt(result.d);
 
     return result.d;
 }
@@ -1170,52 +1177,59 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
     result;
     result.d = 0;
     NAryMatIterator it(arrays, ptrs);
-    int j, total = (int)it.size, blockSize = total, intSumBlockSize = 0, count = 0;
-    bool blockSum = (normType == NORM_L1 && depth <= CV_16S) ||
-            ((normType == NORM_L2 || normType == NORM_L2SQR) && depth <= CV_8S);
-    unsigned isum = 0;
-    unsigned *ibuf = &result.u;
-    size_t esz = 0;
-
-    if( blockSum )
-    {
-        intSumBlockSize = normType == NORM_L1 && depth <= CV_8S ? (1 << 23) : (1 << 15);
-        blockSize = std::min(blockSize, intSumBlockSize);
-        ibuf = &isum;
-        esz = src1.elemSize();
-    }
+    CV_CheckLT((size_t)it.size, (size_t)INT_MAX, "");
 
-    for( size_t i = 0; i < it.nplanes; i++, ++it )
+    if ((normType == NORM_L1 && depth <= CV_16S) ||
+        ((normType == NORM_L2 || normType == NORM_L2SQR) && depth <= CV_8S))
     {
-        for( j = 0; j < total; j += blockSize )
+        // special case to handle "integer" overflow in accumulator
+        const size_t esz = src1.elemSize();
+        const int total = (int)it.size;
+        const int intSumBlockSize = normType == NORM_L1 && depth <= CV_8S ? (1 << 23) : (1 << 15);
+        const int blockSize = std::min(total, intSumBlockSize);
+        int isum = 0;
+        int count = 0;
+
+        for (size_t i = 0; i < it.nplanes; i++, ++it)
        {
-            int bsz = std::min(total - j, blockSize);
-            func( ptrs[0], ptrs[1], ptrs[2], (uchar*)ibuf, bsz, cn );
-            count += bsz;
-            if( blockSum && (count + blockSize >= intSumBlockSize || (i+1 >= it.nplanes && j+bsz >= total)) )
+            for (int j = 0; j < total; j += blockSize)
             {
-                result.d += isum;
-                isum = 0;
-                count = 0;
+                int bsz = std::min(total - j, blockSize);
+                func(ptrs[0], ptrs[1], ptrs[2], (uchar*)&isum, bsz, cn);
+                count += bsz;
+                if (count + blockSize >= intSumBlockSize || (i+1 >= it.nplanes && j+bsz >= total))
+                {
+                    result.d += isum;
+                    isum = 0;
+                    count = 0;
+                }
+                ptrs[0] += bsz*esz;
+                ptrs[1] += bsz*esz;
+                if (ptrs[2])
+                    ptrs[2] += bsz;
            }
-            ptrs[0] += bsz*esz;
-            ptrs[1] += bsz*esz;
-            if( ptrs[2] )
-                ptrs[2] += bsz;
+        }
+    }
+    else
+    {
+        // generic implementation
+        for (size_t i = 0; i < it.nplanes; i++, ++it)
+        {
+            func(ptrs[0], ptrs[1], ptrs[2], (uchar*)&result, (int)it.size, cn);
        }
    }
 
     if( normType == NORM_INF )
     {
         if( depth == CV_64F )
-            ;
+            return result.d;
         else if( depth == CV_32F )
-            result.d = result.f;
+            return result.f;
         else
-            result.d = result.u;
+            return result.u;
     }
     else if( normType == NORM_L2 )
-        result.d = std::sqrt(result.d);
+        return std::sqrt(result.d);
 
     return result.d;
 }
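
The rewritten accumulation loops in both cv::norm overloads keep the original overflow guard: for NORM_L1 over 8-bit data every element contributes at most 255, so at most 1 << 23 values (scaled down by the channel count in the single-array overload) are summed into a 32-bit int before it is flushed into the double total, and 255 * (1 << 23) = 2139095040 still fits in INT_MAX. A minimal standalone sketch of the pattern follows; the helper name sumAbsU8 is invented here, not taken from the patch.

    #include <cstdint>
    #include <cstddef>

    // Block-wise accumulation: gather 8-bit values into an int and flush
    // it into a double before the int can overflow.
    static double sumAbsU8(const uint8_t* data, size_t total)
    {
        const int blockSize = 1 << 23; // worst case per element is 255
        double result = 0.0;
        int isum = 0;
        int count = 0;
        for (size_t j = 0; j < total; ++j)
        {
            isum += data[j];
            if (++count == blockSize) // flush while isum <= 255 * (1 << 23)
            {
                result += isum;
                isum = 0;
                count = 0;
            }
        }
        return result + isum; // flush the tail
    }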
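
Going back to the scale_layer.cpp change in this series: when blobs is empty, the weights (or bias) come from a second network input, and the nGraph Multiply/Add pair broadcasts a {1, C, 1, 1}-shaped operand over an NCHW tensor (AutoBroadcastType::NUMPY). The plain-C++ sketch below is illustrative only, under the assumption of NCHW layout; the helper name scaleShiftNCHW is not part of the patch.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Multiply every element of channel c by weight[c], then add bias[c]:
    // the effect of nGraph Multiply+Add with a {1, C, 1, 1} constant.
    static void scaleShiftNCHW(std::vector<float>& data,
                               size_t N, size_t C, size_t H, size_t W,
                               const std::vector<float>& weight, // size C
                               const std::vector<float>& bias)   // size C
    {
        const size_t plane = H * W;
        for (size_t n = 0; n < N; ++n)
            for (size_t c = 0; c < C; ++c)
            {
                float* p = &data[(n * C + c) * plane];
                for (size_t i = 0; i < plane; ++i)
                    p[i] = p[i] * weight[c] + bias[c];
            }
    }

    int main()
    {
        // 1x2x2x2 tensor: channel 0 is all 1s, channel 1 is all 2s.
        std::vector<float> t = {1, 1, 1, 1, 2, 2, 2, 2};
        scaleShiftNCHW(t, 1, 2, 2, 2, {10.f, 100.f}, {0.5f, -0.5f});
        printf("%g %g\n", t[0], t[4]); // prints: 10.5 199.5
        return 0;
    }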
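
The onnx_importer.cpp change handles element-wise Sub and Div without dedicated layers. When both inputs are variables of equal shape it emits an Eltwise layer (with coefficients {1, -1} for Sub); when the shapes differ it rewrites a - b as a + (-1 * b) by inserting a Power layer with scale = -1 in front of a broadcasting Scale layer, and a / b as a * b^-1 via a Power layer with power = -1. A small self-contained check of those identities, as an illustration only, not code from the patch:

    #include <cassert>
    #include <cmath>

    int main()
    {
        const float a = 5.f, b = 2.f;

        // "Sub": a Power layer with scale = -1 negates b; the Scale layer
        // then adds it as a bias, so a - b == a + (-1 * b).
        const float neg_b = -1.f * b;
        assert(std::fabs((a + neg_b) - (a - b)) < 1e-6f);

        // "Div": a Power layer with power = -1 inverts b; the Scale layer
        // then multiplies, so a / b == a * (b ^ -1).
        const float inv_b = std::pow(b, -1.f);
        assert(std::fabs(a * inv_b - a / b) < 1e-6f);
        return 0;
    }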
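
Finally, a note on the reworked testONNXModels helper: passing numInps = 2 binds each blob to the ONNX graph inputs named "0" and "1", which is exactly what the removed MultyInputs body did explicitly:

    Net net = readNetFromONNX(_tf("models/multy_inputs.onnx"));
    net.setInput(inp1, "0"); // first ONNX graph input
    net.setInput(inp2, "1"); // second ONNX graph input
    Mat out = net.forward();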