From 3eaeca58da1b23c1c23581b0fc9f70150ba92354 Mon Sep 17 00:00:00 2001 From: Maxim Pashchenkov Date: Wed, 13 Jan 2021 00:31:15 +0300 Subject: [PATCH] Merge pull request #18902 from mpashchenkov:mp/onnx-const-input G-API: ONNX. Const input * Added const input for ONNX backend * Returned initMatrixRandu, added some comments, rebase --- modules/gapi/src/backends/ie/giebackend.cpp | 8 - .../gapi/src/backends/onnx/gonnxbackend.cpp | 75 +++- .../gapi/test/infer/gapi_infer_onnx_test.cpp | 324 ++++++++++++++---- 3 files changed, 315 insertions(+), 92 deletions(-) diff --git a/modules/gapi/src/backends/ie/giebackend.cpp b/modules/gapi/src/backends/ie/giebackend.cpp index 3cbe24364a..ad80f605a6 100644 --- a/modules/gapi/src/backends/ie/giebackend.cpp +++ b/modules/gapi/src/backends/ie/giebackend.cpp @@ -263,14 +263,6 @@ struct IEUnit { // Still, constant data is to set only once. this_request.SetBlob(p.first, wrapIE(p.second.first, p.second.second)); } - // Bind const data to infer request - for (auto &&p : params.const_inputs) { - // FIXME: SetBlob is known to be inefficient, - // it is worth to make a customizable "initializer" and pass the - // cv::Mat-wrapped blob there to support IE's optimal "GetBlob idiom" - // Still, constant data is to set only once. - this_request.SetBlob(p.first, wrapIE(p.second.first, p.second.second)); - } return {this_plugin, this_network, this_request}; } diff --git a/modules/gapi/src/backends/onnx/gonnxbackend.cpp b/modules/gapi/src/backends/onnx/gonnxbackend.cpp index 31794fa2b5..980bac431f 100644 --- a/modules/gapi/src/backends/onnx/gonnxbackend.cpp +++ b/modules/gapi/src/backends/onnx/gonnxbackend.cpp @@ -16,6 +16,7 @@ #include #include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK! +#include "logger.hpp" namespace { struct ONNXCallContext; @@ -30,12 +31,35 @@ enum TensorPosition : int { OUTPUT }; +static std::string pdims(const std::vector &dims) { + std::stringstream ss; + auto it = dims.begin(); + ss << *it++; + for (; it != dims.end(); ++it) { + ss << '/' << *it; + } + return ss.str(); +} + struct TensorInfo { TensorInfo() = default; explicit TensorInfo(const Ort::TensorTypeAndShapeInfo& info) : dims(info.GetShape()) , type(info.GetElementType()) , is_dynamic(std::find(dims.begin(), dims.end(), -1) != dims.end()) { + + // Double-check if the tensor is really dynamic + // Allow N to be -1 + if (is_dynamic + && dims[0] == -1 + && dims.size() > 1 + && std::find(dims.begin() + 1, dims.end(), -1) == dims.end()) { + + GAPI_LOG_WARNING(NULL, "Promoting N=-1 to N=1 for tensor " << pdims(dims)); + dims[0] = 1; + is_dynamic = false; + } + if (!is_dynamic) { size = std::accumulate(dims.begin(), dims.end(), @@ -81,6 +105,7 @@ class ONNXCompiled { std::vector in_tensor_info; std::vector out_tensor_info; bool is_dynamic = false; + bool is_postproc = false; // G-API description gapi::onnx::detail::ParamDesc params; @@ -95,6 +120,7 @@ class ONNXCompiled { void Run(const std::vector& ins, const std::vector& outs); + std::vector in_names_without_const; public: explicit ONNXCompiled(const gapi::onnx::detail::ParamDesc &pp); @@ -142,6 +168,7 @@ inline int toCV(ONNXTensorElementDataType prec) { switch (prec) { case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8: return CV_8U; case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: return CV_32F; + case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32: return CV_32S; default: GAPI_Assert(false && "Unsupported data type"); } return -1; @@ -308,6 +335,8 @@ inline Ort::Value createTensor(const Ort::MemoryInfo& memory_info, return createTensor(memory_info, 
tensor_params, data); case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: return createTensor(memory_info, tensor_params, data); + case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32: + return createTensor(memory_info, tensor_params, data); default: GAPI_Assert(false && "Unsupported data type"); } @@ -523,7 +552,6 @@ namespace onnx { ONNXCompiled::ONNXCompiled(const gapi::onnx::detail::ParamDesc &pp) : params(pp) { - // Validate input parameters before allocating any resources if (params.num_in > 1u && params.num_in != params.input_names.size()) { cv::util::throw_error(std::logic_error("Please specify input layer names for " @@ -553,6 +581,7 @@ ONNXCompiled::ONNXCompiled(const gapi::onnx::detail::ParamDesc &pp) "Please provide a custom post-processing function " "(.cfgPostProc) in network parameters")); } + is_postproc = (params.custom_post_proc != nullptr); // Update parameters based on session information if (params.num_in == 1u && params.input_names.empty()) { @@ -563,8 +592,6 @@ ONNXCompiled::ONNXCompiled(const gapi::onnx::detail::ParamDesc &pp) } // Validate what is supported currently - GAPI_Assert(params.const_inputs.empty() - && "Const inputs are not currently supported"); GAPI_Assert(std::all_of(in_tensor_info.begin(), in_tensor_info.end(), [](const cv::gimpl::onnx::TensorInfo &p) { @@ -593,6 +620,17 @@ ONNXCompiled::ONNXCompiled(const gapi::onnx::detail::ParamDesc &pp) } } + if (!params.const_inputs.empty()) { + // Form input names order without const input names + in_names_without_const.clear(); + std::copy_if(params.input_names.begin(), params.input_names.end(), + std::back_inserter(in_names_without_const), + [&](const std::string& name) { + const auto it = params.const_inputs.find(name); + return it == params.const_inputs.end(); + }); + } + // Pre-allocate vectors (not buffers) for runtime info in_data.resize(params.num_in); out_data.resize(params.num_out); @@ -626,9 +664,9 @@ std::vector ONNXCompiled::getTensorInfo(TensorPosition pos) { } cv::GMatDesc ONNXCompiled::outMeta(int idx) const { - if (is_dynamic) { + if (is_dynamic || is_postproc) { GAPI_Assert(!params.out_metas.empty() - && "Metadata must be specified if NN has dynamic inputs!"); + && "Metadata must be specified if NN has dynamic inputs or post-processing function is used!"); return params.out_metas.at(idx); } const auto ort_idx = getIdxByName(out_tensor_info, params.output_names[idx]); @@ -678,9 +716,12 @@ void ONNXCompiled::Run(const std::vector& ins, const std::vector& outs) { std::vector in_tensors, out_tensors; - auto in_run_names = getCharNames(params.input_names); - - for (const auto it : ade::util::indexed(params.input_names)) { + // Layer names order for run + auto input_names = (in_names_without_const.empty() && params.const_inputs.empty()) + ? 
params.input_names + : in_names_without_const; + // Creates tensors for unique names that don't contain constant input + for (const auto it : ade::util::indexed(input_names)) { auto i = ade::util::index(it); auto in_name = ade::util::value(it); const auto idx = getIdxByName(in_tensor_info, in_name); @@ -689,7 +730,19 @@ void ONNXCompiled::Run(const std::vector& ins, ins[i])); } - if (!is_dynamic) { + for (auto &&c_in_pair : params.const_inputs) { + const auto idx = getIdxByName(in_tensor_info, c_in_pair.first); + in_tensors.emplace_back(createTensor(this_memory_info, + in_tensor_info[idx], + c_in_pair.second.first)); + // Puts const input names in sequence for Run + // ONNXRuntime can match input tensors to CNN inputs by names + input_names.emplace_back(c_in_pair.first); + } + GAPI_Assert(input_names.size() == this_session.GetInputCount()); + + auto in_run_names = getCharNames(input_names); + if (!is_dynamic && !is_postproc) { // Easy path - just run the session which is bound to G-API's // internal data for (auto i : ade::util::iota(params.output_names.size())) { @@ -701,7 +754,7 @@ void ONNXCompiled::Run(const std::vector& ins, this_session.Run(Ort::RunOptions{nullptr}, in_run_names.data(), &in_tensors.front(), - params.input_names.size(), + input_names.size(), out_run_names.data(), &out_tensors.front(), params.output_names.size()); @@ -716,7 +769,7 @@ void ONNXCompiled::Run(const std::vector& ins, auto outputs = this_session.Run(Ort::RunOptions{nullptr}, in_run_names.data(), &in_tensors.front(), - params.input_names.size(), + input_names.size(), out_names.data(), out_names.size()); std::unordered_map onnx_outputs; diff --git a/modules/gapi/test/infer/gapi_infer_onnx_test.cpp b/modules/gapi/test/infer/gapi_infer_onnx_test.cpp index 350eeb8668..222331d0cb 100644 --- a/modules/gapi/test/infer/gapi_infer_onnx_test.cpp +++ b/modules/gapi/test/infer/gapi_infer_onnx_test.cpp @@ -81,8 +81,24 @@ cv::Mat initMatrixRandU(const int type, const cv::Size& sz_in) { namespace opencv_test { namespace { +void initTestDataPath() +{ +#ifndef WINRT + static bool initialized = false; + if (!initialized) + { + // Since G-API has no own test data (yet), it is taken from the common space + const char* testDataPath = getenv("OPENCV_TEST_DATA_PATH"); + if (testDataPath) { + cvtest::addDataSearchPath(testDataPath); + } + initialized = true; + } +#endif // WINRT +} + // FIXME: taken from the DNN module -void normAssert(const cv::InputArray& ref, const cv::InputArray& test, +void normAssert(cv::InputArray& ref, cv::InputArray& test, const char *comment /*= ""*/, const double l1 = 0.00001, const double lInf = 0.0001) { const double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total(); @@ -109,6 +125,7 @@ inline int toCV(const ONNXTensorElementDataType prec) { switch (prec) { case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8: return CV_8U; case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: return CV_32F; + case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32: return CV_32S; default: GAPI_Assert(false && "Unsupported data type"); } return -1; @@ -126,46 +143,97 @@ inline std::vector getCharNames(const std::vector& nam return out_vec; } -inline void copyToOut(const cv::Mat& in, cv::Mat& out) { - GAPI_Assert(in.depth() == CV_32F); - GAPI_Assert(in.size == out.size); - const float* const inptr = in.ptr(); - float* const optr = out.ptr(); - const int size = in.total(); - for (int i = 0; i < size; ++i) { - optr[i] = inptr[i]; +template +void copyToOut(const cv::Mat& in, cv::Mat& out) { + const size_t size = std::min(out.total(), 
in.total()); + std::copy(in.begin(), in.begin() + size, out.begin()); + if (size < out.total()) { + T* const optr = out.ptr(); + optr[size] = static_cast(-1); // end data mark } } void remapYolo(const std::unordered_map &onnx, - std::unordered_map &gapi) { + std::unordered_map &gapi) { GAPI_Assert(onnx.size() == 1u); GAPI_Assert(gapi.size() == 1u); // Result from Run method const cv::Mat& in = onnx.begin()->second; + GAPI_Assert(in.depth() == CV_32F); // Configured output cv::Mat& out = gapi.begin()->second; // Simple copy - copyToOut(in, out); + copyToOut(in, out); } -void remapSsdPorts(const std::unordered_map &onnx, - std::unordered_map &gapi) { - // Result from Run method - const cv::Mat& in_num = onnx.at("num_detections:0"); - const cv::Mat& in_boxes = onnx.at("detection_boxes:0"); - const cv::Mat& in_scores = onnx.at("detection_scores:0"); - const cv::Mat& in_classes = onnx.at("detection_classes:0"); - // Configured outputs - cv::Mat& out_boxes = gapi.at("out1"); - cv::Mat& out_classes = gapi.at("out2"); - cv::Mat& out_scores = gapi.at("out3"); - cv::Mat& out_num = gapi.at("out4"); +void remapYoloV3(const std::unordered_map &onnx, + std::unordered_map &gapi) { // Simple copy for outputs - copyToOut(in_num, out_num); - copyToOut(in_boxes, out_boxes); - copyToOut(in_scores, out_scores); - copyToOut(in_classes, out_classes); + const cv::Mat& in_boxes = onnx.at("yolonms_layer_1/ExpandDims_1:0"); + const cv::Mat& in_scores = onnx.at("yolonms_layer_1/ExpandDims_3:0"); + const cv::Mat& in_indices = onnx.at("yolonms_layer_1/concat_2:0"); + GAPI_Assert(in_boxes.depth() == CV_32F); + GAPI_Assert(in_scores.depth() == CV_32F); + GAPI_Assert(in_indices.depth() == CV_32S); + + cv::Mat& out_boxes = gapi.at("out1"); + cv::Mat& out_scores = gapi.at("out2"); + cv::Mat& out_indices = gapi.at("out3"); + + copyToOut(in_boxes, out_boxes); + copyToOut(in_scores, out_scores); + copyToOut(in_indices, out_indices); +} + +void remapToIESSDOut(const std::vector &detections, + cv::Mat &ssd_output) { + for (const auto &det_el : detections) { + GAPI_Assert(det_el.depth() == CV_32F); + GAPI_Assert(!det_el.empty()); + } + + // SSD-MobilenetV1 structure check + ASSERT_EQ(detections[0].total(), 1u); + ASSERT_EQ(detections[2].total(), detections[0].total() * 100); + ASSERT_EQ(detections[2].total(), detections[3].total()); + ASSERT_EQ((detections[2].total() * 4), detections[1].total()); + + const int num_objects = static_cast(detections[0].ptr()[0]); + GAPI_Assert(num_objects <= (ssd_output.size[2] - 1)); + const float *in_boxes = detections[1].ptr(); + const float *in_scores = detections[2].ptr(); + const float *in_classes = detections[3].ptr(); + float *ptr = ssd_output.ptr(); + + for (int i = 0; i < num_objects; i++) { + ptr[0] = 0.f; // "image_id" + ptr[1] = in_classes[i]; // "label" + ptr[2] = in_scores[i]; // "confidence" + ptr[3] = in_boxes[4 * i + 1]; // left + ptr[4] = in_boxes[4 * i + 0]; // top + ptr[5] = in_boxes[4 * i + 3]; // right + ptr[6] = in_boxes[4 * i + 2]; // bottom + + ptr += 7; + in_boxes += 4; + } + + if (num_objects < ssd_output.size[2] - 1) { + // put a -1 mark at the end of output blob if there is space left + ptr[0] = -1.f; + } +} + +void remapSSDPorts(const std::unordered_map &onnx, + std::unordered_map &gapi) { + // Assemble ONNX-processed outputs back to a single 1x1x200x7 blob + // to preserve compatibility with OpenVINO-based SSD pipeline + const cv::Mat &num_detections = onnx.at("num_detections:0"); + const cv::Mat &detection_boxes = onnx.at("detection_boxes:0"); + const cv::Mat 
&detection_scores = onnx.at("detection_scores:0"); + const cv::Mat &detection_classes = onnx.at("detection_classes:0"); + cv::Mat &ssd_output = gapi.at("detection_output"); + remapToIESSDOut({num_detections, detection_boxes, detection_scores, detection_classes}, ssd_output); } class ONNXtest : public ::testing::Test { @@ -177,18 +245,17 @@ public: cv::Mat in_mat1; ONNXtest() { + initTestDataPath(); env = Ort::Env(ORT_LOGGING_LEVEL_WARNING, "test"); memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault); out_gapi.resize(1); out_onnx.resize(1); - // FIXME: All tests chek "random" image - // Ideally it should be a real image - in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480}); + // FIXME: It should be an image from own (gapi) directory in opencv extra + in_mat1 = cv::imread(findDataFile("cv/dpm/cat.png")); } template - void infer(const std::vector& ins, - std::vector& outs) { + void infer(const std::vector& ins, std::vector& outs) { // Prepare session session = Ort::Session(env, model_path.data(), session_options); num_in = session.GetInputCount(); @@ -241,10 +308,15 @@ public: template void infer(const cv::Mat& in, cv::Mat& out) { std::vector result; - infer({in}, result); + infer(std::vector{in}, result); GAPI_Assert(result.size() == 1u); out = result.front(); } + // One input overload + template + void infer(const cv::Mat& in, std::vector& outs) { + infer(std::vector{in}, outs); + } void validate() { GAPI_Assert(!out_gapi.empty() && !out_onnx.empty()); @@ -275,6 +347,12 @@ public: const cv::Scalar mean = { 0.485, 0.456, 0.406 }; const cv::Scalar std = { 0.229, 0.224, 0.225 }; + // Rois for InferList, InferList2 + const std::vector rois = { + cv::Rect(cv::Point{ 0, 0}, cv::Size{80, 120}), + cv::Rect(cv::Point{50, 100}, cv::Size{250, 360}), + }; + void preprocess(const cv::Mat& src, cv::Mat& dst) { const int new_h = 224; const int new_w = 224; @@ -317,6 +395,55 @@ public: dst = dst.reshape(1, {1, 1, new_h, new_w}); } }; + +class ONNXWithRemap : public ONNXtest { +public: + // You can specify any size of the outputs, since we don't know infer result + // Tests validate a range with results and don't compare empty space + void validate() { + GAPI_Assert(!out_gapi.empty() && !out_onnx.empty()); + ASSERT_EQ(out_gapi.size(), out_onnx.size()); + const auto size = out_onnx.size(); + for (size_t i = 0; i < size; ++i) { + float* op = out_onnx.at(i).ptr(); + float* gp = out_gapi.at(i).ptr(); + const auto out_size = std::min(out_onnx.at(i).total(), out_gapi.at(i).total()); + GAPI_Assert(out_size != 0u); + for (size_t d_idx = 0; d_idx < out_size; ++d_idx) { + if (gp[d_idx] == -1) { + break; // end of detections + } + ASSERT_EQ(op[d_idx], gp[d_idx]); + } + } + } +}; + +class ONNXYoloV3MultiInput : public ONNXWithRemap { +public: + std::vector ins; + +private: + virtual void SetUp() { + const int yolo_in_h = 416; + const int yolo_in_w = 416; + cv::Mat yolov3_input, shape, prep_mat; + cv::resize(in_mat1, yolov3_input, cv::Size(yolo_in_w, yolo_in_h)); + shape.create(cv::Size(2, 1), CV_32F); + float* ptr = shape.ptr(); + ptr[0] = in_mat1.cols; + ptr[1] = in_mat1.rows; + preprocess(yolov3_input, prep_mat); + ins = {prep_mat, shape}; + } + + void preprocess(const cv::Mat& src, cv::Mat& dst) { + cv::Mat cvt; + src.convertTo(cvt, CV_32F, 1.f / 255.f); + toCHW(cvt, dst); + dst = dst.reshape(1, {1, 3, 416, 416}); + } +}; } // anonymous namespace TEST_F(ONNXClassificationTest, Infer) @@ -341,15 +468,12 @@ TEST_F(ONNXClassificationTest, Infer) validate(); } -TEST_F(ONNXtest, InferTensor) 
+TEST_F(ONNXClassificationTest, InferTensor) { useModel("classification/squeezenet/model/squeezenet1.0-9"); // Create tensor - // FIXME: Test cheks "random" image - // Ideally it should be a real image - const cv::Mat rand_mat = initMatrixRandU(CV_32FC3, cv::Size{224, 224}); - const std::vector dims = {1, rand_mat.channels(), rand_mat.rows, rand_mat.cols}; - const cv::Mat tensor(dims, CV_32F, rand_mat.data); + cv::Mat tensor; + preprocess(in_mat1, tensor); // ONNX_API code infer(tensor, out_onnx.front()); // G_API code @@ -368,7 +492,7 @@ TEST_F(ONNXtest, InferTensor) TEST_F(ONNXClassificationTest, InferROI) { useModel("classification/squeezenet/model/squeezenet1.0-9"); - const cv::Rect ROI(cv::Point{0, 0}, cv::Size{250, 250}); + const auto ROI = rois.at(1); // ONNX_API code cv::Mat roi_mat; preprocess(in_mat1(ROI), roi_mat); @@ -392,10 +516,6 @@ TEST_F(ONNXClassificationTest, InferROI) TEST_F(ONNXClassificationTest, InferROIList) { useModel("classification/squeezenet/model/squeezenet1.0-9"); - const std::vector rois = { - cv::Rect(cv::Point{ 0, 0}, cv::Size{80, 120}), - cv::Rect(cv::Point{50, 100}, cv::Size{250, 360}), - }; // ONNX_API code out_onnx.resize(rois.size()); for (size_t i = 0; i < rois.size(); ++i) { @@ -422,10 +542,6 @@ TEST_F(ONNXClassificationTest, InferROIList) TEST_F(ONNXClassificationTest, Infer2ROIList) { useModel("classification/squeezenet/model/squeezenet1.0-9"); - const std::vector rois = { - cv::Rect(cv::Point{ 0, 0}, cv::Size{80, 120}), - cv::Rect(cv::Point{50, 100}, cv::Size{250, 360}), - }; // ONNX_API code out_onnx.resize(rois.size()); for (size_t i = 0; i < rois.size(); ++i) { @@ -449,27 +565,26 @@ TEST_F(ONNXClassificationTest, Infer2ROIList) validate(); } -TEST_F(ONNXtest, InferDynamicInputTensor) +TEST_F(ONNXWithRemap, InferDynamicInputTensor) { useModel("object_detection_segmentation/tiny-yolov2/model/tinyyolov2-8"); // Create tensor - // FIXME: Test cheks "random" image - // Ideally it should be a real image - const cv::Mat rand_mat = initMatrixRandU(CV_32FC3, cv::Size{416, 416}); - const std::vector dims = {1, rand_mat.channels(), rand_mat.rows, rand_mat.cols}; - cv::Mat tensor(dims, CV_32F, rand_mat.data); - const cv::Mat in_tensor = tensor / 255.f; + cv::Mat cvt, rsz, tensor; + cv::resize(in_mat1, rsz, cv::Size{416, 416}); + rsz.convertTo(cvt, CV_32F, 1.f / 255.f); + toCHW(cvt, tensor); + tensor = tensor.reshape(1, {1, 3, 416, 416}); // ONNX_API code - infer(in_tensor, out_onnx.front()); + infer(tensor, out_onnx.front()); // G_API code G_API_NET(YoloNet, , "YoloNet"); cv::GMat in; cv::GMat out = cv::gapi::infer(in); cv::GComputation comp(cv::GIn(in), cv::GOut(out)); - auto net = cv::gapi::onnx::Params{model_path} + auto net = cv::gapi::onnx::Params{ model_path } .cfgPostProc({cv::GMatDesc{CV_32F, {1, 125, 13, 13}}}, remapYolo) .cfgOutputLayers({"out"}); - comp.apply(cv::gin(in_tensor), + comp.apply(cv::gin(tensor), cv::gout(out_gapi.front()), cv::compile_args(cv::gapi::networks(net))); // Validate @@ -497,28 +612,26 @@ TEST_F(ONNXGRayScaleTest, InferImage) validate(); } -TEST_F(ONNXtest, InferMultOutput) +TEST_F(ONNXWithRemap, InferMultiOutput) { useModel("object_detection_segmentation/ssd-mobilenetv1/model/ssd_mobilenet_v1_10"); // ONNX_API code const auto prep_mat = in_mat1.reshape(1, {1, in_mat1.rows, in_mat1.cols, in_mat1.channels()}); - infer({prep_mat}, out_onnx); + infer(prep_mat, out_onnx); + cv::Mat onnx_conv_out({1, 1, 200, 7}, CV_32F); + remapToIESSDOut({out_onnx[3], out_onnx[0], out_onnx[2], out_onnx[1]}, onnx_conv_out); + out_onnx.clear(); 
+    out_onnx.push_back(onnx_conv_out);
     // G_API code
-    using SSDOut = std::tuple<cv::GMat, cv::GMat, cv::GMat, cv::GMat>;
-    G_API_NET(MobileNet, <SSDOut(cv::GMat)>, "ssd_mobilenet");
+    G_API_NET(MobileNet, <cv::GMat(cv::GMat)>, "ssd_mobilenet");
     cv::GMat in;
-    cv::GMat out1, out2, out3, out4;
-    std::tie(out1, out2, out3, out4) = cv::gapi::infer<MobileNet>(in);
-    cv::GComputation comp(cv::GIn(in), cv::GOut(out1, out2, out3, out4));
-    auto net = cv::gapi::onnx::Params<MobileNet>{model_path}
-        .cfgOutputLayers({"out1", "out2", "out3", "out4"})
-        .cfgPostProc({cv::GMatDesc{CV_32F, {1, 100, 4}},
-                      cv::GMatDesc{CV_32F, {1, 100}},
-                      cv::GMatDesc{CV_32F, {1, 100}},
-                      cv::GMatDesc{CV_32F, {1, 1}}}, remapSsdPorts);
-    out_gapi.resize(num_out);
+    cv::GMat out = cv::gapi::infer<MobileNet>(in);
+    cv::GComputation comp(cv::GIn(in), cv::GOut(out));
+    auto net = cv::gapi::onnx::Params<MobileNet>{ model_path }
+        .cfgOutputLayers({"detection_output"})
+        .cfgPostProc({cv::GMatDesc{CV_32F, {1, 1, 200, 7}}}, remapSSDPorts);
     comp.apply(cv::gin(in_mat1),
-               cv::gout(out_gapi[0], out_gapi[1], out_gapi[2], out_gapi[3]),
+               cv::gout(out_gapi.front()),
                cv::compile_args(cv::gapi::networks(net)));
     // Validate
     validate();
@@ -733,6 +846,71 @@ TEST_F(ONNXMediaFrameTest, InferList2YUV)
     // Validate
     validate();
 }
+
+TEST_F(ONNXYoloV3MultiInput, InferConstInput)
+{
+    useModel("object_detection_segmentation/yolov3/model/yolov3-10");
+    // ONNX_API code
+    infer<float>(ins, out_onnx);
+    // G_API code
+    using OUT = std::tuple<cv::GMat, cv::GMat, cv::GMat>;
+    G_API_NET(YoloNet, <OUT(cv::GMat)>, "yolov3");
+    auto net = cv::gapi::onnx::Params<YoloNet>{model_path}
+        .constInput("image_shape", ins[1])
+        .cfgInputLayers({"input_1"})
+        .cfgOutputLayers({"out1", "out2", "out3"})
+        .cfgPostProc({cv::GMatDesc{CV_32F, {1, 10000, 4}},
+                      cv::GMatDesc{CV_32F, {1, 80, 10000}},
+                      cv::GMatDesc{CV_32S, {5, 3}}}, remapYoloV3);
+    cv::GMat in, out1, out2, out3;
+    std::tie(out1, out2, out3) = cv::gapi::infer<YoloNet>(in);
+    cv::GComputation comp(cv::GIn(in), cv::GOut(out1, out2, out3));
+    out_gapi.resize(num_out);
+    comp.apply(cv::gin(ins[0]),
+               cv::gout(out_gapi[0], out_gapi[1], out_gapi[2]),
+               cv::compile_args(cv::gapi::networks(net)));
+    // Validate
+    validate();
+}
+
+TEST_F(ONNXYoloV3MultiInput, InferBSConstInput)
+{
+    // This test checks the case when a const input is used
+    // and all input layer names are specified explicitly.
+    // The const input takes precedence; this is the expected behavior.
+    useModel("object_detection_segmentation/yolov3/model/yolov3-10");
+    // A tensor with an incorrect image size is used to check the case when
+    // cfgInputLayers() and constInput() refer to the same layer name
+    cv::Mat bad_shape;
+    bad_shape.create(cv::Size(2, 1), CV_32F);
+    float* ptr = bad_shape.ptr<float>();
+    ptr[0] = 590;
+    ptr[1] = 12;
+    // ONNX_API code
+    infer<float>(ins, out_onnx);
+    // G_API code
+    using OUT = std::tuple<cv::GMat, cv::GMat, cv::GMat>;
+    G_API_NET(YoloNet, <OUT(cv::GMat, cv::GMat)>, "yolov3");
+    auto net = cv::gapi::onnx::Params<YoloNet>{model_path}
+        // Data from the const input will be used for inference
+        .constInput("image_shape", ins[1])
+        // "image_shape" is also listed here as a regular input layer
+        .cfgInputLayers({"input_1", "image_shape"})
+        .cfgOutputLayers({"out1", "out2", "out3"})
+        .cfgPostProc({cv::GMatDesc{CV_32F, {1, 10000, 4}},
+                      cv::GMatDesc{CV_32F, {1, 80, 10000}},
+                      cv::GMatDesc{CV_32S, {5, 3}}}, remapYoloV3);
+    cv::GMat in1, in2, out1, out2, out3;
+    std::tie(out1, out2, out3) = cv::gapi::infer<YoloNet>(in1, in2);
+    cv::GComputation comp(cv::GIn(in1, in2), cv::GOut(out1, out2, out3));
+    out_gapi.resize(num_out);
+    comp.apply(cv::gin(ins[0], bad_shape),
+               cv::gout(out_gapi[0], out_gapi[1], out_gapi[2]),
+               cv::compile_args(cv::gapi::networks(net)));
+    // Validate
+    validate();
+}
+
 } // namespace opencv_test
 #endif // HAVE_ONNX
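
For readers who want to use the new option outside of the test suite, below is a minimal usage sketch of constInput() modeled on the ONNXYoloV3MultiInput.InferConstInput test added above. The layer names, output descriptions and the constInput() call mirror that test; the helper name detectYoloV3, the blobFromImage()-based preprocessing and the model_path/remap parameters are illustrative only and are not part of this patch.

#include <string>
#include <tuple>

#include <opencv2/core.hpp>
#include <opencv2/dnn.hpp>               // blobFromImage(), used here only for preprocessing
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/infer.hpp>
#include <opencv2/gapi/infer/onnx.hpp>

// Three outputs, matching the cfgPostProc() descriptions below
using YoloOut = std::tuple<cv::GMat, cv::GMat, cv::GMat>;
G_API_NET(YoloV3Net, <YoloOut(cv::GMat)>, "example-yolov3-const-input");

// Runs YOLOv3 on a single BGR frame. "remap" is a post-processing callback with
// the same contract as remapYoloV3 in the tests above.
void detectYoloV3(const cv::Mat &frame,
                  const std::string &model_path,
                  cv::gapi::onnx::PostProc remap,
                  cv::Mat &boxes, cv::Mat &scores, cv::Mat &indices)
{
    // The model's second input is the original image size. It does not change
    // between frames, so it is bound once as a const input instead of being
    // passed through the graph on every apply() call.
    cv::Mat image_shape(cv::Size(2, 1), CV_32F);
    image_shape.at<float>(0) = static_cast<float>(frame.cols);   // packed as in the test's SetUp()
    image_shape.at<float>(1) = static_cast<float>(frame.rows);

    auto net = cv::gapi::onnx::Params<YoloV3Net>{model_path}
        .constInput("image_shape", image_shape)
        .cfgInputLayers({"input_1"})
        .cfgOutputLayers({"out1", "out2", "out3"})
        .cfgPostProc({cv::GMatDesc{CV_32F, {1, 10000, 4}},
                      cv::GMatDesc{CV_32F, {1, 80, 10000}},
                      cv::GMatDesc{CV_32S, {5, 3}}}, remap);

    // Only the image tensor is a graph input; the const input is supplied by the backend
    cv::GMat in, out1, out2, out3;
    std::tie(out1, out2, out3) = cv::gapi::infer<YoloV3Net>(in);
    cv::GComputation comp(cv::GIn(in), cv::GOut(out1, out2, out3));

    // 1x3x416x416 CV_32F NCHW tensor scaled to [0, 1], like the test's preprocess()
    const cv::Mat tensor = cv::dnn::blobFromImage(frame, 1.0 / 255.0, cv::Size(416, 416));

    comp.apply(cv::gin(tensor), cv::gout(boxes, scores, indices),
               cv::compile_args(cv::gapi::networks(net)));
}

With this wiring the graph keeps a single data input, while the backend appends the "image_shape" tensor to the ONNX Runtime inputs by name on every run; this is what the in_names_without_const / const_inputs handling added to gonnxbackend.cpp provides.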
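
The custom post-processing callbacks above (remapYolo, remapYoloV3, remapSSDPorts) all follow the same contract: the first map is keyed by the model's real ONNX output names, the second map is keyed by the names passed to cfgOutputLayers(), and the destination Mats arrive already allocated according to the cfgPostProc() descriptions, so the callback fills them in place. Below is a minimal sketch of such a callback for a single float output; "model_output:0" and "out" are illustrative names, not taken from any model in this patch.

#include <algorithm>
#include <string>
#include <unordered_map>

#include <opencv2/core.hpp>

// Copies a dynamically-sized ONNX output into a fixed-size G-API output and marks
// the end of the valid data with -1, the same convention copyToOut<T>() writes and
// ONNXWithRemap::validate() checks in the tests above.
static void remapSingleOutput(const std::unordered_map<std::string, cv::Mat> &onnx,
                                    std::unordered_map<std::string, cv::Mat> &gapi)
{
    const cv::Mat &in  = onnx.at("model_output:0"); // real output name of the model
    cv::Mat       &out = gapi.at("out");            // name given to cfgOutputLayers()

    CV_Assert(in.depth() == CV_32F && out.depth() == CV_32F);
    const size_t n = std::min(in.total(), out.total());
    std::copy_n(in.ptr<float>(), n, out.ptr<float>());
    if (n < out.total()) {
        out.ptr<float>()[n] = -1.f;                 // end-of-data mark
    }
}

Filling the pre-allocated output in place (rather than reassigning it) is what keeps the result visible to G-API, which is why the tests copy element-wise instead of using cv::Mat assignment.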