diff --git a/modules/gapi/src/backends/onnx/gonnxbackend.cpp b/modules/gapi/src/backends/onnx/gonnxbackend.cpp
index e465a1ccc7..0d9a16a7bd 100644
--- a/modules/gapi/src/backends/onnx/gonnxbackend.cpp
+++ b/modules/gapi/src/backends/onnx/gonnxbackend.cpp
@@ -123,7 +123,7 @@ class ONNXCompiled {
     std::vector<cv::Mat> out_data;
 
     void Run(const std::vector<cv::Mat>& ins,
-             const std::vector<cv::Mat>& outs);
+                   std::vector<cv::Mat>& outs);
 
     std::vector<const char*> in_names_without_const;
 public:
@@ -322,22 +322,20 @@ inline std::vector<int64_t> toORT(const cv::MatSize &sz) {
 inline void preprocess(const cv::Mat& src,
                        const cv::gimpl::onnx::TensorInfo& ti,
                              cv::Mat& dst) {
-    GAPI_Assert(src.depth() == CV_32F || src.depth() == CV_8U);
     // CNN input type
     const auto type = toCV(ti.type);
-    if (src.depth() == CV_32F) {
+    if (src.depth() != CV_8U) {
         // Just pass the tensor as-is.
         // No layout or dimension transformations done here!
         // TODO: This needs to be aligned across all NN backends.
-        GAPI_Assert(type == CV_32F && "Only 32F model input is supported for 32F input data");
         const auto tensor_dims = toORT(src.size);
         if (tensor_dims.size() == ti.dims.size()) {
             for (size_t i = 0; i < ti.dims.size(); ++i) {
                 GAPI_Assert((ti.dims[i] == -1 || ti.dims[i] == tensor_dims[i]) &&
-                            "32F tensor dimensions should match with all non-dynamic NN input dimensions");
+                            "Non-U8 tensor dimensions should match with all non-dynamic NN input dimensions");
             }
         } else {
-            GAPI_Error("32F tensor size should match with NN input");
+            GAPI_Error("Non-U8 tensor size should match with NN input");
         }
         dst = src;
@@ -471,6 +469,25 @@ inline Ort::Value createTensor(const Ort::MemoryInfo& memory_info,
         return createTensor<float>(memory_info, tensor_params, data);
     case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32:
         return createTensor<int32_t>(memory_info, tensor_params, data);
+    case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64: {
+        // cv::Mat does not support int64 data directly.
+        // The following steps are applied to create an ONNX tensor from cv::Mat data:
+        // - First create a new ONNX tensor 'i64_tensor' with data type int64_t using the default allocator
+        // - Next retrieve a pointer to the mutable data buffer of 'i64_tensor'
+        // - Convert the data from int32 (see the toCV function) to int64 and deep copy it into 'i64_tensor'
+        auto ort_dims = toORT(data.size);
+
+        Ort::AllocatorWithDefaultOptions allocator;
+        Ort::Value i64_tensor = Ort::Value::CreateTensor<int64_t>(allocator,
+                                                                  ort_dims.data(),
+                                                                  ort_dims.size());
+        int64_t* tensor_data = i64_tensor.GetTensorMutableData<int64_t>();
+
+        cv::gimpl::convertInt32ToInt64(data.ptr<int>(),
+                                       tensor_data,
+                                       data.total());
+        return i64_tensor;
+    }
     default:
         GAPI_Error("ONNX. Unsupported data type");
     }
@@ -747,9 +764,11 @@ ONNXCompiled::ONNXCompiled(const gapi::onnx::detail::ParamDesc &pp)
                             in_tensor_info.end(),
                             [](const cv::gimpl::onnx::TensorInfo &p) {
                                 return p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT
-                                    || p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8;
+                                    || p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8
+                                    || p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32
+                                    || p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64;
                             })
-                && "Only FP32 and U8 inputs for NN are supported");
+                && "Only FP32, INT32, INT64 and U8 inputs for NN are supported");
 
     // Put mean and std in appropriate tensor params
     if (!params.mean.empty() || !params.stdev.empty()) {
@@ -864,7 +883,7 @@ cv::Mat ONNXCompiled::allocOutput(int i) const {
 }
 
 void ONNXCompiled::Run(const std::vector<cv::Mat>& ins,
-                       const std::vector<cv::Mat>& outs) {
+                             std::vector<cv::Mat>& outs) {
     std::vector<Ort::Value> in_tensors, out_tensors;
 
     // Layer names order for run
@@ -909,6 +928,17 @@ void ONNXCompiled::Run(const std::vector<cv::Mat>& ins,
                     out_run_names.data(),
                     &out_tensors.front(),
                     params.output_names.size());
+        if (out_tensor_info[0].type == ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64) {
+            // cv::Mat does not support int64 output data.
+            // Conversion from int64 to int32 is carried out in the copyFromONNX function.
+            // The output is written to out_mat.
+            for (auto &&iter : ade::util::zip(ade::util::toRange(out_tensors),
+                                              ade::util::toRange(outs))) {
+                auto &out_tensor = std::get<0>(iter);
+                auto &out_mat    = std::get<1>(iter);
+                copyFromONNX(out_tensor, out_mat);
+            }
+        }
     } else {
         // Hard path - run session & user-defined post-processing
         // NOTE: use another list of output names here
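
Reviewer note: the hunks above call cv::gimpl::convertInt32ToInt64, and the output path relies on copyFromONNX to narrow int64 back to int32; both helpers are defined elsewhere in this PR and are not shown in these hunks. As a reading aid, here is a minimal sketch of what they presumably do, with signatures inferred from the call sites; the narrowing behavior (plain truncation rather than saturation) is an assumption.

// Sketch only -- not the PR's actual implementation.
#include <algorithm>
#include <cstddef>
#include <cstdint>

namespace cv { namespace gimpl {

// Widening copy: CV_32S cv::Mat buffer -> int64 ONNX tensor buffer.
inline void convertInt32ToInt64(const int* src, int64_t* dst, const size_t n) {
    std::transform(src, src + n, dst,
                   [](int v) { return static_cast<int64_t>(v); });
}

// Narrowing copy: int64 ONNX output buffer -> CV_32S cv::Mat buffer.
// Plain truncation is assumed here; values outside the int32 range would wrap.
inline void convertInt64ToInt32(const int64_t* src, int* dst, const size_t n) {
    std::transform(src, src + n, dst,
                   [](int64_t v) { return static_cast<int>(v); });
}

}} // namespace cv::gimpl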
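
From the user's perspective, the change lets a G-API graph feed CV_32S data to an ONNX model whose input tensor is INT64 and read an INT64 output back as CV_32S. A hypothetical end-to-end sketch follows; the network tag, model path, and tensor shape are placeholders, and it assumes the model's input dimensions match the Mat's (or are dynamic), as enforced by the preprocess checks above.

#include <opencv2/core.hpp>
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/infer.hpp>
#include <opencv2/gapi/infer/onnx.hpp>

// Placeholder network: one INT64 input tensor, one INT64 output tensor.
G_API_NET(Int64Net, <cv::GMat(cv::GMat)>, "example.int64-net");

int main() {
    cv::GMat in;
    cv::GMat out = cv::gapi::infer<Int64Net>(in);
    cv::GComputation comp(cv::GIn(in), cv::GOut(out));

    // Model path is a placeholder.
    auto net = cv::gapi::onnx::Params<Int64Net>{ "model_int64.onnx" };

    // int32 input data; the backend widens it to int64 before Session::Run.
    cv::Mat ids(1, 8, CV_32S);
    cv::randu(ids, 0, 100);

    cv::Mat result; // filled as CV_32S, narrowed from the int64 output
    comp.apply(cv::gin(ids), cv::gout(result),
               cv::compile_args(cv::gapi::networks(net)));
    return 0;
}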