Merge pull request #25817 from lamiayous:ly/extend_onnxrt_gapi_backend_handle_i32_i64_type

Handling I32/I64 data types in G-API ONNX back-end #25817

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [ ] I agree to contribute to the project under Apache 2 License.
- [ ] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
- [ ] The PR is proposed to the proper branch
- [ ] There is a reference to the original bug report and related work
- [ ] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
      Patch to opencv_extra has the same branch name.
- [ ] The feature is well documented and sample code can be built with the project CMake
pull/25930/head
lamiayous 4 months ago committed by GitHub
parent c6ace77e21
commit 78195bc3df
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 48
      modules/gapi/src/backends/onnx/gonnxbackend.cpp

@ -123,7 +123,7 @@ class ONNXCompiled {
std::vector<cv::Mat> out_data; std::vector<cv::Mat> out_data;
void Run(const std::vector<cv::Mat>& ins, void Run(const std::vector<cv::Mat>& ins,
const std::vector<cv::Mat>& outs); std::vector<cv::Mat>& outs);
std::vector<std::string> in_names_without_const; std::vector<std::string> in_names_without_const;
public: public:
@ -322,22 +322,20 @@ inline std::vector<int64_t> toORT(const cv::MatSize &sz) {
inline void preprocess(const cv::Mat& src, inline void preprocess(const cv::Mat& src,
const cv::gimpl::onnx::TensorInfo& ti, const cv::gimpl::onnx::TensorInfo& ti,
cv::Mat& dst) { cv::Mat& dst) {
GAPI_Assert(src.depth() == CV_32F || src.depth() == CV_8U);
// CNN input type // CNN input type
const auto type = toCV(ti.type); const auto type = toCV(ti.type);
if (src.depth() == CV_32F) { if (src.depth() != CV_8U) {
// Just pass the tensor as-is. // Just pass the tensor as-is.
// No layout or dimension transformations done here! // No layout or dimension transformations done here!
// TODO: This needs to be aligned across all NN backends. // TODO: This needs to be aligned across all NN backends.
GAPI_Assert(type == CV_32F && "Only 32F model input is supported for 32F input data");
const auto tensor_dims = toORT(src.size); const auto tensor_dims = toORT(src.size);
if (tensor_dims.size() == ti.dims.size()) { if (tensor_dims.size() == ti.dims.size()) {
for (size_t i = 0; i < ti.dims.size(); ++i) { for (size_t i = 0; i < ti.dims.size(); ++i) {
GAPI_Assert((ti.dims[i] == -1 || ti.dims[i] == tensor_dims[i]) && GAPI_Assert((ti.dims[i] == -1 || ti.dims[i] == tensor_dims[i]) &&
"32F tensor dimensions should match with all non-dynamic NN input dimensions"); "Non-U8 tensor dimensions should match with all non-dynamic NN input dimensions");
} }
} else { } else {
GAPI_Error("32F tensor size should match with NN input"); GAPI_Error("Non-U8 tensor size should match with NN input");
} }
dst = src; dst = src;
@ -471,6 +469,25 @@ inline Ort::Value createTensor(const Ort::MemoryInfo& memory_info,
return createTensor<float>(memory_info, tensor_params, data); return createTensor<float>(memory_info, tensor_params, data);
case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32: case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32:
return createTensor<int32_t>(memory_info, tensor_params, data); return createTensor<int32_t>(memory_info, tensor_params, data);
case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64:{
// cv::Mat does not support int64 data directly.
// Following steps are applied to create an ONNX tensor from cv::Mat data:
// - First create a new ONNX tensor 'i64_tensor' with data type int64_t using the default allocator
// - Next retrieve a pointer to the mutable data buffer of 'i64_tensor'
// - Convert the data from int32 (see toCV function) to int64 and deep copy it into 'i64_tensor'
auto ort_dims = toORT(data.size);
Ort::AllocatorWithDefaultOptions allocator;
Ort::Value i64_tensor = Ort::Value::CreateTensor<int64_t>(allocator,
ort_dims.data(),
ort_dims.size());
int64_t* tensor_data = i64_tensor.GetTensorMutableData<int64_t>();
cv::gimpl::convertInt32ToInt64(data.ptr<int>(),
tensor_data,
data.total());
return i64_tensor;
}
default: default:
GAPI_Error("ONNX. Unsupported data type"); GAPI_Error("ONNX. Unsupported data type");
} }
@ -747,9 +764,11 @@ ONNXCompiled::ONNXCompiled(const gapi::onnx::detail::ParamDesc &pp)
in_tensor_info.end(), in_tensor_info.end(),
[](const cv::gimpl::onnx::TensorInfo &p) { [](const cv::gimpl::onnx::TensorInfo &p) {
return p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT return p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT
|| p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8; || p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8
|| p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32
|| p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64;
}) })
&& "Only FP32 and U8 inputs for NN are supported"); && "Only FP32, INT32, INT64 and U8 inputs for NN are supported");
// Put mean and std in appropriate tensor params // Put mean and std in appropriate tensor params
if (!params.mean.empty() || !params.stdev.empty()) { if (!params.mean.empty() || !params.stdev.empty()) {
@ -864,7 +883,7 @@ cv::Mat ONNXCompiled::allocOutput(int i) const {
} }
void ONNXCompiled::Run(const std::vector<cv::Mat>& ins, void ONNXCompiled::Run(const std::vector<cv::Mat>& ins,
const std::vector<cv::Mat>& outs) { std::vector<cv::Mat>& outs) {
std::vector<Ort::Value> in_tensors, out_tensors; std::vector<Ort::Value> in_tensors, out_tensors;
// Layer names order for run // Layer names order for run
@ -909,6 +928,17 @@ void ONNXCompiled::Run(const std::vector<cv::Mat>& ins,
out_run_names.data(), out_run_names.data(),
&out_tensors.front(), &out_tensors.front(),
params.output_names.size()); params.output_names.size());
if (out_tensor_info[0].type == ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64) {
// cv::Mat does not support int64 output data.
// Conversion from int64 to int32 is carried out in the copyFromONNX function
// The output is written to out_mat
for (auto &&iter : ade::util::zip(ade::util::toRange(out_tensors),
ade::util::toRange(outs))) {
auto &out_tensor = std::get<0>(iter);
auto &out_mat = std::get<1>(iter);
copyFromONNX(out_tensor, out_mat);
}
}
} else { } else {
// Hard path - run session & user-defined post-processing // Hard path - run session & user-defined post-processing
// NOTE: use another list of output names here // NOTE: use another list of output names here

Loading…
Cancel
Save