diff --git a/modules/gapi/src/backends/ov/govbackend.cpp b/modules/gapi/src/backends/ov/govbackend.cpp index 7e99af4f77..abbe5f9f5b 100644 --- a/modules/gapi/src/backends/ov/govbackend.cpp +++ b/modules/gapi/src/backends/ov/govbackend.cpp @@ -794,13 +794,24 @@ public: } } - void cfgScaleMean(const std::string &input_name) { + void cfgScaleMean(const std::string &input_name, + const GMetaArg &input_meta) { auto &input_info = m_ppp.input(input_name); + const auto mean_vec = lookUp(m_mean_values, input_name); + const auto scale_vec = lookUp(m_scale_values, input_name); + + if (mean_vec || scale_vec) { + GAPI_Assert(cv::util::holds_alternative<cv::GMatDesc>(input_meta)); + const auto depth = cv::util::get<cv::GMatDesc>(input_meta).depth; + const bool depth_is_real = (depth == CV_32F) || (depth == CV_16F); + if (!depth_is_real) { + input_info.preprocess().convert_element_type(toOV(CV_32F)); + } + } if (mean_vec) { input_info.preprocess().mean(*mean_vec); } - const auto scale_vec = lookUp(m_scale_values, input_name); if (scale_vec) { input_info.preprocess().scale(*scale_vec); } @@ -974,7 +985,7 @@ struct Infer: public cv::detail::KernelTag { ppp.cfgLayouts(input_name); ppp.cfgPreProcessing(input_name, mm); - ppp.cfgScaleMean(input_name); + ppp.cfgScaleMean(input_name, mm); } ppp.cfgPostProcessing(); ppp.finalize(); @@ -1062,7 +1073,7 @@ struct InferROI: public cv::detail::KernelTag { ppp.cfgLayouts(input_name); ppp.cfgPreProcessing(input_name, mm, true /*disable_img_resize*/); - ppp.cfgScaleMean(input_name); + ppp.cfgScaleMean(input_name, mm); ppp.cfgPostProcessing(); ppp.finalize(); } @@ -1148,7 +1159,7 @@ struct InferList: public cv::detail::KernelTag { ppp.cfgLayouts(input_name); ppp.cfgPreProcessing(input_name, mm, true /*disable_img_resize*/); - ppp.cfgScaleMean(input_name); + ppp.cfgScaleMean(input_name, mm); } ppp.cfgPostProcessing(); ppp.finalize(); @@ -1267,7 +1278,7 @@ struct InferList2: public cv::detail::KernelTag { GAPI_Assert(op.k.inKinds[idx] == cv::detail::OpaqueKind::CV_MAT); } - 
ppp.cfgScaleMean(input_name); + ppp.cfgScaleMean(input_name, mm_0); idx++; // NB: Never forget to increment the counter } ppp.cfgPostProcessing(); diff --git a/modules/gapi/test/infer/gapi_infer_ov_tests.cpp b/modules/gapi/test/infer/gapi_infer_ov_tests.cpp index abce82b329..8a15d5e741 100644 --- a/modules/gapi/test/infer/gapi_infer_ov_tests.cpp +++ b/modules/gapi/test/infer/gapi_infer_ov_tests.cpp @@ -657,6 +657,187 @@ TEST_F(TestAgeGenderListOV, InferList2Generic_Image) { validate(); } +static ov::element::Type toOV(int depth) { + switch (depth) { + case CV_8U: return ov::element::u8; + case CV_32S: return ov::element::i32; + case CV_32F: return ov::element::f32; + case CV_16F: return ov::element::f16; + default: GAPI_Error("OV Backend: Unsupported data type"); + } + return ov::element::undefined; +} + +struct TestMeanScaleOV : public ::testing::TestWithParam<int>{ + G_API_NET(IdentityNet, <cv::GMat(cv::GMat)>, "test-identity-net"); + + static cv::GComputation create() { + cv::GMat in; + cv::GMat out; + out = cv::gapi::infer<IdentityNet>(in); + + return cv::GComputation{cv::GIn(in), cv::GOut(out)}; + } + + using Params = cv::gapi::ov::Params<IdentityNet>; + static Params params(const std::string &xml_path, + const std::string &bin_path, + const std::string &device) { + return Params { + xml_path, bin_path, device + }.cfgInputModelLayout("NHWC") + .cfgOutputLayers({ "output" }); + } + + TestMeanScaleOV() { + initDLDTDataPath(); + + m_model_path = findDataFile("gapi/ov/identity_net_100x100.xml"); + m_weights_path = findDataFile("gapi/ov/identity_net_100x100.bin"); + m_device_id = "CPU"; + + m_ov_model = cv::gapi::ov::wrap::getCore() + .read_model(m_model_path, m_weights_path); + + auto input_depth = GetParam(); + auto input = cv::imread(findDataFile("gapi/gapi_logo.jpg")); + input.convertTo(m_in_mat, input_depth); + } + + void addPreprocToOV( + std::function<void(ov::preprocess::PrePostProcessor&)> f) { + + auto input_depth = GetParam(); + + ov::preprocess::PrePostProcessor ppp(m_ov_model); + ppp.input().tensor().set_layout(ov::Layout("NHWC")) + 
.set_element_type(toOV(input_depth)) + .set_shape({ 1, 100, 100, 3 }); + ppp.input().model().set_layout(ov::Layout("NHWC")); + f(ppp); + m_ov_model = ppp.build(); + } + + void runOV() { + auto compiled_model = cv::gapi::ov::wrap::getCore() + .compile_model(m_ov_model, m_device_id); + auto infer_request = compiled_model.create_infer_request(); + + auto input_tensor = infer_request.get_input_tensor(); + cv::gapi::ov::util::to_ov(m_in_mat, input_tensor); + + infer_request.infer(); + + auto out_tensor = infer_request.get_tensor("output"); + m_out_mat_ov.create(cv::gapi::ov::util::to_ocv(out_tensor.get_shape()), + cv::gapi::ov::util::to_ocv(out_tensor.get_element_type())); + cv::gapi::ov::util::to_ocv(out_tensor, m_out_mat_ov); + } + + std::string m_model_path; + std::string m_weights_path; + std::string m_device_id; + + std::shared_ptr<ov::Model> m_ov_model; + + cv::Mat m_in_mat; + cv::Mat m_out_mat_gapi; + cv::Mat m_out_mat_ov; +}; + +TEST_P(TestMeanScaleOV, Mean) +{ + int input_depth = GetParam(); + + std::vector<float> mean_values{ 220.1779, 218.9857, 217.8986 }; + + // Run OV reference pipeline: + { + addPreprocToOV([&](ov::preprocess::PrePostProcessor& ppp) { + if (input_depth == CV_8U || input_depth == CV_32S) { + ppp.input().preprocess().convert_element_type(ov::element::f32); + } + ppp.input().preprocess().mean(mean_values); + }); + runOV(); + } + + // Run G-API + GComputation comp = create(); + auto pp = params(m_model_path, m_weights_path, m_device_id); + pp.cfgMean(mean_values); + + comp.apply(cv::gin(m_in_mat), cv::gout(m_out_mat_gapi), + cv::compile_args(cv::gapi::networks(pp))); + + // Validate OV results against G-API ones: + normAssert(m_out_mat_ov, m_out_mat_gapi, "Test output"); +} + +TEST_P(TestMeanScaleOV, Scale) +{ + int input_depth = GetParam(); + + std::vector<float> scale_values{ 2., 2., 2. 
}; + + // Run OV reference pipeline: + { + addPreprocToOV([&](ov::preprocess::PrePostProcessor& ppp) { + if (input_depth == CV_8U || input_depth == CV_32S) { + ppp.input().preprocess().convert_element_type(ov::element::f32); + } + ppp.input().preprocess().scale(scale_values); + }); + runOV(); + } + + // Run G-API + GComputation comp = create(); + auto pp = params(m_model_path, m_weights_path, m_device_id); + pp.cfgScale(scale_values); + + comp.apply(cv::gin(m_in_mat), cv::gout(m_out_mat_gapi), + cv::compile_args(cv::gapi::networks(pp))); + + // Validate OV results against G-API ones: + normAssert(m_out_mat_ov, m_out_mat_gapi, "Test output"); +} + +TEST_P(TestMeanScaleOV, MeanAndScale) +{ + int input_depth = GetParam(); + + std::vector<float> mean_values{ 220.1779, 218.9857, 217.8986 }; + std::vector<float> scale_values{ 2., 2., 2. }; + + // Run OV reference pipeline: + { + addPreprocToOV([&](ov::preprocess::PrePostProcessor& ppp) { + if (input_depth == CV_8U || input_depth == CV_32S) { + ppp.input().preprocess().convert_element_type(ov::element::f32); + } + ppp.input().preprocess().mean(mean_values); + ppp.input().preprocess().scale(scale_values); + }); + runOV(); + } + + // Run G-API + GComputation comp = create(); + auto pp = params(m_model_path, m_weights_path, m_device_id); + pp.cfgMean(mean_values); + pp.cfgScale(scale_values); + + comp.apply(cv::gin(m_in_mat), cv::gout(m_out_mat_gapi), + cv::compile_args(cv::gapi::networks(pp))); + + // Validate OV results against G-API ones: + normAssert(m_out_mat_ov, m_out_mat_gapi, "Test output"); +} + +INSTANTIATE_TEST_CASE_P(Instantiation, TestMeanScaleOV, + Values(CV_8U, CV_32S, CV_16F, CV_32F)); + } // namespace opencv_test #endif // HAVE_INF_ENGINE && INF_ENGINE_RELEASE >= 2022010000