Merge pull request #21566 from TolyaTalamanov:at/gapi-modeling-tool_fix_handling_fp16_precision

[G-API] Pipeline modeling tool - Fix generating FP16 Mat

* Fix handling of FP16 precision

* Disable preprocessing when the input is FP16 inside the IE backend

* Fix the isApplicableForResize function
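
For context on the first point: cv::randu does not fill CV_16F matrices directly, so random FP16 data has to be generated at FP32 precision and then converted down. A minimal standalone sketch of that idea (not part of the patch; the tool's actual implementation lives in utils.hpp below):

    #include <opencv2/core.hpp>
    #include <iostream>

    int main() {
        // Fill an FP32 buffer with uniform random values...
        cv::Mat fp32(4, 4, CV_32FC1);
        cv::randu(fp32, 0.f, 1.f);

        // ...then convert it to FP16, which cv::randu cannot produce directly.
        cv::Mat fp16;
        fp32.convertTo(fp16, CV_16F);

        std::cout << "FP16 blob created: " << (fp16.depth() == CV_16F) << std::endl;
        return 0;
    }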
Anatoliy Talamanov (committed by GitHub)
parent 57d3002ee1
commit 08356007c9
Changed files:

  1. modules/gapi/samples/pipeline_modeling_tool/dummy_source.hpp (7 lines changed)
  2. modules/gapi/samples/pipeline_modeling_tool/pipeline_builder.hpp (2 lines changed)
  3. modules/gapi/samples/pipeline_modeling_tool/utils.hpp (17 lines changed)
  4. modules/gapi/src/backends/ie/giebackend.cpp (29 lines changed)

modules/gapi/samples/pipeline_modeling_tool/dummy_source.hpp

@@ -28,11 +28,8 @@ private:
 DummySource::DummySource(const double latency,
                          const OutputDescr& output)
-    : m_latency(latency), m_mat(output.dims, output.precision) {
-    if (output.dims.size() == 1) {
-        //FIXME: Well-known 1D mat WA
-        m_mat.dims = 1;
-    }
+    : m_latency(latency) {
+    utils::createNDMat(m_mat, output.dims, output.precision);
     utils::generateRandom(m_mat);
 }

modules/gapi/samples/pipeline_modeling_tool/pipeline_builder.hpp

@@ -73,7 +73,7 @@ struct DummyCall {
               std::shared_ptr<DummyState>& state,
               const cv::GCompileArgs& /*args*/) {
         state.reset(new DummyState{});
-        state->mat.create(output.dims, output.precision);
+        utils::createNDMat(state->mat, output.dims, output.precision);
         utils::generateRandom(state->mat);
     }
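
Both call sites above now delegate N-dimensional Mat creation to a shared utils::createNDMat helper, introduced in the utils.hpp hunk below. A standalone sketch of the same idea, assuming only OpenCV (the helper name makeNDMat here is hypothetical):

    #include <opencv2/core.hpp>
    #include <vector>

    // Create an N-dimensional cv::Mat and apply the well-known 1D-Mat workaround.
    static void makeNDMat(cv::Mat& mat, const std::vector<int>& dims, int depth) {
        CV_Assert(!dims.empty());
        mat.create(dims, depth);
        if (dims.size() == 1) {
            // cv::Mat::create turns a single-element size vector into a 2D Nx1 Mat,
            // so force dims back to 1 for code that expects a genuine 1D blob.
            mat.dims = 1;
        }
    }

    int main() {
        cv::Mat blob, vec;
        makeNDMat(blob, {1, 3, 224, 224}, CV_32F);  // 4D NCHW-like blob
        makeNDMat(vec,  {10},             CV_32F);  // 1D vector, dims forced to 1
        CV_Assert(blob.dims == 4 && vec.dims == 1);
        return 0;
    }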

modules/gapi/samples/pipeline_modeling_tool/utils.hpp

@@ -14,6 +14,16 @@ struct OutputDescr {
 };
 namespace utils {
+
+inline void createNDMat(cv::Mat& mat, const std::vector<int>& dims, int depth) {
+    GAPI_Assert(!dims.empty());
+    mat.create(dims, depth);
+    if (dims.size() == 1) {
+        //FIXME: Well-known 1D mat WA
+        mat.dims = 1;
+    }
+}
+
 inline void generateRandom(cv::Mat& out) {
     switch (out.depth()) {
     case CV_8U:
@@ -23,7 +33,12 @@ inline void generateRandom(cv::Mat& out) {
         cv::randu(out, 0.f, 1.f);
         break;
     case CV_16F: {
-        cv::Mat fp32_mat(out.size(), CV_MAKETYPE(CV_32F, out.channels()));
+        std::vector<int> dims;
+        for (int i = 0; i < out.size.dims(); ++i) {
+            dims.push_back(out.size[i]);
+        }
+        cv::Mat fp32_mat;
+        createNDMat(fp32_mat, dims, CV_32F);
         cv::randu(fp32_mat, 0.f, 1.f);
         fp32_mat.convertTo(out, out.type());
         break;
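
Note the dims handling in the CV_16F branch: for an N-dimensional Mat the 2D accessor out.size() is not usable, so the full shape is read from the cv::MatSize member out.size via dims() and operator[]. A small standalone illustration of that pattern:

    #include <opencv2/core.hpp>
    #include <iostream>
    #include <vector>

    int main() {
        // A 4D FP16 blob: size() (cv::Size) only describes 2D matrices,
        // but the size member (cv::MatSize) exposes every dimension.
        const int sizes[] = {1, 3, 224, 224};
        cv::Mat blob(4, sizes, CV_16FC1);

        std::vector<int> dims;
        for (int i = 0; i < blob.size.dims(); ++i) {
            dims.push_back(blob.size[i]);
        }

        for (int d : dims) std::cout << d << ' ';   // prints: 1 3 224 224
        std::cout << std::endl;
        return 0;
    }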

modules/gapi/src/backends/ie/giebackend.cpp

@@ -855,6 +855,13 @@ static void configureInputInfo(const IE::InputInfo::Ptr& ii, const cv::GMetaArg
     }
 }
+
+static bool isApplicableForResize(const IE::TensorDesc& desc) {
+    const auto layout = desc.getLayout();
+    const auto prec = desc.getPrecision();
+    return (layout == IE::Layout::NCHW || layout == IE::Layout::NHWC) &&
+           (prec == IE::Precision::FP32 || prec == IE::Precision::U8);
+}
 static IE::PreProcessInfo configurePreProcInfo(const IE::InputInfo::CPtr& ii,
                                                const cv::GMetaArg& mm) {
     IE::PreProcessInfo info;
@@ -864,9 +871,7 @@ static IE::PreProcessInfo configurePreProcInfo(const IE::InputInfo::CPtr& ii,
             info.setColorFormat(IE::ColorFormat::NV12);
         }
     }
-    const auto layout = ii->getTensorDesc().getLayout();
-    if (layout == IE::Layout::NCHW ||
-        layout == IE::Layout::NHWC) {
+    if (isApplicableForResize(ii->getTensorDesc())) {
         info.setResizeAlgorithm(IE::RESIZE_BILINEAR);
     }
     return info;
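
The new isApplicableForResize guard encodes the point from the commit message: the IE backend's built-in bilinear resize preprocessing is only requested for image-like NCHW/NHWC inputs with U8 or FP32 data, so FP16 inputs now skip it, as the Infer* hunks below show. An illustrative restatement of that logic with plain enums (not the actual IE types):

    #include <cassert>

    enum class Layout    { NCHW, NHWC, ANY };
    enum class Precision { U8, FP16, FP32 };

    // Mirrors the intent of isApplicableForResize: resize preprocessing is
    // only enabled for image-like layouts carrying U8 or FP32 data.
    static bool applicableForResize(Layout layout, Precision prec) {
        return (layout == Layout::NCHW || layout == Layout::NHWC) &&
               (prec == Precision::FP32 || prec == Precision::U8);
    }

    int main() {
        assert( applicableForResize(Layout::NCHW, Precision::FP32));
        assert(!applicableForResize(Layout::NCHW, Precision::FP16)); // FP16 input -> no resize
        assert(!applicableForResize(Layout::ANY,  Precision::U8));   // non-image layout -> no resize
        return 0;
    }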
@@ -986,11 +991,7 @@ struct Infer: public cv::detail::KernelTag {
                 configureInputReshapeByImage(ii, mm, input_reshape_table);
             }
-            // NB: Configure resize only for NCHW/NHWC layout,
-            // since it isn't supposed to work with others.
-            auto layout = ii->getTensorDesc().getLayout();
-            if (layout == IE::Layout::NCHW ||
-                layout == IE::Layout::NHWC) {
+            if (isApplicableForResize(ii->getTensorDesc())) {
                 ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
             }
         }
@@ -1095,7 +1096,9 @@ struct InferROI: public cv::detail::KernelTag {
                 uu.params.layer_names_to_reshape.end()) {
                 configureInputReshapeByImage(ii, mm, input_reshape_table);
             }
-            ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
+            if (isApplicableForResize(ii->getTensorDesc())) {
+                ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
+            }
             // FIXME: This isn't the best place to call reshape function.
             // Сorrect solution would be to do this in compile() method of network,
@@ -1193,7 +1196,9 @@ struct InferList: public cv::detail::KernelTag {
                 uu.params.layer_names_to_reshape.end()) {
                 configureInputReshapeByImage(ii, mm, input_reshape_table);
             }
-            ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
+            if (isApplicableForResize(ii->getTensorDesc())) {
+                ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
+            }
         }
         // FIXME: This isn't the best place to call reshape function.
@@ -1346,7 +1351,9 @@ struct InferList2: public cv::detail::KernelTag {
                 uu.params.layer_names_to_reshape.end()) {
                 configureInputReshapeByImage(ii, mm_0, input_reshape_table);
             }
-            ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
+            if (isApplicableForResize(ii->getTensorDesc())) {
+                ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
+            }
             // FIXME: This isn't the best place to call reshape function.
             // Сorrect solution would be to do this in compile() method of network,
