Merge pull request #20112 from mpashchenkov:mp/ocv-gapi-docs-part1

G-API: Documentation for Params (IE and ONNX).

* Applying comments

* Removed type of model from ParamDesc

* Added message for onnx ParamDesc

* Whitespaces

* Review

* Fix comments to review

* Fix comments

Co-authored-by: Anatoliy Talamanov <anatoliy.talamanov@intel.com>
Maxim Pashchenkov 4 years ago committed by GitHub
parent bc1af6227a
commit 8e386ac71f
  1. modules/gapi/include/opencv2/gapi/infer/ie.hpp (179 changed lines)
  2. modules/gapi/include/opencv2/gapi/infer/onnx.hpp (185 changed lines)

@@ -2,7 +2,7 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2019 Intel Corporation
// Copyright (C) 2019-2021 Intel Corporation
#ifndef OPENCV_GAPI_INFER_IE_HPP
#define OPENCV_GAPI_INFER_IE_HPP
@@ -29,7 +29,7 @@ namespace ie {
GAPI_EXPORTS cv::gapi::GBackend backend();
/**
* Specify how G-API and IE should trait input data
* Specifies how G-API and IE should treat input data
*
* In OpenCV, the same cv::Mat is used to represent both
* image and tensor data. Sometimes those are hardly distinguishable,
@@ -47,34 +47,30 @@ enum class TraitAs: int
using IEConfig = std::map<std::string, std::string>;
namespace detail {
struct ParamDesc {
std::string model_path;
std::string weights_path;
std::string device_id;
struct ParamDesc {
std::string model_path;
std::string weights_path;
std::string device_id;
// NB: Here order follows the `Net` API
std::vector<std::string> input_names;
std::vector<std::string> output_names;
std::vector<std::string> input_names;
std::vector<std::string> output_names;
using ConstInput = std::pair<cv::Mat, TraitAs>;
std::unordered_map<std::string, ConstInput> const_inputs;
using ConstInput = std::pair<cv::Mat, TraitAs>;
std::unordered_map<std::string, ConstInput> const_inputs;
// NB: num_* may differ from topology's real input/output port numbers
// (e.g. topology's partial execution)
std::size_t num_in; // How many inputs are defined in the operation
std::size_t num_out; // How many outputs are defined in the operation
std::size_t num_in;
std::size_t num_out;
enum class Kind { Load, Import };
Kind kind;
bool is_generic;
IEConfig config;
enum class Kind {Load, Import};
Kind kind;
bool is_generic;
IEConfig config;
std::map<std::string, std::vector<std::size_t>> reshape_table;
std::unordered_set<std::string> layer_names_to_reshape;
std::map<std::string, std::vector<std::size_t>> reshape_table;
std::unordered_set<std::string> layer_names_to_reshape;
// NB: Number of asynchronous infer requests
size_t nireq;
};
size_t nireq;
};
} // namespace detail
// FIXME: this is probably a shared (reusable) thing
@@ -88,8 +84,21 @@ struct PortCfg {
, std::tuple_size<typename Net::OutArgs>::value >;
};
/**
* @brief This structure provides functions
* that fill inference parameters for an "OpenVINO Toolkit" model.
*/
template<typename Net> class Params {
public:
/** @brief Class constructor.
Constructs Params based on model information and specifies default values for other
inference description parameters. Model is loaded and compiled using "OpenVINO Toolkit".
@param model Path to topology IR (.xml file).
@param weights Path to weights (.bin file).
@param device Target device to use.
*/
Params(const std::string &model,
const std::string &weights,
const std::string &device)
@@ -104,6 +113,13 @@ public:
, 1u} {
};
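For illustration only, a minimal usage sketch of this constructor; the network type, file paths and device below are hypothetical placeholders, not part of this change:

G_API_NET(FaceNet, <cv::GMat(cv::GMat)>, "example-face-detector");

// ... somewhere in application code:
cv::gapi::ie::Params<FaceNet> face_params{
    "face-detector.xml",   // path to topology IR
    "face-detector.bin",   // path to weights
    "CPU"                  // target device
};

cv::GMat in;
cv::GMat out = cv::gapi::infer<FaceNet>(in);
cv::GComputation graph(cv::GIn(in), cv::GOut(out));

// The filled parameters are passed to the graph via cv::gapi::networks():
auto compile_args = cv::compile_args(cv::gapi::networks(face_params));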
/** @overload
Use this constructor to work with a pre-compiled network.
The model is imported from a pre-compiled blob.
@param model Path to model.
@param device Target device to use.
*/
Params(const std::string &model,
const std::string &device)
: desc{ model, {}, device, {}, {}, {}
@@ -117,22 +133,53 @@ public:
, 1u} {
};
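Continuing the hypothetical sketch above, the pre-compiled (Import) case would look like this; the blob path and device are placeholders:

cv::gapi::ie::Params<FaceNet> imported_params{
    "face-detector.blob",   // pre-compiled blob
    "MYRIAD"                // target device
};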
Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &ll) {
/** @brief Specifies the sequence of network input layer names for inference.
The function is used to associate cv::gapi::infer<> inputs with the model inputs.
The number of names has to match the number of network inputs as defined in G_API_NET().
If a network has only a single input layer, there is no need to specify the name manually.
@param layer_names std::array<std::string, N> where N is the number of inputs
as defined in the @ref G_API_NET. Contains names of input layers.
@return reference to this parameter structure.
*/
Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &layer_names) {
desc.input_names.clear();
desc.input_names.reserve(ll.size());
std::copy(ll.begin(), ll.end(),
desc.input_names.reserve(layer_names.size());
std::copy(layer_names.begin(), layer_names.end(),
std::back_inserter(desc.input_names));
return *this;
}
Params<Net>& cfgOutputLayers(const typename PortCfg<Net>::Out &ll) {
/** @brief Specifies the sequence of network output layer names for inference.
The function is used to associate cv::gapi::infer<> outputs with the model outputs.
The number of names has to match the number of network outputs as defined in G_API_NET().
If a network has only a single output layer, there is no need to specify the name manually.
@param layer_names std::array<std::string, N> where N is the number of outputs
as defined in the @ref G_API_NET. Contains names of output layers.
@return reference to this parameter structure.
*/
Params<Net>& cfgOutputLayers(const typename PortCfg<Net>::Out &layer_names) {
desc.output_names.clear();
desc.output_names.reserve(ll.size());
std::copy(ll.begin(), ll.end(),
desc.output_names.reserve(layer_names.size());
std::copy(layer_names.begin(), layer_names.end(),
std::back_inserter(desc.output_names));
return *this;
}
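A hedged sketch of wiring layer names for a hypothetical two-output network; all identifiers are placeholders:

G_API_NET(AgeGender, <std::tuple<cv::GMat, cv::GMat>(cv::GMat)>, "example-age-gender");

cv::gapi::ie::Params<AgeGender> agegender_params{"age-gender.xml", "age-gender.bin", "CPU"};
agegender_params
    .cfgInputLayers({ "data" })                 // one name per G_API_NET input
    .cfgOutputLayers({ "age_conv3", "prob" });  // one name per G_API_NET output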
/** @brief Specifies a constant input.
The function is used to set a constant input. This input has to be
a preprocessed tensor if its type is TENSOR. You need to provide the name of the
network layer which will receive the provided data.
@param layer_name Name of network layer.
@param data cv::Mat that contains data which will be associated with network layer.
@param hint Input type @sa cv::gapi::ie::TraitAs.
@return reference to this parameter structure.
*/
Params<Net>& constInput(const std::string &layer_name,
const cv::Mat &data,
TraitAs hint = TraitAs::TENSOR) {
@@ -140,52 +187,100 @@ public:
return *this;
}
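A hedged example of feeding a constant input; the layer name and geometry are illustrative only and continue the hypothetical face_params sketch above:

// Some topologies expect a constant auxiliary input, e.g. an "im_info" tensor.
cv::Mat im_info = (cv::Mat_<float>(1, 3) << 320.f, 544.f, 1.f);
face_params.constInput("im_info", im_info, cv::gapi::ie::TraitAs::TENSOR);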
Params& pluginConfig(IEConfig&& cfg) {
desc.config = std::move(cfg);
/** @brief Specifies OpenVINO plugin configuration.
The function is used to set the configuration for an OpenVINO plugin. Some parameters
can differ from plugin to plugin. Please follow https://docs.openvinotoolkit.org/latest/index.html
to check information about a specific plugin.
@param cfg Map of pairs: (config parameter name, config parameter value).
@return reference to this parameter structure.
*/
Params& pluginConfig(const IEConfig& cfg) {
desc.config = cfg;
return *this;
}
Params& pluginConfig(const IEConfig& cfg) {
desc.config = cfg;
/** @overload
Function with an rvalue parameter.
@param cfg rvalue map of pairs: (config parameter name, config parameter value).
@return reference to this parameter structure.
*/
Params& pluginConfig(IEConfig&& cfg) {
desc.config = std::move(cfg);
return *this;
}
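A hedged example; the configuration keys below are Inference Engine CPU plugin options and may differ between OpenVINO versions:

face_params.pluginConfig({
    { "CPU_THREADS_NUM", "4" },
    { "CPU_BIND_THREAD", "NO" }
});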
/** @brief Specifies number of asynchronous inference requests.
@param nireq Number of asynchronous inference requests.
@return reference to this parameter structure.
*/
Params& cfgNumRequests(size_t nireq) {
GAPI_Assert(nireq > 0 && "Number of infer requests must be greater than zero!");
desc.nireq = nireq;
return *this;
}
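For example (the value is arbitrary):

face_params.cfgNumRequests(4u);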
Params<Net>& cfgInputReshape(std::map<std::string, std::vector<std::size_t>>&& reshape_table) {
desc.reshape_table = std::move(reshape_table);
return *this;
}
/** @brief Specifies new input shapes for the network inputs.
The function is used to specify new input shapes for the network inputs.
Follow https://docs.openvinotoolkit.org/latest/classInferenceEngine_1_1networkNetwork.html
for additional information.
@param reshape_table Map of pairs: name of the corresponding data and its new dimensions.
@return reference to this parameter structure.
*/
Params<Net>& cfgInputReshape(const std::map<std::string, std::vector<std::size_t>>& reshape_table) {
desc.reshape_table = reshape_table;
return *this;
}
Params<Net>& cfgInputReshape(std::string&& layer_name, std::vector<size_t>&& layer_dims) {
desc.reshape_table.emplace(layer_name, layer_dims);
/** @overload */
Params<Net>& cfgInputReshape(std::map<std::string, std::vector<std::size_t>>&& reshape_table) {
desc.reshape_table = std::move(reshape_table);
return *this;
}
/** @overload
@param layer_name Name of layer.
@param layer_dims New dimensions for this layer.
@return reference to this parameter structure.
*/
Params<Net>& cfgInputReshape(const std::string& layer_name, const std::vector<size_t>& layer_dims) {
desc.reshape_table.emplace(layer_name, layer_dims);
return *this;
}
Params<Net>& cfgInputReshape(std::unordered_set<std::string>&& layer_names) {
desc.layer_names_to_reshape = std::move(layer_names);
/** @overload */
Params<Net>& cfgInputReshape(std::string&& layer_name, std::vector<size_t>&& layer_dims) {
desc.reshape_table.emplace(layer_name, layer_dims);
return *this;
}
/** @overload
@param layer_names Set of names of network layers that will be used for network reshape.
@return reference to this parameter structure.
*/
Params<Net>& cfgInputReshape(const std::unordered_set<std::string>& layer_names) {
desc.layer_names_to_reshape = layer_names;
return *this;
}
/** @overload
@param layer_names rvalue set of names of network layers; the selected layers will be reshaped
automatically to the input image size.
@return reference to this parameter structure.
*/
Params<Net>& cfgInputReshape(std::unordered_set<std::string>&& layer_names) {
desc.layer_names_to_reshape = std::move(layer_names);
return *this;
}
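A hedged sketch of the reshape overloads; the layer name and dimensions are placeholders:

// Set an explicit shape for a named input layer...
face_params.cfgInputReshape("data", {1, 3, 320, 320});
// ...or mark layers to be reshaped automatically to the input image size.
face_params.cfgInputReshape(std::unordered_set<std::string>{ "data" });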
// BEGIN(G-API's network parametrization API)
GBackend backend() const { return cv::gapi::ie::backend(); }
std::string tag() const { return Net::tag(); }

@@ -2,7 +2,7 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020 Intel Corporation
// Copyright (C) 2020-2021 Intel Corporation
#ifndef OPENCV_GAPI_INFER_ONNX_HPP
#define OPENCV_GAPI_INFER_ONNX_HPP
@@ -34,32 +34,35 @@ enum class TraitAs: int {
using PostProc = std::function<void(const std::unordered_map<std::string, cv::Mat> &,
std::unordered_map<std::string, cv::Mat> &)>;
namespace detail {
/**
* @brief This structure contains a description of the inference parameters
* that are specific to ONNX models.
*/
struct ParamDesc {
std::string model_path;
std::string model_path; //!< Path to model.
// NB: num_* may differ from topology's real input/output port numbers
// (e.g. topology's partial execution)
std::size_t num_in; // How many inputs are defined in the operation
std::size_t num_out; // How many outputs are defined in the operation
std::size_t num_in; //!< How many inputs are defined in the operation
std::size_t num_out; //!< How many outputs are defined in the operation
// NB: Here order follows the `Net` API
std::vector<std::string> input_names;
std::vector<std::string> output_names;
std::vector<std::string> input_names; //!< Names of input network layers.
std::vector<std::string> output_names; //!< Names of output network layers.
using ConstInput = std::pair<cv::Mat, TraitAs>;
std::unordered_map<std::string, ConstInput> const_inputs;
std::unordered_map<std::string, ConstInput> const_inputs; //!< Map of pairs: network layer name and the ConstInput which will be associated with it.
std::vector<cv::Scalar> mean;
std::vector<cv::Scalar> stdev;
std::vector<cv::Scalar> mean; //!< Mean values for preprocessing.
std::vector<cv::Scalar> stdev; //!< Standard deviation values for preprocessing.
std::vector<cv::GMatDesc> out_metas;
PostProc custom_post_proc;
std::vector<cv::GMatDesc> out_metas; //!< Out meta information about your output (type, dimensions).
PostProc custom_post_proc; //!< Post processing function.
std::vector<bool> normalize;
std::vector<bool> normalize; //!< Vector of bool values that enable or disable normalization of input data.
std::vector<std::string> names_to_remap;
std::vector<std::string> names_to_remap; //!< Names of output layers that will be processed in PostProc function.
};
} // namespace detail
@@ -79,30 +82,71 @@ struct PortCfg {
, std::tuple_size<typename Net::InArgs>::value >;
};
/**
* Contains a description of the inference parameters and a kit of functions that
* fill these parameters.
*/
template<typename Net> class Params {
public:
/** @brief Class constructor.
Constructs Params based on model information and sets default values for other
inference description parameters.
@param model Path to model (.onnx file).
*/
Params(const std::string &model) {
desc.model_path = model;
desc.num_in = std::tuple_size<typename Net::InArgs>::value;
desc.num_out = std::tuple_size<typename Net::OutArgs>::value;
};
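A hedged sketch of constructing ONNX inference parameters; the network type and model path are placeholders:

G_API_NET(OnnxFaceNet, <cv::GMat(cv::GMat)>, "example-onnx-face-net");

cv::gapi::onnx::Params<OnnxFaceNet> onnx_params{"face-detector.onnx"};
auto onnx_args = cv::compile_args(cv::gapi::networks(onnx_params));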
// BEGIN(G-API's network parametrization API)
GBackend backend() const { return cv::gapi::onnx::backend(); }
std::string tag() const { return Net::tag(); }
cv::util::any params() const { return { desc }; }
// END(G-API's network parametrization API)
/** @brief Specifies the sequence of network input layer names for inference.
The function is used to associate data of graph inputs with input layers of
the network topology. The number of names has to match the number of network inputs.
If a network has only one input layer, there is no need to call it, as the layer is
associated with the input automatically, but this doesn't prevent you from
doing it yourself.
@param layer_names std::array<std::string, N> where N is the number of inputs
as defined in the @ref G_API_NET. Contains names of input layers.
@return the reference to the modified object.
*/
Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &ll) {
desc.input_names.assign(ll.begin(), ll.end());
Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &layer_names) {
desc.input_names.assign(layer_names.begin(), layer_names.end());
return *this;
}
Params<Net>& cfgOutputLayers(const typename PortCfg<Net>::Out &ll) {
desc.output_names.assign(ll.begin(), ll.end());
/** @brief Specifies the sequence of network output layer names for inference.
The function is used to associate data of graph outputs with output layers of
the network topology. If a network has only one output layer, there is no need to call it,
as the layer is associated with the output automatically, but this doesn't prevent
you from doing it yourself. The number of names has to match the number of network
outputs, or you can set your own outputs, but in this case you have to
additionally use the @ref cfgPostProc function.
@param layer_names std::array<std::string, N> where N is the number of outputs
as defined in the @ref G_API_NET. Contains names of output layers.
@return the reference to the modified object.
*/
Params<Net>& cfgOutputLayers(const typename PortCfg<Net>::Out &layer_names) {
desc.output_names.assign(layer_names.begin(), layer_names.end());
return *this;
}
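The wiring mirrors the OpenVINO backend; for the hypothetical single-input/single-output OnnxFaceNet above this is optional, but it can be spelled out explicitly:

onnx_params
    .cfgInputLayers({ "data" })
    .cfgOutputLayers({ "prob" });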
/** @brief Sets a constant input.
The function is used to set a constant input. This input has to be
a prepared tensor, since preprocessing is disabled for this case. You should
provide the name of the network layer which will receive the provided data.
@param layer_name Name of network layer.
@param data cv::Mat that contains data which will be associated with the network layer.
@param hint Type of input (TENSOR).
@return the reference to the modified object.
*/
Params<Net>& constInput(const std::string &layer_name,
const cv::Mat &data,
TraitAs hint = TraitAs::TENSOR) {
@@ -110,6 +154,17 @@ public:
return *this;
}
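A hedged example; the layer name is a placeholder and the data is assumed to be an already prepared tensor:

cv::Mat priors = cv::Mat::zeros(1, 8732 * 4, CV_32F);   // already a prepared tensor
onnx_params.constInput("priors", priors, cv::gapi::onnx::TraitAs::TENSOR);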
/** @brief Specifies mean values and standard deviations for preprocessing.
The function is used to set mean values and standard deviations for preprocessing
of input data.
@param m std::array<cv::Scalar, N> where N is the number of inputs
as defined in the @ref G_API_NET. Contains mean values.
@param s std::array<cv::Scalar, N> where N is the number of inputs
as defined in the @ref G_API_NET. Contains standard deviation values.
@return the reference to the modified object.
*/
Params<Net>& cfgMeanStd(const typename PortCfg<Net>::NormCoefs &m,
const typename PortCfg<Net>::NormCoefs &s) {
desc.mean.assign(m.begin(), m.end());
@@ -117,75 +172,103 @@ public:
return *this;
}
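A hedged example using the commonly cited ImageNet statistics as placeholder values:

onnx_params.cfgMeanStd({ cv::Scalar(0.485, 0.456, 0.406) },
                       { cv::Scalar(0.229, 0.224, 0.225) });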
/** @brief Configures graph output and sets the post processing function from user.
/** @brief Configures graph output and provides the post processing function from the user.
The function is used for the case of infer of networks with dynamic outputs.
Since these networks haven't known output parameters needs provide them for
construction of output of graph.
The function provides meta information of outputs and post processing function.
Post processing function is used for copy information from ONNX infer's result
to output of graph which is allocated by out meta information.
The function is used when you work with networks with dynamic outputs.
Since we can't know the dimensions of the inference result, you need to provide them
for the construction of the graph output. These dimensions can differ from the inference result,
so you have to provide a @ref PostProc function that gets information from the inference
result and fills the output which is constructed with the dimensions from out_metas.
@param out_metas out meta information.
@param pp post processing function, which has two parameters. First is onnx
@param out_metas Out meta information about your output (type, dimensions).
@param remap_function Post processing function, which has two parameters. The first is the onnx
result, the second is the graph output. Both parameters are std::map objects that contain pairs of
a layer's name and a cv::Mat.
@return reference to object of class Params.
@return the reference to the modified object.
*/
Params<Net>& cfgPostProc(const std::vector<cv::GMatDesc> &out_metas,
const PostProc &pp) {
const PostProc &remap_function) {
desc.out_metas = out_metas;
desc.custom_post_proc = pp;
desc.custom_post_proc = remap_function;
return *this;
}
/** @overload
The function has rvalue parameters.
Function with rvalue parameters.
@param out_metas rvalue out meta information about your output (type, dimensions).
@param remap_function rvalue post processing function, which has two parameters. The first is the onnx
result, the second is the graph output. Both parameters are std::map objects that contain pairs of
a layer's name and a cv::Mat.
@return the reference to the modified object.
*/
Params<Net>& cfgPostProc(std::vector<cv::GMatDesc> &&out_metas,
PostProc &&pp) {
PostProc &&remap_function) {
desc.out_metas = std::move(out_metas);
desc.custom_post_proc = std::move(pp);
desc.custom_post_proc = std::move(remap_function);
return *this;
}
/** @overload
The function has an additional parameter, names_to_remap. This parameter provides
information about output layers which will be used for infer and in post
information about output layers which will be used for inference and post
processing function.
@param out_metas out meta information.
@param pp post processing function.
@param names_to_remap contains names of output layers. CNN's infer will be done on these layers.
Infer's result will be processed in post processing function using these names.
@return reference to object of class Params.
@param out_metas Out meta information.
@param remap_function Post processing function.
@param names_to_remap Names of output layers. The network's inference will
be done on these layers. The inference result will be processed in the post processing
function using these names.
@return the reference to the modified object.
*/
Params<Net>& cfgPostProc(const std::vector<cv::GMatDesc> &out_metas,
const PostProc &pp,
const PostProc &remap_function,
const std::vector<std::string> &names_to_remap) {
desc.out_metas = out_metas;
desc.custom_post_proc = pp;
desc.custom_post_proc = remap_function;
desc.names_to_remap = names_to_remap;
return *this;
}
/** @overload
The function has rvalue parameters.
Function with rvalue parameters and an additional parameter, names_to_remap.
@param out_metas rvalue out meta information.
@param remap_function rvalue post processing function.
@param names_to_remap rvalue names of output layers. The network's inference will
be done on these layers. The inference result will be processed in the post processing
function using these names.
@return the reference to the modified object.
*/
Params<Net>& cfgPostProc(std::vector<cv::GMatDesc> &&out_metas,
PostProc &&pp,
PostProc &&remap_function,
std::vector<std::string> &&names_to_remap) {
desc.out_metas = std::move(out_metas);
desc.custom_post_proc = std::move(pp);
desc.custom_post_proc = std::move(remap_function);
desc.names_to_remap = std::move(names_to_remap);
return *this;
}
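A hedged sketch of the dynamic-output case; the output dimensions, layer name and copying logic are illustrative assumptions only:

onnx_params.cfgPostProc(
    { cv::GMatDesc(CV_32F, {1, 100, 7}) },   // fixed meta the graph output is allocated with
    [](const std::unordered_map<std::string, cv::Mat> &onnx_results,
       std::unordered_map<std::string, cv::Mat> &graph_outputs) {
        // Remap the raw ONNX result into the graph output allocated from out_metas.
        onnx_results.at("detection_out").copyTo(graph_outputs.at("detection_out"));
    },
    { "detection_out" });                    // output layers to run inference on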
Params<Net>& cfgNormalize(const typename PortCfg<Net>::Normalize &n) {
desc.normalize.assign(n.begin(), n.end());
/** @brief Specifies the normalize parameter for preprocessing.
The function is used to set the normalize parameter for preprocessing of input data.
@param normalizations std::array<bool, N> where N is the number of inputs
as defined in the @ref G_API_NET. Contains bool values that enable or disable
normalization of input data.
@return the reference to the modified object.
*/
Params<Net>& cfgNormalize(const typename PortCfg<Net>::Normalize &normalizations) {
desc.normalize.assign(normalizations.begin(), normalizations.end());
return *this;
}
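For example, normalization can be switched off for the (single) input when the model already expects raw pixel values:

onnx_params.cfgNormalize({ false });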
// BEGIN(G-API's network parametrization API)
GBackend backend() const { return cv::gapi::onnx::backend(); }
std::string tag() const { return Net::tag(); }
cv::util::any params() const { return { desc }; }
// END(G-API's network parametrization API)
protected:
detail::ParamDesc desc;
};
