Merge pull request #15090 from dmatveev:dm/ng-0001-g-api-inference-api
* G-API-NG/API: Introduced inference API and IE-based backend - Very quick-n-dirty implementation - OpenCV's own DNN module is not used - No tests so far * G-API-NG/IE: Refined IE backend, added more tests * G-API-NG/IE: Fixed various CI warnings & build issues + tests - Added tests on multi-dimensional own::Mat - Added tests on GMatDesc with dimensions - Documentation on infer.hpp - Fixed more warnings + added a ROI list test - Fix descr_of clash for vector<Mat> & standalone mode - Fix build issue with gcc-4.8x - Addressed review comments * G-API-NG/IE: Addressed review comments - Pass `false` to findDataFile() - Add deprecation warning suppression macros for IEpull/15242/head
parent
59b0314a0e
commit
0757a51e2b
32 changed files with 1974 additions and 85 deletions
@ -0,0 +1,231 @@ |
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2019 Intel Corporation
|
||||
|
||||
|
||||
#ifndef OPENCV_GAPI_INFER_HPP |
||||
#define OPENCV_GAPI_INFER_HPP |
||||
|
||||
// FIXME: Inference API is currently only available in full mode
|
||||
#if !defined(GAPI_STANDALONE) |
||||
|
||||
#include <functional> |
||||
#include <string> // string |
||||
#include <utility> // tuple |
||||
|
||||
#include <opencv2/gapi/util/any.hpp> // any<> |
||||
#include <opencv2/gapi/gkernel.hpp> // GKernelType[M], GBackend |
||||
#include <opencv2/gapi/garg.hpp> // GArg |
||||
#include <opencv2/gapi/gcommon.hpp> // CompileArgTag |
||||
#include <opencv2/gapi/gmetaarg.hpp> // GMetaArg |
||||
|
||||
namespace cv { |
||||
|
||||
namespace detail {
// This tiny class eliminates the semantic difference between
// GKernelType and GKernelTypeM: both specializations expose the same
// interface, so the inference kernels below can derive from a single
// template regardless of whether the network returns one value or a
// tuple of values.
// FIXME: Something similar can be reused for regular kernels
template<typename, typename>
struct KernelTypeMedium;

// Multiple-return-value case: forwards to GKernelTypeM.
template<class K, typename... R, typename... Args>
struct KernelTypeMedium<K, std::function<std::tuple<R...>(Args...)> >:
    public GKernelTypeM<K, std::function<std::tuple<R...>(Args...)> > {};

// Single-return-value case: forwards to GKernelType.
template<class K, typename R, typename... Args>
struct KernelTypeMedium<K, std::function<R(Args...)> >:
    public GKernelType<K, std::function<R(Args...)> > {};
} // namespace detail
|
||||
|
||||
// Generic network type: only declared here; defined by the two partial
// specializations below, selected by the network's callable signature.
template<typename, typename> class GNetworkType;

// TODO: maybe tuple_wrap_helper from util.hpp may help with this.
// Multiple-return-value network definition (specialized base class)
template<typename K, typename... R, typename... Args>
class GNetworkType<K, std::function<std::tuple<R...>(Args...)> >
{
public:
    using InArgs = std::tuple<Args...>;   // network input types
    using OutArgs = std::tuple<R...>;     // network output types

    using Result = OutArgs;
    using API = std::function<Result(Args...)>; // single-image inference signature

    // ROI-list flavor: a GArray<Rect> of regions is prepended to the
    // inputs, and every output becomes a GArray of per-region results.
    using ResultL = std::tuple< cv::GArray<R>... >;
    using APIList = std::function<ResultL(cv::GArray<cv::Rect>, Args...)>;
};

// Single-return-value network definition (specialized base class)
template<typename K, typename R, typename... Args>
class GNetworkType<K, std::function<R(Args...)> >
{
public:
    using InArgs = std::tuple<Args...>;   // network input types
    using OutArgs = std::tuple<R>;        // single output, still exposed as a tuple

    using Result = R;
    using API = std::function<R(Args...)>; // single-image inference signature

    // ROI-list flavor (see the multi-output specialization above).
    using ResultL = cv::GArray<R>;
    using APIList = std::function<ResultL(cv::GArray<cv::Rect>, Args...)>;
};
||||
|
||||
// Base "Infer" kernel. Note - for whatever network, kernel ID
// is always the same. Different inference calls are distinguished by
// network _tag_ (an extra field in GCall)
//
// getOutMeta is a stub callback collected by G-API kernel subsystem
// automatically. This is a rare case when this callback is defined by
// a particular backend, not by a network itself (the real output
// metadata can only be derived from the loaded model, which only the
// backend knows about).
struct GInferBase {
    static constexpr const char * id() {
        return "org.opencv.dnn.infer"; // Universal stub
    }
    static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) {
        return GMetaArgs{}; // One more universal stub
    }
};
||||
|
||||
|
||||
// Base "Infer list" kernel (inference applied to every ROI in a list).
// All notes from the "Infer" kernel above apply here as well: the
// kernel ID is universal and calls are distinguished by network tag,
// while the real output metadata is supplied by the backend.
struct GInferListBase {
    static constexpr const char * id() {
        return "org.opencv.dnn.infer-roi"; // Universal stub
    }
    static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) {
        return GMetaArgs{}; // One more universal stub
    }
};
||||
|
||||
// A generic inference kernel. API (::on()) is fully defined by the Net
// template parameter (a type produced by G_API_NET()).
// Acts as a regular kernel in graph (via KernelTypeMedium).
template<typename Net>
struct GInfer final
    : public GInferBase
    , public detail::KernelTypeMedium< GInfer<Net>
                                     , typename Net::API > {
    // Prefer the backend-provided stub over what KernelTypeMedium brings in.
    using GInferBase::getOutMeta; // FIXME: name lookup conflict workaround?

    static constexpr const char* tag() { return Net::tag(); }
};
||||
|
||||
// A generic roi-list inference kernel. API (::on()) is derived from
// the Net template parameter (see more in infer<> overload).
template<typename Net>
struct GInferList final
    : public GInferListBase
    , public detail::KernelTypeMedium< GInferList<Net>
                                     , typename Net::APIList > {
    // Prefer the backend-provided stub over what KernelTypeMedium brings in.
    using GInferListBase::getOutMeta; // FIXME: name lookup conflict workaround?

    static constexpr const char* tag() { return Net::tag(); }
};
||||
|
||||
} // namespace cv
|
||||
|
||||
// FIXME: Probably the <API> signature makes a function/tuple/function round-trip
// Declares a G-API network type. `Class` is the resulting C++ type name,
// `API` is the network's call signature written as (Output(Inputs...)),
// and `Tag` is a unique string identifying this network at graph
// compilation time (see the network-tag note on GInferBase).
#define G_API_NET(Class, API, Tag)                                          \
    struct Class final: public cv::GNetworkType<Class, std::function API> { \
        static constexpr const char * tag() { return Tag; }                 \
    }
||||
|
||||
namespace cv { |
||||
namespace gapi { |
||||
|
||||
|
||||
/** @brief Calculates responses for the specified network (template
 * parameter) for every region in the source image.
 *
 * @tparam Net a network type defined with G_API_NET() macro.
 * @param roi a list of rectangles describing regions of interest
 *   in the source image. Usually an output of object detector or tracker.
 * @param args network's input parameters as specified in G_API_NET() macro.
 *   NOTE: verified to work reliably with 1-input topologies only.
 * @return a list of objects of return type as defined in G_API_NET().
 *   If a network has multiple return values (defined with a tuple), a tuple of
 *   GArray<> objects is returned with the appropriate types inside.
 * @sa G_API_NET()
 */
template<typename Net, typename... Args>
typename Net::ResultL infer(cv::GArray<cv::Rect> roi, Args&&... args) {
    return GInferList<Net>::on(roi, std::forward<Args>(args)...);
}
||||
|
||||
/**
 * @brief Calculates response for the specified network (template
 * parameter) given the input data.
 *
 * @tparam Net a network type defined with G_API_NET() macro.
 * @param args network's input parameters as specified in G_API_NET() macro.
 * @return an object of return type as defined in G_API_NET().
 *   If a network has multiple return values (defined with a tuple), a tuple of
 *   objects of appropriate type is returned.
 * @sa G_API_NET()
 */
template<typename Net, typename... Args>
typename Net::Result infer(Args&&... args) {
    return GInfer<Net>::on(std::forward<Args>(args)...);
}
||||
|
||||
|
||||
} // namespace gapi
|
||||
} // namespace cv
|
||||
|
||||
#endif // GAPI_STANDALONE
|
||||
|
||||
namespace cv { |
||||
namespace gapi { |
||||
|
||||
// Note: the below code _is_ part of STANDALONE build,
// just to make our compiler code compilable.
|
||||
|
||||
// A type-erased form of network parameters.
// Similar to how a type-erased GKernel is represented and used.
// Produced by cv::detail::strip() from a backend-specific Params<Net>
// object; consumed by the backend identified in `backend`.
struct GAPI_EXPORTS GNetParam {
    std::string tag;     // FIXME: const? Network tag as returned by Net::tag()
    GBackend backend;    // Specifies the execution model
    util::any params;    // Backend-interpreted parameter structure
};
||||
|
||||
/**
 * @brief A container class for network configurations. Similar to
 * GKernelPackage. Use cv::gapi::networks() to construct this object.
 *
 * @sa cv::gapi::networks
 */
struct GAPI_EXPORTS GNetPackage {
    explicit GNetPackage(std::initializer_list<GNetParam> &&ii = {});
    // Returns the distinct set of backends referenced by the stored networks.
    std::vector<GBackend> backends() const;
    std::vector<GNetParam> networks;
};
||||
} // namespace gapi
|
||||
|
||||
namespace detail { |
||||
// Type-erases a concrete network parametrization object (e.g. a
// backend's Params<Net>) into a GNetParam. T may be any type exposing
// tag(), backend() and params() accessors.
template<typename T>
gapi::GNetParam strip(T&& t) {
    return gapi::GNetParam { t.tag()
                           , t.backend()
                           , t.params()
                           };
}
||||
|
||||
// Registers GNetPackage as a G-API compile argument, so it can be
// passed to GComputation::compile()/apply() via cv::compile_args().
template<> struct CompileArgTag<cv::gapi::GNetPackage> {
    static const char* tag() { return "gapi.net_package"; }
};
||||
|
||||
} // namespace cv::detail
|
||||
|
||||
namespace gapi { |
||||
// Combines any number of network parametrization objects into a single
// GNetPackage, suitable for passing as a compile argument.
template<typename... Args>
cv::gapi::GNetPackage networks(Args&&... args) {
    return cv::gapi::GNetPackage({ cv::detail::strip(args)... });
}
||||
} // namespace gapi
|
||||
} // namespace cv
|
||||
|
||||
#endif // OPENCV_GAPI_INFER_HPP
|
@ -0,0 +1,106 @@ |
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2019 Intel Corporation
|
||||
|
||||
#ifndef OPENCV_GAPI_INFER_IE_HPP |
||||
#define OPENCV_GAPI_INFER_IE_HPP |
||||
|
||||
#ifdef HAVE_INF_ENGINE |
||||
|
||||
#include <unordered_map> |
||||
#include <string> |
||||
#include <array> |
||||
#include <tuple> // tuple, tuple_size |
||||
|
||||
#include <opencv2/gapi/opencv_includes.hpp> |
||||
#include <opencv2/gapi/util/any.hpp> |
||||
|
||||
namespace cv { |
||||
namespace gapi { |
||||
// FIXME: introduce a new sub-namespace for NN?
|
||||
namespace ie { |
||||
|
||||
GAPI_EXPORTS cv::gapi::GBackend backend(); |
||||
|
||||
namespace detail {
    // Plain-data description of an IE network configuration; this is the
    // structure which gets type-erased into GNetParam::params and later
    // interpreted by the IE backend.
    struct ParamDesc {
        std::string model_path;     // path to the topology file (.xml)
        std::string weights_path;   // path to the weights file (.bin)
        std::string device_id;      // IE device to run on (e.g. "CPU")

        // NB: Here order follows the `Net` API
        std::vector<std::string> input_names;
        std::vector<std::string> output_names;

        // Constant blobs bound to specific input layers (layer name -> data).
        std::unordered_map<std::string, cv::Mat> const_inputs;

        // NB: num_* may differ from topology's real input/output port numbers
        // (e.g. topology's partial execution)
        std::size_t num_in;  // How many inputs are defined in the operation
        std::size_t num_out; // How many outputs are defined in the operation
    };
} // namespace detail
|
||||
|
||||
// FIXME: this is probably a shared (reusable) thing
// Helper defining fixed-size arrays of layer names sized exactly to the
// network's input/output arity, so cfgInputLayers()/cfgOutputLayers()
// can't be called with a wrong number of names.
template<typename Net>
struct PortCfg {
    using In = std::array
        < std::string
        , std::tuple_size<typename Net::InArgs>::value >;
    using Out = std::array
        < std::string
        , std::tuple_size<typename Net::OutArgs>::value >;
};
||||
|
||||
template<typename Net> class Params { |
||||
public: |
||||
Params(const std::string &model, |
||||
const std::string &weights, |
||||
const std::string &device) |
||||
: desc{ model, weights, device, {}, {}, {} |
||||
, std::tuple_size<typename Net::InArgs>::value |
||||
, std::tuple_size<typename Net::OutArgs>::value |
||||
} { |
||||
}; |
||||
|
||||
Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &ll) { |
||||
desc.input_names.clear(); |
||||
desc.input_names.reserve(ll.size()); |
||||
std::copy(ll.begin(), ll.end(), |
||||
std::back_inserter(desc.input_names)); |
||||
return *this; |
||||
} |
||||
|
||||
Params<Net>& cfgOutputLayers(const typename PortCfg<Net>::Out &ll) { |
||||
desc.output_names.clear(); |
||||
desc.output_names.reserve(ll.size()); |
||||
std::copy(ll.begin(), ll.end(), |
||||
std::back_inserter(desc.output_names)); |
||||
return *this; |
||||
} |
||||
|
||||
Params<Net>& constInput(const std::string &layer_name, |
||||
const cv::Mat &data) { |
||||
desc.const_inputs[layer_name] = data; |
||||
return *this; |
||||
} |
||||
|
||||
// BEGIN(G-API's network parametrization API)
|
||||
GBackend backend() const { return cv::gapi::ie::backend(); } |
||||
std::string tag() const { return Net::tag(); } |
||||
cv::util::any params() const { return { desc }; } |
||||
// END(G-API's network parametrization API)
|
||||
|
||||
protected: |
||||
detail::ParamDesc desc; |
||||
}; |
||||
|
||||
} // namespace ie
|
||||
} // namespace gapi
|
||||
} // namespace cv
|
||||
|
||||
#endif // HAVE_INF_ENGINE
|
||||
|
||||
#endif // OPENCV_GAPI_INFER_IE_HPP
|
@ -0,0 +1,31 @@ |
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2019 Intel Corporation
|
||||
|
||||
#ifndef OPENCV_GAPI_INFER_IE_UTIL_HPP |
||||
#define OPENCV_GAPI_INFER_IE_UTIL_HPP |
||||
|
||||
#ifdef HAVE_INF_ENGINE |
||||
|
||||
// NOTE: This file is not included by default in infer/ie.hpp
|
||||
// and won't be. infer/ie.hpp doesn't depend on IE headers itself.
|
||||
// This file does -- so needs to be included separately by those who care.
|
||||
|
||||
#include "inference_engine.hpp" |
||||
|
||||
namespace cv { |
||||
namespace gapi { |
||||
namespace ie { |
||||
namespace util { |
||||
|
||||
GAPI_EXPORTS std::vector<int> to_ocv(const InferenceEngine::SizeVector &dims); |
||||
|
||||
GAPI_EXPORTS cv::Mat to_ocv(InferenceEngine::Blob::Ptr blob); |
||||
GAPI_EXPORTS InferenceEngine::Blob::Ptr to_ie(cv::Mat &blob); |
||||
|
||||
}}}} |
||||
|
||||
#endif // HAVE_INF_ENGINE
|
||||
#endif // OPENCV_GAPI_INFER_IE_UTIL_HPP
|
@ -0,0 +1,27 @@ |
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2018-2019 Intel Corporation
|
||||
|
||||
|
||||
#include "precomp.hpp" |
||||
|
||||
#include <functional> // hash |
||||
#include <numeric> // accumulate |
||||
#include <unordered_set> |
||||
#include <iterator> |
||||
|
||||
#include <ade/util/algorithm.hpp> |
||||
|
||||
#include <opencv2/gapi/infer.hpp> |
||||
|
||||
// Constructs a package from a braced list of type-erased network params.
// NB: std::initializer_list elements are const, so the std::move below
// cannot actually move them - the GNetParams are still copied.
cv::gapi::GNetPackage::GNetPackage(std::initializer_list<GNetParam> &&ii)
    : networks(std::move(ii)) {
}
||||
|
||||
std::vector<cv::gapi::GBackend> cv::gapi::GNetPackage::backends() const { |
||||
std::unordered_set<cv::gapi::GBackend> unique_set; |
||||
for (const auto &nn : networks) unique_set.insert(nn.backend); |
||||
return std::vector<cv::gapi::GBackend>(unique_set.begin(), unique_set.end()); |
||||
} |
@ -0,0 +1,604 @@ |
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2018 Intel Corporation
|
||||
|
||||
#include "precomp.hpp" |
||||
|
||||
#ifdef HAVE_INF_ENGINE |
||||
|
||||
#if INF_ENGINE_RELEASE <= 2018050000 |
||||
# error G-API IE module supports only OpenVINO IE >= 2019 R1 |
||||
#endif |
||||
|
||||
#include <functional> |
||||
#include <unordered_set> |
||||
|
||||
#include <ade/util/algorithm.hpp> |
||||
|
||||
#include <ade/util/range.hpp> |
||||
#include <ade/util/zip_range.hpp> |
||||
#include <ade/util/chain_range.hpp> |
||||
#include <ade/typed_graph.hpp> |
||||
|
||||
#include <opencv2/gapi/gcommon.hpp> |
||||
#include <opencv2/gapi/garray.hpp> |
||||
#include <opencv2/gapi/util/any.hpp> |
||||
#include <opencv2/gapi/gtype_traits.hpp> |
||||
|
||||
#include <opencv2/gapi/infer.hpp> |
||||
#include <opencv2/gapi/infer/ie/util.hpp> |
||||
|
||||
#include "compiler/gobjref.hpp" |
||||
#include "compiler/gmodel.hpp" |
||||
|
||||
#include "backends/ie/giebackend.hpp" |
||||
|
||||
#include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK! |
||||
|
||||
namespace IE = InferenceEngine; |
||||
|
||||
namespace { |
||||
|
||||
// Converts an OpenCV rectangle to an IE ROI. The first (id) field is
// always 0 here; coordinates are cast from int to size_t, so negative
// rectangle coordinates are not supported.
inline IE::ROI toIE(const cv::Rect &rc) {
    return IE::ROI
        { 0u
        , static_cast<std::size_t>(rc.x)
        , static_cast<std::size_t>(rc.y)
        , static_cast<std::size_t>(rc.width)
        , static_cast<std::size_t>(rc.height)
        };
}
||||
|
||||
// Converts a cv::MatSize (int dimensions) to an IE SizeVector.
inline IE::SizeVector toIE(const cv::MatSize &sz) {
    return cv::to_own<IE::SizeVector::value_type>(sz);
}
||||
// Converts an IE SizeVector (size_t dimensions) back to OpenCV's
// int-based dimension vector; checked_cast traps overflow.
inline std::vector<int> toCV(const IE::SizeVector &vsz) {
    std::vector<int> result;
    result.reserve(vsz.size());
    for (auto sz : vsz) {
        result.push_back(ade::util::checked_cast<int>(sz));
    }
    return result;
}
||||
|
||||
// Maps an OpenCV element depth to the corresponding IE precision.
// Only CV_8U and CV_32F are supported; anything else asserts.
inline IE::Precision toIE(int depth) {
    if (depth == CV_8U)  return IE::Precision::U8;
    if (depth == CV_32F) return IE::Precision::FP32;
    GAPI_Assert(false && "Unsupported data type");
    return IE::Precision::UNSPECIFIED; // unreachable; keeps compilers quiet
}
||||
// Maps an IE precision back to the OpenCV element depth.
// Only U8 and FP32 are supported; anything else asserts.
inline int toCV(IE::Precision prec) {
    if (prec == IE::Precision::U8)   return CV_8U;
    if (prec == IE::Precision::FP32) return CV_32F;
    GAPI_Assert(false && "Unsupported data type");
    return -1; // unreachable; keeps compilers quiet
}
||||
|
||||
// Builds an IE TensorDesc describing the given Mat's memory layout.
// 2D Mats (regular images; channels don't count as a dimension) are
// reported as a batch-1 NHWC tensor; 4D Mats pass through as NCHW.
// Other ranks are rejected by the assert below.
inline IE::TensorDesc toIE(const cv::Mat &mat) {
    const auto &sz = mat.size;

    // NB: For some reason RGB image is 2D image
    // (since channel component is not counted here).
    if (sz.dims() == 2) {
        // NB: This logic is mainly taken from IE samples
        const size_t channels = mat.channels();
        const size_t height = mat.size().height;
        const size_t width = mat.size().width;

        const size_t strideH = mat.step.buf[0];
        const size_t strideW = mat.step.buf[1];

        // The zero-copy wrapping below assumes a fully packed buffer
        // (no per-pixel or per-row padding).
        const bool is_dense =
            strideW == channels &&
            strideH == channels * width;

        if (!is_dense)
            cv::util::throw_error(std::logic_error("Doesn't support conversion"
                                                   " from non-dense cv::Mat"));

        return IE::TensorDesc(toIE(mat.depth()),
                              IE::SizeVector{1, channels, height, width},
                              IE::Layout::NHWC);
    }

    GAPI_Assert(sz.dims() == 4); // NB: Will relax when needed (to known use)
    return IE::TensorDesc(toIE(mat.depth()), toIE(sz), IE::Layout::NCHW);
}
||||
|
||||
// Wraps a cv::Mat's memory into an IE blob without copying: the blob
// points into the Mat's buffer, so the Mat must outlive the blob.
inline IE::Blob::Ptr wrapIE(const cv::Mat &mat) {
    const auto tDesc = toIE(mat);
    switch (mat.depth()) {
        // NB: Seems there's no way to create an untyped (T-less) Blob::Ptr
        // in IE given only precision via TensorDesc. So we have to do this:
#define HANDLE(E,T) \
        case CV_##E: return IE::make_shared_blob<T>(tDesc, const_cast<T*>(mat.ptr<T>()))
        HANDLE(8U, uint8_t);
        HANDLE(32F, float);
#undef HANDLE
    default: GAPI_Assert(false && "Unsupported data type");
    }
    return IE::Blob::Ptr{}; // unreachable; keeps compilers quiet
}
||||
|
||||
// Copies an IE blob's contents into a pre-allocated Mat-like object.
// MatType must provide total() and a raw `data` pointer; the caller is
// responsible for the destination being large enough and of the element
// type matching the blob precision.
template<class MatType>
inline void copyFromIE(const IE::Blob::Ptr &blob, MatType &mat) {
    switch (blob->getTensorDesc().getPrecision()) {
#define HANDLE(E,T)                                                   \
        case IE::Precision::E: std::copy_n(blob->buffer().as<T*>(),   \
                                           mat.total(),               \
                                           reinterpret_cast<T*>(mat.data)); \
            break;
        HANDLE(U8, uint8_t);
        HANDLE(FP32, float);
#undef HANDLE
        default: GAPI_Assert(false && "Unsupported data type");
    }
}
||||
|
||||
// IE-specific metadata, represents a network with its parameters.
// Constructed at graph compilation time from the user's ParamDesc:
// loads the topology+weights and resolves input/output layer names.
struct IEUnit {
    static const char *name() { return "IEModelConfig"; }

    cv::gapi::ie::detail::ParamDesc params;
    IE::CNNNetwork net;
    IE::InputsDataMap inputs;
    IE::OutputsDataMap outputs;

    explicit IEUnit(const cv::gapi::ie::detail::ParamDesc &pp)
        : params(pp) {

        IE::CNNNetReader reader;
        reader.ReadNetwork(params.model_path);
        reader.ReadWeights(params.weights_path);
        net = reader.getNetwork();
        inputs = net.getInputsInfo();
        outputs = net.getOutputsInfo();

        // The practice shows that not all inputs and not all outputs
        // are mandatory to specify in IE model.
        // So what we're concerned here about is:
        // if operation's (not topology's) input/output number is
        // greater than 1, then we do care about input/output layer
        // names. Otherwise, names are picked up automatically.
        // TODO: Probably this check could be done at the API entry point? (gnet)
        if (params.num_in > 1u && params.num_in != params.input_names.size()) {
            cv::util::throw_error(std::logic_error("Please specify input layer names for "
                                                   + params.model_path));
        }
        if (params.num_out > 1u && params.num_out != params.output_names.size()) {
            cv::util::throw_error(std::logic_error("Please specify output layer names for "
                                                   + params.model_path));
        }
        // Single-port case: pick the (only expected) layer name from the model.
        if (params.num_in == 1u && params.input_names.empty()) {
            params.input_names = { inputs.begin()->first };
        }
        if (params.num_out == 1u && params.output_names.empty()) {
            params.output_names = { outputs.begin()->first };
        }
    }

    // This method is [supposed to be] called at Island compilation stage.
    // Loads the network onto the device plugin, creates an infer request,
    // and pre-binds the user's constant inputs to it.
    cv::gimpl::ie::IECompiled compile() const {
        auto this_plugin = IE::PluginDispatcher().getPluginByDevice(params.device_id);
        auto this_network = this_plugin.LoadNetwork(net, {}); // FIXME: 2nd parameter to be
                                                              // configurable via the API
        auto this_request = this_network.CreateInferRequest();

        // Bind const data to infer request
        for (auto &&p : params.const_inputs) {
            this_request.SetBlob(p.first, wrapIE(p.second));
        }

        return {this_plugin, this_network, this_request};
    }
};
||||
|
||||
// Per-call execution context: holds the packed input arguments and the
// output object pointers for one inference operation invocation.
struct IECallContext
{
    // Input parameters passed to an inference operation.
    std::vector<cv::GArg> args;

    //FIXME: avoid conversion of arguments from internal representaion to OpenCV one on each call
    //to OCV kernel. (This can be achieved by a two single time conversions in GCPUExecutable::run,
    //once on enter for input and output arguments, and once before return for output arguments only
    //FIXME: check if the above applies to this backend (taken from CPU)
    std::unordered_map<std::size_t, cv::GRunArgP> results;

    // Generic accessor API
    template<typename T>
    const T& inArg(std::size_t input) { return args.at(input).get<T>(); }

    // Syntax sugar
    const cv::gapi::own::Mat& inMat(std::size_t input) {
        return inArg<cv::gapi::own::Mat>(input);
    }
    // Writable reference to the output Mat at the given port.
    cv::gapi::own::Mat& outMatR(std::size_t output) {
        return *cv::util::get<cv::gapi::own::Mat*>(results.at(output));
    }

    // Writable reference to the output vector at the given port.
    template<typename T> std::vector<T>& outVecR(std::size_t output) { // FIXME: the same issue
        return outVecRef(output).wref<T>();
    }
    cv::detail::VectorRef& outVecRef(std::size_t output) {
        return cv::util::get<cv::detail::VectorRef>(results.at(output));
    }
};
||||
|
||||
// Graph metadata slot holding the type-erased run function of an
// inference kernel (Infer or InferList).
struct IECallable {
    static const char *name() { return "IERequestCallable"; }
    // FIXME: Make IECallContext manage them all? (3->1)
    using Run = std::function<void(cv::gimpl::ie::IECompiled &, const IEUnit &, IECallContext &)>;
    Run run;
};
||||
|
||||
// Pair of callbacks forming a kernel implementation for this backend:
// the custom output-meta function and the run function.
struct KImpl {
    cv::gimpl::CustomMetaFunction::CM customMetaFunc;
    IECallable::Run run;
};
||||
|
||||
// FIXME: Is there a way to take a typed graph (our GModel),
// and create a new typed graph _ATOP_ of that (by extending with a couple of
// new types?).
// Alternatively, is there a way to compose types graphs?
//
// If not, we need to introduce that!
// Mutable typed view over the graph with the IE-specific metadata slots.
using GIEModel = ade::TypedGraph
    < cv::gimpl::Protocol
    , cv::gimpl::Op
    , cv::gimpl::NetworkParams
    , cv::gimpl::CustomMetaFunction
    , IEUnit
    , IECallable
    >;

// FIXME: Same issue with Typed and ConstTyped
// Read-only counterpart of GIEModel with the same metadata slots.
using GConstGIEModel = ade::ConstTypedGraph
    < cv::gimpl::Protocol
    , cv::gimpl::Op
    , cv::gimpl::NetworkParams
    , cv::gimpl::CustomMetaFunction
    , IEUnit
    , IECallable
    >;
||||
} // anonymous namespace
|
||||
|
||||
// GCPUExcecutable implementation //////////////////////////////////////////////
|
||||
// Walks the island's nodes, compiles the single OP node's IEUnit into
// an IE infer request, and records the island's DATA nodes. Throws if
// the island contains more than one OP or any const/internal data.
cv::gimpl::ie::GIEExecutable::GIEExecutable(const ade::Graph &g,
                                            const std::vector<ade::NodeHandle> &nodes)
    : m_g(g), m_gm(m_g) {
    // FIXME: Currently this backend is capable to run a single inference node only.
    // Need to extend our island fusion with merge/not-to-merge decision making parametrization
    GConstGIEModel iem(g);

    for (auto &nh : nodes) {
        switch (m_gm.metadata(nh).get<NodeType>().t) {
        case NodeType::OP:
            if (this_nh == nullptr) {
                this_nh = nh;
                // Load the network & create the infer request right away.
                this_iec = iem.metadata(this_nh).get<IEUnit>().compile();
            }
            else
                util::throw_error(std::logic_error("Multi-node inference is not supported!"));
            break;

        case NodeType::DATA: {
            m_dataNodes.push_back(nh);
            const auto &desc = m_gm.metadata(nh).get<Data>();
            if (desc.storage == Data::Storage::CONST_VAL) {
                util::throw_error(std::logic_error("No const data please!"));
            }
            if (desc.storage == Data::Storage::INTERNAL) {
                util::throw_error(std::logic_error("No internal data please!"));
            }
            break;
        }
        default: util::throw_error(std::logic_error("Unsupported NodeType type"));
        }
    }
}
||||
|
||||
// Resolves a graph-internal object reference (GOBJREF) in `arg` to the
// actual run-time object stored in this executable's magazine (m_res),
// wrapping it back into a GArg for the kernel to consume.
// FIXME: Document what it does
cv::GArg cv::gimpl::ie::GIEExecutable::packArg(const cv::GArg &arg) {
    // No API placeholders allowed at this point
    // FIXME: this check has to be done somewhere in compilation stage.
    GAPI_Assert(   arg.kind != cv::detail::ArgKind::GMAT
                && arg.kind != cv::detail::ArgKind::GSCALAR
                && arg.kind != cv::detail::ArgKind::GARRAY);

    if (arg.kind != cv::detail::ArgKind::GOBJREF) {
        util::throw_error(std::logic_error("Inference supports G-types ONLY!"));
    }
    GAPI_Assert(arg.kind == cv::detail::ArgKind::GOBJREF);

    // Wrap associated CPU object (either host or an internal one)
    // FIXME: object can be moved out!!! GExecutor faced that.
    const cv::gimpl::RcDesc &ref = arg.get<cv::gimpl::RcDesc>();
    switch (ref.shape)
    {
    case GShape::GMAT: return GArg(m_res.slot<cv::gapi::own::Mat>()[ref.id]);

    // Note: .at() is intentional for GArray as object MUST be already there
    // (and constructed by either bindIn/Out or resetInternal)
    case GShape::GARRAY: return GArg(m_res.slot<cv::detail::VectorRef>().at(ref.id));

    default:
        util::throw_error(std::logic_error("Unsupported GShape type"));
        break;
    }
}
||||
|
||||
// Executes the island: binds the received input/output objects into the
// magazine, builds a call context for the single inference node, runs
// its kernel, and writes the results back to the output objects.
void cv::gimpl::ie::GIEExecutable::run(std::vector<InObj> &&input_objs,
                                       std::vector<OutObj> &&output_objs) {
    // Update resources with run-time information - what this Island
    // has received from user (or from another Island, or mix...)
    // FIXME: Check input/output objects against GIsland protocol

    for (auto& it : input_objs)  magazine::bindInArg (m_res, it.first, it.second);
    for (auto& it : output_objs) magazine::bindOutArg(m_res, it.first, it.second);

    // FIXME: Running just a single node now.
    // Not sure if need to support many of them, though
    // FIXME: Make this island-unmergeable?
    const auto &op = m_gm.metadata(this_nh).get<Op>();

    // Initialize kernel's execution context:
    // - Input parameters
    IECallContext context;
    context.args.reserve(op.args.size());
    using namespace std::placeholders;
    ade::util::transform(op.args,
                         std::back_inserter(context.args),
                         std::bind(&GIEExecutable::packArg, this, _1));

    // - Output parameters.
    for (const auto &out_it : ade::util::indexed(op.outs)) {
        // FIXME: Can the same GArg type resolution mechanism be reused here?
        const auto out_port = ade::util::index(out_it);
        const auto out_desc = ade::util::value(out_it);
        context.results[out_port] = magazine::getObjPtr(m_res, out_desc);
    }

    // And now trigger the execution
    GConstGIEModel giem(m_g);
    const auto &uu = giem.metadata(this_nh).get<IEUnit>();
    const auto &kk = giem.metadata(this_nh).get<IECallable>();
    kk.run(this_iec, uu, context);

    // Propagate results to the caller-visible output objects.
    for (auto &it : output_objs) magazine::writeBack(m_res, it.first, it.second);
}
||||
|
||||
namespace cv { |
||||
namespace gimpl { |
||||
namespace ie { |
||||
|
||||
struct Infer: public cv::detail::KernelTag { |
||||
using API = cv::GInferBase; |
||||
static cv::gapi::GBackend backend() { return cv::gapi::ie::backend(); } |
||||
static KImpl kernel() { return KImpl{outMeta, run}; } |
||||
|
||||
static cv::GMetaArgs outMeta(const ade::Graph &gr, |
||||
const ade::NodeHandle &nh, |
||||
const cv::GMetaArgs &in_metas, |
||||
const cv::GArgs &/*in_args*/) { |
||||
// Specify network's output layer metadata to the framework
|
||||
// Also specify the input information to the IE from the framework
|
||||
// NB: Have no clue if network's input [dimensions] may ever define
|
||||
// its output dimensions. It seems possible with OpenCV DNN APIs
|
||||
|
||||
cv::GMetaArgs result; |
||||
|
||||
GConstGIEModel gm(gr); |
||||
const auto &uu = gm.metadata(nh).get<IEUnit>(); |
||||
|
||||
// Initialize input information
|
||||
// Note our input layers list order matches the API order and so
|
||||
// meta order.
|
||||
GAPI_Assert(uu.params.input_names.size() == in_metas.size() |
||||
&& "Known input layers count doesn't match input meta count"); |
||||
|
||||
for (auto &&it : ade::util::zip(ade::util::toRange(uu.params.input_names), |
||||
ade::util::toRange(in_metas))) { |
||||
auto &&ii = uu.inputs.at(std::get<0>(it)); |
||||
const auto & mm = std::get<1>(it); |
||||
|
||||
GAPI_Assert(util::holds_alternative<cv::GMatDesc>(mm) |
||||
&& "Non-GMat inputs are not supported"); |
||||
|
||||
const auto &meta = util::get<cv::GMatDesc>(mm); |
||||
ii->setPrecision(toIE(meta.depth)); |
||||
ii->setLayout(meta.isND() ? IE::Layout::NCHW : IE::Layout::NHWC); |
||||
ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR); |
||||
} |
||||
|
||||
// FIXME: It would be nice here to have an exact number of network's
|
||||
// input/output parameters. Probably GCall should store it here for us.
|
||||
// It doesn't, as far as I know..
|
||||
for (const auto &out_name : uu.params.output_names) { |
||||
// NOTE: our output_names vector follows the API order
|
||||
// of this operation's outputs
|
||||
const IE::DataPtr& ie_out = uu.outputs.at(out_name); |
||||
const IE::SizeVector dims = ie_out->getTensorDesc().getDims(); |
||||
|
||||
cv::GMatDesc outm(toCV(ie_out->getPrecision()), |
||||
toCV(ie_out->getTensorDesc().getDims())); |
||||
result.emplace_back(outm); |
||||
} |
||||
return result; |
||||
} |
||||
|
||||
static void run(IECompiled &iec, const IEUnit &uu, IECallContext &ctx) { |
||||
// non-generic version for now:
|
||||
// - assumes all inputs/outputs are always Mats
|
||||
for (auto i : ade::util::iota(uu.params.num_in)) { |
||||
// TODO: Ideally we shouldn't do SetBlob() but GetBlob() instead,
|
||||
// and redirect our data producers to this memory
|
||||
// (A memory dialog comes to the picture again)
|
||||
|
||||
const cv::Mat this_mat = to_ocv(ctx.inMat(i)); |
||||
IE::Blob::Ptr this_blob = wrapIE(this_mat); |
||||
iec.this_request.SetBlob(uu.params.input_names[i], this_blob); |
||||
} |
||||
iec.this_request.Infer(); |
||||
for (auto i : ade::util::iota(uu.params.num_out)) { |
||||
// TODO: Think on avoiding copying here.
|
||||
// Either we should ask IE to use our memory (what is not always the
|
||||
// best policy) or use IE-allocated buffer inside (and pass it to the graph).
|
||||
// Not a <very> big deal for classifiers and detectors,
|
||||
// but may be critical to segmentation.
|
||||
|
||||
cv::gapi::own::Mat& out_mat = ctx.outMatR(i); |
||||
IE::Blob::Ptr this_blob = iec.this_request.GetBlob(uu.params.output_names[i]); |
||||
copyFromIE(this_blob, out_mat); |
||||
} |
||||
} |
||||
}; |
||||
|
||||
struct InferList: public cv::detail::KernelTag { |
||||
using API = cv::GInferListBase; |
||||
static cv::gapi::GBackend backend() { return cv::gapi::ie::backend(); } |
||||
static KImpl kernel() { return KImpl{outMeta, run}; } |
||||
|
||||
static cv::GMetaArgs outMeta(const ade::Graph &gr, |
||||
const ade::NodeHandle &nh, |
||||
const cv::GMetaArgs &in_metas, |
||||
const cv::GArgs &/*in_args*/) { |
||||
// Specify the input information to the IE from the framework
|
||||
// NB: Have no clue if network's input [dimensions] may ever define
|
||||
// its output dimensions. It seems possible with OpenCV DNN APIs
|
||||
|
||||
GConstGIEModel gm(gr); |
||||
const auto &uu = gm.metadata(nh).get<IEUnit>(); |
||||
|
||||
// Initialize input information
|
||||
// Note our input layers list order matches the API order and so
|
||||
// meta order.
|
||||
GAPI_Assert(uu.params.input_names.size() == (in_metas.size() - 1u) |
||||
&& "Known input layers count doesn't match input meta count"); |
||||
|
||||
std::size_t idx = 1u; |
||||
for (auto &&input_name : uu.params.input_names) { |
||||
auto &&ii = uu.inputs.at(input_name); |
||||
const auto & mm = in_metas[idx++]; |
||||
|
||||
GAPI_Assert(util::holds_alternative<cv::GMatDesc>(mm) |
||||
&& "Non-GMat inputs are not supported"); |
||||
|
||||
const auto &meta = util::get<cv::GMatDesc>(mm); |
||||
ii->setPrecision(toIE(meta.depth)); |
||||
ii->setLayout(meta.isND() ? IE::Layout::NCHW : IE::Layout::NHWC); |
||||
ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR); |
||||
} |
||||
|
||||
// roi-list version is much easier at the moment.
|
||||
// All our outputs are vectors which don't have
|
||||
// metadata at the moment - so just create a vector of
|
||||
// "empty" array metadatas of the required size.
|
||||
return cv::GMetaArgs(uu.params.output_names.size(), |
||||
cv::GMetaArg{cv::empty_array_desc()}); |
||||
} |
||||
|
||||
static void run(IECompiled &iec, const IEUnit &uu, IECallContext &ctx) { |
||||
// non-generic version for now:
|
||||
// - assumes zero input is always ROI list
|
||||
// - assumes all inputs/outputs are always Mats
|
||||
GAPI_Assert(uu.params.num_in == 1); // roi list is not counted in net's inputs
|
||||
|
||||
const auto& in_roi_vec = ctx.inArg<cv::detail::VectorRef>(0u).rref<cv::Rect>(); |
||||
const cv::Mat this_mat = to_ocv(ctx.inMat(1u)); |
||||
IE::Blob::Ptr this_blob = wrapIE(this_mat); |
||||
|
||||
// FIXME: This could be done ONCE at graph compile stage!
|
||||
std::vector< std::vector<int> > cached_dims(uu.params.num_out); |
||||
for (auto i : ade::util::iota(uu.params.num_out)) { |
||||
const IE::DataPtr& ie_out = uu.outputs.at(uu.params.output_names[i]); |
||||
cached_dims[i] = toCV(ie_out->getTensorDesc().getDims()); |
||||
ctx.outVecR<cv::Mat>(i).clear(); |
||||
// FIXME: Isn't this should be done automatically
|
||||
// by some resetInternalData(), etc? (Probably at the GExecutor level)
|
||||
} |
||||
|
||||
for (const auto &rc : in_roi_vec) { |
||||
// FIXME: Assumed only 1 input
|
||||
IE::Blob::Ptr roi_blob = IE::make_shared_blob(this_blob, toIE(rc)); |
||||
iec.this_request.SetBlob(uu.params.input_names[0u], roi_blob); |
||||
iec.this_request.Infer(); |
||||
|
||||
// While input is fixed to be 1,
|
||||
// there may be still multiple outputs
|
||||
for (auto i : ade::util::iota(uu.params.num_out)) { |
||||
std::vector<cv::Mat> &out_vec = ctx.outVecR<cv::Mat>(i); |
||||
|
||||
IE::Blob::Ptr out_blob = iec.this_request.GetBlob(uu.params.output_names[i]); |
||||
|
||||
cv::Mat out_mat(cached_dims[i], toCV(out_blob->getTensorDesc().getPrecision())); |
||||
copyFromIE(out_blob, out_mat); // FIXME: Avoid data copy. Not sure if it is possible though
|
||||
out_vec.push_back(std::move(out_mat)); |
||||
} |
||||
} |
||||
} |
||||
}; |
||||
|
||||
} // namespace ie
|
||||
} // namespace gapi
|
||||
} // namespace cv
|
||||
|
||||
|
||||
// IE backend implementation of GBackend::Priv ///////////////////////
|
||||
namespace {
// IE backend implementation of GBackend::Priv: translates the generic
// kernel/graph metadata into IE-specific node metadata and produces
// the island executable which actually runs inference.
class GIEBackendImpl final: public cv::gapi::GBackend::Priv {
    // Attach IE-specific metadata (network unit, callable, custom meta
    // function) to the operation node being unpacked.
    virtual void unpackKernel(ade::Graph            &gr,
                              const ade::NodeHandle &nh,
                              const cv::GKernelImpl &ii) override {
        using namespace cv::gimpl;
        // FIXME: Introduce a DNNBackend interface which'd specify
        // the framework for this???
        GIEModel gm(gr);
        const auto &np = gm.metadata(nh).get<NetworkParams>();
        // Both payloads are type-erased; any_cast throws on mismatch
        const auto &pp = cv::util::any_cast<cv::gapi::ie::detail::ParamDesc>(np.opaque);
        const auto &ki = cv::util::any_cast<KImpl>(ii.opaque);
        gm.metadata(nh).set(IEUnit{pp});
        gm.metadata(nh).set(IECallable{ki.run});
        gm.metadata(nh).set(CustomMetaFunction{ki.customMetaFunc});
    }

    // Produce the executable for an island of this backend's nodes.
    virtual EPtr compile(const ade::Graph &graph,
                         const cv::GCompileArgs &,
                         const std::vector<ade::NodeHandle> &nodes) const override {
        return EPtr{new cv::gimpl::ie::GIEExecutable(graph, nodes)};
    }

    // Kernels this backend implicitly provides (the inference operations).
    virtual cv::gapi::GKernelPackage auxiliaryKernels() const override {
        return cv::gapi::kernels< cv::gimpl::ie::Infer
                                , cv::gimpl::ie::InferList
                                >();
    }
};
}
||||
|
||||
cv::gapi::GBackend cv::gapi::ie::backend() { |
||||
static cv::gapi::GBackend this_backend(std::make_shared<GIEBackendImpl>()); |
||||
return this_backend; |
||||
} |
||||
|
||||
cv::Mat cv::gapi::ie::util::to_ocv(InferenceEngine::Blob::Ptr blob) { |
||||
const auto& tdesc = blob->getTensorDesc(); |
||||
return cv::Mat(toCV(tdesc.getDims()), |
||||
toCV(tdesc.getPrecision()), |
||||
blob->buffer().as<uint8_t*>()); |
||||
} |
||||
|
||||
std::vector<int> cv::gapi::ie::util::to_ocv(const InferenceEngine::SizeVector &dims) { |
||||
return toCV(dims); |
||||
} |
||||
|
||||
InferenceEngine::Blob::Ptr cv::gapi::ie::util::to_ie(cv::Mat &blob) { |
||||
return wrapIE(blob); |
||||
} |
||||
|
||||
#endif // HAVE_INF_ENGINE
|
@ -0,0 +1,89 @@ |
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2018 Intel Corporation
|
||||
|
||||
#ifndef OPENCV_GAPI_GIEBACKEND_HPP |
||||
#define OPENCV_GAPI_GIEBACKEND_HPP |
||||
|
||||
#ifdef HAVE_INF_ENGINE |
||||
|
||||
#include <ade/util/algorithm.hpp> // type_list_index |
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// FIXME: Suppress deprecation warnings for OpenVINO 2019R2+
|
||||
// BEGIN {{{
|
||||
#if defined(__GNUC__) |
||||
#pragma GCC diagnostic ignored "-Wdeprecated-declarations" |
||||
#endif |
||||
#ifdef _MSC_VER |
||||
#pragma warning(disable: 4996) // was declared deprecated
|
||||
#endif |
||||
|
||||
#if defined(__GNUC__) |
||||
#pragma GCC visibility push(default) |
||||
#endif |
||||
|
||||
#include <inference_engine.hpp> |
||||
|
||||
#if defined(__GNUC__) |
||||
#pragma GCC visibility pop |
||||
#endif |
||||
// END }}}
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#include <opencv2/gapi/garg.hpp> |
||||
#include <opencv2/gapi/gproto.hpp> |
||||
#include <opencv2/gapi/infer/ie.hpp> |
||||
|
||||
#include "api/gorigin.hpp" |
||||
#include "backends/common/gbackend.hpp" |
||||
#include "compiler/gislandmodel.hpp" |
||||
|
||||
namespace cv { |
||||
namespace gimpl { |
||||
namespace ie { |
||||
|
||||
struct IECompiled { |
||||
InferenceEngine::InferencePlugin this_plugin; |
||||
InferenceEngine::ExecutableNetwork this_network; |
||||
InferenceEngine::InferRequest this_request; |
||||
}; |
||||
|
||||
class GIEExecutable final: public GIslandExecutable |
||||
{ |
||||
const ade::Graph &m_g; |
||||
GModel::ConstGraph m_gm; |
||||
|
||||
// The only executable stuff in this graph
|
||||
// (assuming it is always single-op)
|
||||
ade::NodeHandle this_nh; |
||||
IECompiled this_iec; |
||||
|
||||
// List of all resources in graph (both internal and external)
|
||||
std::vector<ade::NodeHandle> m_dataNodes; |
||||
|
||||
// Actual data of all resources in graph (both internal and external)
|
||||
Mag m_res; |
||||
|
||||
// Execution helpers
|
||||
GArg packArg(const GArg &arg); |
||||
|
||||
public: |
||||
GIEExecutable(const ade::Graph &graph, |
||||
const std::vector<ade::NodeHandle> &nodes); |
||||
|
||||
virtual inline bool canReshape() const override { return false; } |
||||
virtual inline void reshape(ade::Graph&, const GCompileArgs&) override { |
||||
GAPI_Assert(false); // Not implemented yet
|
||||
} |
||||
|
||||
virtual void run(std::vector<InObj> &&input_objs, |
||||
std::vector<OutObj> &&output_objs) override; |
||||
}; |
||||
|
||||
}}} |
||||
|
||||
#endif // HAVE_INF_ENGINE
|
||||
#endif // OPENCV_GAPI_GIEBACKEND_HPP
|
@ -0,0 +1,281 @@ |
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2019 Intel Corporation
|
||||
|
||||
#include "../test_precomp.hpp" |
||||
|
||||
#ifdef HAVE_INF_ENGINE |
||||
|
||||
#include <stdexcept> |
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// FIXME: Suppress deprecation warnings for OpenVINO 2019R2+
|
||||
// BEGIN {{{
|
||||
#if defined(__GNUC__) |
||||
#pragma GCC diagnostic ignored "-Wdeprecated-declarations" |
||||
#endif |
||||
#ifdef _MSC_VER |
||||
#pragma warning(disable: 4996) // was declared deprecated
|
||||
#endif |
||||
|
||||
#if defined(__GNUC__) |
||||
#pragma GCC visibility push(default) |
||||
#endif |
||||
|
||||
#include <inference_engine.hpp> |
||||
|
||||
#if defined(__GNUC__) |
||||
#pragma GCC visibility pop |
||||
#endif |
||||
// END }}}
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#include <ade/util/iota_range.hpp> |
||||
|
||||
#include <opencv2/gapi/infer/ie.hpp> |
||||
#include <opencv2/gapi/infer/ie/util.hpp> |
||||
|
||||
namespace opencv_test |
||||
{ |
||||
namespace { |
||||
|
||||
// FIXME: taken from DNN module
|
||||
static void initDLDTDataPath() |
||||
{ |
||||
#ifndef WINRT |
||||
static bool initialized = false; |
||||
if (!initialized) |
||||
{ |
||||
const char* omzDataPath = getenv("OPENCV_OPEN_MODEL_ZOO_DATA_PATH"); |
||||
if (omzDataPath) |
||||
cvtest::addDataSearchPath(omzDataPath); |
||||
const char* dnnDataPath = getenv("OPENCV_DNN_TEST_DATA_PATH"); |
||||
if (dnnDataPath) { |
||||
// Add the dnnDataPath itself - G-API is using some images there directly
|
||||
cvtest::addDataSearchPath(dnnDataPath); |
||||
cvtest::addDataSearchPath(dnnDataPath + std::string("/omz_intel_models")); |
||||
} |
||||
initialized = true; |
||||
} |
||||
#endif // WINRT
|
||||
} |
||||
|
||||
// FIXME: taken from the DNN module
|
||||
void normAssert(cv::InputArray ref, cv::InputArray test, |
||||
const char *comment /*= ""*/, |
||||
double l1 = 0.00001, double lInf = 0.0001) |
||||
{ |
||||
double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total(); |
||||
EXPECT_LE(normL1, l1) << comment; |
||||
|
||||
double normInf = cvtest::norm(ref, test, cv::NORM_INF); |
||||
EXPECT_LE(normInf, lInf) << comment; |
||||
} |
||||
|
||||
} // anonymous namespace
|
||||
|
||||
// TODO: Probably DNN/IE part can be further parametrized with a template
|
||||
// NOTE: here ".." is used to leave the default "gapi/" search scope
|
||||
TEST(TestAgeGenderIE, InferBasicTensor) |
||||
{ |
||||
initDLDTDataPath(); |
||||
|
||||
const std::string path = "Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013"; |
||||
const auto topology_path = findDataFile(path + ".xml", false); |
||||
const auto weights_path = findDataFile(path + ".bin", false); |
||||
|
||||
// Load IE network, initialize input data using that.
|
||||
namespace IE = InferenceEngine; |
||||
cv::Mat in_mat; |
||||
cv::Mat gapi_age, gapi_gender; |
||||
|
||||
IE::Blob::Ptr ie_age, ie_gender; |
||||
{ |
||||
IE::CNNNetReader reader; |
||||
reader.ReadNetwork(topology_path); |
||||
reader.ReadWeights(weights_path); |
||||
auto net = reader.getNetwork(); |
||||
|
||||
const auto &iedims = net.getInputsInfo().begin()->second->getDims(); |
||||
auto cvdims = cv::gapi::ie::util::to_ocv(iedims); |
||||
std::reverse(cvdims.begin(), cvdims.end()); |
||||
in_mat.create(cvdims, CV_32F); |
||||
cv::randu(in_mat, -1, 1); |
||||
|
||||
auto plugin = IE::PluginDispatcher().getPluginByDevice("CPU"); |
||||
auto plugin_net = plugin.LoadNetwork(net, {}); |
||||
auto infer_request = plugin_net.CreateInferRequest(); |
||||
|
||||
infer_request.SetBlob("data", cv::gapi::ie::util::to_ie(in_mat)); |
||||
infer_request.Infer(); |
||||
ie_age = infer_request.GetBlob("age_conv3"); |
||||
ie_gender = infer_request.GetBlob("prob"); |
||||
} |
||||
|
||||
// Configure & run G-API
|
||||
using AGInfo = std::tuple<cv::GMat, cv::GMat>; |
||||
G_API_NET(AgeGender, <AGInfo(cv::GMat)>, "test-age-gender"); |
||||
|
||||
cv::GMat in; |
||||
cv::GMat age, gender; |
||||
std::tie(age, gender) = cv::gapi::infer<AgeGender>(in); |
||||
cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender)); |
||||
|
||||
auto pp = cv::gapi::ie::Params<AgeGender> { |
||||
topology_path, weights_path, "CPU" |
||||
}.cfgOutputLayers({ "age_conv3", "prob" }); |
||||
comp.apply(cv::gin(in_mat), cv::gout(gapi_age, gapi_gender), |
||||
cv::compile_args(cv::gapi::networks(pp))); |
||||
|
||||
// Validate with IE itself (avoid DNN module dependency here)
|
||||
normAssert(cv::gapi::ie::util::to_ocv(ie_age), gapi_age, "Test age output" ); |
||||
normAssert(cv::gapi::ie::util::to_ocv(ie_gender), gapi_gender, "Test gender output"); |
||||
} |
||||
|
||||
TEST(TestAgeGenderIE, InferBasicImage) |
||||
{ |
||||
initDLDTDataPath(); |
||||
|
||||
const std::string path = "Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013"; |
||||
const auto topology_path = findDataFile(path + ".xml", false); |
||||
const auto weights_path = findDataFile(path + ".bin", false); |
||||
|
||||
// FIXME: Ideally it should be an image from disk
|
||||
// cv::Mat in_mat = cv::imread(findDataFile("grace_hopper_227.png"));
|
||||
cv::Mat in_mat(cv::Size(320, 240), CV_8UC3); |
||||
cv::randu(in_mat, 0, 255); |
||||
|
||||
cv::Mat gapi_age, gapi_gender; |
||||
|
||||
// Load & run IE network
|
||||
namespace IE = InferenceEngine; |
||||
IE::Blob::Ptr ie_age, ie_gender; |
||||
{ |
||||
IE::CNNNetReader reader; |
||||
reader.ReadNetwork(topology_path); |
||||
reader.ReadWeights(weights_path); |
||||
auto net = reader.getNetwork(); |
||||
auto &ii = net.getInputsInfo().at("data"); |
||||
ii->setPrecision(IE::Precision::U8); |
||||
ii->setLayout(IE::Layout::NHWC); |
||||
ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR); |
||||
|
||||
auto plugin = IE::PluginDispatcher().getPluginByDevice("CPU"); |
||||
auto plugin_net = plugin.LoadNetwork(net, {}); |
||||
auto infer_request = plugin_net.CreateInferRequest(); |
||||
|
||||
infer_request.SetBlob("data", cv::gapi::ie::util::to_ie(in_mat)); |
||||
infer_request.Infer(); |
||||
ie_age = infer_request.GetBlob("age_conv3"); |
||||
ie_gender = infer_request.GetBlob("prob"); |
||||
} |
||||
|
||||
// Configure & run G-API
|
||||
using AGInfo = std::tuple<cv::GMat, cv::GMat>; |
||||
G_API_NET(AgeGender, <AGInfo(cv::GMat)>, "test-age-gender"); |
||||
|
||||
cv::GMat in; |
||||
cv::GMat age, gender; |
||||
std::tie(age, gender) = cv::gapi::infer<AgeGender>(in); |
||||
cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender)); |
||||
|
||||
auto pp = cv::gapi::ie::Params<AgeGender> { |
||||
topology_path, weights_path, "CPU" |
||||
}.cfgOutputLayers({ "age_conv3", "prob" }); |
||||
comp.apply(cv::gin(in_mat), cv::gout(gapi_age, gapi_gender), |
||||
cv::compile_args(cv::gapi::networks(pp))); |
||||
|
||||
// Validate with IE itself (avoid DNN module dependency here)
|
||||
normAssert(cv::gapi::ie::util::to_ocv(ie_age), gapi_age, "Test age output" ); |
||||
normAssert(cv::gapi::ie::util::to_ocv(ie_gender), gapi_gender, "Test gender output"); |
||||
} |
||||
|
||||
TEST(TestAgeGenderIE, InferROIList) |
||||
{ |
||||
initDLDTDataPath(); |
||||
|
||||
const std::string path = "Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013"; |
||||
const auto topology_path = findDataFile(path + ".xml", false); |
||||
const auto weights_path = findDataFile(path + ".bin", false); |
||||
|
||||
// FIXME: Ideally it should be an image from disk
|
||||
// cv::Mat in_mat = cv::imread(findDataFile("grace_hopper_227.png"));
|
||||
cv::Mat in_mat(cv::Size(640, 480), CV_8UC3); |
||||
cv::randu(in_mat, 0, 255); |
||||
|
||||
std::vector<cv::Rect> rois = { |
||||
cv::Rect(cv::Point{ 0, 0}, cv::Size{80, 120}), |
||||
cv::Rect(cv::Point{50, 100}, cv::Size{96, 160}), |
||||
}; |
||||
|
||||
std::vector<cv::Mat> gapi_age, gapi_gender; |
||||
|
||||
// Load & run IE network
|
||||
namespace IE = InferenceEngine; |
||||
std::vector<cv::Mat> ie_age, ie_gender; |
||||
{ |
||||
IE::CNNNetReader reader; |
||||
reader.ReadNetwork(topology_path); |
||||
reader.ReadWeights(weights_path); |
||||
auto net = reader.getNetwork(); |
||||
auto &ii = net.getInputsInfo().at("data"); |
||||
ii->setPrecision(IE::Precision::U8); |
||||
ii->setLayout(IE::Layout::NHWC); |
||||
ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR); |
||||
|
||||
auto plugin = IE::PluginDispatcher().getPluginByDevice("CPU"); |
||||
auto plugin_net = plugin.LoadNetwork(net, {}); |
||||
auto infer_request = plugin_net.CreateInferRequest(); |
||||
auto frame_blob = cv::gapi::ie::util::to_ie(in_mat); |
||||
|
||||
for (auto &&rc : rois) { |
||||
const auto ie_rc = IE::ROI { |
||||
0u |
||||
, static_cast<std::size_t>(rc.x) |
||||
, static_cast<std::size_t>(rc.y) |
||||
, static_cast<std::size_t>(rc.width) |
||||
, static_cast<std::size_t>(rc.height) |
||||
}; |
||||
infer_request.SetBlob("data", IE::make_shared_blob(frame_blob, ie_rc)); |
||||
infer_request.Infer(); |
||||
|
||||
using namespace cv::gapi::ie::util; |
||||
ie_age.push_back(to_ocv(infer_request.GetBlob("age_conv3")).clone()); |
||||
ie_gender.push_back(to_ocv(infer_request.GetBlob("prob")).clone()); |
||||
} |
||||
} |
||||
|
||||
// Configure & run G-API
|
||||
using AGInfo = std::tuple<cv::GMat, cv::GMat>; |
||||
G_API_NET(AgeGender, <AGInfo(cv::GMat)>, "test-age-gender"); |
||||
|
||||
cv::GArray<cv::Rect> rr; |
||||
cv::GMat in; |
||||
cv::GArray<cv::GMat> age, gender; |
||||
std::tie(age, gender) = cv::gapi::infer<AgeGender>(rr, in); |
||||
cv::GComputation comp(cv::GIn(in, rr), cv::GOut(age, gender)); |
||||
|
||||
auto pp = cv::gapi::ie::Params<AgeGender> { |
||||
topology_path, weights_path, "CPU" |
||||
}.cfgOutputLayers({ "age_conv3", "prob" }); |
||||
comp.apply(cv::gin(in_mat, rois), cv::gout(gapi_age, gapi_gender), |
||||
cv::compile_args(cv::gapi::networks(pp))); |
||||
|
||||
// Validate with IE itself (avoid DNN module dependency here)
|
||||
ASSERT_EQ(2u, ie_age.size() ); |
||||
ASSERT_EQ(2u, ie_gender.size()); |
||||
ASSERT_EQ(2u, gapi_age.size() ); |
||||
ASSERT_EQ(2u, gapi_gender.size()); |
||||
|
||||
normAssert(ie_age [0], gapi_age [0], "0: Test age output"); |
||||
normAssert(ie_gender[0], gapi_gender[0], "0: Test gender output"); |
||||
normAssert(ie_age [1], gapi_age [1], "1: Test age output"); |
||||
normAssert(ie_gender[1], gapi_gender[1], "1: Test gender output"); |
||||
} |
||||
|
||||
|
||||
} // namespace opencv_test
|
||||
|
||||
#endif // HAVE_INF_ENGINE
|
Loading…
Reference in new issue