diff --git a/modules/gapi/include/opencv2/gapi/garray.hpp b/modules/gapi/include/opencv2/gapi/garray.hpp
index 35b1f00996..3f18af9229 100644
--- a/modules/gapi/include/opencv2/gapi/garray.hpp
+++ b/modules/gapi/include/opencv2/gapi/garray.hpp
@@ -49,6 +49,31 @@ std::ostream& operator<<(std::ostream& os, const cv::GArrayDesc &desc);
 
 namespace detail
 {
+    // FIXME: This type spec needs to be:
+    // 1) shared with GOpaque (not needed right now)
+    // 2) unified with the serialization (S11N, not merged right now).
+    // Adding it to type traits is problematic due to our header deps
+    // (which also need to be fixed).
+    enum class TypeSpec: int {
+        OPAQUE_SPEC,
+        MAT,
+        RECT
+    };
+    // FIXME: Reuse the below from "opaque traits" of S11N!
+    template<typename T> struct GTypeSpec;
+    template<typename T> struct GTypeSpec
+    {
+        static constexpr const TypeSpec spec = TypeSpec::OPAQUE_SPEC;
+    };
+    template<> struct GTypeSpec<cv::Mat>
+    {
+        static constexpr const TypeSpec spec = TypeSpec::MAT;
+    };
+    template<> struct GTypeSpec<cv::Rect>
+    {
+        static constexpr const TypeSpec spec = TypeSpec::RECT;
+    };
+
     // ConstructVec is a callback which stores information about T and is used by
     // G-API runtime to construct arrays in host memory (T remains opaque for G-API).
     // ConstructVec is carried into G-API internals by GArrayU.
@@ -110,12 +135,15 @@ namespace detail
     class BasicVectorRef
     {
     public:
+        // These fields are set by the derived class(es)
         std::size_t    m_elemSize = 0ul;
         cv::GArrayDesc m_desc;
+        TypeSpec       m_spec;
         virtual ~BasicVectorRef() {}
 
         virtual void mov(BasicVectorRef &ref) = 0;
         virtual const void* ptr() const = 0;
+        virtual std::size_t size() const = 0;
     };
 
     template<typename T> class VectorRefT final: public BasicVectorRef
@@ -135,6 +163,7 @@ namespace detail
         {
             m_elemSize = sizeof(T);
             if (vec) m_desc = cv::descr_of(*vec);
+            m_spec = GTypeSpec<T>::spec;
         }
 
     public:
@@ -209,7 +238,9 @@ namespace detail
             wref() = std::move(tv->wref());
         }
 
         virtual const void* ptr() const override { return &rref(); }
+
+        virtual std::size_t size() const override { return rref().size(); }
     };
 
     // This class strips type information from VectorRefT<> and makes it usable
@@ -265,8 +296,18 @@ namespace detail
             return m_ref->m_desc;
         }
 
+        std::size_t size() const
+        {
+            return m_ref->size();
+        }
+
         // May be used to uniquely identify this object internally
         const void *ptr() const { return m_ref->ptr(); }
+
+        TypeSpec spec() const
+        {
+            return m_ref->m_spec;
+        }
     };
 
     // Helper (FIXME: work-around?)
diff --git a/modules/gapi/include/opencv2/gapi/gkernel.hpp b/modules/gapi/include/opencv2/gapi/gkernel.hpp
index 8fc029e683..7a18d8966f 100644
--- a/modules/gapi/include/opencv2/gapi/gkernel.hpp
+++ b/modules/gapi/include/opencv2/gapi/gkernel.hpp
@@ -26,6 +26,7 @@
 
 namespace cv {
 
+using GSpecs  = std::vector<detail::ArgSpec>;
 using GShapes = std::vector<GShape>;
 
 // GKernel describes kernel API to the system
@@ -38,8 +39,11 @@ struct GAPI_EXPORTS GKernel
     const std::string name;       // kernel ID, defined by its API (signature)
     const std::string tag;        // some (implementation-specific) tag
     const M           outMeta;    // generic adaptor to API::outMeta(...)
+    const GSpecs      inSpecs;    // specs of kernel's inputs (FIXME: below)
     const GShapes     outShapes;  // types (shapes) kernel's outputs
 };
+// TODO: It's questionable if inSpecs should really be here. Instead,
+// this information could come from meta.
 
 // GKernelImpl describes particular kernel implementation to the system
 struct GAPI_EXPORTS GKernelImpl
@@ -203,10 +207,15 @@ public:
     using InArgs  = std::tuple<Args...>;
     using OutArgs = std::tuple<R...>;
 
+    // TODO: Args&&... here?
     static std::tuple<R...> on(Args... args)
     {
-        cv::GCall call(GKernel{K::id(), K::tag(), &K::getOutMeta, {detail::GTypeTraits<R>::shape...}});
-        call.pass(args...);
+        cv::GCall call(GKernel{ K::id()
+                              , K::tag()
+                              , &K::getOutMeta
+                              , {detail::GTypeTraits<Args>::spec...}
+                              , {detail::GTypeTraits<R>::shape...}});
+        call.pass(args...); // TODO: std::forward() here?
         return yield(call, typename detail::MkSeq<sizeof...(R)>::type());
     }
 };
@@ -226,7 +235,11 @@ public:
 
     static R on(Args... args)
     {
-        cv::GCall call(GKernel{K::id(), K::tag(), &K::getOutMeta, {detail::GTypeTraits<R>::shape}});
+        cv::GCall call(GKernel{ K::id()
+                              , K::tag()
+                              , &K::getOutMeta
+                              , {detail::GTypeTraits<Args>::spec...}
+                              , {detail::GTypeTraits<R>::shape}});
         call.pass(args...);
         return detail::Yield<R>::yield(call, 0);
     }
diff --git a/modules/gapi/include/opencv2/gapi/gopaque.hpp b/modules/gapi/include/opencv2/gapi/gopaque.hpp
index 46c070a822..7d3d663398 100644
--- a/modules/gapi/include/opencv2/gapi/gopaque.hpp
+++ b/modules/gapi/include/opencv2/gapi/gopaque.hpp
@@ -46,6 +46,7 @@ std::ostream& operator<<(std::ostream& os, const cv::GOpaqueDesc &desc);
 
 namespace detail
 {
+
     // ConstructOpaque is a callback which stores information about T and is used by
     // G-API runtime to construct an object in host memory (T remains opaque for G-API).
     // ConstructOpaque is carried into G-API internals by GOpaqueU.
diff --git a/modules/gapi/include/opencv2/gapi/gtype_traits.hpp b/modules/gapi/include/opencv2/gapi/gtype_traits.hpp
index 1dd6146a24..0f22e3b26f 100644
--- a/modules/gapi/include/opencv2/gapi/gtype_traits.hpp
+++ b/modules/gapi/include/opencv2/gapi/gtype_traits.hpp
@@ -2,7 +2,7 @@
 // It is subject to the license terms in the LICENSE file found in the top-level directory
 // of this distribution and at http://opencv.org/license.html.
 //
-// Copyright (C) 2018 Intel Corporation
+// Copyright (C) 2018-2020 Intel Corporation
 
 
 #ifndef OPENCV_GAPI_GTYPE_TRAITS_HPP
@@ -41,6 +41,32 @@ namespace detail
         GOPAQUE,      // a cv::GOpaqueU (note - exactly GOpaqueU, not GOpaque!)
     };
 
+    // This enum captures some information about T in GArray<T> and GOpaque<T>
+    enum class ArgSpec: int
+    {
+        OPAQUE_SPEC,  // Unknown, generic, opaque-to-GAPI data type
+        GMAT,         // a GMat
+        RECT,         // a cv::Rect
+        // NB: Add more types when required
+    };
+
+    // Describe specialization types of interest first
+    // FIXME: It comes to GArg but ideally it should go to *Desc{}
+    // type family. Bringing it there is a more massive change though.
+    template<class T> struct GSpecTraits;
+    template<class T> struct GSpecTraits
+    {
+        static constexpr const ArgSpec spec = ArgSpec::OPAQUE_SPEC;
+    };
+    template<> struct GSpecTraits<cv::GMat>
+    {
+        static constexpr const ArgSpec spec = ArgSpec::GMAT;
+    };
+    template<> struct GSpecTraits<cv::Rect>
+    {
+        static constexpr const ArgSpec spec = ArgSpec::RECT;
+    };
+
     enum class OpaqueKind: int
     {
         CV_UNKNOWN,   // Unknown, generic, opaque-to-GAPI data type unsupported in graph serialization
@@ -69,35 +95,44 @@ namespace detail
     // cv::GArg to store meta information about types passed into
     // operation arguments. Please note that cv::GComputation is
     // defined on GProtoArgs, not GArgs!
+    //
+    // spec is a type specialization (makes sense for GArray<> and GOpaque<>);
+    // for the rest, it is just OPAQUE_SPEC by default.
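+    //
+    // For instance (an illustrative sketch, not part of the API contract),
+    // with the definitions below the following compile-time deductions hold:
+    //
+    //   static_assert(GTypeTraits<cv::GArray<cv::Rect> >::spec == ArgSpec::RECT, "");
+    //   static_assert(GTypeTraits<cv::GArray<cv::GMat> >::spec == ArgSpec::GMAT, "");
+    //   static_assert(GTypeTraits<cv::GArray<float> >::spec
+    //                 == ArgSpec::OPAQUE_SPEC, "");  // any other T stays opaque
+    //   static_assert(GTypeTraits<cv::GScalar>::spec
+    //                 == ArgSpec::OPAQUE_SPEC, "");  // non-containers are opaque, too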
 template<typename T> struct GTypeTraits;
 template<typename T> struct GTypeTraits
 {
     static constexpr const ArgKind kind = ArgKind::OPAQUE_VAL;
+    static constexpr const ArgSpec spec = ArgSpec::OPAQUE_SPEC;
 };
 template<> struct GTypeTraits<cv::GMat>
 {
     static constexpr const ArgKind kind = ArgKind::GMAT;
     static constexpr const GShape shape = GShape::GMAT;
+    static constexpr const ArgSpec spec = ArgSpec::OPAQUE_SPEC;
 };
 template<> struct GTypeTraits<cv::GMatP>
 {
     static constexpr const ArgKind kind = ArgKind::GMATP;
     static constexpr const GShape shape = GShape::GMAT;
+    static constexpr const ArgSpec spec = ArgSpec::OPAQUE_SPEC;
 };
 template<> struct GTypeTraits<cv::GFrame>
 {
     static constexpr const ArgKind kind = ArgKind::GFRAME;
     static constexpr const GShape shape = GShape::GMAT;
+    static constexpr const ArgSpec spec = ArgSpec::OPAQUE_SPEC;
 };
 template<> struct GTypeTraits<cv::GScalar>
 {
     static constexpr const ArgKind kind = ArgKind::GSCALAR;
     static constexpr const GShape shape = GShape::GSCALAR;
+    static constexpr const ArgSpec spec = ArgSpec::OPAQUE_SPEC;
 };
 template<typename T> struct GTypeTraits<cv::GArray<T> >
 {
     static constexpr const ArgKind kind = ArgKind::GARRAY;
     static constexpr const GShape shape = GShape::GARRAY;
+    static constexpr const ArgSpec spec = GSpecTraits<T>::spec;
     using host_type  = std::vector<T>;
     using strip_type = cv::detail::VectorRef;
     static cv::detail::GArrayU wrap_value(const cv::GArray<T> &t) { return t.strip();}
@@ -108,6 +143,7 @@ namespace detail
     {
         static constexpr const ArgKind kind = ArgKind::GOPAQUE;
         static constexpr const GShape shape = GShape::GOPAQUE;
+        static constexpr const ArgSpec spec = GSpecTraits<T>::spec;
         using host_type  = T;
         using strip_type = cv::detail::OpaqueRef;
         static cv::detail::GOpaqueU wrap_value(const cv::GOpaque<T> &t) { return t.strip();}
@@ -140,6 +176,7 @@ namespace detail
     template<> struct GTypeOf<cv::Scalar> { using type = cv::GScalar; };
     template<typename U> struct GTypeOf<std::vector<U> > { using type = cv::GArray<U>; };
     template<typename U> struct GTypeOf { using type = cv::GOpaque<U>;};
+
     // FIXME: This is not quite correct since IStreamSource may produce not only Mat but also Scalar
     // and vector data. TODO: Extend the type dispatching on these types too.
     template<> struct GTypeOf<cv::gapi::wip::IStreamSource::Ptr> { using type = cv::GMat;};
diff --git a/modules/gapi/include/opencv2/gapi/infer.hpp b/modules/gapi/include/opencv2/gapi/infer.hpp
index 5a4caff1f3..d108ca136c 100644
--- a/modules/gapi/include/opencv2/gapi/infer.hpp
+++ b/modules/gapi/include/opencv2/gapi/infer.hpp
@@ -14,6 +14,7 @@
 #include <functional>
 #include <string>      // string
 #include <utility>     // tuple
+#include <type_traits> // is_same, false_type
 
 #include <opencv2/gapi/util/any.hpp>  // any<>
 #include <opencv2/gapi/gkernel.hpp>   // GKernelType[M], GBackend
@@ -25,6 +26,43 @@ namespace cv {
 
 template<typename, typename> class GNetworkType;
 
+namespace detail {
+    template<typename, typename>
+    struct valid_infer2_types;
+
+    // Terminal case 1 (50/50 success)
+    template<typename T>
+    struct valid_infer2_types< std::tuple<cv::GMat>, std::tuple<T> > {
+        // By default, Nets are limited to GMat argument types only
+        // for infer2, every GMat argument may translate to either
+        // GArray<GMat> or GArray<Rect>. GArray<> part is stripped
+        // already at this point.
+        static constexpr const auto value =
+               std::is_same<typename std::decay<T>::type, cv::GMat>::value
+            || std::is_same<typename std::decay<T>::type, cv::Rect>::value;
+    };
+
+    // Terminal case 2 (100% failure)
+    template<typename... Ts>
+    struct valid_infer2_types< std::tuple<>, std::tuple<Ts...> >
+        : public std::false_type {
+    };
+
+    // Terminal case 3 (100% failure)
+    template<typename... Ns>
+    struct valid_infer2_types< std::tuple<Ns...>, std::tuple<> >
+        : public std::false_type {
+    };
+
+    // Recursion -- generic
+    template<typename... Ns, typename T, typename... Ts>
+    struct valid_infer2_types< std::tuple<cv::GMat,Ns...>, std::tuple<T,Ts...> > {
+        static constexpr const auto value =
+               valid_infer2_types< std::tuple<cv::GMat>, std::tuple<T> >::value
+            && valid_infer2_types< std::tuple<Ns...>, std::tuple<Ts...> >::value;
+    };
+} // namespace detail
+
 // TODO: maybe tuple_wrap_helper from util.hpp may help with this.
 // Multiple-return-value network definition (specialized base class)
 template<typename K, typename... R, typename... Args>
@@ -39,6 +77,15 @@ public:
 
     using ResultL = std::tuple< cv::GArray<R>... >;
     using APIList = std::function<ResultL(cv::GArray<cv::Rect>, Args...)>;
+
+    // APIList2 is also template to allow different calling options
+    // (GArray<cv::Rect> vs GArray<cv::GMat> per input)
+    template<typename... Ts>
+    using APIList2 = typename std::enable_if
+        < cv::detail::valid_infer2_types< std::tuple<Args...>
+                                        , std::tuple<Ts...> >::value,
+          std::function<ResultL(cv::GMat, cv::GArray<Ts>...)>
+        >::type;
 };
 
 // Single-return-value network definition (specialized base class)
@@ -54,6 +101,15 @@ public:
 
     using ResultL = cv::GArray<R>;
     using APIList = std::function<ResultL(cv::GArray<cv::Rect>, Args...)>;
+
+    // APIList2 is also template to allow different calling options
+    // (GArray<cv::Rect> vs GArray<cv::GMat> per input)
+    template<typename... Ts>
+    using APIList2 = typename std::enable_if
+        < cv::detail::valid_infer2_types< std::tuple<Args...>
+                                        , std::tuple<Ts...> >::value,
+          std::function<ResultL(cv::GMat, cv::GArray<Ts>...)>
+        >::type;
 };
 
 // Base "Infer" kernel. Note - for whatever network, kernel ID
@@ -77,10 +133,21 @@ struct GInferBase {
 // All notes from "Infer" kernel apply here as well.
 struct GInferListBase {
     static constexpr const char * id() {
-        return "org.opencv.dnn.infer-roi"; // Universal stub
+        return "org.opencv.dnn.infer-roi";      // Universal stub
     }
     static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) {
-        return GMetaArgs{}; // One more universal stub
+        return GMetaArgs{};                     // One more universal stub
     }
 };
 
+// Base "Infer list 2" kernel.
+// All notes from "Infer" kernel apply here as well.
+struct GInferList2Base {
+    static constexpr const char * id() {
+        return "org.opencv.dnn.infer-roi-list"; // Universal stub
+    }
+    static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) {
+        return GMetaArgs{};                     // One more universal stub
+    }
+};
+
@@ -109,6 +176,21 @@ struct GInferList final
     static constexpr const char* tag() { return Net::tag(); }
 };
 
+// An even more generic roi-list inference kernel. API (::on()) is
+// derived from the Net template parameter (see more in infer<>
+// overload).
+// Takes an extra variadic template list to reflect how this network
+// was called (with Rects or GMats as array parameters)
+template<typename Net, typename... Args>
+struct GInferList2 final
+    : public GInferList2Base
+    , public detail::KernelTypeMedium< GInferList2<Net, Args...>
+                                     , typename Net::template APIList2<Args...> > {
+    using GInferList2Base::getOutMeta; // FIXME: name lookup conflict workaround?
+
+    static constexpr const char* tag() { return Net::tag(); }
+};
+
 } // namespace cv
 
 // FIXME: Probably the signature makes a function/tuple/function round-trip
@@ -139,6 +221,30 @@ typename Net::ResultL infer(cv::GArray<cv::Rect> roi, Args&&... args) {
     return GInferList<Net>::on(roi, std::forward<Args>(args)...);
 }
 
+/** @brief Calculates responses for the specified network (template
+ *  parameter) for every region in the source image, extended version.
+ *
+ * @tparam A network type defined with G_API_NET() macro.
+ * @param image A source image containing regions of interest
+ * @param args GArray<> objects of cv::Rect or cv::GMat, one per every
+ * network input:
+ * - If a cv::GArray<cv::Rect> is passed, the appropriate
+ *   regions are taken from `image` and preprocessed to this particular
+ *   network input;
+ * - If a cv::GArray<cv::GMat> is passed, the underlying data is treated
+ *   as a tensor (no automatic preprocessing happens).
+ * @return a list of objects of return type as defined in G_API_NET().
+ *   If a network has multiple return values (defined with a tuple), a tuple of
+ *   GArray<> objects is returned with the appropriate types inside.
+ * @sa G_API_NET()
+ */
+template<typename Net, typename... Args>
+typename Net::ResultL infer2(cv::GMat image, cv::GArray<Args>... args) {
+    // FIXME: Declared as "2" because in the current form it steals
+    // overloads from the regular infer
+    return GInferList2<Net, Args...>::on(image, args...);
+}
+
 /**
  * @brief Calculates response for the specified network (template
  * parameter) given the input data.
diff --git a/modules/gapi/include/opencv2/gapi/infer/ie.hpp b/modules/gapi/include/opencv2/gapi/infer/ie.hpp
index 6e8c2c3abd..c6d7f272a8 100644
--- a/modules/gapi/include/opencv2/gapi/infer/ie.hpp
+++ b/modules/gapi/include/opencv2/gapi/infer/ie.hpp
@@ -78,8 +78,8 @@ public:
            const std::string &weights,
            const std::string &device)
         : desc{ model, weights, device, {}, {}, {}
-              , std::tuple_size<typename Net::InArgs>::value
-              , std::tuple_size<typename Net::OutArgs>::value
+              , std::tuple_size<typename Net::InArgs>::value  // num_in
+              , std::tuple_size<typename Net::OutArgs>::value // num_out
         } {
     };
diff --git a/modules/gapi/samples/gaze_estimation.cpp b/modules/gapi/samples/gaze_estimation.cpp
new file mode 100644
index 0000000000..6396d500c1
--- /dev/null
+++ b/modules/gapi/samples/gaze_estimation.cpp
@@ -0,0 +1,432 @@
+#include <algorithm>
+#include <iostream>
+#include <sstream>
+
+#include <opencv2/gapi.hpp>
+#include <opencv2/gapi/core.hpp>
+#include <opencv2/gapi/imgproc.hpp>
+#include <opencv2/gapi/infer.hpp>
+#include <opencv2/gapi/infer/ie.hpp>
+#include <opencv2/gapi/streaming/cap.hpp>
+#include <opencv2/highgui.hpp> // CommandLineParser
+
+const std::string about =
+    "This is an OpenCV-based version of Gaze Estimation example";
+const std::string keys =
+    "{ h help | | Print this help message }"
+    "{ input | | Path to the input video file }"
+    "{ facem | face-detection-retail-0005.xml | Path to OpenVINO face detection model (.xml) }"
+    "{ faced | CPU | Target device for the face detection (e.g. CPU, GPU, VPU, ...) }"
+    "{ landm | facial-landmarks-35-adas-0002.xml | Path to OpenVINO landmarks detector model (.xml) }"
+    "{ landd | CPU | Target device for the landmarks detector (e.g. CPU, GPU, VPU, ...) }"
+    "{ headm | head-pose-estimation-adas-0001.xml | Path to OpenVINO head pose estimation model (.xml) }"
+    "{ headd | CPU | Target device for the head pose estimation inference (e.g. CPU, GPU, VPU, ...) }"
+    "{ gazem | gaze-estimation-adas-0002.xml | Path to OpenVINO gaze vector estimation model (.xml) }"
+    "{ gazed | CPU | Target device for the gaze vector estimation inference (e.g. CPU, GPU, VPU, ...) }"
+    ;
+
+namespace {
+std::string weights_path(const std::string &model_path) {
+    const auto EXT_LEN = 4u;
+    const auto sz = model_path.size();
+    CV_Assert(sz > EXT_LEN);
+
+    auto ext = model_path.substr(sz - EXT_LEN);
+    auto lower = [](unsigned char c) {
+        return static_cast<unsigned char>(std::tolower(c));
+    };
+    std::transform(ext.begin(), ext.end(), ext.begin(), lower);
+    CV_Assert(ext == ".xml");
+    return model_path.substr(0u, sz - EXT_LEN) + ".bin";
+}
+} // anonymous namespace
+
+namespace custom {
+namespace {
+using GMat3  = std::tuple<cv::GMat, cv::GMat, cv::GMat>;
+using GMats  = cv::GArray<cv::GMat>;
+using GRects = cv::GArray<cv::Rect>;
+using GSize  = cv::GOpaque<cv::Size>;
+G_API_NET(Faces,     <cv::GMat(cv::GMat)>, "face-detector"   );
+G_API_NET(Landmarks, <cv::GMat(cv::GMat)>, "facial-landmarks");
+G_API_NET(HeadPose,  < GMat3(cv::GMat)>,   "head-pose"       );
+G_API_NET(Gaze,      <cv::GMat(cv::GMat, cv::GMat, cv::GMat)>, "gaze-vector");
+
+G_API_OP(Size, <GSize(cv::GMat)>, "custom.gapi.size") {
+    static cv::GOpaqueDesc outMeta(const cv::GMatDesc &) {
+        return cv::empty_gopaque_desc();
+    }
+};
+
+G_API_OP(ParseSSD,
+         <GRects(cv::GMat, GSize, bool)>,
+         "custom.gaze_estimation.parseSSD") {
+    static cv::GArrayDesc outMeta( const cv::GMatDesc &
+                                 , const cv::GOpaqueDesc &
+                                 , bool) {
+        return cv::empty_array_desc();
+    }
+};
+
+// Left/Right eye per every face
+G_API_OP(ParseEyes,
+         <std::tuple<GRects, GRects>(GMats, GRects, GSize)>,
+         "custom.gaze_estimation.parseEyes") {
+    static std::tuple<cv::GArrayDesc, cv::GArrayDesc>
+        outMeta( const cv::GArrayDesc &
+               , const cv::GArrayDesc &
+               , const cv::GOpaqueDesc &) {
+        return std::make_tuple(cv::empty_array_desc(), cv::empty_array_desc());
+    }
+};
+
+// Combine three scalars into a 1x3 vector (per every face)
+G_API_OP(ProcessPoses,
+         <GMats(GMats, GMats, GMats)>,
+         "custom.gaze_estimation.processPoses") {
+    static cv::GArrayDesc outMeta( const cv::GArrayDesc &
+                                 , const cv::GArrayDesc &
+                                 , const cv::GArrayDesc &) {
+        return cv::empty_array_desc();
+    }
+};
+
+void adjustBoundingBox(cv::Rect& boundingBox) {
+    auto w = boundingBox.width;
+    auto h = boundingBox.height;
+
+    boundingBox.x -= static_cast<int>(0.067 * w);
+    boundingBox.y -= static_cast<int>(0.028 * h);
+
+    boundingBox.width  += static_cast<int>(0.15 * w);
+    boundingBox.height += static_cast<int>(0.13 * h);
+
+    if (boundingBox.width < boundingBox.height) {
+        auto dx = (boundingBox.height - boundingBox.width);
+        boundingBox.x -= dx / 2;
+        boundingBox.width += dx;
+    } else {
+        auto dy = (boundingBox.width - boundingBox.height);
+        boundingBox.y -= dy / 2;
+        boundingBox.height += dy;
+    }
+}
+
+void gazeVectorToGazeAngles(const cv::Point3f& gazeVector,
+                            cv::Point2f& gazeAngles) {
+    auto r = cv::norm(gazeVector);
+
+    double v0 = static_cast<double>(gazeVector.x);
+    double v1 = static_cast<double>(gazeVector.y);
+    double v2 = static_cast<double>(gazeVector.z);
+
+    gazeAngles.x = static_cast<float>(180.0 / M_PI * (M_PI_2 + std::atan2(v2, v0)));
+    gazeAngles.y = static_cast<float>(180.0 / M_PI * (M_PI_2 - std::acos(v1 / r)));
+}
+
+GAPI_OCV_KERNEL(OCVSize, Size) {
+    static void run(const cv::Mat &in, cv::Size &out) {
+        out = in.size();
+    }
+};
+
+GAPI_OCV_KERNEL(OCVParseSSD, ParseSSD) {
+    static void run(const cv::Mat &in_ssd_result,
+                    const cv::Size &upscale,
+                    const bool filter_out_of_bounds,
+                    std::vector<cv::Rect> &out_objects) {
+        const auto &in_ssd_dims = in_ssd_result.size;
+        CV_Assert(in_ssd_dims.dims() == 4u);
+
+        const int MAX_PROPOSALS = in_ssd_dims[2];
+        const int OBJECT_SIZE   = in_ssd_dims[3];
+        CV_Assert(OBJECT_SIZE == 7); // fixed SSD object size
+
+        const cv::Rect surface({0,0}, upscale);
+        out_objects.clear();
+
+        const float *data = in_ssd_result.ptr<float>();
+        for (int i = 0; i < MAX_PROPOSALS; i++) {
+            const float image_id   = data[i * OBJECT_SIZE + 0];
+            const float label      = data[i * OBJECT_SIZE + 2];
+    const auto yaw   = static_cast<double>(posePtr[0]);
+    const auto pitch = static_cast<double>(posePtr[1]);
+    const auto roll  = static_cast<double>(posePtr[2]);
+
+    const auto sinY = std::sin(yaw   * M_PI / 180.0);
+    const auto sinP = std::sin(pitch * M_PI / 180.0);
+    const auto sinR = std::sin(roll  * M_PI / 180.0);
+
+    const auto cosY = std::cos(yaw   * M_PI / 180.0);
+    const auto cosP = std::cos(pitch * M_PI / 180.0);
+    const auto cosR = std::cos(roll  * M_PI / 180.0);
+
+    const auto axisLength = 0.4 * face_rc.width;
+    const auto xCenter = face_rc.x + face_rc.width  / 2;
+    const auto yCenter = face_rc.y + face_rc.height / 2;
+
+    const auto center = cv::Point{xCenter, yCenter};
+    const auto axisln = cv::Point2d{axisLength, axisLength};
+    const auto ctr = cv::Matx<double,2,2>(cosR*cosY, sinY*sinP*sinR, 0.f, cosP*sinR);
+    const auto ctt = cv::Matx<double,2,2>(cosR*sinY*sinP, cosY*sinR, 0.f, -cosP*cosR);
+    const auto ctf = cv::Matx<double,2,2>(sinY*cosP, 0.f, 0.f, sinP);
+
+    // center to right
+    cv::line(m, center, center + static_cast<cv::Point>(ctr*axisln), cv::Scalar(0, 0, 255), 2);
+    // center to top
+    cv::line(m, center, center + static_cast<cv::Point>(ctt*axisln), cv::Scalar(0, 255, 0), 2);
+    // center to forward
+    cv::line(m, center, center + static_cast<cv::Point>(ctf*axisln), cv::Scalar(255, 0, 255), 2);
+}
+void vvec(cv::Mat &m, const cv::Mat &v, const cv::Rect &face_rc,
+          const cv::Rect &left_rc, const cv::Rect &right_rc) {
+    const auto scale = 0.002 * face_rc.width;
+
+    cv::Point3f gazeVector;
+    const auto *gazePtr = v.ptr<float>();
+    gazeVector.x = gazePtr[0];
+    gazeVector.y = gazePtr[1];
+    gazeVector.z = gazePtr[2];
+    gazeVector = gazeVector / cv::norm(gazeVector);
+
+    const double arrowLength = 0.4 * face_rc.width;
+    const auto left_mid  = midp(left_rc);
+    const auto right_mid = midp(right_rc);
+
+    cv::Point2f gazeArrow;
+    gazeArrow.x =  gazeVector.x;
+    gazeArrow.y = -gazeVector.y;
+    gazeArrow  *= arrowLength;
+
+    cv::arrowedLine(m, left_mid,  left_mid  + gazeArrow, cv::Scalar(255, 0, 0), 2);
+    cv::arrowedLine(m, right_mid, right_mid + gazeArrow, cv::Scalar(255, 0, 0), 2);
+
+    cv::Point2f gazeAngles;
+    custom::gazeVectorToGazeAngles(gazeVector, gazeAngles);
+
+    cv::putText(m,
+                cv::format("gaze angles: (h=%0.0f, v=%0.0f)",
+                           static_cast<double>(std::round(gazeAngles.x)),
+                           static_cast<double>(std::round(gazeAngles.y))),
+                cv::Point(static_cast<int>(face_rc.tl().x),
+                          static_cast<int>(face_rc.br().y + 12. * face_rc.width / 100.)),
+                cv::FONT_HERSHEY_PLAIN, scale * 2, cv::Scalar::all(255), 1);
+};
+} // anonymous namespace
+} // namespace vis
+
+int main(int argc, char *argv[])
+{
+    cv::CommandLineParser cmd(argc, argv, keys);
+    cmd.about(about);
+    if (cmd.has("help")) {
+        cmd.printMessage();
+        return 0;
+    }
+
+    cv::GMat in;
+    cv::GMat faces = cv::gapi::infer<custom::Faces>(in);
+    cv::GOpaque<cv::Size> sz = custom::Size::on(in); // FIXME
+    cv::GArray<cv::Rect> faces_rc = custom::ParseSSD::on(faces, sz, true);
+    cv::GArray<cv::GMat> angles_y, angles_p, angles_r;
+    std::tie(angles_y, angles_p, angles_r) = cv::gapi::infer<custom::HeadPose>(faces_rc, in);
+    cv::GArray<cv::GMat> heads_pos = custom::ProcessPoses::on(angles_y, angles_p, angles_r);
+    cv::GArray<cv::GMat> landmarks = cv::gapi::infer<custom::Landmarks>(faces_rc, in);
+    cv::GArray<cv::Rect> left_eyes, right_eyes;
+    std::tie(left_eyes, right_eyes) = custom::ParseEyes::on(landmarks, faces_rc, sz);
+    cv::GArray<cv::GMat> gaze_vectors = cv::gapi::infer2<custom::Gaze>( in
+                                                                      , left_eyes
+                                                                      , right_eyes
+                                                                      , heads_pos);
+    cv::GComputation graph(cv::GIn(in),
+                           cv::GOut( cv::gapi::copy(in)
+                                   , faces_rc
+                                   , left_eyes
+                                   , right_eyes
+                                   , heads_pos
+                                   , gaze_vectors));
+
+    const auto input_file_name = cmd.get<std::string>("input");
+    const auto face_model_path = cmd.get<std::string>("facem");
+    const auto head_model_path = cmd.get<std::string>("headm");
+    const auto lmrk_model_path = cmd.get<std::string>("landm");
+    const auto gaze_model_path = cmd.get<std::string>("gazem");
+
+    auto face_net = cv::gapi::ie::Params<custom::Faces> {
+        face_model_path,                // path to topology IR
+        weights_path(face_model_path),  // path to weights
+        cmd.get<std::string>("faced"),  // device specifier
+    };
+    auto head_net = cv::gapi::ie::Params<custom::HeadPose> {
+        head_model_path,                // path to topology IR
+        weights_path(head_model_path),  // path to weights
+        cmd.get<std::string>("headd"),  // device specifier
+    }.cfgOutputLayers({"angle_y_fc", "angle_p_fc", "angle_r_fc"});
+    auto landmarks_net = cv::gapi::ie::Params<custom::Landmarks> {
+        lmrk_model_path,                // path to topology IR
+        weights_path(lmrk_model_path),  // path to weights
+        cmd.get<std::string>("landd"),  // device specifier
+    };
+    auto gaze_net = cv::gapi::ie::Params<custom::Gaze> {
+        gaze_model_path,                // path to topology IR
+        weights_path(gaze_model_path),  // path to weights
+        cmd.get<std::string>("gazed"),  // device specifier
+    }.cfgInputLayers({"left_eye_image", "right_eye_image", "head_pose_angles"});
+
+    auto kernels = cv::gapi::kernels< custom::OCVSize
+                                    , custom::OCVParseSSD
+                                    , custom::OCVParseEyes
+                                    , custom::OCVProcessPoses>();
+    auto networks = cv::gapi::networks(face_net, head_net, landmarks_net, gaze_net);
+    auto pipeline = graph.compileStreaming(cv::compile_args(networks, kernels));
+
+    cv::TickMeter tm;
+    cv::Mat image;
+    std::vector<cv::Rect> out_faces, out_right_eyes, out_left_eyes;
+    std::vector<cv::Mat>  out_poses;
+    std::vector<cv::Mat>  out_gazes;
+    std::size_t frames = 0u;
+    std::cout << "Reading " << input_file_name << std::endl;
+
+    pipeline.setSource(cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(input_file_name));
+    pipeline.start();
+    tm.start();
+    while (pipeline.pull(cv::gout( image
+                                 , out_faces
+                                 , out_left_eyes
+                                 , out_right_eyes
+                                 , out_poses
+                                 , out_gazes))) {
+        frames++;
+        // Visualize results on the frame
+        for (auto &&rc : out_faces)      vis::bbox(image, rc);
+        for (auto &&rc : out_left_eyes)  vis::bbox(image, rc);
+        for (auto &&rc : out_right_eyes) vis::bbox(image, rc);
+        for (std::size_t i = 0u; i < out_faces.size(); i++) {
+            vis::pose(image, out_poses[i], out_faces[i]);
+            vis::vvec(image, out_gazes[i], out_faces[i], out_left_eyes[i], out_right_eyes[i]);
+        }
+        tm.stop();
+        const auto fps_str = std::to_string(frames / tm.getTimeSec()) + " FPS";
+        cv::putText(image, fps_str, {0,32}, cv::FONT_HERSHEY_SIMPLEX, 1.0, {0,255,0}, 2);
+        cv::imshow("Out", image);
+        cv::waitKey(1);
+        tm.start();
+    }
+    tm.stop();
+    std::cout << "Processed " << frames << " frames"
+              << " (" << frames / tm.getTimeSec() << " FPS)" << std::endl;
+    return 0;
+}
diff --git a/modules/gapi/src/backends/ie/giebackend.cpp b/modules/gapi/src/backends/ie/giebackend.cpp
index b168685cf1..68a104c9d5 100644
--- a/modules/gapi/src/backends/ie/giebackend.cpp
+++ b/modules/gapi/src/backends/ie/giebackend.cpp
@@ -489,7 +489,6 @@ struct Infer: public cv::detail::KernelTag {
             const auto &meta = util::get<cv::GMatDesc>(mm);
 
             ii->setPrecision(toIE(meta.depth));
-            ii->setLayout(meta.isND() ? IE::Layout::NCHW : IE::Layout::NHWC);
             ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
         }
 
@@ -570,7 +569,6 @@ struct InferList: public cv::detail::KernelTag {
             const auto &meta = util::get<cv::GMatDesc>(mm);
 
             ii->setPrecision(toIE(meta.depth));
-            ii->setLayout(meta.isND() ? IE::Layout::NCHW : IE::Layout::NHWC);
             ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
         }
 
@@ -624,6 +622,137 @@ struct InferList: public cv::detail::KernelTag {
     }
 };
 
+struct InferList2: public cv::detail::KernelTag {
+    using API = cv::GInferList2Base;
+    static cv::gapi::GBackend backend() { return cv::gapi::ie::backend(); }
+    static KImpl kernel()               { return KImpl{outMeta, run}; }
+
+    static cv::GMetaArgs outMeta(const ade::Graph      &gr,
+                                 const ade::NodeHandle &nh,
+                                 const cv::GMetaArgs   &in_metas,
+                                 const cv::GArgs       &/*in_args*/) {
+        // Specify the input information to the IE from the framework
+        // NB: Have no clue if network's input [dimensions] may ever define
+        // its output dimensions. It seems possible with OpenCV DNN APIs
+
+        GConstGIEModel gm(gr);
+        const auto &uu = gm.metadata(nh).get<IEUnit>();
+
+        // Initialize input information
+        // Note our input layers list order matches the API order and so
+        // meta order.
+        GAPI_Assert(uu.params.input_names.size() == (in_metas.size() - 1u)
+                    && "Known input layers count doesn't match input meta count");
+
+        const auto &op = gm.metadata(nh).get<Op>();
+
+        // In contrast to InferList, InferList2 has only one
+        // "full-frame" image argument, and all the rest are arrays of
+        // either ROIs or blobs. So here we set the 0th arg image format
+        // to all inputs which are ROI-based (skipping the
+        // "blob"-based ones)
+        // FIXME: this filtering is not done, actually! GArrayDesc has
+        // no hint for its underlying type!
+        const auto &mm_0   = in_metas[0u];
+        const auto &meta_0 = util::get<cv::GMatDesc>(mm_0);
+        GAPI_Assert(   !meta_0.isND()
+                    && !meta_0.planar
+                    && "Only images are supported as the 0th argument");
+        std::size_t idx = 1u;
+        for (auto &&input_name : uu.params.input_names) {
+            auto &ii = uu.inputs.at(input_name);
+            const auto &mm = in_metas[idx];
+            GAPI_Assert(util::holds_alternative<cv::GArrayDesc>(mm)
+                        && "Non-array inputs are not supported");
+
+            if (op.k.inSpecs[idx] == cv::detail::ArgSpec::RECT) {
+                // This is a cv::Rect -- configure the IE preprocessing
+                ii->setPrecision(toIE(meta_0.depth));
+                ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
+            } else {
+                // This is a cv::GMat (equals to: cv::Mat)
+                // Just validate that it is really the type
+                // (other types are prohibited here)
+                GAPI_Assert(op.k.inSpecs[idx] == cv::detail::ArgSpec::GMAT);
+            }
+            idx++; // NB: Never forget to increment the counter
+        }
+
+        // roi-list version is much easier at the moment.
+        // All our outputs are vectors which don't have
+        // metadata at the moment - so just create a vector of
+        // "empty" array metadatas of the required size.
+        return cv::GMetaArgs(uu.params.output_names.size(),
+                             cv::GMetaArg{cv::empty_array_desc()});
+    }
+
+    static void run(IECompiled &iec, const IEUnit &uu, IECallContext &ctx) {
+        GAPI_Assert(ctx.args.size() > 1u
+                    && "This operation must have at least two arguments");
+
+        // Since we do a ROI list inference, always assume our input buffer is image
+        const cv::Mat mat_0  = ctx.inMat(0u);
+        IE::Blob::Ptr blob_0 = wrapIE(mat_0, cv::gapi::ie::TraitAs::IMAGE);
+
+        // Take the next argument, which must be vector (of any kind).
+        // Use it only to obtain the ROI list size (sizes of all other
+        // vectors must be equal to this one)
+        const auto list_size = ctx.inArg<cv::detail::VectorRef>(1u).size();
+
+        // FIXME: This could be done ONCE at graph compile stage!
+        std::vector< std::vector<int> > cached_dims(uu.params.num_out);
+        for (auto i : ade::util::iota(uu.params.num_out)) {
+            const IE::DataPtr& ie_out = uu.outputs.at(uu.params.output_names[i]);
+            cached_dims[i] = toCV(ie_out->getTensorDesc().getDims());
+            ctx.outVecR<cv::Mat>(i).clear();
+            // FIXME: Shouldn't this be done automatically
+            // by some resetInternalData(), etc? (Probably at the GExecutor level)
+        }
+
+        // For every ROI in the list {{{
+        for (const auto &list_idx : ade::util::iota(list_size)) {
+            // For every input of the net {{{
+            for (auto in_idx : ade::util::iota(uu.params.num_in)) {
+                const auto &this_vec = ctx.inArg<cv::detail::VectorRef>(in_idx+1u);
+                GAPI_Assert(this_vec.size() == list_size);
+                // Prepare input {{{
+                IE::Blob::Ptr this_blob;
+                if (this_vec.spec() == cv::detail::TypeSpec::RECT) {
+                    // ROI case - create an ROI blob
+                    const auto &vec = this_vec.rref<cv::Rect>();
+                    this_blob = IE::make_shared_blob(blob_0, toIE(vec[list_idx]));
+                } else if (this_vec.spec() == cv::detail::TypeSpec::MAT) {
+                    // Mat case - create a regular blob
+                    // FIXME: NOW Assume Mats are always BLOBS (not
+                    // images)
+                    const auto &vec = this_vec.rref<cv::Mat>();
+                    const auto &mat = vec[list_idx];
+                    this_blob = wrapIE(mat, cv::gapi::ie::TraitAs::TENSOR);
+                } else {
+                    GAPI_Assert(false && "Only Rect and Mat types are supported for infer list 2!");
+                }
+                iec.this_request.SetBlob(uu.params.input_names[in_idx], this_blob);
+                // }}} (Prepare input)
+            } // }}} (For every input of the net)
+
+            // Run infer request {{{
+            iec.this_request.Infer();
+            // }}} (Run infer request)
+
+            // For every output of the net {{{
+            for (auto i : ade::util::iota(uu.params.num_out)) {
+                // Push results to the list {{{
+                std::vector<cv::Mat> &out_vec = ctx.outVecR<cv::Mat>(i);
+                IE::Blob::Ptr out_blob = iec.this_request.GetBlob(uu.params.output_names[i]);
+                cv::Mat out_mat(cached_dims[i], toCV(out_blob->getTensorDesc().getPrecision()));
+                copyFromIE(out_blob, out_mat); // FIXME: Avoid data copy. Not sure if it is possible though
+                out_vec.push_back(std::move(out_mat));
+                // }}} (Push results to the list)
+            } // }}} (For every output of the net)
+        } // }}} (For every ROI in the list)
+    }
+};
+
 } // namespace ie
 } // namespace gapi
 } // namespace cv
@@ -656,6 +785,7 @@ namespace {
         virtual cv::gapi::GKernelPackage auxiliaryKernels() const override {
             return cv::gapi::kernels< cv::gimpl::ie::Infer
                                     , cv::gimpl::ie::InferList
+                                    , cv::gimpl::ie::InferList2
                                     >();
         }
     };
diff --git a/modules/gapi/src/compiler/gobjref.hpp b/modules/gapi/src/compiler/gobjref.hpp
index acbb64bb5a..3387070ac0 100644
--- a/modules/gapi/src/compiler/gobjref.hpp
+++ b/modules/gapi/src/compiler/gobjref.hpp
@@ -55,6 +55,7 @@ namespace detail
     template<> struct GTypeTraits<cv::gimpl::RcDesc>
     {
         static constexpr const ArgKind kind = ArgKind::GOBJREF;
+        static constexpr const ArgSpec spec = ArgSpec::OPAQUE_SPEC;
     };
 }
 
diff --git a/modules/gapi/test/gapi_array_tests.cpp b/modules/gapi/test/gapi_array_tests.cpp
index 922a2ebddb..a109bb5df9 100644
--- a/modules/gapi/test/gapi_array_tests.cpp
+++ b/modules/gapi/test/gapi_array_tests.cpp
@@ -187,4 +187,19 @@ TEST(GArray_VectorRef, TestMov)
     EXPECT_EQ(V{}, vref.rref());
     EXPECT_EQ(V{}, vtest);
 }
+
+TEST(GArray_VectorRef, Spec)
+{
+    cv::detail::VectorRef v1(std::vector<cv::Rect>{});
+    EXPECT_EQ(cv::detail::TypeSpec::RECT, v1.spec());
+
+    cv::detail::VectorRef v2(std::vector<cv::Mat>{});
+    EXPECT_EQ(cv::detail::TypeSpec::MAT, v2.spec());
+
+    cv::detail::VectorRef v3(std::vector<int>{});
+    EXPECT_EQ(cv::detail::TypeSpec::OPAQUE_SPEC, v3.spec());
+
+    cv::detail::VectorRef v4(std::vector<cv::Point>{});
+    EXPECT_EQ(cv::detail::TypeSpec::OPAQUE_SPEC, v4.spec());
+}
 } // namespace opencv_test
diff --git a/modules/gapi/test/infer/gapi_infer_ie_test.cpp b/modules/gapi/test/infer/gapi_infer_ie_test.cpp
index d81285fff0..5c17bb66cf 100644
--- a/modules/gapi/test/infer/gapi_infer_ie_test.cpp
+++ b/modules/gapi/test/infer/gapi_infer_ie_test.cpp
@@ -75,6 +75,44 @@ void normAssert(cv::InputArray ref, cv::InputArray test,
     EXPECT_LE(normInf, lInf) << comment;
 }
 
+std::vector<std::string> modelPathByName(const std::string &model_name) {
+    // Handle OMZ model layout changes among OpenVINO versions here
+    static const std::unordered_multimap<std::string, std::string> map = {
+        {"age-gender-recognition-retail-0013",
+         "2020.3.0/intel/age-gender-recognition-retail-0013/FP32"},
+        {"age-gender-recognition-retail-0013",
+         "Retail/object_attributes/age_gender/dldt"},
+    };
+    const auto range = map.equal_range(model_name);
+    std::vector<std::string> result;
+    for (auto it = range.first; it != range.second; ++it) {
+        result.emplace_back(it->second);
+    }
+    return result;
+}
+
+std::tuple<std::string, std::string> findModel(const std::string &model_name) {
+    const auto candidates = modelPathByName(model_name);
+    CV_Assert(!candidates.empty() && "No model path candidates found at all");
+
+    for (auto &&path : candidates) {
+        std::string model_xml, model_bin;
+        try {
+            model_xml = findDataFile(path + "/" + model_name + ".xml", false);
+            model_bin = findDataFile(path + "/" + model_name + ".bin", false);
+            // Return the first file which actually works
+            return std::make_tuple(model_xml, model_bin);
+        } catch (SkipTestException&) {
+            // This is quite ugly but it is a way for OpenCV to let us know
+            // this file wasn't found.
+            continue;
+        }
+    }
+
+    // Default behavior if reached here.
+    throw SkipTestException("Files for " + model_name + " were not found");
+}
+
 } // anonymous namespace
 
 // TODO: Probably DNN/IE part can be further parametrized with a template
@@ -83,9 +121,8 @@ TEST(TestAgeGenderIE, InferBasicTensor)
 {
     initDLDTDataPath();
 
-    const std::string path = "Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013";
-    const auto topology_path = findDataFile(path + ".xml", false);
-    const auto weights_path  = findDataFile(path + ".bin", false);
+    std::string topology_path, weights_path;
+    std::tie(topology_path, weights_path) = findModel("age-gender-recognition-retail-0013");
 
     // Load IE network, initialize input data using that.
     namespace IE = InferenceEngine;
@@ -138,9 +175,8 @@ TEST(TestAgeGenderIE, InferBasicImage)
 {
     initDLDTDataPath();
 
-    const std::string path = "Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013";
-    const auto topology_path = findDataFile(path + ".xml", false);
-    const auto weights_path  = findDataFile(path + ".bin", false);
+    std::string topology_path, weights_path;
+    std::tie(topology_path, weights_path) = findModel("age-gender-recognition-retail-0013");
 
     // FIXME: Ideally it should be an image from disk
     // cv::Mat in_mat = cv::imread(findDataFile("grace_hopper_227.png"));
@@ -159,7 +195,6 @@ TEST(TestAgeGenderIE, InferBasicImage)
     auto net = reader.getNetwork();
     auto &ii = net.getInputsInfo().at("data");
     ii->setPrecision(IE::Precision::U8);
-    ii->setLayout(IE::Layout::NHWC);
     ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
 
     auto plugin = IE::PluginDispatcher().getPluginByDevice("CPU");
@@ -192,65 +227,86 @@ TEST(TestAgeGenderIE, InferBasicImage)
     normAssert(cv::gapi::ie::util::to_ocv(ie_gender), gapi_gender, "Test gender output");
 }
 
-TEST(TestAgeGenderIE, InferROIList)
-{
-    initDLDTDataPath();
+struct ROIList: public ::testing::Test {
+    std::string m_model_path;
+    std::string m_weights_path;
 
-    const std::string path = "Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013";
-    const auto topology_path = findDataFile(path + ".xml", false);
-    const auto weights_path  = findDataFile(path + ".bin", false);
+    cv::Mat m_in_mat;
+    std::vector<cv::Rect> m_roi_list;
 
-    // FIXME: Ideally it should be an image from disk
-    // cv::Mat in_mat = cv::imread(findDataFile("grace_hopper_227.png"));
-    cv::Mat in_mat(cv::Size(640, 480), CV_8UC3);
-    cv::randu(in_mat, 0, 255);
-
-    std::vector<cv::Rect> rois = {
-        cv::Rect(cv::Point{ 0,   0}, cv::Size{80, 120}),
-        cv::Rect(cv::Point{50, 100}, cv::Size{96, 160}),
-    };
-
-    std::vector<cv::Mat> gapi_age, gapi_gender;
-
-    // Load & run IE network
-    namespace IE = InferenceEngine;
-    std::vector<cv::Mat> ie_age, ie_gender;
-    {
-        IE::CNNNetReader reader;
-        reader.ReadNetwork(topology_path);
-        reader.ReadWeights(weights_path);
-        auto net = reader.getNetwork();
-        auto &ii = net.getInputsInfo().at("data");
-        ii->setPrecision(IE::Precision::U8);
-        ii->setLayout(IE::Layout::NHWC);
-        ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
+    std::vector<cv::Mat> m_out_ie_ages;
+    std::vector<cv::Mat> m_out_ie_genders;
 
-        auto plugin = IE::PluginDispatcher().getPluginByDevice("CPU");
-        auto plugin_net = plugin.LoadNetwork(net, {});
-        auto infer_request = plugin_net.CreateInferRequest();
-        auto frame_blob = cv::gapi::ie::util::to_ie(in_mat);
-
-        for (auto &&rc : rois) {
-            const auto ie_rc = IE::ROI {
-                  0u
-                , static_cast<std::size_t>(rc.x)
-                , static_cast<std::size_t>(rc.y)
-                , static_cast<std::size_t>(rc.width)
-                , static_cast<std::size_t>(rc.height)
-            };
-            infer_request.SetBlob("data", IE::make_shared_blob(frame_blob, ie_rc));
-            infer_request.Infer();
-
-            using namespace cv::gapi::ie::util;
-            ie_age.push_back   (to_ocv(infer_request.GetBlob("age_conv3")).clone());
-            ie_gender.push_back(to_ocv(infer_request.GetBlob("prob")).clone());
-        }
-    }
+    std::vector<cv::Mat> m_out_gapi_ages;
+    std::vector<cv::Mat> m_out_gapi_genders;
 
-    // Configure & run G-API
     using AGInfo = std::tuple<cv::GMat, cv::GMat>;
     G_API_NET(AgeGender, <AGInfo(cv::GMat)>, "test-age-gender");
 
+    ROIList() {
+        initDLDTDataPath();
+        std::tie(m_model_path, m_weights_path) = findModel("age-gender-recognition-retail-0013");
+
+        // FIXME: it must be cv::imread(findDataFile("../dnn/grace_hopper_227.png", false));
+        m_in_mat = cv::Mat(cv::Size(320, 240), CV_8UC3);
+        cv::randu(m_in_mat, 0, 255);
+
+        // both ROIs point to the same face, with a slightly changed geometry
+        m_roi_list = {
+            cv::Rect(cv::Point{64, 60}, cv::Size{ 96,  96}),
+            cv::Rect(cv::Point{50, 32}, cv::Size{128, 160}),
+        };
+
+        // Load & run IE network
+        namespace IE = InferenceEngine;
+        {
+            IE::CNNNetReader reader;
+            reader.ReadNetwork(m_model_path);
+            reader.ReadWeights(m_weights_path);
+            auto net = reader.getNetwork();
+            auto &ii = net.getInputsInfo().at("data");
+            ii->setPrecision(IE::Precision::U8);
+            ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
+
+            auto plugin = IE::PluginDispatcher().getPluginByDevice("CPU");
+            auto plugin_net = plugin.LoadNetwork(net, {});
+            auto infer_request = plugin_net.CreateInferRequest();
+            auto frame_blob = cv::gapi::ie::util::to_ie(m_in_mat);
+
+            for (auto &&rc : m_roi_list) {
+                const auto ie_rc = IE::ROI {
+                      0u
+                    , static_cast<std::size_t>(rc.x)
+                    , static_cast<std::size_t>(rc.y)
+                    , static_cast<std::size_t>(rc.width)
+                    , static_cast<std::size_t>(rc.height)
+                };
+                infer_request.SetBlob("data", IE::make_shared_blob(frame_blob, ie_rc));
+                infer_request.Infer();
+
+                using namespace cv::gapi::ie::util;
+                m_out_ie_ages.push_back   (to_ocv(infer_request.GetBlob("age_conv3")).clone());
+                m_out_ie_genders.push_back(to_ocv(infer_request.GetBlob("prob")).clone());
+            }
+        } // namespace IE = ..
+    } // ROIList()
+
+    void validate() {
+        // Validate with IE itself (avoid DNN module dependency here)
+        ASSERT_EQ(2u, m_out_ie_ages.size());
+        ASSERT_EQ(2u, m_out_ie_genders.size());
+        ASSERT_EQ(2u, m_out_gapi_ages.size());
+        ASSERT_EQ(2u, m_out_gapi_genders.size());
+
+        normAssert(m_out_ie_ages   [0], m_out_gapi_ages   [0], "0: Test age output");
+        normAssert(m_out_ie_genders[0], m_out_gapi_genders[0], "0: Test gender output");
+        normAssert(m_out_ie_ages   [1], m_out_gapi_ages   [1], "1: Test age output");
+        normAssert(m_out_ie_genders[1], m_out_gapi_genders[1], "1: Test gender output");
    }
+}; // ROIList
+
+TEST_F(ROIList, TestInfer)
+{
     cv::GArray<cv::Rect> rr;
     cv::GMat in;
     cv::GArray<cv::GMat> age, gender;
@@ -258,23 +314,30 @@ TEST(TestAgeGenderIE, InferROIList)
     cv::GComputation comp(cv::GIn(in, rr), cv::GOut(age, gender));
 
     auto pp = cv::gapi::ie::Params<AgeGender> {
-        topology_path, weights_path, "CPU"
+        m_model_path, m_weights_path, "CPU"
     }.cfgOutputLayers({ "age_conv3", "prob" });
-    comp.apply(cv::gin(in_mat, rois), cv::gout(gapi_age, gapi_gender),
+    comp.apply(cv::gin(m_in_mat, m_roi_list),
+               cv::gout(m_out_gapi_ages, m_out_gapi_genders),
               cv::compile_args(cv::gapi::networks(pp)));
-
-    // Validate with IE itself (avoid DNN module dependency here)
-    ASSERT_EQ(2u, ie_age.size() );
-    ASSERT_EQ(2u, ie_gender.size());
-    ASSERT_EQ(2u, gapi_age.size() );
-    ASSERT_EQ(2u, gapi_gender.size());
-
-    normAssert(ie_age   [0], gapi_age   [0], "0: Test age output");
-    normAssert(ie_gender[0], gapi_gender[0], "0: Test gender output");
-    normAssert(ie_age   [1], gapi_age   [1], "1: Test age output");
-    normAssert(ie_gender[1], gapi_gender[1], "1: Test gender output");
+    validate();
 }
 
+TEST_F(ROIList, TestInfer2)
+{
+    cv::GArray<cv::Rect> rr;
+    cv::GMat in;
+    cv::GArray<cv::GMat> age, gender;
+    std::tie(age, gender) = cv::gapi::infer2<AgeGender>(in, rr);
+    cv::GComputation comp(cv::GIn(in, rr), cv::GOut(age, gender));
+
+    auto pp = cv::gapi::ie::Params<AgeGender> {
+        m_model_path, m_weights_path, "CPU"
+    }.cfgOutputLayers({ "age_conv3", "prob" });
+    comp.apply(cv::gin(m_in_mat, m_roi_list),
+               cv::gout(m_out_gapi_ages, m_out_gapi_genders),
+               cv::compile_args(cv::gapi::networks(pp)));
+    validate();
+}
 } // namespace opencv_test
diff --git a/modules/gapi/test/infer/gapi_infer_tests.cpp b/modules/gapi/test/infer/gapi_infer_tests.cpp
new file mode 100644
index 0000000000..8ab7af32f5
--- /dev/null
+++ b/modules/gapi/test/infer/gapi_infer_tests.cpp
@@ -0,0 +1,79 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2020 Intel Corporation
+
+#include "../test_precomp.hpp"
+
+// These tests verify some parts of cv::gapi::infer<> API
+// regardless of the backend used
+
+namespace opencv_test {
+namespace {
+template<class A, class B> using Check = cv::detail::valid_infer2_types<A, B>;
+
+TEST(Infer, ValidInfer2Types)
+{
+    // Compiled == passed!
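+
+    // To see why the asserts below hold, unfold the trait recursion by
+    // hand (a sketch mirroring the definitions in infer.hpp):
+    //
+    //   Check< std::tuple<cv::GMat, cv::GMat>, std::tuple<cv::Rect, cv::GMat> >
+    //     = valid_infer2_types< tuple<GMat>, tuple<Rect> >  // head: Rect is allowed
+    //    && valid_infer2_types< tuple<GMat>, tuple<GMat> >  // tail: GMat is allowed
+    //     = true;
+    //
+    //   Check< std::tuple<cv::GMat>, std::tuple<> >  // terminal case 3:
+    //     = false;                                   // ran out of call arguments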
+
+    // Argument block 1
+    static_assert(Check< std::tuple<cv::GMat>    // Net
+                       , std::tuple<cv::GMat> >  // Call
+                  ::value == true, "Must work");
+
+    static_assert(Check< std::tuple<cv::GMat>    // Net
+                       , std::tuple<cv::Rect> >  // Call
+                  ::value == true, "Must work");
+
+    // Argument block 2
+    static_assert(Check< std::tuple<cv::GMat, cv::GMat>    // Net
+                       , std::tuple<cv::GMat, cv::GMat> >  // Call
+                  ::value == true, "Must work");
+
+    static_assert(Check< std::tuple<cv::GMat, cv::GMat>    // Net
+                       , std::tuple<cv::Rect, cv::Rect> >  // Call
+                  ::value == true, "Must work");
+
+    // Argument block 3 (mixed cases)
+    static_assert(Check< std::tuple<cv::GMat, cv::GMat>    // Net
+                       , std::tuple<cv::Rect, cv::GMat> >  // Call
+                  ::value == true, "Must work");
+
+    static_assert(Check< std::tuple<cv::GMat, cv::GMat>    // Net
+                       , std::tuple<cv::GMat, cv::Rect> >  // Call
+                  ::value == true, "Must work");
+
+    // Argument block 4 (super-mixed)
+    static_assert(Check< std::tuple<cv::GMat, cv::GMat, cv::GMat>    // Net
+                       , std::tuple<cv::Rect, cv::GMat, cv::Rect> >  // Call
+                  ::value == true, "Must work");
+
+    // Argument block 5 (mainly negative)
+    static_assert(Check< std::tuple<cv::GMat>  // Net
+                       , std::tuple<int> >     // Call
+                  ::value == false, "This type(s) shouldn't pass");
+
+    static_assert(Check< std::tuple<cv::GMat>     // Net
+                       , std::tuple<cv::Point> >  // Call
+                  ::value == false, "This type(s) shouldn't pass");
+
+    static_assert(Check< std::tuple<cv::GMat, cv::GMat>    // Net
+                       , std::tuple<cv::GMat, cv::Size> >  // Call
+                  ::value == false, "This type(s) shouldn't pass");
+
+    // Argument block 6 (wrong args length)
+    static_assert(Check< std::tuple<cv::GMat, cv::GMat>  // Net
+                       , std::tuple<cv::GMat> >          // Call
+                  ::value == false, "Should fail -- not enough args");
+
+    static_assert(Check< std::tuple<cv::GMat, cv::GMat, cv::GMat>  // Net
+                       , std::tuple<cv::GMat, cv::GMat> >          // Call
+                  ::value == false, "Should fail -- not enough args");
+
+    static_assert(Check< std::tuple<cv::GMat>              // Net
+                       , std::tuple<cv::GMat, cv::GMat> >  // Call
+                  ::value == false, "Should fail -- too many args");
+}
+} // anonymous namespace
+} // namespace opencv_test
diff --git a/modules/gapi/test/internal/gapi_int_garg_test.cpp b/modules/gapi/test/internal/gapi_int_garg_test.cpp
index a8793721af..5bb9024e89 100644
--- a/modules/gapi/test/internal/gapi_int_garg_test.cpp
+++ b/modules/gapi/test/internal/gapi_int_garg_test.cpp
@@ -8,7 +8,7 @@
 #include "../test_precomp.hpp"
 
 namespace opencv_test {
-// Tests on T/Kind matching ////////////////////////////////////////////////////
+// Tests on T/Spec/Kind matching ///////////////////////////////////////////////
 // {{
 
 template<class T, cv::detail::ArgKind Exp>
@@ -76,6 +76,60 @@ TYPED_TEST(GArgKind, RValue)
     EXPECT_EQ(TestFixture::Kind, arg.kind);
 }
 
+// Repeat the same for Spec
+
+template<class T, cv::detail::ArgSpec Exp>
+struct ExpectedS
+{
+    using type = T;
+    static const constexpr cv::detail::ArgSpec spec = Exp;
+};
+
+template<typename T>
+struct ArgSpec: public ::testing::Test
+{
+    using Type = typename T::type;
+    const cv::detail::ArgSpec Spec = T::spec;
+};
+
+using Arg_Spec_Types = ::testing::Types
+    <
+    // G-API types
+      ExpectedS<cv::GMat,                cv::detail::ArgSpec::OPAQUE_SPEC>
+    , ExpectedS<cv::GMatP,               cv::detail::ArgSpec::OPAQUE_SPEC>
+    , ExpectedS<cv::GFrame,              cv::detail::ArgSpec::OPAQUE_SPEC>
+    , ExpectedS<cv::GScalar,             cv::detail::ArgSpec::OPAQUE_SPEC>
+    , ExpectedS<cv::GArray<int>,         cv::detail::ArgSpec::OPAQUE_SPEC>
+    , ExpectedS<cv::GArray<cv::Point>,   cv::detail::ArgSpec::OPAQUE_SPEC>
+    , ExpectedS<cv::GArray<cv::Mat>,     cv::detail::ArgSpec::OPAQUE_SPEC>
+    , ExpectedS<cv::GArray<cv::Rect>,    cv::detail::ArgSpec::RECT>
+    , ExpectedS<cv::GArray<cv::GMat>,    cv::detail::ArgSpec::GMAT>
+    , ExpectedS<cv::GOpaque<int>,        cv::detail::ArgSpec::OPAQUE_SPEC>
+    , ExpectedS<cv::GOpaque<cv::Point>,  cv::detail::ArgSpec::OPAQUE_SPEC>
+    , ExpectedS<cv::GOpaque<cv::Mat>,    cv::detail::ArgSpec::OPAQUE_SPEC>
+    , ExpectedS<cv::GOpaque<cv::Rect>,   cv::detail::ArgSpec::RECT>
+// FIXME: causes internal conflicts in GOpaque/descr_of
+//  , ExpectedS<cv::GOpaque<cv::GMat>,   cv::detail::ArgSpec::GMAT>
+
+    // Built-in types
+    , ExpectedS<int,                     cv::detail::ArgSpec::OPAQUE_SPEC>
+    , ExpectedS<float,                   cv::detail::ArgSpec::OPAQUE_SPEC>
+    , ExpectedS<std::string,             cv::detail::ArgSpec::OPAQUE_SPEC>
+    , ExpectedS<cv::Mat,                 cv::detail::ArgSpec::OPAQUE_SPEC>
+    , ExpectedS<cv::Rect,                cv::detail::ArgSpec::OPAQUE_SPEC>
+    , ExpectedS<cv::Scalar,              cv::detail::ArgSpec::OPAQUE_SPEC>
+    , ExpectedS<std::vector<int>,        cv::detail::ArgSpec::OPAQUE_SPEC>
+    , ExpectedS<std::vector<cv::Point>,  cv::detail::ArgSpec::OPAQUE_SPEC>
+    >;
+
+TYPED_TEST_CASE(ArgSpec, Arg_Spec_Types);
+
+TYPED_TEST(ArgSpec, Basic)
+{
+    const auto this_spec = cv::detail::GTypeTraits<typename TestFixture::Type>::spec;
+    EXPECT_EQ(TestFixture::Spec, this_spec);
+}
+
 // }}
 ////////////////////////////////////////////////////////////////////////////////
 
diff --git a/modules/gapi/test/internal/gapi_int_gmodel_builder_test.cpp b/modules/gapi/test/internal/gapi_int_gmodel_builder_test.cpp
index 3b4608cb72..a97fc35c2d 100644
--- a/modules/gapi/test/internal/gapi_int_gmodel_builder_test.cpp
+++ b/modules/gapi/test/internal/gapi_int_gmodel_builder_test.cpp
@@ -21,14 +21,23 @@ namespace test
 
 namespace
 {
+    namespace D = cv::detail;
     cv::GMat unaryOp(cv::GMat m)
     {
-        return cv::GCall(cv::GKernel{"gapi.test.unaryop", "", nullptr, { GShape::GMAT } }).pass(m).yield(0);
+        return cv::GCall(cv::GKernel{ "gapi.test.unaryop"
+                                    , ""
+                                    , nullptr
+                                    , { D::ArgSpec::OPAQUE_SPEC }
+                                    , { GShape::GMAT } }).pass(m).yield(0);
     }
 
     cv::GMat binaryOp(cv::GMat m1, cv::GMat m2)
     {
-        return cv::GCall(cv::GKernel{"gapi.test.binaryOp", "", nullptr, { GShape::GMAT } }).pass(m1, m2).yield(0);
+        return cv::GCall(cv::GKernel{ "gapi.test.binaryOp"
+                                    , ""
+                                    , nullptr
+                                    , { D::ArgSpec::OPAQUE_SPEC, D::ArgSpec::OPAQUE_SPEC }
+                                    , { GShape::GMAT } }).pass(m1, m2).yield(0);
     }
 
     std::vector<ade::NodeHandle> collectOperations(const cv::gimpl::GModel::Graph& gr)
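    // A hypothetical three-input helper would follow the same pattern --
    // one D::ArgSpec entry per input, one GShape per output (a sketch
    // only; no such helper exists in this test suite):
    //
    //   cv::GMat ternaryOp(cv::GMat m1, cv::GMat m2, cv::GMat m3)
    //   {
    //       return cv::GCall(cv::GKernel{ "gapi.test.ternaryOp"
    //                                   , ""
    //                                   , nullptr
    //                                   , { D::ArgSpec::OPAQUE_SPEC
    //                                     , D::ArgSpec::OPAQUE_SPEC
    //                                     , D::ArgSpec::OPAQUE_SPEC }
    //                                   , { GShape::GMAT } }).pass(m1, m2, m3).yield(0);
    //   }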