Merge pull request #21106 from mpashchenkov:mp/ocv-gapi-clean-samples

G-API: Cleaning samples

* parseSSD + removed render details from gcpukernel

* self-rev

* Applying comment

* Added operators

* warnings
pull/21140/head
Maxim Pashchenkov 3 years ago committed by GitHub
parent cc1fbe0956
commit b95d71af2b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 9
      modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp
  2. 85
      modules/gapi/samples/gaze_estimation.cpp
  3. 8
      modules/gapi/samples/infer_ie_onnx_hybrid.cpp
  4. 94
      modules/gapi/samples/infer_single_roi.cpp
  5. 78
      modules/gapi/samples/infer_ssd_onnx.cpp
  6. 74
      modules/gapi/samples/onevpl_infer_single_roi.cpp
  7. 63
      modules/gapi/samples/privacy_masking_camera.cpp
  8. 18
      modules/gapi/samples/semantic_segmentation.cpp

@ -28,14 +28,6 @@ namespace gimpl
{
// Forward-declare an internal class
class GCPUExecutable;
namespace render
{
namespace ocv
{
class GRenderExecutable;
}
}
} // namespace gimpl
namespace gapi
@ -133,7 +125,6 @@ protected:
std::unordered_map<std::size_t, GRunArgP> m_results;
friend class gimpl::GCPUExecutable;
friend class gimpl::render::ocv::GRenderExecutable;
};
class GAPI_EXPORTS GCPUKernel

@ -9,6 +9,7 @@
#include <opencv2/gapi/streaming/cap.hpp>
#include <opencv2/gapi/cpu/gcpukernel.hpp>
#include <opencv2/highgui.hpp> // CommandLineParser
#include <opencv2/gapi/infer/parsers.hpp>
const std::string about =
"This is an OpenCV-based version of Gaze Estimation example";
@ -58,16 +59,6 @@ G_API_OP(Size, <GSize(cv::GMat)>, "custom.gapi.size") {
}
};
// Sample-local SSD post-processing op: takes the raw detector output blob,
// the source frame size and a flag controlling whether detections crossing
// the frame border are dropped; produces an array of face rectangles.
G_API_OP(ParseSSD,
<GRects(cv::GMat, GSize, bool)>,
"custom.gaze_estimation.parseSSD") {
static cv::GArrayDesc outMeta( const cv::GMatDesc &
, const cv::GOpaqueDesc &
, bool) {
// The number of detections is data-dependent - no static shape to report.
return cv::empty_array_desc();
}
};
// Left/Right eye per every face
G_API_OP(ParseEyes,
<std::tuple<GRects, GRects>(GMats, GRects, GSize)>,
@ -91,27 +82,6 @@ G_API_OP(ProcessPoses,
}
};
// Enlarge a face bounding box slightly and make it square, in place.
// The box is first padded by empirically chosen fractions of its original
// width/height, then the shorter dimension is grown symmetrically around
// its midpoint to match the longer one.
void adjustBoundingBox(cv::Rect& boundingBox) {
    const auto origW = boundingBox.width;
    const auto origH = boundingBox.height;

    // Pad: move the origin up/left a little and extend both extents.
    boundingBox.x      -= static_cast<int>(0.067 * origW);
    boundingBox.y      -= static_cast<int>(0.028 * origH);
    boundingBox.width  += static_cast<int>(0.15  * origW);
    boundingBox.height += static_cast<int>(0.13  * origH);

    // Square it up: expand the shorter side around its center.
    if (boundingBox.width < boundingBox.height) {
        const auto diff = boundingBox.height - boundingBox.width;
        boundingBox.x     -= diff / 2;
        boundingBox.width += diff;
    } else {
        const auto diff = boundingBox.width - boundingBox.height;
        boundingBox.y      -= diff / 2;
        boundingBox.height += diff;
    }
}
void gazeVectorToGazeAngles(const cv::Point3f& gazeVector,
cv::Point2f& gazeAngles) {
auto r = cv::norm(gazeVector);
@ -130,55 +100,6 @@ GAPI_OCV_KERNEL(OCVSize, Size) {
}
};
// CPU kernel for the sample-local ParseSSD op: decodes the raw [1x1xNx7]
// SSD output blob into pixel-space face rectangles, optionally dropping
// detections which do not fit entirely inside the frame.
GAPI_OCV_KERNEL(OCVParseSSD, ParseSSD) {
    static void run(const cv::Mat &in_ssd_result,
                    const cv::Size &upscale,
                    const bool filter_out_of_bounds,
                    std::vector<cv::Rect> &out_objects) {
        const auto &dims = in_ssd_result.size;
        CV_Assert(dims.dims() == 4u);
        const int num_proposals = dims[2];
        const int entry_len     = dims[3];
        CV_Assert(entry_len == 7); // fixed SSD object size

        // Frame rectangle used to clip detections to the visible area.
        const cv::Rect frame_rect({0,0}, upscale);
        out_objects.clear();

        const float *blob = in_ssd_result.ptr<float>();
        for (int i = 0; i < num_proposals; i++) {
            // Entry layout: [image_id, label, confidence, left, top, right, bottom]
            const float *entry = blob + i * entry_len;
            if (entry[0] < 0.f) {
                break;        // negative image_id marks end-of-detections
            }
            if (entry[2] < 0.5f) {
                continue;     // skip objects with low confidence
            }
            // Map relative coordinates to the original image scale.
            cv::Rect box;
            box.x      = static_cast<int>(entry[3] * upscale.width);
            box.y      = static_cast<int>(entry[4] * upscale.height);
            box.width  = static_cast<int>(entry[5] * upscale.width)  - box.x;
            box.height = static_cast<int>(entry[6] * upscale.height) - box.y;
            adjustBoundingBox(box);                  // TODO: new option?
            const auto clipped = box & frame_rect;   // TODO: new option?
            // Optionally drop boxes which had to be clipped to the frame.
            if (filter_out_of_bounds && clipped.area() != box.area()) {
                continue;
            }
            out_objects.emplace_back(clipped);
        }
    }
};
cv::Rect eyeBox(const cv::Rect &face_rc,
float p1_x, float p1_y, float p2_x, float p2_y,
float scale = 1.8f) {
@ -335,11 +256,10 @@ int main(int argc, char *argv[])
cmd.printMessage();
return 0;
}
cv::GMat in;
cv::GMat faces = cv::gapi::infer<custom::Faces>(in);
cv::GOpaque<cv::Size> sz = cv::gapi::streaming::size(in);
cv::GArray<cv::Rect> faces_rc = custom::ParseSSD::on(faces, sz, true);
cv::GArray<cv::Rect> faces_rc = cv::gapi::parseSSD(faces, sz, 0.5f, true, true);
cv::GArray<cv::GMat> angles_y, angles_p, angles_r;
std::tie(angles_y, angles_p, angles_r) = cv::gapi::infer<custom::HeadPose>(faces_rc, in);
cv::GArray<cv::GMat> heads_pos = custom::ProcessPoses::on(angles_y, angles_p, angles_r);
@ -386,7 +306,6 @@ int main(int argc, char *argv[])
}.cfgInputLayers({"left_eye_image", "right_eye_image", "head_pose_angles"});
auto kernels = cv::gapi::kernels< custom::OCVSize
, custom::OCVParseSSD
, custom::OCVParseEyes
, custom::OCVProcessPoses>();
auto networks = cv::gapi::networks(face_net, head_net, landmarks_net, gaze_net);

@ -156,7 +156,6 @@ int main(int argc, char *argv[])
auto in_src = cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(input);
pipeline.setSource(cv::gin(in_src));
pipeline.start();
cv::util::optional<cv::Mat> out_frame;
cv::util::optional<std::vector<cv::Rect>> out_faces;
@ -167,8 +166,13 @@ int main(int argc, char *argv[])
std::vector<cv::Mat> last_emotions;
cv::VideoWriter writer;
cv::TickMeter tm;
std::size_t frames = 0u;
tm.start();
pipeline.start();
while (pipeline.pull(cv::gout(out_frame, out_faces, out_emotions))) {
++frames;
if (out_faces && out_emotions) {
last_faces = *out_faces;
last_emotions = *out_emotions;
@ -191,5 +195,7 @@ int main(int argc, char *argv[])
cv::waitKey(1);
}
}
tm.stop();
std::cout << "Processed " << frames << " frames" << " (" << frames / tm.getTimeSec() << " FPS)" << std::endl;
return 0;
}

@ -13,6 +13,7 @@
#include <opencv2/gapi/cpu/gcpukernel.hpp>
#include <opencv2/gapi/streaming/cap.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/gapi/infer/parsers.hpp>
const std::string keys =
"{ h help | | Print this help message }"
@ -69,36 +70,18 @@ using GRect = cv::GOpaque<cv::Rect>;
using GSize = cv::GOpaque<cv::Size>;
using GPrims = cv::GArray<cv::gapi::wip::draw::Prim>;
// Op: report the spatial size (width x height) of the input image.
G_API_OP(GetSize, <GSize(cv::GMat)>, "sample.custom.get-size") {
static cv::GOpaqueDesc outMeta(const cv::GMatDesc &) {
// Output is a single opaque value - nothing to describe statically.
return cv::empty_gopaque_desc();
}
};
// Op: select the region-of-interest of the frame to run inference on.
G_API_OP(LocateROI, <GRect(cv::GMat)>, "sample.custom.locate-roi") {
static cv::GOpaqueDesc outMeta(const cv::GMatDesc &) {
return cv::empty_gopaque_desc();
}
};
// Op: decode a raw SSD output blob into rectangles; detections are relative
// to the given inference ROI and clipped to the parent frame size.
G_API_OP(ParseSSD, <GDetections(cv::GMat, GRect, GSize)>, "sample.custom.parse-ssd") {
static cv::GArrayDesc outMeta(const cv::GMatDesc &, const cv::GOpaqueDesc &, const cv::GOpaqueDesc &) {
// The number of detections is data-dependent - no static shape to report.
return cv::empty_array_desc();
}
};
// Op: turn detected rectangles (plus the inference ROI) into G-API
// rendering primitives.
G_API_OP(BBoxes, <GPrims(GDetections, GRect)>, "sample.custom.b-boxes") {
static cv::GArrayDesc outMeta(const cv::GArrayDesc &, const cv::GOpaqueDesc &) {
return cv::empty_array_desc();
}
};
// CPU kernel for GetSize: reports the input matrix dimensions as a cv::Size.
GAPI_OCV_KERNEL(OCVGetSize, GetSize) {
    static void run(const cv::Mat &in, cv::Size &out) {
        out.width  = in.cols;
        out.height = in.rows;
    }
};
GAPI_OCV_KERNEL(OCVLocateROI, LocateROI) {
// This is the place where we can run extra analytics
// on the input image frame and select the ROI (region
@ -124,55 +107,6 @@ GAPI_OCV_KERNEL(OCVLocateROI, LocateROI) {
}
};
// CPU kernel for ParseSSD: decodes the raw [1x1xNx7] SSD blob into absolute
// rectangles. Detections come in coordinates relative to the inference ROI,
// so every box is scaled by the ROI size, offset by the ROI origin, and
// clipped against the parent frame.
GAPI_OCV_KERNEL(OCVParseSSD, ParseSSD) {
    static void run(const cv::Mat &in_ssd_result,
                    const cv::Rect &in_roi,
                    const cv::Size &in_parent_size,
                    std::vector<cv::Rect> &out_objects) {
        const auto &dims = in_ssd_result.size;
        CV_Assert(dims.dims() == 4u);
        const int num_proposals = dims[2];
        const int entry_len     = dims[3];
        CV_Assert(entry_len == 7); // fixed SSD object size

        const cv::Size roi_size = in_roi.size();
        const cv::Rect frame_rect({0,0}, in_parent_size);
        out_objects.clear();

        const float *blob = in_ssd_result.ptr<float>();
        for (int i = 0; i < num_proposals; i++) {
            // Entry layout: [image_id, label, confidence, left, top, right, bottom]
            const float *entry = blob + i * entry_len;
            if (entry[0] < 0.f) {
                break;        // negative image_id marks end-of-detections
            }
            if (entry[2] < 0.5f) {
                continue;     // skip objects with low confidence
            }
            // Map relative coordinates to the original image scale,
            // taking the ROI into account.
            cv::Rect box;
            box.x      = static_cast<int>(entry[3] * roi_size.width);
            box.y      = static_cast<int>(entry[4] * roi_size.height);
            box.width  = static_cast<int>(entry[5] * roi_size.width)  - box.x;
            box.height = static_cast<int>(entry[6] * roi_size.height) - box.y;
            box.x += in_roi.x;
            box.y += in_roi.y;
            out_objects.emplace_back(box & frame_rect);
        }
    }
};
GAPI_OCV_KERNEL(OCVBBoxes, BBoxes) {
// This kernel converts the rectangles into G-API's
// rendering primitives
@ -211,9 +145,7 @@ int main(int argc, char *argv[])
cmd.get<std::string>("faced"), // device specifier
};
auto kernels = cv::gapi::kernels
< custom::OCVGetSize
, custom::OCVLocateROI
, custom::OCVParseSSD
<custom::OCVLocateROI
, custom::OCVBBoxes>();
auto networks = cv::gapi::networks(face_net);
@ -222,16 +154,17 @@ int main(int argc, char *argv[])
cv::GStreamingCompiled pipeline;
auto inputs = cv::gin(cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(input));
cv::GMat in;
cv::GOpaque<cv::Size> sz = cv::gapi::streaming::size(in);
if (opt_roi.has_value()) {
// Use the value provided by user
std::cout << "Will run inference for static region "
<< opt_roi.value()
<< " only"
<< std::endl;
cv::GMat in;
cv::GOpaque<cv::Rect> in_roi;
auto blob = cv::gapi::infer<custom::FaceDetector>(in_roi, in);
auto rcs = custom::ParseSSD::on(blob, in_roi, custom::GetSize::on(in));
cv::GArray<cv::Rect> rcs = cv::gapi::parseSSD(blob, sz, 0.5f, true, true);
auto out = cv::gapi::wip::draw::render3ch(in, custom::BBoxes::on(rcs, in_roi));
pipeline = cv::GComputation(cv::GIn(in, in_roi), cv::GOut(out))
.compileStreaming(cv::compile_args(kernels, networks));
@ -242,10 +175,9 @@ int main(int argc, char *argv[])
// Automatically detect ROI to infer. Make it output parameter
std::cout << "ROI is not set or invalid. Locating it automatically"
<< std::endl;
cv::GMat in;
cv::GOpaque<cv::Rect> roi = custom::LocateROI::on(in);
auto blob = cv::gapi::infer<custom::FaceDetector>(roi, in);
auto rcs = custom::ParseSSD::on(blob, roi, custom::GetSize::on(in));
cv::GArray<cv::Rect> rcs = cv::gapi::parseSSD(blob, sz, 0.5f, true, true);
auto out = cv::gapi::wip::draw::render3ch(in, custom::BBoxes::on(rcs, roi));
pipeline = cv::GComputation(cv::GIn(in), cv::GOut(out))
.compileStreaming(cv::compile_args(kernels, networks));
@ -256,17 +188,15 @@ int main(int argc, char *argv[])
pipeline.start();
cv::Mat out;
int framesCount = 0;
cv::TickMeter t;
t.start();
size_t frames = 0u;
cv::TickMeter tm;
tm.start();
while (pipeline.pull(cv::gout(out))) {
cv::imshow("Out", out);
cv::waitKey(1);
framesCount++;
++frames;
}
t.stop();
std::cout << "Elapsed time: " << t.getTimeSec() << std::endl;
std::cout << "FPS: " << framesCount / (t.getTimeSec() ? t.getTimeSec() : 1) << std::endl;
std::cout << "framesCount: " << framesCount << std::endl;
tm.stop();
std::cout << "Processed " << frames << " frames" << " (" << frames / tm.getTimeSec() << " FPS)" << std::endl;
return 0;
}

@ -14,6 +14,7 @@
#include <opencv2/gapi/cpu/gcpukernel.hpp>
#include <opencv2/gapi/streaming/cap.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/gapi/infer/parsers.hpp>
namespace custom {
@ -23,71 +24,12 @@ using GDetections = cv::GArray<cv::Rect>;
using GSize = cv::GOpaque<cv::Size>;
using GPrims = cv::GArray<cv::gapi::wip::draw::Prim>;
// Op: report the spatial size (width x height) of the input image.
G_API_OP(GetSize, <GSize(cv::GMat)>, "sample.custom.get-size") {
static cv::GOpaqueDesc outMeta(const cv::GMatDesc &) {
// Output is a single opaque value - nothing to describe statically.
return cv::empty_gopaque_desc();
}
};
// Op: decode a raw SSD output blob into rectangles scaled to the given
// parent frame size.
G_API_OP(ParseSSD, <GDetections(cv::GMat, GSize)>, "sample.custom.parse-ssd") {
static cv::GArrayDesc outMeta(const cv::GMatDesc &, const cv::GOpaqueDesc &) {
// The number of detections is data-dependent - no static shape to report.
return cv::empty_array_desc();
}
};
// Op: turn detected rectangles into G-API rendering primitives.
G_API_OP(BBoxes, <GPrims(GDetections)>, "sample.custom.b-boxes") {
static cv::GArrayDesc outMeta(const cv::GArrayDesc &) {
return cv::empty_array_desc();
}
};
// CPU kernel for GetSize: reports the input matrix dimensions as a cv::Size.
GAPI_OCV_KERNEL(OCVGetSize, GetSize) {
    static void run(const cv::Mat &in, cv::Size &out) {
        out.width  = in.cols;
        out.height = in.rows;
    }
};
// CPU kernel for ParseSSD: decodes the raw [1x1xNx7] SSD blob into absolute
// rectangles scaled to the parent frame size and clipped to its bounds.
GAPI_OCV_KERNEL(OCVParseSSD, ParseSSD) {
    static void run(const cv::Mat &in_ssd_result,
                    const cv::Size &in_parent_size,
                    std::vector<cv::Rect> &out_objects) {
        const auto &dims = in_ssd_result.size;
        CV_Assert(dims.dims() == 4u);
        const int num_proposals = dims[2];
        const int entry_len     = dims[3];
        CV_Assert(entry_len == 7); // fixed SSD object size

        const cv::Rect frame_rect({0,0}, in_parent_size);
        out_objects.clear();

        const float *blob = in_ssd_result.ptr<float>();
        for (int i = 0; i < num_proposals; i++) {
            // Entry layout: [image_id, label, confidence, left, top, right, bottom]
            const float *entry = blob + i * entry_len;
            if (entry[0] < 0.f) {
                break;        // negative image_id marks end-of-detections
            }
            if (entry[2] < 0.5f) {
                continue;     // skip objects with low confidence
            }
            // Map relative coordinates to the original image scale.
            cv::Rect box;
            box.x      = static_cast<int>(entry[3] * in_parent_size.width);
            box.y      = static_cast<int>(entry[4] * in_parent_size.height);
            box.width  = static_cast<int>(entry[5] * in_parent_size.width)  - box.x;
            box.height = static_cast<int>(entry[6] * in_parent_size.height) - box.y;
            out_objects.emplace_back(box & frame_rect);
        }
    }
};
GAPI_OCV_KERNEL(OCVBBoxes, BBoxes) {
// This kernel converts the rectangles into G-API's
// rendering primitives
@ -151,7 +93,6 @@ void remap_ssd_ports(const std::unordered_map<std::string, cv::Mat> &onnx,
}
} // anonymous namespace
const std::string keys =
"{ h help | | Print this help message }"
"{ input | | Path to the input video file }"
@ -175,15 +116,14 @@ int main(int argc, char *argv[])
auto obj_net = cv::gapi::onnx::Params<custom::ObjDetector>{obj_model_path}
.cfgOutputLayers({"detection_output"})
.cfgPostProc({cv::GMatDesc{CV_32F, {1,1,200,7}}}, remap_ssd_ports);
auto kernels = cv::gapi::kernels< custom::OCVGetSize
, custom::OCVParseSSD
, custom::OCVBBoxes>();
auto kernels = cv::gapi::kernels<custom::OCVBBoxes>();
auto networks = cv::gapi::networks(obj_net);
// Now build the graph
cv::GMat in;
auto blob = cv::gapi::infer<custom::ObjDetector>(in);
auto rcs = custom::ParseSSD::on(blob, custom::GetSize::on(in));
cv::GArray<cv::Rect> rcs =
cv::gapi::parseSSD(blob, cv::gapi::streaming::size(in), 0.5f, true, true);
auto out = cv::gapi::wip::draw::render3ch(in, custom::BBoxes::on(rcs));
cv::GStreamingCompiled pipeline = cv::GComputation(cv::GIn(in), cv::GOut(out))
.compileStreaming(cv::compile_args(kernels, networks));
@ -192,12 +132,16 @@ int main(int argc, char *argv[])
// The execution part
pipeline.setSource(std::move(inputs));
pipeline.start();
cv::TickMeter tm;
cv::VideoWriter writer;
size_t frames = 0u;
cv::Mat outMat;
tm.start();
pipeline.start();
while (pipeline.pull(cv::gout(outMat))) {
++frames;
cv::imshow("Out", outMat);
cv::waitKey(1);
if (!output.empty()) {
@ -209,5 +153,7 @@ int main(int argc, char *argv[])
writer << outMat;
}
}
tm.stop();
std::cout << "Processed " << frames << " frames" << " (" << frames / tm.getTimeSec() << " FPS)" << std::endl;
return 0;
}

@ -13,6 +13,7 @@
#include <opencv2/gapi/streaming/onevpl/source.hpp>
#include <opencv2/gapi/streaming/onevpl/data_provider_interface.hpp>
#include <opencv2/highgui.hpp> // CommandLineParser
#include <opencv2/gapi/infer/parsers.hpp>
#ifdef HAVE_INF_ENGINE
#include <inference_engine.hpp> // ParamMap
@ -126,12 +127,6 @@ G_API_OP(LocateROI, <GRect(GSize)>, "sample.custom.locate-roi") {
}
};
// Op: decode a raw SSD output blob into rectangles; detections are relative
// to the given inference ROI and clipped to the parent frame size.
G_API_OP(ParseSSD, <GDetections(cv::GMat, GRect, GSize)>, "sample.custom.parse-ssd") {
static cv::GArrayDesc outMeta(const cv::GMatDesc &, const cv::GOpaqueDesc &, const cv::GOpaqueDesc &) {
// The number of detections is data-dependent - no static shape to report.
return cv::empty_array_desc();
}
};
G_API_OP(BBoxes, <GPrims(GDetections, GRect)>, "sample.custom.b-boxes") {
static cv::GArrayDesc outMeta(const cv::GArrayDesc &, const cv::GOpaqueDesc &) {
return cv::empty_array_desc();
@ -163,55 +158,6 @@ GAPI_OCV_KERNEL(OCVLocateROI, LocateROI) {
}
};
// CPU kernel for ParseSSD: decodes the raw [1x1xNx7] SSD blob into absolute
// rectangles. Detections come in coordinates relative to the inference ROI,
// so every box is scaled by the ROI size, offset by the ROI origin, and
// clipped against the parent frame.
GAPI_OCV_KERNEL(OCVParseSSD, ParseSSD) {
    static void run(const cv::Mat &in_ssd_result,
                    const cv::Rect &in_roi,
                    const cv::Size &in_parent_size,
                    std::vector<cv::Rect> &out_objects) {
        const auto &dims = in_ssd_result.size;
        CV_Assert(dims.dims() == 4u);
        const int num_proposals = dims[2];
        const int entry_len     = dims[3];
        CV_Assert(entry_len == 7); // fixed SSD object size

        const cv::Size roi_size = in_roi.size();
        const cv::Rect frame_rect({0,0}, in_parent_size);
        out_objects.clear();

        const float *blob = in_ssd_result.ptr<float>();
        for (int i = 0; i < num_proposals; i++) {
            // Entry layout: [image_id, label, confidence, left, top, right, bottom]
            const float *entry = blob + i * entry_len;
            if (entry[0] < 0.f) {
                break;        // negative image_id marks end-of-detections
            }
            if (entry[2] < 0.5f) {
                continue;     // skip objects with low confidence
            }
            // Map relative coordinates to the original image scale,
            // taking the ROI into account.
            cv::Rect box;
            box.x      = static_cast<int>(entry[3] * roi_size.width);
            box.y      = static_cast<int>(entry[4] * roi_size.height);
            box.width  = static_cast<int>(entry[5] * roi_size.width)  - box.x;
            box.height = static_cast<int>(entry[6] * roi_size.height) - box.y;
            box.x += in_roi.x;
            box.y += in_roi.y;
            out_objects.emplace_back(box & frame_rect);
        }
    }
};
GAPI_OCV_KERNEL(OCVBBoxes, BBoxes) {
// This kernel converts the rectangles into G-API's
// rendering primitives
@ -350,7 +296,6 @@ int main(int argc, char *argv[]) {
auto kernels = cv::gapi::kernels
< custom::OCVLocateROI
, custom::OCVParseSSD
, custom::OCVBBoxes>();
auto networks = cv::gapi::networks(face_net);
@ -379,7 +324,7 @@ int main(int argc, char *argv[]) {
auto size = cv::gapi::streaming::size(in);
auto roi = custom::LocateROI::on(size);
auto blob = cv::gapi::infer<custom::FaceDetector>(roi, in);
auto rcs = custom::ParseSSD::on(blob, roi, size);
cv::GArray<cv::Rect> rcs = cv::gapi::parseSSD(blob, size, 0.5f, true, true);
auto out_frame = cv::gapi::wip::draw::renderFrame(in, custom::BBoxes::on(rcs, roi));
auto out = cv::gapi::streaming::BGR(out_frame);
@ -398,8 +343,8 @@ int main(int argc, char *argv[]) {
pipeline.setSource(std::move(cap));
pipeline.start();
int framesCount = 0;
cv::TickMeter t;
size_t frames = 0u;
cv::TickMeter tm;
cv::VideoWriter writer;
if (!output.empty() && !writer.isOpened()) {
const auto sz = cv::Size{frame_descr.size.width, frame_descr.size.height};
@ -408,20 +353,17 @@ int main(int argc, char *argv[]) {
}
cv::Mat outMat;
t.start();
tm.start();
while (pipeline.pull(cv::gout(outMat))) {
cv::imshow("Out", outMat);
cv::waitKey(1);
if (!output.empty()) {
writer << outMat;
}
framesCount++;
++frames;
}
t.stop();
std::cout << "Elapsed time: " << t.getTimeSec() << std::endl;
std::cout << "FPS: " << framesCount / t.getTimeSec() << std::endl;
std::cout << "framesCount: " << framesCount << std::endl;
tm.stop();
std::cout << "Processed " << frames << " frames" << " (" << frames / tm.getTimeSec() << " FPS)" << std::endl;
return 0;
}

@ -13,6 +13,7 @@
#include <opencv2/gapi/cpu/gcpukernel.hpp>
#include <opencv2/gapi/streaming/cap.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/gapi/infer/parsers.hpp>
const std::string about =
"This is an OpenCV-based version of Privacy Masking Camera example";
@ -49,12 +50,6 @@ G_API_NET(FaceDetector, <cv::GMat(cv::GMat)>, "face-detector"
using GDetections = cv::GArray<cv::Rect>;
// Op: decode a raw SSD output blob into rectangles at frame scale, keeping
// only objects of the requested class label (-1 keeps everything).
G_API_OP(ParseSSD, <GDetections(cv::GMat, cv::GMat, int)>, "custom.privacy_masking.postproc") {
static cv::GArrayDesc outMeta(const cv::GMatDesc &, const cv::GMatDesc &, int) {
// The number of detections is data-dependent - no static shape to report.
return cv::empty_array_desc();
}
};
using GPrims = cv::GArray<cv::gapi::wip::draw::Prim>;
G_API_OP(ToMosaic, <GPrims(GDetections, GDetections)>, "custom.privacy_masking.to_mosaic") {
@ -63,53 +58,6 @@ G_API_OP(ToMosaic, <GPrims(GDetections, GDetections)>, "custom.privacy_masking.t
}
};
// CPU kernel for ParseSSD: decodes the raw [1x1xNx7] SSD blob into absolute
// rectangles at frame scale, keeping only objects of the requested class
// (filter_label == -1 disables class filtering).
GAPI_OCV_KERNEL(OCVParseSSD, ParseSSD) {
    static void run(const cv::Mat &in_ssd_result,
                    const cv::Mat &in_frame,
                    const int filter_label,
                    std::vector<cv::Rect> &out_objects) {
        const auto &dims = in_ssd_result.size;
        CV_Assert(dims.dims() == 4u);
        const int num_proposals = dims[2];
        const int entry_len     = dims[3];
        CV_Assert(entry_len == 7); // fixed SSD object size

        const cv::Size frame_size = in_frame.size();
        const cv::Rect frame_rect({0,0}, frame_size);
        out_objects.clear();

        const float *blob = in_ssd_result.ptr<float>();
        for (int i = 0; i < num_proposals; i++) {
            // Entry layout: [image_id, label, confidence, left, top, right, bottom]
            const float *entry = blob + i * entry_len;
            if (entry[0] < 0.f) {
                break;        // negative image_id marks end-of-detections
            }
            if (entry[2] < 0.5f) {
                continue;     // skip objects with low confidence
            }
            if (filter_label != -1 && static_cast<int>(entry[1]) != filter_label) {
                continue;     // filter out object classes if filter is specified
            }
            // Map relative coordinates to the original image scale.
            cv::Rect box;
            box.x      = static_cast<int>(entry[3] * frame_size.width);
            box.y      = static_cast<int>(entry[4] * frame_size.height);
            box.width  = static_cast<int>(entry[5] * frame_size.width)  - box.x;
            box.height = static_cast<int>(entry[6] * frame_size.height) - box.y;
            out_objects.emplace_back(box & frame_rect);
        }
    }
};
GAPI_OCV_KERNEL(OCVToMosaic, ToMosaic) {
static void run(const std::vector<cv::Rect> &in_plate_rcs,
const std::vector<cv::Rect> &in_face_rcs,
@ -150,10 +98,13 @@ int main(int argc, char *argv[])
cv::GMat blob_faces = cv::gapi::infer<custom::FaceDetector>(in);
// VehLicDetector from Open Model Zoo marks vehicles with label "1" and
// license plates with label "2", filter out license plates only.
cv::GArray<cv::Rect> rc_plates = custom::ParseSSD::on(blob_plates, in, 2);
cv::GOpaque<cv::Size> sz = cv::gapi::streaming::size(in);
cv::GArray<cv::Rect> rc_plates, rc_faces;
cv::GArray<int> labels;
std::tie(rc_plates, labels) = cv::gapi::parseSSD(blob_plates, sz, 0.5f, 2);
// Face detector produces faces only so there's no need to filter by label,
// pass "-1".
cv::GArray<cv::Rect> rc_faces = custom::ParseSSD::on(blob_faces, in, -1);
std::tie(rc_faces, labels) = cv::gapi::parseSSD(blob_faces, sz, 0.5f, -1);
cv::GMat out = cv::gapi::wip::draw::render3ch(in, custom::ToMosaic::on(rc_plates, rc_faces));
cv::GComputation graph(in, out);
@ -169,7 +120,7 @@ int main(int argc, char *argv[])
weights_path(face_model_path), // path to weights
cmd.get<std::string>("faced"), // device specifier
};
auto kernels = cv::gapi::kernels<custom::OCVParseSSD, custom::OCVToMosaic>();
auto kernels = cv::gapi::kernels<custom::OCVToMosaic>();
auto networks = cv::gapi::networks(plate_net, face_net);
cv::TickMeter tm;

@ -2,6 +2,7 @@
#include <opencv2/gapi/infer/ie.hpp>
#include <opencv2/gapi/cpu/gcpukernel.hpp>
#include <opencv2/gapi/streaming/cap.hpp>
#include <opencv2/gapi/operators.hpp>
#include <opencv2/highgui.hpp>
const std::string keys =
@ -117,10 +118,7 @@ GAPI_OCV_KERNEL(OCVPostProcessing, PostProcessing) {
cv::Mat mask_img;
classesToColors(classes, mask_img);
cv::resize(mask_img, out, in.size());
const float blending = 0.3f;
out = in * blending + out * (1 - blending);
}
};
} // namespace custom
@ -148,7 +146,10 @@ int main(int argc, char *argv[]) {
// Now build the graph
cv::GMat in;
cv::GMat out_blob = cv::gapi::infer<SemSegmNet>(in);
cv::GMat out = custom::PostProcessing::on(in, out_blob);
cv::GMat post_proc_out = custom::PostProcessing::on(in, out_blob);
cv::GMat blending_in = in * 0.3f;
cv::GMat blending_out = post_proc_out * 0.7f;
cv::GMat out = blending_in + blending_out;
cv::GStreamingCompiled pipeline = cv::GComputation(cv::GIn(in), cv::GOut(out))
.compileStreaming(cv::compile_args(kernels, networks));
@ -156,11 +157,16 @@ int main(int argc, char *argv[]) {
// The execution part
pipeline.setSource(std::move(inputs));
pipeline.start();
cv::VideoWriter writer;
cv::TickMeter tm;
cv::Mat outMat;
std::size_t frames = 0u;
tm.start();
pipeline.start();
while (pipeline.pull(cv::gout(outMat))) {
++frames;
cv::imshow("Out", outMat);
cv::waitKey(1);
if (!output.empty()) {
@ -172,5 +178,7 @@ int main(int argc, char *argv[]) {
writer << outMat;
}
}
tm.stop();
std::cout << "Processed " << frames << " frames" << " (" << frames / tm.getTimeSec() << " FPS)" << std::endl;
return 0;
}

Loading…
Cancel
Save