upgraded remaining demos

pull/3471/head
kallaballa 2 years ago
parent 7a91be9d15
commit 31989b4902
Changed files:
  Makefile (4 changes)
  src/beauty/beauty-demo.cpp (297 changes)
  src/pedestrian/pedestrian-demo.cpp (78 changes)

Makefile
@@ -45,9 +45,9 @@ dirs: docs
 	${MAKE} -C src/video/ ${MAKEFLAGS} CXX=${CXX} ${MAKECMDGOALS}
 	${MAKE} -C src/nanovg/ ${MAKEFLAGS} CXX=${CXX} ${MAKECMDGOALS}
 	${MAKE} -C src/optflow/ ${MAKEFLAGS} CXX=${CXX} ${MAKECMDGOALS}
-#	${MAKE} -C src/beauty/ ${MAKEFLAGS} CXX=${CXX} ${MAKECMDGOALS}
+	${MAKE} -C src/beauty/ ${MAKEFLAGS} CXX=${CXX} ${MAKECMDGOALS}
 	${MAKE} -C src/font/ ${MAKEFLAGS} CXX=${CXX} ${MAKECMDGOALS}
-#	${MAKE} -C src/pedestrian/ ${MAKEFLAGS} CXX=${CXX} ${MAKECMDGOALS}
+	${MAKE} -C src/pedestrian/ ${MAKEFLAGS} CXX=${CXX} ${MAKECMDGOALS}

 debian-release:
 	${MAKE} -C src/common/ ${MAKEFLAGS} CXX=${CXX} release
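
Note: the two -#/+ pairs above re-enable the previously commented-out beauty and pedestrian subdirectory builds, so both upgraded demos are compiled again by the dirs target. Building one demo in isolation should also work via the same recursive invocation the Makefile uses, e.g. "make -C src/beauty CXX=<your-compiler>" (a sketch; the exact flags depend on the per-directory Makefiles, which this diff does not show).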

src/beauty/beauty-demo.cpp
@@ -1,6 +1,8 @@
 #define CL_TARGET_OPENCL_VERSION 120
-#include "../common/subsystems.hpp"
+#include "../common/viz2d.hpp"
+#include "../common/nvg.hpp"
+#include "../common/util.hpp"
 #include <vector>
 #include <string>
@@ -16,7 +18,7 @@ constexpr unsigned int WIDTH = 1920;
 constexpr unsigned int HEIGHT = 1080;
 constexpr double SCALE = 0.125;
 constexpr bool OFFSCREEN = false;
-constexpr const char* OUTPUT_FILENAME = "beauty-demo.mkv";
+constexpr const char *OUTPUT_FILENAME = "beauty-demo.mkv";
 constexpr int VA_HW_DEVICE_INDEX = 0;
 constexpr unsigned long DIAG = hypot(double(WIDTH), double(HEIGHT));
@@ -109,36 +111,38 @@ struct FaceFeatures {
     }
 };

-void draw_face_bg_mask(NVGcontext* vg, const vector<FaceFeatures> &lm) {
+void draw_face_bg_mask(const vector<FaceFeatures> &lm) {
+    using namespace kb::viz2d;
     for (size_t i = 0; i < lm.size(); i++) {
         vector<vector<cv::Point2f>> features = lm[i].features();
         cv::RotatedRect rotRect = cv::fitEllipse(features[0]);

-        nvgBeginPath(vg);
-        nvgFillColor(vg, nvgRGBA(255, 255, 255, 255));
-        nvgEllipse(vg, rotRect.center.x, rotRect.center.y * 1.5, rotRect.size.width / 2, rotRect.size.height / 2);
-        nvgRotate(vg, rotRect.angle);
-        nvgFill(vg);
+        nvg::beginPath();
+        nvg::fillColor(cv::Scalar(255, 255, 255, 255));
+        nvg::ellipse(rotRect.center.x, rotRect.center.y * 1.5, rotRect.size.width / 2, rotRect.size.height / 2);
+        nvg::rotate(rotRect.angle);
+        nvg::fill();
     }
 }

-void draw_face_fg_mask(NVGcontext* vg, const vector<FaceFeatures> &lm) {
+void draw_face_fg_mask(const vector<FaceFeatures> &lm) {
+    using namespace kb::viz2d;
     for (size_t i = 0; i < lm.size(); i++) {
         vector<vector<cv::Point2f>> features = lm[i].features();
         for (size_t j = 5; j < 8; ++j) {
-            nvgBeginPath(vg);
-            nvgFillColor(vg, nvgRGBA(255, 255, 255, 255));
-            nvgMoveTo(vg, features[j][0].x, features[j][0].y);
+            nvg::beginPath();
+            nvg::fillColor(cv::Scalar(255, 255, 255, 255));
+            nvg::moveTo(features[j][0].x, features[j][0].y);
             for (size_t k = 1; k < features[j].size(); ++k) {
-                nvgLineTo(vg, features[j][k].x, features[j][k].y);
+                nvg::lineTo(features[j][k].x, features[j][k].y);
             }
-            nvgClosePath(vg);
-            nvgFill(vg);
+            nvg::closePath();
+            nvg::fill();
         }
     }
 }

-void reduce_shadows(const cv::UMat& srcBGR, cv::UMat& dstBGR, double to_percent) {
+void reduce_shadows(const cv::UMat &srcBGR, cv::UMat &dstBGR, double to_percent) {
     assert(srcBGR.type() == CV_8UC3);
     static cv::UMat hsv;
     static vector<cv::UMat> hsvChannels;
@@ -146,7 +150,7 @@ void reduce_shadows(const cv::UMat& srcBGR, cv::UMat& dstBGR, double to_percent)
     cvtColor(srcBGR, hsv, cv::COLOR_BGR2HSV);
     cv::split(hsv, hsvChannels);
-    hsvChannels[2].convertTo(valueFloat, CV_32F, 1.0/255.0);
+    hsvChannels[2].convertTo(valueFloat, CV_32F, 1.0 / 255.0);

     double minIn, maxIn;
     cv::minMaxLoc(valueFloat, &minIn, &maxIn);
@@ -161,7 +165,7 @@ void reduce_shadows(const cv::UMat& srcBGR, cv::UMat& dstBGR, double to_percent)
     cvtColor(hsv, dstBGR, cv::COLOR_HSV2BGR);
 }

-void unsharp_mask(const cv::UMat& src, cv::UMat& dst, const float strength) {
+void unsharp_mask(const cv::UMat &src, cv::UMat &dst, const float strength) {
     static cv::UMat blurred;
     cv::medianBlur(src, blurred, 3);
     cv::UMat laplacian;
@@ -171,166 +175,143 @@ void unsharp_mask(const cv::UMat& src, cv::UMat& dst, const float strength) {
 }

 int main(int argc, char **argv) {
-    using namespace kb;
+    using namespace kb::viz2d;
     if (argc != 2) {
         std::cerr << "Usage: beauty-demo <input-video-file>" << endl;
         exit(1);
     }

-    //Initialize the application
-    app::init("Beauty Demo", WIDTH, HEIGHT, WIDTH, HEIGHT, OFFSCREEN);
-    //Print system information
-    app::print_system_info();
-
-    app::run([&]() {
-        cv::Size frameBufferSize(app::frame_buffer_width, app::frame_buffer_height);
-        cv::VideoCapture capture(argv[1], cv::CAP_FFMPEG, {
-                cv::CAP_PROP_HW_DEVICE, VA_HW_DEVICE_INDEX,
-                cv::CAP_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_VAAPI,
-                cv::CAP_PROP_HW_ACCELERATION_USE_OPENCL, 1
-        });
-        //Copy OpenCL Context for VAAPI. Must be called right after first VideoWriter/VideoCapture initialization.
-        va::copy();
-
-        if (!capture.isOpened()) {
-            cerr << "ERROR! Unable to open video input" << endl;
-            return;
-        }
-
-        double fps = capture.get(cv::CAP_PROP_FPS);
-        int w = capture.get(cv::CAP_PROP_FRAME_WIDTH);
-        int h = capture.get(cv::CAP_PROP_FRAME_HEIGHT);
-
-        cv::Ptr<cv::FaceDetectorYN> detector = cv::FaceDetectorYN::create("assets/face_detection_yunet_2022mar.onnx", "", cv::Size(w * SCALE, h * SCALE), 0.9, 0.3, 5000, cv::dnn::DNN_BACKEND_OPENCV, cv::dnn::DNN_TARGET_OPENCL);
-        cv::Ptr<cv::face::Facemark> facemark = cv::face::createFacemarkLBF();
-        facemark->loadModel("assets/lbfmodel.yaml");
-        cv::detail::MultiBandBlender blender(true);
-
-        cv::VideoWriter writer(OUTPUT_FILENAME, cv::CAP_FFMPEG, cv::VideoWriter::fourcc('V', 'P', '9', '0'), fps, frameBufferSize, {
-                cv::VIDEOWRITER_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_VAAPI,
-                cv::VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL, 1
-        });
-
-        //BGR
-        cv::UMat rgb, down, faceBgMask, diff, blurred, reduced, sharpened, masked;
-        cv::UMat frameOut(HEIGHT, WIDTH, CV_8UC3);
-        cv::UMat lhalf(HEIGHT * SCALE, WIDTH * SCALE, CV_8UC3);
-        cv::UMat rhalf(lhalf.size(), lhalf.type());
-        //GREY
-        cv::UMat downGrey, faceBgMaskGrey, faceBgMaskInvGrey, faceFgMaskGrey, resMaskGrey;
-        //BGR-Float
-        cv::UMat frameOutFloat;
-
-        cv::Mat faces;
-        vector<cv::Rect> faceRects;
-        vector<vector<cv::Point2f>> shapes;
-        vector<FaceFeatures> featuresList;
-
-        while (true) {
-            bool success = va::read([&capture](cv::UMat& videoFrame){
-                //videoFrame will be converted to BGRA and stored in the frameBuffer.
-                capture >> videoFrame;
-            });
-            if(!success)
-                break;
-
-            cl::compute([&](cv::UMat& frameBuffer){
-                cvtColor(frameBuffer,rgb,cv::COLOR_BGRA2RGB);
-                cv::resize(rgb, down, cv::Size(0, 0), SCALE, SCALE);
-                cvtColor(down, downGrey, cv::COLOR_BGRA2GRAY);
-                detector->detect(down, faces);
-            });
-
-            faceRects.clear();
-            for (int i = 0; i < faces.rows; i++) {
-                faceRects.push_back(cv::Rect(int(faces.at<float>(i, 0)), int(faces.at<float>(i, 1)), int(faces.at<float>(i, 2)), int(faces.at<float>(i, 3))));
-            }
-
-            shapes.clear();
-
-            if (!faceRects.empty() && facemark->fit(downGrey, faceRects, shapes)) {
-                featuresList.clear();
-                for (size_t i = 0; i < faceRects.size(); ++i) {
-                    featuresList.push_back(FaceFeatures(faceRects[i], shapes[i], float(down.size().width) / WIDTH));
-                }
-
-                nvg::render([&](NVGcontext* vg, int w, int h) {
-                    nvg::clear();
-                    //Draw the face background mask (= face oval)
-                    draw_face_bg_mask(vg, featuresList);
-                });
-
-                cl::compute([&](cv::UMat &frameBuffer) {
-                    //Convert/Copy the mask
-                    cvtColor(frameBuffer, faceBgMask, cv::COLOR_BGRA2BGR);
-                    cvtColor(frameBuffer, faceBgMaskGrey, cv::COLOR_BGRA2GRAY);
-                });
-
-                nvg::render([&](NVGcontext* vg, int w, int h) {
-                    nvg::clear();
-                    //Draw the face forground mask (= eyes and outer lips)
-                    draw_face_fg_mask(vg, featuresList);
-                });
-
-                cl::compute([&](cv::UMat &frameBuffer) {
-                    //Convert/Copy the mask
-                    cvtColor(frameBuffer, faceFgMaskGrey, cv::COLOR_BGRA2GRAY);
-                    //Dilate the face forground mask to make eyes and mouth areas wider
-                    int morph_size = 1;
-                    cv::Mat element = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(2 * morph_size + 1, 2 * morph_size + 1), cv::Point(morph_size, morph_size));
-                    cv::morphologyEx(faceFgMaskGrey, faceFgMaskGrey, cv::MORPH_DILATE, element, cv::Point(element.cols >> 1, element.rows >> 1), DILATE_ITERATIONS, cv::BORDER_CONSTANT, cv::morphologyDefaultBorderValue());
-
-                    cv::subtract(faceBgMaskGrey, faceFgMaskGrey, faceBgMaskGrey);
-                    cv::bitwise_not(faceBgMaskGrey, faceBgMaskInvGrey);
-                    unsharp_mask(rgb, sharpened, UNSHARP_STRENGTH);
-                    reduce_shadows(rgb, reduced, REDUCE_SHADOW);
-                    blender.prepare(cv::Rect(0, 0, WIDTH, HEIGHT));
-                    blender.feed(reduced, faceBgMaskGrey, cv::Point(0, 0));
-                    blender.feed(sharpened, faceBgMaskInvGrey, cv::Point(0, 0));
-                    blender.blend(frameOutFloat, resMaskGrey);
-                    frameOutFloat.convertTo(frameOut, CV_8U, 1.0);
-
-                    cv::boxFilter(frameOut, blurred, -1, cv::Size(BLUR_KERNEL_SIZE, BLUR_KERNEL_SIZE), cv::Point(-1, -1), true, cv::BORDER_REPLICATE);
-                    cv::subtract(blurred, rgb, diff);
-                    bitwise_and(diff, faceBgMask, masked);
-                    cv::add(frameOut, masked, reduced);
-
-                    cv::resize(rgb, lhalf, cv::Size(0, 0), 0.5, 0.5);
-                    cv::resize(reduced, rhalf, cv::Size(0, 0), 0.5, 0.5);
-
-                    frameOut = cv::Scalar::all(0);
-                    lhalf.copyTo(frameOut(cv::Rect(0, 0, lhalf.size().width, lhalf.size().height)));
-                    rhalf.copyTo(frameOut(cv::Rect(rhalf.size().width, 0, rhalf.size().width, rhalf.size().height)));
-                    cvtColor(frameOut, frameBuffer, cv::COLOR_BGR2RGBA);
-                });
-            } else {
-                cl::compute([&](cv::UMat &frameBuffer) {
-                    frameOut = cv::Scalar::all(0);
-                    cv::resize(rgb, lhalf, cv::Size(0, 0), 0.5, 0.5);
-                    lhalf.copyTo(frameOut(cv::Rect(0, 0, lhalf.size().width, lhalf.size().height)));
-                    lhalf.copyTo(frameOut(cv::Rect(lhalf.size().width, 0, lhalf.size().width, lhalf.size().height)));
-                    cvtColor(frameOut, frameBuffer, cv::COLOR_BGR2RGBA);
-                });
-            }
-
-            va::write([&writer](const cv::UMat& videoFrame){
-                //videoFrame is the frameBuffer converted to BGR. Ready to be written.
-                writer << videoFrame;
-            });
-
-            app::update_fps();
-
-            //If onscreen rendering is enabled it displays the framebuffer in the native window. Returns false if the window was closed.
-            if(!app::display())
-                break;
-        }
-
-        app::terminate();
-    });
+    cv::Ptr<Viz2D> v2d = new Viz2D(cv::Size(WIDTH, HEIGHT), cv::Size(WIDTH, HEIGHT), OFFSCREEN, "Beauty Demo");
+    print_system_info();
+    if (!v2d->isOffscreen())
+        v2d->setVisible(true);
+
+    auto capture = v2d->makeVACapture(argv[1], VA_HW_DEVICE_INDEX);
+
+    if (!capture.isOpened()) {
+        cerr << "ERROR! Unable to open video input" << endl;
+        exit(-1);
+    }
+
+    float fps = capture.get(cv::CAP_PROP_FPS);
+    float width = capture.get(cv::CAP_PROP_FRAME_WIDTH);
+    float height = capture.get(cv::CAP_PROP_FRAME_HEIGHT);
+    v2d->makeVAWriter(OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), fps, v2d->getFrameBufferSize(), VA_HW_DEVICE_INDEX);
+
+    cv::Ptr<cv::FaceDetectorYN> detector = cv::FaceDetectorYN::create("assets/face_detection_yunet_2022mar.onnx", "", cv::Size(width * SCALE, height * SCALE), 0.9, 0.3, 5000, cv::dnn::DNN_BACKEND_OPENCV, cv::dnn::DNN_TARGET_OPENCL);
+    cv::Ptr<cv::face::Facemark> facemark = cv::face::createFacemarkLBF();
+    facemark->loadModel("assets/lbfmodel.yaml");
+    cv::detail::MultiBandBlender blender(true);
+
+    //BGR
+    cv::UMat rgb, down, faceBgMask, diff, blurred, reduced, sharpened, masked;
+    cv::UMat frameOut(HEIGHT, WIDTH, CV_8UC3);
+    cv::UMat lhalf(HEIGHT * SCALE, WIDTH * SCALE, CV_8UC3);
+    cv::UMat rhalf(lhalf.size(), lhalf.type());
+    //GREY
+    cv::UMat downGrey, faceBgMaskGrey, faceBgMaskInvGrey, faceFgMaskGrey, resMaskGrey;
+    //BGR-Float
+    cv::UMat frameOutFloat;
+
+    cv::Mat faces;
+    vector<cv::Rect> faceRects;
+    vector<vector<cv::Point2f>> shapes;
+    vector<FaceFeatures> featuresList;
+
+    while (true) {
+        if(!v2d->captureVA())
+            break;
+
+        v2d->opencl([&](cv::UMat &frameBuffer) {
+            cvtColor(frameBuffer, rgb, cv::COLOR_BGRA2RGB);
+            cv::resize(rgb, down, cv::Size(0, 0), SCALE, SCALE);
+            cvtColor(down, downGrey, cv::COLOR_BGRA2GRAY);
+            detector->detect(down, faces);
+        });
+
+        faceRects.clear();
+        for (int i = 0; i < faces.rows; i++) {
+            faceRects.push_back(cv::Rect(int(faces.at<float>(i, 0)), int(faces.at<float>(i, 1)), int(faces.at<float>(i, 2)), int(faces.at<float>(i, 3))));
+        }
+
+        shapes.clear();
+
+        if (!faceRects.empty() && facemark->fit(downGrey, faceRects, shapes)) {
+            featuresList.clear();
+            for (size_t i = 0; i < faceRects.size(); ++i) {
+                featuresList.push_back(FaceFeatures(faceRects[i], shapes[i], float(down.size().width) / WIDTH));
+            }
+
+            v2d->nanovg([&](const cv::Size& sz) {
+                v2d->clear();
+                //Draw the face background mask (= face oval)
+                draw_face_bg_mask(featuresList);
+            });
+
+            v2d->opencl([&](cv::UMat &frameBuffer) {
+                //Convert/Copy the mask
+                cvtColor(frameBuffer, faceBgMask, cv::COLOR_BGRA2BGR);
+                cvtColor(frameBuffer, faceBgMaskGrey, cv::COLOR_BGRA2GRAY);
+            });
+
+            v2d->nanovg([&](const cv::Size& sz) {
+                v2d->clear();
+                //Draw the face foreground mask (= eyes and outer lips)
+                draw_face_fg_mask(featuresList);
+            });
+
+            v2d->opencl([&](cv::UMat &frameBuffer) {
+                //Convert/Copy the mask
+                cvtColor(frameBuffer, faceFgMaskGrey, cv::COLOR_BGRA2GRAY);
+                //Dilate the face foreground mask to make eyes and mouth areas wider
+                int morph_size = 1;
+                cv::Mat element = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(2 * morph_size + 1, 2 * morph_size + 1), cv::Point(morph_size, morph_size));
+                cv::morphologyEx(faceFgMaskGrey, faceFgMaskGrey, cv::MORPH_DILATE, element, cv::Point(element.cols >> 1, element.rows >> 1), DILATE_ITERATIONS, cv::BORDER_CONSTANT, cv::morphologyDefaultBorderValue());
+
+                cv::subtract(faceBgMaskGrey, faceFgMaskGrey, faceBgMaskGrey);
+                cv::bitwise_not(faceBgMaskGrey, faceBgMaskInvGrey);
+                unsharp_mask(rgb, sharpened, UNSHARP_STRENGTH);
+                reduce_shadows(rgb, reduced, REDUCE_SHADOW);
+                blender.prepare(cv::Rect(0, 0, WIDTH, HEIGHT));
+                blender.feed(reduced, faceBgMaskGrey, cv::Point(0, 0));
+                blender.feed(sharpened, faceBgMaskInvGrey, cv::Point(0, 0));
+                blender.blend(frameOutFloat, resMaskGrey);
+                frameOutFloat.convertTo(frameOut, CV_8U, 1.0);
+
+                cv::boxFilter(frameOut, blurred, -1, cv::Size(BLUR_KERNEL_SIZE, BLUR_KERNEL_SIZE), cv::Point(-1, -1), true, cv::BORDER_REPLICATE);
+                cv::subtract(blurred, rgb, diff);
+                bitwise_and(diff, faceBgMask, masked);
+                cv::add(frameOut, masked, reduced);
+
+                cv::resize(rgb, lhalf, cv::Size(0, 0), 0.5, 0.5);
+                cv::resize(reduced, rhalf, cv::Size(0, 0), 0.5, 0.5);
+
+                frameOut = cv::Scalar::all(0);
+                lhalf.copyTo(frameOut(cv::Rect(0, 0, lhalf.size().width, lhalf.size().height)));
+                rhalf.copyTo(frameOut(cv::Rect(rhalf.size().width, 0, rhalf.size().width, rhalf.size().height)));
+                cvtColor(frameOut, frameBuffer, cv::COLOR_BGR2RGBA);
+            });
+        } else {
+            v2d->opencl([&](cv::UMat &frameBuffer) {
+                frameOut = cv::Scalar::all(0);
+                cv::resize(rgb, lhalf, cv::Size(0, 0), 0.5, 0.5);
+                lhalf.copyTo(frameOut(cv::Rect(0, 0, lhalf.size().width, lhalf.size().height)));
+                lhalf.copyTo(frameOut(cv::Rect(lhalf.size().width, 0, lhalf.size().width, lhalf.size().height)));
+                cvtColor(frameOut, frameBuffer, cv::COLOR_BGR2RGBA);
+            });
+        }
+
+        update_fps(v2d, true);
+
+        v2d->writeVA();
+
+        //If onscreen rendering is enabled it displays the framebuffer in the native window. Returns false if the window was closed.
+        if(!v2d->display())
+            break;
+    }

     return 0;
 }
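
Note on the migration pattern: the free-standing kb::app/va::/cl::/nvg:: subsystems are folded into a single Viz2D object. A minimal sketch of the new per-frame pipeline, using only calls that appear in this diff (processing omitted, left as comments):

    // Sketch of the Viz2D loop introduced by this commit (see beauty-demo above).
    cv::Ptr<kb::viz2d::Viz2D> v2d = new kb::viz2d::Viz2D(cv::Size(WIDTH, HEIGHT), cv::Size(WIDTH, HEIGHT), OFFSCREEN, "Demo");
    auto capture = v2d->makeVACapture(argv[1], VA_HW_DEVICE_INDEX);
    v2d->makeVAWriter(OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'),
            capture.get(cv::CAP_PROP_FPS), v2d->getFrameBufferSize(), VA_HW_DEVICE_INDEX);

    while (true) {
        if (!v2d->captureVA())      // VAAPI-decode the next frame into the framebuffer
            break;
        v2d->opencl([&](cv::UMat& frameBuffer) {
            // cv:: processing directly on the shared BGRA framebuffer
        });
        v2d->nanovg([&](const cv::Size& sz) {
            v2d->clear();           // nvg:: vector drawing over the framebuffer
        });
        v2d->writeVA();             // VAAPI-encode the framebuffer
        if (!v2d->display())        // returns false once the native window is closed
            break;
    }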

src/pedestrian/pedestrian-demo.cpp
@@ -1,6 +1,9 @@
 #define CL_TARGET_OPENCL_VERSION 120
-#include "../common/subsystems.hpp"
+#include "../common/viz2d.hpp"
+#include "../common/nvg.hpp"
+#include "../common/util.hpp"
 #include <string>

 #include <opencv2/objdetect/objdetect.hpp>
@@ -107,40 +110,29 @@ void composite_layers(const cv::UMat background, const cv::UMat foreground, cons
 }

 int main(int argc, char **argv) {
-    using namespace kb;
+    using namespace kb::viz2d;
     if (argc != 2) {
         std::cerr << "Usage: pedestrian-demo <video-input>" << endl;
         exit(1);
     }

-    //Initialize the application
-    app::init("Pedestrian Demo", WIDTH, HEIGHT, WIDTH, HEIGHT, OFFSCREEN);
-    //Print system information
-    app::print_system_info();
-
-    app::run([&]() {
-        cv::Size frameBufferSize(app::frame_buffer_width, app::frame_buffer_height);
-        cv::VideoCapture capture(argv[1], cv::CAP_FFMPEG, {
-                cv::CAP_PROP_HW_DEVICE, VA_HW_DEVICE_INDEX,
-                cv::CAP_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_VAAPI,
-                cv::CAP_PROP_HW_ACCELERATION_USE_OPENCL, 1
-        });
-        va::copy();
-
-        if (!capture.isOpened()) {
-            cerr << "ERROR! Unable to open video-input" << endl;
-            return;
-        }
-
-        double fps = capture.get(cv::CAP_PROP_FPS);
-        cerr << "Detected FPS: " << fps << endl;
-        cv::VideoWriter writer(OUTPUT_FILENAME, cv::CAP_FFMPEG, cv::VideoWriter::fourcc('V', 'P', '9', '0'), fps, frameBufferSize, {
-                cv::VIDEOWRITER_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_VAAPI,
-                cv::VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL, 1
-        });
+    cv::Ptr<Viz2D> v2d = new Viz2D(cv::Size(WIDTH, HEIGHT), cv::Size(WIDTH, HEIGHT), OFFSCREEN, "Pedestrian Demo");
+    print_system_info();
+    if (!v2d->isOffscreen())
+        v2d->setVisible(true);
+
+    auto capture = v2d->makeVACapture(argv[1], VA_HW_DEVICE_INDEX);
+
+    if (!capture.isOpened()) {
+        cerr << "ERROR! Unable to open video input" << endl;
+        exit(-1);
+    }
+
+    float fps = capture.get(cv::CAP_PROP_FPS);
+    float width = capture.get(cv::CAP_PROP_FRAME_WIDTH);
+    float height = capture.get(cv::CAP_PROP_FRAME_HEIGHT);
+    v2d->makeVAWriter(OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), fps, v2d->getFrameBufferSize(), VA_HW_DEVICE_INDEX);

     //BGRA
     cv::UMat background, foreground(HEIGHT, WIDTH, CV_8UC4, cv::Scalar::all(0));
     //RGB

(Editorial fix: the added line originally passed "Beauty Demo" as the window title here, an apparent copy-paste slip from beauty-demo.cpp; it is corrected to "Pedestrian Demo" above.)
@@ -156,15 +148,10 @@ int main(int argc, char **argv) {
     vector<double> probs;

     while (true) {
-        bool success = va::read([&capture](cv::UMat& videoFrame){
-            //videoFrame will be converted to BGRA and stored in the frameBuffer.
-            capture >> videoFrame;
-        });
-
-        if(!success)
+        if(!v2d->captureVA())
             break;

-        cl::compute([&](cv::UMat& frameBuffer){
+        v2d->opencl([&](cv::UMat& frameBuffer){
             cvtColor(frameBuffer,videoFrameUp,cv::COLOR_BGRA2RGB);
             cv::resize(frameBuffer, videoFrameDown, cv::Size(DOWNSIZE_WIDTH, DOWNSIZE_HEIGHT));
             cv::cvtColor(videoFrameDown, videoFrameDownGrey, cv::COLOR_RGB2GRAY);
@@ -189,35 +176,30 @@ int main(int argc, char **argv) {
             }
         }

-        nvg::render([&](NVGcontext* vg, int w, int h) {
-            nvg::clear();
-            nvgBeginPath(vg);
-            nvgStrokeWidth(vg, std::fmax(2.0, WIDTH / 960.0));
-            nvgStrokeColor(vg, nvgHSLA(0.0, 1, 0.5, 200));
+        v2d->nanovg([&](const cv::Size& sz) {
+            v2d->clear();
+            nvg::beginPath();
+            nvg::strokeWidth(std::fmax(2.0, WIDTH / 960.0));
+            nvg::strokeColor(kb::viz2d::convert(cv::Scalar(0, 127, 255, 200), cv::COLOR_HLS2BGR));
             for (size_t i = 0; i < maxLocations.size(); i++) {
-                nvgRect(vg, maxLocations[i].x * WIDTH_FACTOR, maxLocations[i].y * HEIGHT_FACTOR, maxLocations[i].width * WIDTH_FACTOR, maxLocations[i].height * HEIGHT_FACTOR);
+                nvg::rect(maxLocations[i].x * WIDTH_FACTOR, maxLocations[i].y * HEIGHT_FACTOR, maxLocations[i].width * WIDTH_FACTOR, maxLocations[i].height * HEIGHT_FACTOR);
             }
-            nvgStroke(vg);
+            nvg::stroke();
         });

-        cl::compute([&](cv::UMat& frameBuffer){
+        v2d->opencl([&](cv::UMat& frameBuffer){
             //Put it all together
             composite_layers(background, foreground, frameBuffer, frameBuffer, BLUR_KERNEL_SIZE, fg_loss);
         });

-        va::write([&writer](const cv::UMat& videoFrame){
-            //videoFrame is the frameBuffer converted to BGR. Ready to be written.
-            writer << videoFrame;
-        });
-
-        app::update_fps();
+        update_fps(v2d, true);
+
+        v2d->writeVA();

         //If onscreen rendering is enabled it displays the framebuffer in the native window. Returns false if the window was closed.
-        if (!app::display())
+        if(!v2d->display())
             break;
     }
-
-        app::terminate();
-    });

     return 0;
 }
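One subtle translation in the hunk above: nvgHSLA(0.0, 1, 0.5, 200), i.e. hue 0, saturation 1, lightness 0.5 (pure red) at alpha 200, becomes a cv::Scalar pushed through an HLS-to-BGR conversion. A sketch of why the numbers match, assuming kb::viz2d::convert simply applies cv::cvtColor to the scalar (an assumption; its implementation is not part of this diff):

    // OpenCV HLS ranges for 8-bit data: H in [0,180], L and S in [0,255].
    // H=0, L=127 (~0.5), S=255 (1.0) is the same red nvgHSLA(0.0, 1, 0.5, ...) produced;
    // the fourth channel (200) is carried through unchanged as alpha.
    nvg::strokeColor(kb::viz2d::convert(cv::Scalar(0, 127, 255, 200), cv::COLOR_HLS2BGR));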
