implemented a source/sink mechanism and ported all demos to it

pull/3471/head
kallaballa 2 years ago
parent 0b13d89080
commit 2d9b15e493
  1. src/beauty/beauty-demo.cpp (96)
  2. src/common/Makefile (2)
  3. src/common/sink.cpp (30)
  4. src/common/sink.hpp (25)
  5. src/common/source.cpp (35)
  6. src/common/source.hpp (29)
  7. src/common/util.cpp (80)
  8. src/common/util.hpp (17)
  9. src/common/viz2d.cpp (126)
  10. src/common/viz2d.hpp (19)
  11. src/font/font-demo.cpp (7)
  12. src/nanovg/nanovg-demo.cpp (13)
  13. src/optflow/optflow-demo.cpp (35)
  14. src/pedestrian/pedestrian-demo.cpp (13)
  15. src/shader/shader-demo.cpp (27)
  16. src/tetra/tetra-demo.cpp (15)
  17. src/video/video-demo.cpp (18)

@ -13,6 +13,11 @@
#include <opencv2/stitching/detail/blenders.hpp>
#include <opencv2/tracking.hpp>
using std::cerr;
using std::endl;
using std::vector;
using std::string;
#ifdef __EMSCRIPTEN__
//enables KCF tracking instead of continuous detection.
#define USE_TRACKER 1;
@ -46,51 +51,6 @@ static cv::Ptr<cv::face::Facemark> facemark = cv::face::createFacemarkLBF();
#ifdef USE_TRACKER
static cv::Ptr<cv::Tracker> tracker = cv::TrackerKCF::create();
#endif
using std::cerr;
using std::endl;
using std::vector;
using std::string;
#ifdef __EMSCRIPTEN__
# include <emscripten.h>
# include <emscripten/bind.h>
# include <fstream>
using namespace emscripten;
std::string pushImage(std::string filename){
try {
std::ifstream fs(filename, std::fstream::in | std::fstream::binary);
fs.seekg(0, std::ios::end);
auto length = fs.tellg();
fs.seekg(0, std::ios::beg);
v2d->capture([&](cv::UMat &videoFrame) {
if(videoFrame.empty())
videoFrame.create(HEIGHT, WIDTH, CV_8UC3);
if (length == (videoFrame.elemSize() + 1) * videoFrame.total()) {
cv::Mat tmp;
cv::Mat v = videoFrame.getMat(cv::ACCESS_RW);
cvtColor(v, tmp, cv::COLOR_RGB2BGRA);
fs.read((char*)(tmp.data), tmp.elemSize() * tmp.total());
cvtColor(tmp, v, cv::COLOR_BGRA2RGB);
v.release();
tmp.release();
} else {
cerr << "mismatch" << endl;
}
});
return "success";
} catch(std::exception& ex) {
return string(ex.what());
}
}
EMSCRIPTEN_BINDINGS(my_module)
{
function("push_image", &pushImage);
}
#endif
struct FaceFeatures {
cv::Rect faceRect_;
@ -199,13 +159,13 @@ void draw_face_fg_mask(const vector<FaceFeatures> &lm) {
}
}
void adjust_saturation(const cv::UMat &srcBGR, cv::UMat &dstBGR, float by) {
void adjust_saturation(const cv::UMat &srcBGR, cv::UMat &dstBGR, float multiplier) {
static vector<cv::UMat> channels;
static cv::UMat hls;
cvtColor(srcBGR, hls, cv::COLOR_BGR2HLS);
split(hls, channels);
cv::multiply(channels[2], by, channels[2]);
cv::multiply(channels[2], multiplier, channels[2]);
merge(channels, hls);
cvtColor(hls, dstBGR, cv::COLOR_HLS2BGR);
}
@ -256,7 +216,7 @@ void iteration() {
static cv::Ptr<cv::FaceDetectorYN> detector = cv::FaceDetectorYN::create("assets/face_detection_yunet_2022mar.onnx", "", cv::Size(v2d->getFrameBufferSize().width * SCALE, v2d->getFrameBufferSize().height * SCALE), 0.9, 0.3, 5000, cv::dnn::DNN_BACKEND_OPENCV, cv::dnn::DNN_TARGET_OPENCL);
static cv::detail::MultiBandBlender blender(false, 5);
//BGR
static cv::UMat bgr, down, faceBgMask, blurred, adjusted, saturated, skin;
static cv::UMat input, down, faceBgMask, blurred, contrast, eyesAndLips, skin;
static cv::UMat frameOut(HEIGHT, WIDTH, CV_8UC3);
static cv::UMat lhalf(HEIGHT * SCALE, WIDTH * SCALE, CV_8UC3);
static cv::UMat rhalf(lhalf.size(), lhalf.type());
@ -276,10 +236,10 @@ void iteration() {
exit(0);
#endif
v2d->clgl([&](cv::UMat &frameBuffer) {
cvtColor(frameBuffer, bgr, cv::COLOR_BGRA2BGR);
cvtColor(frameBuffer, input, cv::COLOR_BGRA2BGR);
});
cv::resize(bgr, down, cv::Size(0, 0), SCALE, SCALE);
cv::resize(input, down, cv::Size(0, 0), SCALE, SCALE);
shapes.clear();
faceRects.clear();
@ -335,26 +295,26 @@ void iteration() {
cv::bitwise_not(faceFgMaskGrey,faceFgMaskInvGrey);
//boost saturation of eyes and lips
adjust_saturation(bgr,saturated, eyes_and_lips_saturation);
adjust_saturation(input,eyesAndLips, eyes_and_lips_saturation);
//reduce skin contrast
multiply(bgr, cv::Scalar::all(skin_contrast), adjusted);
multiply(input, cv::Scalar::all(skin_contrast), contrast);
//fix skin brightness
add(adjusted, cv::Scalar::all((1.0 - skin_contrast) / 2.0) * 255.0, adjusted);
add(contrast, cv::Scalar::all((1.0 - skin_contrast) / 2.0) * 255.0, contrast);
//blur the skin
cv::boxFilter(adjusted, blurred, -1, cv::Size(blur_skin_kernel_size, blur_skin_kernel_size), cv::Point(-1, -1), true, cv::BORDER_REPLICATE);
cv::boxFilter(contrast, blurred, -1, cv::Size(blur_skin_kernel_size, blur_skin_kernel_size), cv::Point(-1, -1), true, cv::BORDER_REPLICATE);
//boost skin saturation
adjust_saturation(blurred,skin, skin_saturation);
//piece it all together
blender.prepare(cv::Rect(0, 0, WIDTH, HEIGHT));
blender.feed(skin, faceBgMaskGrey, cv::Point(0, 0));
blender.feed(bgr, faceFgMaskInvGrey, cv::Point(0, 0));
blender.feed(saturated, faceFgMaskGrey, cv::Point(0, 0));
blender.feed(input, faceFgMaskInvGrey, cv::Point(0, 0));
blender.feed(eyesAndLips, faceFgMaskGrey, cv::Point(0, 0));
blender.blend(frameOutFloat, cv::UMat());
frameOutFloat.convertTo(frameOut, CV_8U, 1.0);
if (side_by_side) {
cv::resize(bgr, lhalf, cv::Size(0, 0), 0.5, 0.5);
cv::resize(input, lhalf, cv::Size(0, 0), 0.5, 0.5);
cv::resize(frameOut, rhalf, cv::Size(0, 0), 0.5, 0.5);
frameOut = cv::Scalar::all(0);
@ -368,11 +328,11 @@ void iteration() {
} else {
if (side_by_side) {
frameOut = cv::Scalar::all(0);
cv::resize(bgr, lhalf, cv::Size(0, 0), 0.5, 0.5);
cv::resize(input, lhalf, cv::Size(0, 0), 0.5, 0.5);
lhalf.copyTo(frameOut(cv::Rect(0, 0, lhalf.size().width, lhalf.size().height)));
lhalf.copyTo(frameOut(cv::Rect(lhalf.size().width, 0, lhalf.size().width, lhalf.size().height)));
} else {
bgr.copyTo(frameOut);
input.copyTo(frameOut);
}
v2d->clgl([&](cv::UMat &frameBuffer) {
@ -414,22 +374,18 @@ int main(int argc, char **argv) {
}
#ifndef __EMSCRIPTEN__
auto capture = v2d->makeVACapture(argv[1], VA_HW_DEVICE_INDEX);
if (!capture.isOpened()) {
cerr << "ERROR! Unable to open video input" << endl;
exit(-1);
}
Source src = make_va_source(v2d, argv[1], VA_HW_DEVICE_INDEX);
v2d->setSource(src);
float fps = capture.get(cv::CAP_PROP_FPS);
float width = capture.get(cv::CAP_PROP_FRAME_WIDTH);
float height = capture.get(cv::CAP_PROP_FRAME_HEIGHT);
v2d->makeVAWriter(OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), fps, cv::Size(width, height), VA_HW_DEVICE_INDEX);
Sink sink = make_va_sink(v2d, OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), src.fps(), cv::Size(WIDTH, HEIGHT), VA_HW_DEVICE_INDEX);
v2d->setSink(sink);
while (true)
iteration();
#else
emscripten_set_main_loop(iteration, -1, false);
Source src = make_webcam_source(v2d, WIDTH, HEIGHT);
v2d->setSource(src);
emscripten_set_main_loop(iteration, -1, true);
#endif
return 0;
}

@ -4,7 +4,7 @@ else
TARGET := libviz2d.so
endif
SRCS := detail/clglcontext.cpp detail/clvacontext.cpp detail/nanovgcontext.cpp viz2d.cpp util.cpp nvg.cpp
SRCS := detail/clglcontext.cpp detail/clvacontext.cpp detail/nanovgcontext.cpp viz2d.cpp util.cpp nvg.cpp source.cpp sink.cpp
#precompiled headers
HEADERS :=

@ -0,0 +1,30 @@
#include "sink.hpp"
namespace kb {
namespace viz2d {
Sink::Sink(std::function<bool(const cv::UMat&)> consumer) : consumer_(consumer) {
}
Sink::Sink() {
}
Sink::~Sink() {
}
bool Sink::isReady() {
if(consumer_)
return true;
else
return false;
}
bool Sink::isOpen() {
return open_;
}
void Sink::operator()(const cv::UMat& frame) {
open_ = consumer_(frame);
}
} /* namespace viz2d */
} /* namespace kb */

@ -0,0 +1,25 @@
#ifndef SRC_COMMON_SINK_HPP_
#define SRC_COMMON_SINK_HPP_
#include <functional>
#include <opencv2/opencv.hpp>
namespace kb {
namespace viz2d {
class Sink {
bool open_ = true;
std::function<bool(const cv::UMat&)> consumer_;
public:
Sink(std::function<bool(const cv::UMat&)> consumer);
Sink();
virtual ~Sink();
bool isReady();
bool isOpen();
void operator()(const cv::UMat& frame);
};
} /* namespace viz2d */
} /* namespace kb */
#endif /* SRC_COMMON_SINK_HPP_ */
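A minimal sketch of how this Sink can wrap a plain, software-only cv::VideoWriter, mirroring the make_va_sink/make_writer_sink factories in util.cpp below but without the VAAPI properties; the helper name make_plain_writer_sink and its parameters are illustrative and not part of the commit:

#include <memory>
#include <string>
#include <opencv2/opencv.hpp>
#include "sink.hpp"

// Wrap a software VideoWriter in a Sink. The consumer's return value
// (writer->isOpened()) becomes the Sink's open_ flag on every call.
kb::viz2d::Sink make_plain_writer_sink(const std::string& filename, float fps,
                                       const cv::Size& frameSize) {
    auto writer = std::make_shared<cv::VideoWriter>(
        filename, cv::VideoWriter::fourcc('V', 'P', '9', '0'), fps, frameSize);
    return kb::viz2d::Sink([writer](const cv::UMat& frame) {
        (*writer) << frame;
        return writer->isOpened();
    });
}

The VP9 fourcc is the one used throughout the demos; any codec the local OpenCV/FFmpeg build supports would work equally well.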

@ -0,0 +1,35 @@
#include "source.hpp"
namespace kb {
namespace viz2d {
Source::Source(std::function<bool(cv::UMat&)> generator, float fps) : generator_(generator), fps_(fps) {
}
Source::Source() : fps_(0) {
}
Source::~Source() {
}
bool Source::isReady() {
if(generator_)
return true;
else
return false;
}
bool Source::isOpen() {
return open_;
}
float Source::fps() {
return fps_;
}
std::pair<uint64_t, cv::UMat&> Source::operator()() {
open_ = generator_(frame_);
return {count_++, frame_};
}
} /* namespace viz2d */
} /* namespace kb */

@ -0,0 +1,29 @@
#ifndef SRC_COMMON_SOURCE_HPP_
#define SRC_COMMON_SOURCE_HPP_
#include <functional>
#include <opencv2/opencv.hpp>
namespace kb {
namespace viz2d {
class Source {
bool open_ = true;
std::function<bool(cv::UMat&)> generator_;
cv::UMat frame_;
uint64_t count_ = 0;
float fps_;
public:
Source(std::function<bool(cv::UMat&)> generator, float fps);
Source();
virtual ~Source();
bool isReady();
bool isOpen();
float fps();
std::pair<uint64_t, cv::UMat&> operator()();
};
} /* namespace viz2d */
} /* namespace kb */
#endif /* SRC_COMMON_SOURCE_HPP_ */
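To make the pull/push handshake of these two classes concrete, here is a self-contained sketch (not part of the commit) that pairs a synthetic Source with a counting Sink; the frame size and fps values are arbitrary:

#include <cstdint>
#include <iostream>
#include <opencv2/opencv.hpp>
#include "source.hpp"
#include "sink.hpp"

int main() {
    using namespace kb::viz2d;

    // Generator: fill the frame with solid gray; returning true keeps the source open.
    Source src([](cv::UMat& frame) {
        if (frame.empty())
            frame.create(cv::Size(640, 480), CV_8UC3);
        frame.setTo(cv::Scalar::all(128));
        return true;
    }, 30.0f);

    // Consumer: just count frames; returning true keeps the sink open.
    uint64_t written = 0;
    Sink sink([&written](const cv::UMat&) {
        ++written;
        return true;
    });

    for (int i = 0; i < 3 && src.isReady() && sink.isReady(); ++i) {
        auto framePair = src();   // {sequence number, reference to the internal frame}
        sink(framePair.second);   // hand that frame to the consumer
    }
    std::cout << written << " frames passed through" << std::endl;
    return 0;
}

Viz2D::capture() and Viz2D::write() (see viz2d.cpp below) perform essentially this exchange once per rendered frame.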

@ -3,6 +3,11 @@
#include "viz2d.hpp"
#include "nvg.hpp"
#ifdef __EMSCRIPTEN__
# include <emscripten.h>
# include <fstream>
#endif
namespace kb {
namespace viz2d {
@ -81,5 +86,80 @@ void update_fps(cv::Ptr<kb::viz2d::Viz2D> v2d, bool graphically) {
++cnt;
}
#ifndef __EMSCRIPTEN__
Sink make_va_sink(cv::Ptr<Viz2D> v2d, const string &outputFilename, const int fourcc, const float fps, const cv::Size &frameSize, const int vaDeviceIndex) {
cv::Ptr<cv::VideoWriter> writer = new cv::VideoWriter(outputFilename, cv::CAP_FFMPEG, cv::VideoWriter::fourcc('V', 'P', '9', '0'), fps, frameSize, { cv::VIDEOWRITER_PROP_HW_DEVICE, vaDeviceIndex, cv::VIDEOWRITER_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_VAAPI, cv::VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL, 1 });
v2d->setVideoFrameSize(frameSize);
return Sink([=](const cv::UMat& frame){
(*writer) << frame;
return writer->isOpened();
});
}
Source make_va_source(cv::Ptr<Viz2D> v2d, const string &inputFilename, const int vaDeviceIndex) {
cv::Ptr<cv::VideoCapture> capture = new cv::VideoCapture(inputFilename, cv::CAP_FFMPEG, { cv::CAP_PROP_HW_DEVICE, vaDeviceIndex, cv::CAP_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_VAAPI, cv::CAP_PROP_HW_ACCELERATION_USE_OPENCL, 1 });
float fps = capture->get(cv::CAP_PROP_FPS);
float w = capture->get(cv::CAP_PROP_FRAME_WIDTH);
float h = capture->get(cv::CAP_PROP_FRAME_HEIGHT);
v2d->setVideoFrameSize(cv::Size(w,h));
return Source([=](cv::UMat& frame){
(*capture) >> frame;
return !frame.empty();
}, fps);
}
Sink make_writer_sink(cv::Ptr<Viz2D> v2d, const string &outputFilename, const int fourcc, const float fps, const cv::Size &frameSize, const int vaDeviceIndex) {
cv::Ptr<cv::VideoWriter> writer = new cv::VideoWriter(outputFilename, cv::CAP_FFMPEG, cv::VideoWriter::fourcc('V', 'P', '9', '0'), fps, frameSize, { cv::VIDEOWRITER_PROP_HW_DEVICE, vaDeviceIndex, cv::VIDEOWRITER_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_VAAPI, cv::VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL, 1 });
v2d->setVideoFrameSize(frameSize);
return Sink([=](const cv::UMat& frame){
(*writer) << frame;
return writer->isOpened();
});
}
Source make_capture_source(cv::Ptr<Viz2D> v2d, const string &inputFilename, const int vaDeviceIndex) {
cv::Ptr<cv::VideoCapture> capture = new cv::VideoCapture(inputFilename, cv::CAP_FFMPEG, { cv::CAP_PROP_HW_DEVICE, vaDeviceIndex, cv::CAP_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_VAAPI, cv::CAP_PROP_HW_ACCELERATION_USE_OPENCL, 1 });
float fps = capture->get(cv::CAP_PROP_FPS);
float w = capture->get(cv::CAP_PROP_FRAME_WIDTH);
float h = capture->get(cv::CAP_PROP_FRAME_HEIGHT);
v2d->setVideoFrameSize(cv::Size(w,h));
return Source([=](cv::UMat& frame){
(*capture) >> frame;
return !frame.empty();
}, fps);
}
#else
Source make_webcam_source(cv::Ptr<Viz2D> v2d, int width, int height) {
using namespace std;
static cv::Mat tmp(height, width, CV_8UC4);
return Source([=](cv::UMat& frame) {
try {
if (frame.empty())
frame.create(v2d->getInitialSize(), CV_8UC3);
std::ifstream fs("canvas.raw", std::fstream::in | std::fstream::binary);
fs.seekg(0, std::ios::end);
auto length = fs.tellg();
fs.seekg(0, std::ios::beg);
if (length == (frame.elemSize() + 1) * frame.total()) {
cv::Mat v = frame.getMat(cv::ACCESS_WRITE);
fs.read((char*) (tmp.data), tmp.elemSize() * tmp.total());
cvtColor(tmp, v, cv::COLOR_BGRA2RGB);
v.release();
}
} catch(std::exception& ex) {
cerr << ex.what() << endl;
}
return true;
}, 0);
}
#endif
}
}

@ -1,18 +1,35 @@
#ifndef SRC_COMMON_UTIL_HPP_
#define SRC_COMMON_UTIL_HPP_
#include "source.hpp"
#include "sink.hpp"
#include <string>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/ocl.hpp>
#ifdef __EMSCRIPTEN__
# include <emscripten.h>
# include <emscripten/bind.h>
# include <fstream>
#endif
namespace kb {
namespace viz2d {
using std::string;
class Viz2D;
std::string get_gl_info();
std::string get_cl_info();
void print_system_info();
void update_fps(cv::Ptr<Viz2D> viz2d, bool graphical);
#ifndef __EMSCRIPTEN__
Sink make_va_sink(cv::Ptr<Viz2D> v2d, const string &outputFilename, const int fourcc, const float fps, const cv::Size &frameSize, const int vaDeviceIndex);
Source make_va_source(cv::Ptr<Viz2D> v2d, const string &inputFilename, const int vaDeviceIndex);
Sink make_writer_sink(cv::Ptr<Viz2D> v2d, const string &outputFilename, const int fourcc, const float fps, const cv::Size &frameSize, const int vaDeviceIndex);
Source make_capture_source(cv::Ptr<Viz2D> v2d, const string &inputFilename, const int vaDeviceIndex);
#else
Source make_webcam_source(cv::Ptr<Viz2D> v2d, int width, int height);
#endif
}
}
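With these factories in place, the porting pattern repeated in the demos below collapses to a few lines. A condensed sketch, assuming the usual demo constants (WIDTH, HEIGHT, OUTPUT_FILENAME, VA_HW_DEVICE_INDEX, argv[1] as the input file) and an iteration() function, exactly as in the beauty, optflow, pedestrian and video demos:

#ifndef __EMSCRIPTEN__
    Source src = make_va_source(v2d, argv[1], VA_HW_DEVICE_INDEX);
    v2d->setSource(src);
    Sink sink = make_va_sink(v2d, OUTPUT_FILENAME,
                             cv::VideoWriter::fourcc('V', 'P', '9', '0'),
                             src.fps(), cv::Size(WIDTH, HEIGHT), VA_HW_DEVICE_INDEX);
    v2d->setSink(sink);
    while (true)
        iteration();
#else
    Source src = make_webcam_source(v2d, WIDTH, HEIGHT);
    v2d->setSource(src);
    emscripten_set_main_loop(iteration, -1, true);
#endif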

@ -381,20 +381,37 @@ void Viz2D::clgl(std::function<void(cv::UMat&)> fn) {
void Viz2D::nvg(std::function<void(const cv::Size&)> fn) {
nvg().render(fn);
}
void Viz2D::setSource(const Source &src) {
if (!clva().hasContext())
clva().copyContext();
source_ = src;
}
bool Viz2D::capture() {
return clva().capture([=, this](cv::UMat &videoFrame) {
*(this->capture_) >> videoFrame;
return this->capture([&](cv::UMat &videoFrame) {
if(source_.isReady())
videoFrame = source_().second;
});
}
bool Viz2D::capture(std::function<void(cv::UMat&)> fn) {
return clva().capture(fn);
return clva().capture(fn);
}
bool Viz2D::isSourceReady() {
return source_.isReady();
}
void Viz2D::setSink(const Sink &sink) {
if (!clva().hasContext())
clva().copyContext();
sink_ = sink;
}
void Viz2D::write() {
clva().write([=, this](const cv::UMat &videoFrame) {
*(this->writer_) << videoFrame;
this->write([&](const cv::UMat &videoFrame) {
if(sink_.isReady())
sink_(videoFrame);
});
}
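The no-argument capture() and write() above are what each demo's iteration() calls once per frame. A reduced sketch of that per-frame flow, assuming a Source and Sink have already been set and with the actual processing elided:

    if (!v2d->capture())       // pull the next frame from source_ via the CLVA context
        return;                // nothing captured: source not ready or exhausted
    v2d->clgl([&](cv::UMat& frameBuffer) {
        // ... OpenCL/OpenGL processing on the shared framebuffer ...
    });
    v2d->write();              // push the framebuffer to sink_ via the CLVA context, if ready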
@ -402,62 +419,18 @@ void Viz2D::write(std::function<void(const cv::UMat&)> fn) {
clva().write(fn);
}
bool Viz2D::isSinkReady() {
return sink_.isReady();
}
void Viz2D::makeCurrent() {
glfwMakeContextCurrent(getGLFWWindow());
}
void Viz2D::makeUncurrent() {
void Viz2D::makeNoneCurrent() {
glfwMakeContextCurrent(nullptr);
}
#ifndef __EMSCRIPTEN__
cv::VideoWriter& Viz2D::makeVAWriter(const string &outputFilename, const int fourcc, const float fps, const cv::Size &frameSize, const int vaDeviceIndex) {
writerPath_ = outputFilename;
vaWriterDeviceIndex_ = vaDeviceIndex;
writer_ = new cv::VideoWriter(outputFilename, cv::CAP_FFMPEG, cv::VideoWriter::fourcc('V', 'P', '9', '0'), fps, frameSize, { cv::VIDEOWRITER_PROP_HW_DEVICE, vaDeviceIndex, cv::VIDEOWRITER_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_VAAPI, cv::VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL, 1 });
setVideoFrameSize(frameSize);
if (!clva().hasContext()) {
clva().copyContext();
}
return *writer_;
}
cv::VideoCapture& Viz2D::makeVACapture(const string &inputFilename, const int vaDeviceIndex) {
capturePath_ = inputFilename;
vaCaptureDeviceIndex_ = vaDeviceIndex;
capture_ = new cv::VideoCapture(inputFilename, cv::CAP_FFMPEG, { cv::CAP_PROP_HW_DEVICE, vaDeviceIndex, cv::CAP_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_VAAPI, cv::CAP_PROP_HW_ACCELERATION_USE_OPENCL, 1 });
float w = capture_->get(cv::CAP_PROP_FRAME_WIDTH);
float h = capture_->get(cv::CAP_PROP_FRAME_HEIGHT);
setVideoFrameSize(cv::Size(w,h));
if (!clva().hasContext()) {
clva().copyContext();
}
return *capture_;
}
cv::VideoWriter& Viz2D::makeWriter(const string &outputFilename, const int fourcc, const float fps, const cv::Size &frameSize) {
writerPath_ = outputFilename;
writer_ = new cv::VideoWriter(outputFilename, cv::CAP_FFMPEG, cv::VideoWriter::fourcc('V', 'P', '9', '0'), fps, frameSize, {});
setVideoFrameSize(frameSize);
return *writer_;
}
cv::VideoCapture& Viz2D::makeCapture(const string &inputFilename) {
capturePath_ = inputFilename;
capture_ = new cv::VideoCapture(inputFilename, cv::CAP_FFMPEG, {});
float w = capture_->get(cv::CAP_PROP_FRAME_WIDTH);
float h = capture_->get(cv::CAP_PROP_FRAME_HEIGHT);
setVideoFrameSize(cv::Size(w,h));
return *capture_;
}
#endif
void Viz2D::clear(const cv::Scalar &rgba) {
const float &r = rgba[0] / 255.0f;
const float &g = rgba[1] / 255.0f;
@ -702,51 +675,6 @@ nanogui::ColorPicker* Viz2D::makeColorPicker(const string& label, nanogui::Color
nanogui::Button* Viz2D::makeButton(const string& caption, std::function<void()> fn) {
return this->form()->add_button(caption, fn);
}
bool Viz2D::isAccelerated() {
return cv::ocl::useOpenCL();
}
void Viz2D::setAccelerated(bool a) {
#ifndef __EMSCRIPTEN__
if(a != cv::ocl::useOpenCL()) {
clglContext_->getCLExecContext().setUseOpenCL(a);
clvaContext_->getCLExecContext().setUseOpenCL(a);
cv::ocl::setUseOpenCL(a);
double w = 0;
double h = 0;
double fps = 0;
double fourcc = 0;
if(writer_) {
w = writer_->get(cv::CAP_PROP_FRAME_WIDTH);
h = writer_->get(cv::CAP_PROP_FRAME_HEIGHT);
fps = writer_->get(cv::CAP_PROP_FPS);
fourcc = writer_->get(cv::CAP_PROP_FOURCC);
}
if(a) {
if(capture_) {
delete capture_;
makeVACapture(capturePath_, vaCaptureDeviceIndex_);
}
if(writer_) {
delete writer_;
makeVAWriter(writerPath_, fourcc, fps, cv::Size(w, h), vaWriterDeviceIndex_);
}
} else {
if(capture_) {
delete capture_;
makeCapture(capturePath_);
}
if(writer_) {
delete writer_;
makeWriter(writerPath_, fourcc, fps, cv::Size(w, h));
}
}
}
#endif
}
bool Viz2D::display() {
bool result = true;

@ -1,6 +1,9 @@
#ifndef SRC_COMMON_VIZ2D_HPP_
#define SRC_COMMON_VIZ2D_HPP_
#include "source.hpp"
#include "sink.hpp"
#include <filesystem>
#include <iostream>
#include <set>
@ -89,12 +92,14 @@ class Viz2D {
int vaWriterDeviceIndex_ = 0;
bool mouseDrag_ = false;
nanogui::Screen* screen_ = nullptr;
Source source_;
Sink sink_;
public:
Viz2D(const cv::Size &initialSize, const cv::Size& frameBufferSize, bool offscreen, const string &title, int major = 4, int minor = 6, int samples = 0, bool debug = false);
virtual ~Viz2D();
bool initializeWindowing();
void makeCurrent();
void makeUncurrent();
void makeNoneCurrent();
cv::ogl::Texture2D& texture();
@ -103,14 +108,16 @@ public:
void nvg(std::function<void(const cv::Size&)> fn);
void clear(const cv::Scalar& rgba = cv::Scalar(0,0,0,255));
bool capture();
bool capture(std::function<void(cv::UMat&)> fn);
void write();
void write(std::function<void(const cv::UMat&)> fn);
cv::VideoWriter& makeVAWriter(const string& outputFilename, const int fourcc, const float fps, const cv::Size& frameSize, const int vaDeviceIndex);
cv::VideoCapture& makeVACapture(const string& intputFilename, const int vaDeviceIndex);
cv::VideoWriter& makeWriter(const string& outputFilename, const int fourcc, const float fps, const cv::Size& frameSize);
cv::VideoCapture& makeCapture(const string& intputFilename);
virtual void setSource(const Source& src);
virtual bool isSourceReady();
virtual void setSink(const Sink& sink);
virtual bool isSinkReady();
void setMouseDrag(bool d);
bool isMouseDrag();
void pan(int x, int y);
@ -139,8 +146,6 @@ public:
void setStretching(bool s);
bool isStretching();
bool isClosed();
bool isAccelerated();
void setAccelerated(bool u);
void close();
bool display();

@ -208,9 +208,6 @@ int main(int argc, char **argv) {
setup_gui(v2d);
v2d->setVisible(true);
}
#ifndef __EMSCRIPTEN__
v2d->makeVAWriter(OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), FPS, v2d->getFrameBufferSize(), VA_HW_DEVICE_INDEX);
#endif
//The text to display
string txt = cv::getBuildInformation();
@ -222,10 +219,12 @@ int main(int argc, char **argv) {
}
#ifndef __EMSCRIPTEN__
Sink sink = make_va_sink(v2d, OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), FPS, cv::Size(WIDTH, HEIGHT), VA_HW_DEVICE_INDEX);
v2d->setSink(sink);
while(true)
iteration();
#else
emscripten_set_main_loop(iteration, -1, false);
emscripten_set_main_loop(iteration, -1, true);
#endif
} catch(std::exception& ex) {

@ -127,16 +127,11 @@ int main(int argc, char **argv) {
if (!v2d->isOffscreen())
v2d->setVisible(true);
auto capture = v2d->makeVACapture(argv[1], VA_HW_DEVICE_INDEX);
if (!capture.isOpened()) {
cerr << "ERROR! Unable to open video input" << endl;
exit(-1);
}
Source src = make_va_source(v2d, argv[1], VA_HW_DEVICE_INDEX);
v2d->setSource(src);
float fps = capture.get(cv::CAP_PROP_FPS);
float width = capture.get(cv::CAP_PROP_FRAME_WIDTH);
float height = capture.get(cv::CAP_PROP_FRAME_HEIGHT);
v2d->makeVAWriter(OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), fps, cv::Size(width, height), VA_HW_DEVICE_INDEX);
Sink sink = make_va_sink(v2d, OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), src.fps(), cv::Size(WIDTH, HEIGHT), VA_HW_DEVICE_INDEX);
v2d->setSink(sink);
std::vector<cv::UMat> hsvChannels;
cv::UMat rgb;

@ -157,7 +157,7 @@ PostProcModes post_proc_mode = GLOW;
PostProcModes post_proc_mode = NONE;
#endif
// Intensity of glow or bloom defined by kernel size. The default scales with the image diagonal.
int kernel_size = std::max(int(DIAG / 100 % 2 == 0 ? DIAG / 100 + 1 : DIAG / 100), 1);
int GLOW_KERNEL_SIZE = std::max(int(DIAG / 100 % 2 == 0 ? DIAG / 100 + 1 : DIAG / 100), 1);
//The lightness selection threshold
int bloom_thresh = 210;
//The intensity of the bloom filter
@ -374,20 +374,20 @@ void setup_gui(cv::Ptr<kb::viz2d::Viz2D> v2d, cv::Ptr<kb::viz2d::Viz2D> v2dMenu)
v2d->makeWindow(220, 30, "Post Processing");
auto* postPocMode = v2d->makeComboBox("Mode",post_proc_mode, {"Glow", "Bloom", "None"});
auto* kernelSize = v2d->makeFormVariable("Kernel Size", kernel_size, 1, 63, true, "", "Intensity of glow defined by kernel size");
auto* kernelSize = v2d->makeFormVariable("Kernel Size", GLOW_KERNEL_SIZE, 1, 63, true, "", "Intensity of glow defined by kernel size");
kernelSize->set_callback([=](const int& k) {
static int lastKernelSize = kernel_size;
static int lastKernelSize = GLOW_KERNEL_SIZE;
if(k == lastKernelSize)
return;
if(k <= lastKernelSize) {
kernel_size = std::max(int(k % 2 == 0 ? k - 1 : k), 1);
GLOW_KERNEL_SIZE = std::max(int(k % 2 == 0 ? k - 1 : k), 1);
} else if(k > lastKernelSize)
kernel_size = std::max(int(k % 2 == 0 ? k + 1 : k), 1);
GLOW_KERNEL_SIZE = std::max(int(k % 2 == 0 ? k + 1 : k), 1);
lastKernelSize = k;
kernelSize->set_value(kernel_size);
kernelSize->set_value(GLOW_KERNEL_SIZE);
});
auto* thresh = v2d->makeFormVariable("Threshold", bloom_thresh, 1, 255, true, "", "The lightness selection threshold", true, false);
auto* gain = v2d->makeFormVariable("Gain", bloom_gain, 0.1f, 20.0f, true, "", "Intensity of the effect defined by gain", true, false);
@ -476,7 +476,7 @@ void iteration() {
v2d->clgl([&](cv::UMat& frameBuffer){
//Put it all together (OpenCL)
composite_layers(background, foreground, frameBuffer, frameBuffer, kernel_size, fg_loss, background_mode, post_proc_mode);
composite_layers(background, foreground, frameBuffer, frameBuffer, GLOW_KERNEL_SIZE, fg_loss, background_mode, post_proc_mode);
#ifndef __EMSCRIPTEN__
cvtColor(frameBuffer, menuFrame, cv::COLOR_BGRA2RGB);
#endif
@ -520,23 +520,18 @@ int main(int argc, char **argv) {
}
#ifndef __EMSCRIPTEN__
auto capture = v2d->makeVACapture(argv[1], VA_HW_DEVICE_INDEX);
Source src = make_va_source(v2d, argv[1], VA_HW_DEVICE_INDEX);
v2d->setSource(src);
if (!capture.isOpened()) {
cerr << "ERROR! Unable to open video input" << endl;
exit(-1);
}
float fps = capture.get(cv::CAP_PROP_FPS);
float width = capture.get(cv::CAP_PROP_FRAME_WIDTH);
float height = capture.get(cv::CAP_PROP_FRAME_HEIGHT);
Sink sink = make_va_sink(v2d, OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), src.fps(), cv::Size(WIDTH, HEIGHT), VA_HW_DEVICE_INDEX);
v2d->setSink(sink);
v2d->makeVAWriter(OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), fps, cv::Size(width, height), VA_HW_DEVICE_INDEX);
while (true) {
while (true)
iteration();
}
#else
emscripten_set_main_loop(iteration, -1, false);
Source src = make_webcam_source(v2d, WIDTH, HEIGHT);
v2d->setSource(src);
emscripten_set_main_loop(iteration, -1, true);
#endif

@ -118,17 +118,12 @@ int main(int argc, char **argv) {
if (!v2d->isOffscreen())
v2d->setVisible(true);
auto capture = v2d->makeVACapture(argv[1], VA_HW_DEVICE_INDEX);
Source src = make_va_source(v2d, argv[1], VA_HW_DEVICE_INDEX);
v2d->setSource(src);
if (!capture.isOpened()) {
cerr << "ERROR! Unable to open video input" << endl;
exit(-1);
}
Sink sink = make_va_sink(v2d, OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), src.fps(), cv::Size(WIDTH, HEIGHT), VA_HW_DEVICE_INDEX);
v2d->setSink(sink);
float fps = capture.get(cv::CAP_PROP_FPS);
float width = capture.get(cv::CAP_PROP_FRAME_WIDTH);
float height = capture.get(cv::CAP_PROP_FRAME_HEIGHT);
v2d->makeVAWriter(OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), fps, cv::Size(width, height), VA_HW_DEVICE_INDEX);
//BGRA
cv::UMat background;
//RGB

@ -27,7 +27,7 @@ int max_iterations = 500;
float center_x = -0.32487;
float center_y = 0.000001;
float zoom = 1.0;
float zoom_multiplier = 0.99;
float zoom_add = 0.99;
long iterations = 0;
/** GL uniform handles **/
@ -91,7 +91,7 @@ void load_buffer_data(){
#endif
}
//workaround: required with emscripten + nanogui on every iteration before renderin
//workaround: required with emscripten + nanogui on every iteration before rendering
void rebind_buffers() {
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
@ -247,6 +247,10 @@ void load_shader(){
shader_program_hdl = init_shader(vert.c_str(), frag.c_str(), "fragColor");
}
float easeInOutQubic(float x) {
return x < 0.5f ? 4.0f * x * x * x : 1.0f - std::pow(-2.0f * x + 2.0f, 3.0f) / 2.0f;
}
void init_scene(const cv::Size& sz) {
load_shader();
load_buffer_data();
@ -264,11 +268,11 @@ void init_scene(const cv::Size& sz) {
void render_scene(const cv::Size& sz) {
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
if(zoom > 1) {
zoom_multiplier = 0.99;
if(zoom >= 1) {
zoom_add = -0.01;
iterations = 0;
} else if(zoom < 2.5e-06) {
zoom_multiplier = 1.01;
zoom_add = +0.01;
iterations = 0;
}
@ -278,7 +282,7 @@ void render_scene(const cv::Size& sz) {
glUniform1i(max_iterations_hdl, max_iterations);
glUniform1f(center_y_hdl, center_y);
glUniform1f(center_x_hdl, center_x);
glUniform1f(zoom_hdl, zoom*=zoom_multiplier);
glUniform1f(zoom_hdl, easeInOutQubic(zoom+=zoom_add));
#ifndef __EMSCRIPTEN__
glBindVertexArray(VAO);
@ -338,7 +342,6 @@ void setup_gui(cv::Ptr<kb::viz2d::Viz2D> v2d) {
});
v2d->makeFormVariable("Alpha", alpha, 0.0f, 1.0f, true, "", "The opacity of the fractal visualization");
v2d->makeFormVariable("Contrast boost", contrast_boost, 1, 255, true, "", "Boost contrast by this factor");
}
void iteration() {
@ -378,16 +381,18 @@ int main(int argc, char **argv) {
setup_gui(v2d);
v2d->setVisible(true);
}
#ifndef __EMSCRIPTEN__
v2d->makeVAWriter(OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), FPS, v2d->getFrameBufferSize(), 0);
#endif
v2d->gl(init_scene);
#ifndef __EMSCRIPTEN__
Sink sink = make_va_sink(v2d, OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), FPS, cv::Size(WIDTH, HEIGHT), VA_HW_DEVICE_INDEX);
v2d->setSink(sink);
while(true)
iteration();
#else
emscripten_set_main_loop(iteration, -1, false);
Source src = make_webcam_source(v2d, WIDTH, HEIGHT);
v2d->setSource(src);
emscripten_set_main_loop(iteration, -1, true);
#endif
} catch(std::exception& ex) {
cerr << "Exception: " << ex.what() << endl;

@ -6,12 +6,12 @@
constexpr long unsigned int WIDTH = 1920;
constexpr long unsigned int HEIGHT = 1080;
constexpr double FPS = 60;
constexpr bool OFFSCREEN = false;
constexpr bool OFFSCREEN = true;
constexpr const char* OUTPUT_FILENAME = "tetra-demo.mkv";
constexpr const int VA_HW_DEVICE_INDEX = 0;
constexpr unsigned long DIAG = hypot(double(WIDTH), double(HEIGHT));
constexpr int kernel_size = std::max(int(DIAG / 138 % 2 == 0 ? DIAG / 138 + 1 : DIAG / 138), 1);
constexpr int GLOW_KERNEL_SIZE = std::max(int(DIAG / 138 % 2 == 0 ? DIAG / 138 + 1 : DIAG / 138), 1);
using std::cerr;
using std::endl;
@ -88,7 +88,8 @@ int main(int argc, char **argv) {
if(!v2d->isOffscreen())
v2d->setVisible(true);
v2d->makeVAWriter(OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), FPS, v2d->getFrameBufferSize(), 0);
Sink sink = make_va_sink(v2d, OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), FPS, cv::Size(WIDTH, HEIGHT), VA_HW_DEVICE_INDEX);
v2d->setSink(sink);
v2d->gl(init_scene);
@ -97,15 +98,15 @@ int main(int argc, char **argv) {
v2d->gl(render_scene);
//Acquire the frame buffer for use by OpenCL
v2d->clgl([](cv::UMat &frameBuffer) {
v2d->clgl([&](cv::UMat &frameBuffer) {
//Glow effect (OpenCL)
glow_effect(frameBuffer, frameBuffer, kernel_size);
glow_effect(frameBuffer, frameBuffer, GLOW_KERNEL_SIZE);
});
update_fps(v2d, true);
v2d->write();
update_fps(v2d, true);
//If onscreen rendering is enabled it displays the framebuffer in the native window. Returns false if the window was closed.
if (!v2d->display())
break;

@ -12,7 +12,7 @@ constexpr bool OFFSCREEN = false;
constexpr const char* OUTPUT_FILENAME = "video-demo.mkv";
constexpr unsigned long DIAG = hypot(double(WIDTH), double(HEIGHT));
constexpr int kernel_size = std::max(int(DIAG / 500 % 2 == 0 ? DIAG / 500 + 1 : DIAG / 500), 1);
constexpr int GLOW_KERNEL_SIZE = std::max(int(DIAG / 500 % 2 == 0 ? DIAG / 500 + 1 : DIAG / 500), 1);
using std::cerr;
using std::endl;
using std::string;
@ -92,17 +92,11 @@ int main(int argc, char **argv) {
if(!v2d->isOffscreen())
v2d->setVisible(true);
auto capture = v2d->makeVACapture(argv[1], VA_HW_DEVICE_INDEX);
Source src = make_va_source(v2d, argv[1], VA_HW_DEVICE_INDEX);
v2d->setSource(src);
if (!capture.isOpened()) {
cerr << "ERROR! Unable to open video input" << endl;
exit(-1);
}
float fps = capture.get(cv::CAP_PROP_FPS);
float width = capture.get(cv::CAP_PROP_FRAME_WIDTH);
float height = capture.get(cv::CAP_PROP_FRAME_HEIGHT);
v2d->makeVAWriter(OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), fps, cv::Size(width, height), VA_HW_DEVICE_INDEX);
Sink sink = make_va_sink(v2d, OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), src.fps(), cv::Size(WIDTH, HEIGHT), VA_HW_DEVICE_INDEX);
v2d->setSink(sink);
v2d->gl(init_scene);
@ -114,7 +108,7 @@ int main(int argc, char **argv) {
v2d->clgl([&](cv::UMat& frameBuffer){
//Glow effect (OpenCL)
glow_effect(frameBuffer, frameBuffer, kernel_size);
glow_effect(frameBuffer, frameBuffer, GLOW_KERNEL_SIZE);
});
update_fps(v2d, true);
