ported to the new api

Branch: pull/3471/head
Author: kallaballa (2 years ago)
Parent: e657896a7c
Commit: f4b8f9ec84
Changed files (lines changed):
1. Makefile (3)
2. src/beauty/beauty-demo.cpp (154)
3. src/common/subsystems.hpp (22)
4. src/font/font-demo.cpp (66)
5. src/nanovg/nanovg-demo.cpp (84)
6. src/optflow/optflow-demo.cpp (62)
7. src/pedestrian/pedestrian-demo.cpp (56)
8. src/tetra/tetra-demo.cpp (39)
9. src/video/Makefile (4)
10. src/video/video-demo.cpp (72)
11. src/video2/Makefile (46)
12. src/video2/video-demo2.cpp (159)

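This commit ports every demo from explicit context juggling (va::bind / gl::bind plus cl::acquire_from_gl / cl::release_to_gl) to lambda-based helpers. Not part of the diff, but for orientation, a minimal sketch of the loop shape the demos end up with; the helper names are taken from the src/common/subsystems.hpp hunks below, while the window title, file names, and sizes here are placeholders:

#define CL_TARGET_OPENCL_VERSION 120
#include "../common/subsystems.hpp"

constexpr long unsigned int WIDTH = 1920;
constexpr long unsigned int HEIGHT = 1080;

int main(int argc, char** argv) {
    using namespace kb;
    app::init("Sketch", WIDTH, HEIGHT, true);
    //Decode on the GPU using VAAPI
    cv::VideoCapture capture(argv[1], cv::CAP_FFMPEG, {
        cv::CAP_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_VAAPI,
        cv::CAP_PROP_HW_ACCELERATION_USE_OPENCL, 1 });
    //Copy the OpenCL context for VAAPI. Must be called right after the first VideoCapture/VideoWriter initialization.
    va::copy();
    //Encode on the GPU using VAAPI
    cv::VideoWriter writer("sketch.mkv", cv::CAP_FFMPEG, cv::VideoWriter::fourcc('V', 'P', '9', '0'),
        capture.get(cv::CAP_PROP_FPS), cv::Size(WIDTH, HEIGHT), {
        cv::VIDEOWRITER_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_VAAPI,
        cv::VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL, 1 });
    while (true) {
        bool success = va::read([&capture](cv::UMat& videoFrame) {
            //videoFrame will be converted to BGRA and stored in the frameBuffer.
            capture >> videoFrame;
        });
        if (!success)
            break;
        cl::compute([](cv::UMat& frameBuffer) {
            //OpenCL processing of the shared BGRA framebuffer goes here.
        });
        nvg::render([](int w, int h) {
            nvg::clear();
            //nanovg overlay drawing goes here.
        });
        //If onscreen rendering is enabled it displays the framebuffer in the native window. Returns false if the window was closed.
        if (!app::display())
            break;
        va::write([&writer](const cv::UMat& videoFrame) {
            //videoFrame is the frameBuffer converted to BGR. Ready to be written.
            writer << videoFrame;
        });
        app::print_fps();
    }
    app::terminate();
    return 0;
}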
@@ -50,7 +50,6 @@ dirs:
${MAKE} -C src/beauty/ ${MAKEFLAGS} CXX=${CXX} ${MAKECMDGOALS}
${MAKE} -C src/font/ ${MAKEFLAGS} CXX=${CXX} ${MAKECMDGOALS}
${MAKE} -C src/pedestrian/ ${MAKEFLAGS} CXX=${CXX} ${MAKECMDGOALS}
${MAKE} -C src/video2/ ${MAKEFLAGS} CXX=${CXX} ${MAKECMDGOALS}
debian-release:
${MAKE} -C src/tetra/ ${MAKEFLAGS} CXX=${CXX} release
@@ -60,7 +59,6 @@ debian-release:
${MAKE} -C src/beauty/ ${MAKEFLAGS} CXX=${CXX} release
${MAKE} -C src/font/ ${MAKEFLAGS} CXX=${CXX} release
${MAKE} -C src/pedestrian/ ${MAKEFLAGS} CXX=${CXX} release
${MAKE} -C src/video2/ ${MAKEFLAGS} CXX=${CXX} release
debian-clean:
${MAKE} -C src/tetra/ ${MAKEFLAGS} CXX=${CXX} clean
@@ -70,7 +68,6 @@ debian-clean:
${MAKE} -C src/beauty/ ${MAKEFLAGS} CXX=${CXX} clean
${MAKE} -C src/font/ ${MAKEFLAGS} CXX=${CXX} clean
${MAKE} -C src/pedestrian/ ${MAKEFLAGS} CXX=${CXX} clean
${MAKE} -C src/video2/ ${MAKEFLAGS} CXX=${CXX} clean
install: ${TARGET}
true

@@ -215,34 +215,37 @@ int main(int argc, char **argv) {
cv::VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL, 1
});
//BGRA
cv::UMat frameBuffer;
//BGR
cv::UMat videoFrameIn, resized, down, faceBgMask, diff, blurred, reduced, sharpened, masked;
cv::UMat videoFrameOut(HEIGHT, WIDTH, CV_8UC3);
cv::UMat rgb, resized, down, faceBgMask, diff, blurred, reduced, sharpened, masked;
cv::UMat frameOut(HEIGHT, WIDTH, CV_8UC3);
cv::UMat lhalf(HEIGHT * SCALE, WIDTH * SCALE, CV_8UC3);
cv::UMat rhalf(lhalf.size(), lhalf.type());
//GREY
cv::UMat downGrey, faceBgMaskGrey, faceBgMaskInvGrey, faceFgMaskGrey, resMaskGrey;
//BGR-Float
cv::UMat videoFrameOutFloat;
cv::UMat frameOutFloat;
cv::Mat faces;
vector<cv::Rect> faceRects;
vector<vector<cv::Point2f>> shapes;
vector<FaceFeatures> featuresList;
va::bind();
while (true) {
capture >> videoFrameIn;
if (videoFrameIn.empty())
break;
bool success = va::read([&capture](cv::UMat& videoFrame){
//videoFrame will be converted to BGRA and stored in the frameBuffer.
capture >> videoFrame;
});
cv::resize(videoFrameIn, resized, cv::Size(WIDTH, HEIGHT));
cv::resize(videoFrameIn, down, cv::Size(0, 0), SCALE, SCALE);
cvtColor(down, downGrey, cv::COLOR_BGRA2GRAY);
if(!success)
break;
detector->detect(down, faces);
cl::compute([&](cv::UMat& frameBuffer){
cvtColor(frameBuffer,rgb,cv::COLOR_BGRA2RGB);
cv::resize(rgb, resized, cv::Size(WIDTH, HEIGHT));
cv::resize(rgb, down, cv::Size(0, 0), SCALE, SCALE);
cvtColor(down, downGrey, cv::COLOR_RGB2GRAY);
detector->detect(down, faces);
});
faceRects.clear();
for (int i = 0; i < faces.rows; i++) {
@@ -251,79 +254,80 @@ int main(int argc, char **argv) {
shapes.clear();
gl::bind();
if (!faceRects.empty() && facemark->fit(downGrey, faceRects, shapes)) {
featuresList.clear();
for (size_t i = 0; i < faceRects.size(); ++i) {
featuresList.push_back(FaceFeatures(faceRects[i], shapes[i], float(down.size().width) / frameBuffer.size().width));
featuresList.push_back(FaceFeatures(faceRects[i], shapes[i], float(down.size().width) / WIDTH));
}
nvg::begin();
nvg::clear();
//Draw the face background mask (= face oval)
draw_face_bg_mask(featuresList);
nvg::end();
cl::acquire_from_gl(frameBuffer);
//Convert/Copy the mask
cvtColor(frameBuffer, faceBgMask, cv::COLOR_BGRA2BGR);
cvtColor(frameBuffer, faceBgMaskGrey, cv::COLOR_BGRA2GRAY);
cl::release_to_gl(frameBuffer);
nvg::begin();
nvg::clear();
//Draw the face foreground mask (= eyes and outer lips)
draw_face_fg_mask(featuresList);
nvg::end();
cl::acquire_from_gl(frameBuffer);
//Convert/Copy the mask
cvtColor(frameBuffer, faceFgMaskGrey, cv::COLOR_BGRA2GRAY);
//Dilate the face foreground mask to make eyes and mouth areas wider
int morph_size = 1;
cv::Mat element = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(2 * morph_size + 1, 2 * morph_size + 1), cv::Point(morph_size, morph_size));
cv::morphologyEx(faceFgMaskGrey, faceFgMaskGrey, cv::MORPH_DILATE, element, cv::Point(element.cols >> 1, element.rows >> 1), DILATE_ITERATIONS, cv::BORDER_CONSTANT, cv::morphologyDefaultBorderValue());
cv::subtract(faceBgMaskGrey, faceFgMaskGrey, faceBgMaskGrey);
cv::bitwise_not(faceBgMaskGrey, faceBgMaskInvGrey);
unsharp_mask(resized, sharpened, UNSHARP_STRENGTH);
reduce_shadows(resized, reduced, REDUCE_SHADOW);
blender.prepare(cv::Rect(0,0, WIDTH,HEIGHT));
blender.feed(reduced, faceBgMaskGrey, cv::Point(0,0));
blender.feed(sharpened, faceBgMaskInvGrey, cv::Point(0,0));
blender.blend(videoFrameOutFloat, resMaskGrey);
videoFrameOutFloat.convertTo(videoFrameOut, CV_8U, 1.0);
cv::boxFilter(videoFrameOut, blurred, -1, cv::Size(BLUR_KERNEL_SIZE, BLUR_KERNEL_SIZE), cv::Point(-1, -1), true, cv::BORDER_REPLICATE);
cv::subtract(blurred, resized, diff);
bitwise_and(diff, faceBgMask, masked);
cv::add(videoFrameOut, masked, reduced);
cv::resize(resized, lhalf, cv::Size(0, 0), 0.5, 0.5);
cv::resize(reduced, rhalf, cv::Size(0, 0), 0.5, 0.5);
videoFrameOut = cv::Scalar::all(0);
lhalf.copyTo(videoFrameOut(cv::Rect(0, 0, lhalf.size().width, lhalf.size().height)));
rhalf.copyTo(videoFrameOut(cv::Rect(rhalf.size().width, 0, rhalf.size().width, rhalf.size().height)));
cvtColor(videoFrameOut, frameBuffer, cv::COLOR_BGR2RGBA);
cl::release_to_gl(frameBuffer);
nvg::render([&](int w, int h) {
nvg::clear();
//Draw the face background mask (= face oval)
draw_face_bg_mask(featuresList);
});
cl::compute([&](cv::UMat &frameBuffer) {
//Convert/Copy the mask
cvtColor(frameBuffer, faceBgMask, cv::COLOR_BGRA2BGR);
cvtColor(frameBuffer, faceBgMaskGrey, cv::COLOR_BGRA2GRAY);
});
nvg::render([&](int w, int h) {
nvg::clear();
//Draw the face foreground mask (= eyes and outer lips)
draw_face_fg_mask(featuresList);
});
cl::compute([&](cv::UMat &frameBuffer) {
//Convert/Copy the mask
cvtColor(frameBuffer, faceFgMaskGrey, cv::COLOR_BGRA2GRAY);
//Dilate the face foreground mask to make eyes and mouth areas wider
int morph_size = 1;
cv::Mat element = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(2 * morph_size + 1, 2 * morph_size + 1), cv::Point(morph_size, morph_size));
cv::morphologyEx(faceFgMaskGrey, faceFgMaskGrey, cv::MORPH_DILATE, element, cv::Point(element.cols >> 1, element.rows >> 1), DILATE_ITERATIONS, cv::BORDER_CONSTANT, cv::morphologyDefaultBorderValue());
cv::subtract(faceBgMaskGrey, faceFgMaskGrey, faceBgMaskGrey);
cv::bitwise_not(faceBgMaskGrey, faceBgMaskInvGrey);
unsharp_mask(resized, sharpened, UNSHARP_STRENGTH);
reduce_shadows(resized, reduced, REDUCE_SHADOW);
blender.prepare(cv::Rect(0, 0, WIDTH, HEIGHT));
blender.feed(reduced, faceBgMaskGrey, cv::Point(0, 0));
blender.feed(sharpened, faceBgMaskInvGrey, cv::Point(0, 0));
blender.blend(frameOutFloat, resMaskGrey);
frameOutFloat.convertTo(frameOut, CV_8U, 1.0);
cv::boxFilter(frameOut, blurred, -1, cv::Size(BLUR_KERNEL_SIZE, BLUR_KERNEL_SIZE), cv::Point(-1, -1), true, cv::BORDER_REPLICATE);
cv::subtract(blurred, resized, diff);
bitwise_and(diff, faceBgMask, masked);
cv::add(frameOut, masked, reduced);
cv::resize(resized, lhalf, cv::Size(0, 0), 0.5, 0.5);
cv::resize(reduced, rhalf, cv::Size(0, 0), 0.5, 0.5);
frameOut = cv::Scalar::all(0);
lhalf.copyTo(frameOut(cv::Rect(0, 0, lhalf.size().width, lhalf.size().height)));
rhalf.copyTo(frameOut(cv::Rect(rhalf.size().width, 0, rhalf.size().width, rhalf.size().height)));
cvtColor(frameOut, frameBuffer, cv::COLOR_BGR2RGBA);
});
} else {
cl::acquire_from_gl(frameBuffer);
videoFrameOut = cv::Scalar::all(0);
cv::resize(resized, lhalf, cv::Size(0, 0), 0.5, 0.5);
lhalf.copyTo(videoFrameOut(cv::Rect(0, 0, lhalf.size().width, lhalf.size().height)));
lhalf.copyTo(videoFrameOut(cv::Rect(lhalf.size().width, 0, lhalf.size().width, lhalf.size().height)));
cvtColor(videoFrameOut, frameBuffer, cv::COLOR_BGR2RGBA);
cl::release_to_gl(frameBuffer);
cl::compute([&](cv::UMat &frameBuffer) {
frameOut = cv::Scalar::all(0);
cv::resize(resized, lhalf, cv::Size(0, 0), 0.5, 0.5);
lhalf.copyTo(frameOut(cv::Rect(0, 0, lhalf.size().width, lhalf.size().height)));
lhalf.copyTo(frameOut(cv::Rect(lhalf.size().width, 0, lhalf.size().width, lhalf.size().height)));
cvtColor(frameOut, frameBuffer, cv::COLOR_BGR2RGBA);
});
}
if (!app::display())
break;
va::bind();
writer << videoFrameOut;
va::write([&writer](const cv::UMat& videoFrame){
//videoFrame is the frameBuffer converted to BGR. Ready to be written.
writer << videoFrame;
});
app::print_fps();
}

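unsharp_mask() and reduce_shadows() are defined elsewhere in beauty-demo.cpp and don't appear in this diff. For orientation, a conventional OpenCV unsharp mask looks roughly like this; a sketch, not the demo's exact implementation:

#include <opencv2/imgproc.hpp>

//Sharpen by adding the weighted difference between the image and a blurred copy of itself.
void unsharp_mask_sketch(const cv::UMat& src, cv::UMat& dst, float strength) {
    cv::UMat blurred;
    cv::GaussianBlur(src, blurred, cv::Size(0, 0), 3);
    //dst = src * (1 + strength) - blurred * strength
    cv::addWeighted(src, 1.0 + strength, blurred, -strength, 0, dst);
}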
@@ -458,14 +458,15 @@ void end() {
GL_CHECK(glBindRenderbuffer(GL_RENDERBUFFER, 0));
GL_CHECK(glBindFramebuffer(GL_FRAMEBUFFER, 0));
//glFlush seems enough
GL_CHECK(glFlush());
GL_CHECK(glFinish());
// GL_CHECK(glFinish());
}
void render(std::function<void()> fn) {
void render(std::function<void(int,int)> fn) {
gl::bind();
gl::begin();
fn();
fn(app::window_width, app::window_height);
gl::end();
}
@@ -523,7 +524,7 @@ void release_to_gl(cv::UMat& m) {
gl::end();
}
void work(std::function<void(cv::UMat& m)> fn) {
void compute(std::function<void(cv::UMat& m)> fn) {
gl::bind();
acquire_from_gl(frameBuffer);
fn(frameBuffer);
@@ -573,8 +574,8 @@ void begin() {
w = ws.first;
h = ws.second;
#else
w = glfw::framebuffer_width;
h = glfw::framebuffer_height;
w = app::window_width;
h = app::window_height;
#endif
GL_CHECK(glBindFramebuffer(GL_DRAW_FRAMEBUFFER, kb::gl::frame_buf));
nvgSave(vg);
@@ -588,10 +589,10 @@ void end() {
gl::end();
}
void render(std::function<void()> fn) {
void render(std::function<void(int,int)> fn) {
gl::bind();
nvg::begin();
fn();
fn(app::window_width, app::window_height);
nvg::end();
}
@@ -629,14 +630,17 @@ void bind() {
va::context.bind();
}
void read(std::function<void(cv::UMat&)> fn) {
bool read(std::function<void(cv::UMat&)> fn) {
va::bind();
fn(va::videoFrame);
gl::bind();
cl::acquire_from_gl(cl::frameBuffer);
if(va::videoFrame.empty())
return false;
//Color-conversion from RGB to BGRA (OpenCL)
cv::cvtColor(va::videoFrame, cl::frameBuffer, cv::COLOR_RGB2BGRA);
cl::release_to_gl(cl::frameBuffer);
return true;
}
void write(std::function<void(const cv::UMat&)> fn) {

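The hunk renaming cl::work to cl::compute above is truncated after fn(frameBuffer). Judging by the acquire/release pairs it replaces at the call sites, the wrapper presumably hands the buffer back afterwards; a sketch completing it, where the trailing release_to_gl() is an assumption:

void compute(std::function<void(cv::UMat& m)> fn) {
    //Activate the OpenCL context for OpenGL.
    gl::bind();
    //Map the OpenGL framebuffer into OpenCL.
    acquire_from_gl(frameBuffer);
    //User-supplied OpenCL processing.
    fn(frameBuffer);
    //Hand the buffer back to OpenGL (assumed; the hunk is cut off here).
    release_to_gl(frameBuffer);
}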
@@ -53,9 +53,7 @@ int main(int argc, char **argv) {
va::copy();
//BGRA
cv::UMat frameBuffer, stars, warped;
//BGR
cv::UMat videoFrame;
cv::UMat stars, warped;
//The text to display
string text = cv::getBuildInformation();
@@ -72,12 +70,8 @@ int main(int argc, char **argv) {
cv::Mat tm = cv::getPerspectiveTransform(quad1, quad2);
cv::RNG rng(cv::getTickCount());
//Activate the OpenCL context for OpenGL.
gl::bind();
//Begin a nanovg frame.
nvg::begin();
nvg::clear(0,0,0,1);
{
nvg::render([&](int w, int h) {
nvg::clear();
//draw stars
using kb::nvg::vg;
int numStars = rng.uniform(MIN_STAR_COUNT, MAX_STAR_COUNT);
@@ -88,31 +82,21 @@ int main(int argc, char **argv) {
nvgCircle(vg, rng.uniform(0, WIDTH) , rng.uniform(0, HEIGHT), MAX_STAR_SIZE);
nvgStroke(vg);
}
}
//End a nanovg frame
nvg::end();
});
//Acquire frame buffer from OpenGL.
cl::acquire_from_gl(frameBuffer);
//Copy the star rendering.
frameBuffer.copyTo(stars);
//Release frame buffer to OpenGL.
cl::release_to_gl(frameBuffer);
cl::compute([&](cv::UMat& frameBuffer){
frameBuffer.copyTo(stars);
});
//Frame count.
size_t cnt = 0;
//Y-position of the current line in pixels.
float y;
while (true) {
y = 0;
gl::bind();
//Begin a nanovg frame.
nvg::begin();
//Clear the screen with black.
nvg::clear();
{
nvg::render([&](int w, int h) {
nvg::clear();
using kb::nvg::vg;
nvgBeginPath(vg);
nvgFontSize(vg, FONT_SIZE);
@@ -139,38 +123,32 @@ int main(int argc, char **argv) {
break;
}
}
}
//End a nanovg frame
nvg::end();
});
if(y == 0) {
//Nothing drawn, exit.
break;
}
//Acquire frame buffer from OpenGL.
cl::acquire_from_gl(frameBuffer);
//Pseudo 3D text effect.
cv::warpPerspective(frameBuffer, warped, tm, videoFrame.size(), cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar());
//Combine layers
cv::add(stars, warped, frameBuffer);
//Color-conversion from BGRA to RGB. OpenCV/OpenCL.
cv::cvtColor(frameBuffer, videoFrame, cv::COLOR_BGRA2RGB);
//Transfer buffer ownership back to OpenGL.
cl::release_to_gl(frameBuffer);
cl::compute([&](cv::UMat& frameBuffer){
//Pseudo 3D text effect.
cv::warpPerspective(frameBuffer, warped, tm, frameBuffer.size(), cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar());
//Combine layers
cv::add(stars, warped, frameBuffer);
});
//If onscreen rendering is enabled it displays the framebuffer in the native window. Returns false if the window was closed.
if(!app::display())
break;
//Activate the OpenCL context for VAAPI.
va::bind();
//Encode the frame using VAAPI on the GPU.
writer << videoFrame;
va::write([&writer](const cv::UMat& videoFrame){
//videoFrame is the frameBuffer converted to BGR. Ready to be written.
writer << videoFrame;
});
++cnt;
//Wrap the cnt around if it becomes too big.
if(cnt == std::numeric_limits<size_t>().max())
if(cnt > std::numeric_limits<size_t>().max() / 2.0)
cnt = 0;
app::print_fps();

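The pseudo-3D crawl in font-demo.cpp rests on the single perspective warp built from quad1/quad2 above; their values lie outside this diff. A minimal sketch with illustrative corner points, where the quad values and the W/H stand-ins are assumptions, not the demo's constants:

#include <opencv2/imgproc.hpp>
#include <vector>

constexpr float W = 1920, H = 1080; //stand-ins for the demo's WIDTH/HEIGHT

//Map the full frame onto a trapezoid so lines higher up the screen appear farther away.
std::vector<cv::Point2f> quad1 = {
    cv::Point2f(0, 0), cv::Point2f(W, 0), cv::Point2f(W, H), cv::Point2f(0, H) };
std::vector<cv::Point2f> quad2 = {
    cv::Point2f(W * 0.25f, 0), cv::Point2f(W * 0.75f, 0), cv::Point2f(W, H), cv::Point2f(0, H) };
cv::Mat tm = cv::getPerspectiveTransform(quad1, quad2);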
@@ -151,23 +151,12 @@ int main(int argc, char **argv) {
cv::VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL, 1
});
cv::UMat frameBuffer;
cv::UMat videoFrame;
cv::UMat videoFrameBGRA;
cv::UMat videoFrameHSV;
cv::UMat rgb;
cv::UMat bgra;
cv::UMat hsv;
cv::UMat hueChannel;
//Bind the OpenCL context for VAAPI
va::bind();
while (true) {
//Decode a frame on the GPU using VAAPI
capture >> videoFrame;
if (videoFrame.empty()) {
cerr << "End of stream. Exiting" << endl;
break;
}
//we use the time to calculate the current hue
float time = cv::getTickCount() / cv::getTickFrequency();
//nanovg hue fading between 0.0f and 1.0f
@@ -175,48 +164,45 @@ int main(int argc, char **argv) {
//opencv hue fading between 0 and 255
int cvHue = (42 + uint8_t(std::round(((1.0 - sinf(time*0.12f))+1.0f) * 128.0))) % 255;
//Color-conversion from RGB to HSV. (OpenCL)
cv::cvtColor(videoFrame, videoFrameHSV, cv::COLOR_RGB2HSV_FULL);
//Extract the hue channel
cv::extractChannel(videoFrameHSV, hueChannel, 0);
//Set the current hue
hueChannel.setTo(cvHue);
//Insert the hue channel
cv::insertChannel(hueChannel, videoFrameHSV, 0);
//Color-conversion from HSV to RGB. (OpenCL)
cv::cvtColor(videoFrameHSV, videoFrame, cv::COLOR_HSV2RGB_FULL);
//Color-conversion from RGB to BGRA. (OpenCL)
cv::cvtColor(videoFrame, videoFrameBGRA, cv::COLOR_RGB2BGRA);
//Bind the OpenCL context for OpenGL
gl::bind();
//Acquire the framebuffer so we can write the video frame to it
cl::acquire_from_gl(frameBuffer);
//Resize the frame if necessary. (OpenCL)
cv::resize(videoFrameBGRA, frameBuffer, cv::Size(WIDTH, HEIGHT));
//Release the frame buffer for use by OpenGL
cl::release_to_gl(frameBuffer);
bool success = va::read([&capture](cv::UMat& videoFrame){
//videoFrame will be converted to BGRA and stored in the frameBuffer.
capture >> videoFrame;
});
//Render using nanovg
nvg::begin();
drawColorwheel(nvg::vg, WIDTH - 300, HEIGHT - 300, 250.0f, 250.0f, nvgHue);
nvg::end();
if(!success)
break;
cl::compute([&](cv::UMat& frameBuffer){
cvtColor(frameBuffer,rgb,cv::COLOR_BGRA2RGB);
//Color-conversion from RGB to HSV. (OpenCL)
cv::cvtColor(rgb, hsv, cv::COLOR_RGB2HSV_FULL);
//Extract the hue channel
cv::extractChannel(hsv, hueChannel, 0);
//Set the current hue
hueChannel.setTo(cvHue);
//Insert the hue channel
cv::insertChannel(hueChannel, hsv, 0);
//Color-conversion from HSV to RGB. (OpenCL)
cv::cvtColor(hsv, rgb, cv::COLOR_HSV2RGB_FULL);
//Color-conversion from RGB to BGRA. (OpenCL)
cv::cvtColor(rgb, bgra, cv::COLOR_RGB2BGRA);
//Resize the frame if necessary. (OpenCL)
cv::resize(bgra, frameBuffer, cv::Size(WIDTH, HEIGHT));
});
//Acquire frame buffer from OpenGL
cl::acquire_from_gl(frameBuffer);
//Color-conversion from BGRA to RGB. OpenCV/OpenCL.
cv::cvtColor(frameBuffer, videoFrame, cv::COLOR_BGRA2RGB);
//Transfer buffer ownership back to OpenGL
cl::release_to_gl(frameBuffer);
//Render using nanovg
nvg::render([&](int w, int h) {
drawColorwheel(nvg::vg, w - 300, h - 300, 250.0f, 250.0f, nvgHue);
});
//If onscreen rendering is enabled it displays the framebuffer in the native window. Returns false if the window was closed.
if(!app::display())
break;
//Activate the OpenCL context for VAAPI
va::bind();
//Encode the frame using VAAPI on the GPU.
writer << videoFrame;
va::write([&writer](const cv::UMat& videoFrame){
//videoFrame is the frameBuffer converted to BGR. Ready to be written.
writer << videoFrame;
});
app::print_fps();
}

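The hue shift inside the cl::compute block above can be read as a standalone step: every pixel's hue is forced to cvHue while saturation and value are kept. The same operations, factored into a sketch:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

void set_hue(const cv::UMat& rgbIn, cv::UMat& rgbOut, int cvHue) {
    cv::UMat hsv, hueChannel;
    //The *_FULL variants map the hue range to 0..255 instead of 0..180.
    cv::cvtColor(rgbIn, hsv, cv::COLOR_RGB2HSV_FULL);
    cv::extractChannel(hsv, hueChannel, 0);
    hueChannel.setTo(cvHue);
    cv::insertChannel(hueChannel, hsv, 0);
    cv::cvtColor(hsv, rgbOut, cv::COLOR_HSV2RGB_FULL);
}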
@@ -212,53 +212,61 @@ int main(int argc, char **argv) {
cv::Size frameBufferSize(WIDTH, HEIGHT);
cv::Size scaledSize(WIDTH * FG_SCALE, HEIGHT * FG_SCALE);
//BGRA
cv::UMat frameBuffer, background, foreground(frameBufferSize, CV_8UC4, cv::Scalar::all(0));
cv::UMat background, foreground(frameBufferSize, CV_8UC4, cv::Scalar::all(0));
//RGB
cv::UMat videoFrame, resized, down;
cv::UMat rgb, resized, down;
//GREY
cv::UMat backgroundGrey, downPrevGrey, downNextGrey, downMotionMaskGrey;
vector<cv::Point2f> detectedPoints;
va::bind();
while (true) {
capture >> videoFrame;
if (videoFrame.empty())
bool success = va::read([&capture](cv::UMat& videoFrame){
//videoFrame will be converted to BGRA and stored in the frameBuffer.
capture >> videoFrame;
});
if(!success)
break;
cv::resize(videoFrame, resized, frameBufferSize);
cv::resize(videoFrame, down, scaledSize);
cv::cvtColor(resized, background, cv::COLOR_RGB2BGRA);
cv::cvtColor(down, downNextGrey, cv::COLOR_RGB2GRAY);
//Subtract the background to create a motion mask
prepare_motion_mask(downNextGrey, downMotionMaskGrey);
cl::compute([&](cv::UMat& frameBuffer){
cvtColor(frameBuffer,rgb,cv::COLOR_BGRA2RGB);
cv::resize(rgb, resized, frameBufferSize);
cv::resize(rgb, down, scaledSize);
cv::cvtColor(resized, background, cv::COLOR_RGB2BGRA);
cv::cvtColor(down, downNextGrey, cv::COLOR_RGB2GRAY);
//Subtract the background to create a motion mask
prepare_motion_mask(downNextGrey, downMotionMaskGrey);
});
//Detect trackable points in the motion mask
detect_points(downMotionMaskGrey, detectedPoints);
gl::bind();
nvg::begin();
nvg::clear();
if (!downPrevGrey.empty()) {
//We don't want the algorithm to get out of hand when there is a scene change, so we suppress it when we detect one.
if (!detect_scene_change(downMotionMaskGrey, SCENE_CHANGE_THRESH, SCENE_CHANGE_THRESH_DIFF)) {
//Visualize the sparse optical flow using nanovg
visualize_sparse_optical_flow(downPrevGrey, downNextGrey, detectedPoints, FG_SCALE, MAX_STROKE, EFFECT_COLOR, MAX_POINTS, POINT_LOSS);
nvg::render([&](int w, int h) {
nvg::clear();
if (!downPrevGrey.empty()) {
//We don't want the algorithm to get out of hand when there is a scene change, so we suppress it when we detect one.
if (!detect_scene_change(downMotionMaskGrey, SCENE_CHANGE_THRESH, SCENE_CHANGE_THRESH_DIFF)) {
//Visualize the sparse optical flow using nanovg
visualize_sparse_optical_flow(downPrevGrey, downNextGrey, detectedPoints, FG_SCALE, MAX_STROKE, EFFECT_COLOR, MAX_POINTS, POINT_LOSS);
}
}
}
nvg::end();
});
downPrevGrey = downNextGrey.clone();
cl::acquire_from_gl(frameBuffer);
composite_layers(background, foreground, frameBuffer, frameBuffer, GLOW_KERNEL_SIZE, FG_LOSS);
cv::cvtColor(frameBuffer, videoFrame, cv::COLOR_BGRA2RGB);
cl::release_to_gl(frameBuffer);
cl::compute([&](cv::UMat& frameBuffer){
//Put it all together (OpenCL)
composite_layers(background, foreground, frameBuffer, frameBuffer, GLOW_KERNEL_SIZE, FG_LOSS);
});
//If onscreen rendering is enabled it displays the framebuffer in the native window. Returns false if the window was closed.
if(!app::display())
break;
va::bind();
writer << videoFrame;
va::write([&writer](const cv::UMat& videoFrame){
//videoFrame is the frameBuffer converted to BGR. Ready to be written.
writer << videoFrame;
});
app::print_fps();
}

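prepare_motion_mask() is defined elsewhere in optflow-demo.cpp. A hypothetical sketch of such a motion mask using MOG2 background subtraction; the demo's actual parameters and post-processing may differ:

#include <opencv2/video.hpp>
#include <opencv2/imgproc.hpp>

static cv::Ptr<cv::BackgroundSubtractor> bgSubtractor =
        cv::createBackgroundSubtractorMOG2(100, 16, false);

void prepare_motion_mask_sketch(const cv::UMat& srcGrey, cv::UMat& motionMaskGrey) {
    bgSubtractor->apply(srcGrey, motionMaskGrey);
    //Remove speckles so point detection operates on solid motion regions.
    cv::morphologyEx(motionMaskGrey, motionMaskGrey, cv::MORPH_OPEN,
            cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(3, 3)));
}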
@@ -119,7 +119,7 @@ int main(int argc, char **argv) {
//Print system information
app::print_system_info();
cv::VideoCapture cap(argv[1], cv::CAP_FFMPEG, {
cv::VideoCapture capture(argv[1], cv::CAP_FFMPEG, {
cv::CAP_PROP_HW_DEVICE, VA_HW_DEVICE_INDEX,
cv::CAP_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_VAAPI,
cv::CAP_PROP_HW_ACCELERATION_USE_OPENCL, 1
@@ -127,12 +127,12 @@ int main(int argc, char **argv) {
va::copy();
if (!cap.isOpened()) {
if (!capture.isOpened()) {
cerr << "ERROR! Unable to open video-input" << endl;
return -1;
}
double fps = cap.get(cv::CAP_PROP_FPS);
double fps = capture.get(cv::CAP_PROP_FPS);
cerr << "Detected FPS: " << fps << endl;
cv::VideoWriter writer(OUTPUT_FILENAME, cv::CAP_FFMPEG, cv::VideoWriter::fourcc('V', 'P', '9', '0'), fps, cv::Size(WIDTH, HEIGHT), {
cv::VIDEOWRITER_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_VAAPI,
@@ -140,9 +140,9 @@ int main(int argc, char **argv) {
});
//BGRA
cv::UMat frameBuffer, background, foreground(HEIGHT, WIDTH, CV_8UC4, cv::Scalar::all(0));
cv::UMat background, foreground(HEIGHT, WIDTH, CV_8UC4, cv::Scalar::all(0));
//RGB
cv::UMat videoFrame, videoFrameUp, videoFrameDown;
cv::UMat rgb, videoFrameUp, videoFrameDown;
//GREY
cv::UMat videoFrameDownGrey;
@@ -154,14 +154,23 @@ int main(int argc, char **argv) {
vector<double> probs;
va::bind();
while (true) {
cap >> videoFrame;
if (videoFrame.empty())
bool success = va::read([&capture](cv::UMat& videoFrame){
//videoFrame will be converted to BGRA and stored in the frameBuffer.
capture >> videoFrame;
});
if(!success)
break;
cv::resize(videoFrame, videoFrameUp, cv::Size(WIDTH, HEIGHT));
cv::resize(videoFrame, videoFrameDown, cv::Size(DOWNSIZE_WIDTH, DOWNSIZE_HEIGHT));
cv::cvtColor(videoFrameDown, videoFrameDownGrey, cv::COLOR_RGB2GRAY);
hog.detectMultiScale(videoFrameDownGrey, locations, 0, cv::Size(), cv::Size(), 1.025, 2.0, false);
cl::compute([&](cv::UMat& frameBuffer){
cvtColor(frameBuffer,rgb,cv::COLOR_BGRA2RGB);
cv::resize(rgb, videoFrameUp, cv::Size(WIDTH, HEIGHT));
cv::resize(rgb, videoFrameDown, cv::Size(DOWNSIZE_WIDTH, DOWNSIZE_HEIGHT));
cv::cvtColor(videoFrameDown, videoFrameDownGrey, cv::COLOR_RGB2GRAY);
cv::cvtColor(videoFrameUp, background, cv::COLOR_RGB2BGRA);
hog.detectMultiScale(videoFrameDownGrey, locations, 0, cv::Size(), cv::Size(), 1.025, 2.0, false);
});
maxLocations.clear();
if (!locations.empty()) {
boxes.clear();
@@ -179,12 +188,8 @@ int main(int argc, char **argv) {
}
}
cv::cvtColor(videoFrameUp, background, cv::COLOR_RGB2BGRA);
gl::bind();
nvg::begin();
nvg::clear();
{
nvg::render([&](int w, int h) {
nvg::clear();
using kb::nvg::vg;
nvgBeginPath(vg);
nvgStrokeWidth(vg, std::fmax(2.0, WIDTH / 960.0));
@@ -193,20 +198,21 @@ int main(int argc, char **argv) {
nvgRect(vg, maxLocations[i].x * WIDTH_FACTOR, maxLocations[i].y * HEIGHT_FACTOR, maxLocations[i].width * WIDTH_FACTOR, maxLocations[i].height * HEIGHT_FACTOR);
}
nvgStroke(vg);
}
nvg::end();
});
cl::acquire_from_gl(frameBuffer);
composite_layers(background, foreground, frameBuffer, frameBuffer, BLUR_KERNEL_SIZE, FG_LOSS);
cv::cvtColor(frameBuffer, videoFrame, cv::COLOR_BGRA2RGB);
cl::release_to_gl(frameBuffer);
cl::compute([&](cv::UMat& frameBuffer){
//Put it all together
composite_layers(background, foreground, frameBuffer, frameBuffer, BLUR_KERNEL_SIZE, FG_LOSS);
});
//If onscreen rendering is enabled it displays the framebuffer in the native window. Returns false if the window was closed.
if (!app::display())
break;
va::bind();
writer << videoFrame;
va::write([&writer](const cv::UMat& videoFrame){
//videoFrame is the frameBuffer converted to BGR. Ready to be written.
writer << videoFrame;
});
app::print_fps();
}

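The hog instance in pedestrian-demo.cpp is configured outside this diff; a sketch of the typical people-detector setup, where the default-people-detector SVM is an assumption:

#include <opencv2/objdetect.hpp>
#include <vector>

cv::HOGDescriptor make_people_detector() {
    cv::HOGDescriptor hog;
    hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
    return hog;
}

void detect_people(cv::HOGDescriptor& hog, const cv::UMat& grey, std::vector<cv::Rect>& locations) {
    //Same parameters as the call in the loop above: no hit threshold, default
    //stride/padding, 1.025 scale step, grouping threshold 2.0, no mean-shift grouping.
    hog.detectMultiScale(grey, locations, 0, cv::Size(), cv::Size(), 1.025, 2.0, false);
}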
@@ -5,7 +5,7 @@
constexpr long unsigned int WIDTH = 1920;
constexpr long unsigned int HEIGHT = 1080;
constexpr double FPS = 60;
constexpr bool OFFSCREEN = true;
constexpr bool OFFSCREEN = false;
constexpr const char* OUTPUT_FILENAME = "tetra-demo.mkv";
constexpr const int VA_HW_DEVICE_INDEX = 0;
constexpr unsigned long DIAG = hypot(double(WIDTH), double(HEIGHT));
@@ -96,39 +96,32 @@ int main(int argc, char **argv) {
//Copy OpenCL Context for VAAPI. Must be called right after first VideoWriter/VideoCapture initialization.
va::copy();
//Initialize the OpenGL scene
init_scene(WIDTH, HEIGHT);
//BGRA
cv::UMat frameBuffer;
//RGB
cv::UMat videoFrame;
gl::render([](int w, int h) {
//Initialize the OpenGL scene
init_scene(WIDTH, HEIGHT);
});
while (true) {
//Activate the OpenCL context for OpenGL
gl::bind();
//Render using OpenGL
gl::begin();
render_scene(WIDTH, HEIGHT);
gl::end();
gl::render([](int w, int h) {
render_scene(w, h);
});
//Acquire the frame buffer for use by OpenCL
cl::acquire_from_gl(frameBuffer);
//Glow effect (OpenCL)
glow_effect(frameBuffer, frameBuffer, GLOW_KERNEL_SIZE);
//Color-conversion from BGRA to RGB. OpenCV/OpenCL.
cv::cvtColor(frameBuffer, videoFrame, cv::COLOR_BGRA2RGB);
//Release the frame buffer for use by OpenGL
cl::release_to_gl(frameBuffer);
cl::compute([](cv::UMat &frameBuffer) {
//Glow effect (OpenCL)
glow_effect(frameBuffer, frameBuffer, GLOW_KERNEL_SIZE);
});
//If onscreen rendering is enabled it displays the framebuffer in the native window. Returns false if the window was closed.
if(!app::display())
break;
//Activate the OpenCL context for VAAPI
va::bind();
//Encode the frame using VAAPI on the GPU.
writer << videoFrame;
va::write([&writer](const cv::UMat& videoFrame){
//videoFrame is the frameBuffer converted to BGR. Ready to be written.
writer << videoFrame;
});
app::print_fps();
}

@@ -1,6 +1,6 @@
TARGET := video-demo
TARGET := video-demo2
SRCS := video-demo.cpp
SRCS := video-demo2.cpp
#precompiled headers
HEADERS :=

@@ -115,58 +115,42 @@ int main(int argc, char **argv) {
cv::VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL, 1
});
init_scene(WIDTH, HEIGHT);
//BGRA
cv::UMat frameBuffer, tmpVideoFrame;
//RGB
cv::UMat videoFrame;
//Activate the OpenCL context for VAAPI
va::bind();
gl::render([](int w, int h) {
init_scene(w, h);
});
while (true) {
//Decode a frame on the GPU using VAAPI
capture >> videoFrame;
if (videoFrame.empty()) {
cerr << "End of stream. Exiting" << endl;
bool success = va::read([&capture](cv::UMat& videoFrame){
//videoFrame will be converted to BGRA and stored in the frameBuffer.
capture >> videoFrame;
});
if(!success)
break;
}
//Color-conversion from RGB to BGRA. (OpenCL)
cv::cvtColor(videoFrame, tmpVideoFrame, cv::COLOR_RGB2BGRA);
//Activate the OpenCL context for OpenGL
gl::bind();
//Initially acquire the framebuffer so we can write the video frame to it
cl::acquire_from_gl(frameBuffer);
//Resize the frame if necessary. (OpenCL)
cv::resize(tmpVideoFrame, frameBuffer, cv::Size(WIDTH, HEIGHT));
//Release the frame buffer for use by OpenGL
cl::release_to_gl(frameBuffer);
//Render using OpenGL
gl::begin();
render_scene(WIDTH, HEIGHT);
gl::end();
//Acquire the frame buffer for use by OpenCL
cl::acquire_from_gl(frameBuffer);
//Glow effect (OpenCL)
glow_effect(frameBuffer, frameBuffer, GLOW_KERNEL_SIZE);
//Color-conversion from BGRA to RGB. (OpenCL)
cv::cvtColor(frameBuffer, videoFrame, cv::COLOR_BGRA2RGB);
//Release the frame buffer for use by OpenGL
cl::release_to_gl(frameBuffer);
cl::compute([](cv::UMat& frameBuffer){
//Resize the frame if necessary. (OpenCL)
cv::resize(frameBuffer, frameBuffer, cv::Size(WIDTH, HEIGHT));
});
gl::render([](int w, int h) {
//Render using OpenGL
render_scene(w, h);
});
cl::compute([&GLOW_KERNEL_SIZE](cv::UMat& frameBuffer){
//Glow effect (OpenCL)
glow_effect(frameBuffer, frameBuffer, GLOW_KERNEL_SIZE);
});
//If onscreen rendering is enabled it displays the framebuffer in the native window. Returns false if the window was closed.
if(!app::display())
break;
//Activate the OpenCL context for VAAPI
va::bind();
//Encode the frame using VAAPI on the GPU.
writer << videoFrame;
va::write([&writer](const cv::UMat& videoFrame){
//videoFrame is the frameBuffer converted to BGR. Ready to be written.
writer << videoFrame;
});
app::print_fps();
}

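A side note on the hunk above: cl::compute([&GLOW_KERNEL_SIZE]...) names a capture that is unnecessary if GLOW_KERNEL_SIZE is a file-scope constexpr, as it is in the removed video-demo2.cpp below; constants with static storage duration can be used inside a lambda without capturing them, and naming them in a capture list is ill-formed on strict compilers. The equivalent call, as a sketch:

cl::compute([](cv::UMat& frameBuffer) {
    //GLOW_KERNEL_SIZE is usable without capture (assuming a file-scope constexpr).
    glow_effect(frameBuffer, frameBuffer, GLOW_KERNEL_SIZE);
});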
@@ -1,46 +0,0 @@
TARGET := video-demo2
SRCS := video-demo2.cpp
#precompiled headers
HEADERS :=
OBJS := ${SRCS:.cpp=.o}
DEPS := ${SRCS:.cpp=.dep}
CXXFLAGS += -fpic
LDFLAGS +=
LIBS += -lm
.PHONY: all release debug clean distclean
all: release
release: ${TARGET}
debug: ${TARGET}
info: ${TARGET}
profile: ${TARGET}
unsafe: ${TARGET}
asan: ${TARGET}
${TARGET}: ${OBJS}
${CXX} ${LDFLAGS} -o $@ $^ ${LIBS}
${OBJS}: %.o: %.cpp %.dep ${GCH}
${CXX} ${CXXFLAGS} -o $@ -c $<
${DEPS}: %.dep: %.cpp Makefile
${CXX} ${CXXFLAGS} -MM $< > $@
${GCH}: %.gch: ${HEADERS}
${CXX} ${CXXFLAGS} -o $@ -c ${@:.gch=.hpp}
install:
mkdir -p ${DESTDIR}/${PREFIX}
cp ${TARGET} ${DESTDIR}/${PREFIX}
uninstall:
rm ${DESTDIR}/${PREFIX}/${TARGET}
clean:
rm -f *~ ${DEPS} ${OBJS} ${CUO} ${GCH} ${TARGET}
distclean: clean

@@ -1,159 +0,0 @@
#define CL_TARGET_OPENCL_VERSION 120
#include "../common/subsystems.hpp"
#include <string>
constexpr long unsigned int WIDTH = 1920;
constexpr long unsigned int HEIGHT = 1080;
constexpr const int VA_HW_DEVICE_INDEX = 0;
constexpr bool OFFSCREEN = true;
constexpr const char* OUTPUT_FILENAME = "video-demo.mkv";
constexpr unsigned long DIAG = hypot(double(WIDTH), double(HEIGHT));
constexpr int GLOW_KERNEL_SIZE = std::max(int(DIAG / 138 % 2 == 0 ? DIAG / 138 + 1 : DIAG / 138), 1);
using std::cerr;
using std::endl;
using std::string;
void init_scene(unsigned long w, unsigned long h) {
//Initialize the OpenGL scene
glViewport(0, 0, w, h);
glColor3f(1.0, 1.0, 1.0);
glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glFrustum(-2, 2, -1.5, 1.5, 1, 40);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0, 0, -3);
glRotatef(50, 1, 0, 0);
glRotatef(70, 0, 1, 0);
}
void render_scene(unsigned long w, unsigned long h) {
//Render a tetrahedron using immediate mode because the code is more concise for a demo
glViewport(0, 0, w, h);
glRotatef(1, 0, 1, 0);
glClearColor(0.0f, 0.0f, 1.0f, 1.0f);
glBegin(GL_TRIANGLE_STRIP);
glColor3f(1, 1, 1);
glVertex3f(0, 2, 0);
glColor3f(1, 0, 0);
glVertex3f(-1, 0, 1);
glColor3f(0, 1, 0);
glVertex3f(1, 0, 1);
glColor3f(0, 0, 1);
glVertex3f(0, 0, -1.4);
glColor3f(1, 1, 1);
glVertex3f(0, 2, 0);
glColor3f(1, 0, 0);
glVertex3f(-1, 0, 1);
glEnd();
}
void glow_effect(const cv::UMat &src, cv::UMat &dst, const int ksize) {
static cv::UMat resize;
static cv::UMat blur;
static cv::UMat dst16;
cv::bitwise_not(src, dst);
//Resize for some extra performance
cv::resize(dst, resize, cv::Size(), 0.5, 0.5);
//Cheap blur
cv::boxFilter(resize, resize, -1, cv::Size(ksize, ksize), cv::Point(-1,-1), true, cv::BORDER_REPLICATE);
//Back to original size
cv::resize(resize, blur, src.size());
//Multiply the src image with a blurred version of itself
cv::multiply(dst, blur, dst16, 1, CV_16U);
//Normalize and convert back to CV_8U
cv::divide(dst16, cv::Scalar::all(255.0), dst, 1, CV_8U);
cv::bitwise_not(dst, dst);
}
int main(int argc, char **argv) {
using namespace kb;
if(argc != 2) {
cerr << "Usage: video-demo <video-file>" << endl;
exit(1);
}
//Initialize the application
app::init("Video Demo", WIDTH, HEIGHT, OFFSCREEN);
//Print system information
app::print_system_info();
//Initialize MJPEG HW decoding using VAAPI
cv::VideoCapture capture(argv[1], cv::CAP_FFMPEG, {
cv::CAP_PROP_HW_DEVICE, VA_HW_DEVICE_INDEX,
cv::CAP_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_VAAPI,
cv::CAP_PROP_HW_ACCELERATION_USE_OPENCL, 1
});
//Copy OpenCL Context for VAAPI. Must be called right after first VideoWriter/VideoCapture initialization.
va::copy();
if (!capture.isOpened()) {
cerr << "ERROR! Unable to open video input" << endl;
return -1;
}
double fps = capture.get(cv::CAP_PROP_FPS);
//Initialize VP9 HW encoding using VAAPI. We don't need to specify the hardware device twice; it only generates a warning.
cv::VideoWriter writer(OUTPUT_FILENAME, cv::CAP_FFMPEG, cv::VideoWriter::fourcc('V', 'P', '9', '0'), fps, cv::Size(WIDTH, HEIGHT), {
cv::VIDEOWRITER_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_VAAPI,
cv::VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL, 1
});
gl::render([WIDTH, HEIGHT]() {
init_scene(WIDTH, HEIGHT);
});
while (true) {
va::read([&capture](cv::UMat& videoFrame){
//videoFrame will be converted to BGRA and stored in the frameBuffer.
capture >> videoFrame;
assert(!videoFrame.empty());
});
cl::work([](cv::UMat& frameBuffer){
//Resize the frame if necessary. (OpenCL)
cv::resize(frameBuffer, frameBuffer, cv::Size(WIDTH, HEIGHT));
});
gl::render([&WIDTH, &HEIGHT]() {
//Render using OpenGL
render_scene(WIDTH, HEIGHT);
});
cl::work([&GLOW_KERNEL_SIZE](cv::UMat& frameBuffer){
//Glow effect (OpenCL)
glow_effect(frameBuffer, frameBuffer, GLOW_KERNEL_SIZE);
});
//If onscreen rendering is enabled it displays the framebuffer in the native window. Returns false if the window was closed.
if(!app::display())
break;
va::write([&writer](const cv::UMat& videoFrame){
//videoFrame is the frameBuffer converted to BGR. Ready to be written.
writer << videoFrame;
});
app::print_fps();
}
app::terminate();
return 0;
}