we don't need to capture thread_local variables

pull/3471/head
kallaballa 2 years ago
parent d2c47d0dd0
commit 2d3e74da84
  1. modules/v4d/samples/beauty-demo.cpp (12 changed lines)
  2. modules/v4d/samples/cube-demo.cpp (2 changed lines)
  3. modules/v4d/samples/font-demo.cpp (8 changed lines)
  4. modules/v4d/samples/many_cubes-demo.cpp (6 changed lines)
  5. modules/v4d/samples/nanovg-demo.cpp (6 changed lines)
  6. modules/v4d/samples/optflow-demo.cpp (12 changed lines)
  7. modules/v4d/samples/pedestrian-demo.cpp (6 changed lines)
  8. modules/v4d/samples/video-demo.cpp (2 changed lines)

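Rationale: a C++ lambda can only capture variables with automatic storage duration. Names with static or thread storage duration, such as the static thread_local UMats used throughout these demos, are never captured at all; they are referenced directly, so the blanket [&] capture lists were unnecessary. Genuinely local variables still have to be captured explicitly, which is why features, i, translateY and hue now appear by name in the capture lists below. A minimal sketch of the distinction (standalone example; the names are made up and not taken from the V4D sources):

    #include <cstdio>
    #include <vector>

    // Hypothetical stand-in for the per-thread scratch buffers (cv::UMat) in the demos.
    static thread_local std::vector<int> buffer;

    int main() {
        int frameNo = 42;               // automatic storage duration -> must be captured to be used

        auto withoutCapture = []() {
            buffer.push_back(1);        // OK: thread_local names are used directly, never captured
            // buffer.push_back(frameNo);  // error: frameNo is a local and is not captured here
        };

        auto withCapture = [frameNo]() {
            buffer.push_back(frameNo);  // OK: captured by value, like [i], [hue] or [translateY] below
        };

        withoutCapture();
        withCapture();
        std::printf("buffer holds %zu entries\n", buffer.size());
        return 0;
    }

Dropping the catch-all [&] also makes it visible at a glance which lambdas actually depend on per-iteration local state.
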
modules/v4d/samples/beauty-demo.cpp

@@ -229,7 +229,7 @@ static bool iteration(cv::Ptr<V4D> window) {
         return false;
     //Save the video frame as BGR
-    window->fb([&](cv::UMat &frameBuffer) {
+    window->fb([](cv::UMat &frameBuffer) {
         cvtColor(frameBuffer, input, cv::COLOR_BGRA2BGR);
     });
@@ -249,22 +249,22 @@ static bool iteration(cv::Ptr<V4D> window) {
     if (!faceRect.empty() && facemark->fit(down, faceRects, shapes)) {
         FaceFeatures features(faceRect, shapes[0], float(down.size().width) / WIDTH);
-        window->nvg([&]() {
+        window->nvg([&features]() {
             //Draw the face oval of the first face
             draw_face_oval_mask(features);
         });
-        window->fb([&](cv::UMat &frameBuffer) {
+        window->fb([](cv::UMat &frameBuffer) {
             //Convert/Copy the mask
             cvtColor(frameBuffer, faceOval, cv::COLOR_BGRA2GRAY);
         });
-        window->nvg([&]() {
+        window->nvg([&features]() {
             //Draw eyes eyes and lips areas of the first face
             draw_face_eyes_and_lips_mask(features);
         });
-        window->fb([&](cv::UMat &frameBuffer) {
+        window->fb([](cv::UMat &frameBuffer) {
             //Convert/Copy the mask
             cvtColor(frameBuffer, eyesAndLipsMaskGrey, cv::COLOR_BGRA2GRAY);
         });
@@ -316,7 +316,7 @@ static bool iteration(cv::Ptr<V4D> window) {
     }
     //write the result to the framebuffer
-    window->fb([&](cv::UMat &frameBuffer) {
+    window->fb([](cv::UMat &frameBuffer) {
         cvtColor(frameOut, frameBuffer, cv::COLOR_BGR2BGRA);
     });

modules/v4d/samples/cube-demo.cpp

@@ -232,7 +232,7 @@ static bool iteration(cv::Ptr<V4D> window) {
     //To slow for WASM
 #ifndef __EMSCRIPTEN__
     //Aquire the frame buffer for use by OpenCV
-    window->fb([&](cv::UMat& framebuffer) {
+    window->fb([](cv::UMat& framebuffer) {
         glow_effect(framebuffer, framebuffer, glow_kernel_size);
     });
 #endif

modules/v4d/samples/font-demo.cpp

@@ -96,7 +96,7 @@ static bool iteration(cv::Ptr<V4D> window) {
     int32_t translateY = HEIGHT - cnt;
     if(update_stars) {
-        window->nvg([&](const cv::Size& sz) {
+        window->nvg([](const cv::Size& sz) {
            using namespace cv::v4d::nvg;
            clear();
@@ -112,7 +112,7 @@ static bool iteration(cv::Ptr<V4D> window) {
            }
         });
-        window->fb([&](cv::UMat& frameBuffer){
+        window->fb([](cv::UMat& frameBuffer){
            frameBuffer.copyTo(stars);
         });
         update_stars = false;
@@ -129,7 +129,7 @@ static bool iteration(cv::Ptr<V4D> window) {
        update_perspective = false;
     }
-    window->nvg([&](const cv::Size& sz) {
+    window->nvg([translateY](const cv::Size& sz) {
        using namespace cv::v4d::nvg;
        clear();
        fontSize(font_size);
@@ -148,7 +148,7 @@ static bool iteration(cv::Ptr<V4D> window) {
        }
     });
-    window->fb([&](cv::UMat& framebuffer) {
+    window->fb([](cv::UMat& framebuffer) {
        //Pseudo 3D text effect.
        cv::warpPerspective(framebuffer, warped, tm, framebuffer.size(), cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar());
        //Combine layers

modules/v4d/samples/many_cubes-demo.cpp

@@ -233,7 +233,7 @@ static bool iteration(cv::Ptr<V4D> window) {
         window->gl([=](const cv::Size sz){ init_scene(sz, i); }, i);
     });
-    window->gl([=](){
+    window->gl([](){
         //Clear the background
         glClearColor(0.2, 0.24, 0.4, 1);
         glClear(GL_COLOR_BUFFER_BIT);
@@ -241,7 +241,7 @@ static bool iteration(cv::Ptr<V4D> window) {
     //Render using multiple OpenGL contexts
     for(size_t i = 0; i < NUMBER_OF_CUBES; ++i) {
-        window->gl([=](){
+        window->gl([i](){
            double pos = (((double(i) / NUMBER_OF_CUBES) * 2.0) - 1) + (1.0 / NUMBER_OF_CUBES);
            double angle = sin((double(i) / NUMBER_OF_CUBES) * 2 * M_PI);
            render_scene(pos, pos, angle, i);
@@ -250,7 +250,7 @@ static bool iteration(cv::Ptr<V4D> window) {
     //To slow for WASM
 #ifndef __EMSCRIPTEN__
     //Aquire the frame buffer for use by OpenCV
-    window->fb([&](cv::UMat& framebuffer) {
+    window->fb([](cv::UMat& framebuffer) {
         glow_effect(framebuffer, framebuffer, glow_kernel_size);
     });
 #endif

modules/v4d/samples/nanovg-demo.cpp

@@ -141,7 +141,7 @@ static bool iteration(cv::Ptr<V4D> window) {
         return false;
     //Acquire the framebuffer and convert it to RGB
-    window->fb([&](cv::UMat &framebuffer) {
+    window->fb([](cv::UMat &framebuffer) {
         cvtColor(framebuffer, rgb, cv::COLOR_BGRA2RGB);
     });
@@ -159,12 +159,12 @@ static bool iteration(cv::Ptr<V4D> window) {
     cv::cvtColor(hsv, rgb, cv::COLOR_HSV2RGB_FULL);
     //Acquire the framebuffer and convert the rgb into it
-    window->fb([&](cv::UMat &framebuffer) {
+    window->fb([](cv::UMat &framebuffer) {
         cv::cvtColor(rgb, framebuffer, cv::COLOR_BGR2BGRA);
     });
     //Render using nanovg
-    window->nvg([&](const cv::Size &sz) {
+    window->nvg([hue](const cv::Size &sz) {
         draw_color_wheel(sz.width - 300, sz.height - 300, 250.0f, 250.0f, hue);
     });

modules/v4d/samples/optflow-demo.cpp

@@ -406,18 +406,20 @@ static bool iteration(cv::Ptr<V4D> window) {
     if(!window->capture())
         return false;
+    static thread_local cv::Size fbSz = window->framebufferSize();
     //BGRA
     static thread_local cv::UMat background, down;
-    static thread_local cv::UMat foreground(window->framebufferSize(), CV_8UC4, cv::Scalar::all(0));
+    static thread_local cv::UMat foreground(fbSz, CV_8UC4, cv::Scalar::all(0));
     //BGR
     static thread_local cv::UMat miniFrame;
     //GREY
     static thread_local cv::UMat downPrevGrey, downNextGrey, downMotionMaskGrey;
     static thread_local vector<cv::Point2f> detectedPoints;
-    window->fb([&](cv::UMat& frameBuffer) {
+
+    window->fb([](cv::UMat& frameBuffer) {
         //resize to foreground scale
-        cv::resize(frameBuffer, down, cv::Size(window->framebufferSize().width * fg_scale, window->framebufferSize().height * fg_scale));
+        cv::resize(frameBuffer, down, cv::Size(fbSz.width * fg_scale, fbSz.height * fg_scale));
         //save video background
         frameBuffer.copyTo(background);
     });
@@ -428,7 +430,7 @@ static bool iteration(cv::Ptr<V4D> window) {
     //Detect trackable points in the motion mask
     detect_points(downMotionMaskGrey, detectedPoints);
-    window->nvg([&]() {
+    window->nvg([]() {
         cv::v4d::nvg::clear();
         if (!downPrevGrey.empty()) {
             //We don't want the algorithm to get out of hand when there is a scene change, so we suppress it when we detect one.
@@ -442,7 +444,7 @@ static bool iteration(cv::Ptr<V4D> window) {
     downPrevGrey = downNextGrey.clone();
-    window->fb([&](cv::UMat& framebuffer){
+    window->fb([](cv::UMat& framebuffer){
         //Put it all together (OpenCL)
         composite_layers(background, foreground, framebuffer, framebuffer, glow_kernel_size, fg_loss, background_mode, post_proc_mode);
         cvtColor(framebuffer, miniFrame, cv::COLOR_BGRA2RGB);

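Note on the optflow-demo hunks above: once the fb lambdas no longer capture, they cannot call window->framebufferSize() inside their bodies, so the size is read once into a static thread_local cv::Size fbSz and reused. A rough sketch of that hoisting pattern, with hypothetical stand-in types (it assumes the framebuffer size stays fixed after the first iteration on a given thread):

    #include <cstdio>

    struct Size { int width, height; };

    // Hypothetical stand-ins for cv::v4d::V4D and its fb() context.
    struct Window {
        Size framebufferSize() const { return {1280, 720}; }
        template <typename Fn> void fb(Fn&& fn) { fn(); }   // the real fb() passes a cv::UMat&
    };

    static void iteration(Window* window) {
        // Read once per thread; afterwards usable from capture-less lambdas.
        static thread_local Size fbSz = window->framebufferSize();

        window->fb([]() {
            std::printf("%d x %d\n", fbSz.width, fbSz.height); // no capture of `window` needed
        });
    }

    int main() {
        Window w;
        iteration(&w);
        return 0;
    }
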
modules/v4d/samples/pedestrian-demo.cpp

@@ -148,7 +148,7 @@ static bool iteration(cv::Ptr<V4D> window) {
     if(!window->capture())
         return false;
-    window->fb([&](cv::UMat& frameBuffer){
+    window->fb([](cv::UMat& frameBuffer){
         //copy video frame
         cvtColor(frameBuffer,videoFrame,cv::COLOR_BGRA2RGB);
         //downsample video frame for hog detection
@@ -200,7 +200,7 @@ static bool iteration(cv::Ptr<V4D> window) {
     }
     //Draw an ellipse around the tracked pedestrian
-    window->nvg([&](const cv::Size& sz) {
+    window->nvg([](const cv::Size& sz) {
         using namespace cv::v4d::nvg;
         clear();
         beginPath();
@@ -215,7 +215,7 @@ static bool iteration(cv::Ptr<V4D> window) {
     });
     //Put it all together
-    window->fb([&](cv::UMat& frameBuffer){
+    window->fb([](cv::UMat& frameBuffer){
         composite_layers(background, frameBuffer, frameBuffer, BLUR_KERNEL_SIZE);
     });

modules/v4d/samples/video-demo.cpp

@@ -200,7 +200,7 @@ static bool iteration(cv::Ptr<V4D> window) {
     window->gl(render_scene);
 #ifndef __EMSCRIPTEN__
-    window->fb([&](cv::UMat& frameBuffer) {
+    window->fb([](cv::UMat& frameBuffer) {
         glow_effect(frameBuffer, frameBuffer, glow_kernel_size);
     });
 #endif
