emscripten adaptations

pull/3471/head
kallaballa 2 years ago
parent cbc6082fcf
commit 15a792fdd1
  1. 2
      modules/v4d/include/opencv2/v4d/detail/framebuffercontext.hpp
  2. 7
      modules/v4d/include/opencv2/v4d/util.hpp
  3. 80
      modules/v4d/include/opencv2/v4d/v4d.hpp
  4. 357
      modules/v4d/samples/beauty-demo.cpp
  5. 19
      modules/v4d/samples/cube-demo.cpp
  6. 4
      modules/v4d/samples/custom_source_and_sink.cpp
  7. 12
      modules/v4d/samples/display_image.cpp
  8. 6
      modules/v4d/samples/display_image_fb.cpp
  9. 2
      modules/v4d/samples/display_image_nvg.cpp
  10. 2
      modules/v4d/samples/font-demo.cpp
  11. 7
      modules/v4d/samples/many_cubes-demo.cpp
  12. 12
      modules/v4d/samples/nanovg-demo.cpp
  13. 20
      modules/v4d/samples/optflow-demo.cpp
  14. 3
      modules/v4d/samples/render_opengl.cpp
  15. 15
      modules/v4d/samples/shader-demo.cpp
  16. 4
      modules/v4d/samples/vector_graphics_and_fb.cpp
  17. 16
      modules/v4d/samples/video-demo.cpp
  18. 9
      modules/v4d/samples/video_editing.cpp
  19. 17
      modules/v4d/src/detail/framebuffercontext.cpp
  20. 4
      modules/v4d/src/detail/glcontext.cpp
  21. 4
      modules/v4d/src/detail/nanovgcontext.cpp
  22. 32
      modules/v4d/src/detail/sourcecontext.cpp
  23. 9
      modules/v4d/src/source.cpp
  24. 10
      modules/v4d/src/util.cpp
  25. 71
      modules/v4d/src/v4d.cpp

@ -286,7 +286,7 @@ protected:
void setup(const cv::Size& sz);
void teardown();
void initWebGLCopy(const size_t& index);
void doWebGLCopy(FrameBufferContext& other);
void doWebGLCopy(cv::Ptr<FrameBufferContext> other);
/*!
* The UMat used to copy or bind (depending on cl-gl interop capability) the OpenGL framebuffer.
*/

@ -38,9 +38,10 @@ namespace detail {
using std::cout;
using std::endl;
static thread_local std::mutex mtx_;
static thread_local bool sync_run_ = false;
class ThreadLocal {
static thread_local std::mutex mtx_;
static thread_local bool sync_run_;
public:
static bool& sync_run() {
return sync_run_;
@ -342,7 +343,7 @@ CV_EXPORTS cv::Ptr<Source> makeCaptureSource(cv::Ptr<V4D> window, const string&
* @param height The frame height to capture (usually the initial height of the V4D object)
* @return A WebCam source object.
*/
CV_EXPORTS cv::Ptr<Source> makeCaptureSource(int width, int height, cv::Ptr<V4D> window);
CV_EXPORTS cv::Ptr<Source> makeCaptureSource(cv::Ptr<V4D> window);
#endif
void resizePreserveAspectRatio(const cv::UMat& src, cv::UMat& output, const cv::Size& dstSize, const cv::Scalar& bgcolor = {0,0,0,255});

@ -477,17 +477,22 @@ public:
}
void capture(cv::UMat& frame) {
auto isTrue = [](const bool& b){ return b; };
capture([](const cv::UMat& inputFrame, cv::UMat& f){
inputFrame.copyTo(f);
if(!inputFrame.empty())
inputFrame.copyTo(f);
}, frame);
graph(isTrue, frame.empty());
fb([](cv::UMat& frameBuffer, const cv::UMat& f) {
f.copyTo(frameBuffer);
if(!f.empty())
f.copyTo(frameBuffer);
}, frame);
endgraph(isTrue, frame.empty());
}
void capture() {
static thread_local cv::UMat tmp(fbSize(), CV_8UC3);
static thread_local cv::UMat tmp;
capture(tmp);
}
@ -507,7 +512,7 @@ public:
}
void write() {
static thread_local cv::UMat frame(fbSize(), CV_8UC3);
static thread_local cv::UMat frame(fbSize(), CV_8UC4);
fb([](const cv::UMat& frameBuffer, cv::UMat& f) {
frameBuffer.copyTo(f);
@ -594,12 +599,9 @@ public:
#ifdef __EMSCRIPTEN__
bool first = true;
static void do_frame(void* void_fn_ptr) {
if(first) {
glfwSwapInterval(0);
first = false;
}
glfwSwapInterval(0);
auto* fn_ptr = reinterpret_cast<std::function<bool()>*>(void_fn_ptr);
if (fn_ptr) {
auto& fn = *fn_ptr;
@ -671,37 +673,41 @@ public:
// if(this->isMain())
// this->makeCurrent();
#ifndef __EMSCRIPTEN__
bool success = true;
CLExecScope_t scope(this->fbCtx()->getCLExecContext());
plan->setup(self());
this->makePlan();
this->runPlan();
this->display();
this->clearPlan();
plan->infer(self());
this->makePlan();
do {
this->runPlan();
} while(this->display());
plan->teardown(self());
this->makePlan();
#ifndef __EMSCRIPTEN__
CLExecScope_t scope(this->fbCtx()->getCLExecContext());
#endif
plan->setup(self());
this->makePlan();
this->runPlan();
this->clearPlan();
plan->infer(self());
this->makePlan();
#ifndef __EMSCRIPTEN__
if(this->isMain())
this->printSystemInfo();
do {
this->runPlan();
this->display();
this->clearPlan();
#else
} while(this->display());
#else
if(this->isMain()) {
std::function<bool()> fnFrame([=,this](){
return fn(self());
std::function<bool()> fnFrame([this](){
this->printSystemInfo();
do {
this->runPlan();
} while(this->display());
return false;
});
emscripten_set_main_loop_arg(do_frame, &fnFrame, -1, true);
} else {
while (true) {
fn(self());
}
do {
this->runPlan();
} while(this->display());
}
#endif
#endif
plan->teardown(self());
this->makePlan();
this->runPlan();
this->clearPlan();
if(this->isMain()) {
for(auto& t : threads)
@ -711,12 +717,12 @@ public:
/*!
* Called to feed an image directly to the framebuffer
*/
CV_EXPORTS void feed(cv::InputArray in);
void feed(cv::UMat& in);
/*!
* Fetches a copy of the framebuffer
* @return a copy of the framebuffer
*/
CV_EXPORTS cv::_InputArray fetch();
CV_EXPORTS cv::UMat fetch();
/*!
* Set the current #cv::viz::Source object. Usually created using #makeCaptureSource().
@ -865,8 +871,6 @@ public:
*/
CV_EXPORTS void printSystemInfo();
CV_EXPORTS void makeCurrent();
CV_EXPORTS GLFWwindow* getGLFWWindow();
CV_EXPORTS cv::Ptr<FrameBufferContext> fbCtx();

@ -215,154 +215,159 @@ class BeautyDemoPlan : public Plan {
bool faceFound_ = false;
FaceFeatures features_;
public:
void setup(cv::Ptr<V4D> window) override {
window->parallel([](cv::Ptr<cv::face::Facemark>& facemark){
#ifndef __EMSCRIPTEN__
facemark->loadModel("modules/v4d/assets/models/lbfmodel.yaml");
#else
facemark->loadModel("assets/models/lbfmodel.yaml");
#endif
cerr << "Loading finished" << endl;
}, facemark_);
}
void infer(cv::Ptr<V4D> window) override {
auto always = [](){ return true; };
auto isTrue = [](bool& ff){ return ff; };
auto isFalse = [](bool& ff){ return !ff; };
//Face landmark detector
try {
#ifndef __EMSCRIPTEN__
facemark_->loadModel("modules/v4d/assets/models/lbfmodel.yaml");
#else
facemark_->loadModel("assets/models/lbfmodel.yaml");
#endif
window->graph(always);
{
//Save the video frame as BGR
window->capture([](const cv::UMat &videoFrame, cv::UMat& in, cv::UMat& d) {
cvtColor(videoFrame, in, cv::COLOR_BGRA2BGR);
//Downscale the video frame for face detection
cv::resize(in, d, cv::Size(DOWNSIZE_WIDTH, DOWNSIZE_HEIGHT));
}, input_, down_);
window->parallel([](cv::Ptr<cv::FaceDetectorYN>& de, cv::Ptr<cv::face::Facemark>& fm, vector<vector<cv::Point2f>>& sh, const cv::UMat& d, std::vector<cv::Rect>& fr, bool& ff, FaceFeatures& ft) {
sh.clear();
cv::Mat faces;
//Detect faces in the down-scaled image
de->detect(d, faces);
//Only add the first face
cv::Rect faceRect;
if(!faces.empty())
faceRect = cv::Rect(int(faces.at<float>(0, 0)), int(faces.at<float>(0, 1)), int(faces.at<float>(0, 2)), int(faces.at<float>(0, 3)));
fr = {faceRect};
//find landmarks if faces have been detected
ff = !faceRect.empty() && fm->fit(d, fr, sh);
if(ff)
ft = FaceFeatures(fr[0], sh[0], float(d.size().width) / WIDTH);
}, detector_, facemark_, shapes_, down_, faceRects_, faceFound_, features_);
try {
window->graph(always);
{
window->capture();
//Save the video frame as BGR
window->fb([](const cv::UMat &framebuffer, cv::UMat& in, cv::UMat& d) {
cvtColor(framebuffer, in, cv::COLOR_BGRA2BGR);
//Downscale the video frame for face detection
cv::resize(in, d, cv::Size(DOWNSIZE_WIDTH, DOWNSIZE_HEIGHT));
}, input_, down_);
window->parallel([](cv::Ptr<cv::FaceDetectorYN>& de, cv::Ptr<cv::face::Facemark>& fm, vector<vector<cv::Point2f>>& sh, const cv::UMat& d, std::vector<cv::Rect>& fr, bool& ff, FaceFeatures& ft) {
sh.clear();
cv::Mat faces;
//Detect faces in the down-scaled image
de->detect(d, faces);
//Only add the first face
cv::Rect faceRect;
if(!faces.empty())
faceRect = cv::Rect(int(faces.at<float>(0, 0)), int(faces.at<float>(0, 1)), int(faces.at<float>(0, 2)), int(faces.at<float>(0, 3)));
fr = {faceRect};
//find landmarks if faces have been detected
ff = !faceRect.empty() && fm->fit(d, fr, sh);
if(ff)
ft = FaceFeatures(fr[0], sh[0], float(d.size().width) / WIDTH);
}, detector_, facemark_, shapes_, down_, faceRects_, faceFound_, features_);
}
window->endgraph(always);
window->graph(isTrue, faceFound_);
{
window->nvg([](const FaceFeatures& f) {
//Draw the face oval of the first face
draw_face_oval_mask(f);
}, features_);
window->fb([](const cv::UMat& frameBuffer, cv::UMat& fo) {
//Convert/Copy the mask
cvtColor(frameBuffer, fo, cv::COLOR_BGRA2GRAY);
}, faceOval_);
window->nvg([](const FaceFeatures& f) {
//Draw the eyes and lips areas of the first face
draw_face_eyes_and_lips_mask(f);
}, features_);
window->fb([](const cv::UMat &frameBuffer, cv::UMat& ealmg) {
//Convert/Copy the mask
cvtColor(frameBuffer, ealmg, cv::COLOR_BGRA2GRAY);
}, eyesAndLipsMaskGrey_);
window->parallel([](const cv::UMat& fo, const cv::UMat& ealmg, cv::UMat& fsmg, cv::UMat& bmg) {
//Create the skin mask
cv::subtract(fo, ealmg, fsmg);
//Create the background mask
cv::bitwise_not(ealmg, bmg);
}, faceOval_, eyesAndLipsMaskGrey_, faceSkinMaskGrey_, backgroundMaskGrey_);
window->parallel([](const cv::UMat& in, cv::UMat& eal, float& eals, cv::UMat& c, cv::UMat& s) {
//boost saturation of eyes and lips
adjust_saturation(in, eal, eals);
//reduce skin contrast
multiply(in, cv::Scalar::all(skin_contrast), c);
//fix skin brightness
add(c, cv::Scalar::all((1.0 - skin_contrast) / 2.0) * 255.0, c);
//blur the skin_
cv::boxFilter(c, c, -1, cv::Size(blur_skin_kernel_size, blur_skin_kernel_size), cv::Point(-1, -1), true, cv::BORDER_REPLICATE);
//boost skin saturation
adjust_saturation(c, s, skin_saturation);
}, input_, eyesAndLips_, eyes_and_lips_saturation, contrast_, skin_);
window->parallel([](cv::Ptr<cv::detail::MultiBandBlender>& bl,
const cv::UMat& s, const cv::UMat& fsmg,
const cv::UMat& in, const cv::UMat& bmg,
const cv::UMat& eal, const cv::UMat& ealmg,
cv::UMat& fout) {
cv:: UMat foFloat;
//FIXME do it once?
CV_Assert(!s.empty());
CV_Assert(!in.empty());
CV_Assert(!eal.empty());
//piece it all together
//FIXME prepare only once?
bl->prepare(cv::Rect(0, 0, WIDTH, HEIGHT));
bl->feed(s, fsmg, cv::Point(0, 0));
bl->feed(in, bmg, cv::Point(0, 0));
bl->feed(eal, ealmg, cv::Point(0, 0));
bl->blend(foFloat, cv::UMat());
CV_Assert(!foFloat.empty());
foFloat.convertTo(fout, CV_8U, 1.0);
}, blender_, skin_, faceSkinMaskGrey_, input_, backgroundMaskGrey_, eyesAndLips_, eyesAndLipsMaskGrey_, frameOut_);
window->parallel([](cv::UMat& fout, const cv::UMat& in, cv::UMat& lh, cv::UMat& rh) {
if (side_by_side) {
//create side-by-side view with a result
cv::resize(in, lh, cv::Size(0, 0), 0.5, 0.5);
cv::resize(fout, rh, cv::Size(0, 0), 0.5, 0.5);
fout = cv::Scalar::all(0);
lh.copyTo(fout(cv::Rect(0, 0, lh.size().width, lh.size().height)));
rh.copyTo(fout(cv::Rect(rh.size().width, 0, rh.size().width, rh.size().height)));
}
}, frameOut_, input_, lhalf_, rhalf_);
}
window->endgraph(isTrue, faceFound_);
window->graph(isFalse, faceFound_);
{
window->parallel([](cv::UMat& fout, const cv::UMat& in, cv::UMat& lh) {
if (side_by_side) {
//create side-by-side view without a result (using the input image for both sides)
fout = cv::Scalar::all(0);
cv::resize(in, lh, cv::Size(0, 0), 0.5, 0.5);
lh.copyTo(fout(cv::Rect(0, 0, lh.size().width, lh.size().height)));
lh.copyTo(fout(cv::Rect(lh.size().width, 0, lh.size().width, lh.size().height)));
} else {
in.copyTo(fout);
}
}, frameOut_, input_, lhalf_);
}
window->endgraph(isFalse, faceFound_);
window->graph(always);
{
//write the result to the framebuffer
window->fb([](cv::UMat &frameBuffer, const cv::UMat& f) {
cvtColor(f, frameBuffer, cv::COLOR_BGR2BGRA);
}, frameOut_);
}
window->endgraph(always);
} catch (std::exception &ex) {
cerr << ex.what() << endl;
}
window->endgraph(always);
window->graph(isTrue, faceFound_);
{
window->nvg([](const FaceFeatures& f) {
//Draw the face oval of the first face
draw_face_oval_mask(f);
}, features_);
window->fb([](const cv::UMat& frameBuffer, cv::UMat& fo) {
//Convert/Copy the mask
cvtColor(frameBuffer, fo, cv::COLOR_BGRA2GRAY);
}, faceOval_);
window->nvg([](const FaceFeatures& f) {
//Draw the eyes and lips areas of the first face
draw_face_eyes_and_lips_mask(f);
}, features_);
window->fb([](const cv::UMat &frameBuffer, cv::UMat& ealmg) {
//Convert/Copy the mask
cvtColor(frameBuffer, ealmg, cv::COLOR_BGRA2GRAY);
}, eyesAndLipsMaskGrey_);
window->parallel([](const cv::UMat& fo, const cv::UMat& ealmg, cv::UMat& fsmg, cv::UMat& bmg) {
//Create the skin mask
cv::subtract(fo, ealmg, fsmg);
//Create the background mask
cv::bitwise_not(ealmg, bmg);
}, faceOval_, eyesAndLipsMaskGrey_, faceSkinMaskGrey_, backgroundMaskGrey_);
window->parallel([](const cv::UMat& in, cv::UMat& eal, float& eals, cv::UMat& c, cv::UMat& s) {
//boost saturation of eyes and lips
adjust_saturation(in, eal, eals);
//reduce skin contrast
multiply(in, cv::Scalar::all(skin_contrast), c);
//fix skin brightness
add(c, cv::Scalar::all((1.0 - skin_contrast) / 2.0) * 255.0, c);
//blur the skin_
cv::boxFilter(c, c, -1, cv::Size(blur_skin_kernel_size, blur_skin_kernel_size), cv::Point(-1, -1), true, cv::BORDER_REPLICATE);
//boost skin saturation
adjust_saturation(c, s, skin_saturation);
}, input_, eyesAndLips_, eyes_and_lips_saturation, contrast_, skin_);
window->parallel([](cv::Ptr<cv::detail::MultiBandBlender>& bl,
const cv::UMat& s, const cv::UMat& fsmg,
const cv::UMat& in, const cv::UMat& bmg,
const cv::UMat& eal, const cv::UMat& ealmg,
cv::UMat& fout) {
cv:: UMat foFloat;
//FIXME do it once?
CV_Assert(!s.empty());
CV_Assert(!in.empty());
CV_Assert(!eal.empty());
//piece it all together
//FIXME prepare only once?
bl->prepare(cv::Rect(0, 0, WIDTH, HEIGHT));
bl->feed(s, fsmg, cv::Point(0, 0));
bl->feed(in, bmg, cv::Point(0, 0));
bl->feed(eal, ealmg, cv::Point(0, 0));
bl->blend(foFloat, cv::UMat());
CV_Assert(!foFloat.empty());
foFloat.convertTo(fout, CV_8U, 1.0);
}, blender_, skin_, faceSkinMaskGrey_, input_, backgroundMaskGrey_, eyesAndLips_, eyesAndLipsMaskGrey_, frameOut_);
window->parallel([](cv::UMat& fout, const cv::UMat& in, cv::UMat& lh, cv::UMat& rh) {
if (side_by_side) {
//create side-by-side view with a result
cv::resize(in, lh, cv::Size(0, 0), 0.5, 0.5);
cv::resize(fout, rh, cv::Size(0, 0), 0.5, 0.5);
fout = cv::Scalar::all(0);
lh.copyTo(fout(cv::Rect(0, 0, lh.size().width, lh.size().height)));
rh.copyTo(fout(cv::Rect(rh.size().width, 0, rh.size().width, rh.size().height)));
}
}, frameOut_, input_, lhalf_, rhalf_);
}
window->endgraph(isTrue, faceFound_);
window->graph(isFalse, faceFound_);
{
window->parallel([](cv::UMat& fout, const cv::UMat& in, cv::UMat& lh) {
if (side_by_side) {
//create side-by-side view without a result (using the input image for both sides)
fout = cv::Scalar::all(0);
cv::resize(in, lh, cv::Size(0, 0), 0.5, 0.5);
lh.copyTo(fout(cv::Rect(0, 0, lh.size().width, lh.size().height)));
lh.copyTo(fout(cv::Rect(lh.size().width, 0, lh.size().width, lh.size().height)));
} else {
in.copyTo(fout);
}
}, frameOut_, input_, lhalf_);
}
window->endgraph(isFalse, faceFound_);
window->graph(always);
{
//write the result to the framebuffer
window->fb([](cv::UMat &frameBuffer, const cv::UMat& f) {
cvtColor(f, frameBuffer, cv::COLOR_BGR2BGRA);
}, frameOut_);
}
window->endgraph(always);
} catch (std::exception &ex) {
cerr << ex.what() << endl;
}
}
}
};
#ifndef __EMSCRIPTEN__
@ -376,51 +381,51 @@ int main() {
#endif
using namespace cv::v4d;
cv::Ptr<V4D> window = V4D::make(WIDTH, HEIGHT, "Beautification Demo", ALL, OFFSCREEN);
window->printSystemInfo();
// window->printSystemInfo();
window->setStretching(stretch);
if (!OFFSCREEN) {
window->imgui([window](ImGuiContext* ctx){
using namespace ImGui;
SetCurrentContext(ctx);
Begin("Effect");
Text("Display");
Checkbox("Side by side", &side_by_side);
if(Checkbox("Stetch", &stretch)) {
window->setStretching(true);
} else
window->setStretching(false);
#ifndef __EMSCRIPTEN__
if(Button("Fullscreen")) {
window->setFullscreen(!window->isFullscreen());
};
#endif
if(Button("Offscreen")) {
window->setVisible(!window->isVisible());
};
Text("Face Skin");
SliderInt("Blur", &blur_skin_kernel_size, 0, 128);
SliderFloat("Saturation", &skin_saturation, 0.0f, 100.0f);
SliderFloat("Contrast", &skin_contrast, 0.0f, 1.0f);
Text("Eyes and Lips");
SliderFloat("Saturation ", &eyes_and_lips_saturation, 0.0f, 100.0f);
End();
});
}
// if (!OFFSCREEN) {
// window->imgui([window](ImGuiContext* ctx){
// using namespace ImGui;
// SetCurrentContext(ctx);
// Begin("Effect");
// Text("Display");
// Checkbox("Side by side", &side_by_side);
// if(Checkbox("Stetch", &stretch)) {
// window->setStretching(true);
// } else
// window->setStretching(false);
//
// #ifndef __EMSCRIPTEN__
// if(Button("Fullscreen")) {
// window->setFullscreen(!window->isFullscreen());
// };
// #endif
//
// if(Button("Offscreen")) {
// window->setVisible(!window->isVisible());
// };
//
// Text("Face Skin");
// SliderInt("Blur", &blur_skin_kernel_size, 0, 128);
// SliderFloat("Saturation", &skin_saturation, 0.0f, 100.0f);
// SliderFloat("Contrast", &skin_contrast, 0.0f, 1.0f);
// Text("Eyes and Lips");
// SliderFloat("Saturation ", &eyes_and_lips_saturation, 0.0f, 100.0f);
// End();
// });
// }
#ifndef __EMSCRIPTEN__
auto src = makeCaptureSource(window, argv[1]);
window->setSource(src);
// Sink sink = makeWriterSink(window, OUTPUT_FILENAME, src.fps(), cv::Size(WIDTH, HEIGHT));
// window->setSink(sink);
#else
Source src = makeCaptureSource(WIDTH, HEIGHT, window);
auto src = makeCaptureSource(window);
window->setSource(src);
#endif
window->run<BeautyDemoPlan>(2);
window->run<BeautyDemoPlan>(0);
return 0;
}

@ -12,7 +12,6 @@
constexpr long unsigned int WIDTH = 1280;
constexpr long unsigned int HEIGHT = 720;
#else
constexpr size_t NUMBER_OF_CUBES = 5;
constexpr long unsigned int WIDTH = 960;
constexpr long unsigned int HEIGHT = 960;
#endif
@ -210,30 +209,32 @@ static void glow_effect(const cv::UMat& src, cv::UMat& dst, const int ksize) {
using namespace cv::v4d;
class CubeDemoPlan : public Plan {
cv::UMat frame_;
GLuint vao;
GLuint shaderProgram;
GLuint uniformTransform;
GLuint vao_;
GLuint shaderProgram_;
GLuint uniformTransform_;
public:
void setup(cv::Ptr<V4D> window) {
window->gl([](const cv::Size& sz, GLuint& v, GLuint& sp, GLuint& ut){
init_scene(sz, v, sp, ut);
}, window->fbSize(), vao, shaderProgram, uniformTransform);
}, window->fbSize(), vao_, shaderProgram_, uniformTransform_);
}
void infer(cv::Ptr<V4D> window) {
window->gl([](){
//Clear the background
glClearColor(0.2, 0.24, 0.4, 1);
glClearColor(0.2f, 0.24f, 0.4f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
});
//Render using multiple OpenGL contexts
window->gl([](GLuint& v, GLuint& sp, GLuint& ut){
render_scene(v, sp, ut);
}, vao, shaderProgram, uniformTransform);
}, vao_, shaderProgram_, uniformTransform_);
//Acquire the frame buffer for use by OpenCV
window->fb([](cv::UMat& framebuffer, cv::UMat& f) {
#ifndef __EMSCRIPTEN__
glow_effect(framebuffer, framebuffer, glow_kernel_size);
#endif
framebuffer.copyTo(f);
}, frame_);
@ -244,9 +245,7 @@ public:
};
int main() {
cv::Ptr<V4D> window = V4D::make(WIDTH, HEIGHT, "Many Cubes Demo", IMGUI, OFFSCREEN);
window->printSystemInfo();
cv::Ptr<V4D> window = V4D::make(WIDTH, HEIGHT, "Cube Demo", ALL, OFFSCREEN);
#ifndef __EMSCRIPTEN__
//Creates a writer sink (which might be hardware accelerated)
auto sink = makeWriterSink(window, OUTPUT_FILENAME, FPS, cv::Size(WIDTH, HEIGHT));

@ -40,7 +40,7 @@ int main() {
class CustomSourceAndSinkPlan : public Plan {
string hr_ = "Hello Rainbow!";
public:
void infer(cv::Ptr<V4D> win) override {
win->capture();
@ -58,5 +58,7 @@ int main() {
win->write();
}
};
window->run<CustomSourceAndSinkPlan>(0);
}

@ -11,14 +11,16 @@ int main() {
cv::Ptr<V4D> window = V4D::make(960, 960, "Display an Image");
class DisplayImagePlan : public Plan {
UMat image_ = imread(samples::findFile("lena.jpg")).getUMat(ACCESS_READ);
UMat image_;
public:
void setup(Ptr<V4D> win) override {
win->parallel([](cv::UMat& image){
#ifdef __EMSCRIPTEN__
image_ = read_embedded_image("doc/lena.png").getUMat(ACCESS_READ);
image = read_embedded_image("doc/lena.png").getUMat(ACCESS_READ);
#else
image_ = imread(samples::findFile("lena.jpg")).getUMat(ACCESS_READ);
image = imread(samples::findFile("lena.jpg")).getUMat(ACCESS_READ);
#endif
}, image_);
}
//Display the framebuffer in the native window in an endless loop.
void infer(Ptr<V4D> win) override {
@ -26,4 +28,6 @@ int main() {
win->feed(image_);
}
};
window->run<DisplayImagePlan>(0);
}

@ -8,11 +8,11 @@ int main() {
//Creates a V4D object
Ptr<V4D> window = V4D::make(960, 960, "Display an Image through direct FB access");
class DislayImageFB : public Plan {
class DisplayImageFB : public Plan {
UMat image_;
UMat resized_;
UMat converted_;
public:
void setup(cv::Ptr<V4D> win) {
win->parallel([](cv::UMat& image, cv::UMat& resized, cv::UMat& converted, const cv::Size& sz){
//Loads an image as a UMat (just in case we have hardware acceleration available)
@ -36,4 +36,6 @@ int main() {
}, converted_);
}
};
window->run<DisplayImageFB>(0);
}

@ -13,7 +13,7 @@ struct Image_t {
};
int main() {
cv::Ptr<V4D> window = V4D::make(960, 960, "Display an Image using NanoVG");
cv::Ptr<V4D> window = V4D::make(960, 960, "Display an Image using NanoVG", ALL, false, true);
class DisplayImageNVG : public Plan {
Image_t image_;

@ -198,7 +198,7 @@ int main() {
setup_gui(window);
}
window->printSystemInfo();
// window->printSystemInfo();
//The text to display
string txt = cv::getBuildInformation();

@ -194,6 +194,7 @@ static void render_scene(const double& x, const double& y, const double& angleMo
glDrawElements(GL_TRIANGLES, triangles * 3, GL_UNSIGNED_SHORT, NULL);
}
#ifndef __EMSCRIPTEN__
//applies a glow effect to an image
static void glow_effect(const cv::UMat& src, cv::UMat& dst, const int ksize) {
thread_local cv::UMat resize;
@ -217,6 +218,7 @@ static void glow_effect(const cv::UMat& src, cv::UMat& dst, const int ksize) {
cv::bitwise_not(dst, dst);
}
#endif
using namespace cv::v4d;
class ManyCubesDemoPlan : public Plan {
@ -250,7 +252,9 @@ public:
//Acquire the frame buffer for use by OpenCV
window->fb([](cv::UMat& framebuffer, cv::UMat& f) {
#ifndef __EMSCRIPTEN__
glow_effect(framebuffer, framebuffer, glow_kernel_size);
#endif
framebuffer.copyTo(f);
}, frame_);
@ -262,14 +266,13 @@ public:
int main() {
cv::Ptr<V4D> window = V4D::make(WIDTH, HEIGHT, "Many Cubes Demo", IMGUI, OFFSCREEN);
window->printSystemInfo();
#ifndef __EMSCRIPTEN__
//Creates a writer sink (which might be hardware accelerated)
auto sink = makeWriterSink(window, OUTPUT_FILENAME, FPS, cv::Size(WIDTH, HEIGHT));
window->setSink(sink);
#endif
window->run<ManyCubesDemoPlan>(9);
window->run<ManyCubesDemoPlan>(0);
return 0;
}

@ -131,16 +131,17 @@ class NanoVGDemoPlan : public Plan {
cv::UMat bgra_;
cv::UMat hsv_;
cv::UMat hueChannel_;
long cnt_ = 0;
double hue_;
public:
void infer(cv::Ptr<V4D> window) override {
window->parallel([](const uint64_t& frameCount, double& hue){
window->parallel([](long& cnt, double& hue){
//we use frame count to calculate the current hue
float t = frameCount / 60.0;
double t = ++cnt / 60.0;
//nanovg hue fading depending on t
hue = (sinf(t * 0.12) + 1.0) * 127.5;
}, window->frameCount(), hue_);
}, cnt_, hue_);
window->capture();
@ -149,7 +150,7 @@ public:
cvtColor(framebuffer, rgb, cv::COLOR_BGRA2RGB);
}, rgb_);
window->parallel([](cv::UMat& rgb, cv::UMat& hsv, std::vector<cv::UMat>& hsvChannels, double hue){
window->parallel([](cv::UMat& rgb, cv::UMat& hsv, std::vector<cv::UMat>& hsvChannels, double& hue){
//Color-conversion from RGB to HSV
cv::cvtColor(rgb, hsv, cv::COLOR_RGB2HSV_FULL);
@ -171,6 +172,7 @@ public:
//Render using nanovg
window->nvg([](const cv::Size &sz, const double& h) {
cerr << "HUE: " << h << endl;
draw_color_wheel(sz.width - 300, sz.height - 300, 250.0f, 250.0f, h);
}, window->fbSize(), hue_);
@ -196,7 +198,7 @@ int main() {
window->setSource(src);
window->setSink(sink);
#else
Source src = makeCaptureSource(WIDTH, HEIGHT, window);
auto src = makeCaptureSource(window);
window->setSource(src);
#endif

@ -404,11 +404,13 @@ class OptflowPlan : public Plan {
public:
virtual ~OptflowPlan() override {};
virtual void infer(cv::Ptr<V4D> window) override {
window->capture([](const cv::UMat& videoFrame, cv::UMat& d, cv::UMat& b) {
window->capture();
window->fb([](const cv::UMat& framebuffer, cv::UMat& d, cv::UMat& b) {
//resize to foreground scale
cv::resize(videoFrame, d, cv::Size(videoFrame.size().width * fg_scale, videoFrame.size().height * fg_scale));
cv::resize(framebuffer, d, cv::Size(framebuffer.size().width * fg_scale, framebuffer.size().height * fg_scale));
//save video background
videoFrame.copyTo(b);
framebuffer.copyTo(b);
}, down, background);
window->parallel([](const cv::UMat& d, cv::UMat& dng, cv::UMat& dmmg, std::vector<cv::Point2f>& dp){
@ -435,15 +437,12 @@ public:
dpg = dng.clone();
}, downPrevGrey, downNextGrey);
window->fb([](cv::UMat& framebuffer, cv::UMat& b, cv::UMat& f, cv::UMat& r) {
window->fb([](cv::UMat& framebuffer, cv::UMat& b, cv::UMat& f) {
//Put it all together (OpenCL)
composite_layers(b, f, framebuffer, framebuffer, glow_kernel_size, fg_loss, background_mode, post_proc_mode);
framebuffer.copyTo(r);
}, background, foreground, result);
}, background, foreground);
window->write([](cv::UMat& videoFrame, cv::UMat& r) {
r.copyTo(videoFrame);
}, result);
window->write();
}
};
@ -461,7 +460,6 @@ int main(int argc, char **argv) {
try {
using namespace cv::v4d;
cv::Ptr<V4D> window = V4D::make(WIDTH, HEIGHT, "Sparse Optical Flow Demo", ALL, OFFSCREEN);
window->printSystemInfo();
window->setStretching(stretch);
if (!OFFSCREEN) {
#ifndef __EMSCRIPTEN__
@ -477,7 +475,7 @@ int main(int argc, char **argv) {
auto sink = makeWriterSink(window, OUTPUT_FILENAME, src->fps(), cv::Size(WIDTH, HEIGHT));
window->setSink(sink);
#else
cv::Ptr<Source> src = makeCaptureSource(WIDTH, HEIGHT, window);
cv::Ptr<Source> src = makeCaptureSource(window);
window->setSource(src);
#endif

@ -5,8 +5,6 @@ using namespace cv;
using namespace cv::v4d;
int main() {
Ptr<V4D> window = V4D::make(960, 960, "GL Blue Screen");
class RenderOpenGLPlan : public Plan {
public:
void setup(Ptr<V4D> win) override {
@ -23,6 +21,7 @@ int main() {
}
};
Ptr<V4D> window = V4D::make(960, 960, "GL Blue Screen");
window->run<RenderOpenGLPlan>(0);
}

@ -268,7 +268,6 @@ static void setup_gui(cv::Ptr<V4D> window) {
}
class ShaderDemoPlan : public Plan {
cv::UMat frame_;
public:
void setup(cv::Ptr<V4D> window) override {
window->gl([](const cv::Size &sz) {
@ -277,20 +276,19 @@ void setup(cv::Ptr<V4D> window) override {
}
void infer(cv::Ptr<V4D> window) override {
window->capture(frame_);
window->capture();
window->gl([](const cv::Size &sz) {
render_scene(sz);
}, window->fbSize());
#ifndef __EMSCRIPTEN__
window->fb([](cv::UMat& framebuffer, cv::UMat& f) {
window->fb([](cv::UMat& framebuffer) {
glow_effect(framebuffer, framebuffer, glow_kernel_size);
framebuffer.copyTo(f);
}, frame_);
});
#endif
window->write(frame_);
window->write();
}
};
@ -309,7 +307,6 @@ int main() {
setup_gui(window);
}
window->printSystemInfo();
#ifndef __EMSCRIPTEN__
auto src = makeCaptureSource(window, argv[1]);
@ -317,11 +314,11 @@ int main() {
auto sink = makeWriterSink(window, OUTPUT_FILENAME, src->fps(), cv::Size(WIDTH, HEIGHT));
window->setSink(sink);
#else
Source src = makeCaptureSource(WIDTH, HEIGHT, window);
auto src = makeCaptureSource(window);
window->setSource(src);
#endif
window->run<ShaderDemoPlan>(3);
window->run<ShaderDemoPlan>(0);
} catch (std::exception& ex) {
cerr << "Exception: " << ex.what() << endl;
}

@ -5,8 +5,6 @@ using namespace cv;
using namespace cv::v4d;
int main() {
Ptr<V4D> window = V4D::make(960, 960, "Vector Graphics and Framebuffer");
class VectorGraphicsAndFBPlan : public Plan {
public:
void infer(Ptr<V4D> win) override {
@ -101,7 +99,7 @@ int main() {
});
}
};
Ptr<V4D> window = V4D::make(960, 960, "Vector Graphics and Framebuffer");
window->run<VectorGraphicsAndFBPlan>(0);
}

@ -190,7 +190,6 @@ static void glow_effect(const cv::UMat& src, cv::UMat& dst, const int ksize) {
using namespace cv::v4d;
class VideoDemoPlan: public Plan {
cv::UMat frame_;
public:
void setup(cv::Ptr<V4D> window) override {
window->gl([]() {
@ -198,19 +197,21 @@ public:
});
}
void infer(cv::Ptr<V4D> window) override {
window->capture(frame_);
window->capture();
window->gl([]() {
render_scene();
});
window->fb([](cv::UMat &framebuffer, cv::UMat& f) {
#ifndef __EMSCRIPTEN__
window->fb([](cv::UMat &framebuffer) {
glow_effect(framebuffer, framebuffer, glow_kernel_size);
framebuffer.copyTo(f);
}, frame_);
});
#endif
//Ignored in WebAssembly builds because there is no sink set.
window->write(frame_);
window->write();
}
};
@ -225,7 +226,6 @@ int main() {
#endif
using namespace cv::v4d;
cv::Ptr<V4D> window = V4D::make(WIDTH, HEIGHT, "Video Demo", NONE, OFFSCREEN, false, 0);
window->printSystemInfo();
#ifndef __EMSCRIPTEN__
//Creates a source from a file or a device
@ -236,7 +236,7 @@ int main() {
window->setSink(sink);
#else
//Creates a webcam source if available
auto src = makeCaptureSource(WIDTH, HEIGHT, window);
auto src = makeCaptureSource(window);
window->setSource(src);
#endif

@ -13,7 +13,12 @@ int main(int argc, char** argv) {
class VideoEditingPlan : public Plan {
cv::UMat frame_;
const string hv_ = "Hello Video!";
cv::Size fbSz_;
public:
void setup(Ptr<V4D> win) override {
fbSz_ = win->fbSize();
}
void infer(Ptr<V4D> win) override {
//Capture video from the source
win->capture();
@ -27,7 +32,7 @@ int main(int argc, char** argv) {
fillColor(Scalar(255, 0, 0, 255));
textAlign(NVG_ALIGN_CENTER | NVG_ALIGN_TOP);
text(sz.width / 2.0, sz.height / 2.0, str.c_str(), str.c_str() + str.size());
}, win->fbSize(), hv_);
}, fbSz_, hv_);
//Write video to the sink (do nothing in case of WebAssembly)
win->write();
@ -45,7 +50,7 @@ int main(int argc, char** argv) {
window->setSink(sink);
#else
//Make a webcam Source
Source src = makeCaptureSource(960, 960, window);
auto src = makeCaptureSource(window);
//Attach webcam source
window->setSource(src);
#endif

@ -133,16 +133,17 @@ void FrameBufferContext::initWebGLCopy(const size_t& index) {
#endif
}
void FrameBufferContext::doWebGLCopy(FrameBufferContext& other) {
void FrameBufferContext::doWebGLCopy(cv::Ptr<FrameBufferContext> other) {
#ifdef __EMSCRIPTEN__
size_t index = other.getIndex();
cerr << "do copy" << endl;
size_t index = other->getIndex();
this->makeCurrent();
int width = getWindowSize().width;
int height = getWindowSize().height;
{
FrameBufferContext::GLScope glScope(*this, GL_READ_FRAMEBUFFER);
other.blitFrameBufferToFrameBuffer(
cv::Rect(0,0, other.size().width, other.size().height),
FrameBufferContext::GLScope glScope(self(), GL_READ_FRAMEBUFFER);
other->blitFrameBufferToFrameBuffer(
cv::Rect(0,0, other->size().width, other->size().height),
this->getWindowSize(),
0, false);
emscripten_webgl_commit_frame();
@ -703,7 +704,7 @@ void FrameBufferContext::begin(GLenum framebufferTarget) {
}
void FrameBufferContext::end() {
GL_CHECK(glFlush());
GL_CHECK(glFinish());
this->makeNoneCurrent();
// this->fence();
}
@ -793,7 +794,9 @@ void FrameBufferContext::makeCurrent() {
}
void FrameBufferContext::makeNoneCurrent() {
glfwMakeContextCurrent(nullptr);
#ifndef __EMSCRIPTEN__
glfwMakeContextCurrent(nullptr);
#endif
}

@ -13,7 +13,7 @@ GLContext::GLContext(cv::Ptr<FrameBufferContext> fbContext) :
mainFbContext_(fbContext), glFbContext_(new FrameBufferContext(*fbContext->getV4D(), "OpenGL", *fbContext)) {
#ifdef __EMSCRIPTEN__
run_sync_on_main<19>([&,this](){
mainFbContext_.initWebGLCopy(fbCtx()->getIndex());
mainFbContext_->initWebGLCopy(fbCtx()->getIndex());
});
#endif
}
@ -42,7 +42,7 @@ void GLContext::execute(std::function<void()> fn) {
}
if(!fbCtx()->isShared()) {
#ifdef __EMSCRIPTEN__
mainFbContext_.doWebGLCopy(fbCtx());
mainFbContext_->doWebGLCopy(fbCtx());
#else
UMat tmp;
fbCtx()->copyTo(tmp);

@ -35,7 +35,7 @@ NanoVGContext::NanoVGContext(cv::Ptr<FrameBufferContext> fbContext) :
nvgCreateFont(context_, "sans-bold", "modules/v4d/assets/fonts/Roboto-Bold.ttf");
#endif
#ifdef __EMSCRIPTEN__
mainFbContext_.initWebGLCopy(fbCtx()->getIndex());
mainFbContext_->initWebGLCopy(fbCtx()->getIndex());
#endif
}
});
@ -63,7 +63,7 @@ void NanoVGContext::execute(std::function<void()> fn) {
}
if (!fbCtx()->isShared()) {
#ifdef __EMSCRIPTEN__
mainFbContext_.doWebGLCopy(fbCtx());
mainFbContext_->doWebGLCopy(fbCtx());
#else
UMat tmp;
fbCtx()->copyTo(tmp);

@ -23,20 +23,36 @@ void SourceContext::execute(std::function<void()> fn) {
CLExecScope_t scope(getCLExecContext());
#endif
if (mainFbContext_->getV4D()->hasSource()) {
auto p = mainFbContext_->getV4D()->getSource()->operator ()();
currentSeqNr_ = p.first;
resizePreserveAspectRatio(p.second, captureBufferRGB_, mainFbContext_->size());
cv::cvtColor(captureBufferRGB_, sourceBuffer(), cv::COLOR_RGB2BGRA);
auto src = mainFbContext_->getV4D()->getSource();
if(src->isOpen()) {
auto p = src->operator ()();
currentSeqNr_ = p.first;
if(p.second.empty())
p.second.create(mainFbContext_->size(), CV_8UC3);
resizePreserveAspectRatio(p.second, captureBufferRGB_, mainFbContext_->size());
cv::cvtColor(captureBufferRGB_, sourceBuffer(), cv::COLOR_RGB2BGRA);
}
}
fn();
#ifndef __EMSCRIPTEN__
} else {
if (mainFbContext_->getV4D()->hasSource()) {
auto p = mainFbContext_->getV4D()->getSource()->operator ()();
currentSeqNr_ = p.first;
resizePreserveAspectRatio(p.second, captureBufferRGB_, mainFbContext_->size());
cv::cvtColor(captureBufferRGB_, sourceBuffer(), cv::COLOR_RGB2BGRA);
auto src = mainFbContext_->getV4D()->getSource();
if(src->isOpen()) {
auto p = src->operator ()();
currentSeqNr_ = p.first;
if(p.second.empty())
p.second.create(mainFbContext_->size(), CV_8UC3);
resizePreserveAspectRatio(p.second, captureBufferRGB_, mainFbContext_->size());
cv::cvtColor(captureBufferRGB_, sourceBuffer(), cv::COLOR_RGB2BGRA);
}
}
fn();
}

@ -19,15 +19,8 @@ Source::Source() :
Source::~Source() {
}
bool Source::isReady() {
if (generator_)
return true;
else
return false;
}
bool Source::isOpen() {
return open_;
return generator_ && open_;
}
bool Source::isAsync() {

@ -39,8 +39,6 @@ namespace cv {
namespace v4d {
namespace detail {
thread_local std::mutex ThreadLocal::mtx_;
thread_local bool ThreadLocal::sync_run_;
std::mutex Global::mtx_;
uint64_t Global::frame_cnt_ = 0;
uint64_t Global::start_time_ = get_epoch_nanos();
@ -509,8 +507,8 @@ public:
};
cv::Ptr<HTML5Capture> capture = nullptr;
int capture_width = 0;
int capture_height = 0;
static thread_local int capture_width = 0;
static thread_local int capture_height = 0;
extern "C" {
@ -522,10 +520,10 @@ void v4dInitCapture(int width, int height) {
}
cv::Ptr<Source> makeCaptureSource(int width, int height, cv::Ptr<V4D> window) {
cv::Ptr<Source> makeCaptureSource(cv::Ptr<V4D> window) {
using namespace std;
return new Source([=](cv::UMat& frame) {
return new Source([window](cv::UMat& frame) {
if(capture_width > 0 && capture_height > 0) {
try {
run_sync_on_main<17>([&]() {

@ -204,31 +204,26 @@ bool V4D::hasSource() {
return source_ != nullptr;
}
void V4D::feed(cv::InputArray in) {
CV_Assert(false);
//#ifndef __EMSCRIPTEN__
// CLExecScope_t scope(fbCtx()->getCLExecContext());
//#endif
// TimeTracker::getInstance()->execute("feed", [this, &in](){
// cv::UMat frame;
// captureCtx().capture([&](cv::UMat& videoFrame) {
// in.copyTo(videoFrame);
// }, frame);
//
// fb([](cv::UMat& fb, const cv::UMat& f) {
// f.copyTo(fb);
// }, frame);
// });
}
cv::_InputArray V4D::fetch() {
CV_Assert(false);
void V4D::feed(cv::UMat& in) {
static thread_local cv::UMat frame;
parallel([](cv::UMat& src, cv::UMat& f, const cv::Size sz) {
cv::UMat rgb;
resizePreserveAspectRatio(src, rgb, sz);
cv::cvtColor(rgb, f, cv::COLOR_RGB2BGRA);
}, in, frame, mainFbContext_->size());
fb([](cv::UMat& frameBuffer, const cv::UMat& f) {
f.copyTo(frameBuffer);
}, frame);
}
cv::UMat V4D::fetch() {
cv::UMat frame;
// TimeTracker::getInstance()->execute("copyTo", [this, &frame](){
// fb([](const cv::UMat& fb, cv::UMat& f) {
// fb.copyTo(f);
// }, frame);
// });
fb([](const cv::UMat& fb, cv::UMat& f) {
fb.copyTo(f);
}, frame);
return frame;
}
@ -351,9 +346,21 @@ bool V4D::isFocused() {
}
void V4D::swapContextBuffers() {
{
FrameBufferContext::GLScope glScope(glCtx(-1)->fbCtx(), GL_READ_FRAMEBUFFER);
glCtx(-1)->fbCtx()->blitFrameBufferToFrameBuffer(viewport(), glCtx(-1)->fbCtx()->getWindowSize(), 0, isStretching());
// GL_CHECK(glFinish());
#ifndef __EMSCRIPTEN__
glfwSwapBuffers(glCtx(-1)->fbCtx()->getGLFWWindow());
#else
emscripten_webgl_commit_frame();
#endif
}
for(size_t i = 0; i < numGlCtx(); ++i) {
FrameBufferContext::GLScope glScope(glCtx(i)->fbCtx(), GL_READ_FRAMEBUFFER);
glCtx(i)->fbCtx()->blitFrameBufferToFrameBuffer(viewport(), glCtx(i)->fbCtx()->getWindowSize(), 0, isStretching());
// GL_CHECK(glFinish());
#ifndef __EMSCRIPTEN__
glfwSwapBuffers(glCtx(i)->fbCtx()->getGLFWWindow());
#else
@ -364,6 +371,7 @@ void V4D::swapContextBuffers() {
if(hasNvgCtx()) {
FrameBufferContext::GLScope glScope(nvgCtx()->fbCtx(), GL_READ_FRAMEBUFFER);
nvgCtx()->fbCtx()->blitFrameBufferToFrameBuffer(viewport(), nvgCtx()->fbCtx()->getWindowSize(), 0, isStretching());
// GL_CHECK(glFinish());
#ifndef __EMSCRIPTEN__
glfwSwapBuffers(nvgCtx()->fbCtx()->getGLFWWindow());
#else
@ -395,16 +403,21 @@ bool V4D::display() {
Global::fps() = (fcnt / diff_seconds);
cerr << "\rFPS:" << Global::fps() << endl;
}
#ifndef __EMSCRIPTEN__
if(debug_) {
swapContextBuffers();
}
#else
swapContextBuffers();
#endif
{
FrameBufferContext::GLScope glScope(fbCtx(), GL_READ_FRAMEBUFFER);
fbCtx()->blitFrameBufferToFrameBuffer(viewport(), fbCtx()->getWindowSize(), 0, isStretching());
}
if(hasImguiCtx())
imguiCtx()->render(getShowFPS());
#ifndef __EMSCRIPTEN__
if(debug_)
swapContextBuffers();
#endif
fbCtx()->makeCurrent();
#ifndef __EMSCRIPTEN__
glfwSwapBuffers(fbCtx()->getGLFWWindow());
#else
@ -416,7 +429,7 @@ bool V4D::display() {
{
FrameBufferContext::GLScope glScope(fbCtx(), GL_DRAW_FRAMEBUFFER);
GL_CHECK(glViewport(0, 0, fbCtx()->size().width, fbCtx()->size().height));
GL_CHECK(glClearColor(0,0,0,255));
GL_CHECK(glClearColor(0,0,0,0));
GL_CHECK(glClear(GL_COLOR_BUFFER_BIT));
}
#ifndef __EMSCRIPTEN__

Loading…
Cancel
Save