many small fixes

pull/3471/head
kallaballa 2 years ago
parent 8b0ad49bca
commit 8b896b2a46
  1. 41
      modules/v4d/CMakeLists.txt
  2. 2
      modules/v4d/include/opencv2/v4d/nvg.hpp
  3. 26
      modules/v4d/include/opencv2/v4d/v4d.hpp
  4. 37
      modules/v4d/samples/beauty-demo.cpp
  5. 17
      modules/v4d/samples/cube-demo.cpp
  6. 6
      modules/v4d/samples/custom_source_and_sink.cpp
  7. 7
      modules/v4d/samples/display_image.cpp
  8. 11
      modules/v4d/samples/display_image_fb.cpp
  9. 34
      modules/v4d/samples/example_v4d_beauty-demo.html
  10. 63
      modules/v4d/samples/example_v4d_cube-demo.html
  11. 20
      modules/v4d/samples/example_v4d_custom_source_and_sink.html
  12. 30
      modules/v4d/samples/example_v4d_display_image.html
  13. 20
      modules/v4d/samples/example_v4d_display_image_fb.html
  14. 20
      modules/v4d/samples/example_v4d_font-demo.html
  15. 20
      modules/v4d/samples/example_v4d_font_rendering.html
  16. 20
      modules/v4d/samples/example_v4d_font_with_gui.html
  17. 36
      modules/v4d/samples/example_v4d_nanovg-demo.html
  18. 34
      modules/v4d/samples/example_v4d_optflow-demo.html
  19. 36
      modules/v4d/samples/example_v4d_pedestrian-demo.html
  20. 23
      modules/v4d/samples/example_v4d_render_opengl.html
  21. 34
      modules/v4d/samples/example_v4d_shader-demo.html
  22. 21
      modules/v4d/samples/example_v4d_vector_graphics.html
  23. 22
      modules/v4d/samples/example_v4d_vector_graphics_and_fb.html
  24. 2
      modules/v4d/samples/example_v4d_video-demo.html
  25. 36
      modules/v4d/samples/example_v4d_video_editing.html
  26. 39
      modules/v4d/samples/font-demo.cpp
  27. 7
      modules/v4d/samples/font_rendering.cpp
  28. 7
      modules/v4d/samples/font_with_gui.cpp
  29. 6
      modules/v4d/samples/nanovg-demo.cpp
  30. 12
      modules/v4d/samples/optflow-demo.cpp
  31. 10
      modules/v4d/samples/pedestrian-demo.cpp
  32. 6
      modules/v4d/samples/render_opengl.cpp
  33. 5
      modules/v4d/samples/shader-demo.cpp
  34. 7
      modules/v4d/samples/vector_graphics.cpp
  35. 11
      modules/v4d/samples/vector_graphics_and_fb.cpp
  36. 7
      modules/v4d/samples/video-demo.cpp
  37. 5
      modules/v4d/samples/video_editing.cpp
  38. 18
      modules/v4d/src/detail/clvacontext.cpp
  39. 7
      modules/v4d/src/detail/clvacontext.hpp
  40. 108
      modules/v4d/src/detail/framebuffercontext.cpp
  41. 11
      modules/v4d/src/detail/framebuffercontext.hpp
  42. 32
      modules/v4d/src/detail/glcontext.cpp
  43. 83
      modules/v4d/src/detail/nanoguicontext.cpp
  44. 16
      modules/v4d/src/detail/nanoguicontext.hpp
  45. 56
      modules/v4d/src/detail/nanovgcontext.cpp
  46. 10
      modules/v4d/src/detail/nanovgcontext.hpp
  47. 182
      modules/v4d/src/detail/pbodownloader.cpp
  48. 37
      modules/v4d/src/detail/pbodownloader.hpp
  49. 1
      modules/v4d/src/formhelper.cpp
  50. 11
      modules/v4d/src/nvg.cpp
  51. 59
      modules/v4d/src/util.cpp
  52. 117
      modules/v4d/src/v4d.cpp
  53. 19
      modules/v4d/tutorials/00-intro.markdown
  54. 2
      modules/v4d/tutorials/17-beauty.markdown

@ -26,7 +26,7 @@ macro(configure_files srcDir destDir)
endmacro(configure_files)
macro(add_emscripten_sample sample source)
macro(add_emscripten_sample sample source assets)
ocv_add_executable(${sample} ${source})
ocv_target_include_modules(${sample} opencv_core opencv_imgproc opencv_videoio opencv_video opencv_imgcodecs opencv_v4d opencv_face opencv_tracking opencv_objdetect opencv_stitching opencv_optflow opencv_imgcodecs opencv_features2d opencv_dnn opencv_flann)
ocv_target_link_libraries(${sample} LINK_PRIVATE opencv_core opencv_imgproc opencv_videoio opencv_video opencv_imgcodecs
@ -39,6 +39,9 @@ macro(add_emscripten_sample sample source)
COMMAND ${CMAKE_COMMAND} -E copy
"${CMAKE_CURRENT_LIST_DIR}/samples/${sample}.html"
"${CMAKE_CURRENT_BINARY_DIR}/../../bin/${sample}.html")
if(${assets})
target_link_options(${sample} PRIVATE --preload-file assets)
endif()
endmacro()
macro(add_binary_sample sample)
@ -49,7 +52,7 @@ macro(add_binary_sample sample)
endmacro()
if(EMSCRIPTEN)
set(EM_LINKER_FLAGS "-sENVIRONMENT=web,worker -sOFFSCREENCANVAS_SUPPORT -sSTANDALONE_WASM -sOFFSCREENCANVASES_TO_PTHREAD=#offscreenCanvas -sEXPORTED_FUNCTIONS=_main,_v4dInitCapture -sEXPORTED_RUNTIME_METHODS=ccall -sPROXY_TO_PTHREAD=1 --use-preload-plugins --preload-file doc/lena.png -sINITIAL_MEMORY=128MB -sALLOW_MEMORY_GROWTH=1 -sUSE_GLFW=3 -sMIN_WEBGL_VERSION=2 -sMAX_WEBGL_VERSION=2 --bind")
set(EM_LINKER_FLAGS "-sENVIRONMENT=web,worker -sWASM_BIGINT=1 -sOFFSCREENCANVAS_SUPPORT -sOFFSCREENCANVASES_TO_PTHREAD=#offscreenCanvas -sEXPORTED_FUNCTIONS=_main,_v4dInitCapture -sEXPORTED_RUNTIME_METHODS=ccall -sPROXY_TO_PTHREAD=1 --use-preload-plugins --preload-file doc/lena.png -sINITIAL_MEMORY=128MB -sALLOW_MEMORY_GROWTH=1 -sUSE_GLFW=3 -sMIN_WEBGL_VERSION=2 -sMAX_WEBGL_VERSION=2 --bind")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${EM_LINKER_FLAGS}")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${EM_LINKER_FLAGS}")
@ -118,23 +121,23 @@ if(BUILD_EXAMPLES)
set(NANOGUI_BUILD_GLFW OFF)
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/third/nanogui/ext/glfw/include/")
# add_emscripten_sample(example_v4d_display_image samples/display_image.cpp)
# add_emscripten_sample(example_v4d_display_image_fb samples/display_image_fb.cpp)
# add_emscripten_sample(example_v4d_vector_graphics samples/vector_graphics.cpp)
# add_emscripten_sample(example_v4d_vector_graphics_and_fb samples/vector_graphics_and_fb.cpp)
# add_emscripten_sample(example_v4d_render_opengl samples/render_opengl.cpp)
# add_emscripten_sample(example_v4d_custom_source_and_sink samples/custom_source_and_sink.cpp)
# add_emscripten_sample(example_v4d_font_rendering samples/font_rendering.cpp)
# add_emscripten_sample(example_v4d_font_with_gui samples/font_with_gui.cpp)
# add_emscripten_sample(example_v4d_video_editing samples/video_editing.cpp)
# add_emscripten_sample(example_v4d_cube-demo samples/cube-demo.cpp)
add_emscripten_sample(example_v4d_video-demo samples/video-demo.cpp)
# add_emscripten_sample(example_v4d_nanovg-demo samples/nanovg-demo.cpp)
# add_emscripten_sample(example_v4d_font-demo samples/font-demo.cpp)
# add_emscripten_sample(example_v4d_shader-demo samples/shader-demo.cpp)
# add_emscripten_sample(example_v4d_pedestrian-demo samples/pedestrian-demo.cpp)
# add_emscripten_sample(example_v4d_optflow-demo samples/optflow-demo.cpp)
# add_emscripten_sample(example_v4d_beauty-demo samples/beauty-demo.cpp)
# add_emscripten_sample(example_v4d_display_image samples/display_image.cpp false)
# add_emscripten_sample(example_v4d_display_image_fb samples/display_image_fb.cpp false)
# add_emscripten_sample(example_v4d_vector_graphics samples/vector_graphics.cpp false)
# add_emscripten_sample(example_v4d_vector_graphics_and_fb samples/vector_graphics_and_fb.cpp false)
# add_emscripten_sample(example_v4d_render_opengl samples/render_opengl.cpp false)
# add_emscripten_sample(example_v4d_custom_source_and_sink samples/custom_source_and_sink.cpp false)
# add_emscripten_sample(example_v4d_font_rendering samples/font_rendering.cpp false)
# add_emscripten_sample(example_v4d_font_with_gui samples/font_with_gui.cpp false)
# add_emscripten_sample(example_v4d_video_editing samples/video_editing.cpp false)
# add_emscripten_sample(example_v4d_cube-demo samples/cube-demo.cpp false)
# add_emscripten_sample(example_v4d_video-demo samples/video-demo.cpp false)
# add_emscripten_sample(example_v4d_nanovg-demo samples/nanovg-demo.cpp false)
add_emscripten_sample(example_v4d_font-demo samples/font-demo.cpp false)
# add_emscripten_sample(example_v4d_shader-demo samples/shader-demo.cpp false)
# add_emscripten_sample(example_v4d_pedestrian-demo samples/pedestrian-demo.cpp false)
# add_emscripten_sample(example_v4d_optflow-demo samples/optflow-demo.cpp false)
add_emscripten_sample(example_v4d_beauty-demo samples/beauty-demo.cpp true)
else()
add_binary_sample(example_v4d_display_image)
add_binary_sample(example_v4d_custom_source_and_sink)

@ -488,6 +488,8 @@ CV_EXPORTS void intersectScissor(float x, float y, float w, float h);
* A forward to nvgRresetScissor. See https://github.com/memononen/nanovg/blob/master/src/nanovg.h
*/
CV_EXPORTS void resetScissor();
CV_EXPORTS void clear(const cv::Scalar& bgra = cv::Scalar(0, 0, 0, 255));
}
}
}

@ -116,6 +116,7 @@ class NVG;
class CV_EXPORTS V4D {
friend class detail::NanoVGContext;
friend class detail::FrameBufferContext;
friend class HTML5Capture;
cv::Size initialSize_;
const string& title_;
int major_;
@ -144,8 +145,8 @@ class CV_EXPORTS V4D {
std::future<void> futureWriter_;
std::function<bool(int key, int scancode, int action, int modifiers)> keyEventCb_;
uint64_t frameCnt_ = 0;
cv::TickMeter tick_;
float fps_ = 0;
bool showFPS_ = true;
bool printFPS_ = true;
public:
/*!
* Creates a V4D object which is the central object to perform visualizations with.
@ -218,11 +219,6 @@ public:
* @param fn A functor that will be called repeatetly until the application terminates or the functor returns false
*/
CV_EXPORTS void run(std::function<bool()> fn);
/*!
* Clear the framebuffer.
* @param bgra The color to use for clearing.
*/
CV_EXPORTS void clear(const cv::Scalar& bgra = cv::Scalar(0, 0, 0, 255));
/*!
* Called to feed an image directly to the framebuffer
*/
@ -333,6 +329,12 @@ public:
* Get the frambuffer size.
* @return The framebuffer size.
*/
CV_EXPORTS bool getShowFPS();
CV_EXPORTS void setShowFPS(bool s);
CV_EXPORTS bool getPrintFPS();
CV_EXPORTS void setPrintFPS(bool p);
CV_EXPORTS bool isFullscreen();
/*!
* Enable or disable fullscreen mode.
@ -392,13 +394,8 @@ public:
* Print basic system information to stderr
*/
CV_EXPORTS void printSystemInfo();
/*!
* Updates and prints the current fps to stderr and/or renders the fps on screen.
* @param print if true prints the current fps to stderr
* @param graphical if true renders the fps on screen
*/
CV_EXPORTS void showFps(bool print = true, bool graphical = true);
FrameBufferContext& fbCtx();
CV_EXPORTS void makeCurrent();
private:
V4D(const cv::Size& size, const cv::Size& fbsize,
const string& title, bool offscreen, bool debug, int major, int minor, bool compat, int samples);
@ -413,6 +410,7 @@ private:
bool keyboard_event(int key, int scancode, int action, int modifiers);
void setMousePosition(int x, int y);
FrameBufferContext& fbCtx();
CLVAContext& clvaCtx();
NanoVGContext& nvgCtx();
NanoguiContext& nguiCtx();

@ -27,7 +27,7 @@ using std::string;
/** Application parameters **/
constexpr unsigned int WIDTH = 1280;
constexpr unsigned int HEIGHT = 720;
constexpr double SCALE = 0.125; //Scale at which face detection is performed
constexpr float SCALE = 0.125;
constexpr bool OFFSCREEN = false;
#ifndef __EMSCRIPTEN__
constexpr const char *OUTPUT_FILENAME = "beauty-demo.mkv";
@ -48,10 +48,13 @@ bool side_by_side = false;
bool stretch = false;
#endif
static cv::Ptr<cv::v4d::V4D> v4d = cv::v4d::V4D::make(cv::Size(WIDTH, HEIGHT), cv::Size(), "Beauty Demo", OFFSCREEN);
static cv::Ptr<cv::face::Facemark> facemark = cv::face::createFacemarkLBF(); //Face landmark detection
cv::Ptr<cv::v4d::V4D> v4d;
cv::Ptr<cv::face::Facemark> facemark = cv::face::createFacemarkLBF(); //Face landmark detection
cv::detail::MultiBandBlender blender(false, 5); //Blender (used to put the different face parts back together)
#ifdef USE_TRACKER
static cv::Ptr<cv::Tracker> tracker = cv::TrackerKCF::create(); //Instead of continues face detection we can use a tracker
cv::Ptr<cv::Tracker> tracker = cv::TrackerKCF::create(); //Instead of continues face detection we can use a tracker
#endif
/*!
@ -141,6 +144,7 @@ struct FaceFeatures {
//based on the detected FaceFeatures guesses a decent face oval and draws a mask.
static void draw_face_oval_mask(const vector<FaceFeatures> &lm) {
using namespace cv::v4d::nvg;
clear();
for (size_t i = 0; i < lm.size(); i++) {
vector<vector<cv::Point2f>> features = lm[i].features();
cv::RotatedRect rotRect = cv::fitEllipse(features[0]);
@ -156,6 +160,7 @@ static void draw_face_oval_mask(const vector<FaceFeatures> &lm) {
//Draws a mask consisting of eyes and lips areas (deduced from FaceFeatures)
static void draw_face_eyes_and_lips_mask(const vector<FaceFeatures> &lm) {
using namespace cv::v4d::nvg;
clear();
for (size_t i = 0; i < lm.size(); i++) {
vector<vector<cv::Point2f>> features = lm[i].features();
for (size_t j = 5; j < 8; ++j) {
@ -239,10 +244,11 @@ static bool iteration() {
static bool trackerInitalized = false;
#endif
//Face detector
static cv::Ptr<cv::FaceDetectorYN> detector = cv::FaceDetectorYN::create("face_detection_yunet_2022mar.onnx", "", cv::Size(v4d->framebufferSize().width * SCALE, v4d->framebufferSize().height * SCALE), 0.9, 0.3, 5000, cv::dnn::DNN_BACKEND_OPENCV, cv::dnn::DNN_TARGET_OPENCL);
//Blender (used to put the different face parts back together)
static cv::detail::MultiBandBlender blender(false, 5);
blender.prepare(cv::Rect(0, 0, WIDTH, HEIGHT));
#ifndef __EMSCRIPTEN__
static cv::Ptr<cv::FaceDetectorYN> detector = cv::FaceDetectorYN::create("assets/face_detection_yunet_2023mar.onnx", "", cv::Size(v4d->framebufferSize().width * SCALE, v4d->framebufferSize().height * SCALE), 0.9, 0.3, 5000, cv::dnn::DNN_BACKEND_OPENCV, cv::dnn::DNN_TARGET_OPENCL);
#else
static cv::Ptr<cv::FaceDetectorYN> detector = cv::FaceDetectorYN::create("assets/face_detection_yunet_2023mar.onnx", "", cv::Size(v4d->framebufferSize().width * SCALE, v4d->framebufferSize().height * SCALE), 0.9, 0.3, 5000, cv::dnn::DNN_BACKEND_OPENCV, cv::dnn::DNN_TARGET_CPU);
#endif
//BGR
static cv::UMat input, down, blurred, contrast, faceOval, eyesAndLips, skin;
static cv::UMat frameOut(HEIGHT, WIDTH, CV_8UC3);
@ -286,7 +292,7 @@ static bool iteration() {
//Detect faces in the down-scaled image
detector->detect(down, faces);
//Collect face bounding rectangles thought we will only use the first
//Collect face bounding rectangles though we will only use the first
for (int i = 0; i < faces.rows; i++) {
faceRects.push_back(cv::Rect(int(faces.at<float>(i, 0)), int(faces.at<float>(i, 1)), int(faces.at<float>(i, 2)), int(faces.at<float>(i, 3))));
}
@ -307,7 +313,6 @@ static bool iteration() {
featuresList.push_back(FaceFeatures(faceRects[i], shapes[i], float(down.size().width) / WIDTH));
}
v4d->clear();
v4d->nvg([&]() {
//Draw the face oval of the first face
draw_face_oval_mask(featuresList);
@ -318,7 +323,6 @@ static bool iteration() {
cvtColor(frameBuffer, faceOval, cv::COLOR_BGRA2GRAY);
});
v4d->clear();
v4d->nvg([&]() {
//Draw eyes eyes and lips areas of the first face
draw_face_eyes_and_lips_mask(featuresList);
@ -345,6 +349,8 @@ static bool iteration() {
//boost skin saturation
adjust_saturation(blurred,skin, skin_saturation);
blender.prepare(cv::Rect(0, 0, WIDTH, HEIGHT));
//piece it all together
blender.feed(skin, faceSkinMaskGrey, cv::Point(0, 0));
blender.feed(input, backgroundMaskGrey, cv::Point(0, 0));
@ -379,8 +385,6 @@ static bool iteration() {
});
}
v4d->showFps();
#ifndef __EMSCRIPTEN__
v4d->write();
#endif
@ -404,8 +408,8 @@ int main(int argc, char **argv) {
int main() {
#endif
using namespace cv::v4d;
facemark->loadModel("lbfmodel.yaml");
v4d = V4D::make(cv::Size(WIDTH, HEIGHT), cv::Size(), "Beauty Demo", OFFSCREEN);
facemark->loadModel("assets/lbfmodel.yaml");
v4d->setFrameBufferScaling(stretch);
@ -420,6 +424,9 @@ int main() {
v4d->setSource(src);
Sink sink = makeWriterSink(OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), src.fps(), cv::Size(WIDTH, HEIGHT));
v4d->setSink(sink);
#else
Source src = makeCaptureSource(WIDTH, HEIGHT, v4d);
v4d->setSource(src);
#endif
v4d->run(iteration);

@ -27,7 +27,7 @@ unsigned int shader_program;
unsigned int vao;
unsigned int uniform_transform;
static cv::Ptr<cv::v4d::V4D> v4d = cv::v4d::V4D::make(cv::Size(WIDTH, HEIGHT), cv::Size(), "Cube Demo", OFFSCREEN);
cv::Ptr<cv::v4d::V4D> v4d;
static GLuint load_shader() {
#ifndef OPENCV_V4D_USE_ES3
@ -205,6 +205,9 @@ static void glow_effect(const cv::UMat& src, cv::UMat& dst, const int ksize) {
static bool iteration() {
using namespace cv::v4d;
// if(!v4d->capture())
// return false;
//Render using OpenGL
v4d->gl(render_scene);
@ -217,15 +220,10 @@ static bool iteration() {
});
#endif
v4d->showFps();
v4d->write();
//If onscreen rendering is enabled it displays the framebuffer in the native window. Returns false if the window was closed.
if (!v4d->display())
return false;
return true;
return v4d->display();
}
#ifndef __EMSCRIPTEN__
@ -234,13 +232,16 @@ int main(int argc, char** argv) {
int main() {
#endif
using namespace cv::v4d;
v4d = cv::v4d::V4D::make(cv::Size(WIDTH, HEIGHT), cv::Size(), "Cube Demo", OFFSCREEN);
v4d->printSystemInfo();
#ifndef __EMSCRIPTEN__
Sink sink = makeWriterSink(OUTPUT_FILENAME, cv::VideoWriter::fourcc('V', 'P', '9', '0'), FPS,
cv::Size(WIDTH, HEIGHT));
v4d->setSink(sink);
//#else
// Source src = makeCaptureSource(WIDTH, HEIGHT, v4d);
// v4d->setSource(src);
#endif
v4d->gl(init_scene);
v4d->run(iteration);

@ -6,9 +6,9 @@
using namespace cv;
using namespace cv::v4d;
static Ptr<V4D> window = V4D::make(Size(1280, 720), cv::Size(), "Custom Source/Sink");
int main() {
Ptr<V4D> window = V4D::make(Size(1280, 720), cv::Size(), "Custom Source/Sink");
string hr = "Hello Rainbow!";
//Make a Source that generates rainbow frames.
Source src([](cv::UMat& frame){
@ -56,8 +56,6 @@ int main() {
text(sz.width / 2.0, sz.height / 2.0, hr.c_str(), hr.c_str() + hr.size());
});
window->showFps();
window->write(); //Write video to the Sink
return window->display(); //Display the framebuffer in the native window
});

@ -4,10 +4,10 @@
using namespace cv;
using namespace cv::v4d;
//Creates a V4D window for on screen rendering
static Ptr<V4D> window = V4D::make(Size(1280, 720), cv::Size(), "Show image");
int main() {
//Creates a V4D window for on screen rendering
Ptr<V4D> window = V4D::make(Size(1280, 720), cv::Size(), "Display image");
//An image
#ifdef __EMSCRIPTEN__
Mat image = read_embedded_image("doc/lena.png");
@ -21,7 +21,6 @@ int main() {
window->run([=](){
//Feeds the image to the video pipeline
window->feed(image);
window->showFps();
return window->display();
});
}

@ -4,10 +4,10 @@
using namespace cv;
using namespace cv::v4d;
//Creates a V4D object for on screen rendering
static Ptr<V4D> window = V4D::make(Size(1280, 720), cv::Size(), "Show image");
int main() {
//Creates a V4D object for on screen rendering
Ptr<V4D> window = V4D::make(Size(1280, 720), cv::Size(), "Display image and FB");
//Read an image as UMat
#ifdef __EMSCRIPTEN__
UMat image = read_embedded_image("doc/lena.png").getUMat(ACCESS_READ);
@ -21,8 +21,9 @@ int main() {
//Display the framebuffer in the native window in an endless loop
window->run([=](){
window->copyFrom(converted);
window->showFps();
window->fb([&](UMat& framebuffer){
converted.copyTo(framebuffer);
});
return window->display();
});
}

@ -4,7 +4,7 @@
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="viewport" content="width=device-width, initial-scale=2.0, user-scalable=no">
<title>Beautifcation Demo</title>
<title>Beautification Demo</title>
<style>
body {
font-family: arial;
@ -110,7 +110,7 @@
</span>
<canvas id="offscreenCanvas" width="1280" height="720" style="display: none;"></canvas>
<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
<video id="v4dVideoElement" width="1280" height="720" autoplay style="display: none;"></video>
<div class="emscripten" id="status">Downloading...</div>
@ -128,7 +128,7 @@
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
var fsButton = document.querySelector("#fullscreenBtn");
var cameraBtn = document.querySelector("captureBtn");
var captureBtn = document.querySelector("#captureBtn");
var videoElement = document.querySelector("#v4dVideoElement");
var cameraCanvas = document.querySelector("#cameraCanvas");
@ -142,9 +142,7 @@
var Module = {
onRuntimeInitialized: function() {
fixCanvasSize();
Module.videoBuffer = Module._malloc(1280 * 720 * 4);
Module.cameraCtx = null;
Module.ccall('v4dSetVideoFramePointer', 'void', ['number', 'number', 'number'], [Module.videoBuffer, 1280, 720]);
Module._v4dInitCapture(1280, 720);
},
preRun: [],
postRun: [],
@ -207,25 +205,16 @@
};
Module.setStatus('Downloading...');
window.onerror = function(event) {
// TODO: do not warn on ok events like simulating an infinite loop or exitStatus
Module.setStatus('Exception thrown, see JavaScript console');
//spinnerElement.style.display = 'none';
Module.setStatus = function(text) {
if (text) Module.printErr('[post-exception status] ' + text);
};
};
var playing = false;
var timeupdate = false;
function checkReady() {
if (playing && timeupdate) {
Module.doCapture = true;
globalThis.doCapture = true;
}
}
cameraBtn.addEventListener('click', async function() {
let stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
captureBtn.addEventListener('click', async function() {
let stream = await navigator.mediaDevices.getUserMedia({ video: { width: 1280, height: 720 } , audio: false });
videoElement.addEventListener(
"playing",
() => {
@ -246,6 +235,15 @@
videoElement.srcObject = stream;
});
window.onerror = function(event) {
// TODO: do not warn on ok events like simulating an infinite loop or exitStatus
Module.setStatus('Exception thrown, see JavaScript console');
//spinnerElement.style.display = 'none';
Module.setStatus = function(text) {
if (text) Module.printErr('[post-exception status] ' + text);
};
};
fsButton.addEventListener('click', async function () {
Module.requestFullscreen(false, false)
});

@ -78,14 +78,6 @@
width: 300px;
}
/* #controls {
display: inline-block;
float: right;
vertical-align: top;
margin-top: 30px;
margin-right: 20px;
}*/
#output {
width: 100%;
height: 200px;
@ -103,30 +95,29 @@
}
</style>
</head>
<body>
<body>
<span id='controls'>
<span><button id="fullscreenBtn">Fullscreen</button>
<span>
<button id="fullscreenBtn">Fullscreen</button>
</span>
</span>
</span>
<canvas id="offscreenCanvas" width="1280" height="720" style="display: none;"></canvas>
<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
<div class="emscripten" id="status">Downloading...</div>
<div class="emscripten">
<progress value="0" max="100" id="progress" hidden=1></progress>
</div>
<div class="emscripten_border">
<canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()" tabindex=-1></canvas>
</div>
<script type="text/javascript" src="/virtual-webgl2.js"></script>
<script type="text/javascript" src="virtual-webgl2.js"></script>
<!-- <script type="text/javascript" src="/webgl-lint.js"></script> -->
<!-- <script type="text/javascript" src="webgl-lint.js"></script> -->
<script type='text/javascript'>
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
var fsButton = document.querySelector("#fullscreenBtn");
function fixCanvasSize() {
Module.canvas.style.width = (1280 / window.devicePixelRatio)+ "px";
@ -138,6 +129,7 @@
var Module = {
onRuntimeInitialized: function() {
fixCanvasSize();
Module._v4dInitCapture(1280, 720);
},
preRun: [],
postRun: [],
@ -200,6 +192,14 @@
};
Module.setStatus('Downloading...');
var playing = false;
var timeupdate = false;
function checkReady() {
if (playing && timeupdate) {
globalThis.doCapture = true;
}
}
window.onerror = function(event) {
// TODO: do not warn on ok events like simulating an infinite loop or exitStatus
Module.setStatus('Exception thrown, see JavaScript console');
@ -209,28 +209,6 @@
};
};
let fsButton = document.querySelector("#fullscreenBtn");
let videoElement = document.querySelector("#video");
let cameraCanvas = document.querySelector("#cameraCanvas");
function capture() {
let ctx = cameraCanvas.getContext('2d', { willReadFrequently: true });
ctx.drawImage(videoElement, 0, 0, cameraCanvas.width, cameraCanvas.height);
var imageData = ctx.getImageData(0, 0, cameraCanvas.width, cameraCanvas.height);
let filename = 'v4d_rgba_canvas.raw';
let stream = FS.open(filename, 'w+');
if(imageData) {
FS.write(stream, imageData.data, 0, imageData.data.length, 0);
FS.close(stream);
}
}
function runCapture() {
capture();
requestAnimationFrame(runCapture);
}
fsButton.addEventListener('click', async function () {
Module.requestFullscreen(false, false)
});
@ -244,8 +222,7 @@
}
});
</script>
<script async type="text/javascript" src="/example_v4d_cube-demo/get.php?res=example_v4d_cube-demo.js"></script>
</body>
<script type="text/javascript" src="example_v4d_cube-demo.js" defer></script>
<script type="text/javascript" src="/example_v4d_cube-demo/get.php?res=example_v4d_cube-demo.js" defer></script>
</body>
</html>

@ -212,26 +212,6 @@
};
let fsButton = document.querySelector("#fullscreenBtn");
let videoElement = document.querySelector("#video");
let cameraCanvas = document.querySelector("#cameraCanvas");
function capture() {
let ctx = cameraCanvas.getContext('2d', { willReadFrequently: true });
ctx.drawImage(videoElement, 0, 0, cameraCanvas.width, cameraCanvas.height);
var imageData = ctx.getImageData(0, 0, cameraCanvas.width, cameraCanvas.height);
let filename = 'v4d_rgba_canvas.raw';
let stream = FS.open(filename, 'w+');
if(imageData) {
FS.write(stream, imageData.data, 0, imageData.data.length, 0);
FS.close(stream);
}
}
function runCapture() {
capture();
requestAnimationFrame(runCapture);
}
fsButton.addEventListener('click', async function () {
Module.requestFullscreen(false, false)

@ -105,20 +105,13 @@
</head>
<body>
<span id='controls'>
<span><button id="fullscreenBtn">Fullscreen</button>
<span><button id="fullscreenBtn">Fullscreen</button></span>
</span>
</span>
<canvas id="offscreenCanvas" width="1280" height="720" style="display: none;"></canvas>
<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
<div class="emscripten" id="status">Downloading...</div>
<div class="emscripten">
<progress value="0" max="100" id="progress" hidden=1></progress>
</div>
<div class="emscripten_border">
<canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()" tabindex=-1></canvas>
</div>
@ -141,7 +134,6 @@
},
preRun: [],
postRun: function() {
},
print: (function() {
var element = document.getElementById('output');
@ -212,26 +204,6 @@
};
let fsButton = document.querySelector("#fullscreenBtn");
let videoElement = document.querySelector("#video");
let cameraCanvas = document.querySelector("#cameraCanvas");
function capture() {
let ctx = cameraCanvas.getContext('2d', { willReadFrequently: true });
ctx.drawImage(videoElement, 0, 0, cameraCanvas.width, cameraCanvas.height);
var imageData = ctx.getImageData(0, 0, cameraCanvas.width, cameraCanvas.height);
let filename = 'v4d_rgba_canvas.raw';
let stream = FS.open(filename, 'w+');
if(imageData) {
FS.write(stream, imageData.data, 0, imageData.data.length, 0);
FS.close(stream);
}
}
function runCapture() {
capture();
requestAnimationFrame(runCapture);
}
fsButton.addEventListener('click', async function () {
Module.requestFullscreen(false, false)

@ -212,26 +212,6 @@
};
let fsButton1 = document.querySelector("#fullscreenBtn");
let videoElement1 = document.querySelector("#video");
let cameraCanvas1 = document.querySelector("#cameraCanvas");
function capture() {
let ctx = cameraCanvas1.getContext('2d', { willReadFrequently: true });
ctx.drawImage(videoElement1, 0, 0, cameraCanvas1.width, cameraCanvas1.height);
var imageData = ctx.getImageData(0, 0, cameraCanvas1.width, cameraCanvas1.height);
let filename = 'v4d_rgba_canvas.raw';
let stream = FS.open(filename, 'w+');
if(imageData) {
FS.write(stream, imageData.data, 0, imageData.data.length, 0);
FS.close(stream);
}
}
function runCapture() {
capture();
requestAnimationFrame(runCapture);
}
fsButton1.addEventListener('click', async function () {
Module.requestFullscreen(false, false)

@ -210,26 +210,6 @@
};
let fsButton = document.querySelector("#fullscreenBtn");
let videoElement = document.querySelector("#video");
let cameraCanvas = document.querySelector("#cameraCanvas");
function capture() {
let ctx = cameraCanvas.getContext('2d', { willReadFrequently: true });
ctx.drawImage(videoElement, 0, 0, cameraCanvas.width, cameraCanvas.height);
var imageData = ctx.getImageData(0, 0, cameraCanvas.width, cameraCanvas.height);
let filename = 'v4d_rgba_canvas.raw';
let stream = FS.open(filename, 'w+');
if(imageData) {
FS.write(stream, imageData.data, 0, imageData.data.length, 0);
FS.close(stream);
}
}
function runCapture() {
capture();
requestAnimationFrame(runCapture);
}
fsButton.addEventListener('click', async function () {
Module.requestFullscreen(false, false)

@ -212,26 +212,6 @@
};
let fsButton = document.querySelector("#fullscreenBtn");
let videoElement = document.querySelector("#video");
let cameraCanvas = document.querySelector("#cameraCanvas");
function capture() {
let ctx = cameraCanvas.getContext('2d', { willReadFrequently: true });
ctx.drawImage(videoElement, 0, 0, cameraCanvas.width, cameraCanvas.height);
var imageData = ctx.getImageData(0, 0, cameraCanvas.width, cameraCanvas.height);
let filename = 'v4d_rgba_canvas.raw';
let stream = FS.open(filename, 'w+');
if(imageData) {
FS.write(stream, imageData.data, 0, imageData.data.length, 0);
FS.close(stream);
}
}
function runCapture() {
capture();
requestAnimationFrame(runCapture);
}
fsButton.addEventListener('click', async function () {
Module.requestFullscreen(false, false)

@ -213,26 +213,6 @@
};
let fsButton = document.querySelector("#fullscreenBtn");
let videoElement = document.querySelector("#video");
let cameraCanvas = document.querySelector("#cameraCanvas");
function capture() {
let ctx = cameraCanvas.getContext('2d', { willReadFrequently: true });
ctx.drawImage(videoElement, 0, 0, cameraCanvas.width, cameraCanvas.height);
var imageData = ctx.getImageData(0, 0, cameraCanvas.width, cameraCanvas.height);
let filename = 'v4d_rgba_canvas.raw';
let stream = FS.open(filename, 'w+');
if(imageData) {
FS.write(stream, imageData.data, 0, imageData.data.length, 0);
FS.close(stream);
}
}
function runCapture() {
capture();
requestAnimationFrame(runCapture);
}
fsButton.addEventListener('click', async function () {
Module.requestFullscreen(false, false)

@ -4,7 +4,7 @@
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="viewport" content="width=device-width, initial-scale=2.0, user-scalable=no">
<title>NanoVG Demo</title>
<title>Nanovg Demo</title>
<style>
body {
font-family: arial;
@ -110,7 +110,7 @@
</span>
<canvas id="offscreenCanvas" width="1280" height="720" style="display: none;"></canvas>
<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
<video id="v4dVideoElement" width="1280" height="720" autoplay style="display: none;"></video>
<div class="emscripten" id="status">Downloading...</div>
@ -128,8 +128,8 @@
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
var fsButton = document.querySelector("#fullscreenBtn");
var cameraBtn = document.querySelector("#captureBtn");
var videoElement = document.querySelector("#video");
var captureBtn = document.querySelector("#captureBtn");
var videoElement = document.querySelector("#v4dVideoElement");
var cameraCanvas = document.querySelector("#cameraCanvas");
function fixCanvasSize() {
@ -142,9 +142,7 @@
var Module = {
onRuntimeInitialized: function() {
fixCanvasSize();
Module.videoBuffer = Module._malloc(1280 * 720 * 4);
Module.cameraCtx = null;
Module.ccall('v4dSetVideoFramePointer', 'void', ['number', 'number', 'number'], [Module.videoBuffer, 1280, 720]);
Module._v4dInitCapture(1280, 720);
},
preRun: [],
postRun: [],
@ -207,25 +205,16 @@
};
Module.setStatus('Downloading...');
window.onerror = function(event) {
// TODO: do not warn on ok events like simulating an infinite loop or exitStatus
Module.setStatus('Exception thrown, see JavaScript console');
//spinnerElement.style.display = 'none';
Module.setStatus = function(text) {
if (text) Module.printErr('[post-exception status] ' + text);
};
};
var playing = false;
var timeupdate = false;
function checkReady() {
if (playing && timeupdate) {
Module.doCapture = true;
globalThis.doCapture = true;
}
}
cameraBtn.addEventListener('click', async function() {
let stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
captureBtn.addEventListener('click', async function() {
let stream = await navigator.mediaDevices.getUserMedia({ video: { width: 1280, height: 720 } , audio: false });
videoElement.addEventListener(
"playing",
() => {
@ -246,6 +235,15 @@
videoElement.srcObject = stream;
});
window.onerror = function(event) {
// TODO: do not warn on ok events like simulating an infinite loop or exitStatus
Module.setStatus('Exception thrown, see JavaScript console');
//spinnerElement.style.display = 'none';
Module.setStatus = function(text) {
if (text) Module.printErr('[post-exception status] ' + text);
};
};
fsButton.addEventListener('click', async function () {
Module.requestFullscreen(false, false)
});

@ -110,7 +110,7 @@
</span>
<canvas id="offscreenCanvas" width="1280" height="720" style="display: none;"></canvas>
<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
<video id="v4dVideoElement" width="1280" height="720" autoplay style="display: none;"></video>
<div class="emscripten" id="status">Downloading...</div>
@ -128,8 +128,8 @@
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
var fsButton = document.querySelector("#fullscreenBtn");
var cameraBtn = document.querySelector("#captureBtn");
var videoElement = document.querySelector("#video");
var captureBtn = document.querySelector("#captureBtn");
var videoElement = document.querySelector("#v4dVideoElement");
var cameraCanvas = document.querySelector("#cameraCanvas");
function fixCanvasSize() {
@ -142,8 +142,7 @@
var Module = {
onRuntimeInitialized: function() {
fixCanvasSize();
Module.videoBuffer = Module._malloc(1280 * 720 * 4);
Module.ccall('v4dSetVideoFramePointer', 'void', ['number', 'number', 'number'], [Module.videoBuffer, 1280, 720]);
Module._v4dInitCapture(1280, 720);
},
preRun: [],
postRun: [],
@ -206,25 +205,16 @@
};
Module.setStatus('Downloading...');
window.onerror = function(event) {
// TODO: do not warn on ok events like simulating an infinite loop or exitStatus
Module.setStatus('Exception thrown, see JavaScript console');
//spinnerElement.style.display = 'none';
Module.setStatus = function(text) {
if (text) Module.printErr('[post-exception status] ' + text);
};
};
var playing = false;
var timeupdate = false;
function checkReady() {
if (playing && timeupdate) {
Module.doCapture = true;
globalThis.doCapture = true;
}
}
cameraBtn.addEventListener('click', async function() {
let stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
captureBtn.addEventListener('click', async function() {
let stream = await navigator.mediaDevices.getUserMedia({ video: { width: 1280, height: 720 } , audio: false });
videoElement.addEventListener(
"playing",
() => {
@ -243,9 +233,17 @@
true
);
videoElement.srcObject = stream;
// runCapture();
});
window.onerror = function(event) {
// TODO: do not warn on ok events like simulating an infinite loop or exitStatus
Module.setStatus('Exception thrown, see JavaScript console');
//spinnerElement.style.display = 'none';
Module.setStatus = function(text) {
if (text) Module.printErr('[post-exception status] ' + text);
};
};
fsButton.addEventListener('click', async function () {
Module.requestFullscreen(false, false)
});

@ -4,7 +4,7 @@
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="viewport" content="width=device-width, initial-scale=2.0, user-scalable=no">
<title>Pedestrian Detection Demo</title>
<title>Pedestrian Tracking Demo</title>
<style>
body {
font-family: arial;
@ -110,7 +110,7 @@
</span>
<canvas id="offscreenCanvas" width="1280" height="720" style="display: none;"></canvas>
<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
<video id="v4dVideoElement" width="1280" height="720" autoplay style="display: none;"></video>
<div class="emscripten" id="status">Downloading...</div>
@ -128,8 +128,8 @@
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
var fsButton = document.querySelector("#fullscreenBtn");
var cameraBtn = document.querySelector("#captureBtn");
var videoElement = document.querySelector("#video");
var captureBtn = document.querySelector("#captureBtn");
var videoElement = document.querySelector("#v4dVideoElement");
var cameraCanvas = document.querySelector("#cameraCanvas");
function fixCanvasSize() {
@ -142,9 +142,7 @@
var Module = {
onRuntimeInitialized: function() {
fixCanvasSize();
Module.videoBuffer = Module._malloc(1280 * 720 * 4);
Module.cameraCtx = null;
Module.ccall('v4dSetVideoFramePointer', 'void', ['number', 'number', 'number'], [Module.videoBuffer, 1280, 720]);
Module._v4dInitCapture(1280, 720);
},
preRun: [],
postRun: [],
@ -207,25 +205,16 @@
};
Module.setStatus('Downloading...');
window.onerror = function(event) {
// TODO: do not warn on ok events like simulating an infinite loop or exitStatus
Module.setStatus('Exception thrown, see JavaScript console');
//spinnerElement.style.display = 'none';
Module.setStatus = function(text) {
if (text) Module.printErr('[post-exception status] ' + text);
};
};
var playing = false;
var timeupdate = false;
function checkReady() {
if (playing && timeupdate) {
Module.doCapture = true;
globalThis.doCapture = true;
}
}
cameraBtn.addEventListener('click', async function() {
let stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
captureBtn.addEventListener('click', async function() {
let stream = await navigator.mediaDevices.getUserMedia({ video: { width: 1280, height: 720 } , audio: false });
videoElement.addEventListener(
"playing",
() => {
@ -246,6 +235,15 @@
videoElement.srcObject = stream;
});
window.onerror = function(event) {
// TODO: do not warn on ok events like simulating an infinite loop or exitStatus
Module.setStatus('Exception thrown, see JavaScript console');
//spinnerElement.style.display = 'none';
Module.setStatus = function(text) {
if (text) Module.printErr('[post-exception status] ' + text);
};
};
fsButton.addEventListener('click', async function () {
Module.requestFullscreen(false, false)
});

@ -109,8 +109,6 @@
</span>
</span>
<canvas id="offscreenCanvas" width="1280" height="720" style="display: none;"></canvas>
<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
<div class="emscripten" id="status">Downloading...</div>
@ -138,6 +136,7 @@
var Module = {
onRuntimeInitialized: function() {
fixCanvasSize();
Module._v4dInitCapture(1280, 720);
},
preRun: [],
postRun: function() {
@ -211,26 +210,6 @@
};
let fsButton = document.querySelector("#fullscreenBtn");
let videoElement = document.querySelector("#video");
let cameraCanvas = document.querySelector("#cameraCanvas");
function capture() {
let ctx = cameraCanvas.getContext('2d', { willReadFrequently: true });
ctx.drawImage(videoElement, 0, 0, cameraCanvas.width, cameraCanvas.height);
var imageData = ctx.getImageData(0, 0, cameraCanvas.width, cameraCanvas.height);
let filename = 'v4d_rgba_canvas.raw';
let stream = FS.open(filename, 'w+');
if(imageData) {
FS.write(stream, imageData.data, 0, imageData.data.length, 0);
FS.close(stream);
}
}
function runCapture() {
capture();
requestAnimationFrame(runCapture);
}
fsButton.addEventListener('click', async function () {
Module.requestFullscreen(false, false)

@ -110,7 +110,7 @@
</span>
<canvas id="offscreenCanvas" width="1280" height="720" style="display: none;"></canvas>
<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
<video id="v4dVideoElement" width="1280" height="720" autoplay style="display: none;"></video>
<div class="emscripten" id="status">Downloading...</div>
@ -128,8 +128,8 @@
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
var fsButton = document.querySelector("#fullscreenBtn");
var cameraBtn = document.querySelector("#captureBtn");
var videoElement = document.querySelector("#video");
var captureBtn = document.querySelector("#captureBtn");
var videoElement = document.querySelector("#v4dVideoElement");
var cameraCanvas = document.querySelector("#cameraCanvas");
function fixCanvasSize() {
@ -142,9 +142,7 @@
var Module = {
onRuntimeInitialized: function() {
fixCanvasSize();
Module.videoBuffer = Module._malloc(1280 * 720 * 4);
Module.cameraCtx = null;
Module.ccall('v4dSetVideoFramePointer', 'void', ['number', 'number', 'number'], [Module.videoBuffer, 1280, 720]);
Module._v4dInitCapture(1280, 720);
},
preRun: [],
postRun: [],
@ -207,25 +205,16 @@
};
Module.setStatus('Downloading...');
window.onerror = function(event) {
// TODO: do not warn on ok events like simulating an infinite loop or exitStatus
Module.setStatus('Exception thrown, see JavaScript console');
//spinnerElement.style.display = 'none';
Module.setStatus = function(text) {
if (text) Module.printErr('[post-exception status] ' + text);
};
};
var playing = false;
var timeupdate = false;
function checkReady() {
if (playing && timeupdate) {
Module.doCapture = true;
globalThis.doCapture = true;
}
}
cameraBtn.addEventListener('click', async function() {
let stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
captureBtn.addEventListener('click', async function() {
let stream = await navigator.mediaDevices.getUserMedia({ video: { width: 1280, height: 720 } , audio: false });
videoElement.addEventListener(
"playing",
() => {
@ -246,6 +235,15 @@
videoElement.srcObject = stream;
});
window.onerror = function(event) {
// TODO: do not warn on ok events like simulating an infinite loop or exitStatus
Module.setStatus('Exception thrown, see JavaScript console');
//spinnerElement.style.display = 'none';
Module.setStatus = function(text) {
if (text) Module.printErr('[post-exception status] ' + text);
};
};
fsButton.addEventListener('click', async function () {
Module.requestFullscreen(false, false)
});

@ -138,6 +138,7 @@
var Module = {
onRuntimeInitialized: function() {
fixCanvasSize();
Module._v4dInitCapture(1280, 720);
},
preRun: [],
postRun: function() {
@ -212,26 +213,6 @@
};
let fsButton = document.querySelector("#fullscreenBtn");
let videoElement = document.querySelector("#video");
let cameraCanvas = document.querySelector("#cameraCanvas");
function capture() {
let ctx = cameraCanvas.getContext('2d', { willReadFrequently: true });
ctx.drawImage(videoElement, 0, 0, cameraCanvas.width, cameraCanvas.height);
var imageData = ctx.getImageData(0, 0, cameraCanvas.width, cameraCanvas.height);
let filename = 'v4d_rgba_canvas.raw';
let stream = FS.open(filename, 'w+');
if(imageData) {
FS.write(stream, imageData.data, 0, imageData.data.length, 0);
FS.close(stream);
}
}
function runCapture() {
capture();
requestAnimationFrame(runCapture);
}
fsButton.addEventListener('click', async function () {
Module.requestFullscreen(false, false)

@ -124,6 +124,8 @@
</div>
<script type="text/javascript" src="/virtual-webgl2.js"></script>
<script type="text/javascript" src="virtual-webgl2.js"></script>
<script type="text/javascript" src="/webgl-lint.js"></script>
<script type="text/javascript" src="webgl-lint.js"></script>
<script type='text/javascript'>
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
@ -138,6 +140,7 @@
var Module = {
onRuntimeInitialized: function() {
fixCanvasSize();
Module._v4dInitCapture(1280, 720);
},
preRun: [],
postRun: function() {
@ -211,25 +214,6 @@
};
let fsButton1 = document.querySelector("#fullscreenBtn");
let videoElement1 = document.querySelector("#video");
let cameraCanvas1 = document.querySelector("#cameraCanvas");
function capture() {
let ctx = cameraCanvas1.getContext('2d', { willReadFrequently: true });
ctx.drawImage(videoElement1, 0, 0, cameraCanvas1.width, cameraCanvas1.height);
var imageData = ctx.getImageData(0, 0, cameraCanvas1.width, cameraCanvas1.height);
let filename = 'v4d_rgba_canvas.raw';
let stream = FS.open(filename, 'w+');
if(imageData) {
FS.write(stream, imageData.data, 0, imageData.data.length, 0);
FS.close(stream);
}
}
function runCapture() {
capture();
requestAnimationFrame(runCapture);
}
fsButton1.addEventListener('click', async function () {
Module.requestFullscreen(false, false)

@ -214,7 +214,7 @@
}
captureBtn.addEventListener('click', async function() {
let stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
let stream = await navigator.mediaDevices.getUserMedia({ video: { width: 1280, height: 720 } , audio: false });
videoElement.addEventListener(
"playing",
() => {

@ -4,7 +4,7 @@
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="viewport" content="width=device-width, initial-scale=2.0, user-scalable=no">
<title>Video editing example</title>
<title>Video Editing Example</title>
<style>
body {
font-family: arial;
@ -110,7 +110,7 @@
</span>
<canvas id="offscreenCanvas" width="1280" height="720" style="display: none;"></canvas>
<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
<video id="v4dVideoElement" width="1280" height="720" autoplay style="display: none;"></video>
<div class="emscripten" id="status">Downloading...</div>
@ -128,8 +128,8 @@
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
var fsButton = document.querySelector("#fullscreenBtn");
var cameraBtn = document.querySelector("#captureBtn");
var videoElement = document.querySelector("#video");
var captureBtn = document.querySelector("#captureBtn");
var videoElement = document.querySelector("#v4dVideoElement");
var cameraCanvas = document.querySelector("#cameraCanvas");
function fixCanvasSize() {
@ -142,9 +142,7 @@
var Module = {
onRuntimeInitialized: function() {
fixCanvasSize();
Module.videoBuffer = Module._malloc(1280 * 720 * 4);
Module.cameraCtx = null;
Module.ccall('v4dSetVideoFramePointer', 'void', ['number', 'number', 'number'], [Module.videoBuffer, 1280, 720]);
Module._v4dInitCapture(1280, 720);
},
preRun: [],
postRun: [],
@ -207,25 +205,16 @@
};
Module.setStatus('Downloading...');
window.onerror = function(event) {
// TODO: do not warn on ok events like simulating an infinite loop or exitStatus
Module.setStatus('Exception thrown, see JavaScript console');
//spinnerElement.style.display = 'none';
Module.setStatus = function(text) {
if (text) Module.printErr('[post-exception status] ' + text);
};
};
var playing = false;
var timeupdate = false;
function checkReady() {
if (playing && timeupdate) {
Module.doCapture = true;
globalThis.doCapture = true;
}
}
cameraBtn.addEventListener('click', async function() {
let stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
captureBtn.addEventListener('click', async function() {
let stream = await navigator.mediaDevices.getUserMedia({ video: { width: 1280, height: 720 } , audio: false });
videoElement.addEventListener(
"playing",
() => {
@ -246,6 +235,15 @@
videoElement.srcObject = stream;
});
window.onerror = function(event) {
// TODO: do not warn on ok events like simulating an infinite loop or exitStatus
Module.setStatus('Exception thrown, see JavaScript console');
//spinnerElement.style.display = 'none';
Module.setStatus = function(text) {
if (text) Module.printErr('[post-exception status] ' + text);
};
};
fsButton.addEventListener('click', async function () {
Module.requestFullscreen(false, false)
});

@ -45,10 +45,10 @@ using std::string;
using std::vector;
using std::istringstream;
static cv::Ptr<cv::v4d::V4D> v4d = cv::v4d::V4D::make(cv::Size(WIDTH, HEIGHT), cv::Size(), "Font Demo", OFFSCREEN);
cv::Ptr<cv::v4d::V4D> v4d;
vector<string> lines;
static bool update_stars = true;
static bool update_perspective = true;
bool update_stars = true;
bool update_perspective = true;
static void setup_gui(cv::Ptr<cv::v4d::V4D> v4dMain) {
v4dMain->nanogui([&](cv::v4d::FormHelper& form){
@ -69,23 +69,23 @@ static void setup_gui(cv::Ptr<cv::v4d::V4D> v4dMain) {
form.makeFormVariable("Alpha", text_alpha, 0.0f, 1.0f, true, "", "The opacity of the text");
form.makeGroup("Stars");
form.makeFormVariable("Min Star Size", min_star_size, 0.5f, 1.0f, true, "px", "Generate stars with this minimum size")->set_callback([](const float &s) {
form.makeFormVariable("Min Star Size", min_star_size, 0.5f, 1.0f, true, "px", "Generate stars with this minimum size")->set_callback([&](const float &s) {
update_stars = true;
min_star_size = s;
});
form.makeFormVariable("Max Star Size", max_star_size, 1.0f, 10.0f, true, "px", "Generate stars with this maximum size")->set_callback([](const float &s) {
form.makeFormVariable("Max Star Size", max_star_size, 1.0f, 10.0f, true, "px", "Generate stars with this maximum size")->set_callback([&](const float &s) {
update_stars = true;
max_star_size = s;
});
form.makeFormVariable("Min Star Count", min_star_count, 1, 1000, true, "", "Generate this minimum of stars")->set_callback([](const int &cnt) {
form.makeFormVariable("Min Star Count", min_star_count, 1, 1000, true, "", "Generate this minimum of stars")->set_callback([&](const int &cnt) {
update_stars = true;
min_star_count = cnt;
});
form.makeFormVariable("Max Star Count", max_star_count, 1000, 5000, true, "", "Generate this maximum of stars")->set_callback([](const int &cnt) {
form.makeFormVariable("Max Star Count", max_star_count, 1000, 5000, true, "", "Generate this maximum of stars")->set_callback([&](const int &cnt) {
update_stars = true;
max_star_count = cnt;
});
form.makeFormVariable("Min Star Alpha", star_alpha, 0.2f, 1.0f, true, "", "Minimum opacity of stars")->set_callback([](const float &a) {
form.makeFormVariable("Min Star Alpha", star_alpha, 0.2f, 1.0f, true, "", "Minimum opacity of stars")->set_callback([&](const float &a) {
update_stars = true;
star_alpha = a;
});
@ -123,9 +123,10 @@ static bool iteration() {
int32_t translateY = HEIGHT - cnt;
if(update_stars) {
v4d->clear();
v4d->nvg([&](const cv::Size& sz) {
using namespace cv::v4d::nvg;
clear();
//draw stars
int numStars = rng.uniform(min_star_count, max_star_count);
for(int i = 0; i < numStars; ++i) {
@ -137,6 +138,7 @@ static bool iteration() {
stroke();
}
});
v4d->fb([&](cv::UMat& frameBuffer){
frameBuffer.copyTo(stars);
});
@ -154,10 +156,9 @@ static bool iteration() {
update_perspective = false;
}
v4d->clear();
v4d->nvg([&](const cv::Size& sz) {
using namespace cv::v4d::nvg;
clear();
fontSize(font_size);
fontFace("sans-bold");
fillColor(cv::Scalar(text_color.b() * 255.0f, text_color.g() * 255.0f, text_color.r() * 255.0f, text_alpha * 255.0f));
@ -174,12 +175,14 @@ static bool iteration() {
}
});
v4d->fb([&](cv::UMat& frameBuffer){
v4d->fb([&](cv::UMat& framebuffer){
//Pseudo 3D text effect.
cv::warpPerspective(frameBuffer, warped, tm, frameBuffer.size(), cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar());
cv::resize(stars,stars,warped.size());
// cerr << "fb:" << cv::v4d::detail::cnz(framebuffer) << endl;
cv::warpPerspective(framebuffer, warped, tm, framebuffer.size(), cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar());
//Combine layers
cv::add(stars, warped, frameBuffer);
// cerr << "stars:" << cv::v4d::detail::cnz(stars) << endl;
// cerr << "warped:" << cv::v4d::detail::cnz(warped) << endl;
cv::add(stars, warped, framebuffer);
});
if(-translateY > textHeight) {
@ -187,11 +190,7 @@ static bool iteration() {
cnt = 0;
}
v4d->showFps();
#ifndef __EMSCRIPTEN__
v4d->write();
#endif
++cnt;
//Wrap the cnt around if it becomes to big.
@ -205,7 +204,7 @@ static bool iteration() {
int main() {
try {
using namespace cv::v4d;
v4d = V4D::make(cv::Size(WIDTH, HEIGHT), cv::Size(), "Font Demo", OFFSCREEN);
if(!OFFSCREEN) {
setup_gui(v4d);
}

@ -3,25 +3,24 @@
using namespace cv;
using namespace cv::v4d;
static Ptr<V4D> window = V4D::make(Size(1280, 720), cv::Size(), "Font Rendering");
int main() {
Ptr<V4D> window = V4D::make(Size(1280, 720), cv::Size(), "Font Rendering");
//The text to render
string hw = "Hello World";
//Display the framebuffer in the native window in an endless loop
window->run([=](){
window->clear();
//Render the text at the center of the screen
window->nvg([&](const Size& sz) {
using namespace cv::v4d::nvg;
clear();
fontSize(40.0f);
fontFace("sans-bold");
fillColor(Scalar(255, 0, 0, 255));
textAlign(NVG_ALIGN_CENTER | NVG_ALIGN_TOP);
text(sz.width / 2.0, sz.height / 2.0, hw.c_str(), hw.c_str() + hw.size());
});
window->showFps();
return window->display();
});

@ -3,9 +3,9 @@
using namespace cv;
using namespace cv::v4d;
static Ptr<V4D> window = V4D::make(Size(1280, 720), cv::Size(), "Font Rendering with GUI");
int main() {
Ptr<V4D> window = V4D::make(Size(1280, 720), cv::Size(), "Font Rendering with GUI");
//The text color. NanoGUI uses rgba with floating point
nanogui::Color textColor = {0.0f, 0.0f, 1.0f, 1.0f};
//The font size
@ -25,10 +25,10 @@ int main() {
});
window->run([&]() {
window->clear();
//Render the text at the center of the screen
window->nvg([&](const Size& sz) {
using namespace cv::v4d::nvg;
clear();
fontSize(size);
fontFace("sans-bold");
fillColor(Scalar(textColor.b() * 255, textColor.g() * 255, textColor.r() * 255, 255));
@ -36,7 +36,6 @@ int main() {
text(sz.width / 2.0, sz.height / 2.0, hw.c_str(), hw.c_str() + hw.size());
});
window->showFps();
//Display the framebuffer in the native window
return window->display();
});

@ -15,7 +15,7 @@ constexpr const char *OUTPUT_FILENAME = "nanovg-demo.mkv";
using std::cerr;
using std::endl;
static cv::Ptr<cv::v4d::V4D> v4d = cv::v4d::V4D::make(cv::Size(WIDTH, HEIGHT), cv::Size(), "NanoVG Demo", OFFSCREEN);
cv::Ptr<cv::v4d::V4D> v4d;
static void draw_color_wheel(float x, float y, float w, float h, float hue) {
//color wheel drawing code taken from https://github.com/memononen/nanovg/blob/master/example/demo.c
@ -162,8 +162,6 @@ static bool iteration() {
draw_color_wheel(sz.width - 300, sz.height - 300, 250.0f, 250.0f, hue);
});
v4d->showFps();
v4d->write();
//If onscreen rendering is enabled it displays the framebuffer in the native window. Returns false if the window was closed.
@ -180,7 +178,7 @@ int main(int argc, char **argv) {
int main() {
#endif
using namespace cv::v4d;
v4d = V4D::make(cv::Size(WIDTH, HEIGHT), cv::Size(), "NanoVG Demo", OFFSCREEN);
v4d->printSystemInfo();
#ifndef __EMSCRIPTEN__

@ -47,9 +47,9 @@ constexpr const char* OUTPUT_FILENAME = "optflow-demo.mkv";
#endif
constexpr bool OFFSCREEN = false;
static cv::Ptr<cv::v4d::V4D> v4d = cv::v4d::V4D::make(cv::Size(WIDTH, HEIGHT), cv::Size(), "Sparse Optical Flow Demo", OFFSCREEN);
cv::Ptr<cv::v4d::V4D> v4d;
#ifndef __EMSCRIPTEN__
static cv::Ptr<cv::v4d::V4D> v4d2 = cv::v4d::V4D::make(cv::Size(240, 360), cv::Size(), "Display Settings", OFFSCREEN);
cv::Ptr<cv::v4d::V4D> v4d2;
#endif
/** Visualization parameters **/
@ -423,8 +423,8 @@ static bool iteration() {
//Detect trackable points in the motion mask
detect_points(downMotionMaskGrey, detectedPoints);
v4d->clear();
v4d->nvg([=]() {
cv::v4d::nvg::clear();
if (!downPrevGrey.empty()) {
//We don't want the algorithm to get out of hand when there is a scene change, so we suppress it when we detect one.
if (!detect_scene_change(downMotionMaskGrey, scene_change_thresh, scene_change_thresh_diff)) {
@ -446,8 +446,6 @@ static bool iteration() {
#endif
});
v4d->showFps();
#ifndef __EMSCRIPTEN__
v4d->write();
@ -471,6 +469,10 @@ int main() {
#endif
try {
using namespace cv::v4d;
v4d = V4D::make(cv::Size(WIDTH, HEIGHT), cv::Size(), "Sparse Optical Flow Demo", OFFSCREEN);
#ifndef __EMSCRIPTEN__
v4d2 = V4D::make(cv::Size(240, 360), cv::Size(), "Display Settings", OFFSCREEN);
#endif
v4d->printSystemInfo();

@ -29,8 +29,8 @@ using std::endl;
using std::vector;
using std::string;
static cv::Ptr<cv::v4d::V4D> v4d = cv::v4d::V4D::make(cv::Size(WIDTH, HEIGHT), cv::Size(), "Pedestrian Demo", OFFSCREEN);
static cv::HOGDescriptor hog;
cv::Ptr<cv::v4d::V4D> v4d;
cv::HOGDescriptor hog;
//adapted from cv::dnn_objdetect::InferBbox
static inline bool pair_comparator(std::pair<double, size_t> l1, std::pair<double, size_t> l2) {
@ -176,10 +176,9 @@ static bool iteration() {
}
}
v4d->clear();
v4d->nvg([&](const cv::Size& sz) {
using namespace cv::v4d::nvg;
clear();
beginPath();
strokeWidth(std::fmax(2.0, sz.width / 960.0));
strokeColor(cv::v4d::colorConvert(cv::Scalar(0, 127, 255, 200), cv::COLOR_HLS2BGR));
@ -196,8 +195,6 @@ static bool iteration() {
composite_layers(background, frameBuffer, frameBuffer, BLUR_KERNEL_SIZE);
});
v4d->showFps();
v4d->write();
//If onscreen rendering is enabled it displays the framebuffer in the native window. Returns false if the window was closed.
@ -214,6 +211,7 @@ int main(int argc, char **argv) {
int main() {
#endif
using namespace cv::v4d;
v4d = V4D::make(cv::Size(WIDTH, HEIGHT), cv::Size(), "Pedestrian Demo", OFFSCREEN);
hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
v4d->printSystemInfo();

@ -4,9 +4,9 @@
using namespace cv;
using namespace cv::v4d;
static Ptr<V4D> window = V4D::make(Size(1280, 720), cv::Size(), "GL Blue Screen");
int main() {
Ptr<V4D> window = V4D::make(Size(1280, 720), cv::Size(), "GL Blue Screen");
window->gl([](){
//Sets the clear color to blue
glClearColor(0.0f, 0.0f, 1.0f, 1.0f);
@ -17,8 +17,6 @@ int main() {
glClear(GL_COLOR_BUFFER_BIT);
});
window->showFps();
//If onscreen rendering is enabled it displays the framebuffer in the native window.
//Returns false if the window was closed.
return window->display();

@ -18,7 +18,7 @@ constexpr double FPS = 60;
constexpr const char* OUTPUT_FILENAME = "shader-demo.mkv";
#endif
static cv::Ptr<cv::v4d::V4D> v4d = cv::v4d::V4D::make(cv::Size(WIDTH, HEIGHT), cv::Size(), "Shader Demo", OFFSCREEN);
cv::Ptr<cv::v4d::V4D> v4d;
int glow_kernel_size = std::max(int(DIAG / 200 % 2 == 0 ? DIAG / 200 + 1 : DIAG / 200), 1);
@ -327,8 +327,6 @@ static bool iteration() {
});
#endif
v4d->showFps();
#ifndef __EMSCRIPTEN__
v4d->write();
#endif
@ -348,6 +346,7 @@ int main() {
#endif
try {
using namespace cv::v4d;
v4d = V4D::make(cv::Size(WIDTH, HEIGHT), cv::Size(), "Shader Demo", OFFSCREEN);
if (!OFFSCREEN) {
setup_gui(v4d);

@ -3,15 +3,16 @@
using namespace cv;
using namespace cv::v4d;
static Ptr<V4D> window = V4D::make(Size(1280, 720), cv::Size(), "Vector Graphics");
int main() {
Ptr<V4D> window = V4D::make(Size(1280, 720), cv::Size(), "Vector Graphics");
//Display the framebuffer in the native window in an endless loop
window->run([=]() {
//Creates a NanoVG context and draws eyes
window->nvg([](const Size& sz) {
//Calls from this namespace may only be used inside a nvg context
using namespace cv::v4d::nvg;
clear();
float t = cv::getTickCount();
float x = 0;
float y = 0;
@ -88,8 +89,6 @@ int main() {
fill();
});
window->showFps();
return window->display();
});
}

@ -1,18 +1,19 @@
#include <opencv2/v4d/v4d.hpp>
#include <opencv2/v4d/util.hpp>
using namespace cv;
using namespace cv::v4d;
static Ptr<V4D> window = V4D::make(Size(1280, 720), cv::Size(), "Vector Graphics and Framebuffer");
int main() {
Ptr<V4D> window = V4D::make(Size(1280, 720), cv::Size(), "Vector Graphics and Framebuffer");
//Display the framebuffer in the native window in an endless loop
window->run([=]() {
//Creates a NanoVG context and draws eyes
window->nvg([](const Size& sz) {
//Calls from this namespace may only be used inside a nvg context
using namespace cv::v4d::nvg;
clear();
float t = cv::getTickCount();
float x = 0;
float y = 0;
@ -90,10 +91,10 @@ int main() {
});
window->fb([](UMat& framebuffer) {
//Heavily blurs the crosshair using a cheap boxFilter
// cerr << "COUNT1:" << cv::v4d::detail::cnz(framebuffer) << endl;
//Heavily blurs the eyes using a cheap boxFilter
boxFilter(framebuffer, framebuffer, -1, Size(15, 15), Point(-1,-1), true, BORDER_REPLICATE);
});
window->showFps();
return window->display();
});

@ -25,8 +25,7 @@ unsigned int shader_program;
unsigned int vao;
unsigned int uniform_transform;
static cv::Ptr<cv::v4d::V4D> v4d = cv::v4d::V4D::make(cv::Size(WIDTH, HEIGHT), cv::Size(),
"Video Demo", OFFSCREEN, true);
cv::Ptr<cv::v4d::V4D> v4d;
static GLuint load_shader() {
#ifndef OPENCV_V4D_USE_ES3
@ -204,8 +203,6 @@ static bool iteration() {
});
#endif
v4d->showFps();
v4d->write();
//If onscreen rendering is enabled it displays the framebuffer in the native window. Returns false if the window was closed.
@ -222,7 +219,7 @@ int main(int argc, char** argv) {
int main() {
#endif
using namespace cv::v4d;
v4d = cv::v4d::V4D::make(cv::Size(WIDTH, HEIGHT), cv::Size(), "Video Demo", OFFSCREEN);
v4d->printSystemInfo();
#ifndef __EMSCRIPTEN__

@ -3,9 +3,9 @@
using namespace cv;
using namespace cv::v4d;
static Ptr<V4D> window = V4D::make(cv::Size(1280, 720), cv::Size(), "Video Editing");
int main(int argc, char** argv) {
Ptr<V4D> window = V4D::make(cv::Size(1280, 720), cv::Size(), "Video Editing");
try {
//In case of emscripten
CV_UNUSED(argc);
@ -45,7 +45,6 @@ int main(int argc, char** argv) {
text(sz.width / 2.0, sz.height / 2.0, hv.c_str(), hv.c_str() + hv.size());
});
window->showFps();
window->write(); //Write video to the Sink
return window->display(); //Display the framebuffer in the native window

@ -14,31 +14,21 @@ CLVAContext::CLVAContext(V4D& v4d, FrameBufferContext& mainFbContext) :
mainFbContext_(mainFbContext), clvaFbContext_(v4d, "CLVA", mainFbContext) {
}
cv::Size CLVAContext::getVideoFrameSize() {
assert(inputVideoFrameSize_ == cv::Size(0, 0) || "Video frame size not initialized");
return inputVideoFrameSize_;
}
bool CLVAContext::capture(std::function<void(cv::UMat&)> fn, cv::UMat& output) {
cv::Size fbSize = fbCtx().size();
if (!context_.empty()) {
{
#ifndef __EMSCRIPTEN__
CLExecScope_t scope(context_);
#endif
fn(readFrame_);
}
if (readFrame_.empty())
return false;
inputVideoFrameSize_ = readFrame_.size();
resizePreserveAspectRatio(readFrame_, output, fbCtx().size());
} else {
fn(readFrame_);
}
if (readFrame_.empty())
return false;
inputVideoFrameSize_ = readFrame_.size();
resizePreserveAspectRatio(readFrame_, output, fbCtx().size());
}
resizePreserveAspectRatio(readFrame_, readRGBBuffer_, fbCtx().size());
cv::cvtColor(readRGBBuffer_, output, cv::COLOR_RGB2BGRA);
return true;
}

@ -22,11 +22,9 @@ class CLVAContext {
FrameBufferContext& mainFbContext_;
FrameBufferContext clvaFbContext_;
cv::UMat readFrame_;
cv::UMat writeFrame_;
cv::UMat readRGBBuffer_;
cv::UMat writeRGBBuffer_;
bool hasContext_ = false;
cv::Size inputVideoFrameSize_;
CLExecContext_t getCLExecContext();
FrameBufferContext& fbCtx();
public:
@ -35,11 +33,6 @@ public:
* @param fbContext The corresponding framebuffer context
*/
CLVAContext(V4D& v4d, FrameBufferContext& fbContext);
/*!
* Get the current video frame size
* @return The current video frame size
*/
cv::Size getVideoFrameSize();
/*!
* Called to capture from a function object.
* The functor fn is passed a UMat which it writes to which in turn is captured to the framebuffer.

@ -12,6 +12,7 @@
#include "opencv2/core/opengl.hpp"
#include <exception>
namespace cv {
namespace v4d {
namespace detail {
@ -23,10 +24,10 @@ static bool contains_absolute(nanogui::Widget* w, const nanogui::Vector2i& p) {
FrameBufferContext::FrameBufferContext(V4D& v4d, const string& title, const FrameBufferContext& other) : FrameBufferContext(v4d, other.frameBufferSize_, true, title, other.major_, other.minor_, other.compat_, other.samples_, other.debug_, other.glfwWindow_, &other) {
}
FrameBufferContext::FrameBufferContext(V4D& v4d, const cv::Size& frameBufferSize, bool offscreen,
FrameBufferContext::FrameBufferContext(V4D& v4d, const cv::Size& framebufferSize, bool offscreen,
const string& title, int major, int minor, bool compat, int samples, bool debug, GLFWwindow* sharedWindow, const FrameBufferContext* parent) :
v4d_(&v4d), offscreen_(offscreen), title_(title), major_(major), minor_(
minor), compat_(compat), samples_(samples), debug_(debug), viewport_(0, 0, frameBufferSize.width, frameBufferSize.height), frameBufferSize_(frameBufferSize), isShared_(false), sharedWindow_(sharedWindow), parent_(parent) {
minor), compat_(compat), samples_(samples), debug_(debug), viewport_(0, 0, framebufferSize.width, framebufferSize.height), frameBufferSize_(framebufferSize), isShared_(false), sharedWindow_(sharedWindow), parent_(parent), framebuffer_(framebufferSize, CV_8UC4) {
run_sync_on_main<1>([this](){ init(); });
}
@ -78,7 +79,11 @@ void FrameBufferContext::init() {
glfwWindowHint(GLFW_STENCIL_BITS, 8);
glfwWindowHint(GLFW_DEPTH_BITS, 24);
glfwWindowHint(GLFW_RESIZABLE, GLFW_TRUE);
#ifndef __EMSCRIPTEN__
glfwWindowHint(GLFW_VISIBLE, offscreen_ ? GLFW_FALSE : GLFW_TRUE );
#else
glfwWindowHint(GLFW_VISIBLE, GLFW_TRUE);
#endif
glfwWindowHint(GLFW_DOUBLEBUFFER, GLFW_TRUE);
glfwWindow_ = glfwCreateWindow(frameBufferSize_.width, frameBufferSize_.height, title_.c_str(), nullptr,
@ -88,7 +93,10 @@ void FrameBufferContext::init() {
assert(false);
}
this->makeCurrent();
#ifndef __EMSCRIPTEN__
glfwSwapInterval(0);
#endif
#ifndef OPENCV_V4D_USE_ES3
if (!gladLoadGLLoader((GLADloadproc) glfwGetProcAddress))
throw std::runtime_error("Could not initialize GLAD!");
@ -215,7 +223,6 @@ void FrameBufferContext::init() {
// });
}
void FrameBufferContext::setup(const cv::Size& sz) {
frameBufferSize_ = sz;
this->makeCurrent();
if(!isShared_) {
@ -228,20 +235,28 @@ void FrameBufferContext::setup(const cv::Size& sz) {
cerr << "main: " << frameBufferID_ << ":" << textureID_ << endl;
texture_ = new cv::ogl::Texture2D(sz, cv::ogl::Texture2D::RGBA, textureID_);
GL_CHECK(glPixelStorei(GL_UNPACK_ALIGNMENT, 1));
Mat m = framebuffer_.getMat(ACCESS_READ);
GL_CHECK(
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, sz.width, sz.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, m.data));
m.release();
GL_CHECK(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
GL_CHECK(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR));
// glBindTexture(GL_TEXTURE_2D, 0);
GL_CHECK(
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, sz.width, sz.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0));
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureID_, 0));
GL_CHECK(glBindRenderbuffer(GL_RENDERBUFFER, renderBufferID_));
GL_CHECK(
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, sz.width, sz.height));
// GL_CHECK(glBindRenderbuffer(GL_RENDERBUFFER, 0));
GL_CHECK(
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, renderBufferID_));
GL_CHECK(
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureID_, 0));
assert(glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
} else {
assert(parent_ != nullptr);
GL_CHECK(glGenFramebuffers(1, &frameBufferID_));
GL_CHECK(glBindFramebuffer(GL_FRAMEBUFFER, frameBufferID_));
GL_CHECK(glBindTexture(GL_TEXTURE_2D, textureID_));
@ -249,17 +264,25 @@ void FrameBufferContext::setup(const cv::Size& sz) {
cerr << "leaf: " << frameBufferID_ << ":" << textureID_ << endl;
texture_ = new cv::ogl::Texture2D(sz, cv::ogl::Texture2D::RGBA, textureID_);
GL_CHECK(glPixelStorei(GL_UNPACK_ALIGNMENT, 1));
Mat m = framebuffer_.getMat(ACCESS_READ);
GL_CHECK(
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, sz.width, sz.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, m.data));
m.release();
GL_CHECK(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
GL_CHECK(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR));
// GL_CHECK(glBindTexture(GL_TEXTURE_2D, 0));
GL_CHECK(
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, sz.width, sz.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0));
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureID_, 0));
GL_CHECK(glBindRenderbuffer(GL_RENDERBUFFER, renderBufferID_));
GL_CHECK(
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, sz.width, sz.height));
// GL_CHECK(glBindRenderbuffer(GL_RENDERBUFFER, 0));
GL_CHECK(
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, renderBufferID_));
GL_CHECK(
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureID_, 0));
assert(glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
}
// GL_CHECK(glBindFramebuffer(GL_FRAMEBUFFER, 0));
}
@ -389,13 +412,11 @@ cv::Size FrameBufferContext::size() {
}
void FrameBufferContext::copyTo(cv::UMat& dst) {
run_sync_on_main<18>([&,this](){
if(framebuffer_.empty())
framebuffer_.create(size(), CV_8UC4);
run_sync_on_main<7>([&,this](){
#ifndef __EMSCRIPTEN__
CLExecScope_t clExecScope(getCLExecContext());
#endif
FrameBufferContext::GLScope glScope(*this);
FrameBufferContext::GLScope glScope(*this, GL_READ_FRAMEBUFFER);
FrameBufferContext::FrameBufferScope fbScope(*this, framebuffer_);
framebuffer_.copyTo(dst);
});
@ -403,12 +424,10 @@ void FrameBufferContext::copyTo(cv::UMat& dst) {
void FrameBufferContext::copyFrom(const cv::UMat& src) {
run_sync_on_main<18>([&,this](){
if(framebuffer_.empty())
framebuffer_.create(size(), CV_8UC4);
#ifndef __EMSCRIPTEN__
CLExecScope_t clExecScope(getCLExecContext());
#endif
FrameBufferContext::GLScope glScope(*this);
FrameBufferContext::GLScope glScope(*this, GL_DRAW_FRAMEBUFFER);
FrameBufferContext::FrameBufferScope fbScope(*this, framebuffer_);
src.copyTo(framebuffer_);
});
@ -416,12 +435,10 @@ void FrameBufferContext::copyFrom(const cv::UMat& src) {
void FrameBufferContext::execute(std::function<void(cv::UMat&)> fn) {
run_sync_on_main<2>([&,this](){
if(framebuffer_.empty())
framebuffer_.create(size(), CV_8UC4);
#ifndef __EMSCRIPTEN__
CLExecScope_t clExecScope(getCLExecContext());
#endif
FrameBufferContext::GLScope glScope(*this);
FrameBufferContext::GLScope glScope(*this, GL_FRAMEBUFFER);
FrameBufferContext::FrameBufferScope fbScope(*this, framebuffer_);
fn(framebuffer_);
});
@ -461,10 +478,6 @@ CLExecContext_t& FrameBufferContext::getCLExecContext() {
void FrameBufferContext::blitFrameBufferToScreen(const cv::Rect& viewport,
const cv::Size& windowSize, bool stretch) {
GL_CHECK(glReadBuffer(GL_COLOR_ATTACHMENT0));
GL_CHECK(glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0));
glClearColor(0, 0, 0, 0);
glClear(GL_COLOR_BUFFER_BIT);
double hf = double(windowSize.height) / frameBufferSize_.height;
double wf = double(windowSize.width) / frameBufferSize_.width;
double f = std::min(hf, wf);
@ -486,16 +499,37 @@ void FrameBufferContext::blitFrameBufferToScreen(const cv::Rect& viewport,
GLint dstX1 = stretch ? wn : frameBufferSize_.width;
GLint dstY1 = stretch ? hn : frameBufferSize_.height;
glBlitFramebuffer( srcX0, srcY0, srcX1, srcY1,
GLint readFboID = 0;
GL_CHECK(glGetIntegerv(GL_READ_FRAMEBUFFER_BINDING, &readFboID));
GL_CHECK(glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0));
GL_CHECK(glClear(GL_COLOR_BUFFER_BIT | GL_STENCIL_BUFFER_BIT |GL_DEPTH_BUFFER_BIT));
this->makeCurrent();
GL_CHECK(glBindFramebuffer(GL_READ_FRAMEBUFFER, readFboID));
GL_CHECK(glBlitFramebuffer( srcX0, srcY0, srcX1, srcY1,
dstX0, dstY0, dstX1, dstY1,
GL_COLOR_BUFFER_BIT, GL_NEAREST);
GL_COLOR_BUFFER_BIT, GL_NEAREST));
//#else
// cerr << "BLIT!!!" << endl;
// EM_ASM({
// var readFbo = Module.ctx.getParameter(Module.ctx.READ_FRAMEBUFFER_BINDING);
// var drawFbo = Module.ctx.getParameter(Module.ctx.DRAW_FRAMEBUFFER_BINDING);
// console.log(readFbo);
// console.log(drawFbo);
// Module.ctx.bindFramebuffer(Module.ctx.DRAW_FRAMEBUFFER, null);
// Module.ctx.bindFramebuffer(Module.ctx.READ_FRAMEBUFFER, readFbo);
// Module.ctx.clear(Module.ctx.DEPTH_BUFFER_BIT | Module.ctx.STENCIL_BUFFER_BIT);
// Module.ctx.blitFramebuffer($0, $1, $2, $3, $4, $5, $6, $7,
// Module.ctx.COLOR_BUFFER_BIT, Module.ctx.NEAREST);
// }, srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1);
//#endif
// GL_CHECK(glBindFramebuffer(GL_READ_FRAMEBUFFER, readFboID));
// GL_CHECK(glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureID_, 0));
}
void FrameBufferContext::begin(GLenum framebufferTarget) {
this->makeCurrent();
glGetIntegerv( GL_VIEWPORT, viewport_ );
glGetError();
GL_CHECK(glGetIntegerv( GL_VIEWPORT, viewport_ ));
GL_CHECK(glBindFramebuffer(framebufferTarget, frameBufferID_));
GL_CHECK(glBindTexture(GL_TEXTURE_2D, textureID_));
GL_CHECK(glBindRenderbuffer(GL_RENDERBUFFER, renderBufferID_));
@ -506,15 +540,14 @@ void FrameBufferContext::begin(GLenum framebufferTarget) {
GL_CHECK(
glFramebufferTexture2D(framebufferTarget, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureID_, 0));
assert(glCheckFramebufferStatus(framebufferTarget) == GL_FRAMEBUFFER_COMPLETE);
glViewport(0, 0, frameBufferSize_.width, frameBufferSize_.height);
glGetError();
GL_CHECK(glViewport(0, 0, frameBufferSize_.width, frameBufferSize_.height));
}
void FrameBufferContext::end() {
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glGetError();
glViewport(viewport_[0], viewport_[1], viewport_[2], viewport_[3]);
glGetError();
// GL_CHECK(glBindRenderbuffer(GL_RENDERBUFFER, 0));
// GL_CHECK(glBindTexture(GL_TEXTURE_2D, 0));
// GL_CHECK(glBindFramebuffer(GL_FRAMEBUFFER, 0));
GL_CHECK(glViewport(viewport_[0], viewport_[1], viewport_[2], viewport_[3]));
GL_CHECK(glFlush());
GL_CHECK(glFinish());
}
@ -522,7 +555,6 @@ void FrameBufferContext::end() {
void FrameBufferContext::download(cv::UMat& m) {
cv::Mat tmp = m.getMat(cv::ACCESS_WRITE);
assert(tmp.data != nullptr);
//this should use a PBO for the pixel transfer, but i couldn't get it to work for both opengl and webgl at the same time
GL_CHECK(glReadPixels(0, 0, tmp.cols, tmp.rows, GL_RGBA, GL_UNSIGNED_BYTE, tmp.data));
tmp.release();
}
@ -539,8 +571,6 @@ void FrameBufferContext::acquireFromGL(cv::UMat& m) {
if (clglSharing_) {
GL_CHECK(fromGLTexture2D(getTexture2D(), m));
} else {
if(m.empty())
m.create(size(), CV_8UC4);
download(m);
GL_CHECK(glFlush());
GL_CHECK(glFinish());
@ -555,11 +585,7 @@ void FrameBufferContext::releaseToGL(cv::UMat& m) {
if (clglSharing_) {
GL_CHECK(toGLTexture2D(m, getTexture2D()));
} else {
if(m.empty())
m.create(size(), CV_8UC4);
upload(m);
GL_CHECK(glFlush());
GL_CHECK(glFinish());
}
}

@ -14,10 +14,14 @@
#include "opencv2/v4d/detail/cl.hpp"
#include <opencv2/core/ocl.hpp>
#include "opencv2/v4d/util.hpp"
#include "pbodownloader.hpp"
#include <iostream>
using namespace poly;
#ifdef OPENCV_V4D_USE_ES3
#define GLFW_INCLUDE_ES3
#define GLFW_INCLUDE_GLEXT
#endif
#include <GLFW/glfw3.h>
struct GLFWwindow;
namespace cv {
@ -50,6 +54,7 @@ class FrameBufferContext {
GLFWwindow* glfwWindow_ = nullptr;
bool clglSharing_ = true;
GLuint frameBufferID_ = 0;
GLuint onscreenTextureID_ = 0;
GLuint textureID_ = 0;
GLuint renderBufferID_ = 0;
GLint viewport_[4];
@ -160,7 +165,7 @@ protected:
void setup(const cv::Size& sz);
void teardown();
/*!
* The UMat used to copy or bind (depending on cl-gl sharing capability) the OpenGL framebuffer.
* The UMat used to copy or bind (depending on cl-gl interop capability) the OpenGL framebuffer.
*/
/*!
* The internal framebuffer exposed as OpenGL Texture2D.

@ -4,7 +4,6 @@
// Copyright Amir Hassan (kallaballa) <amir@viel-zu.org>
#include "glcontext.hpp"
#include "opencv2/v4d/v4d.hpp"
namespace cv {
namespace v4d {
namespace detail {
@ -14,37 +13,8 @@ GLContext::GLContext(V4D& v4d, FrameBufferContext& fbContext) :
void GLContext::render(std::function<void(const cv::Size&)> fn) {
run_sync_on_main<15>([&,this](){
#ifdef __EMSCRIPTEN__
// fb_.create(mainFbContext_.size(), CV_8UC4);
// preFB_.create(mainFbContext_.size(), CV_8UC4);
// postFB_.create(mainFbContext_.size(), CV_8UC4);
// {
// FrameBufferContext::GLScope mainGlScope(mainFbContext_);
// FrameBufferContext::FrameBufferScope fbScope(mainFbContext_, fb_);
// fb_.copyTo(preFB_);
// }
// {
// FrameBufferContext::GLScope glGlScope(glFbContext_);
// FrameBufferContext::FrameBufferScope fbScope(glFbContext_, fb_);
// preFB_.copyTo(fb_);
// }
#endif
{
FrameBufferContext::GLScope glScope(fbCtx());
FrameBufferContext::GLScope glScope(fbCtx(), GL_FRAMEBUFFER);
fn(fbCtx().size());
}
#ifdef __EMSCRIPTEN__
// {
// FrameBufferContext::GLScope glScope(fbCtx());
// FrameBufferContext::FrameBufferScope fbScope(fbCtx(), fb_);
// fb_.copyTo(postFB_);
// }
// {
// FrameBufferContext::GLScope mainGlScope(mainFbContext_);
// FrameBufferContext::FrameBufferScope fbScope(mainFbContext_, fb_);
// postFB_.copyTo(fb_);
// }
#endif
});
}

@ -11,59 +11,58 @@ namespace v4d {
namespace detail {
NanoguiContext::NanoguiContext(V4D& v4d, FrameBufferContext& fbContext) :
mainFbContext_(fbContext), nguiFbContext_(v4d, "NanoGUI", fbContext) {
run_sync_on_main<3>([this](){ init(); });
NanoVGContext(v4d, fbContext) {
}
void NanoguiContext::init() {
void NanoguiContext::render() {
run_sync_on_main<4>([&,this](){
FrameBufferContext::GLScope glScope(fbCtx(), GL_DRAW_FRAMEBUFFER);
glClear(GL_STENCIL_BUFFER_BIT);
screen_ = new nanogui::Screen();
screen_->initialize(nguiFbContext_.getGLFWWindow(), false);
fbCtx().setWindowSize(fbCtx().size());
form_ = new cv::v4d::FormHelper(screen_);
// glClear(GL_STENCIL_BUFFER_BIT);
screen().draw_widgets();
});
}
void NanoguiContext::render() {
run_sync_on_main<4>([&,this](){
#ifdef __EMSCRIPTEN__
// fb_.create(mainFbContext_.size(), CV_8UC4);
// preFB_.create(mainFbContext_.size(), CV_8UC4);
// postFB_.create(mainFbContext_.size(), CV_8UC4);
// {
// FrameBufferContext::GLScope mainGlScope(mainFbContext_);
// FrameBufferContext::FrameBufferScope fbScope(mainFbContext_, fb_);
// fb_.copyTo(preFB_);
// }
// {
// FrameBufferContext::GLScope glGlScope(fbCtx());
// FrameBufferContext::FrameBufferScope fbScope(fbCtx(), fb_);
// preFB_.copyTo(fb_);
// } glClear(GL_DEPTH_BUFFER_BIT|GL_STENCIL_BUFFER_BIT);
void NanoguiContext::updateFps(bool print, bool graphical) {
if (!first_) {
tick_.stop();
if (tick_.getTimeMilli() > 50) {
if(print) {
cerr << "FPS : " << (fps_ = tick_.getFPS());
#ifndef __EMSCRIPTEN__
cerr << '\r';
#else
cerr << endl;
#endif
{
FrameBufferContext::GLScope glScope(fbCtx(), GL_FRAMEBUFFER);
glClear(GL_STENCIL_BUFFER_BIT);
screen().draw_widgets();
}
#ifdef __EMSCRIPTEN__
// {
// FrameBufferContext::GLScope glScope(fbCtx());
// FrameBufferContext::FrameBufferScope fbScope(fbCtx(), fb_);
// fb_.copyTo(postFB_);
// }
// {
// FrameBufferContext::GLScope mainGlScope(mainFbContext_);
// FrameBufferContext::FrameBufferScope fbScope(mainFbContext_, fb_);
// postFB_.copyTo(fb_);
// }
#endif
tick_.reset();
}
if (graphical) {
NanoVGContext::render([this](const Size sz){
CV_UNUSED(sz);
using namespace cv::v4d::nvg;
string txt = "FPS: " + std::to_string(fps_);
beginPath();
roundedRect(5, 5, 15 * txt.size() + 5, 30, 5);
fillColor(cv::Scalar(255, 255, 255, 180));
fill();
fontSize(30.0f);
fontFace("mono");
fillColor(cv::Scalar(90, 90, 90, 255));
textAlign(NVG_ALIGN_LEFT | NVG_ALIGN_MIDDLE);
text(10, 20, txt.c_str(), nullptr);
});
}
}
first_ = false;
tick_.start();
}
void NanoguiContext::build(std::function<void(cv::v4d::FormHelper&)> fn) {
run_sync_on_main<5>([fn,this](){
FrameBufferContext::GLScope glScope(fbCtx());
FrameBufferContext::GLScope glScope(fbCtx(), GL_FRAMEBUFFER);
fn(form());
screen().perform_layout();
});
@ -78,7 +77,7 @@ cv::v4d::FormHelper& NanoguiContext::form() {
}
FrameBufferContext& NanoguiContext::fbCtx() {
return nguiFbContext_;
return NanoVGContext::nvgFbContext_;
}
}
}

@ -7,6 +7,8 @@
#define SRC_OPENCV_NANOGUICONTEXT_HPP_
#include "framebuffercontext.hpp"
#include "nanovgcontext.hpp"
#ifdef __EMSCRIPTEN__
#include <emscripten.h>
@ -22,18 +24,14 @@ namespace detail {
/*!
* Used to setup a nanogui context
*/
class NanoguiContext {
nanogui::Screen* screen_;
cv::v4d::FormHelper* form_;
FrameBufferContext& mainFbContext_;
FrameBufferContext nguiFbContext_;
cv::UMat preFB_;
cv::UMat fb_;
cv::UMat postFB_;
class NanoguiContext : public NanoVGContext {
cv::TickMeter tick_;
float fps_ = 0;
bool first_ = true;
public:
NanoguiContext(V4D& v4d, FrameBufferContext& fbContext);
void init();
void render();
void updateFps(bool print, bool graphical);
void build(std::function<void(cv::v4d::FormHelper&)> fn);
nanogui::Screen& screen();
cv::v4d::FormHelper& form();

@ -4,62 +4,44 @@
// Copyright Amir Hassan (kallaballa) <amir@viel-zu.org>
#include "nanovgcontext.hpp"
#include "opencv2/v4d/v4d.hpp"
#include "opencv2/v4d/nvg.hpp"
namespace cv {
namespace v4d {
namespace detail {
NanoVGContext::NanoVGContext(V4D& v4d, FrameBufferContext& fbContext) :
v4d_(v4d), context_(nullptr), mainFbContext_(fbContext), nvgFbContext_(v4d, "NanoVG", fbContext) {
run_sync_on_main<13>([this](){ init(); });
}
v4d_(v4d), mainFbContext_(fbContext), nvgFbContext_(v4d, "NanoVG", fbContext), context_(
nullptr) {
UMat tmp(fbCtx().size(), CV_8UC4);
void NanoVGContext::init() {
FrameBufferContext::GLScope glScope(fbCtx(), GL_DRAW_FRAMEBUFFER);
glClear(GL_STENCIL_BUFFER_BIT);
run_sync_on_main<13>([this, &tmp]() {
{
//Workaround for first frame glitch
FrameBufferContext::GLScope glScope(fbCtx(), GL_FRAMEBUFFER);
FrameBufferContext::FrameBufferScope fbScope(fbCtx(), tmp);
}
{
FrameBufferContext::GLScope glScope(fbCtx(), GL_FRAMEBUFFER);
screen_ = new nanogui::Screen();
screen_->initialize(fbCtx().getGLFWWindow(), false);
fbCtx().setWindowSize(fbCtx().size());
context_ = screen_->nvg_context();
form_ = new cv::v4d::FormHelper(screen_);
if (!context_)
throw std::runtime_error("Could not initialize NanoVG!");
}
});
tmp.release();
}
void NanoVGContext::render(std::function<void(const cv::Size&)> fn) {
run_sync_on_main<14>([&,this](){
#ifdef __EMSCRIPTEN__
// {
// FrameBufferContext::GLScope mainGlScope(mainFbContext_);
// FrameBufferContext::FrameBufferScope fbScope(mainFbContext_, fb_);
// fb_.copyTo(preFB_);
// }
// {
// FrameBufferContext::GLScope nvgGlScope(nvgFbContext_);
// FrameBufferContext::FrameBufferScope fbScope(nvgFbContext_, fb_);
// preFB_.copyTo(fb_);
// }
#endif
{
FrameBufferContext::GLScope glScope(fbCtx());
glClear(GL_STENCIL_BUFFER_BIT);
run_sync_on_main<14>([this, fn](){
FrameBufferContext::GLScope glScope(fbCtx(), GL_FRAMEBUFFER);
NanoVGContext::Scope nvgScope(*this);
cv::v4d::nvg::detail::NVG::initializeContext(context_);
fn(fbCtx().size());
}
#ifdef __EMSCRIPTEN__
// {
// FrameBufferContext::GLScope nvgGlScope(nvgFbContext_);
// FrameBufferContext::FrameBufferScope fbScope(nvgFbContext_, fb_);
// fb_.copyTo(postFB_);
// }
// {
// FrameBufferContext::GLScope mainGlScope(mainFbContext_);
// FrameBufferContext::FrameBufferScope fbScope(mainFbContext_, fb_);
// postFB_.copyTo(fb_);
// }
#endif
});
}

@ -7,6 +7,7 @@
#define SRC_OPENCV_NANOVGCONTEXT_HPP_
#include "framebuffercontext.hpp"
#include "../formhelper.hpp"
#ifdef __EMSCRIPTEN__
#include <emscripten.h>
@ -25,13 +26,12 @@ namespace detail {
*/
class NanoVGContext {
V4D& v4d_;
protected:
nanogui::Screen* screen_;
NVGcontext* context_;
cv::v4d::FormHelper* form_;
FrameBufferContext& mainFbContext_;
FrameBufferContext nvgFbContext_;
cv::UMat preFB_;
cv::UMat fb_;
cv::UMat postFB_;
NVGcontext* context_;
public:
/*!
* Makes sure #NanoVGContext::begin and #NanoVGContext::end are both called
@ -54,6 +54,7 @@ public:
ctx_.end();
}
};
/*!
* Creates a NanoVGContext
* @param v4d The V4D object used in conjunction with this context
@ -61,7 +62,6 @@ public:
* @param fbContext The framebuffer context
*/
NanoVGContext(V4D& v4d, FrameBufferContext& fbContext);
void init();
/*!
* Execute function object fn inside a nanovg context.

@ -1,182 +0,0 @@
#include <cstring>
#include <iostream>
#include <cassert>
#include <chrono>
#include "pbodownloader.hpp"
namespace poly {
#ifdef __EMSCRIPTEN__
#define USE_PBO 0
#else
#define USE_PBO 1
#endif
#ifdef OPENCV_V4D_USE_ES3
#define GL_BGRA GL_RGBA
#define GL_BGR GL_RGB
#endif
#define SX_ERROR_(msg) printf("%s\n", msg);
#define SX_WARNING_(msg) printf("%s\n", msg);
#define SX_DEBUG_(msg) printf("%s\n", msg);
#define SX_VERBOSE_(msg) printf("%s\n", msg);
#define SX_ERROR(fmt, ...) printf(fmt, __VA_ARGS__);
#define SX_WARNING(fmt, ...) printf(fmt, __VA_ARGS__);
#define SX_DEBUG(fmt, ...) printf(fmt, __VA_ARGS__);
#define SX_VERBOSE(fmt, ...) printf(fmt, __VA_ARGS__);
//#define SX_ERROR_(msg)
//#define SX_WARNING_(msg)
//#define SX_DEBUG_(msg)
//#define SX_VERBOSE_(msg)
//#define SX_ERROR(fmt, ...)
//#define SX_WARNING(fmt, ...)
//#define SX_DEBUG(fmt, ...)
//#define SX_VERBOSE(fmt, ...)
void myGetBufferSubData(GLenum theTarget, GLintptr theOffset, GLsizeiptr theSize, void* theData) {
#ifdef __EMSCRIPTEN__
EM_ASM_(
{
Module.ctx.getBufferSubData($0, $1, HEAPU8.subarray($2, $2 + $3));
}, theTarget, theOffset, theData, theSize);
#else
glGetBufferSubData(theTarget, theOffset, theSize, theData);
#endif
}
PboDownloader::PboDownloader(GLenum format, int w, int h, int num) :
fmt(0), pbos(NULL), num_pbos(0), dx(0), num_downloads(0), width(0), height(0), nbytes(0), pixels(
NULL) {
if (NULL != pbos) {
SX_ERROR_("Already initialized. Not necessary to initialize again; or shutdown first.");
assert(false);
}
if (0 >= num) {
SX_ERROR("Invalid number of PBOs: %d\n", num);
assert(false);
}
if (num > 10) {
SX_WARNING_("Asked to create more then 10 buffers; that is probaly a bit too much.");
}
fmt = format;
width = w;
height = h;
num_pbos = num;
if (GL_RED == fmt || GL_GREEN == fmt || GL_BLUE == fmt) {
nbytes = width * height;
} else if (GL_RGB == fmt || GL_BGR == fmt) {
nbytes = width * height * 3;
} else if (GL_RGBA == fmt || GL_BGRA == fmt) {
nbytes = width * height * 4;
} else {
SX_ERROR_("Unhandled pixel format, use GL_R, GL_RG, GL_RGB or GL_RGBA.");
assert(false);
}
if (0 == nbytes) {
SX_ERROR("Invalid width or height given: %d x %d\n", width, height);
assert(false);
}
pbos = new GLuint[num];
if (NULL == pbos) {
SX_ERROR_("Cannot allocate pbos.");
assert(false);
}
pixels = new unsigned char[nbytes];
if (NULL == pixels) {
SX_ERROR_("Cannot allocate pixel buffer.");
assert(false);
}
glGenBuffers(num, pbos);
for (int i = 0; i < num; ++i) {
SX_VERBOSE("pbodownloader.pbos[%d] = %d, nbytes: %d\n", i, pbos[i], nbytes)
glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[i]);
glBufferData(GL_PIXEL_PACK_BUFFER, nbytes, NULL, GL_STREAM_READ);
}
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
}
PboDownloader::~PboDownloader() {
if (NULL != pixels) {
delete[] pixels;
pixels = NULL;
}
}
uint64_t nanos() {
return std::chrono::high_resolution_clock::now().time_since_epoch().count();
}
void PboDownloader::download() {
unsigned char* ptr;
uint64_t start_ns = nanos();
uint64_t end_ns = 0;
uint64_t delta_ns = 0;
#if USE_PBO
if (num_downloads < num_pbos) {
/*
First we need to make sure all our pbos are bound, so glMap/Unmap will
read from the oldest bound buffer first.
*/
glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[dx]);
glReadPixels(0, 0, width, height, fmt, GL_UNSIGNED_BYTE, 0); /* When a GL_PIXEL_PACK_BUFFER is bound, the last 0 is used as offset into the buffer to read into. */
SX_DEBUG("glReadPixels() with pbo: %d\n", pbos[dx]);
} else {
SX_DEBUG("glMapBuffer() with pbo: %d\n", pbos[dx]);
/* Read from the oldest bound pbo. */
glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[dx]);
#ifdef __EMSCRIPTEN__
std::unique_ptr<uint8_t> clientBuffer = std::make_unique<uint8_t>(nbytes);
myGetBufferSubData(GL_PIXEL_PACK_BUFFER, 0, nbytes, clientBuffer.get());
ptr = clientBuffer.get();
#else
ptr = (unsigned char*) glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
#endif
if (NULL != ptr) {
cerr << "read" << endl;
memcpy(pixels, ptr, nbytes);
#ifndef __EMSCRIPTEN__
glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
#endif
} else {
SX_ERROR_("Failed to map the buffer\n");
}
/* Trigger the next read. */
SX_DEBUG("glReadPixels() with pbo: %d\n", pbos[dx]);
glReadPixels(0, 0, width, height, fmt, GL_UNSIGNED_BYTE, 0);
}
++dx;
dx = dx % num_pbos;
num_downloads++;
if (num_downloads == UINT64_MAX) {
num_downloads = num_pbos;
}
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
#else
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0); /* just make sure we're not accidentilly using a PBO. */
glReadPixels(0, 0, width, height, fmt, GL_UNSIGNED_BYTE, pixels);
#endif
end_ns = nanos();
delta_ns = end_ns - start_ns;
SX_VERBOSE("Download took: %f ms. \n", ((double)delta_ns) / 1000000.0);
}
} /* namespace poly */

@ -1,37 +0,0 @@
#ifndef POLY_PBO_DOWNLOADER_H
#define POLY_PBO_DOWNLOADER_H
#define __STDC_LIMIT_MACROS
#include "opencv2/v4d/v4d.hpp"
#include <stdint.h>
#ifdef OPENCV_V4D_USE_ES3
#define GLFW_INCLUDE_ES3
#define GLFW_INCLUDE_GLEXT
#endif
#include <GLFW/glfw3.h>
namespace poly {
class PboDownloader {
public:
PboDownloader(GLenum format, int w, int h, int num);
~PboDownloader();
void download();
public:
GLenum fmt;
GLuint* pbos;
uint64_t num_pbos;
uint64_t dx;
uint64_t num_downloads;
int width;
int height;
int nbytes; /* number of bytes in the pbo buffer. */
unsigned char* pixels; /* the downloaded pixels. */
};
} /* namespace poly */
#endif

@ -10,6 +10,7 @@ namespace v4d {
FormHelper::FormHelper(nanogui::Screen* screen) :
nanogui::FormHelper(screen) {
assert(screen != nullptr);
}
FormHelper::~FormHelper() {

@ -4,7 +4,7 @@
// Copyright Amir Hassan (kallaballa) <amir@viel-zu.org>
#include "opencv2/v4d/nvg.hpp"
#include "opencv2/v4d/v4d.hpp"
namespace cv {
namespace v4d {
namespace nvg {
@ -653,6 +653,15 @@ void intersectScissor(float x, float y, float w, float h) {
void resetScissor() {
detail::NVG::getCurrentContext()->resetScissor();
}
void clear(const cv::Scalar& bgra) {
const float& b = bgra[0] / 255.0f;
const float& g = bgra[1] / 255.0f;
const float& r = bgra[2] / 255.0f;
const float& a = bgra[3] / 255.0f;
GL_CHECK(glClearColor(r, g, b, a));
GL_CHECK(glClear(GL_COLOR_BUFFER_BIT));
}
}
}
}

@ -143,7 +143,10 @@ unsigned int initShader(const char* vShader, const char* fShader, const char* ou
}
std::string getGlInfo() {
return reinterpret_cast<const char*>(glGetString(GL_VERSION));
std::ostringstream oss;
oss << "\n\t" << reinterpret_cast<const char*>(glGetString(GL_VERSION))
<< "\n\t" << reinterpret_cast<const char*>(glGetString(GL_RENDERER)) << endl;
return oss.str();
}
std::string getClInfo() {
@ -318,13 +321,13 @@ private:
cv::Ptr<V4D> window_;
int width_;
int height_;
UMat tmp_;
UMat fb_;
GLuint framebuffer = 0;
GLuint texture = 0;
public:
HTML5Capture(cv::Ptr<V4D> window, int width, int height) :
window_(window), width_(width), height_(height), tmp_(cv::Size(width, height), CV_8UC4) {
cerr << "start constr" << endl;
window_(window), width_(width), height_(height), fb_(cv::Size(width, height), CV_8UC4) {
// cerr << "start constr" << endl;
EM_ASM({
globalThis.playing = false;
globalThis.timeupdate = false;
@ -335,13 +338,13 @@ public:
globalThis.v4dCopyCanvasElement.height = $1;
globalThis.v4dCopyCanvasElement.style.display = "none";
}, width, height);
cerr << "end constr" << endl;
// cerr << "end constr" << endl;
}
bool captureGPU(UMat& dst) {
cerr << "start capture" << endl;
// cerr << "start capture" << endl;
FrameBufferContext::GLScope scope(window_->fbCtx());
cerr << "start em" << endl;
// cerr << "start em" << endl;
int ret = EM_ASM_INT(
if(typeof Module.ctx !== 'undefined' && Module.ctx != null && globalThis.doCapture) {
@ -353,21 +356,21 @@ public:
return 0;
}
);
cerr << "en em: " << ret << endl;
// cerr << "en em: " << ret << endl;
if(ret) {
cerr << "1" << endl;
// cerr << "1" << endl;
if(framebuffer == 0) {
GL_CHECK(glGenFramebuffers(1, &framebuffer));
}
GL_CHECK(glBindFramebuffer(GL_READ_FRAMEBUFFER, framebuffer));
cerr << "2" << endl;
// cerr << "2" << endl;
if(texture == 0) {
GL_CHECK(glGenTextures(1, &texture));
}
GL_CHECK(glBindTexture(GL_TEXTURE_2D, texture));
cerr << "3" << endl;
// cerr << "3" << endl;
EM_ASM(
const level = 0;
const internalFormat = globalThis.gl.RGBA;
@ -383,21 +386,23 @@ public:
globalThis.v4dVideoElement
);
);
cerr << "4" << endl;
// cerr << "4" << endl;
GL_CHECK(glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture, 0));
EM_ASM(
globalThis.gl.bindFramebuffer(globalThis.gl.DRAW_FRAMEBUFFER, globalThis.v4dMainFrameBuffer);
globalThis.gl.bindTexture(globalThis.gl.TEXTURE_2D, globalThis.v4dMainTexture);
globalThis.gl.pixelStorei(globalThis.gl.UNPACK_FLIP_Y_WEBGL, true);
// globalThis.gl.pixelStorei(globalThis.gl.UNPACK_FLIP_Y_WEBGL, true);
globalThis.gl.framebufferTexture2D(globalThis.gl.DRAW_FRAMEBUFFER, globalThis.gl.COLOR_ATTACHMENT0, globalThis.gl.TEXTURE_2D, globalThis.v4dMainTexture, 0);
);
cerr << "5" << endl;
FrameBufferContext::FrameBufferScope fbScope(window_->fbCtx(), tmp_);
cvtColor(tmp_, dst, COLOR_BGRA2RGB);
cerr << "captured" << endl;
// cerr << "5" << endl;
FrameBufferContext::FrameBufferScope fbScope(window_->fbCtx(), fb_);
flip(fb_, fb_, 0);
cvtColor(fb_, dst, COLOR_BGRA2RGB);
cerr << "flipped" << endl;
return true;
}
cerr << "not captured" << endl;
// cerr << "not captured" << endl;
return false;
}
@ -435,24 +440,20 @@ Source makeCaptureSource(int width, int height, cv::Ptr<V4D> window) {
using namespace std;
return Source([=](cv::UMat& frame) {
if(capture == nullptr && capture_width > 0 && capture_height > 0)
// run_sync_on_main<16>([&](){
// capture = new HTML5Capture(window, capture_width, capture_height);
// });
if(capture_width > 0 && capture_height > 0) {
try {
if(frame.empty())
frame.create(cv::Size(width, height), CV_8UC3);
if(capture != nullptr) {
// run_sync_on_main<17>([&](){
// capture->captureGPU(frame);
// });
} else {
std::cerr << "Nothing captured" << endl;
}
run_sync_on_main<17>([&](){
if(capture == nullptr)
capture = new HTML5Capture(window, capture_width, capture_height);
capture->captureGPU(frame);
});
} catch(std::exception& ex) {
cerr << ex.what() << endl;
}
}
return true;
}, 0);
}

@ -25,7 +25,7 @@ void glfw_error_callback(int error, const char* description) {
void gl_check_error(const std::filesystem::path& file, unsigned int line, const char* expression) {
int errorCode = glGetError();
cerr << "TRACE: " << file.filename() << " (" << line << ") : " << expression << " => code: " << errorCode << endl;
if (errorCode != 0) {
std::stringstream ss;
ss << "GL failed in " << file.filename() << " (" << line << ") : " << "\nExpression:\n "
@ -185,7 +185,12 @@ void V4D::copyFrom(cv::InputArray m) {
}
#ifdef __EMSCRIPTEN__
bool first = true;
static void do_frame(void* void_fn_ptr) {
if(first) {
glfwSwapInterval(0);
first = false;
}
auto* fn_ptr = reinterpret_cast<std::function<bool()>*>(void_fn_ptr);
if (fn_ptr) {
auto& fn = *fn_ptr;
@ -196,8 +201,8 @@ static void do_frame(void* void_fn_ptr) {
void V4D::run(std::function<bool()> fn) {
#ifndef __EMSCRIPTEN__
while (keepRunning() && fn())
;
while (keepRunning() && fn()) {
}
#else
emscripten_set_main_loop_arg(do_frame, &fn, -1, true);
#endif
@ -216,7 +221,7 @@ void V4D::feed(cv::InputArray& in) {
}, frame);
fb([frame](cv::UMat& frameBuffer){
cvtColor(frame,frameBuffer, cv::COLOR_RGB2BGRA);
frame.copyTo(frameBuffer);
});
}
@ -261,7 +266,7 @@ bool V4D::capture(std::function<void(cv::UMat&)> fn) {
}, this, fn, nextReaderFrame_);
fb([this](cv::UMat& frameBuffer){
cvtColor(currentReaderFrame_,frameBuffer, cv::COLOR_RGB2BGRA);
currentReaderFrame_.copyTo(frameBuffer);
});
return true;
}
@ -303,17 +308,6 @@ bool V4D::isSinkReady() {
return sink_.isReady();
}
void V4D::clear(const cv::Scalar& bgra) {
this->gl([&]() {
const float& b = bgra[0] / 255.0f;
const float& g = bgra[1] / 255.0f;
const float& r = bgra[2] / 255.0f;
const float& a = bgra[3] / 255.0f;
GL_CHECK(glClearColor(r, g, b, a));
GL_CHECK(glClear(GL_COLOR_BUFFER_BIT));
});
}
void V4D::showGui(bool s) {
auto children = nguiCtx().screen().children();
for (auto* child : children) {
@ -443,6 +437,22 @@ void V4D::setWindowSize(const cv::Size& sz) {
fbCtx().setWindowSize(sz);
}
// Returns whether the FPS counter is drawn as a graphical on-screen overlay.
bool V4D::getShowFPS() {
return showFPS_;
}
// Returns whether the FPS value is printed to the console.
bool V4D::getPrintFPS() {
return printFPS_;
}
// Enables/disables the graphical on-screen FPS display.
void V4D::setShowFPS(bool s) {
showFPS_ = s;
}
// Enables/disables printing of the FPS value to the console.
void V4D::setPrintFPS(bool p) {
printFPS_ = p;
}
// Returns whether the window is in fullscreen mode.
// Delegates to the framebuffer context, which owns the window state.
bool V4D::isFullscreen() {
return fbCtx().isFullscreen();
}
@ -497,28 +507,43 @@ void V4D::setDefaultKeyboardEventCallback() {
void V4D::swapContextBuffers() {
run_sync_on_main<9>([this]() {
FrameBufferContext::GLScope glScope(clvaCtx().fbCtx());
FrameBufferContext::GLScope glScope(clvaCtx().fbCtx(), GL_READ_FRAMEBUFFER);
clvaCtx().fbCtx().blitFrameBufferToScreen(viewport(), clvaCtx().fbCtx().getWindowSize(), isFrameBufferScaling());
clvaCtx().fbCtx().makeCurrent();
#ifndef __EMSCRIPTEN__
glfwSwapBuffers(clvaCtx().fbCtx().getGLFWWindow());
#else
emscripten_webgl_commit_frame();
#endif
});
run_sync_on_main<10>([this]() {
FrameBufferContext::GLScope glScope(glCtx().fbCtx());
FrameBufferContext::GLScope glScope(glCtx().fbCtx(), GL_READ_FRAMEBUFFER);
glCtx().fbCtx().blitFrameBufferToScreen(viewport(), glCtx().fbCtx().getWindowSize(), isFrameBufferScaling());
glCtx().fbCtx().makeCurrent();
#ifndef __EMSCRIPTEN__
glfwSwapBuffers(glCtx().fbCtx().getGLFWWindow());
#else
emscripten_webgl_commit_frame();
#endif
});
run_sync_on_main<11>([this]() {
FrameBufferContext::GLScope glScope(nvgCtx().fbCtx());
FrameBufferContext::GLScope glScope(nvgCtx().fbCtx(), GL_READ_FRAMEBUFFER);
nvgCtx().fbCtx().blitFrameBufferToScreen(viewport(), nvgCtx().fbCtx().getWindowSize(), isFrameBufferScaling());
nvgCtx().fbCtx().makeCurrent();
#ifndef __EMSCRIPTEN__
glfwSwapBuffers(nvgCtx().fbCtx().getGLFWWindow());
#else
emscripten_webgl_commit_frame();
#endif
});
run_sync_on_main<12>([this]() {
FrameBufferContext::GLScope glScope(nguiCtx().fbCtx());
FrameBufferContext::GLScope glScope(nguiCtx().fbCtx(), GL_READ_FRAMEBUFFER);
nguiCtx().fbCtx().blitFrameBufferToScreen(viewport(), nguiCtx().fbCtx().getWindowSize(), isFrameBufferScaling());
nguiCtx().fbCtx().makeCurrent();
#ifndef __EMSCRIPTEN__
glfwSwapBuffers(nguiCtx().fbCtx().getGLFWWindow());
#else
emscripten_webgl_commit_frame();
#endif
});
}
@ -529,8 +554,11 @@ bool V4D::display() {
#else
if (true) {
#endif
nguiCtx().updateFps(printFPS_, showFPS_);
nguiCtx().render();
// swapContextBuffers();
run_sync_on_main<6>([&, this](){
FrameBufferContext::GLScope glScope(fbCtx(), GL_READ_FRAMEBUFFER);
fbCtx().blitFrameBufferToScreen(viewport(), fbCtx().getWindowSize(), isFrameBufferScaling());
@ -570,47 +598,14 @@ GLFWwindow* V4D::getGLFWWindow() {
void V4D::printSystemInfo() {
run_sync_on_main<8>([this](){
fbCtx().makeCurrent();
cerr << "OpenGL Version: " << getGlInfo() << endl;
cerr << "OpenGL: " << getGlInfo() << endl;
cerr << "OpenCL Platforms: " << getClInfo() << endl;
});
}
void V4D::showFps(bool print, bool graphical) {
if (frameCount() > 0) {
tick_.stop();
if (tick_.getTimeMilli() > 50) {
if(print) {
cerr << "FPS : " << (fps_ = tick_.getFPS());
#ifndef __EMSCRIPTEN__
cerr << '\r';
#else
cerr << endl;
#endif
}
tick_.reset();
}
if (graphical) {
this->nvg([this]() {
glClear(GL_DEPTH_BUFFER_BIT);
using namespace cv::v4d::nvg;
string txt = "FPS: " + std::to_string(fps_);
beginPath();
roundedRect(5, 5, 15 * txt.size() + 5, 30, 5);
fillColor(cv::Scalar(255, 255, 255, 180));
fill();
fontSize(30.0f);
fontFace("mono");
fillColor(cv::Scalar(90, 90, 90, 255));
textAlign(NVG_ALIGN_LEFT | NVG_ALIGN_MIDDLE);
text(10, 20, txt.c_str(), nullptr);
});
}
}
tick_.start();
// Makes this window's OpenGL context current on the calling thread
// by delegating to the framebuffer context.
void V4D::makeCurrent() {
fbCtx().makeCurrent();
}
}
}

@ -10,13 +10,8 @@
# What is V4D?
V4D offers a way of writing graphical (on- and offscreen) high-performance applications with OpenCV. It is light-weight and unencumbered by Qt or GTK licenses. It features vector graphics using [NanoVG](https://github.com/inniyah/nanovg), a GUI based on [NanoGUI](https://github.com/mitsuba-renderer/nanogui), and (on supported systems) OpenCL/OpenGL and OpenCL/VAAPI interoperability. It should be included in [OpenCV-contrib](https://github.com/opencv/opencv_contrib) once it is ready.
# Showcase
Please note that all renderings and videos were created on an Intel Tigerlake CPU and an Intel Iris Xe iGPU. Also, the demos in the videos might run slower than they normally would, for various reasons (improvements to the implementation since recording, screen-capturing overhead, etc.).
@youtube{yYnWkkZSK7Q}
# Why V4D?
Please refer to the online demos in the following section to see at a glance what it can do for you.
Please refer to the online demos in the \ref v4d_tutorials and \ref v4d_demos section to see at a glance what it can do for you.
* **OpenGL**: Easy access to OpenGL.
* **GUI**: Simple yet powerful user interfaces through NanoGUI.
@ -27,14 +22,6 @@ Please refer to the online demos in the following section to see at a glance wha
* **No more highgui** with its heavy dependencies, licenses and limitations.
* **\ref v4d_webassembly_support**.
# Online Demos
Please note that the following online demos are slower and/or have less features than the native versions.
* https://viel-zu.org/opencv/shader
* https://viel-zu.org/opencv/font
* https://viel-zu.org/opencv/optflow
* https://viel-zu.org/opencv/beauty
# Design Notes
* V4D is not thread-safe, though it is possible to have several V4D objects in one or more threads and synchronize them using ```V4D::makeNonCurrent()``` and ```V4D::makeCurrent()```. This is a limitation of GLFW3. That said, OpenCV algorithms are multi-threaded as usual.
* V4D uses InputArray/OutputArray/InputOutputArray which gives you the option to work with Mat, std::vector and UMat. Anyway, you should prefer to use UMat whenever possible to automatically use hardware capabilities where available.
@ -72,7 +59,7 @@ v4d->gl([](const Size sz) {
* [OpenCV Contrib 4.x](https://github.com/opencv/opencv_contrib)
* If you want CL-GL sharing on a recent Intel Platform (Gen8 - Gen12) you currently **need to build** [compute-runtime](https://github.com/intel/compute-runtime) and [my OpenCV 4.x fork](https://github.com/kallaballa/opencv/tree/GCV)
# Tutorials
# Tutorials {#v4d_tutorials}
The tutorials are designed to be read one after the other to give you a good overview over the key concepts of V4D. After that you can move on to the samples.
* \ref v4d_display_image_pipeline
@ -85,7 +72,7 @@ The tutorials are designed to be read one after the other to give you a good ove
* \ref v4d_custom_source_and_sink
* \ref v4d_font_with_gui
# Demos
# Demos {#v4d_demos}
The goal of the demos is to show how to use V4D to the fullest. Also they show how to use V4D to create programs that run mostly (the part the matters) on the GPU (when driver capabilities allow). They are also a good starting point for your own applications because they touch many key aspects and algorithms of OpenCV.
* \ref v4d_cube

@ -9,6 +9,6 @@
Face beautification using face landmark detection (OpenCV/OpenCL), nanovg (OpenGL) for drawing masks and multi-band blending (CPU) to put it all together.
@youtube{SWxDMkZGDCk}
\htmlinclude "../samples/example_v4d_beauty-demo.html"
@include samples/beauty-demo.cpp

Loading…
Cancel
Save