huge refactoring

pull/3471/head
kallaballa 2 years ago
parent df33e5e3a7
commit 23a83adb23
 .gitignore                                                   |   1
 modules/v4d/CMakeLists.txt                                   |  39
 modules/v4d/doc/lena.png                                     | BIN
 modules/v4d/include/opencv2/v4d/cxxpool.hpp                  | 381
 modules/v4d/include/opencv2/v4d/detail/threadpool.hpp        | 132
 modules/v4d/include/opencv2/v4d/nvg.hpp                      |  12
 modules/v4d/include/opencv2/v4d/util.hpp                     |  64
 modules/v4d/include/opencv2/v4d/v4d.hpp                      |  59
 modules/v4d/samples/beauty-demo.cpp                          |   4
 modules/v4d/samples/cube-demo.cpp                            |   4
 modules/v4d/samples/custom_source_and_sink.cpp               |  10
 modules/v4d/samples/display_image.cpp                        |  17
 modules/v4d/samples/display_image_fb.cpp                     |  23
 modules/v4d/samples/example_v4d_beauty-demo.html             |  12
 modules/v4d/samples/example_v4d_cube-demo.html               |  61
 modules/v4d/samples/example_v4d_custom_source_and_sink.html  |  32
 modules/v4d/samples/example_v4d_display_image.html           |  32
 modules/v4d/samples/example_v4d_display_image_fb.html        |  48
 modules/v4d/samples/example_v4d_font-demo.html               |  26
 modules/v4d/samples/example_v4d_font_rendering.html          |  30
 modules/v4d/samples/example_v4d_font_with_gui.html           |  33
 modules/v4d/samples/example_v4d_nanovg-demo.html             |  17
 modules/v4d/samples/example_v4d_optflow-demo.html            |  19
 modules/v4d/samples/example_v4d_pedestrian-demo.html         |  19
 modules/v4d/samples/example_v4d_render_opengl.html           |  30
 modules/v4d/samples/example_v4d_shader-demo.html             |  13
 modules/v4d/samples/example_v4d_vector_graphics.html         |  32
 modules/v4d/samples/example_v4d_vector_graphics_and_fb.html  |  45
 modules/v4d/samples/example_v4d_video-demo.html              |  52
 modules/v4d/samples/example_v4d_video_editing.html           |  26
 modules/v4d/samples/font-demo.cpp                            |   4
 modules/v4d/samples/font_rendering.cpp                       |   5
 modules/v4d/samples/font_with_gui.cpp                        |   1
 modules/v4d/samples/nanovg-demo.cpp                          |  50
 modules/v4d/samples/optflow-demo.cpp                         |   8
 modules/v4d/samples/pedestrian-demo.cpp                      |   4
 modules/v4d/samples/render_opengl.cpp                        |   5
 modules/v4d/samples/shader-demo.cpp                          |   8
 modules/v4d/samples/vector_graphics.cpp                      |  33
 modules/v4d/samples/vector_graphics_and_fb.cpp               |  42
 modules/v4d/samples/video-demo.cpp                           |   8
 modules/v4d/samples/video_editing.cpp                        |  11
 modules/v4d/src/detail/clvacontext.cpp                       |  77
 modules/v4d/src/detail/clvacontext.hpp                       |  16
 modules/v4d/src/detail/framebuffercontext.cpp                | 315
 modules/v4d/src/detail/framebuffercontext.hpp                |  74
 modules/v4d/src/detail/glcontext.cpp                         |   9
 modules/v4d/src/detail/glcontext.hpp                         |  12
 modules/v4d/src/detail/nanoguicontext.cpp                    |  45
 modules/v4d/src/detail/nanoguicontext.hpp                    |  44
 modules/v4d/src/detail/nanovgcontext.cpp                     |  41
 modules/v4d/src/detail/nanovgcontext.hpp                     |  17
 modules/v4d/src/util.cpp                                     |  87
 modules/v4d/src/v4d.cpp                                      | 357
 modules/v4d/tutorials/00-intro.markdown                      |   4
 modules/v4d/tutorials/01-dislay_image.markdown               |  15
 modules/v4d/tutorials/02-dislay_image_fb.markdown            |  16
 modules/v4d/tutorials/03-vector_graphics.markdown            |  16
 modules/v4d/tutorials/04-vector_graphics_and_fb.markdown     |  15
 modules/v4d/tutorials/05-render_opengl.markdown              |   3
 modules/v4d/tutorials/06-font_rendering.markdown             |   4
 modules/v4d/tutorials/07-video_editing.markdown              |   3
 modules/v4d/tutorials/08-custom_source_and_sink.markdown     |   4
 modules/v4d/tutorials/09-font_with_gui.markdown              |   3
 modules/v4d/tutorials/10-cube.markdown                       |   2
 modules/v4d/tutorials/11-video.markdown                      |   2
 modules/v4d/tutorials/12-nanovg.markdown                     |   2
 modules/v4d/tutorials/13-shader.markdown                     |   2
 modules/v4d/tutorials/14-font.markdown                       |   2
 modules/v4d/tutorials/15-pedestrian.markdown                 |   2
 modules/v4d/tutorials/16-optflow.markdown                    |   2
 modules/v4d/tutorials/20-wasm.markdown                       |   0

--- a/.gitignore (vendored)
+++ b/.gitignore
@@ -10,3 +10,4 @@ tags
 tegra/
 *.i
 .download*
+/Debug/

--- a/modules/v4d/CMakeLists.txt
+++ b/modules/v4d/CMakeLists.txt
@@ -10,7 +10,7 @@ macro(add_emscripten_sample sample source)
   ocv_add_executable(${sample} ${source})
   ocv_target_include_modules(${sample} opencv_core opencv_imgproc opencv_videoio opencv_video opencv_imgcodecs opencv_v4d opencv_face opencv_tracking opencv_objdetect opencv_stitching opencv_optflow opencv_imgcodecs opencv_features2d opencv_dnn opencv_flann)
   ocv_target_link_libraries(${sample} LINK_PRIVATE opencv_core opencv_imgproc opencv_videoio opencv_video opencv_imgcodecs
-      opencv_v4d opencv_face opencv_tracking opencv_objdetect opencv_stitching opencv_optflow opencv_imgcodecs opencv_features2d opencv_dnn opencv_flann nanogui ${GLEW_LIBRARIES})
+      opencv_v4d opencv_face opencv_tracking opencv_objdetect opencv_stitching opencv_optflow opencv_imgcodecs opencv_features2d opencv_dnn opencv_flann nanogui)
   target_link_directories(${sample} PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/../../lib")
   target_compile_features(${sample} PRIVATE cxx_std_20)
   set_target_properties(${sample} PROPERTIES SUFFIX ".js")
@@ -23,13 +23,25 @@ endmacro()
 macro(add_binary_sample sample)
   target_compile_features(${sample} PRIVATE cxx_std_20)
-  target_link_libraries(${sample} PRIVATE nanogui GLEW::glew OpenGL::GL)
+  target_link_directories(${sample} PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/../../lib")
+  target_include_directories(${sample} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/include/opencv2/v4d/detail/")
+  target_link_libraries(${sample} PRIVATE nanogui OpenGL::GL)
 endmacro()

 if(EMSCRIPTEN)
-  set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -sINITIAL_MEMORY=512MB -sTOTAL_MEMORY=512MB -sUSE_GLFW=3 -sMIN_WEBGL_VERSION=2 -sMAX_WEBGL_VERSION=2")
-  set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -sINITIAL_MEMORY=512MB -sTOTAL_MEMORY=512MB -sUSE_GLFW=3 -sMIN_WEBGL_VERSION=2 -sMAX_WEBGL_VERSION=2")
-  set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_LD_FLAGS} -sINITIAL_MEMORY=512MB -sTOTAL_MEMORY=512MB -sUSE_GLFW=3 -sMIN_WEBGL_VERSION=2 -sMAX_WEBGL_VERSION=2")
+  set(EM_LINKER_FLAGS "-sPROXY_TO_PTHREAD=1 --use-preload-plugins --preload-file doc/lena.png -sINITIAL_MEMORY=128MB -sALLOW_MEMORY_GROWTH=1 -sUSE_GLFW=3 -sMIN_WEBGL_VERSION=2 -sMAX_WEBGL_VERSION=2")
+  set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${EM_LINKER_FLAGS}")
+  set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${EM_LINKER_FLAGS}")
+  set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${EM_LINKER_FLAGS}")
+#  if(NOT "${CMAKE_BUILD_TYPE}" STREQUAL "Release")
+#    set(EM_DEBUG_FLAGS "-fsanitize=address -sSTACK_OVERFLOW_CHECK=2 -sASSERTIONS=2 -sNO_DISABLE_EXCEPTION_CATCHING -sEXCEPTION_DEBUG=1")
+#    set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${EM_DEBUG_FLAGS}")
+#    set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${EM_DEBUG_FLAGS}")
+#    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${EM_DEBUG_FLAGS}")
+#    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address")
+#  endif()
 else()
   if(NOT GLEW_FOUND)
     message(STATUS "Module opencv_v4d disabled because GLEW was not found")
@@ -65,8 +77,6 @@ endif()
 include_directories("${CMAKE_CURRENT_SOURCE_DIR}/third/nanogui/include/")
 include_directories("${CMAKE_CURRENT_SOURCE_DIR}/third/nanogui/ext/nanovg/src/")
-# include_directories(${GLEW_INCLUDE_DIRS})
-# link_directories(${GLEW_LIBRARY_DIRS})

 ocv_add_module(v4d opencv_core opencv_imgproc opencv_videoio opencv_video)
 ocv_glob_module_sources()
@@ -74,6 +84,13 @@ ocv_module_include_directories()
 ocv_create_module()
 ocv_add_samples(opencv_core opencv_imgproc opencv_videoio opencv_video opencv_v4d opencv_face opencv_tracking opencv_objdetect opencv_stitching opencv_optflow opencv_imgcodecs opencv_features2d opencv_dnn opencv_flann)

+add_custom_command(
+        TARGET ${the_module} POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E copy
+                "${CMAKE_CURRENT_LIST_DIR}/doc/lena.png"
+                "${CMAKE_CURRENT_BINARY_DIR}/doc/lena.png")
+
 if(BUILD_EXAMPLES)
   if(EMSCRIPTEN)
     set(NANOGUI_BUILD_GLFW OFF)
@@ -95,7 +112,7 @@ if(BUILD_EXAMPLES)
     add_emscripten_sample(example_v4d_shader-demo samples/shader-demo.cpp)
     add_emscripten_sample(example_v4d_pedestrian-demo samples/pedestrian-demo.cpp)
     add_emscripten_sample(example_v4d_optflow-demo samples/optflow-demo.cpp)
-    add_emscripten_sample(example_v4d_beauty-demo samples/beauty-demo.cpp)
+#    add_emscripten_sample(example_v4d_beauty-demo samples/beauty-demo.cpp)
   else()
     add_binary_sample(example_v4d_display_image)
     add_binary_sample(example_v4d_custom_source_and_sink)
@@ -135,17 +152,17 @@ else()
 endif()

 add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/third/nanogui)
 add_library(nanovg "third/nanogui/ext/nanovg/src/nanovg.c")

 target_compile_features(opencv_v4d PRIVATE cxx_std_20)
 target_compile_features(nanogui PRIVATE cxx_std_20)
 target_compile_features(nanovg PRIVATE cxx_std_20)
 ocv_warnings_disable(CMAKE_CXX_FLAGS -Wdeprecated-enum-enum-conversion)
-target_link_directories(${the_module} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../../lib")
+target_link_directories(${the_module} PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/../../lib")
+target_include_directories(${the_module} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/include/opencv2/v4d/detail/")
 if(EMSCRIPTEN)
   ocv_target_link_libraries(${the_module} -lnanogui)
 else()
-  ocv_target_link_libraries(${the_module} -lOpenCL -lnanogui OpenGL::OpenGL GLEW::glew)
+  ocv_target_link_libraries(${the_module} -lOpenCL -lnanogui OpenGL::OpenGL)
 endif()

modules/v4d/doc/lena.png
Binary file not shown (image added, 463 KiB).

--- a/modules/v4d/include/opencv2/v4d/cxxpool.hpp (file removed; former contents below)
+++ /dev/null
@@ -1,381 +0,0 @@
// cxxpool is a header-only thread pool for C++
// Repository: https://github.com/bloomen/cxxpool
// Copyright: 2022 Christian Blume
// License: http://www.opensource.org/licenses/mit-license.php
#pragma once
#include <thread>
#include <mutex>
#include <future>
#include <stdexcept>
#include <queue>
#include <utility>
#include <functional>
#include <vector>
#include <chrono>
#include <cstddef>
namespace cxxpool {
namespace detail {
template<typename Iterator>
struct future_info {
typedef typename std::iterator_traits<Iterator>::value_type future_type;
typedef decltype(std::declval<future_type>().get()) value_type;
static constexpr bool is_void = std::is_void<value_type>::value;
};
} // detail
// Waits until all futures contain results
template<typename Iterator>
inline
void wait(Iterator first, Iterator last) {
for (; first != last; ++first)
first->wait();
}
// Waits until all futures contain results with a given timeout duration and
// returns a container of std::future::status
template<typename Result, typename Iterator, typename Rep, typename Period>
inline
Result wait_for(Iterator first, Iterator last,
const std::chrono::duration<Rep, Period>& timeout_duration,
Result result) {
for (; first != last; ++first)
result.push_back(first->wait_for(timeout_duration));
return result;
}
// Waits until all futures contain results with a given timeout duration and
// returns a vector of std::future::status
template<typename Iterator, typename Rep, typename Period>
inline
std::vector<std::future_status> wait_for(Iterator first, Iterator last,
const std::chrono::duration<Rep, Period>& timeout_duration) {
return wait_for(first, last, timeout_duration, std::vector<std::future_status>{});
}
// Waits until all futures contain results with a given timeout time and
// returns a container of std::future::status
template<typename Result, typename Iterator, typename Clock, typename Duration>
inline
Result wait_until(Iterator first, Iterator last,
const std::chrono::time_point<Clock, Duration>& timeout_time,
Result result) {
for (; first != last; ++first)
result.push_back(first->wait_until(timeout_time));
return result;
}
// Waits until all futures contain results with a given timeout time and
// returns a vector of std::future::status
template<typename Iterator, typename Clock, typename Duration>
inline
std::vector<std::future_status> wait_until(Iterator first, Iterator last,
const std::chrono::time_point<Clock, Duration>& timeout_time) {
return wait_until(first, last, timeout_time, std::vector<std::future_status>{});
}
// Calls get() on all futures
template<typename Iterator,
typename = typename std::enable_if<cxxpool::detail::future_info<Iterator>::is_void>::type>
inline
void get(Iterator first, Iterator last) {
for (; first != last; ++first)
first->get();
}
// Calls get() on all futures and stores the results in the returned container
template<typename Result, typename Iterator,
typename = typename std::enable_if<!cxxpool::detail::future_info<Iterator>::is_void>::type>
inline
Result get(Iterator first, Iterator last, Result result) {
for (; first != last; ++first)
result.push_back(first->get());
return result;
}
// Calls get() on all futures and stores the results in the returned vector
template<typename Iterator,
typename = typename std::enable_if<!detail::future_info<Iterator>::is_void>::type>
inline
std::vector<typename detail::future_info<Iterator>::value_type>
get(Iterator first, Iterator last) {
return cxxpool::get(first, last, std::vector<typename cxxpool::detail::future_info<Iterator>::value_type>{});
}
namespace detail {
template<typename Index, Index max = std::numeric_limits<Index>::max()>
class infinite_counter {
public:
infinite_counter()
: count_{0}
{}
infinite_counter& operator++() {
if (count_.back() == max)
count_.push_back(0);
else
++count_.back();
return *this;
}
bool operator>(const infinite_counter& other) const {
if (count_.size() == other.count_.size()) {
return count_.back() > other.count_.back();
} else {
return count_.size() > other.count_.size();
}
}
private:
std::vector<Index> count_;
};
class priority_task {
public:
typedef std::size_t counter_elem_t;
priority_task()
: callback_{}, priority_{}, order_{}
{}
// cppcheck-suppress passedByValue
priority_task(std::function<void()> callback, std::size_t priority, cxxpool::detail::infinite_counter<counter_elem_t> order)
: callback_{std::move(callback)}, priority_(priority), order_{std::move(order)}
{}
bool operator<(const priority_task& other) const {
if (priority_ == other.priority_) {
return order_ > other.order_;
} else {
return priority_ < other.priority_;
}
}
void operator()() const {
return callback_();
}
private:
std::function<void()> callback_;
std::size_t priority_;
cxxpool::detail::infinite_counter<counter_elem_t> order_;
};
} // detail
// Exception thrown by the thread_pool class
class thread_pool_error : public std::runtime_error {
public:
explicit thread_pool_error(const std::string& message)
: std::runtime_error{message}
{}
};
// A thread pool for C++
//
// Constructing the thread pool launches the worker threads while
// destructing it joins them. The threads will be alive for as long as the
// thread pool is not destructed. One may call add_threads() to add more
// threads to the thread pool.
//
// Tasks can be pushed into the pool with and w/o providing a priority >= 0.
// Not providing a priority is equivalent to providing a priority of 0.
// Those tasks are processed first that have the highest priority.
// If priorities are equal those tasks are processed first that were pushed
// first into the pool (FIFO).
class thread_pool {
public:
// Constructor. No threads are launched
thread_pool()
: done_{false}, paused_{false}, threads_{}, tasks_{}, task_counter_{},
task_cond_var_{}, task_mutex_{}, thread_mutex_{}
{}
// Constructor. Launches the desired number of threads. Passing 0 is
// equivalent to calling the no-argument constructor
explicit thread_pool(std::size_t n_threads)
: thread_pool{}
{
if (n_threads > 0) {
std::lock_guard<std::mutex> thread_lock(thread_mutex_);
const auto n_target = threads_.size() + n_threads;
while (threads_.size() < n_target) {
std::thread thread;
try {
thread = std::thread{&thread_pool::worker, this};
} catch (...) {
shutdown();
throw;
}
try {
threads_.push_back(std::move(thread));
} catch (...) {
shutdown();
thread.join();
throw;
}
}
}
}
// Destructor. Joins all threads launched. Waits for all running tasks
// to complete
~thread_pool() {
shutdown();
}
// deleting copy/move semantics
thread_pool(const thread_pool&) = delete;
thread_pool& operator=(const thread_pool&) = delete;
thread_pool(thread_pool&&) = delete;
thread_pool& operator=(thread_pool&&) = delete;
// Adds new threads to the pool and launches them
void add_threads(std::size_t n_threads) {
if (n_threads > 0) {
{
std::lock_guard<std::mutex> task_lock(task_mutex_);
if (done_)
throw thread_pool_error{"add_threads called while pool is shutting down"};
}
std::lock_guard<std::mutex> thread_lock(thread_mutex_);
const auto n_target = threads_.size() + n_threads;
while (threads_.size() < n_target) {
std::thread thread(&thread_pool::worker, this);
try {
threads_.push_back(std::move(thread));
} catch (...) {
shutdown();
thread.join();
throw;
}
}
}
}
// Returns the number of threads launched
std::size_t n_threads() const {
{
std::lock_guard<std::mutex> task_lock(task_mutex_);
if (done_)
throw thread_pool_error{"n_threads called while pool is shutting down"};
}
std::lock_guard<std::mutex> thread_lock(thread_mutex_);
return threads_.size();
}
// Pushes a new task into the thread pool and returns the associated future.
// The task will have a priority of 0
template<typename Functor, typename... Args>
auto push(Functor&& functor, Args&&... args) -> std::future<decltype(functor(args...))> {
return push(0, std::forward<Functor>(functor), std::forward<Args>(args)...);
}
// Pushes a new task into the thread pool while providing a priority and
// returns the associated future. Higher priorities are processed first
template<typename Functor, typename... Args>
auto push(std::size_t priority, Functor&& functor, Args&&... args) -> std::future<decltype(functor(args...))> {
typedef decltype(functor(args...)) result_type;
auto pack_task = std::make_shared<std::packaged_task<result_type()>>(
std::bind(std::forward<Functor>(functor), std::forward<Args>(args)...));
auto future = pack_task->get_future();
{
std::lock_guard<std::mutex> task_lock(task_mutex_);
if (done_)
throw cxxpool::thread_pool_error{"push called while pool is shutting down"};
tasks_.emplace([pack_task]{ (*pack_task)(); }, priority, ++task_counter_);
}
task_cond_var_.notify_one();
return future;
}
// Returns the current number of queued tasks
std::size_t n_tasks() const {
std::lock_guard<std::mutex> task_lock(task_mutex_);
return tasks_.size();
}
// Clears all queued tasks, not affecting currently running tasks
void clear() {
std::lock_guard<std::mutex> task_lock(task_mutex_);
decltype(tasks_) empty;
tasks_.swap(empty);
}
// If enabled then pauses the processing of tasks, not affecting currently
// running tasks. Disabling it will resume the processing of tasks
void set_pause(bool enabled) {
{
std::lock_guard<std::mutex> task_lock(task_mutex_);
paused_ = enabled;
}
if (!paused_)
task_cond_var_.notify_all();
}
private:
void worker() {
for (;;) {
cxxpool::detail::priority_task task;
{
std::unique_lock<std::mutex> task_lock(task_mutex_);
task_cond_var_.wait(task_lock, [this]{
return !paused_ && (done_ || !tasks_.empty());
});
if (done_ && tasks_.empty())
break;
task = tasks_.top();
tasks_.pop();
}
task();
}
}
void shutdown() {
{
std::lock_guard<std::mutex> task_lock(task_mutex_);
done_ = true;
paused_ = false;
}
task_cond_var_.notify_all();
std::lock_guard<std::mutex> thread_lock(thread_mutex_);
for (auto& thread : threads_)
thread.join();
}
bool done_;
bool paused_;
std::vector<std::thread> threads_;
std::priority_queue<cxxpool::detail::priority_task> tasks_;
cxxpool::detail::infinite_counter<
typename detail::priority_task::counter_elem_t> task_counter_;
std::condition_variable task_cond_var_;
mutable std::mutex task_mutex_;
mutable std::mutex thread_mutex_;
};
} // cxxpool
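For reference, the pool removed above was driven through push() with an optional priority and the free get() helper over futures. A minimal usage sketch, assuming only the cxxpool API shown above (illustration only; this commit deletes the header):

// Minimal usage sketch of the cxxpool API above (illustration only).
#include <future>
#include <vector>
#include <opencv2/v4d/cxxpool.hpp> // pre-refactoring include path

int main() {
    cxxpool::thread_pool pool{2}; // the constructor launches two worker threads

    std::vector<std::future<int>> futures;
    // Priority 1 runs before the default priority 0; equal priorities
    // run FIFO thanks to the infinite_counter tie-breaker in priority_task.
    futures.push_back(pool.push(1, [](int x) { return x * x; }, 7));
    futures.push_back(pool.push([] { return 42; }));

    // The free helper calls get() on every future and collects the results.
    std::vector<int> results = cxxpool::get(futures.begin(), futures.end());
    return results.empty();
}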

--- /dev/null
+++ b/modules/v4d/include/opencv2/v4d/detail/threadpool.hpp (new file; contents below)
@@ -0,0 +1,132 @@
/*
* Simple header-only thread pool implementation in modern C++.
*
* Created: Aug 9, 2020.
* Repository: https://github.com/leiless/threadpool.hpp
* LICENSE: BSD-2-Clause
*/
#ifndef THE_THREADPOOL_HPP
#define THE_THREADPOOL_HPP
#include <functional>
#include <mutex>
#include <condition_variable>
#include <queue>
#include <thread>
#include <future>
#include <iostream> // required: the constructor and worker_main() log to std::cerr below
#define THE_NAMESPACE_BEGIN(name) namespace name {
#define THE_NAMESPACE_END() }
THE_NAMESPACE_BEGIN(concurrent)
class threadpool {
public:
explicit threadpool(size_t threads) : alive(true) {
if (threads == 0) {
throw std::runtime_error("thread pool size cannot be zero");
}
for (auto i = 0llu; i < threads; i++) {
std::cerr << "work" << std::endl;
workers.emplace_back([this] { worker_main(); });
}
}
// see: https://stackoverflow.com/a/23771245/13600780
threadpool(const threadpool &) = delete;
threadpool & operator=(const threadpool &) = delete;
~threadpool() noexcept {
{
std::lock_guard<decltype(mtx)> lock(mtx);
alive = false;
}
cv.notify_all();
for (auto & worker : workers) {
worker.join();
}
}
template<typename Fn, typename... Args>
decltype(auto) enqueue(Fn && fn, Args &&... args) {
return enqueue(false, fn, args...);
}
template<typename Fn, typename... Args>
decltype(auto) enqueue_r(Fn && fn, Args &&... args) {
return enqueue(true, fn, args...);
}
private:
template<typename Fn, typename... Args>
decltype(auto) enqueue(bool block_on_shutdown, Fn && fn, Args &&... args) {
using return_type = std::invoke_result_t<Fn, Args...>;
using pack_task = std::packaged_task<return_type()>;
auto t = std::make_shared<pack_task>(
std::bind(std::forward<Fn>(fn), std::forward<Args>(args)...)
);
auto future = t->get_future();
{
std::lock_guard<decltype(mtx)> lock(mtx);
if (!alive) {
throw std::runtime_error("enqueue on stopped thread pool");
}
tasks.emplace([t = std::move(t)] { (*t)(); }, block_on_shutdown);
}
cv.notify_one();
return future;
}
using task = std::pair<std::function<void()>, bool>;
[[nodiscard]] inline task poll_task() noexcept {
task t;
std::unique_lock<decltype(mtx)> lock(mtx);
cv.wait(lock, [this] { return !tasks.empty() || !alive; });
while (!tasks.empty()) {
if (!alive && !tasks.front().second) {
tasks.pop();
continue;
}
t = std::move(tasks.front());
tasks.pop();
break;
}
return t;
}
void worker_main() {
while (true) {
std::cerr << "I" << std::endl;
task t = poll_task();
std::cerr << "II" << std::endl;
// The thread pool is going to shutdown
if (t.first == nullptr) break;
std::cerr << "III" << std::endl;
t.first();
std::cerr << "IIII" << std::endl;
}
}
bool alive;
std::mutex mtx;
std::condition_variable cv;
std::queue<task> tasks;
std::vector<std::thread> workers;
};
THE_NAMESPACE_END()
#endif
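The replacement pool is deliberately smaller than cxxpool: there are no priorities, and the boolean second member of each queued task pair decides its fate at shutdown — enqueue_r() tasks still run while the destructor drains the queue, while plain enqueue() tasks may be discarded. A minimal usage sketch, assuming only the API above:

// Minimal usage sketch of concurrent::threadpool above (illustration only).
#include <string>
#include "threadpool.hpp"

int main() {
    concurrent::threadpool pool(2);

    // block_on_shutdown == false: may be dropped if the pool dies first.
    auto f1 = pool.enqueue([](int x) { return x + 1; }, 41);
    // block_on_shutdown == true: still executed while the pool shuts down.
    auto f2 = pool.enqueue_r([] { return std::string("flushed"); });

    return (f1.get() == 42 && f2.get() == "flushed") ? 0 : 1;
}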

--- a/modules/v4d/include/opencv2/v4d/nvg.hpp
+++ b/modules/v4d/include/opencv2/v4d/nvg.hpp
@@ -8,18 +8,6 @@
 #include "v4d.hpp"

-#ifndef OPENCV_V4D_USE_ES3
-#  ifndef NANOGUI_USE_OPENGL
-#    define NANOGUI_USE_OPENGL
-#  endif
-#else
-#  ifndef NANOGUI_USE_GLES
-#    define NANOGUI_USE_GLES
-#    define NANOGUI_GLES_VERSION 3
-#  endif
-#endif
-#include <nanogui/opengl.h>

 namespace cv {
 namespace v4d {
 /*!

--- a/modules/v4d/include/opencv2/v4d/util.hpp
+++ b/modules/v4d/include/opencv2/v4d/util.hpp
@@ -16,13 +16,77 @@
 #ifdef __EMSCRIPTEN__
 #  include <emscripten.h>
 #  include <emscripten/bind.h>
+#  include <emscripten/threading.h>
 #  include <fstream>
 #endif
+#include <unistd.h>
+#include <thread>
+#include <chrono>
+#include <mutex>
+#include <functional>
+#include <iostream>
+#include <cmath>

 namespace cv {
 namespace v4d {
+namespace detail {
+template <const size_t _UniqueId, typename _Res, typename... _ArgTypes>
+struct fun_ptr_helper
+{
+public:
+    typedef std::function<_Res(_ArgTypes...)> function_type;
+
+    static void bind(function_type&& f)
+    { instance().fn_.swap(f); }
+
+    static void bind(const function_type& f)
+    { instance().fn_ = f; }
+
+    static _Res invoke(_ArgTypes... args)
+    { return instance().fn_(args...); }
+
+    typedef decltype(&fun_ptr_helper::invoke) pointer_type;
+    static pointer_type ptr()
+    { return &invoke; }
+
+private:
+    static fun_ptr_helper& instance()
+    {
+        static fun_ptr_helper inst_;
+        return inst_;
+    }
+
+    fun_ptr_helper() {}
+
+    function_type fn_;
+};
+
+template <const size_t _UniqueId, typename _Res, typename... _ArgTypes>
+typename fun_ptr_helper<_UniqueId, _Res, _ArgTypes...>::pointer_type
+get_fn_ptr(const std::function<_Res(_ArgTypes...)>& f)
+{
+    fun_ptr_helper<_UniqueId, _Res, _ArgTypes...>::bind(f);
+    return fun_ptr_helper<_UniqueId, _Res, _ArgTypes...>::ptr();
+}
+
+template<typename T>
+std::function<typename std::enable_if<std::is_function<T>::value, T>::type>
+make_function(T *t)
+{
+    return {t};
+}
+
+long proxy_to_mainl(std::function<long()> fn);
+void proxy_to_mainv(std::function<void()> fn);
+bool proxy_to_mainb(std::function<bool()> fn);
+}
+
 using std::string;
 class V4D;
+#ifdef __EMSCRIPTEN__
+CV_EXPORTS Mat read_image(const string &path);
+#endif
 CV_EXPORTS unsigned int init_shader(const char* vShader, const char* fShader, const char* outputAttributeName);
 /*!
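The fun_ptr_helper/get_fn_ptr machinery added above converts a capturing std::function into a plain C function pointer by parking it in a static slot keyed by _UniqueId — the shape that C APIs such as GLFW callbacks demand. A sketch of the intended use, assuming the declarations above (the GLFW call is only an illustration, not taken from this commit):

// Illustration of get_fn_ptr (assumed usage, not part of this commit).
#include <cstdio>
#include <functional>
#include <GLFW/glfw3.h>
#include <opencv2/v4d/util.hpp>

void install_error_handler(int appId) {
    // The lambda captures state, so it cannot decay to a function pointer
    // by itself; get_fn_ptr<0> stores it and hands out a static trampoline.
    // A second, different callback would need a different _UniqueId than 0.
    std::function<void(int, const char*)> fn =
        [appId](int error, const char* msg) {
            std::fprintf(stderr, "[app %d] GLFW error %d: %s\n", appId, error, msg);
        };
    glfwSetErrorCallback(cv::v4d::detail::get_fn_ptr<0>(fn));
}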

--- a/modules/v4d/include/opencv2/v4d/v4d.hpp
+++ b/modules/v4d/include/opencv2/v4d/v4d.hpp
@@ -6,6 +6,23 @@
 #ifndef SRC_OPENCV_V4D_V4D_HPP_
 #define SRC_OPENCV_V4D_V4D_HPP_

+#define GL_GLEXT_PROTOTYPES 1
+#include <GLFW/glfw3.h>
+#include <GL/gl.h>
+#include <GL/glext.h>
+
+#ifndef OPENCV_V4D_USE_ES3
+#  ifndef NANOGUI_USE_OPENGL
+#    define NANOGUI_USE_OPENGL
+#  endif
+#else
+#  ifndef NANOGUI_USE_GLES
+#    define NANOGUI_USE_GLES
+#    define NANOGUI_GLES_VERSION 3
+#  endif
+#endif
+#include <nanogui/opengl.h>
+
 #include "source.hpp"
 #include "sink.hpp"
 #include "dialog.hpp"
@@ -19,17 +36,9 @@
 #include <opencv2/imgproc.hpp>
 #include <opencv2/videoio.hpp>

-#include "cxxpool.hpp"
+#include "detail/threadpool.hpp"

-#ifndef OPENCV_V4D_USE_ES3
-#  include <GL/glew.h>
-#  define GLFW_INCLUDE_GLCOREARB
-#else
-#  define GLFW_INCLUDE_ES3
-#  define GLFW_INCLUDE_GLEXT
-#endif
-#include <GLFW/glfw3.h>
-#include <nanogui/nanogui.h>

 #ifdef __EMSCRIPTEN__
 #include <emscripten.h>
@@ -55,6 +64,7 @@ class FrameBufferContext;
 class CLVAContext;
 class GLContext;
 class NanoVGContext;
+class NanoguiContext;

 /*!
  * The GFLW error callback.
@@ -75,7 +85,7 @@ bool contains_absolute(nanogui::Widget* w, const nanogui::Vector2i& p);
  * @param parent The parent widget
  * @param widgets A vector of widgets of type T to append newly found widgets to.
  */
-template<typename T> void find_widgets(nanogui::Widget* parent, std::vector<T>& widgets) {
+template<typename T> void find_widgets(const nanogui::Widget* parent, std::vector<T>& widgets) {
     T w;
     for (auto* child : parent->children()) {
         find_widgets(child, widgets);
@@ -108,7 +118,7 @@ void gl_check_error(const std::filesystem::path& file, unsigned int line, const
  */
 CV_EXPORTS cv::Scalar colorConvert(const cv::Scalar& src, cv::ColorConversionCodes code);

-CV_EXPORTS void resizeKeepAspectRatio(const cv::UMat& src, cv::UMat& output, const cv::Size& dstSize,
+CV_EXPORTS void resizePreserveAspectRatio(const cv::UMat& src, cv::UMat& output, const cv::Size& dstSize,
         const cv::Scalar& bgcolor = {0,0,0,255});

 using namespace cv::v4d::detail;
@@ -119,27 +129,27 @@ class CV_EXPORTS V4D {
     friend class detail::NanoVGContext;
     friend class detail::FrameBufferContext;
     const cv::Size initialSize_;
-    bool offscreen_;
-    const string& title_;
-    int major_;
-    int minor_;
-    bool compat_;
-    int samples_;
-    bool debug_;
     cv::Rect viewport_;
     float scale_;
     cv::Vec2f mousePos_;
     bool stretch_;
+    bool offscreen_;
     FrameBufferContext* mainFbContext_ = nullptr;
     CLVAContext* clvaContext_ = nullptr;
     GLContext* glContext_ = nullptr;
     NanoVGContext* nvgContext_ = nullptr;
-    cv::VideoCapture* capture_ = nullptr;
-    cv::VideoWriter* writer_ = nullptr;
-    FormHelper* form_ = nullptr;
+    NanoguiContext* nguiContext_ = nullptr;
     bool closed_ = false;
-    cv::Size videoFrameSize_ = cv::Size(0, 0);
-    int vaCaptureDeviceIndex_ = 0;
-    int vaWriterDeviceIndex_ = 0;
     bool mouseDrag_ = false;
-    nanogui::Screen* screen_ = nullptr;
     Source source_;
     Sink sink_;
-    cxxpool::thread_pool pool{2}; //two threads. one for reading and one for writing
+    concurrent::threadpool pool_;
     bool captureSuccessful_ = true;
     cv::UMat currentReaderFrame_;
     cv::UMat nextReaderFrame_;
@@ -172,11 +182,12 @@ public:
      * @param debug Create a debug OpenGL context.
      */
     CV_EXPORTS static cv::Ptr<V4D> make(const cv::Size& initialSize, bool offscreen, const string& title, int major = 3,
-            int minor = 2, bool compat = false, int samples = 0, bool debug = false);
+            int minor = 2, bool compat = false, int samples = 0, bool debug = true);
     /*!
     * Default destructor
     */
     CV_EXPORTS virtual ~V4D();
+    CV_EXPORTS void init();
     /*!
     * The internal framebuffer exposed as OpenGL Texture2D.
     * @return The texture object.
@@ -311,6 +322,7 @@ public:
     * Set the window size.
     * @param sz The new window size.
     */
+    CV_EXPORTS void resizeWindow(const cv::Size& sz);
     CV_EXPORTS void setWindowSize(const cv::Size& sz);
     /*!
     * Get the window size
@@ -414,11 +426,10 @@ private:
     cv::Vec2f getMousePosition();
     bool keyboard_event(int key, int scancode, int action, int modifiers);
     void setMousePosition(int x, int y);
-    nanogui::Screen& screen();
-    FormHelper& form();
     FrameBufferContext& fbCtx();
     CLVAContext& clvaCtx();
     NanoVGContext& nvgCtx();
+    NanoguiContext& nguiCtx();
     GLContext& glCtx();
     GLFWwindow* getGLFWWindow();
 };

--- a/modules/v4d/samples/beauty-demo.cpp
+++ b/modules/v4d/samples/beauty-demo.cpp
@@ -25,8 +25,8 @@ using std::string;
 #endif

 /** Application parameters **/
-constexpr unsigned int WIDTH = 1920;
-constexpr unsigned int HEIGHT = 1080;
+constexpr unsigned int WIDTH = 1280;
+constexpr unsigned int HEIGHT = 720;
 constexpr double SCALE = 0.125; //Scale at which face detection is performed
 constexpr bool OFFSCREEN = false;
 #ifndef __EMSCRIPTEN__

--- a/modules/v4d/samples/cube-demo.cpp
+++ b/modules/v4d/samples/cube-demo.cpp
@@ -6,8 +6,8 @@
 #include <opencv2/v4d/v4d.hpp>

 //adapted from https://gitlab.com/wikibooks-opengl/modern-tutorials/-/blob/master/tut05_cube/cube.cpp
-constexpr long unsigned int WIDTH = 1920;
-constexpr long unsigned int HEIGHT = 1080;
+constexpr long unsigned int WIDTH = 1280;
+constexpr long unsigned int HEIGHT = 720;
 constexpr bool OFFSCREEN = false;
 #ifndef __EMSCRIPTEN__
 constexpr double FPS = 60;

--- a/modules/v4d/samples/custom_source_and_sink.cpp
+++ b/modules/v4d/samples/custom_source_and_sink.cpp
@@ -12,18 +12,19 @@ int main() {
     v4d->setVisible(true);
     //Make a Source that generates rainbow frames.
     Source src([](cv::UMat& frame){
+        static long cnt = 0;
        //The source is responsible for initializing the frame. The frame stays allocated, which makes create() have no effect in further iterations.
        frame.create(Size(1280, 720), CV_8UC3);
-       frame = colorConvert(Scalar(int((cv::getTickCount() / cv::getTickFrequency()) * 50) % 180, 128, 128, 255), COLOR_HLS2BGR);
+       frame = colorConvert(Scalar(++cnt % 180, 128, 128, 255), COLOR_HLS2BGR);
        return true;
     }, 60.0f);

     //Make a Sink the saves each frame to a PNG file.
     Sink sink([](const cv::UMat& frame){
-        static long cnt = 0;
         try {
 #ifndef __EMSCRIPTEN__
-            imwrite(std::to_string(cnt++) + ".png", frame);
+            static long cnt = 0;
+            imwrite(std::to_string(cnt++) + ".png", frame);
 #else
             CV_UNUSED(frame);
 #endif
@@ -53,6 +54,7 @@ int main() {
             textAlign(NVG_ALIGN_CENTER | NVG_ALIGN_TOP);
             text(sz.width / 2.0, sz.height / 2.0, hr.c_str(), hr.c_str() + hr.size());
         });
+        updateFps(v4d,true);
         v4d->write(); //Write video to the Sink
         return v4d->display(); //Display the framebuffer in the native window
     });
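Both lambdas now keep their frame counter as a static local, which is easy to misread: a static inside a lambda body is initialized once and shared by every invocation of that lambda — exactly what a per-stream frame counter needs — and moving the Sink's counter inside the #ifndef branch also avoids an unused variable under Emscripten. The pattern in isolation, as a sketch:

// Sketch of the static-local-in-lambda pattern used by the Source/Sink above.
#include <cstdio>
#include <functional>

int main() {
    std::function<int()> hue = [] {
        static long cnt = 0;   // one counter, shared by all calls to this lambda
        return ++cnt % 180;    // wraps around like the rainbow Source's HLS hue
    };
    std::printf("%d %d %d\n", hue(), hue(), hue()); // prints: 1 2 3
}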

--- a/modules/v4d/samples/display_image.cpp
+++ b/modules/v4d/samples/display_image.cpp
@@ -1,6 +1,10 @@
 #include <opencv2/v4d/v4d.hpp>
 #include <opencv2/imgcodecs.hpp>

+#ifdef __EMSCRIPTEN__
+#  include <fstream>
+#endif
+
 int main() {
     using namespace cv;
     using namespace cv::v4d;
@@ -9,13 +13,18 @@ int main() {
     Ptr<V4D> v4d = V4D::make(Size(1280, 720), "Show image");
     v4d->setVisible(true);
     //An image
+#ifdef __EMSCRIPTEN__
+    Mat image = read_image("doc/lena.png");
+#else
     Mat image = imread(samples::findFile("lena.jpg"));
-    //Feeds the image to the video pipeline
-    v4d->feed(image);
+#endif
     //Display the framebuffer in the native window in an endless loop.
     //V4D::run() though it takes a functor is not a context. It is simply an abstraction
     //of a run loop for portability reasons and executes the functor until the application
     //terminates or the functor returns false.
-    v4d->run([=](){ return v4d->display(); });
+    v4d->run([=](){
+        //Feeds the image to the video pipeline
+        v4d->feed(image);
+        return v4d->display();
+    });
 }

--- a/modules/v4d/samples/display_image_fb.cpp
+++ b/modules/v4d/samples/display_image_fb.cpp
@@ -9,15 +9,20 @@ int main() {
     Ptr<V4D> v4d = V4D::make(Size(1280, 720), "Show image");
     v4d->setVisible(true);
     //Read an image as UMat
+#ifdef __EMSCRIPTEN__
+    UMat image = read_image("doc/lena.png").getUMat(ACCESS_READ);
+#else
     UMat image = imread(samples::findFile("lena.jpg")).getUMat(ACCESS_READ);
+#endif
     UMat resized;
-    //Resize the image to framebuffer size
-    resize(image, resized, v4d->getFrameBufferSize());
+    //Resize and color convert the image to framebuffer size
     v4d->fb([&](const UMat& framebuffer) {
-        //Color convert the resized UMat. The framebuffer has alpha.
+        resize(image, resized, v4d->getFrameBufferSize());
         cvtColor(resized, framebuffer, COLOR_RGB2BGRA);
     });
     //Display the framebuffer in the native window in an endless loop
-    v4d->run([=](){ return v4d->display(); });
+    v4d->run([=](){
+        return v4d->display();
+    });
 }

--- a/modules/v4d/samples/example_v4d_beauty-demo.html
+++ b/modules/v4d/samples/example_v4d_beauty-demo.html
@@ -122,15 +122,16 @@
     <canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()" tabindex=-1></canvas>
   </div>
   <script type='text/javascript'>
+    var statusElement = document.getElementById('status');
+    var progressElement = document.getElementById('progress');
     function fixCanvasSize() {
-      Module.canvas.style.width = (Module.canvas.width / window.devicePixelRatio)+ "px";
-      Module.canvas.style.height = (Module.canvas.height / window.devicePixelRatio) + "px";
+      Module.canvas.style.width = (1920 / window.devicePixelRatio)+ "px";
+      Module.canvas.style.height = (1080 / window.devicePixelRatio) + "px";
+      Module.canvas.width = 1920;
+      Module.canvas.height = 1080;
     }
-    var statusElement = document.getElementById('status');
-    var progressElement = document.getElementById('progress');
     var Module = {
       onRuntimeInitialized: function() {
         fixCanvasSize();
@@ -247,6 +248,7 @@
       }
     });
   </script>
+  <script async type="text/javascript" src="/example_v4d_beauty-demo/get.php?res=example_v4d_beauty-demo.js"></script>
   <script async type="text/javascript" src="example_v4d_beauty-demo.js"></script>
 </body>
 </html>

--- a/modules/v4d/samples/example_v4d_cube-demo.html
+++ b/modules/v4d/samples/example_v4d_cube-demo.html
@@ -105,15 +105,11 @@
 </head>
 <body>
   <span id='controls'>
-    <span><button id="captureBtn">Start Capture</button><button id="fullscreenBtn">Fullscreen</button>
+    <span><button id="fullscreenBtn">Fullscreen</button>
     </span>
   </span>
-  <canvas id="cameraCanvas" width="1920" height="1080" style="display: none;"></canvas>
-  <canvas id="canvas1" width="1920" height="1080" style="display: none;"></canvas>
-  <canvas id="canvas2" width="1920" height="1080" style="display: none;"></canvas>
-  <canvas id="canvas3" width="1920" height="1080" style="display: none;"></canvas>
-  <canvas id="canvas4" width="1920" height="1080" style="display: none;"></canvas>
-  <video id="video" width="1920" height="1080" autoplay style="display: none;"></video>
+  <canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
+  <video id="video" width="1280" height="720" autoplay style="display: none;"></video>

   <div class="emscripten" id="status">Downloading...</div>
@@ -126,14 +122,16 @@
     <canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()" tabindex=-1></canvas>
   </div>
   <script type='text/javascript'>
-    function fixCanvasSize() {
-      Module.canvas.style.width = (Module.canvas.width / window.devicePixelRatio)+ "px";
-      Module.canvas.style.height = (Module.canvas.height / window.devicePixelRatio) + "px";
-    }
     var statusElement = document.getElementById('status');
     var progressElement = document.getElementById('progress');

+    function fixCanvasSize() {
+      Module.canvas.style.width = (1280 / window.devicePixelRatio)+ "px";
+      Module.canvas.style.height = (720 / window.devicePixelRatio) + "px";
+      Module.canvas.width = 1280;
+      Module.canvas.height = 720;
+    }
+
     var Module = {
       onRuntimeInitialized: function() {
         fixCanvasSize();
@@ -171,36 +169,6 @@
         return canvas;
       })(),
-      canvas1: (function() {
-        var canvas = document.getElementById('canvas1');
-        // As a default initial behavior, pop up an alert when webgl context is lost. To make your
-        // application robust, you may want to override this behavior before shipping!
-        // See http://www.khronos.org/registry/webgl/specs/latest/1.0/#5.15.2
-        canvas.addEventListener("webglcontextlost", function(e) { alert('WebGL context lost. You will need to reload the page.'); e.preventDefault(); }, false);
-        return canvas;
-      })(),
-      canvas2: (function() {
-        var canvas = document.getElementById('canvas2');
-        // As a default initial behavior, pop up an alert when webgl context is lost. To make your
-        // application robust, you may want to override this behavior before shipping!
-        // See http://www.khronos.org/registry/webgl/specs/latest/1.0/#5.15.2
-        canvas.addEventListener("webglcontextlost", function(e) { alert('WebGL context lost. You will need to reload the page.'); e.preventDefault(); }, false);
-        return canvas;
-      })(),
-      canvas3: (function() {
-        var canvas = document.getElementById('canvas3');
-        // As a default initial behavior, pop up an alert when webgl context is lost. To make your
-        // application robust, you may want to override this behavior before shipping!
-        // See http://www.khronos.org/registry/webgl/specs/latest/1.0/#5.15.2
-        canvas.addEventListener("webglcontextlost", function(e) { alert('WebGL context lost. You will need to reload the page.'); e.preventDefault(); }, false);
-        return canvas;
-      })(),
       setStatus: function(text) {
         if (!Module.setStatus.last) Module.setStatus.last = { time: Date.now(), text: '' };
         if (text === Module.setStatus.last.text) return;
@@ -239,7 +207,6 @@
     };

     let fsButton = document.querySelector("#fullscreenBtn");
-    let cameraBtn = document.querySelector("#captureBtn");
     let videoElement = document.querySelector("#video");
     let cameraCanvas = document.querySelector("#cameraCanvas");
@@ -261,12 +228,6 @@
       requestAnimationFrame(runCapture);
     }

-    cameraBtn.addEventListener('click', async function() {
-      let stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
-      videoElement.srcObject = stream;
-      runCapture();
-    });
-
     fsButton.addEventListener('click', async function () {
       Module.requestFullscreen(false, false)
     });
@@ -280,7 +241,7 @@
       }
     });
   </script>
-  <script async type="text/javascript" src="example_v4d_cube-demo.js"></script>
+  <script async type="text/javascript" src="/example_v4d_cube-demo/get.php?res=example_v4d_cube-demo.js"></script>
 </body>
 </html>

--- a/modules/v4d/samples/example_v4d_custom_source_and_sink.html
+++ b/modules/v4d/samples/example_v4d_custom_source_and_sink.html
@@ -4,7 +4,7 @@
   <meta charset="utf-8">
   <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
   <meta name="viewport" content="width=device-width, initial-scale=2.0, user-scalable=no">
-  <title>Custom Source And Sink Example</title>
+  <title>Custom Source and Sink Example</title>
   <style>
     body {
       font-family: arial;
@@ -105,11 +105,11 @@
 </head>
 <body>
   <span id='controls'>
-    <span><button id="captureBtn">Start Capture</button><button id="fullscreenBtn">Fullscreen</button>
+    <span><button id="fullscreenBtn">Fullscreen</button>
     </span>
   </span>
-  <canvas id="cameraCanvas" width="1920" height="1080" style="display: none;"></canvas>
-  <video id="video" width="1920" height="1080" autoplay style="display: none;"></video>
+  <canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
+  <video id="video" width="1280" height="720" autoplay style="display: none;"></video>

   <div class="emscripten" id="status">Downloading...</div>
@@ -122,21 +122,23 @@
     <canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()" tabindex=-1></canvas>
   </div>
   <script type='text/javascript'>
+    var statusElement = document.getElementById('status');
+    var progressElement = document.getElementById('progress');
     function fixCanvasSize() {
-      Module.canvas.style.width = (Module.canvas.width / window.devicePixelRatio)+ "px";
-      Module.canvas.style.height = (Module.canvas.height / window.devicePixelRatio) + "px";
+      Module.canvas.style.width = (1280 / window.devicePixelRatio)+ "px";
+      Module.canvas.style.height = (720 / window.devicePixelRatio) + "px";
+      Module.canvas.width = 1280;
+      Module.canvas.height = 720;
     }
-    var statusElement = document.getElementById('status');
-    var progressElement = document.getElementById('progress');
     var Module = {
       onRuntimeInitialized: function() {
-        fixCanvasSize();
       },
       preRun: [],
-      postRun: [],
+      postRun: function() {
+        fixCanvasSize();
+      },
       print: (function() {
         var element = document.getElementById('output');
         if (element) element.value = ''; // clear browser cache
@@ -206,7 +208,6 @@
     };

     let fsButton = document.querySelector("#fullscreenBtn");
-    let cameraBtn = document.querySelector("#captureBtn");
     let videoElement = document.querySelector("#video");
     let cameraCanvas = document.querySelector("#cameraCanvas");
@@ -228,12 +229,6 @@
       requestAnimationFrame(runCapture);
     }

-    cameraBtn.addEventListener('click', async function() {
-      let stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
-      videoElement.srcObject = stream;
-      runCapture();
-    });
-
     fsButton.addEventListener('click', async function () {
       Module.requestFullscreen(false, false)
     });
@@ -247,6 +242,7 @@
       }
     });
   </script>
+  <script async type="text/javascript" src="/example_v4d_custom_source_and_sink/get.php?res=example_v4d_custom_source_and_sink.js"></script>
   <script async type="text/javascript" src="example_v4d_custom_source_and_sink.js"></script>
 </body>
 </html>

--- a/modules/v4d/samples/example_v4d_display_image.html
+++ b/modules/v4d/samples/example_v4d_display_image.html
@@ -4,7 +4,7 @@
   <meta charset="utf-8">
   <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
   <meta name="viewport" content="width=device-width, initial-scale=2.0, user-scalable=no">
-  <title>Display Image Example</title>
+  <title>Display Image through Videopipeline Example</title>
   <style>
     body {
       font-family: arial;
@@ -105,11 +105,11 @@
 </head>
 <body>
   <span id='controls'>
-    <span><button id="captureBtn">Start Capture</button><button id="fullscreenBtn">Fullscreen</button>
+    <span><button id="fullscreenBtn">Fullscreen</button>
     </span>
   </span>
-  <canvas id="cameraCanvas" width="1920" height="1080" style="display: none;"></canvas>
-  <video id="video" width="1920" height="1080" autoplay style="display: none;"></video>
+  <canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
+  <video id="video" width="1280" height="720" autoplay style="display: none;"></video>

   <div class="emscripten" id="status">Downloading...</div>
@@ -122,21 +122,23 @@
     <canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()" tabindex=-1></canvas>
   </div>
   <script type='text/javascript'>
+    var statusElement = document.getElementById('status');
+    var progressElement = document.getElementById('progress');
     function fixCanvasSize() {
-      Module.canvas.style.width = (Module.canvas.width / window.devicePixelRatio)+ "px";
-      Module.canvas.style.height = (Module.canvas.height / window.devicePixelRatio) + "px";
+      Module.canvas.style.width = (1280 / window.devicePixelRatio)+ "px";
+      Module.canvas.style.height = (720 / window.devicePixelRatio) + "px";
+      Module.canvas.width = 1280;
+      Module.canvas.height = 720;
     }
-    var statusElement = document.getElementById('status');
-    var progressElement = document.getElementById('progress');
     var Module = {
       onRuntimeInitialized: function() {
-        fixCanvasSize();
       },
       preRun: [],
-      postRun: [],
+      postRun: function() {
+        fixCanvasSize();
+      },
       print: (function() {
         var element = document.getElementById('output');
         if (element) element.value = ''; // clear browser cache
@@ -206,7 +208,6 @@
     };

     let fsButton = document.querySelector("#fullscreenBtn");
-    let cameraBtn = document.querySelector("#captureBtn");
     let videoElement = document.querySelector("#video");
     let cameraCanvas = document.querySelector("#cameraCanvas");
@@ -228,12 +229,6 @@
       requestAnimationFrame(runCapture);
     }

-    cameraBtn.addEventListener('click', async function() {
-      let stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
-      videoElement.srcObject = stream;
-      runCapture();
-    });
-
     fsButton.addEventListener('click', async function () {
       Module.requestFullscreen(false, false)
     });
@@ -247,6 +242,7 @@
       }
     });
   </script>
+  <script async type="text/javascript" src="/example_v4d_display_image/get.php?res=example_v4d_display_image.js"></script>
   <script async type="text/javascript" src="example_v4d_display_image.js"></script>
 </body>
 </html>

--- a/modules/v4d/samples/example_v4d_display_image_fb.html
+++ b/modules/v4d/samples/example_v4d_display_image_fb.html
@@ -4,7 +4,7 @@
   <meta charset="utf-8">
   <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
   <meta name="viewport" content="width=device-width, initial-scale=2.0, user-scalable=no">
-  <title>Display Image and Framebuffer</title>
+  <title>Display Image through FrameBuffer Example</title>
   <style>
     body {
       font-family: arial;
@@ -105,11 +105,11 @@
 </head>
 <body>
   <span id='controls'>
-    <span><button id="captureBtn">Start Capture</button><button id="fullscreenBtn">Fullscreen</button>
+    <span><button id="fullscreenBtn">Fullscreen</button>
     </span>
   </span>
-  <canvas id="cameraCanvas" width="1920" height="1080" style="display: none;"></canvas>
-  <video id="video" width="1920" height="1080" autoplay style="display: none;"></video>
+  <canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
+  <video id="video" width="1280" height="720" autoplay style="display: none;"></video>

   <div class="emscripten" id="status">Downloading...</div>
@@ -122,21 +122,23 @@
     <canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()" tabindex=-1></canvas>
   </div>
   <script type='text/javascript'>
+    var statusElement = document.getElementById('status');
+    var progressElement = document.getElementById('progress');
     function fixCanvasSize() {
-      Module.canvas.style.width = (Module.canvas.width / window.devicePixelRatio)+ "px";
-      Module.canvas.style.height = (Module.canvas.height / window.devicePixelRatio) + "px";
+      Module.canvas.style.width = (1280 / window.devicePixelRatio)+ "px";
+      Module.canvas.style.height = (720 / window.devicePixelRatio) + "px";
+      Module.canvas.width = 1280;
+      Module.canvas.height = 720;
     }
-    var statusElement = document.getElementById('status');
-    var progressElement = document.getElementById('progress');
     var Module = {
       onRuntimeInitialized: function() {
-        fixCanvasSize();
       },
       preRun: [],
-      postRun: [],
+      postRun: function() {
+        fixCanvasSize();
+      },
       print: (function() {
         var element = document.getElementById('output');
         if (element) element.value = ''; // clear browser cache
@@ -205,16 +207,15 @@
       };
     };

-    let fsButton = document.querySelector("#fullscreenBtn");
-    let cameraBtn = document.querySelector("#captureBtn");
-    let videoElement = document.querySelector("#video");
-    let cameraCanvas = document.querySelector("#cameraCanvas");
+    let fsButton1 = document.querySelector("#fullscreenBtn");
+    let videoElement1 = document.querySelector("#video");
+    let cameraCanvas1 = document.querySelector("#cameraCanvas");

     function capture() {
-      let ctx = cameraCanvas.getContext('2d', { willReadFrequently: true });
-      ctx.drawImage(videoElement, 0, 0, cameraCanvas.width, cameraCanvas.height);
-      var imageData = ctx.getImageData(0, 0, cameraCanvas.width, cameraCanvas.height);
+      let ctx = cameraCanvas1.getContext('2d', { willReadFrequently: true });
+      ctx.drawImage(videoElement1, 0, 0, cameraCanvas1.width, cameraCanvas1.height);
+      var imageData = ctx.getImageData(0, 0, cameraCanvas1.width, cameraCanvas1.height);

       let filename = 'v4d_rgba_canvas.raw';
       let stream = FS.open(filename, 'w+');
       if(imageData) {
@@ -228,13 +229,7 @@
       requestAnimationFrame(runCapture);
     }

-    cameraBtn.addEventListener('click', async function() {
-      let stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
-      videoElement.srcObject = stream;
-      runCapture();
-    });
-
-    fsButton.addEventListener('click', async function () {
+    fsButton1.addEventListener('click', async function () {
       Module.requestFullscreen(false, false)
     });
@@ -247,7 +242,8 @@
       }
     });
   </script>
-  <script async type="text/javascript" src="example_v4d_display_image_and_fb.js"></script>
+  <script async type="text/javascript" src="/example_v4d_display_image_fb/get.php?res=example_v4d_display_image_fb.js"></script>
+  <script async type="text/javascript" src="example_v4d_display_image_fb.js"></script>
 </body>
 </html>

@@ -105,11 +105,11 @@
 </head>
 <body>
 <span id='controls'>
-<span><button id="captureBtn">Start Capture</button><button id="fullscreenBtn">Fullscreen</button>
+<span><button id="fullscreenBtn">Fullscreen</button>
 </span>
 </span>
-<canvas id="cameraCanvas" width="1920" height="1080" style="display: none;"></canvas>
-<video id="video" width="1920" height="1080" autoplay style="display: none;"></video>
+<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
+<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
 <div class="emscripten" id="status">Downloading...</div>
@@ -122,15 +122,16 @@
 <canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()" tabindex=-1></canvas>
 </div>
 <script type='text/javascript'>
+  var statusElement = document.getElementById('status');
+  var progressElement = document.getElementById('progress');
   function fixCanvasSize() {
-    Module.canvas.style.width = (Module.canvas.width / window.devicePixelRatio)+ "px";
-    Module.canvas.style.height = (Module.canvas.height / window.devicePixelRatio) + "px";
+    Module.canvas.style.width = (1280 / window.devicePixelRatio)+ "px";
+    Module.canvas.style.height = (720 / window.devicePixelRatio) + "px";
+    Module.canvas.width = 1280;
+    Module.canvas.height = 720;
   }
-  var statusElement = document.getElementById('status');
-  var progressElement = document.getElementById('progress');
   var Module = {
     onRuntimeInitialized: function() {
       fixCanvasSize();
@@ -206,7 +207,6 @@
   };
   let fsButton = document.querySelector("#fullscreenBtn");
-  let cameraBtn = document.querySelector("#captureBtn");
   let videoElement = document.querySelector("#video");
   let cameraCanvas = document.querySelector("#cameraCanvas");
@@ -228,12 +228,6 @@
     requestAnimationFrame(runCapture);
   }
-  cameraBtn.addEventListener('click', async function() {
-    let stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
-    videoElement.srcObject = stream;
-    runCapture();
-  });
   fsButton.addEventListener('click', async function () {
     Module.requestFullscreen(false, false)
   });
@@ -247,7 +241,7 @@
   }
 });
 </script>
-<script async type="text/javascript" src="example_v4d_font-demo.js"></script>
+<script async type="text/javascript" src="/example_v4d_font-demo/get.php?res=example_v4d_font-demo.js"></script>
 </body>
 </html>

@@ -105,11 +105,11 @@
 </head>
 <body>
 <span id='controls'>
-<span><button id="captureBtn">Start Capture</button><button id="fullscreenBtn">Fullscreen</button>
+<span><button id="fullscreenBtn">Fullscreen</button>
 </span>
 </span>
-<canvas id="cameraCanvas" width="1920" height="1080" style="display: none;"></canvas>
-<video id="video" width="1920" height="1080" autoplay style="display: none;"></video>
+<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
+<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
 <div class="emscripten" id="status">Downloading...</div>
@@ -122,21 +122,23 @@
 <canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()" tabindex=-1></canvas>
 </div>
 <script type='text/javascript'>
+  var statusElement = document.getElementById('status');
+  var progressElement = document.getElementById('progress');
   function fixCanvasSize() {
-    Module.canvas.style.width = (Module.canvas.width / window.devicePixelRatio)+ "px";
-    Module.canvas.style.height = (Module.canvas.height / window.devicePixelRatio) + "px";
+    Module.canvas.style.width = (1280 / window.devicePixelRatio)+ "px";
+    Module.canvas.style.height = (720 / window.devicePixelRatio) + "px";
+    Module.canvas.width = 1280;
+    Module.canvas.height = 720;
   }
-  var statusElement = document.getElementById('status');
-  var progressElement = document.getElementById('progress');
   var Module = {
     onRuntimeInitialized: function() {
-      fixCanvasSize();
     },
     preRun: [],
-    postRun: [],
+    postRun: function() {
+      fixCanvasSize();
+    },
     print: (function() {
       var element = document.getElementById('output');
       if (element) element.value = ''; // clear browser cache
@@ -206,7 +208,6 @@
   };
   let fsButton = document.querySelector("#fullscreenBtn");
-  let cameraBtn = document.querySelector("#captureBtn");
   let videoElement = document.querySelector("#video");
   let cameraCanvas = document.querySelector("#cameraCanvas");
@@ -228,12 +229,6 @@
     requestAnimationFrame(runCapture);
   }
-  cameraBtn.addEventListener('click', async function() {
-    let stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
-    videoElement.srcObject = stream;
-    runCapture();
-  });
   fsButton.addEventListener('click', async function () {
     Module.requestFullscreen(false, false)
   });
@@ -247,6 +242,7 @@
   }
 });
 </script>
+<script async type="text/javascript" src="/example_v4d_font_rendering/get.php?res=example_v4d_font_rendering.js"></script>
 <script async type="text/javascript" src="example_v4d_font_rendering.js"></script>
 </body>
 </html>

@@ -1,10 +1,11 @@
 <!doctype html>
 <html lang="en-us">
 <head>
 <meta charset="utf-8">
 <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
 <meta name="viewport" content="width=device-width, initial-scale=2.0, user-scalable=no">
-<title>Font with GUI Example</title>
+<title>Font with Form based GUI Example</title>
 <style>
   body {
     font-family: arial;
@@ -105,11 +106,11 @@
 </head>
 <body>
 <span id='controls'>
-<span><button id="captureBtn">Start Capture</button><button id="fullscreenBtn">Fullscreen</button>
+<span><button id="fullscreenBtn">Fullscreen</button>
 </span>
 </span>
-<canvas id="cameraCanvas" width="1920" height="1080" style="display: none;"></canvas>
-<video id="video" width="1920" height="1080" autoplay style="display: none;"></video>
+<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
+<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
 <div class="emscripten" id="status">Downloading...</div>
@@ -122,21 +123,23 @@
 <canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()" tabindex=-1></canvas>
 </div>
 <script type='text/javascript'>
+  var statusElement = document.getElementById('status');
+  var progressElement = document.getElementById('progress');
   function fixCanvasSize() {
-    Module.canvas.style.width = (Module.canvas.width / window.devicePixelRatio)+ "px";
-    Module.canvas.style.height = (Module.canvas.height / window.devicePixelRatio) + "px";
+    Module.canvas.style.width = (1280 / window.devicePixelRatio)+ "px";
+    Module.canvas.style.height = (720 / window.devicePixelRatio) + "px";
+    Module.canvas.width = 1280;
+    Module.canvas.height = 720;
   }
-  var statusElement = document.getElementById('status');
-  var progressElement = document.getElementById('progress');
   var Module = {
     onRuntimeInitialized: function() {
-      fixCanvasSize();
     },
     preRun: [],
-    postRun: [],
+    postRun: function() {
+      fixCanvasSize();
+    },
     print: (function() {
       var element = document.getElementById('output');
       if (element) element.value = ''; // clear browser cache
@@ -206,7 +209,6 @@
   };
   let fsButton = document.querySelector("#fullscreenBtn");
-  let cameraBtn = document.querySelector("#captureBtn");
   let videoElement = document.querySelector("#video");
   let cameraCanvas = document.querySelector("#cameraCanvas");
@@ -228,12 +230,6 @@
     requestAnimationFrame(runCapture);
   }
-  cameraBtn.addEventListener('click', async function() {
-    let stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
-    videoElement.srcObject = stream;
-    runCapture();
-  });
   fsButton.addEventListener('click', async function () {
     Module.requestFullscreen(false, false)
   });
@@ -247,6 +243,7 @@
   }
 });
 </script>
+<script async type="text/javascript" src="/example_v4d_font_with_gui/get.php?res=example_v4d_font_with_gui.js"></script>
 <script async type="text/javascript" src="example_v4d_font_with_gui.js"></script>
 </body>
 </html>

@@ -108,8 +108,8 @@
 <span><button id="captureBtn">Start Capture</button><button id="fullscreenBtn">Fullscreen</button>
 </span>
 </span>
-<canvas id="cameraCanvas" width="1920" height="1080" style="display: none;"></canvas>
-<video id="video" width="1920" height="1080" autoplay style="display: none;"></video>
+<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
+<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
 <div class="emscripten" id="status">Downloading...</div>
@@ -122,15 +122,16 @@
 <canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()" tabindex=-1></canvas>
 </div>
 <script type='text/javascript'>
+  var statusElement = document.getElementById('status');
+  var progressElement = document.getElementById('progress');
   function fixCanvasSize() {
-    Module.canvas.style.width = (Module.canvas.width / window.devicePixelRatio)+ "px";
-    Module.canvas.style.height = (Module.canvas.height / window.devicePixelRatio) + "px";
+    Module.canvas.style.width = (1280 / window.devicePixelRatio)+ "px";
+    Module.canvas.style.height = (720 / window.devicePixelRatio) + "px";
+    Module.canvas.width = 1280;
+    Module.canvas.height = 720;
   }
-  var statusElement = document.getElementById('status');
-  var progressElement = document.getElementById('progress');
   var Module = {
     onRuntimeInitialized: function() {
       fixCanvasSize();
@@ -247,7 +248,7 @@
   }
 });
 </script>
-<script async type="text/javascript" src="example_v4d_nanovg-demo.js"></script>
+<script async type="text/javascript" src="/example_v4d_nanovg-demo/get.php?res=example_v4d_nanovg-demo.js"></script>
 </body>
 </html>

@@ -103,13 +103,13 @@
   }
 </style>
 </head>
 <body>
 <span id='controls'>
 <span><button id="captureBtn">Start Capture</button><button id="fullscreenBtn">Fullscreen</button>
 </span>
 </span>
-<canvas id="cameraCanvas" width="1920" height="1080" style="display: none;"></canvas>
-<video id="video" width="1920" height="1080" autoplay style="display: none;"></video>
+<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
+<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
 <div class="emscripten" id="status">Downloading...</div>
@@ -126,10 +126,10 @@
   var progressElement = document.getElementById('progress');
   function fixCanvasSize() {
-    Module.canvas.style.width = (1920 / window.devicePixelRatio)+ "px";
-    Module.canvas.style.height = (1080 / window.devicePixelRatio) + "px";
-    Module.canvas.width = 1920;
-    Module.canvas.height = 1080;
+    Module.canvas.style.width = (1280 / window.devicePixelRatio)+ "px";
+    Module.canvas.style.height = (720 / window.devicePixelRatio) + "px";
+    Module.canvas.width = 1280;
+    Module.canvas.height = 720;
   }
   var Module = {
@@ -249,7 +249,6 @@
 });
 </script>
 <script async type="text/javascript" src="example_v4d_optflow-demo.js"></script>
+<script async type="text/javascript" src="/example_v4d_optflow-demo/get.php?res=example_v4d_optflow-demo.js"></script>
 </body>
 </html>

@@ -108,8 +108,8 @@
 <span><button id="captureBtn">Start Capture</button><button id="fullscreenBtn">Fullscreen</button>
 </span>
 </span>
-<canvas id="cameraCanvas" width="1920" height="1080" style="display: none;"></canvas>
-<video id="video" width="1920" height="1080" autoplay style="display: none;"></video>
+<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
+<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
 <div class="emscripten" id="status">Downloading...</div>
@@ -122,15 +122,16 @@
 <canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()" tabindex=-1></canvas>
 </div>
 <script type='text/javascript'>
+  var statusElement = document.getElementById('status');
+  var progressElement = document.getElementById('progress');
   function fixCanvasSize() {
-    Module.canvas.style.width = (Module.canvas.width / window.devicePixelRatio)+ "px";
-    Module.canvas.style.height = (Module.canvas.height / window.devicePixelRatio) + "px";
+    Module.canvas.style.width = (1280 / window.devicePixelRatio)+ "px";
+    Module.canvas.style.height = (720 / window.devicePixelRatio) + "px";
+    Module.canvas.width = 1280;
+    Module.canvas.height = 720;
   }
-  var statusElement = document.getElementById('status');
-  var progressElement = document.getElementById('progress');
   var Module = {
     onRuntimeInitialized: function() {
       fixCanvasSize();
@@ -247,8 +248,8 @@
   }
 });
 </script>
-<script async type="text/javascript" src="example_v4d_pedestrian-demo.js"></script>
+<script async type="text/javascript" src="/example_v4d_pedestrian-demo/get.php?res=example_v4d_pedestrian-demo.js"></script>
 </body>
 </html>

@@ -105,11 +105,11 @@
 </head>
 <body>
 <span id='controls'>
-<span><button id="captureBtn">Start Capture</button><button id="fullscreenBtn">Fullscreen</button>
+<span><button id="fullscreenBtn">Fullscreen</button>
 </span>
 </span>
-<canvas id="cameraCanvas" width="1920" height="1080" style="display: none;"></canvas>
-<video id="video" width="1920" height="1080" autoplay style="display: none;"></video>
+<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
+<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
 <div class="emscripten" id="status">Downloading...</div>
@@ -122,21 +122,23 @@
 <canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()" tabindex=-1></canvas>
 </div>
 <script type='text/javascript'>
+  var statusElement = document.getElementById('status');
+  var progressElement = document.getElementById('progress');
   function fixCanvasSize() {
-    Module.canvas.style.width = (Module.canvas.width / window.devicePixelRatio)+ "px";
-    Module.canvas.style.height = (Module.canvas.height / window.devicePixelRatio) + "px";
+    Module.canvas.style.width = (1280 / window.devicePixelRatio)+ "px";
+    Module.canvas.style.height = (720 / window.devicePixelRatio) + "px";
+    Module.canvas.width = 1280;
+    Module.canvas.height = 720;
   }
-  var statusElement = document.getElementById('status');
-  var progressElement = document.getElementById('progress');
   var Module = {
     onRuntimeInitialized: function() {
-      fixCanvasSize();
     },
     preRun: [],
-    postRun: [],
+    postRun: function() {
+      fixCanvasSize();
+    },
     print: (function() {
       var element = document.getElementById('output');
       if (element) element.value = ''; // clear browser cache
@@ -206,7 +208,6 @@
   };
   let fsButton = document.querySelector("#fullscreenBtn");
-  let cameraBtn = document.querySelector("#captureBtn");
   let videoElement = document.querySelector("#video");
   let cameraCanvas = document.querySelector("#cameraCanvas");
@@ -228,12 +229,6 @@
     requestAnimationFrame(runCapture);
   }
-  cameraBtn.addEventListener('click', async function() {
-    let stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
-    videoElement.srcObject = stream;
-    runCapture();
-  });
   fsButton.addEventListener('click', async function () {
     Module.requestFullscreen(false, false)
   });
@@ -247,6 +242,7 @@
   }
 });
 </script>
+<script async type="text/javascript" src="/example_v4d_render_opengl/get.php?res=example_v4d_render_opengl.js"></script>
 <script async type="text/javascript" src="example_v4d_render_opengl.js"></script>
 </body>
 </html>

@@ -108,8 +108,8 @@
 <span><button id="captureBtn">Start Capture</button><button id="fullscreenBtn">Fullscreen</button>
 </span>
 </span>
-<canvas id="cameraCanvas" width="1920" height="1080" style="display: none;"></canvas>
-<video id="video" width="1920" height="1080" autoplay style="display: none;"></video>
+<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
+<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
 <div class="emscripten" id="status">Downloading...</div>
@@ -126,10 +126,10 @@
   var progressElement = document.getElementById('progress');
   function fixCanvasSize() {
-    Module.canvas.style.width = (1920 / window.devicePixelRatio)+ "px";
-    Module.canvas.style.height = (1080 / window.devicePixelRatio) + "px";
-    Module.canvas.width = 1920;
-    Module.canvas.height = 1080;
+    Module.canvas.style.width = (1280 / window.devicePixelRatio)+ "px";
+    Module.canvas.style.height = (720 / window.devicePixelRatio) + "px";
+    Module.canvas.width = 1280;
+    Module.canvas.height = 720;
   }
   var Module = {
@@ -248,6 +248,7 @@
   }
 });
 </script>
+<script async type="text/javascript" src="/example_v4d_shader-demo/get.php?res=example_v4d_shader-demo.js"></script>
 <script async type="text/javascript" src="example_v4d_shader-demo.js"></script>
 </body>
 </html>

@@ -4,7 +4,7 @@
 <meta charset="utf-8">
 <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
 <meta name="viewport" content="width=device-width, initial-scale=2.0, user-scalable=no">
-<title>Vector Graphics Example</title>
+<title>Vector graphics example</title>
 <style>
   body {
     font-family: arial;
@@ -105,11 +105,11 @@
 </head>
 <body>
 <span id='controls'>
-<span><button id="captureBtn">Start Capture</button><button id="fullscreenBtn">Fullscreen</button>
+<span><button id="fullscreenBtn">Fullscreen</button>
 </span>
 </span>
-<canvas id="cameraCanvas" width="1920" height="1080" style="display: none;"></canvas>
-<video id="video" width="1920" height="1080" autoplay style="display: none;"></video>
+<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
+<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
 <div class="emscripten" id="status">Downloading...</div>
@@ -122,21 +122,23 @@
 <canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()" tabindex=-1></canvas>
 </div>
 <script type='text/javascript'>
+  var statusElement = document.getElementById('status');
+  var progressElement = document.getElementById('progress');
   function fixCanvasSize() {
-    Module.canvas.style.width = (Module.canvas.width / window.devicePixelRatio)+ "px";
-    Module.canvas.style.height = (Module.canvas.height / window.devicePixelRatio) + "px";
+    Module.canvas.style.width = (1280 / window.devicePixelRatio)+ "px";
+    Module.canvas.style.height = (720 / window.devicePixelRatio) + "px";
+    Module.canvas.width = 1280;
+    Module.canvas.height = 720;
   }
-  var statusElement = document.getElementById('status');
-  var progressElement = document.getElementById('progress');
   var Module = {
     onRuntimeInitialized: function() {
-      fixCanvasSize();
     },
     preRun: [],
-    postRun: [],
+    postRun: function() {
+      fixCanvasSize();
+    },
     print: (function() {
       var element = document.getElementById('output');
       if (element) element.value = ''; // clear browser cache
@@ -206,7 +208,6 @@
   };
   let fsButton = document.querySelector("#fullscreenBtn");
-  let cameraBtn = document.querySelector("#captureBtn");
   let videoElement = document.querySelector("#video");
   let cameraCanvas = document.querySelector("#cameraCanvas");
@@ -228,12 +229,6 @@
     requestAnimationFrame(runCapture);
   }
-  cameraBtn.addEventListener('click', async function() {
-    let stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
-    videoElement.srcObject = stream;
-    runCapture();
-  });
   fsButton.addEventListener('click', async function () {
     Module.requestFullscreen(false, false)
   });
@@ -247,6 +242,7 @@
   }
 });
 </script>
+<script async type="text/javascript" src="/example_v4d_vector_graphics/get.php?res=example_v4d_vector_graphics.js"></script>
 <script async type="text/javascript" src="example_v4d_vector_graphics.js"></script>
 </body>
 </html>

@@ -105,11 +105,11 @@
 </head>
 <body>
 <span id='controls'>
-<span><button id="captureBtn">Start Capture</button><button id="fullscreenBtn">Fullscreen</button>
+<span><button id="fullscreenBtn">Fullscreen</button>
 </span>
 </span>
-<canvas id="cameraCanvas" width="1920" height="1080" style="display: none;"></canvas>
-<video id="video" width="1920" height="1080" autoplay style="display: none;"></video>
+<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
+<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
 <div class="emscripten" id="status">Downloading...</div>
@@ -122,21 +122,23 @@
 <canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()" tabindex=-1></canvas>
 </div>
 <script type='text/javascript'>
+  var statusElement = document.getElementById('status');
+  var progressElement = document.getElementById('progress');
   function fixCanvasSize() {
-    Module.canvas.style.width = (Module.canvas.width / window.devicePixelRatio)+ "px";
-    Module.canvas.style.height = (Module.canvas.height / window.devicePixelRatio) + "px";
+    Module.canvas.style.width = (1280 / window.devicePixelRatio)+ "px";
+    Module.canvas.style.height = (720 / window.devicePixelRatio) + "px";
+    Module.canvas.width = 1280;
+    Module.canvas.height = 720;
  }
-  var statusElement = document.getElementById('status');
-  var progressElement = document.getElementById('progress');
   var Module = {
     onRuntimeInitialized: function() {
-      fixCanvasSize();
     },
     preRun: [],
-    postRun: [],
+    postRun: function() {
+      fixCanvasSize();
+    },
     print: (function() {
       var element = document.getElementById('output');
       if (element) element.value = ''; // clear browser cache
@@ -205,16 +207,14 @@
     };
   };
-  let fsButton = document.querySelector("#fullscreenBtn");
-  let cameraBtn = document.querySelector("#captureBtn");
-  let videoElement = document.querySelector("#video");
-  let cameraCanvas = document.querySelector("#cameraCanvas");
+  let fsButton1 = document.querySelector("#fullscreenBtn");
+  let videoElement1 = document.querySelector("#video");
+  let cameraCanvas1 = document.querySelector("#cameraCanvas");
   function capture() {
-    let ctx = cameraCanvas.getContext('2d', { willReadFrequently: true });
-    ctx.drawImage(videoElement, 0, 0, cameraCanvas.width, cameraCanvas.height);
-    var imageData = ctx.getImageData(0, 0, cameraCanvas.width, cameraCanvas.height);
+    let ctx = cameraCanvas1.getContext('2d', { willReadFrequently: true });
+    ctx.drawImage(videoElement1, 0, 0, cameraCanvas1.width, cameraCanvas1.height);
+    var imageData = ctx.getImageData(0, 0, cameraCanvas1.width, cameraCanvas1.height);
     let filename = 'v4d_rgba_canvas.raw';
     let stream = FS.open(filename, 'w+');
     if(imageData) {
@@ -228,13 +228,7 @@
     requestAnimationFrame(runCapture);
   }
-  cameraBtn.addEventListener('click', async function() {
-    let stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
-    videoElement.srcObject = stream;
-    runCapture();
-  });
-  fsButton.addEventListener('click', async function () {
+  fsButton1.addEventListener('click', async function () {
     Module.requestFullscreen(false, false)
   });
@@ -247,6 +241,7 @@
   }
 });
 </script>
+<script async type="text/javascript" src="/example_v4d_vector_graphics_and_fb/get.php?res=example_v4d_vector_graphics_and_fb.js"></script>
 <script async type="text/javascript" src="example_v4d_vector_graphics_and_fb.js"></script>
 </body>
 </html>

@@ -108,12 +108,8 @@
 <span><button id="captureBtn">Start Capture</button><button id="fullscreenBtn">Fullscreen</button>
 </span>
 </span>
-<canvas id="cameraCanvas" width="1920" height="1080" style="display: none;"></canvas>
-<canvas id="canvas1" width="1920" height="1080" ></canvas>
-<canvas id="canvas2" width="1920" height="1080" ></canvas>
-<canvas id="canvas3" width="1920" height="1080" ></canvas>
-<canvas id="canvas4" width="1920" height="1080" ></canvas>
-<video id="video" width="1920" height="1080" autoplay style="display: none;"></video>
+<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
+<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
 <div class="emscripten" id="status">Downloading...</div>
@@ -126,15 +122,16 @@
 <canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()" tabindex=-1></canvas>
 </div>
 <script type='text/javascript'>
+  var statusElement = document.getElementById('status');
+  var progressElement = document.getElementById('progress');
   function fixCanvasSize() {
-    Module.canvas.style.width = (Module.canvas.width / window.devicePixelRatio)+ "px";
-    Module.canvas.style.height = (Module.canvas.height / window.devicePixelRatio) + "px";
+    Module.canvas.style.width = (1280 / window.devicePixelRatio)+ "px";
+    Module.canvas.style.height = (720 / window.devicePixelRatio) + "px";
+    Module.canvas.width = 1280;
+    Module.canvas.height = 720;
   }
-  var statusElement = document.getElementById('status');
-  var progressElement = document.getElementById('progress');
   var Module = {
     onRuntimeInitialized: function() {
       fixCanvasSize();
@@ -162,7 +159,7 @@
       if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
       console.error(text);
     },
     canvas: (function() {
       var canvas = document.getElementById('canvas');
       // As a default initial behavior, pop up an alert when webgl context is lost. To make your
@@ -172,36 +169,6 @@
       return canvas;
     })(),
-    canvas1: (function() {
-      var canvas = document.getElementById('canvas1');
-      // As a default initial behavior, pop up an alert when webgl context is lost. To make your
-      // application robust, you may want to override this behavior before shipping!
-      // See http://www.khronos.org/registry/webgl/specs/latest/1.0/#5.15.2
-      canvas.addEventListener("webglcontextlost", function(e) { alert('WebGL context lost. You will need to reload the page.'); e.preventDefault(); }, false);
-      return canvas;
-    })(),
-    canvas2: (function() {
-      var canvas = document.getElementById('canvas2');
-      // As a default initial behavior, pop up an alert when webgl context is lost. To make your
-      // application robust, you may want to override this behavior before shipping!
-      // See http://www.khronos.org/registry/webgl/specs/latest/1.0/#5.15.2
-      canvas.addEventListener("webglcontextlost", function(e) { alert('WebGL context lost. You will need to reload the page.'); e.preventDefault(); }, false);
-      return canvas;
-    })(),
-    canvas3: (function() {
-      var canvas = document.getElementById('canvas3');
-      // As a default initial behavior, pop up an alert when webgl context is lost. To make your
-      // application robust, you may want to override this behavior before shipping!
-      // See http://www.khronos.org/registry/webgl/specs/latest/1.0/#5.15.2
-      canvas.addEventListener("webglcontextlost", function(e) { alert('WebGL context lost. You will need to reload the page.'); e.preventDefault(); }, false);
-      return canvas;
-    })(),
     setStatus: function(text) {
       if (!Module.setStatus.last) Module.setStatus.last = { time: Date.now(), text: '' };
       if (text === Module.setStatus.last.text) return;
@@ -281,6 +248,7 @@
   }
 });
 </script>
+<script async type="text/javascript" src="/example_v4d_video-demo/get.php?res=example_v4d_video-demo.js"></script>
 <script async type="text/javascript" src="example_v4d_video-demo.js"></script>
 </body>
 </html>

@@ -4,7 +4,7 @@
 <meta charset="utf-8">
 <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
 <meta name="viewport" content="width=device-width, initial-scale=2.0, user-scalable=no">
-<title>Video Editing</title>
+<title>Video Editing Example</title>
 <style>
   body {
     font-family: arial;
@@ -108,8 +108,8 @@
 <span><button id="captureBtn">Start Capture</button><button id="fullscreenBtn">Fullscreen</button>
 </span>
 </span>
-<canvas id="cameraCanvas" width="1920" height="1080" style="display: none;"></canvas>
-<video id="video" width="1920" height="1080" autoplay style="display: none;"></video>
+<canvas id="cameraCanvas" width="1280" height="720" style="display: none;"></canvas>
+<video id="video" width="1280" height="720" autoplay style="display: none;"></video>
 <div class="emscripten" id="status">Downloading...</div>
@@ -119,24 +119,26 @@
 <div class="emscripten_border">
-<canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()" tabindex=-1></canvas>
+<canvas class="emscripten" width="1280" height="720" id="canvas" oncontextmenu="event.preventDefault()" tabindex=-1></canvas>
 </div>
 <script type='text/javascript'>
+  var statusElement = document.getElementById('status');
+  var progressElement = document.getElementById('progress');
   function fixCanvasSize() {
-    Module.canvas.style.width = (Module.canvas.width / window.devicePixelRatio)+ "px";
-    Module.canvas.style.height = (Module.canvas.height / window.devicePixelRatio) + "px";
+    Module.canvas.style.width = (1280 / window.devicePixelRatio)+ "px";
+    Module.canvas.style.height = (720 / window.devicePixelRatio) + "px";
+    Module.canvas.width = 1280;
+    Module.canvas.height = 720;
   }
-  var statusElement = document.getElementById('status');
-  var progressElement = document.getElementById('progress');
   var Module = {
     onRuntimeInitialized: function() {
-      fixCanvasSize();
     },
     preRun: [],
-    postRun: [],
+    postRun: function() {
+      fixCanvasSize();
+    },
     print: (function() {
       var element = document.getElementById('output');
       if (element) element.value = ''; // clear browser cache
@@ -238,6 +240,7 @@
     Module.requestFullscreen(false, false)
   });
   window.addEventListener('fullscreenchange', function (event) {
     if (document.fullscreenElement) {
       console.log(`Element: ${document.fullscreenElement.id} entered fullscreen mode.`);
@@ -247,6 +250,7 @@
   }
 });
 </script>
+<script async type="text/javascript" src="/example_v4d_video_editing/get.php?res=example_v4d_video_editing.js"></script>
 <script async type="text/javascript" src="example_v4d_video_editing.js"></script>
 </body>
 </html>
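
Note: all of the sample pages above now pin the hidden capture elements and the Emscripten canvas to 1280x720, which has to agree with the framebuffer size the reworked samples below request. A minimal C++ sketch of that contract, using only calls that appear elsewhere in this commit:

    #include <opencv2/v4d/v4d.hpp>

    //Sketch only: the 1280x720 canvas fixed by fixCanvasSize() in the pages
    //above must match the framebuffer size requested from V4D::make(),
    //otherwise the WebAssembly build scales or crops the output.
    constexpr unsigned int WIDTH = 1280;
    constexpr unsigned int HEIGHT = 720;

    int main() {
        cv::Ptr<cv::v4d::V4D> v4d = cv::v4d::V4D::make(cv::Size(WIDTH, HEIGHT), "Size contract sketch");
        v4d->setVisible(true);
        v4d->run([=](){ return v4d->display(); });
    }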

@@ -16,8 +16,8 @@
 #endif

 /** Application parameters **/
-constexpr unsigned int WIDTH = 1920;
-constexpr unsigned int HEIGHT = 1080;
+constexpr unsigned int WIDTH = 1280;
+constexpr unsigned int HEIGHT = 720;
 constexpr bool OFFSCREEN = false;
 #ifndef __EMSCRIPTEN__
 constexpr const char* OUTPUT_FILENAME = "font-demo.mkv";

@@ -22,6 +22,9 @@ int main() {
     });
     //Display the framebuffer in the native window in an endless loop
-    v4d->run([=](){ return v4d->display(); });
+    v4d->run([=](){
+        updateFps(v4d,true);
+        return v4d->display();
+    });
 }
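
The change above is the per-frame pattern used throughout this commit: work that must happen every iteration, here the FPS overlay, moves inside the run() lambda. A sketch of the full loop shape follows; the meaning of updateFps()'s boolean is inferred from its use here (presumably it toggles drawing the overlay):

    #include <opencv2/v4d/v4d.hpp>

    int main() {
        using namespace cv;
        using namespace cv::v4d;
        Ptr<V4D> v4d = V4D::make(Size(1280, 720), "Run loop sketch");
        v4d->setVisible(true);
        //Per-frame work belongs inside the run() lambda; returning display()'s
        //result ends the loop once the native window is closed.
        v4d->run([=](){
            updateFps(v4d, true); //assumption: true means draw the FPS overlay
            return v4d->display();
        });
    }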

@@ -36,6 +36,7 @@ int main() {
         textAlign(NVG_ALIGN_CENTER | NVG_ALIGN_TOP);
         text(sz.width / 2.0, sz.height / 2.0, hw.c_str(), hw.c_str() + hw.size());
     });
+    updateFps(v4d,true);
     //Display the framebuffer in the native window
     return v4d->display();
 });

@@ -5,8 +5,8 @@
 #include <opencv2/v4d/v4d.hpp>

-constexpr unsigned int WIDTH = 1920;
-constexpr unsigned int HEIGHT = 1080;
+constexpr unsigned int WIDTH = 1280;
+constexpr unsigned int HEIGHT = 720;
 constexpr bool OFFSCREEN = false;
 #ifndef __EMSCRIPTEN__
 constexpr const char *OUTPUT_FILENAME = "nanovg-demo.mkv";
@@ -134,27 +134,27 @@ static bool iteration() {
     if (!v4d->capture())
         return false;

-    v4d->fb([&](cv::UMat &frameBuffer) {
-        cvtColor(frameBuffer, rgb, cv::COLOR_BGRA2RGB);
-    });
-
-    //Color-conversion from RGB to HSV. (OpenCL)
-    cv::cvtColor(rgb, hsv, cv::COLOR_RGB2HSV_FULL);
-
-    //split the channels
-    split(hsv,hsvChannels);
-    //Set the current hue
-    hsvChannels[0].setTo(hue);
-    //merge the channels back
-    merge(hsvChannels,hsv);
-
-    //Color-conversion from HSV to RGB. (OpenCL)
-    cv::cvtColor(hsv, rgb, cv::COLOR_HSV2RGB_FULL);
-
-    //Color-conversion from RGB to BGRA. (OpenCL)
-    v4d->fb([&](cv::UMat &frameBuffer) {
-        cv::cvtColor(rgb, frameBuffer, cv::COLOR_RGB2BGRA);
-    });
+//    v4d->fb([&](cv::UMat &frameBuffer) {
+//        cvtColor(frameBuffer, rgb, cv::COLOR_BGRA2RGB);
+//    });
+//
+//    //Color-conversion from RGB to HSV. (OpenCL)
+//    cv::cvtColor(rgb, hsv, cv::COLOR_RGB2HSV_FULL);
+//
+//    //split the channels
+//    split(hsv,hsvChannels);
+//    //Set the current hue
+//    hsvChannels[0].setTo(hue);
+//    //merge the channels back
+//    merge(hsvChannels,hsv);
+//
+//    //Color-conversion from HSV to RGB. (OpenCL)
+//    cv::cvtColor(hsv, rgb, cv::COLOR_HSV2RGB_FULL);
+//
+//    //Color-conversion from RGB to BGRA. (OpenCL)
+//    v4d->fb([&](cv::UMat &frameBuffer) {
+//        cv::cvtColor(rgb, frameBuffer, cv::COLOR_RGB2BGRA);
+//    });

     //Render using nanovg
     v4d->nvg([&](const cv::Size &sz) {
@@ -167,9 +167,7 @@ static bool iteration() {
     v4d->write();

     //If onscreen rendering is enabled it displays the framebuffer in the native window. Returns false if the window was closed.
-    if(!v4d->display())
-        return false;
-
-    return true;
+    return v4d->display();
 }

 #ifndef __EMSCRIPTEN__
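
The hue-shift path above is disabled by commenting it out rather than deleting it. For reference, a self-contained helper equivalent to the disabled block (the same OpenCV calls, merely wrapped into a function):

    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>
    #include <vector>

    //Equivalent of the commented-out block: overwrite the hue plane of an RGB
    //image via HSV; the _FULL variants map hue onto the full 8-bit range.
    static void setHue(const cv::UMat& rgbIn, cv::UMat& rgbOut, uchar hue) {
        cv::UMat hsv;
        std::vector<cv::UMat> channels;
        cv::cvtColor(rgbIn, hsv, cv::COLOR_RGB2HSV_FULL);
        cv::split(hsv, channels);
        channels[0].setTo(hue); //set the current hue on all pixels
        cv::merge(channels, hsv);
        cv::cvtColor(hsv, rgbOut, cv::COLOR_HSV2RGB_FULL);
    }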

@@ -39,8 +39,8 @@ enum PostProcModes {

 /** Application parameters **/
-constexpr unsigned int WIDTH = 1920;
-constexpr unsigned int HEIGHT = 1080;
+constexpr unsigned int WIDTH = 1280;
+constexpr unsigned int HEIGHT = 720;
 const unsigned long DIAG = hypot(double(WIDTH), double(HEIGHT));
 #ifndef __EMSCRIPTEN__
 constexpr const char* OUTPUT_FILENAME = "optflow-demo.mkv";
@@ -477,6 +477,8 @@ int main() {
     try {
         using namespace cv::v4d;
+        v4d->printSystemInfo();
+
         if (!v4d->isOffscreen()) {
             v4d->setVisible(true);
 #ifndef __EMSCRIPTEN__
@@ -488,8 +490,6 @@
 #endif
         }

-        v4d->printSystemInfo();
-
 #ifndef __EMSCRIPTEN__
         Source src = makeCaptureSource(argv[1]);
         v4d->setSource(src);

@@ -9,8 +9,8 @@
 #include <string>

-constexpr unsigned int WIDTH = 1920;
-constexpr unsigned int HEIGHT = 1080;
+constexpr unsigned int WIDTH = 1280;
+constexpr unsigned int HEIGHT = 720;
 const unsigned long DIAG = hypot(double(WIDTH), double(HEIGHT));
 constexpr unsigned int DOWNSIZE_WIDTH = 640;
 constexpr unsigned int DOWNSIZE_HEIGHT = 360;
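
pedestrian-demo keeps its 640x360 detection size while the framebuffer drops to 1280x720. How these constants are consumed is not shown in this hunk; a hedged sketch of the usual pattern (the resize call and the helper name are assumptions, not code from this commit):

    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>

    //Assumption: frames are downscaled to DOWNSIZE_WIDTH x DOWNSIZE_HEIGHT for
    //the comparatively slow detector and detections are scaled back to WIDTH x HEIGHT.
    static cv::UMat makeDetectionInput(const cv::UMat& frame) {
        cv::UMat down;
        cv::resize(frame, down, cv::Size(640, 360)); //DOWNSIZE_WIDTH x DOWNSIZE_HEIGHT
        return down;
    }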

@@ -4,7 +4,8 @@ int main() {
     using namespace cv;
     using namespace cv::v4d;

-    Ptr<V4D> v4d = V4D::make(Size(1280, 720), "GL Blue Screen");
+    Ptr<V4D> v4d = V4D::make(Size(1280, 720), "GL Blue Screen", true);
+    v4d->printSystemInfo();
     v4d->setVisible(true);

     v4d->run([=]() {
@@ -13,7 +14,7 @@ int main() {
         glClearColor(0.0f, 0.0f, 1.0f, 1.0f);
         glClear(GL_COLOR_BUFFER_BIT);
     });
+    updateFps(v4d,true);
     //If onscreen rendering is enabled it displays the framebuffer in the native window.
     //Returns false if the window was closed.
     return v4d->display();

@@ -8,8 +8,8 @@
 using std::cerr;
 using std::endl;

-constexpr long unsigned int WIDTH = 1920;
-constexpr long unsigned int HEIGHT = 1080;
+constexpr long unsigned int WIDTH = 1280;
+constexpr long unsigned int HEIGHT = 720;
 constexpr bool OFFSCREEN = false;
 const unsigned long DIAG = hypot(double(WIDTH), double(HEIGHT));
@@ -119,8 +119,8 @@ static void load_shader() {
     int get_iterations()
     {
-        float pointr = (((gl_FragCoord.x / 1080.0f) - 0.5f) * zoom + center_x) * 5.0f;
-        float pointi = (((gl_FragCoord.y / 1080.0f) - 0.5f) * zoom + center_y) * 5.0f;
+        float pointr = (((gl_FragCoord.x / 720.0f) - 0.5f) * zoom + center_x) * 5.0f;
+        float pointi = (((gl_FragCoord.y / 720.0f) - 0.5f) * zoom + center_y) * 5.0f;
         const float four = 4.0f;
         int iterations = 0;
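
The fragment shader normalizes gl_FragCoord by the framebuffer height on both axes, so the hard-coded divisor has to track the new 720-pixel height. The mapping, mirrored in C++ for reference (a sketch of the formula, not project code):

    //Pixel coordinate to complex-plane coordinate, as in the shader above:
    //normalize by the framebuffer height, recenter, apply zoom, scale to +/-2.5.
    static float toPlane(float coord, float center, float zoom) {
        return (((coord / 720.0f) - 0.5f) * zoom + center) * 5.0f;
    }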

@@ -7,22 +7,25 @@ int main() {
     Ptr<V4D> v4d = V4D::make(Size(1280, 720), "Vector Graphics");
     v4d->setVisible(true);
-    //Creates a NanoVG context and draws a cross-hair on the framebuffer
-    v4d->nvg([](const Size& sz) {
-        //Calls from this namespace may only be used inside a nvg context
-        using namespace cv::v4d::nvg;
-        //Draws a cross hair
-        beginPath();
-        strokeWidth(3.0);
-        strokeColor(Scalar(0,0,255,255)); //BGRA
-        moveTo(sz.width/2.0, 0);
-        lineTo(sz.width/2.0, sz.height);
-        moveTo(0, sz.height/2.0);
-        lineTo(sz.width, sz.height/2.0);
-        stroke();
-    });
     //Display the framebuffer in the native window in an endless loop
-    v4d->run([=](){ return v4d->display(); });
+    v4d->run([=](){
+        v4d->nvg([](const Size& sz) {
+            //Calls from this namespace may only be used inside a nvg context
+            using namespace cv::v4d::nvg;
+            //Draws a cross hair
+            beginPath();
+            strokeWidth(3.0);
+            strokeColor(Scalar(0,0,255,255)); //BGRA
+            moveTo(sz.width/2.0, 0);
+            lineTo(sz.width/2.0, sz.height);
+            moveTo(0, sz.height/2.0);
+            lineTo(sz.width, sz.height/2.0);
+            stroke();
+        });
+        updateFps(v4d,true);
+        return v4d->display();
+    });
 }

@@ -6,27 +6,31 @@ int main() {
     Ptr<V4D> v4d = V4D::make(Size(1280, 720), "Vector Graphics and Framebuffer");
     v4d->setVisible(true);
-    //Creates a NanoVG context and draws a cross-hair on the framebuffer
-    v4d->nvg([](const Size& sz) {
-        //Calls from this namespace may only be used inside a nvg context
-        using namespace cv::v4d::nvg;
-        //Draws a cross-hair
-        beginPath();
-        strokeWidth(3.0);
-        strokeColor(Scalar(0,0,255,255)); //BGRA
-        moveTo(sz.width/2.0, 0);
-        lineTo(sz.width/2.0, sz.height);
-        moveTo(0, sz.height/2.0);
-        lineTo(sz.width, sz.height/2.0);
-        stroke();
-    });
-    v4d->fb([](UMat& framebuffer) {
-        //Heavily blurs the crosshair using a cheap boxFilter
-        boxFilter(framebuffer, framebuffer, -1, Size(15, 15), Point(-1,-1), true, BORDER_REPLICATE);
-    });
     //Display the framebuffer in the native window in an endless loop
-    v4d->run([=](){ return v4d->display(); });
+    v4d->run([=](){
+        //Creates a NanoVG context and draws a cross-hair on the framebuffer
+        v4d->nvg([](const Size& sz) {
+            //Calls from this namespace may only be used inside a nvg context
+            using namespace cv::v4d::nvg;
+            //Draws a cross-hair
+            beginPath();
+            strokeWidth(3.0);
+            strokeColor(Scalar(0,0,255,255)); //BGRA
+            moveTo(sz.width/2.0, 0);
+            lineTo(sz.width/2.0, sz.height);
+            moveTo(0, sz.height/2.0);
+            lineTo(sz.width, sz.height/2.0);
+            stroke();
+        });
+        v4d->fb([](UMat& framebuffer) {
+            //Heavily blurs the crosshair using a cheap boxFilter
+            boxFilter(framebuffer, framebuffer, -1, Size(15, 15), Point(-1,-1), true, BORDER_REPLICATE);
+        });
+        updateFps(v4d,true);
+        return v4d->display();
+    });
 }

@@ -5,8 +5,8 @@
 #include <opencv2/v4d/v4d.hpp>

-constexpr long unsigned int WIDTH = 1920;
-constexpr long unsigned int HEIGHT = 1080;
+constexpr long unsigned int WIDTH = 1280;
+constexpr long unsigned int HEIGHT = 720;
 constexpr bool OFFSCREEN = false;
 const unsigned long DIAG = hypot(double(WIDTH), double(HEIGHT));
 const int GLOW_KERNEL_SIZE = std::max(int(DIAG / 138 % 2 == 0 ? DIAG / 138 + 1 : DIAG / 138), 1);
@@ -206,12 +206,14 @@ static bool iteration() {
     updateFps(v4d, true);

-    v4d->write();
-
     //If onscreen rendering is enabled it displays the framebuffer in the native window. Returns false if the window was closed.
     if (!v4d->display())
         return false;

+    v4d->write();
+
     return true;
 }
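
video-demo now writes only after a successful display(), so no frame is pushed to the sink once the window is gone. The reordered iteration shape as a compact sketch (v4d and updateFps are the sample's globals and helpers):

    //Reordered per-frame step: overlay FPS, bail out if the window was closed,
    //and only then hand the framebuffer to the sink.
    static bool iteration() {
        updateFps(v4d, true);
        if (!v4d->display()) //false once the native window was closed
            return false;
        v4d->write();        //write the current framebuffer to the sink
        return true;
    }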

@@ -1,6 +1,7 @@
 #include <opencv2/v4d/v4d.hpp>

 int main(int argc, char** argv) {
+    try {
     //In case of emscripten
     CV_UNUSED(argc);
     CV_UNUSED(argv);
@@ -15,8 +16,8 @@ int main(int argc, char** argv) {
     //Make the video source
     Source src = makeCaptureSource(argv[1]);
     //Make the video sink
     Sink sink = makeWriterSink(argv[2], VideoWriter::fourcc('V', 'P', '9', '0'), src.fps(), v4d->getFrameBufferSize());

     //Attach source and sink
     v4d->setSource(src);
@@ -42,8 +43,14 @@ int main(int argc, char** argv) {
         textAlign(NVG_ALIGN_CENTER | NVG_ALIGN_TOP);
         text(sz.width / 2.0, sz.height / 2.0, hv.c_str(), hv.c_str() + hv.size());
     });
+    updateFps(v4d,true);

     v4d->write(); //Write video to the Sink
     return v4d->display(); //Display the framebuffer in the native window
 });
+    } catch(std::exception& ex) {
+        cerr << ex.what() << endl;
+    }
 }

@@ -4,68 +4,61 @@
 // Copyright Amir Hassan (kallaballa) <amir@viel-zu.org>

 #include "clvacontext.hpp"

 #include "opencv2/v4d/v4d.hpp"

 namespace cv {
 namespace v4d {
 namespace detail {

-CLVAContext::CLVAContext(FrameBufferContext& mainFbContext) :
-        mainFbContext_(mainFbContext) {
+CLVAContext::CLVAContext(V4D& v4d, FrameBufferContext& mainFbContext) :
+        mainFbContext_(mainFbContext), clvaFbContext_(v4d, "CLVA", mainFbContext) {
 }

 cv::Size CLVAContext::getVideoFrameSize() {
-    assert(videoFrameSize_ == cv::Size(0, 0) || "Video frame size not initialized");
-    return videoFrameSize_;
+    assert(inputVideoFrameSize_ == cv::Size(0, 0) || "Video frame size not initialized");
+    return inputVideoFrameSize_;
 }

-bool CLVAContext::capture(std::function<void(cv::UMat&)> fn, cv::UMat& frameBuffer) {
-    {
-        if (!context_.empty()) {
-#ifndef __EMSCRIPTEN__
-            CLExecScope_t scope(context_);
-#endif
-            fn(videoFrame_);
-            videoFrameSize_ = videoFrame_.size();
-        } else {
-            fn(videoFrame_);
-            videoFrameSize_ = videoFrame_.size();
-        }
-    }
-    {
-#ifndef __EMSCRIPTEN__
-        CLExecScope_t scope(mainFbContext_.getCLExecContext());
-#endif
-        if (videoFrame_.empty())
-            return false;
-        cv::Size fbSize = mainFbContext_.getSize();
-        resizeKeepAspectRatio(videoFrame_, rgbBuffer_, fbSize);
-        cv::cvtColor(rgbBuffer_, frameBuffer, cv::COLOR_RGB2BGRA);
-        assert(frameBuffer.size() == fbSize);
-    }
-    return true;
+cv::UMat CLVAContext::capture(std::function<void(cv::UMat&)> fn) {
+    cv::Size fbSize = fbCtx().getSize();
+    if (!context_.empty()) {
+        {
+#ifndef __EMSCRIPTEN__
+            CLExecScope_t scope(context_);
+#endif
+            fn(readFrame_);
+        }
+        if (readFrame_.empty())
+            return {};
+        inputVideoFrameSize_ = readFrame_.size();
+        fbCtx().execute([this](cv::UMat& frameBuffer) {
+            resizePreserveAspectRatio(readFrame_, readRGBBuffer_, frameBuffer.size());
+            cv::cvtColor(readRGBBuffer_, frameBuffer, cv::COLOR_RGB2BGRA);
+        });
+    } else {
+        fn(readFrame_);
+        if (readFrame_.empty())
+            return {};
+        inputVideoFrameSize_ = readFrame_.size();
+        fbCtx().execute([this](cv::UMat& frameBuffer) {
+            resizePreserveAspectRatio(readFrame_, readRGBBuffer_, frameBuffer.size());
+            cv::cvtColor(readRGBBuffer_, frameBuffer, cv::COLOR_RGB2BGRA);
+        });
+    }
+
+    return readRGBBuffer_;
 }

-void CLVAContext::write(std::function<void(const cv::UMat&)> fn, const cv::UMat& frameBuffer) {
-    {
-#ifndef __EMSCRIPTEN__
-        CLExecScope_t scope(mainFbContext_.getCLExecContext());
-#endif
-        cv::cvtColor(frameBuffer, rgbBuffer_, cv::COLOR_BGRA2RGB);
-        if(videoFrameSize_ == cv::Size(0,0))
-            videoFrameSize_ = rgbBuffer_.size();
-        cv::resize(rgbBuffer_, videoFrame_, videoFrameSize_);
-    }
-    assert(videoFrame_.size() == videoFrameSize_);
-    {
+void CLVAContext::write(std::function<void(const cv::UMat&)> fn) {
+    fbCtx().execute([=,this](cv::UMat& frameBuffer) {
+        frameBuffer.copyTo(writeFrame_);
+    });
 #ifndef __EMSCRIPTEN__
     CLExecScope_t scope(context_);
 #endif
-        fn(videoFrame_.clone());
-    }
+    cv::cvtColor(writeFrame_, writeRGBBuffer_, cv::COLOR_BGRA2RGB);
+    fn(writeRGBBuffer_);
 }

 bool CLVAContext::hasContext() {
@@ -81,6 +74,10 @@ void CLVAContext::copyContext() {
 CLExecContext_t CLVAContext::getCLExecContext() {
     return context_;
 }
+
+FrameBufferContext& CLVAContext::fbCtx() {
+    return clvaFbContext_;
+}
 }
 }
 }
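
The refactored CLVAContext no longer works on a caller-supplied buffer: capture() returns the RGB read buffer (empty when the source is exhausted) and write() pulls directly from the context's own framebuffer. A hypothetical call site under that contract (clva, videoCapture and videoWriter are illustrative names, not code from this commit):

    //Hypothetical call site for the new interface: the functor produces or
    //consumes the frame, the context handles the framebuffer blit itself.
    cv::UMat frame = clva.capture([&](cv::UMat& dst) {
        videoCapture.read(dst); //produce the next input frame
    });
    if (frame.empty())
        return false; //source exhausted
    clva.write([&](const cv::UMat& rgb) {
        videoWriter.write(rgb); //consume the RGB frame
    });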

@@ -20,17 +20,21 @@ class CLVAContext {
     friend class cv::v4d::V4D;
     CLExecContext_t context_;
     FrameBufferContext& mainFbContext_;
-    cv::UMat videoFrame_;
-    cv::UMat rgbBuffer_;
+    FrameBufferContext clvaFbContext_;
+    cv::UMat readFrame_;
+    cv::UMat writeFrame_;
+    cv::UMat readRGBBuffer_;
+    cv::UMat writeRGBBuffer_;
     bool hasContext_ = false;
-    cv::Size videoFrameSize_;
+    cv::Size inputVideoFrameSize_;
     CLExecContext_t getCLExecContext();
+    FrameBufferContext& fbCtx();
 public:
     /*!
      * Create the CLVAContext
      * @param fbContext The corresponding framebuffer context
      */
-    CLVAContext(FrameBufferContext& fbContext);
+    CLVAContext(V4D& v4d, FrameBufferContext& fbContext);
     /*!
      * Get the current video frame size
      * @return The current video frame size
@@ -42,12 +46,12 @@ public:
      * @param fn The functor that provides the data.
      * @return true if successful-
      */
-    bool capture(std::function<void(cv::UMat&)> fn, cv::UMat& framebuffer);
+    cv::UMat capture(std::function<void(cv::UMat&)> fn);
     /*!
      * Called to pass the frambuffer to a functor which consumes it (e.g. writes to a video file).
      * @param fn The functor that consumes the data,
      */
-    void write(std::function<void(const cv::UMat&)> fn, const cv::UMat& framebuffer);
+    void write(std::function<void(const cv::UMat&)> fn);
     /*FIXME only public till https://github.com/opencv/opencv/pull/22780 is resolved.
      * required for manual initialization of VideoCapture/VideoWriter

@ -2,6 +2,10 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright Amir Hassan (kallaballa) <amir@viel-zu.org>
+#define GL_GLEXT_PROTOTYPES 1
+#include <GLFW/glfw3.h>
+#include <GL/gl.h>
+#include <GL/glext.h>
#include "framebuffercontext.hpp"
@ -9,26 +13,39 @@
#include "opencv2/v4d/v4d.hpp"
#include "glcontext.hpp"
#include "nanovgcontext.hpp"
+#include "nanoguicontext.hpp"
+#include <opencv2/core/opengl.hpp>

namespace cv {
namespace v4d {
namespace detail {
+long window_cnt = 0;

-FrameBufferContext::FrameBufferContext(V4D& v4d, const FrameBufferContext& other) : FrameBufferContext(v4d, other.frameBufferSize_, true, other.title_, other.major_, other.minor_, other.compat_, other.samples_, other.debug_, other.glfwWindow_, &other) {
+FrameBufferContext::FrameBufferContext(V4D& v4d, const string& title, const FrameBufferContext& other) : FrameBufferContext(v4d, other.frameBufferSize_, true, title, other.major_, other.minor_, other.compat_, other.samples_, other.debug_, other.glfwWindow_, &other) {
}

FrameBufferContext::FrameBufferContext(V4D& v4d, const cv::Size& frameBufferSize, bool offscreen,
        const string& title, int major, int minor, bool compat, int samples, bool debug, GLFWwindow* sharedWindow, const FrameBufferContext* parent) :
-        offscreen_(offscreen), title_(title), major_(major), minor_(
-        minor), compat_(compat), samples_(samples), debug_(debug), frameBufferSize_(frameBufferSize), isShared_(sharedWindow != nullptr), parent_(parent) {
-    if (glfwInit() != GLFW_TRUE)
-        assert(false);
+        v4d_(&v4d), offscreen_(offscreen), title_(title), major_(major), minor_(
+        minor), compat_(compat), samples_(samples), debug_(debug), viewport_(0, 0, frameBufferSize.width, frameBufferSize.height), windowSize_(frameBufferSize), frameBufferSize_(frameBufferSize), isShared_(false), sharedWindow_(sharedWindow), parent_(parent) {
+    init();
+}
+
+FrameBufferContext::~FrameBufferContext() {
+    teardown();
+}
+
+void FrameBufferContext::init() {
#ifndef OPENCV_V4D_USE_ES3
-    if(parent_ != nullptr)
+    if(parent_ != nullptr) {
        textureID_ = parent_->textureID_;
+        isShared_ = true;
+    }
#else
    isShared_ = false;
#endif
+    if (glfwInit() != GLFW_TRUE)
+        assert(false);

    glfwSetErrorCallback(cv::v4d::glfw_error_callback);

    if (debug_)
@ -36,7 +53,8 @@ FrameBufferContext::FrameBufferContext(V4D& v4d, const cv::Size& frameBufferSize
    if (offscreen_)
        glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
+    else
+        glfwWindowHint(GLFW_VISIBLE, GLFW_TRUE);

    glfwSetTime(0);
#ifdef __APPLE__
    glfwWindowHint (GLFW_CONTEXT_VERSION_MAJOR, 3);
@ -63,25 +81,29 @@ FrameBufferContext::FrameBufferContext(V4D& v4d, const cv::Size& frameBufferSize
glfwWindowHint(GLFW_STENCIL_BITS, 8); glfwWindowHint(GLFW_STENCIL_BITS, 8);
glfwWindowHint(GLFW_DEPTH_BITS, 24); glfwWindowHint(GLFW_DEPTH_BITS, 24);
glfwWindowHint(GLFW_RESIZABLE, GLFW_FALSE); glfwWindowHint(GLFW_RESIZABLE, GLFW_FALSE);
glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
// glfwWindowHint(GLFW_DOUBLEBUFFER, GL_FALSE); // glfwWindowHint(GLFW_DOUBLEBUFFER, GL_FALSE);
glfwWindow_ = glfwCreateWindow(frameBufferSize.width, frameBufferSize.height, title_.c_str(), nullptr, glfwWindow_ = glfwCreateWindow(frameBufferSize_.width, frameBufferSize_.height, std::to_string(++window_cnt).c_str(), nullptr,
sharedWindow); sharedWindow_);
if (glfwWindow_ == NULL) { if (glfwWindow_ == NULL) {
assert(false); assert(false);
} }
glfwMakeContextCurrent(glfwWindow_); cerr << "WINDOW: " << glfwWindow_ << "/" << frameBufferSize_ << endl;
this->resizeWindow(frameBufferSize_);
#ifndef OPENCV_V4D_USE_ES3 #ifndef OPENCV_V4D_USE_ES3
glewExperimental = true; // glewExperimental = true;
glewInit(); // glewInit();
try { try {
this->makeCurrent();
if (isClGlSharingSupported()) if (isClGlSharingSupported())
cv::ogl::ocl::initializeContextFromGL(); cv::ogl::ocl::initializeContextFromGL();
else else
clglSharing_ = false; clglSharing_ = false;
this->makeNoneCurrent();
} catch (std::exception& ex) { } catch (std::exception& ex) {
cerr << "CL-GL sharing failed: " << ex.what() << endl; cerr << "CL-GL sharing failed: " << ex.what() << endl;
clglSharing_ = false; clglSharing_ = false;
@ -89,19 +111,16 @@ FrameBufferContext::FrameBufferContext(V4D& v4d, const cv::Size& frameBufferSize
        cerr << "CL-GL sharing failed with unknown error." << endl;
        clglSharing_ = false;
    }
+    context_ = CLExecContext_t::getCurrent();
#else
    clglSharing_ = false;
#endif

    setup(frameBufferSize_);
-#ifndef __EMSCRIPTEN__
-    context_ = CLExecContext_t::getCurrent();
-#endif
-    glfwSetWindowUserPointer(getGLFWWindow(), &v4d);
+    glfwSetWindowUserPointer(getGLFWWindow(), v4d_);
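The constructor leans on GLFW context sharing: every per-context window receives another window as glfwCreateWindow()'s share parameter, which is why a texture ID created in the parent stays valid in the child. A minimal stand-alone illustration of that GLFW mechanism (offscreen, error handling trimmed):

    #include <GLFW/glfw3.h>
    #include <cassert>

    int main() {
        assert(glfwInit() == GLFW_TRUE);
        glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE); // offscreen, as FrameBufferContext does
        GLFWwindow* parent = glfwCreateWindow(640, 480, "parent", nullptr, nullptr);
        // Passing 'parent' as the share argument puts both contexts in one
        // object namespace: textures, buffers and renderbuffers are shared.
        GLFWwindow* child = glfwCreateWindow(640, 480, "child", nullptr, parent);
        assert(parent != nullptr && child != nullptr);
        glfwMakeContextCurrent(child); // objects created in 'parent' are usable here
        glfwDestroyWindow(child);
        glfwDestroyWindow(parent);
        glfwTerminate();
        return 0;
    }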
    glfwSetCursorPosCallback(getGLFWWindow(), [](GLFWwindow* glfwWin, double x, double y) {
        V4D* v4d = reinterpret_cast<V4D*>(glfwGetWindowUserPointer(glfwWin));
-        v4d->screen().cursor_pos_callback_event(x, y);
+        v4d->nguiCtx().screen().cursor_pos_callback_event(x, y);
        auto cursor = v4d->getMousePosition();
        auto diff = cursor - cv::Vec2f(x, y);
        if (v4d->isMouseDrag()) {
@ -113,7 +132,7 @@ FrameBufferContext::FrameBufferContext(V4D& v4d, const cv::Size& frameBufferSize
    glfwSetMouseButtonCallback(getGLFWWindow(),
            [](GLFWwindow* glfwWin, int button, int action, int modifiers) {
                V4D* v4d = reinterpret_cast<V4D*>(glfwGetWindowUserPointer(glfwWin));
-                v4d->screen().mouse_button_callback_event(button, action, modifiers);
+                v4d->nguiCtx().screen().mouse_button_callback_event(button, action, modifiers);
                if (button == GLFW_MOUSE_BUTTON_RIGHT) {
                    v4d->setMouseDrag(action == GLFW_PRESS);
                }
@ -122,65 +141,64 @@ FrameBufferContext::FrameBufferContext(V4D& v4d, const cv::Size& frameBufferSize
    glfwSetKeyCallback(getGLFWWindow(),
            [](GLFWwindow* glfwWin, int key, int scancode, int action, int mods) {
                V4D* v4d = reinterpret_cast<V4D*>(glfwGetWindowUserPointer(glfwWin));
-                v4d->screen().key_callback_event(key, scancode, action, mods);
+                v4d->nguiCtx().screen().key_callback_event(key, scancode, action, mods);
            }
    );
    glfwSetCharCallback(getGLFWWindow(), [](GLFWwindow* glfwWin, unsigned int codepoint) {
        V4D* v4d = reinterpret_cast<V4D*>(glfwGetWindowUserPointer(glfwWin));
-        v4d->screen().char_callback_event(codepoint);
+        v4d->nguiCtx().screen().char_callback_event(codepoint);
    }
    );
    glfwSetDropCallback(getGLFWWindow(),
            [](GLFWwindow* glfwWin, int count, const char** filenames) {
                V4D* v4d = reinterpret_cast<V4D*>(glfwGetWindowUserPointer(glfwWin));
-                v4d->screen().drop_callback_event(count, filenames);
+                v4d->nguiCtx().screen().drop_callback_event(count, filenames);
            }
    );
-    glfwSetScrollCallback(getGLFWWindow(), [](GLFWwindow* glfwWin, double x, double y) {
-        V4D* v4d = reinterpret_cast<V4D*>(glfwGetWindowUserPointer(glfwWin));
-        std::vector<nanogui::Widget*> widgets;
-        find_widgets(&v4d->screen(), widgets);
-        for (auto* w : widgets) {
-            auto mousePos = nanogui::Vector2i(v4d->getMousePosition()[0] / v4d->fbCtx().getXPixelRatio(), v4d->getMousePosition()[1] / v4d->fbCtx().getYPixelRatio());
-            if(contains_absolute(w, mousePos)) {
-                v4d->screen().scroll_callback_event(x, y);
-                return;
-            }
-        }
-        v4d->zoom(y < 0 ? 1.1 : 0.9);
-    }
+    glfwSetScrollCallback(getGLFWWindow(),
+            [](GLFWwindow* glfwWin, double x, double y) {
+                V4D* v4d = reinterpret_cast<V4D*>(glfwGetWindowUserPointer(glfwWin));
+                std::vector<nanogui::Widget*> widgets;
+                find_widgets(&v4d->nguiCtx().screen(), widgets);
+                for (auto* w : widgets) {
+                    auto mousePos = nanogui::Vector2i(v4d->getMousePosition()[0] / v4d->fbCtx().getXPixelRatio(), v4d->getMousePosition()[1] / v4d->fbCtx().getYPixelRatio());
+                    if(contains_absolute(w, mousePos)) {
+                        v4d->nguiCtx().screen().scroll_callback_event(x, y);
+                        return;
+                    }
+                }
+//                v4d->zoom(y < 0 ? 1.1 : 0.9);
+            }
    );
    glfwSetFramebufferSizeCallback(getGLFWWindow(),
            [](GLFWwindow* glfwWin, int width, int height) {
                V4D* v4d = reinterpret_cast<V4D*>(glfwGetWindowUserPointer(glfwWin));
-                v4d->screen().resize_callback_event(width, height);
+                v4d->setWindowSize(cv::Size(width, height));
                cv::Rect& vp = v4d->viewport();
                vp.x = 0;
                vp.y = 0;
                vp.width = width;
                vp.height = height;
#ifndef __EMSCRIPTEN__
-                v4d->nvgCtx().fbCtx().teardown();
-                v4d->glCtx().fbCtx().teardown();
-                v4d->fbCtx().teardown();
-                v4d->fbCtx().setup(cv::Size(width, height));
-                v4d->glCtx().fbCtx().setup(cv::Size(width, height));
-                v4d->nvgCtx().fbCtx().setup(cv::Size(width, height));
+                if(v4d->isResizable()) {
+                    v4d->nvgCtx().fbCtx().teardown();
+                    v4d->glCtx().fbCtx().teardown();
+                    v4d->fbCtx().teardown();
+                    v4d->fbCtx().setup(cv::Size(width, height));
+                    v4d->glCtx().fbCtx().setup(cv::Size(width, height));
+                    v4d->nvgCtx().fbCtx().setup(cv::Size(width, height));
+                }
#endif
            });
}

-FrameBufferContext::~FrameBufferContext() {
-    teardown();
-}
void FrameBufferContext::setup(const cv::Size& sz) {
    frameBufferSize_ = sz;
-    glfwMakeContextCurrent(getGLFWWindow());
+    this->makeCurrent();

    if(!isShared_) {
        GL_CHECK(glGenFramebuffers(1, &frameBufferID_));
+        cerr << "GENFB1: " << frameBufferID_ << "/" << getGLFWWindow() << endl;
        GL_CHECK(glBindFramebuffer(GL_FRAMEBUFFER, frameBufferID_));
        GL_CHECK(glGenRenderbuffers(1, &renderBufferID_));
@ -195,19 +213,14 @@ void FrameBufferContext::setup(const cv::Size& sz) {
        GL_CHECK(
                glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, sz.width, sz.height));
        GL_CHECK(
-                glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, renderBufferID_));
-#ifndef OPENCV_V4D_USE_ES3
-        GL_CHECK(
-                glNamedFramebufferTexture(frameBufferID_, GL_COLOR_ATTACHMENT0, textureID_, 0));
-#else
+                glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, renderBufferID_));
        GL_CHECK(
-                glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureID_, 0));
-#endif
-        assert(glCheckFramebufferStatus(GL_DRAW_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
+                glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureID_, 0));
+        assert(glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
    } else {
        assert(parent_ != nullptr);
-        textureID_ = parent_->textureID_;
        GL_CHECK(glGenFramebuffers(1, &frameBufferID_));
+        cerr << "GENFB2: " << frameBufferID_ << "/" << getGLFWWindow() << endl;
        GL_CHECK(glBindFramebuffer(GL_FRAMEBUFFER, frameBufferID_));
        GL_CHECK(glBindTexture(GL_TEXTURE_2D, textureID_));
@ -221,22 +234,18 @@ void FrameBufferContext::setup(const cv::Size& sz) {
        GL_CHECK(
                glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, sz.width, sz.height));
        GL_CHECK(
-                glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, renderBufferID_));
-#ifndef OPENCV_V4D_USE_ES3
-        GL_CHECK(
-                glNamedFramebufferTexture(frameBufferID_, GL_COLOR_ATTACHMENT0, textureID_, 0));
-#else
+                glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, renderBufferID_));
        GL_CHECK(
-                glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureID_, 0));
-#endif
-        assert(glCheckFramebufferStatus(GL_DRAW_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
+                glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureID_, 0));
+        assert(glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
    }
+    this->makeNoneCurrent();
}
void FrameBufferContext::teardown() {
    using namespace cv::ocl;
+    this->makeCurrent();
-    glfwMakeContextCurrent(getGLFWWindow());
#ifndef __EMSCRIPTEN__
    if(clImage_ != nullptr) {
        CLExecScope_t clExecScope(getCLExecContext());
@ -258,14 +267,18 @@ void FrameBufferContext::teardown() {
        clImage_ = nullptr;
    }
#endif
-    GL_CHECK(glBindTexture(GL_TEXTURE_2D, 0));
-    GL_CHECK(glBindRenderbuffer(GL_RENDERBUFFER, 0));
-    GL_CHECK(glBindFramebuffer(GL_FRAMEBUFFER, 0));
+    glBindTexture(GL_TEXTURE_2D, 0);
+    glGetError();
+    glBindRenderbuffer(GL_RENDERBUFFER, 0);
+    glGetError();
+    glBindFramebuffer(GL_FRAMEBUFFER, 0);
+    glGetError();
    assert(texture_ != nullptr);
    delete texture_;
    GL_CHECK(glDeleteTextures(1, &textureID_));
    GL_CHECK(glDeleteRenderbuffers(1, &renderBufferID_));
    GL_CHECK(glDeleteFramebuffers(1, &frameBufferID_));
+    this->makeNoneCurrent();
}
void FrameBufferContext::toGLTexture2D(cv::UMat& u, cv::ogl::Texture2D& texture) {
@ -304,6 +317,7 @@ void FrameBufferContext::toGLTexture2D(cv::UMat& u, cv::ogl::Texture2D& texture)
        CV_Error_(cv::Error::OpenCLApiCallError,
                ("OpenCL: clEnqueueCopyBufferToImage failed: %d", status));
#endif
+    this->makeNoneCurrent();
}
void FrameBufferContext::fromGLTexture2D(const cv::ogl::Texture2D& texture, cv::UMat& u) {
@ -356,9 +370,9 @@ cv::Size FrameBufferContext::getSize() {
}

void FrameBufferContext::execute(std::function<void(cv::UMat&)> fn) {
-    if(frameBuffer_.empty())
-        frameBuffer_.create(getSize(), CV_8UC4);
-    cv::resize(frameBuffer_,frameBuffer_, getSize());
+    if(tmpBuffer_.empty())
+        tmpBuffer_.create(getSize(), CV_8UC4);
+    cv::resize(tmpBuffer_,frameBuffer_, getSize());
#ifndef __EMSCRIPTEN__
    CLExecScope_t clExecScope(getCLExecContext());
#endif
@ -387,11 +401,7 @@ void FrameBufferContext::blitFrameBufferToScreen(const cv::Rect& viewport,
    if(!isShared_) {
        GL_CHECK(glReadBuffer(GL_COLOR_ATTACHMENT0));
    } else {
-#ifndef OPENCV_V4D_USE_ES3
-        GL_CHECK(glReadBuffer(GL_COLOR_ATTACHMENT0));
-#else
        GL_CHECK(glReadBuffer(GL_COLOR_ATTACHMENT0));
-#endif
    }
    GL_CHECK(glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0));
    GL_CHECK(
@ -399,8 +409,16 @@
}

void FrameBufferContext::begin() {
-    glfwMakeContextCurrent(getGLFWWindow());
-    GL_CHECK(glGetIntegerv( GL_VIEWPORT, viewport_ ));
+    this->makeCurrent();
+    glGetIntegerv( GL_VIEWPORT, viewport_ );
+    glGetError();
+    glBindTexture(GL_TEXTURE_2D, 0);
+    glGetError();
+    glBindRenderbuffer(GL_RENDERBUFFER, 0);
+    glGetError();
+    glBindFramebuffer(GL_FRAMEBUFFER, 0);
+    glGetError();
    GL_CHECK(glBindFramebuffer(GL_FRAMEBUFFER, frameBufferID_));
    GL_CHECK(glBindRenderbuffer(GL_RENDERBUFFER, renderBufferID_));
    GL_CHECK(
@ -409,17 +427,22 @@ void FrameBufferContext::begin() {
    GL_CHECK(
            glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureID_, 0));
    assert(glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
-    GL_CHECK(glViewport(0, 0, frameBufferSize_.width, frameBufferSize_.height));
+    glViewport(0, 0, frameBufferSize_.width, frameBufferSize_.height);
+    glGetError();
}
void FrameBufferContext::end() {
-    GL_CHECK(glBindTexture(GL_TEXTURE_2D, 0));
-    GL_CHECK(glBindRenderbuffer(GL_RENDERBUFFER, 0));
-    GL_CHECK(glBindFramebuffer(GL_FRAMEBUFFER, 0));
-    GL_CHECK(glViewport(viewport_[0], viewport_[1], viewport_[2], viewport_[3]));
+    glBindTexture(GL_TEXTURE_2D, 0);
+    glGetError();
+    glBindRenderbuffer(GL_RENDERBUFFER, 0);
+    glGetError();
+    glBindFramebuffer(GL_FRAMEBUFFER, 0);
+    glGetError();
+    glViewport(viewport_[0], viewport_[1], viewport_[2], viewport_[3]);
+    glGetError();
    GL_CHECK(glFlush());
    GL_CHECK(glFinish());
-    glfwMakeContextCurrent(nullptr);
+    this->makeNoneCurrent();
}
void FrameBufferContext::download(cv::UMat& m) {
@ -467,10 +490,13 @@ void FrameBufferContext::releaseToGL(cv::UMat& m) {
float FrameBufferContext::getXPixelRatio() {
    makeCurrent();
#ifdef __EMSCRIPTEN__
-    return emscripten_get_device_pixel_ratio();
+    float r = emscripten_get_device_pixel_ratio();
+    makeNoneCurrent();
+    return r;
#else
    float xscale, yscale;
    glfwGetWindowContentScale(getGLFWWindow(), &xscale, &yscale);
+    makeNoneCurrent();
    return xscale;
#endif
}
@ -478,20 +504,127 @@ float FrameBufferContext::getXPixelRatio() {
float FrameBufferContext::getYPixelRatio() {
    makeCurrent();
#ifdef __EMSCRIPTEN__
-    return emscripten_get_device_pixel_ratio();
+    float r = emscripten_get_device_pixel_ratio();
+    makeNoneCurrent();
+    return r;
#else
    float xscale, yscale;
    glfwGetWindowContentScale(getGLFWWindow(), &xscale, &yscale);
+    makeNoneCurrent();
    return yscale;
#endif
}
void FrameBufferContext::makeCurrent() {
-    glfwMakeContextCurrent(getGLFWWindow());
+    detail::proxy_to_mainv([this](){
+        glfwMakeContextCurrent(getGLFWWindow());
+    });
}

void FrameBufferContext::makeNoneCurrent() {
-    glfwMakeContextCurrent(nullptr);
+    detail::proxy_to_mainv([](){
+        glfwMakeContextCurrent(nullptr);
+    });
+}
bool FrameBufferContext::isResizable() {
makeCurrent();
return detail::proxy_to_mainb([this](){
return glfwGetWindowAttrib(getGLFWWindow(), GLFW_RESIZABLE) == GLFW_TRUE;
});
makeNoneCurrent();
}
void FrameBufferContext::setResizable(bool r) {
makeCurrent();
detail::proxy_to_mainv([r](){
glfwWindowHint(GLFW_RESIZABLE, r ? GLFW_TRUE : GLFW_FALSE);
});
makeNoneCurrent();
}
cv::Size FrameBufferContext::getWindowSize() {
return windowSize_;
}
void FrameBufferContext::setWindowSize(const cv::Size& sz) {
windowSize_ = sz;
}
void FrameBufferContext::resizeWindow(const cv::Size& sz) {
makeCurrent();
detail::proxy_to_mainv([sz,this](){
glfwSetWindowSize(getGLFWWindow(), sz.width, sz.height);
});
makeNoneCurrent();
}
bool FrameBufferContext::isFullscreen() {
makeCurrent();
return detail::proxy_to_mainb([this](){
return glfwGetWindowMonitor(getGLFWWindow()) != nullptr;
});
makeNoneCurrent();
}
void FrameBufferContext::setFullscreen(bool f) {
makeCurrent();
detail::proxy_to_mainv([f,this](){
auto monitor = glfwGetPrimaryMonitor();
const GLFWvidmode* mode = glfwGetVideoMode(monitor);
if (f) {
glfwSetWindowMonitor(getGLFWWindow(), monitor, 0, 0, mode->width, mode->height,
mode->refreshRate);
resizeWindow(getNativeFrameBufferSize());
} else {
glfwSetWindowMonitor(getGLFWWindow(), nullptr, 0, 0, getSize().width,
getSize().height, 0);
resizeWindow(getSize());
}
});
makeNoneCurrent();
}
cv::Size FrameBufferContext::getNativeFrameBufferSize() {
makeCurrent();
cv::Size* sz = reinterpret_cast<cv::Size*>(detail::proxy_to_mainl([this](){
int w, h;
glfwGetFramebufferSize(getGLFWWindow(), &w, &h);
return reinterpret_cast<long>(new cv::Size{w, h});
}));
makeNoneCurrent();
cv::Size copy = *sz;
delete sz;
return copy;
}
bool FrameBufferContext::isVisible() {
makeCurrent();
return detail::proxy_to_mainb([this]()-> bool {
return glfwGetWindowAttrib(getGLFWWindow(), GLFW_VISIBLE) == GLFW_TRUE;
});
makeNoneCurrent();
}
void FrameBufferContext::setVisible(bool v) {
makeCurrent();
detail::proxy_to_mainv([v,this](){
if (v)
glfwShowWindow(getGLFWWindow());
else
glfwHideWindow(getGLFWWindow());
});
makeNoneCurrent();
}
}
}
}

@ -15,23 +15,16 @@
# else
#  include <CL/cl_gl.h>
# endif
-#endif
-
-#ifndef OPENCV_V4D_USE_ES3
-# include <GL/glew.h>
-# define GLFW_INCLUDE_GLCOREARB
-#else
-# define GLFW_INCLUDE_ES3
-# define GLFW_INCLUDE_GLEXT
+#else
+# include <emscripten/threading.h>
#endif
-#include <GLFW/glfw3.h>
#include <opencv2/core/ocl.hpp>
-#include <opencv2/core/opengl.hpp>
#include <iostream>
#include "opencv2/v4d/util.hpp"
+struct GLFWwindow;

namespace cv {
namespace v4d {
class V4D;
@ -43,10 +36,15 @@ typedef cv::ocl::OpenCLExecutionContextScope CLExecScope_t;
 * The FrameBufferContext acquires the framebuffer from OpenGL (either by up-/download or by cl-gl sharing)
 */
class FrameBufferContext {
+    typedef unsigned int GLuint;
+    typedef signed int GLint;
    friend class CLVAContext;
    friend class GLContext;
    friend class NanoVGContext;
+    friend class NanoguiContext;
    friend class cv::v4d::V4D;
+    V4D* v4d_ = nullptr;
    bool offscreen_;
    string title_;
    int major_;
@ -64,8 +62,10 @@ class FrameBufferContext {
    cl_mem clImage_ = nullptr;
    CLExecContext_t context_;
#endif
+    cv::Size windowSize_;
    cv::Size frameBufferSize_;
    bool isShared_ = false;
+    GLFWwindow* sharedWindow_;
    const FrameBufferContext* parent_;
    /*!
     * The internal framebuffer exposed as OpenGL Texture2D.
@ -100,6 +100,15 @@ public:
    FrameBufferContext& ctx_;
    cv::UMat& m_;
public:
+#ifdef __EMSCRIPTEN__
+    static void glacquire(FrameBufferContext* ctx, cv::UMat* m) {
+        ctx->acquireFromGL(*m);
+    }
+
+    static void glrelease(FrameBufferContext* ctx, cv::UMat* m) {
+        ctx->releaseToGL(*m);
+    }
+#endif
    /*!
     * Acquires the framebuffer via cl-gl sharing.
     * @param ctx The corresponding #FrameBufferContext.
@ -107,16 +116,34 @@ public:
     */
    FrameBufferScope(FrameBufferContext& ctx, cv::UMat& m) :
            ctx_(ctx), m_(m) {
+#ifdef __EMSCRIPTEN__
+        emscripten_sync_run_in_main_runtime_thread(EM_FUNC_SIG_VII, glacquire, &ctx_, &m_);
+#else
        ctx_.acquireFromGL(m_);
+#endif
    }
    /*!
     * Releases the framebuffer via cl-gl sharing.
     */
    ~FrameBufferScope() {
+#ifdef __EMSCRIPTEN__
+        emscripten_sync_run_in_main_runtime_thread(EM_FUNC_SIG_VII, glrelease, &ctx_, &m_);
+#else
        ctx_.releaseToGL(m_);
+#endif
    }
};
+#ifdef __EMSCRIPTEN__
+    static void glbegin(FrameBufferContext* ctx) {
+        ctx->begin();
+    }
+
+    static void glend(FrameBufferContext* ctx) {
+        ctx->end();
+    }
+#endif
    /*!
     * Sets up and tears down OpenGL states.
     */
@ -129,13 +156,21 @@ public:
     */
    GLScope(FrameBufferContext& ctx) :
            ctx_(ctx) {
+#ifdef __EMSCRIPTEN__
+        emscripten_sync_run_in_main_runtime_thread(EM_FUNC_SIG_VI, glbegin, &ctx_);
+#else
        ctx_.begin();
+#endif
    }
    /*!
     * Tears down OpenGL states.
     */
    ~GLScope() {
+#ifdef __EMSCRIPTEN__
+        emscripten_sync_run_in_main_runtime_thread(EM_FUNC_SIG_VI, glend, &ctx_);
+#else
        ctx_.end();
+#endif
    }
};
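Both scope classes are plain RAII guards: acquisition in the constructor, release in the destructor, with the Emscripten build proxying each side to the main runtime thread. A tiny runnable demo of the construction/destruction ordering they rely on (the *Demo types are stand-ins, not the real API):

    #include <iostream>

    struct GLScopeDemo {           // stands in for FrameBufferContext::GLScope
        GLScopeDemo()  { std::cout << "begin(): bind FBO\n"; }
        ~GLScopeDemo() { std::cout << "end(): unbind FBO\n"; }
    };

    struct FrameBufferScopeDemo {  // stands in for FrameBufferContext::FrameBufferScope
        FrameBufferScopeDemo()  { std::cout << "acquireFromGL()\n"; }
        ~FrameBufferScopeDemo() { std::cout << "releaseToGL()\n"; }
    };

    int main() {
        GLScopeDemo gl;            // outer guard: GL state
        FrameBufferScopeDemo fb;   // inner guard: framebuffer mapping
        std::cout << "fn(frameBuffer)\n";
        return 0;                  // destructors run inner-first: release before unbind
    }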
@ -146,13 +181,14 @@ public:
    FrameBufferContext(V4D& v4d, const cv::Size& frameBufferSize, bool offscreen,
            const string& title, int major, int minor, bool compat, int samples, bool debug, GLFWwindow* sharedWindow, const FrameBufferContext* parent);

-    FrameBufferContext(V4D& v4d, const FrameBufferContext& other);
+    FrameBufferContext(V4D& v4d, const string& title, const FrameBufferContext& other);

    /*!
     * Default destructor.
     */
    virtual ~FrameBufferContext();
+    void init();
    void setup(const cv::Size& sz);
    void teardown();
    /*!
@ -179,16 +215,23 @@ public:
     * @return The pixel ratio of the display y-axis.
     */
    CV_EXPORTS float getYPixelRatio();
-    /*!
-     * In case several V4D objects are in use, all objects not in use have to
-     * call #makeNoneCurrent() and only the active one has to call #makeCurrent().
-     */
    CV_EXPORTS void makeCurrent();
+    CV_EXPORTS void makeNoneCurrent();
+    CV_EXPORTS bool isResizable();
+    CV_EXPORTS void setResizable(bool r);
    /*!
     * To make it possible for other V4D objects to become current, all other
     * V4D instances have to become non-current.
     */
-    CV_EXPORTS void makeNoneCurrent();
+    CV_EXPORTS void setWindowSize(const cv::Size& sz);
+    CV_EXPORTS cv::Size getWindowSize();
+    CV_EXPORTS void resizeWindow(const cv::Size& sz);
+    CV_EXPORTS bool isFullscreen();
+    CV_EXPORTS void setFullscreen(bool f);
+    CV_EXPORTS cv::Size getNativeFrameBufferSize();
+    CV_EXPORTS void setVisible(bool v);
+    CV_EXPORTS bool isVisible();
protected:
    /*!
     * Setup OpenGL states.
@ -222,6 +265,7 @@ protected:
     * The UMat used to copy or bind (depending on cl-gl sharing capability) the OpenGL framebuffer.
     */
    cv::UMat frameBuffer_;
+    cv::UMat tmpBuffer_;
    /*!
     * The texture bound to the OpenGL framebuffer.
     */

@ -9,7 +9,7 @@ namespace cv {
namespace v4d {
namespace detail {
GLContext::GLContext(V4D& v4d, FrameBufferContext& fbContext) :
-        mainFbContext_(fbContext), glFbContext_(v4d, fbContext) {
+        mainFbContext_(fbContext), glFbContext_(v4d, "OpenGL", fbContext) {
}

void GLContext::render(std::function<void(const cv::Size&)> fn) {
@ -29,11 +29,8 @@ void GLContext::render(std::function<void(const cv::Size&)> fn) {
    }
#endif
    {
-#ifndef __EMSCRIPTEN__
-        CLExecScope_t scope(glFbContext_.getCLExecContext());
-#endif
-        FrameBufferContext::GLScope glScope(glFbContext_);
-        fn(glFbContext_.getSize());
+        FrameBufferContext::GLScope glScope(fbCtx());
+        fn(fbCtx().getSize());
    }
#ifdef __EMSCRIPTEN__
    {

@ -7,23 +7,11 @@
#define SRC_OPENCV_GLCONTEXT_HPP_

#include "framebuffercontext.hpp"
+#include <nanogui/nanogui.h>
#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif

-#ifndef OPENCV_V4D_USE_ES3
-#define NANOGUI_USE_OPENGL
-#else
-#define NANOGUI_USE_GLES
-#define NANOGUI_GLES_VERSION 3
-#endif
-#include <nanogui/opengl.h>
-#include "opencv2/v4d/util.hpp"
-#include "opencv2/v4d/nvg.hpp"
-
namespace cv {
namespace v4d {
namespace detail {

@ -0,0 +1,45 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright Amir Hassan (kallaballa) <amir@viel-zu.org>
#include "nanoguicontext.hpp"
namespace cv {
namespace v4d {
namespace detail {
NanoguiContext::NanoguiContext(V4D& v4d, FrameBufferContext& fbContext) :
mainFbContext_(fbContext), nguiFbContext_(v4d, "NanoGUI", fbContext) {
fbCtx().makeCurrent();
screen_ = new nanogui::Screen();
screen_->initialize(nguiFbContext_.getGLFWWindow(), false);
form_ = new cv::v4d::FormHelper(screen_);
fbCtx().resizeWindow(fbCtx().getSize());
fbCtx().makeNoneCurrent();
}
void NanoguiContext::render() {
screen().draw_widgets();
}
void NanoguiContext::build(std::function<void(cv::v4d::FormHelper&)> fn) {
fbCtx().makeCurrent();
fn(form());
screen().perform_layout();
fbCtx().makeNoneCurrent();
}
nanogui::Screen& NanoguiContext::screen() {
return *screen_;
}
cv::v4d::FormHelper& NanoguiContext::form() {
return *form_;
}
FrameBufferContext& NanoguiContext::fbCtx() {
return nguiFbContext_;
}
}
}
}
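A hedged usage sketch of build(): the fragment assumes cv::v4d::FormHelper keeps nanogui::FormHelper's add_window()/add_variable() surface, which this commit does not show.

    // Hypothetical caller; 'ngui' is an initialized NanoguiContext:
    ngui.build([](cv::v4d::FormHelper& form) {
        form.add_window(nanogui::Vector2i(10, 10), "Settings"); // assumed API
        static bool enabled = true;
        form.add_variable("Enabled", enabled);                  // assumed API
    });
    // build() makes the GUI context current, runs the functor and calls
    // screen().perform_layout() before releasing the context again.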

@ -0,0 +1,44 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright Amir Hassan (kallaballa) <amir@viel-zu.org>
#ifndef SRC_OPENCV_NANOGUICONTEXT_HPP_
#define SRC_OPENCV_NANOGUICONTEXT_HPP_
#include "framebuffercontext.hpp"
#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif
#include <opencv2/v4d/formhelper.hpp>
namespace cv {
namespace v4d {
namespace detail {
/*!
* Used to setup a nanogui context
*/
class NanoguiContext {
nanogui::Screen* screen_;
cv::v4d::FormHelper* form_;
FrameBufferContext& mainFbContext_;
FrameBufferContext nguiFbContext_;
cv::UMat preFB_;
cv::UMat fb_;
cv::UMat postFB_;
public:
NanoguiContext(V4D& v4d, FrameBufferContext& fbContext);
void render();
void build(std::function<void(cv::v4d::FormHelper&)> fn);
nanogui::Screen& screen();
cv::v4d::FormHelper& form();
FrameBufferContext& fbCtx();
};
}
}
}
#endif /* SRC_OPENCV_NANOGUICONTEXT_HPP_ */

@ -4,15 +4,41 @@
// Copyright Amir Hassan (kallaballa) <amir@viel-zu.org>

#include "nanovgcontext.hpp"
+#include "opencv2/v4d/util.hpp"
+#include "opencv2/v4d/nvg.hpp"

namespace cv {
namespace v4d {
namespace detail {
+#if !defined(GL_RGBA_FLOAT_MODE)
+# define GL_RGBA_FLOAT_MODE 0x8820
+#endif
+
+#ifdef __EMSCRIPTEN__
+#include <emscripten/threading.h>
+
+static void initmenvg(NanoVGContext* initiatenvg) {
+    initiatenvg->init();
+}
+#endif
+
NanoVGContext::NanoVGContext(V4D& v4d, FrameBufferContext& fbContext) :
-        mainFbContext_(fbContext), nvgFbContext_(v4d, fbContext) {
+        v4d_(v4d), screen_(nullptr), context_(nullptr), mainFbContext_(fbContext), nvgFbContext_(v4d, "NanoVG", fbContext) {
+#ifdef __EMSCRIPTEN__
+    emscripten_sync_run_in_main_runtime_thread(EM_FUNC_SIG_VI, initmenvg, this);
+#else
+    init();
+#endif
+}
+
+void NanoVGContext::init() {
+    fbCtx().makeCurrent();
    screen_ = new nanogui::Screen();
-    screen_->initialize(nvgFbContext_.getGLFWWindow(), false);
+    screen_->initialize(fbCtx().getGLFWWindow(), false);
    context_ = screen_->nvg_context();
+    fbCtx().resizeWindow(fbCtx().getSize());
+    fbCtx().makeNoneCurrent();
}
void NanoVGContext::render(std::function<void(const cv::Size&)> fn) {
@ -29,13 +55,13 @@ void NanoVGContext::render(std::function<void(const cv::Size&)> fn) {
    }
#endif
    {
-#ifndef __EMSCRIPTEN__
-        CLExecScope_t scope(nvgFbContext_.getCLExecContext());
-#endif
-        FrameBufferContext::GLScope nvgGlScope(nvgFbContext_);
+        FrameBufferContext::GLScope glScope(fbCtx());
        NanoVGContext::Scope nvgScope(*this);
        cv::v4d::nvg::detail::NVG::initializeContext(context_);
-        fn(nvgFbContext_.getSize());
+        fn(fbCtx().getSize());
+//        fbCtx().makeCurrent();
+//        fbCtx().blitFrameBufferToScreen(cv::Rect(0,0, fbCtx().getSize().width, fbCtx().getSize().height), fbCtx().getSize(), false);
+//        glfwSwapBuffers(fbCtx().getGLFWWindow());
    }
#ifdef __EMSCRIPTEN__
    {
@ -61,7 +87,6 @@ void NanoVGContext::begin() {
    //FIXME mirroring with text somehow doesn't work
//    nvgTranslate(context_, 0, h);
//    nvgScale(context_, 1, -1);
-    GL_CHECK(glViewport(0, 0, w, h));
}

void NanoVGContext::end() {

@ -12,17 +12,11 @@
#include <emscripten.h>
#endif

-#ifndef OPENCV_V4D_USE_ES3
-#define NANOGUI_USE_OPENGL
-#else
-#define NANOGUI_USE_GLES
-#define NANOGUI_GLES_VERSION 3
-#endif
-#include <nanogui/opengl.h>
-#include "opencv2/v4d/util.hpp"
-#include "opencv2/v4d/nvg.hpp"
+namespace nanogui {
+class Screen;
+}
+
+struct NVGcontext;

namespace cv {
namespace v4d {
namespace detail {
@ -30,6 +24,7 @@ namespace detail {
 * Used to setup a nanovg context
 */
class NanoVGContext {
+    V4D& v4d_;
    nanogui::Screen* screen_;
    NVGcontext* context_;
    FrameBufferContext& mainFbContext_;
@ -66,6 +61,8 @@ public:
     * @param fbContext The framebuffer context
     */
    NanoVGContext(V4D& v4d, FrameBufferContext& fbContext);
+    void init();
+
    /*!
     * Execute function object fn inside a nanovg context.
     * The context takes care of setting up opengl and nanovg states.

@ -3,22 +3,94 @@
// of this distribution and at http://opencv.org/license.html.
// Copyright Amir Hassan (kallaballa) <amir@viel-zu.org>

+#include <opencv2/imgcodecs.hpp>
#include "opencv2/v4d/util.hpp"
#include "opencv2/v4d/v4d.hpp"
#include "opencv2/v4d/nvg.hpp"

#ifdef __EMSCRIPTEN__
#  include <emscripten.h>
-#  include <fstream>
+#  include <SDL/SDL.h>
+#  include <SDL/SDL_image.h>
+#  include <SDL/SDL_stdinc.h>
#else
#  include <opencv2/core/ocl.hpp>
#endif

#include <csignal>
+#include <thread>

namespace cv {
namespace v4d {
namespace detail {
long proxy_to_mainl(std::function<long()> fn) {
#ifdef __EMSCRIPTEN__
long (*ptr)() = cv::v4d::detail::get_fn_ptr<0>(fn);
return emscripten_sync_run_in_main_runtime_thread(EM_FUNC_SIG_I, ptr);
#else
return fn();
#endif
}
void proxy_to_mainv(std::function<void()> fn) {
#ifdef __EMSCRIPTEN__
void (*ptr)() = cv::v4d::detail::get_fn_ptr<0>(fn);
emscripten_sync_run_in_main_runtime_thread(EM_FUNC_SIG_V, ptr);
#else
fn();
#endif
}
bool proxy_to_mainb(std::function<bool()> fn) {
#ifdef __EMSCRIPTEN__
bool (*ptr)() = cv::v4d::detail::get_fn_ptr<0>(fn);
return emscripten_sync_run_in_main_runtime_thread(EM_FUNC_SIG_V, ptr);
#else
return fn();
#endif
}
}
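Natively these helpers collapse to a direct call; under Emscripten they block on emscripten_sync_run_in_main_runtime_thread. Since the long-returning variant can move only a single integer across the thread boundary, compound results are heap-allocated and smuggled through the return value, the pattern getNativeFrameBufferSize() uses. A runnable native-side sketch (run_on_main stands in for detail::proxy_to_mainl):

    #include <functional>
    #include <utility>

    // Native branch of the proxy: just invoke the functor in place.
    static long run_on_main(std::function<long()> fn) { return fn(); }

    int main() {
        // Pack a two-field result into a heap object, pass it back as a long...
        auto* p = reinterpret_cast<std::pair<int, int>*>(run_on_main([]() {
            return reinterpret_cast<long>(new std::pair<int, int>(640, 480));
        }));
        std::pair<int, int> copy = *p; // ...then copy and free it on the calling side
        delete p;
        return copy.first == 640 ? 0 : 1;
    }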
#ifdef __EMSCRIPTEN__
Mat read_image(const string &path) {
SDL_Surface *loadedSurface = IMG_Load(path.c_str());
Mat result;
if (loadedSurface == NULL) {
printf("Unable to load image %s! SDL_image Error: %s\n", path.c_str(),
IMG_GetError());
} else {
if (loadedSurface->w == 0 && loadedSurface->h == 0) {
std::cerr << "Empty image loaded" << std::endl;
SDL_FreeSurface(loadedSurface);
return Mat();
}
if(loadedSurface->format->BytesPerPixel == 1) {
result = Mat(loadedSurface->h, loadedSurface->w, CV_8UC1, (unsigned char*) loadedSurface->pixels, loadedSurface->pitch).clone();
cvtColor(result,result, COLOR_GRAY2BGR);
} else if(loadedSurface->format->BytesPerPixel == 3) {
result = Mat(loadedSurface->h, loadedSurface->w, CV_8UC3, (unsigned char*) loadedSurface->pixels, loadedSurface->pitch).clone();
if(loadedSurface->format->Rmask == 0x0000ff)
cvtColor(result,result, COLOR_RGB2BGR);
} else if(loadedSurface->format->BytesPerPixel == 4) {
result = Mat(loadedSurface->h, loadedSurface->w, CV_8UC4, (unsigned char*) loadedSurface->pixels, loadedSurface->pitch).clone();
if(loadedSurface->format->Rmask == 0x000000ff)
cvtColor(result,result, COLOR_RGBA2BGR);
else
cvtColor(result,result, COLOR_RGBA2RGB);
} else {
std::cerr << "Unsupported image depth" << std::endl;
SDL_FreeSurface(loadedSurface);
return Mat();
}
SDL_FreeSurface(loadedSurface);
}
return result;
}
#endif
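Usage mirrors cv::imread; the path is illustrative:

    // Emscripten build, where imgcodecs may be unavailable:
    cv::Mat img = read_image("doc/lena.png"); // 3-channel BGR, or an empty Mat on failure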
unsigned int init_shader(const char* vShader, const char* fShader, const char* outputAttributeName) {
    struct Shader {
        GLenum type;
@ -271,6 +343,7 @@ Sink makeWriterSink(const string& outputFilename, const int fourcc, const float
            cv::VideoWriter::fourcc('V', 'P', '9', '0'), fps, frameSize);

    return Sink([=](const cv::UMat& frame) {
+        cv::resize(frame, frame, frameSize);
        (*writer) << frame;
        return writer->isOpened();
    });
@ -295,11 +368,9 @@ Source makeCaptureSource(int width, int height) {
    using namespace std;
    static cv::Mat tmp(height, width, CV_8UC4);

-    return Source([=](cv::OutputArray& array) {
+    return Source([=](cv::UMat& frame) {
        try {
-            cv::UMat frame = array.getUMat();
-            if (frame.empty())
-                frame.create(cv::Size(width, height), CV_8UC3);
+            frame.create(cv::Size(width, height), CV_8UC3);
            std::ifstream fs("v4d_rgba_canvas.raw", std::fstream::in | std::fstream::binary);
            fs.seekg(0, std::ios::end);
            auto length = fs.tellg();
@ -311,10 +382,10 @@ Source makeCaptureSource(int width, int height) {
                cvtColor(tmp, v, cv::COLOR_BGRA2RGB);
                v.release();
            } else if(length == 0) {
-                frame.setTo(cv::Scalar(255, 0, 0, 255));
+//                frame.setTo(cv::Scalar(0, 0, 0, 255));
                std::cerr << "Error: empty webcam frame received!" << endl;
            } else {
-                frame.setTo(cv::Scalar(0, 0, 255, 255));
+//                frame.setTo(cv::Scalar(0, 0, 0, 255));
                std::cerr << "Error: webcam frame size mismatch!" << endl;
            }
        } catch(std::exception& ex) {

@ -8,8 +8,14 @@
#include "detail/framebuffercontext.hpp"
#include "detail/glcontext.hpp"
#include "detail/nanovgcontext.hpp"
+#include "detail/nanoguicontext.hpp"

#include <sstream>

+#ifdef __EMSCRIPTEN__
+# include <emscripten/html5.h>
+# include <emscripten/threading.h>
+#endif
+
namespace cv {
namespace v4d {
namespace detail {
@ -29,8 +35,8 @@ void gl_check_error(const std::filesystem::path& file, unsigned int line, const
    if (errorCode != 0) {
        std::stringstream ss;
-        ss << "GL failed in " << file.filename() << " (" << line << ") : "
-                << "\nExpression:\n    " << expression << "\nError code:\n    " << errorCode;
+        ss << "GL failed in " << file.filename() << " (" << line << ") : " << "\nExpression:\n    "
+                << expression << "\nError code:\n    " << errorCode;
        throw std::runtime_error(ss.str());
    }
}
@ -46,26 +52,27 @@ cv::Scalar colorConvert(const cv::Scalar& src, cv::ColorConversionCodes code) {
    return dst;
}

-void resizeKeepAspectRatio(const cv::UMat& src, cv::UMat& output, const cv::Size& dstSize,
+void resizePreserveAspectRatio(const cv::UMat& src, cv::UMat& output, const cv::Size& dstSize,
        const cv::Scalar& bgcolor) {
-    double h1 = dstSize.width * (src.rows / (double) src.cols);
-    double w2 = dstSize.height * (src.cols / (double) src.rows);
-    if (h1 <= dstSize.height) {
-        cv::resize(src, output, cv::Size(dstSize.width, h1));
-    } else {
-        cv::resize(src, output, cv::Size(w2, dstSize.height));
-    }
-
-    int top = (dstSize.height - output.rows) / 2;
-    int down = (dstSize.height - output.rows + 1) / 2;
-    int left = (dstSize.width - output.cols) / 2;
-    int right = (dstSize.width - output.cols + 1) / 2;
-
-    cv::copyMakeBorder(output, output, top, down, left, right, cv::BORDER_CONSTANT, bgcolor);
+    cv::UMat tmp;
+    double hf = double(dstSize.height) / src.size().height;
+    double wf = double(dstSize.width) / src.size().width;
+    double f = std::min(hf, wf);
+    if (f < 0)
+        f = 1.0 / f;
+
+    cv::resize(src, tmp, cv::Size(), f, f);
+
+    int top = (dstSize.height - tmp.rows) / 2;
+    int down = (dstSize.height - tmp.rows + 1) / 2;
+    int left = (dstSize.width - tmp.cols) / 2;
+    int right = (dstSize.width - tmp.cols + 1) / 2;
+
+    cv::copyMakeBorder(tmp, output, top, down, left, right, cv::BORDER_CONSTANT, bgcolor);
}
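Worked through on concrete numbers: a 1280x720 source into a 960x960 target gives hf = 960/720 ≈ 1.33 and wf = 960/1280 = 0.75, so f = 0.75; the content is resized to 960x540 and copyMakeBorder() splits the remaining 420 rows into 210-pixel bands top and bottom:

    cv::UMat src(720, 1280, CV_8UC3, cv::Scalar::all(127)), dst;
    resizePreserveAspectRatio(src, dst, cv::Size(960, 960), cv::Scalar::all(0));
    CV_Assert(dst.size() == cv::Size(960, 960)); // 960x540 content, 210px borders above/below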
cv::Ptr<V4D> V4D::make(const cv::Size& size, const string& title, bool debug) {
-    cv::Ptr<V4D> v4d = new V4D(size, false, title, 4, 6, true, 0, debug);
+    cv::Ptr<V4D> v4d = new V4D(size, false, title, 3, 2, false, 0, debug);
    v4d->setVisible(true);
    return v4d;
}
@ -77,35 +84,32 @@ cv::Ptr<V4D> V4D::make(const cv::Size& initialSize, bool offscreen, const string
V4D::V4D(const cv::Size& size, bool offscreen, const string& title, int major, int minor,
        bool compat, int samples, bool debug) :
-        initialSize_(size), viewport_(0, 0, size.width, size.height), scale_(1), mousePos_(0, 0), stretch_(
-        false), offscreen_(offscreen) {
-    screen_ = new nanogui::Screen();
-    mainFbContext_ = new detail::FrameBufferContext(*this, size, offscreen, title,
-            major, minor, compat, samples, debug, nullptr, nullptr);
-
-    clvaContext_ = new detail::CLVAContext(*mainFbContext_);
-    glContext_ = new detail::GLContext(*this, *mainFbContext_);
-    nvgContext_ = new detail::NanoVGContext(*this, *mainFbContext_);
-    fbCtx().makeCurrent();
-    screen().initialize(getGLFWWindow(), false);
-    form_ = new FormHelper(&screen());
-
-    this->setWindowSize(initialSize_);
+        initialSize_(size), offscreen_(offscreen), title_(title), major_(major), minor_(minor), compat_(
+        compat), samples_(samples), debug_(debug), viewport_(0, 0, size.width, size.height), scale_(
+        1), mousePos_(0, 0), stretch_(false), pool_(2) {
+#ifdef __EMSCRIPTEN__
+    printf(""); //makes sure we have FS as a dependency
+#endif
+    detail::proxy_to_mainv([=, this]() {
+        mainFbContext_ = new detail::FrameBufferContext(*this, initialSize_, offscreen_, title_, major_,
+                minor_, compat_, samples_, debug_, nullptr, nullptr);
+        this->resizeWindow(initialSize_);
+        clvaContext_ = new detail::CLVAContext(*this, *mainFbContext_);
+        glContext_ = new detail::GLContext(*this, *mainFbContext_);
+        nvgContext_ = new detail::NanoVGContext(*this, *mainFbContext_);
+        nguiContext_ = new detail::NanoguiContext(*this, *mainFbContext_);
+    });
}
V4D::~V4D() {
    //don't delete form_. it is automatically cleaned up by the base class (nanogui::Screen)
-    if (screen_)
-        delete screen_;
-    if (writer_)
-        delete writer_;
-    if (capture_)
-        delete capture_;
    if (glContext_)
        delete glContext_;
    if (nvgContext_)
        delete nvgContext_;
+    if (nguiContext_)
+        delete nguiContext_;
    if (clvaContext_)
        delete clvaContext_;
    if (mainFbContext_)
@ -116,10 +120,6 @@ cv::ogl::Texture2D& V4D::texture() {
    return mainFbContext_->getTexture2D();
}

-FormHelper& V4D::form() {
-    return *form_;
-}
-
void V4D::setKeyboardEventCallback(
        std::function<bool(int key, int scancode, int action, int modifiers)> fn) {
    keyEventCb_ = fn;
@ -129,12 +129,11 @@ bool V4D::keyboard_event(int key, int scancode, int action, int modifiers) {
    if (keyEventCb_)
        return keyEventCb_(key, scancode, action, modifiers);

-    return screen().keyboard_event(key, scancode, action, modifiers);
+    return nguiCtx().screen().keyboard_event(key, scancode, action, modifiers);
}

FrameBufferContext& V4D::fbCtx() {
    assert(mainFbContext_ != nullptr);
-    mainFbContext_->makeCurrent();
    return *mainFbContext_;
}
@ -145,56 +144,65 @@ CLVAContext& V4D::clvaCtx() {
NanoVGContext& V4D::nvgCtx() {
    assert(nvgContext_ != nullptr);
-    nvgContext_->fbCtx().makeCurrent();
    return *nvgContext_;
}

+NanoguiContext& V4D::nguiCtx() {
+    assert(nguiContext_ != nullptr);
+    return *nguiContext_;
+}
+
GLContext& V4D::glCtx() {
    assert(glContext_ != nullptr);
-    glContext_->fbCtx().makeCurrent();
    return *glContext_;
}

-nanogui::Screen& V4D::screen() {
-    assert(screen_ != nullptr);
-    return *screen_;
-}
-
cv::Size V4D::getVideoFrameSize() {
    return clvaCtx().getVideoFrameSize();
}

void V4D::gl(std::function<void()> fn) {
-    glCtx().render([=](const cv::Size& sz){
-        CV_UNUSED(sz);
-        fn();
+    detail::proxy_to_mainv([fn, this]() {
+        glCtx().render([=](const cv::Size& sz) {
+            CV_UNUSED(sz);
+            fn();
+        });
    });
}

void V4D::gl(std::function<void(const cv::Size&)> fn) {
-    glCtx().render(fn);
+    detail::proxy_to_mainv([fn, this](){
+        glCtx().render(fn);
+    });
}

void V4D::fb(std::function<void(cv::UMat&)> fn) {
-    fbCtx().execute(fn);
+    detail::proxy_to_mainv([fn, this](){
+        fbCtx().execute(fn);
+    });
}

void V4D::nvg(std::function<void()> fn) {
-    nvgCtx().render([=](const cv::Size& sz){
-        CV_UNUSED(sz);
-        fn();
+    detail::proxy_to_mainv([fn, this](){
+        nvgCtx().render([fn](const cv::Size& sz) {
+            CV_UNUSED(sz);
+            fn();
+        });
    });
}

void V4D::nvg(std::function<void(const cv::Size&)> fn) {
-    nvgCtx().render(fn);
+    detail::proxy_to_mainv([fn, this](){
+        nvgCtx().render(fn);
+    });
}

-void V4D::nanogui(std::function<void(FormHelper& form)> fn) {
-    FrameBufferContext::GLScope mainGlScope(*mainFbContext_);
-    fn(form());
-    screen().set_visible(true);
-    screen().perform_layout();
+void V4D::nanogui(std::function<void(cv::v4d::FormHelper& form)> fn) {
+    nguiCtx().build(fn);
}
#ifdef __EMSCRIPTEN__
@ -209,7 +217,8 @@ static void do_frame(void* void_fn_ptr) {

void V4D::run(std::function<bool()> fn) {
#ifndef __EMSCRIPTEN__
-    while (keepRunning() && fn());
+    while (keepRunning() && fn())
+        ;
#else
    emscripten_set_main_loop_arg(do_frame, &fn, -1, true);
#endif
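The two branches differ because a browser tab cannot block: natively the predicate loop spins until keepRunning() or fn() fails, while under Emscripten each iteration becomes one animation-frame callback. A stand-alone sketch of the Emscripten shape (compiles with emcc; note the static: with simulate_infinite_loop=true the stack unwinds, so the callback state must outlive main's frame):

    #include <emscripten.h>
    #include <functional>

    static void do_frame(void* fn_ptr) {
        auto& fn = *reinterpret_cast<std::function<bool()>*>(fn_ptr);
        fn(); // one iteration per requestAnimationFrame tick
    }

    int main() {
        static std::function<bool()> fn = []() { return true; };
        // fps = -1: let the browser drive the cadence; true: do not return from here.
        emscripten_set_main_loop_arg(do_frame, &fn, -1, true);
        return 0;
    }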
@ -222,8 +231,11 @@ void V4D::setSource(const Source& src) {
}

void V4D::feed(cv::InputArray& in) {
-    this->capture([&](cv::OutputArray& videoFrame) {
-        in.getUMat().copyTo(videoFrame);
+    detail::proxy_to_mainv([in,this]() {
+        clvaCtx().capture([&](cv::UMat& videoFrame) {
+            in.copyTo(videoFrame);
+        });
    });
}
@ -235,8 +247,15 @@ bool V4D::capture() {
}

bool V4D::capture(std::function<void(cv::UMat&)> fn) {
-    if(futureReader_.valid()) {
-        if(!futureReader_.get()) {
+    if (!source_.isReady() || !source_.isOpen()) {
+#ifndef __EMSCRIPTEN__
+        return false;
+#else
+        return true;
+#endif
+    }
+    if (futureReader_.valid()) {
+        if (!futureReader_.get()) {
#ifndef __EMSCRIPTEN__
            return false;
#else
@ -244,9 +263,9 @@ bool V4D::capture(std::function<void(cv::UMat&)> fn) {
#endif
        }
    }

-    if(nextReaderFrame_.empty()) {
-        if(!clvaCtx().capture(fn, nextReaderFrame_)) {
+    if (nextReaderFrame_.empty()) {
+        clvaCtx().capture(fn).copyTo(nextReaderFrame_);
+        if (nextReaderFrame_.empty()) {
#ifndef __EMSCRIPTEN__
            return false;
#else
@ -254,20 +273,17 @@ bool V4D::capture(std::function<void(cv::UMat&)> fn) {
#endif
        }
    }

    currentReaderFrame_ = nextReaderFrame_.clone();
-    fb([=,this](cv::UMat frameBuffer) {
+    fb([this](cv::UMat& frameBuffer){
        currentReaderFrame_.copyTo(frameBuffer);
    });
-    futureReader_ = pool.push([=,this](){
-        return clvaCtx().capture(fn, nextReaderFrame_);
-    });
-#ifndef __EMSCRIPTEN__
-    return captureSuccessful_;
-#else
+    futureReader_ = pool_.enqueue(
+            [](V4D* v, std::function<void(UMat&)> fn, cv::UMat& frame) {
+                v->clvaCtx().capture(fn).copyTo(frame);
+                return !frame.empty();
+            }, this, fn, nextReaderFrame_);
    return true;
-#endif
}
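capture() is double-buffered: the frame copied into the framebuffer now was fetched by the previous call, while a pool task is already fetching the next one. The prefetch pattern, stripped of V4D and with std::async standing in for the thread pool (a sketch, not the actual implementation):

    #include <functional>
    #include <future>
    #include <opencv2/core.hpp>

    struct Prefetcher {
        cv::UMat next_;
        std::future<bool> pending_;

        // 'produce' stands in for clvaCtx().capture(fn).
        bool grab(std::function<void(cv::UMat&)> produce, cv::UMat& current) {
            if (pending_.valid() && !pending_.get())
                return false;            // the last prefetch failed
            if (next_.empty())
                produce(next_);          // first call: fetch synchronously
            if (next_.empty())
                return false;
            current = next_.clone();     // hand out a copy of the prefetched frame
            pending_ = std::async(std::launch::async, [this, produce]() {
                produce(next_);          // fetch the following frame in the background
                return !next_.empty();
            });
            return true;
        }
    };

    int main() {
        Prefetcher p;
        cv::UMat frame;
        auto src = [](cv::UMat& m) { m.create(32, 32, CV_8UC4); m.setTo(cv::Scalar::all(0)); };
        for (int i = 0; i < 3; ++i)
            if (!p.grab(src, frame))
                return 1;
        return 0;
    }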
bool V4D::isSourceReady() {
@ -288,15 +304,15 @@ void V4D::write() {
}

void V4D::write(std::function<void(const cv::UMat&)> fn) {
-    if(futureWriter_.valid())
-        futureWriter_.get();
-
-    fb([=, this](cv::UMat frameBuffer) {
-        frameBuffer.copyTo(currentWriterFrame_);
-    });
-
-    futureWriter_ = pool.push([=,this](){
-        clvaCtx().write(fn, currentWriterFrame_);
-    });
+    if (!sink_.isReady() || !sink_.isOpen())
+        return;
+//
+//    if (futureWriter_.valid())
+//        futureWriter_.get();

+//    futureWriter_ = pool_.enqueue([](V4D* v, std::function<void(const UMat&)> fn) {
+//        clvaCtx().write(fn);
+//    }, this, fn);
}
bool V4D::isSinkReady() {
@ -304,7 +320,7 @@ bool V4D::isSinkReady() {
}

void V4D::clear(const cv::Scalar& bgra) {
-    this->gl([&](){
+    this->gl([&]() {
        const float& b = bgra[0] / 255.0f;
        const float& g = bgra[1] / 255.0f;
        const float& r = bgra[2] / 255.0f;
@ -315,7 +331,7 @@ void V4D::clear(const cv::Scalar& bgra) {
}

void V4D::showGui(bool s) {
-    auto children = screen().children();
+    auto children = nguiCtx().screen().children();
    for (auto* child : children) {
        child->set_visible(s);
    }
@ -401,9 +417,18 @@ void V4D::zoom(float factor) {

cv::Vec2f V4D::getPosition() {
    fbCtx().makeCurrent();
-    int x, y;
-    glfwGetWindowPos(getGLFWWindow(), &x, &y);
-    return {float(x), float(y)};
+    cv::Vec2f* sz = reinterpret_cast<cv::Vec2f*>(detail::proxy_to_mainl([this]() {
+        int x, y;
+        glfwGetWindowPos(getGLFWWindow(), &x, &y);
+        return reinterpret_cast<long>(new cv::Vec2f(x, y));
+    }));
+    fbCtx().makeNoneCurrent();
+    cv::Vec2f copy = *sz;
+    delete sz;
+    return copy;
}
cv::Vec2f V4D::getMousePosition() {
@ -424,9 +449,15 @@ cv::Rect& V4D::viewport() {

cv::Size V4D::getNativeFrameBufferSize() {
    fbCtx().makeCurrent();
-    int w, h;
-    glfwGetFramebufferSize(getGLFWWindow(), &w, &h);
-    return {w, h};
+    cv::Size* sz = reinterpret_cast<cv::Size*>(detail::proxy_to_mainl([this](){
+        int w, h;
+        glfwGetFramebufferSize(getGLFWWindow(), &w, &h);
+        return reinterpret_cast<long>(new cv::Size{w, h});
+    }));
+    fbCtx().makeNoneCurrent();
+    cv::Size copy = *sz;
+    delete sz;
+    return copy;
}
cv::Size V4D::getFrameBufferSize() {
@ -434,10 +465,7 @@ cv::Size V4D::getFrameBufferSize() {
}

cv::Size V4D::getWindowSize() {
-    fbCtx().makeCurrent();
-    int w, h;
-    glfwGetWindowSize(getGLFWWindow(), &w, &h);
-    return {w, h};
+    return fbCtx().getWindowSize();
}

cv::Size V4D::getInitialSize() {
@ -445,50 +473,57 @@ cv::Size V4D::getInitialSize() {
}

void V4D::setWindowSize(const cv::Size& sz) {
-    fbCtx().makeCurrent();
-    screen().set_size(nanogui::Vector2i(sz.width / fbCtx().getXPixelRatio(), sz.height / fbCtx().getYPixelRatio()));
+    if(mainFbContext_ != nullptr)
+        fbCtx().setWindowSize(sz);
+    if(nguiContext_ != nullptr)
+        nguiCtx().screen().resize_callback_event(sz.width, sz.height);
+}
+
+void V4D::resizeWindow(const cv::Size& sz) {
+    fbCtx().resizeWindow(sz);
+    fbCtx().setWindowSize(sz);
}
bool V4D::isFullscreen() {
    fbCtx().makeCurrent();
-    return glfwGetWindowMonitor(getGLFWWindow()) != nullptr;
+    return detail::proxy_to_mainb([this](){
+        return glfwGetWindowMonitor(getGLFWWindow()) != nullptr;
+    });
+    fbCtx().makeNoneCurrent();
}

void V4D::setFullscreen(bool f) {
    fbCtx().makeCurrent();
-    auto monitor = glfwGetPrimaryMonitor();
-    const GLFWvidmode* mode = glfwGetVideoMode(monitor);
-    if (f) {
-        glfwSetWindowMonitor(getGLFWWindow(), monitor, 0, 0, mode->width, mode->height,
-                mode->refreshRate);
-        setWindowSize(getNativeFrameBufferSize());
-    } else {
-        glfwSetWindowMonitor(getGLFWWindow(), nullptr, 0, 0, getInitialSize().width,
-                getInitialSize().height, 0);
-        setWindowSize(getInitialSize());
-    }
+    detail::proxy_to_mainv([f,this](){
+        auto monitor = glfwGetPrimaryMonitor();
+        const GLFWvidmode* mode = glfwGetVideoMode(monitor);
+        if (f) {
+            glfwSetWindowMonitor(getGLFWWindow(), monitor, 0, 0, mode->width, mode->height,
+                    mode->refreshRate);
+            resizeWindow(getNativeFrameBufferSize());
+        } else {
+            glfwSetWindowMonitor(getGLFWWindow(), nullptr, 0, 0, getInitialSize().width,
+                    getInitialSize().height, 0);
+            resizeWindow(getInitialSize());
+        }
+    });
+    fbCtx().makeNoneCurrent();
}
bool V4D::isResizable() { bool V4D::isResizable() {
fbCtx().makeCurrent(); return fbCtx().isResizable();
return glfwGetWindowAttrib(getGLFWWindow(), GLFW_RESIZABLE) == GLFW_TRUE;
} }
void V4D::setResizable(bool r) { void V4D::setResizable(bool r) {
fbCtx().makeCurrent(); fbCtx().setResizable(r);
glfwWindowHint(GLFW_RESIZABLE, r ? GLFW_TRUE : GLFW_FALSE);
} }
bool V4D::isVisible() { bool V4D::isVisible() {
fbCtx().makeCurrent(); return fbCtx().isVisible();
return glfwGetWindowAttrib(getGLFWWindow(), GLFW_VISIBLE) == GLFW_TRUE;
} }
void V4D::setVisible(bool v) { void V4D::setVisible(bool v) {
fbCtx().makeCurrent(); fbCtx().setVisible(v);
glfwWindowHint(GLFW_VISIBLE, v ? GLFW_TRUE : GLFW_FALSE);
screen().set_visible(v);
screen().perform_layout();
} }
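The window-state accessors now delegate to FrameBufferContext (implemented in `framebuffercontext.cpp`, not shown in this excerpt). This also retires a subtle GLFW misuse: the old `setResizable()`/`setVisible()` called `glfwWindowHint()`, which only affects windows created *afterwards*; toggling a live window requires `glfwSetWindowAttrib()` and `glfwShowWindow()`/`glfwHideWindow()`. A standalone GLFW sketch of the calls involved; that the new wrappers use these is an assumption:

```cpp
#include <GLFW/glfw3.h>
#include <cstdio>

int main() {
    if (!glfwInit())
        return 1;
    GLFWwindow* win = glfwCreateWindow(640, 480, "probe", nullptr, nullptr);
    if (!win) {
        glfwTerminate();
        return 1;
    }
    // What the old inline code queried:
    bool resizable = glfwGetWindowAttrib(win, GLFW_RESIZABLE) == GLFW_TRUE;
    bool visible = glfwGetWindowAttrib(win, GLFW_VISIBLE) == GLFW_TRUE;
    std::printf("resizable=%d visible=%d\n", resizable, visible);
    // Live-toggling attributes and visibility (GLFW >= 3.3), unlike glfwWindowHint:
    glfwSetWindowAttrib(win, GLFW_RESIZABLE, GLFW_FALSE);
    glfwHideWindow(win);
    glfwShowWindow(win);
    glfwDestroyWindow(win);
    glfwTerminate();
    return 0;
}
```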
 bool V4D::isOffscreen() {
@@ -516,7 +551,7 @@ void V4D::setDefaultKeyboardEventCallback() {
             setOffscreen(!isOffscreen());
             return true;
         } else if (key == GLFW_KEY_TAB && action == GLFW_PRESS) {
-            auto children = screen().children();
+            auto children = nguiCtx().screen().children();
             for (auto* child : children) {
                 child->set_visible(!child->visible());
             }
@@ -530,19 +565,50 @@ void V4D::setDefaultKeyboardEventCallback() {

 bool V4D::display() {
     bool result = true;
     if (!offscreen_) {
-        fbCtx().makeCurrent();
-        GL_CHECK(glViewport(0, 0, getFrameBufferSize().width, getFrameBufferSize().height));
-        screen().draw_contents();
-#ifndef __EMSCRIPTEN__
-        mainFbContext_->blitFrameBufferToScreen(viewport(), getWindowSize(), isStretching());
-#else
-        mainFbContext_->blitFrameBufferToScreen(viewport(), getInitialSize(), isStretching());
-#endif
-        screen().draw_widgets();
-        glfwSwapBuffers(getGLFWWindow());
-        glfwPollEvents();
-        result = !glfwWindowShouldClose(getGLFWWindow());
+//        {
+//            FrameBufferContext::GLScope glScope(clvaCtx().fbCtx());
+//            GL_CHECK(glViewport(0, 0, getFrameBufferSize().width, getFrameBufferSize().height));
+//            clvaCtx().fbCtx().blitFrameBufferToScreen(viewport(), getWindowSize(), isStretching());
+////            clvaCtx().fbCtx().makeCurrent();
+////            glfwSwapBuffers(clvaCtx().fbCtx().getGLFWWindow());
+//        }
+//        {
+//            FrameBufferContext::GLScope glScope(glCtx().fbCtx());
+//            GL_CHECK(glViewport(0, 0, getFrameBufferSize().width, getFrameBufferSize().height));
+//            glCtx().fbCtx().blitFrameBufferToScreen(viewport(), getWindowSize(), isStretching());
+////            glCtx().fbCtx().makeCurrent();
+////            glfwSwapBuffers(glCtx().fbCtx().getGLFWWindow());
+//        }
+//        {
+//            FrameBufferContext::GLScope glScope(nvgCtx().fbCtx());
+//            GL_CHECK(glViewport(0, 0, getFrameBufferSize().width, getFrameBufferSize().height));
+////            nvgCtx().fbCtx().blitFrameBufferToScreen(viewport(), getWindowSize(), isStretching());
+////            nvgCtx().fbCtx().makeCurrent();
+////            glfwSwapBuffers(nvgCtx().fbCtx().getGLFWWindow());
+//        }
+        {
+            FrameBufferContext::GLScope glScope(nguiCtx().fbCtx());
+//            GL_CHECK(glViewport(0, 0, getFrameBufferSize().width, getFrameBufferSize().height));
+//
+//            nguiCtx().fbCtx().blitFrameBufferToScreen(viewport(), getWindowSize(), isStretching());
+//            nguiCtx().fbCtx().makeCurrent();
+//            glfwSwapBuffers(nguiCtx().fbCtx().getGLFWWindow());
+        }
+        detail::proxy_to_mainv([this](){
+            FrameBufferContext::GLScope glScope(nguiCtx().fbCtx());
+            nguiCtx().render();
+            nguiCtx().fbCtx().blitFrameBufferToScreen(viewport(), getWindowSize(), isStretching());
+        });
+        result = detail::proxy_to_mainb([this](){
+            FrameBufferContext::GLScope glScope(fbCtx());
+            fbCtx().blitFrameBufferToScreen(viewport(), getWindowSize(), isStretching());
+            glfwSwapBuffers(fbCtx().getGLFWWindow());
+            glfwPollEvents();
+            return !glfwWindowShouldClose(getGLFWWindow());
+        });
     }
     return result;
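With this change, `display()` renders the GUI and blits the main framebuffer on the main thread via the proxy helpers; buffer swapping and event polling move into the proxied lambda as well. `blitFrameBufferToScreen()` itself is defined in `framebuffercontext.cpp` (not shown here); presumably it wraps `glBlitFramebuffer` roughly like this sketch, written under that assumption:

```cpp
#include <GL/glew.h> // or any loader that provides glBlitFramebuffer

// Copy an offscreen FBO to the window's default framebuffer.
// 'stretch' decides whether the source is scaled to the window size.
void blitToScreen(GLuint srcFbo, int srcW, int srcH, int dstW, int dstH, bool stretch) {
    glBindFramebuffer(GL_READ_FRAMEBUFFER, srcFbo);
    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0); // 0 = default (window) framebuffer
    const int dw = stretch ? dstW : srcW;
    const int dh = stretch ? dstH : srcH;
    glBlitFramebuffer(0, 0, srcW, srcH, 0, 0, dw, dh,
            GL_COLOR_BUFFER_BIT, GL_NEAREST);
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
```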
@@ -550,23 +616,24 @@ bool V4D::display() {
 }

 bool V4D::isClosed() {
     return closed_;
 }

 void V4D::close() {
     setVisible(false);
     closed_ = true;
 }

 GLFWwindow* V4D::getGLFWWindow() {
     return fbCtx().getGLFWWindow();
 }

 void V4D::printSystemInfo() {
-#ifndef __EMSCRIPTEN__
-    CLExecScope_t scope(mainFbContext_->getCLExecContext());
-#endif
-    FrameBufferContext::GLScope mainGlScope(*mainFbContext_);
-    cerr << "OpenGL Version: " << getGlInfo() << endl;
-    cerr << "OpenCL Platforms: " << getClInfo() << endl;
+    detail::proxy_to_mainv([this]() {
+        fbCtx().makeCurrent();
+        cerr << "OpenGL Version: " << getGlInfo() << endl;
+        cerr << "OpenCL Platforms: " << getClInfo() << endl;
+        fbCtx().makeNoneCurrent();
+    });
 }
 }
 }

@@ -75,8 +75,10 @@ v4d->gl([](const Size sz) {

 # Tutorials
 The tutorials are designed to be read one after the other to give you a good overview of the key concepts of V4D. After that you can move on to the samples.
-* \ref v4d_display_image
+* \ref v4d_display_image_pipeline
+* \ref v4d_display_image_fb
 * \ref v4d_vector_graphics
+* \ref v4d_vector_graphics_and_fb
 * \ref v4d_render_opengl
 * \ref v4d_font_rendering
 * \ref v4d_video_editing

@ -1,7 +1,7 @@
# Display an Image {#v4d_display_image} # Display an image using the video pipeline {#v4d_display_image_pipeline}
@prev_tutorial{v4d} @prev_tutorial{v4d}
@next_tutorial{v4d_vector_graphics} @next_tutorial{v4d_display_image_fb}
| | | | | |
| -: | :- | | -: | :- |
@ -11,13 +11,6 @@
## Using the video pipeline ## Using the video pipeline
Actually there are two ways to display an image using V4D. The most convenient way is to use the video pipeline to feed an image to V4D. That has the advantage that the image is automatically resized (preserving aspect ratio) to framebuffer size and color converted (the framebuffer is BGRA). Actually there are two ways to display an image using V4D. The most convenient way is to use the video pipeline to feed an image to V4D. That has the advantage that the image is automatically resized (preserving aspect ratio) to framebuffer size and color converted (the framebuffer is BGRA).
@include samples/display_image.cpp \htmlinclude "../samples/example_v4d_display_image.html"
![The result](doc/display_image.png)
## Using direct framebuffer access
Instead of feeding to the video pipeline we can request the framebuffer in a ```fb``` context and copy the image to it. But first we must manually resize and color convert the image.
@include samples/display_image_fb.cpp @include samples/display_image.cpp
![The result](doc/display_image_fb.png)
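To give a feel for how little code the pipeline route needs, here is a hedged sketch. `V4D::make()`, the `cv::viz` namespace and `feed()` are assumptions based on the behavior the tutorial describes; the included `samples/display_image.cpp` is authoritative:

```cpp
#include <opencv2/v4d/v4d.hpp>
#include <opencv2/imgcodecs.hpp>

int main() {
    // Factory name and namespace assumed; see the sample for the real call.
    auto v4d = cv::viz::V4D::make(cv::Size(960, 960), "Display image");
    cv::UMat image;
    cv::imread("doc/lena.png").copyTo(image); // sample image shipped with the module
    v4d->feed(image); // assumed: pushes the image through the video pipeline
    while (v4d->display())
        ; // keep the window open until it is closed
    return 0;
}
```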

@@ -0,0 +1,16 @@
+# Display an image using direct framebuffer access {#v4d_display_image_fb}
+@prev_tutorial{v4d_display_image_pipeline}
+@next_tutorial{v4d_vector_graphics}
+
+| | |
+| -: | :- |
+| Original author | Amir Hassan (kallaballa) <amir@viel-zu.org> |
+| Compatibility | OpenCV >= 4.7 |
+
+## Using direct framebuffer access
+Instead of feeding to the video pipeline we can request the framebuffer in a ```fb``` context and copy the image to it. But first we must manually resize and color convert the image.
+\htmlinclude "../samples/example_v4d_display_image_fb.html"
+@include samples/display_image_fb.cpp
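The gist of the direct approach, assuming the `fb` context hands the BGRA framebuffer to the lambda as a `cv::UMat&` (which matches how the tutorial describes it):

```cpp
// Given a V4D instance 'v4d' as in the previous sketch:
cv::UMat image;
cv::imread("doc/lena.png").copyTo(image); // 3-channel BGR on disk

v4d->fb([&image](cv::UMat& framebuffer) {
    cv::UMat resized, bgra;
    cv::resize(image, resized, framebuffer.size()); // match framebuffer size
    cv::cvtColor(resized, bgra, cv::COLOR_BGR2BGRA); // framebuffer is BGRA
    bgra.copyTo(framebuffer);
});
```

Note that nothing here preserves the aspect ratio; that convenience is exactly what the pipeline variant in the previous tutorial buys.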

@@ -0,0 +1,16 @@
+# Render vector graphics {#v4d_vector_graphics}
+@prev_tutorial{v4d_display_image_fb}
+@next_tutorial{v4d_vector_graphics_and_fb}
+
+| | |
+| -: | :- |
+| Original author | Amir Hassan (kallaballa) <amir@viel-zu.org> |
+| Compatibility | OpenCV >= 4.7 |
+
+## Vector graphics
+The nvg context enables JavaScript-canvas-like rendering.
+\htmlinclude "../samples/example_v4d_vector_graphics.html"
+@include samples/vector_graphics.cpp
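For flavor, a sketch of the canvas-style API. The wrapper names mirror NanoVG and are assumptions based on `opencv2/v4d/nvg.hpp` (which this commit touches); the included sample is authoritative:

```cpp
v4d->nvg([](const cv::Size& sz) {
    using namespace cv::viz::nvg; // namespace assumed; see nvg.hpp
    beginPath();
    roundedRect(sz.width / 4.0f, sz.height / 4.0f,
            sz.width / 2.0f, sz.height / 2.0f, 20.0f);
    fillColor(cv::Scalar(0, 0, 255, 255)); // BGRA
    fill();
    strokeColor(cv::Scalar(255, 255, 255, 255));
    strokeWidth(4.0f);
    stroke();
});
```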

@ -1,6 +1,6 @@
# Render vector graphics {#v4d_vector_graphics} # Render vector graphics {#v4d_vector_graphics_and_fb}
@prev_tutorial{v4d_display_image} @prev_tutorial{v4d_vector_graphics}
@next_tutorial{v4d_render_opengl} @next_tutorial{v4d_render_opengl}
| | | | | |
@ -8,16 +8,9 @@
| Original author | Amir Hassan (kallaballa) <amir@viel-zu.org> | | Original author | Amir Hassan (kallaballa) <amir@viel-zu.org> |
| Compatibility | OpenCV >= 4.7 | | Compatibility | OpenCV >= 4.7 |
## Vector graphics
Through the nvg context javascript-canvas-like rendering is possible.
@include samples/vector_graphics.cpp
![The result](doc/vector_graphics.png)
## Vector graphics and framebuffer manipulation ## Vector graphics and framebuffer manipulation
The framebuffer can be accessed directly to manipulate data created in other contexts. In this case vector graphics is rendered to the framebuffer through NanoVG and then blurred using an ```fb`` context. The framebuffer can be accessed directly to manipulate data created in other contexts. In this case vector graphics is rendered to the framebuffer through NanoVG and then blurred using an ```fb`` context.
@include samples/vector_graphics_and_fb.cpp \htmlinclude "../samples/example_v4d_vector_graphics_and_fb.html"
![The result](doc/vector_graphics_and_fb.png) @include samples/vector_graphics_and_fb.cpp
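The post-processing step amounts to a single OpenCV call on the framebuffer (the `fb` context signature is assumed, as in the earlier sketches); since the framebuffer arrives as a `cv::UMat`, the blur runs through the transparent API, i.e. on OpenCL where available:

```cpp
// After the nvg pass has rendered into the framebuffer:
v4d->fb([](cv::UMat& framebuffer) {
    cv::UMat blurred;
    cv::GaussianBlur(framebuffer, blurred, cv::Size(9, 9), 0); // kernel size illustrative
    blurred.copyTo(framebuffer);
});
```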

@@ -11,7 +11,8 @@

 ## Render a blue screen using OpenGL
 This example simply paints the screen blue using OpenGL without shaders for brevity.
+\htmlinclude "../samples/example_v4d_render_opengl.html"
 @include samples/render_opengl.cpp
-![The result](doc/render_opengl.png)
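The core of it is two plain GL calls inside a `gl` context (the lambda shape matches the `v4d->gl()` overloads visible elsewhere in this commit):

```cpp
v4d->gl([]() {
    glClearColor(0.0f, 0.0f, 1.0f, 1.0f); // RGBA: opaque blue
    glClear(GL_COLOR_BUFFER_BIT);
});
```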

@@ -11,7 +11,7 @@

 ## Render Hello World
 Draws "Hello World" to the screen.
-@include samples/font_rendering.cpp
-![The result](doc/font_rendering.png)
+\htmlinclude "../samples/example_v4d_font_rendering.html"
+@include samples/font_rendering.cpp
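Text rendering goes through the same nvg context; the function names below mirror NanoVG's text API and are assumptions (see `opencv2/v4d/nvg.hpp` and the included sample for the real calls):

```cpp
v4d->nvg([](const cv::Size& sz) {
    using namespace cv::viz::nvg; // namespace assumed; see nvg.hpp
    fontSize(64.0f);
    fontFace("sans-bold"); // face name illustrative; a font must be loaded
    fillColor(cv::Scalar(255, 255, 255, 255));
    textAlign(NVG_ALIGN_CENTER | NVG_ALIGN_MIDDLE);
    text(sz.width / 2.0f, sz.height / 2.0f, "Hello World", nullptr);
});
```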

@@ -11,7 +11,8 @@

 ## Render text on top of a video
 By adding a Source and a Sink, V4D becomes capable of video editing. Reads a video, renders text on top and writes the result. Note: reading and writing of video data is multi-threaded in the background for performance reasons.
+\htmlinclude "../samples/example_v4d_video_editing.html"
 @include samples/video_editing.cpp
-![The result](doc/video_editing.png)
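Wiring a default Source and Sink looks roughly like this. The helper names (`makeCaptureSource`, `makeWriterSink`) and `src.fps()` are assumptions based on the module's `util.hpp`; `getFrameBufferSize()` is confirmed by the diff above:

```cpp
// Assumed helpers; check opencv2/v4d/util.hpp for the exact signatures.
auto src = makeCaptureSource("input.mkv");
v4d->setSource(src);
auto sink = makeWriterSink("output.mkv",
        cv::VideoWriter::fourcc('V', 'P', '9', '0'),
        src.fps(), v4d->getFrameBufferSize());
v4d->setSink(sink);
```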

@@ -11,7 +11,9 @@

 ## Reading and writing to V4D using custom Sources and Sinks
 In the previous tutorial we used a default video source and a video sink to stream a video through V4D, which can be manipulated using OpenGL, NanoVG or OpenCV. In this example we create a custom Source that generates rainbow frames: each time the Source is invoked, the frame is colored a slightly different hue. Additionally, the custom Sink saves individual images instead of a video.
+\htmlinclude "../samples/example_v4d_custom_source_and_sink.html"
 @include samples/custom_source_and_sink.cpp
-![The result](doc/custom_source_and_sink.gif)
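A hedged sketch of what such a pair could look like. The constructor shapes are assumptions: a Source wrapping a frame-producing lambda plus a nominal FPS, and a Sink wrapping a frame-consuming lambda; the included sample is authoritative:

```cpp
Source src([](cv::UMat& frame) {
    static long cnt = 0;
    cv::UMat hsv(cv::Size(960, 960), CV_8UC3,
            cv::Scalar(cnt % 180, 255, 255)); // hue cycles -> rainbow
    cv::cvtColor(hsv, frame, cv::COLOR_HSV2BGR);
    ++cnt;
    return true;
}, 60.0f);

Sink sink([](const cv::UMat& frame) {
    static long cnt = 0;
    cv::imwrite("frame" + std::to_string(cnt++) + ".png", frame);
    return true;
});

v4d->setSource(src);
v4d->setSink(sink);
```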

@@ -10,7 +10,8 @@

 ## Font rendering with form-based GUI
 Draws "Hello World" to the screen and lets you control the font size and color with a GUI based on FormHelper.
+\htmlinclude "../samples/example_v4d_font_with_gui.html"
 @include samples/font_with_gui.cpp
-![The result](doc/font_with_gui.png)
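FormHelper is nanogui's form builder. In plain nanogui terms the GUI reduces to a few calls; how V4D hands out the FormHelper is not shown in this excerpt, so treat the wiring as assumed:

```cpp
// 'screen' is the nanogui::Screen that V4D manages internally.
auto* gui = new nanogui::FormHelper(screen);
gui->add_window(nanogui::Vector2i(10, 10), "Font");

static float size = 40.0f;
gui->add_variable("Size", size)->set_spinnable(true);

static nanogui::Color color(255, 255, 255, 255);
gui->add_variable("Color", color);

screen->perform_layout();
```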

@@ -10,7 +10,7 @@

 Renders a rainbow cube on a white background using OpenGL, applies a glow effect using OpenCV (OpenCL) and encodes on the GPU (VAAPI).
-@youtube{Q73CqfxU8KU}
+\htmlinclude "../samples/example_v4d_cube-demo.html"
 @include samples/cube-demo.cpp

@@ -10,7 +10,7 @@

 Renders a rainbow cube on top of an input video using OpenGL, applies a glow effect using OpenCV (OpenCL) and decodes/encodes on the GPU (VAAPI).
-@youtube{d9UkxDGsPqU}
+\htmlinclude "../samples/example_v4d_video-demo.html"
 @include samples/video-demo.cpp

@@ -10,6 +10,6 @@

 Renders a color wheel on top of an input video using nanovg (OpenGL), does colorspace conversions using OpenCV (OpenCL) and decodes/encodes on the GPU (VAAPI).
-@youtube{tLI_DywdPM4}
+\htmlinclude "../samples/example_v4d_nanovg-demo.html"
 @include samples/nanovg-demo.cpp

@@ -10,7 +10,7 @@

 Renders a Mandelbrot fractal zoom. Uses shaders, OpenCL and VAAPI together.
-@youtube{hx3TV80XpyE}
+\htmlinclude "../samples/example_v4d_shader-demo.html"
 @include samples/shader-demo.cpp

@@ -10,7 +10,7 @@

 Renders a Star Wars-like text crawl using nanovg (OpenGL), uses OpenCV (OpenCL) for a pseudo-3D effect and encodes on the GPU (VAAPI).
-@youtube{2H434WN_ID8}
+\htmlinclude "../samples/example_v4d_font-demo.html"
 @include samples/font-demo.cpp

@@ -10,7 +10,7 @@

 Pedestrian detection using HOG with a linear SVM, non-maximal suppression and tracking using KCF. Uses nanovg for rendering (OpenGL), detects using a linear SVM (OpenCV/OpenCL), filters results using NMS (CPU) and tracks using KCF (CPU). Decodes/encodes on the GPU (VAAPI).
-@youtube{tEKwpD1PwRI}
+\htmlinclude "../samples/example_v4d_pedestrian-demo.html"
 @include samples/pedestrian-demo.cpp

@@ -10,7 +10,7 @@

 Optical flow visualization on top of a video. Uses background subtraction (OpenCV/OpenCL) to isolate areas with motion, detects features to track (OpenCV/OpenCL), calculates the optical flow (OpenCV/OpenCL), uses nanovg for rendering (OpenGL) and post-processes the video (OpenCL). Decodes/encodes on the GPU (VAAPI).
-@youtube{k-NA6R9SBvo}
+\htmlinclude "../samples/example_v4d_optflow-demo.html"
 @include samples/optflow-demo.cpp
