diff --git a/src/common/subsystems.hpp b/src/common/subsystems.hpp
index fd52184e8..ba6e23b41 100644
--- a/src/common/subsystems.hpp
+++ b/src/common/subsystems.hpp
@@ -26,7 +26,6 @@
 #include
 #include
-
 using std::cout;
 using std::cerr;
 using std::endl;
@@ -294,7 +293,7 @@ std::string get_info() {
 }
 
 void bind() {
-    va::context.bind();
+    context.bind();
 }
 } // namespace va
@@ -375,44 +374,10 @@ void init() {
     XStoreName(xdisplay, xwin, "nanovg-demo");
     wmDeleteMessage = XInternAtom(xdisplay, "WM_DELETE_WINDOW", False);
     XSetWMProtocols(xdisplay, xwin, &wmDeleteMessage, 1);
-    XSelectInput(xdisplay, xwin, ButtonPressMask | Button1MotionMask );
     initialized = true;
 }
-
-enum EventState {
-    NONE,
-    PRESS,
-    RELEASE,
-    MOTION
-};
-
-std::pair<EventState, std::pair<int, int>> consume_event() {
-    XEvent event;
-    EventState state;
-    int revert_to;
-    XGetInputFocus(xdisplay, &xwin, &revert_to);
-    int x = 0, y = 0;
-    if (XEventsQueued(xdisplay, QueuedAlready) > 0) {
-        XNextEvent(xdisplay, &event);
-        switch (event.type) {
-
-        case ButtonPress:
-            x = event.xmotion.x;
-            y = event.xmotion.y;
-            state = PRESS;
-            break;
-        case MotionNotify:
-            x = event.xmotion.x;
-            y = event.xmotion.y;
-            state = MOTION;
-            break;
-        }
-    }
-
-    return {state,{x,y}};
-}
-}
+} // namespace x11
 
 namespace egl {
 //code in the kb::egl namespace deals with setting up EGL
@@ -594,7 +559,10 @@ namespace gl {
 cv::ogl::Texture2D *frame_buf_tex;
 GLuint frame_buf;
 cv::ocl::OpenCLExecutionContext context;
-bool initialized = false;
+
+void bind() {
+    context.bind();
+}
 
 void init() {
     glewExperimental = true;
@@ -606,6 +574,7 @@ void init() {
     glCheck(glGenFramebuffers(1, &frame_buf));
     glCheck(glBindFramebuffer(GL_DRAW_FRAMEBUFFER, frame_buf));
 
+    // Create the stencil render buffer (a depth buffer created the exact same way works as well).
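+    // NanoVG's OpenGL backend rasterizes filled paths via the stencil buffer, so the offscreen FBO needs a stencil attachment.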
     GLuint sb;
     glGenRenderbuffers(1, &sb);
     glBindRenderbuffer(GL_RENDERBUFFER, sb);
@@ -620,16 +589,6 @@ void init() {
     assert(glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
 
     gl::context = cv::ocl::OpenCLExecutionContext::getCurrent();
-    initialized = true;
-}
-
-void bind() {
-    gl::context.bind();
-}
-
-
-bool is_initialized() {
-    return initialized;
 }
 
 void swap_buffers() {
@@ -705,11 +664,21 @@ void pop() {
     glCheck(glPopAttrib());
 }
 
-void begin(int w, int h, double pxRatio = 1) {
+void begin() {
     push();
+
+    float w = WIDTH;
+    float h = HEIGHT;
+    if(x11::is_initialized()) {
+        auto ws = x11::get_window_size();
+        w = ws.first;
+        h = ws.second;
+    }
+
     glCheck(glBindFramebuffer(GL_DRAW_FRAMEBUFFER, kb::gl::frame_buf));
     nvgSave(vg);
-    nvgBeginFrame(vg, w, h, pxRatio);
+    glCheck(glViewport(0, HEIGHT - h, w, h));
+    nvgBeginFrame(vg, w, h, std::fmax(WIDTH/w, HEIGHT/h));
 }
 
 void end() {
@@ -736,8 +705,8 @@ void init(bool debug = false) {
     }
 
     nvgCreateFont(vg, "icons", "fonts/entypo.ttf");
-    nvgCreateFont(vg, "sans-bold", "fonts/DejaVuSans-Bold.ttf");
-    nvgCreateFont(vg, "sans", "fonts/DejaVuSans.ttf");
+    nvgCreateFont(vg, "sans-bold", "fonts/TheBoldFont.ttf");
+    nvgCreateFont(vg, "sans", "fonts/TheBoldFont.ttf");
 
     pop();
 }
diff --git a/src/video/video-demo.cpp b/src/video/video-demo.cpp
index 2c7b41a81..a9ccb8952 100644
--- a/src/video/video-demo.cpp
+++ b/src/video/video-demo.cpp
@@ -3,6 +3,9 @@
 //WIDTH and HEIGHT have to be specified before including subsystems.hpp
 constexpr long unsigned int WIDTH = 1920;
 constexpr long unsigned int HEIGHT = 1080;
+constexpr const int VA_HW_DEVICE_INDEX = 0;
+constexpr bool OFFSCREEN = false;
+constexpr const char *OUTPUT_FILENAME = "video-demo.mkv";
 
 #include "../common/subsystems.hpp"
 #include
@@ -12,26 +15,6 @@
 using std::cerr;
 using std::endl;
 using std::string;
-//Static stream info. Has to match your capture device/file
-constexpr double INPUT_FPS = 30;
-constexpr int INPUT_WIDTH = 320;
-constexpr int INPUT_HEIGHT = 240;
-const string INPUT_FORMAT = "mjpeg";
-const string PIXEL_FORMAT = "yuyv422";
-const string INPUT_FILENAME = "example.mp4";
-const string OUTPUT_FILENAME = "camera-demo.mkv";
-
-//The ffmpeg capture and writer options we need to capture... but don't overwrite the environment variables if they already exist.
-const string CAPTURE_OPTIONS = "framerate;" + std::to_string(INPUT_FPS)
-        + "|input_format;" + INPUT_FORMAT
-        + "|video_size;" + std::to_string(INPUT_WIDTH) + "x" + std::to_string(INPUT_HEIGHT)
-        + "|pixel_format;" + PIXEL_FORMAT;
-
-const string WRITER_OPTIONS = "";
-
-constexpr const int VA_HW_DEVICE_INDEX = 0;
-constexpr bool OFFSCREEN = false;
-
 void init_render() {
     glViewport(0, 0, WIDTH, HEIGHT);
     glColor3f(1.0, 1.0, 1.0);
@@ -52,8 +35,7 @@ void init_render() {
 
 void render() {
     //Render a tetrahedron using immediate mode because the code is more concise for a demo
-    glBindFramebuffer(GL_FRAMEBUFFER, kb::gl::frame_buf);
-    glViewport(0, 0, WIDTH , HEIGHT );
+    glViewport(0, 0, WIDTH, HEIGHT);
     glRotatef(1, 0, 1, 0);
 
     glClearColor(0.0f, 0.0f, 1.0f, 1.0f);
@@ -97,29 +79,31 @@ void glow_effect(cv::UMat &src, int ksize = WIDTH / 85 % 2 == 0 ? WIDTH / 85 +
 }
 
 int main(int argc, char **argv) {
-    //The ffmpeg capture and writer options we need to capture... but don't overwrite the environment variables if they already exist.
- setenv("OPENCV_FFMPEG_CAPTURE_OPTIONS", CAPTURE_OPTIONS.c_str(), 0); - setenv("OPENCV_FFMPEG_WRITER_OPTIONS", WRITER_OPTIONS.c_str(), 0); - using namespace kb; + if(argc != 2) { + cerr << "Usage: video-demo " << endl; + exit(1); + } //Initialize OpenCL Context for VAAPI va::init(); //Initialize MJPEG HW decoding using VAAPI - cv::VideoCapture cap(INPUT_FILENAME, cv::CAP_FFMPEG, { + cv::VideoCapture capture(argv[1], cv::CAP_FFMPEG, { cv::CAP_PROP_HW_DEVICE, VA_HW_DEVICE_INDEX, cv::CAP_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_VAAPI, cv::CAP_PROP_HW_ACCELERATION_USE_OPENCL, 1 }); // check if we succeeded - if (!cap.isOpened()) { + if (!capture.isOpened()) { cerr << "ERROR! Unable to open camera" << endl; return -1; } + double fps = capture.get(cv::CAP_PROP_FPS); + //Initialize VP9 HW encoding using VAAPI. We don't need to specify the hardware device twice. only generates a warning. - cv::VideoWriter video(OUTPUT_FILENAME, cv::CAP_FFMPEG, cv::VideoWriter::fourcc('V', 'P', '9', '0'), INPUT_FPS, cv::Size(WIDTH, HEIGHT), { + cv::VideoWriter writer(OUTPUT_FILENAME, cv::CAP_FFMPEG, cv::VideoWriter::fourcc('V', 'P', '9', '0'), fps, cv::Size(WIDTH, HEIGHT), { cv::VIDEOWRITER_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_VAAPI, cv::VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL, 1 }); @@ -145,7 +129,7 @@ int main(int argc, char **argv) { uint64_t cnt = 1; int64 start = cv::getTickCount(); double tickFreq = cv::getTickFrequency(); - double lastFps = INPUT_FPS; + double lastFps = fps; init_render(); @@ -158,17 +142,17 @@ int main(int argc, char **argv) { //Activate the OpenCL context for VAAPI va::bind(); //Decode a frame on the GPU using VAAPI - cap >> videoFrame; + capture >> videoFrame; if (videoFrame.empty()) { cerr << "End of stream. Exiting" << endl; break; } - //The video is upside-down. Flip it. (OpenCL) + //The frameBuffer is upside-down. Flip videoFrame. (OpenCL) cv::flip(videoFrame, videoFrame, 0); - //Color-conversion from BGRA to RGB. (OpenCL) + //Color-conversion from RGB to BGRA. (OpenCL) cv::cvtColor(videoFrame, videoFrameRGBA, cv::COLOR_RGB2BGRA); - //Resize the frame. (OpenCL) + //Resize the frame if necessary. (OpenCL) cv::resize(videoFrameRGBA, frameBuffer, cv::Size(WIDTH, HEIGHT)); gl::bind(); @@ -191,7 +175,7 @@ int main(int argc, char **argv) { //Activate the OpenCL context for VAAPI va::bind(); //Encode the frame using VAAPI on the GPU. - video.write(videoFrame); + writer.write(videoFrame); if(x11::is_initialized()) { //Yet again activate the OpenCL context for OpenGL