@@ -5,7 +5,10 @@
// Copyright (C) 2020-2021 Intel Corporation
#include "opencv2/videoio.hpp"
#if defined(__OPENCV_BUILD) || defined(OPENCV_HAVE_CVCONFIG_H) // TODO Properly detect and add D3D11 / LIBVA dependencies for standalone plugins
#ifdef HAVE_OPENCL
#include "opencv2/core/ocl.hpp"
#endif
#if defined(__OPENCV_BUILD) && !defined(BUILD_PLUGIN) // TODO Properly detect and add D3D11 / LIBVA dependencies for standalone plugins
#include "cvconfig.h"
#endif
#include <sstream>
@@ -14,16 +17,31 @@
#define D3D11_NO_HELPERS
#include <d3d11.h>
#include <codecvt>
#include "opencv2/core/directx.hpp"
#ifdef HAVE_OPENCL
#include <CL/cl_d3d11.h>
#endif
#endif // HAVE_D3D11
#ifdef HAVE_VA
#include <va/va_backend.h>
#ifdef HAVE_VA_INTEL
#include "opencv2/core/va_intel.hpp"
#ifndef CL_TARGET_OPENCL_VERSION
#define CL_TARGET_OPENCL_VERSION 120
#endif
#ifdef HAVE_VA_INTEL_OLD_HEADER
#include <CL/va_ext.h>
#else
#include <CL/cl_va_api_media_sharing_intel.h>
#endif
#endif
#endif // HAVE_VA
// FFMPEG "C" headers
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/hwcontext.h>
#ifdef HAVE_D3D11
#include <libavutil/hwcontext_d3d11va.h>
@@ -31,8 +49,23 @@ extern "C" {
#ifdef HAVE_VA
#include <libavutil/hwcontext_vaapi.h>
#endif
#ifdef HAVE_MFX // dependency only on MFX header files, no linkage dependency
#include <libavutil/hwcontext_qsv.h>
#endif
}
#define HW_DEFAULT_POOL_SIZE 32
#define HW_DEFAULT_SW_FORMAT AV_PIX_FMT_NV12

using namespace cv;

static AVCodec* hw_find_codec(AVCodecID id, AVHWDeviceType hw_type, int (*check_category)(const AVCodec*),
                              const char* disabled_codecs, AVPixelFormat* hw_pix_fmt);
static AVBufferRef* hw_create_device(AVHWDeviceType hw_type, int hw_device, const std::string& device_subname, bool use_opencl);
static AVBufferRef* hw_create_frames(struct AVCodecContext* ctx, AVBufferRef* hw_device_ctx, int width, int height, AVPixelFormat hw_format);
static AVPixelFormat hw_get_format_callback(struct AVCodecContext* ctx, const enum AVPixelFormat* fmt);
static VideoAccelerationType hw_type_to_va_type(AVHWDeviceType hw_type);

static
const char* getVideoAccelerationName(VideoAccelerationType va_type)
{
@@ -70,7 +103,7 @@ std::string getDecoderConfiguration(VideoAccelerationType va_type, AVDictionary
    case VIDEO_ACCELERATION_ANY: return "d3d11va";
    case VIDEO_ACCELERATION_D3D11: return "d3d11va";
    case VIDEO_ACCELERATION_VAAPI: return "";
    case VIDEO_ACCELERATION_MFX: return "";
    case VIDEO_ACCELERATION_MFX: return ""; // "qsv" fails if non-Intel D3D11 device
    }
    return "";
#else
@@ -80,7 +113,7 @@ std::string getDecoderConfiguration(VideoAccelerationType va_type, AVDictionary
    case VIDEO_ACCELERATION_ANY: return "vaapi.iHD";
    case VIDEO_ACCELERATION_D3D11: return "";
    case VIDEO_ACCELERATION_VAAPI: return "vaapi.iHD";
    case VIDEO_ACCELERATION_MFX: return "";
    case VIDEO_ACCELERATION_MFX: return "qsv.iHD";
    }
    return "";
#endif
@@ -125,7 +158,6 @@ std::string getEncoderConfiguration(VideoAccelerationType va_type, AVDictionary
#endif
}

static
std::string getDecoderDisabledCodecs(AVDictionary *dict)
{
@@ -170,19 +202,6 @@ std::string getEncoderDisabledCodecs(AVDictionary *dict)
#endif
}
#define HW_DEFAULT_POOL_SIZE 32
#define HW_DEFAULT_SW_FORMAT AV_PIX_FMT_NV12

using namespace cv;

static AVCodec* hw_find_codec(AVCodecID id, AVHWDeviceType hw_type, int (*check_category)(const AVCodec*),
                              const char* disabled_codecs, AVPixelFormat* hw_pix_fmt);
static AVBufferRef* hw_create_device(AVHWDeviceType hw_type, int hw_device, const std::string& device_subname);
static AVBufferRef* hw_create_frames(struct AVCodecContext* ctx, AVBufferRef* hw_device_ctx, int width, int height, AVPixelFormat hw_format);
static AVPixelFormat hw_get_format_callback(struct AVCodecContext* ctx, const enum AVPixelFormat* fmt);
static VideoAccelerationType hw_type_to_va_type(AVHWDeviceType hw_type);

static
bool hw_check_device(AVBufferRef* ctx, AVHWDeviceType hw_type, const std::string& device_subname) {
    if (!ctx)
@@ -259,75 +278,343 @@ bool hw_check_device(AVBufferRef* ctx, AVHWDeviceType hw_type, const std::string
}
static
AVBufferRef* hw_create_device(AVHWDeviceType hw_type, int hw_device, const std::string& device_subname) {
    if (AV_HWDEVICE_TYPE_NONE == hw_type)
AVBufferRef* hw_create_derived_context(AVHWDeviceType hw_type, AVBufferRef* hw_device_ctx) {
    AVBufferRef* derived_ctx = NULL;
    const char* hw_name = av_hwdevice_get_type_name(hw_type);
    int err = av_hwdevice_ctx_create_derived(&derived_ctx, hw_type, hw_device_ctx, 0);
    if (!derived_ctx || err < 0)
    {
        if (derived_ctx)
            av_buffer_unref(&derived_ctx);
        CV_LOG_INFO(NULL, "FFMPEG: Failed to create derived video acceleration (av_hwdevice_ctx_create_derived) for " << hw_name << ". Error=" << err);
        return NULL;
    }
    else
    {
        // Store child context in 'user_opaque' field of parent context.
        struct FreeChildContext {
            static void free(struct AVHWDeviceContext* ctx) {
                AVBufferRef* child_ctx = (AVBufferRef*)ctx->user_opaque;
                if (child_ctx)
                    av_buffer_unref(&child_ctx);
            }
        };
        AVHWDeviceContext* ctx = (AVHWDeviceContext*)derived_ctx->data;
        ctx->user_opaque = av_buffer_ref(hw_device_ctx);
        ctx->free = FreeChildContext::free;
        CV_LOG_INFO(NULL, "FFMPEG: Created derived video acceleration context (av_hwdevice_ctx_create_derived) for " << hw_name);
        return derived_ctx;
    }
}
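// Usage sketch (illustrative only; variable names are hypothetical): deriving a QSV
// context from an existing VAAPI context. The child context stays alive through the
// extra av_buffer_ref() stored in 'user_opaque' and is released by FreeChildContext::free.
//   AVBufferRef* vaapi_ctx = ...; // created earlier with av_hwdevice_ctx_create()
//   AVBufferRef* qsv_ctx = hw_create_derived_context(AV_HWDEVICE_TYPE_QSV, vaapi_ctx);
//   av_buffer_unref(&vaapi_ctx); // safe: the derived context holds its own reference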
    AVHWDeviceType child_type = hw_type;
    if (hw_type == AV_HWDEVICE_TYPE_QSV) {
#ifdef _WIN32
        child_type = AV_HWDEVICE_TYPE_DXVA2;
#else
        child_type = AV_HWDEVICE_TYPE_VAAPI;

#ifdef HAVE_OPENCL // GPU buffer interop with cv::UMat

// FFmpeg context attached to OpenCL context
class OpenCL_FFMPEG_Context : public ocl::Context::UserContext {
public:
    OpenCL_FFMPEG_Context(AVBufferRef* ctx) {
        ctx_ = av_buffer_ref(ctx);
    }
    virtual ~OpenCL_FFMPEG_Context() {
        av_buffer_unref(&ctx_);
    }
    AVBufferRef* GetAVHWDevice() {
        return ctx_;
    }
private:
    AVBufferRef* ctx_;
};
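// A minimal sketch of the attach/retrieve round trip this class enables (see
// hw_init_opencl() and hw_create_context_from_opencl() below):
//   ocl::Context& c = ocl::OpenCLExecutionContext::getCurrent().getContext();
//   c.setUserContext(std::make_shared<OpenCL_FFMPEG_Context>(hw_device_ctx)); // attach
//   auto user = c.getUserContext<OpenCL_FFMPEG_Context>();                    // retrieve
//   AVBufferRef* ctx = user ? user->GetAVHWDevice() : NULL;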
#ifdef HAVE_MFX
static
int hw_find_qsv_surface_index(AVFrame* hw_frame)
{
    if (AV_PIX_FMT_QSV != hw_frame->format)
        return -1;
    mfxFrameSurface1* surface = (mfxFrameSurface1*)hw_frame->data[3]; // As defined by AV_PIX_FMT_QSV
    AVHWFramesContext* frames_ctx = (AVHWFramesContext*)hw_frame->hw_frames_ctx->data;
    AVQSVFramesContext* qsv_ctx = (AVQSVFramesContext*)frames_ctx->hwctx;
    for (int i = 0; i < qsv_ctx->nb_surfaces; i++) {
        if (surface == qsv_ctx->surfaces + i) {
            return i;
        }
    }
    return -1;
}
#endif
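// The returned index is meaningful because AVQSVFramesContext::surfaces is a
// contiguous array, and the same index addresses the matching VAAPI surface
// (hw_get_va_surface) or D3D11 texture-array subresource (hw_get_d3d11_texture)
// in the child frames context created alongside the QSV one.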
#ifdef HAVE_VA
static
VADisplay hw_get_va_display(AVHWDeviceContext* hw_device_ctx)
{
    if (hw_device_ctx->type == AV_HWDEVICE_TYPE_QSV) { // we stored pointer to child context in 'user_opaque' field
        AVBufferRef* ctx = (AVBufferRef*)hw_device_ctx->user_opaque;
        hw_device_ctx = (AVHWDeviceContext*)ctx->data;
    }
    if (hw_device_ctx && hw_device_ctx->type == AV_HWDEVICE_TYPE_VAAPI) {
        return ((AVVAAPIDeviceContext*)hw_device_ctx->hwctx)->display;
    }
    return NULL;
}
#endif // HAVE_VA
#ifdef HAVE_VA_INTEL
static
VASurfaceID hw_get_va_surface(AVFrame* hw_frame) {
    if (AV_PIX_FMT_VAAPI == hw_frame->format) {
        return (VASurfaceID)(size_t)hw_frame->data[3]; // As defined by AV_PIX_FMT_VAAPI
    }
#ifdef HAVE_MFX
    else if (AV_PIX_FMT_QSV == hw_frame->format) {
        int frame_idx = hw_find_qsv_surface_index(hw_frame);
        if (frame_idx >= 0) { // frame index is same in parent (QSV) and child (VAAPI) frame context
            AVHWFramesContext* frames_ctx = (AVHWFramesContext*)hw_frame->hw_frames_ctx->data;
            AVHWFramesContext* child_ctx = (AVHWFramesContext*)frames_ctx->user_opaque;
            if (child_ctx && AV_HWDEVICE_TYPE_VAAPI == child_ctx->device_ctx->type) {
                AVVAAPIFramesContext* vaapi_ctx = (AVVAAPIFramesContext*)child_ctx->hwctx;
                CV_Assert(frame_idx < vaapi_ctx->nb_surfaces);
                return vaapi_ctx->surface_ids[frame_idx];
            }
        }
    }
#endif // HAVE_MFX
    return VA_INVALID_SURFACE;
}
#endif // HAVE_VA_INTEL
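// hw_get_va_display() and hw_get_va_surface() together provide everything the
// OpenCL interop path needs; see hw_copy_frame_to_umat() below, where the pair
// feeds va_intel::convertFromVASurface().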
#ifdef HAVE_D3D11
static
AVD3D11VADeviceContext* hw_get_d3d11_device_ctx(AVHWDeviceContext* hw_device_ctx) {
    if (AV_HWDEVICE_TYPE_QSV == hw_device_ctx->type) { // we stored pointer to child context in 'user_opaque' field
        AVBufferRef* ctx = (AVBufferRef*)hw_device_ctx->user_opaque;
        hw_device_ctx = (AVHWDeviceContext*)ctx->data;
    }
    if (AV_HWDEVICE_TYPE_D3D11VA == hw_device_ctx->type) {
        return (AVD3D11VADeviceContext*)hw_device_ctx->hwctx;
    }
    return NULL;
}
ID3D11Texture2D* hw_get_d3d11_texture(AVFrame* hw_frame, int* subresource) {
    ID3D11Texture2D* texture = NULL;
    if (AV_PIX_FMT_D3D11 == hw_frame->format) {
        texture = (ID3D11Texture2D*)hw_frame->data[0]; // As defined by AV_PIX_FMT_D3D11
        *subresource = (intptr_t)hw_frame->data[1]; // As defined by AV_PIX_FMT_D3D11
    }
#ifdef HAVE_MFX
    else if (AV_PIX_FMT_QSV == hw_frame->format) {
        AVHWFramesContext* frames_ctx = (AVHWFramesContext*)hw_frame->hw_frames_ctx->data;
        AVHWFramesContext* child_ctx = (AVHWFramesContext*)frames_ctx->user_opaque;
        if (child_ctx && AV_HWDEVICE_TYPE_D3D11VA == child_ctx->device_ctx->type) {
            texture = ((AVD3D11VAFramesContext*)child_ctx->hwctx)->texture;
        }
        *subresource = hw_find_qsv_surface_index(hw_frame);
        CV_Assert(*subresource >= 0);
    }
#endif
    return texture;
}
// In D3D11 case we allocate additional texture as single texture (not texture array) because
// OpenCL interop with D3D11 doesn't support/work with NV12 sub-texture of texture array.
ID3D11Texture2D* hw_get_d3d11_single_texture(AVFrame* hw_frame, AVD3D11VADeviceContext* d3d11_device_ctx, ID3D11Texture2D* texture) {
    AVHWFramesContext* frames_ctx = (AVHWFramesContext*)hw_frame->hw_frames_ctx->data;
    if (AV_HWDEVICE_TYPE_QSV == frames_ctx->device_ctx->type) {
        frames_ctx = (AVHWFramesContext*)frames_ctx->user_opaque; // we stored pointer to child context in 'user_opaque' field
    }
    if (!frames_ctx || AV_HWDEVICE_TYPE_D3D11VA != frames_ctx->device_ctx->type) {
        return NULL;
    }
    ID3D11Texture2D* singleTexture = (ID3D11Texture2D*)frames_ctx->user_opaque;
    if (!singleTexture && d3d11_device_ctx && texture) {
        D3D11_TEXTURE2D_DESC desc = {};
        texture->GetDesc(&desc);
        desc.ArraySize = 1;
        desc.BindFlags |= D3D11_BIND_SHADER_RESOURCE;
        desc.MiscFlags |= D3D11_RESOURCE_MISC_SHARED;
        if (SUCCEEDED(d3d11_device_ctx->device->CreateTexture2D(&desc, NULL, &singleTexture))) {
            frames_ctx->user_opaque = singleTexture;
        }
    }
    return singleTexture;
}
#endif // HAVE_D3D11
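// Lifetime note: the lazily created single texture is cached in the D3D11 frames
// context's 'user_opaque' field and released by the D3D11SingleTexture free()
// callback installed in hw_create_frames() before av_hwframe_ctx_init().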
static
AVHWDeviceType hw_check_opencl_context(AVHWDeviceContext* ctx) {
    ocl::OpenCLExecutionContext& ocl_context = ocl::OpenCLExecutionContext::getCurrentRef();
    if (!ctx || ocl_context.empty())
        return AV_HWDEVICE_TYPE_NONE;
#ifdef HAVE_VA_INTEL
    VADisplay vadisplay_ocl = ocl_context.getContext().getOpenCLContextProperty(CL_CONTEXT_VA_API_DISPLAY_INTEL);
    VADisplay vadisplay_ctx = hw_get_va_display(ctx);
    if (vadisplay_ocl && vadisplay_ocl == vadisplay_ctx)
        return AV_HWDEVICE_TYPE_VAAPI;
#endif
#ifdef HAVE_D3D11
    ID3D11Device* d3d11device_ocl = (ID3D11Device*)ocl_context.getContext().getOpenCLContextProperty(CL_CONTEXT_D3D11_DEVICE_KHR);
    AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(ctx);
    if (d3d11_device_ctx && d3d11device_ocl && d3d11_device_ctx->device == d3d11device_ocl)
        return AV_HWDEVICE_TYPE_D3D11VA;
#endif
    return AV_HWDEVICE_TYPE_NONE;
}
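// The check relies on properties recorded when the OpenCL context was created:
// a context created with CL_CONTEXT_VA_API_DISPLAY_INTEL (cl_intel_va_api_media_sharing)
// or CL_CONTEXT_D3D11_DEVICE_KHR (cl_khr_d3d11_sharing) exposes the native device
// it is bound to, which is compared against the FFmpeg device context here.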
static
void hw_init_opencl(AVBufferRef* ctx) {
    if (!ctx)
        return;
    AVHWDeviceContext* hw_device_ctx = (AVHWDeviceContext*)ctx->data;
    if (!hw_device_ctx)
        return;
#ifdef HAVE_VA_INTEL
    VADisplay va_display = hw_get_va_display(hw_device_ctx);
    if (va_display) {
        va_intel::ocl::initializeContextFromVA(va_display);
    }
#endif
#ifdef HAVE_D3D11
    AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(hw_device_ctx);
    if (d3d11_device_ctx) {
        directx::ocl::initializeContextFromD3D11Device(d3d11_device_ctx->device);
    }
#endif
    if (hw_check_opencl_context(hw_device_ctx) != AV_HWDEVICE_TYPE_NONE) {
        // Attach AVHWDeviceContext to OpenCL context
        ocl::Context& ocl_context = ocl::OpenCLExecutionContext::getCurrent().getContext();
        ocl_context.setUserContext(std::make_shared<OpenCL_FFMPEG_Context>(ctx));
    }
}
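// Note: initializeContextFromVA()/initializeContextFromD3D11Device() make the
// interop-capable OpenCL context current for this thread; the subsequent
// hw_check_opencl_context() call verifies the binding actually took effect
// before attaching the FFmpeg device context to it.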
static
AVBufferRef* hw_create_context_from_opencl(ocl::OpenCLExecutionContext& ocl_context, AVHWDeviceType hw_type) {
    if (ocl_context.empty())
        return NULL;
    auto ocl_ffmpeg_context = ocl_context.getContext().getUserContext<OpenCL_FFMPEG_Context>();
    if (!ocl_ffmpeg_context)
        return NULL;
    AVBufferRef* ctx = ocl_ffmpeg_context->GetAVHWDevice();
    if (hw_type != ((AVHWDeviceContext*)ctx->data)->type) {
        ctx = hw_create_derived_context(hw_type, ctx);
    }
    else {
        ctx = av_buffer_ref(ctx);
    }
    if (ctx)
        CV_LOG_INFO(NULL, "FFMPEG: Using " << av_hwdevice_get_type_name(hw_type) << " video acceleration context attached to OpenCL context");
    return ctx;
}
#endif // HAVE_OPENCL
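// Overall flow: hw_create_device() first tries to reuse a device context already
// attached to the current OpenCL context (above); only if none is found does it
// create a fresh one and, when possible, bind a new OpenCL context to it via
// hw_init_opencl().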
static
AVBufferRef* hw_create_device(AVHWDeviceType hw_type, int hw_device, const std::string& device_subname, bool use_opencl) {
    AVBufferRef* hw_device_ctx = NULL;
    char device[128] = "";
    char* pdevice = NULL;
    if (hw_device >= 0 && hw_device < 100000) {
        if (child_type == AV_HWDEVICE_TYPE_VAAPI) {
            snprintf(device, sizeof(device), "/dev/dri/renderD%d", 128 + hw_device);
        } else {
            snprintf(device, sizeof(device), "%d", hw_device);
    if (AV_HWDEVICE_TYPE_NONE == hw_type)
        return NULL;
#ifdef HAVE_OPENCL
    // Check if OpenCL context has AVHWDeviceContext attached to it
    ocl::OpenCLExecutionContext& ocl_context = ocl::OpenCLExecutionContext::getCurrentRef();
    try {
        hw_device_ctx = hw_create_context_from_opencl(ocl_context, hw_type);
        if (hw_device_ctx) {
            if (hw_device >= 0)
                CV_LOG_ERROR(NULL, "VIDEOIO/FFMPEG: ignoring property HW_DEVICE as device context already created and attached to OpenCL context");
            return hw_device_ctx;
        }
        pdevice = device;
    }
    const char* hw_child_name = av_hwdevice_get_type_name(child_type);
    const char* device_name = pdevice ? pdevice : "'default'";
    int err = av_hwdevice_ctx_create(&hw_device_ctx, child_type, pdevice, NULL, 0);
    if (hw_device_ctx && err >= 0)
    {
        CV_LOG_DEBUG(NULL, "FFMPEG: Created video acceleration context (av_hwdevice_ctx_create) for " << hw_child_name << " on device " << device_name);
        if (!hw_check_device(hw_device_ctx, hw_type, device_subname)) {
            av_buffer_unref(&hw_device_ctx);
            return NULL;
    catch (...) {
        CV_LOG_INFO(NULL, "FFMPEG: Exception creating Video Acceleration context using current OpenCL context");
    }
#endif
    // Create new media context. In QSV case, first create 'child' context.
    std::vector<AVHWDeviceType> child_types = { hw_type };
    if (hw_type == AV_HWDEVICE_TYPE_QSV) {
#ifdef _WIN32
        child_types = { AV_HWDEVICE_TYPE_D3D11VA, AV_HWDEVICE_TYPE_DXVA2 };
#else
        child_types = { AV_HWDEVICE_TYPE_VAAPI };
#endif
    }
    for (AVHWDeviceType child_type : child_types) {
        char device[128] = "";
        char* pdevice = NULL;
        if (hw_device >= 0 && hw_device < 100000) {
            if (child_type == AV_HWDEVICE_TYPE_VAAPI) {
                snprintf(device, sizeof(device), "/dev/dri/renderD%d", 128 + hw_device);
            }
            else {
                snprintf(device, sizeof(device), "%d", hw_device);
            }
            pdevice = device;
        }
        if (hw_type != child_type) {
            AVBufferRef* derived_ctx = NULL;
            const char* hw_name = av_hwdevice_get_type_name(hw_type);
            err = av_hwdevice_ctx_create_derived(&derived_ctx, hw_type, hw_device_ctx, 0);
            if (!derived_ctx || err < 0)
            {
                if (derived_ctx)
                    av_buffer_unref(&derived_ctx);
                CV_LOG_INFO(NULL, "FFMPEG: Failed to create derived video acceleration (av_hwdevice_ctx_create_derived) for " << hw_name << ". Error=" << err);
        const char* hw_child_name = av_hwdevice_get_type_name(child_type);
        const char* device_name = pdevice ? pdevice : "'default'";
        int err = av_hwdevice_ctx_create(&hw_device_ctx, child_type, pdevice, NULL, 0);
        if (hw_device_ctx && err >= 0)
        {
            if (!hw_check_device(hw_device_ctx, hw_type, device_subname)) {
                av_buffer_unref(&hw_device_ctx);
                continue;
            }
            else
            {
                CV_LOG_DEBUG(NULL, "FFMPEG: Created derived video acceleration context (av_hwdevice_ctx_create_derived) for " << hw_name);
            CV_LOG_INFO(NULL, "FFMPEG: Created video acceleration context (av_hwdevice_ctx_create) for " << hw_child_name << " on device " << device_name);
#ifdef HAVE_OPENCL
            // if OpenCL context not created yet or property HW_ACCELERATION_USE_OPENCL set, create OpenCL context with binding to video acceleration context
            if (ocl::haveOpenCL()) {
                if (ocl_context.empty() || use_opencl) {
                    try {
                        hw_init_opencl(hw_device_ctx);
                        ocl_context = ocl::OpenCLExecutionContext::getCurrentRef();
                        if (!ocl_context.empty()) {
                            CV_LOG_INFO(NULL, "FFMPEG: Created OpenCL context with " << hw_child_name <<
                                " video acceleration on OpenCL device: " << ocl_context.getDevice().name());
                        }
                    } catch (...) {
                        CV_LOG_INFO(NULL, "FFMPEG: Exception creating OpenCL context with " << hw_child_name << " video acceleration");
                    }
                }
                else {
                    CV_LOG_INFO(NULL, "FFMPEG: Can't bind " << hw_child_name << " video acceleration context to already created OpenCL context");
                }
            }
#else
            CV_UNUSED(use_opencl);
#endif
            if (hw_type != child_type) {
                AVBufferRef* derived_ctx = hw_create_derived_context(hw_type, hw_device_ctx);
                av_buffer_unref(&hw_device_ctx);
                return derived_ctx;
            } else {
                return hw_device_ctx;
            }
                av_buffer_unref(&hw_device_ctx);
                return derived_ctx;
            } else {
                return hw_device_ctx;
            }
        }
        else
        {
            const char* hw_name = hw_child_name;
            CV_LOG_INFO(NULL, "FFMPEG: Failed to create " << hw_name << " video acceleration (av_hwdevice_ctx_create) on device " << device_name);
        }
    }
    else
    {
        const char* hw_name = hw_child_name;
        CV_LOG_INFO(NULL, "FFMPEG: Failed to create " << hw_name << " video acceleration (av_hwdevice_ctx_create) on device " << device_name);
        return NULL;
    }
    return NULL;
}
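// How this is reached from the public API (illustrative sketch using the standard
// videoio acceleration properties):
//   cv::VideoCapture cap("video.mp4", cv::CAP_FFMPEG, {
//       cv::CAP_PROP_HW_ACCELERATION, cv::VIDEO_ACCELERATION_ANY,
//       cv::CAP_PROP_HW_DEVICE, 0,                     // maps to 'hw_device' above
//       cv::CAP_PROP_HW_ACCELERATION_USE_OPENCL, 1 }); // maps to 'use_opencl' above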
static
AVBufferRef* hw_create_frames(struct AVCodecContext* ctx, AVBufferRef* hw_device_ctx, int width, int height, AVPixelFormat hw_format)
AVBufferRef* hw_create_frames(struct AVCodecContext* codec_ctx, AVBufferRef* hw_device_ctx, int width, int height, AVPixelFormat hw_format)
{
    AVHWDeviceContext* device_ctx = (AVHWDeviceContext*)hw_device_ctx->data;
    AVBufferRef* child_ctx = hw_device_ctx;
    // In QSV case we first allocate child D3D11/VAAPI frames (except DXVA2 as no OpenCL interop), then derive to parent QSV frames
    if (AV_HWDEVICE_TYPE_QSV == device_ctx->type) {
        AVBufferRef* ctx = (AVBufferRef*)device_ctx->user_opaque; // child context stored during creation of derived context
        if (ctx && AV_HWDEVICE_TYPE_DXVA2 != ((AVHWDeviceContext*)ctx->data)->type) {
            child_ctx = ctx;
        }
    }
    AVBufferRef* hw_frames_ref = nullptr;
    if (ctx)
    if (codec_ctx)
    {
        int res = avcodec_get_hw_frames_parameters(ctx, hw_device_ctx, hw_format, &hw_frames_ref);
        int res = avcodec_get_hw_frames_parameters(codec_ctx, child_ctx, hw_format, &hw_frames_ref);
        if (res < 0)
        {
            CV_LOG_DEBUG(NULL, "FFMPEG: avcodec_get_hw_frames_parameters() call failed: " << res)
@@ -335,7 +622,7 @@ AVBufferRef* hw_create_frames(struct AVCodecContext* ctx, AVBufferRef *hw_device
    }
    if (!hw_frames_ref)
    {
        hw_frames_ref = av_hwframe_ctx_alloc(hw_device_ctx);
        hw_frames_ref = av_hwframe_ctx_alloc(child_ctx);
    }
    if (!hw_frames_ref)
    {
@@ -345,12 +632,41 @@ AVBufferRef* hw_create_frames(struct AVCodecContext* ctx, AVBufferRef *hw_device
    AVHWFramesContext* frames_ctx = (AVHWFramesContext*)(hw_frames_ref->data);
    frames_ctx->width = width;
    frames_ctx->height = height;
    if (frames_ctx->format == AV_PIX_FMT_NONE)
        frames_ctx->format = hw_format;
    if (frames_ctx->format == AV_PIX_FMT_NONE) {
        if (child_ctx == hw_device_ctx) {
            frames_ctx->format = hw_format;
        }
        else {
            AVHWFramesConstraints* constraints = av_hwdevice_get_hwframe_constraints(child_ctx, NULL);
            if (constraints) {
                frames_ctx->format = constraints->valid_hw_formats[0];
                av_hwframe_constraints_free(&constraints);
            }
        }
    }
    if (frames_ctx->sw_format == AV_PIX_FMT_NONE)
        frames_ctx->sw_format = HW_DEFAULT_SW_FORMAT;
    if (frames_ctx->initial_pool_size == 0)
        frames_ctx->initial_pool_size = HW_DEFAULT_POOL_SIZE;
#ifdef HAVE_D3D11
    if (frames_ctx->device_ctx && AV_HWDEVICE_TYPE_D3D11VA == frames_ctx->device_ctx->type) {
        // BindFlags
        AVD3D11VAFramesContext* frames_hwctx = (AVD3D11VAFramesContext*)frames_ctx->hwctx;
        frames_hwctx->BindFlags |= D3D11_BIND_DECODER | D3D11_BIND_VIDEO_ENCODER;
        // See function hw_get_d3d11_single_texture(), it allocates additional ID3D11Texture2D texture and
        // attaches it as 'user_opaque' field. We have to set free() callback before av_hwframe_ctx_init() call.
        struct D3D11SingleTexture {
            static void free(struct AVHWFramesContext* ctx) {
                ID3D11Texture2D* singleTexture = (ID3D11Texture2D*)ctx->user_opaque;
                if (ctx->user_opaque)
                    singleTexture->Release();
            }
        };
        frames_ctx->free = D3D11SingleTexture::free;
    }
#endif
    int res = av_hwframe_ctx_init(hw_frames_ref);
    if (res < 0)
    {
@@ -358,7 +674,25 @@ AVBufferRef* hw_create_frames(struct AVCodecContext* ctx, AVBufferRef *hw_device
        av_buffer_unref(&hw_frames_ref);
        return NULL;
    }
    return hw_frames_ref;
    if (child_ctx != hw_device_ctx) {
        AVBufferRef* derived_frame_ctx = NULL;
        int flags = AV_HWFRAME_MAP_READ | AV_HWFRAME_MAP_WRITE;
        res = av_hwframe_ctx_create_derived(&derived_frame_ctx, hw_format, hw_device_ctx, hw_frames_ref, flags);
        av_buffer_unref(&hw_frames_ref);
        if (res < 0)
        {
            CV_LOG_INFO(NULL, "FFMPEG: Failed to create derived HW frame context (av_hwframe_ctx_create_derived): " << res);
            return NULL;
        }
        else {
            ((AVHWFramesContext*)derived_frame_ctx->data)->user_opaque = frames_ctx;
            return derived_frame_ctx;
        }
    }
    else {
        return hw_frames_ref;
    }
}
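// The derived QSV frames context keeps a plain pointer to the child frames
// context in 'user_opaque'; helpers such as hw_get_va_surface() and
// hw_get_d3d11_texture() use it to translate a QSV surface index into the
// underlying VAAPI surface or D3D11 texture-array subresource.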
static
@@ -455,6 +789,110 @@ AVPixelFormat hw_get_format_callback(struct AVCodecContext *ctx, const enum AVPi
    return fmt[0];
}
// GPU color conversion NV12->BGRA via OpenCL extensions
static bool
hw_copy_frame_to_umat(AVBufferRef* ctx, AVFrame* hw_frame, cv::OutputArray output) {
    CV_UNUSED(hw_frame);
    CV_UNUSED(output);
    if (!ctx)
        return false;
#ifdef HAVE_OPENCL
    try {
        // check that current OpenCL context is initialized with binding to same VAAPI/D3D11 context
        AVHWDeviceContext* hw_device_ctx = (AVHWDeviceContext*)ctx->data;
        AVHWDeviceType child_type = hw_check_opencl_context(hw_device_ctx);
        if (child_type == AV_HWDEVICE_TYPE_NONE)
            return false;
#ifdef HAVE_VA_INTEL
        if (child_type == AV_HWDEVICE_TYPE_VAAPI) {
            VADisplay va_display = hw_get_va_display(hw_device_ctx);
            VASurfaceID va_surface = hw_get_va_surface(hw_frame);
            if (va_display && va_surface != VA_INVALID_SURFACE) {
                va_intel::convertFromVASurface(va_display, va_surface, { hw_frame->width, hw_frame->height }, output);
                return true;
            }
        }
#endif
#ifdef HAVE_D3D11
        if (child_type == AV_HWDEVICE_TYPE_D3D11VA) {
            AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(hw_device_ctx);
            int subresource = 0;
            ID3D11Texture2D* texture = hw_get_d3d11_texture(hw_frame, &subresource);
            ID3D11Texture2D* singleTexture = hw_get_d3d11_single_texture(hw_frame, d3d11_device_ctx, texture);
            if (texture && singleTexture) {
                // Copy D3D11 sub-texture to D3D11 single texture
                d3d11_device_ctx->device_context->CopySubresourceRegion(singleTexture, 0, 0, 0, 0, texture, subresource, NULL);
                // Copy D3D11 single texture to cv::UMat
                directx::convertFromD3D11Texture2D(singleTexture, output);
                return true;
            }
        }
#endif
    }
    catch (...)
    {
        return false;
    }
#endif // HAVE_OPENCL
    return false;
}
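// Typical call site (sketch, not the exact capture code): after a decoded HW
// frame is received, the zero-copy route can be tried first, falling back to a
// CPU download when it fails:
//   cv::UMat umat;
//   if (!hw_copy_frame_to_umat(codec_ctx->hw_device_ctx, hw_frame, umat))
//       { /* fall back: av_hwframe_transfer_data() + CPU color conversion */ }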
// GPU color conversion BGRA->NV12 via OpenCL extensions
static bool
hw_copy_umat_to_frame(AVBufferRef* ctx, cv::InputArray input, AVFrame* hw_frame) {
    CV_UNUSED(input);
    CV_UNUSED(hw_frame);
    if (!ctx)
        return false;
#ifdef HAVE_OPENCL
    try {
        // check that current OpenCL context is initialized with binding to same VAAPI/D3D11 context
        AVHWDeviceContext* hw_device_ctx = (AVHWDeviceContext*)ctx->data;
        AVHWDeviceType child_type = hw_check_opencl_context(hw_device_ctx);
        if (child_type == AV_HWDEVICE_TYPE_NONE)
            return false;
#ifdef HAVE_VA_INTEL
        if (child_type == AV_HWDEVICE_TYPE_VAAPI) {
            VADisplay va_display = hw_get_va_display(hw_device_ctx);
            VASurfaceID va_surface = hw_get_va_surface(hw_frame);
            if (va_display != NULL && va_surface != VA_INVALID_SURFACE) {
                va_intel::convertToVASurface(va_display, input, va_surface, { hw_frame->width, hw_frame->height });
                return true;
            }
        }
#endif
#ifdef HAVE_D3D11
        if (child_type == AV_HWDEVICE_TYPE_D3D11VA) {
            AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(hw_device_ctx);
            int subresource = 0;
            ID3D11Texture2D* texture = hw_get_d3d11_texture(hw_frame, &subresource);
            ID3D11Texture2D* singleTexture = hw_get_d3d11_single_texture(hw_frame, d3d11_device_ctx, texture);
            if (texture && singleTexture) {
                // Copy cv::UMat to D3D11 single texture
                directx::convertToD3D11Texture2D(input, singleTexture);
                // Copy D3D11 single texture to D3D11 sub-texture
                d3d11_device_ctx->device_context->CopySubresourceRegion(texture, subresource, 0, 0, 0, singleTexture, 0, NULL);
                return true;
            }
        }
#endif
    }
    catch (...)
    {
        return false;
    }
#endif // HAVE_OPENCL
    return false;
}
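// Mirror image of hw_copy_frame_to_umat(): the encoder path can attempt this
// zero-copy upload first and fall back to av_hwframe_transfer_data() when the
// current OpenCL context is not bound to the same VAAPI/D3D11 device.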
static
VideoAccelerationType hw_type_to_va_type(AVHWDeviceType hw_type) {
    struct HWTypeFFMPEG {