@@ -27,6 +27,8 @@
#include <mfx/mfxvideo.h>
#include "libavutil/common.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_qsv.h"
#include "libavutil/mem.h"
#include "libavutil/log.h"
#include "libavutil/pixfmt.h"
@@ -49,88 +51,129 @@ int ff_qsv_map_pixfmt(enum AVPixelFormat format)
    }
}
static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session)
static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session,
                            AVBufferRef *hw_frames_ref)
{
    if (!session) {
        if (!q->internal_qs.session) {
            int ret = ff_qsv_init_internal_session(avctx, &q->internal_qs,
                                                   q->load_plugins);
    int ret;

    if (session) {
        q->session = session;
    } else if (hw_frames_ref) {
        if (q->internal_session) {
            MFXClose(q->internal_session);
            q->internal_session = NULL;
        }
        av_buffer_unref(&q->frames_ctx.hw_frames_ctx);

        q->frames_ctx.hw_frames_ctx = av_buffer_ref(hw_frames_ref);
        if (!q->frames_ctx.hw_frames_ctx)
            return AVERROR(ENOMEM);
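        /* create a session bound to the device of the supplied frames context;
         * the last argument flags whether opaque surface allocation
         * (MFX_IOPATTERN_OUT_OPAQUE_MEMORY) is in use */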
        ret = ff_qsv_init_session_hwcontext(avctx, &q->internal_session,
                                            &q->frames_ctx, q->load_plugins,
                                            q->iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY);
        if (ret < 0) {
            av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
            return ret;
        }

        q->session = q->internal_session;
    } else {
        if (!q->internal_session) {
            ret = ff_qsv_init_internal_session(avctx, &q->internal_session,
                                               q->load_plugins);
            if (ret < 0)
                return ret;
        }

        q->session = q->internal_qs.session;
    } else {
        q->session = session;
        q->session = q->internal_session;
    }

    return 0;
    /* make sure the decoder is uninitialized */
    MFXVideoDECODE_Close(q->session);

    return 0;
}
static int qsv_decode_init(AVCodecContext *avctx, QSVContext *q, AVPacket *avpkt)
static int qsv_decode_init(AVCodecContext *avctx, QSVContext *q)
{
    mfxSession session = NULL;
    int iopattern = 0;
    mfxVideoParam param = { { 0 } };
    mfxBitstream bs = { { { 0 } } };
    int ret;
    enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_QSV,
                                       AV_PIX_FMT_NV12,
                                       AV_PIX_FMT_NONE };

    ret = ff_get_format(avctx, pix_fmts);
    if (ret < 0)
        return ret;

    avctx->pix_fmt = ret;
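    /* each in-flight decode call queues one {QSVFrame *, mfxSyncPoint *} pair,
     * so the fifo needs room for async_depth + 1 entries */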
    if (!q->async_fifo) {
        q->async_fifo = av_fifo_alloc((1 + q->async_depth) *
                                      (sizeof(mfxSyncPoint*) + sizeof(QSVFrame*)));
        if (!q->async_fifo)
            return AVERROR(ENOMEM);
    }

    q->iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
    if (avctx->hwaccel_context) {
        AVQSVContext *qsv      = avctx->hwaccel_context;
        AVQSVContext *user_ctx = avctx->hwaccel_context;

        session           = user_ctx->session;
        iopattern         = user_ctx->iopattern;
        q->ext_buffers    = user_ctx->ext_buffers;
        q->nb_ext_buffers = user_ctx->nb_ext_buffers;
    }
        session           = qsv->session;
        q->iopattern      = qsv->iopattern;
        q->ext_buffers    = qsv->ext_buffers;
        q->nb_ext_buffers = qsv->nb_ext_buffers;

    if (avctx->hw_frames_ctx) {
        AVHWFramesContext    *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
        AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;
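        /* if the caller did not request a specific output memory type,
         * derive it from the memory type of the supplied frames context */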
        if (!iopattern) {
            if (frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)
                iopattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
            else if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
                iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
        }
    }

    ret = qsv_init_session(avctx, q, session);
    if (!iopattern)
        iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
    q->iopattern = iopattern;

    ret = qsv_init_session(avctx, q, session, avctx->hw_frames_ctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n");
        return ret;
    }
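    /* wrap the packet data in an mfxBitstream so the stream headers can be
     * parsed by MFXVideoDECODE_DecodeHeader() below */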
    if (avpkt->size) {
        bs.Data       = avpkt->data;
        bs.DataLength = avpkt->size;
        bs.MaxLength  = bs.DataLength;
        bs.TimeStamp  = avpkt->pts;
    } else
        return AVERROR_INVALIDDATA;

    ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported codec_id %08x\n", avctx->codec_id);
    if (ret < 0)
        return ret;
    }

    param.mfx.CodecId = ret;
    param.mfx.CodecId = ret;
    param.mfx.CodecProfile = ff_qsv_profile_to_mfx(avctx->codec_id, avctx->profile);
    param.mfx.CodecLevel   = avctx->level == FF_LEVEL_UNKNOWN ? MFX_LEVEL_UNKNOWN : avctx->level;

    ret = MFXVideoDECODE_DecodeHeader(q->session, &bs, &param);
    if (MFX_ERR_MORE_DATA == ret) {
        /* MFX_ERR_MORE_DATA means the header was not found in this packet,
           so we return the packet size to skip the current packet */
        return avpkt->size;
    } else if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Decode header error %d\n", ret);
        return ff_qsv_error(ret);
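    /* the decoder is set up for 8-bit 4:2:0 (NV12) output; other bit depths
     * and chroma formats are rejected by ff_qsv_map_pixfmt() in
     * ff_qsv_process_data() before this point is reached */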
    param.mfx.FrameInfo.BitDepthLuma   = 8;
    param.mfx.FrameInfo.BitDepthChroma = 8;
    param.mfx.FrameInfo.Shift          = 0;
    param.mfx.FrameInfo.FourCC         = MFX_FOURCC_NV12;
    param.mfx.FrameInfo.Width          = avctx->coded_width;
    param.mfx.FrameInfo.Height         = avctx->coded_height;
    param.mfx.FrameInfo.ChromaFormat   = MFX_CHROMAFORMAT_YUV420;

    switch (avctx->field_order) {
    case AV_FIELD_PROGRESSIVE:
        param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_PROGRESSIVE;
        break;
    case AV_FIELD_TT:
        param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_FIELD_TFF;
        break;
    case AV_FIELD_BB:
        param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_FIELD_BFF;
        break;
    default:
        param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_UNKNOWN;
        break;
    }

    param.IOPattern   = q->iopattern;
    param.AsyncDepth  = q->async_depth;
    param.ExtParam    = q->ext_buffers;
    param.NumExtParam = q->nb_ext_buffers;
    param.mfx.FrameInfo.BitDepthLuma   = 8;
    param.mfx.FrameInfo.BitDepthChroma = 8;

    ret = MFXVideoDECODE_Init(q->session, &param);
    if (ret < 0) {
@@ -144,37 +187,6 @@ static int qsv_decode_init(AVCodecContext *avctx, QSVContext *q, AVPacket *avpkt)
        return ff_qsv_error(ret);
    }
    avctx->profile      = param.mfx.CodecProfile;
    avctx->level        = param.mfx.CodecLevel;
    avctx->coded_width  = param.mfx.FrameInfo.Width;
    avctx->coded_height = param.mfx.FrameInfo.Height;
    avctx->width        = param.mfx.FrameInfo.CropW - param.mfx.FrameInfo.CropX;
    avctx->height       = param.mfx.FrameInfo.CropH - param.mfx.FrameInfo.CropY;

    /* the maximum decoder latency should not exceed the maximum DPB size for
       H.264 and HEVC, which is 16 in both cases,
       so we pre-allocate a fifo big enough for 17 elements:
     */
    if (!q->async_fifo) {
        q->async_fifo = av_fifo_alloc((1 + 16) *
                                      (sizeof(mfxSyncPoint) + sizeof(QSVFrame*)));
        if (!q->async_fifo)
            return AVERROR(ENOMEM);
    }

    if (!q->input_fifo) {
        q->input_fifo = av_fifo_alloc(1024 * 16);
        if (!q->input_fifo)
            return AVERROR(ENOMEM);
    }

    if (!q->pkt_fifo) {
        q->pkt_fifo = av_fifo_alloc(sizeof(AVPacket) * (1 + 16));
        if (!q->pkt_fifo)
            return AVERROR(ENOMEM);
    }
    q->engine_ready = 1;

    return 0;
}
@@ -270,161 +282,77 @@ static QSVFrame *find_frame(QSVContext *q, mfxFrameSurface1 *surf)
    return NULL;
}
/*  This function is used for the 'smart' release of consumed data
    from the input bitstream fifo.
    Since the input fifo is mapped to an mfxBitstream, which does not understand
    data wrapping around the fifo end, we also have to relocate any remaining
    data to the beginning of the fifo. If no data remains, we just reset the
    fifo's pointers to their initial positions.
    NOTE: the case where the fifo contains unconsumed data is rare and the
    typical amount of such data is 1..4 bytes.
*/
static void qsv_fifo_relocate(AVFifoBuffer *f, int bytes_to_free)
{
    int data_size;
    int data_rest = 0;

    av_fifo_drain(f, bytes_to_free);

    data_size = av_fifo_size(f);
    if (data_size > 0) {
        if (f->buffer != f->rptr) {
            if ((f->end - f->rptr) < data_size) {
                data_rest = data_size - (f->end - f->rptr);
                data_size -= data_rest;
                memmove(f->buffer + data_size, f->buffer, data_rest);
            }
            memmove(f->buffer, f->rptr, data_size);
            data_size += data_rest;
        }
    }
    f->rptr = f->buffer;
    f->wptr = f->buffer + data_size;
    f->wndx = data_size;
    f->rndx = 0;
}
static void close_decoder(QSVContext *q)
{
    QSVFrame *cur;

    if (q->session)
        MFXVideoDECODE_Close(q->session);

    cur = q->work_frames;
    while (cur) {
        q->work_frames = cur->next;
        av_frame_free(&cur->frame);
        av_freep(&cur);
        cur = q->work_frames;
    }

    q->engine_ready   = 0;
    q->reinit_pending = 0;
}
static int do_qsv_decode(AVCodecContext *avctx, QSVContext *q,
                         AVFrame *frame, int *got_frame,
                         AVPacket *avpkt)
static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
                      AVFrame *frame, int *got_frame,
                      AVPacket *avpkt)
{
    QSVFrame *out_frame;
    mfxFrameSurface1 *insurf;
    mfxFrameSurface1 *outsurf;
    mfxSyncPoint sync;
    mfxSyncPoint *sync;
    mfxBitstream bs = { { { 0 } } };
    int ret;
    int n_out_frames;
    int buffered = 0;
    int flush    = !avpkt->size || q->reinit_pending;

    if (!q->engine_ready) {
        ret = qsv_decode_init(avctx, q, avpkt);
        if (ret)
            return ret;
    }
    if (!flush) {
        if (av_fifo_size(q->input_fifo)) {
            /* the leftover of a previous packet is still in the buffer */
            if (av_fifo_space(q->input_fifo) < avpkt->size) {
                ret = av_fifo_grow(q->input_fifo, avpkt->size);
                if (ret < 0)
                    return ret;
            }
            av_fifo_generic_write(q->input_fifo, avpkt->data, avpkt->size, NULL);
            bs.Data       = q->input_fifo->rptr;
            bs.DataLength = av_fifo_size(q->input_fifo);
            buffered = 1;
        } else {
            bs.Data       = avpkt->data;
            bs.DataLength = avpkt->size;
        }
    if (avpkt->size) {
        bs.Data       = avpkt->data;
        bs.DataLength = avpkt->size;
        bs.MaxLength  = bs.DataLength;
        bs.TimeStamp  = avpkt->pts;
    }
    while (1) {
        sync = av_mallocz(sizeof(*sync));
        if (!sync) {
            av_freep(&sync);
            return AVERROR(ENOMEM);
        }

    do {
        ret = get_surface(avctx, q, &insurf);
        if (ret < 0)
            return ret;
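        /* MFX_WRN_DEVICE_BUSY means the hardware is still busy with previous
         * work; wait briefly and retry the same call */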
        do {
            ret = MFXVideoDECODE_DecodeFrameAsync(q->session, flush ? NULL : &bs,
                                                  insurf, &outsurf, &sync);
            if (ret != MFX_WRN_DEVICE_BUSY)
                break;
            av_usleep(500);
        } while (1);

        if (MFX_WRN_VIDEO_PARAM_CHANGED == ret) {
            /* TODO: handle minor sequence header changes here */
        } else if (MFX_ERR_INCOMPATIBLE_VIDEO_PARAM == ret) {
            av_fifo_reset(q->input_fifo);
            flush = q->reinit_pending = 1;
            continue;
        }

        if (sync) {
            QSVFrame *out_frame = find_frame(q, outsurf);

            if (!out_frame) {
                av_log(avctx, AV_LOG_ERROR,
                       "The returned surface does not correspond to any frame\n");
                return AVERROR_BUG;
            }

        ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
                                              insurf, &outsurf, sync);
        if (ret == MFX_WRN_DEVICE_BUSY)
            av_usleep(500);

            out_frame->queued = 1;
            av_fifo_generic_write(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
            av_fifo_generic_write(q->async_fifo, &sync,      sizeof(sync),      NULL);
    } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);

            continue;
        }
        if (MFX_ERR_MORE_SURFACE != ret && ret < 0)
            break;

    if (ret != MFX_ERR_NONE &&
        ret != MFX_ERR_MORE_DATA &&
        ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
        ret != MFX_ERR_MORE_SURFACE) {
        av_log(avctx, AV_LOG_ERROR, "Error during QSV decoding.\n");
        av_freep(&sync);
        return ff_qsv_error(ret);
    }

    /* make sure we do not enter an infinite loop if the SDK
     * did not consume any data and did not return anything */
    if (!sync && !bs.DataOffset && !flush) {
    if (!*sync && !bs.DataOffset) {
        av_log(avctx, AV_LOG_WARNING, "A decode call did not consume any data\n");
        bs.DataOffset = avpkt->size;
    }

        if (buffered) {
            qsv_fifo_relocate(q->input_fifo, bs.DataOffset);
        } else if (bs.DataOffset != avpkt->size) {
            /* some data of the packet was not consumed; store it in the local buffer */
            av_fifo_generic_write(q->input_fifo, avpkt->data + bs.DataOffset,
                                  avpkt->size - bs.DataOffset, NULL);
        }
    if (*sync) {
        QSVFrame *out_frame = find_frame(q, outsurf);

        if (MFX_ERR_MORE_DATA != ret && ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error %d during QSV decoding.\n", ret);
            return ff_qsv_error(ret);

        if (!out_frame) {
            av_log(avctx, AV_LOG_ERROR,
                   "The returned surface does not correspond to any frame\n");
            av_freep(&sync);
            return AVERROR_BUG;
        }

        out_frame->queued = 1;
        av_fifo_generic_write(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_write(q->async_fifo, &sync,      sizeof(sync),      NULL);
    } else {
        av_freep(&sync);
    }

    n_out_frames = av_fifo_size(q->async_fifo) / (sizeof(out_frame) + sizeof(sync));

    if (n_out_frames > q->async_depth || (flush && n_out_frames)) {
    if (!av_fifo_space(q->async_fifo) ||
        (!avpkt->size && av_fifo_size(q->async_fifo))) {
        AVFrame *src_frame;

        av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
@@ -432,9 +360,11 @@ static int do_qsv_decode(AVCodecContext *avctx, QSVContext *q,
        out_frame->queued = 0;
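        /* MFXVideoCORE_SyncOperation() waits (with a 1000 ms timeout per call)
         * for the asynchronous decode tied to this sync point to finish;
         * MFX_WRN_IN_EXECUTION means it is still running, so keep waiting */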
        do {
            ret = MFXVideoCORE_SyncOperation(q->session, sync, 1000);
            ret = MFXVideoCORE_SyncOperation(q->session, *sync, 1000);
        } while (ret == MFX_WRN_IN_EXECUTION);

        av_freep(&sync);

        src_frame = out_frame->frame;

        ret = av_frame_ref(frame, src_frame);
@@ -462,149 +392,125 @@ FF_ENABLE_DEPRECATION_WARNINGS
        *got_frame = 1;
    }

    return avpkt->size;
    return bs.DataOffset;
}
/*
    This function inserts a packet at the front of the fifo.
*/
static void qsv_packet_push_front(QSVContext *q, AVPacket *avpkt)
{
    int fifo_size = av_fifo_size(q->pkt_fifo);
    if (!fifo_size) {
        /* easy case: the fifo is empty */
        av_fifo_generic_write(q->pkt_fifo, avpkt, sizeof(*avpkt), NULL);
    } else {
        /* realloc necessary */
        AVPacket pkt;
        AVFifoBuffer *fifo = av_fifo_alloc(fifo_size + av_fifo_space(q->pkt_fifo));

        av_fifo_generic_write(fifo, avpkt, sizeof(*avpkt), NULL);

        while (av_fifo_size(q->pkt_fifo)) {
            av_fifo_generic_read(q->pkt_fifo, &pkt, sizeof(pkt), NULL);
            av_fifo_generic_write(fifo,       &pkt, sizeof(pkt), NULL);
        }
        av_fifo_free(q->pkt_fifo);
        q->pkt_fifo = fifo;
    }
}
int ff_qsv_decode(AVCodecContext *avctx, QSVContext *q,
                  AVFrame *frame, int *got_frame,
                  AVPacket *avpkt)
int ff_qsv_decode_close(QSVContext *q)
{
    AVPacket pkt_ref = { 0 };
    int ret = 0;
    QSVFrame *cur = q->work_frames;

    if (q->pkt_fifo && av_fifo_size(q->pkt_fifo) >= sizeof(AVPacket)) {
        /* we already have some buffered packets, so append the new one to the tail */
        ret = av_packet_ref(&pkt_ref, avpkt);
        if (ret < 0)
            return ret;
        av_fifo_generic_write(q->pkt_fifo, &pkt_ref, sizeof(pkt_ref), NULL);
    }
    if (q->reinit_pending) {
        ret = do_qsv_decode(avctx, q, frame, got_frame, avpkt);

    if (q->session)
        MFXVideoDECODE_Close(q->session);

        if (!*got_frame) {
            /* flushing complete, no more frames */
            close_decoder(q);
            //return ff_qsv_decode(avctx, q, frame, got_frame, avpkt);
        }
    }
    if (!q->reinit_pending) {
        if (q->pkt_fifo && av_fifo_size(q->pkt_fifo) >= sizeof(AVPacket)) {
            /* process buffered packets */
            while (!*got_frame && av_fifo_size(q->pkt_fifo) >= sizeof(AVPacket)) {
                av_fifo_generic_read(q->pkt_fifo, &pkt_ref, sizeof(pkt_ref), NULL);
                ret = do_qsv_decode(avctx, q, frame, got_frame, &pkt_ref);
                if (q->reinit_pending) {
                    /*
                        A rare case: a new reinit becomes pending while buffered
                        packets are being processed. We have to return pkt_ref
                        to the same place in the fifo.
                    */
                    qsv_packet_push_front(q, &pkt_ref);
                } else {
                    av_packet_unref(&pkt_ref);
                }
            }
        } else {
            /* general decoding */
            ret = do_qsv_decode(avctx, q, frame, got_frame, avpkt);
            if (q->reinit_pending) {
                ret = av_packet_ref(&pkt_ref, avpkt);
                if (ret < 0)
                    return ret;
                av_fifo_generic_write(q->pkt_fifo, &pkt_ref, sizeof(pkt_ref), NULL);
            }
        }
    }

    while (q->async_fifo && av_fifo_size(q->async_fifo)) {
        QSVFrame *out_frame;
        mfxSyncPoint *sync;

    return ret;
}
/*
    This function resets the decoder and the corresponding buffers before a seek operation.
*/
void ff_qsv_decode_reset(AVCodecContext *avctx, QSVContext *q)
{
    QSVFrame *cur;
    AVPacket pkt;
    int ret = 0;
    mfxVideoParam param = { { 0 } };

    if (q->reinit_pending) {
        close_decoder(q);
    } else if (q->engine_ready) {
        ret = MFXVideoDECODE_GetVideoParam(q->session, &param);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "MFX decode get param error %d\n", ret);
        }

        av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_read(q->async_fifo, &sync,      sizeof(sync),      NULL);

        ret = MFXVideoDECODE_Reset(q->session, &param);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "MFX decode reset error %d\n", ret);
        }

        av_freep(&sync);
    }

        /* Free all frames */
        while (cur) {
            q->work_frames = cur->next;
            av_frame_free(&cur->frame);
            av_freep(&cur);
            cur = q->work_frames;
        while (cur) {
            q->work_frames = cur->next;
            av_frame_free(&cur->frame);
            av_freep(&cur);
            cur = q->work_frames;
        }
    }

    /* Reset output surfaces */
    if (q->async_fifo)
        av_fifo_reset(q->async_fifo);
    av_fifo_free(q->async_fifo);
    q->async_fifo = NULL;

    av_parser_close(q->parser);
    avcodec_free_context(&q->avctx_internal);

    /* Reset input packet fifo */
    while (q->pkt_fifo && av_fifo_size(q->pkt_fifo)) {
        av_fifo_generic_read(q->pkt_fifo, &pkt, sizeof(pkt), NULL);
        av_packet_unref(&pkt);
    }

    if (q->internal_session)
        MFXClose(q->internal_session);

    /* Reset input bitstream fifo */
    if (q->input_fifo)
        av_fifo_reset(q->input_fifo);

    av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
    av_freep(&q->frames_ctx.mids);
    q->frames_ctx.nb_mids = 0;

    return 0;
}
int ff_qsv_decode_close(QSVContext *q)
int ff_qsv_process_data(AVCodecContext *avctx, QSVContext *q,
                        AVFrame *frame, int *got_frame, AVPacket *pkt)
{
    close_decoder(q);
    uint8_t *dummy_data;
    int dummy_size;
    int ret;

    q->session = NULL;

    if (!q->avctx_internal) {
        q->avctx_internal = avcodec_alloc_context3(NULL);
        if (!q->avctx_internal)
            return AVERROR(ENOMEM);

    ff_qsv_close_internal_session(&q->internal_qs);

        q->parser = av_parser_init(avctx->codec_id);
        if (!q->parser)
            return AVERROR(ENOMEM);

    av_fifo_free(q->async_fifo);
    q->async_fifo = NULL;

        q->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
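        /* start with no known pixel format, so the first packet with a
         * recognized format triggers decoder (re)initialization below */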
        q->orig_pix_fmt = AV_PIX_FMT_NONE;
    }

    if (!pkt->size)
        return qsv_decode(avctx, q, frame, got_frame, pkt);

    /* we assume the packets are already split properly and want
     * just the codec parameters here */
    av_parser_parse2(q->parser, q->avctx_internal,
                     &dummy_data, &dummy_size,
                     pkt->data, pkt->size, pkt->pts, pkt->dts,
                     pkt->pos);

    /* TODO: flush delayed frames on reinit */
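    /* reinitialize the decoder whenever the parser reports a new pixel format
     * or different coded dimensions */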
    if (q->parser->format       != q->orig_pix_fmt    ||
        q->parser->coded_width  != avctx->coded_width ||
        q->parser->coded_height != avctx->coded_height) {
        enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_QSV,
                                           AV_PIX_FMT_NONE,
                                           AV_PIX_FMT_NONE };
        enum AVPixelFormat qsv_format;

        qsv_format = ff_qsv_map_pixfmt(q->parser->format);
        if (qsv_format < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Only 8-bit YUV420 streams are supported.\n");
            ret = AVERROR(ENOSYS);
            goto reinit_fail;
        }

    av_fifo_free(q->input_fifo);
    q->input_fifo = NULL;

        q->orig_pix_fmt     = q->parser->format;
        avctx->pix_fmt      = pix_fmts[1] = qsv_format;
        avctx->width        = q->parser->width;
        avctx->height       = q->parser->height;
        avctx->coded_width  = q->parser->coded_width;
        avctx->coded_height = q->parser->coded_height;
        avctx->field_order  = q->parser->field_order;
        avctx->level        = q->avctx_internal->level;
        avctx->profile      = q->avctx_internal->profile;

        ret = ff_get_format(avctx, pix_fmts);
        if (ret < 0)
            goto reinit_fail;

    av_fifo_free(q->pkt_fifo);
    q->pkt_fifo = NULL;

        avctx->pix_fmt = ret;

    return 0;

        ret = qsv_decode_init(avctx, q);
        if (ret < 0)
            goto reinit_fail;
    }

    return qsv_decode(avctx, q, frame, got_frame, pkt);

reinit_fail:
    q->orig_pix_fmt = q->parser->format = avctx->pix_fmt = AV_PIX_FMT_NONE;
    return ret;
}

void ff_qsv_decode_flush(AVCodecContext *avctx, QSVContext *q)
{
    q->orig_pix_fmt = AV_PIX_FMT_NONE;
}