mirror of https://github.com/FFmpeg/FFmpeg.git
Based on the code by Luca Barbato <lu_zero@gentoo.org> and Yukinori Yamazoe <drocon11@gmail.com>.
parent 31d2039cb4
commit 4e08c82110
10 changed files with 845 additions and 1 deletion
@@ -0,0 +1,362 @@
/*
 * Intel MediaSDK QSV codec-independent code
 *
 * copyright (c) 2013 Luca Barbato
 * copyright (c) 2015 Anton Khirnov <anton@khirnov.net>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>
#include <sys/types.h>

#include <mfx/mfxvideo.h>

#include "libavutil/common.h"
#include "libavutil/mem.h"
#include "libavutil/log.h"
#include "libavutil/pixfmt.h"
#include "libavutil/time.h"

#include "avcodec.h"
#include "internal.h"
#include "qsv_internal.h"

int ff_qsv_error(int mfx_err)
{
    switch (mfx_err) {
    case MFX_ERR_NONE:
        return 0;
    case MFX_ERR_MEMORY_ALLOC:
    case MFX_ERR_NOT_ENOUGH_BUFFER:
        return AVERROR(ENOMEM);
    case MFX_ERR_INVALID_HANDLE:
        return AVERROR(EINVAL);
    case MFX_ERR_DEVICE_FAILED:
    case MFX_ERR_DEVICE_LOST:
    case MFX_ERR_LOCK_MEMORY:
        return AVERROR(EIO);
    case MFX_ERR_NULL_PTR:
    case MFX_ERR_UNDEFINED_BEHAVIOR:
    case MFX_ERR_NOT_INITIALIZED:
        return AVERROR_BUG;
    case MFX_ERR_UNSUPPORTED:
    case MFX_ERR_NOT_FOUND:
        return AVERROR(ENOSYS);
    case MFX_ERR_MORE_DATA:
    case MFX_ERR_MORE_SURFACE:
    case MFX_ERR_MORE_BITSTREAM:
        return AVERROR(EAGAIN);
    case MFX_ERR_INCOMPATIBLE_VIDEO_PARAM:
    case MFX_ERR_INVALID_VIDEO_PARAM:
        return AVERROR(EINVAL);
    case MFX_ERR_ABORTED:
    case MFX_ERR_UNKNOWN:
    default:
        return AVERROR_UNKNOWN;
    }
}

int ff_qsv_map_pixfmt(enum AVPixelFormat format)
{
    switch (format) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUVJ420P:
        return AV_PIX_FMT_NV12;
    default:
        return AVERROR(ENOSYS);
    }
}

static int codec_id_to_mfx(enum AVCodecID codec_id)
{
    switch (codec_id) {
    case AV_CODEC_ID_H264:
        return MFX_CODEC_AVC;
    case AV_CODEC_ID_MPEG1VIDEO:
    case AV_CODEC_ID_MPEG2VIDEO:
        return MFX_CODEC_MPEG2;
    case AV_CODEC_ID_VC1:
        return MFX_CODEC_VC1;
    default:
        break;
    }

    return AVERROR(ENOSYS);
}

static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session)
{
    if (!session) {
        if (!q->internal_session) {
            mfxIMPL impl = MFX_IMPL_AUTO_ANY;
            mfxVersion ver = { { QSV_VERSION_MINOR, QSV_VERSION_MAJOR } };

            const char *desc;
            int ret;

            ret = MFXInit(impl, &ver, &q->internal_session);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Error initializing an internal MFX session\n");
                return ff_qsv_error(ret);
            }

            MFXQueryIMPL(q->internal_session, &impl);

            if (impl & MFX_IMPL_SOFTWARE)
                desc = "software";
            else if (impl & MFX_IMPL_HARDWARE)
                desc = "hardware accelerated";
            else
                desc = "unknown";

            av_log(avctx, AV_LOG_VERBOSE,
                   "Initialized an internal MFX session using %s implementation\n",
                   desc);
        }

        q->session = q->internal_session;
    } else {
        q->session = session;
    }

    /* make sure the decoder is uninitialized */
    MFXVideoDECODE_Close(q->session);

    return 0;
}

int ff_qsv_init(AVCodecContext *avctx, QSVContext *q, mfxSession session)
{
    mfxVideoParam param = { { 0 } };
    int ret;

    ret = qsv_init_session(avctx, q, session);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n");
        return ret;
    }

    ret = codec_id_to_mfx(avctx->codec_id);
    if (ret < 0)
        return ret;

    param.mfx.CodecId      = ret;
    param.mfx.CodecProfile = avctx->profile;
    param.mfx.CodecLevel   = avctx->level;

    param.mfx.FrameInfo.BitDepthLuma   = 8;
    param.mfx.FrameInfo.BitDepthChroma = 8;
    param.mfx.FrameInfo.Shift          = 0;
    param.mfx.FrameInfo.FourCC         = MFX_FOURCC_NV12;
    param.mfx.FrameInfo.Width          = avctx->coded_width;
    param.mfx.FrameInfo.Height         = avctx->coded_height;
    param.mfx.FrameInfo.ChromaFormat   = MFX_CHROMAFORMAT_YUV420;

    param.IOPattern   = q->iopattern;
    param.AsyncDepth  = q->async_depth;
    param.ExtParam    = q->ext_buffers;
    param.NumExtParam = q->nb_ext_buffers;

    ret = MFXVideoDECODE_Init(q->session, &param);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing the MFX video decoder\n");
        return ff_qsv_error(ret);
    }

    return 0;
}

static int alloc_frame(AVCodecContext *avctx, QSVFrame *frame)
{
    int ret;

    ret = ff_get_buffer(avctx, frame->frame, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0)
        return ret;

    if (frame->frame->format == AV_PIX_FMT_QSV) {
        frame->surface = (mfxFrameSurface1*)frame->frame->data[3];
    } else {
        frame->surface_internal.Info.BitDepthLuma   = 8;
        frame->surface_internal.Info.BitDepthChroma = 8;
        frame->surface_internal.Info.FourCC         = MFX_FOURCC_NV12;
        frame->surface_internal.Info.Width          = avctx->coded_width;
        frame->surface_internal.Info.Height         = avctx->coded_height;
        frame->surface_internal.Info.ChromaFormat   = MFX_CHROMAFORMAT_YUV420;

        frame->surface_internal.Data.PitchLow = frame->frame->linesize[0];
        frame->surface_internal.Data.Y        = frame->frame->data[0];
        frame->surface_internal.Data.UV       = frame->frame->data[1];

        frame->surface = &frame->surface_internal;
    }

    return 0;
}

static void qsv_clear_unused_frames(QSVContext *q)
{
    QSVFrame *cur = q->work_frames;
    while (cur) {
        if (cur->surface && !cur->surface->Data.Locked) {
            cur->surface = NULL;
            av_frame_unref(cur->frame);
        }
        cur = cur->next;
    }
}

static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
{
    QSVFrame *frame, **last;
    int ret;

    qsv_clear_unused_frames(q);

    frame = q->work_frames;
    last  = &q->work_frames;
    while (frame) {
        if (!frame->surface) {
            ret = alloc_frame(avctx, frame);
            if (ret < 0)
                return ret;
            *surf = frame->surface;
            return 0;
        }

        last  = &frame->next;
        frame = frame->next;
    }

    frame = av_mallocz(sizeof(*frame));
    if (!frame)
        return AVERROR(ENOMEM);
    frame->frame = av_frame_alloc();
    if (!frame->frame) {
        av_freep(&frame);
        return AVERROR(ENOMEM);
    }
    *last = frame;

    ret = alloc_frame(avctx, frame);
    if (ret < 0)
        return ret;

    *surf = frame->surface;

    return 0;
}

static AVFrame *find_frame(QSVContext *q, mfxFrameSurface1 *surf)
{
    QSVFrame *cur = q->work_frames;
    while (cur) {
        if (surf == cur->surface)
            return cur->frame;
        cur = cur->next;
    }
    return NULL;
}

int ff_qsv_decode(AVCodecContext *avctx, QSVContext *q,
                  AVFrame *frame, int *got_frame,
                  AVPacket *avpkt)
{
    mfxFrameSurface1 *insurf;
    mfxFrameSurface1 *outsurf;
    mfxSyncPoint sync;
    mfxBitstream bs = { { { 0 } } };
    int ret;

    if (avpkt->size) {
        bs.Data       = avpkt->data;
        bs.DataLength = avpkt->size;
        bs.MaxLength  = bs.DataLength;
        bs.TimeStamp  = avpkt->pts;
    }

    do {
        ret = get_surface(avctx, q, &insurf);
        if (ret < 0)
            return ret;

        ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
                                              insurf, &outsurf, &sync);
        if (ret == MFX_WRN_DEVICE_BUSY)
            av_usleep(1);

    } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);

    if (ret != MFX_ERR_NONE &&
        ret != MFX_ERR_MORE_DATA &&
        ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
        ret != MFX_ERR_MORE_SURFACE) {
        av_log(avctx, AV_LOG_ERROR, "Error during QSV decoding.\n");
        return ff_qsv_error(ret);
    }

    if (sync) {
        AVFrame *src_frame;

        MFXVideoCORE_SyncOperation(q->session, sync, 60000);

        src_frame = find_frame(q, outsurf);
        if (!src_frame) {
            av_log(avctx, AV_LOG_ERROR,
                   "The returned surface does not correspond to any frame\n");
            return AVERROR_BUG;
        }

        ret = av_frame_ref(frame, src_frame);
        if (ret < 0)
            return ret;

        frame->pkt_pts = frame->pts = outsurf->Data.TimeStamp;

        frame->repeat_pict =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
        frame->top_field_first =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
        frame->interlaced_frame =
            !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);

        *got_frame = 1;
    }

    return bs.DataOffset;
}

int ff_qsv_close(QSVContext *q)
{
    QSVFrame *cur = q->work_frames;

    while (cur) {
        q->work_frames = cur->next;
        av_frame_free(&cur->frame);
        av_freep(&cur);
        cur = q->work_frames;
    }

    if (q->internal_session)
        MFXClose(q->internal_session);

    return 0;
}
@@ -0,0 +1,41 @@
/*
 * Intel MediaSDK QSV public API
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_QSV_H
#define AVCODEC_QSV_H

#include <mfx/mfxvideo.h>

typedef struct AVQSVContext {
    mfxSession session;
    int iopattern;

    mfxExtBuffer **ext_buffers;
    int nb_ext_buffers;
} AVQSVContext;

/**
 * Allocate a new context.
 *
 * It must be freed by the caller with av_free().
 */
AVQSVContext *av_qsv_alloc_context(void);

#endif /* AVCODEC_QSV_H */
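
For reference, a minimal sketch of how an application might use this public header to hand its own settings to the h264_qsv decoder added below; the helper name attach_qsv_context() and the specific field values are illustrative assumptions, not part of this commit.

/* Sketch: attach an AVQSVContext before avcodec_open2(); only allocation
 * failure is handled. */
#include <libavcodec/avcodec.h>
#include <libavcodec/qsv.h>

static int attach_qsv_context(AVCodecContext *avctx)
{
    AVQSVContext *qsv = av_qsv_alloc_context();
    if (!qsv)
        return AVERROR(ENOMEM);

    /* Leaving session NULL lets the decoder create its own internal MFX
     * session; system-memory output returns ordinary NV12 frames. */
    qsv->session   = NULL;
    qsv->iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;

    /* The decoder reads these fields when it (re)initializes libmfx. */
    avctx->hwaccel_context = qsv;
    return 0;
}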
@@ -0,0 +1,28 @@
/*
 * Intel MediaSDK QSV public API functions
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/mem.h"

#include "qsv.h"

AVQSVContext *av_qsv_alloc_context(void)
{
    return av_mallocz(sizeof(AVQSVContext));
}
@@ -0,0 +1,313 @@
/*
 * Intel MediaSDK QSV based H.264 decoder
 *
 * copyright (c) 2013 Luca Barbato
 * copyright (c) 2015 Anton Khirnov
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>
#include <string.h>

#include <mfx/mfxvideo.h>

#include "libavutil/common.h"
#include "libavutil/fifo.h"
#include "libavutil/opt.h"

#include "avcodec.h"
#include "internal.h"
#include "qsv_internal.h"
#include "qsv.h"

typedef struct QSVH264Context {
    AVClass *class;
    QSVContext qsv;

    // the internal parser and codec context for parsing the data
    AVCodecParserContext *parser;
    AVCodecContext *avctx_internal;
    enum AVPixelFormat orig_pix_fmt;

    // the filter for converting to Annex B
    AVBitStreamFilterContext *bsf;

    AVFifoBuffer *packet_fifo;

    AVPacket input_ref;
    AVPacket pkt_filtered;
    uint8_t *filtered_data;
} QSVH264Context;

static void qsv_clear_buffers(QSVH264Context *s)
{
    AVPacket pkt;
    while (av_fifo_size(s->packet_fifo) >= sizeof(pkt)) {
        av_fifo_generic_read(s->packet_fifo, &pkt, sizeof(pkt), NULL);
        av_packet_unref(&pkt);
    }

    if (s->filtered_data != s->input_ref.data)
        av_freep(&s->filtered_data);
    s->filtered_data = NULL;
    av_packet_unref(&s->input_ref);
}

static av_cold int qsv_decode_close(AVCodecContext *avctx)
{
    QSVH264Context *s = avctx->priv_data;

    ff_qsv_close(&s->qsv);

    qsv_clear_buffers(s);

    av_fifo_free(s->packet_fifo);

    av_bitstream_filter_close(s->bsf);
    av_parser_close(s->parser);
    avcodec_free_context(&s->avctx_internal);

    return 0;
}

static av_cold int qsv_decode_init(AVCodecContext *avctx)
{
    QSVH264Context *s = avctx->priv_data;
    int ret;

    s->orig_pix_fmt = AV_PIX_FMT_NONE;

    s->packet_fifo = av_fifo_alloc(sizeof(AVPacket));
    if (!s->packet_fifo) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    s->bsf = av_bitstream_filter_init("h264_mp4toannexb");
    if (!s->bsf) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    s->avctx_internal = avcodec_alloc_context3(NULL);
    if (!s->avctx_internal) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (avctx->extradata) {
        s->avctx_internal->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!s->avctx_internal->extradata) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(s->avctx_internal->extradata, avctx->extradata,
               avctx->extradata_size);
        s->avctx_internal->extradata_size = avctx->extradata_size;
    }

    s->parser = av_parser_init(AV_CODEC_ID_H264);
    if (!s->parser) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    s->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;

    s->qsv.iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;

    return 0;
fail:
    qsv_decode_close(avctx);
    return ret;
}

static int qsv_process_data(AVCodecContext *avctx, AVFrame *frame,
                            int *got_frame, AVPacket *pkt)
{
    QSVH264Context *s = avctx->priv_data;
    uint8_t *dummy_data;
    int dummy_size;
    int ret;

    /* we assume the packets are already split properly and want
     * just the codec parameters here */
    av_parser_parse2(s->parser, s->avctx_internal,
                     &dummy_data, &dummy_size,
                     pkt->data, pkt->size, pkt->pts, pkt->dts,
                     pkt->pos);

    /* TODO: flush delayed frames on reinit */
    if (s->parser->format       != s->orig_pix_fmt    ||
        s->parser->coded_width  != avctx->coded_width ||
        s->parser->coded_height != avctx->coded_height) {
        mfxSession session = NULL;

        enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_QSV,
                                           AV_PIX_FMT_NONE,
                                           AV_PIX_FMT_NONE };
        enum AVPixelFormat qsv_format;

        qsv_format = ff_qsv_map_pixfmt(s->parser->format);
        if (qsv_format < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Only 8-bit YUV420 streams are supported.\n");
            ret = AVERROR(ENOSYS);
            goto reinit_fail;
        }

        s->orig_pix_fmt     = s->parser->format;
        avctx->pix_fmt      = pix_fmts[1] = qsv_format;
        avctx->width        = s->parser->width;
        avctx->height       = s->parser->height;
        avctx->coded_width  = s->parser->coded_width;
        avctx->coded_height = s->parser->coded_height;
        avctx->level        = s->avctx_internal->level;
        avctx->profile      = s->avctx_internal->profile;

        ret = ff_get_format(avctx, pix_fmts);
        if (ret < 0)
            goto reinit_fail;

        avctx->pix_fmt = ret;

        if (avctx->hwaccel_context) {
            AVQSVContext *user_ctx = avctx->hwaccel_context;
            session               = user_ctx->session;
            s->qsv.iopattern      = user_ctx->iopattern;
            s->qsv.ext_buffers    = user_ctx->ext_buffers;
            s->qsv.nb_ext_buffers = user_ctx->nb_ext_buffers;
        }

        ret = ff_qsv_init(avctx, &s->qsv, session);
        if (ret < 0)
            goto reinit_fail;
    }

    return ff_qsv_decode(avctx, &s->qsv, frame, got_frame, &s->pkt_filtered);

reinit_fail:
    s->orig_pix_fmt = s->parser->format = avctx->pix_fmt = AV_PIX_FMT_NONE;
    return ret;
}

static int qsv_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt)
{
    QSVH264Context *s = avctx->priv_data;
    AVFrame *frame    = data;
    int ret;

    /* buffer the input packet */
    if (avpkt->size) {
        AVPacket input_ref = { 0 };

        if (av_fifo_space(s->packet_fifo) < sizeof(input_ref)) {
            ret = av_fifo_realloc2(s->packet_fifo,
                                   av_fifo_size(s->packet_fifo) + sizeof(input_ref));
            if (ret < 0)
                return ret;
        }

        ret = av_packet_ref(&input_ref, avpkt);
        if (ret < 0)
            return ret;
        av_fifo_generic_write(s->packet_fifo, &input_ref, sizeof(input_ref), NULL);
    }

    /* process buffered data */
    while (!*got_frame) {
        /* prepare the input data -- convert to Annex B if needed */
        if (s->pkt_filtered.size <= 0) {
            int size;

            /* no more data */
            if (av_fifo_size(s->packet_fifo) < sizeof(AVPacket))
                return avpkt->size ? avpkt->size : ff_qsv_decode(avctx, &s->qsv, frame, got_frame, avpkt);

            if (s->filtered_data != s->input_ref.data)
                av_freep(&s->filtered_data);
            s->filtered_data = NULL;
            av_packet_unref(&s->input_ref);

            av_fifo_generic_read(s->packet_fifo, &s->input_ref, sizeof(s->input_ref), NULL);
            ret = av_bitstream_filter_filter(s->bsf, avctx, NULL,
                                             &s->filtered_data, &size,
                                             s->input_ref.data, s->input_ref.size, 0);
            if (ret < 0) {
                s->filtered_data = s->input_ref.data;
                size             = s->input_ref.size;
            }
            s->pkt_filtered      = s->input_ref;
            s->pkt_filtered.data = s->filtered_data;
            s->pkt_filtered.size = size;
        }

        ret = qsv_process_data(avctx, frame, got_frame, &s->pkt_filtered);
        if (ret < 0)
            return ret;

        s->pkt_filtered.size -= ret;
        s->pkt_filtered.data += ret;
    }

    return avpkt->size;
}

static void qsv_decode_flush(AVCodecContext *avctx)
{
    QSVH264Context *s = avctx->priv_data;

    qsv_clear_buffers(s);
    s->orig_pix_fmt = AV_PIX_FMT_NONE;
}

AVHWAccel ff_h264_qsv_hwaccel = {
    .name           = "h264_qsv",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .pix_fmt        = AV_PIX_FMT_QSV,
};

#define OFFSET(x) offsetof(QSVH264Context, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
    { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 0, INT_MAX, VD },

    { NULL },
};

static const AVClass class = {
    .class_name = "h264_qsv",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_h264_qsv_decoder = {
    .name           = "h264_qsv",
    .long_name      = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (Intel Quick Sync Video acceleration)"),
    .priv_data_size = sizeof(QSVH264Context),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .init           = qsv_decode_init,
    .decode         = qsv_decode_frame,
    .flush          = qsv_decode_flush,
    .close          = qsv_decode_close,
    .capabilities   = CODEC_CAP_DELAY,
    .priv_class     = &class,
};
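
As a usage note, this decoder is selected explicitly by name rather than through the default H.264 decoder. The sketch below assumes the 2015-era Libav API (avcodec_register_all(), avcodec_find_decoder_by_name()) and a hypothetical helper name; packet I/O and draining of delayed frames (CODEC_CAP_DELAY) are left out.

/* Sketch: open the h264_qsv decoder explicitly. */
#include <libavcodec/avcodec.h>

static AVCodecContext *open_h264_qsv(void)
{
    AVCodec *codec;
    AVCodecContext *avctx;

    avcodec_register_all();

    codec = avcodec_find_decoder_by_name("h264_qsv");
    if (!codec)
        return NULL;

    avctx = avcodec_alloc_context3(codec);
    if (!avctx)
        return NULL;

    /* Optionally attach an AVQSVContext via avctx->hwaccel_context here
     * (see the sketch after qsv.h above). */
    if (avcodec_open2(avctx, codec, NULL) < 0) {
        avcodec_free_context(&avctx);
        return NULL;
    }
    return avctx;
}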
@@ -0,0 +1,86 @@
/*
 * Intel MediaSDK QSV utility functions
 *
 * copyright (c) 2013 Luca Barbato
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_QSV_INTERNAL_H
#define AVCODEC_QSV_INTERNAL_H

#include <stdint.h>
#include <sys/types.h>

#include <mfx/mfxvideo.h>

#include "libavutil/frame.h"
#include "libavutil/pixfmt.h"

#include "avcodec.h"

#define QSV_VERSION_MAJOR 1
#define QSV_VERSION_MINOR 1

#define ASYNC_DEPTH_DEFAULT 4       // internal parallelism

typedef struct QSVFrame {
    AVFrame *frame;
    mfxFrameSurface1 *surface;

    mfxFrameSurface1 surface_internal;

    struct QSVFrame *next;
} QSVFrame;

typedef struct QSVContext {
    // the session used for decoding
    mfxSession session;

    // the session we allocated internally, in case the caller did not provide
    // one
    mfxSession internal_session;

    /**
     * a linked list of frames currently being used by QSV
     */
    QSVFrame *work_frames;

    // options set by the caller
    int async_depth;
    int iopattern;

    mfxExtBuffer **ext_buffers;
    int nb_ext_buffers;
} QSVContext;

/**
 * Convert a libmfx error code into a libav error code.
 */
int ff_qsv_error(int mfx_err);

int ff_qsv_map_pixfmt(enum AVPixelFormat format);

int ff_qsv_init(AVCodecContext *s, QSVContext *q, mfxSession session);

int ff_qsv_decode(AVCodecContext *s, QSVContext *q,
                  AVFrame *frame, int *got_frame,
                  AVPacket *avpkt);

int ff_qsv_close(QSVContext *q);

#endif /* AVCODEC_QSV_INTERNAL_H */