avcodec: add new Videotoolbox hwaccel.

pull/143/merge
Sebastien Zwickert 10 years ago committed by Clément Bœsch
parent 127203ba5a
commit 11d923d414
  1. 1
      Changelog
  2. 1
      MAINTAINERS
  3. 5
      Makefile
  4. 19
      configure
  5. 3
      ffmpeg.h
  6. 8
      ffmpeg_opt.c
  7. 137
      ffmpeg_vda.c
  8. 187
      ffmpeg_videotoolbox.c
  9. 12
      libavcodec/Makefile
  10. 5
      libavcodec/allcodecs.c
  11. 3
      libavcodec/h263dec.c
  12. 4
      libavcodec/h264_slice.c
  13. 3
      libavcodec/mpeg12dec.c
  14. 2
      libavcodec/vda.c
  15. 154
      libavcodec/vda_h264.c
  16. 32
      libavcodec/vda_vt_internal.h
  17. 4
      libavcodec/version.h
  18. 690
      libavcodec/videotoolbox.c
  19. 126
      libavcodec/videotoolbox.h
  20. 4
      libavutil/pixdesc.c
  21. 2
      libavutil/pixfmt.h

@ -29,6 +29,7 @@ version <next>:
- acrossfade audio filter
- allyuv video source
- atadenoise video filter
- OS X VideoToolbox support
version 2.7:

@ -305,6 +305,7 @@ Hardware acceleration:
vaapi* Gwenole Beauchesne
vda* Sebastien Zwickert
vdpau* Carl Eugen Hoyos
videotoolbox* Sebastien Zwickert
libavdevice

@ -31,7 +31,10 @@ $(foreach prog,$(AVBASENAMES),$(eval OBJS-$(prog)-$(CONFIG_OPENCL) += cmdutils_o
OBJS-ffmpeg += ffmpeg_opt.o ffmpeg_filter.o
OBJS-ffmpeg-$(HAVE_VDPAU_X11) += ffmpeg_vdpau.o
OBJS-ffmpeg-$(HAVE_DXVA2_LIB) += ffmpeg_dxva2.o
OBJS-ffmpeg-$(CONFIG_VDA) += ffmpeg_vda.o
ifndef CONFIG_VIDEOTOOLBOX
OBJS-ffmpeg-$(CONFIG_VDA) += ffmpeg_videotoolbox.o
endif
OBJS-ffmpeg-$(CONFIG_VIDEOTOOLBOX) += ffmpeg_videotoolbox.o
OBJS-ffserver += ffserver_config.o
TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64

19
configure vendored

@ -155,6 +155,7 @@ Hardware accelerators:
--disable-vaapi disable VAAPI code [autodetect]
--disable-vda disable VDA code [autodetect]
--disable-vdpau disable VDPAU code [autodetect]
--enable-videotoolbox enable VideoToolbox code [autodetect]
Individual component options:
--disable-everything disable all components listed below
@ -1470,6 +1471,7 @@ HWACCEL_LIST="
vaapi
vda
vdpau
videotoolbox
xvmc
"
@ -2380,14 +2382,18 @@ d3d11va_deps="d3d11_h dxva_h ID3D11VideoDecoder"
dxva2_deps="dxva2api_h DXVA2_ConfigPictureDecode"
vaapi_deps="va_va_h"
vda_deps="VideoDecodeAcceleration_VDADecoder_h pthreads"
vda_extralibs="-framework CoreFoundation -framework VideoDecodeAcceleration -framework QuartzCore"
vda_extralibs="-framework CoreFoundation -framework VideoDecodeAcceleration -framework QuartzCore -framework CoreServices"
vdpau_deps="vdpau_vdpau_h vdpau_vdpau_x11_h"
videotoolbox_deps="VideoToolbox_VideoToolbox_h pthreads"
videotoolbox_extralibs="-framework CoreFoundation -framework VideoToolbox -framework CoreMedia -framework QuartzCore -framework CoreServices"
xvmc_deps="X11_extensions_XvMClib_h"
h263_vaapi_hwaccel_deps="vaapi"
h263_vaapi_hwaccel_select="h263_decoder"
h263_vdpau_hwaccel_deps="vdpau"
h263_vdpau_hwaccel_select="h263_decoder"
h263_videotoolbox_hwaccel_deps="videotoolbox"
h263_videotoolbox_hwaccel_select="h263_decoder"
h264_crystalhd_decoder_select="crystalhd h264_mp4toannexb_bsf h264_parser"
h264_d3d11va_hwaccel_deps="d3d11va"
h264_d3d11va_hwaccel_select="h264_decoder"
@ -2410,6 +2416,8 @@ h264_vdpau_decoder_deps="vdpau"
h264_vdpau_decoder_select="h264_decoder"
h264_vdpau_hwaccel_deps="vdpau"
h264_vdpau_hwaccel_select="h264_decoder"
h264_videotoolbox_hwaccel_deps="videotoolbox"
h264_videotoolbox_hwaccel_select="h264_decoder"
hevc_d3d11va_hwaccel_deps="d3d11va DXVA_PicParams_HEVC"
hevc_d3d11va_hwaccel_select="hevc_decoder"
hevc_dxva2_hwaccel_deps="dxva2 DXVA_PicParams_HEVC"
@ -2425,6 +2433,8 @@ mpeg1_vdpau_decoder_deps="vdpau"
mpeg1_vdpau_decoder_select="mpeg1video_decoder"
mpeg1_vdpau_hwaccel_deps="vdpau"
mpeg1_vdpau_hwaccel_select="mpeg1video_decoder"
mpeg1_videotoolbox_hwaccel_deps="videotoolbox"
mpeg1_videotoolbox_hwaccel_select="mpeg1video_decoder"
mpeg1_xvmc_hwaccel_deps="xvmc"
mpeg1_xvmc_hwaccel_select="mpeg1video_decoder"
mpeg2_crystalhd_decoder_select="crystalhd"
@ -2438,6 +2448,8 @@ mpeg2_vaapi_hwaccel_deps="vaapi"
mpeg2_vaapi_hwaccel_select="mpeg2video_decoder"
mpeg2_vdpau_hwaccel_deps="vdpau"
mpeg2_vdpau_hwaccel_select="mpeg2video_decoder"
mpeg2_videotoolbox_hwaccel_deps="videotoolbox"
mpeg2_videotoolbox_hwaccel_select="mpeg2video_decoder"
mpeg2_xvmc_hwaccel_deps="xvmc"
mpeg2_xvmc_hwaccel_select="mpeg2video_decoder"
mpeg4_crystalhd_decoder_select="crystalhd"
@ -2447,6 +2459,8 @@ mpeg4_vdpau_decoder_deps="vdpau"
mpeg4_vdpau_decoder_select="mpeg4_decoder"
mpeg4_vdpau_hwaccel_deps="vdpau"
mpeg4_vdpau_hwaccel_select="mpeg4_decoder"
mpeg4_videotoolbox_hwaccel_deps="videotoolbox"
mpeg4_videotoolbox_hwaccel_select="mpeg4_decoder"
msmpeg4_crystalhd_decoder_select="crystalhd"
vc1_crystalhd_decoder_select="crystalhd"
vc1_d3d11va_hwaccel_deps="d3d11va"
@ -2902,7 +2916,7 @@ sws_max_filter_size_default=256
set_default sws_max_filter_size
# Enable hwaccels by default.
enable d3d11va dxva2 vaapi vda vdpau xvmc
enable d3d11va dxva2 vaapi vda vdpau videotoolbox xvmc
enable xlib
# build settings
@ -5108,6 +5122,7 @@ check_header valgrind/valgrind.h
check_header vdpau/vdpau.h
check_header vdpau/vdpau_x11.h
check_header VideoDecodeAcceleration/VDADecoder.h
check_header VideoToolbox/VideoToolbox.h
check_header windows.h
check_header X11/extensions/XvMClib.h
check_header asm/types.h

@ -63,6 +63,7 @@ enum HWAccelID {
HWACCEL_VDPAU,
HWACCEL_DXVA2,
HWACCEL_VDA,
HWACCEL_VIDEOTOOLBOX,
};
typedef struct HWAccel {
@ -520,6 +521,7 @@ extern int frame_bits_per_raw_sample;
extern AVIOContext *progress_avio;
extern float max_error_rate;
extern int vdpau_api_ver;
extern char *videotoolbox_pixfmt;
extern const AVIOInterruptCB int_cb;
@ -554,5 +556,6 @@ int ffmpeg_parse_options(int argc, char **argv);
int vdpau_init(AVCodecContext *s);
int dxva2_init(AVCodecContext *s);
int vda_init(AVCodecContext *s);
int videotoolbox_init(AVCodecContext *s);
#endif /* FFMPEG_H */

@ -74,7 +74,10 @@ const HWAccel hwaccels[] = {
{ "dxva2", dxva2_init, HWACCEL_DXVA2, AV_PIX_FMT_DXVA2_VLD },
#endif
#if CONFIG_VDA
{ "vda", vda_init, HWACCEL_VDA, AV_PIX_FMT_VDA },
{ "vda", videotoolbox_init, HWACCEL_VDA, AV_PIX_FMT_VDA },
#endif
#if CONFIG_VIDEOTOOLBOX
{ "videotoolbox", videotoolbox_init, HWACCEL_VIDEOTOOLBOX, AV_PIX_FMT_VIDEOTOOLBOX },
#endif
{ 0 },
};
@ -3232,6 +3235,9 @@ const OptionDef options[] = {
"select a device for HW acceleration" "devicename" },
#if HAVE_VDPAU_X11
{ "vdpau_api_ver", HAS_ARG | OPT_INT | OPT_EXPERT, { &vdpau_api_ver }, "" },
#endif
#if CONFIG_VDA || CONFIG_VIDEOTOOLBOX
{ "videotoolbox_pixfmt", HAS_ARG | OPT_STRING | OPT_EXPERT, { &videotoolbox_pixfmt}, "" },
#endif
{ "autorotate", HAS_ARG | OPT_BOOL | OPT_SPEC |
OPT_EXPERT | OPT_INPUT, { .off = OFFSET(autorotate) },

@ -1,137 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/avcodec.h"
#include "libavcodec/vda.h"
#include "libavutil/imgutils.h"
#include "ffmpeg.h"
typedef struct VDAContext {
AVFrame *tmp_frame;
} VDAContext;
/* ffmpeg.c hwaccel "retrieve_data" callback for VDA.
 * frame->data[3] carries a CVPixelBufferRef produced by the hardware
 * decoder; copy its pixels into the software staging frame
 * (vda->tmp_frame) and move that back into `frame`, so the rest of the
 * pipeline sees plain YUV420P/UYVY422 data.
 * Returns 0 on success or a negative AVERROR code. */
static int vda_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    InputStream *ist = s->opaque;
    VDAContext *vda = ist->hwaccel_ctx;
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)frame->data[3];
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    CVReturn err;
    uint8_t *data[4] = { 0 };
    int linesize[4] = { 0 };
    int planes, ret, i;

    av_frame_unref(vda->tmp_frame);

    /* Map the CoreVideo pixel format to the matching AVPixelFormat; only
     * the formats VDA is configured to output are handled here. */
    switch (pixel_format) {
    case kCVPixelFormatType_420YpCbCr8Planar: vda->tmp_frame->format = AV_PIX_FMT_YUV420P; break;
    case kCVPixelFormatType_422YpCbCr8:       vda->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
    default:
        av_log(NULL, AV_LOG_ERROR,
               "Unsupported pixel format: %u\n", pixel_format);
        return AVERROR(ENOSYS);
    }

    vda->tmp_frame->width  = frame->width;
    vda->tmp_frame->height = frame->height;
    ret = av_frame_get_buffer(vda->tmp_frame, 32);
    if (ret < 0)
        return ret;

    /* The CV buffer's base address is only valid while it is locked. */
    err = CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (err != kCVReturnSuccess) {
        av_log(NULL, AV_LOG_ERROR, "Error locking the pixel buffer.\n");
        return AVERROR_UNKNOWN;
    }

    /* Gather per-plane pointers and strides from the CV buffer. */
    if (CVPixelBufferIsPlanar(pixbuf)) {
        planes = CVPixelBufferGetPlaneCount(pixbuf);
        for (i = 0; i < planes; i++) {
            data[i]     = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
            linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
        }
    } else {
        data[0]     = CVPixelBufferGetBaseAddress(pixbuf);
        linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
    }

    av_image_copy(vda->tmp_frame->data, vda->tmp_frame->linesize,
                  (const uint8_t **)data, linesize, vda->tmp_frame->format,
                  frame->width, frame->height);

    CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);

    /* Carry timestamps/metadata over, then hand the copy back to the caller. */
    ret = av_frame_copy_props(vda->tmp_frame, frame);
    if (ret < 0)
        return ret;

    av_frame_unref(frame);
    av_frame_move_ref(frame, vda->tmp_frame);

    return 0;
}
/* Tear down the VDA hwaccel state installed by vda_init(): detach the
 * per-stream hooks, drop the staging frame, release the default VDA
 * decoder and free the context itself. */
static void vda_uninit(AVCodecContext *s)
{
    InputStream *ist = s->opaque;
    VDAContext *ctx  = ist->hwaccel_ctx;

    ist->hwaccel_retrieve_data = NULL;
    ist->hwaccel_uninit        = NULL;

    av_frame_free(&ctx->tmp_frame);

    av_vda_default_free(s);
    av_freep(&ist->hwaccel_ctx);
}
/* ffmpeg.c hwaccel init callback for VDA: allocate the per-stream context,
 * install the uninit/retrieve_data hooks and create the default VDA
 * decoder. Returns 0 on success or a negative AVERROR code. */
int vda_init(AVCodecContext *s)
{
    InputStream *ist = s->opaque;
    /* With -hwaccel auto a failure here is not fatal, so log it quietly. */
    int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
    VDAContext *vda;
    int ret;

    vda = av_mallocz(sizeof(*vda));
    if (!vda)
        return AVERROR(ENOMEM);

    /* Hooks must be in place before the first goto fail, so vda_uninit()
     * can clean up partially-initialized state. */
    ist->hwaccel_ctx = vda;
    ist->hwaccel_uninit = vda_uninit;
    ist->hwaccel_retrieve_data = vda_retrieve_data;

    vda->tmp_frame = av_frame_alloc();
    if (!vda->tmp_frame) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    ret = av_vda_default_init(s);
    if (ret < 0) {
        av_log(NULL, loglevel, "Error creating VDA decoder.\n");
        goto fail;
    }

    return 0;

fail:
    vda_uninit(s);
    return ret;
}

@ -0,0 +1,187 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <CoreServices/CoreServices.h>
#include "config.h"
#include "libavcodec/avcodec.h"
#if CONFIG_VDA
# include "libavcodec/vda.h"
#endif
#if CONFIG_VIDEOTOOLBOX
# include "libavcodec/videotoolbox.h"
#endif
#include "libavutil/imgutils.h"
#include "ffmpeg.h"
typedef struct VTContext {
AVFrame *tmp_frame;
} VTContext;
char *videotoolbox_pixfmt;
/* ffmpeg.c hwaccel "retrieve_data" callback shared by the VideoToolbox and
 * VDA paths. frame->data[3] carries a CVPixelBufferRef from the hardware
 * decoder; copy its contents into the software staging frame
 * (vt->tmp_frame) and move that back into `frame`.
 * Returns 0 on success or a negative AVERROR code. */
static int videotoolbox_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    InputStream *ist = s->opaque;
    VTContext *vt = ist->hwaccel_ctx;
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)frame->data[3];
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    CVReturn err;
    uint8_t *data[4] = { 0 };
    int linesize[4] = { 0 };
    int planes, ret, i;
    char codec_str[32];

    av_frame_unref(vt->tmp_frame);

    /* Map the CoreVideo pixel format to the matching AVPixelFormat.
     * NV12 output is only available on 10.7+, hence the version guard. */
    switch (pixel_format) {
    case kCVPixelFormatType_420YpCbCr8Planar: vt->tmp_frame->format = AV_PIX_FMT_YUV420P; break;
    case kCVPixelFormatType_422YpCbCr8:       vt->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
    case kCVPixelFormatType_32BGRA:           vt->tmp_frame->format = AV_PIX_FMT_BGRA; break;
#ifdef kCFCoreFoundationVersionNumber10_7
    case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange: vt->tmp_frame->format = AV_PIX_FMT_NV12; break;
#endif
    default:
        av_get_codec_tag_string(codec_str, sizeof(codec_str), s->codec_tag);
        av_log(NULL, AV_LOG_ERROR,
               "%s: Unsupported pixel format: %s\n", codec_str, videotoolbox_pixfmt);
        return AVERROR(ENOSYS);
    }

    vt->tmp_frame->width  = frame->width;
    vt->tmp_frame->height = frame->height;
    ret = av_frame_get_buffer(vt->tmp_frame, 32);
    if (ret < 0)
        return ret;

    /* The CV buffer's base address is only valid while it is locked. */
    err = CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (err != kCVReturnSuccess) {
        av_log(NULL, AV_LOG_ERROR, "Error locking the pixel buffer.\n");
        return AVERROR_UNKNOWN;
    }

    /* Gather per-plane pointers and strides from the CV buffer. */
    if (CVPixelBufferIsPlanar(pixbuf)) {
        planes = CVPixelBufferGetPlaneCount(pixbuf);
        for (i = 0; i < planes; i++) {
            data[i]     = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
            linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
        }
    } else {
        data[0]     = CVPixelBufferGetBaseAddress(pixbuf);
        linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
    }

    av_image_copy(vt->tmp_frame->data, vt->tmp_frame->linesize,
                  (const uint8_t **)data, linesize, vt->tmp_frame->format,
                  frame->width, frame->height);

    ret = av_frame_copy_props(vt->tmp_frame, frame);
    CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (ret < 0)
        return ret;

    av_frame_unref(frame);
    av_frame_move_ref(frame, vt->tmp_frame);

    return 0;
}
/* ffmpeg.c hwaccel uninit callback shared by the VideoToolbox and VDA
 * paths: detach the per-stream hooks, free the staging frame, release
 * whichever default hwaccel context videotoolbox_init() created, and free
 * the VTContext itself. */
static void videotoolbox_uninit(AVCodecContext *s)
{
    InputStream *ist = s->opaque;
    VTContext *vt = ist->hwaccel_ctx;

    ist->hwaccel_uninit = NULL;
    ist->hwaccel_retrieve_data = NULL;

    av_frame_free(&vt->tmp_frame);

    /* Release the matching default context; hwaccel_id tells us which
     * branch of videotoolbox_init() ran. */
    if (ist->hwaccel_id == HWACCEL_VIDEOTOOLBOX) {
#if CONFIG_VIDEOTOOLBOX
        av_videotoolbox_default_free(s);
#endif
    } else {
#if CONFIG_VDA
        av_vda_default_free(s);
#endif
    }

    av_freep(&ist->hwaccel_ctx);
}
/* ffmpeg.c hwaccel init callback shared by the "videotoolbox" and "vda"
 * hwaccels: allocates the per-stream VTContext, installs the uninit and
 * retrieve_data hooks, and creates the default hardware decoder. When the
 * user supplied -videotoolbox_pixfmt, the requested CoreVideo pixel format
 * is configured on an explicitly allocated hwaccel context instead.
 * Returns 0 on success or a negative AVERROR code. */
int videotoolbox_init(AVCodecContext *s)
{
    InputStream *ist = s->opaque;
    /* With -hwaccel auto a failure here is not fatal, so log it quietly. */
    int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
    int ret = 0;
    VTContext *vt;

    vt = av_mallocz(sizeof(*vt));
    if (!vt)
        return AVERROR(ENOMEM);

    /* Hooks must be in place before the first goto fail, so
     * videotoolbox_uninit() can clean up partially-initialized state. */
    ist->hwaccel_ctx           = vt;
    ist->hwaccel_uninit        = videotoolbox_uninit;
    ist->hwaccel_retrieve_data = videotoolbox_retrieve_data;

    vt->tmp_frame = av_frame_alloc();
    if (!vt->tmp_frame) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (ist->hwaccel_id == HWACCEL_VIDEOTOOLBOX) {
#if CONFIG_VIDEOTOOLBOX
        if (!videotoolbox_pixfmt) {
            ret = av_videotoolbox_default_init(s);
        } else {
            AVVideotoolboxContext *vtctx = av_videotoolbox_alloc_context();
            CFStringRef pixfmt_str;

            /* Fix: the allocation results were previously used unchecked. */
            if (!vtctx) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }

            pixfmt_str = CFStringCreateWithCString(kCFAllocatorDefault,
                                                   videotoolbox_pixfmt,
                                                   kCFStringEncodingUTF8);
            if (!pixfmt_str) {
                av_free(vtctx);
                ret = AVERROR(ENOMEM);
                goto fail;
            }

            vtctx->cv_pix_fmt_type = UTGetOSTypeFromString(pixfmt_str);
            ret = av_videotoolbox_default_init2(s, vtctx);
            CFRelease(pixfmt_str);
        }
#endif
    } else {
#if CONFIG_VDA
        if (!videotoolbox_pixfmt) {
            ret = av_vda_default_init(s);
        } else {
            AVVDAContext *vdactx = av_vda_alloc_context();
            CFStringRef pixfmt_str;

            /* Fix: the allocation results were previously used unchecked. */
            if (!vdactx) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }

            pixfmt_str = CFStringCreateWithCString(kCFAllocatorDefault,
                                                   videotoolbox_pixfmt,
                                                   kCFStringEncodingUTF8);
            if (!pixfmt_str) {
                av_free(vdactx);
                ret = AVERROR(ENOMEM);
                goto fail;
            }

            vdactx->cv_pix_fmt_type = UTGetOSTypeFromString(pixfmt_str);
            ret = av_vda_default_init2(s, vdactx);
            CFRelease(pixfmt_str);
        }
#endif
    }

    if (ret < 0) {
        av_log(NULL, loglevel,
               "Error creating %s decoder.\n", ist->hwaccel_id == HWACCEL_VIDEOTOOLBOX ? "Videotoolbox" : "VDA");
        goto fail;
    }

    return 0;

fail:
    videotoolbox_uninit(s);
    return ret;
}

@ -13,6 +13,7 @@ HEADERS = avcodec.h \
vda.h \
vdpau.h \
version.h \
videotoolbox.h \
vorbis_parser.h \
xvmc.h \
@ -696,28 +697,34 @@ OBJS-$(CONFIG_VIMA_DECODER) += vima.o adpcm_data.o
OBJS-$(CONFIG_D3D11VA) += dxva2.o
OBJS-$(CONFIG_DXVA2) += dxva2.o
OBJS-$(CONFIG_VAAPI) += vaapi.o
OBJS-$(CONFIG_VDA) += vda.o
OBJS-$(CONFIG_VDA) += vda.o videotoolbox.o
OBJS-$(CONFIG_VIDEOTOOLBOX) += videotoolbox.o
OBJS-$(CONFIG_VDPAU) += vdpau.o
OBJS-$(CONFIG_H263_VAAPI_HWACCEL) += vaapi_mpeg4.o
OBJS-$(CONFIG_H263_VDPAU_HWACCEL) += vdpau_mpeg4.o
OBJS-$(CONFIG_H263_VIDEOTOOLBOX_HWACCEL) += videotoolbox.o
OBJS-$(CONFIG_H264_D3D11VA_HWACCEL) += dxva2_h264.o
OBJS-$(CONFIG_H264_DXVA2_HWACCEL) += dxva2_h264.o
OBJS-$(CONFIG_H264_VAAPI_HWACCEL) += vaapi_h264.o
OBJS-$(CONFIG_H264_VDA_HWACCEL) += vda_h264.o
OBJS-$(CONFIG_H264_VDPAU_HWACCEL) += vdpau_h264.o
OBJS-$(CONFIG_H264_VIDEOTOOLBOX_HWACCEL) += videotoolbox.o
OBJS-$(CONFIG_HEVC_D3D11VA_HWACCEL) += dxva2_hevc.o
OBJS-$(CONFIG_HEVC_DXVA2_HWACCEL) += dxva2_hevc.o
OBJS-$(CONFIG_HEVC_VDPAU_HWACCEL) += vdpau_hevc.o
OBJS-$(CONFIG_MPEG1_VDPAU_HWACCEL) += vdpau_mpeg12.o
OBJS-$(CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL) += videotoolbox.o
OBJS-$(CONFIG_MPEG1_XVMC_HWACCEL) += mpegvideo_xvmc.o
OBJS-$(CONFIG_MPEG2_D3D11VA_HWACCEL) += dxva2_mpeg2.o
OBJS-$(CONFIG_MPEG2_DXVA2_HWACCEL) += dxva2_mpeg2.o
OBJS-$(CONFIG_MPEG2_VAAPI_HWACCEL) += vaapi_mpeg2.o
OBJS-$(CONFIG_MPEG2_VDPAU_HWACCEL) += vdpau_mpeg12.o
OBJS-$(CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL) += videotoolbox.o
OBJS-$(CONFIG_MPEG2_XVMC_HWACCEL) += mpegvideo_xvmc.o
OBJS-$(CONFIG_MPEG4_VAAPI_HWACCEL) += vaapi_mpeg4.o
OBJS-$(CONFIG_MPEG4_VDPAU_HWACCEL) += vdpau_mpeg4.o
OBJS-$(CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL) += videotoolbox.o
OBJS-$(CONFIG_VC1_D3D11VA_HWACCEL) += dxva2_vc1.o
OBJS-$(CONFIG_VC1_DXVA2_HWACCEL) += dxva2_vc1.o
OBJS-$(CONFIG_VC1_VAAPI_HWACCEL) += vaapi_vc1.o
@ -912,8 +919,9 @@ SKIPHEADERS-$(CONFIG_QSVDEC) += qsvdec.h
SKIPHEADERS-$(CONFIG_QSVENC) += qsvenc.h
SKIPHEADERS-$(CONFIG_XVMC) += xvmc.h
SKIPHEADERS-$(CONFIG_VAAPI) += vaapi_internal.h
SKIPHEADERS-$(CONFIG_VDA) += vda.h vda_internal.h
SKIPHEADERS-$(CONFIG_VDA) += vda.h vda_vt_internal.h
SKIPHEADERS-$(CONFIG_VDPAU) += vdpau.h vdpau_internal.h
SKIPHEADERS-$(CONFIG_VIDEOTOOLBOX) += videotoolbox.h vda_vt_internal.h
TESTPROGS = imgconvert \
jpeg2000dwt \

@ -76,6 +76,7 @@ void avcodec_register_all(void)
/* hardware accelerators */
REGISTER_HWACCEL(H263_VAAPI, h263_vaapi);
REGISTER_HWACCEL(H263_VDPAU, h263_vdpau);
REGISTER_HWACCEL(H263_VIDEOTOOLBOX, h263_videotoolbox);
REGISTER_HWACCEL(H264_D3D11VA, h264_d3d11va);
REGISTER_HWACCEL(H264_DXVA2, h264_dxva2);
REGISTER_HWACCEL(H264_MMAL, h264_mmal);
@ -84,20 +85,24 @@ void avcodec_register_all(void)
REGISTER_HWACCEL(H264_VDA, h264_vda);
REGISTER_HWACCEL(H264_VDA_OLD, h264_vda_old);
REGISTER_HWACCEL(H264_VDPAU, h264_vdpau);
REGISTER_HWACCEL(H264_VIDEOTOOLBOX, h264_videotoolbox);
REGISTER_HWACCEL(HEVC_D3D11VA, hevc_d3d11va);
REGISTER_HWACCEL(HEVC_DXVA2, hevc_dxva2);
REGISTER_HWACCEL(HEVC_QSV, hevc_qsv);
REGISTER_HWACCEL(HEVC_VDPAU, hevc_vdpau);
REGISTER_HWACCEL(MPEG1_XVMC, mpeg1_xvmc);
REGISTER_HWACCEL(MPEG1_VDPAU, mpeg1_vdpau);
REGISTER_HWACCEL(MPEG1_VIDEOTOOLBOX, mpeg1_videotoolbox);
REGISTER_HWACCEL(MPEG2_XVMC, mpeg2_xvmc);
REGISTER_HWACCEL(MPEG2_D3D11VA, mpeg2_d3d11va);
REGISTER_HWACCEL(MPEG2_DXVA2, mpeg2_dxva2);
REGISTER_HWACCEL(MPEG2_QSV, mpeg2_qsv);
REGISTER_HWACCEL(MPEG2_VAAPI, mpeg2_vaapi);
REGISTER_HWACCEL(MPEG2_VDPAU, mpeg2_vdpau);
REGISTER_HWACCEL(MPEG2_VIDEOTOOLBOX, mpeg2_videotoolbox);
REGISTER_HWACCEL(MPEG4_VAAPI, mpeg4_vaapi);
REGISTER_HWACCEL(MPEG4_VDPAU, mpeg4_vdpau);
REGISTER_HWACCEL(MPEG4_VIDEOTOOLBOX, mpeg4_videotoolbox);
REGISTER_HWACCEL(VC1_D3D11VA, vc1_d3d11va);
REGISTER_HWACCEL(VC1_DXVA2, vc1_dxva2);
REGISTER_HWACCEL(VC1_VAAPI, vc1_vaapi);

@ -722,6 +722,9 @@ const enum AVPixelFormat ff_h263_hwaccel_pixfmt_list_420[] = {
#endif
#if CONFIG_H263_VDPAU_HWACCEL || CONFIG_MPEG4_VDPAU_HWACCEL
AV_PIX_FMT_VDPAU,
#endif
#if CONFIG_H263_VIDEOTOOLBOX_HWACCEL || CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL
AV_PIX_FMT_VIDEOTOOLBOX,
#endif
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE

@ -868,6 +868,7 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
CONFIG_H264_D3D11VA_HWACCEL + \
CONFIG_H264_VAAPI_HWACCEL + \
(CONFIG_H264_VDA_HWACCEL * 2) + \
CONFIG_H264_VIDEOTOOLBOX_HWACCEL + \
CONFIG_H264_VDPAU_HWACCEL)
enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
const enum AVPixelFormat *choices = pix_fmts;
@ -947,6 +948,9 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
#if CONFIG_H264_VDA_HWACCEL
*fmt++ = AV_PIX_FMT_VDA_VLD;
*fmt++ = AV_PIX_FMT_VDA;
#endif
#if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
*fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
if (h->avctx->codec->pix_fmts)
choices = h->avctx->codec->pix_fmts;

@ -1210,6 +1210,9 @@ static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = {
#endif
#if CONFIG_MPEG2_VAAPI_HWACCEL
AV_PIX_FMT_VAAPI_VLD,
#endif
#if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
AV_PIX_FMT_VIDEOTOOLBOX,
#endif
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE

@ -21,7 +21,7 @@
#include "libavutil/mem.h"
#include "vda.h"
#include "vda_internal.h"
#include "vda_vt_internal.h"
#if CONFIG_H264_VDA_HWACCEL
AVVDAContext *av_vda_alloc_context(void)

@ -32,20 +32,7 @@ struct vda_buffer {
CVPixelBufferRef cv_buffer;
};
#include "internal.h"
#include "vda_internal.h"
typedef struct VDAContext {
// The current bitstream buffer.
uint8_t *bitstream;
// The current size of the bitstream.
int bitstream_size;
// The reference size used for fast reallocation.
int allocated_size;
CVImageBufferRef frame;
} VDAContext;
#include "vda_vt_internal.h"
/* Decoder callback that adds the vda frame to the queue in display order. */
static void vda_decoder_callback(void *vda_hw_ctx,
@ -68,7 +55,7 @@ static void vda_decoder_callback(void *vda_hw_ctx,
vda_ctx->cv_buffer = CVPixelBufferRetain(image_buffer);
}
static int vda_sync_decode(VDAContext *ctx, struct vda_context *vda_ctx)
static int vda_sync_decode(VTContext *ctx, struct vda_context *vda_ctx)
{
OSStatus status;
CFDataRef coded_frame;
@ -93,7 +80,7 @@ static int vda_old_h264_start_frame(AVCodecContext *avctx,
av_unused const uint8_t *buffer,
av_unused uint32_t size)
{
VDAContext *vda = avctx->internal->hwaccel_priv_data;
VTContext *vda = avctx->internal->hwaccel_priv_data;
struct vda_context *vda_ctx = avctx->hwaccel_context;
if (!vda_ctx->decoder)
@ -108,7 +95,7 @@ static int vda_old_h264_decode_slice(AVCodecContext *avctx,
const uint8_t *buffer,
uint32_t size)
{
VDAContext *vda = avctx->internal->hwaccel_priv_data;
VTContext *vda = avctx->internal->hwaccel_priv_data;
struct vda_context *vda_ctx = avctx->hwaccel_context;
void *tmp;
@ -141,7 +128,7 @@ static void vda_h264_release_buffer(void *opaque, uint8_t *data)
static int vda_old_h264_end_frame(AVCodecContext *avctx)
{
H264Context *h = avctx->priv_data;
VDAContext *vda = avctx->internal->hwaccel_priv_data;
VTContext *vda = avctx->internal->hwaccel_priv_data;
struct vda_context *vda_ctx = avctx->hwaccel_context;
AVFrame *frame = h->cur_pic_ptr->f;
struct vda_buffer *context;
@ -271,17 +258,6 @@ int ff_vda_destroy_decoder(struct vda_context *vda_ctx)
return status;
}
static int vda_h264_uninit(AVCodecContext *avctx)
{
VDAContext *vda = avctx->internal->hwaccel_priv_data;
if (vda) {
av_freep(&vda->bitstream);
if (vda->frame)
CVPixelBufferRelease(vda->frame);
}
return 0;
}
AVHWAccel ff_h264_vda_old_hwaccel = {
.name = "h264_vda",
.type = AVMEDIA_TYPE_VIDEO,
@ -290,8 +266,8 @@ AVHWAccel ff_h264_vda_old_hwaccel = {
.start_frame = vda_old_h264_start_frame,
.decode_slice = vda_old_h264_decode_slice,
.end_frame = vda_old_h264_end_frame,
.uninit = vda_h264_uninit,
.priv_data_size = sizeof(VDAContext),
.uninit = ff_videotoolbox_uninit,
.priv_data_size = sizeof(VTContext),
};
void ff_vda_output_callback(void *opaque,
@ -301,7 +277,7 @@ void ff_vda_output_callback(void *opaque,
CVImageBufferRef image_buffer)
{
AVCodecContext *ctx = opaque;
VDAContext *vda = ctx->internal->hwaccel_priv_data;
VTContext *vda = ctx->internal->hwaccel_priv_data;
if (vda->frame) {
@ -315,65 +291,10 @@ void ff_vda_output_callback(void *opaque,
vda->frame = CVPixelBufferRetain(image_buffer);
}
static int vda_h264_start_frame(AVCodecContext *avctx,
const uint8_t *buffer,
uint32_t size)
{
VDAContext *vda = avctx->internal->hwaccel_priv_data;
H264Context *h = avctx->priv_data;
if (h->is_avc == 1) {
void *tmp;
vda->bitstream_size = 0;
tmp = av_fast_realloc(vda->bitstream,
&vda->allocated_size,
size);
vda->bitstream = tmp;
memcpy(vda->bitstream, buffer, size);
vda->bitstream_size = size;
} else {
vda->bitstream_size = 0;
}
return 0;
}
static int vda_h264_decode_slice(AVCodecContext *avctx,
const uint8_t *buffer,
uint32_t size)
{
VDAContext *vda = avctx->internal->hwaccel_priv_data;
H264Context *h = avctx->priv_data;
void *tmp;
if (h->is_avc == 1)
return 0;
tmp = av_fast_realloc(vda->bitstream,
&vda->allocated_size,
vda->bitstream_size + size + 4);
if (!tmp)
return AVERROR(ENOMEM);
vda->bitstream = tmp;
AV_WB32(vda->bitstream + vda->bitstream_size, size);
memcpy(vda->bitstream + vda->bitstream_size + 4, buffer, size);
vda->bitstream_size += size + 4;
return 0;
}
static void release_buffer(void *opaque, uint8_t *data)
{
CVImageBufferRef frame = (CVImageBufferRef)data;
CVPixelBufferRelease(frame);
}
static int vda_h264_end_frame(AVCodecContext *avctx)
{
H264Context *h = avctx->priv_data;
VDAContext *vda = avctx->internal->hwaccel_priv_data;
VTContext *vda = avctx->internal->hwaccel_priv_data;
AVVDAContext *vda_ctx = avctx->hwaccel_context;
AVFrame *frame = h->cur_pic_ptr->f;
uint32_t flush_flags = 1 << 0; ///< kVDADecoderFlush_emitFrames
@ -403,19 +324,7 @@ static int vda_h264_end_frame(AVCodecContext *avctx)
return AVERROR_UNKNOWN;
}
av_buffer_unref(&frame->buf[0]);
frame->buf[0] = av_buffer_create((uint8_t*)vda->frame,
sizeof(vda->frame),
release_buffer, NULL,
AV_BUFFER_FLAG_READONLY);
if (!frame->buf[0])
return AVERROR(ENOMEM);
frame->data[3] = (uint8_t*)vda->frame;
vda->frame = NULL;
return 0;
return ff_videotoolbox_buffer_create(vda, frame);
}
int ff_vda_default_init(AVCodecContext *avctx)
@ -434,26 +343,7 @@ int ff_vda_default_init(AVCodecContext *avctx)
// kCVPixelFormatType_420YpCbCr8Planar;
/* Each VCL NAL in the bitstream sent to the decoder
* is preceded by a 4 bytes length header.
* Change the avcC atom header if needed, to signal headers of 4 bytes. */
if (avctx->extradata_size >= 4 && (avctx->extradata[4] & 0x03) != 0x03) {
uint8_t *rw_extradata;
if (!(rw_extradata = av_malloc(avctx->extradata_size)))
return AVERROR(ENOMEM);
memcpy(rw_extradata, avctx->extradata, avctx->extradata_size);
rw_extradata[4] |= 0x03;
avc_data = CFDataCreate(kCFAllocatorDefault, rw_extradata, avctx->extradata_size);
av_freep(&rw_extradata);
} else {
avc_data = CFDataCreate(kCFAllocatorDefault,
avctx->extradata, avctx->extradata_size);
}
avc_data = ff_videotoolbox_avcc_extradata_create(avctx);
config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
4,
@ -521,27 +411,15 @@ int ff_vda_default_init(AVCodecContext *avctx)
}
}
static int vda_h264_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
{
frame->width = avctx->width;
frame->height = avctx->height;
frame->format = avctx->pix_fmt;
frame->buf[0] = av_buffer_alloc(1);
if (!frame->buf[0])
return AVERROR(ENOMEM);
return 0;
}
AVHWAccel ff_h264_vda_hwaccel = {
.name = "h264_vda",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_H264,
.pix_fmt = AV_PIX_FMT_VDA,
.alloc_frame = vda_h264_alloc_frame,
.start_frame = vda_h264_start_frame,
.decode_slice = vda_h264_decode_slice,
.alloc_frame = ff_videotoolbox_alloc_frame,
.start_frame = ff_videotoolbox_h264_start_frame,
.decode_slice = ff_videotoolbox_h264_decode_slice,
.end_frame = vda_h264_end_frame,
.uninit = vda_h264_uninit,
.priv_data_size = sizeof(VDAContext),
.uninit = ff_videotoolbox_uninit,
.priv_data_size = sizeof(VTContext),
};

@ -16,10 +16,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_VDA_INTERNAL_H
#define AVCODEC_VDA_INTERNAL_H
#include "vda.h"
#ifndef AVCODEC_VDA_VT_INTERNAL_H
#define AVCODEC_VDA_VT_INTERNAL_H
void ff_vda_output_callback(void *vda_hw_ctx,
CFDictionaryRef user_info,
@ -30,4 +28,28 @@ void ff_vda_output_callback(void *vda_hw_ctx,
int ff_vda_default_init(AVCodecContext *avctx);
void ff_vda_default_free(AVCodecContext *avctx);
#endif /* AVCODEC_VDA_INTERNAL_H */
/* Per-stream private state shared by the VDA and VideoToolbox hwaccels
 * (stored in AVCodecInternal.hwaccel_priv_data). */
typedef struct VTContext {
    // The current bitstream buffer.
    uint8_t *bitstream;

    // The current size of the bitstream.
    int bitstream_size;

    // The reference size used for fast reallocation.
    // NOTE(review): av_fast_realloc() takes an unsigned int *; this field
    // is a plain int — confirm the pointer conversion is intended.
    int allocated_size;

    // The core video buffer
    CVImageBufferRef frame;
} VTContext;

/* Give the frame a 1-byte placeholder buffer; the real CV buffer is
 * attached at end_frame time via ff_videotoolbox_buffer_create(). */
int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame);
/* Free the accumulated bitstream and release any pending CV buffer. */
int ff_videotoolbox_uninit(AVCodecContext *avctx);
/* Wrap vtctx->frame into frame->buf[0]/data[3]; ownership moves to frame. */
int ff_videotoolbox_buffer_create(VTContext *vtctx, AVFrame *frame);
/* Reset the bitstream; for AVC input, copy the whole access unit. */
int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
                                     const uint8_t *buffer,
                                     uint32_t size);
/* Append one slice NAL (Annex B converted to 4-byte length prefixes). */
int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
                                      const uint8_t *buffer,
                                      uint32_t size);
/* Build the avcC extradata blob handed to the decoder session. */
CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx);
#endif /* AVCODEC_VDA_VT_INTERNAL_H */

@ -29,8 +29,8 @@
#include "libavutil/version.h"
#define LIBAVCODEC_VERSION_MAJOR 56
#define LIBAVCODEC_VERSION_MINOR 56
#define LIBAVCODEC_VERSION_MICRO 101
#define LIBAVCODEC_VERSION_MINOR 57
#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
LIBAVCODEC_VERSION_MINOR, \

@ -0,0 +1,690 @@
/*
* Videotoolbox hardware acceleration
*
* copyright (c) 2012 Sebastien Zwickert
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#if CONFIG_VIDEOTOOLBOX
# include "videotoolbox.h"
#else
# include "vda.h"
#endif
#include "vda_vt_internal.h"
#include "libavutil/avutil.h"
#include "bytestream.h"
#include "h264.h"
#include "mpegvideo.h"
#ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
# define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
#endif
#define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
/* AVBuffer free callback: drop the CoreVideo reference that
 * ff_videotoolbox_buffer_create() wrapped into the AVFrame. */
static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
{
    CVPixelBufferRelease((CVPixelBufferRef)data);
}
/* Replace the accumulated bitstream with a copy of `buffer` (used for AVC
 * length-prefixed input, which can be forwarded to the decoder verbatim).
 * The backing allocation is grown with av_fast_realloc() and reused across
 * frames. Returns 0 on success or AVERROR(ENOMEM). */
static int videotoolbox_buffer_copy(VTContext *vtctx,
                                    const uint8_t *buffer,
                                    uint32_t size)
{
    void *tmp;

    tmp = av_fast_realloc(vtctx->bitstream,
                          &vtctx->allocated_size,
                          size);
    if (!tmp)
        return AVERROR(ENOMEM);

    vtctx->bitstream = tmp;
    memcpy(vtctx->bitstream, buffer, size);
    vtctx->bitstream_size = size;

    return 0;
}
/* hwaccel alloc_frame callback: size the frame from the codec context and
 * attach a 1-byte placeholder buffer so the frame is refcounted; the real
 * CV buffer is attached later at end_frame time.
 * Returns 0 on success or AVERROR(ENOMEM). */
int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
{
    frame->format = avctx->pix_fmt;
    frame->width  = avctx->width;
    frame->height = avctx->height;

    frame->buf[0] = av_buffer_alloc(1);

    return frame->buf[0] ? 0 : AVERROR(ENOMEM);
}
/* Build the avcC extradata blob passed to the decoder session.
 * Each VCL NAL in the bitstream sent to the decoder is preceded by a
 * 4 bytes length header, so the avcC atom header is patched if needed to
 * signal 4-byte length headers (lengthSizeMinusOne in byte 4).
 * Returns a new CFDataRef (caller releases) or NULL on allocation failure.
 * Fix: the old `extradata_size >= 4` guard read extradata[4], a one-byte
 * out-of-bounds access when extradata_size == 4; require >= 5 bytes. */
CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
{
    CFDataRef data = NULL;

    if (avctx->extradata_size >= 5 && (avctx->extradata[4] & 0x03) != 0x03) {
        uint8_t *rw_extradata = av_memdup(avctx->extradata, avctx->extradata_size);
        if (!rw_extradata)
            return NULL;
        rw_extradata[4] |= 0x03;
        data = CFDataCreate(kCFAllocatorDefault, rw_extradata, avctx->extradata_size);
        av_freep(&rw_extradata);
    } else {
        data = CFDataCreate(kCFAllocatorDefault, avctx->extradata, avctx->extradata_size);
    }
    return data;
}
/* Attach the decoded CVImageBuffer held in vtctx->frame to `frame`:
 * frame->buf[0] wraps the CV buffer (released via
 * videotoolbox_buffer_release when the last reference goes away) and
 * frame->data[3] carries the raw CVPixelBufferRef, per the hwaccel pixel
 * format convention. Ownership of vtctx->frame moves to the AVFrame.
 * Returns 0 on success or AVERROR(ENOMEM). */
int ff_videotoolbox_buffer_create(VTContext *vtctx, AVFrame *frame)
{
    av_buffer_unref(&frame->buf[0]);
    frame->buf[0] = av_buffer_create((uint8_t*)vtctx->frame,
                                     sizeof(vtctx->frame),
                                     videotoolbox_buffer_release,
                                     NULL,
                                     AV_BUFFER_FLAG_READONLY);
    if (!frame->buf[0]) {
        return AVERROR(ENOMEM);
    }

    frame->data[3] = (uint8_t*)vtctx->frame;
    vtctx->frame = NULL;

    return 0;
}
/**
 * H.264 start_frame: in AVC (length-prefixed) mode the whole access unit
 * arrives at once and is buffered here; in Annex B mode the NAL units are
 * accumulated by the decode_slice callback instead.
 */
int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
                                     const uint8_t *buffer,
                                     uint32_t size)
{
    H264Context *h   = avctx->priv_data;
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    vtctx->bitstream_size = 0;

    if (h->is_avc != 1)
        return 0;

    return videotoolbox_buffer_copy(vtctx, buffer, size);
}
/**
 * H.264 decode_slice: append one NAL unit to the frame bitstream, prefixed
 * with a 4-byte big-endian length header as required by the avcC format.
 * No-op in AVC mode where start_frame already copied the access unit.
 */
int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
                                      const uint8_t *buffer,
                                      uint32_t size)
{
    H264Context *h   = avctx->priv_data;
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    void *grown;

    if (h->is_avc == 1)
        return 0;

    grown = av_fast_realloc(vtctx->bitstream,
                            &vtctx->allocated_size,
                            vtctx->bitstream_size + size + 4);
    if (!grown)
        return AVERROR(ENOMEM);

    vtctx->bitstream = grown;

    AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
    memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
    vtctx->bitstream_size += size + 4;

    return 0;
}
/* Release the hwaccel's private bitstream buffer and any pending frame. */
int ff_videotoolbox_uninit(AVCodecContext *avctx)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    if (!vtctx)
        return 0;

    av_freep(&vtctx->bitstream);
    if (vtctx->frame)
        CVPixelBufferRelease(vtctx->frame);

    return 0;
}
#if CONFIG_VIDEOTOOLBOX
/**
 * Write an MP4 descriptor length as four 7-bit groups, most significant
 * first, with the continuation bit (0x80) set on all but the last byte.
 */
static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
{
    int shift;

    for (shift = 21; shift >= 0; shift -= 7) {
        uint8_t group = (length >> shift) & 0x7F;
        if (shift)
            group |= 0x80;
        bytestream2_put_byteu(pb, group);
    }
}
/**
 * Build an MP4 "esds" atom payload from avctx->extradata so VideoToolbox
 * can parse the MPEG-4 decoder configuration. The descriptor layout is
 * order-sensitive: ES_Descr > DecoderConfigDescr > DecSpecificInfo,
 * followed by an SLConfigDescriptor. Each descriptor length is emitted as
 * 4 bytes by videotoolbox_write_mp4_descr_length(), hence the 5-byte
 * tag+length accounting below. Returns NULL on allocation failure; the
 * caller owns (and must CFRelease) the returned CFData.
 */
static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
{
    CFDataRef data;
    uint8_t *rw_extradata;
    PutByteContext pb;
    int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
    // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
    int config_size = 13 + 5 + avctx->extradata_size;
    int s;

    if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
        return NULL;

    bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
    bytestream2_put_byteu(&pb, 0); // version
    bytestream2_put_ne24(&pb, 0); // flags

    // elementary stream descriptor
    bytestream2_put_byteu(&pb, 0x03); // ES_DescrTag
    videotoolbox_write_mp4_descr_length(&pb, full_size);
    bytestream2_put_ne16(&pb, 0); // esid
    bytestream2_put_byteu(&pb, 0); // stream priority (0-32)

    // decoder configuration descriptor
    bytestream2_put_byteu(&pb, 0x04); // DecoderConfigDescrTag
    videotoolbox_write_mp4_descr_length(&pb, config_size);
    bytestream2_put_byteu(&pb, 32); // object type indication. 32 = CODEC_ID_MPEG4
    bytestream2_put_byteu(&pb, 0x11); // stream type
    bytestream2_put_ne24(&pb, 0); // buffer size
    bytestream2_put_ne32(&pb, 0); // max bitrate
    bytestream2_put_ne32(&pb, 0); // avg bitrate

    // decoder specific descriptor
    bytestream2_put_byteu(&pb, 0x05); ///< DecSpecificInfoTag
    videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);

    bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);

    // SLConfigDescriptor
    bytestream2_put_byteu(&pb, 0x06); // SLConfigDescrTag
    bytestream2_put_byteu(&pb, 0x01); // length
    bytestream2_put_byteu(&pb, 0x02); //

    // number of bytes actually written so far
    s = bytestream2_size_p(&pb);

    data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);

    av_freep(&rw_extradata);
    return data;
}
/**
 * Wrap an already-filled memory block into a one-sample CMSampleBuffer.
 * The block buffer neither copies nor owns the memory (kCFAllocatorNull),
 * so `buffer` must remain valid while the sample buffer is in use.
 * Returns NULL on failure; the caller must CFRelease() the result.
 */
static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
                                                           void *buffer,
                                                           int size)
{
    OSStatus status;
    CMBlockBufferRef  block_buf;
    CMSampleBufferRef sample_buf;

    block_buf  = NULL;
    sample_buf = NULL;

    status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
                                                buffer,             // memoryBlock
                                                size,               // blockLength
                                                kCFAllocatorNull,   // blockAllocator
                                                NULL,               // customBlockSource
                                                0,                  // offsetToData
                                                size,               // dataLength
                                                0,                  // flags
                                                &block_buf);

    /* CoreMedia returns 0 (noErr) on success. */
    if (!status) {
        status = CMSampleBufferCreate(kCFAllocatorDefault,  // allocator
                                      block_buf,            // dataBuffer
                                      TRUE,                 // dataReady
                                      0,                    // makeDataReadyCallback
                                      0,                    // makeDataReadyRefcon
                                      fmt_desc,             // formatDescription
                                      1,                    // numSamples
                                      0,                    // numSampleTimingEntries
                                      NULL,                 // sampleTimingArray
                                      0,                    // numSampleSizeEntries
                                      NULL,                 // sampleSizeArray
                                      &sample_buf);
    }

    /* The sample buffer retains the block buffer; drop our reference. */
    if (block_buf)
        CFRelease(block_buf);

    return sample_buf;
}
/**
 * VTDecompressionSession output callback: stash the decoded CVPixelBuffer
 * in the hwaccel context, releasing any frame left from a previous call.
 * `opaque` is the AVCodecContext registered at session creation.
 */
static void videotoolbox_decoder_callback(void *opaque,
                                          void *sourceFrameRefCon,
                                          OSStatus status,
                                          VTDecodeInfoFlags flags,
                                          CVImageBufferRef image_buffer,
                                          CMTime pts,
                                          CMTime duration)
{
    AVCodecContext *avctx = opaque;
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    if (vtctx->frame) {
        CVPixelBufferRelease(vtctx->frame);
        vtctx->frame = NULL;
    }

    if (!image_buffer) {
        /* Log on the codec context (not NULL) so the message is attributed
         * to the decoder instance that produced it. */
        av_log(avctx, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
        return;
    }

    vtctx->frame = CVPixelBufferRetain(image_buffer);
}
/**
 * Wrap the accumulated bitstream in a CMSampleBuffer, submit it to the
 * decompression session and wait for the asynchronous output callback.
 * Returns noErr (0) on success, a CoreMedia/VideoToolbox error otherwise.
 */
static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
{
    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    CMSampleBufferRef sample_buf;
    OSStatus status;

    sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
                                                   vtctx->bitstream,
                                                   vtctx->bitstream_size);
    if (!sample_buf)
        return -1;

    status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
                                               sample_buf,
                                               0,     // decodeFlags
                                               NULL,  // sourceFrameRefCon
                                               0);    // infoFlagsOut
    if (status == noErr)
        status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);

    CFRelease(sample_buf);

    return status;
}
/**
 * Decode the buffered picture and attach the resulting CVPixelBuffer to
 * the output AVFrame. Shared by the H.264 and MPEG end_frame callbacks.
 */
static int videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
{
    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    int ret;

    if (!videotoolbox->session || !vtctx->bitstream)
        return AVERROR_INVALIDDATA;

    ret = videotoolbox_session_decode_frame(avctx);
    if (ret) {
        av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%d)\n", ret);
        return AVERROR_UNKNOWN;
    }

    /* The output callback must have delivered a pixel buffer by now. */
    if (!vtctx->frame)
        return AVERROR_UNKNOWN;

    return ff_videotoolbox_buffer_create(vtctx, frame);
}
/* H.264 end_frame: finish decoding into the current picture's frame. */
static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;

    return videotoolbox_common_end_frame(avctx, h->cur_pic_ptr->f);
}
/* MPEG-style start_frame: the whole picture arrives at once, just buffer it. */
static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
                                         const uint8_t *buffer,
                                         uint32_t size)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    return videotoolbox_buffer_copy(vtctx, buffer, size);
}
/* Nothing to do per slice: the full picture was buffered in start_frame. */
static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
                                          const uint8_t *buffer,
                                          uint32_t size)
{
    return 0;
}
/* MPEG-style end_frame: finish decoding into the current picture's frame. */
static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;

    return videotoolbox_common_end_frame(avctx, s->current_picture_ptr->f);
}
/**
 * Build the decoder-specification dictionary passed to
 * VTDecompressionSessionCreate: requests hardware acceleration and, when
 * extradata is present, attaches the codec configuration atom ("esds" for
 * MPEG-4, "avcC" for H.264) under the sample-description extensions key.
 * The caller owns (and must CFRelease) the returned dictionary.
 * NOTE(review): CFDictionaryCreateMutable results are not NULL-checked
 * here — presumably acceptable since CF allocation failure is fatal in
 * practice; verify against project policy.
 */
static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
                                                          AVCodecContext *avctx)
{
    CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                                   1,
                                                                   &kCFTypeDictionaryKeyCallBacks,
                                                                   &kCFTypeDictionaryValueCallBacks);

    CFDictionarySetValue(config_info,
                         kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder,
                         kCFBooleanTrue);

    if (avctx->extradata_size) {
        CFMutableDictionaryRef avc_info;
        CFDataRef data = NULL;

        avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                             1,
                                             &kCFTypeDictionaryKeyCallBacks,
                                             &kCFTypeDictionaryValueCallBacks);

        switch (codec_type) {
        case kCMVideoCodecType_MPEG4Video :
            data = videotoolbox_esds_extradata_create(avctx);
            if (data)
                CFDictionarySetValue(avc_info, CFSTR("esds"), data);
            break;
        case kCMVideoCodecType_H264 :
            data = ff_videotoolbox_avcc_extradata_create(avctx);
            if (data)
                CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
            break;
        default:
            break;
        }

        /* The dictionary retains avc_info/data; release our references. */
        CFDictionarySetValue(config_info,
                             kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
                             avc_info);

        if (data)
            CFRelease(data);

        CFRelease(avc_info);
    }
    return config_info;
}
/**
 * Build the destination image buffer attributes for the decompression
 * session: output pixel format, dimensions, and an (empty) IOSurface
 * properties dictionary requesting IOSurface-backed pixel buffers.
 * The caller owns (and must CFRelease) the returned dictionary.
 */
static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
                                                             int height,
                                                             OSType pix_fmt)
{
    CFMutableDictionaryRef buffer_attributes;
    CFMutableDictionaryRef io_surface_properties;
    CFNumberRef cv_pix_fmt;
    CFNumberRef w;
    CFNumberRef h;

    w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
    h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
    cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);

    buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                  4,
                                                  &kCFTypeDictionaryKeyCallBacks,
                                                  &kCFTypeDictionaryValueCallBacks);
    io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                      0,
                                                      &kCFTypeDictionaryKeyCallBacks,
                                                      &kCFTypeDictionaryValueCallBacks);

    CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);

    /* The dictionary retained the values; drop our references. */
    CFRelease(io_surface_properties);
    CFRelease(cv_pix_fmt);
    CFRelease(w);
    CFRelease(h);

    return buffer_attributes;
}
/**
 * Create the CoreMedia video format description for the session.
 * Returns NULL on failure; the caller must CFRelease the result.
 */
static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
                                                                   CFDictionaryRef decoder_spec,
                                                                   int width,
                                                                   int height)
{
    CMFormatDescriptionRef cm_fmt_desc;
    OSStatus err = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
                                                  codec_type,
                                                  width,
                                                  height,
                                                  decoder_spec, // Dictionary of extension
                                                  &cm_fmt_desc);

    return err ? NULL : cm_fmt_desc;
}
/**
 * Create the VTDecompressionSession from the user-provided (or default)
 * AVVideotoolboxContext: maps the codec id to a CoreMedia codec type,
 * builds the format description, decoder specification and destination
 * buffer attributes, then opens the session.
 * Returns 0 on success or a negative AVERROR code.
 * Fix: removed the local `pix_fmt` variable, which was assigned but
 * never read.
 */
static int videotoolbox_default_init(AVCodecContext *avctx)
{
    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
    OSStatus status;
    VTDecompressionOutputCallbackRecord decoder_cb;
    CFDictionaryRef decoder_spec;
    CFDictionaryRef buf_attr;

    if (!videotoolbox) {
        av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
        return -1;
    }

    /* Unknown codec ids leave cm_codec_type as set by the caller. */
    switch( avctx->codec_id ) {
    case AV_CODEC_ID_H263 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
        break;
    case AV_CODEC_ID_H264 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
        break;
    case AV_CODEC_ID_MPEG1VIDEO :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
        break;
    case AV_CODEC_ID_MPEG2VIDEO :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
        break;
    case AV_CODEC_ID_MPEG4 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
        break;
    default :
        break;
    }

    decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);

    videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
                                                                decoder_spec,
                                                                avctx->width,
                                                                avctx->height);
    if (!videotoolbox->cm_fmt_desc) {
        if (decoder_spec)
            CFRelease(decoder_spec);

        av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
        return -1;
    }

    buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
                                                     avctx->height,
                                                     videotoolbox->cv_pix_fmt_type);

    decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
    decoder_cb.decompressionOutputRefCon   = avctx;

    status = VTDecompressionSessionCreate(NULL,                      // allocator
                                          videotoolbox->cm_fmt_desc, // videoFormatDescription
                                          decoder_spec,              // videoDecoderSpecification
                                          buf_attr,                  // destinationImageBufferAttributes
                                          &decoder_cb,               // outputCallback
                                          &videotoolbox->session);   // decompressionSessionOut

    if (decoder_spec)
        CFRelease(decoder_spec);
    if (buf_attr)
        CFRelease(buf_attr);

    /* Translate the VideoToolbox status into an AVERROR code. */
    switch (status) {
    case kVTVideoDecoderNotAvailableNowErr:
    case kVTVideoDecoderUnsupportedDataFormatErr:
        return AVERROR(ENOSYS);
    case kVTVideoDecoderMalfunctionErr:
        return AVERROR(EINVAL);
    case kVTVideoDecoderBadDataErr :
        return AVERROR_INVALIDDATA;
    case 0:
        return 0;
    default:
        return AVERROR_UNKNOWN;
    }
}
/* Tear down the format description and invalidate the session, if any. */
static void videotoolbox_default_free(AVCodecContext *avctx)
{
    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;

    if (!videotoolbox)
        return;

    if (videotoolbox->cm_fmt_desc)
        CFRelease(videotoolbox->cm_fmt_desc);

    if (videotoolbox->session)
        VTDecompressionSessionInvalidate(videotoolbox->session);
}
/* VideoToolbox hwaccel entry for H.263 (whole-picture MPEG-style callbacks). */
AVHWAccel ff_h263_videotoolbox_hwaccel = {
    .name = "h263_videotoolbox",
    .type = AVMEDIA_TYPE_VIDEO,
    .id = AV_CODEC_ID_H263,
    .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame = ff_videotoolbox_alloc_frame,
    .start_frame = videotoolbox_mpeg_start_frame,
    .decode_slice = videotoolbox_mpeg_decode_slice,
    .end_frame = videotoolbox_mpeg_end_frame,
    .uninit = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
/* VideoToolbox hwaccel entry for H.264 (NAL units accumulated per slice). */
AVHWAccel ff_h264_videotoolbox_hwaccel = {
    .name = "h264_videotoolbox",
    .type = AVMEDIA_TYPE_VIDEO,
    .id = AV_CODEC_ID_H264,
    .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame = ff_videotoolbox_alloc_frame,
    .start_frame = ff_videotoolbox_h264_start_frame,
    .decode_slice = ff_videotoolbox_h264_decode_slice,
    .end_frame = videotoolbox_h264_end_frame,
    .uninit = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
/* VideoToolbox hwaccel entry for MPEG-1 video. */
AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
    .name = "mpeg1_videotoolbox",
    .type = AVMEDIA_TYPE_VIDEO,
    .id = AV_CODEC_ID_MPEG1VIDEO,
    .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame = ff_videotoolbox_alloc_frame,
    .start_frame = videotoolbox_mpeg_start_frame,
    .decode_slice = videotoolbox_mpeg_decode_slice,
    .end_frame = videotoolbox_mpeg_end_frame,
    .uninit = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
/* VideoToolbox hwaccel entry for MPEG-2 video. */
AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
    .name = "mpeg2_videotoolbox",
    .type = AVMEDIA_TYPE_VIDEO,
    .id = AV_CODEC_ID_MPEG2VIDEO,
    .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame = ff_videotoolbox_alloc_frame,
    .start_frame = videotoolbox_mpeg_start_frame,
    .decode_slice = videotoolbox_mpeg_decode_slice,
    .end_frame = videotoolbox_mpeg_end_frame,
    .uninit = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
/* VideoToolbox hwaccel entry for MPEG-4 Part 2 video. */
AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
    .name = "mpeg4_videotoolbox",
    .type = AVMEDIA_TYPE_VIDEO,
    .id = AV_CODEC_ID_MPEG4,
    .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame = ff_videotoolbox_alloc_frame,
    .start_frame = videotoolbox_mpeg_start_frame,
    .decode_slice = videotoolbox_mpeg_decode_slice,
    .end_frame = videotoolbox_mpeg_end_frame,
    .uninit = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
{
AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
if (ret) {
ret->output_callback = videotoolbox_decoder_callback;
ret->cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
}
return ret;
}
/* Convenience wrapper: initialize with a freshly allocated default context. */
int av_videotoolbox_default_init(AVCodecContext *avctx)
{
    return av_videotoolbox_default_init2(avctx, NULL);
}
/**
 * Install the caller-provided Videotoolbox context (or allocate a default
 * one when vtctx is NULL) and create the decompression session.
 * Returns 0 on success or a negative AVERROR code.
 */
int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
{
    if (!vtctx)
        vtctx = av_videotoolbox_alloc_context();
    if (!vtctx)
        return AVERROR(ENOMEM);

    avctx->hwaccel_context = vtctx;

    return videotoolbox_default_init(avctx);
}
/* Tear down the session/format description, then release the context. */
void av_videotoolbox_default_free(AVCodecContext *avctx)
{
    videotoolbox_default_free(avctx);
    av_freep(&avctx->hwaccel_context);
}
#endif /* CONFIG_VIDEOTOOLBOX */

@ -0,0 +1,126 @@
/*
* Videotoolbox hardware acceleration
*
* copyright (c) 2012 Sebastien Zwickert
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_VIDEOTOOLBOX_H
#define AVCODEC_VIDEOTOOLBOX_H
/**
* @file
* @ingroup lavc_codec_hwaccel_videotoolbox
* Public libavcodec Videotoolbox header.
*/
#include <stdint.h>
#define Picture QuickdrawPicture
#include <VideoToolbox/VideoToolbox.h>
#undef Picture
#include "libavcodec/avcodec.h"
/**
* This struct holds all the information that needs to be passed
* between the caller and libavcodec for initializing Videotoolbox decoding.
* Its size is not a part of the public ABI, it must be allocated with
* av_videotoolbox_alloc_context() and freed with av_free().
*/
typedef struct AVVideotoolboxContext {
    /**
     * Videotoolbox decompression session object.
     * Created and freed by the caller.
     */
    VTDecompressionSessionRef session;

    /**
     * The output callback that must be passed to the session.
     * Set by av_videotoolbox_default_init()
     */
    VTDecompressionOutputCallback output_callback;

    /**
     * CVPixelBuffer Format Type that Videotoolbox will use for decoded frames.
     * Set by the caller.
     */
    OSType cv_pix_fmt_type;

    /**
     * CoreMedia Format Description that Videotoolbox will use to create the decompression session.
     * Set by the caller.
     */
    CMVideoFormatDescriptionRef cm_fmt_desc;

    /**
     * CoreMedia codec type that Videotoolbox will use to create the decompression session.
     * Set by the caller.
     */
    int cm_codec_type;
} AVVideotoolboxContext;
/**
* Allocate and initialize a Videotoolbox context.
*
* This function should be called from the get_format() callback when the caller
 * selects the AV_PIX_FMT_VIDEOTOOLBOX format. The caller must then create
* the decoder object (using the output callback provided by libavcodec) that
* will be used for Videotoolbox-accelerated decoding.
*
* When decoding with Videotoolbox is finished, the caller must destroy the decoder
* object and free the Videotoolbox context using av_free().
*
* @return the newly allocated context or NULL on failure
*/
AVVideotoolboxContext *av_videotoolbox_alloc_context(void);
/**
* This is a convenience function that creates and sets up the Videotoolbox context using
* an internal implementation.
*
* @param avctx the corresponding codec context
*
* @return >= 0 on success, a negative AVERROR code on failure
*/
int av_videotoolbox_default_init(AVCodecContext *avctx);
/**
* This is a convenience function that creates and sets up the Videotoolbox context using
* an internal implementation.
*
* @param avctx the corresponding codec context
* @param vtctx the Videotoolbox context to use
*
* @return >= 0 on success, a negative AVERROR code on failure
*/
int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx);
/**
* This function must be called to free the Videotoolbox context initialized with
* av_videotoolbox_default_init().
*
* @param avctx the corresponding codec context
*/
void av_videotoolbox_default_free(AVCodecContext *avctx);
/**
* @}
*/
#endif /* AVCODEC_VIDEOTOOLBOX_H */

@ -1632,6 +1632,10 @@ const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_ALPHA,
},
[AV_PIX_FMT_VIDEOTOOLBOX] = {
.name = "videotoolbox_vld",
.flags = AV_PIX_FMT_FLAG_HWACCEL,
},
[AV_PIX_FMT_GBRP] = {
.name = "gbrp",
.nb_components = 3,

@ -311,6 +311,8 @@ enum AVPixelFormat {
AV_PIX_FMT_AYUV64LE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
AV_PIX_FMT_AYUV64BE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
AV_PIX_FMT_VIDEOTOOLBOX, ///< hardware decoding through Videotoolbox
AV_PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
#if FF_API_PIX_FMT

Loading…
Cancel
Save