/*
 * AVCodecContext functions for libavcodec
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * AVCodecContext functions for libavcodec
 */

#include "config.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/emms.h"
#include "libavutil/fifo.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/thread.h"
#include "avcodec.h"
#include "avcodec_internal.h"
#include "bsf.h"
#include "codec_desc.h"
#include "codec_internal.h"
#include "decode.h"
#include "encode.h"
#include "frame_thread_encoder.h"
#include "hwconfig.h"
#include "internal.h"
#include "refstruct.h"
#include "thread.h"

/**
 * Maximum size in bytes of extradata.
 * This value was chosen such that every bit of the buffer is
 * addressable by a 32-bit signed integer as used by get_bits.
 */
#define FF_MAX_EXTRADATA_SIZE ((1 << 28) - AV_INPUT_BUFFER_PADDING_SIZE)
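
/*
 * Illustrative arithmetic for the bound above (informal, not used by the
 * code): the largest accepted extradata is FF_MAX_EXTRADATA_SIZE - 1 bytes,
 * i.e. fewer than (1 << 28) bytes and therefore fewer than 2^31 bits, so
 * every bit offset inside the buffer still fits in the 32-bit signed
 * integers used by the bit reader.
 */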

const SideDataMap ff_sd_global_map[] = {
    { AV_PKT_DATA_REPLAYGAIN,                 AV_FRAME_DATA_REPLAYGAIN },
    { AV_PKT_DATA_DISPLAYMATRIX,              AV_FRAME_DATA_DISPLAYMATRIX },
    { AV_PKT_DATA_SPHERICAL,                  AV_FRAME_DATA_SPHERICAL },
    { AV_PKT_DATA_STEREO3D,                   AV_FRAME_DATA_STEREO3D },
    { AV_PKT_DATA_AUDIO_SERVICE_TYPE,         AV_FRAME_DATA_AUDIO_SERVICE_TYPE },
    { AV_PKT_DATA_MASTERING_DISPLAY_METADATA, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA },
    { AV_PKT_DATA_CONTENT_LIGHT_LEVEL,        AV_FRAME_DATA_CONTENT_LIGHT_LEVEL },
    { AV_PKT_DATA_ICC_PROFILE,                AV_FRAME_DATA_ICC_PROFILE },
    { AV_PKT_DATA_AMBIENT_VIEWING_ENVIRONMENT,AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT },
    { AV_PKT_DATA_NB },
};

int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2), void *arg, int *ret, int count, int size)
{
    size_t i;

    for (i = 0; i < count; i++) {
        size_t offset = i * size;
        int r = func(c, FF_PTR_ADD((char *)arg, offset));
        if (ret)
            ret[i] = r;
    }
    emms_c();
    return 0;
}

int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int jobnr, int threadnr), void *arg, int *ret, int count)
{
    int i;

    for (i = 0; i < count; i++) {
        int r = func(c, arg, i, 0);
        if (ret)
            ret[i] = r;
    }
    emms_c();
    return 0;
}
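
/*
 * Usage sketch (illustrative only; decode_slice_job, decode_one_slice,
 * SliceContext, slices and nb_slices are hypothetical names): codecs do not
 * call these defaults directly, they go through the execute/execute2 function
 * pointers on the context, which fall back to the serial loops above when no
 * threading is active.
 *
 *     static int decode_slice_job(AVCodecContext *avctx, void *arg,
 *                                 int jobnr, int threadnr)
 *     {
 *         SliceContext *slices = arg;           // hypothetical per-job data
 *         return decode_one_slice(avctx, &slices[jobnr]);
 *     }
 *
 *     // run nb_slices jobs: serially here, on worker threads otherwise
 *     avctx->execute2(avctx, decode_slice_job, slices, NULL, nb_slices);
 */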

static AVMutex codec_mutex = AV_MUTEX_INITIALIZER;

static void lock_avcodec(const FFCodec *codec)
{
    if (codec->caps_internal & FF_CODEC_CAP_NOT_INIT_THREADSAFE && codec->init)
        ff_mutex_lock(&codec_mutex);
}

static void unlock_avcodec(const FFCodec *codec)
{
    if (codec->caps_internal & FF_CODEC_CAP_NOT_INIT_THREADSAFE && codec->init)
        ff_mutex_unlock(&codec_mutex);
}
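
/*
 * Informal note: codecs whose init callback is not thread-safe are serialized
 * through codec_mutex. avcodec_open2() below brackets both ff_thread_init()
 * and FFCodec.init with this pair, e.g.
 *
 *     lock_avcodec(codec2);
 *     ret = codec2->init(avctx);
 *     unlock_avcodec(codec2);
 *
 * so at most one such init runs at any time.
 */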

static int64_t get_bit_rate(AVCodecContext *ctx)
{
    int64_t bit_rate;
    int bits_per_sample;

    switch (ctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
    case AVMEDIA_TYPE_DATA:
    case AVMEDIA_TYPE_SUBTITLE:
    case AVMEDIA_TYPE_ATTACHMENT:
        bit_rate = ctx->bit_rate;
        break;
    case AVMEDIA_TYPE_AUDIO:
        bits_per_sample = av_get_bits_per_sample(ctx->codec_id);
        if (bits_per_sample) {
            bit_rate = ctx->sample_rate * (int64_t)ctx->ch_layout.nb_channels;
            if (bit_rate > INT64_MAX / bits_per_sample) {
                bit_rate = 0;
            } else
                bit_rate *= bits_per_sample;
        } else
            bit_rate = ctx->bit_rate;
        break;
    default:
        bit_rate = 0;
        break;
    }
    return bit_rate;
}
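
/*
 * Worked example (informal): for a 48000 Hz stereo pcm_s16le stream,
 * av_get_bits_per_sample() reports 16, so get_bit_rate() computes
 * 48000 * 2 * 16 = 1536000 bit/s. For codecs without a fixed
 * bits-per-sample value it simply falls back to ctx->bit_rate.
 */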

int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
{
    int ret = 0;
    AVCodecInternal *avci;
    const FFCodec *codec2;

    if (avcodec_is_open(avctx))
        return 0;

    if (!codec && !avctx->codec) {
        av_log(avctx, AV_LOG_ERROR, "No codec provided to avcodec_open2()\n");
        return AVERROR(EINVAL);
    }
    if (codec && avctx->codec && codec != avctx->codec) {
        av_log(avctx, AV_LOG_ERROR, "This AVCodecContext was allocated for %s, "
               "but %s passed to avcodec_open2()\n", avctx->codec->name, codec->name);
        return AVERROR(EINVAL);
    }
    if (!codec)
        codec = avctx->codec;
    codec2 = ffcodec(codec);

    if ((avctx->codec_type != AVMEDIA_TYPE_UNKNOWN && avctx->codec_type != codec->type) ||
        (avctx->codec_id != AV_CODEC_ID_NONE && avctx->codec_id != codec->id)) {
        av_log(avctx, AV_LOG_ERROR, "Codec type or id mismatches\n");
        return AVERROR(EINVAL);
    }

    avctx->codec_type = codec->type;
    avctx->codec_id   = codec->id;
    avctx->codec      = codec;

    if (avctx->extradata_size < 0 || avctx->extradata_size >= FF_MAX_EXTRADATA_SIZE)
        return AVERROR(EINVAL);

    if ((ret = av_opt_set_dict(avctx, options)) < 0)
        return ret;

    if (avctx->codec_whitelist && av_match_list(codec->name, avctx->codec_whitelist, ',') <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Codec (%s) not on whitelist \'%s\'\n", codec->name, avctx->codec_whitelist);
        return AVERROR(EINVAL);
    }

    avci = av_codec_is_decoder(codec) ?
        ff_decode_internal_alloc() :
        ff_encode_internal_alloc();
    if (!avci) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    avctx->internal = avci;

    avci->buffer_frame = av_frame_alloc();
    avci->buffer_pkt   = av_packet_alloc();
    if (!avci->buffer_frame || !avci->buffer_pkt) {
        ret = AVERROR(ENOMEM);
        goto free_and_end;
    }

    if (codec2->priv_data_size > 0) {
        if (!avctx->priv_data) {
            avctx->priv_data = av_mallocz(codec2->priv_data_size);
            if (!avctx->priv_data) {
                ret = AVERROR(ENOMEM);
                goto free_and_end;
            }
            if (codec->priv_class) {
                *(const AVClass **)avctx->priv_data = codec->priv_class;
                av_opt_set_defaults(avctx->priv_data);
            }
        }
        if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, options)) < 0)
            goto free_and_end;
    } else {
        avctx->priv_data = NULL;
    }

    // only call ff_set_dimensions() for non-H.264/VP6F/DXV codecs so as not to overwrite previously set up dimensions
    if (!(avctx->coded_width && avctx->coded_height && avctx->width && avctx->height &&
          (avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_VP6F || avctx->codec_id == AV_CODEC_ID_DXV))) {
        if (avctx->coded_width && avctx->coded_height)
            ret = ff_set_dimensions(avctx, avctx->coded_width, avctx->coded_height);
        else if (avctx->width && avctx->height)
            ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
        if (ret < 0)
            goto free_and_end;
    }

    if ((avctx->coded_width || avctx->coded_height || avctx->width || avctx->height)
        && (  av_image_check_size2(avctx->coded_width, avctx->coded_height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0
           || av_image_check_size2(avctx->width,       avctx->height,       avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0)) {
        av_log(avctx, AV_LOG_WARNING, "Ignoring invalid width/height values\n");
        ff_set_dimensions(avctx, 0, 0);
    }

    if (avctx->width > 0 && avctx->height > 0) {
        if (av_image_check_sar(avctx->width, avctx->height,
                               avctx->sample_aspect_ratio) < 0) {
            av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
                   avctx->sample_aspect_ratio.num,
                   avctx->sample_aspect_ratio.den);
            avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
        }
    }

    if (avctx->sample_rate < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid sample rate: %d\n", avctx->sample_rate);
        ret = AVERROR(EINVAL);
        goto free_and_end;
    }
    if (avctx->block_align < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid block align: %d\n", avctx->block_align);
        ret = AVERROR(EINVAL);
        goto free_and_end;
    }

    /* AV_CODEC_CAP_CHANNEL_CONF is a decoder-only flag; so the code below
     * in particular checks that nb_channels is set for all audio encoders. */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && !avctx->ch_layout.nb_channels
        && !(codec->capabilities & AV_CODEC_CAP_CHANNEL_CONF)) {
        av_log(avctx, AV_LOG_ERROR, "%s requires channel layout to be set\n",
               av_codec_is_decoder(codec) ? "Decoder" : "Encoder");
        ret = AVERROR(EINVAL);
        goto free_and_end;
    }
    if (avctx->ch_layout.nb_channels && !av_channel_layout_check(&avctx->ch_layout)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid channel layout\n");
        ret = AVERROR(EINVAL);
        goto free_and_end;
    }
    if (avctx->ch_layout.nb_channels > FF_SANE_NB_CHANNELS) {
        av_log(avctx, AV_LOG_ERROR, "Too many channels: %d\n", avctx->ch_layout.nb_channels);
        ret = AVERROR(EINVAL);
        goto free_and_end;
    }

    avctx->frame_num = 0;
    avctx->codec_descriptor = avcodec_descriptor_get(avctx->codec_id);

    if ((avctx->codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL) &&
        avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        const char *codec_string = av_codec_is_encoder(codec) ? "encoder" : "decoder";
        const AVCodec *codec2;
        av_log(avctx, AV_LOG_ERROR,
               "The %s '%s' is experimental but experimental codecs are not enabled, "
               "add '-strict %d' if you want to use it.\n",
               codec_string, codec->name, FF_COMPLIANCE_EXPERIMENTAL);
        codec2 = av_codec_is_encoder(codec) ? avcodec_find_encoder(codec->id) : avcodec_find_decoder(codec->id);
        if (!(codec2->capabilities & AV_CODEC_CAP_EXPERIMENTAL))
            av_log(avctx, AV_LOG_ERROR, "Alternatively use the non experimental %s '%s'.\n",
                   codec_string, codec2->name);
        ret = AVERROR_EXPERIMENTAL;
        goto free_and_end;
    }

    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO &&
        (!avctx->time_base.num || !avctx->time_base.den)) {
        avctx->time_base.num = 1;
        avctx->time_base.den = avctx->sample_rate;
    }

    if (av_codec_is_encoder(avctx->codec))
        ret = ff_encode_preinit(avctx);
    else
        ret = ff_decode_preinit(avctx);
    if (ret < 0)
        goto free_and_end;

    if (HAVE_THREADS && !avci->frame_thread_encoder) {
        /* Frame-threaded decoders call FFCodec.init for their child contexts. */
        lock_avcodec(codec2);
        ret = ff_thread_init(avctx);
        unlock_avcodec(codec2);
        if (ret < 0) {
            goto free_and_end;
        }
    }
    if (!HAVE_THREADS && !(codec2->caps_internal & FF_CODEC_CAP_AUTO_THREADS))
        avctx->thread_count = 1;

    if (!(avctx->active_thread_type & FF_THREAD_FRAME) ||
        avci->frame_thread_encoder) {
        if (codec2->init) {
            lock_avcodec(codec2);
            ret = codec2->init(avctx);
            unlock_avcodec(codec2);
            if (ret < 0) {
                avci->needs_close = codec2->caps_internal & FF_CODEC_CAP_INIT_CLEANUP;
                goto free_and_end;
            }
        }
        avci->needs_close = 1;
    }

    ret = 0;

    if (av_codec_is_decoder(avctx->codec)) {
        if (!avctx->bit_rate)
            avctx->bit_rate = get_bit_rate(avctx);

        /* validate channel layout from the decoder */
        if ((avctx->ch_layout.nb_channels && !av_channel_layout_check(&avctx->ch_layout)) ||
            avctx->ch_layout.nb_channels > FF_SANE_NB_CHANNELS) {
            ret = AVERROR(EINVAL);
            goto free_and_end;
        }
        if (avctx->bits_per_coded_sample < 0) {
            ret = AVERROR(EINVAL);
            goto free_and_end;
        }
    }
    if (codec->priv_class)
        av_assert0(*(const AVClass **)avctx->priv_data == codec->priv_class);

end:

    return ret;
free_and_end:
    ff_codec_close(avctx);
    goto end;
}
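
/*
 * Typical call sequence for a decoder (illustrative sketch only; "par" stands
 * for the stream's AVCodecParameters and "ret" for the caller's error
 * variable in a real program):
 *
 *     const AVCodec *dec = avcodec_find_decoder(par->codec_id);
 *     if (!dec)
 *         return AVERROR_DECODER_NOT_FOUND;
 *     AVCodecContext *ctx = avcodec_alloc_context3(dec);
 *     if (!ctx)
 *         return AVERROR(ENOMEM);
 *     if ((ret = avcodec_parameters_to_context(ctx, par)) < 0 ||
 *         (ret = avcodec_open2(ctx, dec, NULL)) < 0) {
 *         avcodec_free_context(&ctx);
 *         return ret;
 *     }
 *     // ... avcodec_send_packet() / avcodec_receive_frame() loop ...
 *     avcodec_free_context(&ctx);
 */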

void avcodec_flush_buffers(AVCodecContext *avctx)
{
    AVCodecInternal *avci = avctx->internal;

    if (av_codec_is_encoder(avctx->codec)) {
        int caps = avctx->codec->capabilities;

        if (!(caps & AV_CODEC_CAP_ENCODER_FLUSH)) {
            // Only encoders that explicitly declare support for it can be
            // flushed. Otherwise, this is a no-op.
            av_log(avctx, AV_LOG_WARNING, "Ignoring attempt to flush encoder "
                   "that doesn't support it\n");
            return;
        }
        ff_encode_flush_buffers(avctx);
    } else
        ff_decode_flush_buffers(avctx);

    avci->draining      = 0;
    avci->draining_done = 0;
    if (avci->buffer_frame)
        av_frame_unref(avci->buffer_frame);
    if (avci->buffer_pkt)
        av_packet_unref(avci->buffer_pkt);

    if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME &&
        !avci->is_frame_mt)
        ff_thread_flush(avctx);
    else if (ffcodec(avctx->codec)->flush)
        ffcodec(avctx->codec)->flush(avctx);
}
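
/*
 * Usage sketch (illustrative; fmt_ctx, dec_ctx, stream_index and target_ts
 * are the caller's own variables): callers typically flush after a seek,
 * before feeding packets from the new position:
 *
 *     av_seek_frame(fmt_ctx, stream_index, target_ts, AVSEEK_FLAG_BACKWARD);
 *     avcodec_flush_buffers(dec_ctx);  // drop decoder state from before the seek
 *     // ... continue with avcodec_send_packet() / avcodec_receive_frame() ...
 */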

void avsubtitle_free(AVSubtitle *sub)
{
    int i;

    for (i = 0; i < sub->num_rects; i++) {
        AVSubtitleRect *const rect = sub->rects[i];

        av_freep(&rect->data[0]);
        av_freep(&rect->data[1]);
        av_freep(&rect->data[2]);
        av_freep(&rect->data[3]);
        av_freep(&rect->text);
        av_freep(&rect->ass);

        av_freep(&sub->rects[i]);
    }

    av_freep(&sub->rects);

    memset(sub, 0, sizeof(*sub));
}
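
/*
 * Usage sketch (illustrative; dec_ctx and pkt are the caller's subtitle
 * decoder context and input packet): with the subtitle API the caller owns
 * the AVSubtitle contents and releases them with avsubtitle_free() once the
 * decoded subtitle is no longer needed:
 *
 *     AVSubtitle sub;
 *     int got_sub = 0;
 *     ret = avcodec_decode_subtitle2(dec_ctx, &sub, &got_sub, pkt);
 *     if (ret >= 0 && got_sub) {
 *         // ... render or convert sub.rects ...
 *         avsubtitle_free(&sub);
 *     }
 */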

av_cold void ff_codec_close(AVCodecContext *avctx)
{
    int i;

    if (!avctx)
        return;

    if (avcodec_is_open(avctx)) {
        AVCodecInternal *avci = avctx->internal;

        if (CONFIG_FRAME_THREAD_ENCODER &&
            avci->frame_thread_encoder && avctx->thread_count > 1) {
            ff_frame_thread_encoder_free(avctx);
        }
        if (HAVE_THREADS && avci->thread_ctx)
            ff_thread_free(avctx);
        if (avci->needs_close && ffcodec(avctx->codec)->close)
            ffcodec(avctx->codec)->close(avctx);
        avci->byte_buffer_size = 0;
        av_freep(&avci->byte_buffer);
        av_frame_free(&avci->buffer_frame);
        av_packet_free(&avci->buffer_pkt);
        av_packet_free(&avci->last_pkt_props);

        av_packet_free(&avci->in_pkt);
        av_frame_free(&avci->in_frame);
        av_frame_free(&avci->recon_frame);

        ff_refstruct_unref(&avci->pool);
        ff_refstruct_pool_uninit(&avci->progress_frame_pool);

        ff_hwaccel_uninit(avctx);

        av_bsf_free(&avci->bsf);

#if FF_API_DROPCHANGED
        av_channel_layout_uninit(&avci->initial_ch_layout);
#endif

#if CONFIG_LCMS2
        ff_icc_context_uninit(&avci->icc);
#endif

        av_freep(&avctx->internal);
    }

    for (i = 0; i < avctx->nb_coded_side_data; i++)
        av_freep(&avctx->coded_side_data[i].data);
    av_freep(&avctx->coded_side_data);
    avctx->nb_coded_side_data = 0;
    av_frame_side_data_free(&avctx->decoded_side_data,
                            &avctx->nb_decoded_side_data);

    av_buffer_unref(&avctx->hw_frames_ctx);
    av_buffer_unref(&avctx->hw_device_ctx);

    if (avctx->priv_data && avctx->codec && avctx->codec->priv_class)
        av_opt_free(avctx->priv_data);
    av_opt_free(avctx);
    av_freep(&avctx->priv_data);
    if (av_codec_is_encoder(avctx->codec)) {
        av_freep(&avctx->extradata);
        avctx->extradata_size = 0;
    } else if (av_codec_is_decoder(avctx->codec))
        av_freep(&avctx->subtitle_header);

    avctx->codec = NULL;
    avctx->active_thread_type = 0;
}

#if FF_API_AVCODEC_CLOSE
int avcodec_close(AVCodecContext *avctx)
{
    ff_codec_close(avctx);
    return 0;
}
#endif

static const char *unknown_if_null(const char *str)
{
    return str ? str : "unknown";
}

void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode)
{
    const char *codec_type;
    const char *codec_name;
    const char *profile = NULL;
    AVBPrint bprint;
    int64_t bitrate;
    int new_line = 0;
    AVRational display_aspect_ratio;
    const char *separator = enc->dump_separator ? (const char *)enc->dump_separator : ", ";
    const char *str;

    if (!buf || buf_size <= 0)
        return;
    av_bprint_init_for_buffer(&bprint, buf, buf_size);
    codec_type = av_get_media_type_string(enc->codec_type);
    codec_name = avcodec_get_name(enc->codec_id);
    profile = avcodec_profile_name(enc->codec_id, enc->profile);

    av_bprintf(&bprint, "%s: %s", codec_type ? codec_type : "unknown",
               codec_name);
    buf[0] ^= 'a' ^ 'A'; /* first letter in uppercase */

    if (enc->codec && strcmp(enc->codec->name, codec_name))
        av_bprintf(&bprint, " (%s)", enc->codec->name);

    if (profile)
        av_bprintf(&bprint, " (%s)", profile);
    if (   enc->codec_type == AVMEDIA_TYPE_VIDEO
        && av_log_get_level() >= AV_LOG_VERBOSE
        && enc->refs)
        av_bprintf(&bprint, ", %d reference frame%s",
                   enc->refs, enc->refs > 1 ? "s" : "");

    if (enc->codec_tag)
        av_bprintf(&bprint, " (%s / 0x%04X)",
                   av_fourcc2str(enc->codec_tag), enc->codec_tag);

    switch (enc->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        {
            unsigned len;

            av_bprintf(&bprint, "%s%s", separator,
                       enc->pix_fmt == AV_PIX_FMT_NONE ? "none" :
                       unknown_if_null(av_get_pix_fmt_name(enc->pix_fmt)));

            av_bprint_chars(&bprint, '(', 1);
            len = bprint.len;

            /* The following check ensures that '(' has been written
             * and therefore allows us to erase it if it turns out
             * to be unnecessary. */
            if (!av_bprint_is_complete(&bprint))
                return;

            if (enc->bits_per_raw_sample && enc->pix_fmt != AV_PIX_FMT_NONE &&
                enc->bits_per_raw_sample < av_pix_fmt_desc_get(enc->pix_fmt)->comp[0].depth)
                av_bprintf(&bprint, "%d bpc, ", enc->bits_per_raw_sample);
            if (enc->color_range != AVCOL_RANGE_UNSPECIFIED &&
                (str = av_color_range_name(enc->color_range)))
                av_bprintf(&bprint, "%s, ", str);

            if (enc->colorspace != AVCOL_SPC_UNSPECIFIED ||
                enc->color_primaries != AVCOL_PRI_UNSPECIFIED ||
                enc->color_trc != AVCOL_TRC_UNSPECIFIED) {
                const char *col = unknown_if_null(av_color_space_name(enc->colorspace));
                const char *pri = unknown_if_null(av_color_primaries_name(enc->color_primaries));
                const char *trc = unknown_if_null(av_color_transfer_name(enc->color_trc));
                if (strcmp(col, pri) || strcmp(col, trc)) {
                    new_line = 1;
                    av_bprintf(&bprint, "%s/%s/%s, ", col, pri, trc);
                } else
                    av_bprintf(&bprint, "%s, ", col);
            }

            if (enc->field_order != AV_FIELD_UNKNOWN) {
                const char *field_order = "progressive";
                if (enc->field_order == AV_FIELD_TT)
                    field_order = "top first";
                else if (enc->field_order == AV_FIELD_BB)
                    field_order = "bottom first";
                else if (enc->field_order == AV_FIELD_TB)
                    field_order = "top coded first (swapped)";
                else if (enc->field_order == AV_FIELD_BT)
                    field_order = "bottom coded first (swapped)";

                av_bprintf(&bprint, "%s, ", field_order);
            }

            if (av_log_get_level() >= AV_LOG_VERBOSE &&
                enc->chroma_sample_location != AVCHROMA_LOC_UNSPECIFIED &&
                (str = av_chroma_location_name(enc->chroma_sample_location)))
                av_bprintf(&bprint, "%s, ", str);

            if (len == bprint.len) {
                bprint.str[len - 1] = '\0';
                bprint.len--;
            } else {
                if (bprint.len - 2 < bprint.size) {
                    /* Erase the last ", " */
                    bprint.len -= 2;
                    bprint.str[bprint.len] = '\0';
                }
                av_bprint_chars(&bprint, ')', 1);
            }
        }

        if (enc->width) {
            av_bprintf(&bprint, "%s%dx%d", new_line ? separator : ", ",
                       enc->width, enc->height);

            if (av_log_get_level() >= AV_LOG_VERBOSE &&
                enc->coded_width && enc->coded_height &&
                (enc->width != enc->coded_width ||
                 enc->height != enc->coded_height))
                av_bprintf(&bprint, " (%dx%d)",
                           enc->coded_width, enc->coded_height);

            if (enc->sample_aspect_ratio.num) {
                av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
                          enc->width * (int64_t)enc->sample_aspect_ratio.num,
                          enc->height * (int64_t)enc->sample_aspect_ratio.den,
                          1024 * 1024);
                av_bprintf(&bprint, " [SAR %d:%d DAR %d:%d]",
                           enc->sample_aspect_ratio.num, enc->sample_aspect_ratio.den,
                           display_aspect_ratio.num, display_aspect_ratio.den);
            }
            if (av_log_get_level() >= AV_LOG_DEBUG) {
                int g = av_gcd(enc->time_base.num, enc->time_base.den);
                av_bprintf(&bprint, ", %d/%d",
                           enc->time_base.num / g, enc->time_base.den / g);
            }
        }
        if (encode) {
            av_bprintf(&bprint, ", q=%d-%d", enc->qmin, enc->qmax);
        } else {
            if (enc->properties & FF_CODEC_PROPERTY_CLOSED_CAPTIONS)
                av_bprintf(&bprint, ", Closed Captions");
            if (enc->properties & FF_CODEC_PROPERTY_FILM_GRAIN)
                av_bprintf(&bprint, ", Film Grain");
            if (enc->properties & FF_CODEC_PROPERTY_LOSSLESS)
                av_bprintf(&bprint, ", lossless");
        }
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_bprintf(&bprint, "%s", separator);

        if (enc->sample_rate) {
            av_bprintf(&bprint, "%d Hz, ", enc->sample_rate);
        }
        av_channel_layout_describe_bprint(&enc->ch_layout, &bprint);
        if (enc->sample_fmt != AV_SAMPLE_FMT_NONE &&
            (str = av_get_sample_fmt_name(enc->sample_fmt))) {
            av_bprintf(&bprint, ", %s", str);
        }
        if (   enc->bits_per_raw_sample > 0
            && enc->bits_per_raw_sample != av_get_bytes_per_sample(enc->sample_fmt) * 8)
            av_bprintf(&bprint, " (%d bit)", enc->bits_per_raw_sample);
        if (av_log_get_level() >= AV_LOG_VERBOSE) {
            if (enc->initial_padding)
                av_bprintf(&bprint, ", delay %d", enc->initial_padding);
            if (enc->trailing_padding)
                av_bprintf(&bprint, ", padding %d", enc->trailing_padding);
        }
        break;
    case AVMEDIA_TYPE_DATA:
        if (av_log_get_level() >= AV_LOG_DEBUG) {
            int g = av_gcd(enc->time_base.num, enc->time_base.den);
            if (g)
                av_bprintf(&bprint, ", %d/%d",
                           enc->time_base.num / g, enc->time_base.den / g);
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        if (enc->width)
            av_bprintf(&bprint, ", %dx%d", enc->width, enc->height);
        break;
    default:
        return;
    }
    if (encode) {
        if (enc->flags & AV_CODEC_FLAG_PASS1)
            av_bprintf(&bprint, ", pass 1");
        if (enc->flags & AV_CODEC_FLAG_PASS2)
            av_bprintf(&bprint, ", pass 2");
    }
    bitrate = get_bit_rate(enc);
    if (bitrate != 0) {
        av_bprintf(&bprint, ", %"PRId64" kb/s", bitrate / 1000);
    } else if (enc->rc_max_rate > 0) {
        av_bprintf(&bprint, ", max. %"PRId64" kb/s", enc->rc_max_rate / 1000);
    }
}
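
/*
 * Usage sketch (illustrative; dec_ctx is the caller's codec context):
 * avcodec_string() is a convenience for log output, filling the
 * caller-provided buffer with a human-readable description:
 *
 *     char desc[256];
 *     avcodec_string(desc, sizeof(desc), dec_ctx, 0);  // 0: input/decoding context
 *     av_log(NULL, AV_LOG_INFO, "%s\n", desc);
 */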

int avcodec_is_open(AVCodecContext *s)
{
    return !!s->internal;
}

int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
{
    av_frame_unref(frame);

    if (av_codec_is_decoder(avctx->codec))
        return ff_decode_receive_frame(avctx, frame);
    return ff_encode_receive_frame(avctx, frame);
}
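
/*
 * Usage sketch (illustrative; dec_ctx, pkt and frame are the caller's decoder
 * context, input packet and reusable frame): the standard send/receive
 * decoding loop that ends up in avcodec_receive_frame() above:
 *
 *     ret = avcodec_send_packet(dec_ctx, pkt);
 *     while (ret >= 0) {
 *         ret = avcodec_receive_frame(dec_ctx, frame);
 *         if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 *             break;               // need more input / end of stream
 *         else if (ret < 0)
 *             return ret;          // real decoding error
 *         // ... use frame ...
 *         av_frame_unref(frame);
 *     }
 */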

#define WRAP_CONFIG(allowed_type, field, terminator)                       \
    do {                                                                   \
        static const __typeof__(*(field)) end = terminator;                \
        if (codec->type != (allowed_type))                                 \
            return AVERROR(EINVAL);                                        \
        *out_configs = (field);                                            \
        if (out_num_configs) {                                             \
            for (int i = 0;; i++) {                                        \
                if (!(field) || !memcmp(&(field)[i], &end, sizeof(end))) { \
                    *out_num_configs = i;                                  \
                    break;                                                 \
                }                                                          \
            }                                                              \
        }                                                                  \
        return 0;                                                          \
    } while (0)

static const enum AVColorRange color_range_jpeg[] = {
    AVCOL_RANGE_JPEG, AVCOL_RANGE_UNSPECIFIED
};

static const enum AVColorRange color_range_mpeg[] = {
    AVCOL_RANGE_MPEG, AVCOL_RANGE_UNSPECIFIED
};

static const enum AVColorRange color_range_all[] = {
    AVCOL_RANGE_MPEG, AVCOL_RANGE_JPEG, AVCOL_RANGE_UNSPECIFIED
};

static const enum AVColorRange *color_range_table[] = {
    [AVCOL_RANGE_MPEG] = color_range_mpeg,
    [AVCOL_RANGE_JPEG] = color_range_jpeg,
    [AVCOL_RANGE_MPEG | AVCOL_RANGE_JPEG] = color_range_all,
};

int ff_default_get_supported_config(const AVCodecContext *avctx,
                                    const AVCodec *codec,
                                    enum AVCodecConfig config,
                                    unsigned flags,
                                    const void **out_configs,
                                    int *out_num_configs)
{
    switch (config) {
FF_DISABLE_DEPRECATION_WARNINGS
    case AV_CODEC_CONFIG_PIX_FORMAT:
        WRAP_CONFIG(AVMEDIA_TYPE_VIDEO, codec->pix_fmts, AV_PIX_FMT_NONE);
    case AV_CODEC_CONFIG_FRAME_RATE:
        WRAP_CONFIG(AVMEDIA_TYPE_VIDEO, codec->supported_framerates, (AVRational){0});
    case AV_CODEC_CONFIG_SAMPLE_RATE:
        WRAP_CONFIG(AVMEDIA_TYPE_AUDIO, codec->supported_samplerates, 0);
    case AV_CODEC_CONFIG_SAMPLE_FORMAT:
        WRAP_CONFIG(AVMEDIA_TYPE_AUDIO, codec->sample_fmts, AV_SAMPLE_FMT_NONE);
    case AV_CODEC_CONFIG_CHANNEL_LAYOUT:
        WRAP_CONFIG(AVMEDIA_TYPE_AUDIO, codec->ch_layouts, (AVChannelLayout){0});
FF_ENABLE_DEPRECATION_WARNINGS

    case AV_CODEC_CONFIG_COLOR_RANGE:
        if (codec->type != AVMEDIA_TYPE_VIDEO)
            return AVERROR(EINVAL);
        *out_configs = color_range_table[ffcodec(codec)->color_ranges];
        if (out_num_configs)
            *out_num_configs = av_popcount(ffcodec(codec)->color_ranges);
        return 0;

    case AV_CODEC_CONFIG_COLOR_SPACE:
        *out_configs = NULL;
        if (out_num_configs)
            *out_num_configs = 0;
        return 0;
    default:
        return AVERROR(EINVAL);
    }
}

int avcodec_get_supported_config(const AVCodecContext *avctx, const AVCodec *codec,
                                 enum AVCodecConfig config, unsigned flags,
                                 const void **out, int *out_num)
{
    const FFCodec *codec2;
    int dummy_num = 0;
    if (!codec)
        codec = avctx->codec;
    if (!out_num)
        out_num = &dummy_num;

    codec2 = ffcodec(codec);
    if (codec2->get_supported_config) {
        return codec2->get_supported_config(avctx, codec, config, flags, out, out_num);
    } else {
        return ff_default_get_supported_config(avctx, codec, config, flags, out, out_num);
    }
}
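
/*
 * Usage sketch (illustrative; enc_ctx is the caller's codec context and ret
 * its error variable): querying the pixel formats an encoder accepts through
 * the public entry point above. A NULL list means no restriction is reported.
 *
 *     const enum AVPixelFormat *pix_fmts;
 *     int nb_pix_fmts;
 *     ret = avcodec_get_supported_config(enc_ctx, NULL, AV_CODEC_CONFIG_PIX_FORMAT,
 *                                        0, (const void **)&pix_fmts, &nb_pix_fmts);
 *     if (ret >= 0 && pix_fmts) {
 *         for (int i = 0; i < nb_pix_fmts; i++)
 *             av_log(NULL, AV_LOG_INFO, "%s\n", av_get_pix_fmt_name(pix_fmts[i]));
 *     }
 */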