Add avcodec_decode_audio4().

Deprecate avcodec_decode_audio3().
Implement audio support in avcodec_default_get_buffer().
Implement the new audio decoder API in all audio decoders.
Justin Ruggles 13 years ago
parent 560f773c7d
commit 0eea212943
 doc/APIchanges                  |   7
 libavcodec/8svx.c               |  38
 libavcodec/aac.h                |   1
 libavcodec/aacdec.c             |  49
 libavcodec/ac3dec.c             |  32
 libavcodec/ac3dec.h             |   1
 libavcodec/adpcm.c              |  42
 libavcodec/adx.h                |   1
 libavcodec/adxdec.c             |  41
 libavcodec/alac.c               |  45
 libavcodec/alsdec.c             |  43
 libavcodec/amrnbdec.c           |  25
 libavcodec/amrwbdec.c           |  27
 libavcodec/apedec.c             |  37
 libavcodec/atrac1.c             |  26
 libavcodec/atrac3.c             |  34
 libavcodec/avcodec.h            | 145
 libavcodec/binkaudio.c          |  34
 libavcodec/cook.c               |  43
 libavcodec/dca.c                |  32
 libavcodec/dpcm.c               |  28
 libavcodec/dsicinav.c           |  30
 libavcodec/flacdec.c            |  37
 libavcodec/g722.h               |   2
 libavcodec/g722dec.c            |  25
 libavcodec/g726.c               |  29
 libavcodec/gsmdec.c             |  32
 libavcodec/gsmdec_data.h        |   2
 libavcodec/imc.c                |  29
 libavcodec/internal.h           |   9
 libavcodec/libgsm.c             |  58
 libavcodec/libopencore-amr.c    |  52
 libavcodec/libspeexdec.c        |  36
 libavcodec/mace.c               |  33
 libavcodec/mlpdec.c             |  39
 libavcodec/mpc.h                |   1
 libavcodec/mpc7.c               |  29
 libavcodec/mpc8.c               |  27
 libavcodec/mpegaudiodec.c       |  86
 libavcodec/mpegaudiodec_float.c |  17
 libavcodec/nellymoserdec.c      |  37
 libavcodec/pcm.c                |  42
 libavcodec/qcelpdec.c           |  26
 libavcodec/qdm2.c               |  32
 libavcodec/ra144.h              |   1
 libavcodec/ra144dec.c           |  31
 libavcodec/ra288.c              |  27
 libavcodec/s302m.c              |  46
 libavcodec/shorten.c            |  45
 libavcodec/sipr.c               |  34
 libavcodec/smacker.c            |  41
 libavcodec/truespeech.c         |  29
 libavcodec/tta.c                |  28
 libavcodec/twinvq.c             |  36
 libavcodec/utils.c              | 230
 libavcodec/version.h            |   5
 libavcodec/vmdav.c              |  35
 libavcodec/vorbisdec.c          |  33
 libavcodec/wavpack.c            | 116
 libavcodec/wma.h                |   1
 libavcodec/wmadec.c             |  30
 libavcodec/wmaprodec.c          |  63
 libavcodec/wmavoice.c           |  46
 libavcodec/ws-snd1.c            |  38

@@ -13,6 +13,13 @@ libavutil: 2011-04-18
 
 API changes, most recent first:
 
+2011-xx-xx - xxxxxxx - lavc 53.25.0
+  Add nb_samples and extended_data fields to AVFrame.
+  Deprecate AVCODEC_MAX_AUDIO_FRAME_SIZE.
+  Deprecate avcodec_decode_audio3() in favor of avcodec_decode_audio4().
+  avcodec_decode_audio4() writes output samples to an AVFrame, which allows
+  audio decoders to use get_buffer().
+
 2011-xx-xx - xxxxxxx - lavc 53.24.0
   Change AVFrame.data[4]/base[4]/linesize[4]/error[4] to [8] at next major bump.
   Change AVPicture.data[4]/linesize[4] to [8] at next major bump.
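
For orientation (not part of this commit), a caller of the new API follows the shape sketched below. decode_packet and outfile are illustrative names, error handling is trimmed, and a packed sample format is assumed:

#include <stdio.h>
#include "libavcodec/avcodec.h"
#include "libavutil/samplefmt.h"

/* hypothetical helper: decode one packet with the new API and write
 * the packed samples out; returns bytes consumed or a negative error */
static int decode_packet(AVCodecContext *avctx, AVPacket *pkt, FILE *outfile)
{
    AVFrame frame;
    int got_frame, ret;

    avcodec_get_frame_defaults(&frame);
    ret = avcodec_decode_audio4(avctx, &frame, &got_frame, pkt);
    if (ret < 0)
        return ret;

    if (got_frame) {
        /* frame.nb_samples samples per channel are now available; for a
         * packed sample format the byte size of frame.data[0] is: */
        int size = av_samples_get_buffer_size(NULL, avctx->channels,
                                              frame.nb_samples,
                                              avctx->sample_fmt, 1);
        fwrite(frame.data[0], 1, size, outfile);
    }
    return ret;
}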

@@ -32,6 +32,7 @@
 
 /** decoder context */
 typedef struct EightSvxContext {
+    AVFrame frame;
     uint8_t fib_acc[2];
     const int8_t *table;
@@ -83,13 +84,13 @@ static void raw_decode(uint8_t *dst, const int8_t *src, int src_size,
 }
 
 /** decode a frame */
-static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
-                                 AVPacket *avpkt)
+static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
+                                 int *got_frame_ptr, AVPacket *avpkt)
 {
     EightSvxContext *esc = avctx->priv_data;
     int buf_size;
-    uint8_t *out_data = data;
-    int out_data_size;
+    uint8_t *out_data;
+    int ret;
     int is_compr = (avctx->codec_id != CODEC_ID_PCM_S8_PLANAR);
 
     /* for the first packet, copy data to buffer */
@@ -134,15 +135,18 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
     /* decode next piece of data from the buffer */
     buf_size = FFMIN(MAX_FRAME_SIZE, esc->data_size - esc->data_idx);
     if (buf_size <= 0) {
-        *data_size = 0;
+        *got_frame_ptr = 0;
         return avpkt->size;
     }
-    out_data_size = buf_size * (is_compr + 1) * avctx->channels;
-    if (*data_size < out_data_size) {
-        av_log(avctx, AV_LOG_ERROR, "Provided buffer with size %d is too small.\n",
-               *data_size);
-        return AVERROR(EINVAL);
+
+    /* get output buffer */
+    esc->frame.nb_samples = buf_size * (is_compr + 1);
+    if ((ret = avctx->get_buffer(avctx, &esc->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    out_data = esc->frame.data[0];
 
     if (is_compr) {
         delta_decode(out_data, &esc->data[0][esc->data_idx], buf_size,
                      &esc->fib_acc[0], esc->table, avctx->channels);
@@ -158,7 +162,9 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
         }
     }
     esc->data_idx += buf_size;
-    *data_size = out_data_size;
+
+    *got_frame_ptr = 1;
+    *(AVFrame *)data = esc->frame;
 
     return avpkt->size;
 }
@@ -186,6 +192,10 @@ static av_cold int eightsvx_decode_init(AVCodecContext *avctx)
         return -1;
     }
     avctx->sample_fmt = AV_SAMPLE_FMT_U8;
+
+    avcodec_get_frame_defaults(&esc->frame);
+    avctx->coded_frame = &esc->frame;
+
     return 0;
 }
@@ -207,7 +217,7 @@ AVCodec ff_eightsvx_fib_decoder = {
     .init = eightsvx_decode_init,
     .close = eightsvx_decode_close,
     .decode = eightsvx_decode_frame,
-    .capabilities = CODEC_CAP_DELAY,
+    .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("8SVX fibonacci"),
 };
@@ -219,7 +229,7 @@ AVCodec ff_eightsvx_exp_decoder = {
     .init = eightsvx_decode_init,
     .close = eightsvx_decode_close,
     .decode = eightsvx_decode_frame,
-    .capabilities = CODEC_CAP_DELAY,
+    .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("8SVX exponential"),
 };
@@ -231,6 +241,6 @@ AVCodec ff_pcm_s8_planar_decoder = {
     .init = eightsvx_decode_init,
    .close = eightsvx_decode_close,
     .decode = eightsvx_decode_frame,
-    .capabilities = CODEC_CAP_DELAY,
+    .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("PCM signed 8-bit planar"),
 };
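
The same conversion recurs in every decoder this commit touches. Distilled into a sketch (not code from the commit; MyContext and the sample count are placeholders), the per-decoder pattern is:

/* sketch of the pattern applied per decoder in this commit */
typedef struct MyContext {
    AVFrame frame;   /* output frame owned by the decoder context */
    /* ... codec state ... */
} MyContext;

static int my_decode_frame(AVCodecContext *avctx, void *data,
                           int *got_frame_ptr, AVPacket *avpkt)
{
    MyContext *s = avctx->priv_data;
    int ret;

    /* get output buffer: request nb_samples, let get_buffer() allocate */
    s->frame.nb_samples = 1024; /* however many samples this packet yields */
    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    /* ... decode into s->frame.data[0] ... */

    *got_frame_ptr = 1;
    *(AVFrame *)data = s->frame;   /* hand the frame back by value */
    return avpkt->size;            /* bytes of input consumed */
}

In init(), each decoder additionally calls avcodec_get_frame_defaults() on the context frame, points avctx->coded_frame at it, and adds CODEC_CAP_DR1 to its capabilities.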

@@ -251,6 +251,7 @@ typedef struct {
  */
 typedef struct {
     AVCodecContext *avctx;
+    AVFrame frame;
     MPEG4AudioConfig m4ac;

@@ -646,6 +646,9 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
 
     cbrt_tableinit();
 
+    avcodec_get_frame_defaults(&ac->frame);
+    avctx->coded_frame = &ac->frame;
+
     return 0;
 }
@@ -2113,12 +2116,12 @@ static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb)
 }
 
 static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
-                                int *data_size, GetBitContext *gb)
+                                int *got_frame_ptr, GetBitContext *gb)
 {
     AACContext *ac = avctx->priv_data;
     ChannelElement *che = NULL, *che_prev = NULL;
     enum RawDataBlockType elem_type, elem_type_prev = TYPE_END;
-    int err, elem_id, data_size_tmp;
+    int err, elem_id;
     int samples = 0, multiplier, audio_found = 0;
 
     if (show_bits(gb, 12) == 0xfff) {
@@ -2222,24 +2225,26 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
         avctx->frame_size = samples;
     }
 
-    data_size_tmp = samples * avctx->channels *
-                    av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < data_size_tmp) {
-        av_log(avctx, AV_LOG_ERROR,
-               "Output buffer too small (%d) or trying to output too many samples (%d) for this frame.\n",
-               *data_size, data_size_tmp);
-        return -1;
-    }
-    *data_size = data_size_tmp;
-
     if (samples) {
+        /* get output buffer */
+        ac->frame.nb_samples = samples;
+        if ((err = avctx->get_buffer(avctx, &ac->frame)) < 0) {
+            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+            return err;
+        }
+
         if (avctx->sample_fmt == AV_SAMPLE_FMT_FLT)
-            ac->fmt_conv.float_interleave(data, (const float **)ac->output_data,
+            ac->fmt_conv.float_interleave((float *)ac->frame.data[0],
+                                          (const float **)ac->output_data,
                                           samples, avctx->channels);
         else
-            ac->fmt_conv.float_to_int16_interleave(data, (const float **)ac->output_data,
+            ac->fmt_conv.float_to_int16_interleave((int16_t *)ac->frame.data[0],
+                                                   (const float **)ac->output_data,
                                                    samples, avctx->channels);
+
+        *(AVFrame *)data = ac->frame;
     }
+    *got_frame_ptr = !!samples;
 
     if (ac->output_configured && audio_found)
         ac->output_configured = OC_LOCKED;
@@ -2248,7 +2253,7 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
 }
 
 static int aac_decode_frame(AVCodecContext *avctx, void *data,
-                            int *data_size, AVPacket *avpkt)
+                            int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
@@ -2259,7 +2264,7 @@ static int aac_decode_frame(AVCodecContext *avctx, void *data,
 
     init_get_bits(&gb, buf, buf_size * 8);
 
-    if ((err = aac_decode_frame_int(avctx, data, data_size, &gb)) < 0)
+    if ((err = aac_decode_frame_int(avctx, data, got_frame_ptr, &gb)) < 0)
         return err;
 
     buf_consumed = (get_bits_count(&gb) + 7) >> 3;
@@ -2481,8 +2486,8 @@ static int read_audio_mux_element(struct LATMContext *latmctx,
 }
 
-static int latm_decode_frame(AVCodecContext *avctx, void *out, int *out_size,
-                             AVPacket *avpkt)
+static int latm_decode_frame(AVCodecContext *avctx, void *out,
+                             int *got_frame_ptr, AVPacket *avpkt)
 {
     struct LATMContext *latmctx = avctx->priv_data;
     int muxlength, err;
@@ -2504,7 +2509,7 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out,
 
     if (!latmctx->initialized) {
         if (!avctx->extradata) {
-            *out_size = 0;
+            *got_frame_ptr = 0;
             return avpkt->size;
         } else {
             if ((err = decode_audio_specific_config(
@@ -2522,7 +2527,7 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out,
         return AVERROR_INVALIDDATA;
     }
 
-    if ((err = aac_decode_frame_int(avctx, out, out_size, &gb)) < 0)
+    if ((err = aac_decode_frame_int(avctx, out, got_frame_ptr, &gb)) < 0)
         return err;
 
     return muxlength;
@@ -2552,7 +2557,7 @@ AVCodec ff_aac_decoder = {
     .sample_fmts = (const enum AVSampleFormat[]) {
         AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
     },
-    .capabilities = CODEC_CAP_CHANNEL_CONF,
+    .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
     .channel_layouts = aac_channel_layout,
 };
@@ -2573,6 +2578,6 @@ AVCodec ff_aac_latm_decoder = {
     .sample_fmts = (const enum AVSampleFormat[]) {
         AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
     },
-    .capabilities = CODEC_CAP_CHANNEL_CONF,
+    .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
     .channel_layouts = aac_channel_layout,
 };

@@ -208,6 +208,9 @@ static av_cold int ac3_decode_init(AVCodecContext *avctx)
     }
     s->downmixed = 1;
 
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 }
@@ -1296,15 +1299,15 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
 /**
  * Decode a single AC-3 frame.
  */
-static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size,
-                            AVPacket *avpkt)
+static int ac3_decode_frame(AVCodecContext * avctx, void *data,
+                            int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     AC3DecodeContext *s = avctx->priv_data;
-    float *out_samples_flt = data;
-    int16_t *out_samples_s16 = data;
-    int blk, ch, err;
+    float *out_samples_flt;
+    int16_t *out_samples_s16;
+    int blk, ch, err, ret;
     const uint8_t *channel_map;
     const float *output[AC3_MAX_CHANNELS];
@@ -1321,7 +1324,6 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
     init_get_bits(&s->gbc, buf, buf_size * 8);
 
     /* parse the syncinfo */
-    *data_size = 0;
     err = parse_frame_header(s);
 
     if (err) {
@@ -1343,6 +1345,7 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
             /* TODO: add support for substreams and dependent frames */
             if(s->frame_type == EAC3_FRAME_TYPE_DEPENDENT || s->substreamid) {
                 av_log(avctx, AV_LOG_ERROR, "unsupported frame type : skipping frame\n");
+                *got_frame_ptr = 0;
                 return s->frame_size;
             } else {
                 av_log(avctx, AV_LOG_ERROR, "invalid frame type\n");
@@ -1400,6 +1403,15 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
     if (s->bitstream_mode == 0x7 && s->channels > 1)
         avctx->audio_service_type = AV_AUDIO_SERVICE_TYPE_KARAOKE;
 
+    /* get output buffer */
+    s->frame.nb_samples = s->num_blocks * 256;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
+    }
+    out_samples_flt = (float *)s->frame.data[0];
+    out_samples_s16 = (int16_t *)s->frame.data[0];
+
     /* decode the audio blocks */
     channel_map = ff_ac3_dec_channel_map[s->output_mode & ~AC3_OUTPUT_LFEON][s->lfe_on];
     for (ch = 0; ch < s->out_channels; ch++)
@@ -1419,8 +1431,10 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
             out_samples_s16 += 256 * s->out_channels;
         }
     }
-    *data_size = s->num_blocks * 256 * avctx->channels *
-                 av_get_bytes_per_sample(avctx->sample_fmt);
+
+    *got_frame_ptr = 1;
+    *(AVFrame *)data = s->frame;
+
     return FFMIN(buf_size, s->frame_size);
 }
@@ -1458,6 +1472,7 @@ AVCodec ff_ac3_decoder = {
     .init = ac3_decode_init,
     .close = ac3_decode_end,
     .decode = ac3_decode_frame,
+    .capabilities = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
     .sample_fmts = (const enum AVSampleFormat[]) {
         AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
@@ -1480,6 +1495,7 @@ AVCodec ff_eac3_decoder = {
     .init = ac3_decode_init,
     .close = ac3_decode_end,
     .decode = ac3_decode_frame,
+    .capabilities = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"),
     .sample_fmts = (const enum AVSampleFormat[]) {
         AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE

@@ -68,6 +68,7 @@
 typedef struct {
     AVClass *class;                     ///< class for AVOptions
     AVCodecContext *avctx;              ///< parent context
+    AVFrame frame;                      ///< AVFrame for decoded output
     GetBitContext gbc;                  ///< bitstream reader
 
 ///@name Bit stream information

@@ -84,6 +84,7 @@ static const int swf_index_tables[4][16] = {
 /* end of tables */
 
 typedef struct ADPCMDecodeContext {
+    AVFrame frame;
     ADPCMChannelStatus status[6];
 } ADPCMDecodeContext;
@@ -124,6 +125,10 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx)
         break;
     }
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+
+    avcodec_get_frame_defaults(&c->frame);
+    avctx->coded_frame = &c->frame;
+
     return 0;
 }
@@ -501,9 +506,8 @@ static int get_nb_samples(AVCodecContext *avctx, const uint8_t *buf,
         decode_top_nibble_next = 1; \
     }
 
-static int adpcm_decode_frame(AVCodecContext *avctx,
-                              void *data, int *data_size,
-                              AVPacket *avpkt)
+static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
+                              int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
@@ -514,7 +518,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
     const uint8_t *src;
     int st; /* stereo */
     int count1, count2;
-    int nb_samples, coded_samples, out_bps, out_size;
+    int nb_samples, coded_samples, ret;
 
     nb_samples = get_nb_samples(avctx, buf, buf_size, &coded_samples);
     if (nb_samples <= 0) {
@@ -522,22 +526,22 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
         return AVERROR_INVALIDDATA;
     }
 
-    out_bps = av_get_bytes_per_sample(avctx->sample_fmt);
-    out_size = nb_samples * avctx->channels * out_bps;
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    c->frame.nb_samples = nb_samples;
+    if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    samples = (short *)c->frame.data[0];
 
     /* use coded_samples when applicable */
     /* it is always <= nb_samples, so the output buffer will be large enough */
     if (coded_samples) {
         if (coded_samples != nb_samples)
             av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
-        nb_samples = coded_samples;
-        out_size = nb_samples * avctx->channels * out_bps;
+        c->frame.nb_samples = nb_samples = coded_samples;
     }
 
-    samples = data;
     src = buf;
     st = avctx->channels == 2 ? 1 : 0;
@@ -576,7 +580,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
                 cs->step_index = 88;
             }
 
-            samples = (short*)data + channel;
+            samples = (short *)c->frame.data[0] + channel;
 
             for (m = 0; m < 32; m++) {
                 *samples = adpcm_ima_qt_expand_nibble(cs, src[0] & 0x0F, 3);
@@ -628,7 +632,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
         }
 
         for (i = 0; i < avctx->channels; i++) {
-            samples = (short*)data + i;
+            samples = (short *)c->frame.data[0] + i;
             cs = &c->status[i];
             for (n = nb_samples >> 1; n > 0; n--, src++) {
                 uint8_t v = *src;
@@ -965,7 +969,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
             }
         }
 
-        out_size = count * 28 * avctx->channels * out_bps;
+        c->frame.nb_samples = count * 28;
         src = src_end;
         break;
     }
@@ -1144,7 +1148,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
             prev[0][i] = (int16_t)bytestream_get_be16(&src);
 
         for (ch = 0; ch <= st; ch++) {
-            samples = (unsigned short *) data + ch;
+            samples = (short *)c->frame.data[0] + ch;
 
             /* Read in every sample for this channel. */
             for (i = 0; i < nb_samples / 14; i++) {
@@ -1177,7 +1181,10 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
     default:
         return -1;
     }
-    *data_size = out_size;
+
+    *got_frame_ptr = 1;
+    *(AVFrame *)data = c->frame;
+
     return src - buf;
 }
@@ -1190,6 +1197,7 @@ AVCodec ff_ ## name_ ## _decoder = { \
     .priv_data_size = sizeof(ADPCMDecodeContext), \
     .init = adpcm_decode_init, \
     .decode = adpcm_decode_frame, \
+    .capabilities = CODEC_CAP_DR1, \
     .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
 }

@@ -40,6 +40,7 @@ typedef struct {
 } ADXChannelState;
 
 typedef struct {
+    AVFrame frame;
     int channels;
     ADXChannelState prev[2];
     int header_parsed;

@@ -50,6 +50,10 @@ static av_cold int adx_decode_init(AVCodecContext *avctx)
     c->channels = avctx->channels;
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+
+    avcodec_get_frame_defaults(&c->frame);
+    avctx->coded_frame = &c->frame;
+
     return 0;
 }
@@ -89,36 +93,42 @@ static int adx_decode(ADXContext *c, int16_t *out, const uint8_t *in, int ch)
     return 0;
 }
 
-static int adx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
-                            AVPacket *avpkt)
+static int adx_decode_frame(AVCodecContext *avctx, void *data,
+                            int *got_frame_ptr, AVPacket *avpkt)
 {
     int buf_size = avpkt->size;
     ADXContext *c = avctx->priv_data;
-    int16_t *samples = data;
+    int16_t *samples;
     const uint8_t *buf = avpkt->data;
-    int num_blocks, ch;
+    int num_blocks, ch, ret;
 
     if (c->eof) {
-        *data_size = 0;
+        *got_frame_ptr = 0;
         return buf_size;
     }
 
-    /* 18 bytes of data are expanded into 32*2 bytes of audio,
-       so guard against buffer overflows */
+    /* calculate number of blocks in the packet */
     num_blocks = buf_size / (BLOCK_SIZE * c->channels);
-    if (num_blocks > *data_size / (BLOCK_SAMPLES * c->channels)) {
-        buf_size = (*data_size / (BLOCK_SAMPLES * c->channels)) * BLOCK_SIZE;
-        num_blocks = buf_size / (BLOCK_SIZE * c->channels);
-    }
 
-    if (!buf_size || buf_size % (BLOCK_SIZE * avctx->channels)) {
+    /* if the packet is not an even multiple of BLOCK_SIZE, check for an EOF
+       packet */
+    if (!num_blocks || buf_size % (BLOCK_SIZE * avctx->channels)) {
         if (buf_size >= 4 && (AV_RB16(buf) & 0x8000)) {
             c->eof = 1;
-            *data_size = 0;
+            *got_frame_ptr = 0;
            return avpkt->size;
         }
         return AVERROR_INVALIDDATA;
     }
 
+    /* get output buffer */
+    c->frame.nb_samples = num_blocks * BLOCK_SAMPLES;
+    if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
+    }
+    samples = (int16_t *)c->frame.data[0];
+
     while (num_blocks--) {
         for (ch = 0; ch < c->channels; ch++) {
             if (adx_decode(c, samples + ch, buf, ch)) {
@@ -132,7 +142,9 @@ static int adx_decode_frame(AVCodecContext *avctx, void *data,
         samples += BLOCK_SAMPLES * c->channels;
     }
 
-    *data_size = (uint8_t*)samples - (uint8_t*)data;
+    *got_frame_ptr = 1;
+    *(AVFrame *)data = c->frame;
+
     return buf - avpkt->data;
 }
@@ -143,5 +155,6 @@ AVCodec ff_adpcm_adx_decoder = {
     .priv_data_size = sizeof(ADXContext),
     .init = adx_decode_init,
     .decode = adx_decode_frame,
+    .capabilities = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("SEGA CRI ADX ADPCM"),
 };

@@ -62,10 +62,10 @@
 
 typedef struct {
     AVCodecContext *avctx;
+    AVFrame frame;
     GetBitContext gb;
 
     int numchannels;
-    int bytespersample;
 
     /* buffers */
     int32_t *predicterror_buffer[MAX_CHANNELS];
@@ -351,9 +351,8 @@ static void interleave_stereo_24(int32_t *buffer[MAX_CHANNELS],
     }
 }
 
-static int alac_decode_frame(AVCodecContext *avctx,
-                             void *outbuffer, int *outputsize,
-                             AVPacket *avpkt)
+static int alac_decode_frame(AVCodecContext *avctx, void *data,
+                             int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *inbuffer = avpkt->data;
     int input_buffer_size = avpkt->size;
@@ -366,7 +365,7 @@ static int alac_decode_frame(AVCodecContext *avctx, void *data,
     int isnotcompressed;
     uint8_t interlacing_shift;
     uint8_t interlacing_leftweight;
-    int i, ch;
+    int i, ch, ret;
 
     init_get_bits(&alac->gb, inbuffer, input_buffer_size * 8);
@@ -401,14 +400,17 @@ static int alac_decode_frame(AVCodecContext *avctx, void *data,
     } else
         outputsamples = alac->setinfo_max_samples_per_frame;
 
-    alac->bytespersample = channels * av_get_bytes_per_sample(avctx->sample_fmt);
-
-    if(outputsamples > *outputsize / alac->bytespersample){
-        av_log(avctx, AV_LOG_ERROR, "sample buffer too small\n");
-        return -1;
+    /* get output buffer */
+    if (outputsamples > INT32_MAX) {
+        av_log(avctx, AV_LOG_ERROR, "unsupported block size: %u\n", outputsamples);
+        return AVERROR_INVALIDDATA;
+    }
+    alac->frame.nb_samples = outputsamples;
+    if ((ret = avctx->get_buffer(avctx, &alac->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
 
-    *outputsize = outputsamples * alac->bytespersample;
     readsamplesize = alac->setinfo_sample_size - alac->extra_bits + channels - 1;
     if (readsamplesize > MIN_CACHE_BITS) {
         av_log(avctx, AV_LOG_ERROR, "readsamplesize too big (%d)\n", readsamplesize);
@@ -501,21 +503,23 @@ static int alac_decode_frame(AVCodecContext *avctx, void *data,
     switch(alac->setinfo_sample_size) {
     case 16:
         if (channels == 2) {
-            interleave_stereo_16(alac->outputsamples_buffer, outbuffer,
-                                 outputsamples);
+            interleave_stereo_16(alac->outputsamples_buffer,
+                                 (int16_t *)alac->frame.data[0], outputsamples);
         } else {
+            int16_t *outbuffer = (int16_t *)alac->frame.data[0];
             for (i = 0; i < outputsamples; i++) {
-                ((int16_t*)outbuffer)[i] = alac->outputsamples_buffer[0][i];
+                outbuffer[i] = alac->outputsamples_buffer[0][i];
             }
         }
         break;
     case 24:
         if (channels == 2) {
-            interleave_stereo_24(alac->outputsamples_buffer, outbuffer,
-                                 outputsamples);
+            interleave_stereo_24(alac->outputsamples_buffer,
                                  (int32_t *)alac->frame.data[0], outputsamples);
         } else {
+            int32_t *outbuffer = (int32_t *)alac->frame.data[0];
             for (i = 0; i < outputsamples; i++)
-                ((int32_t *)outbuffer)[i] = alac->outputsamples_buffer[0][i] << 8;
+                outbuffer[i] = alac->outputsamples_buffer[0][i] << 8;
         }
         break;
     }
@@ -523,6 +527,9 @@ static int alac_decode_frame(AVCodecContext *avctx, void *data,
     if (input_buffer_size * 8 - get_bits_count(&alac->gb) > 8)
         av_log(avctx, AV_LOG_ERROR, "Error : %d bits left\n", input_buffer_size * 8 - get_bits_count(&alac->gb));
 
+    *got_frame_ptr = 1;
+    *(AVFrame *)data = alac->frame;
+
     return input_buffer_size;
 }
@@ -637,6 +644,9 @@ static av_cold int alac_decode_init(AVCodecContext * avctx)
         return ret;
     }
 
+    avcodec_get_frame_defaults(&alac->frame);
+    avctx->coded_frame = &alac->frame;
+
     return 0;
 }
@@ -648,5 +658,6 @@ AVCodec ff_alac_decoder = {
     .init = alac_decode_init,
     .close = alac_decode_close,
     .decode = alac_decode_frame,
+    .capabilities = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("ALAC (Apple Lossless Audio Codec)"),
 };

@@ -191,6 +191,7 @@ typedef struct {
 
 typedef struct {
     AVCodecContext *avctx;
+    AVFrame frame;
     ALSSpecificConfig sconf;
     GetBitContext gb;
     DSPContext dsp;
@@ -1415,15 +1416,14 @@ static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame)
 
 /** Decode an ALS frame.
  */
-static int decode_frame(AVCodecContext *avctx,
-                        void *data, int *data_size,
+static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
                         AVPacket *avpkt)
 {
     ALSDecContext *ctx = avctx->priv_data;
     ALSSpecificConfig *sconf = &ctx->sconf;
     const uint8_t *buffer = avpkt->data;
     int buffer_size = avpkt->size;
-    int invalid_frame, size;
+    int invalid_frame, ret;
     unsigned int c, sample, ra_frame, bytes_read, shift;
 
     init_get_bits(&ctx->gb, buffer, buffer_size * 8);
@@ -1448,21 +1448,17 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
 
     ctx->frame_id++;
 
-    // check for size of decoded data
-    size = ctx->cur_frame_length * avctx->channels *
-           av_get_bytes_per_sample(avctx->sample_fmt);
-
-    if (size > *data_size) {
-        av_log(avctx, AV_LOG_ERROR, "Decoded data exceeds buffer size.\n");
-        return -1;
+    /* get output buffer */
+    ctx->frame.nb_samples = ctx->cur_frame_length;
+    if ((ret = avctx->get_buffer(avctx, &ctx->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
 
-    *data_size = size;
-
     // transform decoded frame into output format
 #define INTERLEAVE_OUTPUT(bps) \
     { \
-        int##bps##_t *dest = (int##bps##_t*) data; \
+        int##bps##_t *dest = (int##bps##_t*)ctx->frame.data[0]; \
         shift = bps - ctx->avctx->bits_per_raw_sample; \
         for (sample = 0; sample < ctx->cur_frame_length; sample++) \
            for (c = 0; c < avctx->channels; c++) \
@@ -1480,7 +1476,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
         int swap = HAVE_BIGENDIAN != sconf->msb_first;
 
         if (ctx->avctx->bits_per_raw_sample == 24) {
-            int32_t *src = data;
+            int32_t *src = (int32_t *)ctx->frame.data[0];
 
             for (sample = 0;
                  sample < ctx->cur_frame_length * avctx->channels;
@@ -1501,22 +1497,25 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
 
             if (swap) {
                 if (ctx->avctx->bits_per_raw_sample <= 16) {
-                    int16_t *src  = (int16_t*) data;
+                    int16_t *src  = (int16_t*) ctx->frame.data[0];
                     int16_t *dest = (int16_t*) ctx->crc_buffer;
                     for (sample = 0;
                         sample < ctx->cur_frame_length * avctx->channels;
                          sample++)
                         *dest++ = av_bswap16(src[sample]);
                 } else {
-                    ctx->dsp.bswap_buf((uint32_t*)ctx->crc_buffer, data,
+                    ctx->dsp.bswap_buf((uint32_t*)ctx->crc_buffer,
+                                       (uint32_t *)ctx->frame.data[0],
                                        ctx->cur_frame_length * avctx->channels);
                 }
                 crc_source = ctx->crc_buffer;
             } else {
-                crc_source = data;
+                crc_source = ctx->frame.data[0];
            }
 
-            ctx->crc = av_crc(ctx->crc_table, ctx->crc, crc_source, size);
+            ctx->crc = av_crc(ctx->crc_table, ctx->crc, crc_source,
+                              ctx->cur_frame_length * avctx->channels *
+                              av_get_bytes_per_sample(avctx->sample_fmt));
         }
@@ -1527,6 +1526,9 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
         }
     }
 
+    *got_frame_ptr = 1;
+    *(AVFrame *)data = ctx->frame;
+
     bytes_read = invalid_frame ? buffer_size :
                                  (get_bits_count(&ctx->gb) + 7) >> 3;
@@ -1724,6 +1726,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
 
     dsputil_init(&ctx->dsp, avctx);
 
+    avcodec_get_frame_defaults(&ctx->frame);
+    avctx->coded_frame = &ctx->frame;
+
     return 0;
 }
@@ -1747,7 +1752,7 @@ AVCodec ff_als_decoder = {
     .close = decode_end,
     .decode = decode_frame,
     .flush = flush,
-    .capabilities = CODEC_CAP_SUBFRAMES,
+    .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Audio Lossless Coding (ALS)"),
 };

@@ -95,6 +95,7 @@
 #define AMR_AGC_ALPHA 0.9
 
 typedef struct AMRContext {
+    AVFrame avframe;                         ///< AVFrame for decoded samples
     AMRNBFrame frame;                        ///< decoded AMR parameters (lsf coefficients, codebook indexes, etc)
     uint8_t bad_frame_indicator;             ///< bad frame ? 1 : 0
     enum Mode cur_frame_mode;
@@ -167,6 +168,9 @@ static av_cold int amrnb_decode_init(AVCodecContext *avctx)
     for (i = 0; i < 4; i++)
         p->prediction_error[i] = MIN_ENERGY;
 
+    avcodec_get_frame_defaults(&p->avframe);
+    avctx->coded_frame = &p->avframe;
+
     return 0;
 }
@@ -919,21 +923,29 @@ static void postfilter(AMRContext *p, float *lpc, float *buf_out)
 
 /// @}
 
-static int amrnb_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
-                              AVPacket *avpkt)
+static int amrnb_decode_frame(AVCodecContext *avctx, void *data,
+                              int *got_frame_ptr, AVPacket *avpkt)
 {
     AMRContext *p = avctx->priv_data;        // pointer to private data
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
-    float *buf_out = data;                   // pointer to the output data buffer
-    int i, subframe;
+    float *buf_out;                          // pointer to the output data buffer
+    int i, subframe, ret;
     float fixed_gain_factor;
     AMRFixed fixed_sparse = {0};             // fixed vector up to anti-sparseness processing
     float spare_vector[AMR_SUBFRAME_SIZE];   // extra stack space to hold result from anti-sparseness processing
     float synth_fixed_gain;                  // the fixed gain that synthesis should use
     const float *synth_fixed_vector;         // pointer to the fixed vector that synthesis should use
 
+    /* get output buffer */
+    p->avframe.nb_samples = AMR_BLOCK_SIZE;
+    if ((ret = avctx->get_buffer(avctx, &p->avframe)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
+    }
+    buf_out = (float *)p->avframe.data[0];
+
     p->cur_frame_mode = unpack_bitstream(p, buf, buf_size);
     if (p->cur_frame_mode == MODE_DTX) {
         av_log_missing_feature(avctx, "dtx mode", 1);
@@ -1028,8 +1040,8 @@ static int amrnb_decode_frame(AVCodecContext *avctx, void *data,
     ff_weighted_vector_sumf(p->lsf_avg, p->lsf_avg, p->lsf_q[3],
                             0.84, 0.16, LP_FILTER_ORDER);
 
-    /* report how many samples we got */
-    *data_size = AMR_BLOCK_SIZE * sizeof(float);
+    *got_frame_ptr = 1;
+    *(AVFrame *)data = p->avframe;
 
     /* return the amount of bytes consumed if everything was OK */
     return frame_sizes_nb[p->cur_frame_mode] + 1; // +7 for rounding and +8 for TOC
@@ -1043,6 +1055,7 @@ AVCodec ff_amrnb_decoder = {
     .priv_data_size = sizeof(AMRContext),
     .init = amrnb_decode_init,
     .decode = amrnb_decode_frame,
+    .capabilities = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("Adaptive Multi-Rate NarrowBand"),
     .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
 };

@@ -41,6 +41,7 @@
 #include "amrwbdata.h"
 
 typedef struct {
+    AVFrame avframe;                     ///< AVFrame for decoded samples
     AMRWBFrame frame;                    ///< AMRWB parameters decoded from bitstream
     enum Mode fr_cur_mode;               ///< mode index of current frame
     uint8_t fr_quality;                  ///< frame quality index (FQI)
@@ -102,6 +103,9 @@ static av_cold int amrwb_decode_init(AVCodecContext *avctx)
     for (i = 0; i < 4; i++)
         ctx->prediction_error[i] = MIN_ENERGY;
 
+    avcodec_get_frame_defaults(&ctx->avframe);
+    avctx->coded_frame = &ctx->avframe;
+
     return 0;
 }
@@ -1062,15 +1066,15 @@ static void update_sub_state(AMRWBContext *ctx)
            LP_ORDER_16k * sizeof(float));
 }
 
-static int amrwb_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
-                              AVPacket *avpkt)
+static int amrwb_decode_frame(AVCodecContext *avctx, void *data,
+                              int *got_frame_ptr, AVPacket *avpkt)
 {
     AMRWBContext *ctx = avctx->priv_data;
     AMRWBFrame *cf = &ctx->frame;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     int expected_fr_size, header_size;
-    float *buf_out = data;
+    float *buf_out;
     float spare_vector[AMRWB_SFR_SIZE];      // extra stack space to hold result from anti-sparseness processing
     float fixed_gain_factor;                 // fixed gain correction factor (gamma)
     float *synth_fixed_vector;               // pointer to the fixed vector that synthesis should use
@@ -1080,7 +1084,15 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data,
     float hb_exc[AMRWB_SFR_SIZE_16k];        // excitation for the high frequency band
     float hb_samples[AMRWB_SFR_SIZE_16k];    // filtered high-band samples from synthesis
     float hb_gain;
-    int sub, i;
+    int sub, i, ret;
+
+    /* get output buffer */
+    ctx->avframe.nb_samples = 4 * AMRWB_SFR_SIZE_16k;
+    if ((ret = avctx->get_buffer(avctx, &ctx->avframe)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
+    }
+    buf_out = (float *)ctx->avframe.data[0];
 
     header_size = decode_mime_header(ctx, buf);
     expected_fr_size = ((cf_sizes_wb[ctx->fr_cur_mode] + 7) >> 3) + 1;
@@ -1088,7 +1100,7 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data,
     if (buf_size < expected_fr_size) {
        av_log(avctx, AV_LOG_ERROR,
                "Frame too small (%d bytes). Truncated file?\n", buf_size);
-        *data_size = 0;
+        *got_frame_ptr = 0;
        return buf_size;
     }
@@ -1219,8 +1231,8 @@ static int amrwb_decode_frame(AVCodecContext *avctx, void *data,
     memcpy(ctx->isp_sub4_past, ctx->isp[3], LP_ORDER * sizeof(ctx->isp[3][0]));
     memcpy(ctx->isf_past_final, ctx->isf_cur, LP_ORDER * sizeof(float));
 
-    /* report how many samples we got */
-    *data_size = 4 * AMRWB_SFR_SIZE_16k * sizeof(float);
+    *got_frame_ptr = 1;
+    *(AVFrame *)data = ctx->avframe;
 
     return expected_fr_size;
 }
@@ -1232,6 +1244,7 @@ AVCodec ff_amrwb_decoder = {
     .priv_data_size = sizeof(AMRWBContext),
     .init = amrwb_decode_init,
     .decode = amrwb_decode_frame,
+    .capabilities = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("Adaptive Multi-Rate WideBand"),
     .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
 };

@@ -129,6 +129,7 @@ typedef struct APEPredictor {
 /** Decoder context */
 typedef struct APEContext {
     AVCodecContext *avctx;
+    AVFrame frame;
     DSPContext dsp;
     int channels;
     int samples;                             ///< samples left to decode in current frame
@@ -215,6 +216,10 @@ static av_cold int ape_decode_init(AVCodecContext *avctx)
     dsputil_init(&s->dsp, avctx);
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
     avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
+
+    avcodec_get_frame_defaults(&s->frame);
+    avctx->coded_frame = &s->frame;
+
     return 0;
 filter_alloc_fail:
     ape_decode_close(avctx);
@@ -805,16 +810,15 @@ static void ape_unpack_stereo(APEContext *ctx, int count)
     }
 }
 
-static int ape_decode_frame(AVCodecContext *avctx,
-                            void *data, int *data_size,
-                            AVPacket *avpkt)
+static int ape_decode_frame(AVCodecContext *avctx, void *data,
+                            int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     APEContext *s = avctx->priv_data;
-    int16_t *samples = data;
-    int i;
-    int blockstodecode, out_size;
+    int16_t *samples;
+    int i, ret;
+    int blockstodecode;
     int bytes_used = 0;
 
     /* this should never be negative, but bad things will happen if it is, so
@@ -826,7 +830,7 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
         void *tmp_data;
 
         if (!buf_size) {
-            *data_size = 0;
+            *got_frame_ptr = 0;
            return 0;
         }
         if (buf_size < 8) {
@@ -874,18 +878,19 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     if (!s->data) {
-        *data_size = 0;
+        *got_frame_ptr = 0;
        return buf_size;
     }
 
     blockstodecode = FFMIN(BLOCKS_PER_LOOP, s->samples);
 
-    out_size = blockstodecode * avctx->channels *
-               av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small.\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    s->frame.nb_samples = blockstodecode;
+    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    samples = (int16_t *)s->frame.data[0];
 
     s->error=0;
@@ -909,7 +914,9 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
 
     s->samples -= blockstodecode;
 
-    *data_size = out_size;
+    *got_frame_ptr = 1;
+    *(AVFrame *)data = s->frame;
+
     return bytes_used;
 }
@@ -927,7 +934,7 @@ AVCodec ff_ape_decoder = {
     .init = ape_decode_init,
     .close = ape_decode_close,
     .decode = ape_decode_frame,
-    .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY,
+    .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY | CODEC_CAP_DR1,
     .flush = ape_flush,
     .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"),
 };

@@ -72,6 +72,7 @@ typedef struct {
  * The atrac1 context, holds all needed parameters for decoding
  */
 typedef struct {
+    AVFrame frame;
     AT1SUCtx SUs[AT1_MAX_CHANNELS];     ///< channel sound unit
     DECLARE_ALIGNED(32, float, spec)[AT1_SU_SAMPLES];      ///< the mdct spectrum buffer
@@ -273,14 +274,14 @@ static void at1_subband_synthesis(AT1Ctx *q, AT1SUCtx* su, float *pOut)
 
 static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
-                               int *data_size, AVPacket *avpkt)
+                               int *got_frame_ptr, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     AT1Ctx *q = avctx->priv_data;
-    int ch, ret, out_size;
+    int ch, ret;
     GetBitContext gb;
-    float* samples = data;
+    float *samples;
 
     if (buf_size < 212 * q->channels) {
@@ -288,12 +289,13 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
         return AVERROR_INVALIDDATA;
     }
 
-    out_size = q->channels * AT1_SU_SAMPLES *
-               av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    q->frame.nb_samples = AT1_SU_SAMPLES;
+    if ((ret = avctx->get_buffer(avctx, &q->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
     }
+    samples = (float *)q->frame.data[0];
 
     for (ch = 0; ch < q->channels; ch++) {
         AT1SUCtx* su = &q->SUs[ch];
@@ -321,7 +323,9 @@ static int atrac1_decode_frame(AVCodecContext *avctx, void *data,
                             AT1_SU_SAMPLES, 2);
     }
 
-    *data_size = out_size;
+    *got_frame_ptr = 1;
+    *(AVFrame *)data = q->frame;
+
     return avctx->block_align;
 }
@@ -389,6 +393,9 @@ static av_cold int atrac1_decode_init(AVCodecContext *avctx)
     q->SUs[1].spectrum[0] = q->SUs[1].spec1;
     q->SUs[1].spectrum[1] = q->SUs[1].spec2;
 
+    avcodec_get_frame_defaults(&q->frame);
+    avctx->coded_frame = &q->frame;
+
     return 0;
 }
@@ -401,5 +408,6 @@ AVCodec ff_atrac1_decoder = {
     .init = atrac1_decode_init,
     .close = atrac1_decode_end,
     .decode = atrac1_decode_frame,
+    .capabilities = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("Atrac 1 (Adaptive TRansform Acoustic Coding)"),
 };

@@ -86,6 +86,7 @@ typedef struct {
 } channel_unit;
 
 typedef struct {
+    AVFrame frame;
     GetBitContext gb;
     //@{
     /** stream data */
@@ -823,16 +824,16 @@ static int decodeFrame(ATRAC3Context *q, const uint8_t* databuf,
  * @param avctx pointer to the AVCodecContext
  */
-static int atrac3_decode_frame(AVCodecContext *avctx,
-                               void *data, int *data_size,
-                               AVPacket *avpkt) {
+static int atrac3_decode_frame(AVCodecContext *avctx, void *data,
+                               int *got_frame_ptr, AVPacket *avpkt)
+{
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     ATRAC3Context *q = avctx->priv_data;
-    int result = 0, out_size;
+    int result;
     const uint8_t* databuf;
-    float *samples_flt = data;
-    int16_t *samples_s16 = data;
+    float *samples_flt;
+    int16_t *samples_s16;
 
     if (buf_size < avctx->block_align) {
         av_log(avctx, AV_LOG_ERROR,
@@ -840,12 +841,14 @@ static int atrac3_decode_frame(AVCodecContext *avctx, void *data,
         return AVERROR_INVALIDDATA;
     }
 
-    out_size = SAMPLES_PER_FRAME * q->channels *
-               av_get_bytes_per_sample(avctx->sample_fmt);
-    if (*data_size < out_size) {
-        av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-        return AVERROR(EINVAL);
+    /* get output buffer */
+    q->frame.nb_samples = SAMPLES_PER_FRAME;
+    if ((result = avctx->get_buffer(avctx, &q->frame)) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return result;
     }
+    samples_flt = (float *)q->frame.data[0];
+    samples_s16 = (int16_t *)q->frame.data[0];
 
     /* Check if we need to descramble and what buffer to pass on. */
     if (q->scrambled_stream) {
@@ -875,7 +878,9 @@ static int atrac3_decode_frame(AVCodecContext *avctx, void *data,
                                          (const float **)q->outSamples,
                                          SAMPLES_PER_FRAME, q->channels);
     }
-    *data_size = out_size;
+
+    *got_frame_ptr = 1;
+    *(AVFrame *)data = q->frame;
 
     return avctx->block_align;
 }
@@ -1047,6 +1052,9 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx)
         }
     }
 
+    avcodec_get_frame_defaults(&q->frame);
+    avctx->coded_frame = &q->frame;
+
     return 0;
 }
@@ -1060,6 +1068,6 @@ AVCodec ff_atrac3_decoder =
     .init = atrac3_decode_init,
     .close = atrac3_decode_close,
     .decode = atrac3_decode_frame,
-    .capabilities = CODEC_CAP_SUBFRAMES,
+    .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("Atrac 3 (Adaptive TRansform Acoustic Coding 3)"),
 };

@@ -480,8 +480,10 @@ enum CodecID {
 #define CH_LAYOUT_STEREO_DOWNMIX AV_CH_LAYOUT_STEREO_DOWNMIX
 #endif
 
+#if FF_API_OLD_DECODE_AUDIO
 /* in bytes */
 #define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio
+#endif
 
 /**
  * Required number of additionally allocated bytes at the end of the input bitstream for decoding.
@@ -933,13 +935,24 @@ typedef struct AVFrame {
 #define AV_NUM_DATA_POINTERS 8
 #endif
     /**
-     * pointer to the picture planes.
+     * pointer to the picture/channel planes.
      * This might be different from the first allocated byte
-     * - encoding:
-     * - decoding:
+     * - encoding: Set by user
+     * - decoding: set by AVCodecContext.get_buffer()
     */
     uint8_t *data[AV_NUM_DATA_POINTERS];
+
+    /**
+     * Size, in bytes, of the data for each picture/channel plane.
+     *
+     * For audio, only linesize[0] may be set. For planar audio, each channel
+     * plane must be the same size.
+     *
+     * - encoding: Set by user (video only)
+     * - decoding: set by AVCodecContext.get_buffer()
+     */
     int linesize[AV_NUM_DATA_POINTERS];
+
     /**
     * pointer to the first allocated byte of the picture. Can be used in get_buffer/release_buffer.
     * This isn't used by libavcodec unless the default get/release_buffer() is used.
@@ -993,7 +1006,7 @@ typedef struct AVFrame {
     * buffer age (1->was last buffer and dint change, 2->..., ...).
     * Set to INT_MAX if the buffer has not been used yet.
     * - encoding: unused
-     * - decoding: MUST be set by get_buffer().
+     * - decoding: MUST be set by get_buffer() for video.
     */
    int age;
@@ -1190,6 +1203,33 @@ typedef struct AVFrame {
     * - decoding: Set by libavcodec.
     */
    void *thread_opaque;
+
+    /**
+     * number of audio samples (per channel) described by this frame
+     * - encoding: unused
+     * - decoding: Set by libavcodec
+     */
+    int nb_samples;
+
+    /**
+     * pointers to the data planes/channels.
+     *
+     * For video, this should simply point to data[].
+     *
+     * For planar audio, each channel has a separate data pointer, and
+     * linesize[0] contains the size of each channel buffer.
+     * For packed audio, there is just one data pointer, and linesize[0]
+     * contains the total size of the buffer for all channels.
+     *
+     * Note: Both data and extended_data will always be set by get_buffer(),
+     * but for planar audio with more channels that can fit in data,
+     * extended_data must be used by the decoder in order to access all
+     * channels.
+     *
+     * encoding: unused
+     * decoding: set by AVCodecContext.get_buffer()
+     */
+    uint8_t **extended_data;
 } AVFrame;
 
 struct AVCodecInternal;
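
To make the data/extended_data relationship above concrete, here is a minimal sketch (not from this commit; channel_plane is a hypothetical helper) of how a caller would locate a channel's samples:

#include "libavcodec/avcodec.h"

/* hypothetical helper: return the plane holding a given channel */
static const uint8_t *channel_plane(const AVFrame *frame, int channel,
                                    int is_planar)
{
    if (is_planar) {
        /* planar: one plane per channel, each linesize[0] bytes */
        return frame->extended_data[channel];
    }
    /* packed: a single interleaved plane holding all channels */
    return frame->extended_data[0];
}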
@ -1545,15 +1585,56 @@ typedef struct AVCodecContext {
/** /**
* Called at the beginning of each frame to get a buffer for it. * Called at the beginning of each frame to get a buffer for it.
* If pic.reference is set then the frame will be read later by libavcodec. *
* avcodec_align_dimensions2() should be used to find the required width and * The function will set AVFrame.data[], AVFrame.linesize[].
* height, as they normally need to be rounded up to the next multiple of 16. * AVFrame.extended_data[] must also be set, but it should be the same as
* AVFrame.data[] except for planar audio with more channels than can fit
* in AVFrame.data[]. In that case, AVFrame.data[] shall still contain as
* many data pointers as it can hold.
*
* if CODEC_CAP_DR1 is not set then get_buffer() must call * if CODEC_CAP_DR1 is not set then get_buffer() must call
* avcodec_default_get_buffer() instead of providing buffers allocated by * avcodec_default_get_buffer() instead of providing buffers allocated by
* some other means. * some other means.
*
* AVFrame.data[] should be 32- or 16-byte-aligned unless the CPU doesn't
* need it. avcodec_default_get_buffer() aligns the output buffer properly,
* but if get_buffer() is overridden then alignment considerations should
* be taken into account.
*
* @see avcodec_default_get_buffer()
*
* Video:
*
* If pic.reference is set then the frame will be read later by libavcodec.
* avcodec_align_dimensions2() should be used to find the required width and
* height, as they normally need to be rounded up to the next multiple of 16.
*
* If frame multithreading is used and thread_safe_callbacks is set, * If frame multithreading is used and thread_safe_callbacks is set,
* it may be called from a different thread, but not from more than one at once. * it may be called from a different thread, but not from more than one at
* Does not need to be reentrant. * once. Does not need to be reentrant.
*
* @see release_buffer(), reget_buffer()
* @see avcodec_align_dimensions2()
*
* Audio:
*
* Decoders request a buffer of a particular size by setting
* AVFrame.nb_samples prior to calling get_buffer(). The decoder may,
* however, utilize only part of the buffer by setting AVFrame.nb_samples
* to a smaller value in the output frame.
*
* Decoders cannot use the buffer after returning from
* avcodec_decode_audio4(), so they will not call release_buffer(), as it
* is assumed to be released immediately upon return.
*
* As a convenience, av_samples_get_buffer_size() and
* av_samples_fill_arrays() in libavutil may be used by custom get_buffer()
* functions to find the required data size and to fill data pointers and
* linesize. In AVFrame.linesize, only linesize[0] may be set for audio
* since all planes must be the same size.
*
* @see av_samples_get_buffer_size(), av_samples_fill_arrays()
*
* - encoding: unused * - encoding: unused
* - decoding: Set by libavcodec, user can override. * - decoding: Set by libavcodec, user can override.
*/ */
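
The audio contract above can be satisfied with the two libavutil helpers it names. Below is a minimal sketch of a custom audio get_buffer(); my_audio_get_buffer() is a hypothetical name, and the video path, buffer caching, and the rare case of more channels than AV_NUM_DATA_POINTERS are omitted:

#include <libavcodec/avcodec.h>
#include <libavutil/samplefmt.h>
#include <libavutil/mem.h>

static int my_audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
{
    uint8_t *buf;
    int ret;
    /* bytes needed for frame->nb_samples samples in the current format */
    int size = av_samples_get_buffer_size(NULL, avctx->channels,
                                          frame->nb_samples,
                                          avctx->sample_fmt, 32);
    if (size < 0)
        return size;
    if (!(buf = av_mallocz(size)))
        return AVERROR(ENOMEM);
    /* fill frame->data[] and frame->linesize[0]; only linesize[0] is
     * meaningful for audio since all planes must be the same size */
    if ((ret = av_samples_fill_arrays(frame->data, frame->linesize, buf,
                                      avctx->channels, frame->nb_samples,
                                      avctx->sample_fmt, 32)) < 0) {
        av_free(buf);
        return ret;
    }
    /* adequate while the channels fit in data[]; more channels would need
     * a separately allocated extended_data array */
    frame->extended_data = frame->data;
    return 0;
}

Since audio buffers are not returned through release_buffer(), the application frees the allocation itself once it is done with the returned frame.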
@ -3882,7 +3963,12 @@ int avcodec_open(AVCodecContext *avctx, AVCodec *codec);
*/ */
int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options); int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options);
#if FF_API_OLD_DECODE_AUDIO
/** /**
* Wrapper function that calls avcodec_decode_audio4(). * Wrapper function that calls avcodec_decode_audio4().
*
* @deprecated Use avcodec_decode_audio4() instead.
*
* Decode the audio frame of size avpkt->size from avpkt->data into samples. * Decode the audio frame of size avpkt->size from avpkt->data into samples.
* Some decoders may support multiple frames in a single AVPacket, such * Some decoders may support multiple frames in a single AVPacket, such
* decoders would then just decode the first frame. In this case, * decoders would then just decode the first frame. In this case,
@ -3917,6 +4003,8 @@ int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options)
* *
* @param avctx the codec context * @param avctx the codec context
* @param[out] samples the output buffer, sample type in avctx->sample_fmt * @param[out] samples the output buffer, sample type in avctx->sample_fmt
* If the sample format is planar, each channel plane will
* be the same size, with no padding between channels.
* @param[in,out] frame_size_ptr the output buffer size in bytes * @param[in,out] frame_size_ptr the output buffer size in bytes
* @param[in] avpkt The input AVPacket containing the input buffer. * @param[in] avpkt The input AVPacket containing the input buffer.
* You can create such packet with av_init_packet() and by then setting * You can create such packet with av_init_packet() and by then setting
@ -3925,9 +4013,46 @@ int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options)
* @return On error a negative value is returned, otherwise the number of bytes * @return On error a negative value is returned, otherwise the number of bytes
* used or zero if no frame data was decompressed (used) from the input AVPacket. * used or zero if no frame data was decompressed (used) from the input AVPacket.
*/ */
int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, attribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
int *frame_size_ptr, int *frame_size_ptr,
AVPacket *avpkt); AVPacket *avpkt);
#endif
/**
* Decode the audio frame of size avpkt->size from avpkt->data into frame.
*
* Some decoders may support multiple frames in a single AVPacket. Such
* decoders would then just decode the first frame. In this case,
* avcodec_decode_audio4 has to be called again with an AVPacket containing
* the remaining data in order to decode the second frame, and so on.
* Even if no frames are returned, the packet needs to be fed to the decoder
* with remaining data until it is completely consumed or an error occurs.
*
* @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE
* larger than the actual read bytes because some optimized bitstream
* readers read 32 or 64 bits at once and could read over the end.
*
* @note You might have to align the input buffer. The alignment requirements
* depend on the CPU and the decoder.
*
* @param avctx the codec context
* @param[out] frame The AVFrame in which to store decoded audio samples.
* Decoders request a buffer of a particular size by setting
* AVFrame.nb_samples prior to calling get_buffer(). The
* decoder may, however, only utilize part of the buffer by
* setting AVFrame.nb_samples to a smaller value in the
* output frame.
* @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is
* non-zero.
* @param[in] avpkt The input AVPacket containing the input buffer.
* At least avpkt->data and avpkt->size should be set. Some
* decoders might also require additional fields to be set.
* @return A negative error code is returned if an error occurred during
* decoding, otherwise the number of bytes consumed from the input
* AVPacket is returned.
*/
int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame,
int *got_frame_ptr, AVPacket *avpkt);
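
A sketch of the calling side of the new API follows; decode_packet() is an illustrative name, and the loop simply applies the consumption rule documented above:

#include <libavcodec/avcodec.h>

static int decode_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    AVFrame frame;
    int got_frame, len;

    /* one packet may hold several frames: keep calling until consumed */
    while (pkt->size > 0) {
        avcodec_get_frame_defaults(&frame);
        len = avcodec_decode_audio4(avctx, &frame, &got_frame, pkt);
        if (len < 0)
            return len;                /* decoding error */
        if (got_frame) {
            /* frame.nb_samples samples per channel are now available via
             * frame.data[] / frame.extended_data[] */
        }
        pkt->data += len;              /* advance past the consumed bytes */
        pkt->size -= len;
    }
    return 0;
}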
/** /**
* Decode the video frame of size avpkt->size from avpkt->data into picture. * Decode the video frame of size avpkt->size from avpkt->data into picture.

@ -45,6 +45,7 @@ static float quant_table[96];
#define BINK_BLOCK_MAX_SIZE (MAX_CHANNELS << 11) #define BINK_BLOCK_MAX_SIZE (MAX_CHANNELS << 11)
typedef struct { typedef struct {
AVFrame frame;
GetBitContext gb; GetBitContext gb;
DSPContext dsp; DSPContext dsp;
FmtConvertContext fmt_conv; FmtConvertContext fmt_conv;
@ -147,6 +148,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
else else
return -1; return -1;
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0; return 0;
} }
@ -293,6 +297,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
ff_rdft_end(&s->trans.rdft); ff_rdft_end(&s->trans.rdft);
else if (CONFIG_BINKAUDIO_DCT_DECODER) else if (CONFIG_BINKAUDIO_DCT_DECODER)
ff_dct_end(&s->trans.dct); ff_dct_end(&s->trans.dct);
return 0; return 0;
} }
@ -302,20 +307,19 @@ static void get_bits_align32(GetBitContext *s)
if (n) skip_bits(s, n); if (n) skip_bits(s, n);
} }
static int decode_frame(AVCodecContext *avctx, static int decode_frame(AVCodecContext *avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
BinkAudioContext *s = avctx->priv_data; BinkAudioContext *s = avctx->priv_data;
int16_t *samples = data; int16_t *samples;
GetBitContext *gb = &s->gb; GetBitContext *gb = &s->gb;
int out_size, consumed = 0; int ret, consumed = 0;
if (!get_bits_left(gb)) { if (!get_bits_left(gb)) {
uint8_t *buf; uint8_t *buf;
/* handle end-of-stream */ /* handle end-of-stream */
if (!avpkt->size) { if (!avpkt->size) {
*data_size = 0; *got_frame_ptr = 0;
return 0; return 0;
} }
if (avpkt->size < 4) { if (avpkt->size < 4) {
@ -334,11 +338,13 @@ static int decode_frame(AVCodecContext *avctx,
skip_bits_long(gb, 32); skip_bits_long(gb, 32);
} }
out_size = s->block_size * av_get_bytes_per_sample(avctx->sample_fmt); /* get output buffer */
if (*data_size < out_size) { s->frame.nb_samples = s->block_size / avctx->channels;
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
return AVERROR(EINVAL); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
} }
samples = (int16_t *)s->frame.data[0];
if (decode_block(s, samples, avctx->codec->id == CODEC_ID_BINKAUDIO_DCT)) { if (decode_block(s, samples, avctx->codec->id == CODEC_ID_BINKAUDIO_DCT)) {
av_log(avctx, AV_LOG_ERROR, "Incomplete packet\n"); av_log(avctx, AV_LOG_ERROR, "Incomplete packet\n");
@ -346,7 +352,9 @@ static int decode_frame(AVCodecContext *avctx,
} }
get_bits_align32(gb); get_bits_align32(gb);
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return consumed; return consumed;
} }
@ -358,7 +366,7 @@ AVCodec ff_binkaudio_rdft_decoder = {
.init = decode_init, .init = decode_init,
.close = decode_end, .close = decode_end,
.decode = decode_frame, .decode = decode_frame,
.capabilities = CODEC_CAP_DELAY, .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Bink Audio (RDFT)") .long_name = NULL_IF_CONFIG_SMALL("Bink Audio (RDFT)")
}; };
@ -370,6 +378,6 @@ AVCodec ff_binkaudio_dct_decoder = {
.init = decode_init, .init = decode_init,
.close = decode_end, .close = decode_end,
.decode = decode_frame, .decode = decode_frame,
.capabilities = CODEC_CAP_DELAY, .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Bink Audio (DCT)") .long_name = NULL_IF_CONFIG_SMALL("Bink Audio (DCT)")
}; };
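
Every per-decoder change that follows applies the same mechanical pattern as the binkaudio conversion above: embed an AVFrame in the codec context, request a buffer by setting nb_samples, write into it, and return it through the opaque data pointer. A condensed sketch with hypothetical names:

#include "avcodec.h"

typedef struct {
    AVFrame frame;   /* owned by the context, handed back to the caller */
    /* ... codec state ... */
} MyDecContext;

static av_cold int my_decode_init(AVCodecContext *avctx)
{
    MyDecContext *s = avctx->priv_data;
    avcodec_get_frame_defaults(&s->frame);
    avctx->coded_frame = &s->frame;
    return 0;
}

static int my_decode_frame(AVCodecContext *avctx, void *data,
                           int *got_frame_ptr, AVPacket *avpkt)
{
    MyDecContext *s = avctx->priv_data;
    int ret;

    /* request an output buffer: set nb_samples, then call get_buffer() */
    s->frame.nb_samples = avpkt->size;   /* placeholder: derive from packet */
    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    /* ... decode avpkt->data into s->frame.data[0] here ... */

    *got_frame_ptr   = 1;
    *(AVFrame *)data = s->frame;         /* return the frame by value */
    return avpkt->size;                  /* bytes consumed */
}

Decoders converted this way allocate through get_buffer() and can therefore advertise CODEC_CAP_DR1, which each file below now does.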

@ -122,6 +122,7 @@ typedef struct cook {
void (* saturate_output) (struct cook *q, int chan, float *out); void (* saturate_output) (struct cook *q, int chan, float *out);
AVCodecContext* avctx; AVCodecContext* avctx;
AVFrame frame;
GetBitContext gb; GetBitContext gb;
/* stream data */ /* stream data */
int nb_channels; int nb_channels;
@ -131,6 +132,7 @@ typedef struct cook {
int samples_per_channel; int samples_per_channel;
/* states */ /* states */
AVLFG random_state; AVLFG random_state;
int discarded_packets;
/* transform data */ /* transform data */
FFTContext mdct_ctx; FFTContext mdct_ctx;
@ -896,7 +898,8 @@ mlt_compensate_output(COOKContext *q, float *decode_buffer,
float *out, int chan) float *out, int chan)
{ {
imlt_gain(q, decode_buffer, gains_ptr, previous_buffer); imlt_gain(q, decode_buffer, gains_ptr, previous_buffer);
q->saturate_output (q, chan, out); if (out)
q->saturate_output(q, chan, out);
} }
@ -953,24 +956,28 @@ static void decode_subpacket(COOKContext *q, COOKSubpacket *p,
* @param avctx pointer to the AVCodecContext * @param avctx pointer to the AVCodecContext
*/ */
static int cook_decode_frame(AVCodecContext *avctx, static int cook_decode_frame(AVCodecContext *avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt) { {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
COOKContext *q = avctx->priv_data; COOKContext *q = avctx->priv_data;
int i, out_size; float *samples = NULL;
int i, ret;
int offset = 0; int offset = 0;
int chidx = 0; int chidx = 0;
if (buf_size < avctx->block_align) if (buf_size < avctx->block_align)
return buf_size; return buf_size;
out_size = q->nb_channels * q->samples_per_channel * /* get output buffer */
av_get_bytes_per_sample(avctx->sample_fmt); if (q->discarded_packets >= 2) {
if (*data_size < out_size) { q->frame.nb_samples = q->samples_per_channel;
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); if ((ret = avctx->get_buffer(avctx, &q->frame)) < 0) {
return AVERROR(EINVAL); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
samples = (float *)q->frame.data[0];
} }
/* estimate subpacket sizes */ /* estimate subpacket sizes */
@ -990,15 +997,21 @@ static int cook_decode_frame(AVCodecContext *avctx,
q->subpacket[i].bits_per_subpacket = (q->subpacket[i].size*8)>>q->subpacket[i].bits_per_subpdiv; q->subpacket[i].bits_per_subpacket = (q->subpacket[i].size*8)>>q->subpacket[i].bits_per_subpdiv;
q->subpacket[i].ch_idx = chidx; q->subpacket[i].ch_idx = chidx;
av_log(avctx,AV_LOG_DEBUG,"subpacket[%i] size %i js %i %i block_align %i\n",i,q->subpacket[i].size,q->subpacket[i].joint_stereo,offset,avctx->block_align); av_log(avctx,AV_LOG_DEBUG,"subpacket[%i] size %i js %i %i block_align %i\n",i,q->subpacket[i].size,q->subpacket[i].joint_stereo,offset,avctx->block_align);
decode_subpacket(q, &q->subpacket[i], buf + offset, data); decode_subpacket(q, &q->subpacket[i], buf + offset, samples);
offset += q->subpacket[i].size; offset += q->subpacket[i].size;
chidx += q->subpacket[i].num_channels; chidx += q->subpacket[i].num_channels;
av_log(avctx,AV_LOG_DEBUG,"subpacket[%i] %i %i\n",i,q->subpacket[i].size * 8,get_bits_count(&q->gb)); av_log(avctx,AV_LOG_DEBUG,"subpacket[%i] %i %i\n",i,q->subpacket[i].size * 8,get_bits_count(&q->gb));
} }
*data_size = out_size;
/* Discard the first two frames: no valid audio. */ /* Discard the first two frames: no valid audio. */
if (avctx->frame_number < 2) *data_size = 0; if (q->discarded_packets < 2) {
q->discarded_packets++;
*got_frame_ptr = 0;
return avctx->block_align;
}
*got_frame_ptr = 1;
*(AVFrame *)data = q->frame;
return avctx->block_align; return avctx->block_align;
} }
@ -1246,6 +1259,9 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
else else
avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO; avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
avcodec_get_frame_defaults(&q->frame);
avctx->coded_frame = &q->frame;
#ifdef DEBUG #ifdef DEBUG
dump_cook_context(q); dump_cook_context(q);
#endif #endif
@ -1262,5 +1278,6 @@ AVCodec ff_cook_decoder =
.init = cook_decode_init, .init = cook_decode_init,
.close = cook_decode_close, .close = cook_decode_close,
.decode = cook_decode_frame, .decode = cook_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("COOK"), .long_name = NULL_IF_CONFIG_SMALL("COOK"),
}; };

@ -261,6 +261,7 @@ static av_always_inline int get_bitalloc(GetBitContext *gb, BitAlloc *ba, int id
typedef struct { typedef struct {
AVCodecContext *avctx; AVCodecContext *avctx;
AVFrame frame;
/* Frame header */ /* Frame header */
int frame_type; ///< type of the current frame int frame_type; ///< type of the current frame
int samples_deficit; ///< deficit sample count int samples_deficit; ///< deficit sample count
@ -1635,9 +1636,8 @@ static void dca_exss_parse_header(DCAContext *s)
* Main frame decoding function * Main frame decoding function
* FIXME add arguments * FIXME add arguments
*/ */
static int dca_decode_frame(AVCodecContext * avctx, static int dca_decode_frame(AVCodecContext *avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
@ -1645,9 +1645,8 @@ static int dca_decode_frame(AVCodecContext * avctx,
int lfe_samples; int lfe_samples;
int num_core_channels = 0; int num_core_channels = 0;
int i, ret; int i, ret;
float *samples_flt = data; float *samples_flt;
int16_t *samples_s16 = data; int16_t *samples_s16;
int out_size;
DCAContext *s = avctx->priv_data; DCAContext *s = avctx->priv_data;
int channels; int channels;
int core_ss_end; int core_ss_end;
@ -1839,11 +1838,14 @@ static int dca_decode_frame(AVCodecContext * avctx,
return AVERROR_PATCHWELCOME; return AVERROR_PATCHWELCOME;
} }
out_size = 256 / 8 * s->sample_blocks * channels * /* get output buffer */
av_get_bytes_per_sample(avctx->sample_fmt); s->frame.nb_samples = 256 * (s->sample_blocks / 8);
if (*data_size < out_size) if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
return AVERROR(EINVAL); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
*data_size = out_size; return ret;
}
samples_flt = (float *)s->frame.data[0];
samples_s16 = (int16_t *)s->frame.data[0];
/* filter to get final output */ /* filter to get final output */
for (i = 0; i < (s->sample_blocks / 8); i++) { for (i = 0; i < (s->sample_blocks / 8); i++) {
@ -1877,6 +1879,9 @@ static int dca_decode_frame(AVCodecContext * avctx,
s->lfe_data[i] = s->lfe_data[i + lfe_samples]; s->lfe_data[i] = s->lfe_data[i + lfe_samples];
} }
*got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return buf_size; return buf_size;
} }
@ -1919,6 +1924,9 @@ static av_cold int dca_decode_init(AVCodecContext * avctx)
avctx->channels = avctx->request_channels; avctx->channels = avctx->request_channels;
} }
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0; return 0;
} }
@ -1947,7 +1955,7 @@ AVCodec ff_dca_decoder = {
.decode = dca_decode_frame, .decode = dca_decode_frame,
.close = dca_decode_end, .close = dca_decode_end,
.long_name = NULL_IF_CONFIG_SMALL("DCA (DTS Coherent Acoustics)"), .long_name = NULL_IF_CONFIG_SMALL("DCA (DTS Coherent Acoustics)"),
.capabilities = CODEC_CAP_CHANNEL_CONF, .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
.sample_fmts = (const enum AVSampleFormat[]) { .sample_fmts = (const enum AVSampleFormat[]) {
AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
}, },

@ -42,6 +42,7 @@
#include "bytestream.h" #include "bytestream.h"
typedef struct DPCMContext { typedef struct DPCMContext {
AVFrame frame;
int channels; int channels;
int16_t roq_square_array[256]; int16_t roq_square_array[256];
int sample[2]; ///< previous sample (for SOL_DPCM) int sample[2]; ///< previous sample (for SOL_DPCM)
@ -162,22 +163,25 @@ static av_cold int dpcm_decode_init(AVCodecContext *avctx)
else else
avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->sample_fmt = AV_SAMPLE_FMT_S16;
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0; return 0;
} }
static int dpcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size, static int dpcm_decode_frame(AVCodecContext *avctx, void *data,
AVPacket *avpkt) int *got_frame_ptr, AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
const uint8_t *buf_end = buf + buf_size; const uint8_t *buf_end = buf + buf_size;
DPCMContext *s = avctx->priv_data; DPCMContext *s = avctx->priv_data;
int out = 0; int out = 0, ret;
int predictor[2]; int predictor[2];
int ch = 0; int ch = 0;
int stereo = s->channels - 1; int stereo = s->channels - 1;
int16_t *output_samples = data; int16_t *output_samples;
/* calculate output size */ /* calculate output size */
switch(avctx->codec->id) { switch(avctx->codec->id) {
@ -197,15 +201,18 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
out = buf_size; out = buf_size;
break; break;
} }
out *= av_get_bytes_per_sample(avctx->sample_fmt);
if (out <= 0) { if (out <= 0) {
av_log(avctx, AV_LOG_ERROR, "packet is too small\n"); av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
if (*data_size < out) {
av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n"); /* get output buffer */
return AVERROR(EINVAL); s->frame.nb_samples = out / s->channels;
if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
} }
output_samples = (int16_t *)s->frame.data[0];
switch(avctx->codec->id) { switch(avctx->codec->id) {
@ -307,7 +314,9 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
break; break;
} }
*data_size = out; *got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return buf_size; return buf_size;
} }
@ -319,6 +328,7 @@ AVCodec ff_ ## name_ ## _decoder = { \
.priv_data_size = sizeof(DPCMContext), \ .priv_data_size = sizeof(DPCMContext), \
.init = dpcm_decode_init, \ .init = dpcm_decode_init, \
.decode = dpcm_decode_frame, \ .decode = dpcm_decode_frame, \
.capabilities = CODEC_CAP_DR1, \
.long_name = NULL_IF_CONFIG_SMALL(long_name_), \ .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
} }

@ -44,6 +44,7 @@ typedef struct CinVideoContext {
} CinVideoContext; } CinVideoContext;
typedef struct CinAudioContext { typedef struct CinAudioContext {
AVFrame frame;
int initial_decode_frame; int initial_decode_frame;
int delta; int delta;
} CinAudioContext; } CinAudioContext;
@ -317,25 +318,28 @@ static av_cold int cinaudio_decode_init(AVCodecContext *avctx)
cin->delta = 0; cin->delta = 0;
avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->sample_fmt = AV_SAMPLE_FMT_S16;
avcodec_get_frame_defaults(&cin->frame);
avctx->coded_frame = &cin->frame;
return 0; return 0;
} }
static int cinaudio_decode_frame(AVCodecContext *avctx, static int cinaudio_decode_frame(AVCodecContext *avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
CinAudioContext *cin = avctx->priv_data; CinAudioContext *cin = avctx->priv_data;
const uint8_t *buf_end = buf + avpkt->size; const uint8_t *buf_end = buf + avpkt->size;
int16_t *samples = data; int16_t *samples;
int delta, out_size; int delta, ret;
out_size = (avpkt->size - cin->initial_decode_frame) * /* get output buffer */
av_get_bytes_per_sample(avctx->sample_fmt); cin->frame.nb_samples = avpkt->size - cin->initial_decode_frame;
if (*data_size < out_size) { if ((ret = avctx->get_buffer(avctx, &cin->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return AVERROR(EINVAL); return ret;
} }
samples = (int16_t *)cin->frame.data[0];
delta = cin->delta; delta = cin->delta;
if (cin->initial_decode_frame) { if (cin->initial_decode_frame) {
@ -351,7 +355,8 @@ static int cinaudio_decode_frame(AVCodecContext *avctx,
} }
cin->delta = delta; cin->delta = delta;
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = cin->frame;
return avpkt->size; return avpkt->size;
} }
@ -376,5 +381,6 @@ AVCodec ff_dsicinaudio_decoder = {
.priv_data_size = sizeof(CinAudioContext), .priv_data_size = sizeof(CinAudioContext),
.init = cinaudio_decode_init, .init = cinaudio_decode_init,
.decode = cinaudio_decode_frame, .decode = cinaudio_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Delphine Software International CIN audio"), .long_name = NULL_IF_CONFIG_SMALL("Delphine Software International CIN audio"),
}; };

@ -49,6 +49,7 @@ typedef struct FLACContext {
FLACSTREAMINFO FLACSTREAMINFO
AVCodecContext *avctx; ///< parent AVCodecContext AVCodecContext *avctx; ///< parent AVCodecContext
AVFrame frame;
GetBitContext gb; ///< GetBitContext initialized to start at the current frame GetBitContext gb; ///< GetBitContext initialized to start at the current frame
int blocksize; ///< number of samples in the current frame int blocksize; ///< number of samples in the current frame
@ -116,6 +117,9 @@ static av_cold int flac_decode_init(AVCodecContext *avctx)
allocate_buffers(s); allocate_buffers(s);
s->got_streaminfo = 1; s->got_streaminfo = 1;
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0; return 0;
} }
@ -542,20 +546,18 @@ static int decode_frame(FLACContext *s)
return 0; return 0;
} }
static int flac_decode_frame(AVCodecContext *avctx, static int flac_decode_frame(AVCodecContext *avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
FLACContext *s = avctx->priv_data; FLACContext *s = avctx->priv_data;
int i, j = 0, bytes_read = 0; int i, j = 0, bytes_read = 0;
int16_t *samples_16 = data; int16_t *samples_16;
int32_t *samples_32 = data; int32_t *samples_32;
int alloc_data_size= *data_size; int ret;
int output_size;
*data_size=0; *got_frame_ptr = 0;
if (s->max_framesize == 0) { if (s->max_framesize == 0) {
s->max_framesize = s->max_framesize =
@ -586,15 +588,14 @@ static int flac_decode_frame(AVCodecContext *avctx,
} }
bytes_read = (get_bits_count(&s->gb)+7)/8; bytes_read = (get_bits_count(&s->gb)+7)/8;
/* check if allocated data size is large enough for output */ /* get output buffer */
output_size = s->blocksize * s->channels * s->frame.nb_samples = s->blocksize;
av_get_bytes_per_sample(avctx->sample_fmt); if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
if (output_size > alloc_data_size) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
av_log(s->avctx, AV_LOG_ERROR, "output data size is larger than " return ret;
"allocated data size\n");
return -1;
} }
*data_size = output_size; samples_16 = (int16_t *)s->frame.data[0];
samples_32 = (int32_t *)s->frame.data[0];
#define DECORRELATE(left, right)\ #define DECORRELATE(left, right)\
assert(s->channels == 2);\ assert(s->channels == 2);\
@ -639,6 +640,9 @@ static int flac_decode_frame(AVCodecContext *avctx,
buf_size - bytes_read, buf_size); buf_size - bytes_read, buf_size);
} }
*got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return bytes_read; return bytes_read;
} }
@ -662,5 +666,6 @@ AVCodec ff_flac_decoder = {
.init = flac_decode_init, .init = flac_decode_init,
.close = flac_decode_close, .close = flac_decode_close,
.decode = flac_decode_frame, .decode = flac_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name= NULL_IF_CONFIG_SMALL("FLAC (Free Lossless Audio Codec)"), .long_name= NULL_IF_CONFIG_SMALL("FLAC (Free Lossless Audio Codec)"),
}; };

@ -26,10 +26,12 @@
#define AVCODEC_G722_H #define AVCODEC_G722_H
#include <stdint.h> #include <stdint.h>
#include "avcodec.h"
#define PREV_SAMPLES_BUF_SIZE 1024 #define PREV_SAMPLES_BUF_SIZE 1024
typedef struct { typedef struct {
AVFrame frame;
int16_t prev_samples[PREV_SAMPLES_BUF_SIZE]; ///< memory of past decoded samples int16_t prev_samples[PREV_SAMPLES_BUF_SIZE]; ///< memory of past decoded samples
int prev_samples_pos; ///< the number of values in prev_samples int prev_samples_pos; ///< the number of values in prev_samples

@ -66,6 +66,9 @@ static av_cold int g722_decode_init(AVCodecContext * avctx)
c->band[1].scale_factor = 2; c->band[1].scale_factor = 2;
c->prev_samples_pos = 22; c->prev_samples_pos = 22;
avcodec_get_frame_defaults(&c->frame);
avctx->coded_frame = &c->frame;
return 0; return 0;
} }
@ -81,20 +84,22 @@ static const int16_t *low_inv_quants[3] = { ff_g722_low_inv_quant6,
ff_g722_low_inv_quant4 }; ff_g722_low_inv_quant4 };
static int g722_decode_frame(AVCodecContext *avctx, void *data, static int g722_decode_frame(AVCodecContext *avctx, void *data,
int *data_size, AVPacket *avpkt) int *got_frame_ptr, AVPacket *avpkt)
{ {
G722Context *c = avctx->priv_data; G722Context *c = avctx->priv_data;
int16_t *out_buf = data; int16_t *out_buf;
int j, out_len; int j, ret;
const int skip = 8 - avctx->bits_per_coded_sample; const int skip = 8 - avctx->bits_per_coded_sample;
const int16_t *quantizer_table = low_inv_quants[skip]; const int16_t *quantizer_table = low_inv_quants[skip];
GetBitContext gb; GetBitContext gb;
out_len = avpkt->size * 2 * av_get_bytes_per_sample(avctx->sample_fmt); /* get output buffer */
if (*data_size < out_len) { c->frame.nb_samples = avpkt->size * 2;
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
return AVERROR(EINVAL); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
} }
out_buf = (int16_t *)c->frame.data[0];
init_get_bits(&gb, avpkt->data, avpkt->size * 8); init_get_bits(&gb, avpkt->data, avpkt->size * 8);
@ -128,7 +133,10 @@ static int g722_decode_frame(AVCodecContext *avctx, void *data,
c->prev_samples_pos = 22; c->prev_samples_pos = 22;
} }
} }
*data_size = out_len;
*got_frame_ptr = 1;
*(AVFrame *)data = c->frame;
return avpkt->size; return avpkt->size;
} }
@ -139,5 +147,6 @@ AVCodec ff_adpcm_g722_decoder = {
.priv_data_size = sizeof(G722Context), .priv_data_size = sizeof(G722Context),
.init = g722_decode_init, .init = g722_decode_init,
.decode = g722_decode_frame, .decode = g722_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("G.722 ADPCM"), .long_name = NULL_IF_CONFIG_SMALL("G.722 ADPCM"),
}; };

@ -75,6 +75,7 @@ typedef struct G726Tables {
typedef struct G726Context { typedef struct G726Context {
AVClass *class; AVClass *class;
AVFrame frame;
G726Tables tbls; /**< static tables needed for computation */ G726Tables tbls; /**< static tables needed for computation */
Float11 sr[2]; /**< prev. reconstructed samples */ Float11 sr[2]; /**< prev. reconstructed samples */
@ -427,26 +428,31 @@ static av_cold int g726_decode_init(AVCodecContext *avctx)
avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->sample_fmt = AV_SAMPLE_FMT_S16;
avcodec_get_frame_defaults(&c->frame);
avctx->coded_frame = &c->frame;
return 0; return 0;
} }
static int g726_decode_frame(AVCodecContext *avctx, static int g726_decode_frame(AVCodecContext *avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
G726Context *c = avctx->priv_data; G726Context *c = avctx->priv_data;
int16_t *samples = data; int16_t *samples;
GetBitContext gb; GetBitContext gb;
int out_samples, out_size; int out_samples, ret;
out_samples = buf_size * 8 / c->code_size; out_samples = buf_size * 8 / c->code_size;
out_size = out_samples * av_get_bytes_per_sample(avctx->sample_fmt);
if (*data_size < out_size) { /* get output buffer */
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); c->frame.nb_samples = out_samples;
return AVERROR(EINVAL); if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
} }
samples = (int16_t *)c->frame.data[0];
init_get_bits(&gb, buf, buf_size * 8); init_get_bits(&gb, buf, buf_size * 8);
@ -456,7 +462,9 @@ static int g726_decode_frame(AVCodecContext *avctx,
if (get_bits_left(&gb) > 0) if (get_bits_left(&gb) > 0)
av_log(avctx, AV_LOG_ERROR, "Frame invalidly split, missing parser?\n"); av_log(avctx, AV_LOG_ERROR, "Frame invalidly split, missing parser?\n");
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = c->frame;
return buf_size; return buf_size;
} }
@ -474,6 +482,7 @@ AVCodec ff_adpcm_g726_decoder = {
.init = g726_decode_init, .init = g726_decode_init,
.decode = g726_decode_frame, .decode = g726_decode_frame,
.flush = g726_decode_flush, .flush = g726_decode_flush,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("G.726 ADPCM"), .long_name = NULL_IF_CONFIG_SMALL("G.726 ADPCM"),
}; };
#endif #endif

@ -32,6 +32,8 @@
static av_cold int gsm_init(AVCodecContext *avctx) static av_cold int gsm_init(AVCodecContext *avctx)
{ {
GSMContext *s = avctx->priv_data;
avctx->channels = 1; avctx->channels = 1;
if (!avctx->sample_rate) if (!avctx->sample_rate)
avctx->sample_rate = 8000; avctx->sample_rate = 8000;
@ -47,30 +49,35 @@ static av_cold int gsm_init(AVCodecContext *avctx)
avctx->block_align = GSM_MS_BLOCK_SIZE; avctx->block_align = GSM_MS_BLOCK_SIZE;
} }
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0; return 0;
} }
static int gsm_decode_frame(AVCodecContext *avctx, void *data, static int gsm_decode_frame(AVCodecContext *avctx, void *data,
int *data_size, AVPacket *avpkt) int *got_frame_ptr, AVPacket *avpkt)
{ {
GSMContext *s = avctx->priv_data;
int res; int res;
GetBitContext gb; GetBitContext gb;
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
int16_t *samples = data; int16_t *samples;
int frame_bytes = avctx->frame_size *
av_get_bytes_per_sample(avctx->sample_fmt);
if (*data_size < frame_bytes) {
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
return AVERROR(EINVAL);
}
if (buf_size < avctx->block_align) { if (buf_size < avctx->block_align) {
av_log(avctx, AV_LOG_ERROR, "Packet is too small\n"); av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
/* get output buffer */
s->frame.nb_samples = avctx->frame_size;
if ((res = avctx->get_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return res;
}
samples = (int16_t *)s->frame.data[0];
switch (avctx->codec_id) { switch (avctx->codec_id) {
case CODEC_ID_GSM: case CODEC_ID_GSM:
init_get_bits(&gb, buf, buf_size * 8); init_get_bits(&gb, buf, buf_size * 8);
@ -85,7 +92,10 @@ static int gsm_decode_frame(AVCodecContext *avctx, void *data,
if (res < 0) if (res < 0)
return res; return res;
} }
*data_size = frame_bytes;
*got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return avctx->block_align; return avctx->block_align;
} }
@ -103,6 +113,7 @@ AVCodec ff_gsm_decoder = {
.init = gsm_init, .init = gsm_init,
.decode = gsm_decode_frame, .decode = gsm_decode_frame,
.flush = gsm_flush, .flush = gsm_flush,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("GSM"), .long_name = NULL_IF_CONFIG_SMALL("GSM"),
}; };
@ -114,5 +125,6 @@ AVCodec ff_gsm_ms_decoder = {
.init = gsm_init, .init = gsm_init,
.decode = gsm_decode_frame, .decode = gsm_decode_frame,
.flush = gsm_flush, .flush = gsm_flush,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("GSM Microsoft variant"), .long_name = NULL_IF_CONFIG_SMALL("GSM Microsoft variant"),
}; };

@ -23,6 +23,7 @@
#define AVCODEC_GSMDEC_DATA #define AVCODEC_GSMDEC_DATA
#include <stdint.h> #include <stdint.h>
#include "avcodec.h"
// input and output sizes in byte // input and output sizes in byte
#define GSM_BLOCK_SIZE 33 #define GSM_BLOCK_SIZE 33
@ -30,6 +31,7 @@
#define GSM_FRAME_SIZE 160 #define GSM_FRAME_SIZE 160
typedef struct { typedef struct {
AVFrame frame;
// Contains first 120 elements from the previous frame // Contains first 120 elements from the previous frame
// (used by long_term_synth according to the "lag"), // (used by long_term_synth according to the "lag"),
// then in the following 160 elements the current // then in the following 160 elements the current

@ -51,6 +51,8 @@
#define COEFFS 256 #define COEFFS 256
typedef struct { typedef struct {
AVFrame frame;
float old_floor[BANDS]; float old_floor[BANDS];
float flcoeffs1[BANDS]; float flcoeffs1[BANDS];
float flcoeffs2[BANDS]; float flcoeffs2[BANDS];
@ -168,6 +170,10 @@ static av_cold int imc_decode_init(AVCodecContext * avctx)
dsputil_init(&q->dsp, avctx); dsputil_init(&q->dsp, avctx);
avctx->sample_fmt = AV_SAMPLE_FMT_FLT; avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
avctx->channel_layout = AV_CH_LAYOUT_MONO; avctx->channel_layout = AV_CH_LAYOUT_MONO;
avcodec_get_frame_defaults(&q->frame);
avctx->coded_frame = &q->frame;
return 0; return 0;
} }
@ -649,9 +655,8 @@ static int imc_get_coeffs (IMCContext* q) {
return 0; return 0;
} }
static int imc_decode_frame(AVCodecContext * avctx, static int imc_decode_frame(AVCodecContext * avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
@ -659,7 +664,7 @@ static int imc_decode_frame(AVCodecContext * avctx,
IMCContext *q = avctx->priv_data; IMCContext *q = avctx->priv_data;
int stream_format_code; int stream_format_code;
int imc_hdr, i, j, out_size, ret; int imc_hdr, i, j, ret;
int flag; int flag;
int bits, summer; int bits, summer;
int counter, bitscount; int counter, bitscount;
@ -670,15 +675,16 @@ static int imc_decode_frame(AVCodecContext * avctx,
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
out_size = COEFFS * av_get_bytes_per_sample(avctx->sample_fmt); /* get output buffer */
if (*data_size < out_size) { q->frame.nb_samples = COEFFS;
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); if ((ret = avctx->get_buffer(avctx, &q->frame)) < 0) {
return AVERROR(EINVAL); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
} }
q->out_samples = (float *)q->frame.data[0];
q->dsp.bswap16_buf(buf16, (const uint16_t*)buf, IMC_BLOCK_SIZE / 2); q->dsp.bswap16_buf(buf16, (const uint16_t*)buf, IMC_BLOCK_SIZE / 2);
q->out_samples = data;
init_get_bits(&q->gb, (const uint8_t*)buf16, IMC_BLOCK_SIZE * 8); init_get_bits(&q->gb, (const uint8_t*)buf16, IMC_BLOCK_SIZE * 8);
/* Check the frame header */ /* Check the frame header */
@ -823,7 +829,8 @@ static int imc_decode_frame(AVCodecContext * avctx,
imc_imdct256(q); imc_imdct256(q);
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = q->frame;
return IMC_BLOCK_SIZE; return IMC_BLOCK_SIZE;
} }
@ -834,6 +841,7 @@ static av_cold int imc_decode_close(AVCodecContext * avctx)
IMCContext *q = avctx->priv_data; IMCContext *q = avctx->priv_data;
ff_fft_end(&q->fft); ff_fft_end(&q->fft);
return 0; return 0;
} }
@ -846,5 +854,6 @@ AVCodec ff_imc_decoder = {
.init = imc_decode_init, .init = imc_decode_init,
.close = imc_decode_close, .close = imc_decode_close,
.decode = imc_decode_frame, .decode = imc_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("IMC (Intel Music Coder)"), .long_name = NULL_IF_CONFIG_SMALL("IMC (Intel Music Coder)"),
}; };

@ -31,12 +31,15 @@
typedef struct InternalBuffer { typedef struct InternalBuffer {
int last_pic_num; int last_pic_num;
uint8_t *base[4]; uint8_t *base[AV_NUM_DATA_POINTERS];
uint8_t *data[4]; uint8_t *data[AV_NUM_DATA_POINTERS];
int linesize[4]; int linesize[AV_NUM_DATA_POINTERS];
int width; int width;
int height; int height;
enum PixelFormat pix_fmt; enum PixelFormat pix_fmt;
uint8_t **extended_data;
int audio_data_size;
int nb_channels;
} InternalBuffer; } InternalBuffer;
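
The three new fields let the default allocator cache one audio buffer and reuse it across calls. A condensed sketch of the reuse test, hypothetical rather than the literal utils.c code (which this excerpt does not show):

/* Reuse the cached audio buffer only if the requested geometry matches. */
static int audio_buf_reusable(const InternalBuffer *buf, AVCodecContext *avctx,
                              int needed_size)
{
    return buf->extended_data &&                    /* previously allocated */
           buf->audio_data_size >= needed_size &&   /* large enough */
           buf->nb_channels == avctx->channels;     /* same plane layout */
}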
typedef struct AVCodecInternal { typedef struct AVCodecInternal {

@ -124,7 +124,14 @@ AVCodec ff_libgsm_ms_encoder = {
.long_name = NULL_IF_CONFIG_SMALL("libgsm GSM Microsoft variant"), .long_name = NULL_IF_CONFIG_SMALL("libgsm GSM Microsoft variant"),
}; };
typedef struct LibGSMDecodeContext {
AVFrame frame;
struct gsm_state *state;
} LibGSMDecodeContext;
static av_cold int libgsm_decode_init(AVCodecContext *avctx) { static av_cold int libgsm_decode_init(AVCodecContext *avctx) {
LibGSMDecodeContext *s = avctx->priv_data;
if (avctx->channels > 1) { if (avctx->channels > 1) {
av_log(avctx, AV_LOG_ERROR, "Mono required for GSM, got %d channels\n", av_log(avctx, AV_LOG_ERROR, "Mono required for GSM, got %d channels\n",
avctx->channels); avctx->channels);
@ -139,7 +146,7 @@ static av_cold int libgsm_decode_init(AVCodecContext *avctx) {
avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->sample_fmt = AV_SAMPLE_FMT_S16;
avctx->priv_data = gsm_create(); s->state = gsm_create();
switch(avctx->codec_id) { switch(avctx->codec_id) {
case CODEC_ID_GSM: case CODEC_ID_GSM:
@ -154,59 +161,72 @@ static av_cold int libgsm_decode_init(AVCodecContext *avctx) {
} }
} }
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0; return 0;
} }
static av_cold int libgsm_decode_close(AVCodecContext *avctx) { static av_cold int libgsm_decode_close(AVCodecContext *avctx) {
gsm_destroy(avctx->priv_data); LibGSMDecodeContext *s = avctx->priv_data;
avctx->priv_data = NULL;
gsm_destroy(s->state);
s->state = NULL;
return 0; return 0;
} }
static int libgsm_decode_frame(AVCodecContext *avctx, static int libgsm_decode_frame(AVCodecContext *avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt) { {
int i, ret; int i, ret;
struct gsm_state *s = avctx->priv_data; LibGSMDecodeContext *s = avctx->priv_data;
uint8_t *buf = avpkt->data; uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
int16_t *samples = data; int16_t *samples;
int out_size = avctx->frame_size * av_get_bytes_per_sample(avctx->sample_fmt);
if (*data_size < out_size) {
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
return AVERROR(EINVAL);
}
if (buf_size < avctx->block_align) { if (buf_size < avctx->block_align) {
av_log(avctx, AV_LOG_ERROR, "Packet is too small\n"); av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
/* get output buffer */
s->frame.nb_samples = avctx->frame_size;
if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
samples = (int16_t *)s->frame.data[0];
for (i = 0; i < avctx->frame_size / GSM_FRAME_SIZE; i++) { for (i = 0; i < avctx->frame_size / GSM_FRAME_SIZE; i++) {
if ((ret = gsm_decode(s, buf, samples)) < 0) if ((ret = gsm_decode(s->state, buf, samples)) < 0)
return -1; return -1;
buf += GSM_BLOCK_SIZE; buf += GSM_BLOCK_SIZE;
samples += GSM_FRAME_SIZE; samples += GSM_FRAME_SIZE;
} }
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return avctx->block_align; return avctx->block_align;
} }
static void libgsm_flush(AVCodecContext *avctx) { static void libgsm_flush(AVCodecContext *avctx) {
gsm_destroy(avctx->priv_data); LibGSMDecodeContext *s = avctx->priv_data;
avctx->priv_data = gsm_create();
gsm_destroy(s->state);
s->state = gsm_create();
} }
AVCodec ff_libgsm_decoder = { AVCodec ff_libgsm_decoder = {
.name = "libgsm", .name = "libgsm",
.type = AVMEDIA_TYPE_AUDIO, .type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_GSM, .id = CODEC_ID_GSM,
.priv_data_size = sizeof(LibGSMDecodeContext),
.init = libgsm_decode_init, .init = libgsm_decode_init,
.close = libgsm_decode_close, .close = libgsm_decode_close,
.decode = libgsm_decode_frame, .decode = libgsm_decode_frame,
.flush = libgsm_flush, .flush = libgsm_flush,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("libgsm GSM"), .long_name = NULL_IF_CONFIG_SMALL("libgsm GSM"),
}; };
@ -214,9 +234,11 @@ AVCodec ff_libgsm_ms_decoder = {
.name = "libgsm_ms", .name = "libgsm_ms",
.type = AVMEDIA_TYPE_AUDIO, .type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_GSM_MS, .id = CODEC_ID_GSM_MS,
.priv_data_size = sizeof(LibGSMDecodeContext),
.init = libgsm_decode_init, .init = libgsm_decode_init,
.close = libgsm_decode_close, .close = libgsm_decode_close,
.decode = libgsm_decode_frame, .decode = libgsm_decode_frame,
.flush = libgsm_flush, .flush = libgsm_flush,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("libgsm GSM Microsoft variant"), .long_name = NULL_IF_CONFIG_SMALL("libgsm GSM Microsoft variant"),
}; };

@ -79,6 +79,7 @@ static int get_bitrate_mode(int bitrate, void *log_ctx)
typedef struct AMRContext { typedef struct AMRContext {
AVClass *av_class; AVClass *av_class;
AVFrame frame;
void *dec_state; void *dec_state;
void *enc_state; void *enc_state;
int enc_bitrate; int enc_bitrate;
@ -112,6 +113,9 @@ static av_cold int amr_nb_decode_init(AVCodecContext *avctx)
return AVERROR(ENOSYS); return AVERROR(ENOSYS);
} }
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0; return 0;
} }
@ -120,26 +124,28 @@ static av_cold int amr_nb_decode_close(AVCodecContext *avctx)
AMRContext *s = avctx->priv_data; AMRContext *s = avctx->priv_data;
Decoder_Interface_exit(s->dec_state); Decoder_Interface_exit(s->dec_state);
return 0; return 0;
} }
static int amr_nb_decode_frame(AVCodecContext *avctx, void *data, static int amr_nb_decode_frame(AVCodecContext *avctx, void *data,
int *data_size, AVPacket *avpkt) int *got_frame_ptr, AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
AMRContext *s = avctx->priv_data; AMRContext *s = avctx->priv_data;
static const uint8_t block_size[16] = { 12, 13, 15, 17, 19, 20, 26, 31, 5, 0, 0, 0, 0, 0, 0, 0 }; static const uint8_t block_size[16] = { 12, 13, 15, 17, 19, 20, 26, 31, 5, 0, 0, 0, 0, 0, 0, 0 };
enum Mode dec_mode; enum Mode dec_mode;
int packet_size, out_size; int packet_size, ret;
av_dlog(avctx, "amr_decode_frame buf=%p buf_size=%d frame_count=%d!!\n", av_dlog(avctx, "amr_decode_frame buf=%p buf_size=%d frame_count=%d!!\n",
buf, buf_size, avctx->frame_number); buf, buf_size, avctx->frame_number);
out_size = 160 * av_get_bytes_per_sample(avctx->sample_fmt); /* get output buffer */
if (*data_size < out_size) { s->frame.nb_samples = 160;
av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n"); if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
return AVERROR(EINVAL); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
} }
dec_mode = (buf[0] >> 3) & 0x000F; dec_mode = (buf[0] >> 3) & 0x000F;
@ -154,8 +160,10 @@ static int amr_nb_decode_frame(AVCodecContext *avctx, void *data,
av_dlog(avctx, "packet_size=%d buf= 0x%X %X %X %X\n", av_dlog(avctx, "packet_size=%d buf= 0x%X %X %X %X\n",
packet_size, buf[0], buf[1], buf[2], buf[3]); packet_size, buf[0], buf[1], buf[2], buf[3]);
/* call decoder */ /* call decoder */
Decoder_Interface_Decode(s->dec_state, buf, data, 0); Decoder_Interface_Decode(s->dec_state, buf, (short *)s->frame.data[0], 0);
*data_size = out_size;
*got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return packet_size; return packet_size;
} }
@ -168,6 +176,7 @@ AVCodec ff_libopencore_amrnb_decoder = {
.init = amr_nb_decode_init, .init = amr_nb_decode_init,
.close = amr_nb_decode_close, .close = amr_nb_decode_close,
.decode = amr_nb_decode_frame, .decode = amr_nb_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("OpenCORE Adaptive Multi-Rate (AMR) Narrow-Band"), .long_name = NULL_IF_CONFIG_SMALL("OpenCORE Adaptive Multi-Rate (AMR) Narrow-Band"),
}; };
@ -251,6 +260,7 @@ AVCodec ff_libopencore_amrnb_encoder = {
#include <opencore-amrwb/if_rom.h> #include <opencore-amrwb/if_rom.h>
typedef struct AMRWBContext { typedef struct AMRWBContext {
AVFrame frame;
void *state; void *state;
} AMRWBContext; } AMRWBContext;
@ -267,23 +277,27 @@ static av_cold int amr_wb_decode_init(AVCodecContext *avctx)
return AVERROR(ENOSYS); return AVERROR(ENOSYS);
} }
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0; return 0;
} }
static int amr_wb_decode_frame(AVCodecContext *avctx, void *data, static int amr_wb_decode_frame(AVCodecContext *avctx, void *data,
int *data_size, AVPacket *avpkt) int *got_frame_ptr, AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
AMRWBContext *s = avctx->priv_data; AMRWBContext *s = avctx->priv_data;
int mode; int mode, ret;
int packet_size, out_size; int packet_size;
static const uint8_t block_size[16] = {18, 24, 33, 37, 41, 47, 51, 59, 61, 6, 6, 0, 0, 0, 1, 1}; static const uint8_t block_size[16] = {18, 24, 33, 37, 41, 47, 51, 59, 61, 6, 6, 0, 0, 0, 1, 1};
out_size = 320 * av_get_bytes_per_sample(avctx->sample_fmt); /* get output buffer */
if (*data_size < out_size) { s->frame.nb_samples = 320;
av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n"); if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
return AVERROR(EINVAL); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
} }
mode = (buf[0] >> 3) & 0x000F; mode = (buf[0] >> 3) & 0x000F;
@ -295,8 +309,11 @@ static int amr_wb_decode_frame(AVCodecContext *avctx, void *data,
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
D_IF_decode(s->state, buf, data, _good_frame); D_IF_decode(s->state, buf, (short *)s->frame.data[0], _good_frame);
*data_size = out_size;
*got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return packet_size; return packet_size;
} }
@ -316,6 +333,7 @@ AVCodec ff_libopencore_amrwb_decoder = {
.init = amr_wb_decode_init, .init = amr_wb_decode_init,
.close = amr_wb_decode_close, .close = amr_wb_decode_close,
.decode = amr_wb_decode_frame, .decode = amr_wb_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("OpenCORE Adaptive Multi-Rate (AMR) Wide-Band"), .long_name = NULL_IF_CONFIG_SMALL("OpenCORE Adaptive Multi-Rate (AMR) Wide-Band"),
}; };

@ -25,6 +25,7 @@
#include "avcodec.h" #include "avcodec.h"
typedef struct { typedef struct {
AVFrame frame;
SpeexBits bits; SpeexBits bits;
SpeexStereoState stereo; SpeexStereoState stereo;
void *dec_state; void *dec_state;
@ -89,26 +90,29 @@ static av_cold int libspeex_decode_init(AVCodecContext *avctx)
s->stereo = (SpeexStereoState)SPEEX_STEREO_STATE_INIT; s->stereo = (SpeexStereoState)SPEEX_STEREO_STATE_INIT;
speex_decoder_ctl(s->dec_state, SPEEX_SET_HANDLER, &callback); speex_decoder_ctl(s->dec_state, SPEEX_SET_HANDLER, &callback);
} }
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0; return 0;
} }
static int libspeex_decode_frame(AVCodecContext *avctx, static int libspeex_decode_frame(AVCodecContext *avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
uint8_t *buf = avpkt->data; uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
LibSpeexContext *s = avctx->priv_data; LibSpeexContext *s = avctx->priv_data;
int16_t *output = data; int16_t *output;
int out_size, ret, consumed = 0; int ret, consumed = 0;
/* check output buffer size */ /* get output buffer */
out_size = s->frame_size * avctx->channels * s->frame.nb_samples = s->frame_size;
av_get_bytes_per_sample(avctx->sample_fmt); if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
if (*data_size < out_size) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); return ret;
return AVERROR(EINVAL);
} }
output = (int16_t *)s->frame.data[0];
/* if there is not enough data left for the smallest possible frame, /* if there is not enough data left for the smallest possible frame,
reset the libspeex buffer using the current packet, otherwise ignore reset the libspeex buffer using the current packet, otherwise ignore
@ -116,7 +120,7 @@ static int libspeex_decode_frame(AVCodecContext *avctx,
if (speex_bits_remaining(&s->bits) < 43) { if (speex_bits_remaining(&s->bits) < 43) {
/* check for flush packet */ /* check for flush packet */
if (!buf || !buf_size) { if (!buf || !buf_size) {
*data_size = 0; *got_frame_ptr = 0;
return buf_size; return buf_size;
} }
/* set new buffer */ /* set new buffer */
@ -133,7 +137,9 @@ static int libspeex_decode_frame(AVCodecContext *avctx,
if (avctx->channels == 2) if (avctx->channels == 2)
speex_decode_stereo_int(output, s->frame_size, &s->stereo); speex_decode_stereo_int(output, s->frame_size, &s->stereo);
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return consumed; return consumed;
} }
@ -163,6 +169,6 @@ AVCodec ff_libspeex_decoder = {
.close = libspeex_decode_close, .close = libspeex_decode_close,
.decode = libspeex_decode_frame, .decode = libspeex_decode_frame,
.flush = libspeex_decode_flush, .flush = libspeex_decode_flush,
.capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY, .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY | CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("libspeex Speex"), .long_name = NULL_IF_CONFIG_SMALL("libspeex Speex"),
}; };

@ -153,6 +153,7 @@ typedef struct ChannelData {
} ChannelData; } ChannelData;
typedef struct MACEContext { typedef struct MACEContext {
AVFrame frame;
ChannelData chd[2]; ChannelData chd[2];
} MACEContext; } MACEContext;
@ -228,30 +229,35 @@ static void chomp6(ChannelData *chd, int16_t *output, uint8_t val,
static av_cold int mace_decode_init(AVCodecContext * avctx) static av_cold int mace_decode_init(AVCodecContext * avctx)
{ {
MACEContext *ctx = avctx->priv_data;
if (avctx->channels > 2) if (avctx->channels > 2)
return -1; return -1;
avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->sample_fmt = AV_SAMPLE_FMT_S16;
avcodec_get_frame_defaults(&ctx->frame);
avctx->coded_frame = &ctx->frame;
return 0; return 0;
} }
static int mace_decode_frame(AVCodecContext *avctx, static int mace_decode_frame(AVCodecContext *avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
int16_t *samples = data; int16_t *samples;
MACEContext *ctx = avctx->priv_data; MACEContext *ctx = avctx->priv_data;
int i, j, k, l; int i, j, k, l, ret;
int out_size;
int is_mace3 = (avctx->codec_id == CODEC_ID_MACE3); int is_mace3 = (avctx->codec_id == CODEC_ID_MACE3);
out_size = 3 * (buf_size << (1 - is_mace3)) * /* get output buffer */
av_get_bytes_per_sample(avctx->sample_fmt); ctx->frame.nb_samples = 3 * (buf_size << (1 - is_mace3)) / avctx->channels;
if (*data_size < out_size) { if ((ret = avctx->get_buffer(avctx, &ctx->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return AVERROR(EINVAL); return ret;
} }
samples = (int16_t *)ctx->frame.data[0];
for(i = 0; i < avctx->channels; i++) { for(i = 0; i < avctx->channels; i++) {
int16_t *output = samples + i; int16_t *output = samples + i;
@ -277,7 +283,8 @@ static int mace_decode_frame(AVCodecContext *avctx,
} }
} }
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = ctx->frame;
return buf_size; return buf_size;
} }
@ -289,6 +296,7 @@ AVCodec ff_mace3_decoder = {
.priv_data_size = sizeof(MACEContext), .priv_data_size = sizeof(MACEContext),
.init = mace_decode_init, .init = mace_decode_init,
.decode = mace_decode_frame, .decode = mace_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh Audio Compression/Expansion) 3:1"), .long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh Audio Compression/Expansion) 3:1"),
}; };
@ -299,6 +307,7 @@ AVCodec ff_mace6_decoder = {
.priv_data_size = sizeof(MACEContext), .priv_data_size = sizeof(MACEContext),
.init = mace_decode_init, .init = mace_decode_init,
.decode = mace_decode_frame, .decode = mace_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh Audio Compression/Expansion) 6:1"), .long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh Audio Compression/Expansion) 6:1"),
}; };

@ -120,6 +120,7 @@ typedef struct SubStream {
typedef struct MLPDecodeContext { typedef struct MLPDecodeContext {
AVCodecContext *avctx; AVCodecContext *avctx;
AVFrame frame;
//! Current access unit being read has a major sync. //! Current access unit being read has a major sync.
int is_major_sync_unit; int is_major_sync_unit;
@ -239,6 +240,9 @@ static av_cold int mlp_decode_init(AVCodecContext *avctx)
m->substream[substr].lossless_check_data = 0xffffffff; m->substream[substr].lossless_check_data = 0xffffffff;
dsputil_init(&m->dsp, avctx); dsputil_init(&m->dsp, avctx);
avcodec_get_frame_defaults(&m->frame);
avctx->coded_frame = &m->frame;
return 0; return 0;
} }
@ -905,13 +909,14 @@ static void rematrix_channels(MLPDecodeContext *m, unsigned int substr)
/** Write the audio data into the output buffer. */ /** Write the audio data into the output buffer. */
static int output_data(MLPDecodeContext *m, unsigned int substr, static int output_data(MLPDecodeContext *m, unsigned int substr,
uint8_t *data, unsigned int *data_size) void *data, int *got_frame_ptr)
{ {
AVCodecContext *avctx = m->avctx;
SubStream *s = &m->substream[substr]; SubStream *s = &m->substream[substr];
unsigned int i, out_ch = 0; unsigned int i, out_ch = 0;
int out_size; int32_t *data_32;
int32_t *data_32 = (int32_t*) data; int16_t *data_16;
int16_t *data_16 = (int16_t*) data; int ret;
int is32 = (m->avctx->sample_fmt == AV_SAMPLE_FMT_S32); int is32 = (m->avctx->sample_fmt == AV_SAMPLE_FMT_S32);
if (m->avctx->channels != s->max_matrix_channel + 1) { if (m->avctx->channels != s->max_matrix_channel + 1) {
@ -919,11 +924,14 @@ static int output_data(MLPDecodeContext *m, unsigned int substr,
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
out_size = s->blockpos * m->avctx->channels * /* get output buffer */
av_get_bytes_per_sample(m->avctx->sample_fmt); m->frame.nb_samples = s->blockpos;
if ((ret = avctx->get_buffer(avctx, &m->frame)) < 0) {
if (*data_size < out_size) av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return AVERROR(EINVAL); return ret;
}
data_32 = (int32_t *)m->frame.data[0];
data_16 = (int16_t *)m->frame.data[0];
for (i = 0; i < s->blockpos; i++) { for (i = 0; i < s->blockpos; i++) {
for (out_ch = 0; out_ch <= s->max_matrix_channel; out_ch++) { for (out_ch = 0; out_ch <= s->max_matrix_channel; out_ch++) {
@ -936,7 +944,8 @@ static int output_data(MLPDecodeContext *m, unsigned int substr,
} }
} }
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = m->frame;
return 0; return 0;
} }
@ -945,8 +954,8 @@ static int output_data(MLPDecodeContext *m, unsigned int substr,
* @return negative on error, 0 if not enough data is present in the input stream, * @return negative on error, 0 if not enough data is present in the input stream,
* otherwise the number of bytes consumed. */ * otherwise the number of bytes consumed. */
static int read_access_unit(AVCodecContext *avctx, void* data, int *data_size, static int read_access_unit(AVCodecContext *avctx, void* data,
AVPacket *avpkt) int *got_frame_ptr, AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
@ -982,7 +991,7 @@ static int read_access_unit(AVCodecContext *avctx, void* data, int *data_size,
if (!m->params_valid) { if (!m->params_valid) {
av_log(m->avctx, AV_LOG_WARNING, av_log(m->avctx, AV_LOG_WARNING,
"Stream parameters not seen; skipping frame.\n"); "Stream parameters not seen; skipping frame.\n");
*data_size = 0; *got_frame_ptr = 0;
return length; return length;
} }
@ -1127,7 +1136,7 @@ next_substr:
rematrix_channels(m, m->max_decoded_substream); rematrix_channels(m, m->max_decoded_substream);
if ((ret = output_data(m, m->max_decoded_substream, data, data_size)) < 0) if ((ret = output_data(m, m->max_decoded_substream, data, got_frame_ptr)) < 0)
return ret; return ret;
return length; return length;
@ -1148,6 +1157,7 @@ AVCodec ff_mlp_decoder = {
.priv_data_size = sizeof(MLPDecodeContext), .priv_data_size = sizeof(MLPDecodeContext),
.init = mlp_decode_init, .init = mlp_decode_init,
.decode = read_access_unit, .decode = read_access_unit,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("MLP (Meridian Lossless Packing)"), .long_name = NULL_IF_CONFIG_SMALL("MLP (Meridian Lossless Packing)"),
}; };
@ -1159,6 +1169,7 @@ AVCodec ff_truehd_decoder = {
.priv_data_size = sizeof(MLPDecodeContext), .priv_data_size = sizeof(MLPDecodeContext),
.init = mlp_decode_init, .init = mlp_decode_init,
.decode = read_access_unit, .decode = read_access_unit,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("TrueHD"), .long_name = NULL_IF_CONFIG_SMALL("TrueHD"),
}; };
#endif /* CONFIG_TRUEHD_DECODER */ #endif /* CONFIG_TRUEHD_DECODER */
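A note on the rewritten output_data(): a single get_buffer() request serves both output widths, and the two typed pointers are just views of the same allocation — only the one selected by is32 is ever advanced. Roughly (sketch, sample fetching elided):

    m->frame.nb_samples = s->blockpos;
    if ((ret = avctx->get_buffer(avctx, &m->frame)) < 0)
        return ret;
    data_32 = (int32_t *)m->frame.data[0];   /* written when is32      */
    data_16 = (int16_t *)m->frame.data[0];   /* written for 16-bit out */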

@ -50,6 +50,7 @@ typedef struct {
}Band; }Band;
typedef struct { typedef struct {
AVFrame frame;
DSPContext dsp; DSPContext dsp;
MPADSPContext mpadsp; MPADSPContext mpadsp;
GetBitContext gb; GetBitContext gb;

@ -136,6 +136,10 @@ static av_cold int mpc7_decode_init(AVCodecContext * avctx)
} }
} }
vlc_initialized = 1; vlc_initialized = 1;
avcodec_get_frame_defaults(&c->frame);
avctx->coded_frame = &c->frame;
return 0; return 0;
} }
@ -192,9 +196,8 @@ static int get_scale_idx(GetBitContext *gb, int ref)
return ref + t; return ref + t;
} }
static int mpc7_decode_frame(AVCodecContext * avctx, static int mpc7_decode_frame(AVCodecContext * avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
@ -204,7 +207,7 @@ static int mpc7_decode_frame(AVCodecContext * avctx,
int i, ch; int i, ch;
int mb = -1; int mb = -1;
Band *bands = c->bands; Band *bands = c->bands;
int off, out_size; int off, ret;
int bits_used, bits_avail; int bits_used, bits_avail;
memset(bands, 0, sizeof(*bands) * (c->maxbands + 1)); memset(bands, 0, sizeof(*bands) * (c->maxbands + 1));
@ -213,10 +216,11 @@ static int mpc7_decode_frame(AVCodecContext * avctx,
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
out_size = (buf[1] ? c->lastframelen : MPC_FRAME_SIZE) * 4; /* get output buffer */
if (*data_size < out_size) { c->frame.nb_samples = buf[1] ? c->lastframelen : MPC_FRAME_SIZE;
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
return AVERROR(EINVAL); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
} }
bits = av_malloc(((buf_size - 1) & ~3) + FF_INPUT_BUFFER_PADDING_SIZE); bits = av_malloc(((buf_size - 1) & ~3) + FF_INPUT_BUFFER_PADDING_SIZE);
@ -276,7 +280,7 @@ static int mpc7_decode_frame(AVCodecContext * avctx,
for(ch = 0; ch < 2; ch++) for(ch = 0; ch < 2; ch++)
idx_to_quant(c, &gb, bands[i].res[ch], c->Q[ch] + off); idx_to_quant(c, &gb, bands[i].res[ch], c->Q[ch] + off);
ff_mpc_dequantize_and_synth(c, mb, data, 2); ff_mpc_dequantize_and_synth(c, mb, c->frame.data[0], 2);
av_free(bits); av_free(bits);
@ -288,10 +292,12 @@ static int mpc7_decode_frame(AVCodecContext * avctx,
} }
if(c->frames_to_skip){ if(c->frames_to_skip){
c->frames_to_skip--; c->frames_to_skip--;
*data_size = 0; *got_frame_ptr = 0;
return buf_size; return buf_size;
} }
*data_size = out_size;
*got_frame_ptr = 1;
*(AVFrame *)data = c->frame;
return buf_size; return buf_size;
} }
@ -312,5 +318,6 @@ AVCodec ff_mpc7_decoder = {
.init = mpc7_decode_init, .init = mpc7_decode_init,
.decode = mpc7_decode_frame, .decode = mpc7_decode_frame,
.flush = mpc7_decode_flush, .flush = mpc7_decode_flush,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Musepack SV7"), .long_name = NULL_IF_CONFIG_SMALL("Musepack SV7"),
}; };
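For callers, the conversion above is what avcodec_decode_audio4() buys: no more pre-sized AVCODEC_MAX_AUDIO_FRAME_SIZE buffer; the byte count is derived from the returned frame. A caller-side sketch, assuming an opened decoder avctx, a demuxed pkt and an open outfile (error paths trimmed):

    AVFrame frame;
    int got_frame = 0, len;

    avcodec_get_frame_defaults(&frame);
    len = avcodec_decode_audio4(avctx, &frame, &got_frame, &pkt);
    if (len >= 0 && got_frame) {
        int data_size = av_samples_get_buffer_size(NULL, avctx->channels,
                                                   frame.nb_samples,
                                                   avctx->sample_fmt, 1);
        fwrite(frame.data[0], 1, data_size, outfile);
    }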

@ -228,12 +228,15 @@ static av_cold int mpc8_decode_init(AVCodecContext * avctx)
&mpc8_q8_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); &mpc8_q8_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
} }
vlc_initialized = 1; vlc_initialized = 1;
avcodec_get_frame_defaults(&c->frame);
avctx->coded_frame = &c->frame;
return 0; return 0;
} }
static int mpc8_decode_frame(AVCodecContext * avctx, static int mpc8_decode_frame(AVCodecContext * avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
@ -241,14 +244,15 @@ static int mpc8_decode_frame(AVCodecContext * avctx,
GetBitContext gb2, *gb = &gb2; GetBitContext gb2, *gb = &gb2;
int i, j, k, ch, cnt, res, t; int i, j, k, ch, cnt, res, t;
Band *bands = c->bands; Band *bands = c->bands;
int off, out_size; int off;
int maxband, keyframe; int maxband, keyframe;
int last[2]; int last[2];
out_size = MPC_FRAME_SIZE * 2 * avctx->channels; /* get output buffer */
if (*data_size < out_size) { c->frame.nb_samples = MPC_FRAME_SIZE;
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); if ((res = avctx->get_buffer(avctx, &c->frame)) < 0) {
return AVERROR(EINVAL); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return res;
} }
keyframe = c->cur_frame == 0; keyframe = c->cur_frame == 0;
@ -401,14 +405,16 @@ static int mpc8_decode_frame(AVCodecContext * avctx,
} }
} }
ff_mpc_dequantize_and_synth(c, maxband, data, avctx->channels); ff_mpc_dequantize_and_synth(c, maxband, c->frame.data[0], avctx->channels);
c->cur_frame++; c->cur_frame++;
c->last_bits_used = get_bits_count(gb); c->last_bits_used = get_bits_count(gb);
if(c->cur_frame >= c->frames) if(c->cur_frame >= c->frames)
c->cur_frame = 0; c->cur_frame = 0;
*data_size = out_size;
*got_frame_ptr = 1;
*(AVFrame *)data = c->frame;
return c->cur_frame ? c->last_bits_used >> 3 : buf_size; return c->cur_frame ? c->last_bits_used >> 3 : buf_size;
} }
@ -420,5 +426,6 @@ AVCodec ff_mpc8_decoder = {
.priv_data_size = sizeof(MPCContext), .priv_data_size = sizeof(MPCContext),
.init = mpc8_decode_init, .init = mpc8_decode_init,
.decode = mpc8_decode_frame, .decode = mpc8_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Musepack SV8"), .long_name = NULL_IF_CONFIG_SMALL("Musepack SV8"),
}; };
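The mpc8 return value is worth a second look: an SV8 packet carries several frames, so while frames remain the function returns only the bytes consumed so far (c->last_bits_used >> 3) and leaves the rest of the packet for the next call; only the final frame returns buf_size. The caller therefore re-feeds the remainder, along these lines (hypothetical loop):

    while (pkt.size > 0) {
        len = avcodec_decode_audio4(avctx, &frame, &got_frame, &pkt);
        if (len < 0)
            break;
        pkt.data += len;               /* skip the consumed frame */
        pkt.size -= len;
        /* ... use frame when got_frame is set ... */
    }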

@ -79,6 +79,7 @@ typedef struct MPADecodeContext {
int err_recognition; int err_recognition;
AVCodecContext* avctx; AVCodecContext* avctx;
MPADSPContext mpadsp; MPADSPContext mpadsp;
AVFrame frame;
} MPADecodeContext; } MPADecodeContext;
#if CONFIG_FLOAT #if CONFIG_FLOAT
@ -474,6 +475,10 @@ static av_cold int decode_init(AVCodecContext * avctx)
if (avctx->codec_id == CODEC_ID_MP3ADU) if (avctx->codec_id == CODEC_ID_MP3ADU)
s->adu_mode = 1; s->adu_mode = 1;
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0; return 0;
} }
@ -1695,7 +1700,7 @@ static int mp_decode_layer3(MPADecodeContext *s)
static int mp_decode_frame(MPADecodeContext *s, OUT_INT *samples, static int mp_decode_frame(MPADecodeContext *s, OUT_INT *samples,
const uint8_t *buf, int buf_size) const uint8_t *buf, int buf_size)
{ {
int i, nb_frames, ch; int i, nb_frames, ch, ret;
OUT_INT *samples_ptr; OUT_INT *samples_ptr;
init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE) * 8); init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE) * 8);
@ -1743,8 +1748,16 @@ static int mp_decode_frame(MPADecodeContext *s, OUT_INT *samples,
assert(i <= buf_size - HEADER_SIZE && i >= 0); assert(i <= buf_size - HEADER_SIZE && i >= 0);
memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i); memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i);
s->last_buf_size += i; s->last_buf_size += i;
}
break; /* get output buffer */
if (!samples) {
s->frame.nb_samples = s->avctx->frame_size;
if ((ret = s->avctx->get_buffer(s->avctx, &s->frame)) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
samples = (OUT_INT *)s->frame.data[0];
} }
/* apply the synthesis filter */ /* apply the synthesis filter */
@ -1764,7 +1777,7 @@ static int mp_decode_frame(MPADecodeContext *s, OUT_INT *samples,
return nb_frames * 32 * sizeof(OUT_INT) * s->nb_channels; return nb_frames * 32 * sizeof(OUT_INT) * s->nb_channels;
} }
static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr,
AVPacket *avpkt) AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
@ -1772,7 +1785,6 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size,
MPADecodeContext *s = avctx->priv_data; MPADecodeContext *s = avctx->priv_data;
uint32_t header; uint32_t header;
int out_size; int out_size;
OUT_INT *out_samples = data;
if (buf_size < HEADER_SIZE) if (buf_size < HEADER_SIZE)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
@ -1795,10 +1807,6 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size,
avctx->bit_rate = s->bit_rate; avctx->bit_rate = s->bit_rate;
avctx->sub_id = s->layer; avctx->sub_id = s->layer;
if (*data_size < avctx->frame_size * avctx->channels * sizeof(OUT_INT))
return AVERROR(EINVAL);
*data_size = 0;
if (s->frame_size <= 0 || s->frame_size > buf_size) { if (s->frame_size <= 0 || s->frame_size > buf_size) {
av_log(avctx, AV_LOG_ERROR, "incomplete frame\n"); av_log(avctx, AV_LOG_ERROR, "incomplete frame\n");
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
@ -1807,9 +1815,10 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size,
buf_size= s->frame_size; buf_size= s->frame_size;
} }
out_size = mp_decode_frame(s, out_samples, buf, buf_size); out_size = mp_decode_frame(s, NULL, buf, buf_size);
if (out_size >= 0) { if (out_size >= 0) {
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
avctx->sample_rate = s->sample_rate; avctx->sample_rate = s->sample_rate;
//FIXME maybe move the other codec info stuff from above here too //FIXME maybe move the other codec info stuff from above here too
} else { } else {
@ -1818,6 +1827,7 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size,
If there is more data in the packet, just consume the bad frame If there is more data in the packet, just consume the bad frame
instead of returning an error, which would discard the whole instead of returning an error, which would discard the whole
packet. */ packet. */
*got_frame_ptr = 0;
if (buf_size == avpkt->size) if (buf_size == avpkt->size)
return out_size; return out_size;
} }
@ -1833,15 +1843,14 @@ static void flush(AVCodecContext *avctx)
} }
#if CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER #if CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER
static int decode_frame_adu(AVCodecContext *avctx, void *data, int *data_size, static int decode_frame_adu(AVCodecContext *avctx, void *data,
AVPacket *avpkt) int *got_frame_ptr, AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
MPADecodeContext *s = avctx->priv_data; MPADecodeContext *s = avctx->priv_data;
uint32_t header; uint32_t header;
int len, out_size; int len, out_size;
OUT_INT *out_samples = data;
len = buf_size; len = buf_size;
@ -1871,9 +1880,6 @@ static int decode_frame_adu(AVCodecContext *avctx, void *data, int *data_size,
avctx->bit_rate = s->bit_rate; avctx->bit_rate = s->bit_rate;
avctx->sub_id = s->layer; avctx->sub_id = s->layer;
if (*data_size < avctx->frame_size * avctx->channels * sizeof(OUT_INT))
return AVERROR(EINVAL);
s->frame_size = len; s->frame_size = len;
#if FF_API_PARSE_FRAME #if FF_API_PARSE_FRAME
@ -1881,9 +1887,11 @@ static int decode_frame_adu(AVCodecContext *avctx, void *data, int *data_size,
out_size = buf_size; out_size = buf_size;
else else
#endif #endif
out_size = mp_decode_frame(s, out_samples, buf, buf_size); out_size = mp_decode_frame(s, NULL, buf, buf_size);
*got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
*data_size = out_size;
return buf_size; return buf_size;
} }
#endif /* CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER */ #endif /* CONFIG_MP3ADU_DECODER || CONFIG_MP3ADUFLOAT_DECODER */
@ -1894,6 +1902,7 @@ static int decode_frame_adu(AVCodecContext *avctx, void *data, int *data_size,
* Context for MP3On4 decoder * Context for MP3On4 decoder
*/ */
typedef struct MP3On4DecodeContext { typedef struct MP3On4DecodeContext {
AVFrame *frame;
int frames; ///< number of mp3 frames per block (number of mp3 decoder instances) int frames; ///< number of mp3 frames per block (number of mp3 decoder instances)
int syncword; ///< syncword patch int syncword; ///< syncword patch
const uint8_t *coff; ///< channel offsets in output buffer const uint8_t *coff; ///< channel offsets in output buffer
@ -1984,6 +1993,7 @@ static int decode_init_mp3on4(AVCodecContext * avctx)
// Put decoder context in place to make init_decode() happy // Put decoder context in place to make init_decode() happy
avctx->priv_data = s->mp3decctx[0]; avctx->priv_data = s->mp3decctx[0];
decode_init(avctx); decode_init(avctx);
s->frame = avctx->coded_frame;
// Restore mp3on4 context pointer // Restore mp3on4 context pointer
avctx->priv_data = s; avctx->priv_data = s;
s->mp3decctx[0]->adu_mode = 1; // Set adu mode s->mp3decctx[0]->adu_mode = 1; // Set adu mode
@ -2028,9 +2038,8 @@ static void flush_mp3on4(AVCodecContext *avctx)
} }
static int decode_frame_mp3on4(AVCodecContext * avctx, static int decode_frame_mp3on4(AVCodecContext *avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
@ -2038,14 +2047,17 @@ static int decode_frame_mp3on4(AVCodecContext * avctx,
MPADecodeContext *m; MPADecodeContext *m;
int fsize, len = buf_size, out_size = 0; int fsize, len = buf_size, out_size = 0;
uint32_t header; uint32_t header;
OUT_INT *out_samples = data; OUT_INT *out_samples;
OUT_INT *outptr, *bp; OUT_INT *outptr, *bp;
int fr, j, n, ch; int fr, j, n, ch, ret;
if (*data_size < MPA_FRAME_SIZE * avctx->channels * sizeof(OUT_INT)) { /* get output buffer */
av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n"); s->frame->nb_samples = MPA_FRAME_SIZE;
return AVERROR(EINVAL); if ((ret = avctx->get_buffer(avctx, s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
} }
out_samples = (OUT_INT *)s->frame->data[0];
// Discard too short frames // Discard too short frames
if (buf_size < HEADER_SIZE) if (buf_size < HEADER_SIZE)
@ -2104,7 +2116,10 @@ static int decode_frame_mp3on4(AVCodecContext * avctx,
/* update codec info */ /* update codec info */
avctx->sample_rate = s->mp3decctx[0]->sample_rate; avctx->sample_rate = s->mp3decctx[0]->sample_rate;
*data_size = out_size; s->frame->nb_samples = out_size / (avctx->channels * sizeof(OUT_INT));
*got_frame_ptr = 1;
*(AVFrame *)data = *s->frame;
return buf_size; return buf_size;
} }
#endif /* CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER */ #endif /* CONFIG_MP3ON4_DECODER || CONFIG_MP3ON4FLOAT_DECODER */
@ -2119,7 +2134,9 @@ AVCodec ff_mp1_decoder = {
.init = decode_init, .init = decode_init,
.decode = decode_frame, .decode = decode_frame,
#if FF_API_PARSE_FRAME #if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY, .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
#else
.capabilities = CODEC_CAP_DR1,
#endif #endif
.flush = flush, .flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"), .long_name = NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"),
@ -2134,7 +2151,9 @@ AVCodec ff_mp2_decoder = {
.init = decode_init, .init = decode_init,
.decode = decode_frame, .decode = decode_frame,
#if FF_API_PARSE_FRAME #if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY, .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
#else
.capabilities = CODEC_CAP_DR1,
#endif #endif
.flush = flush, .flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"), .long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
@ -2149,7 +2168,9 @@ AVCodec ff_mp3_decoder = {
.init = decode_init, .init = decode_init,
.decode = decode_frame, .decode = decode_frame,
#if FF_API_PARSE_FRAME #if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY, .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
#else
.capabilities = CODEC_CAP_DR1,
#endif #endif
.flush = flush, .flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"), .long_name = NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"),
@ -2164,7 +2185,9 @@ AVCodec ff_mp3adu_decoder = {
.init = decode_init, .init = decode_init,
.decode = decode_frame_adu, .decode = decode_frame_adu,
#if FF_API_PARSE_FRAME #if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY, .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
#else
.capabilities = CODEC_CAP_DR1,
#endif #endif
.flush = flush, .flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"), .long_name = NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"),
@ -2179,6 +2202,7 @@ AVCodec ff_mp3on4_decoder = {
.init = decode_init_mp3on4, .init = decode_init_mp3on4,
.close = decode_close_mp3on4, .close = decode_close_mp3on4,
.decode = decode_frame_mp3on4, .decode = decode_frame_mp3on4,
.capabilities = CODEC_CAP_DR1,
.flush = flush_mp3on4, .flush = flush_mp3on4,
.long_name = NULL_IF_CONFIG_SMALL("MP3onMP4"), .long_name = NULL_IF_CONFIG_SMALL("MP3onMP4"),
}; };
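One subtlety in the mpegaudiodec changes: mp_decode_frame() calls get_buffer() only when its samples argument is NULL. decode_frame() and decode_frame_adu() now pass NULL and pick the buffer up from s->frame, while decode_frame_mp3on4() keeps handing in pointers into the frame it allocated itself, so the per-stream sub-decoders never allocate. Condensed, the gate is:

    if (!samples) {                    /* plain mp1/mp2/mp3/adu path */
        s->frame.nb_samples = s->avctx->frame_size;
        if ((ret = s->avctx->get_buffer(s->avctx, &s->frame)) < 0)
            return ret;
        samples = (OUT_INT *)s->frame.data[0];
    }                                  /* mp3on4 supplies its own ptr */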

@ -31,7 +31,9 @@ AVCodec ff_mp1float_decoder = {
.init = decode_init, .init = decode_init,
.decode = decode_frame, .decode = decode_frame,
#if FF_API_PARSE_FRAME #if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY, .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
#else
.capabilities = CODEC_CAP_DR1,
#endif #endif
.flush = flush, .flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"), .long_name = NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"),
@ -46,7 +48,9 @@ AVCodec ff_mp2float_decoder = {
.init = decode_init, .init = decode_init,
.decode = decode_frame, .decode = decode_frame,
#if FF_API_PARSE_FRAME #if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY, .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
#else
.capabilities = CODEC_CAP_DR1,
#endif #endif
.flush = flush, .flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"), .long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
@ -61,7 +65,9 @@ AVCodec ff_mp3float_decoder = {
.init = decode_init, .init = decode_init,
.decode = decode_frame, .decode = decode_frame,
#if FF_API_PARSE_FRAME #if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY, .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
#else
.capabilities = CODEC_CAP_DR1,
#endif #endif
.flush = flush, .flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"), .long_name = NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"),
@ -76,7 +82,9 @@ AVCodec ff_mp3adufloat_decoder = {
.init = decode_init, .init = decode_init,
.decode = decode_frame_adu, .decode = decode_frame_adu,
#if FF_API_PARSE_FRAME #if FF_API_PARSE_FRAME
.capabilities = CODEC_CAP_PARSE_ONLY, .capabilities = CODEC_CAP_PARSE_ONLY | CODEC_CAP_DR1,
#else
.capabilities = CODEC_CAP_DR1,
#endif #endif
.flush = flush, .flush = flush,
.long_name = NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"), .long_name = NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"),
@ -91,6 +99,7 @@ AVCodec ff_mp3on4float_decoder = {
.init = decode_init_mp3on4, .init = decode_init_mp3on4,
.close = decode_close_mp3on4, .close = decode_close_mp3on4,
.decode = decode_frame_mp3on4, .decode = decode_frame_mp3on4,
.capabilities = CODEC_CAP_DR1,
.flush = flush_mp3on4, .flush = flush_mp3on4,
.long_name = NULL_IF_CONFIG_SMALL("MP3onMP4"), .long_name = NULL_IF_CONFIG_SMALL("MP3onMP4"),
}; };

@ -47,6 +47,7 @@
typedef struct NellyMoserDecodeContext { typedef struct NellyMoserDecodeContext {
AVCodecContext* avctx; AVCodecContext* avctx;
AVFrame frame;
float *float_buf; float *float_buf;
DECLARE_ALIGNED(16, float, state)[NELLY_BUF_LEN]; DECLARE_ALIGNED(16, float, state)[NELLY_BUF_LEN];
AVLFG random_state; AVLFG random_state;
@ -142,29 +143,28 @@ static av_cold int decode_init(AVCodecContext * avctx) {
ff_init_ff_sine_windows(7); ff_init_ff_sine_windows(7);
avctx->channel_layout = AV_CH_LAYOUT_MONO; avctx->channel_layout = AV_CH_LAYOUT_MONO;
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0; return 0;
} }
static int decode_tag(AVCodecContext * avctx, static int decode_tag(AVCodecContext *avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt) { {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
NellyMoserDecodeContext *s = avctx->priv_data; NellyMoserDecodeContext *s = avctx->priv_data;
int blocks, i, block_size; int blocks, i, ret;
int16_t *samples_s16 = data; int16_t *samples_s16;
float *samples_flt = data; float *samples_flt;
block_size = NELLY_SAMPLES * av_get_bytes_per_sample(avctx->sample_fmt);
blocks = buf_size / NELLY_BLOCK_LEN; blocks = buf_size / NELLY_BLOCK_LEN;
if (blocks <= 0) { if (blocks <= 0) {
av_log(avctx, AV_LOG_ERROR, "Packet is too small\n"); av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
if (*data_size < blocks * block_size) {
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
return AVERROR(EINVAL);
}
if (buf_size % NELLY_BLOCK_LEN) { if (buf_size % NELLY_BLOCK_LEN) {
av_log(avctx, AV_LOG_WARNING, "Leftover bytes: %d.\n", av_log(avctx, AV_LOG_WARNING, "Leftover bytes: %d.\n",
buf_size % NELLY_BLOCK_LEN); buf_size % NELLY_BLOCK_LEN);
@ -177,6 +177,15 @@ static int decode_tag(AVCodecContext * avctx,
* 44100 Hz - 8 * 44100 Hz - 8
*/ */
/* get output buffer */
s->frame.nb_samples = NELLY_SAMPLES * blocks;
if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
samples_s16 = (int16_t *)s->frame.data[0];
samples_flt = (float *)s->frame.data[0];
for (i=0 ; i<blocks ; i++) { for (i=0 ; i<blocks ; i++) {
if (avctx->sample_fmt == SAMPLE_FMT_FLT) { if (avctx->sample_fmt == SAMPLE_FMT_FLT) {
nelly_decode_block(s, buf, samples_flt); nelly_decode_block(s, buf, samples_flt);
@ -188,7 +197,9 @@ static int decode_tag(AVCodecContext * avctx,
} }
buf += NELLY_BLOCK_LEN; buf += NELLY_BLOCK_LEN;
} }
*data_size = blocks * block_size;
*got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return buf_size; return buf_size;
} }
@ -198,6 +209,7 @@ static av_cold int decode_end(AVCodecContext * avctx) {
av_freep(&s->float_buf); av_freep(&s->float_buf);
ff_mdct_end(&s->imdct_ctx); ff_mdct_end(&s->imdct_ctx);
return 0; return 0;
} }
@ -209,6 +221,7 @@ AVCodec ff_nellymoser_decoder = {
.init = decode_init, .init = decode_init,
.close = decode_end, .close = decode_end,
.decode = decode_tag, .decode = decode_tag,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Nellymoser Asao"), .long_name = NULL_IF_CONFIG_SMALL("Nellymoser Asao"),
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT, .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16,

@ -192,6 +192,7 @@ static int pcm_encode_frame(AVCodecContext *avctx,
} }
typedef struct PCMDecode { typedef struct PCMDecode {
AVFrame frame;
short table[256]; short table[256];
} PCMDecode; } PCMDecode;
@ -223,6 +224,9 @@ static av_cold int pcm_decode_init(AVCodecContext * avctx)
if (avctx->sample_fmt == AV_SAMPLE_FMT_S32) if (avctx->sample_fmt == AV_SAMPLE_FMT_S32)
avctx->bits_per_raw_sample = av_get_bits_per_sample(avctx->codec->id); avctx->bits_per_raw_sample = av_get_bits_per_sample(avctx->codec->id);
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0; return 0;
} }
@ -243,22 +247,20 @@ static av_cold int pcm_decode_init(AVCodecContext * avctx)
dst += size / 8; \ dst += size / 8; \
} }
static int pcm_decode_frame(AVCodecContext *avctx, static int pcm_decode_frame(AVCodecContext *avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
const uint8_t *src = avpkt->data; const uint8_t *src = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
PCMDecode *s = avctx->priv_data; PCMDecode *s = avctx->priv_data;
int sample_size, c, n, out_size; int sample_size, c, n, ret, samples_per_block;
uint8_t *samples; uint8_t *samples;
int32_t *dst_int32_t; int32_t *dst_int32_t;
samples = data;
sample_size = av_get_bits_per_sample(avctx->codec_id)/8; sample_size = av_get_bits_per_sample(avctx->codec_id)/8;
/* av_get_bits_per_sample returns 0 for CODEC_ID_PCM_DVD */ /* av_get_bits_per_sample returns 0 for CODEC_ID_PCM_DVD */
samples_per_block = 1;
if (CODEC_ID_PCM_DVD == avctx->codec_id) { if (CODEC_ID_PCM_DVD == avctx->codec_id) {
if (avctx->bits_per_coded_sample != 20 && if (avctx->bits_per_coded_sample != 20 &&
avctx->bits_per_coded_sample != 24) { avctx->bits_per_coded_sample != 24) {
@ -266,10 +268,13 @@ static int pcm_decode_frame(AVCodecContext *avctx,
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
/* 2 samples are interleaved per block in PCM_DVD */ /* 2 samples are interleaved per block in PCM_DVD */
samples_per_block = 2;
sample_size = avctx->bits_per_coded_sample * 2 / 8; sample_size = avctx->bits_per_coded_sample * 2 / 8;
} else if (avctx->codec_id == CODEC_ID_PCM_LXF) } else if (avctx->codec_id == CODEC_ID_PCM_LXF) {
/* we process 40-bit blocks per channel for LXF */ /* we process 40-bit blocks per channel for LXF */
samples_per_block = 2;
sample_size = 5; sample_size = 5;
}
if (sample_size == 0) { if (sample_size == 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid sample_size\n"); av_log(avctx, AV_LOG_ERROR, "Invalid sample_size\n");
@ -288,14 +293,13 @@ static int pcm_decode_frame(AVCodecContext *avctx,
n = buf_size/sample_size; n = buf_size/sample_size;
out_size = n * av_get_bytes_per_sample(avctx->sample_fmt); /* get output buffer */
if (avctx->codec_id == CODEC_ID_PCM_DVD || s->frame.nb_samples = n * samples_per_block / avctx->channels;
avctx->codec_id == CODEC_ID_PCM_LXF) if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
out_size *= 2; av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
if (*data_size < out_size) { return ret;
av_log(avctx, AV_LOG_ERROR, "output buffer too small\n");
return AVERROR(EINVAL);
} }
samples = s->frame.data[0];
switch(avctx->codec->id) { switch(avctx->codec->id) {
case CODEC_ID_PCM_U32LE: case CODEC_ID_PCM_U32LE:
@ -401,7 +405,7 @@ static int pcm_decode_frame(AVCodecContext *avctx,
case CODEC_ID_PCM_DVD: case CODEC_ID_PCM_DVD:
{ {
const uint8_t *src8; const uint8_t *src8;
dst_int32_t = data; dst_int32_t = (int32_t *)s->frame.data[0];
n /= avctx->channels; n /= avctx->channels;
switch (avctx->bits_per_coded_sample) { switch (avctx->bits_per_coded_sample) {
case 20: case 20:
@ -433,7 +437,7 @@ static int pcm_decode_frame(AVCodecContext *avctx,
{ {
int i; int i;
const uint8_t *src8; const uint8_t *src8;
dst_int32_t = data; dst_int32_t = (int32_t *)s->frame.data[0];
n /= avctx->channels; n /= avctx->channels;
//unpack and de-planerize //unpack and de-planerize
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
@ -454,7 +458,10 @@ static int pcm_decode_frame(AVCodecContext *avctx,
default: default:
return -1; return -1;
} }
*data_size = out_size;
*got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return buf_size; return buf_size;
} }
@ -483,6 +490,7 @@ AVCodec ff_ ## name_ ## _decoder = { \
.priv_data_size = sizeof(PCMDecode), \ .priv_data_size = sizeof(PCMDecode), \
.init = pcm_decode_init, \ .init = pcm_decode_init, \
.decode = pcm_decode_frame, \ .decode = pcm_decode_frame, \
.capabilities = CODEC_CAP_DR1, \
.sample_fmts = (const enum AVSampleFormat[]){sample_fmt_,AV_SAMPLE_FMT_NONE}, \ .sample_fmts = (const enum AVSampleFormat[]){sample_fmt_,AV_SAMPLE_FMT_NONE}, \
.long_name = NULL_IF_CONFIG_SMALL(long_name_), \ .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
} }
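The new samples_per_block factor replaces the old out_size *= 2 special case for PCM_DVD/LXF. Plugging in 24-bit stereo PCM_DVD numbers (illustrative values) shows why nb_samples comes out per channel:

    int bits_per_coded_sample = 24, channels = 2, buf_size = 6000;
    int sample_size       = bits_per_coded_sample * 2 / 8;    /* 6 bytes per block */
    int samples_per_block = 2;          /* PCM_DVD interleaves 2 samples per block */
    int n                 = buf_size / sample_size;           /* 1000 blocks       */
    int nb_samples        = n * samples_per_block / channels; /* 1000 per channel  */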

@ -56,6 +56,7 @@ typedef enum
typedef struct typedef struct
{ {
AVFrame avframe;
GetBitContext gb; GetBitContext gb;
qcelp_packet_rate bitrate; qcelp_packet_rate bitrate;
QCELPFrame frame; /**< unpacked data frame */ QCELPFrame frame; /**< unpacked data frame */
@ -97,6 +98,9 @@ static av_cold int qcelp_decode_init(AVCodecContext *avctx)
for(i=0; i<10; i++) for(i=0; i<10; i++)
q->prev_lspf[i] = (i+1)/11.; q->prev_lspf[i] = (i+1)/11.;
avcodec_get_frame_defaults(&q->avframe);
avctx->coded_frame = &q->avframe;
return 0; return 0;
} }
@ -682,23 +686,25 @@ static void postfilter(QCELPContext *q, float *samples, float *lpc)
160, 0.9375, &q->postfilter_agc_mem); 160, 0.9375, &q->postfilter_agc_mem);
} }
static int qcelp_decode_frame(AVCodecContext *avctx, void *data, int *data_size, static int qcelp_decode_frame(AVCodecContext *avctx, void *data,
AVPacket *avpkt) int *got_frame_ptr, AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
QCELPContext *q = avctx->priv_data; QCELPContext *q = avctx->priv_data;
float *outbuffer = data; float *outbuffer;
int i, out_size; int i, ret;
float quantized_lspf[10], lpc[10]; float quantized_lspf[10], lpc[10];
float gain[16]; float gain[16];
float *formant_mem; float *formant_mem;
out_size = 160 * av_get_bytes_per_sample(avctx->sample_fmt); /* get output buffer */
if (*data_size < out_size) { q->avframe.nb_samples = 160;
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); if ((ret = avctx->get_buffer(avctx, &q->avframe)) < 0) {
return AVERROR(EINVAL); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
} }
outbuffer = (float *)q->avframe.data[0];
if ((q->bitrate = determine_bitrate(avctx, buf_size, &buf)) == I_F_Q) { if ((q->bitrate = determine_bitrate(avctx, buf_size, &buf)) == I_F_Q) {
warn_insufficient_frame_quality(avctx, "bitrate cannot be determined."); warn_insufficient_frame_quality(avctx, "bitrate cannot be determined.");
@ -783,7 +789,8 @@ erasure:
memcpy(q->prev_lspf, quantized_lspf, sizeof(q->prev_lspf)); memcpy(q->prev_lspf, quantized_lspf, sizeof(q->prev_lspf));
q->prev_bitrate = q->bitrate; q->prev_bitrate = q->bitrate;
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = q->avframe;
return buf_size; return buf_size;
} }
@ -795,6 +802,7 @@ AVCodec ff_qcelp_decoder =
.id = CODEC_ID_QCELP, .id = CODEC_ID_QCELP,
.init = qcelp_decode_init, .init = qcelp_decode_init,
.decode = qcelp_decode_frame, .decode = qcelp_decode_frame,
.capabilities = CODEC_CAP_DR1,
.priv_data_size = sizeof(QCELPContext), .priv_data_size = sizeof(QCELPContext),
.long_name = NULL_IF_CONFIG_SMALL("QCELP / PureVoice"), .long_name = NULL_IF_CONFIG_SMALL("QCELP / PureVoice"),
}; };

@ -130,6 +130,8 @@ typedef struct {
* QDM2 decoder context * QDM2 decoder context
*/ */
typedef struct { typedef struct {
AVFrame frame;
/// Parameters from codec header, do not change during playback /// Parameters from codec header, do not change during playback
int nb_channels; ///< number of channels int nb_channels; ///< number of channels
int channels; ///< number of channels int channels; ///< number of channels
@ -1875,6 +1877,9 @@ static av_cold int qdm2_decode_init(AVCodecContext *avctx)
avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->sample_fmt = AV_SAMPLE_FMT_S16;
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
// dump_context(s); // dump_context(s);
return 0; return 0;
} }
@ -1952,30 +1957,27 @@ static int qdm2_decode (QDM2Context *q, const uint8_t *in, int16_t *out)
} }
static int qdm2_decode_frame(AVCodecContext *avctx, static int qdm2_decode_frame(AVCodecContext *avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
QDM2Context *s = avctx->priv_data; QDM2Context *s = avctx->priv_data;
int16_t *out = data; int16_t *out;
int i, out_size; int i, ret;
if(!buf) if(!buf)
return 0; return 0;
if(buf_size < s->checksum_size) if(buf_size < s->checksum_size)
return -1; return -1;
out_size = 16 * s->channels * s->frame_size * /* get output buffer */
av_get_bytes_per_sample(avctx->sample_fmt); s->frame.nb_samples = 16 * s->frame_size;
if (*data_size < out_size) { if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return AVERROR(EINVAL); return ret;
} }
out = (int16_t *)s->frame.data[0];
av_log(avctx, AV_LOG_DEBUG, "decode(%d): %p[%d] -> %p[%d]\n",
buf_size, buf, s->checksum_size, data, *data_size);
for (i = 0; i < 16; i++) { for (i = 0; i < 16; i++) {
if (qdm2_decode(s, buf, out) < 0) if (qdm2_decode(s, buf, out) < 0)
@ -1983,7 +1985,8 @@ static int qdm2_decode_frame(AVCodecContext *avctx,
out += s->channels * s->frame_size; out += s->channels * s->frame_size;
} }
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return s->checksum_size; return s->checksum_size;
} }
@ -1997,5 +2000,6 @@ AVCodec ff_qdm2_decoder =
.init = qdm2_decode_init, .init = qdm2_decode_init,
.close = qdm2_decode_close, .close = qdm2_decode_close,
.decode = qdm2_decode_frame, .decode = qdm2_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("QDesign Music Codec 2"), .long_name = NULL_IF_CONFIG_SMALL("QDesign Music Codec 2"),
}; };

@ -34,6 +34,7 @@
typedef struct { typedef struct {
AVCodecContext *avctx; AVCodecContext *avctx;
AVFrame frame;
LPCContext lpc_ctx; LPCContext lpc_ctx;
unsigned int old_energy; ///< previous frame energy unsigned int old_energy; ///< previous frame energy

@ -38,6 +38,10 @@ static av_cold int ra144_decode_init(AVCodecContext * avctx)
ractx->lpc_coef[1] = ractx->lpc_tables[1]; ractx->lpc_coef[1] = ractx->lpc_tables[1];
avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->sample_fmt = AV_SAMPLE_FMT_S16;
avcodec_get_frame_defaults(&ractx->frame);
avctx->coded_frame = &ractx->frame;
return 0; return 0;
} }
@ -54,8 +58,8 @@ static void do_output_subblock(RA144Context *ractx, const uint16_t *lpc_coefs,
} }
/** Uncompress one block (20 bytes -> 160*2 bytes). */ /** Uncompress one block (20 bytes -> 160*2 bytes). */
static int ra144_decode_frame(AVCodecContext * avctx, void *vdata, static int ra144_decode_frame(AVCodecContext * avctx, void *data,
int *data_size, AVPacket *avpkt) int *got_frame_ptr, AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
@ -64,23 +68,25 @@ static int ra144_decode_frame(AVCodecContext * avctx, void *vdata,
uint16_t block_coefs[NBLOCKS][LPC_ORDER]; // LPC coefficients of each sub-block uint16_t block_coefs[NBLOCKS][LPC_ORDER]; // LPC coefficients of each sub-block
unsigned int lpc_refl[LPC_ORDER]; // LPC reflection coefficients of the frame unsigned int lpc_refl[LPC_ORDER]; // LPC reflection coefficients of the frame
int i, j; int i, j;
int out_size; int ret;
int16_t *data = vdata; int16_t *samples;
unsigned int energy; unsigned int energy;
RA144Context *ractx = avctx->priv_data; RA144Context *ractx = avctx->priv_data;
GetBitContext gb; GetBitContext gb;
out_size = NBLOCKS * BLOCKSIZE * av_get_bytes_per_sample(avctx->sample_fmt); /* get output buffer */
if (*data_size < out_size) { ractx->frame.nb_samples = NBLOCKS * BLOCKSIZE;
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); if ((ret = avctx->get_buffer(avctx, &ractx->frame)) < 0) {
return AVERROR(EINVAL); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
} }
samples = (int16_t *)ractx->frame.data[0];
if(buf_size < FRAMESIZE) { if(buf_size < FRAMESIZE) {
av_log(avctx, AV_LOG_ERROR, av_log(avctx, AV_LOG_ERROR,
"Frame too small (%d bytes). Truncated file?\n", buf_size); "Frame too small (%d bytes). Truncated file?\n", buf_size);
*data_size = 0; *got_frame_ptr = 0;
return buf_size; return buf_size;
} }
init_get_bits(&gb, buf, FRAMESIZE * 8); init_get_bits(&gb, buf, FRAMESIZE * 8);
@ -106,7 +112,7 @@ static int ra144_decode_frame(AVCodecContext * avctx, void *vdata,
do_output_subblock(ractx, block_coefs[i], refl_rms[i], &gb); do_output_subblock(ractx, block_coefs[i], refl_rms[i], &gb);
for (j=0; j < BLOCKSIZE; j++) for (j=0; j < BLOCKSIZE; j++)
*data++ = av_clip_int16(ractx->curr_sblock[j + 10] << 2); *samples++ = av_clip_int16(ractx->curr_sblock[j + 10] << 2);
} }
ractx->old_energy = energy; ractx->old_energy = energy;
@ -114,7 +120,9 @@ static int ra144_decode_frame(AVCodecContext * avctx, void *vdata,
FFSWAP(unsigned int *, ractx->lpc_coef[0], ractx->lpc_coef[1]); FFSWAP(unsigned int *, ractx->lpc_coef[0], ractx->lpc_coef[1]);
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = ractx->frame;
return FRAMESIZE; return FRAMESIZE;
} }
@ -125,5 +133,6 @@ AVCodec ff_ra_144_decoder = {
.priv_data_size = sizeof(RA144Context), .priv_data_size = sizeof(RA144Context),
.init = ra144_decode_init, .init = ra144_decode_init,
.decode = ra144_decode_frame, .decode = ra144_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("RealAudio 1.0 (14.4K)"), .long_name = NULL_IF_CONFIG_SMALL("RealAudio 1.0 (14.4K)"),
}; };

@ -36,6 +36,7 @@
#define RA288_BLOCKS_PER_FRAME 32 #define RA288_BLOCKS_PER_FRAME 32
typedef struct { typedef struct {
AVFrame frame;
DSPContext dsp; DSPContext dsp;
DECLARE_ALIGNED(16, float, sp_lpc)[FFALIGN(36, 8)]; ///< LPC coefficients for speech data (spec: A) DECLARE_ALIGNED(16, float, sp_lpc)[FFALIGN(36, 8)]; ///< LPC coefficients for speech data (spec: A)
DECLARE_ALIGNED(16, float, gain_lpc)[FFALIGN(10, 8)]; ///< LPC coefficients for gain (spec: GB) DECLARE_ALIGNED(16, float, gain_lpc)[FFALIGN(10, 8)]; ///< LPC coefficients for gain (spec: GB)
@ -62,6 +63,10 @@ static av_cold int ra288_decode_init(AVCodecContext *avctx)
RA288Context *ractx = avctx->priv_data; RA288Context *ractx = avctx->priv_data;
avctx->sample_fmt = AV_SAMPLE_FMT_FLT; avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
dsputil_init(&ractx->dsp, avctx); dsputil_init(&ractx->dsp, avctx);
avcodec_get_frame_defaults(&ractx->frame);
avctx->coded_frame = &ractx->frame;
return 0; return 0;
} }
@ -165,12 +170,12 @@ static void backward_filter(RA288Context *ractx,
} }
static int ra288_decode_frame(AVCodecContext * avctx, void *data, static int ra288_decode_frame(AVCodecContext * avctx, void *data,
int *data_size, AVPacket *avpkt) int *got_frame_ptr, AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
float *out = data; float *out;
int i, out_size; int i, ret;
RA288Context *ractx = avctx->priv_data; RA288Context *ractx = avctx->priv_data;
GetBitContext gb; GetBitContext gb;
@ -181,12 +186,13 @@ static int ra288_decode_frame(AVCodecContext * avctx, void *data,
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
out_size = RA288_BLOCK_SIZE * RA288_BLOCKS_PER_FRAME * /* get output buffer */
av_get_bytes_per_sample(avctx->sample_fmt); ractx->frame.nb_samples = RA288_BLOCK_SIZE * RA288_BLOCKS_PER_FRAME;
if (*data_size < out_size) { if ((ret = avctx->get_buffer(avctx, &ractx->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return AVERROR(EINVAL); return ret;
} }
out = (float *)ractx->frame.data[0];
init_get_bits(&gb, buf, avctx->block_align * 8); init_get_bits(&gb, buf, avctx->block_align * 8);
@ -208,7 +214,9 @@ static int ra288_decode_frame(AVCodecContext * avctx, void *data,
} }
} }
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = ractx->frame;
return avctx->block_align; return avctx->block_align;
} }
@ -219,5 +227,6 @@ AVCodec ff_ra_288_decoder = {
.priv_data_size = sizeof(RA288Context), .priv_data_size = sizeof(RA288Context),
.init = ra288_decode_init, .init = ra288_decode_init,
.decode = ra288_decode_frame, .decode = ra288_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("RealAudio 2.0 (28.8K)"), .long_name = NULL_IF_CONFIG_SMALL("RealAudio 2.0 (28.8K)"),
}; };

@ -25,6 +25,10 @@
#define AES3_HEADER_LEN 4 #define AES3_HEADER_LEN 4
typedef struct S302MDecodeContext {
AVFrame frame;
} S302MDecodeContext;
static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf, static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf,
int buf_size) int buf_size)
{ {
@ -73,10 +77,12 @@ static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf,
} }
static int s302m_decode_frame(AVCodecContext *avctx, void *data, static int s302m_decode_frame(AVCodecContext *avctx, void *data,
int *data_size, AVPacket *avpkt) int *got_frame_ptr, AVPacket *avpkt)
{ {
S302MDecodeContext *s = avctx->priv_data;
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
int block_size, ret;
int frame_size = s302m_parse_frame_header(avctx, buf, buf_size); int frame_size = s302m_parse_frame_header(avctx, buf, buf_size);
if (frame_size < 0) if (frame_size < 0)
@ -85,11 +91,18 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
buf_size -= AES3_HEADER_LEN; buf_size -= AES3_HEADER_LEN;
buf += AES3_HEADER_LEN; buf += AES3_HEADER_LEN;
if (*data_size < 4 * buf_size * 8 / (avctx->bits_per_coded_sample + 4)) /* get output buffer */
return -1; block_size = (avctx->bits_per_coded_sample + 4) / 4;
s->frame.nb_samples = 2 * (buf_size / block_size) / avctx->channels;
if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
buf_size = (s->frame.nb_samples * avctx->channels / 2) * block_size;
if (avctx->bits_per_coded_sample == 24) { if (avctx->bits_per_coded_sample == 24) {
uint32_t *o = data; uint32_t *o = (uint32_t *)s->frame.data[0];
for (; buf_size > 6; buf_size -= 7) { for (; buf_size > 6; buf_size -= 7) {
*o++ = (av_reverse[buf[2]] << 24) | *o++ = (av_reverse[buf[2]] << 24) |
(av_reverse[buf[1]] << 16) | (av_reverse[buf[1]] << 16) |
@ -100,9 +113,8 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
(av_reverse[buf[3] & 0x0f] << 4); (av_reverse[buf[3] & 0x0f] << 4);
buf += 7; buf += 7;
} }
*data_size = (uint8_t*) o - (uint8_t*) data;
} else if (avctx->bits_per_coded_sample == 20) { } else if (avctx->bits_per_coded_sample == 20) {
uint32_t *o = data; uint32_t *o = (uint32_t *)s->frame.data[0];
for (; buf_size > 5; buf_size -= 6) { for (; buf_size > 5; buf_size -= 6) {
*o++ = (av_reverse[buf[2] & 0xf0] << 28) | *o++ = (av_reverse[buf[2] & 0xf0] << 28) |
(av_reverse[buf[1]] << 20) | (av_reverse[buf[1]] << 20) |
@ -112,9 +124,8 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
(av_reverse[buf[3]] << 12); (av_reverse[buf[3]] << 12);
buf += 6; buf += 6;
} }
*data_size = (uint8_t*) o - (uint8_t*) data;
} else { } else {
uint16_t *o = data; uint16_t *o = (uint16_t *)s->frame.data[0];
for (; buf_size > 4; buf_size -= 5) { for (; buf_size > 4; buf_size -= 5) {
*o++ = (av_reverse[buf[1]] << 8) | *o++ = (av_reverse[buf[1]] << 8) |
av_reverse[buf[0]]; av_reverse[buf[0]];
@ -123,10 +134,22 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
(av_reverse[buf[2]] >> 4); (av_reverse[buf[2]] >> 4);
buf += 5; buf += 5;
} }
*data_size = (uint8_t*) o - (uint8_t*) data;
} }
return buf - avpkt->data; *got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return avpkt->size;
}
static int s302m_decode_init(AVCodecContext *avctx)
{
S302MDecodeContext *s = avctx->priv_data;
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0;
} }
@ -134,6 +157,9 @@ AVCodec ff_s302m_decoder = {
.name = "s302m", .name = "s302m",
.type = AVMEDIA_TYPE_AUDIO, .type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_S302M, .id = CODEC_ID_S302M,
.priv_data_size = sizeof(S302MDecodeContext),
.init = s302m_decode_init,
.decode = s302m_decode_frame, .decode = s302m_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("SMPTE 302M"), .long_name = NULL_IF_CONFIG_SMALL("SMPTE 302M"),
}; };
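The new header math above is compact enough to warrant a worked case. An AES3 subframe is one sample plus 4 status bits, so a two-subframe pair occupies block_size = (bits + 4) / 4 bytes. For 20-bit stereo (illustrative payload size):

    int bits = 20, channels = 2, buf_size = 720;
    int block_size = (bits + 4) / 4;                     /* 6 bytes per pair */
    int pairs      = buf_size / block_size;              /* 120 pairs        */
    int nb_samples = 2 * pairs / channels;               /* 120 per channel  */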

@ -79,6 +79,7 @@ static const uint8_t is_audio_command[10] = { 1, 1, 1, 1, 0, 0, 0, 1, 1, 0 };
typedef struct ShortenContext { typedef struct ShortenContext {
AVCodecContext *avctx; AVCodecContext *avctx;
AVFrame frame;
GetBitContext gb; GetBitContext gb;
int min_framesize, max_framesize; int min_framesize, max_framesize;
@ -112,6 +113,9 @@ static av_cold int shorten_decode_init(AVCodecContext * avctx)
s->avctx = avctx; s->avctx = avctx;
avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->sample_fmt = AV_SAMPLE_FMT_S16;
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0; return 0;
} }
@ -394,15 +398,13 @@ static int read_header(ShortenContext *s)
return 0; return 0;
} }
static int shorten_decode_frame(AVCodecContext *avctx, static int shorten_decode_frame(AVCodecContext *avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
ShortenContext *s = avctx->priv_data; ShortenContext *s = avctx->priv_data;
int i, input_buf_size = 0; int i, input_buf_size = 0;
int16_t *samples = data;
int ret; int ret;
/* allocate internal bitstream buffer */ /* allocate internal bitstream buffer */
@ -436,7 +438,7 @@ static int shorten_decode_frame(AVCodecContext *avctx,
/* do not decode until buffer has at least max_framesize bytes or /* do not decode until buffer has at least max_framesize bytes or
the end of the file has been reached */ the end of the file has been reached */
if (buf_size < s->max_framesize && avpkt->data) { if (buf_size < s->max_framesize && avpkt->data) {
*data_size = 0; *got_frame_ptr = 0;
return input_buf_size; return input_buf_size;
} }
} }
@ -448,13 +450,13 @@ static int shorten_decode_frame(AVCodecContext *avctx,
if (!s->got_header) { if (!s->got_header) {
if ((ret = read_header(s)) < 0) if ((ret = read_header(s)) < 0)
return ret; return ret;
*data_size = 0; *got_frame_ptr = 0;
goto finish_frame; goto finish_frame;
} }
/* if quit command was read previously, don't decode anything */ /* if quit command was read previously, don't decode anything */
if (s->got_quit_command) { if (s->got_quit_command) {
*data_size = 0; *got_frame_ptr = 0;
return avpkt->size; return avpkt->size;
} }
@ -464,7 +466,7 @@ static int shorten_decode_frame(AVCodecContext *avctx,
int len; int len;
if (get_bits_left(&s->gb) < 3+FNSIZE) { if (get_bits_left(&s->gb) < 3+FNSIZE) {
*data_size = 0; *got_frame_ptr = 0;
break; break;
} }
@ -472,7 +474,7 @@ static int shorten_decode_frame(AVCodecContext *avctx,
if (cmd > FN_VERBATIM) { if (cmd > FN_VERBATIM) {
av_log(avctx, AV_LOG_ERROR, "unknown shorten function %d\n", cmd); av_log(avctx, AV_LOG_ERROR, "unknown shorten function %d\n", cmd);
*data_size = 0; *got_frame_ptr = 0;
break; break;
} }
@ -507,7 +509,7 @@ static int shorten_decode_frame(AVCodecContext *avctx,
break; break;
} }
if (cmd == FN_BLOCKSIZE || cmd == FN_QUIT) { if (cmd == FN_BLOCKSIZE || cmd == FN_QUIT) {
*data_size = 0; *got_frame_ptr = 0;
break; break;
} }
} else { } else {
@ -571,19 +573,23 @@ static int shorten_decode_frame(AVCodecContext *avctx,
/* if this is the last channel in the block, output the samples */ /* if this is the last channel in the block, output the samples */
s->cur_chan++; s->cur_chan++;
if (s->cur_chan == s->channels) { if (s->cur_chan == s->channels) {
int out_size = s->blocksize * s->channels * /* get output buffer */
av_get_bytes_per_sample(avctx->sample_fmt); s->frame.nb_samples = s->blocksize;
if (*data_size < out_size) { if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return AVERROR(EINVAL); return ret;
} }
interleave_buffer(samples, s->channels, s->blocksize, s->decoded); /* interleave output */
*data_size = out_size; interleave_buffer((int16_t *)s->frame.data[0], s->channels,
s->blocksize, s->decoded);
*got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
} }
} }
} }
if (s->cur_chan < s->channels) if (s->cur_chan < s->channels)
*data_size = 0; *got_frame_ptr = 0;
finish_frame: finish_frame:
s->bitindex = get_bits_count(&s->gb) - 8*((get_bits_count(&s->gb))/8); s->bitindex = get_bits_count(&s->gb) - 8*((get_bits_count(&s->gb))/8);
@ -614,6 +620,7 @@ static av_cold int shorten_decode_close(AVCodecContext *avctx)
} }
av_freep(&s->bitstream); av_freep(&s->bitstream);
av_freep(&s->coeffs); av_freep(&s->coeffs);
return 0; return 0;
} }
@ -625,6 +632,6 @@ AVCodec ff_shorten_decoder = {
.init = shorten_decode_init, .init = shorten_decode_init,
.close = shorten_decode_close, .close = shorten_decode_close,
.decode = shorten_decode_frame, .decode = shorten_decode_frame,
.capabilities = CODEC_CAP_DELAY, .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1,
.long_name= NULL_IF_CONFIG_SMALL("Shorten"), .long_name= NULL_IF_CONFIG_SMALL("Shorten"),
}; };
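Shorten keeps CODEC_CAP_DELAY and now signals its buffering through *got_frame_ptr as well: a packet may be fully consumed (header parsed, block still incomplete) without producing audio. Callers of the new API must therefore treat got_frame == 0 with a non-negative return as "keep feeding", not as an error (sketch):

    len = avcodec_decode_audio4(avctx, &frame, &got_frame, &pkt);
    if (len < 0) {
        /* corrupt packet */
    } else if (!got_frame) {
        /* input accepted, no frame yet -- feed the next packet */
    }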

@ -507,20 +507,23 @@ static av_cold int sipr_decoder_init(AVCodecContext * avctx)
avctx->sample_fmt = AV_SAMPLE_FMT_FLT; avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
avcodec_get_frame_defaults(&ctx->frame);
avctx->coded_frame = &ctx->frame;
return 0; return 0;
} }
static int sipr_decode_frame(AVCodecContext *avctx, void *datap, static int sipr_decode_frame(AVCodecContext *avctx, void *data,
int *data_size, AVPacket *avpkt) int *got_frame_ptr, AVPacket *avpkt)
{ {
SiprContext *ctx = avctx->priv_data; SiprContext *ctx = avctx->priv_data;
const uint8_t *buf=avpkt->data; const uint8_t *buf=avpkt->data;
SiprParameters parm; SiprParameters parm;
const SiprModeParam *mode_par = &modes[ctx->mode]; const SiprModeParam *mode_par = &modes[ctx->mode];
GetBitContext gb; GetBitContext gb;
float *data = datap; float *samples;
int subframe_size = ctx->mode == MODE_16k ? L_SUBFR_16k : SUBFR_SIZE; int subframe_size = ctx->mode == MODE_16k ? L_SUBFR_16k : SUBFR_SIZE;
int i, out_size; int i, ret;
ctx->avctx = avctx; ctx->avctx = avctx;
if (avpkt->size < (mode_par->bits_per_frame >> 3)) { if (avpkt->size < (mode_par->bits_per_frame >> 3)) {
@ -530,27 +533,27 @@ static int sipr_decode_frame(AVCodecContext *avctx, void *datap,
return -1; return -1;
} }
out_size = mode_par->frames_per_packet * subframe_size * /* get output buffer */
mode_par->subframe_count * ctx->frame.nb_samples = mode_par->frames_per_packet * subframe_size *
av_get_bytes_per_sample(avctx->sample_fmt); mode_par->subframe_count;
if (*data_size < out_size) { if ((ret = avctx->get_buffer(avctx, &ctx->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
"Error processing packet: output buffer (%d) too small\n", return ret;
*data_size);
return -1;
} }
samples = (float *)ctx->frame.data[0];
init_get_bits(&gb, buf, mode_par->bits_per_frame); init_get_bits(&gb, buf, mode_par->bits_per_frame);
for (i = 0; i < mode_par->frames_per_packet; i++) { for (i = 0; i < mode_par->frames_per_packet; i++) {
decode_parameters(&parm, &gb, mode_par); decode_parameters(&parm, &gb, mode_par);
ctx->decode_frame(ctx, &parm, data); ctx->decode_frame(ctx, &parm, samples);
data += subframe_size * mode_par->subframe_count; samples += subframe_size * mode_par->subframe_count;
} }
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = ctx->frame;
return mode_par->bits_per_frame >> 3; return mode_par->bits_per_frame >> 3;
} }
@ -562,5 +565,6 @@ AVCodec ff_sipr_decoder = {
.priv_data_size = sizeof(SiprContext), .priv_data_size = sizeof(SiprContext),
.init = sipr_decoder_init, .init = sipr_decoder_init,
.decode = sipr_decode_frame, .decode = sipr_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("RealAudio SIPR / ACELP.NET"), .long_name = NULL_IF_CONFIG_SMALL("RealAudio SIPR / ACELP.NET"),
}; };

@ -558,31 +558,43 @@ static av_cold int decode_end(AVCodecContext *avctx)
} }
typedef struct SmackerAudioContext {
AVFrame frame;
} SmackerAudioContext;
static av_cold int smka_decode_init(AVCodecContext *avctx) static av_cold int smka_decode_init(AVCodecContext *avctx)
{ {
SmackerAudioContext *s = avctx->priv_data;
if (avctx->channels < 1 || avctx->channels > 2) { if (avctx->channels < 1 || avctx->channels > 2) {
av_log(avctx, AV_LOG_ERROR, "invalid number of channels\n"); av_log(avctx, AV_LOG_ERROR, "invalid number of channels\n");
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO; avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
avctx->sample_fmt = avctx->bits_per_coded_sample == 8 ? AV_SAMPLE_FMT_U8 : AV_SAMPLE_FMT_S16; avctx->sample_fmt = avctx->bits_per_coded_sample == 8 ? AV_SAMPLE_FMT_U8 : AV_SAMPLE_FMT_S16;
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0; return 0;
} }
/** /**
* Decode Smacker audio data * Decode Smacker audio data
*/ */
static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) static int smka_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{ {
SmackerAudioContext *s = avctx->priv_data;
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
GetBitContext gb; GetBitContext gb;
HuffContext h[4]; HuffContext h[4];
VLC vlc[4]; VLC vlc[4];
int16_t *samples = data; int16_t *samples;
uint8_t *samples8 = data; uint8_t *samples8;
int val; int val;
int i, res; int i, res, ret;
int unp_size; int unp_size;
int bits, stereo; int bits, stereo;
int pred[2] = {0, 0}; int pred[2] = {0, 0};
@ -598,15 +610,11 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
if(!get_bits1(&gb)){ if(!get_bits1(&gb)){
av_log(avctx, AV_LOG_INFO, "Sound: no data\n"); av_log(avctx, AV_LOG_INFO, "Sound: no data\n");
*data_size = 0; *got_frame_ptr = 0;
return 1; return 1;
} }
stereo = get_bits1(&gb); stereo = get_bits1(&gb);
bits = get_bits1(&gb); bits = get_bits1(&gb);
if (unp_size & 0xC0000000 || unp_size > *data_size) {
av_log(avctx, AV_LOG_ERROR, "Frame is too large to fit in buffer\n");
return -1;
}
if (stereo ^ (avctx->channels != 1)) { if (stereo ^ (avctx->channels != 1)) {
av_log(avctx, AV_LOG_ERROR, "channels mismatch\n"); av_log(avctx, AV_LOG_ERROR, "channels mismatch\n");
return AVERROR(EINVAL); return AVERROR(EINVAL);
@ -616,6 +624,15 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
/* get output buffer */
s->frame.nb_samples = unp_size / (avctx->channels * (bits + 1));
if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
samples = (int16_t *)s->frame.data[0];
samples8 = s->frame.data[0];
memset(vlc, 0, sizeof(VLC) * 4); memset(vlc, 0, sizeof(VLC) * 4);
memset(h, 0, sizeof(HuffContext) * 4); memset(h, 0, sizeof(HuffContext) * 4);
// Initialize // Initialize
@ -705,7 +722,9 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
av_free(h[i].values); av_free(h[i].values);
} }
*data_size = unp_size; *got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return buf_size; return buf_size;
} }
@ -725,8 +744,10 @@ AVCodec ff_smackaud_decoder = {
.name = "smackaud", .name = "smackaud",
.type = AVMEDIA_TYPE_AUDIO, .type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_SMACKAUDIO, .id = CODEC_ID_SMACKAUDIO,
.priv_data_size = sizeof(SmackerAudioContext),
.init = smka_decode_init, .init = smka_decode_init,
.decode = smka_decode_frame, .decode = smka_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Smacker audio"), .long_name = NULL_IF_CONFIG_SMALL("Smacker audio"),
}; };
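The same mechanical change is applied to every audio decoder touched by this commit: the context gains a persistent AVFrame, init() points avctx->coded_frame at it, and decode() sizes the frame, requests a buffer through avctx->get_buffer(), writes the samples, and returns the frame through *got_frame_ptr and the opaque data pointer. A condensed sketch of the pattern (the decoder name and the frame sizing are illustrative only, not part of the patch):

#include "avcodec.h"

typedef struct ExampleAudioContext {
    AVFrame frame;          /* persistent frame, returned by reference */
} ExampleAudioContext;

static av_cold int example_decode_init(AVCodecContext *avctx)
{
    ExampleAudioContext *s = avctx->priv_data;

    avctx->sample_fmt = AV_SAMPLE_FMT_S16;
    avcodec_get_frame_defaults(&s->frame);
    avctx->coded_frame = &s->frame;
    return 0;
}

static int example_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
{
    ExampleAudioContext *s = avctx->priv_data;
    int ret;

    /* size the frame in samples, then let get_buffer() allocate it */
    s->frame.nb_samples = avpkt->size / (2 * avctx->channels); /* illustrative */
    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    /* ... decode avpkt->data into s->frame.data[0] here ... */

    *got_frame_ptr   = 1;
    *(AVFrame *)data = s->frame;
    return avpkt->size;
}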

@ -34,6 +34,7 @@
* TrueSpeech decoder context * TrueSpeech decoder context
*/ */
typedef struct { typedef struct {
AVFrame frame;
DSPContext dsp; DSPContext dsp;
/* input data */ /* input data */
uint8_t buffer[32]; uint8_t buffer[32];
@ -69,6 +70,9 @@ static av_cold int truespeech_decode_init(AVCodecContext * avctx)
dsputil_init(&c->dsp, avctx); dsputil_init(&c->dsp, avctx);
avcodec_get_frame_defaults(&c->frame);
avctx->coded_frame = &c->frame;
return 0; return 0;
} }
@ -299,17 +303,16 @@ static void truespeech_save_prevvec(TSContext *c)
c->prevfilt[i] = c->cvector[i]; c->prevfilt[i] = c->cvector[i];
} }
static int truespeech_decode_frame(AVCodecContext *avctx, static int truespeech_decode_frame(AVCodecContext *avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
TSContext *c = avctx->priv_data; TSContext *c = avctx->priv_data;
int i, j; int i, j;
short *samples = data; int16_t *samples;
int iterations, out_size; int iterations, ret;
iterations = buf_size / 32; iterations = buf_size / 32;
@ -319,13 +322,15 @@ static int truespeech_decode_frame(AVCodecContext *avctx,
return -1; return -1;
} }
out_size = iterations * 240 * av_get_bytes_per_sample(avctx->sample_fmt); /* get output buffer */
if (*data_size < out_size) { c->frame.nb_samples = iterations * 240;
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n"); if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
return AVERROR(EINVAL); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
} }
samples = (int16_t *)c->frame.data[0];
memset(samples, 0, out_size); memset(samples, 0, iterations * 240 * sizeof(*samples));
for(j = 0; j < iterations; j++) { for(j = 0; j < iterations; j++) {
truespeech_read_frame(c, buf); truespeech_read_frame(c, buf);
@ -345,7 +350,8 @@ static int truespeech_decode_frame(AVCodecContext *avctx,
truespeech_save_prevvec(c); truespeech_save_prevvec(c);
} }
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = c->frame;
return buf_size; return buf_size;
} }
@ -357,5 +363,6 @@ AVCodec ff_truespeech_decoder = {
.priv_data_size = sizeof(TSContext), .priv_data_size = sizeof(TSContext),
.init = truespeech_decode_init, .init = truespeech_decode_init,
.decode = truespeech_decode_frame, .decode = truespeech_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("DSP Group TrueSpeech"), .long_name = NULL_IF_CONFIG_SMALL("DSP Group TrueSpeech"),
}; };

@ -56,6 +56,7 @@ typedef struct TTAChannel {
typedef struct TTAContext { typedef struct TTAContext {
AVCodecContext *avctx; AVCodecContext *avctx;
AVFrame frame;
GetBitContext gb; GetBitContext gb;
int format, channels, bps, data_length; int format, channels, bps, data_length;
@ -276,17 +277,19 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
return -1; return -1;
} }
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0; return 0;
} }
static int tta_decode_frame(AVCodecContext *avctx, static int tta_decode_frame(AVCodecContext *avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
TTAContext *s = avctx->priv_data; TTAContext *s = avctx->priv_data;
int i, out_size; int i, ret;
int cur_chan = 0, framelen = s->frame_length; int cur_chan = 0, framelen = s->frame_length;
int32_t *p; int32_t *p;
@ -297,10 +300,11 @@ static int tta_decode_frame(AVCodecContext *avctx,
if (!s->total_frames && s->last_frame_length) if (!s->total_frames && s->last_frame_length)
framelen = s->last_frame_length; framelen = s->last_frame_length;
out_size = framelen * s->channels * av_get_bytes_per_sample(avctx->sample_fmt); /* get output buffer */
if (*data_size < out_size) { s->frame.nb_samples = framelen;
av_log(avctx, AV_LOG_ERROR, "Output buffer size is too small.\n"); if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
return -1; av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
} }
// decode directly to output buffer for 24-bit sample format // decode directly to output buffer for 24-bit sample format
@ -396,19 +400,20 @@ static int tta_decode_frame(AVCodecContext *avctx,
// convert to output buffer // convert to output buffer
if (s->bps == 2) { if (s->bps == 2) {
int16_t *samples = data; int16_t *samples = (int16_t *)s->frame.data[0];
for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++) for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++)
*samples++ = *p; *samples++ = *p;
} else { } else {
// shift samples for 24-bit sample format // shift samples for 24-bit sample format
int32_t *samples = data; int32_t *samples = (int32_t *)s->frame.data[0];
for (i = 0; i < framelen * s->channels; i++) for (i = 0; i < framelen * s->channels; i++)
*samples++ <<= 8; *samples++ <<= 8;
// reset decode buffer // reset decode buffer
s->decode_buffer = NULL; s->decode_buffer = NULL;
} }
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return buf_size; return buf_size;
} }
@ -430,5 +435,6 @@ AVCodec ff_tta_decoder = {
.init = tta_decode_init, .init = tta_decode_init,
.close = tta_decode_close, .close = tta_decode_close,
.decode = tta_decode_frame, .decode = tta_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("True Audio (TTA)"), .long_name = NULL_IF_CONFIG_SMALL("True Audio (TTA)"),
}; };

@ -174,6 +174,7 @@ static const ModeTab mode_44_48 = {
typedef struct TwinContext { typedef struct TwinContext {
AVCodecContext *avctx; AVCodecContext *avctx;
AVFrame frame;
DSPContext dsp; DSPContext dsp;
FFTContext mdct_ctx[3]; FFTContext mdct_ctx[3];
@ -195,6 +196,7 @@ typedef struct TwinContext {
float *curr_frame; ///< non-interleaved output float *curr_frame; ///< non-interleaved output
float *prev_frame; ///< non-interleaved previous frame float *prev_frame; ///< non-interleaved previous frame
int last_block_pos[2]; int last_block_pos[2];
int discarded_packets;
float *cos_tabs[3]; float *cos_tabs[3];
@ -676,6 +678,9 @@ static void imdct_output(TwinContext *tctx, enum FrameType ftype, int wtype,
i); i);
} }
if (!out)
return;
size2 = tctx->last_block_pos[0]; size2 = tctx->last_block_pos[0];
size1 = mtab->size - size2; size1 = mtab->size - size2;
if (tctx->avctx->channels == 2) { if (tctx->avctx->channels == 2) {
@ -811,16 +816,16 @@ static void read_and_decode_spectrum(TwinContext *tctx, GetBitContext *gb,
} }
static int twin_decode_frame(AVCodecContext * avctx, void *data, static int twin_decode_frame(AVCodecContext * avctx, void *data,
int *data_size, AVPacket *avpkt) int *got_frame_ptr, AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
TwinContext *tctx = avctx->priv_data; TwinContext *tctx = avctx->priv_data;
GetBitContext gb; GetBitContext gb;
const ModeTab *mtab = tctx->mtab; const ModeTab *mtab = tctx->mtab;
float *out = data; float *out = NULL;
enum FrameType ftype; enum FrameType ftype;
int window_type, out_size; int window_type, ret;
static const enum FrameType wtype_to_ftype_table[] = { static const enum FrameType wtype_to_ftype_table[] = {
FT_LONG, FT_LONG, FT_SHORT, FT_LONG, FT_LONG, FT_LONG, FT_SHORT, FT_LONG,
FT_MEDIUM, FT_LONG, FT_LONG, FT_MEDIUM, FT_MEDIUM FT_MEDIUM, FT_LONG, FT_LONG, FT_MEDIUM, FT_MEDIUM
@ -832,11 +837,14 @@ static int twin_decode_frame(AVCodecContext * avctx, void *data,
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
out_size = mtab->size * avctx->channels * /* get output buffer */
av_get_bytes_per_sample(avctx->sample_fmt); if (tctx->discarded_packets >= 2) {
if (*data_size < out_size) { tctx->frame.nb_samples = mtab->size;
av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n"); if ((ret = avctx->get_buffer(avctx, &tctx->frame)) < 0) {
return AVERROR(EINVAL); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
out = (float *)tctx->frame.data[0];
} }
init_get_bits(&gb, buf, buf_size * 8); init_get_bits(&gb, buf, buf_size * 8);
@ -856,12 +864,14 @@ static int twin_decode_frame(AVCodecContext * avctx, void *data,
FFSWAP(float*, tctx->curr_frame, tctx->prev_frame); FFSWAP(float*, tctx->curr_frame, tctx->prev_frame);
if (tctx->avctx->frame_number < 2) { if (tctx->discarded_packets < 2) {
*data_size=0; tctx->discarded_packets++;
*got_frame_ptr = 0;
return buf_size; return buf_size;
} }
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = tctx->frame;
return buf_size; return buf_size;
} }
@ -1153,6 +1163,9 @@ static av_cold int twin_decode_init(AVCodecContext *avctx)
memset_float(tctx->bark_hist[0][0], 0.1, FF_ARRAY_ELEMS(tctx->bark_hist)); memset_float(tctx->bark_hist[0][0], 0.1, FF_ARRAY_ELEMS(tctx->bark_hist));
avcodec_get_frame_defaults(&tctx->frame);
avctx->coded_frame = &tctx->frame;
return 0; return 0;
} }
@ -1164,5 +1177,6 @@ AVCodec ff_twinvq_decoder = {
.init = twin_decode_init, .init = twin_decode_init,
.close = twin_decode_close, .close = twin_decode_close,
.decode = twin_decode_frame, .decode = twin_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("VQF TwinVQ"), .long_name = NULL_IF_CONFIG_SMALL("VQF TwinVQ"),
}; };

@ -222,9 +222,8 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
if(s->codec_id == CODEC_ID_SVQ1 || s->codec_id == CODEC_ID_VP5 || if(s->codec_id == CODEC_ID_SVQ1 || s->codec_id == CODEC_ID_VP5 ||
s->codec_id == CODEC_ID_VP6 || s->codec_id == CODEC_ID_VP6F || s->codec_id == CODEC_ID_VP6 || s->codec_id == CODEC_ID_VP6F ||
s->codec_id == CODEC_ID_VP6A) { s->codec_id == CODEC_ID_VP6A) {
linesize_align[0] = for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
linesize_align[1] = linesize_align[i] = 16;
linesize_align[2] = 16;
} }
#endif #endif
} }
@ -241,7 +240,108 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){
*width=FFALIGN(*width, align); *width=FFALIGN(*width, align);
} }
int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){ static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
{
AVCodecInternal *avci = avctx->internal;
InternalBuffer *buf;
int buf_size, ret, i, needs_extended_data;
buf_size = av_samples_get_buffer_size(NULL, avctx->channels,
frame->nb_samples, avctx->sample_fmt,
32);
if (buf_size < 0)
return AVERROR(EINVAL);
needs_extended_data = av_sample_fmt_is_planar(avctx->sample_fmt) &&
avctx->channels > AV_NUM_DATA_POINTERS;
/* allocate InternalBuffer if needed */
if (!avci->buffer) {
avci->buffer = av_mallocz(sizeof(InternalBuffer));
if (!avci->buffer)
return AVERROR(ENOMEM);
}
buf = avci->buffer;
/* if there is a previously-used internal buffer, check its size and
channel count to see if we can reuse it */
if (buf->extended_data) {
/* if current buffer is too small, free it */
if (buf->extended_data[0] && buf_size > buf->audio_data_size) {
av_free(buf->extended_data[0]);
if (buf->extended_data != buf->data)
av_free(buf->extended_data);
buf->extended_data = NULL;
buf->data[0] = NULL;
}
/* if number of channels has changed, reset and/or free extended data
pointers but leave data buffer in buf->data[0] for reuse */
if (buf->nb_channels != avctx->channels) {
if (buf->extended_data != buf->data)
av_free(buf->extended_data);
buf->extended_data = NULL;
}
}
/* if there is no previous buffer or the previous buffer cannot be used
as-is, allocate a new buffer and/or rearrange the channel pointers */
if (!buf->extended_data) {
/* if the channel pointers will fit, just set extended_data to data,
otherwise allocate the extended_data channel pointers */
if (needs_extended_data) {
buf->extended_data = av_mallocz(avctx->channels *
sizeof(*buf->extended_data));
if (!buf->extended_data)
return AVERROR(ENOMEM);
} else {
buf->extended_data = buf->data;
}
/* if there is a previous buffer and it is large enough, reuse it and
just fill in new channel pointers and linesize, otherwise allocate
a new buffer */
if (buf->extended_data[0]) {
ret = av_samples_fill_arrays(buf->extended_data, &buf->linesize[0],
buf->extended_data[0], avctx->channels,
frame->nb_samples, avctx->sample_fmt,
32);
} else {
ret = av_samples_alloc(buf->extended_data, &buf->linesize[0],
avctx->channels, frame->nb_samples,
avctx->sample_fmt, 32);
}
if (ret)
return ret;
/* if data was not used for extended_data, we need to copy as many of
the extended_data channel pointers as will fit */
if (needs_extended_data) {
for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
buf->data[i] = buf->extended_data[i];
}
buf->audio_data_size = buf_size;
buf->nb_channels = avctx->channels;
}
/* copy InternalBuffer info to the AVFrame */
frame->type = FF_BUFFER_TYPE_INTERNAL;
frame->extended_data = buf->extended_data;
frame->linesize[0] = buf->linesize[0];
memcpy(frame->data, buf->data, sizeof(frame->data));
if (avctx->pkt) frame->pkt_pts = avctx->pkt->pts;
else frame->pkt_pts = AV_NOPTS_VALUE;
frame->reordered_opaque = avctx->reordered_opaque;
if (avctx->debug & FF_DEBUG_BUFFERS)
av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p, "
"internal audio buffer used\n", frame);
return 0;
}
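Since audio_get_buffer() always populates extended_data — either aliasing data[] or, for planar formats with more than AV_NUM_DATA_POINTERS channels, a separately allocated pointer array — consumers should reach the sample planes through it rather than through data[]. A minimal, hypothetical consumer (not part of the patch):

static void consume_frame(AVCodecContext *avctx, AVFrame *frame)
{
    if (av_sample_fmt_is_planar(avctx->sample_fmt)) {
        /* planar: one plane of frame->nb_samples samples per channel */
        int ch;
        for (ch = 0; ch < avctx->channels; ch++) {
            const uint8_t *plane = frame->extended_data[ch];
            /* ... process plane ... */
        }
    } else {
        /* packed: all channels interleaved in a single plane */
        const uint8_t *packed = frame->extended_data[0];
        /* ... process packed ... */
    }
}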
static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
{
int i; int i;
int w= s->width; int w= s->width;
int h= s->height; int h= s->height;
@ -362,6 +462,7 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
pic->data[i]= buf->data[i]; pic->data[i]= buf->data[i];
pic->linesize[i]= buf->linesize[i]; pic->linesize[i]= buf->linesize[i];
} }
pic->extended_data = pic->data;
avci->buffer_count++; avci->buffer_count++;
if(s->pkt) pic->pkt_pts= s->pkt->pts; if(s->pkt) pic->pkt_pts= s->pkt->pts;
@ -375,11 +476,25 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
return 0; return 0;
} }
int avcodec_default_get_buffer(AVCodecContext *avctx, AVFrame *frame)
{
switch (avctx->codec_type) {
case AVMEDIA_TYPE_VIDEO:
return video_get_buffer(avctx, frame);
case AVMEDIA_TYPE_AUDIO:
return audio_get_buffer(avctx, frame);
default:
return -1;
}
}
void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){ void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){
int i; int i;
InternalBuffer *buf, *last; InternalBuffer *buf, *last;
AVCodecInternal *avci = s->internal; AVCodecInternal *avci = s->internal;
assert(s->codec_type == AVMEDIA_TYPE_VIDEO);
assert(pic->type==FF_BUFFER_TYPE_INTERNAL); assert(pic->type==FF_BUFFER_TYPE_INTERNAL);
assert(avci->buffer_count); assert(avci->buffer_count);
@ -412,6 +527,8 @@ int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic){
AVFrame temp_pic; AVFrame temp_pic;
int i; int i;
assert(s->codec_type == AVMEDIA_TYPE_VIDEO);
/* If no picture return a new buffer */ /* If no picture return a new buffer */
if(pic->data[0] == NULL) { if(pic->data[0] == NULL) {
/* We will copy from buffer, so must be readable */ /* We will copy from buffer, so must be readable */
@ -761,11 +878,59 @@ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *pi
return ret; return ret;
} }
#if FF_API_OLD_DECODE_AUDIO
int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
int *frame_size_ptr, int *frame_size_ptr,
AVPacket *avpkt) AVPacket *avpkt)
{ {
int ret; AVFrame frame;
int ret, got_frame = 0;
if (avctx->get_buffer != avcodec_default_get_buffer) {
av_log(avctx, AV_LOG_ERROR, "A custom get_buffer() cannot be used with "
"avcodec_decode_audio3()\n");
return AVERROR(EINVAL);
}
ret = avcodec_decode_audio4(avctx, &frame, &got_frame, avpkt);
if (ret >= 0 && got_frame) {
int ch, plane_size;
int planar = av_sample_fmt_is_planar(avctx->sample_fmt);
int data_size = av_samples_get_buffer_size(&plane_size, avctx->channels,
frame.nb_samples,
avctx->sample_fmt, 1);
if (*frame_size_ptr < data_size) {
av_log(avctx, AV_LOG_ERROR, "output buffer size is too small for "
"the current frame (%d < %d)\n", *frame_size_ptr, data_size);
return AVERROR(EINVAL);
}
memcpy(samples, frame.extended_data[0], plane_size);
if (planar && avctx->channels > 1) {
uint8_t *out = ((uint8_t *)samples) + plane_size;
for (ch = 1; ch < avctx->channels; ch++) {
memcpy(out, frame.extended_data[ch], plane_size);
out += plane_size;
}
}
*frame_size_ptr = data_size;
} else {
*frame_size_ptr = 0;
}
return ret;
}
#endif
int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
AVFrame *frame,
int *got_frame_ptr,
AVPacket *avpkt)
{
int ret = 0;
*got_frame_ptr = 0;
avctx->pkt = avpkt; avctx->pkt = avpkt;
@ -774,23 +939,12 @@ int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *sa
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
if((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size){ if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size) {
//FIXME remove the check below _after_ ensuring that all audio check that the available space is enough ret = avctx->codec->decode(avctx, frame, got_frame_ptr, avpkt);
if(*frame_size_ptr < AVCODEC_MAX_AUDIO_FRAME_SIZE){ if (ret >= 0 && *got_frame_ptr) {
av_log(avctx, AV_LOG_ERROR, "buffer smaller than AVCODEC_MAX_AUDIO_FRAME_SIZE\n"); avctx->frame_number++;
return -1; frame->pkt_dts = avpkt->dts;
}
if(*frame_size_ptr < FF_MIN_BUFFER_SIZE ||
*frame_size_ptr < avctx->channels * avctx->frame_size * sizeof(int16_t)){
av_log(avctx, AV_LOG_ERROR, "buffer %d too small\n", *frame_size_ptr);
return -1;
} }
ret = avctx->codec->decode(avctx, samples, frame_size_ptr, avpkt);
avctx->frame_number++;
}else{
ret= 0;
*frame_size_ptr=0;
} }
return ret; return ret;
} }
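From the caller's side, the new entry point is used roughly like this (an illustrative sketch assuming an opened audio decoder avctx and a demuxed packet pkt; error paths abbreviated):

AVFrame frame;
int got_frame = 0, len;

avcodec_get_frame_defaults(&frame);
len = avcodec_decode_audio4(avctx, &frame, &got_frame, &pkt);
if (len >= 0 && got_frame) {
    /* frame.nb_samples samples per channel are now reachable through
     * frame.extended_data[]; for packed formats the total byte count is: */
    int data_size = av_samples_get_buffer_size(NULL, avctx->channels,
                                               frame.nb_samples,
                                               avctx->sample_fmt, 1);
    /* ... consume data_size bytes starting at frame.extended_data[0] ... */
}

Note that with the default get_buffer() the sample buffer belongs to the decoder and is recycled by the next decode call, so the samples must be used or copied before the next packet is decoded.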
@ -1115,7 +1269,8 @@ void avcodec_flush_buffers(AVCodecContext *avctx)
avctx->codec->flush(avctx); avctx->codec->flush(avctx);
} }
void avcodec_default_free_buffers(AVCodecContext *s){ static void video_free_buffers(AVCodecContext *s)
{
AVCodecInternal *avci = s->internal; AVCodecInternal *avci = s->internal;
int i, j; int i, j;
@ -1137,6 +1292,37 @@ void avcodec_default_free_buffers(AVCodecContext *s){
avci->buffer_count=0; avci->buffer_count=0;
} }
static void audio_free_buffers(AVCodecContext *avctx)
{
AVCodecInternal *avci = avctx->internal;
InternalBuffer *buf;
if (!avci->buffer)
return;
buf = avci->buffer;
if (buf->extended_data) {
av_free(buf->extended_data[0]);
if (buf->extended_data != buf->data)
av_free(buf->extended_data);
}
av_freep(&avci->buffer);
}
void avcodec_default_free_buffers(AVCodecContext *avctx)
{
switch (avctx->codec_type) {
case AVMEDIA_TYPE_VIDEO:
video_free_buffers(avctx);
break;
case AVMEDIA_TYPE_AUDIO:
audio_free_buffers(avctx);
break;
default:
break;
}
}
#if FF_API_OLD_FF_PICT_TYPES #if FF_API_OLD_FF_PICT_TYPES
char av_get_pict_type_char(int pict_type){ char av_get_pict_type_char(int pict_type){
return av_get_picture_type_char(pict_type); return av_get_picture_type_char(pict_type);

@ -21,7 +21,7 @@
#define AVCODEC_VERSION_H #define AVCODEC_VERSION_H
#define LIBAVCODEC_VERSION_MAJOR 53 #define LIBAVCODEC_VERSION_MAJOR 53
#define LIBAVCODEC_VERSION_MINOR 24 #define LIBAVCODEC_VERSION_MINOR 25
#define LIBAVCODEC_VERSION_MICRO 0 #define LIBAVCODEC_VERSION_MICRO 0
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
@ -113,5 +113,8 @@
#ifndef FF_API_DATA_POINTERS #ifndef FF_API_DATA_POINTERS
#define FF_API_DATA_POINTERS (LIBAVCODEC_VERSION_MAJOR < 54) #define FF_API_DATA_POINTERS (LIBAVCODEC_VERSION_MAJOR < 54)
#endif #endif
#ifndef FF_API_OLD_DECODE_AUDIO
#define FF_API_OLD_DECODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
#endif /* AVCODEC_VERSION_H */ #endif /* AVCODEC_VERSION_H */
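Downstream code that must build against both old and new libavcodec can branch on the version (an illustrative fragment; the surrounding variables are placeholders):

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(53, 25, 0)
    len = avcodec_decode_audio4(avctx, &frame, &got_frame, &pkt);
#else
    out_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
    len      = avcodec_decode_audio3(avctx, samples, &out_size, &pkt);
#endif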

@ -473,6 +473,7 @@ static av_cold int vmdvideo_decode_end(AVCodecContext *avctx)
#define BLOCK_TYPE_SILENCE 3 #define BLOCK_TYPE_SILENCE 3
typedef struct VmdAudioContext { typedef struct VmdAudioContext {
AVFrame frame;
int out_bps; int out_bps;
int chunk_size; int chunk_size;
} VmdAudioContext; } VmdAudioContext;
@ -514,6 +515,9 @@ static av_cold int vmdaudio_decode_init(AVCodecContext *avctx)
s->chunk_size = avctx->block_align + avctx->channels * (s->out_bps == 2); s->chunk_size = avctx->block_align + avctx->channels * (s->out_bps == 2);
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
av_log(avctx, AV_LOG_DEBUG, "%d channels, %d bits/sample, " av_log(avctx, AV_LOG_DEBUG, "%d channels, %d bits/sample, "
"block align = %d, sample rate = %d\n", "block align = %d, sample rate = %d\n",
avctx->channels, avctx->bits_per_coded_sample, avctx->block_align, avctx->channels, avctx->bits_per_coded_sample, avctx->block_align,
@ -551,22 +555,21 @@ static void decode_audio_s16(int16_t *out, const uint8_t *buf, int buf_size,
} }
} }
static int vmdaudio_decode_frame(AVCodecContext *avctx, static int vmdaudio_decode_frame(AVCodecContext *avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
const uint8_t *buf_end; const uint8_t *buf_end;
int buf_size = avpkt->size; int buf_size = avpkt->size;
VmdAudioContext *s = avctx->priv_data; VmdAudioContext *s = avctx->priv_data;
int block_type, silent_chunks, audio_chunks; int block_type, silent_chunks, audio_chunks;
int nb_samples, out_size; int ret;
uint8_t *output_samples_u8 = data; uint8_t *output_samples_u8;
int16_t *output_samples_s16 = data; int16_t *output_samples_s16;
if (buf_size < 16) { if (buf_size < 16) {
av_log(avctx, AV_LOG_WARNING, "skipping small junk packet\n"); av_log(avctx, AV_LOG_WARNING, "skipping small junk packet\n");
*data_size = 0; *got_frame_ptr = 0;
return buf_size; return buf_size;
} }
@ -597,10 +600,15 @@ static int vmdaudio_decode_frame(AVCodecContext *avctx,
/* ensure output buffer is large enough */ /* ensure output buffer is large enough */
audio_chunks = buf_size / s->chunk_size; audio_chunks = buf_size / s->chunk_size;
nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) / avctx->channels;
out_size = nb_samples * avctx->channels * s->out_bps; /* get output buffer */
if (*data_size < out_size) s->frame.nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) / avctx->channels;
return -1; if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
output_samples_u8 = s->frame.data[0];
output_samples_s16 = (int16_t *)s->frame.data[0];
/* decode silent chunks */ /* decode silent chunks */
if (silent_chunks > 0) { if (silent_chunks > 0) {
@ -630,7 +638,9 @@ static int vmdaudio_decode_frame(AVCodecContext *avctx,
} }
} }
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return avpkt->size; return avpkt->size;
} }
@ -658,5 +668,6 @@ AVCodec ff_vmdaudio_decoder = {
.priv_data_size = sizeof(VmdAudioContext), .priv_data_size = sizeof(VmdAudioContext),
.init = vmdaudio_decode_init, .init = vmdaudio_decode_init,
.decode = vmdaudio_decode_frame, .decode = vmdaudio_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Sierra VMD audio"), .long_name = NULL_IF_CONFIG_SMALL("Sierra VMD audio"),
}; };

@ -121,6 +121,7 @@ typedef struct {
typedef struct vorbis_context_s { typedef struct vorbis_context_s {
AVCodecContext *avccontext; AVCodecContext *avccontext;
AVFrame frame;
GetBitContext gb; GetBitContext gb;
DSPContext dsp; DSPContext dsp;
FmtConvertContext fmt_conv; FmtConvertContext fmt_conv;
@ -1033,6 +1034,9 @@ static av_cold int vorbis_decode_init(AVCodecContext *avccontext)
avccontext->sample_rate = vc->audio_samplerate; avccontext->sample_rate = vc->audio_samplerate;
avccontext->frame_size = FFMIN(vc->blocksize[0], vc->blocksize[1]) >> 2; avccontext->frame_size = FFMIN(vc->blocksize[0], vc->blocksize[1]) >> 2;
avcodec_get_frame_defaults(&vc->frame);
avccontext->coded_frame = &vc->frame;
return 0; return 0;
} }
@ -1605,16 +1609,15 @@ static int vorbis_parse_audio_packet(vorbis_context *vc)
// Return the decoded audio packet through the standard api // Return the decoded audio packet through the standard api
static int vorbis_decode_frame(AVCodecContext *avccontext, static int vorbis_decode_frame(AVCodecContext *avccontext, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
vorbis_context *vc = avccontext->priv_data; vorbis_context *vc = avccontext->priv_data;
GetBitContext *gb = &(vc->gb); GetBitContext *gb = &(vc->gb);
const float *channel_ptrs[255]; const float *channel_ptrs[255];
int i, len, out_size; int i, len, ret;
av_dlog(NULL, "packet length %d \n", buf_size); av_dlog(NULL, "packet length %d \n", buf_size);
@ -1625,18 +1628,18 @@ static int vorbis_decode_frame(AVCodecContext *avccontext,
if (!vc->first_frame) { if (!vc->first_frame) {
vc->first_frame = 1; vc->first_frame = 1;
*data_size = 0; *got_frame_ptr = 0;
return buf_size; return buf_size;
} }
av_dlog(NULL, "parsed %d bytes %d bits, returned %d samples (*ch*bits) \n", av_dlog(NULL, "parsed %d bytes %d bits, returned %d samples (*ch*bits) \n",
get_bits_count(gb) / 8, get_bits_count(gb) % 8, len); get_bits_count(gb) / 8, get_bits_count(gb) % 8, len);
out_size = len * vc->audio_channels * /* get output buffer */
av_get_bytes_per_sample(avccontext->sample_fmt); vc->frame.nb_samples = len;
if (*data_size < out_size) { if ((ret = avccontext->get_buffer(avccontext, &vc->frame)) < 0) {
av_log(avccontext, AV_LOG_ERROR, "output buffer is too small\n"); av_log(avccontext, AV_LOG_ERROR, "get_buffer() failed\n");
return AVERROR(EINVAL); return ret;
} }
if (vc->audio_channels > 8) { if (vc->audio_channels > 8) {
@ -1649,12 +1652,15 @@ static int vorbis_decode_frame(AVCodecContext *avccontext,
} }
if (avccontext->sample_fmt == AV_SAMPLE_FMT_FLT) if (avccontext->sample_fmt == AV_SAMPLE_FMT_FLT)
vc->fmt_conv.float_interleave(data, channel_ptrs, len, vc->audio_channels); vc->fmt_conv.float_interleave((float *)vc->frame.data[0], channel_ptrs,
len, vc->audio_channels);
else else
vc->fmt_conv.float_to_int16_interleave(data, channel_ptrs, len, vc->fmt_conv.float_to_int16_interleave((int16_t *)vc->frame.data[0],
channel_ptrs, len,
vc->audio_channels); vc->audio_channels);
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = vc->frame;
return buf_size; return buf_size;
} }
@ -1678,6 +1684,7 @@ AVCodec ff_vorbis_decoder = {
.init = vorbis_decode_init, .init = vorbis_decode_init,
.close = vorbis_decode_close, .close = vorbis_decode_close,
.decode = vorbis_decode_frame, .decode = vorbis_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Vorbis"), .long_name = NULL_IF_CONFIG_SMALL("Vorbis"),
.channel_layouts = ff_vorbis_channel_layouts, .channel_layouts = ff_vorbis_channel_layouts,
.sample_fmts = (const enum AVSampleFormat[]) { .sample_fmts = (const enum AVSampleFormat[]) {

@ -115,8 +115,6 @@ typedef struct WavpackFrameContext {
int float_shift; int float_shift;
int float_max_exp; int float_max_exp;
WvChannel ch[2]; WvChannel ch[2];
int samples_left;
int max_samples;
int pos; int pos;
SavedContext sc, extra_sc; SavedContext sc, extra_sc;
} WavpackFrameContext; } WavpackFrameContext;
@ -125,6 +123,7 @@ typedef struct WavpackFrameContext {
typedef struct WavpackContext { typedef struct WavpackContext {
AVCodecContext *avctx; AVCodecContext *avctx;
AVFrame frame;
WavpackFrameContext *fdec[WV_MAX_FRAME_DECODERS]; WavpackFrameContext *fdec[WV_MAX_FRAME_DECODERS];
int fdec_num; int fdec_num;
@ -133,7 +132,6 @@ typedef struct WavpackContext {
int mkv_mode; int mkv_mode;
int block; int block;
int samples; int samples;
int samples_left;
int ch_offset; int ch_offset;
} WavpackContext; } WavpackContext;
@ -485,7 +483,6 @@ static float wv_get_value_float(WavpackFrameContext *s, uint32_t *crc, int S)
static void wv_reset_saved_context(WavpackFrameContext *s) static void wv_reset_saved_context(WavpackFrameContext *s)
{ {
s->pos = 0; s->pos = 0;
s->samples_left = 0;
s->sc.crc = s->extra_sc.crc = 0xFFFFFFFF; s->sc.crc = s->extra_sc.crc = 0xFFFFFFFF;
} }
@ -502,8 +499,7 @@ static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, vo
float *dstfl = dst; float *dstfl = dst;
const int channel_pad = s->avctx->channels - 2; const int channel_pad = s->avctx->channels - 2;
if(s->samples_left == s->samples) s->one = s->zero = s->zeroes = 0;
s->one = s->zero = s->zeroes = 0;
do{ do{
L = wv_get_value(s, gb, 0, &last); L = wv_get_value(s, gb, 0, &last);
if(last) break; if(last) break;
@ -594,13 +590,8 @@ static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, vo
dst16 += channel_pad; dst16 += channel_pad;
} }
count++; count++;
}while(!last && count < s->max_samples); } while (!last && count < s->samples);
if (last)
s->samples_left = 0;
else
s->samples_left -= count;
if(!s->samples_left){
wv_reset_saved_context(s); wv_reset_saved_context(s);
if(crc != s->CRC){ if(crc != s->CRC){
av_log(s->avctx, AV_LOG_ERROR, "CRC error\n"); av_log(s->avctx, AV_LOG_ERROR, "CRC error\n");
@ -610,15 +601,7 @@ static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, vo
av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n"); av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n");
return -1; return -1;
} }
}else{
s->pos = pos;
s->sc.crc = crc;
s->sc.bits_used = get_bits_count(&s->gb);
if(s->got_extra_bits){
s->extra_sc.crc = crc_extra_bits;
s->extra_sc.bits_used = get_bits_count(&s->gb_extra_bits);
}
}
return count * 2; return count * 2;
} }
@ -635,8 +618,7 @@ static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb, void
float *dstfl = dst; float *dstfl = dst;
const int channel_stride = s->avctx->channels; const int channel_stride = s->avctx->channels;
if(s->samples_left == s->samples) s->one = s->zero = s->zeroes = 0;
s->one = s->zero = s->zeroes = 0;
do{ do{
T = wv_get_value(s, gb, 0, &last); T = wv_get_value(s, gb, 0, &last);
S = 0; S = 0;
@ -675,13 +657,8 @@ static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb, void
dst16 += channel_stride; dst16 += channel_stride;
} }
count++; count++;
}while(!last && count < s->max_samples); } while (!last && count < s->samples);
if (last)
s->samples_left = 0;
else
s->samples_left -= count;
if(!s->samples_left){
wv_reset_saved_context(s); wv_reset_saved_context(s);
if(crc != s->CRC){ if(crc != s->CRC){
av_log(s->avctx, AV_LOG_ERROR, "CRC error\n"); av_log(s->avctx, AV_LOG_ERROR, "CRC error\n");
@ -691,15 +668,7 @@ static inline int wv_unpack_mono(WavpackFrameContext *s, GetBitContext *gb, void
av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n"); av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n");
return -1; return -1;
} }
}else{
s->pos = pos;
s->sc.crc = crc;
s->sc.bits_used = get_bits_count(&s->gb);
if(s->got_extra_bits){
s->extra_sc.crc = crc_extra_bits;
s->extra_sc.bits_used = get_bits_count(&s->gb_extra_bits);
}
}
return count; return count;
} }
@ -743,6 +712,9 @@ static av_cold int wavpack_decode_init(AVCodecContext *avctx)
s->fdec_num = 0; s->fdec_num = 0;
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0; return 0;
} }
@ -759,7 +731,7 @@ static av_cold int wavpack_decode_end(AVCodecContext *avctx)
} }
static int wavpack_decode_block(AVCodecContext *avctx, int block_no, static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
void *data, int *data_size, void *data, int *got_frame_ptr,
const uint8_t *buf, int buf_size) const uint8_t *buf, int buf_size)
{ {
WavpackContext *wc = avctx->priv_data; WavpackContext *wc = avctx->priv_data;
@ -774,7 +746,7 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
int bpp, chan, chmask; int bpp, chan, chmask;
if (buf_size == 0){ if (buf_size == 0){
*data_size = 0; *got_frame_ptr = 0;
return 0; return 0;
} }
@ -789,18 +761,16 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
return -1; return -1;
} }
if(!s->samples_left){
memset(s->decorr, 0, MAX_TERMS * sizeof(Decorr)); memset(s->decorr, 0, MAX_TERMS * sizeof(Decorr));
memset(s->ch, 0, sizeof(s->ch)); memset(s->ch, 0, sizeof(s->ch));
s->extra_bits = 0; s->extra_bits = 0;
s->and = s->or = s->shift = 0; s->and = s->or = s->shift = 0;
s->got_extra_bits = 0; s->got_extra_bits = 0;
}
if(!wc->mkv_mode){ if(!wc->mkv_mode){
s->samples = AV_RL32(buf); buf += 4; s->samples = AV_RL32(buf); buf += 4;
if(!s->samples){ if(!s->samples){
*data_size = 0; *got_frame_ptr = 0;
return 0; return 0;
} }
}else{ }else{
@ -829,13 +799,6 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
wc->ch_offset += 1 + s->stereo; wc->ch_offset += 1 + s->stereo;
s->max_samples = *data_size / (bpp * avctx->channels);
s->max_samples = FFMIN(s->max_samples, s->samples);
if(s->samples_left > 0){
s->max_samples = FFMIN(s->max_samples, s->samples_left);
buf = buf_end;
}
// parse metadata blocks // parse metadata blocks
while(buf < buf_end){ while(buf < buf_end){
id = *buf++; id = *buf++;
@ -1064,7 +1027,7 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
} }
if(id & WP_IDF_ODD) buf++; if(id & WP_IDF_ODD) buf++;
} }
if(!s->samples_left){
if(!got_terms){ if(!got_terms){
av_log(avctx, AV_LOG_ERROR, "No block with decorrelation terms\n"); av_log(avctx, AV_LOG_ERROR, "No block with decorrelation terms\n");
return -1; return -1;
@ -1101,16 +1064,6 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
s->got_extra_bits = 0; s->got_extra_bits = 0;
} }
} }
s->samples_left = s->samples;
}else{
init_get_bits(&s->gb, orig_buf + s->sc.offset, s->sc.size);
skip_bits_long(&s->gb, s->sc.bits_used);
if(s->got_extra_bits){
init_get_bits(&s->gb_extra_bits, orig_buf + s->extra_sc.offset,
s->extra_sc.size);
skip_bits_long(&s->gb_extra_bits, s->extra_sc.bits_used);
}
}
if(s->stereo_in){ if(s->stereo_in){
if(avctx->sample_fmt == AV_SAMPLE_FMT_S16) if(avctx->sample_fmt == AV_SAMPLE_FMT_S16)
@ -1167,7 +1120,7 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
} }
} }
wc->samples_left = s->samples_left; *got_frame_ptr = 1;
return samplecount * bpp; return samplecount * bpp;
} }
@ -1181,23 +1134,40 @@ static void wavpack_decode_flush(AVCodecContext *avctx)
wv_reset_saved_context(s->fdec[i]); wv_reset_saved_context(s->fdec[i]);
} }
static int wavpack_decode_frame(AVCodecContext *avctx, static int wavpack_decode_frame(AVCodecContext *avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
WavpackContext *s = avctx->priv_data; WavpackContext *s = avctx->priv_data;
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
int frame_size; int frame_size, ret;
int samplecount = 0; int samplecount = 0;
s->block = 0; s->block = 0;
s->samples_left = 0;
s->ch_offset = 0; s->ch_offset = 0;
/* determine number of samples */
if(s->mkv_mode){ if(s->mkv_mode){
s->samples = AV_RL32(buf); buf += 4; s->samples = AV_RL32(buf); buf += 4;
} else {
if (s->multichannel)
s->samples = AV_RL32(buf + 4);
else
s->samples = AV_RL32(buf);
}
if (s->samples <= 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid number of samples: %d\n",
s->samples);
return AVERROR(EINVAL);
}
/* get output buffer */
s->frame.nb_samples = s->samples;
if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
} }
while(buf_size > 0){ while(buf_size > 0){
if(!s->multichannel){ if(!s->multichannel){
frame_size = buf_size; frame_size = buf_size;
@ -1216,17 +1186,19 @@ static int wavpack_decode_frame(AVCodecContext *avctx,
wavpack_decode_flush(avctx); wavpack_decode_flush(avctx);
return -1; return -1;
} }
if((samplecount = wavpack_decode_block(avctx, s->block, data, if((samplecount = wavpack_decode_block(avctx, s->block, s->frame.data[0],
data_size, buf, frame_size)) < 0) { got_frame_ptr, buf, frame_size)) < 0) {
wavpack_decode_flush(avctx); wavpack_decode_flush(avctx);
return -1; return -1;
} }
s->block++; s->block++;
buf += frame_size; buf_size -= frame_size; buf += frame_size; buf_size -= frame_size;
} }
*data_size = samplecount * avctx->channels;
return s->samples_left > 0 ? 0 : avpkt->size; if (*got_frame_ptr)
*(AVFrame *)data = s->frame;
return avpkt->size;
} }
AVCodec ff_wavpack_decoder = { AVCodec ff_wavpack_decoder = {
@ -1238,6 +1210,6 @@ AVCodec ff_wavpack_decoder = {
.close = wavpack_decode_end, .close = wavpack_decode_end,
.decode = wavpack_decode_frame, .decode = wavpack_decode_frame,
.flush = wavpack_decode_flush, .flush = wavpack_decode_flush,
.capabilities = CODEC_CAP_SUBFRAMES, .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("WavPack"), .long_name = NULL_IF_CONFIG_SMALL("WavPack"),
}; };

@ -65,6 +65,7 @@ typedef struct CoefVLCTable {
typedef struct WMACodecContext { typedef struct WMACodecContext {
AVCodecContext* avctx; AVCodecContext* avctx;
AVFrame frame;
GetBitContext gb; GetBitContext gb;
PutBitContext pb; PutBitContext pb;
int sample_rate; int sample_rate;

@ -124,6 +124,10 @@ static int wma_decode_init(AVCodecContext * avctx)
} }
avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->sample_fmt = AV_SAMPLE_FMT_S16;
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0; return 0;
} }
@ -797,14 +801,13 @@ static int wma_decode_frame(WMACodecContext *s, int16_t *samples)
return 0; return 0;
} }
static int wma_decode_superframe(AVCodecContext *avctx, static int wma_decode_superframe(AVCodecContext *avctx, void *data,
void *data, int *data_size, int *got_frame_ptr, AVPacket *avpkt)
AVPacket *avpkt)
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
WMACodecContext *s = avctx->priv_data; WMACodecContext *s = avctx->priv_data;
int nb_frames, bit_offset, i, pos, len, out_size; int nb_frames, bit_offset, i, pos, len, ret;
uint8_t *q; uint8_t *q;
int16_t *samples; int16_t *samples;
@ -818,8 +821,6 @@ static int wma_decode_superframe(AVCodecContext *avctx,
return 0; return 0;
buf_size = s->block_align; buf_size = s->block_align;
samples = data;
init_get_bits(&s->gb, buf, buf_size*8); init_get_bits(&s->gb, buf, buf_size*8);
if (s->use_bit_reservoir) { if (s->use_bit_reservoir) {
@ -830,12 +831,13 @@ static int wma_decode_superframe(AVCodecContext *avctx,
nb_frames = 1; nb_frames = 1;
} }
out_size = nb_frames * s->frame_len * s->nb_channels * /* get output buffer */
av_get_bytes_per_sample(avctx->sample_fmt); s->frame.nb_samples = nb_frames * s->frame_len;
if (*data_size < out_size) { if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "Insufficient output space\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
goto fail; return ret;
} }
samples = (int16_t *)s->frame.data[0];
if (s->use_bit_reservoir) { if (s->use_bit_reservoir) {
bit_offset = get_bits(&s->gb, s->byte_offset_bits + 3); bit_offset = get_bits(&s->gb, s->byte_offset_bits + 3);
@ -903,7 +905,9 @@ static int wma_decode_superframe(AVCodecContext *avctx,
//av_log(NULL, AV_LOG_ERROR, "%d %d %d %d outbytes:%d eaten:%d\n", s->frame_len_bits, s->block_len_bits, s->frame_len, s->block_len, (int8_t *)samples - (int8_t *)data, s->block_align); //av_log(NULL, AV_LOG_ERROR, "%d %d %d %d outbytes:%d eaten:%d\n", s->frame_len_bits, s->block_len_bits, s->frame_len, s->block_len, (int8_t *)samples - (int8_t *)data, s->block_align);
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return s->block_align; return s->block_align;
fail: fail:
/* when error, we reset the bit reservoir */ /* when error, we reset the bit reservoir */
@ -928,6 +932,7 @@ AVCodec ff_wmav1_decoder = {
.close = ff_wma_end, .close = ff_wma_end,
.decode = wma_decode_superframe, .decode = wma_decode_superframe,
.flush = flush, .flush = flush,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 1"), .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 1"),
}; };
@ -940,5 +945,6 @@ AVCodec ff_wmav2_decoder = {
.close = ff_wma_end, .close = ff_wma_end,
.decode = wma_decode_superframe, .decode = wma_decode_superframe,
.flush = flush, .flush = flush,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 2"), .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 2"),
}; };

@ -167,6 +167,7 @@ typedef struct {
typedef struct WMAProDecodeCtx { typedef struct WMAProDecodeCtx {
/* generic decoder variables */ /* generic decoder variables */
AVCodecContext* avctx; ///< codec context for av_log AVCodecContext* avctx; ///< codec context for av_log
AVFrame frame; ///< AVFrame for decoded output
DSPContext dsp; ///< accelerated DSP functions DSPContext dsp; ///< accelerated DSP functions
FmtConvertContext fmt_conv; FmtConvertContext fmt_conv;
uint8_t frame_data[MAX_FRAMESIZE + uint8_t frame_data[MAX_FRAMESIZE +
@ -209,8 +210,6 @@ typedef struct WMAProDecodeCtx {
uint32_t frame_num; ///< current frame number (not used for decoding) uint32_t frame_num; ///< current frame number (not used for decoding)
GetBitContext gb; ///< bitstream reader context GetBitContext gb; ///< bitstream reader context
int buf_bit_size; ///< buffer size in bits int buf_bit_size; ///< buffer size in bits
float* samples; ///< current samplebuffer pointer
float* samples_end; ///< maximum samplebuffer pointer
uint8_t drc_gain; ///< gain for the DRC tool uint8_t drc_gain; ///< gain for the DRC tool
int8_t skip_frame; ///< skip output step int8_t skip_frame; ///< skip output step
int8_t parsed_all_subframes; ///< all subframes decoded? int8_t parsed_all_subframes; ///< all subframes decoded?
@ -453,6 +452,10 @@ static av_cold int decode_init(AVCodecContext *avctx)
dump_context(s); dump_context(s);
avctx->channel_layout = channel_mask; avctx->channel_layout = channel_mask;
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0; return 0;
} }
@ -1279,22 +1282,15 @@ static int decode_subframe(WMAProDecodeCtx *s)
*@return 0 if the trailer bit indicates that this is the last frame, *@return 0 if the trailer bit indicates that this is the last frame,
* 1 if there are additional frames * 1 if there are additional frames
*/ */
static int decode_frame(WMAProDecodeCtx *s) static int decode_frame(WMAProDecodeCtx *s, int *got_frame_ptr)
{ {
AVCodecContext *avctx = s->avctx;
GetBitContext* gb = &s->gb; GetBitContext* gb = &s->gb;
int more_frames = 0; int more_frames = 0;
int len = 0; int len = 0;
int i; int i, ret;
const float *out_ptr[WMAPRO_MAX_CHANNELS]; const float *out_ptr[WMAPRO_MAX_CHANNELS];
float *samples;
/** check for potential output buffer overflow */
if (s->num_channels * s->samples_per_frame > s->samples_end - s->samples) {
/** return an error if no frame could be decoded at all */
av_log(s->avctx, AV_LOG_ERROR,
"not enough space for the output samples\n");
s->packet_loss = 1;
return 0;
}
/** get frame length */ /** get frame length */
if (s->len_prefix) if (s->len_prefix)
@ -1360,10 +1356,19 @@ static int decode_frame(WMAProDecodeCtx *s)
} }
} }
/* get output buffer */
s->frame.nb_samples = s->samples_per_frame;
if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
s->packet_loss = 1;
return 0;
}
samples = (float *)s->frame.data[0];
/** interleave samples and write them to the output buffer */ /** interleave samples and write them to the output buffer */
for (i = 0; i < s->num_channels; i++) for (i = 0; i < s->num_channels; i++)
out_ptr[i] = s->channel[i].out; out_ptr[i] = s->channel[i].out;
s->fmt_conv.float_interleave(s->samples, out_ptr, s->samples_per_frame, s->fmt_conv.float_interleave(samples, out_ptr, s->samples_per_frame,
s->num_channels); s->num_channels);
for (i = 0; i < s->num_channels; i++) { for (i = 0; i < s->num_channels; i++) {
@ -1375,8 +1380,10 @@ static int decode_frame(WMAProDecodeCtx *s)
if (s->skip_frame) { if (s->skip_frame) {
s->skip_frame = 0; s->skip_frame = 0;
} else *got_frame_ptr = 0;
s->samples += s->num_channels * s->samples_per_frame; } else {
*got_frame_ptr = 1;
}
if (s->len_prefix) { if (s->len_prefix) {
if (len != (get_bits_count(gb) - s->frame_offset) + 2) { if (len != (get_bits_count(gb) - s->frame_offset) + 2) {
@ -1473,8 +1480,8 @@ static void save_bits(WMAProDecodeCtx *s, GetBitContext* gb, int len,
*@param avpkt input packet *@param avpkt input packet
*@return number of bytes that were read from the input buffer *@return number of bytes that were read from the input buffer
*/ */
static int decode_packet(AVCodecContext *avctx, static int decode_packet(AVCodecContext *avctx, void *data,
void *data, int *data_size, AVPacket* avpkt) int *got_frame_ptr, AVPacket* avpkt)
{ {
WMAProDecodeCtx *s = avctx->priv_data; WMAProDecodeCtx *s = avctx->priv_data;
GetBitContext* gb = &s->pgb; GetBitContext* gb = &s->pgb;
@ -1483,9 +1490,7 @@ static int decode_packet(AVCodecContext *avctx,
int num_bits_prev_frame; int num_bits_prev_frame;
int packet_sequence_number; int packet_sequence_number;
s->samples = data; *got_frame_ptr = 0;
s->samples_end = (float*)((int8_t*)data + *data_size);
*data_size = 0;
if (s->packet_done || s->packet_loss) { if (s->packet_done || s->packet_loss) {
s->packet_done = 0; s->packet_done = 0;
@ -1532,7 +1537,7 @@ static int decode_packet(AVCodecContext *avctx,
/** decode the cross packet frame if it is valid */ /** decode the cross packet frame if it is valid */
if (!s->packet_loss) if (!s->packet_loss)
decode_frame(s); decode_frame(s, got_frame_ptr);
} else if (s->num_saved_bits - s->frame_offset) { } else if (s->num_saved_bits - s->frame_offset) {
av_dlog(avctx, "ignoring %x previously saved bits\n", av_dlog(avctx, "ignoring %x previously saved bits\n",
s->num_saved_bits - s->frame_offset); s->num_saved_bits - s->frame_offset);
@ -1555,7 +1560,7 @@ static int decode_packet(AVCodecContext *avctx,
(frame_size = show_bits(gb, s->log2_frame_size)) && (frame_size = show_bits(gb, s->log2_frame_size)) &&
frame_size <= remaining_bits(s, gb)) { frame_size <= remaining_bits(s, gb)) {
save_bits(s, gb, frame_size, 0); save_bits(s, gb, frame_size, 0);
s->packet_done = !decode_frame(s); s->packet_done = !decode_frame(s, got_frame_ptr);
} else if (!s->len_prefix } else if (!s->len_prefix
&& s->num_saved_bits > get_bits_count(&s->gb)) { && s->num_saved_bits > get_bits_count(&s->gb)) {
/** when the frames do not have a length prefix, we don't know /** when the frames do not have a length prefix, we don't know
@ -1565,7 +1570,7 @@ static int decode_packet(AVCodecContext *avctx,
therefore we save the incoming packet first, then we append therefore we save the incoming packet first, then we append
the "previous frame" data from the next packet so that the "previous frame" data from the next packet so that
we get a buffer that only contains full frames */ we get a buffer that only contains full frames */
s->packet_done = !decode_frame(s); s->packet_done = !decode_frame(s, got_frame_ptr);
} else } else
s->packet_done = 1; s->packet_done = 1;
} }
@ -1577,10 +1582,14 @@ static int decode_packet(AVCodecContext *avctx,
save_bits(s, gb, remaining_bits(s, gb), 0); save_bits(s, gb, remaining_bits(s, gb), 0);
} }
*data_size = (int8_t *)s->samples - (int8_t *)data;
s->packet_offset = get_bits_count(gb) & 7; s->packet_offset = get_bits_count(gb) & 7;
if (s->packet_loss)
return AVERROR_INVALIDDATA;
if (*got_frame_ptr)
*(AVFrame *)data = s->frame;
return (s->packet_loss) ? AVERROR_INVALIDDATA : get_bits_count(gb) >> 3; return get_bits_count(gb) >> 3;
} }
/** /**
@ -1611,7 +1620,7 @@ AVCodec ff_wmapro_decoder = {
.init = decode_init, .init = decode_init,
.close = decode_end, .close = decode_end,
.decode = decode_packet, .decode = decode_packet,
.capabilities = CODEC_CAP_SUBFRAMES, .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
.flush= flush, .flush= flush,
.long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 9 Professional"), .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 9 Professional"),
}; };

@ -131,6 +131,7 @@ typedef struct {
* @name Global values specified in the stream header / extradata or used all over. * @name Global values specified in the stream header / extradata or used all over.
* @{ * @{
*/ */
AVFrame frame;
GetBitContext gb; ///< packet bitreader. During decoder init, GetBitContext gb; ///< packet bitreader. During decoder init,
///< it contains the extradata from the ///< it contains the extradata from the
///< demuxer. During decoding, it contains ///< demuxer. During decoding, it contains
@ -438,6 +439,9 @@ static av_cold int wmavoice_decode_init(AVCodecContext *ctx)
ctx->sample_fmt = AV_SAMPLE_FMT_FLT; ctx->sample_fmt = AV_SAMPLE_FMT_FLT;
avcodec_get_frame_defaults(&s->frame);
ctx->coded_frame = &s->frame;
return 0; return 0;
} }
@ -1725,17 +1729,17 @@ static int check_bits_for_superframe(GetBitContext *orig_gb,
* @return 0 on success, <0 on error or 1 if there was not enough data to * @return 0 on success, <0 on error or 1 if there was not enough data to
* fully parse the superframe * fully parse the superframe
*/ */
static int synth_superframe(AVCodecContext *ctx, static int synth_superframe(AVCodecContext *ctx, int *got_frame_ptr)
float *samples, int *data_size)
{ {
WMAVoiceContext *s = ctx->priv_data; WMAVoiceContext *s = ctx->priv_data;
GetBitContext *gb = &s->gb, s_gb; GetBitContext *gb = &s->gb, s_gb;
int n, res, out_size, n_samples = 480; int n, res, n_samples = 480;
double lsps[MAX_FRAMES][MAX_LSPS]; double lsps[MAX_FRAMES][MAX_LSPS];
const double *mean_lsf = s->lsps == 16 ? const double *mean_lsf = s->lsps == 16 ?
wmavoice_mean_lsf16[s->lsp_def_mode] : wmavoice_mean_lsf10[s->lsp_def_mode]; wmavoice_mean_lsf16[s->lsp_def_mode] : wmavoice_mean_lsf10[s->lsp_def_mode];
float excitation[MAX_SIGNAL_HISTORY + MAX_SFRAMESIZE + 12]; float excitation[MAX_SIGNAL_HISTORY + MAX_SFRAMESIZE + 12];
float synth[MAX_LSPS + MAX_SFRAMESIZE]; float synth[MAX_LSPS + MAX_SFRAMESIZE];
float *samples;
memcpy(synth, s->synth_history, memcpy(synth, s->synth_history,
s->lsps * sizeof(*synth)); s->lsps * sizeof(*synth));
@ -1749,7 +1753,7 @@ static int synth_superframe(AVCodecContext *ctx,
} }
if ((res = check_bits_for_superframe(gb, s)) == 1) { if ((res = check_bits_for_superframe(gb, s)) == 1) {
*data_size = 0; *got_frame_ptr = 0;
return 1; return 1;
} }
@ -1792,13 +1796,14 @@ static int synth_superframe(AVCodecContext *ctx,
stabilize_lsps(lsps[n], s->lsps); stabilize_lsps(lsps[n], s->lsps);
} }
out_size = n_samples * av_get_bytes_per_sample(ctx->sample_fmt); /* get output buffer */
if (*data_size < out_size) { s->frame.nb_samples = 480;
av_log(ctx, AV_LOG_ERROR, if ((res = ctx->get_buffer(ctx, &s->frame)) < 0) {
"Output buffer too small (%d given - %d needed)\n", av_log(ctx, AV_LOG_ERROR, "get_buffer() failed\n");
*data_size, out_size); return res;
return -1;
} }
s->frame.nb_samples = n_samples;
samples = (float *)s->frame.data[0];
/* Parse frames, optionally preceded by per-frame (independent) LSPs. */ /* Parse frames, optionally preceded by per-frame (independent) LSPs. */
for (n = 0; n < 3; n++) { for (n = 0; n < 3; n++) {
@ -1820,7 +1825,7 @@ static int synth_superframe(AVCodecContext *ctx,
lsps[n], n == 0 ? s->prev_lsps : lsps[n - 1], lsps[n], n == 0 ? s->prev_lsps : lsps[n - 1],
&excitation[s->history_nsamples + n * MAX_FRAMESIZE], &excitation[s->history_nsamples + n * MAX_FRAMESIZE],
&synth[s->lsps + n * MAX_FRAMESIZE]))) { &synth[s->lsps + n * MAX_FRAMESIZE]))) {
*data_size = 0; *got_frame_ptr = 0;
return res; return res;
} }
} }
@ -1833,8 +1838,7 @@ static int synth_superframe(AVCodecContext *ctx,
skip_bits(gb, 10 * (res + 1)); skip_bits(gb, 10 * (res + 1));
} }
/* Specify nr. of output samples */ *got_frame_ptr = 1;
*data_size = out_size;
/* Update history */ /* Update history */
memcpy(s->prev_lsps, lsps[2], memcpy(s->prev_lsps, lsps[2],
@ -1922,7 +1926,7 @@ static void copy_bits(PutBitContext *pb,
* For more information about frames, see #synth_superframe(). * For more information about frames, see #synth_superframe().
*/ */
static int wmavoice_decode_packet(AVCodecContext *ctx, void *data, static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
int *data_size, AVPacket *avpkt) int *got_frame_ptr, AVPacket *avpkt)
{ {
WMAVoiceContext *s = ctx->priv_data; WMAVoiceContext *s = ctx->priv_data;
GetBitContext *gb = &s->gb; GetBitContext *gb = &s->gb;
@ -1935,7 +1939,7 @@ static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
* capping the packet size at ctx->block_align. */ * capping the packet size at ctx->block_align. */
for (size = avpkt->size; size > ctx->block_align; size -= ctx->block_align); for (size = avpkt->size; size > ctx->block_align; size -= ctx->block_align);
if (!size) { if (!size) {
*data_size = 0; *got_frame_ptr = 0;
return 0; return 0;
} }
init_get_bits(&s->gb, avpkt->data, size << 3); init_get_bits(&s->gb, avpkt->data, size << 3);
@ -1956,10 +1960,11 @@ static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
copy_bits(&s->pb, avpkt->data, size, gb, s->spillover_nbits); copy_bits(&s->pb, avpkt->data, size, gb, s->spillover_nbits);
flush_put_bits(&s->pb); flush_put_bits(&s->pb);
s->sframe_cache_size += s->spillover_nbits; s->sframe_cache_size += s->spillover_nbits;
if ((res = synth_superframe(ctx, data, data_size)) == 0 && if ((res = synth_superframe(ctx, got_frame_ptr)) == 0 &&
*data_size > 0) { *got_frame_ptr) {
cnt += s->spillover_nbits; cnt += s->spillover_nbits;
s->skip_bits_next = cnt & 7; s->skip_bits_next = cnt & 7;
*(AVFrame *)data = s->frame;
return cnt >> 3; return cnt >> 3;
} else } else
skip_bits_long (gb, s->spillover_nbits - cnt + skip_bits_long (gb, s->spillover_nbits - cnt +
@ -1974,11 +1979,12 @@ static int wmavoice_decode_packet(AVCodecContext *ctx, void *data,
s->sframe_cache_size = 0; s->sframe_cache_size = 0;
s->skip_bits_next = 0; s->skip_bits_next = 0;
pos = get_bits_left(gb); pos = get_bits_left(gb);
if ((res = synth_superframe(ctx, data, data_size)) < 0) { if ((res = synth_superframe(ctx, got_frame_ptr)) < 0) {
return res; return res;
} else if (*data_size > 0) { } else if (*got_frame_ptr) {
int cnt = get_bits_count(gb); int cnt = get_bits_count(gb);
s->skip_bits_next = cnt & 7; s->skip_bits_next = cnt & 7;
*(AVFrame *)data = s->frame;
return cnt >> 3; return cnt >> 3;
} else if ((s->sframe_cache_size = pos) > 0) { } else if ((s->sframe_cache_size = pos) > 0) {
/* rewind bit reader to start of last (incomplete) superframe... */ /* rewind bit reader to start of last (incomplete) superframe... */
@ -2046,7 +2052,7 @@ AVCodec ff_wmavoice_decoder = {
.init = wmavoice_decode_init, .init = wmavoice_decode_init,
.close = wmavoice_decode_end, .close = wmavoice_decode_end,
.decode = wmavoice_decode_packet, .decode = wmavoice_decode_packet,
.capabilities = CODEC_CAP_SUBFRAMES, .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
.flush = wmavoice_flush, .flush = wmavoice_flush,
.long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio Voice"), .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio Voice"),
}; };

@ -37,26 +37,37 @@ static const int8_t ws_adpcm_4bit[] = {
0, 1, 2, 3, 4, 5, 6, 8 0, 1, 2, 3, 4, 5, 6, 8
}; };
typedef struct WSSndContext {
AVFrame frame;
} WSSndContext;
static av_cold int ws_snd_decode_init(AVCodecContext *avctx) static av_cold int ws_snd_decode_init(AVCodecContext *avctx)
{ {
WSSndContext *s = avctx->priv_data;
if (avctx->channels != 1) { if (avctx->channels != 1) {
av_log_ask_for_sample(avctx, "unsupported number of channels\n"); av_log_ask_for_sample(avctx, "unsupported number of channels\n");
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
avctx->sample_fmt = AV_SAMPLE_FMT_U8; avctx->sample_fmt = AV_SAMPLE_FMT_U8;
avcodec_get_frame_defaults(&s->frame);
avctx->coded_frame = &s->frame;
return 0; return 0;
} }
static int ws_snd_decode_frame(AVCodecContext *avctx, void *data, static int ws_snd_decode_frame(AVCodecContext *avctx, void *data,
int *data_size, AVPacket *avpkt) int *got_frame_ptr, AVPacket *avpkt)
{ {
WSSndContext *s = avctx->priv_data;
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
int in_size, out_size; int in_size, out_size, ret;
int sample = 128; int sample = 128;
uint8_t *samples = data; uint8_t *samples;
uint8_t *samples_end; uint8_t *samples_end;
if (!buf_size) if (!buf_size)
@ -71,19 +82,24 @@ static int ws_snd_decode_frame(AVCodecContext *avctx, void *data,
in_size = AV_RL16(&buf[2]); in_size = AV_RL16(&buf[2]);
buf += 4; buf += 4;
if (out_size > *data_size) {
av_log(avctx, AV_LOG_ERROR, "Frame is too large to fit in buffer\n");
return -1;
}
if (in_size > buf_size) { if (in_size > buf_size) {
av_log(avctx, AV_LOG_ERROR, "Frame data is larger than input buffer\n"); av_log(avctx, AV_LOG_ERROR, "Frame data is larger than input buffer\n");
return -1; return -1;
} }
/* get output buffer */
s->frame.nb_samples = out_size;
if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
samples = s->frame.data[0];
samples_end = samples + out_size; samples_end = samples + out_size;
if (in_size == out_size) { if (in_size == out_size) {
memcpy(samples, buf, out_size); memcpy(samples, buf, out_size);
*data_size = out_size; *got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return buf_size; return buf_size;
} }
@ -159,7 +175,9 @@ static int ws_snd_decode_frame(AVCodecContext *avctx, void *data,
} }
} }
*data_size = samples - (uint8_t *)data; s->frame.nb_samples = samples - s->frame.data[0];
*got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
return buf_size; return buf_size;
} }
@ -168,7 +186,9 @@ AVCodec ff_ws_snd1_decoder = {
.name = "ws_snd1", .name = "ws_snd1",
.type = AVMEDIA_TYPE_AUDIO, .type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_WESTWOOD_SND1, .id = CODEC_ID_WESTWOOD_SND1,
.priv_data_size = sizeof(WSSndContext),
.init = ws_snd_decode_init, .init = ws_snd_decode_init,
.decode = ws_snd_decode_frame, .decode = ws_snd_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Westwood Audio (SND1)"), .long_name = NULL_IF_CONFIG_SMALL("Westwood Audio (SND1)"),
}; };
