Merge remote-tracking branch 'qatar/master'

* qatar/master:
  mov: Don't av_malloc(0).
  avconv: only allocate 1 AVFrame per input stream
  avconv: fix memleaks due to not freeing the AVFrame for audio
  h264-fate: remove -strict 1 except where necessary (mr4/5-tandberg).
  misc Doxygen markup improvements
  doxygen: eliminate Qt-style doxygen syntax
  g722: Add a regression test for muxing/demuxing in wav
  g722: Change bits per sample to 4
  g722dec: Signal skipping the lower bits via AVOptions instead of bits_per_coded_sample
  api-example: update to use avcodec_decode_audio4()
  avplay: use avcodec_decode_audio4()
  avplay: use a separate buffer for playing silence
  avformat: use avcodec_decode_audio4() in avformat_find_stream_info()
  avconv: use avcodec_decode_audio4() instead of avcodec_decode_audio3()
  mov: Allow empty stts atom.
  doc: document preferred Doxygen syntax and make patcheck detect it

Conflicts:
	avconv.c
	ffplay.c
	libavcodec/mlpdec.c
	libavcodec/version.h
	libavformat/mov.c
	tests/codec-regression.sh
	tests/fate/h264.mak

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit b404ab9e74
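
The recurring change in this merge is the switch from avcodec_decode_audio3() to the AVFrame-based avcodec_decode_audio4() across avconv, avplay, the decoding API example and avformat_find_stream_info(). A minimal sketch of the new consumer loop, not part of the merge itself, assuming an already-opened AVCodecContext *avctx, a filled AVPacket pkt and a hypothetical consume() sink:

    AVFrame *frame = avcodec_alloc_frame();
    int got_frame = 0;
    while (pkt.size > 0) {
        avcodec_get_frame_defaults(frame);
        int len = avcodec_decode_audio4(avctx, frame, &got_frame, &pkt);
        if (len < 0)
            break;                              /* decode error: drop the rest of the packet */
        if (got_frame) {
            /* decoded samples live in frame->data[0]; compute their byte size */
            int data_size = av_samples_get_buffer_size(NULL, avctx->channels,
                                                       frame->nb_samples,
                                                       avctx->sample_fmt, 1);
            consume(frame->data[0], data_size); /* hypothetical sink */
        }
        pkt.data += len;
        pkt.size -= len;
    }
    av_free(frame);
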
 avconv.c                         |  86
 cmdutils.h                       |   2
 doc/developer.texi               |   5
 doc/examples/decoding_encoding.c |  29
 ffmpeg.c                         |  85
 ffplay.c                         |  46
 libavcodec/amrnbdec.c            |   2
 libavcodec/cinepak.c             |   7
 libavcodec/eamad.c               |   4
 libavcodec/g722.h                |   2
 libavcodec/g722dec.c             |  32
 libavcodec/g722enc.c             |   4
 libavcodec/ivi_common.h          |   2
 libavcodec/lsp.c                 |   2
 libavcodec/mlpdec.c              |  56
 libavcodec/qcelpdata.h           |  16
 libavcodec/rtjpeg.c              |   2
 libavcodec/utils.c               |   2
 libavcodec/version.h             |   2
 libavdevice/pulse.c              |   1
 libavdevice/x11grab.c            |   5
 libavformat/avformat.h           |   2
 libavformat/matroskadec.c        |   8
 libavformat/mov.c                |  10
 libavformat/nuv.c                |   2
 libavformat/oggdec.c             |   5
 libavformat/utils.c              |  31
 libavutil/lzo.c                  |   6
 libavutil/lzo.h                  |   8
 libpostproc/postprocess.c        |   4
 tests/codec-regression.sh        |   5
 tests/fate/h264.mak              | 124
 tests/ref/acodec/g722            |   4
 tools/patcheck                   |   1

@@ -155,8 +155,6 @@ static uint8_t *audio_buf;
 static uint8_t *audio_out;
 static unsigned int allocated_audio_out_size, allocated_audio_buf_size;
-static void *samples;
 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
 typedef struct InputStream {
@@ -165,6 +163,8 @@ typedef struct InputStream {
 int discard; /* true if stream data should be discarded */
 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
 AVCodec *dec;
+AVFrame *decoded_frame;
+AVFrame *filtered_frame;
 int64_t start; /* time when read started */
 int64_t next_pts; /* synthetic pts for cases where pkt.pts
@@ -612,8 +612,11 @@ void exit_program(int ret)
 for(i=0;i<nb_input_files;i++) {
 av_close_input_file(input_files[i].ctx);
 }
-for (i = 0; i < nb_input_streams; i++)
+for (i = 0; i < nb_input_streams; i++) {
+av_freep(&input_streams[i].decoded_frame);
+av_freep(&input_streams[i].filtered_frame);
 av_dict_free(&input_streams[i].opts);
+}
 if (vstats_file)
 fclose(vstats_file);
@@ -628,7 +631,6 @@ void exit_program(int ret)
 av_free(audio_buf);
 av_free(audio_out);
 allocated_audio_buf_size= allocated_audio_out_size= 0;
-av_free(samples);
 #if CONFIG_AVFILTER
 avfilter_uninit();
@@ -787,14 +789,11 @@ static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_
 memset(buf, fill_char, size);
 }
-static void do_audio_out(AVFormatContext *s,
-OutputStream *ost,
-InputStream *ist,
-unsigned char *buf, int size)
+static void do_audio_out(AVFormatContext *s, OutputStream *ost,
+InputStream *ist, AVFrame *decoded_frame)
 {
 uint8_t *buftmp;
 int64_t audio_out_size, audio_buf_size;
-int64_t allocated_for_size= size;
 int size_out, frame_bytes, ret, resample_changed;
 AVCodecContext *enc= ost->st->codec;
@@ -802,6 +801,9 @@ static void do_audio_out(AVFormatContext *s,
 int osize = av_get_bytes_per_sample(enc->sample_fmt);
 int isize = av_get_bytes_per_sample(dec->sample_fmt);
 const int coded_bps = av_get_bits_per_sample(enc->codec->id);
+uint8_t *buf = decoded_frame->data[0];
+int size = decoded_frame->nb_samples * dec->channels * isize;
+int64_t allocated_for_size = size;
 need_realloc:
 audio_buf_size= (allocated_for_size + isize*dec->channels - 1) / (isize*dec->channels);
@@ -1697,39 +1699,42 @@ static void rate_emu_sleep(InputStream *ist)
 static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
 {
-static unsigned int samples_size = 0;
+AVFrame *decoded_frame;
+AVCodecContext *avctx = ist->st->codec;
 int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
-uint8_t *decoded_data_buf = NULL;
-int decoded_data_size = 0;
 int i, ret;
-if (pkt && samples_size < FFMAX(pkt->size * bps, AVCODEC_MAX_AUDIO_FRAME_SIZE)) {
-av_free(samples);
-samples_size = FFMAX(pkt->size * bps, AVCODEC_MAX_AUDIO_FRAME_SIZE);
-samples = av_malloc(samples_size);
-}
-decoded_data_size = samples_size;
+if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
+return AVERROR(ENOMEM);
+else
+avcodec_get_frame_defaults(ist->decoded_frame);
+decoded_frame = ist->decoded_frame;
-ret = avcodec_decode_audio3(ist->st->codec, samples, &decoded_data_size,
-pkt);
-if (ret < 0)
+ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
+if (ret < 0) {
 return ret;
-*got_output = decoded_data_size > 0;
-/* Some bug in mpeg audio decoder gives */
-/* decoded_data_size < 0, it seems they are overflows */
+}
 if (!*got_output) {
 /* no audio frame */
 return ret;
 }
-decoded_data_buf = (uint8_t *)samples;
-ist->next_pts += ((int64_t)AV_TIME_BASE/bps * decoded_data_size) /
-(ist->st->codec->sample_rate * ist->st->codec->channels);
+/* if the decoder provides a pts, use it instead of the last packet pts.
+the decoder could be delaying output by a packet or more. */
+if (decoded_frame->pts != AV_NOPTS_VALUE)
+ist->next_pts = decoded_frame->pts;
+/* increment next_pts to use for the case where the input stream does not
+have timestamps or there are multiple frames in the packet */
+ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
+avctx->sample_rate;
 // preprocess audio (volume)
 if (audio_volume != 256) {
-switch (ist->st->codec->sample_fmt) {
+int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
+void *samples = decoded_frame->data[0];
+switch (avctx->sample_fmt) {
 case AV_SAMPLE_FMT_U8:
 {
 uint8_t *volp = samples;
@@ -1790,9 +1795,9 @@ static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
 continue;
-do_audio_out(output_files[ost->file_index].ctx, ost, ist,
-decoded_data_buf, decoded_data_size);
+do_audio_out(output_files[ost->file_index].ctx, ost, ist, decoded_frame);
 }
 return ret;
 }
@@ -1806,8 +1811,11 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int
 int frame_available = 1;
 #endif
-if (!(decoded_frame = avcodec_alloc_frame()))
+if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
 return AVERROR(ENOMEM);
+else
+avcodec_get_frame_defaults(ist->decoded_frame);
+decoded_frame = ist->decoded_frame;
 pkt->pts = *pkt_pts;
 pkt->dts = ist->pts;
 *pkt_pts = AV_NOPTS_VALUE;
@@ -1815,12 +1823,11 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int
 ret = avcodec_decode_video2(ist->st->codec,
 decoded_frame, got_output, pkt);
 if (ret < 0)
-goto fail;
+return ret;
 quality = same_quant ? decoded_frame->quality : 0;
 if (!*got_output) {
 /* no picture yet */
-av_freep(&decoded_frame);
 return ret;
 }
 ist->next_pts = ist->pts = decoded_frame->best_effort_timestamp;
@@ -1852,10 +1859,12 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int
 decoded_frame->pts = ist->pts;
 av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, AV_VSRC_BUF_FLAG_OVERWRITE);
-if (!(filtered_frame = avcodec_alloc_frame())) {
-ret = AVERROR(ENOMEM);
-goto fail;
-}
+if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) {
+av_free(buffer_to_free);
+return AVERROR(ENOMEM);
+} else
+avcodec_get_frame_defaults(ist->filtered_frame);
+filtered_frame = ist->filtered_frame;
 frame_available = avfilter_poll_frame(ost->output_video_filter->inputs[0]);
 }
 while (frame_available) {
@@ -1884,13 +1893,10 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int
 if (ost->picref)
 avfilter_unref_buffer(ost->picref);
 }
-av_freep(&filtered_frame);
 #endif
 }
-fail:
 av_free(buffer_to_free);
-av_freep(&decoded_frame);
 return ret;
 }

@@ -196,7 +196,7 @@ void parse_loglevel(int argc, char **argv, const OptionDef *options);
 *
 * @param s Corresponding format context.
 * @param st Stream from s to be checked.
-* @param spec A stream specifier of the [v|a|s|d]:[<stream index>] form.
+* @param spec A stream specifier of the [v|a|s|d]:[\<stream index\>] form.
 *
 * @return 1 if the stream matches, 0 if it doesn't, <0 on error
 */

@@ -77,6 +77,11 @@ Use the JavaDoc/Doxygen format (see examples below) so that code documentation
 can be generated automatically. All nontrivial functions should have a comment
 above them explaining what the function does, even if it is just one sentence.
 All structures and their member variables should be documented, too.
+Avoid Qt-style and similar Doxygen syntax with @code{!} in it, i.e. replace
+@code{//!} with @code{///} and similar. Also @@ syntax should be employed
+for markup commands, i.e. use @code{@@param} and not @code{\param}.
 @example
 /**
 * @@file

@@ -33,6 +33,7 @@
 #include "libavutil/opt.h"
 #include "libavcodec/avcodec.h"
 #include "libavutil/mathematics.h"
+#include "libavutil/samplefmt.h"
 #define INBUF_SIZE 4096
 #define AUDIO_INBUF_SIZE 20480
@@ -114,11 +115,11 @@ static void audio_decode_example(const char *outfilename, const char *filename)
 {
 AVCodec *codec;
 AVCodecContext *c= NULL;
-int out_size, len;
+int len;
 FILE *f, *outfile;
-uint8_t *outbuf;
 uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
 AVPacket avpkt;
+AVFrame *decoded_frame = NULL;
 av_init_packet(&avpkt);
@@ -139,8 +140,6 @@ static void audio_decode_example(const char *outfilename, const char *filename)
 exit(1);
 }
-outbuf = malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
 f = fopen(filename, "rb");
 if (!f) {
 fprintf(stderr, "could not open %s\n", filename);
@@ -157,15 +156,27 @@ static void audio_decode_example(const char *outfilename, const char *filename)
 avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);
 while (avpkt.size > 0) {
-out_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
-len = avcodec_decode_audio3(c, (short *)outbuf, &out_size, &avpkt);
+int got_frame = 0;
+if (!decoded_frame) {
+if (!(decoded_frame = avcodec_alloc_frame())) {
+fprintf(stderr, "out of memory\n");
+exit(1);
+}
+} else
+avcodec_get_frame_defaults(decoded_frame);
+len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
 if (len < 0) {
 fprintf(stderr, "Error while decoding\n");
 exit(1);
 }
-if (out_size > 0) {
+if (got_frame) {
 /* if a frame has been decoded, output it */
-fwrite(outbuf, 1, out_size, outfile);
+int data_size = av_samples_get_buffer_size(NULL, c->channels,
+decoded_frame->nb_samples,
+c->sample_fmt, 1);
+fwrite(decoded_frame->data[0], 1, data_size, outfile);
 }
 avpkt.size -= len;
 avpkt.data += len;
@@ -185,10 +196,10 @@ static void audio_decode_example(const char *outfilename, const char *filename)
 fclose(outfile);
 fclose(f);
-free(outbuf);
 avcodec_close(c);
 av_free(c);
+av_free(decoded_frame);
 }
 /*

@@ -168,7 +168,6 @@ static uint8_t *audio_buf;
 static uint8_t *audio_out;
 static unsigned int allocated_audio_out_size, allocated_audio_buf_size;
-static void *samples;
 static uint8_t *input_tmp= NULL;
 #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
@@ -179,6 +178,8 @@ typedef struct InputStream {
 int discard; /* true if stream data should be discarded */
 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
 AVCodec *dec;
+AVFrame *decoded_frame;
+AVFrame *filtered_frame;
 int64_t start; /* time when read started */
 int64_t next_pts; /* synthetic pts for cases where pkt.pts
@@ -658,8 +659,11 @@ void av_noreturn exit_program(int ret)
 for(i=0;i<nb_input_files;i++) {
 av_close_input_file(input_files[i].ctx);
 }
-for (i = 0; i < nb_input_streams; i++)
+for (i = 0; i < nb_input_streams; i++) {
+av_freep(&input_streams[i].decoded_frame);
+av_freep(&input_streams[i].filtered_frame);
 av_dict_free(&input_streams[i].opts);
+}
 if (vstats_file)
 fclose(vstats_file);
@@ -674,7 +678,6 @@ void av_noreturn exit_program(int ret)
 av_free(audio_buf);
 av_free(audio_out);
 allocated_audio_buf_size= allocated_audio_out_size= 0;
-av_free(samples);
 #if CONFIG_AVFILTER
 avfilter_uninit();
@@ -838,14 +841,11 @@ static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_
 memset(buf, fill_char, size);
 }
-static void do_audio_out(AVFormatContext *s,
-OutputStream *ost,
-InputStream *ist,
-unsigned char *buf, int size)
+static void do_audio_out(AVFormatContext *s, OutputStream *ost,
+InputStream *ist, AVFrame *decoded_frame)
 {
 uint8_t *buftmp;
 int64_t audio_out_size, audio_buf_size;
-int64_t allocated_for_size= size;
 int size_out, frame_bytes, ret, resample_changed;
 AVCodecContext *enc= ost->st->codec;
@@ -853,6 +853,9 @@ static void do_audio_out(AVFormatContext *s,
 int osize = av_get_bytes_per_sample(enc->sample_fmt);
 int isize = av_get_bytes_per_sample(dec->sample_fmt);
 const int coded_bps = av_get_bits_per_sample(enc->codec->id);
+uint8_t *buf = decoded_frame->data[0];
+int size = decoded_frame->nb_samples * dec->channels * isize;
+int64_t allocated_for_size = size;
 need_realloc:
 audio_buf_size= (allocated_for_size + isize*dec->channels - 1) / (isize*dec->channels);
@@ -1732,39 +1735,42 @@ static void rate_emu_sleep(InputStream *ist)
 static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
 {
-static unsigned int samples_size = 0;
+AVFrame *decoded_frame;
+AVCodecContext *avctx = ist->st->codec;
 int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
-uint8_t *decoded_data_buf = NULL;
-int decoded_data_size = 0;
 int i, ret;
-if (pkt && samples_size < FFMAX(pkt->size * bps, AVCODEC_MAX_AUDIO_FRAME_SIZE)) {
-av_free(samples);
-samples_size = FFMAX(pkt->size * bps, AVCODEC_MAX_AUDIO_FRAME_SIZE);
-samples = av_malloc(samples_size);
-}
-decoded_data_size = samples_size;
+if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
+return AVERROR(ENOMEM);
+else
+avcodec_get_frame_defaults(ist->decoded_frame);
+decoded_frame = ist->decoded_frame;
-ret = avcodec_decode_audio3(ist->st->codec, samples, &decoded_data_size,
-pkt);
-if (ret < 0)
+ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
+if (ret < 0) {
 return ret;
-*got_output = decoded_data_size > 0;
-/* Some bug in mpeg audio decoder gives */
-/* decoded_data_size < 0, it seems they are overflows */
+}
 if (!*got_output) {
 /* no audio frame */
 return ret;
 }
-decoded_data_buf = (uint8_t *)samples;
-ist->next_pts += ((int64_t)AV_TIME_BASE/bps * decoded_data_size) /
-(ist->st->codec->sample_rate * ist->st->codec->channels);
+/* if the decoder provides a pts, use it instead of the last packet pts.
+the decoder could be delaying output by a packet or more. */
+if (decoded_frame->pts != AV_NOPTS_VALUE)
+ist->next_pts = decoded_frame->pts;
+/* increment next_pts to use for the case where the input stream does not
+have timestamps or there are multiple frames in the packet */
+ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
+avctx->sample_rate;
 // preprocess audio (volume)
 if (audio_volume != 256) {
-switch (ist->st->codec->sample_fmt) {
+int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
+void *samples = decoded_frame->data[0];
+switch (avctx->sample_fmt) {
 case AV_SAMPLE_FMT_U8:
 {
 uint8_t *volp = samples;
@@ -1825,9 +1831,9 @@ static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
 continue;
-do_audio_out(output_files[ost->file_index].ctx, ost, ist,
-decoded_data_buf, decoded_data_size);
+do_audio_out(output_files[ost->file_index].ctx, ost, ist, decoded_frame);
 }
 return ret;
 }
@@ -1844,8 +1850,11 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int
 int64_t *best_effort_timestamp;
 AVRational *frame_sample_aspect;
-if (!(decoded_frame = avcodec_alloc_frame()))
+if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
 return AVERROR(ENOMEM);
+else
+avcodec_get_frame_defaults(ist->decoded_frame);
+decoded_frame = ist->decoded_frame;
 pkt->pts = *pkt_pts;
 pkt->dts = *pkt_dts;
 *pkt_pts = AV_NOPTS_VALUE;
@@ -1867,12 +1876,11 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int
 ret = avcodec_decode_video2(ist->st->codec,
 decoded_frame, got_output, pkt);
 if (ret < 0)
-goto fail;
+return ret;
 quality = same_quant ? decoded_frame->quality : 0;
 if (!*got_output) {
 /* no picture yet */
-av_freep(&decoded_frame);
 return ret;
 }
@@ -1917,10 +1925,12 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int
 AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base;
 if (av_buffersink_get_buffer_ref(ost->output_video_filter, &ost->picref, 0) < 0)
 goto cont;
-if (!filtered_frame && !(filtered_frame = avcodec_alloc_frame())) {
-ret = AVERROR(ENOMEM);
-goto fail;
-}
+if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) {
+av_free(buffer_to_free);
+return AVERROR(ENOMEM);
+} else
+avcodec_get_frame_defaults(ist->filtered_frame);
+filtered_frame = ist->filtered_frame;
 *filtered_frame= *decoded_frame; //for me_threshold
 if (ost->picref) {
 avfilter_fill_frame_from_video_buffer_ref(filtered_frame, ost->picref);
@@ -1942,13 +1952,10 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int
 frame_available = ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
 avfilter_unref_buffer(ost->picref);
 }
-av_freep(&filtered_frame);
 #endif
 }
-fail:
 av_free(buffer_to_free);
-av_freep(&decoded_frame);
 return ret;
 }

@@ -151,11 +151,10 @@ typedef struct VideoState {
 AVStream *audio_st;
 PacketQueue audioq;
 int audio_hw_buf_size;
-/* samples output by the codec. we reserve more space for avsync
-compensation, resampling and format conversion */
-DECLARE_ALIGNED(16,uint8_t,audio_buf1)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
 DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
+uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
 uint8_t *audio_buf;
+uint8_t *audio_buf1;
 unsigned int audio_buf_size; /* in bytes */
 int audio_buf_index; /* in bytes */
 int audio_write_buf_size;
@@ -174,6 +173,7 @@ typedef struct VideoState {
 double audio_current_pts_drift;
 int frame_drops_early;
 int frame_drops_late;
+AVFrame *frame;
 enum ShowMode {
 SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
@@ -1998,8 +1998,8 @@ static int synchronize_audio(VideoState *is, short *samples,
 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
 if (wanted_size < min_size)
 wanted_size = min_size;
-else if (wanted_size > FFMIN3(max_size, sizeof(is->audio_buf1), sizeof(is->audio_buf2)))
-wanted_size = FFMIN3(max_size, sizeof(is->audio_buf1), sizeof(is->audio_buf2));
+else if (wanted_size > FFMIN3(max_size, samples_size, sizeof(is->audio_buf2)))
+wanted_size = FFMIN3(max_size, samples_size, sizeof(is->audio_buf2));
 /* add or remove samples to correction the synchro */
 if (wanted_size < samples_size) {
@@ -2043,7 +2043,7 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
 AVPacket *pkt = &is->audio_pkt;
 AVCodecContext *dec= is->audio_st->codec;
 int len1, len2, data_size, resampled_data_size;
-int64_t dec_channel_layout;
+int64_t dec_channel_layout, got_frame;
 double pts;
 int new_packet = 0;
 int flush_complete = 0;
@@ -2051,13 +2051,16 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
 for(;;) {
 /* NOTE: the audio packet can contain several frames */
 while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
+if (!is->frame) {
+if (!(is->frame = avcodec_alloc_frame()))
+return AVERROR(ENOMEM);
+} else
+avcodec_get_frame_defaults(is->frame);
 if (flush_complete)
 break;
 new_packet = 0;
-data_size = sizeof(is->audio_buf1);
-len1 = avcodec_decode_audio3(dec,
-(int16_t *)is->audio_buf1, &data_size,
-pkt_temp);
+len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
 if (len1 < 0) {
 /* if error, we skip the frame */
 pkt_temp->size = 0;
@@ -2067,12 +2070,15 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
 pkt_temp->data += len1;
 pkt_temp->size -= len1;
-if (data_size <= 0) {
+if (!got_frame) {
 /* stop sending empty packets if the decoder is finished */
 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
 flush_complete = 1;
 continue;
 }
+data_size = av_samples_get_buffer_size(NULL, dec->channels,
+is->frame->nb_samples,
+dec->sample_fmt, 1);
 dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
@@ -2101,7 +2107,7 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
 resampled_data_size = data_size;
 if (is->swr_ctx) {
-const uint8_t *in[] = {is->audio_buf1};
+const uint8_t *in[] = { is->frame->data[0] };
 uint8_t *out[] = {is->audio_buf2};
 len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt),
 in, data_size / dec->channels / av_get_bytes_per_sample(dec->sample_fmt));
@@ -2116,7 +2122,7 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
 is->audio_buf = is->audio_buf2;
 resampled_data_size = len2 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
 } else {
-is->audio_buf= is->audio_buf1;
+is->audio_buf = is->frame->data[0];
 }
 /* if no pts, then compute it */
@@ -2150,11 +2156,7 @@ static int audio_decode_frame(VideoState *is, double *pts_ptr)
 if (pkt->data == flush_pkt.data)
 avcodec_flush_buffers(dec);
-pkt_temp->data = pkt->data;
-pkt_temp->size = pkt->size;
-pkt_temp->flags = pkt->flags;
-pkt_temp->side_data = pkt->side_data;
-pkt_temp->side_data_elems = pkt->side_data_elems;
+*pkt_temp = *pkt;
 /* if update the audio clock with the pts */
 if (pkt->pts != AV_NOPTS_VALUE) {
@@ -2178,9 +2180,8 @@ static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
 audio_size = audio_decode_frame(is, &pts);
 if (audio_size < 0) {
 /* if error, just output silence */
-is->audio_buf = is->audio_buf1;
-is->audio_buf_size = 256 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
-memset(is->audio_buf, 0, is->audio_buf_size);
+is->audio_buf = is->silence_buf;
+is->audio_buf_size = sizeof(is->silence_buf);
 } else {
 if (is->show_mode != SHOW_MODE_VIDEO)
 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
@@ -2356,6 +2357,9 @@ static void stream_component_close(VideoState *is, int stream_index)
 if (is->swr_ctx)
 swr_free(&is->swr_ctx);
 av_free_packet(&is->audio_pkt);
+av_freep(&is->audio_buf1);
+is->audio_buf = NULL;
+av_freep(&is->frame);
 if (is->rdft) {
 av_rdft_end(is->rdft);

@@ -653,7 +653,7 @@ static void decode_gains(AMRContext *p, const AMRNBSubframe *amr_subframe,
 static void apply_ir_filter(float *out, const AMRFixed *in,
 const float *filter)
 {
-float filter1[AMR_SUBFRAME_SIZE], //!< filters at pitch lag*1 and *2
+float filter1[AMR_SUBFRAME_SIZE], ///< filters at pitch lag*1 and *2
 filter2[AMR_SUBFRAME_SIZE];
 int lag = in->pitch_lag;
 float fac = in->pitch_fac;

@@ -22,10 +22,11 @@
 /**
 * @file
 * Cinepak video decoder
-* by Ewald Snel <ewald@rambo.its.tudelft.nl>
-* For more information on the Cinepak algorithm, visit:
+* @author Ewald Snel <ewald@rambo.its.tudelft.nl>
+*
+* @see For more information on the Cinepak algorithm, visit:
 * http://www.csse.monash.edu.au/~timf/
-* For more information on the quirky data inside Sega FILM/CPK files, visit:
+* @see For more information on the quirky data inside Sega FILM/CPK files, visit:
 * http://wiki.multimedia.cx/index.php?title=Sega_FILM
 */

@@ -22,9 +22,9 @@
 /**
 * @file
 * Electronic Arts Madcow Video Decoder
-* by Peter Ross <pross@xvid.org>
+* @author Peter Ross <pross@xvid.org>
 *
-* Technical details here:
+* @see technical details at
 * http://wiki.multimedia.cx/index.php?title=Electronic_Arts_MAD
 */

@@ -31,7 +31,9 @@
 #define PREV_SAMPLES_BUF_SIZE 1024
 typedef struct {
+const AVClass *class;
 AVFrame frame;
+int bits_per_codeword;
 int16_t prev_samples[PREV_SAMPLES_BUF_SIZE]; ///< memory of past decoded samples
 int prev_samples_pos; ///< the number of values in prev_samples

@@ -37,6 +37,21 @@
 #include "avcodec.h"
 #include "get_bits.h"
 #include "g722.h"
+#include "libavutil/opt.h"
+#define OFFSET(x) offsetof(G722Context, x)
+#define AD AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM
+static const AVOption options[] = {
+{ "bits_per_codeword", "Bits per G722 codeword", OFFSET(bits_per_codeword), AV_OPT_TYPE_FLAGS, { 8 }, 6, 8, AD },
+{ NULL }
+};
+static const AVClass g722_decoder_class = {
+.class_name = "g722 decoder",
+.item_name = av_default_item_name,
+.option = options,
+.version = LIBAVUTIL_VERSION_INT,
+};
 static av_cold int g722_decode_init(AVCodecContext * avctx)
 {
@@ -48,20 +63,6 @@ static av_cold int g722_decode_init(AVCodecContext * avctx)
 }
 avctx->sample_fmt = AV_SAMPLE_FMT_S16;
-switch (avctx->bits_per_coded_sample) {
-case 8:
-case 7:
-case 6:
-break;
-default:
-av_log(avctx, AV_LOG_WARNING, "Unsupported bits_per_coded_sample [%d], "
-"assuming 8\n",
-avctx->bits_per_coded_sample);
-case 0:
-avctx->bits_per_coded_sample = 8;
-break;
-}
 c->band[0].scale_factor = 8;
 c->band[1].scale_factor = 2;
 c->prev_samples_pos = 22;
@@ -89,7 +90,7 @@ static int g722_decode_frame(AVCodecContext *avctx, void *data,
 G722Context *c = avctx->priv_data;
 int16_t *out_buf;
 int j, ret;
-const int skip = 8 - avctx->bits_per_coded_sample;
+const int skip = 8 - c->bits_per_codeword;
 const int16_t *quantizer_table = low_inv_quants[skip];
 GetBitContext gb;
@@ -149,4 +150,5 @@ AVCodec ff_adpcm_g722_decoder = {
 .decode = g722_decode_frame,
 .capabilities = CODEC_CAP_DR1,
 .long_name = NULL_IF_CONFIG_SMALL("G.722 ADPCM"),
+.priv_class = &g722_decoder_class,
 };
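
With the decoder's private class in place, the codeword width is selected through the AVOptions API rather than via bits_per_coded_sample. A usage sketch, not part of the merge itself, assuming the avcodec_alloc_context3()/avcodec_open2() API of this period:

    AVCodec *dec = avcodec_find_decoder(CODEC_ID_ADPCM_G722);
    AVCodecContext *avctx = avcodec_alloc_context3(dec);
    /* the option lives on the decoder's private context allocated above */
    av_opt_set_int(avctx->priv_data, "bits_per_codeword", 6, 0);
    avcodec_open2(avctx, dec, NULL);

The same option is also reachable from the avconv/ffmpeg command line through the generic AVOptions mechanism.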

@@ -139,7 +139,7 @@ static int g722_encode_trellis(AVCodecContext *avctx,
 nodes[i][0]->state = c->band[i];
 }
-for (i = 0; i < buf_size >> 1; i++) {
+for (i = 0; i < buf_size; i++) {
 int xlow, xhigh;
 struct TrellisNode *next[2];
 int heap_pos[2] = {0, 0};
@@ -285,7 +285,7 @@ static int g722_encode_frame(AVCodecContext *avctx,
 if (avctx->trellis)
 return g722_encode_trellis(avctx, dst, buf_size, data);
-for (i = 0; i < buf_size >> 1; i++) {
+for (i = 0; i < buf_size; i++) {
 int xlow, xhigh, ihigh, ilow;
 filter_samples(c, &samples[2*i], &xlow, &xhigh);
 ihigh = encode_high(&c->band[1], xhigh);

@@ -51,7 +51,7 @@ typedef struct {
 /// or "7" for custom one
 VLC *tab; /// pointer to the table associated with tab_sel
-//! the following are used only when tab_sel == 7
+/// the following are used only when tab_sel == 7
 IVIHuffDesc cust_desc; /// custom Huffman codebook descriptor
 VLC cust_tab; /// vlc table for custom codebook
 } IVIHuffTab;

@@ -75,7 +75,7 @@ void ff_acelp_lsf2lspd(double *lsp, const float *lsf, int lp_order)
 /**
 * @brief decodes polynomial coefficients from LSP
-* @param f [out] decoded polynomial coefficients (-0x20000000 <= (3.22) <= 0x1fffffff)
+* @param[out] f decoded polynomial coefficients (-0x20000000 <= (3.22) <= 0x1fffffff)
 * @param lsp LSP coefficients (-0x8000 <= (0.15) <= 0x7fff)
 */
 static void lsp2poly(int* f, const int16_t* lsp, int lp_half_order)

@@ -45,35 +45,35 @@ static const char* sample_message =
 "a sample of this file.";
 typedef struct SubStream {
-//! Set if a valid restart header has been read. Otherwise the substream cannot be decoded.
+/// Set if a valid restart header has been read. Otherwise the substream cannot be decoded.
 uint8_t restart_seen;
 //@{
 /** restart header data */
-//! The type of noise to be used in the rematrix stage.
+/// The type of noise to be used in the rematrix stage.
 uint16_t noise_type;
-//! The index of the first channel coded in this substream.
+/// The index of the first channel coded in this substream.
 uint8_t min_channel;
-//! The index of the last channel coded in this substream.
+/// The index of the last channel coded in this substream.
 uint8_t max_channel;
-//! The number of channels input into the rematrix stage.
+/// The number of channels input into the rematrix stage.
 uint8_t max_matrix_channel;
-//! For each channel output by the matrix, the output channel to map it to
+/// For each channel output by the matrix, the output channel to map it to
 uint8_t ch_assign[MAX_CHANNELS];
-//! Channel coding parameters for channels in the substream
+/// Channel coding parameters for channels in the substream
 ChannelParams channel_params[MAX_CHANNELS];
-//! The left shift applied to random noise in 0x31ea substreams.
+/// The left shift applied to random noise in 0x31ea substreams.
 uint8_t noise_shift;
-//! The current seed value for the pseudorandom noise generator(s).
+/// The current seed value for the pseudorandom noise generator(s).
 uint32_t noisegen_seed;
-//! Set if the substream contains extra info to check the size of VLC blocks.
+/// Set if the substream contains extra info to check the size of VLC blocks.
 uint8_t data_check_present;
-//! Bitmask of which parameter sets are conveyed in a decoding parameter block.
+/// Bitmask of which parameter sets are conveyed in a decoding parameter block.
 uint8_t param_presence_flags;
 #define PARAM_BLOCKSIZE (1 << 7)
 #define PARAM_MATRIX (1 << 6)
@@ -88,32 +88,32 @@ typedef struct SubStream {
 //@{
 /** matrix data */
-//! Number of matrices to be applied.
+/// Number of matrices to be applied.
 uint8_t num_primitive_matrices;
-//! matrix output channel
+/// matrix output channel
 uint8_t matrix_out_ch[MAX_MATRICES];
-//! Whether the LSBs of the matrix output are encoded in the bitstream.
+/// Whether the LSBs of the matrix output are encoded in the bitstream.
 uint8_t lsb_bypass[MAX_MATRICES];
-//! Matrix coefficients, stored as 2.14 fixed point.
+/// Matrix coefficients, stored as 2.14 fixed point.
 int32_t matrix_coeff[MAX_MATRICES][MAX_CHANNELS];
-//! Left shift to apply to noise values in 0x31eb substreams.
+/// Left shift to apply to noise values in 0x31eb substreams.
 uint8_t matrix_noise_shift[MAX_MATRICES];
 //@}
-//! Left shift to apply to Huffman-decoded residuals.
+/// Left shift to apply to Huffman-decoded residuals.
 uint8_t quant_step_size[MAX_CHANNELS];
-//! number of PCM samples in current audio block
+/// number of PCM samples in current audio block
 uint16_t blocksize;
-//! Number of PCM samples decoded so far in this frame.
+/// Number of PCM samples decoded so far in this frame.
 uint16_t blockpos;
-//! Left shift to apply to decoded PCM values to get final 24-bit output.
+/// Left shift to apply to decoded PCM values to get final 24-bit output.
 int8_t output_shift[MAX_CHANNELS];
-//! Running XOR of all output samples.
+/// Running XOR of all output samples.
 int32_t lossless_check_data;
 } SubStream;
@@ -122,24 +122,24 @@ typedef struct MLPDecodeContext {
 AVCodecContext *avctx;
 AVFrame frame;
-//! Current access unit being read has a major sync.
+/// Current access unit being read has a major sync.
 int is_major_sync_unit;
-//! Set if a valid major sync block has been read. Otherwise no decoding is possible.
+/// Set if a valid major sync block has been read. Otherwise no decoding is possible.
 uint8_t params_valid;
-//! Number of substreams contained within this stream.
+/// Number of substreams contained within this stream.
 uint8_t num_substreams;
-//! Index of the last substream to decode - further substreams are skipped.
+/// Index of the last substream to decode - further substreams are skipped.
 uint8_t max_decoded_substream;
-//! Stream needs channel reordering to comply with FFmpeg's channel order
+/// Stream needs channel reordering to comply with FFmpeg's channel order
 uint8_t needs_reordering;
-//! number of PCM samples contained in each frame
+/// number of PCM samples contained in each frame
 int access_unit_size;
-//! next power of two above the number of samples in each frame
+/// next power of two above the number of samples in each frame
 int access_unit_size_pow2;
 SubStream substream[MAX_SUBSTREAMS];

@@ -40,16 +40,16 @@
 typedef struct {
 /// @name QCELP excitation codebook parameters
 /// @{
-uint8_t cbsign[16]; ///!< sign of the codebook gain for each codebook subframe
-uint8_t cbgain[16]; ///!< unsigned codebook gain for each codebook subframe
-uint8_t cindex[16]; ///!< codebook index for each codebook subframe
+uint8_t cbsign[16]; ///< sign of the codebook gain for each codebook subframe
+uint8_t cbgain[16]; ///< unsigned codebook gain for each codebook subframe
+uint8_t cindex[16]; ///< codebook index for each codebook subframe
 /// @}
 /// @name QCELP pitch prediction parameters
 /// @{
-uint8_t plag[4]; ///!< pitch lag for each pitch subframe
-uint8_t pfrac[4]; ///!< fractional pitch lag for each pitch subframe
-uint8_t pgain[4]; ///!< pitch gain for each pitch subframe
+uint8_t plag[4]; ///< pitch lag for each pitch subframe
+uint8_t pfrac[4]; ///< fractional pitch lag for each pitch subframe
+uint8_t pgain[4]; ///< pitch gain for each pitch subframe
 /// @}
 /**
@@ -266,7 +266,7 @@ static const QCELPBitmap qcelp_rate_octave_bitmap[] = {
 * the QCELPContext
 */
 static const QCELPBitmap * const qcelp_unpacking_bitmaps_per_rate[5] = {
-NULL, ///!< for SILENCE rate
+NULL, ///< for SILENCE rate
 qcelp_rate_octave_bitmap,
 qcelp_rate_quarter_bitmap,
 qcelp_rate_half_bitmap,
@@ -274,7 +274,7 @@ static const QCELPBitmap * const qcelp_unpacking_bitmaps_per_rate[5] = {
 };
 static const uint16_t qcelp_unpacking_bitmaps_lengths[5] = {
-0, ///!< for SILENCE rate
+0, ///< for SILENCE rate
 FF_ARRAY_ELEMS(qcelp_rate_octave_bitmap),
 FF_ARRAY_ELEMS(qcelp_rate_quarter_bitmap),
 FF_ARRAY_ELEMS(qcelp_rate_half_bitmap),

@@ -27,7 +27,7 @@
 i = scan[coeff--]; \
 block[i] = (c) * quant[i];
-//! aligns the bitstream to the give power of two
+/// aligns the bitstream to the given power of two
 #define ALIGN(a) \
 n = (-get_bits_count(gb)) & (a - 1); \
 if (n) {skip_bits(gb, n);}

@@ -1468,8 +1468,8 @@ int av_get_bits_per_sample(enum CodecID codec_id){
 case CODEC_ID_ADPCM_SWF:
 case CODEC_ID_ADPCM_MS:
 case CODEC_ID_ADPCM_YAMAHA:
-return 4;
 case CODEC_ID_ADPCM_G722:
+return 4;
 case CODEC_ID_PCM_ALAW:
 case CODEC_ID_PCM_MULAW:
 case CODEC_ID_PCM_S8:
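
For reference, the new value matches the codec's own arithmetic: G.722 produces 64 kbit/s from 16 kHz input, i.e. 64000 / 16000 = 4 bits per sample, which is what the wav muxing/demuxing regression test added in this merge relies on for block alignment and duration.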

@@ -21,7 +21,7 @@
 #define AVCODEC_VERSION_H
 #define LIBAVCODEC_VERSION_MAJOR 53
-#define LIBAVCODEC_VERSION_MINOR 40
+#define LIBAVCODEC_VERSION_MINOR 41
 #define LIBAVCODEC_VERSION_MICRO 0
 #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \

@@ -23,7 +23,6 @@
 * @file
 * PulseAudio input using the simple API.
 * @author Luca Barbato <lu_zero@gentoo.org>
-*
 */
 #include <pulse/simple.h>

@@ -31,8 +31,9 @@
 /**
 * @file
-* X11 frame device demuxer by Clemens Fruhwirth <clemens@endorphin.org>
-* and Edouard Gomez <ed.gomez@free.fr>.
+* X11 frame device demuxer
+* @author Clemens Fruhwirth <clemens@endorphin.org>
+* @author Edouard Gomez <ed.gomez@free.fr>
 */
 #include "config.h"

@@ -293,7 +293,7 @@ typedef struct AVFormatParameters {
 #endif
 } AVFormatParameters;
-//! Demuxer will use avio_open, no opened file should be provided by the caller.
+/// Demuxer will use avio_open, no opened file should be provided by the caller.
 #define AVFMT_NOFILE 0x0001
 #define AVFMT_NEEDNUMBER 0x0002 /**< Needs '%d' in filename. */
 #define AVFMT_SHOW_IDS 0x0008 /**< Show format stream IDs numbers. */

@@ -22,10 +22,10 @@
 /**
 * @file
 * Matroska file demuxer
-* by Ronald Bultje <rbultje@ronald.bitfreak.net>
-* with a little help from Moritz Bunkus <moritz@bunkus.org>
-* totally reworked by Aurelien Jacobs <aurel@gnuage.org>
-* Specs available on the Matroska project page: http://www.matroska.org/.
+* @author Ronald Bultje <rbultje@ronald.bitfreak.net>
+* @author with a little help from Moritz Bunkus <moritz@bunkus.org>
+* @author totally reworked by Aurelien Jacobs <aurel@gnuage.org>
+* @see specs available on the Matroska project page: http://www.matroska.org/
 */
 #include <stdio.h>

@@ -957,6 +957,8 @@ static int mov_read_stco(MOVContext *c, AVIOContext *pb, MOVAtom atom)
 entries = avio_rb32(pb);
+if (!entries)
+return 0;
 if (entries >= UINT_MAX/sizeof(int64_t))
 return -1;
@@ -1398,6 +1400,8 @@ static int mov_read_stsc(MOVContext *c, AVIOContext *pb, MOVAtom atom)
 av_dlog(c->fc, "track[%i].stsc.entries = %i\n", c->fc->nb_streams-1, entries);
+if (!entries)
+return 0;
 if (entries >= UINT_MAX / sizeof(*sc->stsc_data))
 return -1;
 sc->stsc_data = av_malloc(entries * sizeof(*sc->stsc_data));
@@ -1513,6 +1517,8 @@ static int mov_read_stsz(MOVContext *c, AVIOContext *pb, MOVAtom atom)
 return -1;
 }
+if (!entries)
+return 0;
 if (entries >= UINT_MAX / sizeof(int) || entries >= (UINT_MAX - 4) / field_size)
 return -1;
 sc->sample_sizes = av_malloc(entries * sizeof(int));
@@ -1615,6 +1621,8 @@ static int mov_read_ctts(MOVContext *c, AVIOContext *pb, MOVAtom atom)
 av_dlog(c->fc, "track[%i].ctts.entries = %i\n", c->fc->nb_streams-1, entries);
+if (!entries)
+return 0;
 if (entries >= UINT_MAX / sizeof(*sc->ctts_data))
 return -1;
 sc->ctts_data = av_malloc(entries * sizeof(*sc->ctts_data));
@@ -1675,6 +1683,8 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
 current_dts -= sc->dts_shift;
+if (!sc->sample_count)
+return;
 if (sc->sample_count >= UINT_MAX / sizeof(*st->index_entries))
 return;
 st->index_entries = av_malloc(sc->sample_count*sizeof(*st->index_entries));

@@ -47,7 +47,7 @@ static int nuv_probe(AVProbeData *p) {
 return 0;
 }
-//! little macro to sanitize packet size
+/// little macro to sanitize packet size
 #define PKTSIZE(s) (s & 0xffffff)
 /**

@@ -2,10 +2,9 @@
 * Ogg bitstream support
 * Luca Barbato <lu_zero@gentoo.org>
 * Based on tcvp implementation
-*
 */
-/**
+/*
 Copyright (C) 2005 Michael Ahlberg, Måns Rullgård
 Permission is hereby granted, free of charge, to any person
@@ -27,7 +26,7 @@
 WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 DEALINGS IN THE SOFTWARE.
-**/
+*/
 #include <stdio.h>
 #include "oggdec.h"

@@ -2209,10 +2209,10 @@ static int has_decode_delay_been_guessed(AVStream *st)
 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
 {
-int16_t *samples;
 AVCodec *codec;
-int got_picture, data_size, ret=0;
+int got_picture, ret = 0;
 AVFrame picture;
+AVPacket pkt = *avpkt;
 if(!st->codec->codec){
 codec = avcodec_find_decoder(st->codec->codec_id);
@@ -2223,28 +2223,29 @@ static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **option
 return ret;
 }
-if(!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st) ||
-(!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF)) {
+while (pkt.size > 0 && ret >= 0 &&
+(!has_codec_parameters(st->codec) ||
+!has_decode_delay_been_guessed(st) ||
+(!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
+got_picture = 0;
+avcodec_get_frame_defaults(&picture);
 switch(st->codec->codec_type) {
 case AVMEDIA_TYPE_VIDEO:
-avcodec_get_frame_defaults(&picture);
 ret = avcodec_decode_video2(st->codec, &picture,
-&got_picture, avpkt);
-if (got_picture)
-st->info->nb_decoded_frames++;
+&got_picture, &pkt);
 break;
 case AVMEDIA_TYPE_AUDIO:
-data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
-samples = av_malloc(data_size);
-if (!samples)
-goto fail;
-ret = avcodec_decode_audio3(st->codec, samples,
-&data_size, avpkt);
-av_free(samples);
+ret = avcodec_decode_audio4(st->codec, &picture, &got_picture, &pkt);
 break;
 default:
 break;
 }
+if (ret >= 0) {
+if (got_picture)
+st->info->nb_decoded_frames++;
+pkt.data += ret;
+pkt.size -= ret;
+}
 }
 fail:
 return ret;

@@ -21,14 +21,14 @@
 #include "avutil.h"
 #include "common.h"
-//! Avoid e.g. MPlayers fast_memcpy, it slows things down here.
+/// Avoid e.g. MPlayers fast_memcpy, it slows things down here.
 #undef memcpy
 #include <string.h>
 #include "lzo.h"
-//! Define if we may write up to 12 bytes beyond the output buffer.
+/// Define if we may write up to 12 bytes beyond the output buffer.
 #define OUTBUF_PADDED 1
-//! Define if we may read up to 8 bytes beyond the input buffer.
+/// Define if we may read up to 8 bytes beyond the input buffer.
 #define INBUF_PADDED 1
 typedef struct LZOContext {
 const uint8_t *in, *in_end;

@@ -33,13 +33,13 @@
 /** @name Error flags returned by av_lzo1x_decode
 * \{ */
-//! end of the input buffer reached before decoding finished
+/// end of the input buffer reached before decoding finished
 #define AV_LZO_INPUT_DEPLETED 1
-//! decoded data did not fit into output buffer
+/// decoded data did not fit into output buffer
 #define AV_LZO_OUTPUT_FULL 2
-//! a reference to previously decoded data was wrong
+/// a reference to previously decoded data was wrong
 #define AV_LZO_INVALID_BACKPTR 4
-//! a non-specific error in the compressed bitstream
+/// a non-specific error in the compressed bitstream
 #define AV_LZO_ERROR 8
 /** \} */

@@ -644,7 +644,7 @@ static inline void postProcess(const uint8_t src[], int srcStride, uint8_t dst[]
 #endif
 postProcess_C(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
 #endif
-#else //CONFIG_RUNTIME_CPUDETECT
+#else /* CONFIG_RUNTIME_CPUDETECT */
 #if HAVE_MMX2
 postProcess_MMX2(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
 #elif HAVE_AMD3DNOW
@@ -656,7 +656,7 @@ static inline void postProcess(const uint8_t src[], int srcStride, uint8_t dst[]
 #else
 postProcess_C(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
 #endif
-#endif //!CONFIG_RUNTIME_CPUDETECT
+#endif /* !CONFIG_RUNTIME_CPUDETECT */
 }
 //static void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,

@@ -352,6 +352,11 @@ do_audio_encoding g723_1.tco "-b:a 6.3k -ac 1 -ar 8000 -acodec g723_1"
 do_audio_decoding
 fi
+if [ -n "$do_g722" ] ; then
+do_audio_encoding g722.wav "-b 64k -ac 1 -ar 16000 -acodec g722"
+do_audio_decoding
+fi
 if [ -n "$do_g726" ] ; then
 do_audio_encoding g726.wav "-b:a 32k -ac 1 -ar 8000 -acodec g726"
 do_audio_decoding

--- a/tests/fate/h264.mak
+++ b/tests/fate/h264.mak
@@ -194,7 +194,7 @@ fate-h264-conformance-aud_mw_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-confo
 fate-h264-conformance-ba1_ft_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/BA1_FT_C.264
 fate-h264-conformance-ba1_sony_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/BA1_Sony_D.jsv
 fate-h264-conformance-ba2_sony_f: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/BA2_Sony_F.jsv
-fate-h264-conformance-ba3_sva_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/BA3_SVA_C.264
+fate-h264-conformance-ba3_sva_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/BA3_SVA_C.264
 fate-h264-conformance-ba_mw_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/BA_MW_D.264
 fate-h264-conformance-bamq1_jvc_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/BAMQ1_JVC_C.264
 fate-h264-conformance-bamq2_jvc_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/BAMQ2_JVC_C.264
@@ -204,80 +204,80 @@ fate-h264-conformance-caba1_sony_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-c
 fate-h264-conformance-caba1_sva_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CABA1_SVA_B.264
 fate-h264-conformance-caba2_sony_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CABA2_Sony_E.jsv
 fate-h264-conformance-caba2_sva_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CABA2_SVA_B.264
-fate-h264-conformance-caba3_sony_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CABA3_Sony_C.jsv
+fate-h264-conformance-caba3_sony_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CABA3_Sony_C.jsv
-fate-h264-conformance-caba3_sva_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CABA3_SVA_B.264
+fate-h264-conformance-caba3_sva_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CABA3_SVA_B.264
 fate-h264-conformance-caba3_toshiba_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CABA3_TOSHIBA_E.264
-fate-h264-conformance-cabac_mot_fld0_full: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/camp_mot_fld0_full.26l
+fate-h264-conformance-cabac_mot_fld0_full: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/camp_mot_fld0_full.26l
-fate-h264-conformance-cabac_mot_frm0_full: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/camp_mot_frm0_full.26l
+fate-h264-conformance-cabac_mot_frm0_full: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/camp_mot_frm0_full.26l
-fate-h264-conformance-cabac_mot_mbaff0_full: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/camp_mot_mbaff0_full.26l
+fate-h264-conformance-cabac_mot_mbaff0_full: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/camp_mot_mbaff0_full.26l
-fate-h264-conformance-cabac_mot_picaff0_full: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/camp_mot_picaff0_full.26l
+fate-h264-conformance-cabac_mot_picaff0_full: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/camp_mot_picaff0_full.26l
-fate-h264-conformance-cabaci3_sony_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CABACI3_Sony_B.jsv
+fate-h264-conformance-cabaci3_sony_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CABACI3_Sony_B.jsv
-fate-h264-conformance-cabast3_sony_e: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CABAST3_Sony_E.jsv
+fate-h264-conformance-cabast3_sony_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CABAST3_Sony_E.jsv
-fate-h264-conformance-cabastbr3_sony_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CABASTBR3_Sony_B.jsv
+fate-h264-conformance-cabastbr3_sony_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CABASTBR3_Sony_B.jsv
-fate-h264-conformance-cabref3_sand_d: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CABREF3_Sand_D.264
+fate-h264-conformance-cabref3_sand_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CABREF3_Sand_D.264
-fate-h264-conformance-cacqp3_sony_d: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CACQP3_Sony_D.jsv
+fate-h264-conformance-cacqp3_sony_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CACQP3_Sony_D.jsv
-fate-h264-conformance-cafi1_sva_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAFI1_SVA_C.264
+fate-h264-conformance-cafi1_sva_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAFI1_SVA_C.264
 fate-h264-conformance-cama1_sony_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAMA1_Sony_C.jsv
-fate-h264-conformance-cama1_toshiba_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAMA1_TOSHIBA_B.264
+fate-h264-conformance-cama1_toshiba_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAMA1_TOSHIBA_B.264
-fate-h264-conformance-cama1_vtc_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/cama1_vtc_c.avc
+fate-h264-conformance-cama1_vtc_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/cama1_vtc_c.avc
 fate-h264-conformance-cama2_vtc_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/cama2_vtc_b.avc
-fate-h264-conformance-cama3_sand_e: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAMA3_Sand_E.264
+fate-h264-conformance-cama3_sand_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAMA3_Sand_E.264
-fate-h264-conformance-cama3_vtc_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/cama3_vtc_b.avc
+fate-h264-conformance-cama3_vtc_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/cama3_vtc_b.avc
-fate-h264-conformance-camaci3_sony_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAMACI3_Sony_C.jsv
+fate-h264-conformance-camaci3_sony_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAMACI3_Sony_C.jsv
-fate-h264-conformance-camanl1_toshiba_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAMANL1_TOSHIBA_B.264
+fate-h264-conformance-camanl1_toshiba_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAMANL1_TOSHIBA_B.264
-fate-h264-conformance-camanl2_toshiba_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAMANL2_TOSHIBA_B.264
+fate-h264-conformance-camanl2_toshiba_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAMANL2_TOSHIBA_B.264
-fate-h264-conformance-camanl3_sand_e: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAMANL3_Sand_E.264
+fate-h264-conformance-camanl3_sand_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAMANL3_Sand_E.264
-fate-h264-conformance-camasl3_sony_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAMASL3_Sony_B.jsv
+fate-h264-conformance-camasl3_sony_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAMASL3_Sony_B.jsv
-fate-h264-conformance-camp_mot_mbaff_l30: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAMP_MOT_MBAFF_L30.26l
+fate-h264-conformance-camp_mot_mbaff_l30: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAMP_MOT_MBAFF_L30.26l
-fate-h264-conformance-camp_mot_mbaff_l31: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAMP_MOT_MBAFF_L31.26l
+fate-h264-conformance-camp_mot_mbaff_l31: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAMP_MOT_MBAFF_L31.26l
 fate-h264-conformance-canl1_sony_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CANL1_Sony_E.jsv
 fate-h264-conformance-canl1_sva_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CANL1_SVA_B.264
 fate-h264-conformance-canl1_toshiba_g: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CANL1_TOSHIBA_G.264
 fate-h264-conformance-canl2_sony_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CANL2_Sony_E.jsv
 fate-h264-conformance-canl2_sva_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CANL2_SVA_B.264
-fate-h264-conformance-canl3_sony_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CANL3_Sony_C.jsv
+fate-h264-conformance-canl3_sony_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CANL3_Sony_C.jsv
 fate-h264-conformance-canl3_sva_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CANL3_SVA_B.264
 fate-h264-conformance-canl4_sva_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CANL4_SVA_B.264
 fate-h264-conformance-canlma2_sony_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CANLMA2_Sony_C.jsv
 fate-h264-conformance-canlma3_sony_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CANLMA3_Sony_C.jsv
-fate-h264-conformance-capa1_toshiba_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAPA1_TOSHIBA_B.264
+fate-h264-conformance-capa1_toshiba_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAPA1_TOSHIBA_B.264
-fate-h264-conformance-capama3_sand_f: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAPAMA3_Sand_F.264
+fate-h264-conformance-capama3_sand_f: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAPAMA3_Sand_F.264
 fate-h264-conformance-capcm1_sand_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAPCM1_Sand_E.264
 fate-h264-conformance-capcmnl1_sand_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAPCMNL1_Sand_E.264
-fate-h264-conformance-capm3_sony_d: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAPM3_Sony_D.jsv
+fate-h264-conformance-capm3_sony_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAPM3_Sony_D.jsv
 fate-h264-conformance-caqp1_sony_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAQP1_Sony_B.jsv
-fate-h264-conformance-cavlc_mot_fld0_full_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/cvmp_mot_fld0_full_B.26l
+fate-h264-conformance-cavlc_mot_fld0_full_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/cvmp_mot_fld0_full_B.26l
-fate-h264-conformance-cavlc_mot_frm0_full_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/cvmp_mot_frm0_full_B.26l
+fate-h264-conformance-cavlc_mot_frm0_full_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/cvmp_mot_frm0_full_B.26l
-fate-h264-conformance-cavlc_mot_mbaff0_full_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/cvmp_mot_mbaff0_full_B.26l
+fate-h264-conformance-cavlc_mot_mbaff0_full_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/cvmp_mot_mbaff0_full_B.26l
-fate-h264-conformance-cavlc_mot_picaff0_full_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/cvmp_mot_picaff0_full_B.26l
+fate-h264-conformance-cavlc_mot_picaff0_full_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/cvmp_mot_picaff0_full_B.26l
 fate-h264-conformance-cawp1_toshiba_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAWP1_TOSHIBA_E.264
-fate-h264-conformance-cawp5_toshiba_e: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAWP5_TOSHIBA_E.264
+fate-h264-conformance-cawp5_toshiba_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAWP5_TOSHIBA_E.264
 fate-h264-conformance-ci1_ft_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CI1_FT_B.264
 fate-h264-conformance-ci_mw_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CI_MW_D.264
-fate-h264-conformance-cvbs3_sony_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVBS3_Sony_C.jsv
+fate-h264-conformance-cvbs3_sony_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVBS3_Sony_C.jsv
 fate-h264-conformance-cvcanlma2_sony_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVCANLMA2_Sony_C.jsv
-fate-h264-conformance-cvfi1_sony_d: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVFI1_Sony_D.jsv
+fate-h264-conformance-cvfi1_sony_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVFI1_Sony_D.jsv
-fate-h264-conformance-cvfi1_sva_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVFI1_SVA_C.264
+fate-h264-conformance-cvfi1_sva_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVFI1_SVA_C.264
-fate-h264-conformance-cvfi2_sony_h: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVFI2_Sony_H.jsv
+fate-h264-conformance-cvfi2_sony_h: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVFI2_Sony_H.jsv
-fate-h264-conformance-cvfi2_sva_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVFI2_SVA_C.264
+fate-h264-conformance-cvfi2_sva_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVFI2_SVA_C.264
 fate-h264-conformance-cvma1_sony_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVMA1_Sony_D.jsv
-fate-h264-conformance-cvma1_toshiba_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVMA1_TOSHIBA_B.264
+fate-h264-conformance-cvma1_toshiba_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVMA1_TOSHIBA_B.264
-fate-h264-conformance-cvmanl1_toshiba_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVMANL1_TOSHIBA_B.264
+fate-h264-conformance-cvmanl1_toshiba_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVMANL1_TOSHIBA_B.264
-fate-h264-conformance-cvmanl2_toshiba_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVMANL2_TOSHIBA_B.264
+fate-h264-conformance-cvmanl2_toshiba_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVMANL2_TOSHIBA_B.264
-fate-h264-conformance-cvmapaqp3_sony_e: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVMAPAQP3_Sony_E.jsv
+fate-h264-conformance-cvmapaqp3_sony_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVMAPAQP3_Sony_E.jsv
-fate-h264-conformance-cvmaqp2_sony_g: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVMAQP2_Sony_G.jsv
+fate-h264-conformance-cvmaqp2_sony_g: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVMAQP2_Sony_G.jsv
-fate-h264-conformance-cvmaqp3_sony_d: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVMAQP3_Sony_D.jsv
+fate-h264-conformance-cvmaqp3_sony_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVMAQP3_Sony_D.jsv
-fate-h264-conformance-cvmp_mot_fld_l30_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVMP_MOT_FLD_L30_B.26l
+fate-h264-conformance-cvmp_mot_fld_l30_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVMP_MOT_FLD_L30_B.26l
-fate-h264-conformance-cvmp_mot_frm_l31_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVMP_MOT_FRM_L31_B.26l
+fate-h264-conformance-cvmp_mot_frm_l31_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVMP_MOT_FRM_L31_B.26l
 fate-h264-conformance-cvnlfi1_sony_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVNLFI1_Sony_C.jsv
-fate-h264-conformance-cvnlfi2_sony_h: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVNLFI2_Sony_H.jsv
+fate-h264-conformance-cvnlfi2_sony_h: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVNLFI2_Sony_H.jsv
-fate-h264-conformance-cvpa1_toshiba_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVPA1_TOSHIBA_B.264
+fate-h264-conformance-cvpa1_toshiba_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVPA1_TOSHIBA_B.264
 fate-h264-conformance-cvpcmnl1_sva_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVPCMNL1_SVA_C.264
 fate-h264-conformance-cvpcmnl2_sva_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVPCMNL2_SVA_C.264
 fate-h264-conformance-cvwp1_toshiba_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVWP1_TOSHIBA_E.264
-fate-h264-conformance-cvwp2_toshiba_e: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVWP2_TOSHIBA_E.264
+fate-h264-conformance-cvwp2_toshiba_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVWP2_TOSHIBA_E.264
-fate-h264-conformance-cvwp3_toshiba_e: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVWP3_TOSHIBA_E.264
+fate-h264-conformance-cvwp3_toshiba_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVWP3_TOSHIBA_E.264
-fate-h264-conformance-cvwp5_toshiba_e: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVWP5_TOSHIBA_E.264
+fate-h264-conformance-cvwp5_toshiba_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVWP5_TOSHIBA_E.264
 fate-h264-conformance-fi1_sony_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FI1_Sony_E.jsv
 fate-h264-conformance-frext-alphaconformanceg: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/test8b43.264
 fate-h264-conformance-frext-bcrm_freh10: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/freh10.264 -vsync 0
@@ -337,8 +337,8 @@ fate-h264-conformance-frext-pph422i4_panasonic_a: CMD = framecrc -vsync 0 -i $(S
 fate-h264-conformance-frext-pph422i5_panasonic_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/PPH422I5_Panasonic_A.264 -pix_fmt yuv422p10le
 fate-h264-conformance-frext-pph422i6_panasonic_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/PPH422I6_Panasonic_A.264 -pix_fmt yuv422p10le
 fate-h264-conformance-frext-pph422i7_panasonic_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/PPH422I7_Panasonic_A.264 -pix_fmt yuv422p10le
-fate-h264-conformance-hcbp2_hhi_a: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/HCBP2_HHI_A.264
+fate-h264-conformance-hcbp2_hhi_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/HCBP2_HHI_A.264
-fate-h264-conformance-hcmp1_hhi_a: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/HCMP1_HHI_A.264
+fate-h264-conformance-hcmp1_hhi_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/HCMP1_HHI_A.264
 fate-h264-conformance-ls_sva_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/LS_SVA_D.264
 fate-h264-conformance-midr_mw_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/MIDR_MW_D.264
 fate-h264-conformance-mps_mw_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/MPS_MW_A.264
@@ -349,11 +349,11 @@ fate-h264-conformance-mr2_tandberg_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h26
 fate-h264-conformance-mr3_tandberg_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/MR3_TANDBERG_B.264
 fate-h264-conformance-mr4_tandberg_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/MR4_TANDBERG_C.264
 fate-h264-conformance-mr5_tandberg_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/MR5_TANDBERG_C.264
-fate-h264-conformance-mr6_bt_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/MR6_BT_B.h264
+fate-h264-conformance-mr6_bt_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/MR6_BT_B.h264
-fate-h264-conformance-mr7_bt_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/MR7_BT_B.h264
+fate-h264-conformance-mr7_bt_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/MR7_BT_B.h264
-fate-h264-conformance-mr8_bt_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/MR8_BT_B.h264
+fate-h264-conformance-mr8_bt_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/MR8_BT_B.h264
-fate-h264-conformance-mr9_bt_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/MR9_BT_B.h264
+fate-h264-conformance-mr9_bt_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/MR9_BT_B.h264
-fate-h264-conformance-mv1_brcm_d: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/src19td.IBP.264
+fate-h264-conformance-mv1_brcm_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/src19td.IBP.264
 fate-h264-conformance-nl1_sony_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/NL1_Sony_D.jsv
 fate-h264-conformance-nl2_sony_h: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/NL2_Sony_H.jsv
 fate-h264-conformance-nl3_sva_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/NL3_SVA_E.264
@@ -363,9 +363,9 @@ fate-h264-conformance-nrf_mw_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-confo
 fate-h264-conformance-sharp_mp_field_1_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/Sharp_MP_Field_1_B.jvt
 fate-h264-conformance-sharp_mp_field_2_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/Sharp_MP_Field_2_B.jvt
 fate-h264-conformance-sharp_mp_field_3_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/Sharp_MP_Field_3_B.jvt
-fate-h264-conformance-sharp_mp_paff_1r2: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/Sharp_MP_PAFF_1r2.jvt
+fate-h264-conformance-sharp_mp_paff_1r2: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/Sharp_MP_PAFF_1r2.jvt
-fate-h264-conformance-sharp_mp_paff_2r: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/Sharp_MP_PAFF_2.jvt
+fate-h264-conformance-sharp_mp_paff_2r: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/Sharp_MP_PAFF_2.jvt
-fate-h264-conformance-sl1_sva_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/SL1_SVA_B.264
+fate-h264-conformance-sl1_sva_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/SL1_SVA_B.264
 fate-h264-conformance-sva_ba1_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/SVA_BA1_B.264
 fate-h264-conformance-sva_ba2_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/SVA_BA2_D.264
 fate-h264-conformance-sva_base_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/SVA_Base_B.264
@@ -376,4 +376,4 @@ fate-h264-conformance-sva_nl2_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conf
 fate-h264-interlace-crop: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264/interlaced_crop.mp4 -vframes 3
 fate-h264-lossless: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264/lossless.h264
-fate-h264-extreme-plane-pred: CMD = framemd5 -strict 1 -vsync 0 -i $(SAMPLES)/h264/extreme-plane-pred.h264
+fate-h264-extreme-plane-pred: CMD = framemd5 -vsync 0 -i $(SAMPLES)/h264/extreme-plane-pred.h264

--- /dev/null
+++ b/tests/ref/acodec/g722
@@ -0,0 +1,4 @@
+156f63e3391b95020ae882dbae6eccf3 *./tests/data/acodec/g722.wav
+47991 ./tests/data/acodec/g722.wav
+8f65de513acc08b37a488d6a802b4f00 *./tests/data/g722.acodec.out.wav
+stddev: 8860.50 PSNR: 17.38 MAXDIFF:33814 bytes: 191732/ 1058400

--- a/tools/patcheck
+++ b/tools/patcheck
@@ -55,6 +55,7 @@ hiegrep 'INIT_VLC_USE_STATIC' 'forbidden ancient vlc type' $*
 hiegrep '=[-+\*\&] ' 'looks like compound assignment' $*
 hiegrep2 '/\*\* *[a-zA-Z0-9].*' '\*/' 'Inconsistently formatted doxygen comment' $*
 hiegrep '; */\*\*[^<]' 'Misformatted doxygen comment' $*
+hiegrep '//!|/\*!' 'inconsistent doxygen syntax' $*
 hiegrep2 '(int|unsigned|static|void)[a-zA-Z0-9 _]*(init|end)[a-zA-Z0-9 _]*\(.*[^;]$' '(av_cold|:\+[^a-zA-Z_])' 'These functions may need av_cold, please review the whole patch for similar functions needing av_cold' $*
