Merge commit '6d97484d72e33f7dde9493a9ead1a72e2f029605'

* commit '6d97484d72e33f7dde9493a9ead1a72e2f029605':
  avcodec: av_log_ask_for_sample() ---> avpriv_request_sample()
  rsodec: Use avpriv_report_missing_feature() where appropriate

Conflicts:
	libavcodec/anm.c
	libavcodec/mlpdec.c
	libavcodec/pictordec.c
	libavcodec/sunrast.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit 13795dbb64 (pull/12/merge)
Michael Niedermayer, 12 years ago
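
For context: the helper swapped in throughout this merge takes a short printf-style
description of the missing feature and appends the request for a sample itself, so
callers no longer spell out the full sentence or the trailing newline. A minimal
before/after sketch of the calling pattern, lifted from the adx.c hunk below
(avctx is the decoder's AVCodecContext):

    /* old: hand-written message, trailing newline */
    av_log_ask_for_sample(avctx, "unsupported ADX format\n");

    /* new: short feature description; the helper appends its own
     * request-for-sample boilerplate and the newline */
    avpriv_request_sample(avctx, "Support for this ADX format");
    return AVERROR_PATCHWELCOME;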
 libavcodec/aacdec.c          | 14
 libavcodec/adx.c             |  2
 libavcodec/alac.c            |  3
 libavcodec/anm.c             |  6
 libavcodec/ansi.c            |  6
 libavcodec/apedec.c          |  4
 libavcodec/atrac3.c          |  2
 libavcodec/cdxl.c            |  4
 libavcodec/cinepak.c         |  2
 libavcodec/cook.c            | 16
 libavcodec/dcadec.c          |  5
 libavcodec/dxtory.c          |  2
 libavcodec/h264.c            |  4
 libavcodec/imc.c             |  5
 libavcodec/indeo3.c          |  4
 libavcodec/indeo4.c          |  4
 libavcodec/loco.c            |  2
 libavcodec/mlpdec.c          | 22
 libavcodec/mpc7.c            |  3
 libavcodec/mss2.c            |  4
 libavcodec/pictordec.c       |  2
 libavcodec/ptx.c             |  4
 libavcodec/qcelpdec.c        |  4
 libavcodec/ralf.c            |  2
 libavcodec/sunrast.c         |  4
 libavcodec/truemotion1.c     |  4
 libavcodec/truespeech.c      |  2
 libavcodec/utvideodec.c      |  4
 libavcodec/v210x.c           |  2
 libavcodec/vc1dec.c          |  2
 libavcodec/vorbis_parser.c   |  5
 libavcodec/wmalosslessdec.c  | 11
 libavcodec/wmaprodec.c       | 21
 libavcodec/wnv1.c            |  6
 libavcodec/xwddec.c          |  6
 libavcodec/zmbv.c            | 12
 libavformat/rsodec.c         |  2
 37 files changed

--- a/libavcodec/aacdec.c
+++ b/libavcodec/aacdec.c
@@ -1168,10 +1168,10 @@ static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
                     offset[2] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
                     clipped_offset = av_clip(offset[2], -155, 100);
                     if (offset[2] != clipped_offset) {
-                        av_log_ask_for_sample(ac->avctx, "Intensity stereo "
-                                              "position clipped (%d -> %d).\nIf you heard an "
-                                              "audible artifact, there may be a bug in the "
-                                              "decoder. ", offset[2], clipped_offset);
+                        avpriv_request_sample(ac->avctx,
+                                              "If you heard an audible artifact, there may be a bug in the decoder. "
+                                              "Clipped intensity stereo position (%d -> %d)",
+                                              offset[2], clipped_offset);
                     }
                     sf[idx] = ff_aac_pow2sf_tab[-clipped_offset + POW_SF2_ZERO];
                 }
@@ -1183,9 +1183,9 @@ static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
                     offset[1] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
                     clipped_offset = av_clip(offset[1], -100, 155);
                     if (offset[1] != clipped_offset) {
-                        av_log_ask_for_sample(ac->avctx, "Noise gain clipped "
-                                              "(%d -> %d).\nIf you heard an audible "
-                                              "artifact, there may be a bug in the decoder. ",
+                        avpriv_request_sample(ac->avctx,
+                                              "If you heard an audible artifact, there may be a bug in the decoder. "
+                                              "Clipped noise gain (%d -> %d)",
                                               offset[1], clipped_offset);
                     }
                     sf[idx] = -ff_aac_pow2sf_tab[clipped_offset + POW_SF2_ZERO];

--- a/libavcodec/adx.c
+++ b/libavcodec/adx.c
@@ -58,7 +58,7 @@ int avpriv_adx_decode_header(AVCodecContext *avctx, const uint8_t *buf,
     /* check for encoding=3 block_size=18, sample_size=4 */
     if (buf[4] != 3 || buf[5] != 18 || buf[6] != 4) {
-        av_log_ask_for_sample(avctx, "unsupported ADX format\n");
+        avpriv_request_sample(avctx, "Support for this ADX format");
         return AVERROR_PATCHWELCOME;
     }

--- a/libavcodec/alac.c
+++ b/libavcodec/alac.c
@@ -586,8 +586,7 @@ static av_cold int alac_decode_init(AVCodecContext * avctx)
     case 24:
     case 32: avctx->sample_fmt = req_packed ? AV_SAMPLE_FMT_S32 : AV_SAMPLE_FMT_S32P;
              break;
-    default: av_log_ask_for_sample(avctx, "Sample depth %d is not supported.\n",
-                                   alac->sample_size);
+    default: avpriv_request_sample(avctx, "Sample depth %d", alac->sample_size);
              return AVERROR_PATCHWELCOME;
     }
     avctx->bits_per_raw_sample = alac->sample_size;

--- a/libavcodec/anm.c
+++ b/libavcodec/anm.c
@@ -125,11 +125,11 @@ static int decode_frame(AVCodecContext *avctx,
     bytestream2_init(&s->gb, avpkt->data, buf_size);
     if (bytestream2_get_byte(&s->gb) != 0x42) {
-        av_log_ask_for_sample(avctx, "unknown record type\n");
+        avpriv_request_sample(avctx, "Unknown record type");
         return AVERROR_INVALIDDATA;
     }
     if (bytestream2_get_byte(&s->gb)) {
-        av_log_ask_for_sample(avctx, "padding bytes not supported\n");
+        avpriv_request_sample(avctx, "Padding bytes");
         return AVERROR_PATCHWELCOME;
     }
     bytestream2_skip(&s->gb, 2);
@@ -159,7 +159,7 @@ static int decode_frame(AVCodecContext *avctx,
         if (type == 0)
             break; // stop
         if (type == 2) {
-            av_log_ask_for_sample(avctx, "unknown opcode");
+            avpriv_request_sample(avctx, "Unknown opcode");
             return AVERROR_PATCHWELCOME;
         }
         continue;

--- a/libavcodec/ansi.c
+++ b/libavcodec/ansi.c
@@ -241,7 +241,7 @@ static int execute_code(AVCodecContext * avctx, int c)
             height = 60<<4;
             break;
         default:
-            av_log_ask_for_sample(avctx, "unsupported screen mode\n");
+            avpriv_request_sample(avctx, "Unsupported screen mode");
         }
         if (width != avctx->width || height != avctx->height) {
             av_frame_unref(s->frame);
@@ -316,7 +316,7 @@ static int execute_code(AVCodecContext * avctx, int c)
             } else if (m == 49) {
                 s->fg = ansi_to_cga[DEFAULT_BG_COLOR];
             } else {
-                av_log_ask_for_sample(avctx, "unsupported rendition parameter\n");
+                avpriv_request_sample(avctx, "Unsupported rendition parameter");
             }
         }
         break;
@@ -333,7 +333,7 @@ static int execute_code(AVCodecContext * avctx, int c)
         s->y = av_clip(s->sy, 0, avctx->height - s->font_height);
         break;
     default:
-        av_log_ask_for_sample(avctx, "unsupported escape code\n");
+        avpriv_request_sample(avctx, "Unknown escape code");
         break;
     }
     return 0;

--- a/libavcodec/apedec.c
+++ b/libavcodec/apedec.c
@@ -205,8 +205,8 @@ static av_cold int ape_decode_init(AVCodecContext *avctx)
         avctx->sample_fmt = AV_SAMPLE_FMT_S32P;
         break;
     default:
-        av_log_ask_for_sample(avctx, "Unsupported bits per coded sample %d\n",
-                              s->bps);
+        avpriv_request_sample(avctx,
+                              "%d bits per coded sample", s->bps);
         return AVERROR_PATCHWELCOME;
     }
     s->avctx = avctx;

--- a/libavcodec/atrac3.c
+++ b/libavcodec/atrac3.c
@@ -170,7 +170,7 @@ static int decode_bytes(const uint8_t *input, uint8_t *out, int bytes)
         output[i] = c ^ buf[i];
     if (off)
-        av_log_ask_for_sample(NULL, "Offset of %d not handled.\n", off);
+        avpriv_request_sample(NULL, "Offset of %d", off);
     return off;
 }

--- a/libavcodec/cdxl.c
+++ b/libavcodec/cdxl.c
@@ -235,7 +235,7 @@ static int cdxl_decode_frame(AVCodecContext *avctx, void *data,
     if (c->bpp < 1)
         return AVERROR_INVALIDDATA;
     if (c->format != BIT_PLANAR && c->format != BIT_LINE) {
-        av_log_ask_for_sample(avctx, "unsupported pixel format: 0x%0x\n", c->format);
+        avpriv_request_sample(avctx, "Pixel format 0x%0x", c->format);
         return AVERROR_PATCHWELCOME;
     }
@@ -255,7 +255,7 @@ static int cdxl_decode_frame(AVCodecContext *avctx, void *data,
             return AVERROR_INVALIDDATA;
         avctx->pix_fmt = AV_PIX_FMT_BGR24;
     } else {
-        av_log_ask_for_sample(avctx, "unsupported encoding %d and bpp %d\n",
+        avpriv_request_sample(avctx, "Encoding %d and bpp %d",
                               encoding, c->bpp);
         return AVERROR_PATCHWELCOME;
     }

--- a/libavcodec/cinepak.c
+++ b/libavcodec/cinepak.c
@@ -332,7 +332,7 @@ static int cinepak_decode (CinepakContext *s)
     /* if this is the first frame, check for deviant Sega FILM data */
     if (s->sega_film_skip_bytes == -1) {
         if (!encoded_buf_size) {
-            av_log_ask_for_sample(s->avctx, "encoded_buf_size is 0");
+            avpriv_request_sample(s->avctx, "encoded_buf_size 0");
             return AVERROR_PATCHWELCOME;
         }
         if (encoded_buf_size != s->size && (s->size % encoded_buf_size) != 0) {

--- a/libavcodec/cook.c
+++ b/libavcodec/cook.c
@@ -1115,7 +1115,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
         switch (q->subpacket[s].cookversion) {
         case MONO:
             if (avctx->channels != 1) {
-                av_log_ask_for_sample(avctx, "Container channels != 1.\n");
+                avpriv_request_sample(avctx, "Container channels != 1");
                 return AVERROR_PATCHWELCOME;
             }
             av_log(avctx, AV_LOG_DEBUG, "MONO\n");
@@ -1129,7 +1129,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
             break;
         case JOINT_STEREO:
             if (avctx->channels != 2) {
-                av_log_ask_for_sample(avctx, "Container channels != 2.\n");
+                avpriv_request_sample(avctx, "Container channels != 2");
                 return AVERROR_PATCHWELCOME;
             }
             av_log(avctx, AV_LOG_DEBUG, "JOINT_STEREO\n");
@@ -1169,7 +1169,8 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
             break;
         default:
-            av_log_ask_for_sample(avctx, "Unknown Cook version.\n");
+            avpriv_request_sample(avctx, "Cook version %d",
+                                  q->subpacket[s].cookversion);
             return AVERROR_PATCHWELCOME;
         }
@@ -1185,7 +1186,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
         /* Try to catch some obviously faulty streams, othervise it might be exploitable */
         if (q->subpacket[s].total_subbands > 53) {
-            av_log_ask_for_sample(avctx, "total_subbands > 53\n");
+            avpriv_request_sample(avctx, "total_subbands > 53");
             return AVERROR_PATCHWELCOME;
         }
@@ -1197,7 +1198,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
         }
         if (q->subpacket[s].subbands > 50) {
-            av_log_ask_for_sample(avctx, "subbands > 50\n");
+            avpriv_request_sample(avctx, "subbands > 50");
             return AVERROR_PATCHWELCOME;
         }
         if (q->subpacket[s].subbands == 0) {
@@ -1217,7 +1218,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
         q->num_subpackets++;
         s++;
         if (s > MAX_SUBPACKETS) {
-            av_log_ask_for_sample(avctx, "Too many subpackets > 5\n");
+            avpriv_request_sample(avctx, "subpackets > %d", MAX_SUBPACKETS);
             return AVERROR_PATCHWELCOME;
         }
     }
@@ -1259,8 +1260,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
     /* Try to catch some obviously faulty streams, othervise it might be exploitable */
     if (q->samples_per_channel != 256 && q->samples_per_channel != 512 &&
         q->samples_per_channel != 1024) {
-        av_log_ask_for_sample(avctx,
-                              "unknown amount of samples_per_channel = %d\n",
+        avpriv_request_sample(avctx, "samples_per_channel = %d",
                               q->samples_per_channel);
         return AVERROR_PATCHWELCOME;
     }

--- a/libavcodec/dcadec.c
+++ b/libavcodec/dcadec.c
@@ -1985,14 +1985,15 @@ static void dca_exss_parse_header(DCAContext *s)
     num_audiop = get_bits(&s->gb, 3) + 1;
     if (num_audiop > 1) {
-        av_log_ask_for_sample(s->avctx, "Multiple DTS-HD audio presentations.");
+        avpriv_request_sample(s->avctx,
+                              "Multiple DTS-HD audio presentations");
         /* ignore such streams for now */
         return;
     }
     num_assets = get_bits(&s->gb, 3) + 1;
     if (num_assets > 1) {
-        av_log_ask_for_sample(s->avctx, "Multiple DTS-HD audio assets.");
+        avpriv_request_sample(s->avctx, "Multiple DTS-HD audio assets");
         /* ignore such streams for now */
         return;
     }

--- a/libavcodec/dxtory.c
+++ b/libavcodec/dxtory.c
@@ -53,7 +53,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     pic->key_frame = 1;
     if (AV_RL32(src) != 0x01000002) {
-        av_log_ask_for_sample(avctx, "Unknown frame header %X\n", AV_RL32(src));
+        avpriv_request_sample(avctx, "Frame header %X", AV_RL32(src));
         return AVERROR_PATCHWELCOME;
     }
     src += 16;

--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -3484,8 +3484,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
                 h->droppable = last_pic_droppable;
                 return AVERROR_INVALIDDATA;
             } else if (last_pic_droppable != h->droppable) {
-                av_log_ask_for_sample(h->avctx,
-                                      "Found reference and non-reference fields in the same frame.\n");
+                avpriv_request_sample(h->avctx,
+                                      "Found reference and non-reference fields in the same frame, which");
                 h->picture_structure = last_pic_structure;
                 h->droppable = last_pic_droppable;
                 return AVERROR_PATCHWELCOME;

--- a/libavcodec/imc.c
+++ b/libavcodec/imc.c
@@ -182,7 +182,7 @@ static av_cold int imc_decode_init(AVCodecContext *avctx)
         avctx->channels = 1;
     if (avctx->channels > 2) {
-        av_log_ask_for_sample(avctx, "Number of channels is not supported\n");
+        avpriv_request_sample(avctx, "Number of channels > 2");
         return AVERROR_PATCHWELCOME;
     }
@@ -779,8 +779,7 @@ static int imc_decode_block(AVCodecContext *avctx, IMCContext *q, int ch)
     stream_format_code = get_bits(&q->gb, 3);
     if (stream_format_code & 1) {
-        av_log_ask_for_sample(avctx, "Stream format %X is not supported\n",
-                              stream_format_code);
+        avpriv_request_sample(avctx, "Stream format %X", stream_format_code);
         return AVERROR_PATCHWELCOME;
     }

--- a/libavcodec/indeo3.c
+++ b/libavcodec/indeo3.c
@@ -993,12 +993,12 @@ static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
     }
     if (ctx->frame_flags & BS_8BIT_PEL) {
-        av_log_ask_for_sample(avctx, "8-bit pixel format\n");
+        avpriv_request_sample(avctx, "8-bit pixel format");
         return AVERROR_PATCHWELCOME;
     }
     if (ctx->frame_flags & BS_MV_X_HALF || ctx->frame_flags & BS_MV_Y_HALF) {
-        av_log_ask_for_sample(avctx, "halfpel motion vectors\n");
+        avpriv_request_sample(avctx, "Halfpel motion vectors");
         return AVERROR_PATCHWELCOME;
     }

--- a/libavcodec/indeo4.c
+++ b/libavcodec/indeo4.c
@@ -331,12 +331,12 @@ static int decode_band_hdr(IVI45DecContext *ctx, IVIBandDesc *band,
         transform_id = get_bits(&ctx->gb, 5);
         if (transform_id >= FF_ARRAY_ELEMS(transforms) ||
             !transforms[transform_id].inv_trans) {
-            av_log_ask_for_sample(avctx, "Unimplemented transform: %d!\n", transform_id);
+            avpriv_request_sample(avctx, "Transform %d", transform_id);
             return AVERROR_PATCHWELCOME;
         }
         if ((transform_id >= 7 && transform_id <= 9) ||
              transform_id == 17) {
-            av_log_ask_for_sample(avctx, "DCT transform not supported yet!\n");
+            avpriv_request_sample(avctx, "DCT transform");
             return AVERROR_PATCHWELCOME;
         }

--- a/libavcodec/loco.c
+++ b/libavcodec/loco.c
@@ -268,7 +268,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
         break;
     default:
         l->lossy = AV_RL32(avctx->extradata + 8);
-        av_log_ask_for_sample(avctx, "This is LOCO codec version %i.\n", version);
+        avpriv_request_sample(avctx, "LOCO codec version %i", version);
     }
     l->mode = AV_RL32(avctx->extradata + 4);

--- a/libavcodec/mlpdec.c
+++ b/libavcodec/mlpdec.c
@@ -332,9 +332,10 @@ static int read_major_sync(MLPDecodeContext *m, GetBitContext *gb)
         return AVERROR_INVALIDDATA;
     }
     if (mh.num_substreams > MAX_SUBSTREAMS) {
-        av_log_ask_for_sample(m->avctx,
-            "Number of substreams %d is larger than the maximum supported "
-            "by the decoder.\n", mh.num_substreams);
+        avpriv_request_sample(m->avctx,
+                              "%d substreams (more than the "
+                              "maximum supported by the decoder)",
+                              mh.num_substreams);
         return AVERROR_PATCHWELCOME;
     }
@@ -442,9 +443,10 @@ static int read_restart_header(MLPDecodeContext *m, GetBitContext *gbp,
     /* This should happen for TrueHD streams with >6 channels and MLP's noise
      * type. It is not yet known if this is allowed. */
     if (max_channel > MAX_MATRIX_CHANNEL_MLP && !s->noise_type) {
-        av_log_ask_for_sample(m->avctx,
-            "Number of channels %d is larger than the maximum supported "
-            "by the decoder.\n", max_channel + 2);
+        avpriv_request_sample(m->avctx,
+                              "%d channels (more than the "
+                              "maximum supported by the decoder)",
+                              max_channel + 2);
         return AVERROR_PATCHWELCOME;
     }
@@ -507,8 +509,8 @@ static int read_restart_header(MLPDecodeContext *m, GetBitContext *gbp,
                    channel);
         }
         if ((unsigned)ch_assign > s->max_matrix_channel) {
-            av_log_ask_for_sample(m->avctx,
-                "Assignment of matrix channel %d to invalid output channel %d.\n",
+            avpriv_request_sample(m->avctx,
+                "Assignment of matrix channel %d to invalid output channel %d",
                 ch, ch_assign);
             return AVERROR_PATCHWELCOME;
         }
@@ -855,8 +857,8 @@ static int read_block_data(MLPDecodeContext *m, GetBitContext *gbp,
     if (s->data_check_present) {
         expected_stream_pos  = get_bits_count(gbp);
         expected_stream_pos += get_bits(gbp, 16);
-        av_log_ask_for_sample(m->avctx, "This file contains some features "
-                              "we have not tested yet.\n");
+        avpriv_request_sample(m->avctx,
+                              "Substreams with VLC block size check info");
     }
     if (s->blockpos + s->blocksize > m->access_unit_size) {

--- a/libavcodec/mpc7.c
+++ b/libavcodec/mpc7.c
@@ -61,8 +61,7 @@ static av_cold int mpc7_decode_init(AVCodecContext * avctx)
     /* Musepack SV7 is always stereo */
     if (avctx->channels != 2) {
-        av_log_ask_for_sample(avctx, "Unsupported number of channels: %d\n",
-                              avctx->channels);
+        avpriv_request_sample(avctx, "%d channels", avctx->channels);
         return AVERROR_PATCHWELCOME;
     }

--- a/libavcodec/mss2.c
+++ b/libavcodec/mss2.c
@@ -430,8 +430,8 @@ static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
         ctx->dsp.upsample_plane(f->data[1], f->linesize[1], w >> 1, h >> 1);
         ctx->dsp.upsample_plane(f->data[2], f->linesize[2], w >> 1, h >> 1);
     } else if (v->respic)
-        av_log_ask_for_sample(v->s.avctx,
-                              "Asymmetric WMV9 rectangle subsampling\n");
+        avpriv_request_sample(v->s.avctx,
+                              "Asymmetric WMV9 rectangle subsampling");
     av_assert0(f->linesize[1] == f->linesize[2]);

--- a/libavcodec/pictordec.c
+++ b/libavcodec/pictordec.c
@@ -123,7 +123,7 @@ static int decode_frame(AVCodecContext *avctx,
     s->nb_planes = (tmp >> 4) + 1;
     bpp = bits_per_plane * s->nb_planes;
     if (bits_per_plane > 8 || bpp < 1 || bpp > 32) {
-        av_log_ask_for_sample(avctx, "unsupported bit depth\n");
+        avpriv_request_sample(avctx, "Unsupported bit depth");
         return AVERROR_PATCHWELCOME;
     }

--- a/libavcodec/ptx.c
+++ b/libavcodec/ptx.c
@@ -42,7 +42,7 @@ static int ptx_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     bytes_per_pixel = AV_RL16(buf+12) >> 3;
     if (bytes_per_pixel != 2) {
-        av_log_ask_for_sample(avctx, "Image format is not RGB15.\n");
+        avpriv_request_sample(avctx, "Image format not RGB15");
         return AVERROR_PATCHWELCOME;
     }
@@ -51,7 +51,7 @@ static int ptx_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     if (buf_end - buf < offset)
         return AVERROR_INVALIDDATA;
     if (offset != 0x2c)
-        av_log_ask_for_sample(avctx, "offset != 0x2c\n");
+        avpriv_request_sample(avctx, "offset != 0x2c");
     buf += offset;

--- a/libavcodec/qcelpdec.c
+++ b/libavcodec/qcelpdec.c
@@ -635,8 +635,8 @@ static qcelp_packet_rate determine_bitrate(AVCodecContext *avctx,
         return I_F_Q;
     if (bitrate == SILENCE) {
-        //FIXME: Remove experimental warning when tested with samples.
-        av_log_ask_for_sample(avctx, "'Blank frame handling is experimental.");
+        // FIXME: Remove this warning when tested with samples.
+        avpriv_request_sample(avctx, "Blank frame handling");
     }
     return bitrate;
 }

--- a/libavcodec/ralf.c
+++ b/libavcodec/ralf.c
@@ -136,7 +136,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
     ctx->version = AV_RB16(avctx->extradata + 4);
     if (ctx->version != 0x103) {
-        av_log_ask_for_sample(avctx, "unknown version %X\n", ctx->version);
+        avpriv_request_sample(avctx, "Unknown version %X", ctx->version);
         return AVERROR_PATCHWELCOME;
     }

--- a/libavcodec/sunrast.c
+++ b/libavcodec/sunrast.c
@@ -54,7 +54,7 @@ static int sunrast_decode_frame(AVCodecContext *avctx, void *data,
     buf += 32;
     if (type == RT_EXPERIMENTAL) {
-        av_log_ask_for_sample(avctx, "unsupported (compression) type\n");
+        avpriv_request_sample(avctx, "TIFF/IFF/EXPERIMENTAL (compression) type");
         return AVERROR_PATCHWELCOME;
     }
     if (type > RT_FORMAT_IFF) {
@@ -66,7 +66,7 @@ static int sunrast_decode_frame(AVCodecContext *avctx, void *data,
         return AVERROR_INVALIDDATA;
     }
     if (maptype == RMT_RAW) {
-        av_log_ask_for_sample(avctx, "unsupported colormap type\n");
+        avpriv_request_sample(avctx, "Unknown colormap type");
         return AVERROR_PATCHWELCOME;
     }
     if (maptype > RMT_RAW) {

--- a/libavcodec/truemotion1.c
+++ b/libavcodec/truemotion1.c
@@ -355,7 +355,7 @@ static int truemotion1_decode_header(TrueMotion1Context *s)
         s->flags = FLAG_KEYFRAME;
     if (s->flags & FLAG_SPRITE) {
-        av_log_ask_for_sample(s->avctx, "SPRITE frame found.\n");
+        avpriv_request_sample(s->avctx, "Frame with sprite");
         /* FIXME header.width, height, xoffset and yoffset aren't initialized */
         return AVERROR_PATCHWELCOME;
     } else {
@@ -365,7 +365,7 @@ static int truemotion1_decode_header(TrueMotion1Context *s)
             if ((s->w < 213) && (s->h >= 176))
             {
                 s->flags |= FLAG_INTERPOLATED;
-                av_log_ask_for_sample(s->avctx, "INTERPOLATION selected.\n");
+                avpriv_request_sample(s->avctx, "Interpolated frame");
             }
         }
     }

--- a/libavcodec/truespeech.c
+++ b/libavcodec/truespeech.c
@@ -63,7 +63,7 @@ static av_cold int truespeech_decode_init(AVCodecContext * avctx)
     TSContext *c = avctx->priv_data;
     if (avctx->channels != 1) {
-        av_log_ask_for_sample(avctx, "Unsupported channel count: %d\n", avctx->channels);
+        avpriv_request_sample(avctx, "Channel count %d", avctx->channels);
         return AVERROR_PATCHWELCOME;
     }

--- a/libavcodec/utvideodec.c
+++ b/libavcodec/utvideodec.c
@@ -372,7 +372,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     c->frame_pred = (c->frame_info >> 8) & 3;
     if (c->frame_pred == PRED_GRADIENT) {
-        av_log_ask_for_sample(avctx, "Frame uses gradient prediction\n");
+        avpriv_request_sample(avctx, "Frame with gradient prediction");
         return AVERROR_PATCHWELCOME;
     }
@@ -487,7 +487,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
     c->flags = AV_RL32(avctx->extradata + 12);
     if (c->frame_info_size != 4)
-        av_log_ask_for_sample(avctx, "Frame info is not 4 bytes\n");
+        avpriv_request_sample(avctx, "Frame info not 4 bytes");
     av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08X\n", c->flags);
     c->slices = (c->flags >> 24) + 1;
     c->compression = c->flags & 1;

--- a/libavcodec/v210x.c
+++ b/libavcodec/v210x.c
@@ -52,7 +52,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     }
     if (avpkt->size > avctx->width * avctx->height * 8 / 3) {
-        av_log_ask_for_sample(avctx, "Probably padded data\n");
+        avpriv_request_sample(avctx, "(Probably) padded data");
     }
     if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)

--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -4863,7 +4863,7 @@ static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
     for (sprite = 0; sprite <= v->two_sprites; sprite++) {
         vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
         if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
-            av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
+            avpriv_request_sample(avctx, "Non-zero rotation coefficients");
         av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
         for (i = 0; i < 7; i++)
             av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",

--- a/libavcodec/vorbis_parser.c
+++ b/libavcodec/vorbis_parser.c
@@ -141,8 +141,9 @@ static int parse_setup_header(AVCodecContext *avctx, VorbisParseContext *s,
      * we may need to approach this the long way and parse the whole Setup
      * header, but I hope very much that it never comes to that. */
     if (last_mode_count > 2) {
-        av_log_ask_for_sample(avctx, "%d modes found. This is either a false "
-                              "positive or a sample from an unknown encoder.\n",
+        avpriv_request_sample(avctx,
+                              "%d modes (either a false positive or a "
+                              "sample from an unknown encoder)",
                               last_mode_count);
     }
     /* We're limiting the mode count to 63 so that we know that the previous

--- a/libavcodec/wmalosslessdec.c
+++ b/libavcodec/wmalosslessdec.c
@@ -202,7 +202,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
         av_dlog(avctx, "\n");
     } else {
-        av_log_ask_for_sample(avctx, "Unsupported extradata size\n");
+        avpriv_request_sample(avctx, "Unsupported extradata size");
         return AVERROR_PATCHWELCOME;
     }
@@ -256,7 +256,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
                s->num_channels);
         return AVERROR_INVALIDDATA;
     } else if (s->num_channels > WMALL_MAX_CHANNELS) {
-        av_log_ask_for_sample(avctx, "unsupported number of channels\n");
+        avpriv_request_sample(avctx,
+                              "More than %d channels", WMALL_MAX_CHANNELS);
         return AVERROR_PATCHWELCOME;
     }
@@ -925,8 +926,8 @@ static int decode_subframe(WmallDecodeCtx *s)
             s->do_lpc = get_bits1(&s->gb);
             if (s->do_lpc) {
                 decode_lpc(s);
-                av_log_ask_for_sample(s->avctx, "Inverse LPC filter not "
-                                      "implemented. Expect wrong output.\n");
+                avpriv_request_sample(s->avctx, "Expect wrong output since "
+                                      "inverse LPC filter");
             }
         } else
             s->do_lpc = 0;
@@ -1135,7 +1136,7 @@ static void save_bits(WmallDecodeCtx *s, GetBitContext* gb, int len,
     buflen = (s->num_saved_bits + len + 8) >> 3;
     if (len <= 0 || buflen > MAX_FRAMESIZE) {
-        av_log_ask_for_sample(s->avctx, "input buffer too small\n");
+        avpriv_request_sample(s->avctx, "Too small input buffer");
         s->packet_loss = 1;
         return;
     }

--- a/libavcodec/wmaprodec.c
+++ b/libavcodec/wmaprodec.c
@@ -299,7 +299,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
         av_dlog(avctx, "\n");
     } else {
-        av_log_ask_for_sample(avctx, "Unknown extradata size\n");
+        avpriv_request_sample(avctx, "Unknown extradata size");
         return AVERROR_PATCHWELCOME;
     }
@@ -352,7 +352,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
                avctx->channels);
         return AVERROR_INVALIDDATA;
     } else if (avctx->channels > WMAPRO_MAX_CHANNELS) {
-        av_log_ask_for_sample(avctx, "unsupported number of channels\n");
+        avpriv_request_sample(avctx,
+                              "More than %d channels", WMAPRO_MAX_CHANNELS);
         return AVERROR_PATCHWELCOME;
     }
@@ -686,8 +687,8 @@ static int decode_channel_transform(WMAProDecodeCtx* s)
         int remaining_channels = s->channels_for_cur_subframe;
         if (get_bits1(&s->gb)) {
-            av_log_ask_for_sample(s->avctx,
-                                  "unsupported channel transform bit\n");
+            avpriv_request_sample(s->avctx,
+                                  "Channel transform bit");
             return AVERROR_PATCHWELCOME;
         }
@@ -723,8 +724,8 @@ static int decode_channel_transform(WMAProDecodeCtx* s)
             if (chgroup->num_channels == 2) {
                 if (get_bits1(&s->gb)) {
                     if (get_bits1(&s->gb)) {
-                        av_log_ask_for_sample(s->avctx,
-                                              "unsupported channel transform type\n");
+                        avpriv_request_sample(s->avctx,
+                                              "Unknown channel transform type");
                     }
                 } else {
                     chgroup->transform = 1;
@@ -749,8 +750,8 @@ static int decode_channel_transform(WMAProDecodeCtx* s)
             } else {
                 /** FIXME: more than 6 coupled channels not supported */
                 if (chgroup->num_channels > 6) {
-                    av_log_ask_for_sample(s->avctx,
-                                          "coupled channels > 6\n");
+                    avpriv_request_sample(s->avctx,
+                                          "Coupled channels > 6");
                 } else {
                     memcpy(chgroup->decorrelation_matrix,
                            default_decorrelation[chgroup->num_channels],
@@ -1157,7 +1158,7 @@ static int decode_subframe(WMAProDecodeCtx *s)
     /** no idea for what the following bit is used */
     if (get_bits1(&s->gb)) {
-        av_log_ask_for_sample(s->avctx, "reserved bit set\n");
+        avpriv_request_sample(s->avctx, "Reserved bit");
         return AVERROR_PATCHWELCOME;
     }
@@ -1463,7 +1464,7 @@ static void save_bits(WMAProDecodeCtx *s, GetBitContext* gb, int len,
     buflen = (put_bits_count(&s->pb) + len + 8) >> 3;
     if (len <= 0 || buflen > MAX_FRAMESIZE) {
-        av_log_ask_for_sample(s->avctx, "input buffer too small\n");
+        avpriv_request_sample(s->avctx, "Too small input buffer");
         s->packet_loss = 1;
         return;
     }

--- a/libavcodec/wnv1.c
+++ b/libavcodec/wnv1.c
@@ -96,12 +96,14 @@ static int decode_frame(AVCodecContext *avctx,
     else {
         l->shift = 8 - (buf[2] >> 4);
         if (l->shift > 4) {
-            av_log_ask_for_sample(avctx, "Unknown WNV1 frame header value %i\n",
+            avpriv_request_sample(avctx,
+                                  "Unknown WNV1 frame header value %i",
                                   buf[2] >> 4);
             l->shift = 4;
         }
         if (l->shift < 1) {
-            av_log_ask_for_sample(avctx, "Unknown WNV1 frame header value %i\n",
+            avpriv_request_sample(avctx,
+                                  "Unknown WNV1 frame header value %i",
                                   buf[2] >> 4);
             l->shift = 1;
         }

--- a/libavcodec/xwddec.c
+++ b/libavcodec/xwddec.c
@@ -92,7 +92,7 @@ static int xwd_decode_frame(AVCodecContext *avctx, void *data,
     }
     if (xoffset) {
-        av_log_ask_for_sample(avctx, "unsupported xoffset %d\n", xoffset);
+        avpriv_request_sample(avctx, "xoffset %d", xoffset);
         return AVERROR_PATCHWELCOME;
     }
@@ -195,7 +195,9 @@ static int xwd_decode_frame(AVCodecContext *avctx, void *data,
     }
     if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
-        av_log_ask_for_sample(avctx, "unknown file: bpp %d, pixdepth %d, vclass %d\n", bpp, pixdepth, vclass);
+        avpriv_request_sample(avctx,
+                              "Unknown file: bpp %d, pixdepth %d, vclass %d",
+                              bpp, pixdepth, vclass);
         return AVERROR_PATCHWELCOME;
     }

--- a/libavcodec/zmbv.c
+++ b/libavcodec/zmbv.c
@@ -429,18 +429,15 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
            "Flags=%X ver=%i.%i comp=%i fmt=%i blk=%ix%i\n",
            c->flags,hi_ver,lo_ver,c->comp,c->fmt,c->bw,c->bh);
     if (hi_ver != 0 || lo_ver != 1) {
-        av_log_ask_for_sample(avctx, "Unsupported version %i.%i\n",
-                              hi_ver, lo_ver);
+        avpriv_request_sample(avctx, "Version %i.%i", hi_ver, lo_ver);
         return AVERROR_PATCHWELCOME;
     }
     if (c->bw == 0 || c->bh == 0) {
-        av_log_ask_for_sample(avctx, "Unsupported block size %ix%i\n",
-                              c->bw, c->bh);
+        avpriv_request_sample(avctx, "Block size %ix%i", c->bw, c->bh);
         return AVERROR_PATCHWELCOME;
     }
     if (c->comp != 0 && c->comp != 1) {
-        av_log_ask_for_sample(avctx, "Unsupported compression type %i\n",
-                              c->comp);
+        avpriv_request_sample(avctx, "Compression type %i", c->comp);
         return AVERROR_PATCHWELCOME;
     }
@@ -481,8 +478,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
         break;
     default:
         c->decode_xor = NULL;
-        av_log_ask_for_sample(avctx, "Unsupported (for now) format %i\n",
-                              c->fmt);
+        avpriv_request_sample(avctx, "Format %i", c->fmt);
         return AVERROR_PATCHWELCOME;
     }

--- a/libavformat/rsodec.c
+++ b/libavformat/rsodec.c
@@ -43,7 +43,7 @@ static int rso_read_header(AVFormatContext *s)
     codec = ff_codec_get_id(ff_codec_rso_tags, id);
     if (codec == AV_CODEC_ID_ADPCM_IMA_WAV) {
-        av_log(s, AV_LOG_ERROR, "ADPCM in RSO not implemented\n");
+        avpriv_report_missing_feature(s, "ADPCM in RSO");
         return AVERROR_PATCHWELCOME;
     }
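
The rsodec hunk above uses the sibling helper avpriv_report_missing_feature(), which
takes the same (context, printf-format, ...) arguments but only reports that the
feature is unimplemented, without asking the user to upload a sample. A minimal
sketch of the pattern as it appears in that hunk:

    if (codec == AV_CODEC_ID_ADPCM_IMA_WAV) {
        /* demuxer recognizes the codec but decoding it is not implemented */
        avpriv_report_missing_feature(s, "ADPCM in RSO");
        return AVERROR_PATCHWELCOME;
    }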
