avformat: Avoid allocation for AVStreamInternal

Do this by allocating AVStream together with the data that is
currently in AVStreamInternal; or rather: Put AVStream at the
beginning of a new structure called FFStream (which encompasses
more than just the internal fields and is a proper context in its own
right, hence the name) and remove AVStreamInternal altogether.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
commit 40bdd8cc05 (parent 9f05b3ba60)
Author: Andreas Rheinhardt
Branch: pull/368/head
 libavformat/aacdec.c            |   2
 libavformat/aadec.c             |   8
 libavformat/acm.c               |   2
 libavformat/ape.c               |   2
 libavformat/asfdec_f.c          |  19
 libavformat/asfdec_o.c          |  10
 libavformat/av1dec.c            |   6
 libavformat/avformat.h          |   8
 libavformat/avidec.c            | 138
 libavformat/bink.c              |  18
 libavformat/cafdec.c            |  24
 libavformat/cinedec.c           |   5
 libavformat/concatdec.c         |   6
 libavformat/dashenc.c           |   8
 libavformat/dhav.c              |   5
 libavformat/dtshddec.c          |   2
 libavformat/dump.c              |  15
 libavformat/electronicarts.c    |   2
 libavformat/fifo.c              |   2
 libavformat/flacdec.c           |  19
 libavformat/flic.c              |   7
 libavformat/flvdec.c            |  26
 libavformat/gxf.c               |  15
 libavformat/hdsenc.c            |   7
 libavformat/hls.c               |   2
 libavformat/ifv.c               |  17
 libavformat/img2dec.c           |   6
 libavformat/internal.h          |  19
 libavformat/ipudec.c            |   2
 libavformat/iv8.c               |   2
 libavformat/ivfdec.c            |   2
 libavformat/jvdec.c             |  29
 libavformat/lmlm4.c             |   4
 libavformat/loasdec.c           |   2
 libavformat/lxfdec.c            |   2
 libavformat/matroskadec.c       |  73
 libavformat/mgsts.c             |   2
 libavformat/mlvdec.c            |  29
 libavformat/mov.c               | 265
 libavformat/mp3dec.c            |  20
 libavformat/mpc.c               |   5
 libavformat/mpc8.c              |   5
 libavformat/mpeg.c              |   6
 libavformat/mpegts.c            |  75
 libavformat/msf.c               |   2
 libavformat/mtv.c               |   2
 libavformat/mux.c               |  34
 libavformat/mvdec.c             |   5
 libavformat/mxfdec.c            |  14
 libavformat/mxfenc.c            |  10
 libavformat/ncdec.c             |   2
 libavformat/nsvdec.c            |   9
 libavformat/nutdec.c            |   9
 libavformat/nutenc.c            |  13
 libavformat/nuv.c               |   2
 libavformat/oggparseflac.c      |   2
 libavformat/oggparseogm.c       |   7
 libavformat/oggparsetheora.c    |   2
 libavformat/oggparsevp8.c       |   2
 libavformat/omadec.c            |   2
 libavformat/pcm.c               |   2
 libavformat/pva.c               |   4
 libavformat/rawdec.c            |   8
 libavformat/rdt.c               |   4
 libavformat/rl2.c               |   9
 libavformat/rmdec.c             |   9
 libavformat/rpl.c               |   6
 libavformat/rtpdec_asf.c        |   4
 libavformat/rtsp.c              |   2
 libavformat/s337m.c             |   2
 libavformat/sbgdec.c            |  10
 libavformat/sdr2.c              |   2
 libavformat/segafilm.c          |   4
 libavformat/segment.c           |   8
 libavformat/smoothstreamingenc.c |  7
 libavformat/swfdec.c            |   4
 libavformat/takdec.c            |   2
 libavformat/tedcaptionsdec.c    |   6
 libavformat/tta.c               |   9
 libavformat/ty.c                |   4
 libavformat/utils.c             | 172
 libavformat/vocdec.c            |  10
 libavformat/vqf.c               |   2
 libavformat/wavdec.c            |  15
 libavformat/wtvdec.c            |   2
 libavformat/xvag.c              |   2
 libavformat/xwma.c              |   2
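
The substitution performed throughout the diff below follows the layout described in the commit message: the public AVStream becomes the first member of the private FFStream, so internal state is reached through a cast instead of a separately allocated AVStreamInternal. A minimal sketch of the assumed pattern (abridged field list — only members that actually appear in the hunks below are shown; the real definitions live in libavformat/internal.h):

typedef struct FFStream {
    /* The public context; must stay the first member so that an
     * AVStream pointer and an FFStream pointer refer to the same object. */
    AVStream pub;

    /* Former AVStreamInternal members (selection). */
    enum AVStreamParseType need_parsing;
    struct AVCodecContext *avctx;
    struct AVBSFContext   *bsfc;
    AVIndexEntry *index_entries;
    int           nb_index_entries;
    int64_t       cur_dts;
    int           request_probe;
    int           need_context_update;
    int           codec_info_nb_frames;
} FFStream;

static inline FFStream *ffstream(AVStream *st)
{
    return (FFStream*)st;
}

static inline const FFStream *cffstream(const AVStream *st)
{
    return (const FFStream*)st;
}

Call sites then replace st->internal->field with ffstream(st)->field, or cache FFStream *const sti = ffstream(st) when several fields are touched — exactly the mechanical substitution the hunks below perform.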

libavformat/aacdec.c
@@ -112,7 +112,7 @@ static int adts_aac_read_header(AVFormatContext *s)
  st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
  st->codecpar->codec_id = s->iformat->raw_codec_id;
- st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
+ ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL_RAW;
  ff_id3v1_read(s);
  if ((s->pb->seekable & AVIO_SEEKABLE_NORMAL) &&

libavformat/aadec.c
@@ -86,6 +86,7 @@ static int aa_read_header(AVFormatContext *s)
  AADemuxContext *c = s->priv_data;
  AVIOContext *pb = s->pb;
  AVStream *st;
+ FFStream *sti;
  int ret;
  /* parse .aa header */
@@ -178,11 +179,12 @@ static int aa_read_header(AVFormatContext *s)
  st = avformat_new_stream(s, NULL);
  if (!st)
  return AVERROR(ENOMEM);
+ sti = ffstream(st);
  st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
  if (!strcmp(codec_name, "mp332")) {
  st->codecpar->codec_id = AV_CODEC_ID_MP3;
  st->codecpar->sample_rate = 22050;
- st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
+ sti->need_parsing = AVSTREAM_PARSE_FULL_RAW;
  avpriv_set_pts_info(st, 64, 8, 32000 * TIMEPREC);
  // encoded audio frame is MP3_FRAME_SIZE bytes (+1 with padding, unlikely)
  } else if (!strcmp(codec_name, "acelp85")) {
@@ -191,7 +193,7 @@ static int aa_read_header(AVFormatContext *s)
  st->codecpar->channels = 1;
  st->codecpar->sample_rate = 8500;
  st->codecpar->bit_rate = 8500;
- st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
+ sti->need_parsing = AVSTREAM_PARSE_FULL_RAW;
  avpriv_set_pts_info(st, 64, 8, 8500 * TIMEPREC);
  } else if (!strcmp(codec_name, "acelp16")) {
  st->codecpar->codec_id = AV_CODEC_ID_SIPR;
@@ -199,7 +201,7 @@ static int aa_read_header(AVFormatContext *s)
  st->codecpar->channels = 1;
  st->codecpar->sample_rate = 16000;
  st->codecpar->bit_rate = 16000;
- st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
+ sti->need_parsing = AVSTREAM_PARSE_FULL_RAW;
  avpriv_set_pts_info(st, 64, 8, 16000 * TIMEPREC);
  }

libavformat/acm.c
@@ -54,7 +54,7 @@ static int acm_read_header(AVFormatContext *s)
  return AVERROR_INVALIDDATA;
  st->start_time = 0;
  st->duration = AV_RL32(st->codecpar->extradata + 4) / st->codecpar->channels;
- st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
+ ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL_RAW;
  avpriv_set_pts_info(st, 64, 1, st->codecpar->sample_rate);
  return 0;

libavformat/ape.c
@@ -434,7 +434,7 @@ static int ape_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
  if (index < 0)
  return -1;
- if ((ret = avio_seek(s->pb, st->internal->index_entries[index].pos, SEEK_SET)) < 0)
+ if ((ret = avio_seek(s->pb, ffstream(st)->index_entries[index].pos, SEEK_SET)) < 0)
  return ret;
  ape->currentframe = index;
  return 0;

libavformat/asfdec_f.c
@@ -299,6 +299,7 @@ static int asf_read_stream_properties(AVFormatContext *s, int64_t size)
  ASFContext *asf = s->priv_data;
  AVIOContext *pb = s->pb;
  AVStream *st;
+ FFStream *sti;
  ASFStream *asf_st;
  ff_asf_guid g;
  enum AVMediaType type;
@@ -317,6 +318,7 @@ static int asf_read_stream_properties(AVFormatContext *s, int64_t size)
  st = avformat_new_stream(s, NULL);
  if (!st)
  return AVERROR(ENOMEM);
+ sti = ffstream(st);
  avpriv_set_pts_info(st, 32, 1, 1000); /* 32 bit pts in ms */
  start_time = asf->hdr.preroll;
@@ -378,13 +380,13 @@ static int asf_read_stream_properties(AVFormatContext *s, int64_t size)
  if (is_dvr_ms_audio) {
  // codec_id and codec_tag are unreliable in dvr_ms
  // files. Set them later by probing stream.
- st->internal->request_probe = 1;
+ sti->request_probe = 1;
  st->codecpar->codec_tag = 0;
  }
  if (st->codecpar->codec_id == AV_CODEC_ID_AAC)
- st->internal->need_parsing = AVSTREAM_PARSE_NONE;
+ sti->need_parsing = AVSTREAM_PARSE_NONE;
  else
- st->internal->need_parsing = AVSTREAM_PARSE_FULL;
+ sti->need_parsing = AVSTREAM_PARSE_FULL;
  /* We have to init the frame size at some point .... */
  pos2 = avio_tell(pb);
  if (size >= (pos2 + 8 - pos1 + 24)) {
@@ -443,7 +445,7 @@ static int asf_read_stream_properties(AVFormatContext *s, int64_t size)
  st->codecpar->codec_tag = tag1;
  st->codecpar->codec_id = ff_codec_get_id(ff_codec_bmp_tags, tag1);
  if (tag1 == MKTAG('D', 'V', 'R', ' ')) {
- st->internal->need_parsing = AVSTREAM_PARSE_FULL;
+ sti->need_parsing = AVSTREAM_PARSE_FULL;
  /* issue658 contains wrong w/h and MS even puts a fake seq header
  * with wrong w/h in extradata while a correct one is in the stream.
  * maximum lameness */
@@ -453,9 +455,9 @@ static int asf_read_stream_properties(AVFormatContext *s, int64_t size)
  st->codecpar->extradata_size = 0;
  }
  if (st->codecpar->codec_id == AV_CODEC_ID_H264)
- st->internal->need_parsing = AVSTREAM_PARSE_FULL_ONCE;
+ sti->need_parsing = AVSTREAM_PARSE_FULL_ONCE;
  if (st->codecpar->codec_id == AV_CODEC_ID_MPEG4)
- st->internal->need_parsing = AVSTREAM_PARSE_FULL_ONCE;
+ sti->need_parsing = AVSTREAM_PARSE_FULL_ONCE;
  }
  pos2 = avio_tell(pb);
  avio_skip(pb, size - (pos2 - pos1 + 24));
@@ -1557,6 +1559,7 @@ static int asf_read_seek(AVFormatContext *s, int stream_index,
  {
  ASFContext *asf = s->priv_data;
  AVStream *st = s->streams[stream_index];
+ FFStream *const sti = ffstream(st);
  int ret = 0;
  if (s->packet_size <= 0)
@@ -1584,11 +1587,11 @@ static int asf_read_seek(AVFormatContext *s, int stream_index,
  asf->index_read = -1;
  }
- if (asf->index_read > 0 && st->internal->index_entries) {
+ if (asf->index_read > 0 && sti->index_entries) {
  int index = av_index_search_timestamp(st, pts, flags);
  if (index >= 0) {
  /* find the position */
- uint64_t pos = st->internal->index_entries[index].pos;
+ uint64_t pos = sti->index_entries[index].pos;
  /* do the seek */
  av_log(s, AV_LOG_DEBUG, "SEEKTO: %"PRId64"\n", pos);

libavformat/asfdec_o.c
@@ -1513,13 +1513,15 @@ static int asf_read_seek(AVFormatContext *s, int stream_index,
  int64_t timestamp, int flags)
  {
  ASFContext *asf = s->priv_data;
+ AVStream *const st = s->streams[stream_index];
+ FFStream *const sti = ffstream(st);
  int idx, ret;
- if (s->streams[stream_index]->internal->nb_index_entries && asf->is_simple_index) {
- idx = av_index_search_timestamp(s->streams[stream_index], timestamp, flags);
- if (idx < 0 || idx >= s->streams[stream_index]->internal->nb_index_entries)
+ if (sti->nb_index_entries && asf->is_simple_index) {
+ idx = av_index_search_timestamp(st, timestamp, flags);
+ if (idx < 0 || idx >= sti->nb_index_entries)
  return AVERROR_INVALIDDATA;
- avio_seek(s->pb, s->streams[stream_index]->internal->index_entries[idx].pos, SEEK_SET);
+ avio_seek(s->pb, sti->index_entries[idx].pos, SEEK_SET);
  } else {
  if ((ret = ff_seek_frame_binary(s, stream_index, timestamp, flags)) < 0)
  return ret;

libavformat/av1dec.c
@@ -61,6 +61,7 @@ static int av1_read_header(AVFormatContext *s)
  AV1DemuxContext *const c = s->priv_data;
  const AVBitStreamFilter *filter = av_bsf_get_by_name("av1_frame_merge");
  AVStream *st;
+ FFStream *sti;
  int ret;
  if (!filter) {
@@ -72,12 +73,13 @@ static int av1_read_header(AVFormatContext *s)
  st = avformat_new_stream(s, NULL);
  if (!st)
  return AVERROR(ENOMEM);
+ sti = ffstream(st);
  st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
  st->codecpar->codec_id = AV_CODEC_ID_AV1;
- st->internal->need_parsing = AVSTREAM_PARSE_HEADERS;
- st->internal->avctx->framerate = c->framerate;
+ sti->need_parsing = AVSTREAM_PARSE_HEADERS;
+ sti->avctx->framerate = c->framerate;
  // taken from rawvideo demuxers
  avpriv_set_pts_info(st, 64, 1, 1200000);

libavformat/avformat.h
@@ -821,8 +821,6 @@ typedef struct AVIndexEntry {
  */
  #define AV_DISPOSITION_TIMED_THUMBNAILS 0x0800
- typedef struct AVStreamInternal AVStreamInternal;
  /**
  * To specify text track kind (different from subtitles default).
  */
@@ -1003,12 +1001,6 @@ typedef struct AVStream {
  *
  */
  int pts_wrap_bits;
- /**
- * An opaque field for libavformat internal usage.
- * Must not be accessed in any way by callers.
- */
- AVStreamInternal *internal;
  } AVStream;
  struct AVCodecParserContext *av_stream_get_parser(const AVStream *s);
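
With the internal pointer gone from the public struct, creating a stream no longer needs a second allocation: one block holds both views. A self-contained toy example of the layout trick (hypothetical simplified structs, not the real libavformat definitions):

#include <stdlib.h>

typedef struct AVStream { int index; } AVStream;                           /* public view  */
typedef struct FFStream { AVStream pub; int nb_index_entries; } FFStream;  /* private view */

static FFStream *ffstream(AVStream *st) { return (FFStream*)st; }

int main(void)
{
    /* Previously: one allocation for AVStream plus one for AVStreamInternal.
     * Now: a single allocation; the public struct is simply the first member. */
    FFStream *sti = calloc(1, sizeof(*sti));
    if (!sti)
        return 1;
    AVStream *st = &sti->pub;           /* pointer handed to users of the public API   */
    ffstream(st)->nb_index_entries = 0; /* internal code casts back to reach the rest  */
    free(sti);
    return 0;
}

The only constraint the design imposes is that the public member stays first, so the pointer cast in ffstream() remains valid.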

libavformat/avidec.c
@@ -279,8 +279,9 @@ static void clean_index(AVFormatContext *s)
  for (i = 0; i < s->nb_streams; i++) {
  AVStream *st = s->streams[i];
+ FFStream *const sti = ffstream(st);
  AVIStream *ast = st->priv_data;
- int n = st->internal->nb_index_entries;
+ int n = sti->nb_index_entries;
  int max = ast->sample_size;
  int64_t pos, size, ts;
@@ -290,9 +291,9 @@ static void clean_index(AVFormatContext *s)
  while (max < 1024)
  max += max;
- pos = st->internal->index_entries[0].pos;
- size = st->internal->index_entries[0].size;
- ts = st->internal->index_entries[0].timestamp;
+ pos = sti->index_entries[0].pos;
+ size = sti->index_entries[0].size;
+ ts = sti->index_entries[0].timestamp;
  for (j = 0; j < size; j += max)
  av_add_index_entry(st, pos + j, ts + j, FFMIN(max, size - j), 0,
@@ -439,14 +440,14 @@ static int calculate_bitrate(AVFormatContext *s)
  for (i = 0; i<s->nb_streams; i++) {
  int64_t len = 0;
- AVStream *st = s->streams[i];
- if (!st->internal->nb_index_entries)
+ FFStream *const sti = ffstream(s->streams[i]);
+ if (!sti->nb_index_entries)
  continue;
- for (j = 0; j < st->internal->nb_index_entries; j++)
- len += st->internal->index_entries[j].size;
- maxpos = FFMAX(maxpos, st->internal->index_entries[j-1].pos);
+ for (j = 0; j < sti->nb_index_entries; j++)
+ len += sti->index_entries[j].size;
+ maxpos = FFMAX(maxpos, sti->index_entries[j-1].pos);
  lensum += len;
  }
  if (maxpos < av_rescale(avi->io_fsize, 9, 10)) // index does not cover the whole file
@@ -457,15 +458,16 @@ static int calculate_bitrate(AVFormatContext *s)
  for (i = 0; i<s->nb_streams; i++) {
  int64_t len = 0;
  AVStream *st = s->streams[i];
+ FFStream *const sti = ffstream(st);
  int64_t duration;
  int64_t bitrate;
- for (j = 0; j < st->internal->nb_index_entries; j++)
- len += st->internal->index_entries[j].size;
- if (st->internal->nb_index_entries < 2 || st->codecpar->bit_rate > 0)
+ for (j = 0; j < sti->nb_index_entries; j++)
+ len += sti->index_entries[j].size;
+ if (sti->nb_index_entries < 2 || st->codecpar->bit_rate > 0)
  continue;
- duration = st->internal->index_entries[j-1].timestamp - st->internal->index_entries[0].timestamp;
+ duration = sti->index_entries[j-1].timestamp - sti->index_entries[0].timestamp;
  bitrate = av_rescale(8*len, st->time_base.den, duration * st->time_base.num);
  if (bitrate > 0) {
  st->codecpar->bit_rate = bitrate;
@@ -736,10 +738,12 @@ static int avi_read_header(AVFormatContext *s)
  avio_skip(pb, size);
  } else {
  uint64_t cur_pos = avio_tell(pb);
+ FFStream *sti;
  unsigned esize;
  if (cur_pos < list_end)
  size = FFMIN(size, list_end - cur_pos);
  st = s->streams[stream_index];
+ sti = ffstream(st);
  if (st->codecpar->codec_type != AVMEDIA_TYPE_UNKNOWN) {
  avio_skip(pb, size);
  break;
@@ -823,19 +827,19 @@ static int avi_read_header(AVFormatContext *s)
  /* This is needed to get the pict type which is necessary
  * for generating correct pts. */
- st->internal->need_parsing = AVSTREAM_PARSE_HEADERS;
+ sti->need_parsing = AVSTREAM_PARSE_HEADERS;
  if (st->codecpar->codec_id == AV_CODEC_ID_MPEG4 &&
  ast->handler == MKTAG('X', 'V', 'I', 'D'))
  st->codecpar->codec_tag = MKTAG('X', 'V', 'I', 'D');
  if (st->codecpar->codec_tag == MKTAG('V', 'S', 'S', 'H'))
- st->internal->need_parsing = AVSTREAM_PARSE_FULL;
+ sti->need_parsing = AVSTREAM_PARSE_FULL;
  if (st->codecpar->codec_id == AV_CODEC_ID_RV40)
- st->internal->need_parsing = AVSTREAM_PARSE_NONE;
+ sti->need_parsing = AVSTREAM_PARSE_NONE;
  if (st->codecpar->codec_id == AV_CODEC_ID_HEVC &&
  st->codecpar->codec_tag == MKTAG('H', '2', '6', '5'))
- st->internal->need_parsing = AVSTREAM_PARSE_FULL;
+ sti->need_parsing = AVSTREAM_PARSE_FULL;
  if (st->codecpar->codec_id == AV_CODEC_ID_AVRN &&
  st->codecpar->codec_tag == MKTAG('A', 'V', 'R', 'n') &&
@@ -879,16 +883,16 @@ static int avi_read_header(AVFormatContext *s)
  avio_skip(pb, 1);
  /* Force parsing as several audio frames can be in
  * one packet and timestamps refer to packet start. */
- st->internal->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
+ sti->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
  /* ADTS header is in extradata, AAC without header must be
  * stored as exact frames. Parser not needed and it will
  * fail. */
  if (st->codecpar->codec_id == AV_CODEC_ID_AAC &&
  st->codecpar->extradata_size)
- st->internal->need_parsing = AVSTREAM_PARSE_NONE;
+ sti->need_parsing = AVSTREAM_PARSE_NONE;
  // The flac parser does not work with AVSTREAM_PARSE_TIMESTAMPS
  if (st->codecpar->codec_id == AV_CODEC_ID_FLAC)
- st->internal->need_parsing = AVSTREAM_PARSE_NONE;
+ sti->need_parsing = AVSTREAM_PARSE_NONE;
  /* AVI files with Xan DPCM audio (wrongly) declare PCM
  * audio in the header but have Axan as stream_code_tag. */
  if (ast->handler == AV_RL32("Axan")) {
@@ -915,7 +919,7 @@ static int avi_read_header(AVFormatContext *s)
  break;
  case AVMEDIA_TYPE_SUBTITLE:
  st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
- st->internal->request_probe= 1;
+ sti->request_probe = 1;
  avio_skip(pb, size);
  break;
  default:
@@ -1050,12 +1054,12 @@ end_of_header:
  AVStream *st = s->streams[i];
  if ( st->codecpar->codec_id == AV_CODEC_ID_MPEG1VIDEO
  || st->codecpar->codec_id == AV_CODEC_ID_MPEG2VIDEO)
- st->internal->need_parsing = AVSTREAM_PARSE_FULL;
+ ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL;
  }
  for (i = 0; i < s->nb_streams; i++) {
  AVStream *st = s->streams[i];
- if (st->internal->nb_index_entries)
+ if (ffstream(st)->nb_index_entries)
  break;
  }
  // DV-in-AVI cannot be non-interleaved, if set this must be
@@ -1341,9 +1345,10 @@ start_sync:
  ast->remaining = size;
  if (size) {
+ FFStream *const sti = ffstream(st);
  uint64_t pos = avio_tell(pb) - 8;
- if (!st->internal->index_entries || !st->internal->nb_index_entries ||
- st->internal->index_entries[st->internal->nb_index_entries - 1].pos < pos) {
+ if (!sti->index_entries || !sti->nb_index_entries ||
+ sti->index_entries[sti->nb_index_entries - 1].pos < pos) {
  av_add_index_entry(st, pos, ast->frame_offset, size,
  0, AVINDEX_KEYFRAME);
  }
@@ -1363,20 +1368,22 @@ static int ni_prepare_read(AVFormatContext *s)
  AVIContext *avi = s->priv_data;
  int best_stream_index = 0;
  AVStream *best_st = NULL;
+ FFStream *best_sti;
  AVIStream *best_ast;
  int64_t best_ts = INT64_MAX;
  int i;
  for (i = 0; i < s->nb_streams; i++) {
  AVStream *st = s->streams[i];
+ FFStream *const sti = ffstream(st);
  AVIStream *ast = st->priv_data;
  int64_t ts = ast->frame_offset;
  int64_t last_ts;
- if (!st->internal->nb_index_entries)
+ if (!sti->nb_index_entries)
  continue;
- last_ts = st->internal->index_entries[st->internal->nb_index_entries - 1].timestamp;
+ last_ts = sti->index_entries[sti->nb_index_entries - 1].timestamp;
  if (!ast->remaining && ts > last_ts)
  continue;
@@ -1395,6 +1402,7 @@ static int ni_prepare_read(AVFormatContext *s)
  if (!best_st)
  return AVERROR_EOF;
+ best_sti = ffstream(best_st);
  best_ast = best_st->priv_data;
  best_ts = best_ast->frame_offset;
  if (best_ast->remaining) {
@@ -1405,11 +1413,11 @@ static int ni_prepare_read(AVFormatContext *s)
  } else {
  i = av_index_search_timestamp(best_st, best_ts, AVSEEK_FLAG_ANY);
  if (i >= 0)
- best_ast->frame_offset = best_st->internal->index_entries[i].timestamp;
+ best_ast->frame_offset = best_sti->index_entries[i].timestamp;
  }
  if (i >= 0) {
- int64_t pos = best_st->internal->index_entries[i].pos;
+ int64_t pos = best_sti->index_entries[i].pos;
  pos += best_ast->packet_size - best_ast->remaining;
  if (avio_seek(s->pb, pos + 8, SEEK_SET) < 0)
  return AVERROR_EOF;
@@ -1419,7 +1427,7 @@ static int ni_prepare_read(AVFormatContext *s)
  avi->stream_index = best_stream_index;
  if (!best_ast->remaining)
  best_ast->packet_size =
- best_ast->remaining = best_st->internal->index_entries[i].size;
+ best_ast->remaining = best_sti->index_entries[i].size;
  }
  else
  return AVERROR_EOF;
@@ -1450,6 +1458,7 @@ static int avi_read_packet(AVFormatContext *s, AVPacket *pkt)
  resync:
  if (avi->stream_index >= 0) {
  AVStream *st = s->streams[avi->stream_index];
+ FFStream *const sti = ffstream(st);
  AVIStream *ast = st->priv_data;
  int dv_demux = CONFIG_DV_DEMUXER && avi->dv_demux;
  int size, err;
@@ -1508,15 +1517,15 @@ resync:
  pkt->dts /= ast->sample_size;
  pkt->stream_index = avi->stream_index;
- if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && st->internal->index_entries) {
+ if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && sti->index_entries) {
  AVIndexEntry *e;
  int index;
  index = av_index_search_timestamp(st, ast->frame_offset, AVSEEK_FLAG_ANY);
- e = &st->internal->index_entries[index];
+ e = &sti->index_entries[index];
  if (index >= 0 && e->timestamp == ast->frame_offset) {
- if (index == st->internal->nb_index_entries-1) {
+ if (index == sti->nb_index_entries-1) {
  int key=1;
  uint32_t state=-1;
  if (st->codecpar->codec_id == AV_CODEC_ID_MPEG4) {
@@ -1552,7 +1561,7 @@ resync:
  }
  ast->seek_pos= 0;
- if (!avi->non_interleaved && st->internal->nb_index_entries>1 && avi->index_loaded>1) {
+ if (!avi->non_interleaved && sti->nb_index_entries > 1 && avi->index_loaded > 1) {
  int64_t dts= av_rescale_q(pkt->dts, st->time_base, AV_TIME_BASE_Q);
  if (avi->dts_max < dts) {
@@ -1651,9 +1660,9 @@ static int avi_read_idx1(AVFormatContext *s, int size)
  }
  if (!anykey) {
  for (index = 0; index < s->nb_streams; index++) {
- st = s->streams[index];
- if (st->internal->nb_index_entries)
- st->internal->index_entries[0].flags |= AVINDEX_KEYFRAME;
+ FFStream *const sti = ffstream(s->streams[index]);
+ if (sti->nb_index_entries)
+ sti->index_entries[0].flags |= AVINDEX_KEYFRAME;
  }
  }
  return 0;
@@ -1678,25 +1687,27 @@ static int check_stream_max_drift(AVFormatContext *s)
  for (i = 0; i < s->nb_streams; i++) {
  AVStream *st = s->streams[i];
  AVIStream *ast = st->priv_data;
- int n = st->internal->nb_index_entries;
- while (idx[i] < n && st->internal->index_entries[idx[i]].pos < pos)
+ FFStream *const sti = ffstream(st);
+ int n = sti->nb_index_entries;
+ while (idx[i] < n && sti->index_entries[idx[i]].pos < pos)
  idx[i]++;
  if (idx[i] < n) {
  int64_t dts;
- dts = av_rescale_q(st->internal->index_entries[idx[i]].timestamp /
+ dts = av_rescale_q(sti->index_entries[idx[i]].timestamp /
  FFMAX(ast->sample_size, 1),
  st->time_base, AV_TIME_BASE_Q);
  min_dts = FFMIN(min_dts, dts);
- min_pos = FFMIN(min_pos, st->internal->index_entries[idx[i]].pos);
+ min_pos = FFMIN(min_pos, sti->index_entries[idx[i]].pos);
  }
  }
  for (i = 0; i < s->nb_streams; i++) {
  AVStream *st = s->streams[i];
+ FFStream *const sti = ffstream(st);
  AVIStream *ast = st->priv_data;
  if (idx[i] && min_dts != INT64_MAX / 2) {
  int64_t dts, delta_dts;
- dts = av_rescale_q(st->internal->index_entries[idx[i] - 1].timestamp /
+ dts = av_rescale_q(sti->index_entries[idx[i] - 1].timestamp /
  FFMAX(ast->sample_size, 1),
  st->time_base, AV_TIME_BASE_Q);
  delta_dts = av_sat_sub64(dts, min_dts);
@@ -1726,30 +1737,31 @@ static int guess_ni_flag(AVFormatContext *s)
  for (i = 0; i < s->nb_streams; i++) {
  AVStream *st = s->streams[i];
- int n = st->internal->nb_index_entries;
+ FFStream *const sti = ffstream(st);
+ int n = sti->nb_index_entries;
  unsigned int size;
  if (n <= 0)
  continue;
  if (n >= 2) {
- int64_t pos = st->internal->index_entries[0].pos;
+ int64_t pos = sti->index_entries[0].pos;
  unsigned tag[2];
  avio_seek(s->pb, pos, SEEK_SET);
  tag[0] = avio_r8(s->pb);
  tag[1] = avio_r8(s->pb);
  avio_rl16(s->pb);
  size = avio_rl32(s->pb);
- if (get_stream_idx(tag) == i && pos + size > st->internal->index_entries[1].pos)
+ if (get_stream_idx(tag) == i && pos + size > sti->index_entries[1].pos)
  last_start = INT64_MAX;
- if (get_stream_idx(tag) == i && size == st->internal->index_entries[0].size + 8)
+ if (get_stream_idx(tag) == i && size == sti->index_entries[0].size + 8)
  last_start = INT64_MAX;
  }
- if (st->internal->index_entries[0].pos > last_start)
- last_start = st->internal->index_entries[0].pos;
- if (st->internal->index_entries[n - 1].pos < first_end)
- first_end = st->internal->index_entries[n - 1].pos;
+ if (sti->index_entries[0].pos > last_start)
+ last_start = sti->index_entries[0].pos;
+ if (sti->index_entries[n - 1].pos < first_end)
+ first_end = sti->index_entries[n - 1].pos;
  }
  avio_seek(s->pb, oldpos, SEEK_SET);
@@ -1817,6 +1829,7 @@ static int avi_read_seek(AVFormatContext *s, int stream_index,
  {
  AVIContext *avi = s->priv_data;
  AVStream *st;
+ FFStream *sti;
  int i, index;
  int64_t pos, pos_min;
  AVIStream *ast;
@@ -1835,25 +1848,26 @@ static int avi_read_seek(AVFormatContext *s, int stream_index,
  av_assert0(stream_index >= 0);
  st = s->streams[stream_index];
+ sti = ffstream(st);
  ast = st->priv_data;
  index = av_index_search_timestamp(st,
  timestamp * FFMAX(ast->sample_size, 1),
  flags);
  if (index < 0) {
- if (st->internal->nb_index_entries > 0)
+ if (sti->nb_index_entries > 0)
  av_log(s, AV_LOG_DEBUG, "Failed to find timestamp %"PRId64 " in index %"PRId64 " .. %"PRId64 "\n",
  timestamp * FFMAX(ast->sample_size, 1),
- st->internal->index_entries[0].timestamp,
- st->internal->index_entries[st->internal->nb_index_entries - 1].timestamp);
+ sti->index_entries[0].timestamp,
+ sti->index_entries[sti->nb_index_entries - 1].timestamp);
  return AVERROR_INVALIDDATA;
  }
  /* find the position */
- pos = st->internal->index_entries[index].pos;
- timestamp = st->internal->index_entries[index].timestamp / FFMAX(ast->sample_size, 1);
+ pos = sti->index_entries[index].pos;
+ timestamp = sti->index_entries[index].timestamp / FFMAX(ast->sample_size, 1);
  av_log(s, AV_LOG_TRACE, "XX %"PRId64" %d %"PRId64"\n",
- timestamp, index, st->internal->index_entries[index].timestamp);
+ timestamp, index, sti->index_entries[index].timestamp);
  if (CONFIG_DV_DEMUXER && avi->dv_demux) {
  /* One and only one real stream for DV in AVI, and it has video */
@@ -1874,6 +1888,7 @@ static int avi_read_seek(AVFormatContext *s, int stream_index,
  pos_min = pos;
  for (i = 0; i < s->nb_streams; i++) {
  AVStream *st2 = s->streams[i];
+ FFStream *const sti2 = ffstream(st2);
  AVIStream *ast2 = st2->priv_data;
  ast2->packet_size =
@@ -1884,7 +1899,7 @@ static int avi_read_seek(AVFormatContext *s, int stream_index,
  continue;
  }
- if (st2->internal->nb_index_entries <= 0)
+ if (sti2->nb_index_entries <= 0)
  continue;
  // av_assert1(st2->codecpar->block_align);
@@ -1898,14 +1913,15 @@ static int avi_read_seek(AVFormatContext *s, int stream_index,
  (st2->codecpar->codec_type != AVMEDIA_TYPE_VIDEO ? AVSEEK_FLAG_ANY : 0));
  if (index < 0)
  index = 0;
- ast2->seek_pos = st2->internal->index_entries[index].pos;
+ ast2->seek_pos = sti2->index_entries[index].pos;
  pos_min = FFMIN(pos_min,ast2->seek_pos);
  }
  for (i = 0; i < s->nb_streams; i++) {
  AVStream *st2 = s->streams[i];
+ FFStream *const sti2 = ffstream(st2);
  AVIStream *ast2 = st2->priv_data;
- if (ast2->sub_ctx || st2->internal->nb_index_entries <= 0)
+ if (ast2->sub_ctx || sti2->nb_index_entries <= 0)
  continue;
  index = av_index_search_timestamp(
@@ -1914,9 +1930,9 @@ static int avi_read_seek(AVFormatContext *s, int stream_index,
  flags | AVSEEK_FLAG_BACKWARD | (st2->codecpar->codec_type != AVMEDIA_TYPE_VIDEO ? AVSEEK_FLAG_ANY : 0));
  if (index < 0)
  index = 0;
- while (!avi->non_interleaved && index>0 && st2->internal->index_entries[index-1].pos >= pos_min)
+ while (!avi->non_interleaved && index > 0 && sti2->index_entries[index-1].pos >= pos_min)
  index--;
- ast2->frame_offset = st2->internal->index_entries[index].timestamp;
+ ast2->frame_offset = sti2->index_entries[index].timestamp;
  }
  /* do the seek */

libavformat/bink.c
@@ -87,7 +87,8 @@ static int read_header(AVFormatContext *s)
  BinkDemuxContext *bink = s->priv_data;
  AVIOContext *pb = s->pb;
  uint32_t fps_num, fps_den;
- AVStream *vst, *ast;
+ AVStream *const vst = avformat_new_stream(s, NULL);
+ FFStream *const vsti = ffstream(vst);
  unsigned int i;
  uint32_t pos, next_pos;
  uint16_t flags;
@@ -97,7 +98,6 @@ static int read_header(AVFormatContext *s)
  uint32_t signature;
  uint8_t revision;
- vst = avformat_new_stream(s, NULL);
  if (!vst)
  return AVERROR(ENOMEM);
@@ -175,7 +175,7 @@ static int read_header(AVFormatContext *s)
  avio_skip(pb, 4 * bink->num_audio_tracks); /* max decoded size */
  for (i = 0; i < bink->num_audio_tracks; i++) {
- ast = avformat_new_stream(s, NULL);
+ AVStream *const ast = avformat_new_stream(s, NULL);
  if (!ast)
  return AVERROR(ENOMEM);
  ast->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
@@ -225,8 +225,8 @@ static int read_header(AVFormatContext *s)
  return ret;
  }
- if (vst->internal->index_entries)
- avio_seek(pb, vst->internal->index_entries[0].pos + bink->smush_size, SEEK_SET);
+ if (vsti->index_entries)
+ avio_seek(pb, vsti->index_entries[0].pos + bink->smush_size, SEEK_SET);
  else
  avio_skip(pb, 4);
@@ -243,6 +243,7 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt)
  if (bink->current_track < 0) {
  int index_entry;
  AVStream *st = s->streams[0]; // stream 0 is video stream with index
+ FFStream *const sti = ffstream(st);
  if (bink->video_pts >= st->duration)
  return AVERROR_EOF;
@@ -256,8 +257,8 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt)
  return AVERROR(EIO);
  }
- bink->remain_packet_size = st->internal->index_entries[index_entry].size;
- bink->flags = st->internal->index_entries[index_entry].flags;
+ bink->remain_packet_size = sti->index_entries[index_entry].size;
+ bink->flags = sti->index_entries[index_entry].flags;
  bink->current_track = 0;
  }
@@ -307,13 +308,14 @@ static int read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, in
  {
  BinkDemuxContext *bink = s->priv_data;
  AVStream *vst = s->streams[0];
+ FFStream *const vsti = ffstream(vst);
  int64_t ret;
  if (!(s->pb->seekable & AVIO_SEEKABLE_NORMAL))
  return -1;
  /* seek to the first frame */
- ret = avio_seek(s->pb, vst->internal->index_entries[0].pos + bink->smush_size, SEEK_SET);
+ ret = avio_seek(s->pb, vsti->index_entries[0].pos + bink->smush_size, SEEK_SET);
  if (ret < 0)
  return ret;

libavformat/cafdec.c
@@ -342,7 +342,7 @@ found_data:
  if (caf->bytes_per_packet > 0 && caf->frames_per_packet > 0) {
  if (caf->data_size > 0)
  st->nb_frames = (caf->data_size / caf->bytes_per_packet) * caf->frames_per_packet;
- } else if (st->internal->nb_index_entries && st->duration > 0) {
+ } else if (ffstream(st)->nb_index_entries && st->duration > 0) {
  if (st->codecpar->sample_rate && caf->data_size / st->duration > INT64_MAX / st->codecpar->sample_rate / 8) {
  av_log(s, AV_LOG_ERROR, "Overflow during bit rate calculation %d * 8 * %"PRId64"\n",
  st->codecpar->sample_rate, caf->data_size / st->duration);
@@ -372,6 +372,7 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt)
  {
  AVIOContext *pb = s->pb;
  AVStream *st = s->streams[0];
+ FFStream *const sti = ffstream(st);
  CafContext *caf = s->priv_data;
  int res, pkt_size = 0, pkt_frames = 0;
  int64_t left = CAF_MAX_PKT_SIZE;
@@ -395,13 +396,13 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt)
  pkt_size = (CAF_MAX_PKT_SIZE / pkt_size) * pkt_size;
  pkt_size = FFMIN(pkt_size, left);
  pkt_frames = pkt_size / caf->bytes_per_packet;
- } else if (st->internal->nb_index_entries) {
- if (caf->packet_cnt < st->internal->nb_index_entries - 1) {
- pkt_size = st->internal->index_entries[caf->packet_cnt + 1].pos - st->internal->index_entries[caf->packet_cnt].pos;
- pkt_frames = st->internal->index_entries[caf->packet_cnt + 1].timestamp - st->internal->index_entries[caf->packet_cnt].timestamp;
- } else if (caf->packet_cnt == st->internal->nb_index_entries - 1) {
- pkt_size = caf->num_bytes - st->internal->index_entries[caf->packet_cnt].pos;
- pkt_frames = st->duration - st->internal->index_entries[caf->packet_cnt].timestamp;
+ } else if (sti->nb_index_entries) {
+ if (caf->packet_cnt < sti->nb_index_entries - 1) {
+ pkt_size = sti->index_entries[caf->packet_cnt + 1].pos - sti->index_entries[caf->packet_cnt].pos;
+ pkt_frames = sti->index_entries[caf->packet_cnt + 1].timestamp - sti->index_entries[caf->packet_cnt].timestamp;
+ } else if (caf->packet_cnt == sti->nb_index_entries - 1) {
+ pkt_size = caf->num_bytes - sti->index_entries[caf->packet_cnt].pos;
+ pkt_frames = st->duration - sti->index_entries[caf->packet_cnt].timestamp;
  } else {
  return AVERROR(EIO);
  }
@@ -428,6 +429,7 @@ static int read_seek(AVFormatContext *s, int stream_index,
  int64_t timestamp, int flags)
  {
  AVStream *st = s->streams[0];
+ FFStream *const sti = ffstream(st);
  CafContext *caf = s->priv_data;
  int64_t pos, packet_cnt, frame_cnt;
@@ -440,10 +442,10 @@ static int read_seek(AVFormatContext *s, int stream_index,
  pos = FFMIN(pos, caf->data_size);
  packet_cnt = pos / caf->bytes_per_packet;
  frame_cnt = caf->frames_per_packet * packet_cnt;
- } else if (st->internal->nb_index_entries) {
+ } else if (sti->nb_index_entries) {
  packet_cnt = av_index_search_timestamp(st, timestamp, flags);
- frame_cnt = st->internal->index_entries[packet_cnt].timestamp;
- pos = st->internal->index_entries[packet_cnt].pos;
+ frame_cnt = sti->index_entries[packet_cnt].timestamp;
+ pos = sti->index_entries[packet_cnt].pos;
  } else {
  return -1;
  }

libavformat/cinedec.c
@@ -285,13 +285,14 @@ static int cine_read_packet(AVFormatContext *avctx, AVPacket *pkt)
  {
  CineDemuxContext *cine = avctx->priv_data;
  AVStream *st = avctx->streams[0];
+ FFStream *const sti = ffstream(st);
  AVIOContext *pb = avctx->pb;
  int n, size, ret;
- if (cine->pts >= st->internal->nb_index_entries)
+ if (cine->pts >= sti->nb_index_entries)
  return AVERROR_EOF;
- avio_seek(pb, st->internal->index_entries[cine->pts].pos, SEEK_SET);
+ avio_seek(pb, sti->index_entries[cine->pts].pos, SEEK_SET);
  n = avio_rl32(pb);
  if (n < 8)
  return AVERROR_INVALIDDATA;

libavformat/concatdec.c
@@ -738,6 +738,7 @@ static int concat_read_packet(AVFormatContext *avf, AVPacket *pkt)
  int64_t delta;
  ConcatStream *cs;
  AVStream *st;
+ FFStream *sti;
  if (cat->eof)
  return AVERROR_EOF;
@@ -774,6 +775,7 @@ static int concat_read_packet(AVFormatContext *avf, AVPacket *pkt)
  return ret;
  st = cat->avf->streams[pkt->stream_index];
+ sti = ffstream(st);
  av_log(avf, AV_LOG_DEBUG, "file:%d stream:%d pts:%s pts_time:%s dts:%s dts_time:%s",
  (unsigned)(cat->cur_file - cat->files), pkt->stream_index,
  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &st->time_base),
@@ -802,8 +804,8 @@ static int concat_read_packet(AVFormatContext *avf, AVPacket *pkt)
  }
  }
- if (cat->cur_file->duration == AV_NOPTS_VALUE && st->internal->cur_dts != AV_NOPTS_VALUE) {
- int64_t next_dts = av_rescale_q(st->internal->cur_dts, st->time_base, AV_TIME_BASE_Q);
+ if (cat->cur_file->duration == AV_NOPTS_VALUE && sti->cur_dts != AV_NOPTS_VALUE) {
+ int64_t next_dts = av_rescale_q(sti->cur_dts, st->time_base, AV_TIME_BASE_Q);
  if (cat->cur_file->next_dts == AV_NOPTS_VALUE || next_dts > cat->cur_file->next_dts) {
  cat->cur_file->next_dts = next_dts;
  }

libavformat/dashenc.c
@@ -2344,10 +2344,10 @@ static int dash_check_bitstream(struct AVFormatContext *s, const AVPacket *avpkt
  pkt.stream_index = 0;
  ret = oc->oformat->check_bitstream(oc, &pkt);
  if (ret == 1) {
- AVStream *st = s->streams[avpkt->stream_index];
- AVStream *ost = oc->streams[0];
- st->internal->bsfc = ost->internal->bsfc;
- ost->internal->bsfc = NULL;
+ FFStream *const sti = ffstream(s->streams[avpkt->stream_index]);
+ FFStream *const osti = ffstream(oc->streams[0]);
+ sti->bsfc = osti->bsfc;
+ osti->bsfc = NULL;
  }
  return ret;
  }

libavformat/dhav.c
@@ -433,15 +433,16 @@ static int dhav_read_seek(AVFormatContext *s, int stream_index,
  {
  DHAVContext *dhav = s->priv_data;
  AVStream *st = s->streams[stream_index];
+ FFStream *const sti = ffstream(st);
  int index = av_index_search_timestamp(st, timestamp, flags);
  int64_t pts;
  if (index < 0)
  return -1;
- if (avio_seek(s->pb, st->internal->index_entries[index].pos, SEEK_SET) < 0)
+ if (avio_seek(s->pb, sti->index_entries[index].pos, SEEK_SET) < 0)
  return -1;
- pts = st->internal->index_entries[index].timestamp;
+ pts = sti->index_entries[index].timestamp;
  for (int n = 0; n < s->nb_streams; n++) {
  AVStream *st = s->streams[n];

libavformat/dtshddec.c
@@ -65,7 +65,7 @@ static int dtshd_read_header(AVFormatContext *s)
  return AVERROR(ENOMEM);
  st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
  st->codecpar->codec_id = AV_CODEC_ID_DTS;
- st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
+ ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL_RAW;
  for (;;) {
  chunk_type = avio_rb64(pb);

libavformat/dump.c
@@ -508,6 +508,7 @@ static void dump_stream_format(const AVFormatContext *ic, int i,
  char buf[256];
  int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
  const AVStream *st = ic->streams[i];
+ const FFStream *const sti = cffstream(st);
  const AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
  const char *separator = ic->dump_separator;
  AVCodecContext *avctx;
@@ -524,12 +525,12 @@ static void dump_stream_format(const AVFormatContext *ic, int i,
  }
  // Fields which are missing from AVCodecParameters need to be taken from the AVCodecContext
- avctx->properties = st->internal->avctx->properties;
- avctx->codec = st->internal->avctx->codec;
- avctx->qmin = st->internal->avctx->qmin;
- avctx->qmax = st->internal->avctx->qmax;
- avctx->coded_width = st->internal->avctx->coded_width;
- avctx->coded_height = st->internal->avctx->coded_height;
+ avctx->properties = sti->avctx->properties;
+ avctx->codec = sti->avctx->codec;
+ avctx->qmin = sti->avctx->qmin;
+ avctx->qmax = sti->avctx->qmax;
+ avctx->coded_width = sti->avctx->coded_width;
+ avctx->coded_height = sti->avctx->coded_height;
  if (separator)
  av_opt_set(avctx, "dump_separator", separator, 0);
@@ -544,7 +545,7 @@ static void dump_stream_format(const AVFormatContext *ic, int i,
  av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
  if (lang)
  av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
- av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->internal->codec_info_nb_frames,
+ av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", sti->codec_info_nb_frames,
  st->time_base.num, st->time_base.den);
  av_log(NULL, AV_LOG_INFO, ": %s", buf);

libavformat/electronicarts.c
@@ -503,7 +503,7 @@ static int init_video_stream(AVFormatContext *s, VideoProperties *video)
  st->codecpar->codec_id = video->codec;
  // parsing is necessary to make FFmpeg generate correct timestamps
  if (st->codecpar->codec_id == AV_CODEC_ID_MPEG2VIDEO)
- st->internal->need_parsing = AVSTREAM_PARSE_HEADERS;
+ ffstream(st)->need_parsing = AVSTREAM_PARSE_HEADERS;
  st->codecpar->codec_tag = 0; /* no fourcc */
  st->codecpar->width = video->width;
  st->codecpar->height = video->height;

libavformat/fifo.c
@@ -139,7 +139,7 @@ static int fifo_thread_write_header(FifoThreadContext *ctx)
  }
  for (i = 0;i < avf2->nb_streams; i++)
- avf2->streams[i]->internal->cur_dts = 0;
+ ffstream(avf2->streams[i])->cur_dts = 0;
  ret = avformat_write_header(avf2, &format_options);
  if (!ret)

libavformat/flacdec.c
@@ -37,11 +37,10 @@ typedef struct FLACDecContext {
  static void reset_index_position(int64_t metadata_head_size, AVStream *st)
  {
+ FFStream *const sti = ffstream(st);
  /* the real seek index offset should be the size of metadata blocks with the offset in the frame blocks */
- int i;
- for(i=0; i<st->internal->nb_index_entries; i++) {
- st->internal->index_entries[i].pos += metadata_head_size;
- }
+ for (int i = 0; i < sti->nb_index_entries; i++)
+ sti->index_entries[i].pos += metadata_head_size;
  }
  static int flac_read_header(AVFormatContext *s)
@@ -55,7 +54,7 @@ static int flac_read_header(AVFormatContext *s)
  return AVERROR(ENOMEM);
  st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
  st->codecpar->codec_id = AV_CODEC_ID_FLAC;
- st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
+ ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL_RAW;
  /* the parameters will be extracted from the compressed bitstream */
  /* if fLaC marker is not found, assume there is no header */
@@ -287,7 +286,7 @@ static av_unused int64_t flac_read_timestamp(AVFormatContext *s, int stream_inde
  av_assert1(!pkt->size);
  }
  }
- av_parser_parse2(parser, st->internal->avctx,
+ av_parser_parse2(parser, ffstream(st)->avctx,
  &data, &size, pkt->data, pkt->size,
  pkt->pts, pkt->dts, *ppos);
@@ -308,6 +307,8 @@ static av_unused int64_t flac_read_timestamp(AVFormatContext *s, int stream_inde
  }
  static int flac_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) {
+ AVStream *const st = s->streams[0];
+ FFStream *const sti = ffstream(st);
  int index;
  int64_t pos;
  AVIndexEntry e;
@@ -317,11 +318,11 @@ static int flac_seek(AVFormatContext *s, int stream_index, int64_t timestamp, in
  return -1;
  }
- index = av_index_search_timestamp(s->streams[0], timestamp, flags);
- if(index<0 || index >= s->streams[0]->internal->nb_index_entries)
+ index = av_index_search_timestamp(st, timestamp, flags);
+ if (index < 0 || index >= sti->nb_index_entries)
  return -1;
- e = s->streams[0]->internal->index_entries[index];
+ e = sti->index_entries[index];
  pos = avio_seek(s->pb, e.pos, SEEK_SET);
  if (pos >= 0) {
  return 0;

libavformat/flic.c
@@ -265,10 +265,11 @@ static int flic_read_seek(AVFormatContext *s, int stream_index,
  {
  FlicDemuxContext *flic = s->priv_data;
  AVStream *st = s->streams[stream_index];
+ FFStream *const sti = ffstream(st);
  int64_t pos, ts;
  int index;
- if (!st->internal->index_entries || stream_index != flic->video_stream_index)
+ if (!sti->index_entries || stream_index != flic->video_stream_index)
  return -1;
  index = av_index_search_timestamp(st, pts, flags);
@@ -278,8 +279,8 @@ static int flic_read_seek(AVFormatContext *s, int stream_index,
  if (index < 0)
  return -1;
- pos = st->internal->index_entries[index].pos;
- ts = st->internal->index_entries[index].timestamp;
+ pos = sti->index_entries[index].pos;
+ ts = sti->index_entries[index].timestamp;
  flic->frame_number = ts;
  avio_seek(s->pb, pos, SEEK_SET);
  return 0;

@ -143,7 +143,7 @@ static void add_keyframes_index(AVFormatContext *s)
av_assert0(flv->last_keyframe_stream_index <= s->nb_streams); av_assert0(flv->last_keyframe_stream_index <= s->nb_streams);
stream = s->streams[flv->last_keyframe_stream_index]; stream = s->streams[flv->last_keyframe_stream_index];
if (stream->internal->nb_index_entries == 0) { if (ffstream(stream)->nb_index_entries == 0) {
for (i = 0; i < flv->keyframe_count; i++) { for (i = 0; i < flv->keyframe_count; i++) {
av_log(s, AV_LOG_TRACE, "keyframe filepositions = %"PRId64" times = %"PRId64"\n", av_log(s, AV_LOG_TRACE, "keyframe filepositions = %"PRId64" times = %"PRId64"\n",
flv->keyframe_filepositions[i], flv->keyframe_times[i] * 1000); flv->keyframe_filepositions[i], flv->keyframe_times[i] * 1000);
@ -272,7 +272,7 @@ static void flv_set_audio_codec(AVFormatContext *s, AVStream *astream,
break; break;
case FLV_CODECID_MP3: case FLV_CODECID_MP3:
apar->codec_id = AV_CODEC_ID_MP3; apar->codec_id = AV_CODEC_ID_MP3;
astream->internal->need_parsing = AVSTREAM_PARSE_FULL; ffstream(astream)->need_parsing = AVSTREAM_PARSE_FULL;
break; break;
case FLV_CODECID_NELLYMOSER_8KHZ_MONO: case FLV_CODECID_NELLYMOSER_8KHZ_MONO:
// in case metadata does not otherwise declare samplerate // in case metadata does not otherwise declare samplerate
@ -329,6 +329,7 @@ static int flv_same_video_codec(AVCodecParameters *vpar, int flags)
static int flv_set_video_codec(AVFormatContext *s, AVStream *vstream, static int flv_set_video_codec(AVFormatContext *s, AVStream *vstream,
int flv_codecid, int read) int flv_codecid, int read)
{ {
FFStream *const vstreami = ffstream(vstream);
int ret = 0; int ret = 0;
AVCodecParameters *par = vstream->codecpar; AVCodecParameters *par = vstream->codecpar;
enum AVCodecID old_codec_id = vstream->codecpar->codec_id; enum AVCodecID old_codec_id = vstream->codecpar->codec_id;
@ -363,7 +364,7 @@ static int flv_set_video_codec(AVFormatContext *s, AVStream *vstream,
break; break;
case FLV_CODECID_H264: case FLV_CODECID_H264:
par->codec_id = AV_CODEC_ID_H264; par->codec_id = AV_CODEC_ID_H264;
vstream->internal->need_parsing = AVSTREAM_PARSE_HEADERS; vstreami->need_parsing = AVSTREAM_PARSE_HEADERS;
ret = 3; // not 4, reading packet type will consume one byte ret = 3; // not 4, reading packet type will consume one byte
break; break;
case FLV_CODECID_MPEG4: case FLV_CODECID_MPEG4:
@ -375,7 +376,7 @@ static int flv_set_video_codec(AVFormatContext *s, AVStream *vstream,
par->codec_tag = flv_codecid; par->codec_tag = flv_codecid;
} }
if (!vstream->internal->need_context_update && par->codec_id != old_codec_id) { if (!vstreami->need_context_update && par->codec_id != old_codec_id) {
avpriv_request_sample(s, "Changing the codec id midstream"); avpriv_request_sample(s, "Changing the codec id midstream");
return AVERROR_PATCHWELCOME; return AVERROR_PATCHWELCOME;
} }
@ -815,7 +816,7 @@ static int flv_get_extradata(AVFormatContext *s, AVStream *st, int size)
if ((ret = ff_get_extradata(s, st->codecpar, s->pb, size)) < 0) if ((ret = ff_get_extradata(s, st->codecpar, s->pb, size)) < 0)
return ret; return ret;
st->internal->need_context_update = 1; ffstream(st)->need_context_update = 1;
return 0; return 0;
} }
@ -837,17 +838,16 @@ static int flv_queue_extradata(FLVContext *flv, AVIOContext *pb, int stream,
static void clear_index_entries(AVFormatContext *s, int64_t pos) static void clear_index_entries(AVFormatContext *s, int64_t pos)
{ {
int i, j, out;
av_log(s, AV_LOG_WARNING, av_log(s, AV_LOG_WARNING,
"Found invalid index entries, clearing the index.\n"); "Found invalid index entries, clearing the index.\n");
for (i = 0; i < s->nb_streams; i++) { for (unsigned i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i]; FFStream *const sti = ffstream(s->streams[i]);
int out = 0;
/* Remove all index entries that point to >= pos */ /* Remove all index entries that point to >= pos */
out = 0; for (int j = 0; j < sti->nb_index_entries; j++)
for (j = 0; j < st->internal->nb_index_entries; j++) if (sti->index_entries[j].pos < pos)
if (st->internal->index_entries[j].pos < pos) sti->index_entries[out++] = sti->index_entries[j];
st->internal->index_entries[out++] = st->internal->index_entries[j]; sti->nb_index_entries = out;
st->internal->nb_index_entries = out;
} }
} }
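For reference, the rewritten clear_index_entries() above uses the usual in-place filtering idiom: entries whose pos is below the invalid offset are kept and compacted to the front of the array, and nb_index_entries is reset to the number kept, with no extra allocation. A stand-alone sketch of the same idiom, using a simplified Entry type rather than the real AVIndexEntry:

    #include <stdio.h>

    typedef struct Entry { long long pos; } Entry;  /* simplified stand-in */

    /* Keep only entries with pos < bad_pos, compacting in place.
     * Returns the new number of entries. */
    static int drop_entries_from(Entry *entries, int nb, long long bad_pos)
    {
        int out = 0;
        for (int i = 0; i < nb; i++)
            if (entries[i].pos < bad_pos)
                entries[out++] = entries[i];   /* keep, shifting left */
        return out;
    }

    int main(void)
    {
        Entry e[] = { {10}, {500}, {20}, {900} };
        int nb = drop_entries_from(e, 4, 100);
        printf("%d entries kept\n", nb);       /* prints 2 */
        return 0;
    }
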

@ -104,12 +104,14 @@ static int gxf_probe(const AVProbeData *p) {
static int get_sindex(AVFormatContext *s, int id, int format) { static int get_sindex(AVFormatContext *s, int id, int format) {
int i; int i;
AVStream *st = NULL; AVStream *st = NULL;
FFStream *sti;
i = ff_find_stream_index(s, id); i = ff_find_stream_index(s, id);
if (i >= 0) if (i >= 0)
return i; return i;
st = avformat_new_stream(s, NULL); st = avformat_new_stream(s, NULL);
if (!st) if (!st)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
sti = ffstream(st);
st->id = id; st->id = id;
switch (format) { switch (format) {
case 3: case 3:
@ -130,13 +132,13 @@ static int get_sindex(AVFormatContext *s, int id, int format) {
case 20: case 20:
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_id = AV_CODEC_ID_MPEG2VIDEO; st->codecpar->codec_id = AV_CODEC_ID_MPEG2VIDEO;
st->internal->need_parsing = AVSTREAM_PARSE_HEADERS; //get keyframe flag etc. sti->need_parsing = AVSTREAM_PARSE_HEADERS; //get keyframe flag etc.
break; break;
case 22: case 22:
case 23: case 23:
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_id = AV_CODEC_ID_MPEG1VIDEO; st->codecpar->codec_id = AV_CODEC_ID_MPEG1VIDEO;
st->internal->need_parsing = AVSTREAM_PARSE_HEADERS; //get keyframe flag etc. sti->need_parsing = AVSTREAM_PARSE_HEADERS; //get keyframe flag etc.
break; break;
case 9: case 9:
st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
@ -169,7 +171,7 @@ static int get_sindex(AVFormatContext *s, int id, int format) {
case 29: /* AVCHD */ case 29: /* AVCHD */
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_id = AV_CODEC_ID_H264; st->codecpar->codec_id = AV_CODEC_ID_H264;
st->internal->need_parsing = AVSTREAM_PARSE_HEADERS; sti->need_parsing = AVSTREAM_PARSE_HEADERS;
break; break;
// timecode tracks: // timecode tracks:
case 7: case 7:
@ -567,6 +569,7 @@ static int gxf_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int
uint64_t pos; uint64_t pos;
uint64_t maxlen = 100 * 1024 * 1024; uint64_t maxlen = 100 * 1024 * 1024;
AVStream *st = s->streams[0]; AVStream *st = s->streams[0];
FFStream *const sti = ffstream(st);
int64_t start_time = s->streams[stream_index]->start_time; int64_t start_time = s->streams[stream_index]->start_time;
int64_t found; int64_t found;
int idx; int idx;
@ -575,9 +578,9 @@ static int gxf_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int
AVSEEK_FLAG_ANY | AVSEEK_FLAG_BACKWARD); AVSEEK_FLAG_ANY | AVSEEK_FLAG_BACKWARD);
if (idx < 0) if (idx < 0)
return -1; return -1;
pos = st->internal->index_entries[idx].pos; pos = sti->index_entries[idx].pos;
if (idx < st->internal->nb_index_entries - 2) if (idx < sti->nb_index_entries - 2)
maxlen = st->internal->index_entries[idx + 2].pos - pos; maxlen = sti->index_entries[idx + 2].pos - pos;
maxlen = FFMAX(maxlen, 200 * 1024); maxlen = FFMAX(maxlen, 200 * 1024);
res = avio_seek(s->pb, pos, SEEK_SET); res = avio_seek(s->pb, pos, SEEK_SET);
if (res < 0) if (res < 0)

@ -500,15 +500,16 @@ static int hds_write_packet(AVFormatContext *s, AVPacket *pkt)
{ {
HDSContext *c = s->priv_data; HDSContext *c = s->priv_data;
AVStream *st = s->streams[pkt->stream_index]; AVStream *st = s->streams[pkt->stream_index];
FFStream *const sti = ffstream(st);
OutputStream *os = &c->streams[s->streams[pkt->stream_index]->id]; OutputStream *os = &c->streams[s->streams[pkt->stream_index]->id];
int64_t end_dts = os->fragment_index * (int64_t)c->min_frag_duration; int64_t end_dts = os->fragment_index * (int64_t)c->min_frag_duration;
int ret; int ret;
if (st->internal->first_dts == AV_NOPTS_VALUE) if (sti->first_dts == AV_NOPTS_VALUE)
st->internal->first_dts = pkt->dts; sti->first_dts = pkt->dts;
if ((!os->has_video || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) && if ((!os->has_video || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
av_compare_ts(pkt->dts - st->internal->first_dts, st->time_base, av_compare_ts(pkt->dts - sti->first_dts, st->time_base,
end_dts, AV_TIME_BASE_Q) >= 0 && end_dts, AV_TIME_BASE_Q) >= 0 &&
pkt->flags & AV_PKT_FLAG_KEY && os->packets_written) { pkt->flags & AV_PKT_FLAG_KEY && os->packets_written) {

@ -1794,7 +1794,7 @@ static int set_stream_info_from_input_stream(AVStream *st, struct playlist *pls,
memcpy(dst_data, sd_src->data, sd_src->size); memcpy(dst_data, sd_src->data, sd_src->size);
} }
st->internal->need_context_update = 1; ffstream(st)->need_context_update = 1;
return 0; return 0;
} }

@ -188,23 +188,26 @@ static int ifv_read_header(AVFormatContext *s)
static int ifv_read_packet(AVFormatContext *s, AVPacket *pkt) static int ifv_read_packet(AVFormatContext *s, AVPacket *pkt)
{ {
IFVContext *ifv = s->priv_data; IFVContext *ifv = s->priv_data;
AVStream *st;
AVIndexEntry *ev, *ea, *e_next; AVIndexEntry *ev, *ea, *e_next;
int ret; int ret;
ev = ea = e_next = NULL; ev = ea = e_next = NULL;
if (ifv->next_video_index < ifv->total_vframes) { if (ifv->next_video_index < ifv->total_vframes) {
st = s->streams[ifv->video_stream_index]; AVStream *const st = s->streams[ifv->video_stream_index];
if (ifv->next_video_index < st->internal->nb_index_entries) FFStream *const sti = ffstream(st);
e_next = ev = &st->internal->index_entries[ifv->next_video_index];
if (ifv->next_video_index < sti->nb_index_entries)
e_next = ev = &sti->index_entries[ifv->next_video_index];
} }
if (ifv->is_audio_present && if (ifv->is_audio_present &&
ifv->next_audio_index < ifv->total_aframes) { ifv->next_audio_index < ifv->total_aframes) {
st = s->streams[ifv->audio_stream_index]; AVStream *const st = s->streams[ifv->audio_stream_index];
if (ifv->next_audio_index < st->internal->nb_index_entries) { FFStream *const sti = ffstream(st);
ea = &st->internal->index_entries[ifv->next_audio_index];
if (ifv->next_audio_index < sti->nb_index_entries) {
ea = &sti->index_entries[ifv->next_audio_index];
if (!ev || ea->timestamp < ev->timestamp) if (!ev || ea->timestamp < ev->timestamp)
e_next = ea; e_next = ea;
} }

@ -209,7 +209,7 @@ int ff_img_read_header(AVFormatContext *s1)
s->is_pipe = 0; s->is_pipe = 0;
else { else {
s->is_pipe = 1; s->is_pipe = 1;
st->internal->need_parsing = AVSTREAM_PARSE_FULL; ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL;
} }
if (s->ts_from_file == 2) { if (s->ts_from_file == 2) {
@ -482,7 +482,7 @@ int ff_img_read_packet(AVFormatContext *s1, AVPacket *pkt)
return AVERROR_EOF; return AVERROR_EOF;
if (s->frame_size > 0) { if (s->frame_size > 0) {
size[0] = s->frame_size; size[0] = s->frame_size;
} else if (!s1->streams[0]->internal->parser) { } else if (!ffstream(s1->streams[0])->parser) {
size[0] = avio_size(s1->pb); size[0] = avio_size(s1->pb);
} else { } else {
size[0] = 4096; size[0] = 4096;
@ -590,7 +590,7 @@ static int img_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
int index = av_index_search_timestamp(st, timestamp, flags); int index = av_index_search_timestamp(st, timestamp, flags);
if(index < 0) if(index < 0)
return -1; return -1;
s1->img_number = st->internal->index_entries[index].pos; s1->img_number = ffstream(st)->index_entries[index].pos;
return 0; return 0;
} }

@ -185,7 +185,12 @@ static av_always_inline FFFormatContext *ffformatcontext(AVFormatContext *s)
return (FFFormatContext*)s; return (FFFormatContext*)s;
} }
struct AVStreamInternal { typedef struct FFStream {
/**
* The public context.
*/
AVStream pub;
/** /**
* Set to 1 if the codec allows reordering, so pts can be different * Set to 1 if the codec allows reordering, so pts can be different
* from dts. * from dts.
@ -416,7 +421,17 @@ struct AVStreamInternal {
*/ */
int64_t first_dts; int64_t first_dts;
int64_t cur_dts; int64_t cur_dts;
}; } FFStream;
static av_always_inline FFStream *ffstream(AVStream *st)
{
return (FFStream*)st;
}
static av_always_inline const FFStream *cffstream(const AVStream *st)
{
return (FFStream*)st;
}
void avpriv_stream_set_need_parsing(AVStream *st, enum AVStreamParseType type); void avpriv_stream_set_need_parsing(AVStream *st, enum AVStreamParseType type);
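The internal.h hunk above is the core of the change: AVStream becomes the first member (pub) of the new FFStream, so ffstream()/cffstream() are plain pointer casts instead of a dereference of a separately allocated AVStreamInternal. A minimal stand-alone sketch of that layout trick, using simplified stand-in types rather than the real FFmpeg definitions:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct AVStream { int index; } AVStream;   /* simplified stand-in */

    typedef struct FFStream {
        AVStream pub;      /* the public context; must stay the first member */
        int need_parsing;  /* one of the fields formerly in AVStreamInternal */
    } FFStream;

    static inline FFStream *ffstream(AVStream *st)
    {
        return (FFStream*)st;  /* valid because pub sits at offset 0 */
    }

    int main(void)
    {
        FFStream *sti = calloc(1, sizeof(*sti));
        if (!sti)
            return 1;
        AVStream *st = &sti->pub;          /* the pointer handed out publicly */

        ffstream(st)->need_parsing = 1;    /* internal view recovered by the cast */
        printf("%d\n", sti->need_parsing); /* prints 1 */
        free(sti);
        return 0;
    }

Demuxers then cache the cast once per function (FFStream *const sti = ffstream(st);), which is the pattern applied throughout the rest of this patch.
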

@ -62,7 +62,7 @@ static int ipu_read_header(AVFormatContext *s)
st->start_time = 0; st->start_time = 0;
st->duration = st->duration =
st->nb_frames = avio_rl32(pb); st->nb_frames = avio_rl32(pb);
st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW; ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL_RAW;
avpriv_set_pts_info(st, 64, 1, 25); avpriv_set_pts_info(st, 64, 1, 25);
return 0; return 0;

@ -47,7 +47,7 @@ static int read_header(AVFormatContext *s)
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_id = AV_CODEC_ID_MPEG4; st->codecpar->codec_id = AV_CODEC_ID_MPEG4;
st->internal->need_parsing = AVSTREAM_PARSE_FULL; ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL;
avpriv_set_pts_info(st, 64, 1, 90000); avpriv_set_pts_info(st, 64, 1, 90000);
return 0; return 0;

@ -56,7 +56,7 @@ static int read_header(AVFormatContext *s)
st->duration = avio_rl32(s->pb); st->duration = avio_rl32(s->pb);
avio_skip(s->pb, 4); // unused avio_skip(s->pb, 4); // unused
st->internal->need_parsing = AVSTREAM_PARSE_HEADERS; ffstream(st)->need_parsing = AVSTREAM_PARSE_HEADERS;
if (!time_base.den || !time_base.num) { if (!time_base.den || !time_base.num) {
av_log(s, AV_LOG_ERROR, "Invalid frame rate\n"); av_log(s, AV_LOG_ERROR, "Invalid frame rate\n");

@ -74,9 +74,9 @@ static int read_header(AVFormatContext *s)
JVDemuxContext *jv = s->priv_data; JVDemuxContext *jv = s->priv_data;
AVIOContext *pb = s->pb; AVIOContext *pb = s->pb;
AVStream *vst, *ast; AVStream *vst, *ast;
FFStream *asti;
int64_t audio_pts = 0; int64_t audio_pts = 0;
int64_t offset; int64_t offset;
int i;
avio_skip(pb, 80); avio_skip(pb, 80);
@ -84,6 +84,7 @@ static int read_header(AVFormatContext *s)
vst = avformat_new_stream(s, NULL); vst = avformat_new_stream(s, NULL);
if (!ast || !vst) if (!ast || !vst)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
asti = ffstream(ast);
vst->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; vst->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
vst->codecpar->codec_id = AV_CODEC_ID_JV; vst->codecpar->codec_id = AV_CODEC_ID_JV;
@ -92,7 +93,7 @@ static int read_header(AVFormatContext *s)
vst->codecpar->height = avio_rl16(pb); vst->codecpar->height = avio_rl16(pb);
vst->duration = vst->duration =
vst->nb_frames = vst->nb_frames =
ast->internal->nb_index_entries = avio_rl16(pb); asti->nb_index_entries = avio_rl16(pb);
avpriv_set_pts_info(vst, 64, avio_rl16(pb), 1000); avpriv_set_pts_info(vst, 64, avio_rl16(pb), 1000);
avio_skip(pb, 4); avio_skip(pb, 4);
@ -107,17 +108,17 @@ static int read_header(AVFormatContext *s)
avio_skip(pb, 10); avio_skip(pb, 10);
ast->internal->index_entries = av_malloc(ast->internal->nb_index_entries * asti->index_entries = av_malloc(asti->nb_index_entries *
sizeof(*ast->internal->index_entries)); sizeof(*asti->index_entries));
if (!ast->internal->index_entries) if (!asti->index_entries)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
jv->frames = av_malloc(ast->internal->nb_index_entries * sizeof(JVFrame)); jv->frames = av_malloc(asti->nb_index_entries * sizeof(*jv->frames));
if (!jv->frames) if (!jv->frames)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
offset = 0x68 + ast->internal->nb_index_entries * 16; offset = 0x68 + asti->nb_index_entries * 16;
for (i = 0; i < ast->internal->nb_index_entries; i++) { for (int i = 0; i < asti->nb_index_entries; i++) {
AVIndexEntry *e = ast->internal->index_entries + i; AVIndexEntry *e = asti->index_entries + i;
JVFrame *jvf = jv->frames + i; JVFrame *jvf = jv->frames + i;
/* total frame size including audio, video, palette data and padding */ /* total frame size including audio, video, palette data and padding */
@ -162,10 +163,11 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt)
JVDemuxContext *jv = s->priv_data; JVDemuxContext *jv = s->priv_data;
AVIOContext *pb = s->pb; AVIOContext *pb = s->pb;
AVStream *ast = s->streams[0]; AVStream *ast = s->streams[0];
FFStream *const asti = ffstream(ast);
int ret; int ret;
while (!avio_feof(s->pb) && jv->pts < ast->internal->nb_index_entries) { while (!avio_feof(s->pb) && jv->pts < asti->nb_index_entries) {
const AVIndexEntry *e = ast->internal->index_entries + jv->pts; const AVIndexEntry *const e = asti->index_entries + jv->pts;
const JVFrame *jvf = jv->frames + jv->pts; const JVFrame *jvf = jv->frames + jv->pts;
switch (jv->state) { switch (jv->state) {
@ -222,6 +224,7 @@ static int read_seek(AVFormatContext *s, int stream_index,
{ {
JVDemuxContext *jv = s->priv_data; JVDemuxContext *jv = s->priv_data;
AVStream *ast = s->streams[0]; AVStream *ast = s->streams[0];
FFStream *const asti = ffstream(ast);
int i; int i;
if (flags & (AVSEEK_FLAG_BYTE | AVSEEK_FLAG_FRAME)) if (flags & (AVSEEK_FLAG_BYTE | AVSEEK_FLAG_FRAME))
@ -238,9 +241,9 @@ static int read_seek(AVFormatContext *s, int stream_index,
return 0; return 0;
} }
if (i < 0 || i >= ast->internal->nb_index_entries) if (i < 0 || i >= asti->nb_index_entries)
return 0; return 0;
if (avio_seek(s->pb, ast->internal->index_entries[i].pos, SEEK_SET) < 0) if (avio_seek(s->pb, asti->index_entries[i].pos, SEEK_SET) < 0)
return -1; return -1;
jv->state = JV_AUDIO; jv->state = JV_AUDIO;

@ -67,14 +67,14 @@ static int lmlm4_read_header(AVFormatContext *s)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_id = AV_CODEC_ID_MPEG4; st->codecpar->codec_id = AV_CODEC_ID_MPEG4;
st->internal->need_parsing = AVSTREAM_PARSE_HEADERS; ffstream(st)->need_parsing = AVSTREAM_PARSE_HEADERS;
avpriv_set_pts_info(st, 64, 1001, 30000); avpriv_set_pts_info(st, 64, 1001, 30000);
if (!(st = avformat_new_stream(s, NULL))) if (!(st = avformat_new_stream(s, NULL)))
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
st->codecpar->codec_id = AV_CODEC_ID_MP2; st->codecpar->codec_id = AV_CODEC_ID_MP2;
st->internal->need_parsing = AVSTREAM_PARSE_HEADERS; ffstream(st)->need_parsing = AVSTREAM_PARSE_HEADERS;
/* the parameters will be extracted from the compressed bitstream */ /* the parameters will be extracted from the compressed bitstream */
return 0; return 0;

@ -75,7 +75,7 @@ static int loas_read_header(AVFormatContext *s)
st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
st->codecpar->codec_id = s->iformat->raw_codec_id; st->codecpar->codec_id = s->iformat->raw_codec_id;
st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW; ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL_RAW;
//LCM of all possible AAC sample rates //LCM of all possible AAC sample rates
avpriv_set_pts_info(st, 64, 1, 28224000); avpriv_set_pts_info(st, 64, 1, 28224000);

@ -262,7 +262,7 @@ static int lxf_read_header(AVFormatContext *s)
st->codecpar->bit_rate = 1000000 * ((video_params >> 14) & 0xFF); st->codecpar->bit_rate = 1000000 * ((video_params >> 14) & 0xFF);
st->codecpar->codec_tag = video_params & 0xF; st->codecpar->codec_tag = video_params & 0xF;
st->codecpar->codec_id = ff_codec_get_id(lxf_tags, st->codecpar->codec_tag); st->codecpar->codec_id = ff_codec_get_id(lxf_tags, st->codecpar->codec_tag);
st->internal->need_parsing = AVSTREAM_PARSE_HEADERS; ffstream(st)->need_parsing = AVSTREAM_PARSE_HEADERS;
av_log(s, AV_LOG_DEBUG, "record: %x = %i-%02i-%02i\n", av_log(s, AV_LOG_DEBUG, "record: %x = %i-%02i-%02i\n",
record_date, 1900 + (record_date & 0x7F), (record_date >> 7) & 0xF, record_date, 1900 + (record_date & 0x7F), (record_date >> 7) & 0xF,

@ -2338,7 +2338,6 @@ static int matroska_parse_tracks(AVFormatContext *s)
{ {
MatroskaDemuxContext *matroska = s->priv_data; MatroskaDemuxContext *matroska = s->priv_data;
MatroskaTrack *tracks = matroska->tracks.elem; MatroskaTrack *tracks = matroska->tracks.elem;
AVStream *st;
int i, j, ret; int i, j, ret;
int k; int k;
@ -2352,6 +2351,8 @@ static int matroska_parse_tracks(AVFormatContext *s)
int extradata_offset = 0; int extradata_offset = 0;
uint32_t fourcc = 0; uint32_t fourcc = 0;
FFIOContext b; FFIOContext b;
AVStream *st;
FFStream *sti;
char* key_id_base64 = NULL; char* key_id_base64 = NULL;
int bit_depth = -1; int bit_depth = -1;
@ -2486,6 +2487,7 @@ static int matroska_parse_tracks(AVFormatContext *s)
av_free(key_id_base64); av_free(key_id_base64);
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
} }
sti = ffstream(st);
if (key_id_base64) { if (key_id_base64) {
/* export encryption key id as base64 metadata tag */ /* export encryption key id as base64 metadata tag */
@ -2802,7 +2804,7 @@ static int matroska_parse_tracks(AVFormatContext *s)
255); 255);
} }
if (st->codecpar->codec_id != AV_CODEC_ID_HEVC) if (st->codecpar->codec_id != AV_CODEC_ID_HEVC)
st->internal->need_parsing = AVSTREAM_PARSE_HEADERS; sti->need_parsing = AVSTREAM_PARSE_HEADERS;
if (track->default_duration) { if (track->default_duration) {
int div = track->default_duration <= INT64_MAX ? 1 : 2; int div = track->default_duration <= INT64_MAX ? 1 : 2;
@ -2861,9 +2863,9 @@ static int matroska_parse_tracks(AVFormatContext *s)
if (st->codecpar->codec_id == AV_CODEC_ID_MP3 || if (st->codecpar->codec_id == AV_CODEC_ID_MP3 ||
st->codecpar->codec_id == AV_CODEC_ID_MLP || st->codecpar->codec_id == AV_CODEC_ID_MLP ||
st->codecpar->codec_id == AV_CODEC_ID_TRUEHD) st->codecpar->codec_id == AV_CODEC_ID_TRUEHD)
st->internal->need_parsing = AVSTREAM_PARSE_FULL; sti->need_parsing = AVSTREAM_PARSE_FULL;
else if (st->codecpar->codec_id != AV_CODEC_ID_AAC) else if (st->codecpar->codec_id != AV_CODEC_ID_AAC)
st->internal->need_parsing = AVSTREAM_PARSE_HEADERS; sti->need_parsing = AVSTREAM_PARSE_HEADERS;
if (track->codec_delay > 0) { if (track->codec_delay > 0) {
st->codecpar->initial_padding = av_rescale_q(track->codec_delay, st->codecpar->initial_padding = av_rescale_q(track->codec_delay,
(AVRational){1, 1000000000}, (AVRational){1, 1000000000},
@ -3652,7 +3654,7 @@ static int matroska_parse_block(MatroskaDemuxContext *matroska, AVBufferRef *buf
return res; return res;
if (is_keyframe) if (is_keyframe)
matroska->skip_to_keyframe = 0; matroska->skip_to_keyframe = 0;
else if (!st->internal->skip_to_keyframe) { else if (!ffstream(st)->skip_to_keyframe) {
av_log(matroska->ctx, AV_LOG_ERROR, "File is broken, keyframes not correctly marked!\n"); av_log(matroska->ctx, AV_LOG_ERROR, "File is broken, keyframes not correctly marked!\n");
matroska->skip_to_keyframe = 0; matroska->skip_to_keyframe = 0;
} }
@ -3814,6 +3816,7 @@ static int matroska_read_seek(AVFormatContext *s, int stream_index,
MatroskaDemuxContext *matroska = s->priv_data; MatroskaDemuxContext *matroska = s->priv_data;
MatroskaTrack *tracks = NULL; MatroskaTrack *tracks = NULL;
AVStream *st = s->streams[stream_index]; AVStream *st = s->streams[stream_index];
FFStream *const sti = ffstream(st);
int i, index; int i, index;
/* Parse the CUES now since we need the index data to seek. */ /* Parse the CUES now since we need the index data to seek. */
@ -3822,13 +3825,15 @@ static int matroska_read_seek(AVFormatContext *s, int stream_index,
matroska_parse_cues(matroska); matroska_parse_cues(matroska);
} }
if (!st->internal->nb_index_entries) if (!sti->nb_index_entries)
goto err; goto err;
timestamp = FFMAX(timestamp, st->internal->index_entries[0].timestamp); timestamp = FFMAX(timestamp, sti->index_entries[0].timestamp);
if ((index = av_index_search_timestamp(st, timestamp, flags)) < 0 || index == st->internal->nb_index_entries - 1) { if ((index = av_index_search_timestamp(st, timestamp, flags)) < 0 ||
matroska_reset_status(matroska, 0, st->internal->index_entries[st->internal->nb_index_entries - 1].pos); index == sti->nb_index_entries - 1) {
while ((index = av_index_search_timestamp(st, timestamp, flags)) < 0 || index == st->internal->nb_index_entries - 1) { matroska_reset_status(matroska, 0, sti->index_entries[sti->nb_index_entries - 1].pos);
while ((index = av_index_search_timestamp(st, timestamp, flags)) < 0 ||
index == sti->nb_index_entries - 1) {
matroska_clear_queue(matroska); matroska_clear_queue(matroska);
if (matroska_parse_cluster(matroska) < 0) if (matroska_parse_cluster(matroska) < 0)
break; break;
@ -3836,7 +3841,8 @@ static int matroska_read_seek(AVFormatContext *s, int stream_index,
} }
matroska_clear_queue(matroska); matroska_clear_queue(matroska);
if (index < 0 || (matroska->cues_parsing_deferred < 0 && index == st->internal->nb_index_entries - 1)) if (index < 0 || (matroska->cues_parsing_deferred < 0 &&
index == sti->nb_index_entries - 1))
goto err; goto err;
tracks = matroska->tracks.elem; tracks = matroska->tracks.elem;
@ -3848,17 +3854,17 @@ static int matroska_read_seek(AVFormatContext *s, int stream_index,
} }
/* We seek to a level 1 element, so set the appropriate status. */ /* We seek to a level 1 element, so set the appropriate status. */
matroska_reset_status(matroska, 0, st->internal->index_entries[index].pos); matroska_reset_status(matroska, 0, sti->index_entries[index].pos);
if (flags & AVSEEK_FLAG_ANY) { if (flags & AVSEEK_FLAG_ANY) {
st->internal->skip_to_keyframe = 0; sti->skip_to_keyframe = 0;
matroska->skip_to_timecode = timestamp; matroska->skip_to_timecode = timestamp;
} else { } else {
st->internal->skip_to_keyframe = 1; sti->skip_to_keyframe = 1;
matroska->skip_to_timecode = st->internal->index_entries[index].timestamp; matroska->skip_to_timecode = sti->index_entries[index].timestamp;
} }
matroska->skip_to_keyframe = 1; matroska->skip_to_keyframe = 1;
matroska->done = 0; matroska->done = 0;
avpriv_update_cur_dts(s, st, st->internal->index_entries[index].timestamp); avpriv_update_cur_dts(s, st, sti->index_entries[index].timestamp);
return 0; return 0;
err: err:
// slightly hackish but allows proper fallback to // slightly hackish but allows proper fallback to
@ -3866,7 +3872,7 @@ err:
matroska_reset_status(matroska, 0, -1); matroska_reset_status(matroska, 0, -1);
matroska->resync_pos = -1; matroska->resync_pos = -1;
matroska_clear_queue(matroska); matroska_clear_queue(matroska);
st->internal->skip_to_keyframe = sti->skip_to_keyframe =
matroska->skip_to_keyframe = 0; matroska->skip_to_keyframe = 0;
matroska->done = 0; matroska->done = 0;
return -1; return -1;
@ -3902,10 +3908,12 @@ typedef struct {
*/ */
static CueDesc get_cue_desc(AVFormatContext *s, int64_t ts, int64_t cues_start) { static CueDesc get_cue_desc(AVFormatContext *s, int64_t ts, int64_t cues_start) {
MatroskaDemuxContext *matroska = s->priv_data; MatroskaDemuxContext *matroska = s->priv_data;
FFStream *const sti = ffstream(s->streams[0]);
AVIndexEntry *const index_entries = sti->index_entries;
int nb_index_entries = sti->nb_index_entries;
CueDesc cue_desc; CueDesc cue_desc;
int i; int i;
int nb_index_entries = s->streams[0]->internal->nb_index_entries;
AVIndexEntry *index_entries = s->streams[0]->internal->index_entries;
if (ts >= matroska->duration * matroska->time_scale) return (CueDesc) {-1, -1, -1, -1}; if (ts >= matroska->duration * matroska->time_scale) return (CueDesc) {-1, -1, -1, -1};
for (i = 1; i < nb_index_entries; i++) { for (i = 1; i < nb_index_entries; i++) {
if (index_entries[i - 1].timestamp * matroska->time_scale <= ts && if (index_entries[i - 1].timestamp * matroska->time_scale <= ts &&
@ -3932,14 +3940,20 @@ static CueDesc get_cue_desc(AVFormatContext *s, int64_t ts, int64_t cues_start)
static int webm_clusters_start_with_keyframe(AVFormatContext *s) static int webm_clusters_start_with_keyframe(AVFormatContext *s)
{ {
MatroskaDemuxContext *matroska = s->priv_data; MatroskaDemuxContext *matroska = s->priv_data;
AVStream *const st = s->streams[0];
FFStream *const sti = ffstream(st);
uint32_t id = matroska->current_id; uint32_t id = matroska->current_id;
int64_t cluster_pos, before_pos; int64_t cluster_pos, before_pos;
int index, rv = 1; int index, rv = 1;
if (s->streams[0]->internal->nb_index_entries <= 0) return 0;
if (sti->nb_index_entries <= 0)
return 0;
// seek to the first cluster using cues. // seek to the first cluster using cues.
index = av_index_search_timestamp(s->streams[0], 0, 0); index = av_index_search_timestamp(st, 0, 0);
if (index < 0) return 0; if (index < 0)
cluster_pos = s->streams[0]->internal->index_entries[index].pos; return 0;
cluster_pos = sti->index_entries[index].pos;
before_pos = avio_tell(s->pb); before_pos = avio_tell(s->pb);
while (1) { while (1) {
uint64_t cluster_id, cluster_length; uint64_t cluster_id, cluster_length;
@ -4060,12 +4074,12 @@ static int64_t webm_dash_manifest_compute_bandwidth(AVFormatContext *s, int64_t
{ {
MatroskaDemuxContext *matroska = s->priv_data; MatroskaDemuxContext *matroska = s->priv_data;
AVStream *st = s->streams[0]; AVStream *st = s->streams[0];
FFStream *const sti = ffstream(st);
double bandwidth = 0.0; double bandwidth = 0.0;
int i;
for (i = 0; i < st->internal->nb_index_entries; i++) { for (int i = 0; i < sti->nb_index_entries; i++) {
int64_t prebuffer_ns = 1000000000; int64_t prebuffer_ns = 1000000000;
int64_t time_ns = st->internal->index_entries[i].timestamp * matroska->time_scale; int64_t time_ns = sti->index_entries[i].timestamp * matroska->time_scale;
double nano_seconds_per_second = 1000000000.0; double nano_seconds_per_second = 1000000000.0;
int64_t prebuffered_ns = time_ns + prebuffer_ns; int64_t prebuffered_ns = time_ns + prebuffer_ns;
double prebuffer_bytes = 0.0; double prebuffer_bytes = 0.0;
@ -4149,6 +4163,7 @@ static int webm_dash_manifest_cues(AVFormatContext *s, int64_t init_range)
EbmlList *seekhead_list = &matroska->seekhead; EbmlList *seekhead_list = &matroska->seekhead;
MatroskaSeekhead *seekhead = seekhead_list->elem; MatroskaSeekhead *seekhead = seekhead_list->elem;
AVStream *const st = s->streams[0]; AVStream *const st = s->streams[0];
FFStream *const sti = ffstream(st);
AVBPrint bprint; AVBPrint bprint;
char *buf; char *buf;
int64_t cues_start = -1, cues_end = -1, before_pos, bandwidth; int64_t cues_start = -1, cues_end = -1, before_pos, bandwidth;
@ -4184,7 +4199,7 @@ static int webm_dash_manifest_cues(AVFormatContext *s, int64_t init_range)
// parse the cues // parse the cues
matroska_parse_cues(matroska); matroska_parse_cues(matroska);
if (!st->internal->nb_index_entries) if (!sti->nb_index_entries)
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
// cues start // cues start
@ -4209,8 +4224,8 @@ static int webm_dash_manifest_cues(AVFormatContext *s, int64_t init_range)
// Store cue point timestamps as a comma separated list // Store cue point timestamps as a comma separated list
// for checking subsegment alignment in the muxer. // for checking subsegment alignment in the muxer.
av_bprint_init(&bprint, 0, AV_BPRINT_SIZE_UNLIMITED); av_bprint_init(&bprint, 0, AV_BPRINT_SIZE_UNLIMITED);
for (int i = 0; i < st->internal->nb_index_entries; i++) for (int i = 0; i < sti->nb_index_entries; i++)
av_bprintf(&bprint, "%" PRId64",", st->internal->index_entries[i].timestamp); av_bprintf(&bprint, "%" PRId64",", sti->index_entries[i].timestamp);
if (!av_bprint_is_complete(&bprint)) { if (!av_bprint_is_complete(&bprint)) {
av_bprint_finalize(&bprint, NULL); av_bprint_finalize(&bprint, NULL);
return AVERROR(ENOMEM); return AVERROR(ENOMEM);

@ -50,7 +50,7 @@ static int read_header(AVFormatContext *s)
if (!st) if (!st)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
st->internal->need_parsing = AVSTREAM_PARSE_HEADERS; ffstream(st)->need_parsing = AVSTREAM_PARSE_HEADERS;
st->start_time = 0; st->start_time = 0;
st->nb_frames = st->nb_frames =
st->duration = avio_rb32(pb); st->duration = avio_rb32(pb);

@ -117,6 +117,7 @@ static void read_uint64(AVFormatContext *avctx, AVIOContext *pb, const char *tag
static int scan_file(AVFormatContext *avctx, AVStream *vst, AVStream *ast, int file) static int scan_file(AVFormatContext *avctx, AVStream *vst, AVStream *ast, int file)
{ {
FFStream *const vsti = ffstream(vst), *const asti = ffstream(ast);
MlvContext *mlv = avctx->priv_data; MlvContext *mlv = avctx->priv_data;
AVIOContext *pb = mlv->pb[file]; AVIOContext *pb = mlv->pb[file];
int ret; int ret;
@ -189,12 +190,14 @@ static int scan_file(AVFormatContext *avctx, AVStream *vst, AVStream *ast, int f
} }
} else if (vst && type == MKTAG('V', 'I', 'D', 'F') && size >= 4) { } else if (vst && type == MKTAG('V', 'I', 'D', 'F') && size >= 4) {
uint64_t pts = avio_rl32(pb); uint64_t pts = avio_rl32(pb);
ff_add_index_entry(&vst->internal->index_entries, &vst->internal->nb_index_entries, &vst->internal->index_entries_allocated_size, ff_add_index_entry(&vsti->index_entries, &vsti->nb_index_entries,
&vsti->index_entries_allocated_size,
avio_tell(pb) - 20, pts, file, 0, AVINDEX_KEYFRAME); avio_tell(pb) - 20, pts, file, 0, AVINDEX_KEYFRAME);
size -= 4; size -= 4;
} else if (ast && type == MKTAG('A', 'U', 'D', 'F') && size >= 4) { } else if (ast && type == MKTAG('A', 'U', 'D', 'F') && size >= 4) {
uint64_t pts = avio_rl32(pb); uint64_t pts = avio_rl32(pb);
ff_add_index_entry(&ast->internal->index_entries, &ast->internal->nb_index_entries, &ast->internal->index_entries_allocated_size, ff_add_index_entry(&asti->index_entries, &asti->nb_index_entries,
&asti->index_entries_allocated_size,
avio_tell(pb) - 20, pts, file, 0, AVINDEX_KEYFRAME); avio_tell(pb) - 20, pts, file, 0, AVINDEX_KEYFRAME);
size -= 4; size -= 4;
} else if (vst && type == MKTAG('W','B','A','L') && size >= 28) { } else if (vst && type == MKTAG('W','B','A','L') && size >= 28) {
@ -257,6 +260,7 @@ static int read_header(AVFormatContext *avctx)
MlvContext *mlv = avctx->priv_data; MlvContext *mlv = avctx->priv_data;
AVIOContext *pb = avctx->pb; AVIOContext *pb = avctx->pb;
AVStream *vst = NULL, *ast = NULL; AVStream *vst = NULL, *ast = NULL;
FFStream *vsti = NULL, *asti = NULL;
int size, ret; int size, ret;
unsigned nb_video_frames, nb_audio_frames; unsigned nb_video_frames, nb_audio_frames;
uint64_t guid; uint64_t guid;
@ -285,6 +289,8 @@ static int read_header(AVFormatContext *avctx)
vst = avformat_new_stream(avctx, NULL); vst = avformat_new_stream(avctx, NULL);
if (!vst) if (!vst)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
vsti = ffstream(vst);
vst->id = 0; vst->id = 0;
vst->nb_frames = nb_video_frames; vst->nb_frames = nb_video_frames;
if ((mlv->class[0] & (MLV_CLASS_FLAG_DELTA|MLV_CLASS_FLAG_LZMA))) if ((mlv->class[0] & (MLV_CLASS_FLAG_DELTA|MLV_CLASS_FLAG_LZMA)))
@ -316,6 +322,7 @@ static int read_header(AVFormatContext *avctx)
ast = avformat_new_stream(avctx, NULL); ast = avformat_new_stream(avctx, NULL);
if (!ast) if (!ast)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
asti = ffstream(ast);
ast->id = 1; ast->id = 1;
ast->nb_frames = nb_audio_frames; ast->nb_frames = nb_audio_frames;
if ((mlv->class[1] & MLV_CLASS_FLAG_LZMA)) if ((mlv->class[1] & MLV_CLASS_FLAG_LZMA))
@ -372,21 +379,21 @@ static int read_header(AVFormatContext *avctx)
} }
if (vst) if (vst)
vst->duration = vst->internal->nb_index_entries; vst->duration = vsti->nb_index_entries;
if (ast) if (ast)
ast->duration = ast->internal->nb_index_entries; ast->duration = asti->nb_index_entries;
if ((vst && !vst->internal->nb_index_entries) || (ast && !ast->internal->nb_index_entries)) { if ((vst && !vsti->nb_index_entries) || (ast && !asti->nb_index_entries)) {
av_log(avctx, AV_LOG_ERROR, "no index entries found\n"); av_log(avctx, AV_LOG_ERROR, "no index entries found\n");
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
if (vst && ast) if (vst && ast)
avio_seek(pb, FFMIN(vst->internal->index_entries[0].pos, ast->internal->index_entries[0].pos), SEEK_SET); avio_seek(pb, FFMIN(vsti->index_entries[0].pos, asti->index_entries[0].pos), SEEK_SET);
else if (vst) else if (vst)
avio_seek(pb, vst->internal->index_entries[0].pos, SEEK_SET); avio_seek(pb, vsti->index_entries[0].pos, SEEK_SET);
else if (ast) else if (ast)
avio_seek(pb, ast->internal->index_entries[0].pos, SEEK_SET); avio_seek(pb, asti->index_entries[0].pos, SEEK_SET);
return 0; return 0;
} }
@ -396,6 +403,7 @@ static int read_packet(AVFormatContext *avctx, AVPacket *pkt)
MlvContext *mlv = avctx->priv_data; MlvContext *mlv = avctx->priv_data;
AVIOContext *pb; AVIOContext *pb;
AVStream *st; AVStream *st;
FFStream *sti;
int index, ret; int index, ret;
unsigned int size, space; unsigned int size, space;
@ -403,6 +411,7 @@ static int read_packet(AVFormatContext *avctx, AVPacket *pkt)
return AVERROR_EOF; return AVERROR_EOF;
st = avctx->streams[mlv->stream_index]; st = avctx->streams[mlv->stream_index];
sti = ffstream(st);
if (mlv->pts >= st->duration) if (mlv->pts >= st->duration)
return AVERROR_EOF; return AVERROR_EOF;
@ -412,12 +421,12 @@ static int read_packet(AVFormatContext *avctx, AVPacket *pkt)
return AVERROR(EIO); return AVERROR(EIO);
} }
pb = mlv->pb[st->internal->index_entries[index].size]; pb = mlv->pb[sti->index_entries[index].size];
if (!pb) { if (!pb) {
ret = FFERROR_REDO; ret = FFERROR_REDO;
goto next_packet; goto next_packet;
} }
avio_seek(pb, st->internal->index_entries[index].pos, SEEK_SET); avio_seek(pb, sti->index_entries[index].pos, SEEK_SET);
avio_skip(pb, 4); // blockType avio_skip(pb, 4); // blockType
size = avio_rl32(pb); size = avio_rl32(pb);

@ -1767,7 +1767,10 @@ static int mov_read_targa_y216(MOVContext *c, AVIOContext *pb, MOVAtom atom)
static int mov_read_ares(MOVContext *c, AVIOContext *pb, MOVAtom atom) static int mov_read_ares(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{ {
if (c->fc->nb_streams >= 1) { if (c->fc->nb_streams >= 1) {
AVCodecParameters *par = c->fc->streams[c->fc->nb_streams-1]->codecpar; AVStream *const st = c->fc->streams[c->fc->nb_streams - 1];
FFStream *const sti = ffstream(st);
AVCodecParameters *par = st->codecpar;
if (par->codec_tag == MKTAG('A', 'V', 'i', 'n') && if (par->codec_tag == MKTAG('A', 'V', 'i', 'n') &&
par->codec_id == AV_CODEC_ID_H264 && par->codec_id == AV_CODEC_ID_H264 &&
atom.size > 11) { atom.size > 11) {
@ -1794,8 +1797,7 @@ static int mov_read_ares(MOVContext *c, AVIOContext *pb, MOVAtom atom)
return 0; return 0;
den *= 2; den *= 2;
case 1: case 1:
c->fc->streams[c->fc->nb_streams-1]->internal->display_aspect_ratio.num = num; sti->display_aspect_ratio = (AVRational){ num, den };
c->fc->streams[c->fc->nb_streams-1]->internal->display_aspect_ratio.den = den;
default: default:
return 0; return 0;
} }
@ -2198,7 +2200,7 @@ static void mov_parse_stsd_audio(MOVContext *c, AVIOContext *pb,
switch (st->codecpar->codec_id) { switch (st->codecpar->codec_id) {
case AV_CODEC_ID_MP2: case AV_CODEC_ID_MP2:
case AV_CODEC_ID_MP3: case AV_CODEC_ID_MP3:
st->internal->need_parsing = AVSTREAM_PARSE_FULL; ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL;
break; break;
} }
} }
@ -2368,6 +2370,8 @@ static int mov_parse_stsd_data(MOVContext *c, AVIOContext *pb,
static int mov_finalize_stsd_codec(MOVContext *c, AVIOContext *pb, static int mov_finalize_stsd_codec(MOVContext *c, AVIOContext *pb,
AVStream *st, MOVStreamContext *sc) AVStream *st, MOVStreamContext *sc)
{ {
FFStream *const sti = ffstream(st);
if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&
!st->codecpar->sample_rate && sc->time_scale > 1) !st->codecpar->sample_rate && sc->time_scale > 1)
st->codecpar->sample_rate = sc->time_scale; st->codecpar->sample_rate = sc->time_scale;
@ -2436,10 +2440,10 @@ static int mov_finalize_stsd_codec(MOVContext *c, AVIOContext *pb,
case AV_CODEC_ID_VC1: case AV_CODEC_ID_VC1:
case AV_CODEC_ID_VP8: case AV_CODEC_ID_VP8:
case AV_CODEC_ID_VP9: case AV_CODEC_ID_VP9:
st->internal->need_parsing = AVSTREAM_PARSE_FULL; sti->need_parsing = AVSTREAM_PARSE_FULL;
break; break;
case AV_CODEC_ID_AV1: case AV_CODEC_ID_AV1:
st->internal->need_parsing = AVSTREAM_PARSE_HEADERS; sti->need_parsing = AVSTREAM_PARSE_HEADERS;
break; break;
default: default:
break; break;
@ -2768,12 +2772,14 @@ static int mov_read_stps(MOVContext *c, AVIOContext *pb, MOVAtom atom)
static int mov_read_stss(MOVContext *c, AVIOContext *pb, MOVAtom atom) static int mov_read_stss(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{ {
AVStream *st; AVStream *st;
FFStream *sti;
MOVStreamContext *sc; MOVStreamContext *sc;
unsigned int i, entries; unsigned int i, entries;
if (c->fc->nb_streams < 1) if (c->fc->nb_streams < 1)
return 0; return 0;
st = c->fc->streams[c->fc->nb_streams-1]; st = c->fc->streams[c->fc->nb_streams-1];
sti = ffstream(st);
sc = st->priv_data; sc = st->priv_data;
avio_r8(pb); /* version */ avio_r8(pb); /* version */
@ -2785,8 +2791,8 @@ static int mov_read_stss(MOVContext *c, AVIOContext *pb, MOVAtom atom)
if (!entries) { if (!entries) {
sc->keyframe_absent = 1; sc->keyframe_absent = 1;
if (!st->internal->need_parsing && st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) if (!sti->need_parsing && st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
st->internal->need_parsing = AVSTREAM_PARSE_HEADERS; sti->need_parsing = AVSTREAM_PARSE_HEADERS;
return 0; return 0;
} }
if (sc->keyframes) if (sc->keyframes)
@ -3191,8 +3197,9 @@ static int find_prev_closest_index(AVStream *st,
int64_t* ctts_sample) int64_t* ctts_sample)
{ {
MOVStreamContext *msc = st->priv_data; MOVStreamContext *msc = st->priv_data;
AVIndexEntry *e_keep = st->internal->index_entries; FFStream *const sti = ffstream(st);
int nb_keep = st->internal->nb_index_entries; AVIndexEntry *e_keep = sti->index_entries;
int nb_keep = sti->nb_index_entries;
int64_t i = 0; int64_t i = 0;
int64_t index_ctts_count; int64_t index_ctts_count;
@ -3205,8 +3212,8 @@ static int find_prev_closest_index(AVStream *st,
timestamp_pts -= msc->dts_shift; timestamp_pts -= msc->dts_shift;
} }
st->internal->index_entries = e_old; sti->index_entries = e_old;
st->internal->nb_index_entries = nb_old; sti->nb_index_entries = nb_old;
*index = av_index_search_timestamp(st, timestamp_pts, flag | AVSEEK_FLAG_BACKWARD); *index = av_index_search_timestamp(st, timestamp_pts, flag | AVSEEK_FLAG_BACKWARD);
// Keep going backwards in the index entries until the timestamp is the same. // Keep going backwards in the index entries until the timestamp is the same.
@ -3259,14 +3266,14 @@ static int find_prev_closest_index(AVStream *st,
} }
/* restore AVStream state*/ /* restore AVStream state*/
st->internal->index_entries = e_keep; sti->index_entries = e_keep;
st->internal->nb_index_entries = nb_keep; sti->nb_index_entries = nb_keep;
return *index >= 0 ? 0 : -1; return *index >= 0 ? 0 : -1;
} }
/** /**
* Add index entry with the given values, to the end of st->internal->index_entries. * Add index entry with the given values, to the end of ffstream(st)->index_entries.
* Returns the new size st->internal->index_entries if successful, else returns -1. * Returns the new size ffstream(st)->index_entries if successful, else returns -1.
* *
* This function is similar to ff_add_index_entry in libavformat/utils.c * This function is similar to ff_add_index_entry in libavformat/utils.c
* except that here we are always unconditionally adding an index entry to * except that here we are always unconditionally adding an index entry to
@ -3278,29 +3285,30 @@ static int find_prev_closest_index(AVStream *st,
static int64_t add_index_entry(AVStream *st, int64_t pos, int64_t timestamp, static int64_t add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
int size, int distance, int flags) int size, int distance, int flags)
{ {
FFStream *const sti = ffstream(st);
AVIndexEntry *entries, *ie; AVIndexEntry *entries, *ie;
int64_t index = -1; int64_t index = -1;
const size_t min_size_needed = (st->internal->nb_index_entries + 1) * sizeof(AVIndexEntry); const size_t min_size_needed = (sti->nb_index_entries + 1) * sizeof(AVIndexEntry);
// Double the allocation each time, to lower memory fragmentation. // Double the allocation each time, to lower memory fragmentation.
// Another difference from ff_add_index_entry function. // Another difference from ff_add_index_entry function.
const size_t requested_size = const size_t requested_size =
min_size_needed > st->internal->index_entries_allocated_size ? min_size_needed > sti->index_entries_allocated_size ?
FFMAX(min_size_needed, 2 * st->internal->index_entries_allocated_size) : FFMAX(min_size_needed, 2 * sti->index_entries_allocated_size) :
min_size_needed; min_size_needed;
if (st->internal->nb_index_entries + 1U >= UINT_MAX / sizeof(AVIndexEntry)) if (sti->nb_index_entries + 1U >= UINT_MAX / sizeof(AVIndexEntry))
return -1; return -1;
entries = av_fast_realloc(st->internal->index_entries, entries = av_fast_realloc(sti->index_entries,
&st->internal->index_entries_allocated_size, &sti->index_entries_allocated_size,
requested_size); requested_size);
if (!entries) if (!entries)
return -1; return -1;
st->internal->index_entries= entries; sti->index_entries = entries;
index= st->internal->nb_index_entries++; index = sti->nb_index_entries++;
ie= &entries[index]; ie= &entries[index];
ie->pos = pos; ie->pos = pos;
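mov.c keeps its own add_index_entry() (above) because, as its comment notes, it differs from ff_add_index_entry() by doubling the allocation whenever the next entry would not fit, so appending stays cheap while the index is built sample by sample. A simplified stand-alone sketch of that growth policy; the Index type and grow_index() name are stand-ins, not FFmpeg API:

    #include <stdlib.h>

    typedef struct Entry { long long pos, timestamp; } Entry;  /* simplified */

    typedef struct Index {
        Entry  *entries;
        int     nb_entries;
        size_t  allocated_size;   /* bytes, like index_entries_allocated_size */
    } Index;

    /* Make room for one more entry, doubling the buffer when it is full. */
    static int grow_index(Index *idx)
    {
        size_t min_needed = (idx->nb_entries + 1) * sizeof(*idx->entries);
        size_t request;
        Entry *tmp;

        if (min_needed <= idx->allocated_size)
            return 0;                               /* still enough room */

        request = min_needed > 2 * idx->allocated_size
                      ? min_needed : 2 * idx->allocated_size;
        tmp = realloc(idx->entries, request);
        if (!tmp)
            return -1;
        idx->entries        = tmp;
        idx->allocated_size = request;
        return 0;
    }

    int main(void)
    {
        Index idx = { 0 };
        for (int i = 0; i < 1000; i++) {
            if (grow_index(&idx) < 0)
                return 1;
            idx.entries[idx.nb_entries++] = (Entry){ .pos = i, .timestamp = i };
        }
        free(idx.entries);
        return 0;
    }
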
@ -3318,11 +3326,12 @@ static int64_t add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
static void fix_index_entry_timestamps(AVStream* st, int end_index, int64_t end_ts, static void fix_index_entry_timestamps(AVStream* st, int end_index, int64_t end_ts,
int64_t* frame_duration_buffer, int64_t* frame_duration_buffer,
int frame_duration_buffer_size) { int frame_duration_buffer_size) {
FFStream *const sti = ffstream(st);
int i = 0; int i = 0;
av_assert0(end_index >= 0 && end_index <= st->internal->nb_index_entries); av_assert0(end_index >= 0 && end_index <= sti->nb_index_entries);
for (i = 0; i < frame_duration_buffer_size; i++) { for (i = 0; i < frame_duration_buffer_size; i++) {
end_ts -= frame_duration_buffer[frame_duration_buffer_size - 1 - i]; end_ts -= frame_duration_buffer[frame_duration_buffer_size - 1 - i];
st->internal->index_entries[end_index - 1 - i].timestamp = end_ts; sti->index_entries[end_index - 1 - i].timestamp = end_ts;
} }
} }
@ -3361,7 +3370,7 @@ static int64_t add_ctts_entry(MOVStts** ctts_data, unsigned int* ctts_count, uns
static void mov_estimate_video_delay(MOVContext *c, AVStream* st) static void mov_estimate_video_delay(MOVContext *c, AVStream* st)
{ {
MOVStreamContext *msc = st->priv_data; MOVStreamContext *msc = st->priv_data;
int ind; FFStream *const sti = ffstream(st);
int ctts_ind = 0; int ctts_ind = 0;
int ctts_sample = 0; int ctts_sample = 0;
int64_t pts_buf[MAX_REORDER_DELAY + 1]; // Circular buffer to sort pts. int64_t pts_buf[MAX_REORDER_DELAY + 1]; // Circular buffer to sort pts.
@ -3374,14 +3383,14 @@ static void mov_estimate_video_delay(MOVContext *c, AVStream* st)
if (st->codecpar->video_delay <= 0 && msc->ctts_data && if (st->codecpar->video_delay <= 0 && msc->ctts_data &&
st->codecpar->codec_id == AV_CODEC_ID_H264) { st->codecpar->codec_id == AV_CODEC_ID_H264) {
st->codecpar->video_delay = 0; st->codecpar->video_delay = 0;
for (ind = 0; ind < st->internal->nb_index_entries && ctts_ind < msc->ctts_count; ++ind) { for (int ind = 0; ind < sti->nb_index_entries && ctts_ind < msc->ctts_count; ++ind) {
// Point j to the last elem of the buffer and insert the current pts there. // Point j to the last elem of the buffer and insert the current pts there.
j = buf_start; j = buf_start;
buf_start = (buf_start + 1); buf_start = (buf_start + 1);
if (buf_start == MAX_REORDER_DELAY + 1) if (buf_start == MAX_REORDER_DELAY + 1)
buf_start = 0; buf_start = 0;
pts_buf[j] = st->internal->index_entries[ind].timestamp + msc->ctts_data[ctts_ind].duration; pts_buf[j] = sti->index_entries[ind].timestamp + msc->ctts_data[ctts_ind].duration;
// The timestamps that are already in the sorted buffer, and are greater than the // The timestamps that are already in the sorted buffer, and are greater than the
// current pts, are exactly the timestamps that need to be buffered to output PTS // current pts, are exactly the timestamps that need to be buffered to output PTS
@ -3461,7 +3470,7 @@ static void mov_current_sample_set(MOVStreamContext *sc, int current_sample)
} }
/** /**
* Fix st->internal->index_entries, so that it contains only the entries (and the entries * Fix ffstream(st)->index_entries, so that it contains only the entries (and the entries
* which are needed to decode them) that fall in the edit list time ranges. * which are needed to decode them) that fall in the edit list time ranges.
* Also fixes the timestamps of the index entries to match the timeline * Also fixes the timestamps of the index entries to match the timeline
* specified the edit lists. * specified the edit lists.
@ -3469,8 +3478,9 @@ static void mov_current_sample_set(MOVStreamContext *sc, int current_sample)
static void mov_fix_index(MOVContext *mov, AVStream *st) static void mov_fix_index(MOVContext *mov, AVStream *st)
{ {
MOVStreamContext *msc = st->priv_data; MOVStreamContext *msc = st->priv_data;
AVIndexEntry *e_old = st->internal->index_entries; FFStream *const sti = ffstream(st);
int nb_old = st->internal->nb_index_entries; AVIndexEntry *e_old = sti->index_entries;
int nb_old = sti->nb_index_entries;
const AVIndexEntry *e_old_end = e_old + nb_old; const AVIndexEntry *e_old_end = e_old + nb_old;
const AVIndexEntry *current = NULL; const AVIndexEntry *current = NULL;
MOVStts *ctts_data_old = msc->ctts_data; MOVStts *ctts_data_old = msc->ctts_data;
@ -3497,7 +3507,6 @@ static void mov_fix_index(MOVContext *mov, AVStream *st)
int first_non_zero_audio_edit = -1; int first_non_zero_audio_edit = -1;
int packet_skip_samples = 0; int packet_skip_samples = 0;
MOVIndexRange *current_index_range; MOVIndexRange *current_index_range;
int i;
int found_keyframe_after_edit = 0; int found_keyframe_after_edit = 0;
int found_non_empty_edit = 0; int found_non_empty_edit = 0;
@ -3515,9 +3524,9 @@ static void mov_fix_index(MOVContext *mov, AVStream *st)
current_index_range = msc->index_ranges - 1; current_index_range = msc->index_ranges - 1;
// Clean AVStream from traces of old index // Clean AVStream from traces of old index
st->internal->index_entries = NULL; sti->index_entries = NULL;
st->internal->index_entries_allocated_size = 0; sti->index_entries_allocated_size = 0;
st->internal->nb_index_entries = 0; sti->nb_index_entries = 0;
// Clean ctts fields of MOVStreamContext // Clean ctts fields of MOVStreamContext
msc->ctts_data = NULL; msc->ctts_data = NULL;
@ -3562,7 +3571,7 @@ static void mov_fix_index(MOVContext *mov, AVStream *st)
} }
if (first_non_zero_audio_edit > 0) if (first_non_zero_audio_edit > 0)
st->internal->skip_samples = msc->start_pad = 0; sti->skip_samples = msc->start_pad = 0;
} }
// While reordering frame index according to edit list we must handle properly // While reordering frame index according to edit list we must handle properly
@ -3637,7 +3646,7 @@ static void mov_fix_index(MOVContext *mov, AVStream *st)
curr_cts < edit_list_media_time && curr_cts + frame_duration > edit_list_media_time && curr_cts < edit_list_media_time && curr_cts + frame_duration > edit_list_media_time &&
first_non_zero_audio_edit > 0) { first_non_zero_audio_edit > 0) {
packet_skip_samples = edit_list_media_time - curr_cts; packet_skip_samples = edit_list_media_time - curr_cts;
st->internal->skip_samples += packet_skip_samples; sti->skip_samples += packet_skip_samples;
// Shift the index entry timestamp by packet_skip_samples to be correct. // Shift the index entry timestamp by packet_skip_samples to be correct.
edit_list_dts_counter -= packet_skip_samples; edit_list_dts_counter -= packet_skip_samples;
@ -3646,7 +3655,7 @@ static void mov_fix_index(MOVContext *mov, AVStream *st)
// Make timestamps strictly monotonically increasing for audio, by rewriting timestamps for // Make timestamps strictly monotonically increasing for audio, by rewriting timestamps for
// discarded packets. // discarded packets.
if (frame_duration_buffer) { if (frame_duration_buffer) {
fix_index_entry_timestamps(st, st->internal->nb_index_entries, edit_list_dts_counter, fix_index_entry_timestamps(st, sti->nb_index_entries, edit_list_dts_counter,
frame_duration_buffer, num_discarded_begin); frame_duration_buffer, num_discarded_begin);
av_freep(&frame_duration_buffer); av_freep(&frame_duration_buffer);
} }
@ -3670,7 +3679,7 @@ static void mov_fix_index(MOVContext *mov, AVStream *st)
// Increment skip_samples for the first non-zero audio edit list // Increment skip_samples for the first non-zero audio edit list
if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&
first_non_zero_audio_edit > 0 && st->codecpar->codec_id != AV_CODEC_ID_VORBIS) { first_non_zero_audio_edit > 0 && st->codecpar->codec_id != AV_CODEC_ID_VORBIS) {
st->internal->skip_samples += frame_duration; sti->skip_samples += frame_duration;
} }
} }
} }
@ -3685,7 +3694,7 @@ static void mov_fix_index(MOVContext *mov, AVStream *st)
// Make timestamps strictly monotonically increasing by rewriting timestamps for // Make timestamps strictly monotonically increasing by rewriting timestamps for
// discarded packets. // discarded packets.
if (frame_duration_buffer) { if (frame_duration_buffer) {
fix_index_entry_timestamps(st, st->internal->nb_index_entries, edit_list_dts_counter, fix_index_entry_timestamps(st, sti->nb_index_entries, edit_list_dts_counter,
frame_duration_buffer, num_discarded_begin); frame_duration_buffer, num_discarded_begin);
av_freep(&frame_duration_buffer); av_freep(&frame_duration_buffer);
} }
@ -3746,9 +3755,8 @@ static void mov_fix_index(MOVContext *mov, AVStream *st)
if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
if (msc->min_corrected_pts > 0) { if (msc->min_corrected_pts > 0) {
av_log(mov->fc, AV_LOG_DEBUG, "Offset DTS by %"PRId64" to make first pts zero.\n", msc->min_corrected_pts); av_log(mov->fc, AV_LOG_DEBUG, "Offset DTS by %"PRId64" to make first pts zero.\n", msc->min_corrected_pts);
for (i = 0; i < st->internal->nb_index_entries; ++i) { for (int i = 0; i < sti->nb_index_entries; ++i)
st->internal->index_entries[i].timestamp -= msc->min_corrected_pts; sti->index_entries[i].timestamp -= msc->min_corrected_pts;
}
} }
} }
// Start time should be equal to zero or the duration of any empty edits. // Start time should be equal to zero or the duration of any empty edits.
@ -3756,7 +3764,7 @@ static void mov_fix_index(MOVContext *mov, AVStream *st)
// Update av stream length, if it ends up shorter than the track's media duration // Update av stream length, if it ends up shorter than the track's media duration
st->duration = FFMIN(st->duration, edit_list_dts_entry_end - start_dts); st->duration = FFMIN(st->duration, edit_list_dts_entry_end - start_dts);
msc->start_pad = st->internal->skip_samples; msc->start_pad = sti->skip_samples;
// Free the old index and the old CTTS structures // Free the old index and the old CTTS structures
av_free(e_old); av_free(e_old);
@ -3773,6 +3781,7 @@ static void mov_fix_index(MOVContext *mov, AVStream *st)
static void mov_build_index(MOVContext *mov, AVStream *st) static void mov_build_index(MOVContext *mov, AVStream *st)
{ {
MOVStreamContext *sc = st->priv_data; MOVStreamContext *sc = st->priv_data;
FFStream *const sti = ffstream(st);
int64_t current_offset; int64_t current_offset;
int64_t current_dts = 0; int64_t current_dts = 0;
unsigned int stts_index = 0; unsigned int stts_index = 0;
@ -3844,17 +3853,17 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
current_dts -= sc->dts_shift; current_dts -= sc->dts_shift;
last_dts = current_dts; last_dts = current_dts;
if (!sc->sample_count || st->internal->nb_index_entries) if (!sc->sample_count || sti->nb_index_entries)
return; return;
if (sc->sample_count >= UINT_MAX / sizeof(*st->internal->index_entries) - st->internal->nb_index_entries) if (sc->sample_count >= UINT_MAX / sizeof(*sti->index_entries) - sti->nb_index_entries)
return; return;
if (av_reallocp_array(&st->internal->index_entries, if (av_reallocp_array(&sti->index_entries,
st->internal->nb_index_entries + sc->sample_count, sti->nb_index_entries + sc->sample_count,
sizeof(*st->internal->index_entries)) < 0) { sizeof(*sti->index_entries)) < 0) {
st->internal->nb_index_entries = 0; sti->nb_index_entries = 0;
return; return;
} }
st->internal->index_entries_allocated_size = (st->internal->nb_index_entries + sc->sample_count) * sizeof(*st->internal->index_entries); sti->index_entries_allocated_size = (sti->nb_index_entries + sc->sample_count) * sizeof(*sti->index_entries);
if (ctts_data_old) { if (ctts_data_old) {
// Expand ctts entries such that we have a 1-1 mapping with samples // Expand ctts entries such that we have a 1-1 mapping with samples
@ -3937,7 +3946,7 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
av_log(mov->fc, AV_LOG_ERROR, "Sample size %u is too large\n", sample_size); av_log(mov->fc, AV_LOG_ERROR, "Sample size %u is too large\n", sample_size);
return; return;
} }
e = &st->internal->index_entries[st->internal->nb_index_entries++]; e = &sti->index_entries[sti->nb_index_entries++];
e->pos = current_offset; e->pos = current_offset;
e->timestamp = current_dts; e->timestamp = current_dts;
e->size = sample_size; e->size = sample_size;
@ -3946,7 +3955,7 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
av_log(mov->fc, AV_LOG_TRACE, "AVIndex stream %d, sample %u, offset %"PRIx64", dts %"PRId64", " av_log(mov->fc, AV_LOG_TRACE, "AVIndex stream %d, sample %u, offset %"PRIx64", dts %"PRId64", "
"size %u, distance %u, keyframe %d\n", st->index, current_sample, "size %u, distance %u, keyframe %d\n", st->index, current_sample,
current_offset, current_dts, sample_size, distance, keyframe); current_offset, current_dts, sample_size, distance, keyframe);
if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && st->internal->nb_index_entries < 100) if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && sti->nb_index_entries < 100)
ff_rfps_add_frame(mov->fc, st, current_dts); ff_rfps_add_frame(mov->fc, st, current_dts);
} }
@ -4018,15 +4027,15 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
} }
av_log(mov->fc, AV_LOG_TRACE, "chunk count %u\n", total); av_log(mov->fc, AV_LOG_TRACE, "chunk count %u\n", total);
if (total >= UINT_MAX / sizeof(*st->internal->index_entries) - st->internal->nb_index_entries) if (total >= UINT_MAX / sizeof(*sti->index_entries) - sti->nb_index_entries)
return; return;
if (av_reallocp_array(&st->internal->index_entries, if (av_reallocp_array(&sti->index_entries,
st->internal->nb_index_entries + total, sti->nb_index_entries + total,
sizeof(*st->internal->index_entries)) < 0) { sizeof(*sti->index_entries)) < 0) {
st->internal->nb_index_entries = 0; sti->nb_index_entries = 0;
return; return;
} }
st->internal->index_entries_allocated_size = (st->internal->nb_index_entries + total) * sizeof(*st->internal->index_entries); sti->index_entries_allocated_size = (sti->nb_index_entries + total) * sizeof(*sti->index_entries);
// populate index // populate index
for (i = 0; i < sc->chunk_count; i++) { for (i = 0; i < sc->chunk_count; i++) {
@ -4061,7 +4070,7 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
} }
} }
if (st->internal->nb_index_entries >= total) { if (sti->nb_index_entries >= total) {
av_log(mov->fc, AV_LOG_ERROR, "wrong chunk count %u\n", total); av_log(mov->fc, AV_LOG_ERROR, "wrong chunk count %u\n", total);
return; return;
} }
@ -4069,7 +4078,7 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
av_log(mov->fc, AV_LOG_ERROR, "Sample size %u is too large\n", size); av_log(mov->fc, AV_LOG_ERROR, "Sample size %u is too large\n", size);
return; return;
} }
e = &st->internal->index_entries[st->internal->nb_index_entries++]; e = &sti->index_entries[sti->nb_index_entries++];
e->pos = current_offset; e->pos = current_offset;
e->timestamp = current_dts; e->timestamp = current_dts;
e->size = size; e->size = size;
@ -4092,8 +4101,8 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
} }
// Update start time of the stream. // Update start time of the stream.
if (st->start_time == AV_NOPTS_VALUE && st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && st->internal->nb_index_entries > 0) { if (st->start_time == AV_NOPTS_VALUE && st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && sti->nb_index_entries > 0) {
st->start_time = st->internal->index_entries[0].timestamp + sc->dts_shift; st->start_time = sti->index_entries[0].timestamp + sc->dts_shift;
if (sc->ctts_data) { if (sc->ctts_data) {
st->start_time += sc->ctts_data[0].duration; st->start_time += sc->ctts_data[0].duration;
} }
@ -4328,7 +4337,7 @@ static int mov_read_trak(MOVContext *c, AVIOContext *pb, MOVAtom atom)
&& sc->stts_count > 3 && sc->stts_count > 3
&& sc->stts_count*10 > st->nb_frames && sc->stts_count*10 > st->nb_frames
&& sc->time_scale == st->codecpar->sample_rate) { && sc->time_scale == st->codecpar->sample_rate) {
st->internal->need_parsing = AVSTREAM_PARSE_FULL; ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL;
} }
/* Do not need those anymore. */ /* Do not need those anymore. */
av_freep(&sc->chunk_offsets); av_freep(&sc->chunk_offsets);
@ -4740,6 +4749,7 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{ {
MOVFragment *frag = &c->fragment; MOVFragment *frag = &c->fragment;
AVStream *st = NULL; AVStream *st = NULL;
FFStream *sti = NULL;
MOVStreamContext *sc; MOVStreamContext *sc;
MOVStts *ctts_data; MOVStts *ctts_data;
uint64_t offset; uint64_t offset;
@ -4762,6 +4772,7 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom)
for (i = 0; i < c->fc->nb_streams; i++) { for (i = 0; i < c->fc->nb_streams; i++) {
if (c->fc->streams[i]->id == frag->track_id) { if (c->fc->streams[i]->id == frag->track_id) {
st = c->fc->streams[i]; st = c->fc->streams[i];
sti = ffstream(st);
break; break;
} }
} }
@ -4779,7 +4790,7 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom)
// A valid index_entry means the trun for the fragment was read // A valid index_entry means the trun for the fragment was read
// and its samples are in index_entries at the given position. // and its samples are in index_entries at the given position.
// New index entries will be inserted before the index_entry found. // New index entries will be inserted before the index_entry found.
index_entry_pos = st->internal->nb_index_entries; index_entry_pos = sti->nb_index_entries;
for (i = c->frag_index.current + 1; i < c->frag_index.nb_items; i++) { for (i = c->frag_index.current + 1; i < c->frag_index.nb_items; i++) {
frag_stream_info = get_frag_stream_info(&c->frag_index, i, frag->track_id); frag_stream_info = get_frag_stream_info(&c->frag_index, i, frag->track_id);
if (frag_stream_info && frag_stream_info->index_entry >= 0) { if (frag_stream_info && frag_stream_info->index_entry >= 0) {
@ -4788,7 +4799,7 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom)
break; break;
} }
} }
av_assert0(index_entry_pos <= st->internal->nb_index_entries); av_assert0(index_entry_pos <= sti->nb_index_entries);
avio_r8(pb); /* version */ avio_r8(pb); /* version */
flags = avio_rb24(pb); flags = avio_rb24(pb);
@ -4839,22 +4850,22 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom)
av_log(c->fc, AV_LOG_TRACE, "first sample flags 0x%x\n", first_sample_flags); av_log(c->fc, AV_LOG_TRACE, "first sample flags 0x%x\n", first_sample_flags);
// realloc space for new index entries // realloc space for new index entries
if((uint64_t)st->internal->nb_index_entries + entries >= UINT_MAX / sizeof(AVIndexEntry)) { if ((uint64_t)sti->nb_index_entries + entries >= UINT_MAX / sizeof(AVIndexEntry)) {
entries = UINT_MAX / sizeof(AVIndexEntry) - st->internal->nb_index_entries; entries = UINT_MAX / sizeof(AVIndexEntry) - sti->nb_index_entries;
av_log(c->fc, AV_LOG_ERROR, "Failed to add index entry\n"); av_log(c->fc, AV_LOG_ERROR, "Failed to add index entry\n");
} }
if (entries == 0) if (entries == 0)
return 0; return 0;
requested_size = (st->internal->nb_index_entries + entries) * sizeof(AVIndexEntry); requested_size = (sti->nb_index_entries + entries) * sizeof(AVIndexEntry);
new_entries = av_fast_realloc(st->internal->index_entries, new_entries = av_fast_realloc(sti->index_entries,
&st->internal->index_entries_allocated_size, &sti->index_entries_allocated_size,
requested_size); requested_size);
if (!new_entries) if (!new_entries)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
st->internal->index_entries= new_entries; sti->index_entries= new_entries;
requested_size = (st->internal->nb_index_entries + entries) * sizeof(*sc->ctts_data); requested_size = (sti->nb_index_entries + entries) * sizeof(*sc->ctts_data);
old_ctts_allocated_size = sc->ctts_allocated_size; old_ctts_allocated_size = sc->ctts_allocated_size;
ctts_data = av_fast_realloc(sc->ctts_data, &sc->ctts_allocated_size, ctts_data = av_fast_realloc(sc->ctts_data, &sc->ctts_allocated_size,
requested_size); requested_size);
@ -4868,12 +4879,12 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom)
memset((uint8_t*)(sc->ctts_data) + old_ctts_allocated_size, 0, memset((uint8_t*)(sc->ctts_data) + old_ctts_allocated_size, 0,
sc->ctts_allocated_size - old_ctts_allocated_size); sc->ctts_allocated_size - old_ctts_allocated_size);
if (index_entry_pos < st->internal->nb_index_entries) { if (index_entry_pos < sti->nb_index_entries) {
// Make hole in index_entries and ctts_data for new samples // Make hole in index_entries and ctts_data for new samples
memmove(st->internal->index_entries + index_entry_pos + entries, memmove(sti->index_entries + index_entry_pos + entries,
st->internal->index_entries + index_entry_pos, sti->index_entries + index_entry_pos,
sizeof(*st->internal->index_entries) * sizeof(*sti->index_entries) *
(st->internal->nb_index_entries - index_entry_pos)); (sti->nb_index_entries - index_entry_pos));
memmove(sc->ctts_data + index_entry_pos + entries, memmove(sc->ctts_data + index_entry_pos + entries,
sc->ctts_data + index_entry_pos, sc->ctts_data + index_entry_pos,
sizeof(*sc->ctts_data) * (sc->ctts_count - index_entry_pos)); sizeof(*sc->ctts_data) * (sc->ctts_count - index_entry_pos));
@ -4882,15 +4893,15 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom)
} }
} }
st->internal->nb_index_entries += entries; sti->nb_index_entries += entries;
sc->ctts_count = st->internal->nb_index_entries; sc->ctts_count = sti->nb_index_entries;
// Record the index_entry position in frag_index of this fragment // Record the index_entry position in frag_index of this fragment
if (frag_stream_info) if (frag_stream_info)
frag_stream_info->index_entry = index_entry_pos; frag_stream_info->index_entry = index_entry_pos;
if (index_entry_pos > 0) if (index_entry_pos > 0)
prev_dts = st->internal->index_entries[index_entry_pos-1].timestamp; prev_dts = sti->index_entries[index_entry_pos-1].timestamp;
for (i = 0; i < entries && !pb->eof_reached; i++) { for (i = 0; i < entries && !pb->eof_reached; i++) {
unsigned sample_size = frag->size; unsigned sample_size = frag->size;
@ -4939,11 +4950,11 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom)
if (prev_dts >= dts) if (prev_dts >= dts)
index_entry_flags |= AVINDEX_DISCARD_FRAME; index_entry_flags |= AVINDEX_DISCARD_FRAME;
st->internal->index_entries[index_entry_pos].pos = offset; sti->index_entries[index_entry_pos].pos = offset;
st->internal->index_entries[index_entry_pos].timestamp = dts; sti->index_entries[index_entry_pos].timestamp = dts;
st->internal->index_entries[index_entry_pos].size= sample_size; sti->index_entries[index_entry_pos].size = sample_size;
st->internal->index_entries[index_entry_pos].min_distance= distance; sti->index_entries[index_entry_pos].min_distance = distance;
st->internal->index_entries[index_entry_pos].flags = index_entry_flags; sti->index_entries[index_entry_pos].flags = index_entry_flags;
sc->ctts_data[index_entry_pos].count = 1; sc->ctts_data[index_entry_pos].count = 1;
sc->ctts_data[index_entry_pos].duration = ctts_duration; sc->ctts_data[index_entry_pos].duration = ctts_duration;
@ -4972,16 +4983,16 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom)
// EOF found before reading all entries. Fix the hole this would // EOF found before reading all entries. Fix the hole this would
// leave in index_entries and ctts_data // leave in index_entries and ctts_data
int gap = entries - i; int gap = entries - i;
memmove(st->internal->index_entries + index_entry_pos, memmove(sti->index_entries + index_entry_pos,
st->internal->index_entries + index_entry_pos + gap, sti->index_entries + index_entry_pos + gap,
sizeof(*st->internal->index_entries) * sizeof(*sti->index_entries) *
(st->internal->nb_index_entries - (index_entry_pos + gap))); (sti->nb_index_entries - (index_entry_pos + gap)));
memmove(sc->ctts_data + index_entry_pos, memmove(sc->ctts_data + index_entry_pos,
sc->ctts_data + index_entry_pos + gap, sc->ctts_data + index_entry_pos + gap,
sizeof(*sc->ctts_data) * sizeof(*sc->ctts_data) *
(sc->ctts_count - (index_entry_pos + gap))); (sc->ctts_count - (index_entry_pos + gap)));
st->internal->nb_index_entries -= gap; sti->nb_index_entries -= gap;
sc->ctts_count -= gap; sc->ctts_count -= gap;
if (index_entry_pos < sc->current_sample) { if (index_entry_pos < sc->current_sample) {
sc->current_sample -= gap; sc->current_sample -= gap;
@ -4994,11 +5005,11 @@ static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom)
// fragment that overlap with AVINDEX_DISCARD_FRAME // fragment that overlap with AVINDEX_DISCARD_FRAME
prev_dts = AV_NOPTS_VALUE; prev_dts = AV_NOPTS_VALUE;
if (index_entry_pos > 0) if (index_entry_pos > 0)
prev_dts = st->internal->index_entries[index_entry_pos-1].timestamp; prev_dts = sti->index_entries[index_entry_pos-1].timestamp;
for (i = index_entry_pos; i < st->internal->nb_index_entries; i++) { for (int i = index_entry_pos; i < sti->nb_index_entries; i++) {
if (prev_dts < st->internal->index_entries[i].timestamp) if (prev_dts < sti->index_entries[i].timestamp)
break; break;
st->internal->index_entries[i].flags |= AVINDEX_DISCARD_FRAME; sti->index_entries[i].flags |= AVINDEX_DISCARD_FRAME;
} }
// If a hole was created to insert the new index_entries into, // If a hole was created to insert the new index_entries into,
@ -7156,15 +7167,15 @@ static int mov_probe(const AVProbeData *p)
static void mov_read_chapters(AVFormatContext *s) static void mov_read_chapters(AVFormatContext *s)
{ {
MOVContext *mov = s->priv_data; MOVContext *mov = s->priv_data;
AVStream *st;
MOVStreamContext *sc; MOVStreamContext *sc;
int64_t cur_pos; int64_t cur_pos;
int i, j; int i, j;
int chapter_track; int chapter_track;
for (j = 0; j < mov->nb_chapter_tracks; j++) { for (j = 0; j < mov->nb_chapter_tracks; j++) {
AVStream *st = NULL;
FFStream *sti = NULL;
chapter_track = mov->chapter_tracks[j]; chapter_track = mov->chapter_tracks[j];
st = NULL;
for (i = 0; i < s->nb_streams; i++) for (i = 0; i < s->nb_streams; i++)
if (s->streams[i]->id == chapter_track) { if (s->streams[i]->id == chapter_track) {
st = s->streams[i]; st = s->streams[i];
@ -7174,15 +7185,16 @@ static void mov_read_chapters(AVFormatContext *s)
av_log(s, AV_LOG_ERROR, "Referenced QT chapter track not found\n"); av_log(s, AV_LOG_ERROR, "Referenced QT chapter track not found\n");
continue; continue;
} }
sti = ffstream(st);
sc = st->priv_data; sc = st->priv_data;
cur_pos = avio_tell(sc->pb); cur_pos = avio_tell(sc->pb);
if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
st->disposition |= AV_DISPOSITION_ATTACHED_PIC | AV_DISPOSITION_TIMED_THUMBNAILS; st->disposition |= AV_DISPOSITION_ATTACHED_PIC | AV_DISPOSITION_TIMED_THUMBNAILS;
if (st->internal->nb_index_entries) { if (sti->nb_index_entries) {
// Retrieve the first frame, if possible // Retrieve the first frame, if possible
AVIndexEntry *sample = &st->internal->index_entries[0]; AVIndexEntry *sample = &sti->index_entries[0];
if (avio_seek(sc->pb, sample->pos, SEEK_SET) != sample->pos) { if (avio_seek(sc->pb, sample->pos, SEEK_SET) != sample->pos) {
av_log(s, AV_LOG_ERROR, "Failed to retrieve first frame\n"); av_log(s, AV_LOG_ERROR, "Failed to retrieve first frame\n");
goto finish; goto finish;
@ -7195,9 +7207,9 @@ static void mov_read_chapters(AVFormatContext *s)
st->codecpar->codec_type = AVMEDIA_TYPE_DATA; st->codecpar->codec_type = AVMEDIA_TYPE_DATA;
st->codecpar->codec_id = AV_CODEC_ID_BIN_DATA; st->codecpar->codec_id = AV_CODEC_ID_BIN_DATA;
st->discard = AVDISCARD_ALL; st->discard = AVDISCARD_ALL;
for (i = 0; i < st->internal->nb_index_entries; i++) { for (int i = 0; i < sti->nb_index_entries; i++) {
AVIndexEntry *sample = &st->internal->index_entries[i]; AVIndexEntry *sample = &sti->index_entries[i];
int64_t end = i+1 < st->internal->nb_index_entries ? st->internal->index_entries[i+1].timestamp : st->duration; int64_t end = i+1 < sti->nb_index_entries ? sti->index_entries[i+1].timestamp : st->duration;
uint8_t *title; uint8_t *title;
uint16_t ch; uint16_t ch;
int len, title_len; int len, title_len;
@ -7266,14 +7278,15 @@ static int parse_timecode_in_framenum_format(AVFormatContext *s, AVStream *st,
static int mov_read_rtmd_track(AVFormatContext *s, AVStream *st) static int mov_read_rtmd_track(AVFormatContext *s, AVStream *st)
{ {
MOVStreamContext *sc = st->priv_data; MOVStreamContext *sc = st->priv_data;
FFStream *const sti = ffstream(st);
char buf[AV_TIMECODE_STR_SIZE]; char buf[AV_TIMECODE_STR_SIZE];
int64_t cur_pos = avio_tell(sc->pb); int64_t cur_pos = avio_tell(sc->pb);
int hh, mm, ss, ff, drop; int hh, mm, ss, ff, drop;
if (!st->internal->nb_index_entries) if (!sti->nb_index_entries)
return -1; return -1;
avio_seek(sc->pb, st->internal->index_entries->pos, SEEK_SET); avio_seek(sc->pb, sti->index_entries->pos, SEEK_SET);
avio_skip(s->pb, 13); avio_skip(s->pb, 13);
hh = avio_r8(s->pb); hh = avio_r8(s->pb);
mm = avio_r8(s->pb); mm = avio_r8(s->pb);
@ -7291,14 +7304,15 @@ static int mov_read_rtmd_track(AVFormatContext *s, AVStream *st)
static int mov_read_timecode_track(AVFormatContext *s, AVStream *st) static int mov_read_timecode_track(AVFormatContext *s, AVStream *st)
{ {
MOVStreamContext *sc = st->priv_data; MOVStreamContext *sc = st->priv_data;
FFStream *const sti = ffstream(st);
int flags = 0; int flags = 0;
int64_t cur_pos = avio_tell(sc->pb); int64_t cur_pos = avio_tell(sc->pb);
uint32_t value; uint32_t value;
if (!st->internal->nb_index_entries) if (!sti->nb_index_entries)
return -1; return -1;
avio_seek(sc->pb, st->internal->index_entries->pos, SEEK_SET); avio_seek(sc->pb, sti->index_entries->pos, SEEK_SET);
value = avio_rb32(s->pb); value = avio_rb32(s->pb);
if (sc->tmcd_flags & 0x0001) flags |= AV_TIMECODE_FLAG_DROPFRAME; if (sc->tmcd_flags & 0x0001) flags |= AV_TIMECODE_FLAG_DROPFRAME;
@ -7616,11 +7630,12 @@ static int mov_read_header(AVFormatContext *s)
for (i = 0; i < s->nb_streams; i++) { for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i]; AVStream *st = s->streams[i];
FFStream *const sti = ffstream(st);
MOVStreamContext *sc = st->priv_data; MOVStreamContext *sc = st->priv_data;
fix_timescale(mov, sc); fix_timescale(mov, sc);
if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&
st->codecpar->codec_id == AV_CODEC_ID_AAC) { st->codecpar->codec_id == AV_CODEC_ID_AAC) {
st->internal->skip_samples = sc->start_pad; sti->skip_samples = sc->start_pad;
} }
if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && sc->nb_frames_for_fps > 0 && sc->duration_for_fps > 0) if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && sc->nb_frames_for_fps > 0 && sc->duration_for_fps > 0)
av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den, av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
@ -7639,7 +7654,7 @@ static int mov_read_header(AVFormatContext *s)
mov->handbrake_version <= 1000000*0 + 1000*10 + 2 && // 0.10.2 mov->handbrake_version <= 1000000*0 + 1000*10 + 2 && // 0.10.2
st->codecpar->codec_id == AV_CODEC_ID_MP3) { st->codecpar->codec_id == AV_CODEC_ID_MP3) {
av_log(s, AV_LOG_VERBOSE, "Forcing full parsing for mp3 stream\n"); av_log(s, AV_LOG_VERBOSE, "Forcing full parsing for mp3 stream\n");
st->internal->need_parsing = AVSTREAM_PARSE_FULL; sti->need_parsing = AVSTREAM_PARSE_FULL;
} }
} }
@ -7756,9 +7771,10 @@ static AVIndexEntry *mov_find_next_sample(AVFormatContext *s, AVStream **st)
int i; int i;
for (i = 0; i < s->nb_streams; i++) { for (i = 0; i < s->nb_streams; i++) {
AVStream *avst = s->streams[i]; AVStream *avst = s->streams[i];
FFStream *const avsti = ffstream(avst);
MOVStreamContext *msc = avst->priv_data; MOVStreamContext *msc = avst->priv_data;
if (msc->pb && msc->current_sample < avst->internal->nb_index_entries) { if (msc->pb && msc->current_sample < avsti->nb_index_entries) {
AVIndexEntry *current_sample = &avst->internal->index_entries[msc->current_sample]; AVIndexEntry *current_sample = &avsti->index_entries[msc->current_sample];
int64_t dts = av_rescale(current_sample->timestamp, AV_TIME_BASE, msc->time_scale); int64_t dts = av_rescale(current_sample->timestamp, AV_TIME_BASE, msc->time_scale);
av_log(s, AV_LOG_TRACE, "stream %d, sample %d, dts %"PRId64"\n", i, msc->current_sample, dts); av_log(s, AV_LOG_TRACE, "stream %d, sample %d, dts %"PRId64"\n", i, msc->current_sample, dts);
if (!sample || (!(s->pb->seekable & AVIO_SEEKABLE_NORMAL) && current_sample->pos < sample->pos) || if (!sample || (!(s->pb->seekable & AVIO_SEEKABLE_NORMAL) && current_sample->pos < sample->pos) ||
@ -7940,9 +7956,9 @@ static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
sc->has_palette = 0; sc->has_palette = 0;
} }
} }
if (st->codecpar->codec_id == AV_CODEC_ID_MP3 && !st->internal->need_parsing && pkt->size > 4) { if (st->codecpar->codec_id == AV_CODEC_ID_MP3 && !ffstream(st)->need_parsing && pkt->size > 4) {
if (ff_mpa_check_header(AV_RB32(pkt->data)) < 0) if (ff_mpa_check_header(AV_RB32(pkt->data)) < 0)
st->internal->need_parsing = AVSTREAM_PARSE_FULL; ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL;
} }
} }
@ -7961,8 +7977,8 @@ static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
sc->ctts_sample = 0; sc->ctts_sample = 0;
} }
} else { } else {
int64_t next_dts = (sc->current_sample < st->internal->nb_index_entries) ? int64_t next_dts = (sc->current_sample < ffstream(st)->nb_index_entries) ?
st->internal->index_entries[sc->current_sample].timestamp : st->duration; ffstream(st)->index_entries[sc->current_sample].timestamp : st->duration;
if (next_dts >= pkt->dts) if (next_dts >= pkt->dts)
pkt->duration = next_dts - pkt->dts; pkt->duration = next_dts - pkt->dts;
@ -8030,6 +8046,7 @@ static int mov_seek_fragment(AVFormatContext *s, AVStream *st, int64_t timestamp
static int mov_seek_stream(AVFormatContext *s, AVStream *st, int64_t timestamp, int flags) static int mov_seek_stream(AVFormatContext *s, AVStream *st, int64_t timestamp, int flags)
{ {
MOVStreamContext *sc = st->priv_data; MOVStreamContext *sc = st->priv_data;
FFStream *const sti = ffstream(st);
int sample, time_sample, ret; int sample, time_sample, ret;
unsigned int i; unsigned int i;
@ -8043,7 +8060,7 @@ static int mov_seek_stream(AVFormatContext *s, AVStream *st, int64_t timestamp,
sample = av_index_search_timestamp(st, timestamp, flags); sample = av_index_search_timestamp(st, timestamp, flags);
av_log(s, AV_LOG_TRACE, "stream %d, timestamp %"PRId64", sample %d\n", st->index, timestamp, sample); av_log(s, AV_LOG_TRACE, "stream %d, timestamp %"PRId64", sample %d\n", st->index, timestamp, sample);
if (sample < 0 && st->internal->nb_index_entries && timestamp < st->internal->index_entries[0].timestamp) if (sample < 0 && sti->nb_index_entries && timestamp < sti->index_entries[0].timestamp)
sample = 0; sample = 0;
if (sample < 0) /* not sure what to do */ if (sample < 0) /* not sure what to do */
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
@ -8084,8 +8101,9 @@ static int mov_seek_stream(AVFormatContext *s, AVStream *st, int64_t timestamp,
static int64_t mov_get_skip_samples(AVStream *st, int sample) static int64_t mov_get_skip_samples(AVStream *st, int sample)
{ {
MOVStreamContext *sc = st->priv_data; MOVStreamContext *sc = st->priv_data;
int64_t first_ts = st->internal->index_entries[0].timestamp; FFStream *const sti = ffstream(st);
int64_t ts = st->internal->index_entries[sample].timestamp; int64_t first_ts = sti->index_entries[0].timestamp;
int64_t ts = sti->index_entries[sample].timestamp;
int64_t off; int64_t off;
if (st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO) if (st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
@ -8101,6 +8119,7 @@ static int mov_read_seek(AVFormatContext *s, int stream_index, int64_t sample_ti
{ {
MOVContext *mc = s->priv_data; MOVContext *mc = s->priv_data;
AVStream *st; AVStream *st;
FFStream *sti;
int sample; int sample;
int i; int i;
@ -8108,18 +8127,20 @@ static int mov_read_seek(AVFormatContext *s, int stream_index, int64_t sample_ti
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
st = s->streams[stream_index]; st = s->streams[stream_index];
sti = ffstream(st);
sample = mov_seek_stream(s, st, sample_time, flags); sample = mov_seek_stream(s, st, sample_time, flags);
if (sample < 0) if (sample < 0)
return sample; return sample;
if (mc->seek_individually) { if (mc->seek_individually) {
/* adjust seek timestamp to found sample timestamp */ /* adjust seek timestamp to found sample timestamp */
int64_t seek_timestamp = st->internal->index_entries[sample].timestamp; int64_t seek_timestamp = sti->index_entries[sample].timestamp;
st->internal->skip_samples = mov_get_skip_samples(st, sample); sti->skip_samples = mov_get_skip_samples(st, sample);
for (i = 0; i < s->nb_streams; i++) { for (i = 0; i < s->nb_streams; i++) {
AVStream *const st = s->streams[i];
FFStream *const sti = ffstream(st);
int64_t timestamp; int64_t timestamp;
st = s->streams[i];
if (stream_index == i) if (stream_index == i)
continue; continue;
@ -8127,7 +8148,7 @@ static int mov_read_seek(AVFormatContext *s, int stream_index, int64_t sample_ti
timestamp = av_rescale_q(seek_timestamp, s->streams[stream_index]->time_base, st->time_base); timestamp = av_rescale_q(seek_timestamp, s->streams[stream_index]->time_base, st->time_base);
sample = mov_seek_stream(s, st, timestamp, flags); sample = mov_seek_stream(s, st, timestamp, flags);
if (sample >= 0) if (sample >= 0)
st->internal->skip_samples = mov_get_skip_samples(st, sample); sti->skip_samples = mov_get_skip_samples(st, sample);
} }
} else { } else {
for (i = 0; i < s->nb_streams; i++) { for (i = 0; i < s->nb_streams; i++) {

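All of the replacements in this commit go through the FFStream type and the ffstream()/cffstream() accessors that the same patch adds to libavformat/internal.h (see the file list above). As a minimal sketch, with the field list abridged and assuming, per the commit message, that AVStream is the first member of FFStream so the pointer casts are valid, they look roughly like this:

typedef struct FFStream {
    /* The public AVStream; kept as the first member so that an AVStream*
     * and the FFStream* wrapping it share the same address and the casts
     * in ffstream()/cffstream() below are valid. */
    AVStream pub;

    /* Fields formerly in AVStreamInternal, e.g. (abridged): */
    AVIndexEntry *index_entries;
    int nb_index_entries;
    unsigned int index_entries_allocated_size;
    enum AVStreamParseType need_parsing;
    int request_probe;
} FFStream;

static av_always_inline FFStream *ffstream(AVStream *st)
{
    return (FFStream*)st;
}

static av_always_inline const FFStream *cffstream(const AVStream *st)
{
    return (const FFStream*)st;
}

With that in place the hunks above and below are mechanical: st->internal->x becomes sti->x (or ffstream(st)->x for one-off accesses), with a local FFStream *sti added wherever a function touches the stream repeatedly.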
@ -160,6 +160,7 @@ static void mp3_parse_info_tag(AVFormatContext *s, AVStream *st,
#define LAST_BITS(k, n) ((k) & ((1 << (n)) - 1)) #define LAST_BITS(k, n) ((k) & ((1 << (n)) - 1))
#define MIDDLE_BITS(k, m, n) LAST_BITS((k) >> (m), ((n) - (m) + 1)) #define MIDDLE_BITS(k, m, n) LAST_BITS((k) >> (m), ((n) - (m) + 1))
FFStream *const sti = ffstream(st);
uint16_t crc; uint16_t crc;
uint32_t v; uint32_t v;
@ -256,13 +257,13 @@ static void mp3_parse_info_tag(AVFormatContext *s, AVStream *st,
mp3->start_pad = v>>12; mp3->start_pad = v>>12;
mp3-> end_pad = v&4095; mp3-> end_pad = v&4095;
st->internal->start_skip_samples = mp3->start_pad + 528 + 1; sti->start_skip_samples = mp3->start_pad + 528 + 1;
if (mp3->frames) { if (mp3->frames) {
st->internal->first_discard_sample = -mp3->end_pad + 528 + 1 + mp3->frames * (int64_t)spf; sti->first_discard_sample = -mp3->end_pad + 528 + 1 + mp3->frames * (int64_t)spf;
st->internal->last_discard_sample = mp3->frames * (int64_t)spf; sti->last_discard_sample = mp3->frames * (int64_t)spf;
} }
if (!st->start_time) if (!st->start_time)
st->start_time = av_rescale_q(st->internal->start_skip_samples, st->start_time = av_rescale_q(sti->start_skip_samples,
(AVRational){1, c->sample_rate}, (AVRational){1, c->sample_rate},
st->time_base); st->time_base);
av_log(s, AV_LOG_DEBUG, "pad %d %d\n", mp3->start_pad, mp3-> end_pad); av_log(s, AV_LOG_DEBUG, "pad %d %d\n", mp3->start_pad, mp3-> end_pad);
@ -363,6 +364,7 @@ static int mp3_read_header(AVFormatContext *s)
FFFormatContext *const si = ffformatcontext(s); FFFormatContext *const si = ffformatcontext(s);
MP3DecContext *mp3 = s->priv_data; MP3DecContext *mp3 = s->priv_data;
AVStream *st; AVStream *st;
FFStream *sti;
int64_t off; int64_t off;
int ret; int ret;
int i; int i;
@ -373,10 +375,11 @@ static int mp3_read_header(AVFormatContext *s)
st = avformat_new_stream(s, NULL); st = avformat_new_stream(s, NULL);
if (!st) if (!st)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
sti = ffstream(st);
st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
st->codecpar->codec_id = AV_CODEC_ID_MP3; st->codecpar->codec_id = AV_CODEC_ID_MP3;
st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW; sti->need_parsing = AVSTREAM_PARSE_FULL_RAW;
st->start_time = 0; st->start_time = 0;
// lcm of all mp3 sample rates // lcm of all mp3 sample rates
@ -434,8 +437,8 @@ static int mp3_read_header(AVFormatContext *s)
off = avio_tell(s->pb); off = avio_tell(s->pb);
// the seek index is relative to the end of the xing vbr headers // the seek index is relative to the end of the xing vbr headers
for (i = 0; i < st->internal->nb_index_entries; i++) for (int i = 0; i < sti->nb_index_entries; i++)
st->internal->index_entries[i].pos += off; sti->index_entries[i].pos += off;
/* the parameters will be extracted from the compressed bitstream */ /* the parameters will be extracted from the compressed bitstream */
return 0; return 0;
@ -551,6 +554,7 @@ static int mp3_seek(AVFormatContext *s, int stream_index, int64_t timestamp,
MP3DecContext *mp3 = s->priv_data; MP3DecContext *mp3 = s->priv_data;
AVIndexEntry *ie, ie1; AVIndexEntry *ie, ie1;
AVStream *st = s->streams[0]; AVStream *st = s->streams[0];
FFStream *const sti = ffstream(st);
int64_t best_pos; int64_t best_pos;
int fast_seek = s->flags & AVFMT_FLAG_FAST_SEEK; int fast_seek = s->flags & AVFMT_FLAG_FAST_SEEK;
int64_t filesize = mp3->header_filesize; int64_t filesize = mp3->header_filesize;
@ -571,7 +575,7 @@ static int mp3_seek(AVFormatContext *s, int stream_index, int64_t timestamp,
if (ret < 0) if (ret < 0)
return ret; return ret;
ie = &st->internal->index_entries[ret]; ie = &sti->index_entries[ret];
} else if (fast_seek && st->duration > 0 && filesize > 0) { } else if (fast_seek && st->duration > 0 && filesize > 0) {
if (!mp3->is_cbr) if (!mp3->is_cbr)
av_log(s, AV_LOG_WARNING, "Using scaling to seek VBR MP3; may be imprecise.\n"); av_log(s, AV_LOG_WARNING, "Using scaling to seek VBR MP3; may be imprecise.\n");

@ -187,6 +187,7 @@ static int mpc_read_packet(AVFormatContext *s, AVPacket *pkt)
static int mpc_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) static int mpc_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{ {
AVStream *st = s->streams[stream_index]; AVStream *st = s->streams[stream_index];
FFStream *const sti = ffstream(st);
MPCContext *c = s->priv_data; MPCContext *c = s->priv_data;
AVPacket pkt1, *pkt = &pkt1; AVPacket pkt1, *pkt = &pkt1;
int ret; int ret;
@ -194,8 +195,8 @@ static int mpc_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
uint32_t lastframe; uint32_t lastframe;
/* if found, seek there */ /* if found, seek there */
if (index >= 0 && st->internal->index_entries[st->internal->nb_index_entries-1].timestamp >= timestamp - DELAY_FRAMES){ if (index >= 0 && sti->index_entries[sti->nb_index_entries-1].timestamp >= timestamp - DELAY_FRAMES) {
c->curframe = st->internal->index_entries[index].pos; c->curframe = sti->index_entries[index].pos;
return 0; return 0;
} }
/* if timestamp is out of bounds, return error */ /* if timestamp is out of bounds, return error */

@ -324,12 +324,13 @@ static int mpc8_read_packet(AVFormatContext *s, AVPacket *pkt)
static int mpc8_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) static int mpc8_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{ {
AVStream *st = s->streams[stream_index]; AVStream *st = s->streams[stream_index];
FFStream *const sti = ffstream(st);
int index = av_index_search_timestamp(st, timestamp, flags); int index = av_index_search_timestamp(st, timestamp, flags);
if(index < 0) return -1; if(index < 0) return -1;
if (avio_seek(s->pb, st->internal->index_entries[index].pos, SEEK_SET) < 0) if (avio_seek(s->pb, sti->index_entries[index].pos, SEEK_SET) < 0)
return -1; return -1;
avpriv_update_cur_dts(s, st, st->internal->index_entries[index].timestamp); avpriv_update_cur_dts(s, st, sti->index_entries[index].timestamp);
return 0; return 0;
} }

@ -478,6 +478,7 @@ static int mpegps_read_packet(AVFormatContext *s,
{ {
MpegDemuxContext *m = s->priv_data; MpegDemuxContext *m = s->priv_data;
AVStream *st; AVStream *st;
FFStream *sti;
int len, startcode, i, es_type, ret; int len, startcode, i, es_type, ret;
int pcm_dvd = 0; int pcm_dvd = 0;
int request_probe= 0; int request_probe= 0;
@ -614,6 +615,7 @@ skip:
st = avformat_new_stream(s, NULL); st = avformat_new_stream(s, NULL);
if (!st) if (!st)
goto skip; goto skip;
sti = ffstream(st);
st->id = startcode; st->id = startcode;
st->codecpar->codec_type = type; st->codecpar->codec_type = type;
st->codecpar->codec_id = codec_id; st->codecpar->codec_id = codec_id;
@ -623,8 +625,8 @@ skip:
st->codecpar->channel_layout = AV_CH_LAYOUT_MONO; st->codecpar->channel_layout = AV_CH_LAYOUT_MONO;
st->codecpar->sample_rate = 8000; st->codecpar->sample_rate = 8000;
} }
st->internal->request_probe = request_probe; sti->request_probe = request_probe;
st->internal->need_parsing = AVSTREAM_PARSE_FULL; sti->need_parsing = AVSTREAM_PARSE_FULL;
found: found:
if (st->discard >= AVDISCARD_ALL) if (st->discard >= AVDISCARD_ALL)

@ -875,15 +875,16 @@ static void mpegts_find_stream_type(AVStream *st,
uint32_t stream_type, uint32_t stream_type,
const StreamType *types) const StreamType *types)
{ {
FFStream *const sti = ffstream(st);
for (; types->stream_type; types++) for (; types->stream_type; types++)
if (stream_type == types->stream_type) { if (stream_type == types->stream_type) {
if (st->codecpar->codec_type != types->codec_type || if (st->codecpar->codec_type != types->codec_type ||
st->codecpar->codec_id != types->codec_id) { st->codecpar->codec_id != types->codec_id) {
st->codecpar->codec_type = types->codec_type; st->codecpar->codec_type = types->codec_type;
st->codecpar->codec_id = types->codec_id; st->codecpar->codec_id = types->codec_id;
st->internal->need_context_update = 1; sti->need_context_update = 1;
} }
st->internal->request_probe = 0; sti->request_probe = 0;
return; return;
} }
} }
@ -891,11 +892,12 @@ static void mpegts_find_stream_type(AVStream *st,
static int mpegts_set_stream_info(AVStream *st, PESContext *pes, static int mpegts_set_stream_info(AVStream *st, PESContext *pes,
uint32_t stream_type, uint32_t prog_reg_desc) uint32_t stream_type, uint32_t prog_reg_desc)
{ {
FFStream *const sti = ffstream(st);
int old_codec_type = st->codecpar->codec_type; int old_codec_type = st->codecpar->codec_type;
int old_codec_id = st->codecpar->codec_id; int old_codec_id = st->codecpar->codec_id;
int old_codec_tag = st->codecpar->codec_tag; int old_codec_tag = st->codecpar->codec_tag;
if (avcodec_is_open(st->internal->avctx)) { if (avcodec_is_open(sti->avctx)) {
av_log(pes->stream, AV_LOG_DEBUG, "cannot set stream info, internal codec is open\n"); av_log(pes->stream, AV_LOG_DEBUG, "cannot set stream info, internal codec is open\n");
return 0; return 0;
} }
@ -904,7 +906,7 @@ static int mpegts_set_stream_info(AVStream *st, PESContext *pes,
st->priv_data = pes; st->priv_data = pes;
st->codecpar->codec_type = AVMEDIA_TYPE_DATA; st->codecpar->codec_type = AVMEDIA_TYPE_DATA;
st->codecpar->codec_id = AV_CODEC_ID_NONE; st->codecpar->codec_id = AV_CODEC_ID_NONE;
st->internal->need_parsing = AVSTREAM_PARSE_FULL; sti->need_parsing = AVSTREAM_PARSE_FULL;
pes->st = st; pes->st = st;
pes->stream_type = stream_type; pes->stream_type = stream_type;
@ -916,7 +918,7 @@ static int mpegts_set_stream_info(AVStream *st, PESContext *pes,
mpegts_find_stream_type(st, pes->stream_type, ISO_types); mpegts_find_stream_type(st, pes->stream_type, ISO_types);
if (pes->stream_type == 4 || pes->stream_type == 0x0f) if (pes->stream_type == 4 || pes->stream_type == 0x0f)
st->internal->request_probe = 50; sti->request_probe = 50;
if ((prog_reg_desc == AV_RL32("HDMV") || if ((prog_reg_desc == AV_RL32("HDMV") ||
prog_reg_desc == AV_RL32("HDPR")) && prog_reg_desc == AV_RL32("HDPR")) &&
st->codecpar->codec_id == AV_CODEC_ID_NONE) { st->codecpar->codec_id == AV_CODEC_ID_NONE) {
@ -942,7 +944,7 @@ static int mpegts_set_stream_info(AVStream *st, PESContext *pes,
sub_st->priv_data = sub_pes; sub_st->priv_data = sub_pes;
sub_st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; sub_st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
sub_st->codecpar->codec_id = AV_CODEC_ID_AC3; sub_st->codecpar->codec_id = AV_CODEC_ID_AC3;
sub_st->internal->need_parsing = AVSTREAM_PARSE_FULL; ffstream(sub_st)->need_parsing = AVSTREAM_PARSE_FULL;
sub_pes->sub_st = pes->sub_st = sub_st; sub_pes->sub_st = pes->sub_st = sub_st;
} }
} }
@ -953,19 +955,19 @@ static int mpegts_set_stream_info(AVStream *st, PESContext *pes,
st->codecpar->codec_type = old_codec_type; st->codecpar->codec_type = old_codec_type;
} }
if ((st->codecpar->codec_id == AV_CODEC_ID_NONE || if ((st->codecpar->codec_id == AV_CODEC_ID_NONE ||
(st->internal->request_probe > 0 && st->internal->request_probe < AVPROBE_SCORE_STREAM_RETRY / 5)) && (sti->request_probe > 0 && sti->request_probe < AVPROBE_SCORE_STREAM_RETRY / 5)) &&
st->internal->probe_packets > 0 && sti->probe_packets > 0 &&
stream_type == STREAM_TYPE_PRIVATE_DATA) { stream_type == STREAM_TYPE_PRIVATE_DATA) {
st->codecpar->codec_type = AVMEDIA_TYPE_DATA; st->codecpar->codec_type = AVMEDIA_TYPE_DATA;
st->codecpar->codec_id = AV_CODEC_ID_BIN_DATA; st->codecpar->codec_id = AV_CODEC_ID_BIN_DATA;
st->internal->request_probe = AVPROBE_SCORE_STREAM_RETRY / 5; sti->request_probe = AVPROBE_SCORE_STREAM_RETRY / 5;
} }
/* queue a context update if properties changed */ /* queue a context update if properties changed */
if (old_codec_type != st->codecpar->codec_type || if (old_codec_type != st->codecpar->codec_type ||
old_codec_id != st->codecpar->codec_id || old_codec_id != st->codecpar->codec_id ||
old_codec_tag != st->codecpar->codec_tag) old_codec_tag != st->codecpar->codec_tag)
st->internal->need_context_update = 1; sti->need_context_update = 1;
return 0; return 0;
} }
@ -1198,13 +1200,14 @@ static int mpegts_push_data(MpegTSFilter *filter,
code != 0x1f0 && code != 0x1f1 && /* ECM, EMM */ code != 0x1f0 && code != 0x1f1 && /* ECM, EMM */
code != 0x1ff && code != 0x1f2 && /* program_stream_directory, DSMCC_stream */ code != 0x1ff && code != 0x1f2 && /* program_stream_directory, DSMCC_stream */
code != 0x1f8) { /* ITU-T Rec. H.222.1 type E stream */ code != 0x1f8) { /* ITU-T Rec. H.222.1 type E stream */
FFStream *const pes_sti = ffstream(pes->st);
pes->state = MPEGTS_PESHEADER; pes->state = MPEGTS_PESHEADER;
if (pes->st->codecpar->codec_id == AV_CODEC_ID_NONE && !pes->st->internal->request_probe) { if (pes->st->codecpar->codec_id == AV_CODEC_ID_NONE && !pes_sti->request_probe) {
av_log(pes->stream, AV_LOG_TRACE, av_log(pes->stream, AV_LOG_TRACE,
"pid=%x stream_type=%x probing\n", "pid=%x stream_type=%x probing\n",
pes->pid, pes->pid,
pes->stream_type); pes->stream_type);
pes->st->internal->request_probe = 1; pes_sti->request_probe = 1;
} }
} else { } else {
pes->pes_header_size = 6; pes->pes_header_size = 6;
@ -1326,8 +1329,11 @@ skip:
int64_t pcr = f->last_pcr / 300; int64_t pcr = f->last_pcr / 300;
pcr_found = 1; pcr_found = 1;
if (st) { if (st) {
pes->st->internal->pts_wrap_reference = st->internal->pts_wrap_reference; const FFStream *const sti = ffstream(st);
pes->st->internal->pts_wrap_behavior = st->internal->pts_wrap_behavior; FFStream *const pes_sti = ffstream(pes->st);
pes_sti->pts_wrap_reference = sti->pts_wrap_reference;
pes_sti->pts_wrap_behavior = sti->pts_wrap_behavior;
} }
if (pes->dts == AV_NOPTS_VALUE || pes->dts < pcr) { if (pes->dts == AV_NOPTS_VALUE || pes->dts < pcr) {
pes->pts = pes->dts = pcr; pes->pts = pes->dts = pcr;
@ -1699,6 +1705,7 @@ static void m4sl_cb(MpegTSFilter *filter, const uint8_t *section,
for (i = 0; i < mp4_descr_count; i++) { for (i = 0; i < mp4_descr_count; i++) {
PESContext *pes; PESContext *pes;
AVStream *st; AVStream *st;
FFStream *sti;
FFIOContext pb; FFIOContext pb;
if (ts->pids[pid]->es_id != mp4_descr[i].es_id) if (ts->pids[pid]->es_id != mp4_descr[i].es_id)
continue; continue;
@ -1710,6 +1717,7 @@ static void m4sl_cb(MpegTSFilter *filter, const uint8_t *section,
st = pes->st; st = pes->st;
if (!st) if (!st)
continue; continue;
sti = ffstream(st);
pes->sl = mp4_descr[i].sl; pes->sl = mp4_descr[i].sl;
@ -1719,13 +1727,13 @@ static void m4sl_cb(MpegTSFilter *filter, const uint8_t *section,
ff_mp4_read_dec_config_descr(s, st, &pb.pub); ff_mp4_read_dec_config_descr(s, st, &pb.pub);
if (st->codecpar->codec_id == AV_CODEC_ID_AAC && if (st->codecpar->codec_id == AV_CODEC_ID_AAC &&
st->codecpar->extradata_size > 0) st->codecpar->extradata_size > 0)
st->internal->need_parsing = 0; sti->need_parsing = 0;
if (st->codecpar->codec_id == AV_CODEC_ID_H264 && if (st->codecpar->codec_id == AV_CODEC_ID_H264 &&
st->codecpar->extradata_size > 0) st->codecpar->extradata_size > 0)
st->internal->need_parsing = 0; sti->need_parsing = 0;
st->codecpar->codec_type = avcodec_get_type(st->codecpar->codec_id); st->codecpar->codec_type = avcodec_get_type(st->codecpar->codec_id);
st->internal->need_context_update = 1; sti->need_context_update = 1;
} }
} }
for (i = 0; i < mp4_descr_count; i++) for (i = 0; i < mp4_descr_count; i++)
@ -1785,6 +1793,7 @@ int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type
Mp4Descr *mp4_descr, int mp4_descr_count, int pid, Mp4Descr *mp4_descr, int mp4_descr_count, int pid,
MpegTSContext *ts) MpegTSContext *ts)
{ {
FFStream *const sti = ffstream(st);
const uint8_t *desc_end; const uint8_t *desc_end;
int desc_len, desc_tag, desc_es_id, ext_desc_tag, channels, channel_config_code; int desc_len, desc_tag, desc_es_id, ext_desc_tag, channels, channel_config_code;
char language[252]; char language[252];
@ -1802,7 +1811,7 @@ int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type
av_log(fc, AV_LOG_TRACE, "tag: 0x%02x len=%d\n", desc_tag, desc_len); av_log(fc, AV_LOG_TRACE, "tag: 0x%02x len=%d\n", desc_tag, desc_len);
if ((st->codecpar->codec_id == AV_CODEC_ID_NONE || st->internal->request_probe > 0) && if ((st->codecpar->codec_id == AV_CODEC_ID_NONE || sti->request_probe > 0) &&
stream_type == STREAM_TYPE_PRIVATE_DATA) stream_type == STREAM_TYPE_PRIVATE_DATA)
mpegts_find_stream_type(st, desc_tag, DESC_types); mpegts_find_stream_type(st, desc_tag, DESC_types);
@ -1828,8 +1837,8 @@ int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type
ff_mp4_read_dec_config_descr(fc, st, &pb.pub); ff_mp4_read_dec_config_descr(fc, st, &pb.pub);
if (st->codecpar->codec_id == AV_CODEC_ID_AAC && if (st->codecpar->codec_id == AV_CODEC_ID_AAC &&
st->codecpar->extradata_size > 0) { st->codecpar->extradata_size > 0) {
st->internal->need_parsing = 0; sti->need_parsing = 0;
st->internal->need_context_update = 1; sti->need_context_update = 1;
} }
if (st->codecpar->codec_id == AV_CODEC_ID_MPEG4SYSTEMS) if (st->codecpar->codec_id == AV_CODEC_ID_MPEG4SYSTEMS)
mpegts_open_section_filter(ts, pid, m4sl_cb, ts, 1); mpegts_open_section_filter(ts, pid, m4sl_cb, ts, 1);
@ -1840,8 +1849,8 @@ int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type
break; break;
if (mp4_descr_count > 0 && if (mp4_descr_count > 0 &&
(st->codecpar->codec_id == AV_CODEC_ID_AAC_LATM || (st->codecpar->codec_id == AV_CODEC_ID_AAC_LATM ||
(st->internal->request_probe == 0 && st->codecpar->codec_id == AV_CODEC_ID_NONE) || (sti->request_probe == 0 && st->codecpar->codec_id == AV_CODEC_ID_NONE) ||
st->internal->request_probe > 0) && sti->request_probe > 0) &&
mp4_descr->dec_config_descr_len && mp4_descr->es_id == pid) { mp4_descr->dec_config_descr_len && mp4_descr->es_id == pid) {
FFIOContext pb; FFIOContext pb;
ffio_init_context(&pb, mp4_descr->dec_config_descr, ffio_init_context(&pb, mp4_descr->dec_config_descr,
@ -1850,9 +1859,9 @@ int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type
ff_mp4_read_dec_config_descr(fc, st, &pb.pub); ff_mp4_read_dec_config_descr(fc, st, &pb.pub);
if (st->codecpar->codec_id == AV_CODEC_ID_AAC && if (st->codecpar->codec_id == AV_CODEC_ID_AAC &&
st->codecpar->extradata_size > 0) { st->codecpar->extradata_size > 0) {
st->internal->request_probe = st->internal->need_parsing = 0; sti->request_probe = sti->need_parsing = 0;
st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
st->internal->need_context_update = 1; sti->need_context_update = 1;
} }
} }
break; break;
@ -1893,7 +1902,7 @@ int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type
language[i * 4 - 1] = 0; language[i * 4 - 1] = 0;
av_dict_set(&st->metadata, "language", language, 0); av_dict_set(&st->metadata, "language", language, 0);
st->internal->need_context_update = 1; sti->need_context_update = 1;
} }
} }
break; break;
@ -1957,7 +1966,7 @@ int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type
language[i * 4 - 1] = 0; language[i * 4 - 1] = 0;
av_dict_set(&st->metadata, "language", language, 0); av_dict_set(&st->metadata, "language", language, 0);
st->internal->need_context_update = 1; sti->need_context_update = 1;
} }
} }
break; break;
@ -1990,14 +1999,14 @@ int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type
case REGISTRATION_DESCRIPTOR: case REGISTRATION_DESCRIPTOR:
st->codecpar->codec_tag = bytestream_get_le32(pp); st->codecpar->codec_tag = bytestream_get_le32(pp);
av_log(fc, AV_LOG_TRACE, "reg_desc=%.4s\n", (char *)&st->codecpar->codec_tag); av_log(fc, AV_LOG_TRACE, "reg_desc=%.4s\n", (char *)&st->codecpar->codec_tag);
if (st->codecpar->codec_id == AV_CODEC_ID_NONE || st->internal->request_probe > 0) { if (st->codecpar->codec_id == AV_CODEC_ID_NONE || sti->request_probe > 0) {
mpegts_find_stream_type(st, st->codecpar->codec_tag, REGD_types); mpegts_find_stream_type(st, st->codecpar->codec_tag, REGD_types);
if (st->codecpar->codec_tag == MKTAG('B', 'S', 'S', 'D')) if (st->codecpar->codec_tag == MKTAG('B', 'S', 'S', 'D'))
st->internal->request_probe = 50; sti->request_probe = 50;
} }
break; break;
case 0x52: /* stream identifier descriptor */ case 0x52: /* stream identifier descriptor */
st->internal->stream_identifier = 1 + get8(pp, desc_end); sti->stream_identifier = 1 + get8(pp, desc_end);
break; break;
case METADATA_DESCRIPTOR: case METADATA_DESCRIPTOR:
if (get16(pp, desc_end) == 0xFFFF) if (get16(pp, desc_end) == 0xFFFF)
@ -2037,8 +2046,8 @@ int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type
} else { } else {
avpriv_request_sample(fc, "Opus in MPEG-TS - channel_config_code > 0x8"); avpriv_request_sample(fc, "Opus in MPEG-TS - channel_config_code > 0x8");
} }
st->internal->need_parsing = AVSTREAM_PARSE_FULL; sti->need_parsing = AVSTREAM_PARSE_FULL;
st->internal->need_context_update = 1; sti->need_context_update = 1;
} }
} }
if (ext_desc_tag == 0x06) { /* supplementary audio descriptor */ if (ext_desc_tag == 0x06) { /* supplementary audio descriptor */
@ -2116,7 +2125,7 @@ int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type
// Listing of data_component_ids is in STD-B10, part 2, Annex J. // Listing of data_component_ids is in STD-B10, part 2, Annex J.
// Component tag limits are documented in TR-B14, fascicle 2, // Component tag limits are documented in TR-B14, fascicle 2,
// Vol. 3, Section 2, 4.2.8.1 // Vol. 3, Section 2, 4.2.8.1
int actual_component_tag = st->internal->stream_identifier - 1; int actual_component_tag = sti->stream_identifier - 1;
int picked_profile = FF_PROFILE_UNKNOWN; int picked_profile = FF_PROFILE_UNKNOWN;
int data_component_id = get16(pp, desc_end); int data_component_id = get16(pp, desc_end);
if (data_component_id < 0) if (data_component_id < 0)
@ -2148,7 +2157,7 @@ int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type
st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE; st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
st->codecpar->codec_id = AV_CODEC_ID_ARIB_CAPTION; st->codecpar->codec_id = AV_CODEC_ID_ARIB_CAPTION;
st->codecpar->profile = picked_profile; st->codecpar->profile = picked_profile;
st->internal->request_probe = 0; sti->request_probe = 0;
} }
break; break;
case 0xb0: /* DOVI video stream descriptor */ case 0xb0: /* DOVI video stream descriptor */

@ -82,7 +82,7 @@ static int msf_read_header(AVFormatContext *s)
AV_WL16(st->codecpar->extradata+8, codec == 4 ? 1 : 0); /* joint stereo (repeat?) */ AV_WL16(st->codecpar->extradata+8, codec == 4 ? 1 : 0); /* joint stereo (repeat?) */
AV_WL16(st->codecpar->extradata+10, 1); AV_WL16(st->codecpar->extradata+10, 1);
st->codecpar->codec_id = AV_CODEC_ID_ATRAC3; break; st->codecpar->codec_id = AV_CODEC_ID_ATRAC3; break;
case 7: st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW; case 7: ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL_RAW;
st->codecpar->codec_id = AV_CODEC_ID_MP3; break; st->codecpar->codec_id = AV_CODEC_ID_MP3; break;
default: default:
avpriv_request_sample(s, "Codec %d", codec); avpriv_request_sample(s, "Codec %d", codec);

@ -185,7 +185,7 @@ static int mtv_read_header(AVFormatContext *s)
st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
st->codecpar->codec_id = AV_CODEC_ID_MP3; st->codecpar->codec_id = AV_CODEC_ID_MP3;
st->codecpar->bit_rate = mtv->audio_br; st->codecpar->bit_rate = mtv->audio_br;
st->internal->need_parsing = AVSTREAM_PARSE_FULL; ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL;
// Jump over header // Jump over header

@ -253,7 +253,7 @@ static int init_muxer(AVFormatContext *s, AVDictionary **options)
for (unsigned i = 0; i < s->nb_streams; i++) { for (unsigned i = 0; i < s->nb_streams; i++) {
AVStream *const st = s->streams[i]; AVStream *const st = s->streams[i];
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
AVCodecParameters *const par = st->codecpar; AVCodecParameters *const par = st->codecpar;
const AVCodecDescriptor *desc; const AVCodecDescriptor *desc;
@ -386,7 +386,7 @@ static int init_pts(AVFormatContext *s)
/* init PTS generation */ /* init PTS generation */
for (unsigned i = 0; i < s->nb_streams; i++) { for (unsigned i = 0; i < s->nb_streams; i++) {
AVStream *const st = s->streams[i]; AVStream *const st = s->streams[i];
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
int64_t den = AV_NOPTS_VALUE; int64_t den = AV_NOPTS_VALUE;
switch (st->codecpar->codec_type) { switch (st->codecpar->codec_type) {
@ -509,7 +509,7 @@ FF_DISABLE_DEPRECATION_WARNINGS
static int compute_muxer_pkt_fields(AVFormatContext *s, AVStream *st, AVPacket *pkt) static int compute_muxer_pkt_fields(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{ {
FFFormatContext *const si = ffformatcontext(s); FFFormatContext *const si = ffformatcontext(s);
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
int delay = st->codecpar->video_delay; int delay = st->codecpar->video_delay;
int frame_size; int frame_size;
@ -645,7 +645,7 @@ static int write_packet(AVFormatContext *s, AVPacket *pkt)
{ {
FFFormatContext *const si = ffformatcontext(s); FFFormatContext *const si = ffformatcontext(s);
AVStream *const st = s->streams[pkt->stream_index]; AVStream *const st = s->streams[pkt->stream_index];
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
int ret; int ret;
// If the timestamp offsetting below is adjusted, adjust // If the timestamp offsetting below is adjusted, adjust
@ -743,7 +743,7 @@ static int check_packet(AVFormatContext *s, AVPacket *pkt)
static int prepare_input_packet(AVFormatContext *s, AVStream *st, AVPacket *pkt) static int prepare_input_packet(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{ {
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
#if !FF_API_COMPUTE_PKT_FIELDS2 #if !FF_API_COMPUTE_PKT_FIELDS2
/* sanitize the timestamps */ /* sanitize the timestamps */
if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) { if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
@ -799,7 +799,7 @@ int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
FFFormatContext *const si = ffformatcontext(s); FFFormatContext *const si = ffformatcontext(s);
PacketList **next_point, *this_pktl; PacketList **next_point, *this_pktl;
AVStream *st = s->streams[pkt->stream_index]; AVStream *st = s->streams[pkt->stream_index];
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
int chunked = s->max_chunk_size || s->max_chunk_duration; int chunked = s->max_chunk_size || s->max_chunk_duration;
this_pktl = av_malloc(sizeof(PacketList)); this_pktl = av_malloc(sizeof(PacketList));
@ -912,7 +912,7 @@ int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
for (unsigned i = 0; i < s->nb_streams; i++) { for (unsigned i = 0; i < s->nb_streams; i++) {
const AVStream *const st = s->streams[i]; const AVStream *const st = s->streams[i];
const AVStreamInternal *const sti = st->internal; const FFStream *const sti = cffstream(st);
const AVCodecParameters *const par = st->codecpar; const AVCodecParameters *const par = st->codecpar;
if (sti->last_in_packet_buffer) { if (sti->last_in_packet_buffer) {
++stream_count; ++stream_count;
@ -939,7 +939,7 @@ int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
for (unsigned i = 0; i < s->nb_streams; i++) { for (unsigned i = 0; i < s->nb_streams; i++) {
const AVStream *const st = s->streams[i]; const AVStream *const st = s->streams[i];
const AVStreamInternal *const sti = st->internal; const FFStream *const sti = cffstream(st);
const PacketList *last = sti->last_in_packet_buffer; const PacketList *last = sti->last_in_packet_buffer;
int64_t last_dts; int64_t last_dts;
@ -977,7 +977,7 @@ int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
PacketList *pktl = si->packet_buffer; PacketList *pktl = si->packet_buffer;
AVPacket *const top_pkt = &pktl->pkt; AVPacket *const top_pkt = &pktl->pkt;
AVStream *const st = s->streams[top_pkt->stream_index]; AVStream *const st = s->streams[top_pkt->stream_index];
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
int64_t top_dts = av_rescale_q(top_pkt->dts, st->time_base, int64_t top_dts = av_rescale_q(top_pkt->dts, st->time_base,
AV_TIME_BASE_Q); AV_TIME_BASE_Q);
@ -1000,7 +1000,7 @@ int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
if (stream_count && flush) { if (stream_count && flush) {
PacketList *pktl = si->packet_buffer; PacketList *pktl = si->packet_buffer;
AVStream *const st = s->streams[pktl->pkt.stream_index]; AVStream *const st = s->streams[pktl->pkt.stream_index];
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
*out = pktl->pkt; *out = pktl->pkt;
@ -1026,7 +1026,7 @@ int ff_get_muxer_ts_offset(AVFormatContext *s, int stream_index, int64_t *offset
return AVERROR(EINVAL); return AVERROR(EINVAL);
st = s->streams[stream_index]; st = s->streams[stream_index];
*offset = st->internal->mux_ts_offset; *offset = ffstream(st)->mux_ts_offset;
if (s->output_ts_offset) if (s->output_ts_offset)
*offset += av_rescale_q(s->output_ts_offset, AV_TIME_BASE_Q, st->time_base); *offset += av_rescale_q(s->output_ts_offset, AV_TIME_BASE_Q, st->time_base);
@ -1064,7 +1064,7 @@ static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, in
return ff_interleave_packet_per_dts(s, out, in, flush); return ff_interleave_packet_per_dts(s, out, in, flush);
} }
static int check_bitstream(AVFormatContext *s, AVStreamInternal *sti, AVPacket *pkt) static int check_bitstream(AVFormatContext *s, FFStream *sti, AVPacket *pkt)
{ {
int ret; int ret;
@ -1128,7 +1128,7 @@ static int write_packet_common(AVFormatContext *s, AVStream *st, AVPacket *pkt,
static int write_packets_from_bsfs(AVFormatContext *s, AVStream *st, AVPacket *pkt, int interleaved) static int write_packets_from_bsfs(AVFormatContext *s, AVStream *st, AVPacket *pkt, int interleaved)
{ {
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
AVBSFContext *const bsfc = sti->bsfc; AVBSFContext *const bsfc = sti->bsfc;
int ret; int ret;
@ -1162,12 +1162,12 @@ static int write_packets_from_bsfs(AVFormatContext *s, AVStream *st, AVPacket *p
static int write_packets_common(AVFormatContext *s, AVPacket *pkt, int interleaved) static int write_packets_common(AVFormatContext *s, AVPacket *pkt, int interleaved)
{ {
AVStream *st; AVStream *st;
AVStreamInternal *sti; FFStream *sti;
int ret = check_packet(s, pkt); int ret = check_packet(s, pkt);
if (ret < 0) if (ret < 0)
return ret; return ret;
st = s->streams[pkt->stream_index]; st = s->streams[pkt->stream_index];
sti = st->internal; sti = ffstream(st);
ret = prepare_input_packet(s, st, pkt); ret = prepare_input_packet(s, st, pkt);
if (ret < 0) if (ret < 0)
@ -1256,7 +1256,7 @@ int av_write_trailer(AVFormatContext *s)
av_packet_unref(pkt); av_packet_unref(pkt);
for (unsigned i = 0; i < s->nb_streams; i++) { for (unsigned i = 0; i < s->nb_streams; i++) {
if (s->streams[i]->internal->bsfc) { if (ffstream(s->streams[i])->bsfc) {
ret1 = write_packets_from_bsfs(s, s->streams[i], pkt, 1/*interleaved*/); ret1 = write_packets_from_bsfs(s, s->streams[i], pkt, 1/*interleaved*/);
if (ret1 < 0) if (ret1 < 0)
av_packet_unref(pkt); av_packet_unref(pkt);
@ -1286,7 +1286,7 @@ int av_write_trailer(AVFormatContext *s)
ret = s->pb ? s->pb->error : 0; ret = s->pb ? s->pb->error : 0;
for (unsigned i = 0; i < s->nb_streams; i++) { for (unsigned i = 0; i < s->nb_streams; i++) {
av_freep(&s->streams[i]->priv_data); av_freep(&s->streams[i]->priv_data);
av_freep(&s->streams[i]->internal->index_entries); av_freep(&ffstream(s->streams[i])->index_entries);
} }
if (s->oformat->priv_class) if (s->oformat->priv_class)
av_opt_free(s->priv_data); av_opt_free(s->priv_data);

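The mux.c hunks above also use the const variant for read-only passes over the streams. A hypothetical helper (not part of this commit, assuming the last_in_packet_buffer field shown in those hunks) illustrating that usage pattern:

/* Hypothetical example, not part of this commit: count the streams that
 * still have packets buffered for interleaving, going through the
 * const-correct cffstream() accessor for read-only access. */
static int nb_streams_with_buffered_packets(const AVFormatContext *s)
{
    int count = 0;
    for (unsigned i = 0; i < s->nb_streams; i++) {
        const FFStream *const sti = cffstream(s->streams[i]);
        if (sti->last_in_packet_buffer)
            count++;
    }
    return count;
}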
@ -436,13 +436,14 @@ static int mv_read_packet(AVFormatContext *avctx, AVPacket *pkt)
MvContext *mv = avctx->priv_data; MvContext *mv = avctx->priv_data;
AVIOContext *pb = avctx->pb; AVIOContext *pb = avctx->pb;
AVStream *st = avctx->streams[mv->stream_index]; AVStream *st = avctx->streams[mv->stream_index];
FFStream *const sti = ffstream(st);
const AVIndexEntry *index; const AVIndexEntry *index;
int frame = mv->frame[mv->stream_index]; int frame = mv->frame[mv->stream_index];
int64_t ret; int64_t ret;
uint64_t pos; uint64_t pos;
if (frame < st->internal->nb_index_entries) { if (frame < sti->nb_index_entries) {
index = &st->internal->index_entries[frame]; index = &sti->index_entries[frame];
pos = avio_tell(pb); pos = avio_tell(pb);
if (index->pos > pos) if (index->pos > pos)
avio_skip(pb, index->pos - pos); avio_skip(pb, index->pos - pos);

@ -1793,7 +1793,7 @@ static int mxf_compute_ptses_fake_index(MXFContext *mxf, MXFIndexTable *index_ta
* 6: 5 5 * 6: 5 5
* *
* We do this by bucket sorting x by x+TemporalOffset[x] into mxf->ptses, * We do this by bucket sorting x by x+TemporalOffset[x] into mxf->ptses,
* then setting mxf->internal->first_dts = -max(TemporalOffset[x]). * then setting ffstream(mxf)->first_dts = -max(TemporalOffset[x]).
* The latter makes DTS <= PTS. * The latter makes DTS <= PTS.
*/ */
for (i = x = 0; i < index_table->nb_segments; i++) { for (i = x = 0; i < index_table->nb_segments; i++) {
@ -2328,6 +2328,7 @@ static int mxf_parse_structural_metadata(MXFContext *mxf)
const MXFCodecUL *container_ul = NULL; const MXFCodecUL *container_ul = NULL;
const MXFCodecUL *pix_fmt_ul = NULL; const MXFCodecUL *pix_fmt_ul = NULL;
AVStream *st; AVStream *st;
FFStream *sti;
AVTimecode tc; AVTimecode tc;
int flags; int flags;
@ -2435,6 +2436,7 @@ static int mxf_parse_structural_metadata(MXFContext *mxf)
ret = AVERROR(ENOMEM); ret = AVERROR(ENOMEM);
goto fail_and_free; goto fail_and_free;
} }
sti = ffstream(st);
st->id = material_track->track_id; st->id = material_track->track_id;
st->priv_data = source_track; st->priv_data = source_track;
@ -2620,7 +2622,7 @@ static int mxf_parse_structural_metadata(MXFContext *mxf)
} }
} }
} }
st->internal->need_parsing = AVSTREAM_PARSE_HEADERS; sti->need_parsing = AVSTREAM_PARSE_HEADERS;
if (material_track->sequence->origin) { if (material_track->sequence->origin) {
av_dict_set_int(&st->metadata, "material_track_origin", material_track->sequence->origin, 0); av_dict_set_int(&st->metadata, "material_track_origin", material_track->sequence->origin, 0);
} }
@ -2628,7 +2630,7 @@ static int mxf_parse_structural_metadata(MXFContext *mxf)
av_dict_set_int(&st->metadata, "source_track_origin", source_track->sequence->origin, 0); av_dict_set_int(&st->metadata, "source_track_origin", source_track->sequence->origin, 0);
} }
if (descriptor->aspect_ratio.num && descriptor->aspect_ratio.den) if (descriptor->aspect_ratio.num && descriptor->aspect_ratio.den)
st->internal->display_aspect_ratio = descriptor->aspect_ratio; sti->display_aspect_ratio = descriptor->aspect_ratio;
st->codecpar->color_range = mxf_get_color_range(mxf, descriptor); st->codecpar->color_range = mxf_get_color_range(mxf, descriptor);
st->codecpar->color_primaries = mxf_get_codec_ul(ff_mxf_color_primaries_uls, &descriptor->color_primaries_ul)->id; st->codecpar->color_primaries = mxf_get_codec_ul(ff_mxf_color_primaries_uls, &descriptor->color_primaries_ul)->id;
st->codecpar->color_trc = mxf_get_codec_ul(ff_mxf_color_trc_uls, &descriptor->color_trc_ul)->id; st->codecpar->color_trc = mxf_get_codec_ul(ff_mxf_color_trc_uls, &descriptor->color_trc_ul)->id;
@ -2685,7 +2687,7 @@ static int mxf_parse_structural_metadata(MXFContext *mxf)
else if (descriptor->bits_per_sample == 32) else if (descriptor->bits_per_sample == 32)
st->codecpar->codec_id = AV_CODEC_ID_PCM_S32BE; st->codecpar->codec_id = AV_CODEC_ID_PCM_S32BE;
} else if (st->codecpar->codec_id == AV_CODEC_ID_MP2) { } else if (st->codecpar->codec_id == AV_CODEC_ID_MP2) {
st->internal->need_parsing = AVSTREAM_PARSE_FULL; sti->need_parsing = AVSTREAM_PARSE_FULL;
} }
st->codecpar->bits_per_coded_sample = av_get_bits_per_sample(st->codecpar->codec_id); st->codecpar->bits_per_coded_sample = av_get_bits_per_sample(st->codecpar->codec_id);
} else if (st->codecpar->codec_type == AVMEDIA_TYPE_DATA) { } else if (st->codecpar->codec_type == AVMEDIA_TYPE_DATA) {
@ -2719,7 +2721,7 @@ static int mxf_parse_structural_metadata(MXFContext *mxf)
} }
if (st->codecpar->codec_type != AVMEDIA_TYPE_DATA && source_track->wrapping != FrameWrapped) { if (st->codecpar->codec_type != AVMEDIA_TYPE_DATA && source_track->wrapping != FrameWrapped) {
/* TODO: decode timestamps */ /* TODO: decode timestamps */
st->internal->need_parsing = AVSTREAM_PARSE_TIMESTAMPS; sti->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
} }
} }
@ -3680,7 +3682,7 @@ static int mxf_read_packet(AVFormatContext *s, AVPacket *pkt)
if (next_ofs <= 0) { if (next_ofs <= 0) {
// If we have no way to packetize the data, then return it in chunks... // If we have no way to packetize the data, then return it in chunks...
if (klv.next_klv - klv.length == pos && max_data_size > MXF_MAX_CHUNK_SIZE) { if (klv.next_klv - klv.length == pos && max_data_size > MXF_MAX_CHUNK_SIZE) {
st->internal->need_parsing = AVSTREAM_PARSE_FULL; ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL;
avpriv_request_sample(s, "Huge KLV without proper index in non-frame wrapped essence"); avpriv_request_sample(s, "Huge KLV without proper index in non-frame wrapped essence");
} }
size = FFMIN(max_data_size, MXF_MAX_CHUNK_SIZE); size = FFMIN(max_data_size, MXF_MAX_CHUNK_SIZE);
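The comment kept in mxf_compute_ptses_fake_index() above is the one piece of real reasoning in these hunks: display order is recovered by bucket-sorting the decode index x by x + TemporalOffset[x], and shifting every DTS by -max(TemporalOffset[x]) keeps DTS <= PTS. A hypothetical standalone sketch of that idea (it assumes every x + TemporalOffset[x] stays inside the array, which the real demuxer has to validate):

#include <stdint.h>

static void fake_index_sketch(const int *temporal_offset, int64_t *ptses,
                              int64_t *first_dts, int n)
{
    int max_off = 0;
    for (int x = 0; x < n; x++) {
        ptses[x + temporal_offset[x]] = x;      /* bucket sort by display position */
        if (temporal_offset[x] > max_off)
            max_off = temporal_offset[x];
    }
    *first_dts = -max_off;                      /* guarantees DTS <= PTS */
}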

@@ -3104,7 +3104,7 @@ static int mxf_interleave_get_packet(AVFormatContext *s, AVPacket *out, AVPacket
 int i, stream_count = 0;
 for (i = 0; i < s->nb_streams; i++)
-stream_count += !!s->streams[i]->internal->last_in_packet_buffer;
+stream_count += !!ffstream(s->streams[i])->last_in_packet_buffer;
 if (stream_count && (s->nb_streams == stream_count || flush)) {
 PacketList *pktl = si->packet_buffer;
@@ -3115,8 +3115,8 @@ static int mxf_interleave_get_packet(AVFormatContext *s, AVPacket *out, AVPacket
 if (!stream_count || pktl->pkt.stream_index == 0)
 break;
 // update last packet in packet buffer
-if (s->streams[pktl->pkt.stream_index]->internal->last_in_packet_buffer != pktl)
-s->streams[pktl->pkt.stream_index]->internal->last_in_packet_buffer = pktl;
+if (ffstream(s->streams[pktl->pkt.stream_index])->last_in_packet_buffer != pktl)
+ffstream(s->streams[pktl->pkt.stream_index])->last_in_packet_buffer = pktl;
 last = pktl;
 pktl = pktl->next;
 stream_count--;
@@ -3141,8 +3141,8 @@ static int mxf_interleave_get_packet(AVFormatContext *s, AVPacket *out, AVPacket
 *out = pktl->pkt;
 av_log(s, AV_LOG_TRACE, "out st:%d dts:%"PRId64"\n", (*out).stream_index, (*out).dts);
 si->packet_buffer = pktl->next;
-if(s->streams[pktl->pkt.stream_index]->internal->last_in_packet_buffer == pktl)
-s->streams[pktl->pkt.stream_index]->internal->last_in_packet_buffer= NULL;
+if (ffstream(s->streams[pktl->pkt.stream_index])->last_in_packet_buffer == pktl)
+ffstream(s->streams[pktl->pkt.stream_index])->last_in_packet_buffer = NULL;
 if (!si->packet_buffer)
 si->packet_buffer_end = NULL;
 av_freep(&pktl);

@@ -53,7 +53,7 @@ static int nc_read_header(AVFormatContext *s)
 st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 st->codecpar->codec_id = AV_CODEC_ID_MPEG4;
-st->internal->need_parsing = AVSTREAM_PARSE_FULL;
+ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL;
 avpriv_set_pts_info(st, 64, 1, 100);

@@ -461,7 +461,7 @@ static int nsv_parse_NSVs_header(AVFormatContext *s)
 st->codecpar->codec_tag = atag;
 st->codecpar->codec_id = ff_codec_get_id(nsv_codec_audio_tags, atag);
-st->internal->need_parsing = AVSTREAM_PARSE_FULL; /* for PCM we will read a chunk later and put correct info */
+ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL; /* for PCM we will read a chunk later and put correct info */
 /* set timebase to common denominator of ms and framerate */
 avpriv_set_pts_info(st, 64, 1, framerate.num*1000);
@@ -609,7 +609,7 @@ null_chunk_retry:
 asize-=4;
 av_log(s, AV_LOG_TRACE, "NSV RAWAUDIO: bps %d, nchan %d, srate %d\n", bps, channels, samplerate);
 if (fill_header) {
-st[NSV_ST_AUDIO]->internal->need_parsing = AVSTREAM_PARSE_NONE; /* we know everything */
+ffstream(st[NSV_ST_AUDIO])->need_parsing = AVSTREAM_PARSE_NONE; /* we know everything */
 if (bps != 16) {
 av_log(s, AV_LOG_TRACE, "NSV AUDIO bit/sample != 16 (%d)!!!\n", bps);
 }
@@ -669,6 +669,7 @@ static int nsv_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
 {
 NSVContext *nsv = s->priv_data;
 AVStream *st = s->streams[stream_index];
+FFStream *const sti = ffstream(st);
 NSVStream *nst = st->priv_data;
 int index;
@@ -676,10 +677,10 @@ static int nsv_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
 if(index < 0)
 return -1;
-if (avio_seek(s->pb, st->internal->index_entries[index].pos, SEEK_SET) < 0)
+if (avio_seek(s->pb, sti->index_entries[index].pos, SEEK_SET) < 0)
 return -1;
-nst->frame_offset = st->internal->index_entries[index].timestamp;
+nst->frame_offset = sti->index_entries[index].timestamp;
 nsv->state = NSV_UNSYNC;
 return 0;
 }

@@ -1086,7 +1086,7 @@ static int decode_frame(NUTContext *nut, AVPacket *pkt, int frame_code)
 stc->skip_until_key_frame = 0;
 discard = s->streams[stream_id]->discard;
-last_IP_pts = s->streams[stream_id]->internal->last_IP_pts;
+last_IP_pts = ffstream(s->streams[stream_id])->last_IP_pts;
 if ((discard >= AVDISCARD_NONKEY && !(stc->last_flags & FLAG_KEY)) ||
 (discard >= AVDISCARD_BIDIR && last_IP_pts != AV_NOPTS_VALUE &&
 last_IP_pts > pts) ||
@@ -1225,6 +1225,7 @@ static int read_seek(AVFormatContext *s, int stream_index,
 {
 NUTContext *nut = s->priv_data;
 AVStream *st = s->streams[stream_index];
+FFStream *const sti = ffstream(st);
 Syncpoint dummy = { .ts = pts * av_q2d(st->time_base) * AV_TIME_BASE };
 Syncpoint nopts_sp = { .ts = AV_NOPTS_VALUE, .back_ptr = AV_NOPTS_VALUE };
 Syncpoint *sp, *next_node[2] = { &nopts_sp, &nopts_sp };
@@ -1235,15 +1236,15 @@ static int read_seek(AVFormatContext *s, int stream_index,
 return AVERROR(ENOSYS);
 }
-if (st->internal->index_entries) {
+if (sti->index_entries) {
 int index = av_index_search_timestamp(st, pts, flags);
 if (index < 0)
 index = av_index_search_timestamp(st, pts, flags ^ AVSEEK_FLAG_BACKWARD);
 if (index < 0)
 return -1;
-pos2 = st->internal->index_entries[index].pos;
-ts = st->internal->index_entries[index].timestamp;
+pos2 = sti->index_entries[index].pos;
+ts = sti->index_entries[index].timestamp;
 } else {
 av_tree_find(nut->syncpoints, &dummy, ff_nut_sp_pts_cmp,
 (void **) next_node);

@@ -1006,6 +1006,7 @@ static int nut_write_packet(AVFormatContext *s, AVPacket *pkt)
 ff_nut_reset_ts(nut, *nus->time_base, pkt->dts);
 for (i = 0; i < s->nb_streams; i++) {
 AVStream *st = s->streams[i];
+FFStream *const sti = ffstream(st);
 int64_t dts_tb = av_rescale_rnd(pkt->dts,
 nus->time_base->num * (int64_t)nut->stream[i].time_base->den,
 nus->time_base->den * (int64_t)nut->stream[i].time_base->num,
@@ -1013,12 +1014,12 @@ static int nut_write_packet(AVFormatContext *s, AVPacket *pkt)
 int index = av_index_search_timestamp(st, dts_tb,
 AVSEEK_FLAG_BACKWARD);
 if (index >= 0) {
-sp_pos = FFMIN(sp_pos, st->internal->index_entries[index].pos);
-if (!nut->write_index && 2*index > st->internal->nb_index_entries) {
-memmove(st->internal->index_entries,
-st->internal->index_entries + index,
-sizeof(*st->internal->index_entries) * (st->internal->nb_index_entries - index));
-st->internal->nb_index_entries -= index;
+sp_pos = FFMIN(sp_pos, sti->index_entries[index].pos);
+if (!nut->write_index && 2*index > sti->nb_index_entries) {
+memmove(sti->index_entries,
+sti->index_entries + index,
+sizeof(*sti->index_entries) * (sti->nb_index_entries - index));
+sti->nb_index_entries -= index;
 }
 }
 }

@@ -133,7 +133,7 @@ static int get_codec_data(AVFormatContext *s, AVIOContext *pb, AVStream *vst,
 }
 ast->codecpar->codec_id = id;
-ast->internal->need_parsing = AVSTREAM_PARSE_FULL;
+ffstream(ast)->need_parsing = AVSTREAM_PARSE_FULL;
 } else
 avio_skip(pb, 4 * 4);

@@ -59,7 +59,7 @@ flac_header (AVFormatContext * s, int idx)
 st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
 st->codecpar->codec_id = AV_CODEC_ID_FLAC;
-st->internal->need_parsing = AVSTREAM_PARSE_HEADERS;
+ffstream(st)->need_parsing = AVSTREAM_PARSE_HEADERS;
 if ((ret = ff_alloc_extradata(st->codecpar, FLAC_STREAMINFO_SIZE)) < 0)
 return ret;

@@ -39,6 +39,7 @@ ogm_header(AVFormatContext *s, int idx)
 struct ogg *ogg = s->priv_data;
 struct ogg_stream *os = ogg->streams + idx;
 AVStream *st = s->streams[idx];
+FFStream *const sti = ffstream(st);
 GetByteContext p;
 uint64_t time_unit;
 uint64_t spu;
@@ -60,7 +61,7 @@ ogm_header(AVFormatContext *s, int idx)
 st->codecpar->codec_id = ff_codec_get_id(ff_codec_bmp_tags, tag);
 st->codecpar->codec_tag = tag;
 if (st->codecpar->codec_id == AV_CODEC_ID_MPEG4)
-st->internal->need_parsing = AVSTREAM_PARSE_HEADERS;
+sti->need_parsing = AVSTREAM_PARSE_HEADERS;
 } else if (bytestream2_peek_byte(&p) == 't') {
 st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
 st->codecpar->codec_id = AV_CODEC_ID_TEXT;
@@ -76,7 +77,7 @@ ogm_header(AVFormatContext *s, int idx)
 st->codecpar->codec_id = ff_codec_get_id(ff_codec_wav_tags, cid);
 // our parser completely breaks AAC in Ogg
 if (st->codecpar->codec_id != AV_CODEC_ID_AAC)
-st->internal->need_parsing = AVSTREAM_PARSE_FULL;
+sti->need_parsing = AVSTREAM_PARSE_FULL;
 }
 size = bytestream2_get_le32(&p);
@@ -116,7 +117,7 @@ ogm_header(AVFormatContext *s, int idx)
 }
 // Update internal avctx with changes to codecpar above.
-st->internal->need_context_update = 1;
+sti->need_context_update = 1;
 } else if (bytestream2_peek_byte(&p) == 3) {
 bytestream2_skip(&p, 7);
 if (bytestream2_get_bytes_left(&p) > 1)

@@ -112,7 +112,7 @@ static int theora_header(AVFormatContext *s, int idx)
 st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 st->codecpar->codec_id = AV_CODEC_ID_THEORA;
-st->internal->need_parsing = AVSTREAM_PARSE_HEADERS;
+ffstream(st)->need_parsing = AVSTREAM_PARSE_HEADERS;
 }
 break;
 case 0x81:

@@ -61,7 +61,7 @@ static int vp8_header(AVFormatContext *s, int idx)
 avpriv_set_pts_info(st, 64, framerate.den, framerate.num);
 st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 st->codecpar->codec_id = AV_CODEC_ID_VP8;
-st->internal->need_parsing = AVSTREAM_PARSE_HEADERS;
+ffstream(st)->need_parsing = AVSTREAM_PARSE_HEADERS;
 break;
 case 0x02:
 if (p[6] != 0x20)

@@ -515,7 +515,7 @@ static int oma_read_header(AVFormatContext *s)
 avpriv_set_pts_info(st, 64, 1, samplerate);
 break;
 case OMA_CODECID_MP3:
-st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
+ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL_RAW;
 framesize = 1024;
 break;
 case OMA_CODECID_LPCM:

@@ -79,7 +79,7 @@ int ff_pcm_read_seek(AVFormatContext *s,
 pos *= block_align;
 /* recompute exact position */
-st->internal->cur_dts = av_rescale(pos, st->time_base.den, byte_rate * (int64_t)st->time_base.num);
+ffstream(st)->cur_dts = av_rescale(pos, st->time_base.den, byte_rate * (int64_t)st->time_base.num);
 if ((ret = avio_seek(s->pb, pos + ffformatcontext(s)->data_offset, SEEK_SET)) < 0)
 return ret;
 return 0;
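The ff_pcm_read_seek() hunk above also shows why cur_dts can be recomputed exactly for raw PCM: the byte offset maps linearly to time. A rough sketch of the same arithmetic, with an invented helper name purely for illustration:

#include <libavutil/mathematics.h>

/* pos bytes at byte_rate bytes per second, expressed in stream time-base units;
 * this is the same formula as the patched line above. */
static int64_t pcm_pos_to_dts_sketch(int64_t pos, int64_t byte_rate, AVRational tb)
{
    return av_rescale(pos, tb.den, byte_rate * (int64_t)tb.num);
}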

@@ -62,7 +62,7 @@ static int pva_read_header(AVFormatContext *s) {
 return AVERROR(ENOMEM);
 st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 st->codecpar->codec_id = AV_CODEC_ID_MPEG2VIDEO;
-st->internal->need_parsing = AVSTREAM_PARSE_FULL;
+ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL;
 avpriv_set_pts_info(st, 32, 1, 90000);
 av_add_index_entry(st, 0, 0, 0, 0, AVINDEX_KEYFRAME);
@@ -70,7 +70,7 @@ static int pva_read_header(AVFormatContext *s) {
 return AVERROR(ENOMEM);
 st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
 st->codecpar->codec_id = AV_CODEC_ID_MP2;
-st->internal->need_parsing = AVSTREAM_PARSE_FULL;
+ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL;
 avpriv_set_pts_info(st, 33, 1, 90000);
 av_add_index_entry(st, 0, 0, 0, 0, AVINDEX_KEYFRAME);

@@ -59,7 +59,7 @@ int ff_raw_audio_read_header(AVFormatContext *s)
 return AVERROR(ENOMEM);
 st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
 st->codecpar->codec_id = s->iformat->raw_codec_id;
-st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
+ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL_RAW;
 st->start_time = 0;
 /* the parameters will be extracted from the compressed bitstream */
@@ -70,6 +70,7 @@ int ff_raw_audio_read_header(AVFormatContext *s)
 int ff_raw_video_read_header(AVFormatContext *s)
 {
 AVStream *st;
+FFStream *sti;
 FFRawVideoDemuxerContext *s1 = s->priv_data;
 int ret = 0;
@@ -79,12 +80,13 @@ int ff_raw_video_read_header(AVFormatContext *s)
 ret = AVERROR(ENOMEM);
 goto fail;
 }
+sti = ffstream(st);
 st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 st->codecpar->codec_id = s->iformat->raw_codec_id;
-st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
-st->internal->avctx->framerate = s1->framerate;
+sti->need_parsing = AVSTREAM_PARSE_FULL_RAW;
+sti->avctx->framerate = s1->framerate;
 avpriv_set_pts_info(st, 64, 1, 1200000);
 fail:

@@ -416,7 +416,7 @@ rdt_parse_sdp_line (AVFormatContext *s, int st_index,
 if (av_strstart(p, "OpaqueData:buffer;", &p)) {
 rdt->mlti_data = rdt_parse_b64buf(&rdt->mlti_data_size, p);
 } else if (av_strstart(p, "StartTime:integer;", &p))
-stream->internal->first_dts = atoi(p);
+ffstream(stream)->first_dts = atoi(p);
 else if (av_strstart(p, "ASMRuleBook:string;", &p)) {
 int n, first = -1;
@@ -466,7 +466,7 @@ add_dstream(AVFormatContext *s, AVStream *orig_st)
 return NULL;
 st->id = orig_st->id;
 st->codecpar->codec_type = orig_st->codecpar->codec_type;
-st->internal->first_dts = orig_st->internal->first_dts;
+ffstream(st)->first_dts = ffstream(orig_st)->first_dts;
 return st;
 }

@@ -237,9 +237,10 @@ static int rl2_read_packet(AVFormatContext *s,
 /** check if there is a valid video or audio entry that can be used */
 for(i=0; i<s->nb_streams; i++){
-if(rl2->index_pos[i] < s->streams[i]->internal->nb_index_entries
-&& s->streams[i]->internal->index_entries[ rl2->index_pos[i] ].pos < pos){
-sample = &s->streams[i]->internal->index_entries[ rl2->index_pos[i] ];
+const FFStream *const sti = ffstream(s->streams[i]);
+if (rl2->index_pos[i] < sti->nb_index_entries
+&& sti->index_entries[ rl2->index_pos[i] ].pos < pos) {
+sample = &sti->index_entries[ rl2->index_pos[i] ];
 pos= sample->pos;
 stream_id= i;
 }
@@ -283,7 +284,7 @@ static int rl2_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
 return -1;
 rl2->index_pos[stream_index] = index;
-timestamp = st->internal->index_entries[index].timestamp;
+timestamp = ffstream(st)->index_entries[index].timestamp;
 for(i=0; i < s->nb_streams; i++){
 AVStream *st2 = s->streams[i];

@@ -122,6 +122,7 @@ void ff_rm_free_rmstream (RMStream *rms)
 static int rm_read_audio_stream_info(AVFormatContext *s, AVIOContext *pb,
 AVStream *st, RMStream *ast, int read_all)
 {
+FFStream *const sti = ffstream(st);
 char buf[256];
 uint32_t version;
 int ret;
@@ -202,7 +203,7 @@ static int rm_read_audio_stream_info(AVFormatContext *s, AVIOContext *pb,
 switch (st->codecpar->codec_id) {
 case AV_CODEC_ID_AC3:
-st->internal->need_parsing = AVSTREAM_PARSE_FULL;
+sti->need_parsing = AVSTREAM_PARSE_FULL;
 break;
 case AV_CODEC_ID_RA_288:
 st->codecpar->extradata_size= 0;
@@ -211,7 +212,7 @@ static int rm_read_audio_stream_info(AVFormatContext *s, AVIOContext *pb,
 st->codecpar->block_align = coded_framesize;
 break;
 case AV_CODEC_ID_COOK:
-st->internal->need_parsing = AVSTREAM_PARSE_HEADERS;
+sti->need_parsing = AVSTREAM_PARSE_HEADERS;
 case AV_CODEC_ID_ATRAC3:
 case AV_CODEC_ID_SIPR:
 if (read_all) {
@@ -235,7 +236,7 @@ static int rm_read_audio_stream_info(AVFormatContext *s, AVIOContext *pb,
 return -1;
 }
 st->codecpar->block_align = ff_sipr_subpk_size[flavor];
-st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
+sti->need_parsing = AVSTREAM_PARSE_FULL_RAW;
 } else {
 if(sub_packet_size <= 0){
 av_log(s, AV_LOG_ERROR, "sub_packet_size is invalid\n");
@@ -388,7 +389,7 @@ int ff_rm_read_mdpr_codecdata(AVFormatContext *s, AVIOContext *pb,
 avio_skip(pb, 2); // looks like bits per sample
 avio_skip(pb, 4); // always zero?
 st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
-st->internal->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
+ffstream(st)->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
 fps = avio_rb32(pb);
 if ((ret = rm_read_extradata(s, pb, st->codecpar, codec_data_size - (avio_tell(pb) - codec_pos))) < 0)
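The RealMedia hunks above touch almost every AVStreamParseType level in one place: roughly, NONE means packets already carry whole frames, HEADERS parses headers without repacking, TIMESTAMPS and FULL do full parsing, and FULL_RAW additionally lets the parser generate timestamps and positions for raw streams. A small sketch of a demuxer picking a level through the new accessor; only the wrapper name is invented:

#include <libavformat/avformat.h>
#include "internal.h"

static int demuxer_init_sketch(AVFormatContext *s)
{
    AVStream *st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codecpar->codec_id   = AV_CODEC_ID_AC3;
    /* inside libavformat the embedded private struct is reached directly */
    ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL;
    return 0;
}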

@@ -311,6 +311,7 @@ static int rpl_read_packet(AVFormatContext *s, AVPacket *pkt)
 RPLContext *rpl = s->priv_data;
 AVIOContext *pb = s->pb;
 AVStream* stream;
+FFStream *sti;
 AVIndexEntry* index_entry;
 int ret;
@@ -320,11 +321,12 @@ static int rpl_read_packet(AVFormatContext *s, AVPacket *pkt)
 }
 stream = s->streams[rpl->chunk_part];
-if (rpl->chunk_number >= stream->internal->nb_index_entries)
+sti = ffstream(stream);
+if (rpl->chunk_number >= sti->nb_index_entries)
 return AVERROR_EOF;
-index_entry = &stream->internal->index_entries[rpl->chunk_number];
+index_entry = &sti->index_entries[rpl->chunk_number];
 if (rpl->frame_in_part == 0) {
 if (avio_seek(pb, index_entry->pos, SEEK_SET) < 0)

@@ -166,8 +166,8 @@ static int asfrtp_parse_sdp_line(AVFormatContext *s, int stream_index,
 if (s->streams[stream_index]->id == rt->asf_ctx->streams[i]->id) {
 avcodec_parameters_copy(s->streams[stream_index]->codecpar,
 rt->asf_ctx->streams[i]->codecpar);
-s->streams[stream_index]->internal->need_parsing =
-rt->asf_ctx->streams[i]->internal->need_parsing;
+ffstream(s->streams[stream_index])->need_parsing =
+ffstream(rt->asf_ctx->streams[i])->need_parsing;
 avpriv_set_pts_info(s->streams[stream_index], 32, 1, 1000);
 }
 }

@@ -218,7 +218,7 @@ static void init_rtp_handler(const RTPDynamicProtocolHandler *handler,
 par->codec_id = handler->codec_id;
 rtsp_st->dynamic_handler = handler;
 if (st)
-st->internal->need_parsing = handler->need_parsing;
+ffstream(st)->need_parsing = handler->need_parsing;
 if (handler->priv_data_size) {
 rtsp_st->dynamic_protocol_context = av_mallocz(handler->priv_data_size);
 if (!rtsp_st->dynamic_protocol_context)

@@ -181,7 +181,7 @@ static int s337m_read_packet(AVFormatContext *s, AVPacket *pkt)
 }
 st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
 st->codecpar->codec_id = codec;
-st->internal->need_parsing = AVSTREAM_PARSE_HEADERS;
+ffstream(st)->need_parsing = AVSTREAM_PARSE_HEADERS;
 }
 return 0;

@@ -1409,6 +1409,7 @@ static av_cold int sbg_read_header(AVFormatContext *avf)
 char *buf = NULL;
 struct sbg_script script = { 0 };
 AVStream *st;
+FFStream *sti;
 struct ws_intervals inter = { 0 };
 r = read_whole_file(avf->pb, sbg->max_file_size, &buf);
@@ -1442,6 +1443,7 @@ static av_cold int sbg_read_header(AVFormatContext *avf)
 st = avformat_new_stream(avf, NULL);
 if (!st)
 return AVERROR(ENOMEM);
+sti = ffstream(st);
 st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
 st->codecpar->codec_id = AV_CODEC_ID_FFWAVESYNTH;
 st->codecpar->channels = 2;
@@ -1449,13 +1451,13 @@ static av_cold int sbg_read_header(AVFormatContext *avf)
 st->codecpar->sample_rate = sbg->sample_rate;
 st->codecpar->frame_size = sbg->frame_size;
 avpriv_set_pts_info(st, 64, 1, st->codecpar->sample_rate);
-st->internal->probe_packets = 0;
+sti->probe_packets = 0;
 st->start_time = av_rescale(script.start_ts,
 sbg->sample_rate, AV_TIME_BASE);
 st->duration = script.end_ts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE :
 av_rescale(script.end_ts - script.start_ts,
 sbg->sample_rate, AV_TIME_BASE);
-st->internal->cur_dts = st->start_time;
+sti->cur_dts = st->start_time;
 r = encode_intervals(&script, st->codecpar, &inter);
 if (r < 0)
 goto fail;
@@ -1476,7 +1478,7 @@ static int sbg_read_packet(AVFormatContext *avf, AVPacket *packet)
 int64_t ts, end_ts;
 int ret;
-ts = avf->streams[0]->internal->cur_dts;
+ts = ffstream(avf->streams[0])->cur_dts;
 end_ts = ts + avf->streams[0]->codecpar->frame_size;
 if (avf->streams[0]->duration != AV_NOPTS_VALUE)
 end_ts = FFMIN(avf->streams[0]->start_time + avf->streams[0]->duration,
@@ -1499,7 +1501,7 @@ static int sbg_read_seek2(AVFormatContext *avf, int stream_index,
 return AVERROR(EINVAL);
 if (stream_index < 0)
 ts = av_rescale_q(ts, AV_TIME_BASE_Q, avf->streams[0]->time_base);
-avf->streams[0]->internal->cur_dts = ts;
+ffstream(avf->streams[0])->cur_dts = ts;
 return 0;
 }

@@ -51,7 +51,7 @@ static int sdr2_read_header(AVFormatContext *s)
 st->codecpar->width = avio_rl32(s->pb);
 st->codecpar->height = avio_rl32(s->pb);
 st->codecpar->codec_id = AV_CODEC_ID_H264;
-st->internal->need_parsing = AVSTREAM_PARSE_FULL;
+ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL;
 ast->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
 ast->codecpar->channels = 1;

@@ -183,7 +183,7 @@ static int film_read_header(AVFormatContext *s)
 if (film->audio_type == AV_CODEC_ID_ADPCM_ADX) {
 st->codecpar->bits_per_coded_sample = 18 * 8 / 32;
 st->codecpar->block_align = st->codecpar->channels * 18;
-st->internal->need_parsing = AVSTREAM_PARSE_FULL;
+ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL;
 } else {
 st->codecpar->bits_per_coded_sample = film->audio_bits;
 st->codecpar->block_align = st->codecpar->channels *
@@ -315,7 +315,7 @@ static int film_read_seek(AVFormatContext *s, int stream_index, int64_t timestam
 if (ret < 0)
 return ret;
-pos = avio_seek(s->pb, st->internal->index_entries[ret].pos, SEEK_SET);
+pos = avio_seek(s->pb, ffstream(st)->index_entries[ret].pos, SEEK_SET);
 if (pos < 0)
 return pos;

@@ -991,10 +991,10 @@ static int seg_check_bitstream(struct AVFormatContext *s, const AVPacket *pkt)
 if (oc->oformat->check_bitstream) {
 int ret = oc->oformat->check_bitstream(oc, pkt);
 if (ret == 1) {
-AVStream *st = s->streams[pkt->stream_index];
-AVStream *ost = oc->streams[pkt->stream_index];
-st->internal->bsfc = ost->internal->bsfc;
-ost->internal->bsfc = NULL;
+FFStream *const sti = ffstream( s->streams[pkt->stream_index]);
+FFStream *const osti = ffstream(oc->streams[pkt->stream_index]);
+sti->bsfc = osti->bsfc;
+osti->bsfc = NULL;
 }
 return ret;
 }

@@ -582,15 +582,16 @@ static int ism_write_packet(AVFormatContext *s, AVPacket *pkt)
 {
 SmoothStreamingContext *c = s->priv_data;
 AVStream *st = s->streams[pkt->stream_index];
+FFStream *const sti = ffstream(st);
 OutputStream *os = &c->streams[pkt->stream_index];
 int64_t end_dts = (c->nb_fragments + 1) * (int64_t) c->min_frag_duration;
 int ret;
-if (st->internal->first_dts == AV_NOPTS_VALUE)
-st->internal->first_dts = pkt->dts;
+if (sti->first_dts == AV_NOPTS_VALUE)
+sti->first_dts = pkt->dts;
 if ((!c->has_video || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
-av_compare_ts(pkt->dts - st->internal->first_dts, st->time_base,
+av_compare_ts(pkt->dts - sti->first_dts, st->time_base,
 end_dts, AV_TIME_BASE_Q) >= 0 &&
 pkt->flags & AV_PKT_FLAG_KEY && os->packets_written) {
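In the smoothstreamingenc.c hunk the interesting part is the fragment-cut test: the elapsed stream time (in the stream's own time base) is compared against the fragment end expressed in AV_TIME_BASE units via av_compare_ts(), which avoids any explicit rescaling. A loose sketch of that condition, with the wrapper name invented for illustration:

#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

/* Nonzero when a keyframe lies at or past the desired fragment end. */
static int should_cut_fragment_sketch(int64_t dts, int64_t first_dts,
                                      AVRational stream_tb, int64_t end_dts_us,
                                      int is_keyframe)
{
    return is_keyframe &&
           av_compare_ts(dts - first_dts, stream_tb, end_dts_us, AV_TIME_BASE_Q) >= 0;
}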

@@ -204,7 +204,7 @@ static AVStream *create_new_audio_stream(AVFormatContext *s, int id, int info)
 }
 ast->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
 ast->codecpar->codec_id = ff_codec_get_id(swf_audio_codec_tags, info>>4 & 15);
-ast->internal->need_parsing = AVSTREAM_PARSE_FULL;
+ffstream(ast)->need_parsing = AVSTREAM_PARSE_FULL;
 sample_rate_code = info>>2 & 3;
 sample_size_code = info>>1 & 1;
 if (!sample_size_code && ast->codecpar->codec_id == AV_CODEC_ID_PCM_S16LE)
@@ -293,7 +293,7 @@ static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
 return AVERROR(ENOMEM);
 ast->duration = avio_rl32(pb); // number of samples
 if (((v>>4) & 15) == 2) { // MP3 sound data record
-ast->internal->skip_samples = avio_rl16(pb);
+ffstream(ast)->skip_samples = avio_rl16(pb);
 len -= 2;
 }
 len -= 7;

@@ -64,7 +64,7 @@ static int tak_read_header(AVFormatContext *s)
 st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
 st->codecpar->codec_id = AV_CODEC_ID_TAK;
-st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
+ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL_RAW;
 tc->mlast_frame = 0;
 if (avio_rl32(pb) != MKTAG('t', 'B', 'a', 'K')) {

@@ -277,12 +277,14 @@ static av_cold int tedcaptions_read_header(AVFormatContext *avf)
 {
 TEDCaptionsDemuxer *tc = avf->priv_data;
 AVStream *st = avformat_new_stream(avf, NULL);
+FFStream *sti;
 int ret, i;
 AVPacket *last;
 if (!st)
 return AVERROR(ENOMEM);
+sti = ffstream(st);
 ret = parse_file(avf->pb, &tc->subs);
 if (ret < 0) {
 if (ret == AVERROR_INVALIDDATA)
@@ -298,10 +300,10 @@ static av_cold int tedcaptions_read_header(AVFormatContext *avf)
 st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
 st->codecpar->codec_id = AV_CODEC_ID_TEXT;
 avpriv_set_pts_info(st, 64, 1, 1000);
-st->internal->probe_packets = 0;
+sti->probe_packets = 0;
 st->start_time = 0;
 st->duration = last->pts + last->duration;
-st->internal->cur_dts = 0;
+sti->cur_dts = 0;
 return 0;
 }

@@ -151,21 +151,22 @@ static int tta_read_packet(AVFormatContext *s, AVPacket *pkt)
 {
 TTAContext *c = s->priv_data;
 AVStream *st = s->streams[0];
+FFStream *const sti = ffstream(st);
 int size, ret;
 // FIXME!
 if (c->currentframe >= c->totalframes)
 return AVERROR_EOF;
-if (st->internal->nb_index_entries < c->totalframes) {
+if (sti->nb_index_entries < c->totalframes) {
 av_log(s, AV_LOG_ERROR, "Index entry disappeared\n");
 return AVERROR_INVALIDDATA;
 }
-size = st->internal->index_entries[c->currentframe].size;
+size = sti->index_entries[c->currentframe].size;
 ret = av_get_packet(s->pb, pkt, size);
-pkt->dts = st->internal->index_entries[c->currentframe++].timestamp;
+pkt->dts = sti->index_entries[c->currentframe++].timestamp;
 pkt->duration = c->currentframe == c->totalframes ? c->last_frame_size :
 c->frame_size;
 return ret;
@@ -178,7 +179,7 @@ static int tta_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
 int index = av_index_search_timestamp(st, timestamp, flags);
 if (index < 0)
 return -1;
-if (avio_seek(s->pb, st->internal->index_entries[index].pos, SEEK_SET) < 0)
+if (avio_seek(s->pb, ffstream(st)->index_entries[index].pos, SEEK_SET) < 0)
 return -1;
 c->currentframe = index;

@@ -308,7 +308,7 @@ static int ty_read_header(AVFormatContext *s)
 return AVERROR(ENOMEM);
 st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 st->codecpar->codec_id = AV_CODEC_ID_MPEG2VIDEO;
-st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
+ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL_RAW;
 avpriv_set_pts_info(st, 64, 1, 90000);
 ast = avformat_new_stream(s, NULL);
@@ -318,7 +318,7 @@ static int ty_read_header(AVFormatContext *s)
 if (ty->audio_type == TIVO_AUDIO_MPEG) {
 ast->codecpar->codec_id = AV_CODEC_ID_MP2;
-ast->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
+ffstream(ast)->need_parsing = AVSTREAM_PARSE_FULL_RAW;
 } else {
 ast->codecpar->codec_id = AV_CODEC_ID_AC3;
 }

@@ -103,7 +103,7 @@ static int is_relative(int64_t ts) {
 */
 static int64_t wrap_timestamp(const AVStream *st, int64_t timestamp)
 {
-const AVStreamInternal *const sti = st->internal;
+const FFStream *const sti = cffstream(st);
 if (sti->pts_wrap_behavior != AV_PTS_WRAP_IGNORE && st->pts_wrap_bits < 64 &&
 sti->pts_wrap_reference != AV_NOPTS_VALUE && timestamp != AV_NOPTS_VALUE) {
 if (sti->pts_wrap_behavior == AV_PTS_WRAP_ADD_OFFSET &&
@@ -118,20 +118,20 @@ static int64_t wrap_timestamp(const AVStream *st, int64_t timestamp)
 int64_t av_stream_get_end_pts(const AVStream *st)
 {
-if (st->internal->priv_pts) {
-return st->internal->priv_pts->val;
+if (cffstream(st)->priv_pts) {
+return cffstream(st)->priv_pts->val;
 } else
 return AV_NOPTS_VALUE;
 }
 struct AVCodecParserContext *av_stream_get_parser(const AVStream *st)
 {
-return st->internal->parser;
+return cffstream(st)->parser;
 }
 void avpriv_stream_set_need_parsing(AVStream *st, enum AVStreamParseType type)
 {
-st->internal->need_parsing = type;
+ffstream(st)->need_parsing = type;
 }
 void av_format_inject_global_side_data(AVFormatContext *s)
@@ -140,7 +140,7 @@ void av_format_inject_global_side_data(AVFormatContext *s)
 si->inject_global_side_data = 1;
 for (unsigned i = 0; i < s->nb_streams; i++) {
 AVStream *st = s->streams[i];
-st->internal->inject_global_side_data = 1;
+ffstream(st)->inject_global_side_data = 1;
 }
 }
@@ -312,9 +312,9 @@ static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
 };
 int score;
 const AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
+FFStream *const sti = ffstream(st);
 if (fmt) {
-AVStreamInternal *const sti = st->internal;
 av_log(s, AV_LOG_DEBUG,
 "Probe with size=%d, packets=%d detected %s with score=%d\n",
 pd->buf_size, s->max_probe_packets - sti->probe_packets,
@@ -436,7 +436,7 @@ static int update_stream_avctx(AVFormatContext *s)
 int ret;
 for (unsigned i = 0; i < s->nb_streams; i++) {
 AVStream *st = s->streams[i];
-AVStreamInternal *const sti = st->internal;
+FFStream *const sti = ffstream(st);
 if (!sti->need_context_update)
 continue;
@@ -633,7 +633,7 @@ static void force_codec_ids(AVFormatContext *s, AVStream *st)
 static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
 {
 FFFormatContext *const si = ffformatcontext(s);
-AVStreamInternal *const sti = st->internal;
+FFStream *const sti = ffstream(st);
 if (sti->request_probe > 0) {
 AVProbeData *pd = &sti->probe_data;
@@ -685,7 +685,7 @@ no_packet:
 static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_index, AVPacket *pkt)
 {
-AVStreamInternal *const sti = st->internal;
+FFStream *const sti = ffstream(st);
 int64_t ref = pkt->dts;
 int pts_wrap_behavior;
 int64_t pts_wrap_reference;
@@ -708,10 +708,10 @@ static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_in
 if (!first_program) {
 int default_stream_index = av_find_default_stream_index(s);
-AVStreamInternal *const default_sti = s->streams[default_stream_index]->internal;
+FFStream *const default_sti = ffstream(s->streams[default_stream_index]);
 if (default_sti->pts_wrap_reference == AV_NOPTS_VALUE) {
 for (unsigned i = 0; i < s->nb_streams; i++) {
-AVStreamInternal *const sti = s->streams[i]->internal;
+FFStream *const sti = ffstream(s->streams[i]);
 if (av_find_program_from_stream(s, NULL, i))
 continue;
 sti->pts_wrap_reference = pts_wrap_reference;
@@ -739,7 +739,7 @@ static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_in
 while (program) {
 if (program->pts_wrap_reference != pts_wrap_reference) {
 for (unsigned i = 0; i < program->nb_stream_indexes; i++) {
-AVStreamInternal *const sti = s->streams[program->stream_index[i]]->internal;
+FFStream *const sti = ffstream(s->streams[program->stream_index[i]]);
 sti->pts_wrap_reference = pts_wrap_reference;
 sti->pts_wrap_behavior = pts_wrap_behavior;
 }
@@ -771,7 +771,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
 for (;;) {
 PacketList *pktl = si->raw_packet_buffer;
-AVStreamInternal *sti;
+FFStream *sti;
 const AVPacket *pkt1;
 if (pktl) {
@@ -779,7 +779,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
 if (si->raw_packet_buffer_remaining_size <= 0)
 if ((err = probe_codec(s, st, NULL)) < 0)
 return err;
-if (st->internal->request_probe <= 0) {
+if (ffstream(st)->request_probe <= 0) {
 avpriv_packet_list_get(&si->raw_packet_buffer,
 &si->raw_packet_buffer_end, pkt);
 si->raw_packet_buffer_remaining_size += pkt->size;
@@ -800,7 +800,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
 return err;
 for (unsigned i = 0; i < s->nb_streams; i++) {
 AVStream *const st = s->streams[i];
-AVStreamInternal *const sti = st->internal;
+FFStream *const sti = ffstream(st);
 if (sti->probe_packets || sti->request_probe > 0)
 if ((err = probe_codec(s, st, NULL)) < 0)
 return err;
@@ -831,7 +831,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
 "Invalid stream index.\n");
 st = s->streams[pkt->stream_index];
-sti = st->internal;
+sti = ffstream(st);
 if (update_wrap_reference(s, st, pkt->stream_index, pkt) && sti->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) {
 // correct first time stamps to negative values
@@ -893,7 +893,7 @@ static void compute_frame_duration(AVFormatContext *s, int *pnum, int *pden,
 AVStream *st, AVCodecParserContext *pc,
 AVPacket *pkt)
 {
-AVStreamInternal *const sti = st->internal;
+FFStream *const sti = ffstream(st);
 AVRational codec_framerate = sti->avctx->framerate;
 int frame_size, sample_rate;
@@ -958,7 +958,7 @@ int ff_is_intra_only(enum AVCodecID id)
 static int has_decode_delay_been_guessed(AVStream *st)
 {
-AVStreamInternal *const sti = st->internal;
+FFStream *const sti = ffstream(st);
 if (st->codecpar->codec_id != AV_CODEC_ID_H264) return 1;
 if (!sti->info) // if we have left find_stream_info then nb_decoded_frames won't increase anymore for stream copy
 return 1;
@@ -986,7 +986,7 @@ static PacketList *get_next_pkt(AVFormatContext *s, AVStream *st, PacketList *pk
 }
 static int64_t select_from_pts_buffer(AVStream *st, int64_t *pts_buffer, int64_t dts) {
-AVStreamInternal *const sti = st->internal;
+FFStream *const sti = ffstream(st);
 int onein_oneout = st->codecpar->codec_id != AV_CODEC_ID_H264 &&
 st->codecpar->codec_id != AV_CODEC_ID_HEVC;
@@ -1035,7 +1035,7 @@ static void update_dts_from_pts(AVFormatContext *s, int stream_index,
 PacketList *pkt_buffer)
 {
 AVStream *st = s->streams[stream_index];
-int delay = st->internal->avctx->has_b_frames;
+int delay = ffstream(st)->avctx->has_b_frames;
 int64_t pts_buffer[MAX_REORDER_DELAY+1];
@@ -1061,7 +1061,7 @@ static void update_initial_timestamps(AVFormatContext *s, int stream_index,
 {
 FFFormatContext *const si = ffformatcontext(s);
 AVStream *st = s->streams[stream_index];
-AVStreamInternal *const sti = st->internal;
+FFStream *const sti = ffstream(st);
 PacketList *pktl = si->packet_buffer ? si->packet_buffer : si->parse_queue;
 PacketList *pktl_it;
@@ -1115,7 +1115,7 @@ static void update_initial_durations(AVFormatContext *s, AVStream *st,
 int stream_index, int64_t duration)
 {
 FFFormatContext *const si = ffformatcontext(s);
-AVStreamInternal *const sti = st->internal;
+FFStream *const sti = ffstream(st);
 PacketList *pktl = si->packet_buffer ? si->packet_buffer : si->parse_queue;
 int64_t cur_dts = RELATIVE_TS_BASE;
@@ -1175,7 +1175,7 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
 int64_t next_dts, int64_t next_pts)
 {
 FFFormatContext *const si = ffformatcontext(s);
-AVStreamInternal *const sti = st->internal;
+FFStream *const sti = ffstream(st);
 int num, den, presentation_delayed, delay;
 int64_t offset;
 AVRational duration;
@@ -1369,7 +1369,7 @@ static int parse_packet(AVFormatContext *s, AVPacket *pkt,
 FFFormatContext *const si = ffformatcontext(s);
 AVPacket *out_pkt = si->parse_pkt;
 AVStream *st = s->streams[stream_index];
-AVStreamInternal *const sti = st->internal;
+FFStream *const sti = ffstream(st);
 uint8_t *data = pkt->data;
 int size = pkt->size;
 int ret = 0, got_output = flush;
@@ -1487,7 +1487,7 @@ static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
 while (!got_packet && !si->parse_queue) {
 AVStream *st;
-AVStreamInternal *sti;
+FFStream *sti;
 /* read next packet */
 ret = ff_read_packet(s, pkt);
@@ -1497,7 +1497,8 @@ static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
 /* flush the parsers */
 for (unsigned i = 0; i < s->nb_streams; i++) {
 AVStream *const st = s->streams[i];
-if (st->internal->parser && st->internal->need_parsing)
+FFStream *const sti = ffstream(st);
+if (sti->parser && sti->need_parsing)
 parse_packet(s, pkt, st->index, 1);
 }
 /* all remaining packets are now in parse_queue =>
@@ -1506,7 +1507,7 @@ static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
 }
 ret = 0;
 st = s->streams[pkt->stream_index];
-sti = st->internal;
+sti = ffstream(st);
 st->event_flags |= AVSTREAM_EVENT_FLAG_NEW_PACKETS;
@@ -1602,7 +1603,7 @@ static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
 if (ret >= 0) {
 AVStream *st = s->streams[pkt->stream_index];
-AVStreamInternal *const sti = st->internal;
+FFStream *const sti = ffstream(st);
 int discard_padding = 0;
 if (sti->first_discard_sample && pkt->pts != AV_NOPTS_VALUE) {
 int64_t pts = pkt->pts - (is_relative(pkt->pts) ? RELATIVE_TS_BASE : 0);
@@ -1792,6 +1793,7 @@ int av_find_default_stream_index(AVFormatContext *s)
 return -1;
 for (unsigned i = 0; i < s->nb_streams; i++) {
 const AVStream *const st = s->streams[i];
+const FFStream *const sti = cffstream(st);
 int score = 0;
 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
 if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)
@@ -1804,7 +1806,7 @@ int av_find_default_stream_index(AVFormatContext *s)
 if (st->codecpar->sample_rate)
 score += 50;
 }
-if (st->internal->codec_info_nb_frames)
+if (sti->codec_info_nb_frames)
 score += 12;
 if (st->discard != AVDISCARD_ALL)
@@ -1828,7 +1830,7 @@ void ff_read_frame_flush(AVFormatContext *s)
 /* Reset read state for each stream. */
 for (unsigned i = 0; i < s->nb_streams; i++) {
 AVStream *const st = s->streams[i];
-AVStreamInternal *const sti = st->internal;
+FFStream *const sti = ffstream(st);
 if (sti->parser) {
 av_parser_close(sti->parser);
@@ -1858,8 +1860,9 @@ void avpriv_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timesta
 {
 for (unsigned i = 0; i < s->nb_streams; i++) {
 AVStream *st = s->streams[i];
-st->internal->cur_dts =
+FFStream *const sti = ffstream(st);
+sti->cur_dts =
 av_rescale(timestamp,
 st->time_base.den * (int64_t) ref_st->time_base.num,
 st->time_base.num * (int64_t) ref_st->time_base.den);
@@ -1869,7 +1872,7 @@ void avpriv_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timesta
 void ff_reduce_index(AVFormatContext *s, int stream_index)
 {
 AVStream *st = s->streams[stream_index];
-AVStreamInternal *const sti = st->internal;
+FFStream *const sti = ffstream(st);
 unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
 if ((unsigned) sti->nb_index_entries >= max_entries) {
@@ -1942,7 +1945,7 @@ int ff_add_index_entry(AVIndexEntry **index_entries,
 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
 int size, int distance, int flags)
 {
-AVStreamInternal *const sti = st->internal;
+FFStream *const sti = ffstream(st);
 timestamp = wrap_timestamp(st, timestamp);
 return ff_add_index_entry(&sti->index_entries, &sti->nb_index_entries,
 &sti->index_entries_allocated_size, pos,
@@ -2014,10 +2017,10 @@ void ff_configure_buffers_for_index(AVFormatContext *s, int64_t time_tolerance)
 for (unsigned ist1 = 0; ist1 < s->nb_streams; ist1++) {
 AVStream *st1 = s->streams[ist1];
-AVStreamInternal *const sti1 = st1->internal;
+FFStream *const sti1 = ffstream(st1);
 for (unsigned ist2 = 0; ist2 < s->nb_streams; ist2++) {
 AVStream *st2 = s->streams[ist2];
-AVStreamInternal *const sti2 = st2->internal;
+FFStream *const sti2 = ffstream(st2);
 if (ist1 == ist2)
 continue;
@@ -2061,19 +2064,19 @@ void ff_configure_buffers_for_index(AVFormatContext *s, int64_t time_tolerance)
 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
 {
-AVStreamInternal *const sti = st->internal;
+FFStream *const sti = ffstream(st);
 return ff_index_search_timestamp(sti->index_entries, sti->nb_index_entries,
 wanted_timestamp, flags);
 }
 int avformat_index_get_entries_count(const AVStream *st)
 {
-return st->internal->nb_index_entries;
+return cffstream(st)->nb_index_entries;
 }
 const AVIndexEntry *avformat_index_get_entry(AVStream *st, int idx)
 {
-const AVStreamInternal *const sti = st->internal;
+const FFStream *const sti = ffstream(st);
 if (idx < 0 || idx >= sti->nb_index_entries)
 return NULL;
@@ -2084,7 +2087,7 @@ const AVIndexEntry *avformat_index_get_entry_from_timestamp(AVStream *st,
 int64_t wanted_timestamp,
 int flags)
 {
-const AVStreamInternal *const sti = st->internal;
+const FFStream *const sti = ffstream(st);
 int idx = ff_index_search_timestamp(sti->index_entries,
 sti->nb_index_entries,
 wanted_timestamp, flags);
@@ -2113,7 +2116,7 @@ int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
 int index;
 int64_t ret;
 AVStream *st;
-AVStreamInternal *sti;
+FFStream *sti;
 if (stream_index < 0)
 return -1;
@@ -2125,7 +2128,7 @@ int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
 pos_limit = -1; // GCC falsely says it may be uninitialized.
 st = s->streams[stream_index];
-sti = st->internal;
+sti = ffstream(st);
 if (sti->index_entries) {
 AVIndexEntry *e;
@@ -2345,7 +2348,7 @@ static int seek_frame_generic(AVFormatContext *s, int stream_index,
 {
 FFFormatContext *const si = ffformatcontext(s);
 AVStream *const st = s->streams[stream_index];
-AVStreamInternal *const sti = st->internal;
+FFStream *const sti = ffstream(st);
 int index;
 int64_t ret;
 AVIndexEntry *ie;
@@ -2687,7 +2690,7 @@ static void estimate_timings_from_bit_rate(AVFormatContext *ic)
 int64_t bit_rate = 0;
 for (unsigned i = 0; i < ic->nb_streams; i++) {
 const AVStream *const st = ic->streams[i];
const AVStreamInternal *const sti = st->internal; const FFStream *const sti = cffstream(st);
if (st->codecpar->bit_rate <= 0 && sti->avctx->bit_rate > 0) if (st->codecpar->bit_rate <= 0 && sti->avctx->bit_rate > 0)
st->codecpar->bit_rate = sti->avctx->bit_rate; st->codecpar->bit_rate = sti->avctx->bit_rate;
if (st->codecpar->bit_rate > 0) { if (st->codecpar->bit_rate > 0) {
@ -2749,7 +2752,7 @@ static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
for (unsigned i = 0; i < ic->nb_streams; i++) { for (unsigned i = 0; i < ic->nb_streams; i++) {
AVStream *const st = ic->streams[i]; AVStream *const st = ic->streams[i];
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
if (st->start_time == AV_NOPTS_VALUE && if (st->start_time == AV_NOPTS_VALUE &&
sti->first_dts == AV_NOPTS_VALUE && sti->first_dts == AV_NOPTS_VALUE &&
@ -2782,7 +2785,7 @@ static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
read_size = 0; read_size = 0;
for (;;) { for (;;) {
AVStream *st; AVStream *st;
AVStreamInternal *sti; FFStream *sti;
if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0))) if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
break; break;
@ -2793,7 +2796,7 @@ static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
break; break;
read_size += pkt->size; read_size += pkt->size;
st = ic->streams[pkt->stream_index]; st = ic->streams[pkt->stream_index];
sti = st->internal; sti = ffstream(st);
if (pkt->pts != AV_NOPTS_VALUE && if (pkt->pts != AV_NOPTS_VALUE &&
(st->start_time != AV_NOPTS_VALUE || (st->start_time != AV_NOPTS_VALUE ||
sti->first_dts != AV_NOPTS_VALUE)) { sti->first_dts != AV_NOPTS_VALUE)) {
@ -2844,12 +2847,13 @@ static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
/* warn about audio/video streams which duration could not be estimated */ /* warn about audio/video streams which duration could not be estimated */
for (unsigned i = 0; i < ic->nb_streams; i++) { for (unsigned i = 0; i < ic->nb_streams; i++) {
const AVStream *const st = ic->streams[i]; const AVStream *const st = ic->streams[i];
const FFStream *const sti = cffstream(st);
if (st->duration == AV_NOPTS_VALUE) { if (st->duration == AV_NOPTS_VALUE) {
switch (st->codecpar->codec_type) { switch (st->codecpar->codec_type) {
case AVMEDIA_TYPE_VIDEO: case AVMEDIA_TYPE_VIDEO:
case AVMEDIA_TYPE_AUDIO: case AVMEDIA_TYPE_AUDIO:
if (st->start_time != AV_NOPTS_VALUE || st->internal->first_dts != AV_NOPTS_VALUE) { if (st->start_time != AV_NOPTS_VALUE || sti->first_dts != AV_NOPTS_VALUE) {
av_log(ic, AV_LOG_WARNING, "stream %d : no PTS found at end of file, duration not set\n", i); av_log(ic, AV_LOG_WARNING, "stream %d : no PTS found at end of file, duration not set\n", i);
} else } else
av_log(ic, AV_LOG_WARNING, "stream %d : no TS found at start of file, duration not set\n", i); av_log(ic, AV_LOG_WARNING, "stream %d : no TS found at start of file, duration not set\n", i);
@ -2862,7 +2866,7 @@ skip_duration_calc:
avio_seek(ic->pb, old_offset, SEEK_SET); avio_seek(ic->pb, old_offset, SEEK_SET);
for (unsigned i = 0; i < ic->nb_streams; i++) { for (unsigned i = 0; i < ic->nb_streams; i++) {
AVStream *const st = ic->streams[i]; AVStream *const st = ic->streams[i];
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
sti->cur_dts = sti->first_dts; sti->cur_dts = sti->first_dts;
sti->last_IP_pts = AV_NOPTS_VALUE; sti->last_IP_pts = AV_NOPTS_VALUE;
@ -2935,7 +2939,7 @@ static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
static int has_codec_parameters(AVStream *st, const char **errmsg_ptr) static int has_codec_parameters(AVStream *st, const char **errmsg_ptr)
{ {
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
AVCodecContext *avctx = sti->avctx; AVCodecContext *avctx = sti->avctx;
#define FAIL(errmsg) do { \ #define FAIL(errmsg) do { \
@ -2985,7 +2989,7 @@ static int has_codec_parameters(AVStream *st, const char **errmsg_ptr)
static int try_decode_frame(AVFormatContext *s, AVStream *st, static int try_decode_frame(AVFormatContext *s, AVStream *st,
const AVPacket *avpkt, AVDictionary **options) const AVPacket *avpkt, AVDictionary **options)
{ {
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
AVCodecContext *avctx = sti->avctx; AVCodecContext *avctx = sti->avctx;
const AVCodec *codec; const AVCodec *codec;
int got_picture = 1, ret = 0; int got_picture = 1, ret = 0;
@ -3313,7 +3317,7 @@ int ff_get_extradata(AVFormatContext *s, AVCodecParameters *par, AVIOContext *pb
int ff_rfps_add_frame(AVFormatContext *ic, AVStream *st, int64_t ts) int ff_rfps_add_frame(AVFormatContext *ic, AVStream *st, int64_t ts)
{ {
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
int64_t last = sti->info->last_dts; int64_t last = sti->info->last_dts;
if ( ts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && ts > last if ( ts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && ts > last
@ -3375,7 +3379,7 @@ void ff_rfps_calculate(AVFormatContext *ic)
{ {
for (unsigned i = 0; i < ic->nb_streams; i++) { for (unsigned i = 0; i < ic->nb_streams; i++) {
AVStream *st = ic->streams[i]; AVStream *st = ic->streams[i];
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
if (st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO) if (st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
continue; continue;
@ -3455,7 +3459,7 @@ static int extract_extradata_check(AVStream *st)
static int extract_extradata_init(AVStream *st) static int extract_extradata_init(AVStream *st)
{ {
AVStreamInternal *sti = st->internal; FFStream *sti = ffstream(st);
const AVBitStreamFilter *f; const AVBitStreamFilter *f;
int ret; int ret;
@ -3494,7 +3498,7 @@ fail:
static int extract_extradata(FFFormatContext *si, AVStream *st, const AVPacket *pkt) static int extract_extradata(FFFormatContext *si, AVStream *st, const AVPacket *pkt)
{ {
AVStreamInternal *sti = st->internal; FFStream *sti = ffstream(st);
AVPacket *pkt_ref = si->parse_pkt; AVPacket *pkt_ref = si->parse_pkt;
int ret; int ret;
@ -3597,7 +3601,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
const AVCodec *codec; const AVCodec *codec;
AVDictionary *thread_opt = NULL; AVDictionary *thread_opt = NULL;
AVStream *const st = ic->streams[i]; AVStream *const st = ic->streams[i];
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
AVCodecContext *const avctx = sti->avctx; AVCodecContext *const avctx = sti->avctx;
if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
@ -3661,7 +3665,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
for (;;) { for (;;) {
const AVPacket *pkt; const AVPacket *pkt;
AVStream *st; AVStream *st;
AVStreamInternal *sti; FFStream *sti;
AVCodecContext *avctx; AVCodecContext *avctx;
int analyzed_all_streams; int analyzed_all_streams;
unsigned i; unsigned i;
@ -3674,7 +3678,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
/* check if one codec still needs to be handled */ /* check if one codec still needs to be handled */
for (i = 0; i < ic->nb_streams; i++) { for (i = 0; i < ic->nb_streams; i++) {
AVStream *const st = ic->streams[i]; AVStream *const st = ic->streams[i];
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
int fps_analyze_framecount = 20; int fps_analyze_framecount = 20;
int count; int count;
@ -3736,8 +3740,9 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
"Probe buffer size limit of %"PRId64" bytes reached\n", probesize); "Probe buffer size limit of %"PRId64" bytes reached\n", probesize);
for (unsigned i = 0; i < ic->nb_streams; i++) { for (unsigned i = 0; i < ic->nb_streams; i++) {
AVStream *const st = ic->streams[i]; AVStream *const st = ic->streams[i];
FFStream *const sti = ffstream(st);
if (!st->r_frame_rate.num && if (!st->r_frame_rate.num &&
st->internal->info->duration_count <= 1 && sti->info->duration_count <= 1 &&
st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
strcmp(ic->iformat->name, "image2")) strcmp(ic->iformat->name, "image2"))
av_log(ic, AV_LOG_WARNING, av_log(ic, AV_LOG_WARNING,
@ -3772,7 +3777,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
} }
st = ic->streams[pkt->stream_index]; st = ic->streams[pkt->stream_index];
sti = st->internal; sti = ffstream(st);
if (!(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) if (!(st->disposition & AV_DISPOSITION_ATTACHED_PIC))
read_size += pkt->size; read_size += pkt->size;
@ -3898,7 +3903,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
if (eof_reached) { if (eof_reached) {
for (unsigned stream_index = 0; stream_index < ic->nb_streams; stream_index++) { for (unsigned stream_index = 0; stream_index < ic->nb_streams; stream_index++) {
AVStream *const st = ic->streams[stream_index]; AVStream *const st = ic->streams[stream_index];
AVCodecContext *const avctx = st->internal->avctx; AVCodecContext *const avctx = ffstream(st)->avctx;
if (!has_codec_parameters(st, NULL)) { if (!has_codec_parameters(st, NULL)) {
const AVCodec *codec = find_probe_decoder(ic, st, st->codecpar->codec_id); const AVCodec *codec = find_probe_decoder(ic, st, st->codecpar->codec_id);
if (codec && !avctx->codec) { if (codec && !avctx->codec) {
@ -3927,9 +3932,10 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
for (unsigned i = 0; i < ic->nb_streams; i++) { for (unsigned i = 0; i < ic->nb_streams; i++) {
AVStream *const st = ic->streams[i]; AVStream *const st = ic->streams[i];
FFStream *const sti = ffstream(st);
/* flush the decoders */ /* flush the decoders */
if (st->internal->info->found_decoder == 1) { if (sti->info->found_decoder == 1) {
do { do {
err = try_decode_frame(ic, st, empty_pkt, err = try_decode_frame(ic, st, empty_pkt,
(options && i < orig_nb_streams) (options && i < orig_nb_streams)
@ -3948,7 +3954,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
for (unsigned i = 0; i < ic->nb_streams; i++) { for (unsigned i = 0; i < ic->nb_streams; i++) {
AVStream *const st = ic->streams[i]; AVStream *const st = ic->streams[i];
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
AVCodecContext *const avctx = sti->avctx; AVCodecContext *const avctx = sti->avctx;
if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) { if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
@ -4050,7 +4056,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
ret = -1; ret = -1;
for (unsigned i = 0; i < ic->nb_streams; i++) { for (unsigned i = 0; i < ic->nb_streams; i++) {
AVStream *const st = ic->streams[i]; AVStream *const st = ic->streams[i];
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
const char *errmsg; const char *errmsg;
/* if no packet was ever seen, update context now for has_codec_parameters */ /* if no packet was ever seen, update context now for has_codec_parameters */
@ -4081,7 +4087,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
/* update the stream parameters from the internal codec contexts */ /* update the stream parameters from the internal codec contexts */
for (unsigned i = 0; i < ic->nb_streams; i++) { for (unsigned i = 0; i < ic->nb_streams; i++) {
AVStream *const st = ic->streams[i]; AVStream *const st = ic->streams[i];
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
if (sti->avctx_inited) { if (sti->avctx_inited) {
ret = avcodec_parameters_from_context(st->codecpar, sti->avctx); ret = avcodec_parameters_from_context(st->codecpar, sti->avctx);
@ -4098,7 +4104,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
find_stream_info_err: find_stream_info_err:
for (unsigned i = 0; i < ic->nb_streams; i++) { for (unsigned i = 0; i < ic->nb_streams; i++) {
AVStream *const st = ic->streams[i]; AVStream *const st = ic->streams[i];
AVStreamInternal *const sti = st->internal; FFStream *const sti = ffstream(st);
if (sti->info) { if (sti->info) {
av_freep(&sti->info->duration_error); av_freep(&sti->info->duration_error);
av_freep(&sti->info); av_freep(&sti->info);
@ -4173,7 +4179,7 @@ int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
} }
disposition = !(st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED | AV_DISPOSITION_VISUAL_IMPAIRED)) disposition = !(st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED | AV_DISPOSITION_VISUAL_IMPAIRED))
+ !! (st->disposition & AV_DISPOSITION_DEFAULT); + !! (st->disposition & AV_DISPOSITION_DEFAULT);
count = st->internal->codec_info_nb_frames; count = ffstream(st)->codec_info_nb_frames;
bitrate = par->bit_rate; bitrate = par->bit_rate;
multiframe = FFMIN(5, count); multiframe = FFMIN(5, count);
if ((best_disposition > disposition) || if ((best_disposition > disposition) ||
@ -4271,7 +4277,7 @@ int ff_stream_encode_params_copy(AVStream *dst, const AVStream *src)
static void free_stream(AVStream **pst) static void free_stream(AVStream **pst)
{ {
AVStream *st = *pst; AVStream *st = *pst;
AVStreamInternal *sti; FFStream *const sti = ffstream(st);
if (!st) if (!st)
return; return;
@ -4283,8 +4289,6 @@ static void free_stream(AVStream **pst)
if (st->attached_pic.data) if (st->attached_pic.data)
av_packet_unref(&st->attached_pic); av_packet_unref(&st->attached_pic);
sti = st->internal;
if (sti) {
av_parser_close(sti->parser); av_parser_close(sti->parser);
avcodec_free_context(&sti->avctx); avcodec_free_context(&sti->avctx);
av_bsf_free(&sti->bsfc); av_bsf_free(&sti->bsfc);
@ -4298,8 +4302,6 @@ static void free_stream(AVStream **pst)
av_freep(&sti->info->duration_error); av_freep(&sti->info->duration_error);
av_freep(&sti->info); av_freep(&sti->info);
} }
}
av_freep(&st->internal);
av_dict_free(&st->metadata); av_dict_free(&st->metadata);
avcodec_parameters_free(&st->codecpar); avcodec_parameters_free(&st->codecpar);
@ -4390,8 +4392,8 @@ void avformat_close_input(AVFormatContext **ps)
AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c) AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
{ {
FFFormatContext *const si = ffformatcontext(s); FFFormatContext *const si = ffformatcontext(s);
FFStream *sti;
AVStream *st; AVStream *st;
AVStreamInternal *sti;
AVStream **streams; AVStream **streams;
if (s->nb_streams >= s->max_streams) { if (s->nb_streams >= s->max_streams) {
@ -4405,13 +4407,11 @@ AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
return NULL; return NULL;
s->streams = streams; s->streams = streams;
st = av_mallocz(sizeof(AVStream));
if (!st)
return NULL;
sti = st->internal = av_mallocz(sizeof(*st->internal)); sti = av_mallocz(sizeof(*sti));
if (!sti) if (!sti)
goto fail; return NULL;
st = &sti->pub;
st->codecpar = avcodec_parameters_alloc(); st->codecpar = avcodec_parameters_alloc();
if (!st->codecpar) if (!st->codecpar)
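
The avformat_new_stream() hunk above is the heart of the change: instead of allocating an AVStream plus a separate AVStreamInternal, a single FFStream is allocated and the caller is handed &sti->pub. A minimal sketch of the layout this relies on is below; the real definition (carrying all of the fields formerly in AVStreamInternal) lives in libavformat/internal.h and is not part of this excerpt.

    typedef struct FFStream {
        /* The public context. It must stay the first member so that an
         * AVStream* obtained through the public API can be converted back
         * to FFStream* with a plain cast. */
        AVStream pub;

        /* ...fields formerly in AVStreamInternal follow here, e.g.
         * parser, avctx, bsfc, info, index_entries, cur_dts, ... */
    } FFStream;

Because the public part is embedded, freeing the FFStream also frees the AVStream, which is why free_stream() above drops both the "if (sti)" guard and the av_freep(&st->internal) call.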
@@ -4814,6 +4814,7 @@ int ff_hex_to_data(uint8_t *data, const char *p)
 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
                          unsigned int pts_num, unsigned int pts_den)
 {
+    FFStream *const sti = ffstream(s);
     AVRational new_tb;
     if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
         if (new_tb.num != pts_num)
@@ -4832,7 +4833,7 @@ void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
         return;
     }
     s->time_base = new_tb;
-    s->internal->avctx->pkt_timebase = new_tb;
+    sti->avctx->pkt_timebase = new_tb;
     s->pts_wrap_bits = pts_wrap_bits;
 }
@@ -5005,7 +5006,7 @@ AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *strea
 AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
 {
     AVRational fr = st->r_frame_rate;
-    AVCodecContext *const avctx = st->internal->avctx;
+    AVCodecContext *const avctx = ffstream(st)->avctx;
     AVRational codec_fr = avctx->framerate;
     AVRational avg_fr = st->avg_frame_rate;
@@ -5407,9 +5408,10 @@ int ff_stream_add_bitstream_filter(AVStream *st, const char *name, const char *a
 {
     int ret;
     const AVBitStreamFilter *bsf;
+    FFStream *const sti = ffstream(st);
     AVBSFContext *bsfc;
-    av_assert0(!st->internal->bsfc);
+    av_assert0(!sti->bsfc);
     if (!(bsf = av_bsf_get_by_name(name))) {
         av_log(NULL, AV_LOG_ERROR, "Unknown bitstream filter '%s'\n", name);
@@ -5443,7 +5445,7 @@ int ff_stream_add_bitstream_filter(AVStream *st, const char *name, const char *a
         return ret;
     }
-    st->internal->bsfc = bsfc;
+    sti->bsfc = bsfc;
     av_log(NULL, AV_LOG_VERBOSE,
            "Automatically inserted bitstream filter '%s'; args='%s'\n",
@@ -5550,8 +5552,8 @@ int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt,
                                                   AVStream *ost, const AVStream *ist,
                                                   enum AVTimebaseSource copy_tb)
 {
-    const AVCodecContext *const dec_ctx = ist->internal->avctx;
-    AVCodecContext *const enc_ctx = ost->internal->avctx;
+    const AVCodecContext *const dec_ctx = cffstream(ist)->avctx;
+    AVCodecContext *const enc_ctx = ffstream(ost)->avctx;
     enc_ctx->time_base = ist->time_base;
     /*
@@ -5607,7 +5609,7 @@ int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt,
 AVRational av_stream_get_codec_timebase(const AVStream *st)
 {
     // See avformat_transfer_internal_stream_timing_info() TODO.
-    return st->internal->avctx->time_base;
+    return cffstream(st)->avctx->time_base;
 }
 void ff_format_set_url(AVFormatContext *s, char *url)
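
All of the utils.c hunks above replace direct st->internal dereferences with the ffstream()/cffstream() accessors. Their definitions are in libavformat/internal.h, which is not part of this excerpt; given that AVStream is the first member of FFStream, they presumably reduce to simple casts along these lines:

    static av_always_inline FFStream *ffstream(AVStream *st)
    {
        return (FFStream*)st;
    }

    static av_always_inline const FFStream *cffstream(const AVStream *st)
    {
        return (const FFStream*)st;
    }

cffstream() keeps read-only callers const-correct, which is why functions such as av_find_default_stream_index() and estimate_timings_from_bit_rate() use it instead of ffstream().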

libavformat/vocdec.c
@@ -73,6 +73,7 @@ static int voc_read_seek(AVFormatContext *s, int stream_index,
 {
     VocDecContext *voc = s->priv_data;
     AVStream *st;
+    FFStream *sti;
     int index;
     if (s->nb_streams < 1) {
@@ -81,16 +82,17 @@ static int voc_read_seek(AVFormatContext *s, int stream_index,
     }
     st = s->streams[stream_index];
+    sti = ffstream(st);
     index = av_index_search_timestamp(st, timestamp, flags);
-    if (index >= 0 && index < st->internal->nb_index_entries - 1) {
-        AVIndexEntry *e = &st->internal->index_entries[index];
+    if (index >= 0 && index < sti->nb_index_entries - 1) {
+        const AVIndexEntry *const e = &sti->index_entries[index];
         avio_seek(s->pb, e->pos, SEEK_SET);
         voc->pts = e->timestamp;
         voc->remaining_size = e->size;
         return 0;
-    } else if (st->internal->nb_index_entries && st->internal->index_entries[0].timestamp <= timestamp) {
-        AVIndexEntry *e = &st->internal->index_entries[st->internal->nb_index_entries - 1];
+    } else if (sti->nb_index_entries && sti->index_entries[0].timestamp <= timestamp) {
+        const AVIndexEntry *const e = &sti->index_entries[sti->nb_index_entries - 1];
         // prepare context for seek_frame_generic()
         voc->pts = e->timestamp;
         voc->remaining_size = e->size;
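
voc_read_seek() can reach the index through FFStream because it lives inside libavformat. Code outside the library cannot include internal.h and should instead use the public accessors whose implementations are updated in the utils.c hunks above; a small, hypothetical usage sketch (dump_index() and its output format are illustrative only, not part of this change):

    #include <inttypes.h>
    #include <stdio.h>
    #include <libavformat/avformat.h>

    /* Hypothetical helper: print a stream's seek index via the public API only. */
    static void dump_index(AVStream *st)
    {
        int count = avformat_index_get_entries_count(st);
        for (int i = 0; i < count; i++) {
            const AVIndexEntry *e = avformat_index_get_entry(st, i);
            if (!e)
                break;
            printf("entry %d: pos=%"PRId64" ts=%"PRId64" keyframe=%d\n",
                   i, e->pos, e->timestamp, !!(e->flags & AVINDEX_KEYFRAME));
        }
    }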

libavformat/vqf.c
@@ -277,7 +277,7 @@ static int vqf_read_seek(AVFormatContext *s,
                       AV_ROUND_DOWN : AV_ROUND_UP);
     pos *= c->frame_bit_len;
-    st->internal->cur_dts = av_rescale(pos, st->time_base.den,
+    ffstream(st)->cur_dts = av_rescale(pos, st->time_base.den,
                                        st->codecpar->bit_rate * (int64_t)st->time_base.num);
     if ((ret = avio_seek(s->pb, ((pos-7) >> 3) + ffformatcontext(s)->data_offset, SEEK_SET)) < 0)

libavformat/wavdec.c
@@ -166,8 +166,9 @@ static int wav_probe(const AVProbeData *p)
 static void handle_stream_probing(AVStream *st)
 {
     if (st->codecpar->codec_id == AV_CODEC_ID_PCM_S16LE) {
-        st->internal->request_probe = AVPROBE_SCORE_EXTENSION;
-        st->internal->probe_packets = FFMIN(st->internal->probe_packets, 32);
+        FFStream *const sti = ffstream(st);
+        sti->request_probe = AVPROBE_SCORE_EXTENSION;
+        sti->probe_packets = FFMIN(sti->probe_packets, 32);
     }
 }
@@ -183,7 +184,7 @@ static int wav_parse_fmt_tag(AVFormatContext *s, int64_t size, AVStream *st)
         return ret;
     handle_stream_probing(st);
-    st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
+    ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL_RAW;
     avpriv_set_pts_info(st, 64, 1, st->codecpar->sample_rate);
@@ -200,7 +201,7 @@ static int wav_parse_xma2_tag(AVFormatContext *s, int64_t size, AVStream *st)
     st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
     st->codecpar->codec_id = AV_CODEC_ID_XMA2;
-    st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
+    ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL_RAW;
     version = avio_r8(pb);
     if (version != 3 && version != 4)
@@ -702,8 +703,8 @@ static int wav_read_packet(AVFormatContext *s, AVPacket *pkt)
         int64_t audio_dts, video_dts;
         AVStream *vst = wav->vst;
 smv_retry:
-        audio_dts = (int32_t)st->internal->cur_dts;
-        video_dts = (int32_t)vst->internal->cur_dts;
+        audio_dts = (int32_t)ffstream( st)->cur_dts;
+        video_dts = (int32_t)ffstream(vst)->cur_dts;
         if (audio_dts != AV_NOPTS_VALUE && video_dts != AV_NOPTS_VALUE) {
             /*We always return a video frame first to get the pixel format first*/
@@ -950,7 +951,7 @@ static int w64_read_header(AVFormatContext *s)
     ff_metadata_conv_ctx(s, NULL, ff_riff_info_conv);
     handle_stream_probing(st);
-    st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
+    ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL_RAW;
     avio_seek(pb, data_ofs, SEEK_SET);

libavformat/wtvdec.c
@@ -618,7 +618,7 @@ static AVStream * new_stream(AVFormatContext *s, AVStream *st, int sid, int code
         st->priv_data = wst;
     }
     st->codecpar->codec_type = codec_type;
-    st->internal->need_parsing = AVSTREAM_PARSE_FULL;
+    ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL;
     avpriv_set_pts_info(st, 64, 1, 10000000);
     return st;
 }

libavformat/xvag.c
@@ -87,7 +87,7 @@ static int xvag_read_header(AVFormatContext *s)
         if (avio_rb16(s->pb) == 0xFFFB) {
             st->codecpar->codec_id = AV_CODEC_ID_MP3;
             st->codecpar->block_align = 0x1000;
-            st->internal->need_parsing = AVSTREAM_PARSE_FULL_RAW;
+            ffstream(st)->need_parsing = AVSTREAM_PARSE_FULL_RAW;
         }
         avio_skip(s->pb, -2);

libavformat/xwma.c
@@ -78,7 +78,7 @@ static int xwma_read_header(AVFormatContext *s)
     ret = ff_get_wav_header(s, pb, st->codecpar, size, 0);
     if (ret < 0)
         return ret;
-    st->internal->need_parsing = AVSTREAM_PARSE_NONE;
+    ffstream(st)->need_parsing = AVSTREAM_PARSE_NONE;
     /* XWMA encoder only allows a few channel/sample rate/bitrate combinations,
      * but some create identical files with fake bitrate (1ch 22050hz at
