Move av_read_frame/parser related buffer variables from AVFormatContext to AVStream.

This is required if we want to allow the user to pull frames from specific streams at some point in the future.

Originally committed as revision 16724 to svn://svn.ffmpeg.org/ffmpeg/trunk
Michael Niedermayer, 16 years ago
commit 3a41c2f769 (parent 959e000627)

Changed files:
  1. libavformat/avformat.h (13 lines changed)
  2. libavformat/utils.c (79 lines changed)
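Editorial note: the point of moving the parsing state into AVStream is that each stream then carries its own partially consumed packet (st->cur_pkt, st->cur_ptr, st->cur_len), so a caller could in principle parse one stream without clobbering state shared with the others. The sketch below is purely hypothetical -- av_read_frame_from_stream() does not exist in libavformat and is not part of this commit; it only illustrates the kind of per-stream pull API the new layout would make possible.

/* HYPOTHETICAL sketch only: av_read_frame_from_stream() is not part of
 * libavformat or of this commit.  It shows why the parsing state has to be
 * per stream: packets belonging to other streams can be parked on their own
 * AVStream instead of overwriting a single context-wide cur_pkt. */
int av_read_frame_from_stream(AVFormatContext *s, int stream_index, AVPacket *pkt)
{
    AVStream *st = s->streams[stream_index];

    /* keep demuxing until the requested stream has unparsed payload */
    while (st->cur_len == 0) {
        AVPacket raw;
        int ret = av_read_packet(s, &raw);
        if (ret < 0)
            return ret;
        AVStream *owner = s->streams[raw.stream_index];
        owner->cur_pkt = raw;          /* park it on the stream it belongs to */
        owner->cur_ptr = raw.data;
        owner->cur_len = raw.size;
    }

    /* ...then run st->parser over st->cur_ptr/st->cur_len exactly as
     * av_read_frame_internal() does and fill *pkt with the parsed frame. */
    (void)pkt;
    return 0;
}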

libavformat/avformat.h:

@@ -487,6 +487,11 @@ typedef struct AVStream {
     AVRational sample_aspect_ratio;
 
     AVMetadata *metadata;
+
+    /* av_read_frame() support */
+    const uint8_t *cur_ptr;
+    int cur_len;
+    AVPacket cur_pkt;
 } AVStream;
 
 #define AV_PROGRAM_RUNNING 1
@@ -573,9 +578,11 @@ typedef struct AVFormatContext {
 
     /* av_read_frame() support */
     AVStream *cur_st;
-    const uint8_t *cur_ptr;
-    int cur_len;
-    AVPacket cur_pkt;
+#if LIBAVFORMAT_VERSION_INT < (53<<16)
+    const uint8_t *cur_ptr_deprecated;
+    int cur_len_deprecated;
+    AVPacket cur_pkt_deprecated;
+#endif
 
     /* av_seek_frame() support */
     int64_t data_offset; /** offset of the first packet */
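Editorial note on the #if block above: FFmpeg packs its library versions as (major<<16 | minor<<8 | micro), so the guard LIBAVFORMAT_VERSION_INT < (53<<16) means "any libavformat release before major version 53". The old public fields are kept, renamed with a _deprecated suffix, so that the size and layout of AVFormatContext stay ABI-compatible for the rest of the 52.x series; they disappear automatically at the next major bump. A minimal sketch of the same pattern, with illustrative (not real) macro names and values:

/* Illustrative version packing; the real macros live in libavformat's headers. */
#define EXAMPLE_VERSION_MAJOR 52
#define EXAMPLE_VERSION_MINOR 25
#define EXAMPLE_VERSION_MICRO  0
#define EXAMPLE_VERSION_INT ((EXAMPLE_VERSION_MAJOR << 16) | \
                             (EXAMPLE_VERSION_MINOR <<  8) | \
                              EXAMPLE_VERSION_MICRO)

typedef struct ExampleContext {
    int current_field;             /* part of the supported API */
#if EXAMPLE_VERSION_INT < (53 << 16)
    int old_field_deprecated;      /* kept only so the struct layout does not change before v53 */
#endif
} ExampleContext;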

libavformat/utils.c:

@@ -928,24 +928,24 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
             if (!st->need_parsing || !st->parser) {
                 /* no parsing needed: we just output the packet as is */
                 /* raw data support */
-                *pkt = s->cur_pkt;
+                *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
                 compute_pkt_fields(s, st, NULL, pkt);
                 s->cur_st = NULL;
                 break;
-            } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
+            } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
                 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
-                                      s->cur_ptr, s->cur_len,
-                                      s->cur_pkt.pts, s->cur_pkt.dts);
-                s->cur_pkt.pts = AV_NOPTS_VALUE;
-                s->cur_pkt.dts = AV_NOPTS_VALUE;
+                                      st->cur_ptr, st->cur_len,
+                                      st->cur_pkt.pts, st->cur_pkt.dts);
+                st->cur_pkt.pts = AV_NOPTS_VALUE;
+                st->cur_pkt.dts = AV_NOPTS_VALUE;
                 /* increment read pointer */
-                s->cur_ptr += len;
-                s->cur_len -= len;
+                st->cur_ptr += len;
+                st->cur_len -= len;
 
                 /* return packet if any */
                 if (pkt->size) {
+                    pkt->pos = st->cur_pkt.pos; // Isn't quite accurate but close.
                 got_packet:
-                    pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
                     pkt->duration = 0;
                     pkt->stream_index = st->index;
                     pkt->pts = st->parser->pts;
@@ -963,12 +963,13 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
                 }
             } else {
                 /* free packet */
-                av_free_packet(&s->cur_pkt);
+                av_free_packet(&st->cur_pkt);
                 s->cur_st = NULL;
             }
         } else {
+            AVPacket cur_pkt;
             /* read next packet */
-            ret = av_read_packet(s, &s->cur_pkt);
+            ret = av_read_packet(s, &cur_pkt);
             if (ret < 0) {
                 if (ret == AVERROR(EAGAIN))
                     return ret;
@@ -987,31 +988,32 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
                 /* no more packets: really terminate parsing */
                 return ret;
             }
+            st = s->streams[cur_pkt.stream_index];
+            st->cur_pkt= cur_pkt;
 
-            if(s->cur_pkt.pts != AV_NOPTS_VALUE &&
-               s->cur_pkt.dts != AV_NOPTS_VALUE &&
-               s->cur_pkt.pts < s->cur_pkt.dts){
+            if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
+               st->cur_pkt.dts != AV_NOPTS_VALUE &&
+               st->cur_pkt.pts < st->cur_pkt.dts){
                 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
-                       s->cur_pkt.stream_index,
-                       s->cur_pkt.pts,
-                       s->cur_pkt.dts,
-                       s->cur_pkt.size);
-//                av_free_packet(&s->cur_pkt);
+                       st->cur_pkt.stream_index,
+                       st->cur_pkt.pts,
+                       st->cur_pkt.dts,
+                       st->cur_pkt.size);
+//                av_free_packet(&st->cur_pkt);
 //                return -1;
             }
-            st = s->streams[s->cur_pkt.stream_index];
 
             if(s->debug & FF_FDEBUG_TS)
                 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
-                    s->cur_pkt.stream_index,
-                    s->cur_pkt.pts,
-                    s->cur_pkt.dts,
-                    s->cur_pkt.size,
-                    s->cur_pkt.flags);
+                    st->cur_pkt.stream_index,
+                    st->cur_pkt.pts,
+                    st->cur_pkt.dts,
+                    st->cur_pkt.size,
+                    st->cur_pkt.flags);
 
             s->cur_st = st;
-            s->cur_ptr = s->cur_pkt.data;
-            s->cur_len = s->cur_pkt.size;
+            st->cur_ptr = st->cur_pkt.data;
+            st->cur_len = st->cur_pkt.size;
             if (st->need_parsing && !st->parser) {
                 st->parser = av_parser_init(st->codec->codec_id);
                 if (!st->parser) {
@@ -1022,7 +1024,7 @@ static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
                 }
                 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
                     st->parser->next_frame_offset=
-                    st->parser->cur_offset= s->cur_pkt.pos;
+                    st->parser->cur_offset= st->cur_pkt.pos;
                 }
             }
         }
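Editorial note: taken together, the changes above implement the usual feed-the-parser loop, only with the leftover-input bookkeeping now living in the stream. Below is a condensed, illustrative sketch of that loop, assuming the av_parser_parse() signature of this era; error handling and the hand-off of the parsed packet to the caller are omitted.

/* Condensed, illustrative version of what av_read_frame_internal() does with
 * the per-stream fields after this commit.  st->cur_pkt holds one demuxed
 * packet; cur_ptr/cur_len track how much of its payload the parser has not
 * consumed yet. */
static void parse_pending_payload(AVStream *st)
{
    while (st->cur_len > 0) {
        uint8_t *frame;
        int      frame_size;
        int used = av_parser_parse(st->parser, st->codec, &frame, &frame_size,
                                   st->cur_ptr, st->cur_len,
                                   st->cur_pkt.pts, st->cur_pkt.dts);
        /* the packet's timestamps apply only to the first slice we feed in */
        st->cur_pkt.pts = AV_NOPTS_VALUE;
        st->cur_pkt.dts = AV_NOPTS_VALUE;
        st->cur_ptr += used;           /* advance past consumed input */
        st->cur_len -= used;
        if (frame_size > 0) {
            /* a complete frame came out; wrap it in an AVPacket and return it */
        }
    }
    av_free_packet(&st->cur_pkt);      /* payload fully consumed */
}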
@@ -1139,15 +1141,7 @@ static void av_read_frame_flush(AVFormatContext *s)
 
     flush_packet_queue(s);
 
-    /* free previous packet */
-    if (s->cur_st) {
-        if (s->cur_st->parser)
-            av_free_packet(&s->cur_pkt);
-        s->cur_st = NULL;
-    }
-    /* fail safe */
-    s->cur_ptr = NULL;
-    s->cur_len = 0;
+    s->cur_st = NULL;
 
     /* for each stream, reset read state */
     for(i = 0; i < s->nb_streams; i++) {
@@ -1156,9 +1150,13 @@ static void av_read_frame_flush(AVFormatContext *s)
         if (st->parser) {
             av_parser_close(st->parser);
             st->parser = NULL;
+            av_free_packet(&st->cur_pkt);
         }
         st->last_IP_pts = AV_NOPTS_VALUE;
         st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
 
+        /* fail safe */
+        st->cur_ptr = NULL;
+        st->cur_len = 0;
     }
 }
@@ -1689,9 +1687,6 @@ static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
     int64_t end_time;
     int64_t filesize, offset, duration;
 
-    /* free previous packet */
-    if (ic->cur_st && ic->cur_st->parser)
-        av_free_packet(&ic->cur_pkt);
     ic->cur_st = NULL;
 
     /* flush packet queue */
@@ -1702,6 +1697,7 @@ static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
         if (st->parser) {
             av_parser_close(st->parser);
             st->parser= NULL;
+            av_free_packet(&st->cur_pkt);
         }
     }
 
@@ -2248,10 +2244,6 @@ void av_close_input_stream(AVFormatContext *s)
     int i;
     AVStream *st;
 
-    /* free previous packet */
-    if (s->cur_st && s->cur_st->parser)
-        av_free_packet(&s->cur_pkt);
-
     if (s->iformat->read_close)
         s->iformat->read_close(s);
     for(i=0;i<s->nb_streams;i++) {
@@ -2259,6 +2251,7 @@ void av_close_input_stream(AVFormatContext *s)
         st = s->streams[i];
         if (st->parser) {
             av_parser_close(st->parser);
+            av_free_packet(&st->cur_pkt);
         }
         av_metadata_free(&st->metadata);
         av_free(st->index_entries);
