|
|
|
@@ -188,6 +188,7 @@ typedef struct VideoState {
     unsigned int audio_buf1_size;
     int audio_buf_index; /* in bytes */
     int audio_write_buf_size;
+    int audio_buf_frames_pending;
     AVPacket audio_pkt_temp;
     AVPacket audio_pkt;
     int audio_pkt_temp_serial;
@@ -1647,7 +1648,7 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t
     return 0;
 }
 
-static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt, int *serial)
+static int get_video_frame(VideoState *is, AVFrame *frame, AVPacket *pkt, int *serial)
 {
     int got_picture;
 
@@ -1678,22 +1679,22 @@ static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacke
         int ret = 1;
 
         if (decoder_reorder_pts == -1) {
-            *pts = av_frame_get_best_effort_timestamp(frame);
+            frame->pts = av_frame_get_best_effort_timestamp(frame);
         } else if (decoder_reorder_pts) {
-            *pts = frame->pkt_pts;
+            frame->pts = frame->pkt_pts;
         } else {
-            *pts = frame->pkt_dts;
+            frame->pts = frame->pkt_dts;
         }
 
-        if (*pts == AV_NOPTS_VALUE) {
-            *pts = 0;
+        if (frame->pts == AV_NOPTS_VALUE) {
+            frame->pts = 0;
         }
 
         if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
             SDL_LockMutex(is->pictq_mutex);
-            if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
+            if (is->frame_last_pts != AV_NOPTS_VALUE && frame->pts) {
                 double clockdiff = get_video_clock(is) - get_master_clock(is);
-                double dpts = av_q2d(is->video_st->time_base) * *pts;
+                double dpts = av_q2d(is->video_st->time_base) * frame->pts;
                 double ptsdiff = dpts - is->frame_last_pts;
                 if (!isnan(clockdiff) && fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
                     ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
@@ -1828,9 +1829,10 @@ static int configure_audio_filters(VideoState *is, const char *afilters, int for
         return AVERROR(ENOMEM);
 
     ret = snprintf(asrc_args, sizeof(asrc_args),
-                   "sample_rate=%d:sample_fmt=%s:channels=%d",
+                   "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
                    is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
-                   is->audio_filter_src.channels);
+                   is->audio_filter_src.channels,
+                   1, is->audio_filter_src.freq);
     if (is->audio_filter_src.channel_layout)
         snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
                  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
@@ -1884,7 +1886,6 @@ static int video_thread(void *arg)
     AVPacket pkt = { 0 };
     VideoState *is = arg;
     AVFrame *frame = av_frame_alloc();
-    int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
     double pts;
     int ret;
     int serial = 0;
@@ -1899,19 +1900,15 @@ static int video_thread(void *arg)
 #endif
 
     for (;;) {
 #if CONFIG_AVFILTER
-        AVRational tb;
 #endif
         while (is->paused && !is->videoq.abort_request)
             SDL_Delay(10);
 
-        avcodec_get_frame_defaults(frame);
-        av_free_packet(&pkt);
-
-        ret = get_video_frame(is, frame, &pts_int, &pkt, &serial);
+        ret = get_video_frame(is, frame, &pkt, &serial);
         if (ret < 0)
             goto the_end;
 
         if (!ret)
             continue;
 
@@ -1944,7 +1941,6 @@ static int video_thread(void *arg)
             last_serial = serial;
         }
 
-        frame->pts = pts_int;
         frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
         ret = av_buffersrc_add_frame(filt_in, frame);
         if (ret < 0)
|
|
|
|
if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0) |
|
|
|
|
is->frame_last_filter_delay = 0; |
|
|
|
|
|
|
|
|
|
pts_int = frame->pts; |
|
|
|
|
tb = filt_out->inputs[0]->time_base; |
|
|
|
|
pos = av_frame_get_pkt_pos(frame); |
|
|
|
|
if (av_cmp_q(tb, is->video_st->time_base)) { |
|
|
|
|
av_unused int64_t pts1 = pts_int; |
|
|
|
|
pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base); |
|
|
|
|
av_dlog(NULL, "video_thread(): " |
|
|
|
|
"tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n", |
|
|
|
|
tb.num, tb.den, pts1, |
|
|
|
|
is->video_st->time_base.num, is->video_st->time_base.den, pts_int); |
|
|
|
|
} |
|
|
|
|
pts = pts_int * av_q2d(is->video_st->time_base); |
|
|
|
|
ret = queue_picture(is, frame, pts, pos, serial); |
|
|
|
|
pts = frame->pts * av_q2d(filt_out->inputs[0]->time_base); |
|
|
|
|
ret = queue_picture(is, frame, pts, av_frame_get_pkt_pos(frame), serial); |
|
|
|
|
av_frame_unref(frame); |
|
|
|
|
} |
|
|
|
|
#else |
|
|
|
|
pts = pts_int * av_q2d(is->video_st->time_base); |
|
|
|
|
pts = frame->pts * av_q2d(is->video_st->time_base); |
|
|
|
|
ret = queue_picture(is, frame, pts, pkt.pos, serial); |
|
|
|
|
av_frame_unref(frame); |
|
|
|
|
#endif |
|
|
|
@@ -2152,10 +2137,12 @@ static int audio_decode_frame(VideoState *is)
     int flush_complete = 0;
     int wanted_nb_samples;
     AVRational tb;
+    int ret;
+    int reconfigure;
 
     for (;;) {
         /* NOTE: the audio packet can contain several frames */
-        while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
+        while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet) || is->audio_buf_frames_pending) {
             if (!is->frame) {
                 if (!(is->frame = avcodec_alloc_frame()))
                     return AVERROR(ENOMEM);
@@ -2170,37 +2157,36 @@ static int audio_decode_frame(VideoState *is)
             if (is->paused)
                 return -1;
 
-            if (flush_complete)
-                break;
-            new_packet = 0;
-            len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
-            if (len1 < 0) {
-                /* if error, we skip the frame */
-                pkt_temp->size = 0;
-                break;
-            }
+            if (!is->audio_buf_frames_pending) {
+                if (flush_complete)
+                    break;
+                new_packet = 0;
+                len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
+                if (len1 < 0) {
+                    /* if error, we skip the frame */
+                    pkt_temp->size = 0;
+                    break;
+                }
 
-            pkt_temp->data += len1;
-            pkt_temp->size -= len1;
+                pkt_temp->data += len1;
+                pkt_temp->size -= len1;
 
-            if (!got_frame) {
-                /* stop sending empty packets if the decoder is finished */
-                if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
-                    flush_complete = 1;
-                continue;
-            }
+                if (!got_frame) {
+                    /* stop sending empty packets if the decoder is finished */
+                    if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
+                        flush_complete = 1;
+                    continue;
+                }
 
-            if (is->frame->pts == AV_NOPTS_VALUE && pkt_temp->pts != AV_NOPTS_VALUE)
-                is->frame->pts = av_rescale_q(pkt_temp->pts, is->audio_st->time_base, dec->time_base);
-            if (pkt_temp->pts != AV_NOPTS_VALUE)
-                pkt_temp->pts += (double) is->frame->nb_samples / is->frame->sample_rate / av_q2d(is->audio_st->time_base);
-            tb = dec->time_base;
+                tb = (AVRational){1, is->frame->sample_rate};
+                if (is->frame->pts != AV_NOPTS_VALUE)
+                    is->frame->pts = av_rescale_q(is->frame->pts, dec->time_base, tb);
+                if (is->frame->pts == AV_NOPTS_VALUE && pkt_temp->pts != AV_NOPTS_VALUE)
+                    is->frame->pts = av_rescale_q(pkt_temp->pts, is->audio_st->time_base, tb);
+                if (pkt_temp->pts != AV_NOPTS_VALUE)
+                    pkt_temp->pts += (double) is->frame->nb_samples / is->frame->sample_rate / av_q2d(is->audio_st->time_base);
 
 #if CONFIG_AVFILTER
-            {
-                int ret;
-                int reconfigure;
-
                 dec_channel_layout = get_valid_channel_layout(is->frame->channel_layout, av_frame_get_channels(is->frame));
 
                 reconfigure =
@@ -2232,10 +2218,18 @@ static int audio_decode_frame(VideoState *is)
                 if ((ret = av_buffersrc_add_frame(is->in_audio_filter, is->frame)) < 0)
                     return ret;
                 av_frame_unref(is->frame);
-                if ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, is->frame, 0)) < 0)
-                    return ret;
-                tb = is->out_audio_filter->inputs[0]->time_base;
 #endif
             }
+#if CONFIG_AVFILTER
+            if ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, is->frame, 0)) < 0) {
+                if (ret == AVERROR(EAGAIN)) {
+                    is->audio_buf_frames_pending = 0;
+                    continue;
+                }
+                return ret;
+            }
+            is->audio_buf_frames_pending = 1;
+            tb = is->out_audio_filter->inputs[0]->time_base;
+#endif
 
             data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(is->frame),
@@ -2337,6 +2331,7 @@ static int audio_decode_frame(VideoState *is)
         if (pkt->data == flush_pkt.data) {
            avcodec_flush_buffers(dec);
            flush_complete = 0;
+            is->audio_buf_frames_pending = 0;
         }
 
         *pkt_temp = *pkt;