/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libavformat API example.
 *
 * @example output.c
 * Output a media file in any supported libavformat format. The default
 * codecs are used.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include "libavutil/channel_layout.h"
#include "libavutil/mathematics.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"

/* 5 seconds stream duration */
#define STREAM_DURATION   5.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

static int sws_flags = SWS_BICUBIC;

// a wrapper around a single output AVStream
typedef struct OutputStream {
    AVStream *st;

    AVFrame *frame;
    AVFrame *tmp_frame;
} OutputStream;

/**************************************************************/
/* audio output */

static float t, tincr, tincr2;

static int audio_input_frame_size;

/*
 * add an audio output stream
 */
static AVStream *add_audio_stream(AVFormatContext *oc, enum AVCodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st;
    AVCodec *codec;

    /* find the audio encoder */
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    st = avformat_new_stream(oc, codec);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = st->codec;

    /* put sample parameters */
    c->sample_fmt     = AV_SAMPLE_FMT_S16;
    c->bit_rate       = 64000;
    c->sample_rate    = 44100;
    c->channels       = 2;
    c->channel_layout = AV_CH_LAYOUT_STEREO;

    // some formats want stream headers to be separate
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

static void open_audio(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;

    c = st->codec;

    /* open it */
    if (avcodec_open2(c, NULL, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* init signal generator */
    t     = 0;
    tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    /* Encoders that accept an arbitrary number of samples per frame report
     * CODEC_CAP_VARIABLE_FRAME_SIZE; for all others, c->frame_size is only
     * known after avcodec_open2() and dictates how many samples each
     * submitted frame must carry. */
    if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
        audio_input_frame_size = 10000;
    else
        audio_input_frame_size = c->frame_size;
}
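/* Illustrative helper, not called by this example: once the codec is open,
 * the number of samples per submitted frame (audio_input_frame_size above)
 * also fixes the duration of each encoded audio packet.  One possible way to
 * express that duration in an arbitrary stream time base is av_rescale_q()
 * from libavutil/mathematics.h; the helper name is made up for this sketch. */
static int64_t audio_frame_duration(const AVCodecContext *c, AVRational stream_tb)
{
    /* audio_input_frame_size samples at c->sample_rate samples/s,
     * rescaled into stream_tb units */
    return av_rescale_q(audio_input_frame_size,
                        (AVRational){ 1, c->sample_rate }, stream_tb);
}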
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels. */
static void get_audio_frame(AVFrame *frame, int nb_channels)
{
    int j, i, v, ret;
    int16_t *q = (int16_t*)frame->data[0];

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally;
     * make sure we do not overwrite it here */
    ret = av_frame_make_writable(frame);
    if (ret < 0)
        exit(1);

    for (j = 0; j < frame->nb_samples; j++) {
        v = (int)(sin(t) * 10000);
        for (i = 0; i < nb_channels; i++)
            *q++ = v;
        t     += tincr;
        tincr += tincr2;
    }
}

static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    AVFrame *frame = av_frame_alloc();
    int got_packet, ret;

    av_init_packet(&pkt);
    c = st->codec;

    frame->sample_rate    = c->sample_rate;
    frame->nb_samples     = audio_input_frame_size;
    frame->format         = AV_SAMPLE_FMT_S16;
    frame->channel_layout = c->channel_layout;
    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate an audio frame.\n");
        exit(1);
    }

    get_audio_frame(frame, c->channels);

    ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame\n");
        exit(1);
    }

    if (got_packet) {
        pkt.stream_index = st->index;

        /* Write the compressed frame to the media file. */
        if (av_interleaved_write_frame(oc, &pkt) != 0) {
            fprintf(stderr, "Error while writing audio frame\n");
            exit(1);
        }
    }

    av_frame_free(&frame);
}

static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
}

/**************************************************************/
/* video output */

static int frame_count;

/* Add a video output stream. */
static void add_video_stream(OutputStream *ost, AVFormatContext *oc,
                             enum AVCodecID codec_id)
{
    AVCodecContext *c;
    AVCodec *codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    ost->st = avformat_new_stream(oc, codec);
    if (!ost->st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = ost->st->codec;

    /* Put sample parameters. */
    c->bit_rate = 400000;
    /* Resolution must be a multiple of two. */
    c->width  = 352;
    c->height = 288;
    /* timebase: This is the fundamental unit of time (in seconds) in terms
     * of which frame timestamps are represented. For fixed-fps content,
     * timebase should be 1/framerate and timestamp increments should be
     * identical to 1. */
    c->time_base.den = STREAM_FRAME_RATE;
    c->time_base.num = 1;
    c->gop_size      = 12; /* emit one intra frame every twelve frames at most */
    c->pix_fmt       = STREAM_PIX_FMT;
    if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        /* just for testing, we also add B frames */
        c->max_b_frames = 2;
    }
    if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
        /* Needed to avoid using macroblocks in which some coeffs overflow.
         * This does not happen with normal video, it just happens here as
         * the motion of the chroma plane does not match the luma plane. */
        c->mb_decision = 2;
    }
    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
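/* A note on video timestamps (illustrative, not part of the original flow):
 * with c->time_base set to 1/STREAM_FRAME_RATE above, the n-th frame sits at
 * pts == n in codec time-base units; this example never sets frame->pts
 * explicitly and relies on the encoder/muxer defaults.  If a different time
 * base were chosen, the pts could be derived with something like
 *
 *     frame->pts = av_rescale_q(n, (AVRational){ 1, STREAM_FRAME_RATE },
 *                               c->time_base);
 *
 * (hypothetical snippet; 'n' would be the running frame counter). */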
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    int ret;

    picture = av_frame_alloc();
    if (!picture)
        return NULL;

    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;

    /* allocate the buffers for the frame data */
    ret = av_frame_get_buffer(picture, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }

    return picture;
}

static void open_video(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;

    c = ost->st->codec;

    /* open the codec */
    if (avcodec_open2(c, NULL, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* Allocate the encoded raw picture. */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index,
                           int width, int height)
{
    int x, y, i, ret;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally;
     * make sure we do not overwrite it here */
    ret = av_frame_make_writable(pict);
    if (ret < 0)
        exit(1);

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}
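/* Illustrative variant, not called by this example: fill a frame with flat
 * mid-gray using the same YUV420P plane/linesize conventions as
 * fill_yuv_image().  The chroma planes are subsampled by two in each
 * direction, hence the half-size loops; linesize may be larger than the
 * visible width because of the alignment requested in alloc_picture().
 * Assumes 'pict' is already writable. */
static void fill_gray_image(AVFrame *pict, int width, int height)
{
    int x, y;

    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = 128; /* Y */

    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128; /* Cb */
            pict->data[2][y * pict->linesize[2] + x] = 128; /* Cr */
        }
    }
}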
static void write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    int ret;
    AVCodecContext *c;
    static struct SwsContext *img_convert_ctx;

    c = ost->st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* No more frames to compress. The codec has a latency of a few
         * frames if using B-frames, so we get the last frames by
         * passing the same picture again. */
    } else {
        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (img_convert_ctx == NULL) {
                img_convert_ctx = sws_getContext(c->width, c->height,
                                                 AV_PIX_FMT_YUV420P,
                                                 c->width, c->height,
                                                 c->pix_fmt,
                                                 sws_flags, NULL, NULL, NULL);
                if (img_convert_ctx == NULL) {
                    fprintf(stderr,
                            "Cannot initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(ost->tmp_frame, frame_count, c->width, c->height);
            sws_scale(img_convert_ctx,
                      ost->tmp_frame->data, ost->tmp_frame->linesize,
                      0, c->height, ost->frame->data, ost->frame->linesize);
        } else {
            fill_yuv_image(ost->frame, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* Raw video case - the API will change slightly in the near
         * future for that. */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags        |= AV_PKT_FLAG_KEY;
        pkt.stream_index  = ost->st->index;
        pkt.data          = (uint8_t *)ost->frame;
        pkt.size          = sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        AVPacket pkt = { 0 };
        int got_packet;

        av_init_packet(&pkt);

        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, ost->frame, &got_packet);
        /* If size is zero, it means the image was buffered. */
        if (!ret && got_packet && pkt.size) {
            av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
            pkt.stream_index = ost->st->index;

            /* Write the compressed frame to the media file. */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}

static void close_video(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_close(ost->st->codec);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmp_frame);
}

/**************************************************************/
/* media file output */
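/* Note on the interleaving loop in main() below (illustrative only): the
 * example converts both stream clocks to double and writes whichever stream
 * is behind.  An alternative that stays in integer arithmetic is
 * av_compare_ts() from libavutil/mathematics.h, e.g.
 *
 *     if (av_compare_ts(audio_ts, audio_st->time_base,
 *                       video_ts, video_st.st->time_base) <= 0)
 *         write_audio_frame(oc, audio_st);
 *
 * (hypothetical snippet with made-up 'audio_ts'/'video_ts' counters; this
 * example keeps the simpler double comparison). */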
int main(int argc, char **argv)
{
    OutputStream video_st;
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st;
    double audio_pts, video_pts;
    int have_video = 0;
    int i;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* Autodetect the output format from the name. default is MPEG. */
    fmt = av_guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = av_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        return 1;
    }

    /* Allocate the output media context. */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        return 1;
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    audio_st = NULL;
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_video_stream(&video_st, oc, fmt->video_codec);
        have_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, fmt->audio_codec);
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, &video_st);
    if (audio_st)
        open_audio(oc, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* Write the stream header, if any. */
    avformat_write_header(oc, NULL);

    for (;;) {
        /* Compute current audio and video time. */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num /
                        audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (have_video)
            video_pts = (double)video_st.st->pts.val * video_st.st->time_base.num /
                        video_st.st->time_base.den;
        else
            video_pts = 0.0;

        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!have_video || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames */
        if (!have_video || (have_video && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, &video_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_video(oc, &video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    /* Free the streams. */
    for (i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    av_free(oc);

    return 0;
}
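/* A typical way to build this example (assuming pkg-config can locate the
 * FFmpeg/libav development packages; adjust library names and paths to your
 * installation):
 *
 *     cc -o output output.c \
 *         $(pkg-config --cflags --libs libavformat libavcodec libswscale libavutil) -lm
 *
 * Running e.g. './output test.mpg' then picks the container and default
 * codecs from the file extension. */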