diff --git a/.gitignore b/.gitignore
index 3e8ed0046e..c922b8855d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -44,5 +44,4 @@ tools/pktdumper
 tools/probetest
 tools/qt-faststart
 tools/trasher
-tools/trasher*.d
 version.h
diff --git a/doc/TODO b/doc/TODO
deleted file mode 100644
index 8ff8a6b388..0000000000
--- a/doc/TODO
+++ /dev/null
@@ -1,82 +0,0 @@
-ffmpeg TODO list:
-----------------
-
-Fabrice's TODO list: (unordered)
--------------------
-Short term:
-
-- use AVFMTCTX_DISCARD_PKT in ffplay so that DV has a chance to work
-- add RTSP regression test (both client and server)
-- make ffserver allocate AVFormatContext
-- clean up (incompatible change, for 0.5.0):
-    * AVStream -> AVComponent
-    * AVFormatContext -> AVInputStream/AVOutputStream
-    * suppress rate_emu from AVCodecContext
-- add new float/integer audio filterting and conversion : suppress
-  CODEC_ID_PCM_xxc and use CODEC_ID_RAWAUDIO.
-- fix telecine and frame rate conversion
-
-Long term (ask me if you want to help):
-
-- commit new imgconvert API and new PIX_FMT_xxx alpha formats
-- commit new LGPL'ed float and integer-only AC3 decoder
-- add WMA integer-only decoder
-- add new MPEG4-AAC audio decoder (both integer-only and float version)
-
-Michael's TODO list: (unordered) (if anyone wanna help with sth, just ask)
--------------------
-- optimize H264 CABAC
-- more optimizations
-- simper rate control
-
-Philip'a TODO list: (alphabetically ordered) (please help)
------------------
-- Add a multi-ffm filetype so that feeds can be recorded into multiple files rather
-  than one big file.
-- Authenticated users support -- where the authentication is in the URL
-- Change ASF files so that the embedded timestamp in the frames is right rather
-  than being an offset from the start of the stream
-- Make ffm files more resilient to changes in the codec structures so that you
-  can play old ffm files.
-
-Baptiste's TODO list:
------------------
-- mov edit list support (AVEditList)
-- YUV 10 bit per component support "2vuy"
-- mxf muxer
-- mpeg2 non linear quantizer
-
-unassigned TODO: (unordered)
----------------
-- use AVFrame for audio codecs too
-- rework aviobuf.c buffering strategy and fix url_fskip
-- generate optimal huffman tables for mjpeg encoding
-- fix ffserver regression tests
-- support xvids motion estimation
-- support x264s motion estimation
-- support x264s rate control
-- SNOW: non translational motion compensation
-- SNOW: more optimal quantization
-- SNOW: 4x4 block support
-- SNOW: 1/8 pel motion compensation support
-- SNOW: iterative motion estimation based on subsampled images
-- SNOW: try B frames and MCTF and see how their PSNR/bitrate/complexity behaves
-- SNOW: try to use the wavelet transformed MC-ed reference frame as context for the entropy coder
-- SNOW: think about/analyize how to make snow use multiple cpus/threads
-- SNOW: finish spec
-- FLAC: lossy encoding (viterbi and naive scalar quantization)
-- libavfilter
-- JPEG2000 decoder & encoder
-- MPEG4 GMC encoding support
-- macroblock based pixel format (better cache locality, somewhat complex, one paper claimed it faster for high res)
-- regression tests for codecs which do not have an encoder (I+P-frame bitstream in the 'master' branch)
-- add support for using mplayers video filters to ffmpeg
-- H264 encoder
-- per MB ratecontrol (so VCD and such do work better)
-- write a script which iteratively changes all functions between always_inline and noinline and benchmarks the result to find the best set of inlined functions
-- convert all the non SIMD asm into small asm vs. C testcases and submit them to the gcc devels so they can improve gcc
-- generic audio mixing API
-- extract PES packetizer from PS muxer and use it for new TS muxer
-- implement automatic AVBistreamFilter activation
-- make cabac encoder use bytestream (see http://trac.videolan.org/x264/changeset/?format=diff&new=651)
-- merge imdct and windowing, the current code does considerable amounts of redundant work
diff --git a/ffmpeg.c b/ffmpeg.c
index dc746488c3..781ebf5fa4 100644
--- a/ffmpeg.c
+++ b/ffmpeg.c
@@ -1155,7 +1155,7 @@ static void do_video_out(AVFormatContext *s,
                          AVOutputStream *ost,
                          AVInputStream *ist,
                          AVFrame *in_picture,
-                         int *frame_size)
+                         int *frame_size, float quality)
 {
     int nb_frames, i, ret, av_unused resample_changed;
     AVFrame *final_picture, *formatted_picture;
@@ -1286,7 +1286,7 @@ static void do_video_out(AVFormatContext *s,
 
         /* handles sameq here. This is not correct because it may
            not be a global option */
-        big_picture.quality = same_quality ? ist->st->quality : ost->st->quality;
+        big_picture.quality = quality;
         if(!me_threshold)
             big_picture.pict_type = 0;
 //        big_picture.pts = AV_NOPTS_VALUE;
@@ -1530,6 +1530,7 @@ static int output_packet(AVInputStream *ist, int ist_index,
 #if CONFIG_AVFILTER
     int frame_available;
 #endif
+    float quality;
 
     AVPacket avpkt;
     int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
@@ -1610,7 +1611,7 @@ static int output_packet(AVInputStream *ist, int ist_index,
 
                     ret = avcodec_decode_video2(ist->st->codec,
                                                 &picture, &got_output, &avpkt);
-                    ist->st->quality= picture.quality;
+                    quality = same_quality ? picture.quality : 0;
                     if (ret < 0)
                         goto fail_decode;
                     if (!got_output) {
@@ -1736,7 +1737,8 @@ static int output_packet(AVInputStream *ist, int ist_index,
                     if (ost->picref->video && !ost->frame_aspect_ratio)
                         ost->st->codec->sample_aspect_ratio = ost->picref->video->sample_aspect_ratio;
 #endif
-                    do_video_out(os, ost, ist, &picture, &frame_size);
+                    do_video_out(os, ost, ist, &picture, &frame_size,
+                                 same_quality ? quality : ost->st->codec->global_quality);
                     if (vstats_filename && frame_size)
                         do_video_stats(os, ost, frame_size);
                     break;
@@ -3602,8 +3604,7 @@ static void new_video_stream(AVFormatContext *oc, int file_idx)
         video_enc->gop_size = 0;
         if (video_qscale || same_quality) {
             video_enc->flags |= CODEC_FLAG_QSCALE;
-            video_enc->global_quality=
-                st->quality = FF_QP2LAMBDA * video_qscale;
+            video_enc->global_quality = FF_QP2LAMBDA * video_qscale;
         }
 
         if(intra_matrix)
@@ -3721,7 +3722,7 @@ static void new_audio_stream(AVFormatContext *oc, int file_idx)
 
         if (audio_qscale > QSCALE_NONE) {
             audio_enc->flags |= CODEC_FLAG_QSCALE;
-            audio_enc->global_quality = st->quality = FF_QP2LAMBDA * audio_qscale;
+            audio_enc->global_quality = FF_QP2LAMBDA * audio_qscale;
         }
         if (audio_channels)
             audio_enc->channels = audio_channels;
diff --git a/ffserver.c b/ffserver.c
index b2a32bb9fa..d589f6decf 100644
--- a/ffserver.c
+++ b/ffserver.c
@@ -2128,13 +2128,12 @@ static int open_input_stream(HTTPContext *c, const char *info)
     char buf[128];
     char input_filename[1024];
    AVFormatContext *s = NULL;
-    int buf_size, i, ret;
+    int i, ret;
     int64_t stream_pos;
 
     /* find file name */
     if (c->stream->feed) {
         strcpy(input_filename, c->stream->feed->feed_filename);
-        buf_size = FFM_PACKET_SIZE;
         /* compute position (absolute time) */
         if (av_find_info_tag(buf, sizeof(buf), "date", info)) {
             if ((ret = av_parse_time(&stream_pos, buf, 0)) < 0)
@@ -2146,7 +2145,6 @@ static int open_input_stream(HTTPContext *c, const char *info)
             stream_pos = av_gettime() - c->stream->prebuffer * (int64_t)1000;
     } else {
         strcpy(input_filename, c->stream->feed_filename);
-        buf_size = 0;
         /* compute position (relative time) */
         if (av_find_info_tag(buf, sizeof(buf), "date", info)) {
             if ((ret = av_parse_time(&stream_pos, buf, 1)) < 0)
diff --git a/libavcodec/binkaudio.c b/libavcodec/binkaudio.c
index ff36458c7e..d7dbd283e8 100644
--- a/libavcodec/binkaudio.c
+++ b/libavcodec/binkaudio.c
@@ -90,8 +90,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
         return -1;
     }
 
-    if (avctx->extradata && avctx->extradata_size > 0)
-        s->version_b = avctx->extradata[0];
+    s->version_b = avctx->extradata && avctx->extradata[3] == 'b';
 
     if (avctx->codec->id == CODEC_ID_BINKAUDIO_RDFT) {
         // audio is already interleaved for the RDFT format variant
diff --git a/libavcodec/flashsv.c b/libavcodec/flashsv.c
index a57e851078..c80fa33594 100644
--- a/libavcodec/flashsv.c
+++ b/libavcodec/flashsv.c
@@ -25,26 +25,10 @@
  * Flash Screen Video decoder
  * @author Alex Beregszaszi
  * @author Benjamin Larsson
- */
-
-/* Bitstream description
- * The picture is divided into blocks that are zlib compressed.
- *
- * The decoder is fed complete frames, the frameheader contains:
- * 4bits of block width
- * 12bits of frame width
- * 4bits of block height
- * 12bits of frame height
  *
- * Directly after the header are the compressed blocks. The blocks
- * have their compressed size represented with 16bits in the beginnig.
- * If the size = 0 then the block is unchanged from the previous frame.
- * All blocks are decompressed until the buffer is consumed.
- *
- * Encoding ideas, a basic encoder would just use a fixed block size.
- * Block sizes can be multipels of 16, from 16 to 256. The blocks don't
- * have to be quadratic. A brute force search with a set of diffrent
- * block sizes should give a better result then to just use a fixed size.
+ * A description of the bitstream format for Flash Screen Video version 1/2
+ * is part of the SWF File Format Specification (version 10), which can be
+ * downloaded from http://www.adobe.com/devnet/swf.html.
  */
 
 #include <stdio.h>
@@ -71,7 +55,7 @@ static void copy_region(uint8_t *sptr, uint8_t *dptr,
     int i;
 
     for (i = dx + h; i > dx; i--) {
-        memcpy(dptr + (i * stride) + dy * 3, sptr, w * 3);
+        memcpy(dptr + i * stride + dy * 3, sptr, w * 3);
         sptr += w * 3;
     }
 }
@@ -86,7 +70,7 @@ static av_cold int flashsv_decode_init(AVCodecContext *avctx)
     s->zstream.zalloc = Z_NULL;
     s->zstream.zfree  = Z_NULL;
     s->zstream.opaque = Z_NULL;
-    zret = inflateInit(&(s->zstream));
+    zret = inflateInit(&s->zstream);
     if (zret != Z_OK) {
         av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
         return 1;
@@ -102,7 +86,6 @@ static av_cold int flashsv_decode_init(AVCodecContext *avctx)
 static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
                                 int *data_size, AVPacket *avpkt)
 {
-    const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     FlashSVContext *s = avctx->priv_data;
     int h_blocks, v_blocks, h_part, v_part, i, j;
@@ -114,7 +97,7 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
     if (buf_size < 4)
         return -1;
 
-    init_get_bits(&gb, buf, buf_size * 8);
+    init_get_bits(&gb, avpkt->data, buf_size * 8);
 
     /* start to parse the bitstream */
     s->block_width  = 16 * (get_bits(&gb, 4) + 1);
@@ -122,7 +105,7 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
     s->block_height = 16 * (get_bits(&gb, 4) + 1);
     s->image_height = get_bits(&gb, 12);
 
-    /* calculate amount of blocks and the size of the border blocks */
+    /* calculate number of blocks and size of border (partial) blocks */
     h_blocks = s->image_width  / s->block_width;
     h_part   = s->image_width  % s->block_width;
     v_blocks = s->image_height / s->block_height;
@@ -139,26 +122,29 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
     }
     s->block_size = s->block_width * s->block_height;
 
-    /* init the image size once */
-    if ((avctx->width == 0) && (avctx->height == 0)) {
+    /* initialize the image size once */
+    if (avctx->width == 0 && avctx->height == 0) {
         avctx->width  = s->image_width;
         avctx->height = s->image_height;
     }
 
     /* check for changes of image width and image height */
-    if ((avctx->width != s->image_width) || (avctx->height != s->image_height)) {
-        av_log(avctx, AV_LOG_ERROR, "Frame width or height differs from first frames!\n");
-        av_log(avctx, AV_LOG_ERROR, "fh = %d, fv %d vs ch = %d, cv = %d\n", avctx->height,
-               avctx->width, s->image_height, s->image_width);
-        return -1;
+    if (avctx->width != s->image_width || avctx->height != s->image_height) {
+        av_log(avctx, AV_LOG_ERROR,
+               "Frame width or height differs from first frames!\n");
+        av_log(avctx, AV_LOG_ERROR, "fh = %d, fv %d vs ch = %d, cv = %d\n",
+               avctx->height, avctx->width, s->image_height, s->image_width);
+        return AVERROR_INVALIDDATA;
     }
 
-    av_log(avctx, AV_LOG_DEBUG, "image: %dx%d block: %dx%d num: %dx%d part: %dx%d\n",
-           s->image_width, s->image_height, s->block_width, s->block_height,
-           h_blocks, v_blocks, h_part, v_part);
+    av_dlog(avctx, "image: %dx%d block: %dx%d num: %dx%d part: %dx%d\n",
+            s->image_width, s->image_height, s->block_width, s->block_height,
+            h_blocks, v_blocks, h_part, v_part);
 
-    s->frame.reference = 1;
-    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
+    s->frame.reference    = 3;
+    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
+                            FF_BUFFER_HINTS_PRESERVE |
+                            FF_BUFFER_HINTS_REUSABLE;
     if (avctx->reget_buffer(avctx, &s->frame) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return -1;
@@ -167,48 +153,50 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
 
     /* loop over all block columns */
     for (j = 0; j < v_blocks + (v_part ? 1 : 0); j++) {
 
-        int hp = j * s->block_height; // horiz position in frame
-        int hs = (j < v_blocks) ? s->block_height : v_part; // size of block
+        int hp = j * s->block_height; // vertical position in frame
+        int hs = (j < v_blocks) ? s->block_height : v_part; // block size
 
         /* loop over all block rows */
         for (i = 0; i < h_blocks + (h_part ? 1 : 0); i++) {
-            int wp = i * s->block_width; // vert position in frame
-            int ws = (i < h_blocks) ? s->block_width : h_part; // size of block
+            int wp = i * s->block_width; // horizontal position in frame
+            int ws = (i < h_blocks) ? s->block_width : h_part; // block size
 
             /* get the size of the compressed zlib chunk */
             int size = get_bits(&gb, 16);
             if (8 * size > get_bits_left(&gb)) {
                 avctx->release_buffer(avctx, &s->frame);
                 s->frame.data[0] = NULL;
-                return -1;
+                return AVERROR_INVALIDDATA;
             }
 
-            if (size == 0) {
-                /* no change, don't do anything */
-            } else {
+            /* skip unchanged blocks, which have size 0 */
+            if (size) {
                 /* decompress block */
-                int ret = inflateReset(&(s->zstream));
+                int ret = inflateReset(&s->zstream);
                 if (ret != Z_OK) {
-                    av_log(avctx, AV_LOG_ERROR, "error in decompression (reset) of block %dx%d\n", i, j);
+                    av_log(avctx, AV_LOG_ERROR,
+                           "error in decompression (reset) of block %dx%d\n", i, j);
                     /* return -1; */
                 }
-                s->zstream.next_in   = buf + (get_bits_count(&gb) / 8);
+                s->zstream.next_in   = avpkt->data + get_bits_count(&gb) / 8;
                 s->zstream.avail_in  = size;
                 s->zstream.next_out  = s->tmpblock;
                 s->zstream.avail_out = s->block_size * 3;
-                ret = inflate(&(s->zstream), Z_FINISH);
+                ret = inflate(&s->zstream, Z_FINISH);
                 if (ret == Z_DATA_ERROR) {
                     av_log(avctx, AV_LOG_ERROR, "Zlib resync occurred\n");
-                    inflateSync(&(s->zstream));
-                    ret = inflate(&(s->zstream), Z_FINISH);
+                    inflateSync(&s->zstream);
+                    ret = inflate(&s->zstream, Z_FINISH);
                 }
 
-                if ((ret != Z_OK) && (ret != Z_STREAM_END)) {
-                    av_log(avctx, AV_LOG_ERROR, "error in decompression of block %dx%d: %d\n", i, j, ret);
+                if (ret != Z_OK && ret != Z_STREAM_END) {
+                    av_log(avctx, AV_LOG_ERROR,
+                           "error in decompression of block %dx%d: %d\n", i, j, ret);
                     /* return -1; */
                 }
-                copy_region(s->tmpblock, s->frame.data[0], s->image_height - (hp + hs + 1),
+                copy_region(s->tmpblock, s->frame.data[0],
+                            s->image_height - (hp + hs + 1),
                             wp, hs, ws, s->frame.linesize[0]);
                 skip_bits_long(&gb, 8 * size); /* skip the consumed bits */
             }
@@ -230,7 +218,7 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
 static av_cold int flashsv_decode_end(AVCodecContext *avctx)
 {
     FlashSVContext *s = avctx->priv_data;
-    inflateEnd(&(s->zstream));
+    inflateEnd(&s->zstream);
     /* release the frame if needed */
     if (s->frame.data[0])
         avctx->release_buffer(avctx, &s->frame);
diff --git a/libavcodec/h264_loopfilter.c b/libavcodec/h264_loopfilter.c
index c51ada287a..74c7dae523 100644
--- a/libavcodec/h264_loopfilter.c
+++ b/libavcodec/h264_loopfilter.c
@@ -100,7 +100,7 @@ static const uint8_t tc0_table[52*3][4] = {
     {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
 };
 
-static void av_always_inline filter_mb_edgev( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h) {
+static void av_always_inline filter_mb_edgev( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, H264Context *h) {
     const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
     const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
     const int alpha = alpha_table[index_a];
@@ -118,7 +118,7 @@ static void av_always_inline filter_mb_edgev( uint8_t *pix, int stride, int16_t
         h->h264dsp.h264_h_loop_filter_luma_intra(pix, stride, alpha, beta);
     }
 }
-static void av_always_inline filter_mb_edgecv( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h ) {
+static void av_always_inline filter_mb_edgecv( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, H264Context *h ) {
     const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
     const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
     const int alpha = alpha_table[index_a];
@@ -137,7 +137,7 @@ static void av_always_inline filter_mb_edgecv( uint8_t *pix, int stride, int16_t
     }
 }
 
-static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int16_t bS[7], int bsi, int qp ) {
+static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, const int16_t bS[7], int bsi, int qp ) {
     const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
     int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
     int alpha = alpha_table[index_a];
@@ -155,7 +155,7 @@ static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int
         h->h264dsp.h264_h_loop_filter_luma_mbaff_intra(pix, stride, alpha, beta);
     }
 }
-static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, int16_t bS[7], int bsi, int qp ) {
+static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, const int16_t bS[7], int bsi, int qp ) {
     const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
     int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
     int alpha = alpha_table[index_a];
@@ -174,7 +174,7 @@ static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, in
     }
 }
 
-static void av_always_inline filter_mb_edgeh( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h ) {
+static void av_always_inline filter_mb_edgeh( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, H264Context *h ) {
     const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
     const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
     const int alpha = alpha_table[index_a];
@@ -193,7 +193,7 @@ static void av_always_inline filter_mb_edgeh( uint8_t *pix, int stride, int16_t
     }
 }
 
-static void av_always_inline filter_mb_edgech( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h ) {
+static void av_always_inline filter_mb_edgech( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, H264Context *h ) {
     const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
     const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
     const int alpha = alpha_table[index_a];
@@ -247,9 +247,9 @@ void ff_h264_filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y,
         return;
 
     if( IS_INTRA(mb_type) ) {
-        int16_t bS4[4] = {4,4,4,4};
-        int16_t bS3[4] = {3,3,3,3};
-        int16_t *bSH = FIELD_PICTURE ? bS3 : bS4;
+        static const int16_t bS4[4] = {4,4,4,4};
+        static const int16_t bS3[4] = {3,3,3,3};
+        const int16_t *bSH = FIELD_PICTURE ? bS3 : bS4;
         if(left_type)
             filter_mb_edgev( &img_y[4*0], linesize, bS4, qp0, h);
         if( IS_8x8DCT(mb_type) ) {
diff --git a/libavformat/avformat.h b/libavformat/avformat.h
index 6b0cd4917b..f8eb7f7c9d 100644
--- a/libavformat/avformat.h
+++ b/libavformat/avformat.h
@@ -528,12 +528,14 @@ typedef struct AVStream {
     int stream_copy; /**< If set, just copy stream. */
     enum AVDiscard discard; ///< Selects which packets can be discarded at will and do not need to be demuxed.
 
+#if FF_API_AVSTREAM_QUALITY
     //FIXME move stuff to a flags field?
     /**
      * Quality, as it has been removed from AVCodecContext and put in AVVideoFrame.
      * MN: dunno if that is the right place for it
     */
-    float quality;
+    attribute_deprecated float quality;
+#endif
 
     /**
      * Decoding: pts of the first frame of the stream, in stream time base.
diff --git a/libavformat/bink.c b/libavformat/bink.c
index eed52cdb49..34c1a9944f 100644
--- a/libavformat/bink.c
+++ b/libavformat/bink.c
@@ -134,15 +134,18 @@ static int read_header(AVFormatContext *s, AVFormatParameters *ap)
             if (!ast)
                 return AVERROR(ENOMEM);
             ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
+            ast->codec->codec_tag  = 0;
             ast->codec->sample_rate = avio_rl16(pb);
             av_set_pts_info(ast, 64, 1, ast->codec->sample_rate);
             flags = avio_rl16(pb);
             ast->codec->codec_id = flags & BINK_AUD_USEDCT ?
                                    CODEC_ID_BINKAUDIO_DCT : CODEC_ID_BINKAUDIO_RDFT;
             ast->codec->channels = flags & BINK_AUD_STEREO ? 2 : 1;
-            ast->codec->extradata = av_mallocz(1 + FF_INPUT_BUFFER_PADDING_SIZE);
-            ast->codec->extradata_size = 1;
-            ast->codec->extradata[0] = vst->codec->codec_tag == MKTAG('B','I','K','b');
+            ast->codec->extradata = av_mallocz(4 + FF_INPUT_BUFFER_PADDING_SIZE);
+            if (!ast->codec->extradata)
+                return AVERROR(ENOMEM);
+            ast->codec->extradata_size = 4;
+            AV_WL32(ast->codec->extradata, vst->codec->codec_tag);
         }
 
         for (i = 0; i < bink->num_audio_tracks; i++)
diff --git a/libavformat/ffmdec.c b/libavformat/ffmdec.c
index f0832659d5..d4821da47f 100644
--- a/libavformat/ffmdec.c
+++ b/libavformat/ffmdec.c
@@ -301,7 +301,6 @@ static int ffm_read_header(AVFormatContext *s, AVFormatParameters *ap)
         codec->codec_id = avio_rb32(pb);
         codec->codec_type = avio_r8(pb); /* codec_type */
         codec->bit_rate = avio_rb32(pb);
-        st->quality = avio_rb32(pb);
         codec->flags = avio_rb32(pb);
         codec->flags2 = avio_rb32(pb);
         codec->debug = avio_rb32(pb);
diff --git a/libavformat/ffmenc.c b/libavformat/ffmenc.c
index b3285b5c63..595ba88223 100644
--- a/libavformat/ffmenc.c
+++ b/libavformat/ffmenc.c
@@ -114,7 +114,6 @@ static int ffm_write_header(AVFormatContext *s)
         avio_wb32(pb, codec->codec_id);
         avio_w8(pb, codec->codec_type);
         avio_wb32(pb, codec->bit_rate);
-        avio_wb32(pb, st->quality);
         avio_wb32(pb, codec->flags);
         avio_wb32(pb, codec->flags2);
         avio_wb32(pb, codec->debug);
diff --git a/libavformat/version.h b/libavformat/version.h
index d358285e3c..9aaf395170 100644
--- a/libavformat/version.h
+++ b/libavformat/version.h
@@ -77,5 +77,8 @@
 #ifndef FF_API_FLAG_RTP_HINT
 #define FF_API_FLAG_RTP_HINT           (LIBAVFORMAT_VERSION_MAJOR < 54)
 #endif
+#ifndef FF_API_AVSTREAM_QUALITY
+#define FF_API_AVSTREAM_QUALITY        (LIBAVFORMAT_VERSION_MAJOR < 54)
+#endif
 
 #endif /* AVFORMAT_VERSION_H */
diff --git a/libavutil/eval.c b/libavutil/eval.c
index 3833ca87f2..4e2cb1095c 100644
--- a/libavutil/eval.c
+++ b/libavutil/eval.c
@@ -500,6 +500,7 @@ int av_expr_parse(AVExpr **expr, const char *s,
     if ((ret = parse_expr(&e, &p)) < 0)
         goto end;
     if (*p.s) {
+        av_expr_free(e);
         av_log(&p, AV_LOG_ERROR, "Invalid chars '%s' at the end of expression '%s'\n", p.s, s0);
         ret = AVERROR(EINVAL);
         goto end;
@@ -601,7 +602,7 @@ int main(int argc, char **argv)
         "-PI",
         "+PI",
         "1+(5-2)^(3-1)+1/2+sin(PI)-max(-2.2,-3.1)",
-        "80G/80Gi"
+        "80G/80Gi",
         "1k",
         "1Gi",
         "1gi",
diff --git a/libpostproc/postprocess.c b/libpostproc/postprocess.c
index 589c2cead6..944d581a0f 100644
--- a/libpostproc/postprocess.c
+++ b/libpostproc/postprocess.c
@@ -246,7 +246,6 @@ static inline int isVertDC_C(uint8_t src[], int stride, PPContext *c)
 static inline int isHorizMinMaxOk_C(uint8_t src[], int stride, int QP)
 {
     int i;
-#if 1
     for(i=0; i<2; i++){
         if((unsigned)(src[0] - src[5] + 2*QP) > 4*QP) return 0;
         src += stride;
@@ -257,19 +256,11 @@ static inline int isHorizMinMaxOk_C(uint8_t src[], int stride, int QP)
         if((unsigned)(src[6] - src[3] + 2*QP) > 4*QP) return 0;
         src += stride;
     }
-#else
-    for(i=0; i<8; i++){
-        if((unsigned)(src[0] - src[7] + 2*QP) > 4*QP) return 0;
-        src += stride;
-    }
-#endif
     return 1;
 }
 
 static inline int isVertMinMaxOk_C(uint8_t src[], int stride, int QP)
 {
-#if 1
-#if 1
     int x;
     src+= stride*4;
     for(x=0; x<BLOCK_SIZE; x+=4){
@@ -278,30 +269,7 @@ static inline int isVertMinMaxOk_C(uint8_t src[], int stride, int QP)
         if((unsigned)(src[2+x + 3*stride] - src[2+x + 0*stride] + 2*QP) > 4*QP) return 0;
         if((unsigned)(src[3+x + 6*stride] - src[3+x + 3*stride] + 2*QP) > 4*QP) return 0;
     }
-#else
-    int x;
-    src+= stride*3;
-    for(x=0; x<BLOCK_SIZE; x++){
-        if((unsigned)(src[x + stride] - src[x + (stride<<3)] + 2*QP) > 4*QP) return 0;
-    }
-#endif
-    return 1;
-#else
-    int x;
-    src+= stride*4;
-    for(x=0; x<BLOCK_SIZE; x++){
-        int min=255;
-        int max=0;
-        int y;
-        for(y=0; y<8; y++){
-            int v= src[x + y*stride];
-            if(v>max) max=v;
-            if(v<min) min=v;
-        }
-        if(max-min > 2*QP) return 0;
-    }
     return 1;
-#endif
 }
 
 static inline int horizClassify_C(uint8_t src[], int stride, PPContext *c)
diff --git a/libswscale/swscale_internal.h b/libswscale/swscale_internal.h
index 86ab4becb5..73dc25d547 100644
--- a/libswscale/swscale_internal.h
+++ b/libswscale/swscale_internal.h
@@ -435,7 +435,10 @@ typedef struct SwsContext {
      * @param dstW    width of destination image
      * @param src     pointer to source data to be scaled. If scaling depth
      *                is 8, this is 8bpp in 8bpp (uint8_t) width. If scaling
-     *                depth is 16, this is 16bpp in 16bpp (uint16_t) depth.
+     *                depth is 16, this is native depth in 16bbp (uint16_t)
+     *                width. In other words, for 9-bit YUV input, this is
+     *                9bpp, for 10-bit YUV input, this is 10bpp, and for
+     *                16-bit RGB or YUV, this is 16bpp.
      * @param filter  filter coefficients to be used per output pixel for
      *                scaling. This contains 14bpp filtering coefficients.
      *                Guaranteed to contain dstW * filterSize entries.
diff --git a/tests/ref/fate/eval b/tests/ref/fate/eval
index 7042a017fd..3bc35db633 100644
--- a/tests/ref/fate/eval
+++ b/tests/ref/fate/eval
@@ -16,8 +16,11 @@ Evaluating '+PI'
 Evaluating '1+(5-2)^(3-1)+1/2+sin(PI)-max(-2.2,-3.1)'
 '1+(5-2)^(3-1)+1/2+sin(PI)-max(-2.2,-3.1)' -> 12.700000
 
-Evaluating '80G/80Gi1k'
-'80G/80Gi1k' -> nan
+Evaluating '80G/80Gi'
+'80G/80Gi' -> 0.931323
+
+Evaluating '1k'
+'1k' -> 1000.000000
 
 Evaluating '1Gi'
 '1Gi' -> 1073741824.000000
diff --git a/tests/ref/lavf/ffm b/tests/ref/lavf/ffm
index b89af6e3e5..b20e132b45 100644
--- a/tests/ref/lavf/ffm
+++ b/tests/ref/lavf/ffm
@@ -1,3 +1,3 @@
-b6acf782a38d313153b68c4ca204fc90 *./tests/data/lavf/lavf.ffm
+f9bee27ea1b6b83a06b5f9efb0a4ac1f *./tests/data/lavf/lavf.ffm
 376832 ./tests/data/lavf/lavf.ffm
 ./tests/data/lavf/lavf.ffm CRC=0xf361ed74