avcodec: change number of plane pointers from 4 to 8 at next major bump.

Add AV_NUM_DATA_POINTERS to simplify the bump transition.
This will allow supporting more planar audio channels without having to
allocate separate pointer arrays.
commit 560f773c7d
parent 9b9815eec4
Author: Justin Ruggles
 doc/APIchanges         |  6
 libavcodec/avcodec.h   | 23
 libavcodec/huffyuv.c   |  7
 libavcodec/mpegvideo.c | 12
 libavcodec/utils.c     | 27
 libavcodec/version.h   |  6
 libavcodec/vp3.c       |  7
 7 files changed
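The change itself is mechanical: every hard-coded plane count of 4 becomes AV_NUM_DATA_POINTERS, and every loop or assignment that only touched planes 0..3 is widened to cover the whole array. A minimal sketch of that pattern, mirroring the huffyuv/mpegvideo/vp3 hunks below (the helper name fill_offsets is illustrative and not part of the commit):

#include <libavcodec/avcodec.h>

/* Fill the per-plane offsets handed to draw_horiz_band().
 * Planes beyond the ones actually in use must still be initialized,
 * so the loop runs to AV_NUM_DATA_POINTERS instead of a hard-coded 4. */
static void fill_offsets(const AVFrame *frame, int y, int cy,
                         int offset[AV_NUM_DATA_POINTERS])
{
    int i;
    offset[0] = frame->linesize[0] * y;   /* luma slice start   */
    offset[1] = frame->linesize[1] * cy;  /* chroma slice start */
    offset[2] = frame->linesize[2] * cy;
    for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
        offset[i] = 0;                    /* unused planes      */
}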

--- a/doc/APIchanges
+++ b/doc/APIchanges
@@ -13,6 +13,12 @@ libavutil: 2011-04-18
 
 API changes, most recent first:
 
+2011-xx-xx - xxxxxxx - lavc 53.24.0
+  Change AVFrame.data[4]/base[4]/linesize[4]/error[4] to [8] at next major bump.
+  Change AVPicture.data[4]/linesize[4] to [8] at next major bump.
+  Change AVCodecContext.error[4] to [8] at next major bump.
+  Add AV_NUM_DATA_POINTERS to simplify the bump transition.
+
 2011-11-23 - bbb46f3 - lavu 51.18.0
   Add av_samples_get_buffer_size(), av_samples_fill_arrays(), and
   av_samples_alloc(), to samplefmt.h.

--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -927,21 +927,26 @@ typedef struct AVPacket {
  * sizeof(AVFrame) must not be used outside libav*.
  */
 typedef struct AVFrame {
+#if FF_API_DATA_POINTERS
+#define AV_NUM_DATA_POINTERS 4
+#else
+#define AV_NUM_DATA_POINTERS 8
+#endif
     /**
      * pointer to the picture planes.
      * This might be different from the first allocated byte
      * - encoding:
      * - decoding:
      */
-    uint8_t *data[4];
-    int linesize[4];
+    uint8_t *data[AV_NUM_DATA_POINTERS];
+    int linesize[AV_NUM_DATA_POINTERS];
     /**
      * pointer to the first allocated byte of the picture. Can be used in get_buffer/release_buffer.
      * This isn't used by libavcodec unless the default get/release_buffer() is used.
      * - encoding:
      * - decoding:
      */
-    uint8_t *base[4];
+    uint8_t *base[AV_NUM_DATA_POINTERS];
     /**
      * 1 -> keyframe, 0-> not
      * - encoding: Set by libavcodec.
@@ -1065,7 +1070,7 @@ typedef struct AVFrame {
      * - encoding: Set by libavcodec. if flags&CODEC_FLAG_PSNR.
      * - decoding: unused
      */
-    uint64_t error[4];
+    uint64_t error[AV_NUM_DATA_POINTERS];
 
     /**
      * type of the buffer (to keep track of who has to deallocate data[*])
@@ -1319,7 +1324,7 @@ typedef struct AVCodecContext {
      * @param offset offset into the AVFrame.data from which the slice should be read
      */
     void (*draw_horiz_band)(struct AVCodecContext *s,
-                            const AVFrame *src, int offset[4],
+                            const AVFrame *src, int offset[AV_NUM_DATA_POINTERS],
                             int y, int type, int height);
 
     /* audio only */
@@ -1867,7 +1872,7 @@ typedef struct AVCodecContext {
      * - encoding: Set by libavcodec if flags&CODEC_FLAG_PSNR.
      * - decoding: unused
      */
-    uint64_t error[4];
+    uint64_t error[AV_NUM_DATA_POINTERS];
 
     /**
      * motion estimation comparison function
@@ -3175,8 +3180,8 @@ typedef struct AVHWAccel {
  * the last component is alpha
  */
 typedef struct AVPicture {
-    uint8_t *data[4];
-    int linesize[4];     ///< number of bytes per line
+    uint8_t *data[AV_NUM_DATA_POINTERS];
+    int linesize[AV_NUM_DATA_POINTERS];     ///< number of bytes per line
 } AVPicture;
 
 #define AVPALETTE_SIZE 1024
@@ -3794,7 +3799,7 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
  * according to avcodec_get_edge_width() before.
  */
 void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
-                               int linesize_align[4]);
+                               int linesize_align[AV_NUM_DATA_POINTERS]);
 
 enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt);
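Because AV_NUM_DATA_POINTERS expands to 4 while FF_API_DATA_POINTERS is in effect (LIBAVCODEC_VERSION_MAJOR < 54) and to 8 afterwards, code written against the constant compiles unchanged on both sides of the major bump. A hedged sketch of how caller code might use it; the fallback define and the helper reset_frame_planes are application-side assumptions, not part of the commit, and the sketch relies on the lavc 53 AVFrame layout (data/base/linesize):

#include <libavcodec/avcodec.h>

/* Fallback for lavc headers that predate AV_NUM_DATA_POINTERS
 * (hypothetical application-side shim). */
#ifndef AV_NUM_DATA_POINTERS
#define AV_NUM_DATA_POINTERS 4
#endif

/* Reset every plane pointer of a frame, however many the ABI provides. */
static void reset_frame_planes(AVFrame *frame)
{
    int i;
    for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
        frame->data[i]     = NULL;
        frame->base[i]     = NULL;
        frame->linesize[i] = 0;
    }
}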

--- a/libavcodec/huffyuv.c
+++ b/libavcodec/huffyuv.c
@@ -921,8 +921,8 @@ static int encode_bgr_bitstream(HYuvContext *s, int count){
 
 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
 static void draw_slice(HYuvContext *s, int y){
-    int h, cy;
-    int offset[4];
+    int h, cy, i;
+    int offset[AV_NUM_DATA_POINTERS];
 
     if(s->avctx->draw_horiz_band==NULL)
         return;
@@ -939,7 +939,8 @@ static void draw_slice(HYuvContext *s, int y){
     offset[0] = s->picture.linesize[0]*y;
     offset[1] = s->picture.linesize[1]*cy;
     offset[2] = s->picture.linesize[2]*cy;
-    offset[3] = 0;
+    for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
+        offset[i] = 0;
     emms_c();
 
     s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);

--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -2329,7 +2329,8 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
 
     if (s->avctx->draw_horiz_band) {
         AVFrame *src;
-        int offset[4];
+        int offset[AV_NUM_DATA_POINTERS];
+        int i;
 
         if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
             src= (AVFrame*)s->current_picture_ptr;
@@ -2339,15 +2340,14 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
             return;
 
         if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
-            offset[0]=
-            offset[1]=
-            offset[2]=
-            offset[3]= 0;
+            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
+                offset[i] = 0;
         }else{
             offset[0]= y * s->linesize;
             offset[1]=
             offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
-            offset[3]= 0;
+            for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
+                offset[i] = 0;
         }
 
         emms_c();
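On the consumer side, a draw_horiz_band() callback receives the same offset array. A hedged sketch of what such a user callback could look like after this change (the callback name and body are illustrative, not taken from the commit):

#include <libavcodec/avcodec.h>

static void my_draw_horiz_band(struct AVCodecContext *avctx, const AVFrame *src,
                               int offset[AV_NUM_DATA_POINTERS],
                               int y, int type, int height)
{
    /* The freshly decoded slice of plane i starts at src->data[i] + offset[i];
     * planes the codec does not use carry a zero offset. */
    const uint8_t *luma = src->data[0] + offset[0];

    /* ... hand 'height' rows starting at 'luma' (and the chroma planes at
     * offset[1]/offset[2]) to the display code here ... */
    (void)avctx; (void)luma; (void)y; (void)type; (void)height;
}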

--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -127,7 +127,10 @@ void avcodec_set_dimensions(AVCodecContext *s, int width, int height){
 
 #define INTERNAL_BUFFER_SIZE (32+1)
 
-void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[4]){
+void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
+                               int linesize_align[AV_NUM_DATA_POINTERS])
+{
+    int i;
     int w_align= 1;
     int h_align= 1;
 
@@ -209,10 +212,8 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int l
         *height+=2; // some of the optimized chroma MC reads one line too much
                     // which is also done in mpeg decoders with lowres > 0
 
-    linesize_align[0] =
-    linesize_align[1] =
-    linesize_align[2] =
-    linesize_align[3] = STRIDE_ALIGN;
+    for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
+        linesize_align[i] = STRIDE_ALIGN;
 //STRIDE_ALIGN is 8 for SSE* but this does not work for SVQ1 chroma planes
 //we could change STRIDE_ALIGN to 16 for x86/sse but it would increase the
 //picture size unneccessarily in some cases. The solution here is not
@@ -230,7 +231,7 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int l
 
 void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){
     int chroma_shift = av_pix_fmt_descriptors[s->pix_fmt].log2_chroma_w;
-    int linesize_align[4];
+    int linesize_align[AV_NUM_DATA_POINTERS];
     int align;
 
     avcodec_align_dimensions2(s, width, height, linesize_align);
     align = FFMAX(linesize_align[0], linesize_align[3]);
@@ -275,7 +276,7 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
             return -1;
         }
 
-        for(i=0; i<4; i++){
+        for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
             av_freep(&buf->base[i]);
             buf->data[i]= NULL;
         }
@@ -290,7 +291,7 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
         int tmpsize;
         int unaligned;
         AVPicture picture;
-        int stride_align[4];
+        int stride_align[AV_NUM_DATA_POINTERS];
         const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
 
         avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
@@ -343,6 +344,10 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
             else
                 buf->data[i] = buf->base[i] + FFALIGN((buf->linesize[i]*EDGE_WIDTH>>v_shift) + (pixel_size*EDGE_WIDTH>>h_shift), stride_align[i]);
         }
+        for (; i < AV_NUM_DATA_POINTERS; i++) {
+            buf->base[i] = buf->data[i] = NULL;
+            buf->linesize[i] = 0;
+        }
         if(size[1] && !size[2])
             ff_set_systematic_pal2((uint32_t*)buf->data[1], s->pix_fmt);
         buf->width   = s->width;
@@ -352,7 +357,7 @@ int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
     }
     pic->type= FF_BUFFER_TYPE_INTERNAL;
 
-    for(i=0; i<4; i++){
+    for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
         pic->base[i]= buf->base[i];
         pic->data[i]= buf->data[i];
         pic->linesize[i]= buf->linesize[i];
@@ -392,7 +397,7 @@ void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){
         FFSWAP(InternalBuffer, *buf, *last);
     }
 
-    for(i=0; i<4; i++){
+    for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
         pic->data[i]=NULL;
//        pic->base[i]=NULL;
     }
@@ -426,7 +431,7 @@ int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic){
      * Not internal type and reget_buffer not overridden, emulate cr buffer
      */
     temp_pic = *pic;
-    for(i = 0; i < 4; i++)
+    for(i = 0; i < AV_NUM_DATA_POINTERS; i++)
         pic->data[i] = pic->base[i] = NULL;
     pic->opaque = NULL;
     /* Allocate new frame */

--- a/libavcodec/version.h
+++ b/libavcodec/version.h
@@ -21,7 +21,7 @@
 #define AVCODEC_VERSION_H
 
 #define LIBAVCODEC_VERSION_MAJOR 53
-#define LIBAVCODEC_VERSION_MINOR 23
+#define LIBAVCODEC_VERSION_MINOR 24
 #define LIBAVCODEC_VERSION_MICRO  0
 
 #define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
@@ -110,6 +110,8 @@
 #ifndef FF_API_TIFFENC_COMPLEVEL
 #define FF_API_TIFFENC_COMPLEVEL (LIBAVCODEC_VERSION_MAJOR < 54)
 #endif
+#ifndef FF_API_DATA_POINTERS
+#define FF_API_DATA_POINTERS (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
 
 #endif /* AVCODEC_VERSION_H */

--- a/libavcodec/vp3.c
+++ b/libavcodec/vp3.c
@@ -1331,8 +1331,8 @@ end:
  */
 static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
 {
-    int h, cy;
-    int offset[4];
+    int h, cy, i;
+    int offset[AV_NUM_DATA_POINTERS];
 
     if (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
         int y_flipped = s->flipped_image ? s->avctx->height-y : y;
@@ -1358,7 +1358,8 @@ static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
     offset[0] = s->current_frame.linesize[0]*y;
     offset[1] = s->current_frame.linesize[1]*cy;
     offset[2] = s->current_frame.linesize[2]*cy;
-    offset[3] = 0;
+    for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
+        offset[i] = 0;
 
     emms_c();
     s->avctx->draw_horiz_band(s->avctx, &s->current_frame, offset, y, 3, h);
