motion estimation pre pass

Originally committed as revision 1389 to svn://svn.ffmpeg.org/ffmpeg/trunk
Michael Niedermayer 22 years ago
parent 0a13093de8
commit f5fb6b34fd
  1. libavcodec/avcodec.h (11 lines changed)
  2. libavcodec/motion_est.c (66 lines changed)
  3. libavcodec/mpegvideo.c (13 lines changed)
  4. libavcodec/mpegvideo.h (1 line changed)

libavcodec/avcodec.h

@@ -5,8 +5,8 @@
 #define LIBAVCODEC_VERSION_INT 0x000406
 #define LIBAVCODEC_VERSION "0.4.6"
-#define LIBAVCODEC_BUILD 4649
-#define LIBAVCODEC_BUILD_STR "4649"
+#define LIBAVCODEC_BUILD 4650
+#define LIBAVCODEC_BUILD_STR "4650"
 
 enum CodecID {
     CODEC_ID_NONE,
@@ -894,6 +894,13 @@ typedef struct AVCodecContext {
      * decoding: unused
      */
     int last_predictor_count;
+
+    /**
+     * pre pass for motion estimation
+     * encoding: set by user.
+     * decoding: unused
+     */
+    int pre_me;
 } AVCodecContext;
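
The new pre_me field is a plain encoder option: going by the check added to encode_picture() further down, any nonzero value runs the pre pass for the first P-frame after an I-frame, and a value of 2 runs it before every non-B frame. A minimal, hypothetical usage sketch follows; only the pre_me assignment itself comes from this commit, the helper and its name are illustrative.

/* sketch only: enabling the new pre pass on an already-configured encoder context */
#include "avcodec.h"

static void enable_pre_me(AVCodecContext *ctx)
{
    ctx->pre_me = 2;  /* 2: pre pass before every non-B frame; 1: only right after an I-frame */
}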

libavcodec/motion_est.c

@@ -754,12 +754,10 @@ static inline void get_limits(MpegEncContext *s, int *range, int *xmin, int *ymi
         *ymin = -16;
         if (s->h263_plus)
             *range *= 2;
-        if(s->avctx==NULL || s->avctx->codec->id!=CODEC_ID_MPEG4){
+        if(s->avctx->codec->id!=CODEC_ID_MPEG4){
             *xmax = s->mb_width*16;
             *ymax = s->mb_height*16;
         }else {
-            /* XXX: dunno if this is correct but ffmpeg4 decoder wont like it otherwise
-               (cuz the drawn edge isnt large enough))*/
             *xmax = s->width;
             *ymax = s->height;
         }
@@ -1024,6 +1022,68 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
     s->mb_type[mb_y*s->mb_width + mb_x]= mb_type;
 }
 
+int ff_pre_estimate_p_frame_motion(MpegEncContext * s,
+                                   int mb_x, int mb_y)
+{
+    int mx, my, range, dmin;
+    int xmin, ymin, xmax, ymax;
+    int rel_xmin, rel_ymin, rel_xmax, rel_ymax;
+    int pred_x=0, pred_y=0;
+    int P[10][2];
+    const int shift= 1+s->quarter_sample;
+    uint16_t * const mv_penalty= s->me.mv_penalty[s->f_code] + MAX_MV;
+    const int mv_stride= s->mb_width + 2;
+    const int xy= mb_x + 1 + (mb_y + 1)*mv_stride;
+
+    assert(s->quarter_sample==0 || s->quarter_sample==1);
+
+    s->me.penalty_factor = get_penalty_factor(s, s->avctx->me_cmp);
+
+    get_limits(s, &range, &xmin, &ymin, &xmax, &ymax, s->f_code);
+    rel_xmin= xmin - mb_x*16;
+    rel_xmax= xmax - mb_x*16;
+    rel_ymin= ymin - mb_y*16;
+    rel_ymax= ymax - mb_y*16;
+    s->me.skip=0;
+
+    P_LEFT[0] = s->p_mv_table[xy + 1][0];
+    P_LEFT[1] = s->p_mv_table[xy + 1][1];
+
+    if(P_LEFT[0] < (rel_xmin<<shift)) P_LEFT[0] = (rel_xmin<<shift);
+
+    /* special case for first line */
+    if (mb_y == s->mb_height-1) {
+        pred_x= P_LEFT[0];
+        pred_y= P_LEFT[1];
+    } else {
+        P_TOP[0]      = s->p_mv_table[xy + mv_stride    ][0];
+        P_TOP[1]      = s->p_mv_table[xy + mv_stride    ][1];
+        P_TOPRIGHT[0] = s->p_mv_table[xy + mv_stride - 1][0];
+        P_TOPRIGHT[1] = s->p_mv_table[xy + mv_stride - 1][1];
+        if(P_TOP[1]      < (rel_ymin<<shift)) P_TOP[1]     = (rel_ymin<<shift);
+        if(P_TOPRIGHT[0] > (rel_xmax<<shift)) P_TOPRIGHT[0]= (rel_xmax<<shift);
+        if(P_TOPRIGHT[1] < (rel_ymin<<shift)) P_TOPRIGHT[1]= (rel_ymin<<shift);
+
+        P_MEDIAN[0]= mid_pred(P_LEFT[0], P_TOP[0], P_TOPRIGHT[0]);
+        P_MEDIAN[1]= mid_pred(P_LEFT[1], P_TOP[1], P_TOPRIGHT[1]);
+
+        if(s->out_format == FMT_H263){
+            pred_x = P_MEDIAN[0];
+            pred_y = P_MEDIAN[1];
+        }else { /* mpeg1 at least */
+            pred_x= P_LEFT[0];
+            pred_y= P_LEFT[1];
+        }
+    }
+    dmin = s->me.motion_search[0](s, 0, &mx, &my, P, pred_x, pred_y, rel_xmin, rel_ymin, rel_xmax, rel_ymax,
+                                  &s->last_picture, s->p_mv_table, (1<<16)>>shift, mv_penalty);
+
+    s->p_mv_table[xy][0] = mx<<shift;
+    s->p_mv_table[xy][1] = my<<shift;
+
+    return dmin;
+}
+
 int ff_estimate_motion_b(MpegEncContext * s,
                         int mb_x, int mb_y, int16_t (*mv_table)[2], Picture *picture, int f_code)
 {
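
The pre pass mirrors ff_estimate_p_frame_motion() but is driven in reverse scan order (see the loop added to encode_picture() below), so the already-estimated neighbours lie to the right and below the current macroblock: P_LEFT is read from xy + 1, P_TOP and P_TOPRIGHT from xy + mv_stride and xy + mv_stride - 1, and the clamps against the relative limits are mirrored accordingly. A small standalone illustration of that indexing follows; the dimensions are made up, only the mv_stride/xy arithmetic matches the code above.

/* illustration only: neighbour offsets in the bordered motion-vector table */
#include <stdio.h>

int main(void)
{
    int mb_width = 4, mb_x = 1, mb_y = 2;        /* hypothetical dimensions */
    int mv_stride = mb_width + 2;                /* one guard column on each side */
    int xy = mb_x + 1 + (mb_y + 1) * mv_stride;  /* current MB inside the bordered table */

    /* in the reverse pre pass these neighbours were visited first */
    printf("current=%d right=%d below=%d below_left=%d\n",
           xy, xy + 1, xy + mv_stride, xy + mv_stride - 1);
    return 0;
}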

libavcodec/mpegvideo.c

@@ -2789,6 +2789,19 @@ static void encode_picture(MpegEncContext *s, int picture_number)
 
     /* Estimate motion for every MB */
     if(s->pict_type != I_TYPE){
+        if(s->pict_type != B_TYPE){
+            if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){
+                for(mb_y=s->mb_height-1; mb_y >=0 ; mb_y--) {
+                    for(mb_x=s->mb_width-1; mb_x >=0 ; mb_x--) {
+                        s->mb_x = mb_x;
+                        s->mb_y = mb_y;
+                        ff_pre_estimate_p_frame_motion(s, mb_x, mb_y);
+                    }
+                }
+            }
+        }
+
         for(mb_y=0; mb_y < s->mb_height; mb_y++) {
             s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
             s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
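
Only P-type pictures get the pre pass, and it is gated on pre_me together with the type of the previous non-B picture. The same test, pulled out here as a hypothetical helper purely for readability (the commit inlines it; the function name and the standalone form are not part of the change):

/* sketch, not part of the commit: the pre-pass gate from encode_picture(),
 * using the same include order as mpegvideo.c */
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"

static int want_pre_pass(MpegEncContext *s)
{
    if (s->pict_type == I_TYPE || s->pict_type == B_TYPE)
        return 0;  /* the pre pass only runs for P-frames */
    return (s->avctx->pre_me && s->last_non_b_pict_type == I_TYPE)
        || s->avctx->pre_me == 2;
}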

libavcodec/mpegvideo.h

@@ -620,6 +620,7 @@ int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type);
 void ff_fix_long_p_mvs(MpegEncContext * s);
 void ff_fix_long_b_mvs(MpegEncContext * s, int16_t (*mv_table)[2], int f_code, int type);
 void ff_init_me(MpegEncContext *s);
+int ff_pre_estimate_p_frame_motion(MpegEncContext * s, int mb_x, int mb_y);
 
 /* mpeg12.c */
