@@ -927,23 +927,39 @@ int MPV_encode_init(AVCodecContext *avctx)
 
     MPV_encode_defaults(s);
 
-    if(avctx->pix_fmt != PIX_FMT_YUVJ420P && avctx->pix_fmt != PIX_FMT_YUV420P){
-        av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
-        return -1;
-    }
-
-    if(avctx->codec_id == CODEC_ID_MJPEG || avctx->codec_id == CODEC_ID_LJPEG){
-        if(avctx->strict_std_compliance>FF_COMPLIANCE_INOFFICIAL && avctx->pix_fmt != PIX_FMT_YUVJ420P){
-            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
-            return -1;
-        }
-    }else{
-        if(avctx->strict_std_compliance>FF_COMPLIANCE_INOFFICIAL && avctx->pix_fmt != PIX_FMT_YUV420P){
-            av_log(avctx, AV_LOG_ERROR, "colorspace not supported\n");
-            return -1;
+    switch (avctx->codec_id) {
+    case CODEC_ID_MPEG2VIDEO:
+        if(avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUV422P){
+            av_log(avctx, AV_LOG_ERROR, "only YUV420 and YUV422 are supported\n");
+            return -1;
+        }
+        break;
+    case CODEC_ID_LJPEG:
+    case CODEC_ID_MJPEG:
+        if(avctx->pix_fmt != PIX_FMT_YUVJ420P && (avctx->pix_fmt != PIX_FMT_YUV420P || avctx->strict_std_compliance>FF_COMPLIANCE_INOFFICIAL)){
+            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
+            return -1;
+        }
+        break;
+    default:
+        if(avctx->pix_fmt != PIX_FMT_YUV420P){
+            av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
+            return -1;
         }
     }
 
+    switch (avctx->pix_fmt) {
+    case PIX_FMT_YUVJ422P:
+    case PIX_FMT_YUV422P:
+        s->chroma_format = CHROMA_422;
+        break;
+    case PIX_FMT_YUVJ420P:
+    case PIX_FMT_YUV420P:
+    default:
+        s->chroma_format = CHROMA_420;
+        break;
+    }
+
     s->bit_rate = avctx->bit_rate;
     s->width = avctx->width;
     s->height = avctx->height;
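Note (not part of the patch): the second switch above is what the rest of this diff keys off. CHROMA_422 doubles the chroma block height of a macroblock, which is why later hunks grow the per-macroblock arrays from 6 to 8 entries. A minimal standalone sketch of that mapping follows; the enum values and the describe_chroma() helper are invented for illustration only.

/* Illustrative sketch only -- not part of the patch.  Shows how the
 * chroma_format picked in MPV_encode_init() translates into the
 * (mb_block_height, mb_block_count) pair later passed to encode_mb_internal():
 * 4:2:0 -> 6 blocks per macroblock (4 luma + Cb + Cr), chroma 8 lines tall;
 * 4:2:2 -> 8 blocks per macroblock (4 luma + 2*Cb + 2*Cr), chroma 16 lines tall. */
#include <stdio.h>

enum { CHROMA_420 = 1, CHROMA_422 = 2 };   /* values assumed for the sketch */

static void describe_chroma(int chroma_format)
{
    int mb_block_height = (chroma_format == CHROMA_422) ? 16 : 8;  /* chroma lines per MB */
    int mb_block_count  = (chroma_format == CHROMA_422) ?  8 : 6;  /* 8x8 blocks per MB   */
    printf("chroma_format=%d: %d blocks per MB, chroma %d lines tall\n",
           chroma_format, mb_block_count, mb_block_height);
}

int main(void)
{
    describe_chroma(CHROMA_420);
    describe_chroma(CHROMA_422);
    return 0;
}
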
@@ -2466,11 +2482,6 @@ int MPV_encode_picture(AVCodecContext *avctx,
     AVFrame *pic_arg = data;
     int i, stuffing_count;
 
-    if(avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUVJ420P){
-        av_log(avctx, AV_LOG_ERROR, "this codec supports only YUV420P\n");
-        return -1;
-    }
-
     for(i=0; i<avctx->thread_count; i++){
         int start_y= s->thread_context[i]->start_mb_y;
         int end_y= s->thread_context[i]-> end_mb_y;
@@ -3968,8 +3979,17 @@ static always_inline void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM bloc
                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
 
                 if(!(s->flags&CODEC_FLAG_GRAY)){
-                    add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
-                    add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
+                    if (s->chroma_y_shift){
+                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
+                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
+                    }else{
+                        dct_linesize >>= 1;
+                        dct_offset >>=1;
+                        add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
+                        add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
+                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
+                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
+                    }
                 }
             } else if(s->codec_id != CODEC_ID_WMV2){
                 add_dct(s, block[0], 0, dest_y , dct_linesize);
@@ -4011,8 +4031,17 @@ static always_inline void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM bloc
                 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
 
                 if(!(s->flags&CODEC_FLAG_GRAY)){
-                    put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
-                    put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
+                    if(s->chroma_y_shift){
+                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
+                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
+                    }else{
+                        dct_offset >>=1;
+                        dct_linesize >>=1;
+                        put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
+                        put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
+                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
+                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
+                    }
                 }
             }else{
                 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
@@ -4234,19 +4263,19 @@ static void get_vissual_weight(int16_t *weight, uint8_t *ptr, int stride){
     }
 }
 
-static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
+static always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_count)
 {
-    int16_t weight[6][64];
-    DCTELEM orig[6][64];
+    int16_t weight[8][64];
+    DCTELEM orig[8][64];
     const int mb_x= s->mb_x;
     const int mb_y= s->mb_y;
     int i;
-    int skip_dct[6];
+    int skip_dct[8];
     int dct_offset = s->linesize*8; //default for progressive frames
     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
     int wrap_y, wrap_c;
 
-    for(i=0; i<6; i++) skip_dct[i]=0;
+    for(i=0; i<mb_block_count; i++) skip_dct[i]=0;
 
     if(s->adaptive_quant){
         const int last_qp= s->qscale;
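Note (not part of the patch): the arrays above grow from 6 to 8 entries because a 4:2:2 macroblock carries two extra chroma blocks, indices 6 and 7, located 8 chroma lines below blocks 4 and 5. The later hunks address them as ptr_cb/ptr_cr + (dct_offset>>1), half the luma offset, since the chroma stride is half the luma stride. A standalone sketch of that block-to-plane mapping follows; the struct and function names are invented for the example.

/* Illustrative sketch only -- not part of the patch.  Block index -> source
 * plane and pixel offset inside one 4:2:2 macroblock, following the offsets
 * used by encode_mb_internal(): blocks 0-3 are the four 8x8 luma blocks,
 * 4/5 the upper Cb/Cr 8x8 blocks, 6/7 the lower Cb/Cr 8x8 blocks. */
#include <stdio.h>

struct block_src { const char *plane; int x_off, y_off; };

static struct block_src block_source_422(int i)
{
    switch (i) {
    case 0:  return (struct block_src){ "Y",  0, 0 };
    case 1:  return (struct block_src){ "Y",  8, 0 };
    case 2:  return (struct block_src){ "Y",  0, 8 };
    case 3:  return (struct block_src){ "Y",  8, 8 };
    case 4:  return (struct block_src){ "Cb", 0, 0 };
    case 5:  return (struct block_src){ "Cr", 0, 0 };
    case 6:  return (struct block_src){ "Cb", 0, 8 };  /* new with 4:2:2 */
    default: return (struct block_src){ "Cr", 0, 8 };  /* new with 4:2:2 */
    }
}

int main(void)
{
    int i;
    for (i = 0; i < 8; i++) {
        struct block_src b = block_source_422(i);
        printf("block %d: plane %-2s at (%d,%d)\n", i, b.plane, b.x_off, b.y_off);
    }
    return 0;
}
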
@@ -4282,16 +4311,16 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
     wrap_y = s->linesize;
     wrap_c = s->uvlinesize;
     ptr_y = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
-    ptr_cb = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
-    ptr_cr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
+    ptr_cb = s->new_picture.data[1] + (mb_y * mb_block_height * wrap_c) + mb_x * 8;
+    ptr_cr = s->new_picture.data[2] + (mb_y * mb_block_height * wrap_c) + mb_x * 8;
 
     if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
         uint8_t *ebuf= s->edge_emu_buffer + 32;
         ff_emulated_edge_mc(ebuf , ptr_y , wrap_y,16,16,mb_x*16,mb_y*16, s->width , s->height);
         ptr_y= ebuf;
-        ff_emulated_edge_mc(ebuf+18*wrap_y , ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
+        ff_emulated_edge_mc(ebuf+18*wrap_y , ptr_cb, wrap_c, 8, mb_block_height, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
         ptr_cb= ebuf+18*wrap_y;
-        ff_emulated_edge_mc(ebuf+18*wrap_y+8, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
+        ff_emulated_edge_mc(ebuf+18*wrap_y+8, ptr_cr, wrap_c, 8, mb_block_height, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
         ptr_cr= ebuf+18*wrap_y+8;
     }
 
@@ -4311,6 +4340,8 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
 
                     dct_offset= wrap_y;
                     wrap_y<<=1;
+                    if (s->chroma_format == CHROMA_422)
+                        wrap_c<<=1;
                 }
             }
         }
@@ -4326,6 +4357,10 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
         }else{
             s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
             s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
+            if(!s->chroma_y_shift){ /* 422 */
+                s->dsp.get_pixels(s->block[6], ptr_cb + (dct_offset>>1), wrap_c);
+                s->dsp.get_pixels(s->block[7], ptr_cr + (dct_offset>>1), wrap_c);
+            }
         }
     }else{
         op_pixels_func (*op_pix)[4];
@@ -4371,6 +4406,8 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
 
                     dct_offset= wrap_y;
                     wrap_y<<=1;
+                    if (s->chroma_format == CHROMA_422)
+                        wrap_c<<=1;
                 }
             }
         }
@@ -4386,6 +4423,10 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
         }else{
             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
+            if(!s->chroma_y_shift){ /* 422 */
+                s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset>>1), dest_cb + (dct_offset>>1), wrap_c);
+                s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset>>1), dest_cr + (dct_offset>>1), wrap_c);
+            }
         }
         /* pre quantization */
         if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x]<2*s->qscale*s->qscale){
@@ -4396,6 +4437,10 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
             if(s->dsp.sad[1](NULL, ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y, 8) < 20*s->qscale) skip_dct[3]= 1;
             if(s->dsp.sad[1](NULL, ptr_cb , dest_cb , wrap_c, 8) < 20*s->qscale) skip_dct[4]= 1;
             if(s->dsp.sad[1](NULL, ptr_cr , dest_cr , wrap_c, 8) < 20*s->qscale) skip_dct[5]= 1;
+            if(!s->chroma_y_shift){ /* 422 */
+                if(s->dsp.sad[1](NULL, ptr_cb +(dct_offset>>1), dest_cb +(dct_offset>>1), wrap_c, 8) < 20*s->qscale) skip_dct[6]= 1;
+                if(s->dsp.sad[1](NULL, ptr_cr +(dct_offset>>1), dest_cr +(dct_offset>>1), wrap_c, 8) < 20*s->qscale) skip_dct[7]= 1;
+            }
         }
     }
 
@@ -4406,13 +4451,17 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
         if(!skip_dct[3]) get_vissual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
         if(!skip_dct[4]) get_vissual_weight(weight[4], ptr_cb , wrap_c);
         if(!skip_dct[5]) get_vissual_weight(weight[5], ptr_cr , wrap_c);
-        memcpy(orig[0], s->block[0], sizeof(DCTELEM)*64*6);
+        if(!s->chroma_y_shift){ /* 422 */
+            if(!skip_dct[6]) get_vissual_weight(weight[6], ptr_cb + (dct_offset>>1), wrap_c);
+            if(!skip_dct[7]) get_vissual_weight(weight[7], ptr_cr + (dct_offset>>1), wrap_c);
+        }
+        memcpy(orig[0], s->block[0], sizeof(DCTELEM)*64*mb_block_count);
     }
 
     /* DCT & quantize */
     assert(s->out_format!=FMT_MJPEG || s->qscale==8);
     {
-        for(i=0;i<6;i++) {
+        for(i=0;i<mb_block_count;i++) {
             if(!skip_dct[i]){
                 int overflow;
                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
@@ -4424,7 +4473,7 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
                 s->block_last_index[i]= -1;
         }
         if(s->avctx->quantizer_noise_shaping){
-            for(i=0;i<6;i++) {
+            for(i=0;i<mb_block_count;i++) {
                 if(!skip_dct[i]){
                     s->block_last_index[i] = dct_quantize_refine(s, s->block[i], weight[i], orig[i], i, s->qscale);
                 }
@@ -4435,11 +4484,11 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
         for(i=0; i<4; i++)
             dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
         if(s->chroma_elim_threshold && !s->mb_intra)
-            for(i=4; i<6; i++)
+            for(i=4; i<mb_block_count; i++)
                 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
 
         if(s->flags & CODEC_FLAG_CBP_RD){
-            for(i=0;i<6;i++) {
+            for(i=0;i<mb_block_count;i++) {
                 if(s->block_last_index[i] == -1)
                     s->coded_score[i]= INT_MAX/256;
             }
@@ -4455,7 +4504,7 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
 
     //non c quantize code returns incorrect block_last_index FIXME
     if(s->alternate_scan && s->dct_quantize != dct_quantize_c){
-        for(i=0; i<6; i++){
+        for(i=0; i<mb_block_count; i++){
             int j;
             if(s->block_last_index[i]>0){
                 for(j=63; j>0; j--){
@@ -4496,6 +4545,12 @@ static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
     }
 }
 
+static always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
+{
+    if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
+    else encode_mb_internal(s, motion_x, motion_y, 16, 8);
+}
+
 #endif //CONFIG_ENCODERS
 
 void ff_mpeg_flush(AVCodecContext *avctx){
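Note (not part of the patch): the new encode_mb() wrapper above keeps the hot loops cheap by making encode_mb_internal() always_inline, so each of the two call sites compiles into a variant with mb_block_height and mb_block_count folded to constants. A standalone sketch of the same pattern follows; sum_blocks() and the always_inline fallback macro are invented for the example.

/* Illustrative sketch only -- not part of the patch.  A forced-inline helper
 * parameterized on the block count, called from two thin wrappers, mirrors
 * how encode_mb() dispatches to encode_mb_internal() for 4:2:0 vs 4:2:2. */
#include <stdio.h>

#ifdef __GNUC__
#define always_inline __attribute__((always_inline)) inline
#else
#define always_inline inline
#endif

static always_inline int sum_blocks(const int *block_energy, int mb_block_count)
{
    int i, sum = 0;
    for (i = 0; i < mb_block_count; i++)  /* bound is a constant after inlining */
        sum += block_energy[i];
    return sum;
}

static int sum_blocks_420(const int *e) { return sum_blocks(e, 6); }
static int sum_blocks_422(const int *e) { return sum_blocks(e, 8); }

int main(void)
{
    int energy[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
    printf("420: %d  422: %d\n", sum_blocks_420(energy), sum_blocks_422(energy));
    return 0;
}
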
@@ -4605,7 +4660,7 @@ static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *
         d->tex_pb= s->tex_pb;
     }
     d->block= s->block;
-    for(i=0; i<6; i++)
+    for(i=0; i<8; i++)
         d->block_last_index[i]= s->block_last_index[i];
     d->interlaced_dct= s->interlaced_dct;
     d->qscale= s->qscale;