mpegvideo: Move various temporary buffers to a separate context

Branch: pull/147/head
Authored by Vittorio Giovara, 10 years ago
Parent: a6f19d6a9f
Commit: da0c8664b4
8 changed files:
  1. libavcodec/intrax8.c (4 lines changed)
  2. libavcodec/mpegvideo.c (47 lines changed)
  3. libavcodec/mpegvideo.h (13 lines changed)
  4. libavcodec/mpegvideo_enc.c (14 lines changed)
  5. libavcodec/mpegvideo_motion.c (42 lines changed)
  6. libavcodec/rv34.c (6 lines changed)
  7. libavcodec/vc1_mc.c (40 lines changed)
  8. libavcodec/wmv2.c (12 lines changed)

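In short, the four per-context scratch pointers move out of MpegEncContext into a new ScratchpadContext embedded as MpegEncContext.sc, so every access changes from s->edge_emu_buffer to s->sc.edge_emu_buffer (and likewise for the other three fields). A condensed sketch of the change, with struct and field names taken from the hunks below and all unrelated members elided:

```c
#include <stdint.h>

/* The four scratch pointers that previously sat directly in MpegEncContext
 * (s->edge_emu_buffer, s->rd_scratchpad, s->obmc_scratchpad, s->b_scratchpad)
 * are grouped into one struct... */
typedef struct ScratchpadContext {
    uint8_t *edge_emu_buffer; ///< temporary buffer for if MVs point to out-of-frame data
    uint8_t *rd_scratchpad;   ///< scratchpad for rate distortion mb decision
    uint8_t *obmc_scratchpad;
    uint8_t *b_scratchpad;    ///< scratchpad used for writing into write only buffers
} ScratchpadContext;

/* ...and embedded by value in MpegEncContext (heavily elided here), so callers
 * now write s->sc.<field> instead of s-><field>. */
typedef struct MpegEncContext {
    /* ... other members elided ... */
    ScratchpadContext sc;
    /* ... */
} MpegEncContext;
```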
@@ -306,7 +306,7 @@ static int x8_setup_spatial_predictor(IntraX8Context * const w, const int chroma
int sum;
int quant;
-w->dsp.setup_spatial_compensation(s->dest[chroma], s->edge_emu_buffer,
+w->dsp.setup_spatial_compensation(s->dest[chroma], s->sc.edge_emu_buffer,
s->current_picture.f->linesize[chroma>0],
&range, &sum, w->edges);
if(chroma){
@@ -642,7 +642,7 @@ static int x8_decode_intra_mb(IntraX8Context* const w, const int chroma){
if(w->flat_dc){
dsp_x8_put_solidcolor(w->predicted_dc, s->dest[chroma], s->current_picture.f->linesize[!!chroma]);
}else{
-w->dsp.spatial_compensation[w->orient]( s->edge_emu_buffer,
+w->dsp.spatial_compensation[w->orient]( s->sc.edge_emu_buffer,
s->dest[chroma],
s->current_picture.f->linesize[!!chroma] );
}

@@ -346,25 +346,26 @@ av_cold void ff_mpv_idct_init(MpegEncContext *s)
static int frame_size_alloc(MpegEncContext *s, int linesize)
{
int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
+ScratchpadContext *sc = &s->sc;
// edge emu needs blocksize + filter length - 1
// (= 17x17 for halfpel / 21x21 for h264)
// VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
// at uvlinesize. It supports only YUV420 so 24x24 is enough
// linesize * interlaced * MBsize
-FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
+FF_ALLOCZ_OR_GOTO(s->avctx, sc->edge_emu_buffer, alloc_size * 2 * 24,
fail);
FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
fail)
s->me.temp = s->me.scratchpad;
-s->rd_scratchpad = s->me.scratchpad;
-s->b_scratchpad = s->me.scratchpad;
-s->obmc_scratchpad = s->me.scratchpad + 16;
+sc->rd_scratchpad = s->me.scratchpad;
+sc->b_scratchpad = s->me.scratchpad;
+sc->obmc_scratchpad = s->me.scratchpad + 16;
return 0;
fail:
-av_freep(&s->edge_emu_buffer);
+av_freep(&sc->edge_emu_buffer);
return AVERROR(ENOMEM);
}
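As an aside on the sizing comment in frame_size_alloc() above: the allocation is alloc_size * 2 * 24, i.e. one padded row, doubled for field-based access, times 24 rows for the VC-1 worst case the comment describes. A small stand-alone sketch of that arithmetic; the 1920-byte linesize is a hypothetical example, and the 4:2:0 row accounting is one reading of the comment rather than part of the commit:

```c
#include <stdio.h>

/* Mirrors libavutil's FFALIGN() macro. */
#define FFALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    int linesize   = 1920;                       /* hypothetical luma linesize */
    int alloc_size = FFALIGN(linesize + 32, 32); /* one padded row, as in frame_size_alloc() */

    /* Worst case per the comment: VC-1 emulates a 19x19 luma block at linesize
     * plus a 9x9 chroma block at uvlinesize.  With 4:2:0, uvlinesize is roughly
     * linesize / 2, so 9 chroma rows cost about 4.5 luma-sized rows, and
     * 19 + 4.5 rows fits within the 24 rows allocated.  The extra factor of 2
     * covers field-based (interlaced) access. */
    printf("edge_emu_buffer size: %d bytes\n", alloc_size * 2 * 24);
    return 0;
}
```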
@@ -439,7 +440,7 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
return -1;
}
-if (!s->edge_emu_buffer &&
+if (!s->sc.edge_emu_buffer &&
(ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
av_log(s->avctx, AV_LOG_ERROR,
"get_buffer() failed to allocate context scratch buffers.\n");
@@ -691,12 +692,12 @@ static int init_duplicate_context(MpegEncContext *s)
int yc_size = y_size + 2 * c_size;
int i;
-s->edge_emu_buffer =
+s->sc.edge_emu_buffer =
s->me.scratchpad =
s->me.temp =
-s->rd_scratchpad =
-s->b_scratchpad =
-s->obmc_scratchpad = NULL;
+s->sc.rd_scratchpad =
+s->sc.b_scratchpad =
+s->sc.obmc_scratchpad = NULL;
if (s->encoding) {
FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
@@ -741,12 +742,12 @@ static void free_duplicate_context(MpegEncContext *s)
if (!s)
return;
-av_freep(&s->edge_emu_buffer);
+av_freep(&s->sc.edge_emu_buffer);
av_freep(&s->me.scratchpad);
s->me.temp =
-s->rd_scratchpad =
-s->b_scratchpad =
-s->obmc_scratchpad = NULL;
+s->sc.rd_scratchpad =
+s->sc.b_scratchpad =
+s->sc.obmc_scratchpad = NULL;
av_freep(&s->dct_error_sum);
av_freep(&s->me.map);
@@ -759,12 +760,12 @@ static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
-COPY(edge_emu_buffer);
+COPY(sc.edge_emu_buffer);
COPY(me.scratchpad);
COPY(me.temp);
-COPY(rd_scratchpad);
-COPY(b_scratchpad);
-COPY(obmc_scratchpad);
+COPY(sc.rd_scratchpad);
+COPY(sc.b_scratchpad);
+COPY(sc.obmc_scratchpad);
COPY(me.map);
COPY(me.score_map);
COPY(blocks);
@@ -802,7 +803,7 @@ int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
dst->pblocks[4] = dst->pblocks[5];
dst->pblocks[5] = tmp;
}
-if (!dst->edge_emu_buffer &&
+if (!dst->sc.edge_emu_buffer &&
(ret = frame_size_alloc(dst, dst->linesize)) < 0) {
av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
"scratch buffers.\n");
@@ -916,7 +917,7 @@ do {\
}
// linesize dependend scratch buffer allocation
-if (!s->edge_emu_buffer)
+if (!s->sc.edge_emu_buffer)
if (s1->linesize) {
if (frame_size_alloc(s, s1->linesize) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
@@ -2013,9 +2014,9 @@ FF_ENABLE_DEPRECATION_WARNINGS
dest_cb= s->dest[1];
dest_cr= s->dest[2];
}else{
-dest_y = s->b_scratchpad;
-dest_cb= s->b_scratchpad+16*linesize;
-dest_cr= s->b_scratchpad+32*linesize;
+dest_y = s->sc.b_scratchpad;
+dest_cb= s->sc.b_scratchpad+16*linesize;
+dest_cr= s->sc.b_scratchpad+32*linesize;
}
if (!s->mb_intra) {

@@ -137,6 +137,13 @@ typedef struct Picture{
int shared;
} Picture;
+typedef struct ScratchpadContext {
+uint8_t *edge_emu_buffer; ///< temporary buffer for if MVs point to out-of-frame data
+uint8_t *rd_scratchpad; ///< scratchpad for rate distortion mb decision
+uint8_t *obmc_scratchpad;
+uint8_t *b_scratchpad; ///< scratchpad used for writing into write only buffers
+} ScratchpadContext;
/**
* MpegEncContext.
*/
@@ -260,10 +267,8 @@ typedef struct MpegEncContext {
uint8_t *mbintra_table; ///< used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
uint8_t *cbp_table; ///< used to store cbp, ac_pred for partitioned decoding
uint8_t *pred_dir_table; ///< used to store pred_dir for partitioned decoding
-uint8_t *edge_emu_buffer; ///< temporary buffer for if MVs point to out-of-frame data
-uint8_t *rd_scratchpad; ///< scratchpad for rate distortion mb decision
-uint8_t *obmc_scratchpad;
-uint8_t *b_scratchpad; ///< scratchpad used for writing into write only buffers
+ScratchpadContext sc;
int qscale; ///< QP
int chroma_qscale; ///< chroma QP

@@ -1936,7 +1936,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
(mb_y * mb_block_height * wrap_c) + mb_x * 8;
if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
-uint8_t *ebuf = s->edge_emu_buffer + 32;
+uint8_t *ebuf = s->sc.edge_emu_buffer + 32;
s->vdsp.emulated_edge_mc(ebuf, ptr_y,
wrap_y, wrap_y,
16, 16, mb_x * 16, mb_y * 16,
@@ -2328,9 +2328,9 @@ static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegE
if(*next_block){
memcpy(dest_backup, s->dest, sizeof(s->dest));
-s->dest[0] = s->rd_scratchpad;
-s->dest[1] = s->rd_scratchpad + 16*s->linesize;
-s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
+s->dest[0] = s->sc.rd_scratchpad;
+s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
+s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
assert(s->linesize >= 32); //FIXME
}
@@ -2982,9 +2982,9 @@ static int encode_thread(AVCodecContext *c, void *arg){
ff_h263_update_motion_val(s);
if(next_block==0){ //FIXME 16 vs linesize16
-s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
-s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
-s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
+s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
+s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
+s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
}
if(s->avctx->mb_decision == FF_MB_DECISION_BITS)

@@ -63,12 +63,12 @@ static void gmc1_motion(MpegEncContext *s,
if ((unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0) ||
(unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)) {
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
linesize, linesize,
17, 17,
src_x, src_y,
s->h_edge_pos, s->v_edge_pos);
-ptr = s->edge_emu_buffer;
+ptr = s->sc.edge_emu_buffer;
}
if ((motion_x | motion_y) & 7) {
@@ -107,12 +107,12 @@ static void gmc1_motion(MpegEncContext *s,
ptr = ref_picture[1] + offset;
if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - 9, 0) ||
(unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - 9, 0)) {
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
uvlinesize, uvlinesize,
9, 9,
src_x, src_y,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
-ptr = s->edge_emu_buffer;
+ptr = s->sc.edge_emu_buffer;
emu = 1;
}
s->mdsp.gmc1(dest_cb, ptr, uvlinesize, 8,
@@ -120,12 +120,12 @@ static void gmc1_motion(MpegEncContext *s,
ptr = ref_picture[2] + offset;
if (emu) {
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
uvlinesize, uvlinesize,
9, 9,
src_x, src_y,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
-ptr = s->edge_emu_buffer;
+ptr = s->sc.edge_emu_buffer;
}
s->mdsp.gmc1(dest_cr, ptr, uvlinesize, 8,
motion_x & 15, motion_y & 15, 128 - s->no_rounding);
@@ -213,12 +213,12 @@ static inline int hpel_motion(MpegEncContext *s,
if (s->unrestricted_mv) {
if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 1) - 8, 0) ||
(unsigned)src_y > FFMAX(s->v_edge_pos - (motion_y & 1) - 8, 0)) {
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
s->linesize, s->linesize,
9, 9,
src_x, src_y, s->h_edge_pos,
s->v_edge_pos);
-src = s->edge_emu_buffer;
+src = s->sc.edge_emu_buffer;
emu = 1;
}
}
@@ -318,14 +318,14 @@ void mpeg_motion_internal(MpegEncContext *s,
src_y);
return;
}
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
s->linesize, s->linesize,
17, 17 + field_based,
src_x, src_y << field_based,
s->h_edge_pos, s->v_edge_pos);
-ptr_y = s->edge_emu_buffer;
+ptr_y = s->sc.edge_emu_buffer;
if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
-uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
+uint8_t *uvbuf = s->sc.edge_emu_buffer + 18 * s->linesize;
s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
s->uvlinesize, s->uvlinesize,
9, 9 + field_based,
@@ -476,7 +476,7 @@ static inline void obmc_motion(MpegEncContext *s,
if (i && mv[i][0] == mv[MID][0] && mv[i][1] == mv[MID][1]) {
ptr[i] = ptr[MID];
} else {
-ptr[i] = s->obmc_scratchpad + 8 * (i & 1) +
+ptr[i] = s->sc.obmc_scratchpad + 8 * (i & 1) +
s->linesize * 8 * (i >> 1);
hpel_motion(s, ptr[i], src, src_x, src_y, pix_op,
mv[i][0], mv[i][1]);
@@ -539,14 +539,14 @@ static inline void qpel_motion(MpegEncContext *s,
if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 3) - 16, 0) ||
(unsigned)src_y > FFMAX(v_edge_pos - (motion_y & 3) - h, 0)) {
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
s->linesize, s->linesize,
17, 17 + field_based,
src_x, src_y << field_based,
s->h_edge_pos, s->v_edge_pos);
-ptr_y = s->edge_emu_buffer;
+ptr_y = s->sc.edge_emu_buffer;
if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
-uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
+uint8_t *uvbuf = s->sc.edge_emu_buffer + 18 * s->linesize;
s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
s->uvlinesize, s->uvlinesize,
9, 9 + field_based,
@@ -622,22 +622,22 @@ static void chroma_4mv_motion(MpegEncContext *s,
ptr = ref_picture[1] + offset;
if ((unsigned)src_x > FFMAX((s->h_edge_pos >> 1) - (dxy & 1) - 8, 0) ||
(unsigned)src_y > FFMAX((s->v_edge_pos >> 1) - (dxy >> 1) - 8, 0)) {
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
s->uvlinesize, s->uvlinesize,
9, 9, src_x, src_y,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
-ptr = s->edge_emu_buffer;
+ptr = s->sc.edge_emu_buffer;
emu = 1;
}
pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);
ptr = ref_picture[2] + offset;
if (emu) {
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
s->uvlinesize, s->uvlinesize,
9, 9, src_x, src_y,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
-ptr = s->edge_emu_buffer;
+ptr = s->sc.edge_emu_buffer;
}
pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
}
@@ -780,13 +780,13 @@ static inline void apply_8x8(MpegEncContext *s,
ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 3) - 8, 0) ||
(unsigned)src_y > FFMAX(s->v_edge_pos - (motion_y & 3) - 8, 0)) {
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
s->linesize, s->linesize,
9, 9,
src_x, src_y,
s->h_edge_pos,
s->v_edge_pos);
-ptr = s->edge_emu_buffer;
+ptr = s->sc.edge_emu_buffer;
}
dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
qpix_op[1][dxy](dest, ptr, s->linesize);

@@ -723,14 +723,14 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type,
if(s->h_edge_pos - (width << 3) < 6 || s->v_edge_pos - (height << 3) < 6 ||
(unsigned)(src_x - !!lx*2) > s->h_edge_pos - !!lx*2 - (width <<3) - 4 ||
(unsigned)(src_y - !!ly*2) > s->v_edge_pos - !!ly*2 - (height<<3) - 4) {
-uint8_t *uvbuf = s->edge_emu_buffer + 22 * s->linesize;
+uint8_t *uvbuf = s->sc.edge_emu_buffer + 22 * s->linesize;
srcY -= 2 + 2*s->linesize;
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
s->linesize, s->linesize,
(width << 3) + 6, (height << 3) + 6,
src_x - 2, src_y - 2, s->h_edge_pos, s->v_edge_pos);
-srcY = s->edge_emu_buffer + 2 + 2*s->linesize;
+srcY = s->sc.edge_emu_buffer + 2 + 2*s->linesize;
s->vdsp.emulated_edge_mc(uvbuf, srcU,
s->uvlinesize,s->uvlinesize,
(width << 2) + 1, (height << 2) + 1,

@@ -137,23 +137,23 @@ void ff_vc1_mc_1mv(VC1Context *v, int dir)
/* for grayscale we should not try to read from unknown area */
if (s->avctx->flags & CODEC_FLAG_GRAY) {
-srcU = s->edge_emu_buffer + 18 * s->linesize;
-srcV = s->edge_emu_buffer + 18 * s->linesize;
+srcU = s->sc.edge_emu_buffer + 18 * s->linesize;
+srcV = s->sc.edge_emu_buffer + 18 * s->linesize;
}
if (v->rangeredfrm || use_ic
|| s->h_edge_pos < 22 || v_edge_pos < 22
|| (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
|| (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
-uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
+uint8_t *uvbuf = s->sc.edge_emu_buffer + 19 * s->linesize;
srcY -= s->mspel * (1 + s->linesize);
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
s->linesize, s->linesize,
17 + s->mspel * 2, 17 + s->mspel * 2,
src_x - s->mspel, src_y - s->mspel,
s->h_edge_pos, v_edge_pos);
-srcY = s->edge_emu_buffer;
+srcY = s->sc.edge_emu_buffer;
s->vdsp.emulated_edge_mc(uvbuf, srcU,
s->uvlinesize, s->uvlinesize,
8 + 1, 8 + 1,
@@ -395,12 +395,12 @@ void ff_vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
|| (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
srcY -= s->mspel * (1 + (s->linesize << fieldmv));
/* check emulate edge stride and offset */
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
s->linesize, s->linesize,
9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
src_x - s->mspel, src_y - (s->mspel << fieldmv),
s->h_edge_pos, v_edge_pos);
-srcY = s->edge_emu_buffer;
+srcY = s->sc.edge_emu_buffer;
/* if we deal with range reduction we need to scale source blocks */
if (v->rangeredfrm) {
int i, j;
@@ -611,16 +611,16 @@ void ff_vc1_mc_4mv_chroma(VC1Context *v, int dir)
|| s->h_edge_pos < 18 || v_edge_pos < 18
|| (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
|| (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcU,
s->uvlinesize, s->uvlinesize,
8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
s->h_edge_pos >> 1, v_edge_pos >> 1);
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer + 16, srcV,
s->uvlinesize, s->uvlinesize,
8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
s->h_edge_pos >> 1, v_edge_pos >> 1);
-srcU = s->edge_emu_buffer;
-srcV = s->edge_emu_buffer + 16;
+srcU = s->sc.edge_emu_buffer;
+srcV = s->sc.edge_emu_buffer + 16;
/* if we deal with range reduction we need to scale source blocks */
if (v->rangeredfrm) {
@@ -729,16 +729,16 @@ void ff_vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
|| s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
|| (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
|| (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcU,
s->uvlinesize, s->uvlinesize,
5, (5 << fieldmv), uvsrc_x, uvsrc_y,
s->h_edge_pos >> 1, v_edge_pos);
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer + 16, srcV,
s->uvlinesize, s->uvlinesize,
5, (5 << fieldmv), uvsrc_x, uvsrc_y,
s->h_edge_pos >> 1, v_edge_pos);
-srcU = s->edge_emu_buffer;
-srcV = s->edge_emu_buffer + 16;
+srcU = s->sc.edge_emu_buffer;
+srcV = s->sc.edge_emu_buffer + 16;
/* if we deal with intensity compensation we need to scale source blocks */
if (use_ic) {
@@ -838,22 +838,22 @@ void ff_vc1_interp_mc(VC1Context *v)
/* for grayscale we should not try to read from unknown area */
if (s->avctx->flags & CODEC_FLAG_GRAY) {
-srcU = s->edge_emu_buffer + 18 * s->linesize;
-srcV = s->edge_emu_buffer + 18 * s->linesize;
+srcU = s->sc.edge_emu_buffer + 18 * s->linesize;
+srcV = s->sc.edge_emu_buffer + 18 * s->linesize;
}
if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
|| (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
|| (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
-uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
+uint8_t *uvbuf = s->sc.edge_emu_buffer + 19 * s->linesize;
srcY -= s->mspel * (1 + s->linesize);
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
s->linesize, s->linesize,
17 + s->mspel * 2, 17 + s->mspel * 2,
src_x - s->mspel, src_y - s->mspel,
s->h_edge_pos, v_edge_pos);
-srcY = s->edge_emu_buffer;
+srcY = s->sc.edge_emu_buffer;
s->vdsp.emulated_edge_mc(uvbuf, srcU,
s->uvlinesize, s->uvlinesize,
8 + 1, 8 + 1,

@@ -127,11 +127,11 @@ void ff_mspel_motion(MpegEncContext *s, uint8_t *dest_y,
if (src_x < 1 || src_y < 1 || src_x + 17 >= s->h_edge_pos ||
src_y + h + 1 >= v_edge_pos) {
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr - 1 - s->linesize,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr - 1 - s->linesize,
s->linesize, s->linesize, 19, 19,
src_x - 1, src_y - 1,
s->h_edge_pos, s->v_edge_pos);
-ptr = s->edge_emu_buffer + 1 + s->linesize;
+ptr = s->sc.edge_emu_buffer + 1 + s->linesize;
emu = 1;
}
@@ -170,23 +170,23 @@ void ff_mspel_motion(MpegEncContext *s, uint8_t *dest_y,
offset = (src_y * uvlinesize) + src_x;
ptr = ref_picture[1] + offset;
if (emu) {
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
s->uvlinesize, s->uvlinesize,
9, 9,
src_x, src_y,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
-ptr = s->edge_emu_buffer;
+ptr = s->sc.edge_emu_buffer;
}
pix_op[1][dxy](dest_cb, ptr, uvlinesize, h >> 1);
ptr = ref_picture[2] + offset;
if (emu) {
-s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
s->uvlinesize, s->uvlinesize,
9, 9,
src_x, src_y,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
-ptr = s->edge_emu_buffer;
+ptr = s->sc.edge_emu_buffer;
}
pix_op[1][dxy](dest_cr, ptr, uvlinesize, h >> 1);
}
