lavc: Edge emulation with dst/src linesize

Allow supporting files for which the image stride is smaller than
the maximum block size + number of subpel mc taps, e.g. a 64x64 VP9
file or a 16x16 VP8 file with -fflags +emu_edge.
Ronald S. Bultje 11 years ago committed by Luca Barbato
parent 3cbe112653
commit 458446acfa
 libavcodec/cavs.c              |   7
 libavcodec/h264.c              |  12
 libavcodec/hevc.c              |   9
 libavcodec/mpegvideo_enc.c     |  16
 libavcodec/mpegvideo_motion.c  |  30
 libavcodec/rv34.c              |  12
 libavcodec/svq3.c              |   6
 libavcodec/vc1dec.c            |  37
 libavcodec/videodsp.h          |  10
 libavcodec/videodsp_template.c |  23
 libavcodec/vp3.c               |   6
 libavcodec/vp56.c              |   3
 libavcodec/vp8.c               |  12
 libavcodec/wmv2.c              |  22
 libavcodec/x86/videodsp.asm    | 840
 libavcodec/x86/videodsp_init.c | 190
 16 files changed, 1235 lines changed

@@ -408,7 +408,8 @@ static inline void mc_dir_part(AVSContext *h, AVFrame *pic, int chroma_height,
full_mx + 16 /* FIXME */ > pic_width + extra_width ||
full_my + 16 /* FIXME */ > pic_height + extra_height) {
h->vdsp.emulated_edge_mc(h->edge_emu_buffer,
src_y - 2 - 2 * h->l_stride, h->l_stride,
src_y - 2 - 2 * h->l_stride,
h->l_stride, h->l_stride,
16 + 5, 16 + 5 /* FIXME */,
full_mx - 2, full_my - 2,
pic_width, pic_height);
@@ -421,7 +422,7 @@ static inline void mc_dir_part(AVSContext *h, AVFrame *pic, int chroma_height,
if (emu) {
h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cb,
h->c_stride,
h->c_stride, h->c_stride,
9, 9 /* FIXME */,
mx >> 3, my >> 3,
pic_width >> 1, pic_height >> 1);
@@ -431,7 +432,7 @@ static inline void mc_dir_part(AVSContext *h, AVFrame *pic, int chroma_height,
if (emu) {
h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cr,
h->c_stride,
h->c_stride, h->c_stride,
9, 9 /* FIXME */,
mx >> 3, my >> 3,
pic_width >> 1, pic_height >> 1);

@@ -899,7 +899,7 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic,
full_my + 16 /*FIXME*/ > pic_height + extra_height) {
h->vdsp.emulated_edge_mc(h->edge_emu_buffer,
src_y - (2 << pixel_shift) - 2 * h->mb_linesize,
h->mb_linesize,
h->mb_linesize, h->mb_linesize,
16 + 5, 16 + 5 /*FIXME*/, full_mx - 2,
full_my - 2, pic_width, pic_height);
src_y = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
@@ -918,7 +918,7 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic,
if (emu) {
h->vdsp.emulated_edge_mc(h->edge_emu_buffer,
src_cb - (2 << pixel_shift) - 2 * h->mb_linesize,
h->mb_linesize,
h->mb_linesize, h->mb_linesize,
16 + 5, 16 + 5 /*FIXME*/,
full_mx - 2, full_my - 2,
pic_width, pic_height);
@@ -932,7 +932,7 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic,
if (emu) {
h->vdsp.emulated_edge_mc(h->edge_emu_buffer,
src_cr - (2 << pixel_shift) - 2 * h->mb_linesize,
h->mb_linesize,
h->mb_linesize, h->mb_linesize,
16 + 5, 16 + 5 /*FIXME*/,
full_mx - 2, full_my - 2,
pic_width, pic_height);
@@ -957,7 +957,8 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic,
(my >> ysh) * h->mb_uvlinesize;
if (emu) {
h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cb, h->mb_uvlinesize,
h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cb,
h->mb_uvlinesize, h->mb_uvlinesize,
9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
src_cb = h->edge_emu_buffer;
@@ -967,7 +968,8 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic,
mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7);
if (emu) {
h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cr, h->mb_uvlinesize,
h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cr,
h->mb_uvlinesize, h->mb_uvlinesize,
9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
src_cr = h->edge_emu_buffer;

@@ -1481,7 +1481,8 @@ static void luma_mc(HEVCContext *s, int16_t *dst, ptrdiff_t dststride,
y_off >= pic_height - block_h - ff_hevc_qpel_extra_after[my]) {
int offset = extra_top * srcstride + (extra_left << s->sps->pixel_shift);
s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset, srcstride,
s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset,
srcstride, srcstride,
block_w + ff_hevc_qpel_extra[mx],
block_h + ff_hevc_qpel_extra[my],
x_off - extra_left, y_off - extra_top,
@@ -1532,7 +1533,8 @@ static void chroma_mc(HEVCContext *s, int16_t *dst1, int16_t *dst2,
int offset1 = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->sps->pixel_shift));
int offset2 = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->sps->pixel_shift));
s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1, src1stride,
s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1,
src1stride, src1stride,
block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
x_off - EPEL_EXTRA_BEFORE,
y_off - EPEL_EXTRA_BEFORE,
@@ -1542,7 +1544,8 @@ static void chroma_mc(HEVCContext *s, int16_t *dst1, int16_t *dst2,
s->hevcdsp.put_hevc_epel[!!my][!!mx](dst1, dststride, src1, src1stride,
block_w, block_h, mx, my, lc->mc_buffer);
s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src2 - offset2, src2stride,
s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src2 - offset2,
src2stride, src2stride,
block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
x_off - EPEL_EXTRA_BEFORE,
y_off - EPEL_EXTRA_BEFORE,

@@ -1699,15 +1699,19 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
uint8_t *ebuf = s->edge_emu_buffer + 32;
s->vdsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
mb_y * 16, s->width, s->height);
s->vdsp.emulated_edge_mc(ebuf, ptr_y,
wrap_y, wrap_y,
16, 16, mb_x * 16, mb_y * 16,
s->width, s->height);
ptr_y = ebuf;
s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
mb_block_height, mb_x * 8, mb_y * 8,
s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb,
wrap_c, wrap_c,
8, mb_block_height, mb_x * 8, mb_y * 8,
s->width >> 1, s->height >> 1);
ptr_cb = ebuf + 18 * wrap_y;
s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
mb_block_height, mb_x * 8, mb_y * 8,
s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr,
wrap_c, wrap_c,
8, mb_block_height, mb_x * 8, mb_y * 8,
s->width >> 1, s->height >> 1);
ptr_cr = ebuf + 18 * wrap_y + 8;
}

@@ -63,7 +63,7 @@ static void gmc1_motion(MpegEncContext *s,
if ((unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0) ||
(unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)) {
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
linesize,
linesize, linesize,
17, 17,
src_x, src_y,
s->h_edge_pos, s->v_edge_pos);
@@ -109,7 +109,7 @@ static void gmc1_motion(MpegEncContext *s,
if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - 9, 0) ||
(unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - 9, 0)) {
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
uvlinesize,
uvlinesize, uvlinesize,
9, 9,
src_x, src_y,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
@@ -123,7 +123,7 @@ static void gmc1_motion(MpegEncContext *s,
ptr = ref_picture[2] + offset;
if (emu) {
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
uvlinesize,
uvlinesize, uvlinesize,
9, 9,
src_x, src_y,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
@@ -216,7 +216,7 @@ static inline int hpel_motion(MpegEncContext *s,
if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 1) - 8, 0) ||
(unsigned)src_y > FFMAX(s->v_edge_pos - (motion_y & 1) - 8, 0)) {
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
s->linesize,
s->linesize, s->linesize,
9, 9,
src_x, src_y, s->h_edge_pos,
s->v_edge_pos);
@@ -321,7 +321,7 @@ void mpeg_motion_internal(MpegEncContext *s,
return;
}
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
s->linesize,
s->linesize, s->linesize,
17, 17 + field_based,
src_x, src_y << field_based,
s->h_edge_pos, s->v_edge_pos);
@@ -329,12 +329,12 @@ void mpeg_motion_internal(MpegEncContext *s,
if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
s->uvlinesize,
s->uvlinesize, s->uvlinesize,
9, 9 + field_based,
uvsrc_x, uvsrc_y << field_based,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr,
s->uvlinesize,
s->uvlinesize, s->uvlinesize,
9, 9 + field_based,
uvsrc_x, uvsrc_y << field_based,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
@@ -542,7 +542,7 @@ static inline void qpel_motion(MpegEncContext *s,
if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 3) - 16, 0) ||
(unsigned)src_y > FFMAX(v_edge_pos - (motion_y & 3) - h, 0)) {
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
s->linesize,
s->linesize, s->linesize,
17, 17 + field_based,
src_x, src_y << field_based,
s->h_edge_pos, s->v_edge_pos);
@@ -550,12 +550,12 @@ static inline void qpel_motion(MpegEncContext *s,
if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
s->uvlinesize,
s->uvlinesize, s->uvlinesize,
9, 9 + field_based,
uvsrc_x, uvsrc_y << field_based,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr,
s->uvlinesize,
s->uvlinesize, s->uvlinesize,
9, 9 + field_based,
uvsrc_x, uvsrc_y << field_based,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
@@ -625,8 +625,8 @@ static void chroma_4mv_motion(MpegEncContext *s,
if (s->flags & CODEC_FLAG_EMU_EDGE) {
if ((unsigned)src_x > FFMAX((s->h_edge_pos >> 1) - (dxy & 1) - 8, 0) ||
(unsigned)src_y > FFMAX((s->v_edge_pos >> 1) - (dxy >> 1) - 8, 0)) {
s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
ptr, s->uvlinesize,
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
s->uvlinesize, s->uvlinesize,
9, 9, src_x, src_y,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
ptr = s->edge_emu_buffer;
@@ -637,8 +637,8 @@ static void chroma_4mv_motion(MpegEncContext *s,
ptr = ref_picture[2] + offset;
if (emu) {
s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
ptr, s->uvlinesize,
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
s->uvlinesize, s->uvlinesize,
9, 9, src_x, src_y,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
ptr = s->edge_emu_buffer;
@@ -786,7 +786,7 @@ static inline void apply_8x8(MpegEncContext *s,
if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 3) - 8, 0) ||
(unsigned)src_y > FFMAX(s->v_edge_pos - (motion_y & 3) - 8, 0)) {
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
s->linesize,
s->linesize, s->linesize,
9, 9,
src_x, src_y,
s->h_edge_pos,

@@ -723,12 +723,18 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type,
uint8_t *uvbuf = s->edge_emu_buffer + 22 * s->linesize;
srcY -= 2 + 2*s->linesize;
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, (width<<3)+6, (height<<3)+6,
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
s->linesize, s->linesize,
(width << 3) + 6, (height << 3) + 6,
src_x - 2, src_y - 2, s->h_edge_pos, s->v_edge_pos);
srcY = s->edge_emu_buffer + 2 + 2*s->linesize;
s->vdsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, (width<<2)+1, (height<<2)+1,
s->vdsp.emulated_edge_mc(uvbuf, srcU,
s->uvlinesize, s->uvlinesize,
(width << 2) + 1, (height << 2) + 1,
uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
s->vdsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, (width<<2)+1, (height<<2)+1,
s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
s->uvlinesize, s->uvlinesize,
(width << 2) + 1, (height << 2) + 1,
uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
srcU = uvbuf;
srcV = uvbuf + 16;

@@ -312,7 +312,8 @@ static inline void svq3_mc_dir_part(SVQ3Context *s,
src = pic->f.data[0] + mx + my * h->linesize;
if (emu) {
h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->linesize,
h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
h->linesize, h->linesize,
width + 1, height + 1,
mx, my, s->h_edge_pos, s->v_edge_pos);
src = h->edge_emu_buffer;
@@ -338,7 +339,8 @@ static inline void svq3_mc_dir_part(SVQ3Context *s,
src = pic->f.data[i] + mx + my * h->uvlinesize;
if (emu) {
h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->uvlinesize,
h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src,
h->uvlinesize, h->uvlinesize,
width + 1, height + 1,
mx, my, (s->h_edge_pos >> 1),
s->v_edge_pos >> 1);

@@ -452,14 +452,19 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
srcY -= s->mspel * (1 + s->linesize);
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
s->linesize, s->linesize,
17 + s->mspel * 2, 17 + s->mspel * 2,
src_x - s->mspel, src_y - s->mspel,
s->h_edge_pos, v_edge_pos);
srcY = s->edge_emu_buffer;
s->vdsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
s->vdsp.emulated_edge_mc(uvbuf, srcU,
s->uvlinesize, s->uvlinesize,
8 + 1, 8 + 1,
uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
s->vdsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
s->uvlinesize, s->uvlinesize,
8 + 1, 8 + 1,
uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
srcU = uvbuf;
srcV = uvbuf + 16;
@@ -693,7 +698,8 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
|| (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
srcY -= s->mspel * (1 + (s->linesize << fieldmv));
/* check emulate edge stride and offset */
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
s->linesize, s->linesize,
9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
src_x - s->mspel, src_y - (s->mspel << fieldmv),
s->h_edge_pos, v_edge_pos);
@@ -908,10 +914,12 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
|| s->h_edge_pos < 18 || v_edge_pos < 18
|| (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
|| (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
s->vdsp.emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize,
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
s->uvlinesize, s->uvlinesize,
8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
s->h_edge_pos >> 1, v_edge_pos >> 1);
s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
s->uvlinesize, s->uvlinesize,
8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
s->h_edge_pos >> 1, v_edge_pos >> 1);
srcU = s->edge_emu_buffer;
@@ -1024,10 +1032,12 @@ static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
|| s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
|| (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
|| (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU, s->uvlinesize,
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
s->uvlinesize, s->uvlinesize,
5, (5 << fieldmv), uvsrc_x, uvsrc_y,
s->h_edge_pos >> 1, v_edge_pos);
s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
s->uvlinesize, s->uvlinesize,
5, (5 << fieldmv), uvsrc_x, uvsrc_y,
s->h_edge_pos >> 1, v_edge_pos);
srcU = s->edge_emu_buffer;
@@ -1955,14 +1965,19 @@ static void vc1_interp_mc(VC1Context *v)
uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
srcY -= s->mspel * (1 + s->linesize);
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
s->linesize, s->linesize,
17 + s->mspel * 2, 17 + s->mspel * 2,
src_x - s->mspel, src_y - s->mspel,
s->h_edge_pos, v_edge_pos);
srcY = s->edge_emu_buffer;
s->vdsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
s->vdsp.emulated_edge_mc(uvbuf, srcU,
s->uvlinesize, s->uvlinesize,
8 + 1, 8 + 1,
uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
s->vdsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
s->uvlinesize, s->uvlinesize,
8 + 1, 8 + 1,
uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
srcU = uvbuf;
srcV = uvbuf + 16;

@@ -36,8 +36,10 @@ typedef struct VideoDSPContext {
*
* @param buf destination buffer
* @param src source buffer
* @param linesize number of bytes between 2 vertically adjacent samples
* in both the source and destination buffers
* @param buf_linesize number of bytes between 2 vertically adjacent
* samples in the destination buffer
* @param src_linesize number of bytes between 2 vertically adjacent
* samples in the source buffer
* @param block_w width of block
* @param block_h height of block
* @param src_x x coordinate of the top left sample of the block in the
@@ -48,7 +50,9 @@ typedef struct VideoDSPContext {
* @param h height of the source buffer
*/
void (*emulated_edge_mc)(uint8_t *buf, const uint8_t *src,
ptrdiff_t linesize, int block_w, int block_h,
ptrdiff_t buf_linesize,
ptrdiff_t src_linesize,
int block_w, int block_h,
int src_x, int src_y, int w, int h);
/**

@@ -22,7 +22,8 @@
#include "bit_depth_template.c"
static void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src,
ptrdiff_t linesize,
ptrdiff_t buf_linesize,
ptrdiff_t src_linesize,
int block_w, int block_h,
int src_x, int src_y, int w, int h)
{
@@ -30,10 +31,10 @@ static void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src,
int start_y, start_x, end_y, end_x;
if (src_y >= h) {
src += (h - 1 - src_y) * linesize;
src += (h - 1 - src_y) * src_linesize;
src_y = h - 1;
} else if (src_y <= -block_h) {
src += (1 - block_h - src_y) * linesize;
src += (1 - block_h - src_y) * src_linesize;
src_y = 1 - block_h;
}
if (src_x >= w) {
@@ -52,30 +53,30 @@ static void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src,
assert(start_x < end_x && block_w);
w = end_x - start_x;
src += start_y * linesize + start_x * sizeof(pixel);
src += start_y * src_linesize + start_x * sizeof(pixel);
buf += start_x * sizeof(pixel);
// top
for (y = 0; y < start_y; y++) {
memcpy(buf, src, w * sizeof(pixel));
buf += linesize;
buf += buf_linesize;
}
// copy existing part
for (; y < end_y; y++) {
memcpy(buf, src, w * sizeof(pixel));
src += linesize;
buf += linesize;
src += src_linesize;
buf += buf_linesize;
}
// bottom
src -= linesize;
src -= src_linesize;
for (; y < block_h; y++) {
memcpy(buf, src, w * sizeof(pixel));
buf += linesize;
buf += buf_linesize;
}
buf -= block_h * linesize + start_x * sizeof(pixel);
buf -= block_h * buf_linesize + start_x * sizeof(pixel);
while (block_h--) {
pixel *bufp = (pixel *) buf;
@@ -88,6 +89,6 @@ static void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src,
for (x = end_x; x < block_w; x++) {
bufp[x] = bufp[end_x - 1];
}
buf += linesize;
buf += buf_linesize;
}
}
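The template above is generic over pixel size; the following self-contained
sketch (hedged: edge_mc_sketch and the 2x2 demo frame are illustrative, not
part of the commit) specializes it to 8-bit pixels, drops the gross
out-of-range clamps (the demo block overlaps the frame), and shows a run in
which the destination stride (8) differs from the source stride (2):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <stddef.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static void edge_mc_sketch(uint8_t *buf, const uint8_t *src,
                               ptrdiff_t buf_linesize, ptrdiff_t src_linesize,
                               int block_w, int block_h,
                               int src_x, int src_y, int w, int h)
    {
        int x, y;
        int start_y = MAX(0, -src_y), start_x = MAX(0, -src_x);
        int end_y   = MIN(block_h, h - src_y);
        int end_x   = MIN(block_w, w - src_x);
        int cw      = end_x - start_x;

        src += start_y * src_linesize + start_x;
        buf += start_x;
        for (y = 0; y < start_y; y++, buf += buf_linesize)     /* top    */
            memcpy(buf, src, cw);
        for (; y < end_y; y++, src += src_linesize, buf += buf_linesize)
            memcpy(buf, src, cw);                              /* body   */
        src -= src_linesize;
        for (; y < block_h; y++, buf += buf_linesize)          /* bottom */
            memcpy(buf, src, cw);

        buf -= block_h * buf_linesize + start_x;
        while (block_h--) {                            /* left/right fill */
            for (x = 0; x < start_x; x++)
                buf[x] = buf[start_x];
            for (x = end_x; x < block_w; x++)
                buf[x] = buf[end_x - 1];
            buf += buf_linesize;
        }
    }

    int main(void)
    {
        /* 2x2 frame with stride 2; emulate a 4x4 block at (0,0) into a
         * buffer with stride 8, so dst and src strides differ */
        static const uint8_t frame[4] = { 1, 2, 3, 4 };
        uint8_t block[4 * 8];
        edge_mc_sketch(block, frame, 8, 2, 4, 4, 0, 0, 2, 2);
        for (int y = 0; y < 4; y++, putchar('\n'))
            for (int x = 0; x < 4; x++)
                printf("%d ", block[y * 8 + x]);
        return 0;  /* prints: 1 2 2 2 / 3 4 4 4 / 3 4 4 4 / 3 4 4 4 */
    }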

@@ -1543,7 +1543,11 @@ static void render_slice(Vp3DecodeContext *s, int slice)
uint8_t *temp= s->edge_emu_buffer;
if(stride<0) temp -= 8*stride;
s->vdsp.emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height);
s->vdsp.emulated_edge_mc(temp, motion_source,
stride, stride,
9, 9, src_x, src_y,
plane_width,
plane_height);
motion_source= temp;
}
}

@@ -341,7 +341,8 @@ static void vp56_mc(VP56Context *s, int b, int plane, uint8_t *src,
y<0 || y+12>=s->plane_height[plane]) {
s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
src + s->block_offset[b] + (dy-2)*stride + (dx-2),
stride, 12, 12, x, y,
stride, stride,
12, 12, x, y,
s->plane_width[plane],
s->plane_height[plane]);
src_block = s->edge_emu_buffer;

@@ -1196,7 +1196,9 @@ void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst,
src += y_off * linesize + x_off;
if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
s->vdsp.emulated_edge_mc(td->edge_emu_buffer, src - my_idx * linesize - mx_idx, linesize,
s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
src - my_idx * linesize - mx_idx,
linesize, linesize,
block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
x_off - mx_idx, y_off - my_idx, width, height);
src = td->edge_emu_buffer + mx_idx + linesize * my_idx;
@@ -1246,13 +1248,17 @@ void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1, uint8_t *dst
ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 3, 0);
if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
s->vdsp.emulated_edge_mc(td->edge_emu_buffer, src1 - my_idx * linesize - mx_idx, linesize,
s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
src1 - my_idx * linesize - mx_idx,
linesize, linesize,
block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
x_off - mx_idx, y_off - my_idx, width, height);
src1 = td->edge_emu_buffer + mx_idx + linesize * my_idx;
mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
s->vdsp.emulated_edge_mc(td->edge_emu_buffer, src2 - my_idx * linesize - mx_idx, linesize,
s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
src2 - my_idx * linesize - mx_idx,
linesize, linesize,
block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
x_off - mx_idx, y_off - my_idx, width, height);
src2 = td->edge_emu_buffer + mx_idx + linesize * my_idx;

@@ -120,8 +120,12 @@ void ff_mspel_motion(MpegEncContext *s,
if(s->flags&CODEC_FLAG_EMU_EDGE){
if(src_x<1 || src_y<1 || src_x + 17 >= s->h_edge_pos
|| src_y + h+1 >= v_edge_pos){
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr - 1 - s->linesize, s->linesize, 19, 19,
src_x-1, src_y-1, s->h_edge_pos, s->v_edge_pos);
s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
ptr - 1 - s->linesize,
s->linesize, s->linesize,
19, 19,
src_x - 1, src_y - 1,
s->h_edge_pos, s->v_edge_pos);
ptr= s->edge_emu_buffer + 1 + s->linesize;
emu=1;
}
@@ -161,16 +165,22 @@ void ff_mspel_motion(MpegEncContext *s,
offset = (src_y * uvlinesize) + src_x;
ptr = ref_picture[1] + offset;
if(emu){
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
s->uvlinesize, s->uvlinesize,
9, 9,
src_x, src_y,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
ptr= s->edge_emu_buffer;
}
pix_op[1][dxy](dest_cb, ptr, uvlinesize, h >> 1);
ptr = ref_picture[2] + offset;
if(emu){
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
s->uvlinesize, s->uvlinesize,
9, 9,
src_x, src_y,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
ptr= s->edge_emu_buffer;
}
pix_op[1][dxy](dest_cr, ptr, uvlinesize, h >> 1);

@@ -23,576 +23,394 @@
SECTION .text
; void ff_emu_edge_core(uint8_t *buf, const uint8_t *src, x86_reg linesize,
; x86_reg start_y, x86_reg end_y, x86_reg block_h,
; x86_reg start_x, x86_reg end_x, x86_reg block_w);
;
; The actual function itself is below. It basically wraps a very simple
; w = end_x - start_x
; if (w) {
; if (w > 22) {
; jump to the slow loop functions
; } else {
; jump to the fast loop functions
; }
; }
;
; ... and then the same for left/right extend also. See below for loop
; function implementations. Fast are fixed-width, slow is variable-width
%macro EMU_EDGE_FUNC 0
%if ARCH_X86_64
%define w_reg r7
cglobal emu_edge_core, 6, 9, 1
mov r8, r5 ; save block_h
%else
%define w_reg r6
cglobal emu_edge_core, 2, 7, 0
mov r4, r4m ; end_y
mov r5, r5m ; block_h
%endif
; start with vertical extend (top/bottom) and body pixel copy
mov w_reg, r7m
sub w_reg, r6m ; w = end_x - start_x
sub r5, r4
%if ARCH_X86_64
sub r4, r3
%else
sub r4, dword r3m
%endif
cmp w_reg, 22
jg .slow_v_extend_loop
%if ARCH_X86_32
mov r2, r2m ; linesize
%endif
sal w_reg, 7 ; w * 128
%ifdef PIC
lea rax, [.emuedge_v_extend_1 - (.emuedge_v_extend_2 - .emuedge_v_extend_1)]
add w_reg, rax
%else
lea w_reg, [.emuedge_v_extend_1 - (.emuedge_v_extend_2 - .emuedge_v_extend_1)+w_reg]
%endif
call w_reg ; fast top extend, body copy and bottom extend
.v_extend_end:
; slow vertical extension loop function. Works with variable-width, and
; does per-line reading/writing of source data
%macro V_COPY_ROW 2 ; type (top/body/bottom), h
.%1_y_loop: ; do {
mov wq, r7mp ; initialize w (r7mp = wmp)
.%1_x_loop: ; do {
movu m0, [srcq+wq] ; m0 = read($mmsize)
movu [dstq+wq], m0 ; write(m0, $mmsize)
add wq, mmsize ; w -= $mmsize
cmp wq, -mmsize ; } while (w > $mmsize);
jl .%1_x_loop
movu m0, [srcq-mmsize] ; m0 = read($mmsize)
movu [dstq-mmsize], m0 ; write(m0, $mmsize)
%ifidn %1, body ; if ($type == body) {
add srcq, src_strideq ; src += src_stride
%endif ; }
add dstq, dst_strideq ; dst += dst_stride
dec %2 ; } while (--$h);
jnz .%1_y_loop
%endmacro
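The x_loop above counts wq upward from -w so that one register serves as
both offset and loop counter, and a final unaligned store at the row end
mops up the tail with overlap. A hedged C analogue (copy_row_sketch is
illustrative and, like the variable-width asm path, assumes w > 16):

    #include <string.h>
    #include <stdint.h>
    #include <stddef.h>

    /* one "body" row: copy w bytes in 16-byte unaligned chunks */
    static void copy_row_sketch(uint8_t *dst, const uint8_t *src, ptrdiff_t w)
    {
        const ptrdiff_t V = 16;           /* mmsize in the asm           */
        dst += w;                         /* point at the row end ...    */
        src += w;
        w = -w;                           /* ... and count up from -w    */
        do {
            memcpy(dst + w, src + w, V);  /* movu m0,[srcq+wq] / store   */
            w += V;
        } while (w < -V);                 /* cmp wq,-mmsize; jl .x_loop  */
        memcpy(dst - V, src - V, V);      /* overlapping tail store      */
    }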
; horizontal extend (left/right)
mov w_reg, r6m ; start_x
sub r0, w_reg
%macro vvar_fn 0
; .----. <- zero
; | | <- top is copied from first line in body of source
; |----| <- start_y
; | | <- body is copied verbatim (line-by-line) from source
; |----| <- end_y
; | | <- bottom is copied from last line in body of source
; '----' <- bh
%if ARCH_X86_64
mov r3, r0 ; backup of buf+block_h*linesize
mov r5, r8
%else
mov r0m, r0 ; backup of buf+block_h*linesize
mov r5, r5m
cglobal emu_edge_vvar, 7, 8, 1, dst, src, dst_stride, src_stride, \
start_y, end_y, bh, w
%else ; x86-32
cglobal emu_edge_vvar, 1, 6, 1, dst, src, start_y, end_y, bh, w
%define src_strideq r3mp
%define dst_strideq r2mp
mov srcq, r1mp
mov start_yq, r4mp
mov end_yq, r5mp
mov bhq, r6mp
%endif
test w_reg, w_reg
jz .right_extend
cmp w_reg, 22
jg .slow_left_extend_loop
mov r1, w_reg
dec w_reg
; FIXME we can do a if size == 1 here if that makes any speed difference, test me
sar w_reg, 1
sal w_reg, 6
; r0=buf+block_h*linesize,r7(64)/r6(32)=start_x offset for funcs
; r6(rax)/r3(ebx)=val,r2=linesize,r1=start_x,r5=block_h
%ifdef PIC
lea rax, [.emuedge_extend_left_2]
add w_reg, rax
%else
lea w_reg, [.emuedge_extend_left_2+w_reg]
%endif
call w_reg
sub bhq, end_yq ; bh -= end_y
sub end_yq, start_yq ; end_y -= start_y
add srcq, r7mp ; (r7mp = wmp)
add dstq, r7mp ; (r7mp = wmp)
neg r7mp ; (r7mp = wmp)
test start_yq, start_yq ; if (start_q) {
jz .body
V_COPY_ROW top, start_yq ; v_copy_row(top, start_yq)
.body: ; }
V_COPY_ROW body, end_yq ; v_copy_row(body, end_yq)
test bhq, bhq ; if (bh) {
jz .end
sub srcq, src_strideq ; src -= src_stride
V_COPY_ROW bottom, bhq ; v_copy_row(bottom, bh)
.end: ; }
RET
%endmacro
; now r3(64)/r0(32)=buf,r2=linesize,r8/r5=block_h,r6/r3=val, r7/r6=end_x, r1=block_w
.right_extend:
%if ARCH_X86_32
mov r0, r0m
mov r5, r5m
INIT_MMX mmx
vvar_fn
%endif
mov w_reg, r7m ; end_x
mov r1, r8m ; block_w
mov r4, r1
sub r1, w_reg
jz .h_extend_end ; if (end_x == block_w) goto h_extend_end
cmp r1, 22
jg .slow_right_extend_loop
dec r1
; FIXME we can do a if size == 1 here if that makes any speed difference, test me
sar r1, 1
sal r1, 6
%ifdef PIC
lea rax, [.emuedge_extend_right_2]
add r1, rax
%else
lea r1, [.emuedge_extend_right_2+r1]
%endif
call r1
.h_extend_end:
INIT_XMM sse
vvar_fn
%macro hvar_fn 0
cglobal emu_edge_hvar, 5, 6, 1, dst, dst_stride, start_x, n_words, h, w
lea dstq, [dstq+n_wordsq*2]
neg n_wordsq
lea start_xq, [start_xq+n_wordsq*2]
.y_loop: ; do {
; FIXME also write a ssse3 version using pshufb
movzx wd, byte [dstq+start_xq] ; w = read(1)
imul wd, 0x01010101 ; w *= 0x01010101
movd m0, wd
mov wq, n_wordsq ; initialize w
%if cpuflag(sse)
shufps m0, m0, q0000 ; splat
%else ; mmx
punpckldq m0, m0 ; splat
%endif ; mmx/sse
.x_loop: ; do {
movu [dstq+wq*2], m0 ; write($reg, $mmsize)
add wq, mmsize/2 ; w -= $mmsize/2
cmp wq, -mmsize/2 ; } while (w > $mmsize/2)
jl .x_loop
movu [dstq-mmsize], m0 ; write($reg, $mmsize)
add dstq, dst_strideq ; dst += dst_stride
dec hq ; } while (h--)
jnz .y_loop
RET
%endmacro
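Per row, hvar_fn reads the single edge pixel, multiplies it by 0x01010101 so
the byte fills a 32-bit word, broadcasts that word through the MMX/SSE
register and stores it across the fill region. A hedged C analogue of the
left-extend case (h_extend_sketch is illustrative, scalar, and byte-exact
rather than vectorized):

    #include <string.h>
    #include <stdint.h>
    #include <stddef.h>

    /* fill dst[0 .. start_x-1] with dst[start_x] on each of bh rows */
    static void h_extend_sketch(uint8_t *dst, ptrdiff_t dst_stride,
                                int start_x, int bh)
    {
        while (bh--) {
            uint32_t v = dst[start_x] * 0x01010101u;  /* imul byte splat */
            int x;
            for (x = 0; x + 4 <= start_x; x += 4)
                memcpy(dst + x, &v, 4);
            for (; x < start_x; x++)                  /* tail bytes      */
                dst[x] = (uint8_t)v;
            dst += dst_stride;
        }
    }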
%if ARCH_X86_64
%define vall al
%define valh ah
%define valw ax
%define valw2 r7w
%define valw3 r3w
%if WIN64
%define valw4 r7w
%else ; unix64
%define valw4 r3w
%endif
%define vald eax
%else
%define vall bl
%define valh bh
%define valw bx
%define valw2 r6w
%define valw3 valw2
%define valw4 valw3
%define vald ebx
%define stack_offset 0x14
%if ARCH_X86_32
INIT_MMX mmx
hvar_fn
%endif
%endmacro
INIT_XMM sse
hvar_fn
; macro to read/write a horizontal number of pixels (%2) to/from registers
; on x86-64, - fills xmm0-15 for consecutive sets of 16 pixels
; - if (%2 & 15 == 8) fills the last 8 bytes into rax
; - else if (%2 & 8) fills 8 bytes into mm0
; - if (%2 & 7 == 4) fills the last 4 bytes into rax
; - else if (%2 & 4) fills 4 bytes into mm0-1
; - if (%2 & 3 == 3) fills 2 bytes into r7/r3, and 1 into eax
; (note that we're using r3 for body/bottom because it's a shorter
; opcode, and then the loop fits in 128 bytes)
; - else fills remaining bytes into rax
; on x86-32, - fills mm0-7 for consecutive sets of 8 pixels
; - if (%2 & 7 == 4) fills 4 bytes into ebx
; - else if (%2 & 4) fills 4 bytes into mm0-7
; - if (%2 & 3 == 3) fills 2 bytes into r6, and 1 into ebx
; - else fills remaining bytes into ebx
; on sse, - fills xmm0-15 for consecutive sets of 16 pixels
; - if (%2 & 8) fills 8 bytes into xmm$next
; - if (%2 & 4) fills 4 bytes into xmm$next
; - if (%2 & 3) fills 1, 2 or 4 bytes in eax
; on mmx, - fills mm0-7 for consecutive sets of 8 pixels
; - if (%2 & 4) fills 4 bytes into mm$next
; - if (%2 & 3) fills 1, 2 or 4 bytes in eax
; writing data out is in the same way
%macro READ_NUM_BYTES 2
%assign %%src_off 0 ; offset in source buffer
%assign %%smidx 0 ; mmx register idx
%assign %%sxidx 0 ; xmm register idx
%if cpuflag(sse)
%rep %2/16
movups xmm %+ %%sxidx, [r1+%%src_off]
%assign %%src_off %%src_off+16
%assign %%sxidx %%sxidx+1
%endrep ; %2/16
%assign %%off 0 ; offset in source buffer
%assign %%idx 0 ; mmx/xmm register index
%rep %2/mmsize
movu m %+ %%idx, [srcq+%%off]
%assign %%off %%off+mmsize
%assign %%idx %%idx+1
%endrep ; %2/mmsize
%if mmsize == 16
%if (%2-%%off) >= 8
%if %2 > 16 && (%2-%%off) > 8
movu m %+ %%idx, [srcq+%2-16]
%assign %%off %2
%else
movq m %+ %%idx, [srcq+%%off]
%assign %%off %%off+8
%endif
%assign %%idx %%idx+1
%endif ; (%2-%%off) >= 8
%endif
%if ARCH_X86_64
%if (%2-%%src_off) == 8
mov rax, [r1+%%src_off]
%assign %%src_off %%src_off+8
%endif ; (%2-%%src_off) == 8
%endif ; x86-64
%rep (%2-%%src_off)/8
movq mm %+ %%smidx, [r1+%%src_off]
%assign %%src_off %%src_off+8
%assign %%smidx %%smidx+1
%endrep ; (%2-%%src_off)/8
%if (%2-%%src_off) == 4
mov vald, [r1+%%src_off]
%elif (%2-%%src_off) & 4
movd mm %+ %%smidx, [r1+%%src_off]
%assign %%src_off %%src_off+4
%endif ; (%2-%%src_off) ==/& 4
%if (%2-%%src_off) == 1
mov vall, [r1+%%src_off]
%elif (%2-%%src_off) == 2
mov valw, [r1+%%src_off]
%elif (%2-%%src_off) == 3
%ifidn %1, top
mov valw2, [r1+%%src_off]
%if (%2-%%off) >= 4
%if %2 > 8 && (%2-%%off) > 4
movq m %+ %%idx, [srcq+%2-8]
%assign %%off %2
%else
movd m %+ %%idx, [srcq+%%off]
%assign %%off %%off+4
%endif
%assign %%idx %%idx+1
%endif ; (%2-%%off) >= 4
%if (%2-%%off) >= 1
%if %2 >= 4
movd m %+ %%idx, [srcq+%2-4]
%elif (%2-%%off) == 1
mov valb, [srcq+%2-1]
%elif (%2-%%off) == 2
mov valw, [srcq+%2-2]
%elifidn %1, body
mov valw3, [r1+%%src_off]
%elifidn %1, bottom
mov valw4, [r1+%%src_off]
%endif ; %1 ==/!= top
mov vall, [r1+%%src_off+2]
%endif ; (%2-%%src_off) == 1/2/3
mov vald, [srcq+%2-3]
%else
movd m %+ %%idx, [srcq+%2-3]
%endif
%endif ; (%2-%%off) >= 1
%endmacro ; READ_NUM_BYTES
%macro WRITE_NUM_BYTES 2
%assign %%dst_off 0 ; offset in destination buffer
%assign %%dmidx 0 ; mmx register idx
%assign %%dxidx 0 ; xmm register idx
%if cpuflag(sse)
%rep %2/16
movups [r0+%%dst_off], xmm %+ %%dxidx
%assign %%dst_off %%dst_off+16
%assign %%dxidx %%dxidx+1
%endrep ; %2/16
%assign %%off 0 ; offset in destination buffer
%assign %%idx 0 ; mmx/xmm register index
%rep %2/mmsize
movu [dstq+%%off], m %+ %%idx
%assign %%off %%off+mmsize
%assign %%idx %%idx+1
%endrep ; %2/mmsize
%if mmsize == 16
%if (%2-%%off) >= 8
%if %2 > 16 && (%2-%%off) > 8
movu [dstq+%2-16], m %+ %%idx
%assign %%off %2
%else
movq [dstq+%%off], m %+ %%idx
%assign %%off %%off+8
%endif
%assign %%idx %%idx+1
%endif ; (%2-%%off) >= 8
%endif
%if ARCH_X86_64
%if (%2-%%dst_off) == 8
mov [r0+%%dst_off], rax
%assign %%dst_off %%dst_off+8
%endif ; (%2-%%dst_off) == 8
%endif ; x86-64
%rep (%2-%%dst_off)/8
movq [r0+%%dst_off], mm %+ %%dmidx
%assign %%dst_off %%dst_off+8
%assign %%dmidx %%dmidx+1
%endrep ; (%2-%%dst_off)/8
%if (%2-%%dst_off) == 4
mov [r0+%%dst_off], vald
%elif (%2-%%dst_off) & 4
movd [r0+%%dst_off], mm %+ %%dmidx
%assign %%dst_off %%dst_off+4
%endif ; (%2-%%dst_off) ==/& 4
%if (%2-%%dst_off) == 1
mov [r0+%%dst_off], vall
%elif (%2-%%dst_off) == 2
mov [r0+%%dst_off], valw
%elif (%2-%%dst_off) == 3
%ifidn %1, top
mov [r0+%%dst_off], valw2
%if (%2-%%off) >= 4
%if %2 > 8 && (%2-%%off) > 4
movq [dstq+%2-8], m %+ %%idx
%assign %%off %2
%else
movd [dstq+%%off], m %+ %%idx
%assign %%off %%off+4
%endif
%assign %%idx %%idx+1
%endif ; (%2-%%off) >= 4
%if (%2-%%off) >= 1
%if %2 >= 4
movd [dstq+%2-4], m %+ %%idx
%elif (%2-%%off) == 1
mov [dstq+%2-1], valb
%elif (%2-%%off) == 2
mov [dstq+%2-2], valw
%elifidn %1, body
mov [r0+%%dst_off], valw3
%elifidn %1, bottom
mov [r0+%%dst_off], valw4
%endif ; %1 ==/!= top
mov [r0+%%dst_off+2], vall
%endif ; (%2-%%dst_off) == 1/2/3
mov [dstq+%2-3], valw
shr vald, 16
mov [dstq+%2-1], valb
%else
movd vald, m %+ %%idx
mov [dstq+%2-3], valw
shr vald, 16
mov [dstq+%2-1], valb
%endif
%endif ; (%2-%%off) >= 1
%endmacro ; WRITE_NUM_BYTES
; vertical top/bottom extend and body copy fast loops
; these are function pointers to set-width line copy functions, i.e.
; they read a fixed number of pixels into set registers, and write
; those out into the destination buffer
; r0=buf,r1=src,r2=linesize,r3(64)/r3m(32)=start_x,r4=end_y,r5=block_h
; r6(eax/64)/r3(ebx/32)=val_reg
%macro VERTICAL_EXTEND 0
%assign %%n 1
%rep 22
ALIGN 128
.emuedge_v_extend_ %+ %%n:
; extend pixels above body
%macro VERTICAL_EXTEND 2
%assign %%n %1
%rep 1+%2-%1
%if %%n <= 3
%if ARCH_X86_64
test r3 , r3 ; if (!start_y)
jz .emuedge_copy_body_ %+ %%n %+ _loop ; goto body
%else ; ARCH_X86_32
cmp dword r3m, 0
je .emuedge_copy_body_ %+ %%n %+ _loop
%endif ; ARCH_X86_64/32
READ_NUM_BYTES top, %%n ; read bytes
.emuedge_extend_top_ %+ %%n %+ _loop: ; do {
WRITE_NUM_BYTES top, %%n ; write bytes
add r0 , r2 ; dst += linesize
cglobal emu_edge_vfix %+ %%n, 6, 8, 0, dst, src, dst_stride, src_stride, \
start_y, end_y, val, bh
mov bhq, r6mp ; r6mp = bhmp
%else ; x86-32
cglobal emu_edge_vfix %+ %%n, 0, 6, 0, val, dst, src, start_y, end_y, bh
mov dstq, r0mp
mov srcq, r1mp
mov start_yq, r4mp
mov end_yq, r5mp
mov bhq, r6mp
%define dst_strideq r2mp
%define src_strideq r3mp
%endif ; x86-64/32
%else
%if ARCH_X86_64
dec r3d
%else ; ARCH_X86_32
dec dword r3m
%endif ; ARCH_X86_64/32
jnz .emuedge_extend_top_ %+ %%n %+ _loop ; } while (--start_y)
cglobal emu_edge_vfix %+ %%n, 7, 7, 1, dst, src, dst_stride, src_stride, \
start_y, end_y, bh
%else ; x86-32
cglobal emu_edge_vfix %+ %%n, 1, 5, 1, dst, src, start_y, end_y, bh
mov srcq, r1mp
mov start_yq, r4mp
mov end_yq, r5mp
mov bhq, r6mp
%define dst_strideq r2mp
%define src_strideq r3mp
%endif ; x86-64/32
%endif
; FIXME move this to c wrapper?
sub bhq, end_yq ; bh -= end_y
sub end_yq, start_yq ; end_y -= start_y
; extend pixels above body
test start_yq, start_yq ; if (start_y) {
jz .body_loop
READ_NUM_BYTES top, %%n ; $variable_regs = read($n)
.top_loop: ; do {
WRITE_NUM_BYTES top, %%n ; write($variable_regs, $n)
add dstq, dst_strideq ; dst += linesize
dec start_yq ; } while (--start_y)
jnz .top_loop ; }
; copy body pixels
.emuedge_copy_body_ %+ %%n %+ _loop: ; do {
READ_NUM_BYTES body, %%n ; read bytes
WRITE_NUM_BYTES body, %%n ; write bytes
add r0 , r2 ; dst += linesize
add r1 , r2 ; src += linesize
dec r4d
jnz .emuedge_copy_body_ %+ %%n %+ _loop ; } while (--end_y)
.body_loop: ; do {
READ_NUM_BYTES body, %%n ; $variable_regs = read($n)
WRITE_NUM_BYTES body, %%n ; write($variable_regs, $n)
add dstq, dst_strideq ; dst += dst_stride
add srcq, src_strideq ; src += src_stride
dec end_yq ; } while (--end_y)
jnz .body_loop
; copy bottom pixels
test r5 , r5 ; if (!block_h)
jz .emuedge_v_extend_end_ %+ %%n ; goto end
sub r1 , r2 ; src -= linesize
READ_NUM_BYTES bottom, %%n ; read bytes
.emuedge_extend_bottom_ %+ %%n %+ _loop: ; do {
WRITE_NUM_BYTES bottom, %%n ; write bytes
add r0 , r2 ; dst += linesize
dec r5d
jnz .emuedge_extend_bottom_ %+ %%n %+ _loop ; } while (--block_h)
.emuedge_v_extend_end_ %+ %%n:
%if ARCH_X86_64
ret
%else ; ARCH_X86_32
rep ret
%endif ; ARCH_X86_64/32
test bhq, bhq ; if (block_h) {
jz .end
sub srcq, src_strideq ; src -= linesize
READ_NUM_BYTES bottom, %%n ; $variable_regs = read($n)
.bottom_loop: ; do {
WRITE_NUM_BYTES bottom, %%n ; write($variable_regs, $n)
add dstq, dst_strideq ; dst += linesize
dec bhq ; } while (--bh)
jnz .bottom_loop ; }
.end:
RET
%assign %%n %%n+1
%endrep
%endmacro ; VERTICAL_EXTEND
%endrep ; 1+%2-%1
%endmacro ; VERTICAL_EXTEND
INIT_MMX mmx
VERTICAL_EXTEND 1, 15
%if ARCH_X86_32
VERTICAL_EXTEND 16, 22
%endif
INIT_XMM sse
VERTICAL_EXTEND 16, 22
; left/right (horizontal) fast extend functions
; these are essentially identical to the vertical extend ones above,
; just left/right separated because number of pixels to extend is
; obviously not the same on both sides.
; for reading, pixels are placed in eax (x86-64) or ebx (x86-32) in the
; lowest two bytes of the register (so val*0x0101), and are splatted
; into each byte of mm0 as well if n_pixels >= 8
%macro READ_V_PIXEL 2
mov vall, %2
mov valh, vall
%if %1 >= 8
movd mm0, vald
%if cpuflag(mmxext)
pshufw mm0, mm0, 0
%else ; mmx
punpcklwd mm0, mm0
punpckldq mm0, mm0
%endif ; sse
%endif ; %1 >= 8
%endmacro
%macro WRITE_V_PIXEL 2
%assign %%dst_off 0
%rep %1/8
movq [%2+%%dst_off], mm0
%assign %%dst_off %%dst_off+8
%endrep
%if %1 & 4
%if %1 == 2
movzx valw, byte %2
imul valw, 0x0101
%else
movzx vald, byte %2
imul vald, 0x01010101
%if %1 >= 8
movd [%2+%%dst_off], mm0
%else ; %1 < 8
mov [%2+%%dst_off] , valw
mov [%2+%%dst_off+2], valw
%endif ; %1 >=/< 8
%assign %%dst_off %%dst_off+4
%endif ; %1 & 4
%if %1&2
mov [%2+%%dst_off], valw
%endif ; %1 & 2
%endmacro
; r0=buf+block_h*linesize, r1=start_x, r2=linesize, r5=block_h, r6/r3=val
%macro LEFT_EXTEND 0
%assign %%n 2
%rep 11
ALIGN 64
.emuedge_extend_left_ %+ %%n: ; do {
sub r0, r2 ; dst -= linesize
READ_V_PIXEL %%n, [r0+r1] ; read pixels
WRITE_V_PIXEL %%n, r0 ; write pixels
dec r5
jnz .emuedge_extend_left_ %+ %%n ; } while (--block_h)
%if ARCH_X86_64
ret
%else ; ARCH_X86_32
rep ret
%endif ; ARCH_X86_64/32
%assign %%n %%n+2
%endrep
%endmacro ; LEFT_EXTEND
; r3/r0=buf+block_h*linesize, r2=linesize, r8/r5=block_h, r0/r6=end_x, r6/r3=val
%macro RIGHT_EXTEND 0
%assign %%n 2
%rep 11
ALIGN 64
.emuedge_extend_right_ %+ %%n: ; do {
%if ARCH_X86_64
sub r3, r2 ; dst -= linesize
READ_V_PIXEL %%n, [r3+w_reg-1] ; read pixels
WRITE_V_PIXEL %%n, r3+r4-%%n ; write pixels
dec r8
%else ; ARCH_X86_32
sub r0, r2 ; dst -= linesize
READ_V_PIXEL %%n, [r0+w_reg-1] ; read pixels
WRITE_V_PIXEL %%n, r0+r4-%%n ; write pixels
dec r5
%endif ; ARCH_X86_64/32
jnz .emuedge_extend_right_ %+ %%n ; } while (--block_h)
%if ARCH_X86_64
ret
%else ; ARCH_X86_32
rep ret
%endif ; ARCH_X86_64/32
%assign %%n %%n+2
%endrep
%if ARCH_X86_32
%define stack_offset 0x10
%endif
%endmacro ; RIGHT_EXTEND
; below follow the "slow" copy/extend functions, these act on a non-fixed
; width specified in a register, and run a loop to copy the full amount
; of bytes. They are optimized for copying of large amounts of pixels per
; line, so they unconditionally splat data into mm registers to copy 8
; bytes per loop iteration. It could be considered to use xmm for x86-64
; also, but I haven't optimized this as much (i.e. FIXME)
%macro V_COPY_NPX 4-5
%if %0 == 4
test w_reg, %4
jz .%1_skip_%4_px
%else ; %0 == 5
.%1_%4_px_loop:
movd m0, vald
%if mmsize == 16
shufps m0, m0, q0000
%else
punpckldq m0, m0
%endif
%3 %2, [r1+cnt_reg]
%3 [r0+cnt_reg], %2
add cnt_reg, %4
%if %0 == 5
sub w_reg, %4
test w_reg, %5
jnz .%1_%4_px_loop
%endif ; %1 >= 8
%endif
.%1_skip_%4_px:
%endmacro
%endmacro ; READ_V_PIXEL
%macro V_COPY_ROW 2
%ifidn %1, bottom
sub r1, linesize
%macro WRITE_V_PIXEL 2
%assign %%off 0
%rep %1/mmsize
movu [%2+%%off], m0
%assign %%off %%off+mmsize
%endrep ; %1/mmsize
%if mmsize == 16
%if %1-%%off >= 8
%if %1 > 16 && %1-%%off > 8
movu [%2+%1-16], m0
%assign %%off %1
%else
movq [%2+%%off], m0
%assign %%off %%off+8
%endif
.%1_copy_loop:
xor cnt_reg, cnt_reg
%if notcpuflag(sse)
%define linesize r2m
V_COPY_NPX %1, mm0, movq, 8, 0xFFFFFFF8
%else ; sse
V_COPY_NPX %1, xmm0, movups, 16, 0xFFFFFFF0
%if ARCH_X86_64
%define linesize r2
V_COPY_NPX %1, rax , mov, 8
%else ; ARCH_X86_32
%define linesize r2m
V_COPY_NPX %1, mm0, movq, 8
%endif ; ARCH_X86_64/32
%endif ; sse
V_COPY_NPX %1, vald, mov, 4
V_COPY_NPX %1, valw, mov, 2
V_COPY_NPX %1, vall, mov, 1
mov w_reg, cnt_reg
%ifidn %1, body
add r1, linesize
%endif ; %1-%%off >= 8
%endif
add r0, linesize
dec %2
jnz .%1_copy_loop
%endmacro
%macro SLOW_V_EXTEND 0
.slow_v_extend_loop:
; r0=buf,r1=src,r2(64)/r2m(32)=linesize,r3(64)/r3m(32)=start_x,r4=end_y,r5=block_h
; r8(64)/r3(later-64)/r2(32)=cnt_reg,r6(64)/r3(32)=val_reg,r7(64)/r6(32)=w=end_x-start_x
%if ARCH_X86_64
push r8 ; save old value of block_h
test r3, r3
%define cnt_reg r8
jz .do_body_copy ; if (!start_y) goto do_body_copy
V_COPY_ROW top, r3
%if %1-%%off >= 4
%if %1 > 8 && %1-%%off > 4
movq [%2+%1-8], m0
%assign %%off %1
%elif %1 >= 8 && %1-%%off >= 4
movd [%2+%%off], m0
%assign %%off %%off+4
%else
cmp dword r3m, 0
%define cnt_reg r2
je .do_body_copy ; if (!start_y) goto do_body_copy
V_COPY_ROW top, dword r3m
mov [%2+%%off], vald
%assign %%off %%off+4
%endif
%endif ; %1-%%off >= 4
.do_body_copy:
V_COPY_ROW body, r4
%if ARCH_X86_64
pop r8 ; restore old value of block_h
%define cnt_reg r3
%endif
test r5, r5
%if ARCH_X86_64
jz .v_extend_end
%if %1-%%off >= 2
%if %1 >= 8
movd [%2+%1-4], m0
%else
jz .skip_bottom_extend
mov [%2+%%off], valw
%endif
V_COPY_ROW bottom, r5
%if ARCH_X86_32
.skip_bottom_extend:
mov r2, r2m
%endif
jmp .v_extend_end
%endmacro
%endif ; (%1-%%off) >= 2
%endmacro ; WRITE_V_PIXEL
%macro H_EXTEND 2
%assign %%n %1
%rep 1+(%2-%1)/2
cglobal emu_edge_hfix %+ %%n, 4, 5, 1, dst, dst_stride, start_x, bh, val
.loop_y: ; do {
READ_V_PIXEL %%n, [dstq+start_xq] ; $variable_regs = read($n)
WRITE_V_PIXEL %%n, dstq ; write($variable_regs, $n)
add dstq, dst_strideq ; dst += dst_stride
dec bhq ; } while (--bh)
jnz .loop_y
RET
%assign %%n %%n+2
%endrep ; 1+(%2-%1)/2
%endmacro ; H_EXTEND
%macro SLOW_LEFT_EXTEND 0
.slow_left_extend_loop:
; r0=buf+block_h*linesize,r2=linesize,r6(64)/r3(32)=val,r5=block_h,r4=cntr,r7/r6=start_x
mov r4, 8
sub r0, linesize
READ_V_PIXEL 8, [r0+w_reg]
.left_extend_8px_loop:
movq [r0+r4-8], mm0
add r4, 8
cmp r4, w_reg
jle .left_extend_8px_loop
sub r4, 8
cmp r4, w_reg
jge .left_extend_loop_end
.left_extend_2px_loop:
mov [r0+r4], valw
add r4, 2
cmp r4, w_reg
jl .left_extend_2px_loop
.left_extend_loop_end:
dec r5
jnz .slow_left_extend_loop
INIT_MMX mmx
H_EXTEND 2, 14
%if ARCH_X86_32
mov r2, r2m
%endif
jmp .right_extend
%endmacro
%macro SLOW_RIGHT_EXTEND 0
.slow_right_extend_loop:
; r3(64)/r0(32)=buf+block_h*linesize,r2=linesize,r4=block_w,r8(64)/r5(32)=block_h,
; r7(64)/r6(32)=end_x,r6/r3=val,r1=cntr
%if ARCH_X86_64
%define buf_reg r3
%define bh_reg r8
%else
%define buf_reg r0
%define bh_reg r5
H_EXTEND 16, 22
%endif
lea r1, [r4-8]
sub buf_reg, linesize
READ_V_PIXEL 8, [buf_reg+w_reg-1]
.right_extend_8px_loop:
movq [buf_reg+r1], mm0
sub r1, 8
cmp r1, w_reg
jge .right_extend_8px_loop
add r1, 8
cmp r1, w_reg
je .right_extend_loop_end
.right_extend_2px_loop:
sub r1, 2
mov [buf_reg+r1], valw
cmp r1, w_reg
jg .right_extend_2px_loop
.right_extend_loop_end:
dec bh_reg
jnz .slow_right_extend_loop
jmp .h_extend_end
%endmacro
%macro emu_edge 1
INIT_XMM %1
EMU_EDGE_FUNC
VERTICAL_EXTEND
LEFT_EXTEND
RIGHT_EXTEND
SLOW_V_EXTEND
SLOW_LEFT_EXTEND
SLOW_RIGHT_EXTEND
%endmacro
emu_edge sse
%if ARCH_X86_32
emu_edge mmx
%endif
INIT_XMM sse
H_EXTEND 16, 22
%macro PREFETCH_FN 1
cglobal prefetch, 3, 3, 0, buf, stride, h

@@ -28,29 +28,127 @@
#include "libavcodec/videodsp.h"
#if HAVE_YASM
typedef void emu_edge_core_func(uint8_t *buf, const uint8_t *src,
x86_reg linesize, x86_reg start_y,
x86_reg end_y, x86_reg block_h,
x86_reg start_x, x86_reg end_x,
x86_reg block_w);
extern emu_edge_core_func ff_emu_edge_core_mmx;
extern emu_edge_core_func ff_emu_edge_core_sse;
static av_always_inline void emulated_edge_mc(uint8_t *buf, const uint8_t *src,
ptrdiff_t linesize,
int block_w, int block_h,
int src_x, int src_y,
int w, int h,
emu_edge_core_func *core_fn)
typedef void emu_edge_vfix_func(uint8_t *dst, const uint8_t *src,
x86_reg dst_stride, x86_reg src_stride,
x86_reg start_y, x86_reg end_y, x86_reg bh);
typedef void emu_edge_vvar_func(uint8_t *dst, const uint8_t *src,
x86_reg dst_stride, x86_reg src_stride,
x86_reg start_y, x86_reg end_y, x86_reg bh,
x86_reg w);
extern emu_edge_vfix_func ff_emu_edge_vfix1_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix2_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix3_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix4_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix5_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix6_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix7_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix8_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix9_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix10_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix11_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix12_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix13_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix14_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix15_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix16_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix17_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix18_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix19_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix20_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix21_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix22_mmx;
#if ARCH_X86_32
static emu_edge_vfix_func *vfixtbl_mmx[22] = {
&ff_emu_edge_vfix1_mmx, &ff_emu_edge_vfix2_mmx, &ff_emu_edge_vfix3_mmx,
&ff_emu_edge_vfix4_mmx, &ff_emu_edge_vfix5_mmx, &ff_emu_edge_vfix6_mmx,
&ff_emu_edge_vfix7_mmx, &ff_emu_edge_vfix8_mmx, &ff_emu_edge_vfix9_mmx,
&ff_emu_edge_vfix10_mmx, &ff_emu_edge_vfix11_mmx, &ff_emu_edge_vfix12_mmx,
&ff_emu_edge_vfix13_mmx, &ff_emu_edge_vfix14_mmx, &ff_emu_edge_vfix15_mmx,
&ff_emu_edge_vfix16_mmx, &ff_emu_edge_vfix17_mmx, &ff_emu_edge_vfix18_mmx,
&ff_emu_edge_vfix19_mmx, &ff_emu_edge_vfix20_mmx, &ff_emu_edge_vfix21_mmx,
&ff_emu_edge_vfix22_mmx
};
#endif
extern emu_edge_vvar_func ff_emu_edge_vvar_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix16_sse;
extern emu_edge_vfix_func ff_emu_edge_vfix17_sse;
extern emu_edge_vfix_func ff_emu_edge_vfix18_sse;
extern emu_edge_vfix_func ff_emu_edge_vfix19_sse;
extern emu_edge_vfix_func ff_emu_edge_vfix20_sse;
extern emu_edge_vfix_func ff_emu_edge_vfix21_sse;
extern emu_edge_vfix_func ff_emu_edge_vfix22_sse;
static emu_edge_vfix_func *vfixtbl_sse[22] = {
ff_emu_edge_vfix1_mmx, ff_emu_edge_vfix2_mmx, ff_emu_edge_vfix3_mmx,
ff_emu_edge_vfix4_mmx, ff_emu_edge_vfix5_mmx, ff_emu_edge_vfix6_mmx,
ff_emu_edge_vfix7_mmx, ff_emu_edge_vfix8_mmx, ff_emu_edge_vfix9_mmx,
ff_emu_edge_vfix10_mmx, ff_emu_edge_vfix11_mmx, ff_emu_edge_vfix12_mmx,
ff_emu_edge_vfix13_mmx, ff_emu_edge_vfix14_mmx, ff_emu_edge_vfix15_mmx,
ff_emu_edge_vfix16_sse, ff_emu_edge_vfix17_sse, ff_emu_edge_vfix18_sse,
ff_emu_edge_vfix19_sse, ff_emu_edge_vfix20_sse, ff_emu_edge_vfix21_sse,
ff_emu_edge_vfix22_sse
};
extern emu_edge_vvar_func ff_emu_edge_vvar_sse;
typedef void emu_edge_hfix_func(uint8_t *dst, x86_reg dst_stride,
x86_reg start_x, x86_reg bh);
typedef void emu_edge_hvar_func(uint8_t *dst, x86_reg dst_stride,
x86_reg start_x, x86_reg n_words, x86_reg bh);
extern emu_edge_hfix_func ff_emu_edge_hfix2_mmx;
extern emu_edge_hfix_func ff_emu_edge_hfix4_mmx;
extern emu_edge_hfix_func ff_emu_edge_hfix6_mmx;
extern emu_edge_hfix_func ff_emu_edge_hfix8_mmx;
extern emu_edge_hfix_func ff_emu_edge_hfix10_mmx;
extern emu_edge_hfix_func ff_emu_edge_hfix12_mmx;
extern emu_edge_hfix_func ff_emu_edge_hfix14_mmx;
extern emu_edge_hfix_func ff_emu_edge_hfix16_mmx;
extern emu_edge_hfix_func ff_emu_edge_hfix18_mmx;
extern emu_edge_hfix_func ff_emu_edge_hfix20_mmx;
extern emu_edge_hfix_func ff_emu_edge_hfix22_mmx;
#if ARCH_X86_32
static emu_edge_hfix_func *hfixtbl_mmx[11] = {
ff_emu_edge_hfix2_mmx, ff_emu_edge_hfix4_mmx, ff_emu_edge_hfix6_mmx,
ff_emu_edge_hfix8_mmx, ff_emu_edge_hfix10_mmx, ff_emu_edge_hfix12_mmx,
ff_emu_edge_hfix14_mmx, ff_emu_edge_hfix16_mmx, ff_emu_edge_hfix18_mmx,
ff_emu_edge_hfix20_mmx, ff_emu_edge_hfix22_mmx
};
#endif
extern emu_edge_hvar_func ff_emu_edge_hvar_mmx;
extern emu_edge_hfix_func ff_emu_edge_hfix16_sse;
extern emu_edge_hfix_func ff_emu_edge_hfix18_sse;
extern emu_edge_hfix_func ff_emu_edge_hfix20_sse;
extern emu_edge_hfix_func ff_emu_edge_hfix22_sse;
static emu_edge_hfix_func *hfixtbl_sse[11] = {
ff_emu_edge_hfix2_mmx, ff_emu_edge_hfix4_mmx, ff_emu_edge_hfix6_mmx,
ff_emu_edge_hfix8_mmx, ff_emu_edge_hfix10_mmx, ff_emu_edge_hfix12_mmx,
ff_emu_edge_hfix14_mmx, ff_emu_edge_hfix16_sse, ff_emu_edge_hfix18_sse,
ff_emu_edge_hfix20_sse, ff_emu_edge_hfix22_sse
};
extern emu_edge_hvar_func ff_emu_edge_hvar_sse;
static av_always_inline void emulated_edge_mc(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride,
ptrdiff_t src_stride,
x86_reg block_w, x86_reg block_h,
x86_reg src_x, x86_reg src_y,
x86_reg w, x86_reg h,
emu_edge_vfix_func **vfix_tbl,
emu_edge_vvar_func *v_extend_var,
emu_edge_hfix_func **hfix_tbl,
emu_edge_hvar_func *h_extend_var)
{
int start_y, start_x, end_y, end_x, src_y_add = 0;
x86_reg start_y, start_x, end_y, end_x, src_y_add = 0, p;
if (!w || !h)
return;
if (src_y >= h) {
src_y_add = h - 1 - src_y;
src_y = h - 1;
src -= src_y * src_stride;
src_y = src_y_add = h - 1;
} else if (src_y <= -block_h) {
src_y_add = 1 - block_h - src_y;
src_y = 1 - block_h;
src -= src_y*src_stride;
src_y = src_y_add = 1 - block_h;
}
if (src_x >= w) {
src += w - 1 - src_x;
@@ -68,30 +166,62 @@ static av_always_inline void emulated_edge_mc(uint8_t *buf, const uint8_t *src,
assert(start_y < end_y && block_h > 0);
// fill in the to-be-copied part plus all above/below
src += (src_y_add + start_y) * linesize + start_x;
buf += start_x;
core_fn(buf, src, linesize, start_y, end_y,
block_h, start_x, end_x, block_w);
src += (src_y_add + start_y) * src_stride + start_x;
w = end_x - start_x;
if (w <= 22) {
vfix_tbl[w - 1](dst + start_x, src,
dst_stride, src_stride,
start_y, end_y, block_h);
} else {
v_extend_var(dst + start_x, src, dst_stride, src_stride,
start_y, end_y, block_h, w);
}
// fill left
if (start_x) {
if (start_x <= 22) {
hfix_tbl[(start_x - 1) >> 1](dst, dst_stride, start_x, block_h);
} else {
h_extend_var(dst, dst_stride,
start_x, (start_x + 1) >> 1, block_h);
}
}
// fill right
p = block_w - end_x;
if (p) {
if (p <= 22) {
hfix_tbl[(p - 1) >> 1](dst + end_x - (p & 1), dst_stride,
-!(p & 1), block_h);
} else {
h_extend_var(dst + end_x - (p & 1), dst_stride,
-!(p & 1), (p + 1) >> 1, block_h);
}
}
}
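The right-fill call above hides a small trick worth spelling out (a hedged
sketch; right_fill_params is illustrative, not a function in the commit):
the fixed-width hfix kernels store an even number of bytes, so for an odd
fill width p the store window is shifted one byte left, harmlessly
rewriting the last body pixel with its own value:

    #include <stdint.h>

    static void right_fill_params(uint8_t *dst, int end_x, int block_w,
                                  uint8_t **base, int *src_off, int *nbytes)
    {
        int p    = block_w - end_x;        /* pixels to fill on the right */
        *base    = dst + end_x - (p & 1);  /* shift left one byte: p odd  */
        *src_off = -!(p & 1);              /* (*base)[*src_off] is always
                                              dst[end_x - 1], the pixel
                                              being replicated            */
        *nbytes  = (p + 1) & ~1;           /* even store width >= p       */
    }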
#if ARCH_X86_32
static av_noinline void emulated_edge_mc_mmx(uint8_t *buf, const uint8_t *src,
ptrdiff_t linesize,
ptrdiff_t buf_stride,
ptrdiff_t src_stride,
int block_w, int block_h,
int src_x, int src_y, int w, int h)
{
emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
w, h, &ff_emu_edge_core_mmx);
emulated_edge_mc(buf, src, buf_stride, src_stride, block_w, block_h,
src_x, src_y, w, h, vfixtbl_mmx, &ff_emu_edge_vvar_mmx,
hfixtbl_mmx, &ff_emu_edge_hvar_mmx);
}
#endif
static av_noinline void emulated_edge_mc_sse(uint8_t *buf, const uint8_t *src,
ptrdiff_t linesize,
static av_noinline void emulated_edge_mc_sse(uint8_t *buf, const uint8_t *src,
ptrdiff_t buf_stride,
ptrdiff_t src_stride,
int block_w, int block_h,
int src_x, int src_y, int w, int h)
{
emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
w, h, &ff_emu_edge_core_sse);
emulated_edge_mc(buf, src, buf_stride, src_stride, block_w, block_h,
src_x, src_y, w, h, vfixtbl_sse, &ff_emu_edge_vvar_sse,
hfixtbl_sse, &ff_emu_edge_hvar_sse);
}
#endif /* HAVE_YASM */
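For context, a hedged sketch of how such wrappers are typically installed
into VideoDSPContext (the EXTERNAL_* cpu-flag checks follow the usual lavc
x86 pattern; this is not necessarily the commit's exact init code):

    #include "libavutil/attributes.h"
    #include "libavutil/cpu.h"
    #include "libavutil/x86/cpu.h"
    #include "libavcodec/videodsp.h"

    av_cold void ff_videodsp_init_x86(VideoDSPContext *ctx, int bpc)
    {
        int cpu_flags = av_get_cpu_flags();
    #if HAVE_YASM
    #if ARCH_X86_32
        if (EXTERNAL_MMX(cpu_flags) && bpc <= 8)
            ctx->emulated_edge_mc = emulated_edge_mc_mmx;
    #endif
        if (EXTERNAL_SSE(cpu_flags) && bpc <= 8)
            ctx->emulated_edge_mc = emulated_edge_mc_sse;
    #endif /* HAVE_YASM */
    }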
