@@ -23,576 +23,394 @@
SECTION .text
; void ff_emu_edge_core(uint8_t *buf, const uint8_t *src, x86_reg linesize,
; x86_reg start_y, x86_reg end_y, x86_reg block_h,
; x86_reg start_x, x86_reg end_x, x86_reg block_w);
;
; The actual function itself is below. It basically wraps a very simple
; w = end_x - start_x
; if (w) {
; if (w > 22) {
; jump to the slow loop functions
; } else {
; jump to the fast loop functions
; }
; }
;
; ... and then the same for left/right extend also. See below for loop
; function implementations. Fast are fixed-width, slow is variable-width
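;
; A rough C sketch of that dispatch (illustrative only; the helper names
; slow_extend()/fast_extend() are hypothetical, but the w computation and the
; 22-pixel cutoff are the ones used by the code below):
;
;   void slow_extend(int w);              /* hypothetical, variable-width */
;   void fast_extend(int w);              /* hypothetical, fixed-width    */
;
;   void edge_extend_dispatch(int start_x, int end_x)
;   {
;       int w = end_x - start_x;
;       if (w) {
;           if (w > 22)
;               slow_extend(w);           /* variable-width loop functions */
;           else
;               fast_extend(w);           /* fixed-width loop functions    */
;       }
;   }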
%macro EMU_EDGE_FUNC 0
%if ARCH_X86_64
%define w_reg r7
cglobal emu_edge_core, 6, 9, 1
    mov         r8, r5                   ; save block_h
%else
%define w_reg r6
cglobal emu_edge_core, 2, 7, 0
    mov         r4, r4m                  ; end_y
    mov         r5, r5m                  ; block_h
%endif
    ; start with vertical extend (top/bottom) and body pixel copy
    mov      w_reg, r7m
    sub      w_reg, r6m                  ; w = end_x - start_x
    sub         r5, r4
%if ARCH_X86_64
    sub         r4, r3
%else
    sub         r4, dword r3m
%endif
    cmp      w_reg, 22
    jg .slow_v_extend_loop
%if ARCH_X86_32
    mov         r2, r2m                  ; linesize
%endif
    sal      w_reg, 7                    ; w * 128
%ifdef PIC
    lea        rax, [.emuedge_v_extend_1 - (.emuedge_v_extend_2 - .emuedge_v_extend_1)]
    add      w_reg, rax
%else
    lea      w_reg, [.emuedge_v_extend_1 - (.emuedge_v_extend_2 - .emuedge_v_extend_1)+w_reg]
%endif
    call     w_reg                       ; fast top extend, body copy and bottom extend
.v_extend_end:
; slow vertical extension loop function. Works with variable-width, and
; does per-line reading/writing of source data
%macro V_COPY_ROW 2 ; type (top/body/bottom), h
.%1_y_loop:                                     ; do {
    mov              wq, r7mp                   ;   initialize w (r7mp = wmp)
.%1_x_loop:                                     ;   do {
    movu             m0, [srcq+wq]              ;     m0 = read($mmsize)
    movu      [dstq+wq], m0                     ;     write(m0, $mmsize)
    add              wq, mmsize                 ;     w -= $mmsize
    cmp              wq, -mmsize                ;   } while (w > $mmsize);
    jl .%1_x_loop
    movu             m0, [srcq-mmsize]          ;   m0 = read($mmsize)
    movu  [dstq-mmsize], m0                     ;   write(m0, $mmsize)
%ifidn %1, body                                 ;   if ($type == body) {
    add            srcq, src_strideq            ;     src += src_stride
%endif                                          ;   }
    add            dstq, dst_strideq            ;   dst += dst_stride
    dec %2                                      ; } while (--$h);
    jnz .%1_y_loop
%endmacro
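;
; What one V_COPY_ROW expansion amounts to, as a hedged C sketch (assumes
; <stdint.h>/<stddef.h>/<string.h>; the asm does the memcpy in mmsize chunks
; plus one overlapping tail store, and 'advance_src' stands in for the
; %ifidn body test):
;
;   static void v_copy_rows(uint8_t *dst, ptrdiff_t dst_stride,
;                           const uint8_t *src, ptrdiff_t src_stride,
;                           int w, int h, int advance_src)
;   {
;       do {
;           memcpy(dst, src, w);
;           if (advance_src)
;               src += src_stride;        /* body rows walk the source      */
;           dst += dst_stride;            /* top/bottom rows reuse one line */
;       } while (--h);
;   }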
    ; horizontal extend (left/right)
    mov      w_reg, r6m                  ; start_x
    sub         r0, w_reg
%macro vvar_fn 0
; .----. <- zero
; |    | <- top is copied from first line in body of source
; |----| <- start_y
; |    | <- body is copied verbatim (line-by-line) from source
; |----| <- end_y
; |    | <- bottom is copied from last line in body of source
; '----' <- bh
%if ARCH_X86_64
    mov         r3, r0                   ; backup of buf+block_h*linesize
    mov         r5, r8
%else
    mov        r0m, r0                   ; backup of buf+block_h*linesize
    mov         r5, r5m
cglobal emu_edge_vvar, 7, 8, 1, dst, src, dst_stride, src_stride, \
                                start_y, end_y, bh, w
%else ; x86-32
cglobal emu_edge_vvar, 1, 6, 1, dst, src, start_y, end_y, bh, w
%define src_strideq r3mp
%define dst_strideq r2mp
    mov           srcq, r1mp
    mov       start_yq, r4mp
    mov         end_yq, r5mp
    mov            bhq, r6mp
%endif
    test     w_reg, w_reg
    jz .right_extend
    cmp      w_reg, 22
    jg .slow_left_extend_loop
    mov         r1, w_reg
    dec      w_reg
    ; FIXME we could add an if size == 1 case here if that makes any speed difference, test me
    sar      w_reg, 1
    sal      w_reg, 6
    ; r0=buf+block_h*linesize,r7(64)/r6(32)=start_x offset for funcs
    ; r6(rax)/r3(ebx)=val,r2=linesize,r1=start_x,r5=block_h
%ifdef PIC
    lea        rax, [.emuedge_extend_left_2]
    add      w_reg, rax
%else
    lea      w_reg, [.emuedge_extend_left_2+w_reg]
%endif
    call     w_reg
    sub            bhq, end_yq               ; bh    -= end_y
    sub         end_yq, start_yq             ; end_y -= start_y
    add           srcq, r7mp                 ; (r7mp = wmp)
    add           dstq, r7mp                 ; (r7mp = wmp)
    neg           r7mp                       ; (r7mp = wmp)
    test      start_yq, start_yq             ; if (start_y) {
    jz .body
    V_COPY_ROW     top, start_yq             ;   v_copy_row(top, start_yq)
.body:                                       ; }
    V_COPY_ROW    body, end_yq               ; v_copy_row(body, end_yq)
    test           bhq, bhq                  ; if (bh) {
    jz .end
    sub           srcq, src_strideq          ;   src -= src_stride
    V_COPY_ROW  bottom, bhq                  ;   v_copy_row(bottom, bh)
.end:                                        ; }
    RET
%endmacro
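;
; C-style sketch of the vvar flow diagrammed at the top of this macro: top
; rows replicate the first source line, body rows are copied verbatim, and
; bottom rows replicate the last source line. A sketch only (assumes
; <stdint.h>/<stddef.h>/<string.h>); parameter names follow the cglobal
; declaration above:
;
;   void emu_edge_vvar_c(uint8_t *dst, const uint8_t *src,
;                        ptrdiff_t dst_stride, ptrdiff_t src_stride,
;                        int start_y, int end_y, int bh, int w)
;   {
;       int y;
;       bh    -= end_y;                   /* rows to fill below the body */
;       end_y -= start_y;                 /* rows actually read from src */
;       for (y = 0; y < start_y; y++) {   /* top: replicate first body line */
;           memcpy(dst, src, w);
;           dst += dst_stride;
;       }
;       for (y = 0; y < end_y; y++) {     /* body: verbatim per-line copy */
;           memcpy(dst, src, w);
;           src += src_stride;
;           dst += dst_stride;
;       }
;       src -= src_stride;                /* step back to last body line */
;       for (y = 0; y < bh; y++) {        /* bottom: replicate last body line */
;           memcpy(dst, src, w);
;           dst += dst_stride;
;       }
;   }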
; now r3(64)/r0(32)=buf,r2=linesize,r8/r5=block_h,r6/r3=val, r7/r6=end_x, r1=block_w
.right_extend:
%if ARCH_X86_32
    mov         r0, r0m
    mov         r5, r5m
INIT_MMX mmx
vvar_fn
%endif
    mov      w_reg, r7m                  ; end_x
    mov         r1, r8m                  ; block_w
    mov         r4, r1
    sub         r1, w_reg
    jz .h_extend_end                     ; if (end_x == block_w) goto h_extend_end
    cmp         r1, 22
    jg .slow_right_extend_loop
    dec         r1
    ; FIXME we could add an if size == 1 case here if that makes any speed difference, test me
    sar         r1, 1
    sal         r1, 6
%ifdef PIC
    lea        rax, [.emuedge_extend_right_2]
    add         r1, rax
%else
    lea         r1, [.emuedge_extend_right_2+r1]
%endif
    call        r1
.h_extend_end:
INIT_XMM sse
vvar_fn
%macro hvar_fn 0
cglobal emu_edge_hvar, 5, 6, 1, dst, dst_stride, start_x, n_words, h, w
    lea           dstq, [dstq+n_wordsq*2]
    neg       n_wordsq
    lea       start_xq, [start_xq+n_wordsq*2]
.y_loop:                                        ; do {
    ; FIXME also write a ssse3 version using pshufb
    movzx           wd, byte [dstq+start_xq]    ;   w = read(1)
    imul            wd, 0x01010101              ;   w *= 0x01010101
    movd            m0, wd
    mov             wq, n_wordsq                ;   initialize w
%if cpuflag(sse)
    shufps          m0, m0, q0000               ;   splat
%else ; mmx
    punpckldq       m0, m0                      ;   splat
%endif ; mmx/sse
.x_loop:                                        ;   do {
    movu  [dstq+wq*2], m0                       ;     write($reg, $mmsize)
    add             wq, mmsize/2                ;     w -= $mmsize/2
    cmp             wq, -mmsize/2               ;   } while (w > $mmsize/2)
    jl .x_loop
    movu [dstq-mmsize], m0                      ;   write($reg, $mmsize)
    add           dstq, dst_strideq             ;   dst += dst_stride
    dec             hq                          ; } while (h--)
    jnz .y_loop
    RET
%endmacro
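;
; C-style sketch of one emu_edge_hvar call (a sketch only, assuming
; <stdint.h>/<stddef.h>/<string.h>; n_words counts 2-byte units, matching the
; *2 scaling above, and start_x addresses the nearest real pixel in the row):
;
;   void emu_edge_hvar_c(uint8_t *dst, ptrdiff_t dst_stride,
;                        int start_x, int n_words, int h)
;   {
;       do {
;           uint8_t pix = dst[start_x];       /* re-read per row */
;           memset(dst, pix, n_words * 2);    /* splat it across the edge */
;           dst += dst_stride;
;       } while (--h);
;   }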
%if ARCH_X86_64
%define vall  al
%define valh  ah
%define valw  ax
%define valw2 r7w
%define valw3 r3w
%if WIN64
%define valw4 r7w
%else ; unix64
%define valw4 r3w
%endif
%define vald eax
%else
%define vall  bl
%define valh  bh
%define valw  bx
%define valw2 r6w
%define valw3 valw2
%define valw4 valw3
%define vald ebx
%define stack_offset 0x14
%if ARCH_X86_32
INIT_MMX mmx
hvar_fn
%endif
%endmacro
INIT_XMM sse
hvar_fn
; macro to read/write a horizontal number of pixels (%2) to/from registers
; on x86-64, - fills xmm0-15 for consecutive sets of 16 pixels
; - if (%2 & 15 == 8) fills the last 8 bytes into rax
; - else if (%2 & 8) fills 8 bytes into mm0
; - if (%2 & 7 == 4) fills the last 4 bytes into rax
; - else if (%2 & 4) fills 4 bytes into mm0-1
; - if (%2 & 3 == 3) fills 2 bytes into r7/r3, and 1 into eax
; (note that we're using r3 for body/bottom because it's a shorter
; opcode, and then the loop fits in 128 bytes)
; - else fills remaining bytes into rax
; on x86-32, - fills mm0-7 for consecutive sets of 8 pixels
; - if (%2 & 7 == 4) fills 4 bytes into ebx
; - else if (%2 & 4) fills 4 bytes into mm0-7
; - if (%2 & 3 == 3) fills 2 bytes into r6, and 1 into ebx
; - else fills remaining bytes into ebx
; on sse, - fills xmm0-15 for consecutive sets of 16 pixels
; - if (%2 & 8) fills 8 bytes into xmm$next
; - if (%2 & 4) fills 4 bytes into xmm$next
; - if (%2 & 3) fills 1, 2 or 4 bytes in eax
; on mmx, - fills mm0-7 for consecutive sets of 8 pixels
; - if (%2 & 4) fills 4 bytes into mm$next
; - if (%2 & 3) fills 1, 2 or 4 bytes in eax
; writing data out is in the same way
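;
; In effect READ_NUM_BYTES/WRITE_NUM_BYTES together perform a fixed-size copy,
; decomposed at assembly time into the widest available loads/stores. A hedged
; run-time C sketch of the same decomposition (assumes <stdint.h>/<string.h>;
; register allocation and the overlapping-tail trick are not modelled):
;
;   static void copy_fixed(uint8_t *dst, const uint8_t *src, int n)
;   {
;       while (n >= 16) { memcpy(dst, src, 16); dst += 16; src += 16; n -= 16; }
;       if (n >= 8)     { memcpy(dst, src,  8); dst +=  8; src +=  8; n -=  8; }
;       if (n >= 4)     { memcpy(dst, src,  4); dst +=  4; src +=  4; n -=  4; }
;       if (n >= 2)     { memcpy(dst, src,  2); dst +=  2; src +=  2; n -=  2; }
;       if (n >= 1)     *dst = *src;
;   }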
%macro READ_NUM_BYTES 2
%assign %%src_off 0 ; offset in source buffer
%assign %%smidx   0 ; mmx register idx
%assign %%sxidx   0 ; xmm register idx
%if cpuflag(sse)
%rep %2/16
    movups xmm %+ %%sxidx, [r1+%%src_off]
%assign %%src_off %%src_off+16
%assign %%sxidx   %%sxidx+1
%endrep ; %2/16
%assign %%off 0     ; offset in source buffer
%assign %%idx 0     ; mmx/xmm register index
%rep %2/mmsize
    movu     m %+ %%idx, [srcq+%%off]
%assign %%off %%off+mmsize
%assign %%idx %%idx+1
%endrep ; %2/mmsize
%if mmsize == 16
%if (%2-%%off) >= 8
%if %2 > 16 && (%2-%%off) > 8
    movu     m %+ %%idx, [srcq+%2-16]
%assign %%off %2
%else
    movq     m %+ %%idx, [srcq+%%off]
%assign %%off %%off+8
%endif
%assign %%idx %%idx+1
%endif ; (%2-%%off) >= 8
%endif
%if ARCH_X86_64
%if (%2-%%src_off) == 8
    mov        rax, [r1+%%src_off]
%assign %%src_off %%src_off+8
%endif ; (%2-%%src_off) == 8
%endif ; x86-64
%rep (%2-%%src_off)/8
    movq mm %+ %%smidx, [r1+%%src_off]
%assign %%src_off %%src_off+8
%assign %%smidx   %%smidx+1
%endrep ; (%2-%%src_off)/8
%if (%2-%%src_off) == 4
    mov       vald, [r1+%%src_off]
%elif (%2-%%src_off) & 4
    movd mm %+ %%smidx, [r1+%%src_off]
%assign %%src_off %%src_off+4
%endif ; (%2-%%src_off) ==/& 4
%if (%2-%%src_off) == 1
    mov       vall, [r1+%%src_off]
%elif (%2-%%src_off) == 2
    mov       valw, [r1+%%src_off]
%elif (%2-%%src_off) == 3
%ifidn %1, top
    mov      valw2, [r1+%%src_off]
%if (%2-%%off) >= 4
%if %2 > 8 && (%2-%%off) > 4
    movq     m %+ %%idx, [srcq+%2-8]
%assign %%off %2
%else
    movd     m %+ %%idx, [srcq+%%off]
%assign %%off %%off+4
%endif
%assign %%idx %%idx+1
%endif ; (%2-%%off) >= 4
%if (%2-%%off) >= 1
%if %2 >= 4
    movd     m %+ %%idx, [srcq+%2-4]
%elif (%2-%%off) == 1
    mov       valb, [srcq+%2-1]
%elif (%2-%%off) == 2
    mov       valw, [srcq+%2-2]
%elifidn %1, body
    mov      valw3, [r1+%%src_off]
%elifidn %1, bottom
    mov      valw4, [r1+%%src_off]
%endif ; %1 ==/!= top
    mov       vall, [r1+%%src_off+2]
%endif ; (%2-%%src_off) == 1/2/3
    mov       vald, [srcq+%2-3]
%else
    movd     m %+ %%idx, [srcq+%2-3]
%endif
%endif ; (%2-%%off) >= 1
%endmacro ; READ_NUM_BYTES
%macro WRITE_NUM_BYTES 2
%assign %%dst_off 0 ; offset in destination buffer
%assign %%dmidx   0 ; mmx register idx
%assign %%dxidx   0 ; xmm register idx
%if cpuflag(sse)
%rep %2/16
    movups [r0+%%dst_off], xmm %+ %%dxidx
%assign %%dst_off %%dst_off+16
%assign %%dxidx   %%dxidx+1
%endrep ; %2/16
%assign %%off 0     ; offset in destination buffer
%assign %%idx 0     ; mmx/xmm register index
%rep %2/mmsize
    movu   [dstq+%%off], m %+ %%idx
%assign %%off %%off+mmsize
%assign %%idx %%idx+1
%endrep ; %2/mmsize
%if mmsize == 16
%if (%2-%%off) >= 8
%if %2 > 16 && (%2-%%off) > 8
    movu   [dstq+%2-16], m %+ %%idx
%assign %%off %2
%else
    movq   [dstq+%%off], m %+ %%idx
%assign %%off %%off+8
%endif
%assign %%idx %%idx+1
%endif ; (%2-%%off) >= 8
%endif
%if ARCH_X86_64
%if (%2-%%dst_off) == 8
    mov  [r0+%%dst_off], rax
%assign %%dst_off %%dst_off+8
%endif ; (%2-%%dst_off) == 8
%endif ; x86-64
%rep (%2-%%dst_off)/8
    movq [r0+%%dst_off], mm %+ %%dmidx
%assign %%dst_off %%dst_off+8
%assign %%dmidx   %%dmidx+1
%endrep ; (%2-%%dst_off)/8
%if (%2-%%dst_off) == 4
    mov  [r0+%%dst_off], vald
%elif (%2-%%dst_off) & 4
    movd [r0+%%dst_off], mm %+ %%dmidx
%assign %%dst_off %%dst_off+4
%endif ; (%2-%%dst_off) ==/& 4
%if (%2-%%dst_off) == 1
    mov  [r0+%%dst_off], vall
%elif (%2-%%dst_off) == 2
    mov  [r0+%%dst_off], valw
%elif (%2-%%dst_off) == 3
%ifidn %1, top
    mov  [r0+%%dst_off], valw2
%if (%2-%%off) >= 4
%if %2 > 8 && (%2-%%off) > 4
    movq  [dstq+%2-8], m %+ %%idx
%assign %%off %2
%else
    movd [dstq+%%off], m %+ %%idx
%assign %%off %%off+4
%endif
%assign %%idx %%idx+1
%endif ; (%2-%%off) >= 4
%if (%2-%%off) >= 1
%if %2 >= 4
    movd  [dstq+%2-4], m %+ %%idx
%elif (%2-%%off) == 1
    mov   [dstq+%2-1], valb
%elif (%2-%%off) == 2
    mov   [dstq+%2-2], valw
%elifidn %1, body
    mov  [r0+%%dst_off], valw3
%elifidn %1, bottom
    mov  [r0+%%dst_off], valw4
%endif ; %1 ==/!= top
    mov  [r0+%%dst_off+2], vall
%endif ; (%2-%%dst_off) == 1/2/3
    mov   [dstq+%2-3], valw
    shr           vald, 16
    mov   [dstq+%2-1], valb
%else
    movd          vald, m %+ %%idx
    mov   [dstq+%2-3], valw
    shr           vald, 16
    mov   [dstq+%2-1], valb
%endif
%endif ; (%2-%%off) >= 1
%endmacro ; WRITE_NUM_BYTES
; vertical top/bottom extend and body copy fast loops
; these are function pointers to set-width line copy functions, i.e.
; they read a fixed number of pixels into set registers, and write
; those out into the destination buffer
; r0=buf,r1=src,r2=linesize,r3(64)/r3m(32)=start_x,r4=end_y,r5=block_h
; r6(eax/64)/r3(ebx/32)=val_reg
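;
; Conceptually this expands to one copy routine per width; callers pick the
; entry that matches the block width. A hedged C sketch of such a selection
; (the vfix_fn typedef and table are illustrative, not the actual init code;
; the argument order follows the emu_edge_vfix cglobal declarations below):
;
;   typedef void (*vfix_fn)(uint8_t *dst, const uint8_t *src,
;                           ptrdiff_t dst_stride, ptrdiff_t src_stride,
;                           int start_y, int end_y, int bh);
;   vfix_fn vfix_tab[23];                  /* entries 1..22 filled at init */
;
;   static void vfix_dispatch(int w, uint8_t *dst, const uint8_t *src,
;                             ptrdiff_t dst_stride, ptrdiff_t src_stride,
;                             int start_y, int end_y, int bh)
;   {
;       if (w >= 1 && w <= 22 && vfix_tab[w])
;           vfix_tab[w](dst, src, dst_stride, src_stride, start_y, end_y, bh);
;   }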
%macro VERTICAL_EXTEND 0
%assign %%n 1
%rep 22
ALIGN 128
.emuedge_v_extend_ %+ %%n:
    ; extend pixels above body
%macro VERTICAL_EXTEND 2
%assign %%n %1
%rep 1+%2-%1
%if %%n <= 3
%if ARCH_X86_64
    test        r3, r3                       ; if (!start_y)
    jz .emuedge_copy_body_ %+ %%n %+ _loop   ;   goto body
%else ; ARCH_X86_32
    cmp  dword r3m, 0
    je .emuedge_copy_body_ %+ %%n %+ _loop
%endif ; ARCH_X86_64/32
    READ_NUM_BYTES top, %%n                  ; read bytes
.emuedge_extend_top_ %+ %%n %+ _loop:        ; do {
    WRITE_NUM_BYTES top, %%n                 ;   write bytes
    add         r0, r2                       ;   dst += linesize
cglobal emu_edge_vfix %+ %%n, 6, 8, 0, dst, src, dst_stride, src_stride, \
                                       start_y, end_y, val, bh
    mov            bhq, r6mp                 ; r6mp = bhmp
%else ; x86-32
cglobal emu_edge_vfix %+ %%n, 0, 6, 0, val, dst, src, start_y, end_y, bh
    mov           dstq, r0mp
    mov           srcq, r1mp
    mov       start_yq, r4mp
    mov         end_yq, r5mp
    mov            bhq, r6mp
%define dst_strideq r2mp
%define src_strideq r3mp
%endif ; x86-64/32
%else
%if ARCH_X86_64
    dec        r3d
%else ; ARCH_X86_32
    dec  dword r3m
%endif ; ARCH_X86_64/32
    jnz .emuedge_extend_top_ %+ %%n %+ _loop ; } while (--start_y)
cglobal emu_edge_vfix %+ %%n, 7, 7, 1, dst, src, dst_stride, src_stride, \
                                       start_y, end_y, bh
%else ; x86-32
cglobal emu_edge_vfix %+ %%n, 1, 5, 1, dst, src, start_y, end_y, bh
    mov           srcq, r1mp
    mov       start_yq, r4mp
    mov         end_yq, r5mp
    mov            bhq, r6mp
%define dst_strideq r2mp
%define src_strideq r3mp
%endif ; x86-64/32
%endif
    ; FIXME move this to c wrapper?
    sub            bhq, end_yq               ; bh    -= end_y
    sub         end_yq, start_yq             ; end_y -= start_y
    ; extend pixels above body
    test      start_yq, start_yq             ; if (start_y) {
    jz .body_loop
    READ_NUM_BYTES top, %%n                  ;   $variable_regs = read($n)
.top_loop:                                   ;   do {
    WRITE_NUM_BYTES top, %%n                 ;     write($variable_regs, $n)
    add           dstq, dst_strideq          ;     dst += linesize
    dec       start_yq                       ;   } while (--start_y)
    jnz .top_loop                            ; }
    ; copy body pixels
.emuedge_copy_body_ %+ %%n %+ _loop:         ; do {
    READ_NUM_BYTES body, %%n                 ;   read bytes
    WRITE_NUM_BYTES body, %%n                ;   write bytes
    add             r0, r2                   ;   dst += linesize
    add             r1, r2                   ;   src += linesize
    dec            r4d
    jnz .emuedge_copy_body_ %+ %%n %+ _loop  ; } while (--end_y)
.body_loop:                                  ; do {
    READ_NUM_BYTES body, %%n                 ;   $variable_regs = read($n)
    WRITE_NUM_BYTES body, %%n                ;   write($variable_regs, $n)
    add           dstq, dst_strideq          ;   dst += dst_stride
    add           srcq, src_strideq          ;   src += src_stride
    dec         end_yq                       ; } while (--end_y)
    jnz .body_loop
    ; copy bottom pixels
    test            r5, r5                   ; if (!block_h)
    jz .emuedge_v_extend_end_ %+ %%n         ;   goto end
    sub             r1, r2                   ; src -= linesize
    READ_NUM_BYTES bottom, %%n               ; read bytes
.emuedge_extend_bottom_ %+ %%n %+ _loop:     ; do {
    WRITE_NUM_BYTES bottom, %%n              ;   write bytes
    add             r0, r2                   ;   dst += linesize
    dec            r5d
    jnz .emuedge_extend_bottom_ %+ %%n %+ _loop ; } while (--block_h)
.emuedge_v_extend_end_ %+ %%n:
%if ARCH_X86_64
    ret
%else ; ARCH_X86_32
    rep ret
%endif ; ARCH_X86_64/32
    test           bhq, bhq                  ; if (block_h) {
    jz .end
    sub           srcq, src_strideq          ;   src -= linesize
    READ_NUM_BYTES bottom, %%n               ;   $variable_regs = read($n)
.bottom_loop:                                ;   do {
    WRITE_NUM_BYTES bottom, %%n              ;     write($variable_regs, $n)
    add           dstq, dst_strideq          ;     dst += linesize
    dec            bhq                       ;   } while (--bh)
    jnz .bottom_loop                         ; }
.end:
    RET
%assign %%n %%n+1
%endrep
%endmacro ; VERTICAL_EXTEND
%endrep ; 1+%2-%1
%endmacro ; VERTICAL_EXTEND
INIT_MMX mmx
VERTICAL_EXTEND 1, 15
%if ARCH_X86_32
VERTICAL_EXTEND 16, 22
%endif
INIT_XMM sse
VERTICAL_EXTEND 16, 22
; left/right (horizontal) fast extend functions
; these are essentially identical to the vertical extend ones above,
; just left/right separated because the number of pixels to extend is
; obviously not the same on both sides.
; for reading, pixels are placed in eax (x86-64) or ebx (x86-32) in the
; lowest two bytes of the register (so val*0x0101), and are splatted
; into each byte of mm0 as well if n_pixels >= 8
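;
; The splat described above, as a hedged C sketch (assumes <stdint.h>; the asm
; builds the same pattern with mov vall/valh plus punpcklwd/punpckldq or pshufw):
;
;   static uint64_t splat_byte(uint8_t pix)
;   {
;       uint64_t q = (uint16_t)(pix * 0x0101);   /* val in the low two bytes */
;       q |= q << 16;                            /* like punpcklwd           */
;       q |= q << 32;                            /* like punpckldq           */
;       return q;                                /* all 8 bytes == pix       */
;   }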
%macro READ_V_PIXEL 2
    mov        vall, %2
    mov        valh, vall
%if %1 >= 8
    movd        mm0, vald
%if cpuflag(mmxext)
    pshufw      mm0, mm0, 0
%else ; mmx
    punpcklwd   mm0, mm0
    punpckldq   mm0, mm0
%endif ; mmxext/mmx
%endif ; %1 >= 8
%endmacro
%macro WRITE_V_PIXEL 2
%assign %%dst_off 0
%rep %1/8
    movq [%2+%%dst_off], mm0
%assign %%dst_off %%dst_off+8
%endrep
%if %1 & 4
%if %1 == 2
    movzx      valw, byte %2
    imul       valw, 0x0101
%else
    movzx      vald, byte %2
    imul       vald, 0x01010101
%if %1 >= 8
    movd [%2+%%dst_off], mm0
%else ; %1 < 8
    mov  [%2+%%dst_off]  , valw
    mov  [%2+%%dst_off+2], valw
%endif ; %1 >=/< 8
%assign %%dst_off %%dst_off+4
%endif ; %1 & 4
%if %1 & 2
    mov  [%2+%%dst_off], valw
%endif ; %1 & 2
%endmacro
; r0=buf+block_h*linesize, r1=start_x, r2=linesize, r5=block_h, r6/r3=val
%macro LEFT_EXTEND 0
%assign %%n 2
%rep 11
ALIGN 64
.emuedge_extend_left_ %+ %%n:          ; do {
    sub         r0, r2                 ;   dst -= linesize
    READ_V_PIXEL %%n, [r0+r1]          ;   read pixels
    WRITE_V_PIXEL %%n, r0              ;   write pixels
    dec         r5
    jnz .emuedge_extend_left_ %+ %%n   ; } while (--block_h)
%if ARCH_X86_64
    ret
%else ; ARCH_X86_32
    rep ret
%endif ; ARCH_X86_64/32
%assign %%n %%n+2
%endrep
%endmacro ; LEFT_EXTEND
; r3/r0=buf+block_h*linesize, r2=linesize, r8/r5=block_h, r0/r6=end_x, r6/r3=val
%macro RIGHT_EXTEND 0
%assign %%n 2
%rep 11
ALIGN 64
.emuedge_extend_right_ %+ %%n:           ; do {
%if ARCH_X86_64
    sub         r3, r2                   ;   dst -= linesize
    READ_V_PIXEL %%n, [r3+w_reg-1]       ;   read pixels
    WRITE_V_PIXEL %%n, r3+r4-%%n         ;   write pixels
    dec         r8
%else ; ARCH_X86_32
    sub         r0, r2                   ;   dst -= linesize
    READ_V_PIXEL %%n, [r0+w_reg-1]       ;   read pixels
    WRITE_V_PIXEL %%n, r0+r4-%%n         ;   write pixels
    dec         r5
%endif ; ARCH_X86_64/32
    jnz .emuedge_extend_right_ %+ %%n    ; } while (--block_h)
%if ARCH_X86_64
    ret
%else ; ARCH_X86_32
    rep ret
%endif ; ARCH_X86_64/32
%assign %%n %%n+2
%endrep
%if ARCH_X86_32
%define stack_offset 0x10
%endif
%endmacro ; RIGHT_EXTEND
; below follow the "slow" copy/extend functions; these act on a non-fixed
; width specified in a register, and run a loop to copy the full amount
; of bytes. They are optimized for copying large amounts of pixels per
; line, so they unconditionally splat data into mm registers to copy 8
; bytes per loop iteration. Using xmm registers on x86-64 could be
; considered as well, but I haven't optimized that as much (i.e. FIXME)
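;
; The per-line copy these macros build, as a hedged C sketch (chunk
; decomposition only, with the width in a variable; assumes
; <stdint.h>/<string.h>; the sse path uses 16-byte chunks instead of 8):
;
;   static void copy_row_var(uint8_t *dst, const uint8_t *src, int w)
;   {
;       int x = 0;
;       for (; x <= w - 8; x += 8)                        /* bulk 8-byte chunks */
;           memcpy(dst + x, src + x, 8);
;       if (w - x >= 4) { memcpy(dst + x, src + x, 4); x += 4; }
;       if (w - x >= 2) { memcpy(dst + x, src + x, 2); x += 2; }
;       if (w - x >= 1) dst[x] = src[x];
;   }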
%macro V_COPY_NPX 4-5
%if %0 == 4
    test     w_reg, %4
    jz .%1_skip_%4_px
%else ; %0 == 5
.%1_%4_px_loop:
    movd            m0, vald
%if mmsize == 16
    shufps          m0, m0, q0000
%else
    punpckldq       m0, m0
%endif
    %3          %2, [r1+cnt_reg]
    %3 [r0+cnt_reg], %2
    add    cnt_reg, %4
%if %0 == 5
    sub      w_reg, %4
    test     w_reg, %5
    jnz .%1_%4_px_loop
%endif ; %1 >= 8
%endif
.%1_skip_%4_px:
%endmacro
%endmacro ; READ_V_PIXEL
%macro V_COPY_ROW 2
%ifidn %1, bottom
    sub         r1, linesize
%macro WRITE_V_PIXEL 2
%assign %%off 0
%rep %1/mmsize
    movu [%2+%%off], m0
%assign %%off %%off+mmsize
%endrep ; %1/mmsize
%if mmsize == 16
%if %1-%%off >= 8
%if %1 > 16 && %1-%%off > 8
    movu [%2+%1-16], m0
%assign %%off %1
%else
    movq [%2+%%off], m0
%assign %%off %%off+8
%endif
.%1_copy_loop:
    xor    cnt_reg, cnt_reg
%if notcpuflag(sse)
%define linesize r2m
    V_COPY_NPX %1, mm0, movq, 8, 0xFFFFFFF8
%else ; sse
    V_COPY_NPX %1, xmm0, movups, 16, 0xFFFFFFF0
%if ARCH_X86_64
%define linesize r2
    V_COPY_NPX %1, rax, mov, 8
%else ; ARCH_X86_32
%define linesize r2m
    V_COPY_NPX %1, mm0, movq, 8
%endif ; ARCH_X86_64/32
%endif ; sse
    V_COPY_NPX %1, vald, mov, 4
    V_COPY_NPX %1, valw, mov, 2
    V_COPY_NPX %1, vall, mov, 1
    mov      w_reg, cnt_reg
%ifidn %1, body
    add         r1, linesize
%endif ; %1-%%off >= 8
%endif
    add         r0, linesize
    dec         %2
    jnz .%1_copy_loop
%endmacro
%macro SLOW_V_EXTEND 0
.slow_v_extend_loop:
; r0=buf,r1=src,r2(64)/r2m(32)=linesize,r3(64)/r3m(32)=start_x,r4=end_y,r5=block_h
; r8(64)/r3(later-64)/r2(32)=cnt_reg,r6(64)/r3(32)=val_reg,r7(64)/r6(32)=w=end_x-start_x
%if ARCH_X86_64
    push        r8                       ; save old value of block_h
    test        r3, r3
%define cnt_reg r8
    jz .do_body_copy                     ; if (!start_y) goto do_body_copy
    V_COPY_ROW top, r3
%if %1-%%off >= 4
%if %1 > 8 && %1-%%off > 4
    movq [%2+%1-8], m0
%assign %%off %1
%elif %1 >= 8 && %1-%%off >= 4
    movd [%2+%%off], m0
%assign %%off %%off+4
%else
    cmp  dword r3m, 0
%define cnt_reg r2
    je .do_body_copy                     ; if (!start_y) goto do_body_copy
    V_COPY_ROW top, dword r3m
    mov  [%2+%%off], vald
%assign %%off %%off+4
%endif
%endif ; %1-%%off >= 4
.do_body_copy:
    V_COPY_ROW body, r4
%if ARCH_X86_64
    pop         r8                       ; restore old value of block_h
%define cnt_reg r3
%endif
    test        r5, r5
%if ARCH_X86_64
    jz .v_extend_end
%if %1-%%off >= 2
%if %1 >= 8
    movd [%2+%1-4], m0
%else
    jz .skip_bottom_extend
    mov  [%2+%%off], valw
%endif
    V_COPY_ROW bottom, r5
%if ARCH_X86_32
.skip_bottom_extend:
    mov         r2, r2m
%endif
    jmp .v_extend_end
%endmacro
%endif ; (%1-%%off)/2
%endmacro ; WRITE_V_PIXEL
%macro H_EXTEND 2
%assign %%n %1
%rep 1+(%2-%1)/2
cglobal emu_edge_hfix %+ %%n, 4, 5, 1, dst, dst_stride, start_x, bh, val
.loop_y:                                        ; do {
    READ_V_PIXEL    %%n, [dstq+start_xq]        ;   $variable_regs = read($n)
    WRITE_V_PIXEL   %%n, dstq                   ;   write($variable_regs, $n)
    add            dstq, dst_strideq            ;   dst += dst_stride
    dec             bhq                         ; } while (--bh)
    jnz .loop_y
    RET
%assign %%n %%n+2
%endrep ; 1+(%2-%1)/2
%endmacro ; H_EXTEND
%macro SLOW_LEFT_EXTEND 0
.slow_left_extend_loop:
; r0=buf+block_h*linesize,r2=linesize,r6(64)/r3(32)=val,r5=block_h,r4=cntr,r7/r6=start_x
    mov         r4, 8
    sub         r0, linesize
    READ_V_PIXEL 8, [r0+w_reg]
.left_extend_8px_loop:
    movq [r0+r4-8], mm0
    add         r4, 8
    cmp         r4, w_reg
    jle .left_extend_8px_loop
    sub         r4, 8
    cmp         r4, w_reg
    jge .left_extend_loop_end
.left_extend_2px_loop:
    mov    [r0+r4], valw
    add         r4, 2
    cmp         r4, w_reg
    jl .left_extend_2px_loop
.left_extend_loop_end:
    dec         r5
    jnz .slow_left_extend_loop
INIT_MMX mmx
H_EXTEND 2, 14
%if ARCH_X86_32
    mov         r2, r2m
%endif
    jmp .right_extend
%endmacro
%macro SLOW_RIGHT_EXTEND 0
.slow_right_extend_loop:
; r3(64)/r0(32)=buf+block_h*linesize,r2=linesize,r4=block_w,r8(64)/r5(32)=block_h,
; r7(64)/r6(32)=end_x,r6/r3=val,r1=cntr
%if ARCH_X86_64
%define buf_reg r3
%define bh_reg r8
%else
%define buf_reg r0
%define bh_reg r5
H_EXTEND 16, 22
%endif
    lea         r1, [r4-8]
    sub    buf_reg, linesize
    READ_V_PIXEL 8, [buf_reg+w_reg-1]
.right_extend_8px_loop:
    movq [buf_reg+r1], mm0
    sub         r1, 8
    cmp         r1, w_reg
    jge .right_extend_8px_loop
    add         r1, 8
    cmp         r1, w_reg
    je .right_extend_loop_end
.right_extend_2px_loop:
    sub         r1, 2
    mov [buf_reg+r1], valw
    cmp         r1, w_reg
    jg .right_extend_2px_loop
.right_extend_loop_end:
    dec     bh_reg
    jnz .slow_right_extend_loop
    jmp .h_extend_end
%endmacro
%macro emu_edge 1
INIT_XMM %1
EMU_EDGE_FUNC
VERTICAL_EXTEND
LEFT_EXTEND
RIGHT_EXTEND
SLOW_V_EXTEND
SLOW_LEFT_EXTEND
SLOW_RIGHT_EXTEND
%endmacro
emu_edge sse
%if ARCH_X86_32
emu_edge mmx
%endif
INIT_XMM sse
H_EXTEND 16, 22
%macro PREFETCH_FN 1
cglobal prefetch, 3, 3, 0, buf, stride, h