|
|
|
@@ -291,11 +291,17 @@ void palette8torgb15(const uint8_t *src, uint8_t *dst, unsigned num_pixels, cons |
|
|
|
|
} |
|
|
|
|
/**
|
|
|
|
|
* |
|
|
|
|
* num_pixels must be a multiple of 16 for the MMX version |
|
|
|
|
* width must be a multiple of 16 for the MMX version |
|
|
|
|
*/ |
|
|
|
|
void yv12toyuy2(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, unsigned num_pixels) |
|
|
|
|
void yv12toyuy2(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, |
|
|
|
|
int width, int height, int lumStride, int chromStride, int dstStride) |
|
|
|
|
{ |
|
|
|
|
int y; |
|
|
|
|
const int chromWidth= width>>1; |
|
|
|
|
for(y=0; y<height; y++) |
|
|
|
|
{ |
|
|
|
|
#ifdef HAVE_MMX |
|
|
|
|
//FIXME handle 2 lines a once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway)
|
|
|
|
|
asm volatile( |
|
|
|
|
"xorl %%eax, %%eax \n\t" |
|
|
|
|
"1: \n\t" |
|
|
|
@@ -325,22 +331,31 @@ void yv12toyuy2(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, u |
|
|
|
|
"addl $8, %%eax \n\t" |
|
|
|
|
"cmpl %4, %%eax \n\t" |
|
|
|
|
" jb 1b \n\t" |
|
|
|
|
EMMS" \n\t" |
|
|
|
|
SFENCE |
|
|
|
|
::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "r" (num_pixels>>1) |
|
|
|
|
: "memory", "%eax" |
|
|
|
|
::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "r" (chromWidth) |
|
|
|
|
: "%eax" |
|
|
|
|
); |
|
|
|
|
|
|
|
|
|
#else |
|
|
|
|
int i; |
|
|
|
|
num_pixels>>=1; |
|
|
|
|
for(i=0; i<num_pixels; i++) |
|
|
|
|
for(i=0; i<chromWidth; i++) |
|
|
|
|
{ |
|
|
|
|
dst[4*i+0] = ysrc[2*i+0]; |
|
|
|
|
dst[4*i+1] = usrc[i]; |
|
|
|
|
dst[4*i+2] = ysrc[2*i+1]; |
|
|
|
|
dst[4*i+3] = vsrc[i]; |
|
|
|
|
} |
|
|
|
|
#endif |
|
|
|
|
if(y&1) |
|
|
|
|
{ |
|
|
|
|
usrc += chromStride; |
|
|
|
|
vsrc += chromStride; |
|
|
|
|
} |
|
|
|
|
ysrc += lumStride; |
|
|
|
|
dst += dstStride; |
|
|
|
|
} |
|
|
|
|
#ifdef HAVE_MMX |
|
|
|
|
asm( EMMS" \n\t" |
|
|
|
|
SFENCE" \n\t" |
|
|
|
|
:::"memory"); |
|
|
|
|
#endif |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|