Cosmetics: whitespaces

Originally committed as revision 25778 to svn://svn.mplayerhq.hu/mplayer/trunk/libswscale
Author: Benoit Fouet
parent e5091488a5
commit 30c48a0af4
Changed files:
  1. libswscale/rgb2rgb.c (74 lines changed)
  2. libswscale/rgb2rgb_template.c (8 lines changed)
  3. libswscale/swscale.c (10 lines changed)
  4. libswscale/swscale_altivec_template.c (28 lines changed)
  5. libswscale/yuv2rgb_bfin.c (16 lines changed)
  6. libswscale/yuv2rgb_vis.c (2 lines changed)
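The edit itself is purely cosmetic: a space is inserted after commas in parameter lists and function calls. As a minimal sketch of the convention, using a hypothetical declaration rather than one taken from the diff:

    #include <stdint.h>

    /* before: no space after the commas */
    void example_rgb24to32(const uint8_t *src,uint8_t *dst,long src_size);

    /* after: one space after each comma, the style applied by this commit */
    void example_rgb24to32(const uint8_t *src, uint8_t *dst, long src_size);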

libswscale/rgb2rgb.c
@@ -35,18 +35,18 @@
#define FAST_BGR2YV12 // use 7 bit coeffs instead of 15bit
void (*rgb24to32)(const uint8_t *src,uint8_t *dst,long src_size);
void (*rgb24to16)(const uint8_t *src,uint8_t *dst,long src_size);
void (*rgb24to15)(const uint8_t *src,uint8_t *dst,long src_size);
void (*rgb32to24)(const uint8_t *src,uint8_t *dst,long src_size);
void (*rgb32to16)(const uint8_t *src,uint8_t *dst,long src_size);
void (*rgb32to15)(const uint8_t *src,uint8_t *dst,long src_size);
void (*rgb15to16)(const uint8_t *src,uint8_t *dst,long src_size);
void (*rgb15to24)(const uint8_t *src,uint8_t *dst,long src_size);
void (*rgb15to32)(const uint8_t *src,uint8_t *dst,long src_size);
void (*rgb16to15)(const uint8_t *src,uint8_t *dst,long src_size);
void (*rgb16to24)(const uint8_t *src,uint8_t *dst,long src_size);
void (*rgb16to32)(const uint8_t *src,uint8_t *dst,long src_size);
void (*rgb24to32)(const uint8_t *src, uint8_t *dst, long src_size);
void (*rgb24to16)(const uint8_t *src, uint8_t *dst, long src_size);
void (*rgb24to15)(const uint8_t *src, uint8_t *dst, long src_size);
void (*rgb32to24)(const uint8_t *src, uint8_t *dst, long src_size);
void (*rgb32to16)(const uint8_t *src, uint8_t *dst, long src_size);
void (*rgb32to15)(const uint8_t *src, uint8_t *dst, long src_size);
void (*rgb15to16)(const uint8_t *src, uint8_t *dst, long src_size);
void (*rgb15to24)(const uint8_t *src, uint8_t *dst, long src_size);
void (*rgb15to32)(const uint8_t *src, uint8_t *dst, long src_size);
void (*rgb16to15)(const uint8_t *src, uint8_t *dst, long src_size);
void (*rgb16to24)(const uint8_t *src, uint8_t *dst, long src_size);
void (*rgb16to32)(const uint8_t *src, uint8_t *dst, long src_size);
//void (*rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long src_size);
void (*rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size);
void (*rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long src_size);
@@ -231,20 +231,20 @@ void palette8torgb32(const uint8_t *src, uint8_t *dst, long num_pixels, const ui
/*
for (i=0; i<num_pixels; i++)
((unsigned *)dst)[i] = ((unsigned *)palette)[ src[i] ];
((unsigned *)dst)[i] = ((unsigned *)palette)[src[i]];
*/
for (i=0; i<num_pixels; i++)
{
#ifdef WORDS_BIGENDIAN
dst[3]= palette[ src[i]*4+2 ];
dst[2]= palette[ src[i]*4+1 ];
dst[1]= palette[ src[i]*4+0 ];
dst[3]= palette[src[i]*4+2];
dst[2]= palette[src[i]*4+1];
dst[1]= palette[src[i]*4+0];
#else
//FIXME slow?
dst[0]= palette[ src[i]*4+2 ];
dst[1]= palette[ src[i]*4+1 ];
dst[2]= palette[ src[i]*4+0 ];
dst[0]= palette[src[i]*4+2];
dst[1]= palette[src[i]*4+1];
dst[2]= palette[src[i]*4+0];
//dst[3]= 0; /* do we need this cleansing? */
#endif
dst+= 4;
@@ -257,14 +257,14 @@ void palette8tobgr32(const uint8_t *src, uint8_t *dst, long num_pixels, const ui
for (i=0; i<num_pixels; i++)
{
#ifdef WORDS_BIGENDIAN
dst[3]= palette[ src[i]*4+0 ];
dst[2]= palette[ src[i]*4+1 ];
dst[1]= palette[ src[i]*4+2 ];
dst[3]= palette[src[i]*4+0];
dst[2]= palette[src[i]*4+1];
dst[1]= palette[src[i]*4+2];
#else
//FIXME slow?
dst[0]= palette[ src[i]*4+0 ];
dst[1]= palette[ src[i]*4+1 ];
dst[2]= palette[ src[i]*4+2 ];
dst[0]= palette[src[i]*4+0];
dst[1]= palette[src[i]*4+1];
dst[2]= palette[src[i]*4+2];
//dst[3]= 0; /* do we need this cleansing? */
#endif
@@ -281,14 +281,14 @@ void palette8torgb24(const uint8_t *src, uint8_t *dst, long num_pixels, const ui
/*
writes 1 byte o much and might cause alignment issues on some architectures?
for (i=0; i<num_pixels; i++)
((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[ src[i] ];
((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[src[i]];
*/
for (i=0; i<num_pixels; i++)
{
//FIXME slow?
dst[0]= palette[ src[i]*4+2 ];
dst[1]= palette[ src[i]*4+1 ];
dst[2]= palette[ src[i]*4+0 ];
dst[0]= palette[src[i]*4+2];
dst[1]= palette[src[i]*4+1];
dst[2]= palette[src[i]*4+0];
dst+= 3;
}
}
@@ -299,14 +299,14 @@ void palette8tobgr24(const uint8_t *src, uint8_t *dst, long num_pixels, const ui
/*
writes 1 byte o much and might cause alignment issues on some architectures?
for (i=0; i<num_pixels; i++)
((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[ src[i] ];
((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[src[i]];
*/
for (i=0; i<num_pixels; i++)
{
//FIXME slow?
dst[0]= palette[ src[i]*4+0 ];
dst[1]= palette[ src[i]*4+1 ];
dst[2]= palette[ src[i]*4+2 ];
dst[0]= palette[src[i]*4+0];
dst[1]= palette[src[i]*4+1];
dst[2]= palette[src[i]*4+2];
dst+= 3;
}
}
@@ -318,13 +318,13 @@ void palette8torgb16(const uint8_t *src, uint8_t *dst, long num_pixels, const ui
{
long i;
for (i=0; i<num_pixels; i++)
((uint16_t *)dst)[i] = ((uint16_t *)palette)[ src[i] ];
((uint16_t *)dst)[i] = ((uint16_t *)palette)[src[i]];
}
void palette8tobgr16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
{
long i;
for (i=0; i<num_pixels; i++)
((uint16_t *)dst)[i] = bswap_16(((uint16_t *)palette)[ src[i] ]);
((uint16_t *)dst)[i] = bswap_16(((uint16_t *)palette)[src[i]]);
}
/**
@@ -334,13 +334,13 @@ void palette8torgb15(const uint8_t *src, uint8_t *dst, long num_pixels, const ui
{
long i;
for (i=0; i<num_pixels; i++)
((uint16_t *)dst)[i] = ((uint16_t *)palette)[ src[i] ];
((uint16_t *)dst)[i] = ((uint16_t *)palette)[src[i]];
}
void palette8tobgr15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
{
long i;
for (i=0; i<num_pixels; i++)
((uint16_t *)dst)[i] = bswap_16(((uint16_t *)palette)[ src[i] ]);
((uint16_t *)dst)[i] = bswap_16(((uint16_t *)palette)[src[i]]);
}
void rgb32tobgr24(const uint8_t *src, uint8_t *dst, long src_size)

libswscale/rgb2rgb_template.c
@@ -82,7 +82,7 @@
#define SFENCE " # nop"
#endif
static inline void RENAME(rgb24to32)(const uint8_t *src,uint8_t *dst,long src_size)
static inline void RENAME(rgb24to32)(const uint8_t *src, uint8_t *dst, long src_size)
{
uint8_t *dest = dst;
const uint8_t *s = src;
@@ -142,7 +142,7 @@ static inline void RENAME(rgb24to32)(const uint8_t *src,uint8_t *dst,long src_si
}
}
static inline void RENAME(rgb32to24)(const uint8_t *src,uint8_t *dst,long src_size)
static inline void RENAME(rgb32to24)(const uint8_t *src, uint8_t *dst, long src_size)
{
uint8_t *dest = dst;
const uint8_t *s = src;
@@ -234,7 +234,7 @@ static inline void RENAME(rgb32to24)(const uint8_t *src,uint8_t *dst,long src_si
MMX2, 3DNOW optimization by Nick Kurshev
32 bit C version, and and&add trick by Michael Niedermayer
*/
static inline void RENAME(rgb15to16)(const uint8_t *src,uint8_t *dst,long src_size)
static inline void RENAME(rgb15to16)(const uint8_t *src, uint8_t *dst, long src_size)
{
register const uint8_t* s=src;
register uint8_t* d=dst;
@@ -283,7 +283,7 @@ static inline void RENAME(rgb15to16)(const uint8_t *src,uint8_t *dst,long src_si
}
}
static inline void RENAME(rgb16to15)(const uint8_t *src,uint8_t *dst,long src_size)
static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, long src_size)
{
register const uint8_t* s=src;
register uint8_t* d=dst;

libswscale/swscale.c
@@ -1597,9 +1597,9 @@ static int PlanarToNV12Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], i
}
dst = dstParam[1] + dstStride[1]*srcSliceY/2;
if (c->dstFormat == PIX_FMT_NV12)
interleaveBytes(src[1],src[2],dst,c->srcW/2,srcSliceH/2,srcStride[1],srcStride[2],dstStride[0]);
interleaveBytes(src[1], src[2], dst, c->srcW/2, srcSliceH/2, srcStride[1], srcStride[2], dstStride[0]);
else
interleaveBytes(src[2],src[1],dst,c->srcW/2,srcSliceH/2,srcStride[2],srcStride[1],dstStride[0]);
interleaveBytes(src[2], src[1], dst, c->srcW/2, srcSliceH/2, srcStride[2], srcStride[1], dstStride[0]);
return srcSliceH;
}
@@ -1608,7 +1608,7 @@ static int PlanarToYuy2Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], i
int srcSliceH, uint8_t* dstParam[], int dstStride[]){
uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
yv12toyuy2(src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0]);
yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
return srcSliceH;
}
@@ -1617,7 +1617,7 @@ static int PlanarToUyvyWrapper(SwsContext *c, uint8_t* src[], int srcStride[], i
int srcSliceH, uint8_t* dstParam[], int dstStride[]){
uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
yv12touyvy(src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0]);
yv12touyvy(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
return srcSliceH;
}
@@ -2489,7 +2489,7 @@ SwsContext *sws_getContext(int srcW, int srcH, int srcFormat, int dstW, int dstH
{
if (c->vLumFilterSize==1 && c->vChrFilterSize==2)
av_log(c, AV_LOG_VERBOSE, "SwScaler: using 1-tap %s \"scaler\" for vertical luminance scaling (BGR)\n"
"SwScaler: 2-tap scaler for vertical chrominance scaling (BGR)\n",(flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
"SwScaler: 2-tap scaler for vertical chrominance scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
else if (c->vLumFilterSize==2 && c->vChrFilterSize==2)
av_log(c, AV_LOG_VERBOSE, "SwScaler: using 2-tap linear %s scaler for vertical scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
else

libswscale/swscale_altivec_template.c
@@ -27,7 +27,7 @@ static inline void
altivec_packIntArrayToCharArray(int *val, uint8_t* dest, int dstW) {
register int i;
vector unsigned int altivec_vectorShiftInt19 =
vec_add(vec_splat_u32(10),vec_splat_u32(9));
vec_add(vec_splat_u32(10), vec_splat_u32(9));
if ((unsigned long)dest % 16) {
/* badly aligned store, we force store alignment */
/* and will handle load misalignment on val w/ vec_perm */
@@ -46,10 +46,10 @@ altivec_packIntArrayToCharArray(int *val, uint8_t* dest, int dstW) {
vector signed int v3 = vec_ld(offset + 32, val);
vector signed int v4 = vec_ld(offset + 48, val);
vector signed int v5 = vec_ld(offset + 64, val);
vector signed int v12 = vec_perm(v1,v2,perm1);
vector signed int v23 = vec_perm(v2,v3,perm1);
vector signed int v34 = vec_perm(v3,v4,perm1);
vector signed int v45 = vec_perm(v4,v5,perm1);
vector signed int v12 = vec_perm(v1, v2, perm1);
vector signed int v23 = vec_perm(v2, v3, perm1);
vector signed int v34 = vec_perm(v3, v4, perm1);
vector signed int v45 = vec_perm(v4, v5, perm1);
vector signed int vA = vec_sra(v12, altivec_vectorShiftInt19);
vector signed int vB = vec_sra(v23, altivec_vectorShiftInt19);
@@ -137,7 +137,7 @@ yuv2yuvX_altivec_real(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
val[i] += lumSrc[j][i] * lumFilter[j];
}
}
altivec_packIntArrayToCharArray(val,dest,dstW);
altivec_packIntArrayToCharArray(val, dest, dstW);
}
if (uDest != 0) {
int __attribute__ ((aligned (16))) u[chrDstW];
@@ -203,8 +203,8 @@ yuv2yuvX_altivec_real(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
v[i] += chrSrc[j][i + 2048] * chrFilter[j];
}
}
altivec_packIntArrayToCharArray(u,uDest,chrDstW);
altivec_packIntArrayToCharArray(v,vDest,chrDstW);
altivec_packIntArrayToCharArray(u, uDest, chrDstW);
altivec_packIntArrayToCharArray(v, vDest, chrDstW);
}
}
@@ -252,9 +252,9 @@ static inline void hScale_altivec_real(int16_t *dst, int dstW, uint8_t *src, int
// and we're going to use vec_mule, so we chose
// carefully how to "unpack" the elements into the even slots
if ((i << 3) % 16)
filter_v = vec_mergel(filter_v,(vector signed short)vzero);
filter_v = vec_mergel(filter_v, (vector signed short)vzero);
else
filter_v = vec_mergeh(filter_v,(vector signed short)vzero);
filter_v = vec_mergeh(filter_v, (vector signed short)vzero);
val_vEven = vec_mule(src_v, filter_v);
val_s = vec_sums(val_vEven, vzero);
@@ -387,7 +387,7 @@ static inline void hScale_altivec_real(int16_t *dst, int dstW, uint8_t *src, int
static inline int yv12toyuy2_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) {
uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
// yv12toyuy2(src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0]);
// yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
uint8_t *ysrc = src[0];
uint8_t *usrc = src[1];
uint8_t *vsrc = src[2];
@@ -401,7 +401,7 @@ static inline int yv12toyuy2_unscaled_altivec(SwsContext *c, uint8_t* src[], int
register unsigned int y;
if (width&15) {
yv12toyuy2(ysrc, usrc, vsrc, dst,c->srcW,srcSliceH, lumStride, chromStride, dstStride);
yv12toyuy2(ysrc, usrc, vsrc, dst, c->srcW, srcSliceH, lumStride, chromStride, dstStride);
return srcSliceH;
}
@@ -464,7 +464,7 @@ static inline int yv12toyuy2_unscaled_altivec(SwsContext *c, uint8_t* src[], int
static inline int yv12touyvy_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) {
uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
// yv12toyuy2(src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0]);
// yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
uint8_t *ysrc = src[0];
uint8_t *usrc = src[1];
uint8_t *vsrc = src[2];
@@ -478,7 +478,7 @@ static inline int yv12touyvy_unscaled_altivec(SwsContext *c, uint8_t* src[], int
register unsigned int y;
if (width&15) {
yv12touyvy(ysrc, usrc, vsrc, dst,c->srcW,srcSliceH, lumStride, chromStride, dstStride);
yv12touyvy(ysrc, usrc, vsrc, dst, c->srcW, srcSliceH, lumStride, chromStride, dstStride);
return srcSliceH;
}

libswscale/yuv2rgb_bfin.c
@@ -110,12 +110,12 @@ static int core_yuv420_rgb (SwsContext *c,
for (i=0;i<h2;i++) {
lcscf (py,pu,pv,op,w,&c->oy);
lcscf (py, pu, pv, op, w, &c->oy);
py += instrides[0];
op += outstrides[0];
lcscf (py,pu,pv,op,w,&c->oy);
lcscf (py, pu, pv, op, w, &c->oy);
py += instrides[0];
pu += instrides[1];
@@ -132,7 +132,7 @@ static int bfin_yuv420_rgb555 (SwsContext *c,
int srcSliceY, int srcSliceH,
uint8_t **oplanes, int *outstrides)
{
return core_yuv420_rgb (c,in,instrides,srcSliceY,srcSliceH,oplanes,outstrides,
return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides,
ff_bfin_yuv2rgb555_line, 1, 555);
}
@@ -141,7 +141,7 @@ static int bfin_yuv420_bgr555 (SwsContext *c,
int srcSliceY, int srcSliceH,
uint8_t **oplanes, int *outstrides)
{
return core_yuv420_rgb (c,in,instrides,srcSliceY,srcSliceH,oplanes,outstrides,
return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides,
ff_bfin_yuv2rgb555_line, 0, 555);
}
@@ -150,7 +150,7 @@ static int bfin_yuv420_rgb24 (SwsContext *c,
int srcSliceY, int srcSliceH,
uint8_t **oplanes, int *outstrides)
{
return core_yuv420_rgb (c,in,instrides,srcSliceY,srcSliceH,oplanes,outstrides,
return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides,
ff_bfin_yuv2rgb24_line, 1, 888);
}
@@ -159,7 +159,7 @@ static int bfin_yuv420_bgr24 (SwsContext *c,
int srcSliceY, int srcSliceH,
uint8_t **oplanes, int *outstrides)
{
return core_yuv420_rgb (c,in,instrides,srcSliceY,srcSliceH,oplanes,outstrides,
return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides,
ff_bfin_yuv2rgb24_line, 0, 888);
}
@@ -168,7 +168,7 @@ static int bfin_yuv420_rgb565 (SwsContext *c,
int srcSliceY, int srcSliceH,
uint8_t **oplanes, int *outstrides)
{
return core_yuv420_rgb (c,in,instrides,srcSliceY,srcSliceH,oplanes,outstrides,
return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides,
ff_bfin_yuv2rgb565_line, 1, 565);
}
@@ -177,7 +177,7 @@ static int bfin_yuv420_bgr565 (SwsContext *c,
int srcSliceY, int srcSliceH,
uint8_t **oplanes, int *outstrides)
{
return core_yuv420_rgb (c,in,instrides,srcSliceY,srcSliceH,oplanes,outstrides,
return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides,
ff_bfin_yuv2rgb565_line, 0, 565);
}

libswscale/yuv2rgb_vis.c
@@ -188,7 +188,7 @@ SwsFunc yuv2rgb_init_vis(SwsContext *c) {
c->sparc_coeffs[8]=c->ubCoeff;
c->sparc_coeffs[9]=c->ugCoeff;
c->sparc_coeffs[0]=(((int16_t)c->yOffset*(int16_t)c->yCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
c->sparc_coeffs[0]=(((int16_t)c->yOffset*(int16_t)c->yCoeff >>11) & 0xffff) * 0x0001000100010001ULL;
c->sparc_coeffs[1]=(((int16_t)c->uOffset*(int16_t)c->ubCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
c->sparc_coeffs[2]=(((int16_t)c->uOffset*(int16_t)c->ugCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
c->sparc_coeffs[3]=(((int16_t)c->vOffset*(int16_t)c->vgCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
