@@ -58,7 +58,7 @@ static av_always_inline void fill_rectangle(void *vp, int w, int h, int stride,
         *(uint16_t*)(p + 2*stride)= v;
         *(uint16_t*)(p + 3*stride)= v;
     }else if(w==4){
-        const uint32_t v= size==4 ? val : val*0x01010101;
+        const uint32_t v= size==4 ? val : size==2 ? val*0x00010001 : val*0x01010101;
         *(uint32_t*)(p + 0*stride)= v;
         if(h==1) return;
         *(uint32_t*)(p + 1*stride)= v;
@@ -68,7 +68,7 @@ static av_always_inline void fill_rectangle(void *vp, int w, int h, int stride,
     }else if(w==8){
     //gcc can't optimize 64bit math on x86_32
 #if HAVE_FAST_64BIT
-        const uint64_t v= val*0x0100000001ULL;
+        const uint64_t v= size==2 ? val*0x0001000100010001ULL : val*0x0100000001ULL;
         *(uint64_t*)(p + 0*stride)= v;
         if(h==1) return;
         *(uint64_t*)(p + 1*stride)= v;
@@ -87,34 +87,35 @@ static av_always_inline void fill_rectangle(void *vp, int w, int h, int stride,
         *(uint64_t*)(p + 0+3*stride)= v;
         *(uint64_t*)(p + 8+3*stride)= v;
 #else
-        *(uint32_t*)(p + 0+0*stride)= val;
-        *(uint32_t*)(p + 4+0*stride)= val;
+        const uint32_t v= size==2 ? val*0x00010001 : val*0x01010101;
+        *(uint32_t*)(p + 0+0*stride)= v;
+        *(uint32_t*)(p + 4+0*stride)= v;
         if(h==1) return;
-        *(uint32_t*)(p + 0+1*stride)= val;
-        *(uint32_t*)(p + 4+1*stride)= val;
+        *(uint32_t*)(p + 0+1*stride)= v;
+        *(uint32_t*)(p + 4+1*stride)= v;
         if(h==2) return;
-        *(uint32_t*)(p + 0+2*stride)= val;
-        *(uint32_t*)(p + 4+2*stride)= val;
-        *(uint32_t*)(p + 0+3*stride)= val;
-        *(uint32_t*)(p + 4+3*stride)= val;
+        *(uint32_t*)(p + 0+2*stride)= v;
+        *(uint32_t*)(p + 4+2*stride)= v;
+        *(uint32_t*)(p + 0+3*stride)= v;
+        *(uint32_t*)(p + 4+3*stride)= v;
     }else if(w==16){
-        *(uint32_t*)(p + 0+0*stride)= val;
-        *(uint32_t*)(p + 4+0*stride)= val;
-        *(uint32_t*)(p + 8+0*stride)= val;
-        *(uint32_t*)(p +12+0*stride)= val;
-        *(uint32_t*)(p + 0+1*stride)= val;
-        *(uint32_t*)(p + 4+1*stride)= val;
-        *(uint32_t*)(p + 8+1*stride)= val;
-        *(uint32_t*)(p +12+1*stride)= val;
+        *(uint32_t*)(p + 0+0*stride)= v;
+        *(uint32_t*)(p + 4+0*stride)= v;
+        *(uint32_t*)(p + 8+0*stride)= v;
+        *(uint32_t*)(p +12+0*stride)= v;
+        *(uint32_t*)(p + 0+1*stride)= v;
+        *(uint32_t*)(p + 4+1*stride)= v;
+        *(uint32_t*)(p + 8+1*stride)= v;
+        *(uint32_t*)(p +12+1*stride)= v;
         if(h==2) return;
-        *(uint32_t*)(p + 0+2*stride)= val;
-        *(uint32_t*)(p + 4+2*stride)= val;
-        *(uint32_t*)(p + 8+2*stride)= val;
-        *(uint32_t*)(p +12+2*stride)= val;
-        *(uint32_t*)(p + 0+3*stride)= val;
-        *(uint32_t*)(p + 4+3*stride)= val;
-        *(uint32_t*)(p + 8+3*stride)= val;
-        *(uint32_t*)(p +12+3*stride)= val;
+        *(uint32_t*)(p + 0+2*stride)= v;
+        *(uint32_t*)(p + 4+2*stride)= v;
+        *(uint32_t*)(p + 8+2*stride)= v;
+        *(uint32_t*)(p +12+2*stride)= v;
+        *(uint32_t*)(p + 0+3*stride)= v;
+        *(uint32_t*)(p + 4+3*stride)= v;
+        *(uint32_t*)(p + 8+3*stride)= v;
+        *(uint32_t*)(p +12+3*stride)= v;
 #endif
     }else
         assert(0);
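
The size==2 cases added above reuse the multiply-to-broadcast trick of the existing byte path: multiplying a pixel value by a constant with a 1 in each lane replicates it across the whole word, so a single store writes several pixels at once. Below is a minimal standalone sketch (not part of the patch) that checks the three broadcast constants used in this diff:

/* Standalone sketch, not part of the patch: verifies the broadcast
 * constants that fill_rectangle() uses to replicate one pixel value
 * across a whole machine word. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t val8  = 0xAB;   /* one 8-bit pixel value (size==1)  */
    uint32_t val16 = 0x01FF; /* one 16-bit pixel value (size==2) */

    /* size==1: replicate a byte into all four bytes of a 32-bit word */
    assert(val8  * 0x01010101u == 0xABABABABu);
    /* size==2: replicate a 16-bit value into both halves of a 32-bit word */
    assert(val16 * 0x00010001u == 0x01FF01FFu);
    /* size==2, HAVE_FAST_64BIT path: four copies in one 64-bit word */
    assert(val16 * 0x0001000100010001ULL == 0x01FF01FF01FF01FFULL);

    puts("broadcast constants OK");
    return 0;
}

On the HAVE_FAST_64BIT path this fills four 16-bit pixels per store; the 32-bit fallback in the #else branch gets two per store, which is why that branch now hoists the broadcast into v instead of storing raw val.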