@@ -51,6 +51,11 @@
#endif
#endif

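// ARITHM_OP selects the scalar operation at compile time: the host is expected to
// define either ARITHM_ADD or ARITHM_SUB when this file is built, so the same
// *_with_mask kernels below compute either a sum or a difference with the scalar.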
#ifdef ARITHM_ADD
#define ARITHM_OP(A,B) ((A)+(B))
#elif defined ARITHM_SUB
#define ARITHM_OP(A,B) ((A)-(B))
#endif
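// A minimal host-side sketch (an assumption for illustration, not part of this
// patch): the program would typically be built once per operation, with the
// matching define passed through the build options, e.g.
//
//     clBuildProgram(program, 1, &device, "-D ARITHM_ADD", NULL, NULL);  // '+' variant
//     clBuildProgram(program, 1, &device, "-D ARITHM_SUB", NULL, NULL);  // '-' variant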
/**************************************add with scalar with mask**************************************/
__kernel void arithm_s_add_with_mask_C1_D0 (__global uchar *src1, int src1_step, int src1_offset,
__global uchar *dst, int dst_step, int dst_offset,
@@ -94,7 +99,7 @@ __kernel void arithm_s_add_with_mask_C1_D0 (__global uchar *src1, int src1_ste
}

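// Read the four destination pixels back, apply ARITHM_OP on int-widened values,
// saturate to uchar, then copy each lane into dst only where its mask byte is set
// and the write stays inside this row's valid [dst_start, dst_end) range.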
uchar4 data = *((__global uchar4 *)(dst + dst_index));
int4 tmp = convert_int4_sat(src1_data) + src2_data;
int4 tmp = ARITHM_OP(convert_int4_sat(src1_data), src2_data);
uchar4 tmp_data = convert_uchar4_sat(tmp);

data.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end)) ? tmp_data.x : data.x;
@@ -134,7 +139,7 @@ __kernel void arithm_s_add_with_mask_C1_D2 (__global ushort *src1, int src1_st
uchar2 mask_data = vload2(0, mask + mask_index);

ushort2 data = *((__global ushort2 *)((__global uchar *)dst + dst_index));
int2 tmp = convert_int2_sat(src1_data) + src2_data;
int2 tmp = ARITHM_OP(convert_int2_sat(src1_data), src2_data);
ushort2 tmp_data = convert_ushort2_sat(tmp);

data.x = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data.x : data.x;
@@ -172,7 +177,7 @@ __kernel void arithm_s_add_with_mask_C1_D3 (__global short *src1, int src1_ste
uchar2 mask_data = vload2(0, mask + mask_index);

short2 data = *((__global short2 *)((__global uchar *)dst + dst_index));
int2 tmp = convert_int2_sat(src1_data) + src2_data;
int2 tmp = ARITHM_OP(convert_int2_sat(src1_data), src2_data);
short2 tmp_data = convert_short2_sat(tmp);

data.x = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data.x : data.x;
@@ -202,7 +207,7 @@ __kernel void arithm_s_add_with_mask_C1_D4 (__global int *src1, int src1_ste
int src_data2 = src2.x;
int dst_data = *((__global int *)((__global char *)dst + dst_index));

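// The operands are widened to long so the int arithmetic cannot wrap before it
// is saturated back to int.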
int data = convert_int_sat((long)src_data1 + (long)src_data2);
int data = convert_int_sat(ARITHM_OP((long)src_data1, (long)src_data2));
data = mask_data ? data : dst_data;

*((__global int *)((__global char *)dst + dst_index)) = data;
@@ -230,7 +235,7 @@ __kernel void arithm_s_add_with_mask_C1_D5 (__global float *src1, int src1_s
float src_data2 = src2.x;
float dst_data = *((__global float *)((__global char *)dst + dst_index));

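// The float and double kernels apply ARITHM_OP directly; no saturating
// conversion is needed for these element types.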
float data = src_data1 + src_data2;
float data = ARITHM_OP(src_data1, src_data2);
data = mask_data ? data : dst_data;

*((__global float *)((__global char *)dst + dst_index)) = data;
@@ -260,7 +265,7 @@ __kernel void arithm_s_add_with_mask_C1_D6 (__global double *src1, int src1_
double src_data2 = src2.x;
double dst_data = *((__global double *)((__global char *)dst + dst_index));

double data = src_data1 + src_data2;
double data = ARITHM_OP(src_data1, src_data2);
data = mask_data ? data : dst_data;

*((__global double *)((__global char *)dst + dst_index)) = data;
@@ -296,7 +301,7 @@ __kernel void arithm_s_add_with_mask_C2_D0 (__global uchar *src1, int src1_ste
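// C2: one uchar4 holds two 2-channel pixels, so mask_data.x and mask_data.y
// select the first and the second pixel respectively.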
uchar2 mask_data = vload2(0, mask + mask_index);

uchar4 data = *((__global uchar4 *)(dst + dst_index));
int4 tmp = convert_int4_sat(src1_data) + src2_data;
int4 tmp = ARITHM_OP(convert_int4_sat(src1_data), src2_data);
uchar4 tmp_data = convert_uchar4_sat(tmp);

data.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data.xy : data.xy;
@@ -326,7 +331,7 @@ __kernel void arithm_s_add_with_mask_C2_D2 (__global ushort *src1, int src1_st
int2 src_data2 = (int2)(src2.x, src2.y);
ushort2 dst_data = *((__global ushort2 *)((__global char *)dst + dst_index));

int2 tmp = convert_int2_sat(src_data1) + src_data2;
int2 tmp = ARITHM_OP(convert_int2_sat(src_data1), src_data2);
ushort2 data = convert_ushort2_sat(tmp);
data = mask_data ? data : dst_data;

@@ -354,7 +359,7 @@ __kernel void arithm_s_add_with_mask_C2_D3 (__global short *src1, int src1_ste
int2 src_data2 = (int2)(src2.x, src2.y);
short2 dst_data = *((__global short2 *)((__global char *)dst + dst_index));

int2 tmp = convert_int2_sat(src_data1) + src_data2;
int2 tmp = ARITHM_OP(convert_int2_sat(src_data1), src_data2);
short2 data = convert_short2_sat(tmp);
data = mask_data ? data : dst_data;

@@ -382,7 +387,7 @@ __kernel void arithm_s_add_with_mask_C2_D4 (__global int *src1, int src1_step,
int2 src_data2 = (int2)(src2.x, src2.y);
int2 dst_data = *((__global int2 *)((__global char *)dst + dst_index));

int2 data = convert_int2_sat(convert_long2_sat(src_data1) + convert_long2_sat(src_data2));
int2 data = convert_int2_sat(ARITHM_OP(convert_long2_sat(src_data1), convert_long2_sat(src_data2)));
data = mask_data ? data : dst_data;

*((__global int2 *)((__global char *)dst + dst_index)) = data;
@@ -409,7 +414,7 @@ __kernel void arithm_s_add_with_mask_C2_D5 (__global float *src1, int src1_ste
float2 src_data2 = (float2)(src2.x, src2.y);
float2 dst_data = *((__global float2 *)((__global char *)dst + dst_index));

float2 data = src_data1 + src_data2;
float2 data = ARITHM_OP(src_data1, src_data2);
data = mask_data ? data : dst_data;

*((__global float2 *)((__global char *)dst + dst_index)) = data;
@@ -438,7 +443,7 @@ __kernel void arithm_s_add_with_mask_C2_D6 (__global double *src1, int src1_st
double2 src_data2 = (double2)(src2.x, src2.y);
double2 dst_data = *((__global double2 *)((__global char *)dst + dst_index));

double2 data = src_data1 + src_data2;
double2 data = ARITHM_OP(src_data1, src_data2);
data = mask_data ? data : dst_data;

*((__global double2 *)((__global char *)dst + dst_index)) = data;
@@ -446,317 +451,11 @@ __kernel void arithm_s_add_with_mask_C2_D6 (__global double *src1, int src1_st
}
#endif

__kernel void arithm_s_add_with_mask_C3_D0 (__global uchar *src1, int src1_step, int src1_offset,
__global uchar *dst, int dst_step, int dst_offset,
__global uchar *mask, int mask_step, int mask_offset,
int4 src2, int rows, int cols, int dst_step1)
{

int x = get_global_id(0);
int y = get_global_id(1);

if (x < cols && y < rows)
{
x = x << 2;

#ifdef dst_align
#undef dst_align
#endif
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);

int dst_start = mad24(y, dst_step, dst_offset);
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));

uchar4 src1_data_0 = vload4(0, src1 + src1_index + 0);
uchar4 src1_data_1 = vload4(0, src1 + src1_index + 4);
uchar4 src1_data_2 = vload4(0, src1 + src1_index + 8);

int4 src2_data_0 = (int4)(src2.x, src2.y, src2.z, src2.x);
int4 src2_data_1 = (int4)(src2.y, src2.z, src2.x, src2.y);
int4 src2_data_2 = (int4)(src2.z, src2.x, src2.y, src2.z);

uchar4 mask_data = vload4(0, mask + mask_index);

uchar4 data_0 = *((__global uchar4 *)(dst + dst_index + 0));
uchar4 data_1 = *((__global uchar4 *)(dst + dst_index + 4));
uchar4 data_2 = *((__global uchar4 *)(dst + dst_index + 8));

uchar4 tmp_data_0 = convert_uchar4_sat(convert_int4_sat(src1_data_0) + src2_data_0);
uchar4 tmp_data_1 = convert_uchar4_sat(convert_int4_sat(src1_data_1) + src2_data_1);
uchar4 tmp_data_2 = convert_uchar4_sat(convert_int4_sat(src1_data_2) + src2_data_2);

data_0.xyz = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
data_0.w = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
? tmp_data_0.w : data_0.w;

data_1.xy = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
? tmp_data_1.xy : data_1.xy;
data_1.zw = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
? tmp_data_1.zw : data_1.zw;

data_2.x = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
? tmp_data_2.x : data_2.x;
data_2.yzw = ((mask_data.w) && (dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
? tmp_data_2.yzw : data_2.yzw;

*((__global uchar4 *)(dst + dst_index + 0)) = data_0;
*((__global uchar4 *)(dst + dst_index + 4)) = data_1;
*((__global uchar4 *)(dst + dst_index + 8)) = data_2;
}
}
__kernel void arithm_s_add_with_mask_C3_D2 (__global ushort *src1, int src1_step, int src1_offset,
__global ushort *dst, int dst_step, int dst_offset,
__global uchar *mask, int mask_step, int mask_offset,
int4 src2, int rows, int cols, int dst_step1)
{

int x = get_global_id(0);
int y = get_global_id(1);

if (x < cols && y < rows)
{
x = x << 1;

#ifdef dst_align
#undef dst_align
#endif
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);

int dst_start = mad24(y, dst_step, dst_offset);
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));

ushort2 src1_data_0 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 0));
ushort2 src1_data_1 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 4));
ushort2 src1_data_2 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 8));

int2 src2_data_0 = (int2)(src2.x, src2.y);
int2 src2_data_1 = (int2)(src2.z, src2.x);
int2 src2_data_2 = (int2)(src2.y, src2.z);

uchar2 mask_data = vload2(0, mask + mask_index);

ushort2 data_0 = *((__global ushort2 *)((__global char *)dst + dst_index + 0));
ushort2 data_1 = *((__global ushort2 *)((__global char *)dst + dst_index + 4));
ushort2 data_2 = *((__global ushort2 *)((__global char *)dst + dst_index + 8));

ushort2 tmp_data_0 = convert_ushort2_sat(convert_int2_sat(src1_data_0) + src2_data_0);
ushort2 tmp_data_1 = convert_ushort2_sat(convert_int2_sat(src1_data_1) + src2_data_1);
ushort2 tmp_data_2 = convert_ushort2_sat(convert_int2_sat(src1_data_2) + src2_data_2);

data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;

data_1.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
? tmp_data_1.x : data_1.x;
data_1.y = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
? tmp_data_1.y : data_1.y;

data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
? tmp_data_2.xy : data_2.xy;

*((__global ushort2 *)((__global char *)dst + dst_index + 0))= data_0;
*((__global ushort2 *)((__global char *)dst + dst_index + 4))= data_1;
*((__global ushort2 *)((__global char *)dst + dst_index + 8))= data_2;
}
}
__kernel void arithm_s_add_with_mask_C3_D3 (__global short *src1, int src1_step, int src1_offset,
__global short *dst, int dst_step, int dst_offset,
__global uchar *mask, int mask_step, int mask_offset,
int4 src2, int rows, int cols, int dst_step1)
{

int x = get_global_id(0);
int y = get_global_id(1);

if (x < cols && y < rows)
{
x = x << 1;

#ifdef dst_align
#undef dst_align
#endif
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);

int dst_start = mad24(y, dst_step, dst_offset);
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));

short2 src1_data_0 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 0));
short2 src1_data_1 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 4));
short2 src1_data_2 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 8));

int2 src2_data_0 = (int2)(src2.x, src2.y);
int2 src2_data_1 = (int2)(src2.z, src2.x);
int2 src2_data_2 = (int2)(src2.y, src2.z);

uchar2 mask_data = vload2(0, mask + mask_index);

short2 data_0 = *((__global short2 *)((__global char *)dst + dst_index + 0));
short2 data_1 = *((__global short2 *)((__global char *)dst + dst_index + 4));
short2 data_2 = *((__global short2 *)((__global char *)dst + dst_index + 8));

short2 tmp_data_0 = convert_short2_sat(convert_int2_sat(src1_data_0) + src2_data_0);
short2 tmp_data_1 = convert_short2_sat(convert_int2_sat(src1_data_1) + src2_data_1);
short2 tmp_data_2 = convert_short2_sat(convert_int2_sat(src1_data_2) + src2_data_2);

data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;

data_1.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
? tmp_data_1.x : data_1.x;
data_1.y = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
? tmp_data_1.y : data_1.y;

data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
? tmp_data_2.xy : data_2.xy;

*((__global short2 *)((__global char *)dst + dst_index + 0))= data_0;
*((__global short2 *)((__global char *)dst + dst_index + 4))= data_1;
*((__global short2 *)((__global char *)dst + dst_index + 8))= data_2;
}
}
__kernel void arithm_s_add_with_mask_C3_D4 (__global int *src1, int src1_step, int src1_offset,
__global int *dst, int dst_step, int dst_offset,
__global uchar *mask, int mask_step, int mask_offset,
int4 src2, int rows, int cols, int dst_step1)
{

int x = get_global_id(0);
int y = get_global_id(1);

if (x < cols && y < rows)
{
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
int mask_index = mad24(y, mask_step, x + mask_offset);
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));

int src1_data_0 = *((__global int *)((__global char *)src1 + src1_index + 0));
int src1_data_1 = *((__global int *)((__global char *)src1 + src1_index + 4));
int src1_data_2 = *((__global int *)((__global char *)src1 + src1_index + 8));

int src2_data_0 = src2.x;
int src2_data_1 = src2.y;
int src2_data_2 = src2.z;

uchar mask_data = * (mask + mask_index);

int data_0 = *((__global int *)((__global char *)dst + dst_index + 0));
int data_1 = *((__global int *)((__global char *)dst + dst_index + 4));
int data_2 = *((__global int *)((__global char *)dst + dst_index + 8));

int tmp_data_0 = convert_int_sat((long)src1_data_0 + (long)src2_data_0);
int tmp_data_1 = convert_int_sat((long)src1_data_1 + (long)src2_data_1);
int tmp_data_2 = convert_int_sat((long)src1_data_2 + (long)src2_data_2);

data_0 = mask_data ? tmp_data_0 : data_0;
data_1 = mask_data ? tmp_data_1 : data_1;
data_2 = mask_data ? tmp_data_2 : data_2;

*((__global int *)((__global char *)dst + dst_index + 0))= data_0;
*((__global int *)((__global char *)dst + dst_index + 4))= data_1;
*((__global int *)((__global char *)dst + dst_index + 8))= data_2;
}
}
__kernel void arithm_s_add_with_mask_C3_D5 (__global float *src1, int src1_step, int src1_offset,
__global float *dst, int dst_step, int dst_offset,
__global uchar *mask, int mask_step, int mask_offset,
float4 src2, int rows, int cols, int dst_step1)
{

int x = get_global_id(0);
int y = get_global_id(1);

if (x < cols && y < rows)
{
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
int mask_index = mad24(y, mask_step, x + mask_offset);
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));

float src1_data_0 = *((__global float *)((__global char *)src1 + src1_index + 0));
float src1_data_1 = *((__global float *)((__global char *)src1 + src1_index + 4));
float src1_data_2 = *((__global float *)((__global char *)src1 + src1_index + 8));

float src2_data_0 = src2.x;
float src2_data_1 = src2.y;
float src2_data_2 = src2.z;

uchar mask_data = * (mask + mask_index);

float data_0 = *((__global float *)((__global char *)dst + dst_index + 0));
float data_1 = *((__global float *)((__global char *)dst + dst_index + 4));
float data_2 = *((__global float *)((__global char *)dst + dst_index + 8));

float tmp_data_0 = src1_data_0 + src2_data_0;
float tmp_data_1 = src1_data_1 + src2_data_1;
float tmp_data_2 = src1_data_2 + src2_data_2;

data_0 = mask_data ? tmp_data_0 : data_0;
data_1 = mask_data ? tmp_data_1 : data_1;
data_2 = mask_data ? tmp_data_2 : data_2;

*((__global float *)((__global char *)dst + dst_index + 0))= data_0;
*((__global float *)((__global char *)dst + dst_index + 4))= data_1;
*((__global float *)((__global char *)dst + dst_index + 8))= data_2;
}
}

#if defined (DOUBLE_SUPPORT)
__kernel void arithm_s_add_with_mask_C3_D6 (__global double *src1, int src1_step, int src1_offset,
__global double *dst, int dst_step, int dst_offset,
__global uchar *mask, int mask_step, int mask_offset,
double4 src2, int rows, int cols, int dst_step1)
{

int x = get_global_id(0);
int y = get_global_id(1);

if (x < cols && y < rows)
{
int src1_index = mad24(y, src1_step, (x * 24) + src1_offset);
int mask_index = mad24(y, mask_step, x + mask_offset);
int dst_index = mad24(y, dst_step, dst_offset + (x * 24));

double src1_data_0 = *((__global double *)((__global char *)src1 + src1_index + 0 ));
double src1_data_1 = *((__global double *)((__global char *)src1 + src1_index + 8 ));
double src1_data_2 = *((__global double *)((__global char *)src1 + src1_index + 16));

double src2_data_0 = src2.x;
double src2_data_1 = src2.y;
double src2_data_2 = src2.z;

uchar mask_data = * (mask + mask_index);

double data_0 = *((__global double *)((__global char *)dst + dst_index + 0 ));
double data_1 = *((__global double *)((__global char *)dst + dst_index + 8 ));
double data_2 = *((__global double *)((__global char *)dst + dst_index + 16));

double tmp_data_0 = src1_data_0 + src2_data_0;
double tmp_data_1 = src1_data_1 + src2_data_1;
double tmp_data_2 = src1_data_2 + src2_data_2;

data_0 = mask_data ? tmp_data_0 : data_0;
data_1 = mask_data ? tmp_data_1 : data_1;
data_2 = mask_data ? tmp_data_2 : data_2;

*((__global double *)((__global char *)dst + dst_index + 0 ))= data_0;
*((__global double *)((__global char *)dst + dst_index + 8 ))= data_1;
*((__global double *)((__global char *)dst + dst_index + 16))= data_2;
}
}
#endif

__kernel void arithm_s_add_with_mask_C4_D0 (__global uchar *src1, int src1_step, int src1_offset,
__global uchar *dst, int dst_step, int dst_offset,
__global uchar *mask, int mask_step, int mask_offset,
int4 src2, int rows, int cols, int dst_step1)
{

int x = get_global_id(0);
int y = get_global_id(1);

@@ -771,7 +470,7 @@ __kernel void arithm_s_add_with_mask_C4_D0 (__global uchar *src1, int src1_ste
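// C4: a single 4-element vector is exactly one 4-channel pixel, so one mask byte
// keeps or rejects the whole pixel at once.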
uchar4 src_data1 = *((__global uchar4 *)(src1 + src1_index));
uchar4 dst_data = *((__global uchar4 *)(dst + dst_index));

uchar4 data = convert_uchar4_sat(convert_int4_sat(src_data1) + src2);
uchar4 data = convert_uchar4_sat(ARITHM_OP(convert_int4_sat(src_data1), src2));
data = mask_data ? data : dst_data;

*((__global uchar4 *)(dst + dst_index)) = data;
@@ -797,7 +496,7 @@ __kernel void arithm_s_add_with_mask_C4_D2 (__global ushort *src1, int src1_st
ushort4 src_data1 = *((__global ushort4 *)((__global char *)src1 + src1_index));
ushort4 dst_data = *((__global ushort4 *)((__global char *)dst + dst_index));

ushort4 data = convert_ushort4_sat(convert_int4_sat(src_data1) + src2);
ushort4 data = convert_ushort4_sat(ARITHM_OP(convert_int4_sat(src_data1), src2));
data = mask_data ? data : dst_data;

*((__global ushort4 *)((__global char *)dst + dst_index)) = data;
@@ -823,7 +522,7 @@ __kernel void arithm_s_add_with_mask_C4_D3 (__global short *src1, int src1_ste
short4 src_data1 = *((__global short4 *)((__global char *)src1 + src1_index));
short4 dst_data = *((__global short4 *)((__global char *)dst + dst_index));

short4 data = convert_short4_sat(convert_int4_sat(src_data1) + src2);
short4 data = convert_short4_sat(ARITHM_OP(convert_int4_sat(src_data1), src2));
data = mask_data ? data : dst_data;

*((__global short4 *)((__global char *)dst + dst_index)) = data;
@@ -849,7 +548,7 @@ __kernel void arithm_s_add_with_mask_C4_D4 (__global int *src1, int src1_step,
int4 src_data1 = *((__global int4 *)((__global char *)src1 + src1_index));
int4 dst_data = *((__global int4 *)((__global char *)dst + dst_index));

int4 data = convert_int4_sat(convert_long4_sat(src_data1) + convert_long4_sat(src2));
int4 data = convert_int4_sat(ARITHM_OP(convert_long4_sat(src_data1), convert_long4_sat(src2)));
data = mask_data ? data : dst_data;

*((__global int4 *)((__global char *)dst + dst_index)) = data;
@@ -875,7 +574,7 @@ __kernel void arithm_s_add_with_mask_C4_D5 (__global float *src1, int src1_ste
float4 src_data1 = *((__global float4 *)((__global char *)src1 + src1_index));
float4 dst_data = *((__global float4 *)((__global char *)dst + dst_index));

float4 data = src_data1 + src2;
float4 data = ARITHM_OP(src_data1, src2);
data = mask_data ? data : dst_data;

*((__global float4 *)((__global char *)dst + dst_index)) = data;
@@ -903,7 +602,7 @@ __kernel void arithm_s_add_with_mask_C4_D6 (__global double *src1, int src1_st
double4 src_data1 = *((__global double4 *)((__global char *)src1 + src1_index));
double4 dst_data = *((__global double4 *)((__global char *)dst + dst_index));

double4 data = src_data1 + src2;
double4 data = ARITHM_OP(src_data1, src2);
data = mask_data ? data : dst_data;

*((__global double4 *)((__global char *)dst + dst_index)) = data;