diff --git a/modules/gpu/src/cuda/mathfunc.cu b/modules/gpu/src/cuda/mathfunc.cu
index 5fbb73ab7c..2de34ec6ea 100644
--- a/modules/gpu/src/cuda/mathfunc.cu
+++ b/modules/gpu/src/cuda/mathfunc.cu
@@ -291,16 +291,11 @@ namespace cv { namespace gpu { namespace mathfunc
     //------------------------------------------------------------------------
     // Unary operations
 
-    enum
-    {
-        UN_OP_NOT
-    };
-
+    enum { UN_OP_NOT };
 
     template <typename T, int opid> struct UnOp;
 
-
     template <typename T>
     struct UnOp<T, UN_OP_NOT>
     {
@@ -380,7 +375,8 @@ namespace cv { namespace gpu { namespace mathfunc
         dim3 grid(divUp(cols * elem_size, threads.x * sizeof(uint)), divUp(rows, threads.y));
 
         bitwise_un_op<opid><<<grid, threads, 0, stream>>>(rows, cols * elem_size, src, dst);
-        if (stream == 0) cudaSafeCall(cudaThreadSynchronize());
+        if (stream == 0)
+            cudaSafeCall(cudaThreadSynchronize());
     }
 
 
@@ -422,7 +418,8 @@ namespace cv { namespace gpu { namespace mathfunc
             bitwise_un_op_two_loads<opid, uint, 4><<<grid, threads, 0, stream>>>(rows, cols, src, dst, mask);
             break;
         }
-        if (stream == 0) cudaSafeCall(cudaThreadSynchronize());
+        if (stream == 0)
+            cudaSafeCall(cudaThreadSynchronize());
     }
 
 
@@ -442,134 +439,201 @@ namespace cv { namespace gpu { namespace mathfunc
     enum { BIN_OP_OR, BIN_OP_AND, BIN_OP_XOR };
 
-
     template <typename T, int opid> struct BinOp;
 
-
     template <typename T>
     struct BinOp<T, BIN_OP_OR>
    {
-        static __device__ T call(T lhs, T rhs)
-        {
-            return lhs | rhs;
-        }
+        typedef typename TypeVec<T, 2>::vec_t Vec2;
+        typedef typename TypeVec<T, 3>::vec_t Vec3;
+        typedef typename TypeVec<T, 4>::vec_t Vec4;
+        static __device__ T call(T a, T b) { return a | b; }
+        static __device__ Vec2 call(Vec2 a, Vec2 b) { return VecTraits<Vec2>::make(a.x | b.x, a.y | b.y); }
+        static __device__ Vec3 call(Vec3 a, Vec3 b) { return VecTraits<Vec3>::make(a.x | b.x, a.y | b.y, a.z | b.z); }
+        static __device__ Vec4 call(Vec4 a, Vec4 b) { return VecTraits<Vec4>::make(a.x | b.x, a.y | b.y, a.z | b.z, a.w | b.w); }
     };
 
 
     template <typename T>
     struct BinOp<T, BIN_OP_AND>
     {
-        static __device__ T call(T lhs, T rhs)
-        {
-            return lhs & rhs;
-        }
+        typedef typename TypeVec<T, 2>::vec_t Vec2;
+        typedef typename TypeVec<T, 3>::vec_t Vec3;
+        typedef typename TypeVec<T, 4>::vec_t Vec4;
+        static __device__ T call(T a, T b) { return a & b; }
+        static __device__ Vec2 call(Vec2 a, Vec2 b) { return VecTraits<Vec2>::make(a.x & b.x, a.y & b.y); }
+        static __device__ Vec3 call(Vec3 a, Vec3 b) { return VecTraits<Vec3>::make(a.x & b.x, a.y & b.y, a.z & b.z); }
+        static __device__ Vec4 call(Vec4 a, Vec4 b) { return VecTraits<Vec4>::make(a.x & b.x, a.y & b.y, a.z & b.z, a.w & b.w); }
     };
 
 
     template <typename T>
     struct BinOp<T, BIN_OP_XOR>
     {
-        static __device__ T call(T lhs, T rhs)
-        {
-            return lhs ^ rhs;
-        }
+        typedef typename TypeVec<T, 2>::vec_t Vec2;
+        typedef typename TypeVec<T, 3>::vec_t Vec3;
+        typedef typename TypeVec<T, 4>::vec_t Vec4;
+        static __device__ T call(T a, T b) { return a ^ b; }
+        static __device__ Vec2 call(Vec2 a, Vec2 b) { return VecTraits<Vec2>::make(a.x ^ b.x, a.y ^ b.y); }
+        static __device__ Vec3 call(Vec3 a, Vec3 b) { return VecTraits<Vec3>::make(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z); }
+        static __device__ Vec4 call(Vec4 a, Vec4 b) { return VecTraits<Vec4>::make(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z, a.w ^ b.w); }
     };
 
 
-    template <typename T, int cn, typename BinOp, typename Mask>
-    __global__ void bitwise_bin_op(int rows, int cols, const PtrStep src1, const PtrStep src2, PtrStep dst, Mask mask)
+    template <int opid>
+    __global__ void bitwise_bin_op(int rows, int cols, const PtrStep src1, const PtrStep src2, PtrStep dst)
     {
+        const int x = (blockDim.x * blockIdx.x + threadIdx.x) * 4;
+        const int y = blockDim.y * blockIdx.y + threadIdx.y;
+
+        if (y < rows)
+        {
+            uchar* dst_ptr = dst.ptr(y) + x;
+            const uchar* src1_ptr = src1.ptr(y) + x;
+            const uchar* src2_ptr = src2.ptr(y) + x;
+            if (x + sizeof(uint) - 1 < cols)
+            {
+                *(uint*)dst_ptr = BinOp<uint, opid>::call(*(uint*)src1_ptr, *(uint*)src2_ptr);
+            }
+            else
+            {
+                const uchar* src1_end = src1.ptr(y) + cols;
+                while (src1_ptr < src1_end)
+                {
+                    *dst_ptr++ = BinOp<uchar, opid>::call(*src1_ptr++, *src2_ptr++);
+                }
+            }
+        }
+    }
+
+
+    template <int opid, typename T, int cn>
+    __global__ void bitwise_bin_op(int rows, int cols, const PtrStep src1, const PtrStep src2,
+                                   PtrStep dst, const PtrStep mask)
+    {
+        typedef typename TypeVec<T, cn>::vec_t Type;
         const int x = blockDim.x * blockIdx.x + threadIdx.x;
         const int y = blockDim.y * blockIdx.y + threadIdx.y;
 
-        if (x < cols && y < rows && mask(y, x))
+        if (x < cols && y < rows && mask.ptr(y)[x])
         {
-            T* dsty = (T*)dst.ptr(y);
-            const T* src1y = (const T*)src1.ptr(y);
-            const T* src2y = (const T*)src2.ptr(y);
+            Type* dst_row = (Type*)dst.ptr(y);
+            const Type* src1_row = (const Type*)src1.ptr(y);
+            const Type* src2_row = (const Type*)src2.ptr(y);
+            dst_row[x] = BinOp<T, opid>::call(src1_row[x], src2_row[x]);
+        }
+    }
 
-            #pragma unroll
-            for (int i = 0; i < cn; ++i)
-                dsty[cn * x + i] = BinOp::call(src1y[cn * x + i], src2y[cn * x + i]);
+
+    template <int opid, typename T, int cn>
+    __global__ void bitwise_bin_op_two_loads(int rows, int cols, const PtrStep src1, const PtrStep src2,
+                                             PtrStep dst, const PtrStep mask)
+    {
+        typedef typename TypeVec<T, cn>::vec_t Type;
+        const int x = blockDim.x * blockIdx.x + threadIdx.x;
+        const int y = blockDim.y * blockIdx.y + threadIdx.y;
+
+        if (x < cols && y < rows && mask.ptr(y)[x])
+        {
+            Type* dst_row = (Type*)dst.ptr(y);
+            const Type* src1_row = (const Type*)src1.ptr(y);
+            const Type* src2_row = (const Type*)src2.ptr(y);
+            dst_row[2 * x] = BinOp<T, opid>::call(src1_row[2 * x], src2_row[2 * x]);
+            dst_row[2 * x + 1] = BinOp<T, opid>::call(src1_row[2 * x + 1], src2_row[2 * x + 1]);
         }
     }
 
 
-    template <int opid, typename Mask>
-    void bitwise_bin_op(int rows, int cols, const PtrStep src1, const PtrStep src2, PtrStep dst, int elem_size, Mask mask, cudaStream_t stream)
+    template <int opid>
+    void bitwise_bin_op(int rows, int cols, const PtrStep src1, const PtrStep src2, PtrStep dst,
+                        int elem_size, cudaStream_t stream)
+    {
+        dim3 threads(16, 16);
+        dim3 grid(divUp(cols * elem_size, threads.x * sizeof(uint)),
+                  divUp(rows, threads.y));
+        bitwise_bin_op<opid><<<grid, threads, 0, stream>>>(rows, cols * elem_size, src1, src2, dst);
+        if (stream == 0)
+            cudaSafeCall(cudaThreadSynchronize());
+    }
+
+
+    template <int opid>
+    void bitwise_bin_op(int rows, int cols, const PtrStep src1, const PtrStep src2, PtrStep dst,
+                        int elem_size, const PtrStep mask, cudaStream_t stream)
     {
         dim3 threads(16, 16);
         dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
 
         switch (elem_size)
         {
         case 1:
-            bitwise_bin_op<uchar, 1, BinOp<uchar, opid> ><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
+            bitwise_bin_op<opid, uchar, 1><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
             break;
         case 2:
-            bitwise_bin_op<ushort, 1, BinOp<ushort, opid> ><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
+            bitwise_bin_op<opid, ushort, 1><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
            break;
        case 3:
-            bitwise_bin_op<uchar, 3, BinOp<uchar, opid> ><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
+            bitwise_bin_op<opid, uchar, 3><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
             break;
         case 4:
-            bitwise_bin_op<uint, 1, BinOp<uint, opid> ><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
+            bitwise_bin_op<opid, uint, 1><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
             break;
         case 6:
-            bitwise_bin_op<ushort, 3, BinOp<ushort, opid> ><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
+            bitwise_bin_op<opid, ushort, 3><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
             break;
         case 8:
-            bitwise_bin_op<uint, 2, BinOp<uint, opid> ><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
+            bitwise_bin_op<opid, uint, 2><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
             break;
         case 12:
-            bitwise_bin_op<uint, 3, BinOp<uint, opid> ><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
+            bitwise_bin_op<opid, uint, 3><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
             break;
         case 16:
-            bitwise_bin_op<uint, 4, BinOp<uint, opid> ><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
+            bitwise_bin_op<opid, uint, 4><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
             break;
         case 24:
-            bitwise_bin_op<uint, 6, BinOp<uint, opid> ><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
+            bitwise_bin_op_two_loads<opid, uint, 3><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
             break;
         case 32:
-            bitwise_bin_op<uint, 8, BinOp<uint, opid> ><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
+            bitwise_bin_op_two_loads<opid, uint, 4><<<grid, threads, 0, stream>>>(rows, cols, src1, src2, dst, mask);
             break;
         }
-        if (stream == 0) cudaSafeCall(cudaThreadSynchronize());
+        if (stream == 0)
+            cudaSafeCall(cudaThreadSynchronize());
     }
 
 
     void bitwise_or_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elem_size, PtrStep dst, cudaStream_t stream)
     {
-        bitwise_bin_op<BIN_OP_OR>(rows, cols, src1, src2, dst, elem_size, MaskTrue(), stream);
+        bitwise_bin_op<BIN_OP_OR>(rows, cols, src1, src2, dst, elem_size, stream);
     }
 
 
     void bitwise_or_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elem_size, PtrStep dst, const PtrStep mask, cudaStream_t stream)
     {
-        bitwise_bin_op<BIN_OP_OR>(rows, cols, src1, src2, dst, elem_size, Mask8U(mask), stream);
+        bitwise_bin_op<BIN_OP_OR>(rows, cols, src1, src2, dst, elem_size, mask, stream);
     }
 
 
     void bitwise_and_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elem_size, PtrStep dst, cudaStream_t stream)
     {
-        bitwise_bin_op<BIN_OP_AND>(rows, cols, src1, src2, dst, elem_size, MaskTrue(), stream);
+        bitwise_bin_op<BIN_OP_AND>(rows, cols, src1, src2, dst, elem_size, stream);
     }
 
 
     void bitwise_and_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elem_size, PtrStep dst, const PtrStep mask, cudaStream_t stream)
     {
-        bitwise_bin_op<BIN_OP_AND>(rows, cols, src1, src2, dst, elem_size, Mask8U(mask), stream);
+        bitwise_bin_op<BIN_OP_AND>(rows, cols, src1, src2, dst, elem_size, mask, stream);
     }
 
 
     void bitwise_xor_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elem_size, PtrStep dst, cudaStream_t stream)
     {
-        bitwise_bin_op<BIN_OP_XOR>(rows, cols, src1, src2, dst, elem_size, MaskTrue(), stream);
+        bitwise_bin_op<BIN_OP_XOR>(rows, cols, src1, src2, dst, elem_size, stream);
     }
 
 
     void bitwise_xor_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elem_size, PtrStep dst, const PtrStep mask, cudaStream_t stream)
     {
-        bitwise_bin_op<BIN_OP_XOR>(rows, cols, src1, src2, dst, elem_size, Mask8U(mask), stream);
+        bitwise_bin_op<BIN_OP_XOR>(rows, cols, src1, src2, dst, elem_size, mask, stream);
     }
 
 
@@ -2247,3 +2311,4 @@ namespace cv { namespace gpu { namespace mathfunc
 
 
 }}}
+
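Note on the unmasked path, as a hedged illustration rather than part of the patch: the new bitwise_bin_op kernel gains its speed by treating each image row as a run of 32-bit words, so one thread combines four bytes with a single load per operand and only the last partial word of a row is finished byte by byte. The standalone sketch below shows that access pattern for OR on two pitched device buffers. It is not the OpenCV kernel itself; the kernel name bitwise_or_rows, the raw pitch argument, and the uchar_t/uint_t aliases are illustrative assumptions, and it assumes buffers allocated with cudaMallocPitch so every row starts on an aligned address.

#include <cuda_runtime.h>

typedef unsigned char uchar_t;  // local aliases, only for this sketch
typedef unsigned int  uint_t;

// Illustrative sketch only: byte-wise OR of two pitched 2D buffers.
// Each thread covers 4 consecutive bytes of one row; full words use a
// single uint load/store per operand, the tail of the row (fewer than
// 4 bytes left) falls back to scalar byte operations.
__global__ void bitwise_or_rows(int rows, int cols_in_bytes,
                                const uchar_t* src1, const uchar_t* src2,
                                uchar_t* dst, size_t pitch)
{
    const int x = (blockDim.x * blockIdx.x + threadIdx.x) * 4;  // byte offset in the row
    const int y = blockDim.y * blockIdx.y + threadIdx.y;        // row index

    if (y >= rows || x >= cols_in_bytes)
        return;

    const uchar_t* s1 = src1 + y * pitch + x;
    const uchar_t* s2 = src2 + y * pitch + x;
    uchar_t* d = dst + y * pitch + x;

    if (x + (int)sizeof(uint_t) - 1 < cols_in_bytes)
    {
        // Full 4-byte word: rows of a pitched allocation start aligned and
        // x is a multiple of 4, so the reinterpreting loads are safe.
        *(uint_t*)d = *(const uint_t*)s1 | *(const uint_t*)s2;
    }
    else
    {
        // Row tail: combine the remaining 1..3 bytes one at a time.
        for (int i = 0; x + i < cols_in_bytes; ++i)
            d[i] = (uchar_t)(s1[i] | s2[i]);
    }
}

A matching launch would use a 16x16 block and a grid of divUp(cols_in_bytes, 4 * block.x) by divUp(rows, block.y), which mirrors the geometry the patch's unmasked caller computes from cols * elem_size and sizeof(uint).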