@@ -50,62 +50,36 @@ __constant__ __align__(16) float scalar_d[4];
 namespace mat_operators
 {
-    template <typename T, int channels, int count = channels>
-    struct unroll
-    {
-        __device__ static void unroll_set(T * mat, size_t i)
-        {
-            mat[i] = static_cast<T>(scalar_d[channels - count]);
-            unroll<T, channels, count - 1>::unroll_set(mat, i+1);
-        }
-
-        __device__ static void unroll_set_with_mask(T * mat, unsigned char mask, size_t i)
-        {
-            if ( mask != 0 )
-                mat[i] = static_cast<T>(scalar_d[channels - count]);
-
-            unroll<T, channels, count - 1>::unroll_set_with_mask(mat, mask, i+1);
-        }
-    };
-
-    template <typename T, int channels>
-    struct unroll<T, channels, 0>
-    {
-        __device__ static void unroll_set(T * , size_t){}
-        __device__ static void unroll_set_with_mask(T * , unsigned char, size_t){}
-    };
-
-    template <typename T, int channels>
-    __device__ size_t GetIndex(size_t i, int cols, int step)
-    {
-        size_t ret = (i / static_cast<size_t>(cols))*static_cast<size_t>(step) / static_cast<size_t>(sizeof(T)) +
-                     (i % static_cast<size_t>(cols))*static_cast<size_t>(channels);
-        return ret;
-    }

-    template <typename T, int channels>
+    template<typename T, int channels>
     __global__ void kernel_set_to_without_mask(T * mat, int cols, int rows, int step)
     {
-        size_t i = (blockIdx.x * blockDim.x + threadIdx.x);
+        size_t x = blockIdx.x * blockDim.x + threadIdx.x;
+        size_t y = blockIdx.y * blockDim.y + threadIdx.y;

-        if (i < cols * rows)
+        if ((x < cols * channels ) && (y < rows))
         {
-            unroll<T, channels>::unroll_set(mat, GetIndex<T,channels>(i, cols, step));
+            size_t idx = y * (step / sizeof(T)) + x;
+            mat[idx] = scalar_d[ x % channels ];
         }
     }

-    template <typename T, int channels>
-    __global__ void kernel_set_to_with_mask(T * mat, const unsigned char * mask, int cols, int rows, int step)
+    template<typename T, int channels>
+    __global__ void kernel_set_to_with_mask(T * mat, const unsigned char * mask, int cols, int rows, int step, int step_mask)
     {
-        size_t i = (blockIdx.x * blockDim.x + threadIdx.x);
-        if (i < cols * rows)
-            unroll<T, channels>::unroll_set_with_mask(mat, mask[i], GetIndex<T,channels>(i, cols, step));
+        size_t x = blockIdx.x * blockDim.x + threadIdx.x;
+        size_t y = blockIdx.y * blockDim.y + threadIdx.y;
+
+        if ((x < cols * channels ) && (y < rows))
+            if (mask[y * step_mask + x / channels] != 0)
+            {
+                size_t idx = y * (step / sizeof(T)) + x;
+                mat[idx] = scalar_d[ x % channels ];
+            }
     }
 }

-extern "C" void cv::gpu::impl::set_to_with_mask(const DevMem2D& mat, const double * scalar, const DevMem2D& mask, int elemSize1, int channels)
+extern "C" void cv::gpu::impl::set_to_without_mask(const DevMem2D& mat, const double * scalar, int elemSize1, int channels)
 {
     // download scalar to constant memory
     float data[4];
     data[0] = static_cast<float>(scalar[0]);
     data[1] = static_cast<float>(scalar[1]);
@@ -113,37 +87,38 @@ extern "C" void cv::gpu::impl::set_to_with_mask(const DevMem2D& mat, const double * scalar, const DevMem2D& mask, int elemSize1, int channels)
     data[3] = static_cast<float>(scalar[3]);
     cudaSafeCall( cudaMemcpyToSymbol(scalar_d, &data, sizeof(data)));

-    dim3 threadsPerBlock(256,1,1);
-    dim3 numBlocks (mat.rows * mat.cols / threadsPerBlock.x + 1, 1, 1);
+    dim3 threadsPerBlock(16, 16, 1);
+    dim3 numBlocks (mat.cols * channels / threadsPerBlock.x + 1, mat.rows / threadsPerBlock.y + 1, 1);

     if (channels == 1)
     {
-        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 1><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 1><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float , 1><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 1><<<numBlocks,threadsPerBlock>>>(mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 1><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask<float, 1><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, mat.cols, mat.rows, mat.step);
     }
     if (channels == 2)
     {
-        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 2><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 2><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float , 2><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 2><<<numBlocks,threadsPerBlock>>>(mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 2><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask<float, 2><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, mat.cols, mat.rows, mat.step);
     }
     if (channels == 3)
     {
-        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 3><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 3><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float , 3><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 3><<<numBlocks,threadsPerBlock>>>(mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 3><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask<float, 3><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, mat.cols, mat.rows, mat.step);
     }
     if (channels == 4)
     {
-        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 4><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 4><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float , 4><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 4><<<numBlocks,threadsPerBlock>>>(mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 4><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask<float, 4><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, mat.cols, mat.rows, mat.step);
     }
-    cudaSafeCall( cudaThreadSynchronize() );
+
+    cudaSafeCall ( cudaThreadSynchronize() );
 }

-extern "C" void cv::gpu::impl::set_to_without_mask(const DevMem2D& mat, const double * scalar, int elemSize1, int channels)
+extern "C" void cv::gpu::impl::set_to_with_mask(const DevMem2D& mat, const double * scalar, const DevMem2D& mask, int elemSize1, int channels)
 {
     float data[4];
     data[0] = static_cast<float>(scalar[0]);
@@ -152,33 +127,34 @@ extern "C" void cv::gpu::impl::set_to_without_mask(const DevMem2D& mat, const double * scalar, int elemSize1, int channels)
     data[3] = static_cast<float>(scalar[3]);
     cudaSafeCall( cudaMemcpyToSymbol(scalar_d, &data, sizeof(data)));

-    dim3 threadsPerBlock(256, 1, 1);
-    dim3 numBlocks (mat.rows * mat.cols / threadsPerBlock.x + 1, 1, 1);
+    dim3 threadsPerBlock(16, 16, 1);
+    dim3 numBlocks (mat.cols * channels / threadsPerBlock.x + 1, mat.rows / threadsPerBlock.y + 1, 1);

     if (channels == 1)
     {
-        if (elemSize1 == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 1><<<numBlocks,threadsPerBlock>>>(mat.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 1><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask< float, 1><<<numBlocks,threadsPerBlock>>>(( float *)mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 1><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 1><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float, 1><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
     }
     if (channels == 2)
     {
-        if (elemSize1 == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 2><<<numBlocks,threadsPerBlock>>>(mat.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 2><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask< float , 2><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 2><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 2><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float, 2><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
     }
     if (channels == 3)
     {
-        if (elemSize1 == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 3><<<numBlocks,threadsPerBlock>>>(mat.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 3><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask< float, 3><<<numBlocks,threadsPerBlock>>>(( float *)mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 3><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 3><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float, 3><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
     }
     if (channels == 4)
     {
-        if (elemSize1 == 1) ::mat_operators::kernel_set_to_without_mask<unsigned char, 4><<<numBlocks,threadsPerBlock>>>(mat.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 2) ::mat_operators::kernel_set_to_without_mask<unsigned short, 4><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, mat.cols, mat.rows, mat.step);
-        if (elemSize1 == 4) ::mat_operators::kernel_set_to_without_mask<float, 4><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, mat.cols, mat.rows, mat.step);
+        if (elemSize1 == 1) ::mat_operators::kernel_set_to_with_mask<unsigned char, 4><<<numBlocks,threadsPerBlock>>>(mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+        if (elemSize1 == 2) ::mat_operators::kernel_set_to_with_mask<unsigned short, 4><<<numBlocks,threadsPerBlock>>>((unsigned short *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
+        if (elemSize1 == 4) ::mat_operators::kernel_set_to_with_mask<float, 4><<<numBlocks,threadsPerBlock>>>((float *)mat.ptr, (unsigned char *)mask.ptr, mat.cols, mat.rows, mat.step, mask.step);
     }

-    cudaSafeCall( cudaThreadSynchronize() );
+    cudaSafeCall ( cudaThreadSynchronize() );
 }
|
|
|
|
|
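Note on the new indexing scheme (editorial, not part of the patch): the rewritten kernels launch one thread per channel element, with x spanning cols * channels and y spanning rows, and address element (y, x) through the row pitch in bytes as idx = y * (step / sizeof(T)) + x, while scalar_d[x % channels] picks the per-channel value. The sketch below is a minimal, self-contained illustration of that pattern under the same launch arithmetic; the names (demo_scalar, demo_set_to) and the raw CUDA runtime calls are assumptions standing in for the DevMem2D / cudaSafeCall plumbing used by the real code.

#include <cstdio>
#include <cuda_runtime.h>

__constant__ float demo_scalar[4];   // per-channel fill values, analogous to scalar_d

// One thread per channel element: x runs over cols * channels, y over rows.
// step is the row pitch in bytes, so it is divided by sizeof(T) to get the
// element stride, as in kernel_set_to_without_mask above.
template <typename T, int channels>
__global__ void demo_set_to(T* mat, int cols, int rows, int step)
{
    size_t x = blockIdx.x * blockDim.x + threadIdx.x;
    size_t y = blockIdx.y * blockDim.y + threadIdx.y;

    if ((x < cols * channels) && (y < rows))
    {
        size_t idx = y * (step / sizeof(T)) + x;
        mat[idx] = static_cast<T>(demo_scalar[x % channels]);  // x % channels selects the channel
    }
}

int main()
{
    const int rows = 4, cols = 5, channels = 3;

    float host_scalar[4] = { 10.f, 20.f, 30.f, 0.f };
    cudaMemcpyToSymbol(demo_scalar, host_scalar, sizeof(host_scalar));

    unsigned char* d_mat = 0;
    size_t pitch = 0;  // row pitch in bytes, playing the role of mat.step
    cudaMallocPitch((void**)&d_mat, &pitch, cols * channels * sizeof(unsigned char), rows);

    // Same launch arithmetic as the patch: the extra block covers the remainder.
    dim3 threadsPerBlock(16, 16, 1);
    dim3 numBlocks(cols * channels / threadsPerBlock.x + 1, rows / threadsPerBlock.y + 1, 1);
    demo_set_to<unsigned char, channels><<<numBlocks, threadsPerBlock>>>(d_mat, cols, rows, (int)pitch);
    cudaDeviceSynchronize();

    unsigned char host_mat[rows * cols * channels];
    cudaMemcpy2D(host_mat, cols * channels, d_mat, pitch, cols * channels, rows, cudaMemcpyDeviceToHost);
    printf("first pixel: %d %d %d\n", host_mat[0], host_mat[1], host_mat[2]);  // expect 10 20 30
    cudaFree(d_mat);
    return 0;
}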