@@ -43,182 +43,115 @@
 #if !defined CUDA_DISABLER

-#include "internal_shared.hpp"
-#include "opencv2/gpu/device/utility.hpp"
-#include "opencv2/gpu/device/saturate_cast.hpp"
+#include "opencv2/gpu/device/common.hpp"
+#include "opencv2/gpu/device/functional.hpp"
+#include "opencv2/gpu/device/emulation.hpp"
+#include "opencv2/gpu/device/transform.hpp"

-namespace cv { namespace gpu { namespace device
-{
-    #define UINT_BITS 32U
-
-    //Warps == subhistograms per threadblock
-    #define WARP_COUNT 6
-
-    //Threadblock size
-    #define HISTOGRAM256_THREADBLOCK_SIZE (WARP_COUNT * OPENCV_GPU_WARP_SIZE)
-    #define HISTOGRAM256_BIN_COUNT 256
-
-    //Shared memory per threadblock
-    #define HISTOGRAM256_THREADBLOCK_MEMORY (WARP_COUNT * HISTOGRAM256_BIN_COUNT)
-
-    #define PARTIAL_HISTOGRAM256_COUNT 240
-
-    #define MERGE_THREADBLOCK_SIZE 256
+using namespace cv::gpu;
+using namespace cv::gpu::device;

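As a quick sanity check on the configuration being removed above (a sketch, not part of the patch, assuming the usual OPENCV_GPU_WARP_SIZE of 32 and 4-byte uints):

    // Old scheme: 6 warps x 32 threads per block, one 256-bin sub-histogram per
    // warp in shared memory, and 240 block-level partial histograms staged in
    // global memory before the merge pass.
    static_assert(6 * 32 == 192,             "HISTOGRAM256_THREADBLOCK_SIZE: threads per block");
    static_assert(6 * 256 * 4 == 6 * 1024,   "HISTOGRAM256_THREADBLOCK_MEMORY: 6 KB of shared memory per block");
    static_assert(240 * 256 * 4 == 240 * 1024, "partial-histogram scratch buffer: 240 KB of global memory");
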
-    #define USE_SMEM_ATOMICS (defined (__CUDA_ARCH__) && (__CUDA_ARCH__ >= 120))
-
-    namespace hist
-    {
-        #if (!USE_SMEM_ATOMICS)
-
-            #define TAG_MASK ( (1U << (UINT_BITS - OPENCV_GPU_LOG_WARP_SIZE)) - 1U )
-
-            __forceinline__ __device__ void addByte(volatile uint* s_WarpHist, uint data, uint threadTag)
-            {
-                uint count;
-                do
-                {
-                    count = s_WarpHist[data] & TAG_MASK;
-                    count = threadTag | (count + 1);
-                    s_WarpHist[data] = count;
-                } while (s_WarpHist[data] != count);
-            }
-
-        #else
-
-            #define TAG_MASK 0xFFFFFFFFU
-
-            __forceinline__ __device__ void addByte(uint* s_WarpHist, uint data, uint threadTag)
-            {
-                atomicAdd(s_WarpHist + data, 1);
-            }
-
-        #endif
-
-        __forceinline__ __device__ void addWord(uint* s_WarpHist, uint data, uint tag, uint pos_x, uint cols)
-        {
-            uint x = pos_x << 2;
-
-            if (x + 0 < cols) addByte(s_WarpHist, (data >>  0) & 0xFFU, tag);
-            if (x + 1 < cols) addByte(s_WarpHist, (data >>  8) & 0xFFU, tag);
-            if (x + 2 < cols) addByte(s_WarpHist, (data >> 16) & 0xFFU, tag);
-            if (x + 3 < cols) addByte(s_WarpHist, (data >> 24) & 0xFFU, tag);
-        }
-
-        __global__ void histogram256(const PtrStep<uint> d_Data, uint* d_PartialHistograms, uint dataCount, uint cols)
-        {
-            //Per-warp subhistogram storage
-            __shared__ uint s_Hist[HISTOGRAM256_THREADBLOCK_MEMORY];
-            uint* s_WarpHist= s_Hist + (threadIdx.x >> OPENCV_GPU_LOG_WARP_SIZE) * HISTOGRAM256_BIN_COUNT;
-
-            //Clear shared memory storage for current threadblock before processing
-            #pragma unroll
-            for (uint i = 0; i < (HISTOGRAM256_THREADBLOCK_MEMORY / HISTOGRAM256_THREADBLOCK_SIZE); i++)
-                s_Hist[threadIdx.x + i * HISTOGRAM256_THREADBLOCK_SIZE] = 0;
-
-            //Cycle through the entire data set, update subhistograms for each warp
-            const uint tag = threadIdx.x << (UINT_BITS - OPENCV_GPU_LOG_WARP_SIZE);
-
-            __syncthreads();
-
-            const uint colsui = d_Data.step / sizeof(uint);
-            for(uint pos = blockIdx.x * blockDim.x + threadIdx.x; pos < dataCount; pos += blockDim.x * gridDim.x)
-            {
-                uint pos_y = pos / colsui;
-                uint pos_x = pos % colsui;
-                uint data = d_Data.ptr(pos_y)[pos_x];
-                addWord(s_WarpHist, data, tag, pos_x, cols);
-            }
-
-            //Merge per-warp histograms into per-block and write to global memory
-            __syncthreads();
-
-            for(uint bin = threadIdx.x; bin < HISTOGRAM256_BIN_COUNT; bin += HISTOGRAM256_THREADBLOCK_SIZE)
-            {
-                uint sum = 0;
-
-                for (uint i = 0; i < WARP_COUNT; i++)
-                    sum += s_Hist[bin + i * HISTOGRAM256_BIN_COUNT] & TAG_MASK;
-
-                d_PartialHistograms[blockIdx.x * HISTOGRAM256_BIN_COUNT + bin] = sum;
-            }
-        }
-
-        ////////////////////////////////////////////////////////////////////////////////
-        // Merge histogram256() output
-        // Run one threadblock per bin; each threadblock adds up the same bin counter
-        // from every partial histogram. Reads are uncoalesced, but mergeHistogram256
-        // takes only a fraction of total processing time
-        ////////////////////////////////////////////////////////////////////////////////
-
-        __global__ void mergeHistogram256(const uint* d_PartialHistograms, int* d_Histogram)
-        {
-            uint sum = 0;
-
-            #pragma unroll
-            for (uint i = threadIdx.x; i < PARTIAL_HISTOGRAM256_COUNT; i += MERGE_THREADBLOCK_SIZE)
-                sum += d_PartialHistograms[blockIdx.x + i * HISTOGRAM256_BIN_COUNT];
-
-            __shared__ uint data[MERGE_THREADBLOCK_SIZE];
-            data[threadIdx.x] = sum;
-
-            for (uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1)
-            {
-                __syncthreads();
-
-                if(threadIdx.x < stride)
-                    data[threadIdx.x] += data[threadIdx.x + stride];
-            }
-
-            if(threadIdx.x == 0)
-                d_Histogram[blockIdx.x] = saturate_cast<int>(data[0]);
-        }
+namespace
+{
+    __global__ void histogram256(const uchar* src, int cols, int rows, size_t step, int* hist)
+    {
+        __shared__ int shist[256];
+
+        const int y = blockIdx.x * blockDim.y + threadIdx.y;
+        const int tid = threadIdx.y * blockDim.x + threadIdx.x;
+
+        shist[tid] = 0;
+        __syncthreads();
+
+        if (y < rows)
+        {
+            const unsigned int* rowPtr = (const unsigned int*) (src + y * step);
+
+            const int cols_4 = cols / 4;
+            for (int x = threadIdx.x; x < cols_4; x += blockDim.x)
+            {
+                unsigned int data = rowPtr[x];
+
+                Emulation::smem::atomicAdd(&shist[(data >>  0) & 0xFFU], 1);
+                Emulation::smem::atomicAdd(&shist[(data >>  8) & 0xFFU], 1);
+                Emulation::smem::atomicAdd(&shist[(data >> 16) & 0xFFU], 1);
+                Emulation::smem::atomicAdd(&shist[(data >> 24) & 0xFFU], 1);
+            }
+
+            if (cols % 4 != 0 && threadIdx.x == 0)
+            {
+                for (int x = cols_4 * 4; x < cols; ++x)
+                {
+                    unsigned int data = ((const uchar*)rowPtr)[x];
+                    Emulation::smem::atomicAdd(&shist[data], 1);
+                }
+            }
+        }
+
+        __syncthreads();
+
+        const int histVal = shist[tid];
+
+        if (histVal > 0)
+            ::atomicAdd(hist + tid, histVal);
+    }
+}

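For reference, the removed kernel pair and the new single-pass kernel accumulate the same quantity; a plain CPU equivalent (a sketch for clarity, not part of the patch) is:

    // 256-bin histogram of an 8-bit single-channel image whose rows are
    // `step` bytes apart; `hist` is assumed to start out zeroed.
    static void histogram256_reference(const unsigned char* src, int cols, int rows, size_t step, int hist[256])
    {
        for (int y = 0; y < rows; ++y)
        {
            const unsigned char* row = src + y * step;
            for (int x = 0; x < cols; ++x)
                ++hist[row[x]];
        }
    }
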
-        void histogram256_gpu(PtrStepSzb src, int* hist, uint* buf, cudaStream_t stream)
-        {
-            histogram256<<<PARTIAL_HISTOGRAM256_COUNT, HISTOGRAM256_THREADBLOCK_SIZE, 0, stream>>>(
-                PtrStepSz<uint>(src),
-                buf,
-                static_cast<uint>(src.rows * src.step / sizeof(uint)),
-                src.cols);
-            cudaSafeCall( cudaGetLastError() );
-
-            mergeHistogram256<<<HISTOGRAM256_BIN_COUNT, MERGE_THREADBLOCK_SIZE, 0, stream>>>(buf, hist);
-            cudaSafeCall( cudaGetLastError() );
-
-            if (stream == 0)
-                cudaSafeCall( cudaDeviceSynchronize() );
-        }
+namespace hist
+{
+    void histogram256(PtrStepSzb src, int* hist, cudaStream_t stream)
+    {
+        const dim3 block(32, 8);
+        const dim3 grid(divUp(src.rows, block.y));
+
+        ::histogram256<<<grid, block, 0, stream>>>(src.data, src.cols, src.rows, src.step, hist);
+        cudaSafeCall( cudaGetLastError() );
+
+        if (stream == 0)
+            cudaSafeCall( cudaDeviceSynchronize() );
+    }
+}

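A hypothetical caller-side sketch (not part of the patch): the new entry point drops the partial-histogram scratch buffer and accumulates directly into a single 256-bin int buffer with atomicAdd, so the caller presumably clears that buffer before each call.

    // callHistogram256 is an illustrative helper, not an OpenCV function.
    void callHistogram256(cv::gpu::PtrStepSzb src, int* d_hist256, cudaStream_t stream)
    {
        cudaSafeCall( cudaMemsetAsync(d_hist256, 0, 256 * sizeof(int), stream) );
        hist::histogram256(src, d_hist256, stream);
    }
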
-        __constant__ int c_lut[256];
-
-        __global__ void equalizeHist(const PtrStepSzb src, PtrStepb dst)
-        {
-            const int x = blockIdx.x * blockDim.x + threadIdx.x;
-            const int y = blockIdx.y * blockDim.y + threadIdx.y;
-
-            if (x < src.cols && y < src.rows)
-            {
-                const uchar val = src.ptr(y)[x];
-                const int lut = c_lut[val];
-                dst.ptr(y)[x] = __float2int_rn(255.0f / (src.cols * src.rows) * lut);
-            }
-        }
-
-        void equalizeHist_gpu(PtrStepSzb src, PtrStepSzb dst, const int* lut, cudaStream_t stream)
-        {
-            dim3 block(16, 16);
-            dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
-
-            equalizeHist<<<grid, block, 0, stream>>>(src, dst);
-            cudaSafeCall( cudaGetLastError() );
-
-            if (stream == 0)
-                cudaSafeCall( cudaDeviceSynchronize() );
-        }
-    } // namespace hist
-}}} // namespace cv { namespace gpu { namespace device
+/////////////////////////////////////////////////////////////////////////
+
+namespace
+{
+    __constant__ int c_lut[256];
+
+    struct EqualizeHist : unary_function<uchar, uchar>
+    {
+        float scale;
+
+        __host__ EqualizeHist(float _scale) : scale(_scale) {}
+
+        __device__ __forceinline__ uchar operator ()(uchar val) const
+        {
+            const int lut = c_lut[val];
+            return __float2int_rn(scale * lut);
+        }
+    };
+}
+
+namespace cv { namespace gpu { namespace device
+{
+    template <> struct TransformFunctorTraits<EqualizeHist> : DefaultTransformFunctorTraits<EqualizeHist>
+    {
+        enum { smart_shift = 4 };
+    };
+}}}
+
+namespace hist
+{
+    void equalizeHist(PtrStepSzb src, PtrStepSzb dst, const int* lut, cudaStream_t stream)
+    {
+        if (stream == 0)
+            cudaSafeCall( cudaMemcpyToSymbol(c_lut, lut, 256 * sizeof(int), 0, cudaMemcpyDeviceToDevice) );
+        else
+            cudaSafeCall( cudaMemcpyToSymbolAsync(c_lut, lut, 256 * sizeof(int), 0, cudaMemcpyDeviceToDevice, stream) );
+
+        const float scale = 255.0f / (src.cols * src.rows);
+
+        transform(src, dst, EqualizeHist(scale), WithOutMask(), stream);
+    }
+}

 #endif /* CUDA_DISABLER */
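For clarity, a CPU sketch of what the equalization path computes (not part of the patch; c_lut is presumably the 256-entry cumulative histogram uploaded by the caller): each output pixel is its LUT entry scaled by 255/(cols*rows) and rounded to the nearest integer, which is what both the removed kernel and the new EqualizeHist functor do via __float2int_rn.

    static unsigned char equalizeHist_reference(unsigned char val, const int lut[256], int cols, int rows)
    {
        const float scale = 255.0f / (cols * rows);
        return (unsigned char)(scale * lut[val] + 0.5f);   // round to nearest (tie handling differs slightly from __float2int_rn)
    }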