|
|
|
@@ -55,13 +55,13 @@ using namespace cv::gpu;
|
|
|
|
|
|
|
|
|
#define ROWSperTHREAD 21 // the number of rows a thread will process |
|
|
|
|
|
|
|
|
|
namespace stereobm_gpu |
|
|
|
|
|
|
|
|
{ |
|
|
|
|
|
|
|
|
|
#define BLOCK_W 128 // the thread block width (464) |
|
|
|
|
#define N_DISPARITIES 8 |
|
|
|
|
|
|
|
|
|
#define STEREO_MIND 0 // The minimum d range to check |
|
|
|
|
|
|
|
|
#define STEREO_DISP_STEP N_DISPARITIES // the d step, must be <= 1 to avoid aliasing |
|
|
|
|
|
|
|
|
|
__constant__ unsigned int* cminSSDImage; |
|
|
|
@@ -71,7 +71,7 @@ __constant__ int cheight;
|
|
|
|
|
|
|
|
|
__device__ int SQ(int a) |
|
|
|
|
{ |
|
|
|
|
return a * a; |
|
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
template<int RADIUS> |
|
|
|
@@ -82,7 +82,7 @@ __device__ unsigned int CalcSSD(unsigned int *col_ssd_cache, unsigned int *col_s
|
|
|
|
|
|
|
|
|
for(int i = 1; i <= RADIUS; i++) |
|
|
|
|
cache += col_ssd[i]; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
col_ssd_cache[0] = cache; |
|
|
|
|
|
|
|
|
|
__syncthreads(); |
|
|
|
@@ -101,7 +101,7 @@ __device__ uint2 MinSSD(unsigned int *col_ssd_cache, unsigned int *col_ssd)
|
|
|
|
{ |
|
|
|
|
unsigned int ssd[N_DISPARITIES]; |
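// Each of the 8 disparity candidates keeps its column sums in its own slab of width (BLOCK_W + 2 * RADIUS);
// CalcSSD reduces one slab to the window SSD for this thread's column.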
|
|
|
|
|
|
|
|
|
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS) |
|
|
|
|
|
|
|
|
ssd[0] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 0 * (BLOCK_W + 2 * RADIUS)); |
|
|
|
|
ssd[1] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 1 * (BLOCK_W + 2 * RADIUS)); |
|
|
|
|
ssd[2] = CalcSSD<RADIUS>(col_ssd_cache, col_ssd + 2 * (BLOCK_W + 2 * RADIUS)); |
|
|
|
@@ -146,7 +146,7 @@ __device__ void StepDown(int idx1, int idx2, unsigned char* imageL, unsigned cha
|
|
|
|
rightPixel1[4] = imageR[idx1 - 4]; |
|
|
|
|
rightPixel1[5] = imageR[idx1 - 5]; |
|
|
|
|
rightPixel1[6] = imageR[idx1 - 6]; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
rightPixel2[7] = imageR[idx2 - 7]; |
|
|
|
|
rightPixel2[0] = imageR[idx2 - 0]; |
|
|
|
|
rightPixel2[1] = imageR[idx2 - 1]; |
|
|
|
@@ -155,16 +155,16 @@ __device__ void StepDown(int idx1, int idx2, unsigned char* imageL, unsigned cha
|
|
|
|
rightPixel2[4] = imageR[idx2 - 4]; |
|
|
|
|
rightPixel2[5] = imageR[idx2 - 5]; |
|
|
|
|
rightPixel2[6] = imageR[idx2 - 6]; |
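// Rolling update of the vertical column sums: for every disparity candidate, subtract the squared
// difference of the row leaving the window (idx1) and add the one entering it (idx2).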
|
|
|
|
|
|
|
|
|
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS) |
|
|
|
|
diff1 = leftPixel1 - rightPixel1[0]; |
|
|
|
|
diff2 = leftPixel2 - rightPixel2[0]; |
|
|
|
|
|
|
|
|
|
|
|
|
|
col_ssd[0 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); |
|
|
|
|
|
|
|
|
|
diff1 = leftPixel1 - rightPixel1[1]; |
|
|
|
|
diff2 = leftPixel2 - rightPixel2[1]; |
|
|
|
|
col_ssd[1 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
diff1 = leftPixel1 - rightPixel1[2]; |
|
|
|
|
diff2 = leftPixel2 - rightPixel2[2]; |
|
|
|
|
col_ssd[2 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); |
|
|
|
@@ -172,19 +172,19 @@ __device__ void StepDown(int idx1, int idx2, unsigned char* imageL, unsigned cha
|
|
|
|
diff1 = leftPixel1 - rightPixel1[3]; |
|
|
|
|
diff2 = leftPixel2 - rightPixel2[3]; |
|
|
|
|
col_ssd[3 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); |
|
|
|
|
|
|
|
|
|
diff1 = leftPixel1 - rightPixel1[4]; |
|
|
|
|
diff2 = leftPixel2 - rightPixel2[4]; |
|
|
|
|
|
|
|
|
|
|
|
|
|
col_ssd[4 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
diff1 = leftPixel1 - rightPixel1[5]; |
|
|
|
|
diff2 = leftPixel2 - rightPixel2[5]; |
|
|
|
|
col_ssd[5 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
diff1 = leftPixel1 - rightPixel1[6]; |
|
|
|
|
diff2 = leftPixel2 - rightPixel2[6]; |
|
|
|
|
col_ssd[6 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
diff1 = leftPixel1 - rightPixel1[7]; |
|
|
|
|
diff2 = leftPixel2 - rightPixel2[7]; |
|
|
|
|
col_ssd[7 * (BLOCK_W + 2 * RADIUS)] += SQ(diff2) - SQ(diff1); |
|
|
|
@@ -203,7 +203,7 @@ __device__ void InitColSSD(int x_tex, int y_tex, int im_pitch, unsigned char* im
|
|
|
|
leftPixel1 = imageL[idx]; |
|
|
|
|
idx = idx - d; |
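// Accumulate squared differences against the 8 disparity candidates d .. d+7 for this row of the vertical window.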
|
|
|
|
|
|
|
|
|
diffa[0] += SQ(leftPixel1 - imageR[idx - 0]); |
|
|
|
|
|
|
|
|
diffa[1] += SQ(leftPixel1 - imageR[idx - 1]); |
|
|
|
|
diffa[2] += SQ(leftPixel1 - imageR[idx - 2]); |
|
|
|
|
diffa[3] += SQ(leftPixel1 - imageR[idx - 3]); |
|
|
|
@@ -213,7 +213,7 @@ __device__ void InitColSSD(int x_tex, int y_tex, int im_pitch, unsigned char* im
|
|
|
|
diffa[7] += SQ(leftPixel1 - imageR[idx - 7]); |
|
|
|
|
|
|
|
|
|
y_tex += 1; |
|
|
|
|
} |
|
|
|
|
|
|
|
|
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS) |
|
|
|
|
col_ssd[0 * (BLOCK_W + 2 * RADIUS)] = diffa[0]; |
|
|
|
|
col_ssd[1 * (BLOCK_W + 2 * RADIUS)] = diffa[1]; |
|
|
|
@@ -225,11 +225,11 @@ __device__ void InitColSSD(int x_tex, int y_tex, int im_pitch, unsigned char* im
|
|
|
|
col_ssd[7 * (BLOCK_W + 2 * RADIUS)] = diffa[7]; |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
template<int RADIUS> |
|
|
|
|
|
|
|
|
__global__ void stereoKernel(unsigned char *left, unsigned char *right, size_t img_step, unsigned char* disp, size_t disp_pitch, int maxdisp) |
|
|
|
|
{ |
|
|
|
|
extern __shared__ unsigned int col_ssd_cache[]; |
|
|
|
|
unsigned int *col_ssd = col_ssd_cache + BLOCK_W + threadIdx.x; |
|
|
|
|
|
|
|
|
unsigned int *col_ssd_extra = threadIdx.x < (2 * RADIUS) ? col_ssd + BLOCK_W : 0; //#define N_DIRTY_PIXELS (2 * RADIUS) |
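// Shared memory layout: BLOCK_W uints of reduction cache (col_ssd_cache), followed by N_DISPARITIES slabs of
// (BLOCK_W + 2 * RADIUS) column sums; threads with threadIdx.x < 2 * RADIUS also service the extra border columns.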
|
|
|
|
|
|
|
|
|
//#define X (blockIdx.x * BLOCK_W + threadIdx.x + STEREO_MAXD) |
|
|
|
@@ -241,13 +241,13 @@ __global__ void stereoKernel(unsigned char *left, unsigned char *right, size_t i
|
|
|
|
unsigned int* minSSDImage = cminSSDImage + X + Y * cminSSD_step; |
|
|
|
|
unsigned char* disparImage = disp + X + Y * disp_pitch; |
|
|
|
|
/* if (X < cwidth) |
|
|
|
|
{ |
|
|
|
|
|
|
|
|
unsigned int *minSSDImage_end = minSSDImage + min(ROWSperTHREAD, cheight - Y) * minssd_step; |
|
|
|
|
for(uint *ptr = minSSDImage; ptr != minSSDImage_end; ptr += minssd_step ) |
|
|
|
|
*ptr = 0xFFFFFFFF; |
|
|
|
|
|
|
|
|
}*/ |
|
|
|
|
int end_row = min(ROWSperTHREAD, cheight - Y); |
|
|
|
|
int y_tex; |
|
|
|
|
|
|
|
|
int x_tex = X - RADIUS; |
|
|
|
|
|
|
|
|
|
if (x_tex >= cwidth) |
|
|
|
@@ -257,7 +257,7 @@ __global__ void stereoKernel(unsigned char *left, unsigned char *right, size_t i
|
|
|
|
{ |
|
|
|
|
y_tex = Y - RADIUS; |
|
|
|
|
|
|
|
|
|
InitColSSD<RADIUS>(x_tex, y_tex, img_step, left, right, d, col_ssd); |
|
|
|
|
|
|
|
|
|
|
|
|
|
if (col_ssd_extra > 0) |
|
|
|
|
if (x_tex + BLOCK_W < cwidth) |
|
|
|
@@ -289,13 +289,13 @@ __global__ void stereoKernel(unsigned char *left, unsigned char *right, size_t i
|
|
|
|
StepDown<RADIUS>(idx1, idx2, left + BLOCK_W, right + BLOCK_W, d, col_ssd_extra); |
|
|
|
|
|
|
|
|
|
y_tex += 1; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
__syncthreads(); //before MinSSD function |
|
|
|
|
|
|
|
|
|
if (X < cwidth - RADIUS && row < cheight - RADIUS - Y) |
|
|
|
|
{ |
|
|
|
|
int idx = row * cminSSD_step; |
|
|
|
|
uint2 minSSD = MinSSD<RADIUS>(col_ssd_cache + threadIdx.x, col_ssd); |
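// minSSD.x is the smallest window SSD among the 8 candidates, minSSD.y the index of that candidate,
// so the winning disparity written below is d + minSSD.y.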
|
|
|
|
|
|
|
|
if (minSSD.x < minSSDImage[idx]) |
|
|
|
|
{ |
|
|
|
|
disparImage[disp_pitch * row] = (unsigned char)(d + minSSD.y); |
|
|
|
@@ -310,49 +310,57 @@ __global__ void stereoKernel(unsigned char *left, unsigned char *right, size_t i
|
|
|
|
|
|
|
|
|
|
|
|
|
|
namespace cv { namespace gpu { namespace impl |
|
|
|
|
{ |
|
|
|
|
|
|
|
|
template<int RADIUS> void kernel_caller(const DevMem2D& left, const DevMem2D& right, const DevMem2D& disp, int maxdisp, const cudaStream_t & stream) |
|
|
|
|
{ |
|
|
|
|
dim3 grid(1,1,1); |
|
|
|
|
dim3 threads(BLOCK_W, 1, 1); |
|
|
|
|
|
|
|
|
|
|
|
|
|
grid.x = divUp(left.cols - maxdisp - 2 * RADIUS, BLOCK_W); |
|
|
|
|
grid.y = divUp(left.rows - 2 * RADIUS, ROWSperTHREAD); |
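// Only left.cols - maxdisp - 2 * RADIUS columns and left.rows - 2 * RADIUS rows yield valid output,
// hence the reduced grid extents; each grid row covers ROWSperTHREAD image rows.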
|
|
|
|
|
|
|
|
|
//See above: #define COL_SSD_SIZE (BLOCK_W + 2 * RADIUS) |
|
|
|
|
size_t smem_size = (BLOCK_W + N_DISPARITIES * (BLOCK_W + 2 * RADIUS)) * sizeof(unsigned int); |
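// If no stream is supplied, launch on the default stream and block until the kernel finishes;
// otherwise enqueue the same launch asynchronously on the caller's stream and skip the synchronization.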
|
|
|
|
|
|
|
|
|
|
|
|
|
if (stream == 0) |
|
|
|
|
{ |
|
|
|
|
stereobm_gpu::stereoKernel<RADIUS><<<grid, threads, smem_size>>>(left.ptr, right.ptr, left.step, disp.ptr, disp.step, maxdisp); |
|
|
|
|
cudaSafeCall( cudaThreadSynchronize() ); |
|
|
|
|
} |
|
|
|
|
else |
|
|
|
|
{ |
|
|
|
|
stereobm_gpu::stereoKernel<RADIUS><<<grid, threads, smem_size, stream>>>(left.ptr, right.ptr, left.step, disp.ptr, disp.step, maxdisp); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
}; |
|
|
|
|
|
|
|
|
|
|
|
|
|
typedef void (*kernel_caller_t)(const DevMem2D& left, const DevMem2D& right, const DevMem2D& disp, int maxdisp, const cudaStream_t & stream); |
|
|
|
|
|
|
|
|
|
const static kernel_caller_t callers[] = |
|
|
|
|
{ |
|
|
|
|
|
|
|
|
0, |
|
|
|
|
kernel_caller< 1>, kernel_caller< 2>, kernel_caller< 3>, kernel_caller< 4>, kernel_caller< 5>, |
|
|
|
|
kernel_caller< 6>, kernel_caller< 7>, kernel_caller< 8>, kernel_caller< 9>, kernel_caller<10>, |
|
|
|
|
kernel_caller<11>, kernel_caller<12>, kernel_caller<13>, kernel_caller<14>, kernel_caller<15>,
|
|
|
|
kernel_caller<16>, kernel_caller<17>, kernel_caller<18>, kernel_caller<19>, kernel_caller<20>, |
|
|
|
|
kernel_caller<21>, kernel_caller<22>, kernel_caller<23>, kernel_caller<24>, kernel_caller<25> |
|
|
|
|
|
|
|
|
|
//0,0,0, 0,0,0, 0,0,kernel_caller<9> |
|
|
|
|
}; |
|
|
|
|
const int calles_num = sizeof(callers)/sizeof(callers[0]); |
|
|
|
|
|
|
|
|
|
extern "C" void stereoBM_GPU(const DevMem2D& left, const DevMem2D& right, const DevMem2D& disp, int maxdisp, int winsz, const DevMem2D_<unsigned int>& minSSD_buf) |
|
|
|
|
{ |
|
|
|
|
extern "C" void stereoBM_GPU(const DevMem2D& left, const DevMem2D& right, const DevMem2D& disp, int maxdisp, int winsz, const DevMem2D_<unsigned int>& minSSD_buf, const cudaStream_t & stream) |
|
|
|
|
{ |
|
|
|
|
int winsz2 = winsz >> 1; |
|
|
|
|
|
|
|
|
|
if (winsz2 == 0 || winsz2 >= calles_num) |
|
|
|
|
cv::gpu::error("Unsupported window size", __FILE__, __LINE__); |
|
|
|
|
|
|
|
|
|
//cudaSafeCall( cudaFuncSetCacheConfig(&stereoKernel, cudaFuncCachePreferL1) ); |
|
|
|
|
//cudaSafeCall( cudaFuncSetCacheConfig(&stereoKernel, cudaFuncCachePreferShared) ); |
|
|
|
|
|
|
|
|
|
|
|
|
|
cudaSafeCall( cudaMemset2D(disp.ptr, disp.step, 0, disp.cols, disp.rows) ); |
|
|
|
|
cudaSafeCall( cudaMemset2D(minSSD_buf.ptr, minSSD_buf.step, 0xFF, minSSD_buf.cols * minSSD_buf.elemSize(), disp.rows) ); |
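// disp is cleared to 0 and every minSSD entry to 0xFFFFFFFF, so the first SSD computed for a pixel
// always wins the comparison inside the kernel.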
|
|
|
|
|
|
|
|
|
|
|
|
|
cudaSafeCall( cudaMemcpyToSymbol( stereobm_gpu::cwidth, &left.cols, sizeof(left.cols) ) ); |
|
|
|
|
cudaSafeCall( cudaMemcpyToSymbol( stereobm_gpu::cheight, &left.rows, sizeof(left.rows) ) ); |
|
|
|
@@ -361,7 +369,7 @@ namespace cv { namespace gpu { namespace impl
|
|
|
|
size_t minssd_step = minSSD_buf.step/minSSD_buf.elemSize(); |
|
|
|
|
cudaSafeCall( cudaMemcpyToSymbol( stereobm_gpu::cminSSD_step, &minssd_step, sizeof(minssd_step) ) ); |
|
|
|
|
|
|
|
|
|
|
|
|
|
callers[winsz2](left, right, disp, maxdisp, stream); |
|
|
|
|
} |
|
|
|
|
}}} |
|
|
|
|
|
|
|
|
@@ -381,7 +389,7 @@ extern "C" __global__ void prefilter_kernel(unsigned char *output, size_t step,
|
|
|
|
|
|
|
|
|
if (x < width && y < height) |
|
|
|
|
{ |
|
|
|
|
int conv = (int)tex2D(texForSobel, x - 1, y - 1) * (-1) + (int)tex2D(texForSobel, x + 1, y - 1) * (1) + |
|
|
|
|
|
|
|
|
(int)tex2D(texForSobel, x - 1, y ) * (-2) + (int)tex2D(texForSobel, x + 1, y ) * (2) + |
|
|
|
|
(int)tex2D(texForSobel, x - 1, y + 1) * (-1) + (int)tex2D(texForSobel, x + 1, y + 1) * (1); |
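// 3x3 horizontal Sobel response, sampled through the texture bound to the input image.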
|
|
|
|
|
|
|
|
@@ -398,18 +406,18 @@ namespace cv { namespace gpu { namespace impl
|
|
|
|
extern "C" void prefilter_xsobel(const DevMem2D& input, const DevMem2D& output, int prefilterCap) |
|
|
|
|
{ |
|
|
|
|
cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>(); |
|
|
|
|
cudaSafeCall( cudaBindTexture2D( 0, stereobm_gpu::texForSobel, input.ptr, desc, input.cols, input.rows, input.step ) ); |
|
|
|
|
|
|
|
|
|
|
|
|
|
dim3 threads(16, 16, 1); |
|
|
|
|
dim3 grid(1, 1, 1); |
|
|
|
|
|
|
|
|
|
grid.x = divUp(input.cols, threads.x); |
|
|
|
|
grid.y = divUp(input.rows, threads.y); |
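// One 16x16 thread block per tile of the input; divUp rounds the grid up so border tiles are covered.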
|
|
|
|
|
|
|
|
|
|
|
|
|
stereobm_gpu::prefilter_kernel<<<grid, threads>>>(output.ptr, output.step, output.cols, output.rows, prefilterCap); |
|
|
|
|
cudaSafeCall( cudaThreadSynchronize() ); |
|
|
|
|
|
|
|
|
|
cudaSafeCall( cudaUnbindTexture (stereobm_gpu::texForSobel ) ); |
|
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
}}} |
|
|
|
@@ -424,8 +432,8 @@ namespace stereobm_gpu
|
|
|
|
texture<unsigned char, 2, cudaReadModeNormalizedFloat> texForTF; |
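// cudaReadModeNormalizedFloat makes tex2D return values in [0, 1]; the windowed sums are scaled back
// by 255 further down before being compared against the threshold.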
|
|
|
|
|
|
|
|
|
__device__ float sobel(int x, int y) |
|
|
|
|
{ |
|
|
|
|
float conv = tex2D(texForTF, x - 1, y - 1) * (-1) + tex2D(texForTF, x + 1, y - 1) * (1) + |
|
|
|
|
|
|
|
|
tex2D(texForTF, x - 1, y ) * (-2) + tex2D(texForTF, x + 1, y ) * (2) + |
|
|
|
|
tex2D(texForTF, x - 1, y + 1) * (-1) + tex2D(texForTF, x + 1, y + 1) * (1); |
|
|
|
|
return fabs(conv); |
|
|
|
@@ -453,28 +461,28 @@ __device__ float CalcSums(float *cols, float *cols_cache, int winsz)
|
|
|
|
return cols[0] + cache + cache2; |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
#define RpT (2 * ROWSperTHREAD) // got experimentally |
|
|
|
|
|
|
|
|
|
|
|
|
|
extern "C" __global__ void textureness_kernel(unsigned char *disp, size_t disp_step, int winsz, float threshold, int width, int height) |
|
|
|
|
{ |
|
|
|
|
|
|
|
|
int winsz2 = winsz/2; |
|
|
|
|
int n_dirty_pixels = (winsz2) * 2; |
|
|
|
|
|
|
|
|
|
extern __shared__ float cols_cache[]; |
|
|
|
|
float *cols = cols_cache + blockDim.x + threadIdx.x; |
|
|
|
|
float *cols_extra = threadIdx.x < n_dirty_pixels ? cols + blockDim.x : 0; |
|
|
|
|
|
|
|
|
|
|
|
|
|
int x = blockIdx.x * blockDim.x + threadIdx.x; |
|
|
|
|
|
|
|
|
int beg_row = blockIdx.y * RpT; |
|
|
|
|
int end_row = min(beg_row + RpT, height); |
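// Each block row processes RpT image rows; per-thread column sums of the Sobel magnitude live in
// shared memory and are updated incrementally as the window slides down.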
|
|
|
|
|
|
|
|
|
if (x < width) |
|
|
|
|
{ |
|
|
|
|
|
|
|
|
int y = beg_row; |
|
|
|
|
|
|
|
|
|
float sum = 0; |
|
|
|
|
float sum_extra = 0; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
for(int i = y - winsz2; i <= y + winsz2; ++i) |
|
|
|
|
{ |
|
|
|
|
sum += sobel(x - winsz2, i); |
|
|
|
@@ -486,11 +494,11 @@ extern "C" __global__ void textureness_kernel(unsigned char *disp, size_t disp_s
|
|
|
|
*cols_extra = sum_extra; |
|
|
|
|
|
|
|
|
|
__syncthreads(); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
float sum_win = CalcSums(cols, cols_cache + threadIdx.x, winsz) * 255; |
|
|
|
|
if (sum_win < threshold) |
|
|
|
|
disp[y * disp_step + x] = 0; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
__syncthreads(); |
|
|
|
|
|
|
|
|
|
for(int y = beg_row + 1; y < end_row; ++y) |
|
|
|
@@ -505,12 +513,12 @@ extern "C" __global__ void textureness_kernel(unsigned char *disp, size_t disp_s
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
__syncthreads(); |
|
|
|
|
float sum_win = CalcSums(cols, cols_cache + threadIdx.x, winsz) * 255; |
|
|
|
|
|
|
|
|
if (sum_win < threshold) |
|
|
|
|
disp[y * disp_step + x] = 0; |
|
|
|
|
|
|
|
|
|
__syncthreads(); |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
@@ -521,21 +529,21 @@ namespace cv { namespace gpu { namespace impl
|
|
|
|
{ |
|
|
|
|
avgTexturenessThreshold *= winsz * winsz; |
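// The kernel compares a windowed sum rather than an average, so the per-pixel threshold is scaled
// by the window area up front.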
|
|
|
|
|
|
|
|
|
stereobm_gpu::texForTF.filterMode = cudaFilterModeLinear; |
|
|
|
|
|
|
|
|
stereobm_gpu::texForTF.addressMode[0] = cudaAddressModeWrap; |
|
|
|
|
stereobm_gpu::texForTF.addressMode[1] = cudaAddressModeWrap; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>(); |
|
|
|
|
cudaSafeCall( cudaBindTexture2D( 0, stereobm_gpu::texForTF, input.ptr, desc, input.cols, input.rows, input.step ) ); |
|
|
|
|
|
|
|
|
|
|
|
|
|
dim3 threads(128, 1, 1); |
|
|
|
|
dim3 grid(1, 1, 1); |
|
|
|
|
|
|
|
|
|
grid.x = divUp(input.cols, threads.x); |
|
|
|
|
grid.y = divUp(input.rows, RpT); |
|
|
|
|
|
|
|
|
|
size_t smem_size = (threads.x + threads.x + (winsz/2) * 2 ) * sizeof(float); |
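// Shared memory: one column sum per thread, an equally sized cache for CalcSums, plus 2 * (winsz/2) extra border columns.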
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
stereobm_gpu::textureness_kernel<<<grid, threads, smem_size>>>(disp.ptr, disp.step, winsz, avgTexturenessThreshold, disp.cols, disp.rows); |
|
|
|
|
cudaSafeCall( cudaThreadSynchronize() ); |
|
|
|
|
|
|
|
|
|