refactored gpu module (pull/13383/head)
parent ea040ce71a
commit b08f60828b
19 changed files with 1500 additions and 934 deletions
@@ -1,233 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/gpu/devmem2d.hpp" |
||||
#include "saturate_cast.hpp" |
||||
#include "safe_call.hpp" |
||||
|
||||
using namespace cv::gpu; |
||||
|
||||
#ifndef FLT_MAX |
||||
#define FLT_MAX 3.402823466e+30F |
||||
#endif |
||||
|
||||

namespace bf_krnls
{
    __constant__ float* ctable_color;
    __constant__ float* ctable_space;
    __constant__ size_t ctable_space_step;

    __constant__ int cndisp;
    __constant__ int cradius;

    __constant__ short cedge_disc;
    __constant__ short cmax_disc;
}

namespace cv { namespace gpu { namespace bf
{
    void load_constants(float* table_color, const DevMem2Df& table_space, int ndisp, int radius, short edge_disc, short max_disc)
    {
        cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::ctable_color, &table_color, sizeof(table_color)) );
        cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::ctable_space, &table_space.ptr, sizeof(table_space.ptr)) );
        size_t table_space_step = table_space.step / sizeof(float);
        cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::ctable_space_step, &table_space_step, sizeof(size_t)) );

        cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::cndisp, &ndisp, sizeof(int)) );
        cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::cradius, &radius, sizeof(int)) );

        cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::cedge_disc, &edge_disc, sizeof(short)) );
        cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::cmax_disc, &max_disc, sizeof(short)) );
    }
}}}

namespace bf_krnls
{
    template <int channels>
    struct DistRgbMax
    {
        static __device__ uchar calc(const uchar* a, const uchar* b)
        {
            uchar x = abs(a[0] - b[0]);
            uchar y = abs(a[1] - b[1]);
            uchar z = abs(a[2] - b[2]);
            return (max(max(x, y), z));
        }
    };

    template <>
    struct DistRgbMax<1>
    {
        static __device__ uchar calc(const uchar* a, const uchar* b)
        {
            return abs(a[0] - b[0]);
        }
    };

    template <int channels, typename T>
    __global__ void bilateral_filter(int t, T* disp, size_t disp_step, const uchar* img, size_t img_step, int h, int w)
    {
        const int y = blockIdx.y * blockDim.y + threadIdx.y;
        const int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + t) & 1);

        T dp[5];

        if (y > 0 && y < h - 1 && x > 0 && x < w - 1)
        {
            dp[0] = *(disp + (y  ) * disp_step + x + 0);
            dp[1] = *(disp + (y-1) * disp_step + x + 0);
            dp[2] = *(disp + (y  ) * disp_step + x - 1);
            dp[3] = *(disp + (y+1) * disp_step + x + 0);
            dp[4] = *(disp + (y  ) * disp_step + x + 1);

            if (abs(dp[1] - dp[0]) >= cedge_disc || abs(dp[2] - dp[0]) >= cedge_disc || abs(dp[3] - dp[0]) >= cedge_disc || abs(dp[4] - dp[0]) >= cedge_disc)
            {
                const int ymin = max(0, y - cradius);
                const int xmin = max(0, x - cradius);
                const int ymax = min(h - 1, y + cradius);
                const int xmax = min(w - 1, x + cradius);

                float cost[] = {0.0f, 0.0f, 0.0f, 0.0f, 0.0f};

                const uchar* ic = img + y * img_step + channels * x;

                for (int yi = ymin; yi <= ymax; yi++)
                {
                    const T* disp_y = disp + yi * disp_step;

                    for (int xi = xmin; xi <= xmax; xi++)
                    {
                        const uchar* in = img + yi * img_step + channels * xi;

                        uchar dist_rgb = DistRgbMax<channels>::calc(in, ic);

                        const float weight = ctable_color[dist_rgb] * (ctable_space + abs(y-yi) * ctable_space_step)[abs(x-xi)];

                        const T disp_reg = disp_y[xi];

                        cost[0] += min(cmax_disc, abs(disp_reg - dp[0])) * weight;
                        cost[1] += min(cmax_disc, abs(disp_reg - dp[1])) * weight;
                        cost[2] += min(cmax_disc, abs(disp_reg - dp[2])) * weight;
                        cost[3] += min(cmax_disc, abs(disp_reg - dp[3])) * weight;
                        cost[4] += min(cmax_disc, abs(disp_reg - dp[4])) * weight;
                    }
                }

                float minimum = FLT_MAX;
                int id = 0;

                if (cost[0] < minimum)
                {
                    minimum = cost[0];
                    id = 0;
                }
                if (cost[1] < minimum)
                {
                    minimum = cost[1];
                    id = 1;
                }
                if (cost[2] < minimum)
                {
                    minimum = cost[2];
                    id = 2;
                }
                if (cost[3] < minimum)
                {
                    minimum = cost[3];
                    id = 3;
                }
                if (cost[4] < minimum)
                {
                    minimum = cost[4];
                    id = 4;
                }

                *(disp + y * disp_step + x) = dp[id];
            }
        }
    }
}

namespace cv { namespace gpu { namespace bf
{
    template <typename T>
    void bilateral_filter_caller(const DevMem2D_<T>& disp, const DevMem2D& img, int channels, int iters, cudaStream_t stream)
    {
        dim3 threads(32, 8, 1);
        dim3 grid(1, 1, 1);
        grid.x = divUp(disp.cols, threads.x << 1);
        grid.y = divUp(disp.rows, threads.y);

        switch (channels)
        {
        case 1:
            for (int i = 0; i < iters; ++i)
            {
                bf_krnls::bilateral_filter<1><<<grid, threads, 0, stream>>>(0, disp.ptr, disp.step/sizeof(T), img.ptr, img.step, disp.rows, disp.cols);
                bf_krnls::bilateral_filter<1><<<grid, threads, 0, stream>>>(1, disp.ptr, disp.step/sizeof(T), img.ptr, img.step, disp.rows, disp.cols);
            }
            break;
        case 3:
            for (int i = 0; i < iters; ++i)
            {
                bf_krnls::bilateral_filter<3><<<grid, threads, 0, stream>>>(0, disp.ptr, disp.step/sizeof(T), img.ptr, img.step, disp.rows, disp.cols);
                bf_krnls::bilateral_filter<3><<<grid, threads, 0, stream>>>(1, disp.ptr, disp.step/sizeof(T), img.ptr, img.step, disp.rows, disp.cols);
            }
            break;
        default:
            cv::gpu::error("Unsupported channels count", __FILE__, __LINE__);
        }

        if (stream != 0)
            cudaSafeCall( cudaThreadSynchronize() );
    }

    void bilateral_filter_gpu(const DevMem2D& disp, const DevMem2D& img, int channels, int iters, cudaStream_t stream)
    {
        bilateral_filter_caller(disp, img, channels, iters, stream);
    }

    void bilateral_filter_gpu(const DevMem2D_<short>& disp, const DevMem2D& img, int channels, int iters, cudaStream_t stream)
    {
        bilateral_filter_caller(disp, img, channels, iters, stream);
    }
}}}

@@ -0,0 +1,455 @@
#include "opencv2/gpu/devmem2d.hpp" |
||||
#include "saturate_cast.hpp" |
||||
#include "safe_call.hpp" |
||||
#include "cuda_shared.hpp" |
||||
|
||||
using namespace cv::gpu; |
||||
|
||||
#ifndef FLT_MAX |
||||
#define FLT_MAX 3.402823466e+30F |
||||
#endif |
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////// |
||||
// Linear filters |
||||
|
||||
#define MAX_KERNEL_SIZE 16 |
||||
|
||||
namespace filter_krnls |
||||
{ |
||||
__constant__ float cLinearKernel[MAX_KERNEL_SIZE]; |
||||
} |
||||
|
||||
namespace cv { namespace gpu { namespace filters |
||||
{ |
||||
void loadLinearKernel(const float kernel[], int ksize) |
||||
{ |
||||
cudaSafeCall( cudaMemcpyToSymbol(filter_krnls::cLinearKernel, kernel, ksize * sizeof(float)) ); |
||||
} |
||||
}}} |
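
A hedged host-side sketch (not part of this commit) of feeding the constant-memory kernel: build a normalized 5-tap Gaussian and hand it to loadLinearKernel. The helper name and sigma value are illustrative assumptions.

#include <cmath>

static void uploadGaussian5()       // hypothetical helper
{
    const int ksize = 5;            // must stay <= MAX_KERNEL_SIZE (16)
    const float sigma = 1.0f;       // assumed smoothing strength
    float kernel[5];

    float sum = 0.0f;
    for (int i = 0; i < ksize; ++i)
    {
        const float x = float(i - ksize / 2);
        kernel[i] = std::exp(-x * x / (2.0f * sigma * sigma));
        sum += kernel[i];
    }
    for (int i = 0; i < ksize; ++i) // normalize so the taps sum to 1
        kernel[i] /= sum;

    cv::gpu::filters::loadLinearKernel(kernel, ksize);
}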

namespace filter_krnls
{
    template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int KERNEL_SIZE, typename T, typename D>
    __global__ void linearRowFilter(const T* src, size_t src_step, D* dst, size_t dst_step, int anchor, int width, int height)
    {
        __shared__ T smem[BLOCK_DIM_Y * BLOCK_DIM_X * 3];

        const int blockStartX = blockDim.x * blockIdx.x;
        const int blockStartY = blockDim.y * blockIdx.y;

        const int threadX = blockStartX + threadIdx.x;
        const int prevThreadX = threadX - blockDim.x;
        const int nextThreadX = threadX + blockDim.x;

        const int threadY = blockStartY + threadIdx.y;

        T* sDataRow = smem + threadIdx.y * blockDim.x * 3;

        if (threadY < height)
        {
            const T* rowSrc = src + threadY * src_step;

            sDataRow[threadIdx.x + blockDim.x] = threadX < width ? rowSrc[threadX] : 0;

            sDataRow[threadIdx.x] = prevThreadX >= 0 ? rowSrc[prevThreadX] : 0;

            sDataRow[(blockDim.x << 1) + threadIdx.x] = nextThreadX < width ? rowSrc[nextThreadX] : 0;

            __syncthreads();

            if (threadX < width)
            {
                float sum = 0;

                sDataRow += threadIdx.x + blockDim.x - anchor;

                #pragma unroll
                for (int i = 0; i < KERNEL_SIZE; ++i)
                    sum += cLinearKernel[i] * sDataRow[i];

                dst[threadY * dst_step + threadX] = saturate_cast<D>(sum);
            }
        }
    }
}
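
The kernel above stages each block row in shared memory as three blockDim.x-wide tiles (left apron, centre tile, right apron), so every convolution tap reads shared rather than global memory. A sketch of the resulting layout, assuming BLOCK_DIM_X = 16 and the usual 0 <= anchor < KERNEL_SIZE:

/*
    sDataRow[ 0 .. 15]  left apron   -> src columns blockStartX - 16 .. blockStartX - 1
    sDataRow[16 .. 31]  centre tile  -> src columns blockStartX      .. blockStartX + 15
    sDataRow[32 .. 47]  right apron  -> src columns blockStartX + 16 .. blockStartX + 31

    After sDataRow += threadIdx.x + blockDim.x - anchor, element sDataRow[i]
    holds the pixel at column threadX - anchor + i, exactly the i-th tap of
    the convolution; KERNEL_SIZE <= 16 keeps every access inside the
    3 * BLOCK_DIM_X window.
*/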

namespace cv { namespace gpu { namespace filters
{
    template <int KERNEL_SIZE, typename T, typename D>
    void linearRowFilter_caller(const DevMem2D_<T>& src, const DevMem2D_<D>& dst, int anchor)
    {
        const int BLOCK_DIM_X = 16;
        const int BLOCK_DIM_Y = 16;

        dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y);
        dim3 blocks(divUp(src.cols, BLOCK_DIM_X), divUp(src.rows, BLOCK_DIM_Y));

        filter_krnls::linearRowFilter<BLOCK_DIM_X, BLOCK_DIM_Y, KERNEL_SIZE><<<blocks, threads>>>(src.ptr, src.elem_step,
            dst.ptr, dst.elem_step, anchor, src.cols, src.rows);

        cudaSafeCall( cudaThreadSynchronize() );
    }

    template <typename T, typename D>
    inline void linearRowFilter_gpu(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)
    {
        typedef void (*caller_t)(const DevMem2D_<T>& src, const DevMem2D_<D>& dst, int anchor);
        static const caller_t callers[] =
        {
            linearRowFilter_caller<0 , T, D>, linearRowFilter_caller<1 , T, D>,
            linearRowFilter_caller<2 , T, D>, linearRowFilter_caller<3 , T, D>,
            linearRowFilter_caller<4 , T, D>, linearRowFilter_caller<5 , T, D>,
            linearRowFilter_caller<6 , T, D>, linearRowFilter_caller<7 , T, D>,
            linearRowFilter_caller<8 , T, D>, linearRowFilter_caller<9 , T, D>,
            linearRowFilter_caller<10, T, D>, linearRowFilter_caller<11, T, D>,
            linearRowFilter_caller<12, T, D>, linearRowFilter_caller<13, T, D>,
            linearRowFilter_caller<14, T, D>, linearRowFilter_caller<15, T, D>
        };

        loadLinearKernel(kernel, ksize);
        callers[ksize]((DevMem2D_<T>)src, (DevMem2D_<D>)dst, anchor);
    }

    void linearRowFilter_gpu_32s32s(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)
    {
        linearRowFilter_gpu<int, int>(src, dst, kernel, ksize, anchor);
    }
    void linearRowFilter_gpu_32s32f(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)
    {
        linearRowFilter_gpu<int, float>(src, dst, kernel, ksize, anchor);
    }
    void linearRowFilter_gpu_32f32s(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)
    {
        linearRowFilter_gpu<float, int>(src, dst, kernel, ksize, anchor);
    }
    void linearRowFilter_gpu_32f32f(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)
    {
        linearRowFilter_gpu<float, float>(src, dst, kernel, ksize, anchor);
    }
}}}

namespace filter_krnls
{
    template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int KERNEL_SIZE, typename T, typename D>
    __global__ void linearColumnFilter(const T* src, size_t src_step, D* dst, size_t dst_step, int anchor, int width, int height)
    {
        __shared__ T smem[BLOCK_DIM_Y * BLOCK_DIM_X * 3];

        const int blockStartX = blockDim.x * blockIdx.x;
        const int blockStartY = blockDim.y * blockIdx.y;

        const int threadX = blockStartX + threadIdx.x;

        const int threadY = blockStartY + threadIdx.y;
        const int prevThreadY = threadY - blockDim.y;
        const int nextThreadY = threadY + blockDim.y;

        const int smem_step = blockDim.x;

        T* sDataColumn = smem + threadIdx.x;

        if (threadX < width)
        {
            const T* colSrc = src + threadX;

            sDataColumn[(threadIdx.y + blockDim.y) * smem_step] = threadY < height ? colSrc[threadY * src_step] : 0;

            sDataColumn[threadIdx.y * smem_step] = prevThreadY >= 0 ? colSrc[prevThreadY * src_step] : 0;

            sDataColumn[(threadIdx.y + (blockDim.y << 1)) * smem_step] = nextThreadY < height ? colSrc[nextThreadY * src_step] : 0;

            __syncthreads();

            if (threadY < height)
            {
                float sum = 0;

                sDataColumn += (threadIdx.y + blockDim.y - anchor) * smem_step;

                #pragma unroll
                for (int i = 0; i < KERNEL_SIZE; ++i)
                    sum += cLinearKernel[i] * sDataColumn[i * smem_step];

                dst[threadY * dst_step + threadX] = saturate_cast<D>(sum);
            }
        }
    }
}

namespace cv { namespace gpu { namespace filters
{
    template <int KERNEL_SIZE, typename T, typename D>
    void linearColumnFilter_caller(const DevMem2D_<T>& src, const DevMem2D_<D>& dst, int anchor)
    {
        const int BLOCK_DIM_X = 16;
        const int BLOCK_DIM_Y = 16;

        dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y);
        dim3 blocks(divUp(src.cols, BLOCK_DIM_X), divUp(src.rows, BLOCK_DIM_Y));

        filter_krnls::linearColumnFilter<BLOCK_DIM_X, BLOCK_DIM_Y, KERNEL_SIZE><<<blocks, threads>>>(src.ptr, src.elem_step,
            dst.ptr, dst.elem_step, anchor, src.cols, src.rows);

        cudaSafeCall( cudaThreadSynchronize() );
    }

    template <typename T, typename D>
    inline void linearColumnFilter_gpu(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)
    {
        typedef void (*caller_t)(const DevMem2D_<T>& src, const DevMem2D_<D>& dst, int anchor);
        static const caller_t callers[] =
        {
            linearColumnFilter_caller<0 , T, D>, linearColumnFilter_caller<1 , T, D>,
            linearColumnFilter_caller<2 , T, D>, linearColumnFilter_caller<3 , T, D>,
            linearColumnFilter_caller<4 , T, D>, linearColumnFilter_caller<5 , T, D>,
            linearColumnFilter_caller<6 , T, D>, linearColumnFilter_caller<7 , T, D>,
            linearColumnFilter_caller<8 , T, D>, linearColumnFilter_caller<9 , T, D>,
            linearColumnFilter_caller<10, T, D>, linearColumnFilter_caller<11, T, D>,
            linearColumnFilter_caller<12, T, D>, linearColumnFilter_caller<13, T, D>,
            linearColumnFilter_caller<14, T, D>, linearColumnFilter_caller<15, T, D>
        };

        loadLinearKernel(kernel, ksize);
        callers[ksize]((DevMem2D_<T>)src, (DevMem2D_<D>)dst, anchor);
    }

    void linearColumnFilter_gpu_32s32s(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)
    {
        linearColumnFilter_gpu<int, int>(src, dst, kernel, ksize, anchor);
    }
    void linearColumnFilter_gpu_32s32f(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)
    {
        linearColumnFilter_gpu<int, float>(src, dst, kernel, ksize, anchor);
    }
    void linearColumnFilter_gpu_32f32s(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)
    {
        linearColumnFilter_gpu<float, int>(src, dst, kernel, ksize, anchor);
    }
    void linearColumnFilter_gpu_32f32f(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor)
    {
        linearColumnFilter_gpu<float, float>(src, dst, kernel, ksize, anchor);
    }
}}}
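
A hedged end-to-end sketch (not in this commit) of a separable 32-bit float blur built from the exported entry points; each call reloads the constant-memory kernel itself, and the intermediate buffer tmp is assumed preallocated with the same extents as src:

static void separableBlur_32f(const DevMem2D& src, const DevMem2D& tmp, const DevMem2D& dst,
                              const float kernel[], int ksize)
{
    const int anchor = ksize / 2;   // centred kernel

    // horizontal pass writes into tmp, vertical pass produces the final image
    cv::gpu::filters::linearRowFilter_gpu_32f32f(src, tmp, kernel, ksize, anchor);
    cv::gpu::filters::linearColumnFilter_gpu_32f32f(tmp, dst, kernel, ksize, anchor);
}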

/////////////////////////////////////////////////////////////////////////////////////////////////
// Bilateral filters

namespace bf_krnls
{
    __constant__ float* ctable_color;
    __constant__ float* ctable_space;
    __constant__ size_t ctable_space_step;

    __constant__ int cndisp;
    __constant__ int cradius;

    __constant__ short cedge_disc;
    __constant__ short cmax_disc;
}

namespace cv { namespace gpu { namespace bf
{
    void load_constants(float* table_color, const DevMem2Df& table_space, int ndisp, int radius, short edge_disc, short max_disc)
    {
        cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::ctable_color, &table_color, sizeof(table_color)) );
        cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::ctable_space, &table_space.ptr, sizeof(table_space.ptr)) );
        size_t table_space_step = table_space.step / sizeof(float);
        cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::ctable_space_step, &table_space_step, sizeof(size_t)) );

        cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::cndisp, &ndisp, sizeof(int)) );
        cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::cradius, &radius, sizeof(int)) );

        cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::cedge_disc, &edge_disc, sizeof(short)) );
        cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::cmax_disc, &max_disc, sizeof(short)) );
    }
}}}
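
For orientation, a hedged sketch (assumptions, not the commit's code) of the tables load_constants expects: a 256-entry colour table indexed by the uchar output of DistRgbMax::calc, and a (radius+1) x (radius+1) spatial table indexed by |y-yi| and |x-xi|. Only the table extents follow from the kernel's indexing; the Gaussian shapes and sigmas are illustrative, and copying the vectors into device memory before calling load_constants is elided.

#include <cmath>
#include <vector>

static void buildWeightTables(std::vector<float>& table_color, std::vector<float>& table_space,
                              int radius, float sigma_color, float sigma_space)
{
    table_color.resize(256);           // indexed by the colour distance 0..255
    for (int d = 0; d < 256; ++d)
        table_color[d] = std::exp(-float(d * d) / (2.0f * sigma_color * sigma_color));

    const int n = radius + 1;          // kernel reads row |y-yi|, column |x-xi|
    table_space.assign(n * n, 0.0f);
    for (int dy = 0; dy < n; ++dy)
        for (int dx = 0; dx < n; ++dx)
            table_space[dy * n + dx] = std::exp(-float(dy * dy + dx * dx) / (2.0f * sigma_space * sigma_space));
}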

namespace bf_krnls
{
    template <int channels>
    struct DistRgbMax
    {
        static __device__ uchar calc(const uchar* a, const uchar* b)
        {
            uchar x = abs(a[0] - b[0]);
            uchar y = abs(a[1] - b[1]);
            uchar z = abs(a[2] - b[2]);
            return (max(max(x, y), z));
        }
    };

    template <>
    struct DistRgbMax<1>
    {
        static __device__ uchar calc(const uchar* a, const uchar* b)
        {
            return abs(a[0] - b[0]);
        }
    };

    template <int channels, typename T>
    __global__ void bilateral_filter(int t, T* disp, size_t disp_step, const uchar* img, size_t img_step, int h, int w)
    {
        const int y = blockIdx.y * blockDim.y + threadIdx.y;
        const int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + t) & 1);

        T dp[5];

        if (y > 0 && y < h - 1 && x > 0 && x < w - 1)
        {
            dp[0] = *(disp + (y  ) * disp_step + x + 0);
            dp[1] = *(disp + (y-1) * disp_step + x + 0);
            dp[2] = *(disp + (y  ) * disp_step + x - 1);
            dp[3] = *(disp + (y+1) * disp_step + x + 0);
            dp[4] = *(disp + (y  ) * disp_step + x + 1);

            if (abs(dp[1] - dp[0]) >= cedge_disc || abs(dp[2] - dp[0]) >= cedge_disc || abs(dp[3] - dp[0]) >= cedge_disc || abs(dp[4] - dp[0]) >= cedge_disc)
            {
                const int ymin = max(0, y - cradius);
                const int xmin = max(0, x - cradius);
                const int ymax = min(h - 1, y + cradius);
                const int xmax = min(w - 1, x + cradius);

                float cost[] = {0.0f, 0.0f, 0.0f, 0.0f, 0.0f};

                const uchar* ic = img + y * img_step + channels * x;

                for (int yi = ymin; yi <= ymax; yi++)
                {
                    const T* disp_y = disp + yi * disp_step;

                    for (int xi = xmin; xi <= xmax; xi++)
                    {
                        const uchar* in = img + yi * img_step + channels * xi;

                        uchar dist_rgb = DistRgbMax<channels>::calc(in, ic);

                        const float weight = ctable_color[dist_rgb] * (ctable_space + abs(y-yi) * ctable_space_step)[abs(x-xi)];

                        const T disp_reg = disp_y[xi];

                        cost[0] += min(cmax_disc, abs(disp_reg - dp[0])) * weight;
                        cost[1] += min(cmax_disc, abs(disp_reg - dp[1])) * weight;
                        cost[2] += min(cmax_disc, abs(disp_reg - dp[2])) * weight;
                        cost[3] += min(cmax_disc, abs(disp_reg - dp[3])) * weight;
                        cost[4] += min(cmax_disc, abs(disp_reg - dp[4])) * weight;
                    }
                }

                float minimum = FLT_MAX;
                int id = 0;

                if (cost[0] < minimum)
                {
                    minimum = cost[0];
                    id = 0;
                }
                if (cost[1] < minimum)
                {
                    minimum = cost[1];
                    id = 1;
                }
                if (cost[2] < minimum)
                {
                    minimum = cost[2];
                    id = 2;
                }
                if (cost[3] < minimum)
                {
                    minimum = cost[3];
                    id = 3;
                }
                if (cost[4] < minimum)
                {
                    minimum = cost[4];
                    id = 4;
                }

                *(disp + y * disp_step + x) = dp[id];
            }
        }
    }
}
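
The x computation at the top of this kernel is a red-black (checkerboard) schedule: the t = 0 launch updates the pixels of each row where (x + y) is even, the t = 1 launch the odd ones, so two launches per iteration cover every pixel exactly once while each update reads neighbours from the opposite phase. An illustrative host-side check of that parity claim (not part of the commit):

#include <cassert>

int main()
{
    for (int t = 0; t < 2; ++t)
        for (int y = 0; y < 8; ++y)
            for (int k = 0; k < 8; ++k)   // k stands in for blockIdx.x * blockDim.x + threadIdx.x
            {
                const int x = (k << 1) + ((y + t) & 1);
                assert(((x + y) & 1) == t);   // launch t touches exactly the parity-t pixels
            }
    return 0;
}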

namespace cv { namespace gpu { namespace bf
{
    template <typename T>
    void bilateral_filter_caller(const DevMem2D_<T>& disp, const DevMem2D& img, int channels, int iters, cudaStream_t stream)
    {
        dim3 threads(32, 8, 1);
        dim3 grid(1, 1, 1);
        grid.x = divUp(disp.cols, threads.x << 1);
        grid.y = divUp(disp.rows, threads.y);

        switch (channels)
        {
        case 1:
            for (int i = 0; i < iters; ++i)
            {
                bf_krnls::bilateral_filter<1><<<grid, threads, 0, stream>>>(0, disp.ptr, disp.step/sizeof(T), img.ptr, img.step, disp.rows, disp.cols);
                bf_krnls::bilateral_filter<1><<<grid, threads, 0, stream>>>(1, disp.ptr, disp.step/sizeof(T), img.ptr, img.step, disp.rows, disp.cols);
            }
            break;
        case 3:
            for (int i = 0; i < iters; ++i)
            {
                bf_krnls::bilateral_filter<3><<<grid, threads, 0, stream>>>(0, disp.ptr, disp.step/sizeof(T), img.ptr, img.step, disp.rows, disp.cols);
                bf_krnls::bilateral_filter<3><<<grid, threads, 0, stream>>>(1, disp.ptr, disp.step/sizeof(T), img.ptr, img.step, disp.rows, disp.cols);
            }
            break;
        default:
            cv::gpu::error("Unsupported channels count", __FILE__, __LINE__);
        }

        if (stream != 0)
            cudaSafeCall( cudaThreadSynchronize() );
    }

    void bilateral_filter_gpu(const DevMem2D& disp, const DevMem2D& img, int channels, int iters, cudaStream_t stream)
    {
        bilateral_filter_caller(disp, img, channels, iters, stream);
    }

    void bilateral_filter_gpu(const DevMem2D_<short>& disp, const DevMem2D& img, int channels, int iters, cudaStream_t stream)
    {
        bilateral_filter_caller(disp, img, channels, iters, stream);
    }
}}}
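
A minimal usage sketch (assumed, not from the commit) for the 16-bit path: the disparity map is refined in place against a 3-channel guidance image, with allocation and the load_constants call done beforehand.

static void refineDisparity(const DevMem2D_<short>& disp, const DevMem2D& img, int iters)
{
    // channels must be 1 or 3; the default (0) stream is used here
    cv::gpu::bf::bilateral_filter_gpu(disp, img, 3, iters, 0);
}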

@@ -0,0 +1,118 @@
#ifndef __OPENCV_GPU_TRANSFORM_HPP__
#define __OPENCV_GPU_TRANSFORM_HPP__

#include "cuda_shared.hpp"
#include "saturate_cast.hpp"
#include "vecmath.hpp"

namespace cv { namespace gpu { namespace algo_krnls
{
    template <typename T, typename D, typename UnOp>
    static __global__ void transform(const T* src, size_t src_step,
        D* dst, size_t dst_step, int width, int height, UnOp op)
    {
        const int x = blockDim.x * blockIdx.x + threadIdx.x;
        const int y = blockDim.y * blockIdx.y + threadIdx.y;

        if (x < width && y < height)
        {
            T src_data = src[y * src_step + x];
            dst[y * dst_step + x] = op(src_data, x, y);
        }
    }

    template <typename T1, typename T2, typename D, typename BinOp>
    static __global__ void transform(const T1* src1, size_t src1_step, const T2* src2, size_t src2_step,
        D* dst, size_t dst_step, int width, int height, BinOp op)
    {
        const int x = blockDim.x * blockIdx.x + threadIdx.x;
        const int y = blockDim.y * blockIdx.y + threadIdx.y;

        if (x < width && y < height)
        {
            T1 src1_data = src1[y * src1_step + x];
            T2 src2_data = src2[y * src2_step + x];
            dst[y * dst_step + x] = op(src1_data, src2_data, x, y);
        }
    }
}}}

namespace cv
{
    namespace gpu
    {
        template <typename T, typename D, typename UnOp>
        static void transform(const DevMem2D_<T>& src, const DevMem2D_<D>& dst, UnOp op, cudaStream_t stream)
        {
            dim3 threads(16, 16, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(src.cols, threads.x);
            grid.y = divUp(src.rows, threads.y);

            algo_krnls::transform<<<grid, threads, 0, stream>>>(src.ptr, src.elem_step,
                dst.ptr, dst.elem_step, src.cols, src.rows, op);

            if (stream == 0)
                cudaSafeCall( cudaThreadSynchronize() );
        }

        template <typename T1, typename T2, typename D, typename BinOp>
        static void transform(const DevMem2D_<T1>& src1, const DevMem2D_<T2>& src2, const DevMem2D_<D>& dst, BinOp op, cudaStream_t stream)
        {
            dim3 threads(16, 16, 1);
            dim3 grid(1, 1, 1);

            grid.x = divUp(src1.cols, threads.x);
            grid.y = divUp(src1.rows, threads.y);

            algo_krnls::transform<<<grid, threads, 0, stream>>>(src1.ptr, src1.elem_step,
                src2.ptr, src2.elem_step, dst.ptr, dst.elem_step, src1.cols, src1.rows, op);

            if (stream == 0)
                cudaSafeCall( cudaThreadSynchronize() );
        }
    }
}

#endif // __OPENCV_GPU_TRANSFORM_HPP__
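
A minimal usage sketch of the unary overload (assumed, not part of the commit): a functor with the (value, x, y) call signature the kernel applies per pixel. The functor name and delta field are illustrative.

struct ShiftBrightness
{
    short delta;

    __device__ short operator()(short v, int /*x*/, int /*y*/) const
    {
        return v + delta;   // coordinates are available for position-dependent ops
    }
};

static void addDelta(const DevMem2D_<short>& src, const DevMem2D_<short>& dst, short delta)
{
    ShiftBrightness op = { delta };
    cv::gpu::transform(src, dst, op, 0);   // stream 0: the call synchronizes before returning
}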

@@ -0,0 +1,126 @@
#ifndef __OPENCV_GPU_VECMATH_HPP__
#define __OPENCV_GPU_VECMATH_HPP__

#include "cuda_shared.hpp"

namespace cv
{
    namespace gpu
    {
        template<typename T, int N> struct TypeVec;

        template<typename T> struct TypeVec<T, 1> { typedef T vec_t; };

        template<> struct TypeVec<unsigned char, 2> { typedef uchar2 vec_t; };
        template<> struct TypeVec<uchar2, 2> { typedef uchar2 vec_t; };
        template<> struct TypeVec<unsigned char, 3> { typedef uchar3 vec_t; };
        template<> struct TypeVec<uchar3, 3> { typedef uchar3 vec_t; };
        template<> struct TypeVec<unsigned char, 4> { typedef uchar4 vec_t; };
        template<> struct TypeVec<uchar4, 4> { typedef uchar4 vec_t; };

        template<> struct TypeVec<char, 2> { typedef char2 vec_t; };
        template<> struct TypeVec<char2, 2> { typedef char2 vec_t; };
        template<> struct TypeVec<char, 3> { typedef char3 vec_t; };
        template<> struct TypeVec<char3, 3> { typedef char3 vec_t; };
        template<> struct TypeVec<char, 4> { typedef char4 vec_t; };
        template<> struct TypeVec<char4, 4> { typedef char4 vec_t; };

        template<> struct TypeVec<unsigned short, 2> { typedef ushort2 vec_t; };
        template<> struct TypeVec<ushort2, 2> { typedef ushort2 vec_t; };
        template<> struct TypeVec<unsigned short, 3> { typedef ushort3 vec_t; };
        template<> struct TypeVec<ushort3, 3> { typedef ushort3 vec_t; };
        template<> struct TypeVec<unsigned short, 4> { typedef ushort4 vec_t; };
        template<> struct TypeVec<ushort4, 4> { typedef ushort4 vec_t; };

        template<> struct TypeVec<short, 2> { typedef short2 vec_t; };
        template<> struct TypeVec<short2, 2> { typedef short2 vec_t; };
        template<> struct TypeVec<short, 3> { typedef short3 vec_t; };
        template<> struct TypeVec<short3, 3> { typedef short3 vec_t; };
        template<> struct TypeVec<short, 4> { typedef short4 vec_t; };
        template<> struct TypeVec<short4, 4> { typedef short4 vec_t; };

        template<> struct TypeVec<unsigned int, 2> { typedef uint2 vec_t; };
        template<> struct TypeVec<uint2, 2> { typedef uint2 vec_t; };
        template<> struct TypeVec<unsigned int, 3> { typedef uint3 vec_t; };
        template<> struct TypeVec<uint3, 3> { typedef uint3 vec_t; };
        template<> struct TypeVec<unsigned int, 4> { typedef uint4 vec_t; };
        template<> struct TypeVec<uint4, 4> { typedef uint4 vec_t; };

        template<> struct TypeVec<int, 2> { typedef int2 vec_t; };
        template<> struct TypeVec<int2, 2> { typedef int2 vec_t; };
        template<> struct TypeVec<int, 3> { typedef int3 vec_t; };
        template<> struct TypeVec<int3, 3> { typedef int3 vec_t; };
        template<> struct TypeVec<int, 4> { typedef int4 vec_t; };
        template<> struct TypeVec<int4, 4> { typedef int4 vec_t; };

        template<> struct TypeVec<float, 2> { typedef float2 vec_t; };
        template<> struct TypeVec<float2, 2> { typedef float2 vec_t; };
        template<> struct TypeVec<float, 3> { typedef float3 vec_t; };
        template<> struct TypeVec<float3, 3> { typedef float3 vec_t; };
        template<> struct TypeVec<float, 4> { typedef float4 vec_t; };
        template<> struct TypeVec<float4, 4> { typedef float4 vec_t; };
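
A brief sketch (assumed, not from the commit) of how the trait is used inside templated device code: mapping a channel type plus channel count onto the matching CUDA vector type, so one template can name uchar3, float4, etc. uniformly.

template <typename T, int cn>
__global__ void copyPixels(const T* src, T* dst, int n)
{
    // e.g. TypeVec<unsigned char, 3>::vec_t is uchar3
    typedef typename cv::gpu::TypeVec<T, cn>::vec_t pixel_t;

    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        reinterpret_cast<pixel_t*>(dst)[i] = reinterpret_cast<const pixel_t*>(src)[i];
}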

        static __device__ uchar4 operator+(const uchar4& a, const uchar4& b)
        {
            return make_uchar4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
        }
        static __device__ uchar4 operator-(const uchar4& a, const uchar4& b)
        {
            return make_uchar4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w);
        }
        static __device__ uchar4 operator*(const uchar4& a, const uchar4& b)
        {
            return make_uchar4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w);
        }
        static __device__ uchar4 operator/(const uchar4& a, const uchar4& b)
        {
            return make_uchar4(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w);
        }

        template <typename T>
        static __device__ uchar4 operator*(const uchar4& a, T s)
        {
            return make_uchar4(a.x * s, a.y * s, a.z * s, a.w * s);
        }
        template <typename T>
        static __device__ uchar4 operator*(T s, const uchar4& a)
        {
            return a * s;
        }
    }
}

#endif // __OPENCV_GPU_VECMATH_HPP__
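
A hedged example (not from the commit) exercising the uchar4 operators; note the componentwise arithmetic wraps rather than saturates, so the division by two below also serves to keep the sum in range. The kernel sits inside cv::gpu so the operators are found by ordinary lookup.

namespace cv { namespace gpu {

__global__ void averageRGBA(const uchar4* a, const uchar4* b, uchar4* out, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        out[i] = a[i] / make_uchar4(2, 2, 2, 2) + b[i] / make_uchar4(2, 2, 2, 2);
}

}}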