added GPU bilateral filter + tests

added GPU non local means brute force filter + tests
pull/26/head
Anatoly Baksheev 12 years ago
parent a09679044e
commit 0ba01afd83
Changed files (number of changed lines in parentheses):
  1. CMakeLists.txt (1)
  2. cmake/OpenCVCompilerOptions.cmake (7)
  3. modules/gpu/doc/image_processing.rst (48)
  4. modules/gpu/include/opencv2/gpu/gpu.hpp (8)
  5. modules/gpu/perf/perf_core.cpp (18)
  6. modules/gpu/perf/perf_denoising.cpp (98)
  7. modules/gpu/perf/perf_imgproc.cpp (28)
  8. modules/gpu/perf/perf_matop.cpp (6)
  9. modules/gpu/perf/perf_video.cpp (10)
  10. modules/gpu/perf/utility.hpp (1)
  11. modules/gpu/src/cuda/bilateral_filter.cu (270)
  12. modules/gpu/src/cuda/nlm.cu (143)
  13. modules/gpu/src/denoising.cpp (135)
  14. modules/gpu/src/hough.cpp (4)
  15. modules/gpu/src/opencv2/gpu/device/functional.hpp (2)
  16. modules/gpu/test/test_denoising.cpp (140)
  17. modules/gpu/test/utility.cpp (8)
  18. modules/gpu/test/utility.hpp (5)
  19. modules/imgproc/src/smooth.cpp (2)

@ -197,7 +197,6 @@ OCV_OPTION(ENABLE_SSE41 "Enable SSE4.1 instructions"
OCV_OPTION(ENABLE_SSE42 "Enable SSE4.2 instructions" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) )
OCV_OPTION(ENABLE_NOISY_WARNINGS "Show all warnings even if they are too noisy" OFF )
OCV_OPTION(OPENCV_WARNINGS_ARE_ERRORS "Treat warnings as errors" OFF )
OCV_OPTION(ENABLE_MULTI_PROCESSOR_COMPILATION "Enable multi-processor compilation" OFF IF MSVC)
# uncategorized options

@ -282,9 +282,4 @@ if(MSVC)
if(NOT ENABLE_NOISY_WARNINGS)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4251") #class 'std::XXX' needs to have dll-interface to be used by clients of YYY
endif()
endif()
if (MSVC AND ENABLE_MULTI_PROCESSOR_COMPILATION)
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP")
endif()
endif()

@ -818,9 +818,57 @@ Performs linear blending of two images.
:param result: Destination image.
:param stream: Stream for the asynchronous version.
gpu::bilateralFilter
-------------------
Performs bilateral filtering of the passed image.
.. ocv:function:: void gpu::bilateralFilter(const GpuMat& src, GpuMat& dst, int kernel_size, float sigma_color, float sigma_spatial, int borderMode, Stream& stream = Stream::Null());
:param src: Source image. Supports only (channels() != 2 && depth() != CV_8S && depth() != CV_32S && depth() != CV_64F).
:param dst: Destination image.
:param kernel_size: Kernel window size.
:param sigma_color: Filter sigma in the color space.
:param sigma_spatial: Filter sigma in the coordinate space.
:param borderMode: Border type. See :ocv:func:`borderInterpolate` for details. ``BORDER_REFLECT101`` , ``BORDER_REPLICATE`` , ``BORDER_CONSTANT`` , ``BORDER_REFLECT`` and ``BORDER_WRAP`` are supported for now.
:param stream: Stream for the asynchronous version.
.. seealso::
:ocv:func:`bilateralFilter`
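A minimal usage sketch, assuming opencv2/gpu/gpu.hpp is included (the file name and the parameter values 5, 10 and 3.5 are illustrative only, not part of this change)::

    cv::Mat src = cv::imread("image.png");   // 8-bit, 3-channel input
    cv::gpu::GpuMat d_src(src), d_dst;
    // kernel_size = 5, sigma_color = 10, sigma_spatial = 3.5, default border mode
    cv::gpu::bilateralFilter(d_src, d_dst, 5, 10.f, 3.5f);
    cv::Mat dst;
    d_dst.download(dst);                      // copy the filtered result back to the host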
gpu::nonLocalMeans
-------------------
Performs pure non-local means denoising without any simplification, and therefore it is not fast.
.. ocv:function:: void gpu::nonLocalMeans(const GpuMat& src, GpuMat& dst, float h, int search_widow_size = 11, int block_size = 7, int borderMode = BORDER_DEFAULT, Stream& s = Stream::Null());
:param src: Source image. Supports only CV_8UC1 and CV_8UC3.
:param dst: Destination image.
:param h: Filter sigma regulating filter strength for color.
:param search_widow_size: Size of search window.
:param block_size: Size of block used for computing weights.
:param borderMode: Border type. See :ocv:func:`borderInterpolate` for details. ``BORDER_REFLECT101`` , ``BORDER_REPLICATE`` , ``BORDER_CONSTANT`` , ``BORDER_REFLECT`` and ``BORDER_WRAP`` are supported for now.
:param stream: Stream for the asynchronous version.
.. seealso::
:ocv:func:`fastNlMeansDenoising`
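A minimal usage sketch, assuming opencv2/gpu/gpu.hpp is included (the file name and the filter strength h = 10 are illustrative only, not part of this change)::

    cv::Mat noisy = cv::imread("noisy.png");   // CV_8UC3 input
    cv::gpu::GpuMat d_src(noisy), d_dst;
    // h = 10, default 11x11 search window, 7x7 blocks, default border mode
    cv::gpu::nonLocalMeans(d_src, d_dst, 10.f);
    cv::Mat denoised;
    d_dst.download(denoised);                  // copy the denoised result back to the host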
gpu::alphaComp
-------------------
Composites two images using alpha opacity values contained in each image.

@ -769,6 +769,14 @@ CV_EXPORTS void pyrUp(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::N
CV_EXPORTS void blendLinear(const GpuMat& img1, const GpuMat& img2, const GpuMat& weights1, const GpuMat& weights2,
GpuMat& result, Stream& stream = Stream::Null());
//! Performs bilateral filtering of the passed image
CV_EXPORTS void bilateralFilter(const GpuMat& src, GpuMat& dst, int kernel_size, float sigma_color, float sigma_spatial,
int borderMode = BORDER_DEFAULT, Stream& stream = Stream::Null());
//! Brute force non-local means algorithm (slow but universal)
CV_EXPORTS void nonLocalMeans(const GpuMat& src, GpuMat& dst, float h,
int search_widow_size = 11, int block_size = 7, int borderMode = BORDER_DEFAULT, Stream& s = Stream::Null());
struct CV_EXPORTS CannyBuf;

@ -882,7 +882,7 @@ PERF_TEST_P(Sz_Depth, Core_BitwiseAndMat, Combine(GPU_TYPICAL_MAT_SIZES, Values(
//////////////////////////////////////////////////////////////////////
// BitwiseAndScalar
PERF_TEST_P(Sz_Depth_Cn, Core_BitwiseAndScalar, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32S), Values(1, 3, 4)))
PERF_TEST_P(Sz_Depth_Cn, Core_BitwiseAndScalar, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32S), GPU_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
@ -963,7 +963,7 @@ PERF_TEST_P(Sz_Depth, Core_BitwiseOrMat, Combine(GPU_TYPICAL_MAT_SIZES, Values(C
//////////////////////////////////////////////////////////////////////
// BitwiseOrScalar
PERF_TEST_P(Sz_Depth_Cn, Core_BitwiseOrScalar, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32S), Values(1, 3, 4)))
PERF_TEST_P(Sz_Depth_Cn, Core_BitwiseOrScalar, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32S), GPU_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
@ -1044,7 +1044,7 @@ PERF_TEST_P(Sz_Depth, Core_BitwiseXorMat, Combine(GPU_TYPICAL_MAT_SIZES, Values(
//////////////////////////////////////////////////////////////////////
// BitwiseXorScalar
PERF_TEST_P(Sz_Depth_Cn, Core_BitwiseXorScalar, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32S), Values(1, 3, 4)))
PERF_TEST_P(Sz_Depth_Cn, Core_BitwiseXorScalar, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32S), GPU_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
@ -1085,7 +1085,7 @@ PERF_TEST_P(Sz_Depth_Cn, Core_BitwiseXorScalar, Combine(GPU_TYPICAL_MAT_SIZES, V
//////////////////////////////////////////////////////////////////////
// RShift
PERF_TEST_P(Sz_Depth_Cn, Core_RShift, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32S), Values(1, 3, 4)))
PERF_TEST_P(Sz_Depth_Cn, Core_RShift, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32S), GPU_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
@ -1119,7 +1119,7 @@ PERF_TEST_P(Sz_Depth_Cn, Core_RShift, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8
//////////////////////////////////////////////////////////////////////
// LShift
PERF_TEST_P(Sz_Depth_Cn, Core_LShift, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32S), Values(1, 3, 4)))
PERF_TEST_P(Sz_Depth_Cn, Core_LShift, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32S), GPU_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
@ -1461,7 +1461,7 @@ DEF_PARAM_TEST(Sz_Depth_Cn_Code, cv::Size, MatDepth, int, FlipCode);
PERF_TEST_P(Sz_Depth_Cn_Code, Core_Flip, Combine(
GPU_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
Values(1, 3, 4),
GPU_CHANNELS_1_3_4,
ALL_FLIP_CODES))
{
cv::Size size = GET_PARAM(0);
@ -1973,7 +1973,7 @@ PERF_TEST_P(Sz_Norm, Core_NormDiff, Combine(
PERF_TEST_P(Sz_Depth_Cn, Core_Sum, Combine(
GPU_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
Values(1, 3, 4)))
GPU_CHANNELS_1_3_4))
{
cv::Size size = GET_PARAM(0);
int depth = GET_PARAM(1);
@ -2015,7 +2015,7 @@ PERF_TEST_P(Sz_Depth_Cn, Core_Sum, Combine(
PERF_TEST_P(Sz_Depth_Cn, Core_SumAbs, Combine(
GPU_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
Values(1, 3, 4)))
GPU_CHANNELS_1_3_4))
{
cv::Size size = GET_PARAM(0);
int depth = GET_PARAM(1);
@ -2052,7 +2052,7 @@ PERF_TEST_P(Sz_Depth_Cn, Core_SumAbs, Combine(
PERF_TEST_P(Sz_Depth_Cn, Core_SumSqr, Combine(
GPU_TYPICAL_MAT_SIZES,
Values<MatDepth>(CV_8U, CV_16U, CV_32F),
Values(1, 3, 4)))
GPU_CHANNELS_1_3_4))
{
cv::Size size = GET_PARAM(0);
int depth = GET_PARAM(1);

@ -0,0 +1,98 @@
#include "perf_precomp.hpp"
using namespace std;
using namespace testing;
//////////////////////////////////////////////////////////////////////
// BilateralFilter
DEF_PARAM_TEST(Sz_Depth_Cn_KernelSz, cv::Size, MatDepth , int, int);
PERF_TEST_P(Sz_Depth_Cn_KernelSz, Denoising_BilateralFilter,
Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32F), GPU_CHANNELS_1_3_4, Values(3, 5, 9)))
{
declare.time(30.0);
cv::Size size = GET_PARAM(0);
int depth = GET_PARAM(1);
int channels = GET_PARAM(2);
int kernel_size = GET_PARAM(3);
float sigma_color = 7;
float sigma_spatial = 5;
int borderMode = cv::BORDER_REFLECT101;
int type = CV_MAKE_TYPE(depth, channels);
cv::Mat src(size, type);
fillRandom(src);
if (runOnGpu)
{
cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_dst;
cv::gpu::bilateralFilter(d_src, d_dst, kernel_size, sigma_color, sigma_spatial, borderMode);
TEST_CYCLE()
{
cv::gpu::bilateralFilter(d_src, d_dst, kernel_size, sigma_color, sigma_spatial, borderMode);
}
}
else
{
cv::Mat dst;
cv::bilateralFilter(src, dst, kernel_size, sigma_color, sigma_spatial, borderMode);
TEST_CYCLE()
{
cv::bilateralFilter(src, dst, kernel_size, sigma_color, sigma_spatial, borderMode);
}
}
}
//////////////////////////////////////////////////////////////////////
// nonLocalMeans
DEF_PARAM_TEST(Sz_Depth_Cn_WinSz_BlockSz, cv::Size, MatDepth , int, int, int);
PERF_TEST_P(Sz_Depth_Cn_WinSz_BlockSz, Denoising_NonLocalMeans,
Combine(GPU_TYPICAL_MAT_SIZES, Values<MatDepth>(CV_8U), Values(1), Values(21), Values(5, 7)))
{
declare.time(30.0);
cv::Size size = GET_PARAM(0);
int depth = GET_PARAM(1);
int channels = GET_PARAM(2);
int search_widow_size = GET_PARAM(3);
int block_size = GET_PARAM(4);
float h = 10;
int borderMode = cv::BORDER_REFLECT101;
int type = CV_MAKE_TYPE(depth, channels);
cv::Mat src(size, type);
fillRandom(src);
if (runOnGpu)
{
cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_dst;
cv::gpu::nonLocalMeans(d_src, d_dst, h, search_widow_size, block_size, borderMode);
TEST_CYCLE()
{
cv::gpu::nonLocalMeans(d_src, d_dst, h, search_widow_size, block_size, borderMode);
}
}
else
{
FAIL();
}
}

@ -54,7 +54,7 @@ DEF_PARAM_TEST(Sz_Depth_Cn_Inter_Border_Mode, cv::Size, MatDepth, int, Interpola
PERF_TEST_P(Sz_Depth_Cn_Inter_Border_Mode, ImgProc_Remap, Combine(
GPU_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
Values(1, 3, 4),
GPU_CHANNELS_1_3_4,
Values(Interpolation(cv::INTER_NEAREST), Interpolation(cv::INTER_LINEAR), Interpolation(cv::INTER_CUBIC)),
ALL_BORDER_MODES,
ALL_REMAP_MODES))
@ -113,7 +113,7 @@ DEF_PARAM_TEST(Sz_Depth_Cn_Inter_Scale, cv::Size, MatDepth, int, Interpolation,
PERF_TEST_P(Sz_Depth_Cn_Inter_Scale, ImgProc_Resize, Combine(
GPU_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
Values(1, 3, 4),
GPU_CHANNELS_1_3_4,
ALL_INTERPOLATIONS,
Values(0.5, 0.3, 2.0)))
{
@ -163,7 +163,7 @@ DEF_PARAM_TEST(Sz_Depth_Cn_Scale, cv::Size, MatDepth, int, double);
PERF_TEST_P(Sz_Depth_Cn_Scale, ImgProc_ResizeArea, Combine(
GPU_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
Values(1, 3, 4),
GPU_CHANNELS_1_3_4,
Values(0.2, 0.1, 0.05)))
{
declare.time(1.0);
@ -212,7 +212,7 @@ DEF_PARAM_TEST(Sz_Depth_Cn_Inter_Border, cv::Size, MatDepth, int, Interpolation,
PERF_TEST_P(Sz_Depth_Cn_Inter_Border, ImgProc_WarpAffine, Combine(
GPU_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
Values(1, 3, 4),
GPU_CHANNELS_1_3_4,
Values(Interpolation(cv::INTER_NEAREST), Interpolation(cv::INTER_LINEAR), Interpolation(cv::INTER_CUBIC)),
ALL_BORDER_MODES))
{
@ -265,7 +265,7 @@ PERF_TEST_P(Sz_Depth_Cn_Inter_Border, ImgProc_WarpAffine, Combine(
PERF_TEST_P(Sz_Depth_Cn_Inter_Border, ImgProc_WarpPerspective, Combine(
GPU_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
Values(1, 3, 4),
GPU_CHANNELS_1_3_4,
Values(Interpolation(cv::INTER_NEAREST), Interpolation(cv::INTER_LINEAR), Interpolation(cv::INTER_CUBIC)),
ALL_BORDER_MODES))
{
@ -321,7 +321,7 @@ DEF_PARAM_TEST(Sz_Depth_Cn_Border, cv::Size, MatDepth, int, BorderMode);
PERF_TEST_P(Sz_Depth_Cn_Border, ImgProc_CopyMakeBorder, Combine(
GPU_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
Values(1, 3, 4),
GPU_CHANNELS_1_3_4,
ALL_BORDER_MODES))
{
cv::Size size = GET_PARAM(0);
@ -789,7 +789,7 @@ PERF_TEST_P(Image, ImgProc_MeanShiftSegmentation, Values<string>("gpu/meanshift/
//////////////////////////////////////////////////////////////////////
// BlendLinear
PERF_TEST_P(Sz_Depth_Cn, ImgProc_BlendLinear, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_32F), Values(1, 3, 4)))
PERF_TEST_P(Sz_Depth_Cn, ImgProc_BlendLinear, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_32F), GPU_CHANNELS_1_3_4))
{
cv::Size size = GET_PARAM(0);
int depth = GET_PARAM(1);
@ -887,7 +887,7 @@ DEF_PARAM_TEST(Sz_TemplateSz_Cn_Method, cv::Size, cv::Size, int, TemplateMethod)
PERF_TEST_P(Sz_TemplateSz_Cn_Method, ImgProc_MatchTemplate8U, Combine(
GPU_TYPICAL_MAT_SIZES,
Values(cv::Size(5, 5), cv::Size(16, 16), cv::Size(30, 30)),
Values(1, 3, 4),
GPU_CHANNELS_1_3_4,
ALL_TEMPLATE_METHODS))
{
cv::Size size = GET_PARAM(0);
@ -933,7 +933,7 @@ PERF_TEST_P(Sz_TemplateSz_Cn_Method, ImgProc_MatchTemplate8U, Combine(
PERF_TEST_P(Sz_TemplateSz_Cn_Method, ImgProc_MatchTemplate32F, Combine(
GPU_TYPICAL_MAT_SIZES,
Values(cv::Size(5, 5), cv::Size(16, 16), cv::Size(30, 30)),
Values(1, 3, 4),
GPU_CHANNELS_1_3_4,
Values(TemplateMethod(cv::TM_SQDIFF), TemplateMethod(cv::TM_CCORR))))
{
cv::Size size = GET_PARAM(0);
@ -1287,7 +1287,7 @@ DEF_PARAM_TEST(Sz_Depth_Cn_Inter, cv::Size, MatDepth, int, Interpolation);
PERF_TEST_P(Sz_Depth_Cn_Inter, ImgProc_Rotate, Combine(
GPU_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
Values(1, 3, 4),
GPU_CHANNELS_1_3_4,
Values(Interpolation(cv::INTER_NEAREST), Interpolation(cv::INTER_LINEAR), Interpolation(cv::INTER_CUBIC))))
{
cv::Size size = GET_PARAM(0);
@ -1324,7 +1324,7 @@ PERF_TEST_P(Sz_Depth_Cn_Inter, ImgProc_Rotate, Combine(
PERF_TEST_P(Sz_Depth_Cn, ImgProc_PyrDown, Combine(
GPU_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
Values(1, 3, 4)))
GPU_CHANNELS_1_3_4))
{
cv::Size size = GET_PARAM(0);
int depth = GET_PARAM(1);
@ -1366,7 +1366,7 @@ PERF_TEST_P(Sz_Depth_Cn, ImgProc_PyrDown, Combine(
PERF_TEST_P(Sz_Depth_Cn, ImgProc_PyrUp, Combine(
GPU_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_32F),
Values(1, 3, 4)))
GPU_CHANNELS_1_3_4))
{
cv::Size size = GET_PARAM(0);
int depth = GET_PARAM(1);
@ -1540,7 +1540,7 @@ PERF_TEST_P(Sz_Type_Op, ImgProc_AlphaComp, Combine(GPU_TYPICAL_MAT_SIZES, Values
//////////////////////////////////////////////////////////////////////
// ImagePyramidBuild
PERF_TEST_P(Sz_Depth_Cn, ImgProc_ImagePyramidBuild, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32F), Values(1, 3, 4)))
PERF_TEST_P(Sz_Depth_Cn, ImgProc_ImagePyramidBuild, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32F), GPU_CHANNELS_1_3_4))
{
cv::Size size = GET_PARAM(0);
int depth = GET_PARAM(1);
@ -1573,7 +1573,7 @@ PERF_TEST_P(Sz_Depth_Cn, ImgProc_ImagePyramidBuild, Combine(GPU_TYPICAL_MAT_SIZE
//////////////////////////////////////////////////////////////////////
// ImagePyramidGetLayer
PERF_TEST_P(Sz_Depth_Cn, ImgProc_ImagePyramidGetLayer, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32F), Values(1, 3, 4)))
PERF_TEST_P(Sz_Depth_Cn, ImgProc_ImagePyramidGetLayer, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32F), GPU_CHANNELS_1_3_4))
{
cv::Size size = GET_PARAM(0);
int depth = GET_PARAM(1);

@ -8,7 +8,7 @@ namespace {
//////////////////////////////////////////////////////////////////////
// SetTo
PERF_TEST_P(Sz_Depth_Cn, MatOp_SetTo, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32F, CV_64F), Values(1, 3, 4)))
PERF_TEST_P(Sz_Depth_Cn, MatOp_SetTo, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32F, CV_64F), GPU_CHANNELS_1_3_4))
{
cv::Size size = GET_PARAM(0);
int depth = GET_PARAM(1);
@ -45,7 +45,7 @@ PERF_TEST_P(Sz_Depth_Cn, MatOp_SetTo, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8
//////////////////////////////////////////////////////////////////////
// SetToMasked
PERF_TEST_P(Sz_Depth_Cn, MatOp_SetToMasked, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32F, CV_64F), Values(1, 3, 4)))
PERF_TEST_P(Sz_Depth_Cn, MatOp_SetToMasked, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32F, CV_64F), GPU_CHANNELS_1_3_4))
{
cv::Size size = GET_PARAM(0);
int depth = GET_PARAM(1);
@ -87,7 +87,7 @@ PERF_TEST_P(Sz_Depth_Cn, MatOp_SetToMasked, Combine(GPU_TYPICAL_MAT_SIZES, Value
//////////////////////////////////////////////////////////////////////
// CopyToMasked
PERF_TEST_P(Sz_Depth_Cn, MatOp_CopyToMasked, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32F, CV_64F), Values(1, 3, 4)))
PERF_TEST_P(Sz_Depth_Cn, MatOp_CopyToMasked, Combine(GPU_TYPICAL_MAT_SIZES, Values(CV_8U, CV_16U, CV_32F, CV_64F), GPU_CHANNELS_1_3_4))
{
cv::Size size = GET_PARAM(0);
int depth = GET_PARAM(1);

@ -423,7 +423,7 @@ PERF_TEST_P(Video, Video_FGDStatModel, Values("gpu/video/768x576.avi", "gpu/vide
DEF_PARAM_TEST(Video_Cn_LearningRate, string, int, double);
PERF_TEST_P(Video_Cn_LearningRate, Video_MOG, Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"), Values(1, 3, 4), Values(0.0, 0.01)))
PERF_TEST_P(Video_Cn_LearningRate, Video_MOG, Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"), GPU_CHANNELS_1_3_4, Values(0.0, 0.01)))
{
string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
int cn = GET_PARAM(1);
@ -511,7 +511,7 @@ PERF_TEST_P(Video_Cn_LearningRate, Video_MOG, Combine(Values("gpu/video/768x576.
DEF_PARAM_TEST(Video_Cn, string, int);
PERF_TEST_P(Video_Cn, Video_MOG2, Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"), Values(1, 3, 4)))
PERF_TEST_P(Video_Cn, Video_MOG2, Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"), GPU_CHANNELS_1_3_4))
{
string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
int cn = GET_PARAM(1);
@ -596,7 +596,7 @@ PERF_TEST_P(Video_Cn, Video_MOG2, Combine(Values("gpu/video/768x576.avi", "gpu/v
//////////////////////////////////////////////////////
// MOG2GetBackgroundImage
PERF_TEST_P(Video_Cn, Video_MOG2GetBackgroundImage, Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"), Values(1, 3, 4)))
PERF_TEST_P(Video_Cn, Video_MOG2GetBackgroundImage, Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"), GPU_CHANNELS_1_3_4))
{
string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
int cn = GET_PARAM(1);
@ -676,7 +676,7 @@ PERF_TEST_P(Video_Cn, Video_MOG2GetBackgroundImage, Combine(Values("gpu/video/76
//////////////////////////////////////////////////////
// VIBE
PERF_TEST_P(Video_Cn, Video_VIBE, Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"), Values(1, 3, 4)))
PERF_TEST_P(Video_Cn, Video_VIBE, Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"), GPU_CHANNELS_1_3_4))
{
string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
int cn = GET_PARAM(1);
@ -739,7 +739,7 @@ PERF_TEST_P(Video_Cn, Video_VIBE, Combine(Values("gpu/video/768x576.avi", "gpu/v
DEF_PARAM_TEST(Video_Cn_MaxFeatures, string, int, int);
PERF_TEST_P(Video_Cn_MaxFeatures, Video_GMG, Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"), Values(1, 3, 4), Values(20, 40, 60)))
PERF_TEST_P(Video_Cn_MaxFeatures, Video_GMG, Combine(Values("gpu/video/768x576.avi", "gpu/video/1920x1080.avi"), GPU_CHANNELS_1_3_4, Values(20, 40, 60)))
{
std::string inputFile = perf::TestBase::getDataPath(GET_PARAM(0));
int cn = GET_PARAM(1);

@ -41,5 +41,6 @@ DEF_PARAM_TEST(Sz_Depth, cv::Size, MatDepth);
DEF_PARAM_TEST(Sz_Depth_Cn, cv::Size, MatDepth, int);
#define GPU_TYPICAL_MAT_SIZES testing::Values(perf::sz720p, perf::szSXGA, perf::sz1080p)
#define GPU_CHANNELS_1_3_4 testing::Values(1, 3, 4)
#endif // __OPENCV_PERF_GPU_UTILITY_HPP__

@ -12,6 +12,7 @@
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@ -28,7 +29,7 @@
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
@ -41,186 +42,155 @@
//M*/
#include "internal_shared.hpp"
#include "opencv2/gpu/device/limits.hpp"
namespace cv { namespace gpu { namespace device
{
namespace bilateral_filter
{
__constant__ float* ctable_color;
__constant__ float* ctable_space;
__constant__ size_t ctable_space_step;
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
__constant__ int cndisp;
__constant__ int cradius;
using namespace cv::gpu;
__constant__ short cedge_disc;
__constant__ short cmax_disc;
typedef unsigned char uchar;
typedef unsigned short ushort;
void load_constants(float* table_color, PtrStepSzf table_space, int ndisp, int radius, short edge_disc, short max_disc)
{
cudaSafeCall( cudaMemcpyToSymbol(ctable_color, &table_color, sizeof(table_color)) );
cudaSafeCall( cudaMemcpyToSymbol(ctable_space, &table_space.data, sizeof(table_space.data)) );
size_t table_space_step = table_space.step / sizeof(float);
cudaSafeCall( cudaMemcpyToSymbol(ctable_space_step, &table_space_step, sizeof(size_t)) );
cudaSafeCall( cudaMemcpyToSymbol(cndisp, &ndisp, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(cradius, &radius, sizeof(int)) );
//////////////////////////////////////////////////////////////////////////////////
/// Bilateral filtering
cudaSafeCall( cudaMemcpyToSymbol(cedge_disc, &edge_disc, sizeof(short)) );
cudaSafeCall( cudaMemcpyToSymbol(cmax_disc, &max_disc, sizeof(short)) );
}
template <int channels>
struct DistRgbMax
{
static __device__ __forceinline__ uchar calc(const uchar* a, const uchar* b)
{
uchar x = ::abs(a[0] - b[0]);
uchar y = ::abs(a[1] - b[1]);
uchar z = ::abs(a[2] - b[2]);
return (::max(::max(x, y), z));
}
};
namespace cv { namespace gpu { namespace device
{
namespace imgproc
{
__device__ __forceinline__ float norm_l1(const float& a) { return ::fabs(a); }
__device__ __forceinline__ float norm_l1(const float2& a) { return ::fabs(a.x) + ::fabs(a.y); }
__device__ __forceinline__ float norm_l1(const float3& a) { return ::fabs(a.x) + ::fabs(a.y) + ::fabs(a.z); }
__device__ __forceinline__ float norm_l1(const float4& a) { return ::fabs(a.x) + ::fabs(a.y) + ::fabs(a.z) + ::fabs(a.w); }
template <>
struct DistRgbMax<1>
{
static __device__ __forceinline__ uchar calc(const uchar* a, const uchar* b)
{
return ::abs(a[0] - b[0]);
}
};
__device__ __forceinline__ float sqr(const float& a) { return a * a; }
template <int channels, typename T>
__global__ void bilateral_filter(int t, T* disp, size_t disp_step, const uchar* img, size_t img_step, int h, int w)
template<typename T, typename B>
__global__ void bilateral_kernel(const PtrStepSz<T> src, PtrStep<T> dst, const B b, const int ksz, const float sigma_spatial2_inv_half, const float sigma_color2_inv_half)
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + t) & 1);
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type value_type;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
T dp[5];
if (x >= src.cols || y >= src.rows)
return;
if (y > 0 && y < h - 1 && x > 0 && x < w - 1)
{
dp[0] = *(disp + (y ) * disp_step + x + 0);
dp[1] = *(disp + (y-1) * disp_step + x + 0);
dp[2] = *(disp + (y ) * disp_step + x - 1);
dp[3] = *(disp + (y+1) * disp_step + x + 0);
dp[4] = *(disp + (y ) * disp_step + x + 1);
value_type center = saturate_cast<value_type>(src(y, x));
if(::abs(dp[1] - dp[0]) >= cedge_disc || ::abs(dp[2] - dp[0]) >= cedge_disc || ::abs(dp[3] - dp[0]) >= cedge_disc || ::abs(dp[4] - dp[0]) >= cedge_disc)
{
const int ymin = ::max(0, y - cradius);
const int xmin = ::max(0, x - cradius);
const int ymax = ::min(h - 1, y + cradius);
const int xmax = ::min(w - 1, x + cradius);
value_type sum1 = VecTraits<value_type>::all(0);
float sum2 = 0;
float cost[] = {0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
int r = ksz / 2;
float r2 = (float)(r * r);
const uchar* ic = img + y * img_step + channels * x;
int tx = x - r + ksz;
int ty = y - r + ksz;
for(int yi = ymin; yi <= ymax; yi++)
if (x - ksz/2 >=0 && y - ksz/2 >=0 && tx < src.cols && ty < src.rows)
{
for (int cy = y - r; cy < ty; ++cy)
for (int cx = x - r; cx < tx; ++cx)
{
const T* disp_y = disp + yi * disp_step;
for(int xi = xmin; xi <= xmax; xi++)
{
const uchar* in = img + yi * img_step + channels * xi;
uchar dist_rgb = DistRgbMax<channels>::calc(in, ic);
const float weight = ctable_color[dist_rgb] * (ctable_space + ::abs(y-yi)* ctable_space_step)[::abs(x-xi)];
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
if (space2 > r2)
continue;
const T disp_reg = disp_y[xi];
value_type value = saturate_cast<value_type>(src(cy, cx));
cost[0] += ::min(cmax_disc, ::abs(disp_reg - dp[0])) * weight;
cost[1] += ::min(cmax_disc, ::abs(disp_reg - dp[1])) * weight;
cost[2] += ::min(cmax_disc, ::abs(disp_reg - dp[2])) * weight;
cost[3] += ::min(cmax_disc, ::abs(disp_reg - dp[3])) * weight;
cost[4] += ::min(cmax_disc, ::abs(disp_reg - dp[4])) * weight;
}
float weight = ::exp(space2 * sigma_spatial2_inv_half + sqr(norm_l1(value - center)) * sigma_color2_inv_half);
sum1 = sum1 + weight * value;
sum2 = sum2 + weight;
}
}
else
{
for (int cy = y - r; cy < ty; ++cy)
for (int cx = x - r; cx < tx; ++cx)
{
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
if (space2 > r2)
continue;
float minimum = numeric_limits<float>::max();
int id = 0;
value_type value = saturate_cast<value_type>(b.at(cy, cx, src.data, src.step));
if (cost[0] < minimum)
{
minimum = cost[0];
id = 0;
}
if (cost[1] < minimum)
{
minimum = cost[1];
id = 1;
}
if (cost[2] < minimum)
{
minimum = cost[2];
id = 2;
}
if (cost[3] < minimum)
{
minimum = cost[3];
id = 3;
}
if (cost[4] < minimum)
{
minimum = cost[4];
id = 4;
}
float weight = ::exp(space2 * sigma_spatial2_inv_half + sqr(norm_l1(value - center)) * sigma_color2_inv_half);
*(disp + y * disp_step + x) = dp[id];
}
sum1 = sum1 + weight * value;
sum2 = sum2 + weight;
}
}
dst(y, x) = saturate_cast<T>(sum1 / sum2);
}
template <typename T>
void bilateral_filter_caller(PtrStepSz<T> disp, PtrStepSzb img, int channels, int iters, cudaStream_t stream)
template<typename T, template <typename> class B>
void bilateral_caller(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float sigma_spatial, float sigma_color, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(disp.cols, threads.x << 1);
grid.y = divUp(disp.rows, threads.y);
dim3 block (32, 8);
dim3 grid (divUp (src.cols, block.x), divUp (src.rows, block.y));
switch (channels)
{
case 1:
for (int i = 0; i < iters; ++i)
{
bilateral_filter<1><<<grid, threads, 0, stream>>>(0, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
cudaSafeCall( cudaGetLastError() );
bilateral_filter<1><<<grid, threads, 0, stream>>>(1, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
cudaSafeCall( cudaGetLastError() );
}
break;
case 3:
for (int i = 0; i < iters; ++i)
{
bilateral_filter<3><<<grid, threads, 0, stream>>>(0, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
cudaSafeCall( cudaGetLastError() );
bilateral_filter<3><<<grid, threads, 0, stream>>>(1, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
cudaSafeCall( cudaGetLastError() );
}
break;
default:
cv::gpu::error("Unsupported channels count", __FILE__, __LINE__, "bilateral_filter_caller");
}
B<T> b(src.rows, src.cols);
float sigma_spatial2_inv_half = -0.5f/(sigma_spatial * sigma_spatial);
float sigma_color2_inv_half = -0.5f/(sigma_color * sigma_color);
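// Note added for clarity: with these negative half-inverse coefficients, the kernel's
//   weight = ::exp(space2 * sigma_spatial2_inv_half + sqr(norm_l1(value - center)) * sigma_color2_inv_half)
// evaluates the classic bilateral weight
//   w = exp( -|p - q|^2 / (2 * sigma_spatial^2) - |I(p) - I(q)|_1^2 / (2 * sigma_color^2) )
// with a single exponential per neighbour.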
cudaSafeCall( cudaFuncSetCacheConfig (bilateral_kernel<T, B<T> >, cudaFuncCachePreferL1) );
bilateral_kernel<<<grid, block, 0, stream>>>((PtrStepSz<T>)src, (PtrStepSz<T>)dst, b, kernel_size, sigma_spatial2_inv_half, sigma_color2_inv_half);
cudaSafeCall ( cudaGetLastError () );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
void bilateral_filter_gpu(PtrStepSzb disp, PtrStepSzb img, int channels, int iters, cudaStream_t stream)
template<typename T>
void bilateral_filter_gpu(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float gauss_spatial_coeff, float gauss_color_coeff, int borderMode, cudaStream_t stream)
{
bilateral_filter_caller(disp, img, channels, iters, stream);
}
typedef void (*caller_t)(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float sigma_spatial, float sigma_color, cudaStream_t stream);
void bilateral_filter_gpu(PtrStepSz<short> disp, PtrStepSzb img, int channels, int iters, cudaStream_t stream)
{
bilateral_filter_caller(disp, img, channels, iters, stream);
static caller_t funcs[] =
{
bilateral_caller<T, BrdReflect101>,
bilateral_caller<T, BrdReplicate>,
bilateral_caller<T, BrdConstant>,
bilateral_caller<T, BrdReflect>,
bilateral_caller<T, BrdWrap>,
};
funcs[borderMode](src, dst, kernel_size, gauss_spatial_coeff, gauss_color_coeff, stream);
}
} // namespace bilateral_filter
}}} // namespace cv { namespace gpu { namespace device
}
}}}
#define OCV_INSTANTIATE_BILATERAL_FILTER(T) \
template void cv::gpu::device::imgproc::bilateral_filter_gpu<T>(const PtrStepSzb&, PtrStepSzb, int, float, float, int, cudaStream_t);
OCV_INSTANTIATE_BILATERAL_FILTER(uchar)
//OCV_INSTANTIATE_BILATERAL_FILTER(uchar2)
OCV_INSTANTIATE_BILATERAL_FILTER(uchar3)
OCV_INSTANTIATE_BILATERAL_FILTER(uchar4)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar2)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar3)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar4)
OCV_INSTANTIATE_BILATERAL_FILTER(short)
//OCV_INSTANTIATE_BILATERAL_FILTER(short2)
OCV_INSTANTIATE_BILATERAL_FILTER(short3)
OCV_INSTANTIATE_BILATERAL_FILTER(short4)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort)
//OCV_INSTANTIATE_BILATERAL_FILTER(ushort2)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort3)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort4)
//OCV_INSTANTIATE_BILATERAL_FILTER(int)
//OCV_INSTANTIATE_BILATERAL_FILTER(int2)
//OCV_INSTANTIATE_BILATERAL_FILTER(int3)
//OCV_INSTANTIATE_BILATERAL_FILTER(int4)
OCV_INSTANTIATE_BILATERAL_FILTER(float)
//OCV_INSTANTIATE_BILATERAL_FILTER(float2)
OCV_INSTANTIATE_BILATERAL_FILTER(float3)
OCV_INSTANTIATE_BILATERAL_FILTER(float4)

@ -0,0 +1,143 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "internal_shared.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
using namespace cv::gpu;
typedef unsigned char uchar;
typedef unsigned short ushort;
//////////////////////////////////////////////////////////////////////////////////
/// Non-local means denoising
namespace cv { namespace gpu { namespace device
{
namespace imgproc
{
__device__ __forceinline__ float norm2(const float& v) { return v*v; }
__device__ __forceinline__ float norm2(const float2& v) { return v.x*v.x + v.y*v.y; }
__device__ __forceinline__ float norm2(const float3& v) { return v.x*v.x + v.y*v.y + v.z*v.z; }
__device__ __forceinline__ float norm2(const float4& v) { return v.x*v.x + v.y*v.y + v.z*v.z + v.w*v.w; }
template<typename T, typename B>
__global__ void nlm_kernel(const PtrStepSz<T> src, PtrStep<T> dst, const B b, int search_radius, int block_radius, float h2_inv_half)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type value_type;
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x >= src.cols || y >= src.rows)
return;
float block_radius2_inv = -1.f/(block_radius * block_radius);
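// Note added for clarity: each candidate pixel (x + cx, y + cy) in the search window is weighted by
//   w = exp( -color2 / (2 * h^2 * cn) - (cx^2 + cy^2) / block_radius^2 ),
// where color2 is the summed squared difference between the two block neighbourhoods;
// h2_inv_half (precomputed by the caller) and block_radius2_inv fold the negative
// denominators into the single __expf() call below.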
value_type sum1 = VecTraits<value_type>::all(0);
float sum2 = 0.f;
for(float cy = -search_radius; cy <= search_radius; ++cy)
for(float cx = -search_radius; cx <= search_radius; ++cx)
{
float color2 = 0;
for(float by = -block_radius; by <= block_radius; ++by)
for(float bx = -block_radius; bx <= block_radius; ++bx)
{
value_type v1 = saturate_cast<value_type>(src(y + by, x + bx));
value_type v2 = saturate_cast<value_type>(src(y + cy + by, x + cx + bx));
color2 += norm2(v1 - v2);
}
float dist2 = cx * cx + cy * cy;
float w = __expf(color2 * h2_inv_half + dist2 * block_radius2_inv);
sum1 = sum1 + saturate_cast<value_type>(src(y + cy, x + cx)) * w;
sum2 += w;
}
dst(y, x) = saturate_cast<T>(sum1 / sum2);
}
template<typename T, template <typename> class B>
void nlm_caller(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, cudaStream_t stream)
{
dim3 block (32, 8);
dim3 grid (divUp (src.cols, block.x), divUp (src.rows, block.y));
B<T> b(src.rows, src.cols);
float h2_inv_half = -0.5f/(h * h * VecTraits<T>::cn);
cudaSafeCall( cudaFuncSetCacheConfig (nlm_kernel<T, B<T> >, cudaFuncCachePreferL1) );
nlm_kernel<<<grid, block, 0, stream>>>((PtrStepSz<T>)src, (PtrStepSz<T>)dst, b, search_radius, block_radius, h2_inv_half);
cudaSafeCall ( cudaGetLastError () );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template<typename T>
void nlm_bruteforce_gpu(const PtrStepSzb& src, PtrStepSzb dst, int search_radius, int block_radius, float h, int borderMode, cudaStream_t stream)
{
typedef void (*func_t)(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, cudaStream_t stream);
static func_t funcs[] =
{
nlm_caller<T, BrdReflect101>,
nlm_caller<T, BrdReplicate>,
nlm_caller<T, BrdConstant>,
nlm_caller<T, BrdReflect>,
nlm_caller<T, BrdWrap>,
};
funcs[borderMode](src, dst, search_radius, block_radius, h, stream);
}
template void nlm_bruteforce_gpu<uchar>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, cudaStream_t);
template void nlm_bruteforce_gpu<uchar3>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, cudaStream_t);
}
}}}

@ -0,0 +1,135 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
using namespace cv;
using namespace cv::gpu;
#if !defined (HAVE_CUDA)
void cv::gpu::bilateralFilter(const GpuMat&, GpuMat&, int, float, float, int, Stream&) { throw_nogpu(); }
void cv::gpu::nonLocalMeans(const GpuMat&, GpuMat&, float, int, int, int, Stream&) { throw_nogpu(); }
#else
namespace cv { namespace gpu { namespace device
{
namespace imgproc
{
template<typename T>
void bilateral_filter_gpu(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float sigma_spatial, float sigma_color, int borderMode, cudaStream_t stream);
template<typename T>
void nlm_bruteforce_gpu(const PtrStepSzb& src, PtrStepSzb dst, int search_radius, int block_radius, float h, int borderMode, cudaStream_t stream);
}
}}}
void cv::gpu::bilateralFilter(const GpuMat& src, GpuMat& dst, int kernel_size, float sigma_color, float sigma_spatial, int borderMode, Stream& s)
{
using cv::gpu::device::imgproc::bilateral_filter_gpu;
typedef void (*func_t)(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float sigma_spatial, float sigma_color, int borderMode, cudaStream_t s);
static const func_t funcs[6][4] =
{
{bilateral_filter_gpu<uchar> , 0 /*bilateral_filter_gpu<uchar2>*/ , bilateral_filter_gpu<uchar3> , bilateral_filter_gpu<uchar4> },
{0 /*bilateral_filter_gpu<schar>*/, 0 /*bilateral_filter_gpu<schar2>*/ , 0 /*bilateral_filter_gpu<schar3>*/, 0 /*bilateral_filter_gpu<schar4>*/},
{bilateral_filter_gpu<ushort> , 0 /*bilateral_filter_gpu<ushort2>*/, bilateral_filter_gpu<ushort3> , bilateral_filter_gpu<ushort4> },
{bilateral_filter_gpu<short> , 0 /*bilateral_filter_gpu<short2>*/ , bilateral_filter_gpu<short3> , bilateral_filter_gpu<short4> },
{0 /*bilateral_filter_gpu<int>*/ , 0 /*bilateral_filter_gpu<int2>*/ , 0 /*bilateral_filter_gpu<int3>*/ , 0 /*bilateral_filter_gpu<int4>*/ },
{bilateral_filter_gpu<float> , 0 /*bilateral_filter_gpu<float2>*/ , bilateral_filter_gpu<float3> , bilateral_filter_gpu<float4> }
};
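// Note added for clarity: the defaults below mirror the CPU cv::bilateralFilter convention:
// non-positive sigmas fall back to 1, and a non-positive kernel_size is derived from
// sigma_spatial as radius = cvRound(1.5 * sigma_spatial), always yielding an odd window size.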
sigma_color = (sigma_color <= 0 ) ? 1 : sigma_color;
sigma_spatial = (sigma_spatial <= 0 ) ? 1 : sigma_spatial;
int radius = (kernel_size <= 0) ? cvRound(sigma_spatial*1.5) : kernel_size/2;
kernel_size = std::max(radius, 1)*2 + 1;
CV_Assert(src.depth() <= CV_32F && src.channels() <= 4);
const func_t func = funcs[src.depth()][src.channels() - 1];
CV_Assert(func != 0);
CV_Assert(borderMode == BORDER_REFLECT101 || borderMode == BORDER_REPLICATE || borderMode == BORDER_CONSTANT || borderMode == BORDER_REFLECT || borderMode == BORDER_WRAP);
int gpuBorderType;
CV_Assert(tryConvertToGpuBorderType(borderMode, gpuBorderType));
dst.create(src.size(), src.type());
func(src, dst, kernel_size, sigma_spatial, sigma_color, gpuBorderType, StreamAccessor::getStream(s));
}
void cv::gpu::nonLocalMeans(const GpuMat& src, GpuMat& dst, float h, int search_window_size, int block_size, int borderMode, Stream& s)
{
using cv::gpu::device::imgproc::nlm_bruteforce_gpu;
typedef void (*func_t)(const PtrStepSzb& src, PtrStepSzb dst, int search_radius, int block_radius, float h, int borderMode, cudaStream_t stream);
static const func_t funcs[4] = { nlm_bruteforce_gpu<uchar>, 0 /*nlm_bruteforce_gpu<uchar2>*/ , nlm_bruteforce_gpu<uchar3>, 0/*nlm_bruteforce_gpu<uchar4>,*/ };
CV_Assert(src.type() == CV_8U || src.type() == CV_8UC3);
const func_t func = funcs[src.channels() - 1];
CV_Assert(func != 0);
int b = borderMode;
CV_Assert(b == BORDER_REFLECT101 || b == BORDER_REPLICATE || b == BORDER_CONSTANT || b == BORDER_REFLECT || b == BORDER_WRAP);
int gpuBorderType;
CV_Assert(tryConvertToGpuBorderType(borderMode, gpuBorderType));
int search_radius = search_window_size/2;
int block_radius = block_size/2;
dst.create(src.size(), src.type());
func(src, dst, search_radius, block_radius, h, gpuBorderType, StreamAccessor::getStream(s));
}
#endif

@ -239,8 +239,8 @@ void cv::gpu::HoughCircles(const GpuMat& src, GpuMat& circles, HoughCirclesBuf&
for(size_t j = 0; j < m.size(); ++j)
{
float dx = p.x - m[j].x;
float dy = p.y - m[j].y;
float dx = (float)(p.x - m[j].x);
float dy = (float)(p.y - m[j].y);
if (dx * dx + dy * dy < minDist)
{

@ -47,6 +47,7 @@
#include "saturate_cast.hpp"
#include "vec_traits.hpp"
#include "type_traits.hpp"
#include "device_functions.h"
namespace cv { namespace gpu { namespace device
{
@ -408,6 +409,7 @@ namespace cv { namespace gpu { namespace device
OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR(pow, ::pow)
#undef OPENCV_GPU_IMPLEMENT_UN_FUNCTOR
#undef OPENCV_GPU_IMPLEMENT_UN_FUNCTOR_NO_DOUBLE
#undef OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR
template<typename T> struct hypot_sqr_func : binary_function<T, T, float>

@ -0,0 +1,140 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#ifdef HAVE_CUDA
////////////////////////////////////////////////////////
// BilateralFilter
PARAM_TEST_CASE(BilateralFilter, cv::gpu::DeviceInfo, cv::Size, MatType)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
int kernel_size;
float sigma_color;
float sigma_spatial;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
kernel_size = 5;
sigma_color = 10.f;
sigma_spatial = 3.5f;
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(BilateralFilter, Accuracy)
{
cv::Mat src = randomMat(size, type);
//cv::Mat src = readImage("hog/road.png", cv::IMREAD_GRAYSCALE);
//cv::Mat src = readImage("csstereobp/aloe-R.png", cv::IMREAD_GRAYSCALE);
src.convertTo(src, type);
cv::gpu::GpuMat dst;
cv::gpu::bilateralFilter(loadMat(src), dst, kernel_size, sigma_color, sigma_spatial);
cv::Mat dst_gold;
cv::bilateralFilter(src, dst_gold, kernel_size, sigma_color, sigma_spatial);
EXPECT_MAT_NEAR(dst_gold, dst, src.depth() == CV_32F ? 1e-3 : 1.0);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, BilateralFilter, testing::Combine(
ALL_DEVICES,
testing::Values(cv::Size(128, 128), cv::Size(113, 113), cv::Size(639, 481)),
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_32FC1), MatType(CV_32FC3))
));
////////////////////////////////////////////////////////
// Brute Force Non local means
struct NonLocalMeans: testing::TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(NonLocalMeans, Regression)
{
using cv::gpu::GpuMat;
cv::Mat bgr = readImage("denoising/lena_noised_gaussian_sigma=20_multi_0.png", cv::IMREAD_COLOR);
ASSERT_FALSE(bgr.empty());
cv::Mat gray;
cv::cvtColor(bgr, gray, CV_BGR2GRAY);
GpuMat dbgr, dgray;
cv::gpu::nonLocalMeans(GpuMat(bgr), dbgr, 10);
cv::gpu::nonLocalMeans(GpuMat(gray), dgray, 10);
#if 0
dumpImage("denoising/denoised_lena_bgr.png", cv::Mat(dbgr));
dumpImage("denoising/denoised_lena_gray.png", cv::Mat(dgray));
#endif
cv::Mat bgr_gold = readImage("denoising/denoised_lena_bgr.png", cv::IMREAD_COLOR);
cv::Mat gray_gold = readImage("denoising/denoised_lena_gray.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(bgr_gold.empty() || gray_gold.empty());
EXPECT_MAT_NEAR(bgr_gold, dbgr, 1e-4);
EXPECT_MAT_NEAR(gray_gold, dgray, 1e-4);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, NonLocalMeans, ALL_DEVICES);
#endif // HAVE_CUDA

@ -127,6 +127,14 @@ Mat readImageType(const std::string& fname, int type)
return src;
}
//////////////////////////////////////////////////////////////////////
// Image dumping
void dumpImage(const std::string& fileName, const cv::Mat& image)
{
cv::imwrite(TS::ptr()->get_data_path() + fileName, image);
}
//////////////////////////////////////////////////////////////////////
// Gpu devices

@ -74,6 +74,11 @@ cv::Mat readImage(const std::string& fileName, int flags = cv::IMREAD_COLOR);
//! read image from testdata folder and convert it to specified type
cv::Mat readImageType(const std::string& fname, int type);
//////////////////////////////////////////////////////////////////////
// Image dumping
void dumpImage(const std::string& fileName, const cv::Mat& image);
//////////////////////////////////////////////////////////////////////
// Gpu devices

@ -1285,6 +1285,8 @@ void cv::medianBlur( InputArray _src0, OutputArray _dst, int ksize )
Bilateral Filtering
\****************************************************************************************/
#undef CV_SSE3
namespace cv
{
