gpuimgproc module fixes

pull/836/head
Vladislav Vinogradov 12 years ago
parent b4f3d08725
commit c0b3424a23
Changed files (number of changed lines in parentheses):

  1. modules/gpuimgproc/CMakeLists.txt (4)
  2. modules/gpuimgproc/doc/color.rst (74)
  3. modules/gpuimgproc/doc/feature_detection.rst (81)
  4. modules/gpuimgproc/doc/gpuimgproc.rst (12)
  5. modules/gpuimgproc/doc/histogram.rst (104)
  6. modules/gpuimgproc/doc/hough.rst (96)
  7. modules/gpuimgproc/doc/image_processing.rst (589)
  8. modules/gpuimgproc/doc/imgproc.rst (203)
  9. modules/gpuimgproc/include/opencv2/gpuimgproc.hpp (211)
  10. modules/gpuimgproc/perf/perf_bilateral_filter.cpp (93)
  11. modules/gpuimgproc/perf/perf_blend.cpp (86)
  12. modules/gpuimgproc/perf/perf_canny.cpp (87)
  13. modules/gpuimgproc/perf/perf_color.cpp (252)
  14. modules/gpuimgproc/perf/perf_corners.cpp (137)
  15. modules/gpuimgproc/perf/perf_gftt.cpp (86)
  16. modules/gpuimgproc/perf/perf_histogram.cpp (221)
  17. modules/gpuimgproc/perf/perf_hough.cpp (317)
  18. modules/gpuimgproc/perf/perf_imgproc.cpp (1133)
  19. modules/gpuimgproc/perf/perf_match_template.cpp (131)
  20. modules/gpuimgproc/perf/perf_mean_shift.cpp (152)
  21. modules/gpuimgproc/src/blend.cpp (3)
  22. modules/gpuimgproc/src/canny.cpp (186)
  23. modules/gpuimgproc/src/color.cpp (93)
  24. modules/gpuimgproc/src/corners.cpp (149)
  25. modules/gpuimgproc/src/cuda/corners.cu (138)
  26. modules/gpuimgproc/src/cuda/mean_shift.cu (182)
  27. modules/gpuimgproc/src/gftt.cpp (9)
  28. modules/gpuimgproc/src/histogram.cpp (408)
  29. modules/gpuimgproc/src/match_template.cpp (46)
  30. modules/gpuimgproc/src/mean_shift.cpp (128)
  31. modules/gpuimgproc/src/precomp.hpp (7)
  32. modules/gpuimgproc/test/test_bilateral_filter.cpp (97)
  33. modules/gpuimgproc/test/test_blend.cpp (124)
  34. modules/gpuimgproc/test/test_canny.cpp (114)
  35. modules/gpuimgproc/test/test_corners.cpp (145)
  36. modules/gpuimgproc/test/test_gftt.cpp (131)
  37. modules/gpuimgproc/test/test_histogram.cpp (227)
  38. modules/gpuimgproc/test/test_imgproc.cpp (890)
  39. modules/gpuimgproc/test/test_match_template.cpp (305)
  40. modules/gpuimgproc/test/test_mean_shift.cpp (174)

@@ -4,6 +4,6 @@ endif()
set(the_description "GPU-accelerated Image Processing")
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wmissing-declarations -Wshadow -Wunused-parameter)
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4100 /wd4324 /wd4512 -Wundef -Wmissing-declarations -Wshadow -Wunused-parameter)
ocv_define_module(gpuimgproc opencv_imgproc opencv_gpuarithm opencv_gpufilters)
ocv_define_module(gpuimgproc opencv_imgproc opencv_gpufilters OPTIONAL opencv_gpuarithm)

@@ -0,0 +1,74 @@
Color space processing
======================
.. highlight:: cpp
gpu::cvtColor
-----------------
Converts an image from one color space to another.
.. ocv:function:: void gpu::cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn = 0, Stream& stream = Stream::Null())
:param src: Source image with ``CV_8U`` , ``CV_16U`` , or ``CV_32F`` depth and 1, 3, or 4 channels.
:param dst: Destination image with the same size and depth as ``src`` .
:param code: Color space conversion code. For details, see :ocv:func:`cvtColor` . Conversion to/from Luv and Bayer color spaces is not supported.
:param dcn: Number of channels in the destination image. If the parameter is 0, the number of the channels is derived automatically from ``src`` and the ``code`` .
:param stream: Stream for the asynchronous version.
3-channel color spaces (like ``HSV``, ``XYZ``, and so on) can be stored in a 4-channel image for better performance.
.. seealso:: :ocv:func:`cvtColor`
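A minimal sketch of a GPU color conversion; the conversion code and the assumption that the input has already been uploaded to a ``CV_8UC3`` ``GpuMat`` are illustrative. ::

    cv::gpu::GpuMat d_src;   // CV_8UC3 BGR image, e.g. filled with d_src.upload(h_src)
    cv::gpu::GpuMat d_gray;

    // BGR -> grayscale on the GPU; dcn is derived from the conversion code
    cv::gpu::cvtColor(d_src, d_gray, cv::COLOR_BGR2GRAY);

    cv::Mat h_gray;
    d_gray.download(h_gray);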
gpu::swapChannels
-----------------
Exchanges the color channels of an image in-place.
.. ocv:function:: void gpu::swapChannels(GpuMat& image, const int dstOrder[4], Stream& stream = Stream::Null())
:param image: Source image. Supports only ``CV_8UC4`` type.
:param dstOrder: Integer array describing how channel values are permuted. The n-th entry of the array contains the number of the channel that is stored in the n-th channel of the output image. For example, given an RGBA image, ``dstOrder = [3,2,1,0]`` converts it to ABGR channel order.
:param stream: Stream for the asynchronous version.
The methods support arbitrary permutations of the original channels, including replication.
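The sketch below reorders an RGBA image in place using the permutation from the example above; it assumes the ``CV_8UC4`` image has already been uploaded to the GPU. ::

    cv::gpu::GpuMat d_image;              // CV_8UC4, RGBA channel order
    const int dstOrder[4] = {3, 2, 1, 0}; // RGBA -> ABGR
    cv::gpu::swapChannels(d_image, dstOrder);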
gpu::alphaComp
-------------------
Composites two images using alpha opacity values contained in each image.
.. ocv:function:: void gpu::alphaComp(const GpuMat& img1, const GpuMat& img2, GpuMat& dst, int alpha_op, Stream& stream = Stream::Null())
:param img1: First image. Supports ``CV_8UC4`` , ``CV_16UC4`` , ``CV_32SC4`` and ``CV_32FC4`` types.
:param img2: Second image. Must have the same size and the same type as ``img1`` .
:param dst: Destination image.
:param alpha_op: Flag specifying the alpha-blending operation:
* **ALPHA_OVER**
* **ALPHA_IN**
* **ALPHA_OUT**
* **ALPHA_ATOP**
* **ALPHA_XOR**
* **ALPHA_PLUS**
* **ALPHA_OVER_PREMUL**
* **ALPHA_IN_PREMUL**
* **ALPHA_OUT_PREMUL**
* **ALPHA_ATOP_PREMUL**
* **ALPHA_XOR_PREMUL**
* **ALPHA_PLUS_PREMUL**
* **ALPHA_PREMUL**
:param stream: Stream for the asynchronous version.
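A minimal sketch, assuming both inputs are ``CV_8UC4`` images with meaningful alpha channels that have already been uploaded to the GPU. ::

    cv::gpu::GpuMat d_img1, d_img2;  // CV_8UC4 sources with alpha
    cv::gpu::GpuMat d_dst;
    cv::gpu::alphaComp(d_img1, d_img2, d_dst, cv::gpu::ALPHA_OVER);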

@@ -0,0 +1,81 @@
Feature Detection
=================
.. highlight:: cpp
gpu::cornerHarris
---------------------
Computes the Harris cornerness criteria at each image pixel.
.. ocv:function:: void gpu::cornerHarris(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, double k, int borderType=BORDER_REFLECT101)
:param src: Source image. Only ``CV_8UC1`` and ``CV_32FC1`` images are supported for now.
:param dst: Destination image containing cornerness values. It has the same size as ``src`` and ``CV_32FC1`` type.
:param blockSize: Neighborhood size.
:param ksize: Aperture parameter for the Sobel operator.
:param k: Harris detector free parameter.
:param borderType: Pixel extrapolation method. Only ``BORDER_REFLECT101`` and ``BORDER_REPLICATE`` are supported for now.
.. seealso:: :ocv:func:`cornerHarris`
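A minimal sketch with illustrative parameter values; the source is assumed to be a ``CV_8UC1`` image already uploaded to the GPU. ::

    cv::gpu::GpuMat d_src;  // CV_8UC1 grayscale input
    cv::gpu::GpuMat d_dst;  // CV_32FC1 cornerness map, same size as d_src
    cv::gpu::cornerHarris(d_src, d_dst, 5 /*blockSize*/, 3 /*ksize*/, 0.04 /*k*/);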
gpu::cornerMinEigenVal
--------------------------
Computes the minimum eigenvalue of a 2x2 derivative covariance matrix at each pixel (the cornerness criteria).
.. ocv:function:: void gpu::cornerMinEigenVal(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, int borderType=BORDER_REFLECT101)
.. ocv:function:: void gpu::cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, int borderType=BORDER_REFLECT101)
.. ocv:function:: void gpu::cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize, int borderType=BORDER_REFLECT101, Stream& stream = Stream::Null())
:param src: Source image. Only ``CV_8UC1`` and ``CV_32FC1`` images are supported for now.
:param dst: Destination image containing cornerness values. It has the same size as ``src`` and ``CV_32FC1`` type.
:param blockSize: Neighborhood size.
:param ksize: Aperture parameter for the Sobel operator.
:param borderType: Pixel extrapolation method. Only ``BORDER_REFLECT101`` and ``BORDER_REPLICATE`` are supported for now.
.. seealso:: :ocv:func:`cornerMinEigenVal`
gpu::GoodFeaturesToTrackDetector_GPU
------------------------------------
.. ocv:class:: gpu::GoodFeaturesToTrackDetector_GPU
Class used for detecting strong corners in an image. ::
class GoodFeaturesToTrackDetector_GPU
{
public:
explicit GoodFeaturesToTrackDetector_GPU(int maxCorners_ = 1000, double qualityLevel_ = 0.01, double minDistance_ = 0.0,
int blockSize_ = 3, bool useHarrisDetector_ = false, double harrisK_ = 0.04);
void operator ()(const GpuMat& image, GpuMat& corners, const GpuMat& mask = GpuMat());
int maxCorners;
double qualityLevel;
double minDistance;
int blockSize;
bool useHarrisDetector;
double harrisK;
void releaseMemory();
};
The class finds the most prominent corners in the image.
.. seealso:: :ocv:func:`goodFeaturesToTrack`
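A minimal usage sketch; the detector parameters are illustrative, and the input is assumed to be a ``CV_8UC1`` image already on the GPU. ::

    cv::gpu::GoodFeaturesToTrackDetector_GPU detector(500 /*maxCorners*/, 0.01 /*qualityLevel*/, 10.0 /*minDistance*/);

    cv::gpu::GpuMat d_image;   // CV_8UC1 input
    cv::gpu::GpuMat d_corners; // detected corner positions
    detector(d_image, d_corners);

    detector.releaseMemory();  // free internal buffers once the detector is no longer needed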

@@ -1,8 +1,12 @@
*************************************
gpu. GPU-accelerated Image Processing
*************************************
********************************************
gpuimgproc. GPU-accelerated Image Processing
********************************************
.. toctree::
:maxdepth: 1
image_processing
color
histogram
hough
feature_detection
imgproc

@@ -0,0 +1,104 @@
Histogram Calculation
=====================
.. highlight:: cpp
gpu::evenLevels
-------------------
Computes levels with even distribution.
.. ocv:function:: void gpu::evenLevels(GpuMat& levels, int nLevels, int lowerLevel, int upperLevel)
:param levels: Destination array. ``levels`` has 1 row, ``nLevels`` columns, and the ``CV_32SC1`` type.
:param nLevels: Number of computed levels. ``nLevels`` must be at least 2.
:param lowerLevel: Lower boundary value of the lowest level.
:param upperLevel: Upper boundary value of the greatest level.
gpu::histEven
-----------------
Calculates a histogram with evenly distributed bins.
.. ocv:function:: void gpu::histEven(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::histEven(const GpuMat& src, GpuMat& hist, GpuMat& buf, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::histEven( const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::histEven( const GpuMat& src, GpuMat hist[4], GpuMat& buf, int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8U``, ``CV_16U``, or ``CV_16S`` depth and 1 or 4 channels are supported. For a four-channel image, all channels are processed separately.
:param hist: Destination histogram with one row, ``histSize`` columns, and the ``CV_32S`` type.
:param histSize: Size of the histogram.
:param lowerLevel: Lower boundary of lowest-level bin.
:param upperLevel: Upper boundary of highest-level bin.
:param buf: Optional buffer to avoid extra memory allocations (for many calls with the same sizes).
:param stream: Stream for the asynchronous version.
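A minimal sketch for a single-channel 8-bit source with 256 even bins; the buffered overload is used so that repeated calls with the same sizes reuse the allocation. ::

    cv::gpu::GpuMat d_src;  // CV_8UC1 input
    cv::gpu::GpuMat d_hist; // 1 x 256, CV_32SC1
    cv::gpu::GpuMat d_buf;  // reused between calls of the same size
    cv::gpu::histEven(d_src, d_hist, d_buf, 256, 0, 256);

    cv::Mat h_hist;
    d_hist.download(h_hist);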
gpu::histRange
------------------
Calculates a histogram with bins determined by the ``levels`` array.
.. ocv:function:: void gpu::histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, GpuMat& buf, Stream& stream = Stream::Null())
:param src: Source image. ``CV_8U`` , ``CV_16U`` , or ``CV_16S`` depth and 1 or 4 channels are supported. For a four-channel image, all channels are processed separately.
:param hist: Destination histogram with one row, ``(levels.cols-1)`` columns, and the ``CV_32SC1`` type.
:param levels: Array of bin boundary levels. It must have one row and ``CV_32SC1`` type if the source has an integer depth, or ``CV_32FC1`` otherwise.
:param buf: Optional buffer to avoid extra memory allocations (for many calls with the same sizes).
:param stream: Stream for the asynchronous version.
gpu::calcHist
------------------
Calculates a histogram for a single-channel 8-bit image.
.. ocv:function:: void gpu::calcHist(const GpuMat& src, GpuMat& hist, Stream& stream = Stream::Null())
:param src: Source image.
:param hist: Destination histogram with one row, 256 columns, and the ``CV_32SC1`` type.
:param stream: Stream for the asynchronous version.
gpu::equalizeHist
------------------
Equalizes the histogram of a grayscale image.
.. ocv:function:: void gpu::equalizeHist(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, GpuMat& buf, Stream& stream = Stream::Null())
:param src: Source image.
:param dst: Destination image.
:param hist: Destination histogram with one row, 256 columns, and the ``CV_32SC1`` type.
:param buf: Optional buffer to avoid extra memory allocations (for many calls with the same sizes).
:param stream: Stream for the asynchronous version.
.. seealso:: :ocv:func:`equalizeHist`
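A minimal sketch using the overload with explicit ``hist`` and ``buf`` arguments, so repeated calls on same-sized images avoid reallocations; the input is assumed to be an 8-bit grayscale image already on the GPU. ::

    cv::gpu::GpuMat d_src;  // CV_8UC1 grayscale input
    cv::gpu::GpuMat d_dst, d_hist, d_buf;
    cv::gpu::equalizeHist(d_src, d_dst, d_hist, d_buf);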

@@ -0,0 +1,96 @@
Hough Transform
===============
.. highlight:: cpp
gpu::HoughLines
---------------
Finds lines in a binary image using the classical Hough transform.
.. ocv:function:: void gpu::HoughLines(const GpuMat& src, GpuMat& lines, float rho, float theta, int threshold, bool doSort = false, int maxLines = 4096)
.. ocv:function:: void gpu::HoughLines(const GpuMat& src, GpuMat& lines, HoughLinesBuf& buf, float rho, float theta, int threshold, bool doSort = false, int maxLines = 4096)
:param src: 8-bit, single-channel binary source image.
:param lines: Output vector of lines. Each line is represented by a two-element vector :math:`(\rho, \theta)` . :math:`\rho` is the distance from the coordinate origin :math:`(0,0)` (top-left corner of the image). :math:`\theta` is the line rotation angle in radians ( :math:`0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}` ).
:param rho: Distance resolution of the accumulator in pixels.
:param theta: Angle resolution of the accumulator in radians.
:param threshold: Accumulator threshold parameter. Only those lines are returned that get enough votes ( :math:`>\texttt{threshold}` ).
:param doSort: If true, the output lines are sorted by their votes.
:param maxLines: Maximum number of output lines.
:param buf: Optional buffer to avoid extra memory allocations (for many calls with the same sizes).
.. seealso:: :ocv:func:`HoughLines`
gpu::HoughLinesDownload
-----------------------
Downloads results from :ocv:func:`gpu::HoughLines` to host memory.
.. ocv:function:: void gpu::HoughLinesDownload(const GpuMat& d_lines, OutputArray h_lines, OutputArray h_votes = noArray())
:param d_lines: Result of :ocv:func:`gpu::HoughLines` .
:param h_lines: Output host array.
:param h_votes: Optional output array for lines' votes.
.. seealso:: :ocv:func:`gpu::HoughLines`
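A minimal sketch that detects lines on the GPU and downloads them to a host vector; the edge map is assumed to come from an edge detector such as :ocv:func:`gpu::Canny`, and the resolution and threshold values are illustrative. ::

    cv::gpu::GpuMat d_edges;  // 8-bit, single-channel binary edge map
    cv::gpu::GpuMat d_lines;
    cv::gpu::HoughLinesBuf buf;
    cv::gpu::HoughLines(d_edges, d_lines, buf, 1.0f, (float)(CV_PI / 180.0), 100);

    std::vector<cv::Vec2f> lines;
    cv::gpu::HoughLinesDownload(d_lines, lines);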
gpu::HoughCircles
-----------------
Finds circles in a grayscale image using the Hough transform.
.. ocv:function:: void gpu::HoughCircles(const GpuMat& src, GpuMat& circles, int method, float dp, float minDist, int cannyThreshold, int votesThreshold, int minRadius, int maxRadius, int maxCircles = 4096)
.. ocv:function:: void gpu::HoughCircles(const GpuMat& src, GpuMat& circles, HoughCirclesBuf& buf, int method, float dp, float minDist, int cannyThreshold, int votesThreshold, int minRadius, int maxRadius, int maxCircles = 4096)
:param src: 8-bit, single-channel grayscale input image.
:param circles: Output vector of found circles. Each circle is encoded as a 3-element floating-point vector :math:`(x, y, radius)` .
:param method: Detection method to use. Currently, the only implemented method is ``CV_HOUGH_GRADIENT`` , which is basically *21HT* , described in [Yuen90]_.
:param dp: Inverse ratio of the accumulator resolution to the image resolution. For example, if ``dp=1`` , the accumulator has the same resolution as the input image. If ``dp=2`` , the accumulator has half the width and height of the input image.
:param minDist: Minimum distance between the centers of the detected circles. If the parameter is too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is too large, some circles may be missed.
:param cannyThreshold: The higher threshold of the two passed to the :ocv:func:`gpu::Canny` edge detector (the lower one is half of it).
:param votesThreshold: The accumulator threshold for the circle centers at the detection stage. The smaller it is, the more false circles may be detected.
:param minRadius: Minimum circle radius.
:param maxRadius: Maximum circle radius.
:param maxCircles: Maximum number of output circles.
:param buf: Optional buffer to avoid extra memory allocations (for many calls with the same sizes).
.. seealso:: :ocv:func:`HoughCircles`
gpu::HoughCirclesDownload
-------------------------
Downloads results from :ocv:func:`gpu::HoughCircles` to host memory.
.. ocv:function:: void gpu::HoughCirclesDownload(const GpuMat& d_circles, OutputArray h_circles)
:param d_circles: Result of :ocv:func:`gpu::HoughCircles` .
:param h_circles: Output host array.
.. seealso:: :ocv:func:`gpu::HoughCircles`
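A minimal sketch with illustrative parameter values; ``CV_HOUGH_GRADIENT`` is the method constant mentioned above (spelled ``cv::HOUGH_GRADIENT`` in newer imgproc headers). ::

    cv::gpu::GpuMat d_src;  // CV_8UC1 grayscale input
    cv::gpu::GpuMat d_circles;
    cv::gpu::HoughCircles(d_src, d_circles, CV_HOUGH_GRADIENT,
                          1.0f /*dp*/, 20.0f /*minDist*/,
                          100 /*cannyThreshold*/, 50 /*votesThreshold*/,
                          5 /*minRadius*/, 50 /*maxRadius*/);

    std::vector<cv::Vec3f> circles;
    cv::gpu::HoughCirclesDownload(d_circles, circles);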

@@ -1,589 +0,0 @@
Image Processing
================
.. highlight:: cpp
gpu::meanShiftFiltering
---------------------------
Performs mean-shift filtering for each point of the source image.
.. ocv:function:: void gpu::meanShiftFiltering( const GpuMat& src, GpuMat& dst, int sp, int sr, TermCriteria criteria=TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1), Stream& stream=Stream::Null() )
:param src: Source image. Only ``CV_8UC4`` images are supported for now.
:param dst: Destination image containing the color of mapped points. It has the same size and type as ``src`` .
:param sp: Spatial window radius.
:param sr: Color window radius.
:param criteria: Termination criteria. See :ocv:class:`TermCriteria`.
It maps each point of the source image into another point. As a result, you have a new color and new position of each point.
gpu::meanShiftProc
----------------------
Performs a mean-shift procedure and stores information about processed points (their colors and positions) in two images.
.. ocv:function:: void gpu::meanShiftProc( const GpuMat& src, GpuMat& dstr, GpuMat& dstsp, int sp, int sr, TermCriteria criteria=TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1), Stream& stream=Stream::Null() )
:param src: Source image. Only ``CV_8UC4`` images are supported for now.
:param dstr: Destination image containing the color of mapped points. The size and type are the same as ``src`` .
:param dstsp: Destination image containing the position of mapped points. The size is the same as ``src`` size. The type is ``CV_16SC2`` .
:param sp: Spatial window radius.
:param sr: Color window radius.
:param criteria: Termination criteria. See :ocv:class:`TermCriteria`.
.. seealso:: :ocv:func:`gpu::meanShiftFiltering`
gpu::meanShiftSegmentation
------------------------------
Performs a mean-shift segmentation of the source image and eliminates small segments.
.. ocv:function:: void gpu::meanShiftSegmentation(const GpuMat& src, Mat& dst, int sp, int sr, int minsize, TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1))
:param src: Source image. Only ``CV_8UC4`` images are supported for now.
:param dst: Segmented image with the same size and type as ``src`` .
:param sp: Spatial window radius.
:param sr: Color window radius.
:param minsize: Minimum segment size. Smaller segments are merged.
:param criteria: Termination criteria. See :ocv:class:`TermCriteria`.
gpu::integral
-----------------
Computes an integral image.
.. ocv:function:: void gpu::integral(const GpuMat& src, GpuMat& sum, Stream& stream = Stream::Null())
:param src: Source image. Only ``CV_8UC1`` images are supported for now.
:param sum: Integral image containing 32-bit unsigned integer values packed into ``CV_32SC1`` .
:param stream: Stream for the asynchronous version.
.. seealso:: :ocv:func:`integral`
gpu::sqrIntegral
--------------------
Computes a squared integral image.
.. ocv:function:: void gpu::sqrIntegral(const GpuMat& src, GpuMat& sqsum, Stream& stream = Stream::Null())
:param src: Source image. Only ``CV_8UC1`` images are supported for now.
:param sqsum: Squared integral image containing 64-bit unsigned integer values packed into ``CV_64FC1`` .
:param stream: Stream for the asynchronous version.
gpu::cornerHarris
---------------------
Computes the Harris cornerness criteria at each image pixel.
.. ocv:function:: void gpu::cornerHarris(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, double k, int borderType=BORDER_REFLECT101)
:param src: Source image. Only ``CV_8UC1`` and ``CV_32FC1`` images are supported for now.
:param dst: Destination image containing cornerness values. It has the same size as ``src`` and ``CV_32FC1`` type.
:param blockSize: Neighborhood size.
:param ksize: Aperture parameter for the Sobel operator.
:param k: Harris detector free parameter.
:param borderType: Pixel extrapolation method. Only ``BORDER_REFLECT101`` and ``BORDER_REPLICATE`` are supported for now.
.. seealso:: :ocv:func:`cornerHarris`
gpu::cornerMinEigenVal
--------------------------
Computes the minimum eigenvalue of a 2x2 derivative covariance matrix at each pixel (the cornerness criteria).
.. ocv:function:: void gpu::cornerMinEigenVal(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, int borderType=BORDER_REFLECT101)
.. ocv:function:: void gpu::cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, int borderType=BORDER_REFLECT101)
.. ocv:function:: void gpu::cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize, int borderType=BORDER_REFLECT101, Stream& stream = Stream::Null())
:param src: Source image. Only ``CV_8UC1`` and ``CV_32FC1`` images are supported for now.
:param dst: Destination image containing cornerness values. It has the same size as ``src`` and ``CV_32FC1`` type.
:param blockSize: Neighborhood size.
:param ksize: Aperture parameter for the Sobel operator.
:param borderType: Pixel extrapolation method. Only ``BORDER_REFLECT101`` and ``BORDER_REPLICATE`` are supported for now.
.. seealso:: :ocv:func:`cornerMinEigenVal`
gpu::MatchTemplateBuf
---------------------
.. ocv:struct:: gpu::MatchTemplateBuf
Class that provides memory buffers for the :ocv:func:`gpu::matchTemplate` function and allows you to adjust some of its specific parameters. ::
struct CV_EXPORTS MatchTemplateBuf
{
Size user_block_size;
GpuMat imagef, templf;
std::vector<GpuMat> images;
std::vector<GpuMat> image_sums;
std::vector<GpuMat> image_sqsums;
};
You can use the `user_block_size` field to set a specific block size for the :ocv:func:`gpu::matchTemplate` function. If you leave it at its default value `Size(0,0)`, the block size is estimated automatically (optimized for speed). By varying `user_block_size` you can reduce memory requirements at the cost of speed.
gpu::matchTemplate
----------------------
Computes a proximity map for a raster template and an image where the template is searched for.
.. ocv:function:: void gpu::matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result, int method, Stream &stream = Stream::Null())
.. ocv:function:: void gpu::matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result, int method, MatchTemplateBuf &buf, Stream& stream = Stream::Null())
:param image: Source image. ``CV_32F`` and ``CV_8U`` depth images (1..4 channels) are supported for now.
:param templ: Template image with the same size and type as ``image`` .
:param result: Map containing comparison results ( ``CV_32FC1`` ). If ``image`` is *W x H* and ``templ`` is *w x h*, then ``result`` must be *W-w+1 x H-h+1*.
:param method: Specifies the way to compare the template with the image.
:param buf: Optional buffer to avoid extra memory allocations and to adjust some specific parameters. See :ocv:struct:`gpu::MatchTemplateBuf`.
:param stream: Stream for the asynchronous version.
The following methods are supported for the ``CV_8U`` depth images for now:
* ``CV_TM_SQDIFF``
* ``CV_TM_SQDIFF_NORMED``
* ``CV_TM_CCORR``
* ``CV_TM_CCORR_NORMED``
* ``CV_TM_CCOEFF``
* ``CV_TM_CCOEFF_NORMED``
The following methods are supported for the ``CV_32F`` images for now:
* ``CV_TM_SQDIFF``
* ``CV_TM_CCORR``
.. seealso:: :ocv:func:`matchTemplate`
gpu::cvtColor
-----------------
Converts an image from one color space to another.
.. ocv:function:: void gpu::cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn = 0, Stream& stream = Stream::Null())
:param src: Source image with ``CV_8U`` , ``CV_16U`` , or ``CV_32F`` depth and 1, 3, or 4 channels.
:param dst: Destination image with the same size and depth as ``src`` .
:param code: Color space conversion code. For details, see :ocv:func:`cvtColor` . Conversion to/from Luv and Bayer color spaces is not supported.
:param dcn: Number of channels in the destination image. If the parameter is 0, the number of the channels is derived automatically from ``src`` and the ``code`` .
:param stream: Stream for the asynchronous version.
3-channel color spaces (like ``HSV``, ``XYZ``, and so on) can be stored in a 4-channel image for better performance.
.. seealso:: :ocv:func:`cvtColor`
gpu::swapChannels
-----------------
Exchanges the color channels of an image in-place.
.. ocv:function:: void gpu::swapChannels(GpuMat& image, const int dstOrder[4], Stream& stream = Stream::Null())
:param image: Source image. Supports only ``CV_8UC4`` type.
:param dstOrder: Integer array describing how channel values are permuted. The n-th entry of the array contains the number of the channel that is stored in the n-th channel of the output image. For example, given an RGBA image, ``dstOrder = [3,2,1,0]`` converts it to ABGR channel order.
:param stream: Stream for the asynchronous version.
The methods support arbitrary permutations of the original channels, including replication.
gpu::rectStdDev
-------------------
Computes a standard deviation of integral images.
.. ocv:function:: void gpu::rectStdDev(const GpuMat& src, const GpuMat& sqr, GpuMat& dst, const Rect& rect, Stream& stream = Stream::Null())
:param src: Source image. Only the ``CV_32SC1`` type is supported.
:param sqr: Squared source image. Only the ``CV_32FC1`` type is supported.
:param dst: Destination image with the same type and size as ``src`` .
:param rect: Rectangular window.
:param stream: Stream for the asynchronous version.
gpu::evenLevels
-------------------
Computes levels with even distribution.
.. ocv:function:: void gpu::evenLevels(GpuMat& levels, int nLevels, int lowerLevel, int upperLevel)
:param levels: Destination array. ``levels`` has 1 row, ``nLevels`` columns, and the ``CV_32SC1`` type.
:param nLevels: Number of computed levels. ``nLevels`` must be at least 2.
:param lowerLevel: Lower boundary value of the lowest level.
:param upperLevel: Upper boundary value of the greatest level.
gpu::histEven
-----------------
Calculates a histogram with evenly distributed bins.
.. ocv:function:: void gpu::histEven(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::histEven(const GpuMat& src, GpuMat& hist, GpuMat& buf, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::histEven( const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream=Stream::Null() )
.. ocv:function:: void gpu::histEven( const GpuMat& src, GpuMat hist[4], GpuMat& buf, int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream=Stream::Null() )
:param src: Source image. ``CV_8U``, ``CV_16U``, or ``CV_16S`` depth and 1 or 4 channels are supported. For a four-channel image, all channels are processed separately.
:param hist: Destination histogram with one row, ``histSize`` columns, and the ``CV_32S`` type.
:param histSize: Size of the histogram.
:param lowerLevel: Lower boundary of lowest-level bin.
:param upperLevel: Upper boundary of highest-level bin.
:param buf: Optional buffer to avoid extra memory allocations (for many calls with the same sizes).
:param stream: Stream for the asynchronous version.
gpu::histRange
------------------
Calculates a histogram with bins determined by the ``levels`` array.
.. ocv:function:: void gpu::histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, GpuMat& buf, Stream& stream = Stream::Null())
:param src: Source image. ``CV_8U`` , ``CV_16U`` , or ``CV_16S`` depth and 1 or 4 channels are supported. For a four-channel image, all channels are processed separately.
:param hist: Destination histogram with one row, ``(levels.cols-1)`` columns, and the ``CV_32SC1`` type.
:param levels: Array of bin boundary levels. It must have one row and ``CV_32SC1`` type if the source has an integer depth, or ``CV_32FC1`` otherwise.
:param buf: Optional buffer to avoid extra memory allocations (for many calls with the same sizes).
:param stream: Stream for the asynchronous version.
gpu::calcHist
------------------
Calculates a histogram for a single-channel 8-bit image.
.. ocv:function:: void gpu::calcHist(const GpuMat& src, GpuMat& hist, Stream& stream = Stream::Null())
:param src: Source image.
:param hist: Destination histogram with one row, 256 columns, and the ``CV_32SC1`` type.
:param stream: Stream for the asynchronous version.
gpu::equalizeHist
------------------
Equalizes the histogram of a grayscale image.
.. ocv:function:: void gpu::equalizeHist(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null())
.. ocv:function:: void gpu::equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, GpuMat& buf, Stream& stream = Stream::Null())
:param src: Source image.
:param dst: Destination image.
:param hist: Destination histogram with one row, 256 columns, and the ``CV_32SC1`` type.
:param buf: Optional buffer to avoid extra memory allocations (for many calls with the same sizes).
:param stream: Stream for the asynchronous version.
.. seealso:: :ocv:func:`equalizeHist`
gpu::blendLinear
-------------------
Performs linear blending of two images.
.. ocv:function:: void gpu::blendLinear(const GpuMat& img1, const GpuMat& img2, const GpuMat& weights1, const GpuMat& weights2, GpuMat& result, Stream& stream = Stream::Null())
:param img1: First image. Supports only ``CV_8U`` and ``CV_32F`` depth.
:param img2: Second image. Must have the same size and the same type as ``img1`` .
:param weights1: Weights for the first image. Must have the same size as ``img1`` . Supports only ``CV_32F`` type.
:param weights2: Weights for the second image. Must have the same size as ``img2`` . Supports only ``CV_32F`` type.
:param result: Destination image.
:param stream: Stream for the asynchronous version.
gpu::bilateralFilter
--------------------
Performs bilateral filtering of the passed image.
.. ocv:function:: void gpu::bilateralFilter( const GpuMat& src, GpuMat& dst, int kernel_size, float sigma_color, float sigma_spatial, int borderMode=BORDER_DEFAULT, Stream& stream=Stream::Null() )
:param src: Source image. Supports only images with (channels != 2 && depth() != CV_8S && depth() != CV_32S && depth() != CV_64F).
:param dst: Destination image.
:param kernel_size: Kernel window size.
:param sigma_color: Filter sigma in the color space.
:param sigma_spatial: Filter sigma in the coordinate space.
:param borderMode: Border type. See :ocv:func:`borderInterpolate` for details. ``BORDER_REFLECT101`` , ``BORDER_REPLICATE`` , ``BORDER_CONSTANT`` , ``BORDER_REFLECT`` and ``BORDER_WRAP`` are supported for now.
:param stream: Stream for the asynchronous version.
.. seealso::
:ocv:func:`bilateralFilter`
gpu::alphaComp
-------------------
Composites two images using alpha opacity values contained in each image.
.. ocv:function:: void gpu::alphaComp(const GpuMat& img1, const GpuMat& img2, GpuMat& dst, int alpha_op, Stream& stream = Stream::Null())
:param img1: First image. Supports ``CV_8UC4`` , ``CV_16UC4`` , ``CV_32SC4`` and ``CV_32FC4`` types.
:param img2: Second image. Must have the same size and the same type as ``img1`` .
:param dst: Destination image.
:param alpha_op: Flag specifying the alpha-blending operation:
* **ALPHA_OVER**
* **ALPHA_IN**
* **ALPHA_OUT**
* **ALPHA_ATOP**
* **ALPHA_XOR**
* **ALPHA_PLUS**
* **ALPHA_OVER_PREMUL**
* **ALPHA_IN_PREMUL**
* **ALPHA_OUT_PREMUL**
* **ALPHA_ATOP_PREMUL**
* **ALPHA_XOR_PREMUL**
* **ALPHA_PLUS_PREMUL**
* **ALPHA_PREMUL**
:param stream: Stream for the asynchronous version.
gpu::Canny
-------------------
Finds edges in an image using the [Canny86]_ algorithm.
.. ocv:function:: void gpu::Canny(const GpuMat& image, GpuMat& edges, double low_thresh, double high_thresh, int apperture_size = 3, bool L2gradient = false)
.. ocv:function:: void gpu::Canny(const GpuMat& image, CannyBuf& buf, GpuMat& edges, double low_thresh, double high_thresh, int apperture_size = 3, bool L2gradient = false)
.. ocv:function:: void gpu::Canny(const GpuMat& dx, const GpuMat& dy, GpuMat& edges, double low_thresh, double high_thresh, bool L2gradient = false)
.. ocv:function:: void gpu::Canny(const GpuMat& dx, const GpuMat& dy, CannyBuf& buf, GpuMat& edges, double low_thresh, double high_thresh, bool L2gradient = false)
:param image: Single-channel 8-bit input image.
:param dx: First derivative of the image in the vertical direction. Supports only ``CV_32S`` type.
:param dy: First derivative of the image in the horizontal direction. Supports only ``CV_32S`` type.
:param edges: Output edge map. It has the same size and type as ``image`` .
:param low_thresh: First threshold for the hysteresis procedure.
:param high_thresh: Second threshold for the hysteresis procedure.
:param apperture_size: Aperture size for the :ocv:func:`Sobel` operator.
:param L2gradient: Flag indicating whether a more accurate :math:`L_2` norm :math:`=\sqrt{(dI/dx)^2 + (dI/dy)^2}` should be used to compute the image gradient magnitude ( ``L2gradient=true`` ), or a faster default :math:`L_1` norm :math:`=|dI/dx|+|dI/dy|` is enough ( ``L2gradient=false`` ).
:param buf: Optional buffer to avoid extra memory allocations (for many calls with the same sizes).
.. seealso:: :ocv:func:`Canny`
gpu::HoughLines
---------------
Finds lines in a binary image using the classical Hough transform.
.. ocv:function:: void gpu::HoughLines(const GpuMat& src, GpuMat& lines, float rho, float theta, int threshold, bool doSort = false, int maxLines = 4096)
.. ocv:function:: void gpu::HoughLines(const GpuMat& src, GpuMat& lines, HoughLinesBuf& buf, float rho, float theta, int threshold, bool doSort = false, int maxLines = 4096)
:param src: 8-bit, single-channel binary source image.
:param lines: Output vector of lines. Each line is represented by a two-element vector :math:`(\rho, \theta)` . :math:`\rho` is the distance from the coordinate origin :math:`(0,0)` (top-left corner of the image). :math:`\theta` is the line rotation angle in radians ( :math:`0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}` ).
:param rho: Distance resolution of the accumulator in pixels.
:param theta: Angle resolution of the accumulator in radians.
:param threshold: Accumulator threshold parameter. Only those lines are returned that get enough votes ( :math:`>\texttt{threshold}` ).
:param doSort: If true, the output lines are sorted by their votes.
:param maxLines: Maximum number of output lines.
:param buf: Optional buffer to avoid extra memory allocations (for many calls with the same sizes).
.. seealso:: :ocv:func:`HoughLines`
gpu::HoughLinesDownload
-----------------------
Downloads results from :ocv:func:`gpu::HoughLines` to host memory.
.. ocv:function:: void gpu::HoughLinesDownload(const GpuMat& d_lines, OutputArray h_lines, OutputArray h_votes = noArray())
:param d_lines: Result of :ocv:func:`gpu::HoughLines` .
:param h_lines: Output host array.
:param h_votes: Optional output array for lines' votes.
.. seealso:: :ocv:func:`gpu::HoughLines`
gpu::HoughCircles
-----------------
Finds circles in a grayscale image using the Hough transform.
.. ocv:function:: void gpu::HoughCircles(const GpuMat& src, GpuMat& circles, int method, float dp, float minDist, int cannyThreshold, int votesThreshold, int minRadius, int maxRadius, int maxCircles = 4096)
.. ocv:function:: void gpu::HoughCircles(const GpuMat& src, GpuMat& circles, HoughCirclesBuf& buf, int method, float dp, float minDist, int cannyThreshold, int votesThreshold, int minRadius, int maxRadius, int maxCircles = 4096)
:param src: 8-bit, single-channel grayscale input image.
:param circles: Output vector of found circles. Each circle is encoded as a 3-element floating-point vector :math:`(x, y, radius)` .
:param method: Detection method to use. Currently, the only implemented method is ``CV_HOUGH_GRADIENT`` , which is basically *21HT* , described in [Yuen90]_.
:param dp: Inverse ratio of the accumulator resolution to the image resolution. For example, if ``dp=1`` , the accumulator has the same resolution as the input image. If ``dp=2`` , the accumulator has half the width and height of the input image.
:param minDist: Minimum distance between the centers of the detected circles. If the parameter is too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is too large, some circles may be missed.
:param cannyThreshold: The higher threshold of the two passed to the :ocv:func:`gpu::Canny` edge detector (the lower one is half of it).
:param votesThreshold: The accumulator threshold for the circle centers at the detection stage. The smaller it is, the more false circles may be detected.
:param minRadius: Minimum circle radius.
:param maxRadius: Maximum circle radius.
:param maxCircles: Maximum number of output circles.
:param buf: Optional buffer to avoid extra memory allocations (for many calls with the same sizes).
.. seealso:: :ocv:func:`HoughCircles`
gpu::HoughCirclesDownload
-------------------------
Downloads results from :ocv:func:`gpu::HoughCircles` to host memory.
.. ocv:function:: void gpu::HoughCirclesDownload(const GpuMat& d_circles, OutputArray h_circles)
:param d_circles: Result of :ocv:func:`gpu::HoughCircles` .
:param h_circles: Output host array.
.. seealso:: :ocv:func:`gpu::HoughCircles`
gpu::GoodFeaturesToTrackDetector_GPU
------------------------------------
.. ocv:class:: gpu::GoodFeaturesToTrackDetector_GPU
Class used for detecting strong corners in an image. ::
class GoodFeaturesToTrackDetector_GPU
{
public:
explicit GoodFeaturesToTrackDetector_GPU(int maxCorners_ = 1000, double qualityLevel_ = 0.01, double minDistance_ = 0.0,
int blockSize_ = 3, bool useHarrisDetector_ = false, double harrisK_ = 0.04);
void operator ()(const GpuMat& image, GpuMat& corners, const GpuMat& mask = GpuMat());
int maxCorners;
double qualityLevel;
double minDistance;
int blockSize;
bool useHarrisDetector;
double harrisK;
void releaseMemory();
};
The class finds the most prominent corners in the image.
.. seealso:: :ocv:func:`goodFeaturesToTrack`

@@ -0,0 +1,203 @@
Image Processing
================
.. highlight:: cpp
gpu::meanShiftFiltering
---------------------------
Performs mean-shift filtering for each point of the source image.
.. ocv:function:: void gpu::meanShiftFiltering( const GpuMat& src, GpuMat& dst, int sp, int sr, TermCriteria criteria=TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1), Stream& stream=Stream::Null() )
:param src: Source image. Only ``CV_8UC4`` images are supported for now.
:param dst: Destination image containing the color of mapped points. It has the same size and type as ``src`` .
:param sp: Spatial window radius.
:param sr: Color window radius.
:param criteria: Termination criteria. See :ocv:class:`TermCriteria`.
It maps each point of the source image into another point. As a result, you have a new color and new position of each point.
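A minimal sketch with illustrative window radii; the source is assumed to be a ``CV_8UC4`` image already uploaded to the GPU. ::

    cv::gpu::GpuMat d_src;  // CV_8UC4 input
    cv::gpu::GpuMat d_dst;  // filtered colors, same size and type as d_src
    cv::gpu::meanShiftFiltering(d_src, d_dst, 20 /*sp*/, 20 /*sr*/);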
gpu::meanShiftProc
----------------------
Performs a mean-shift procedure and stores information about processed points (their colors and positions) in two images.
.. ocv:function:: void gpu::meanShiftProc( const GpuMat& src, GpuMat& dstr, GpuMat& dstsp, int sp, int sr, TermCriteria criteria=TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1), Stream& stream=Stream::Null() )
:param src: Source image. Only ``CV_8UC4`` images are supported for now.
:param dstr: Destination image containing the color of mapped points. The size and type are the same as ``src`` .
:param dstsp: Destination image containing the position of mapped points. The size is the same as ``src`` size. The type is ``CV_16SC2`` .
:param sp: Spatial window radius.
:param sr: Color window radius.
:param criteria: Termination criteria. See :ocv:class:`TermCriteria`.
.. seealso:: :ocv:func:`gpu::meanShiftFiltering`
gpu::meanShiftSegmentation
------------------------------
Performs a mean-shift segmentation of the source image and eliminates small segments.
.. ocv:function:: void gpu::meanShiftSegmentation(const GpuMat& src, Mat& dst, int sp, int sr, int minsize, TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1))
:param src: Source image. Only ``CV_8UC4`` images are supported for now.
:param dst: Segmented image with the same size and type as ``src`` .
:param sp: Spatial window radius.
:param sr: Color window radius.
:param minsize: Minimum segment size. Smaller segments are merged.
:param criteria: Termination criteria. See :ocv:class:`TermCriteria`.
gpu::MatchTemplateBuf
---------------------
.. ocv:struct:: gpu::MatchTemplateBuf
Class that provides memory buffers for the :ocv:func:`gpu::matchTemplate` function and allows you to adjust some of its specific parameters. ::
struct CV_EXPORTS MatchTemplateBuf
{
Size user_block_size;
GpuMat imagef, templf;
std::vector<GpuMat> images;
std::vector<GpuMat> image_sums;
std::vector<GpuMat> image_sqsums;
};
You can use the `user_block_size` field to set a specific block size for the :ocv:func:`gpu::matchTemplate` function. If you leave it at its default value `Size(0,0)`, the block size is estimated automatically (optimized for speed). By varying `user_block_size` you can reduce memory requirements at the cost of speed.
gpu::matchTemplate
----------------------
Computes a proximity map for a raster template and an image where the template is searched for.
.. ocv:function:: void gpu::matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result, int method, Stream &stream = Stream::Null())
.. ocv:function:: void gpu::matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result, int method, MatchTemplateBuf &buf, Stream& stream = Stream::Null())
:param image: Source image. ``CV_32F`` and ``CV_8U`` depth images (1..4 channels) are supported for now.
:param templ: Template image with the same size and type as ``image`` .
:param result: Map containing comparison results ( ``CV_32FC1`` ). If ``image`` is *W x H* and ``templ`` is *w x h*, then ``result`` must be *W-w+1 x H-h+1*.
:param method: Specifies the way to compare the template with the image.
:param buf: Optional buffer to avoid extra memory allocations and to adjust some specific parameters. See :ocv:struct:`gpu::MatchTemplateBuf`.
:param stream: Stream for the asynchronous version.
The following methods are supported for the ``CV_8U`` depth images for now:
* ``CV_TM_SQDIFF``
* ``CV_TM_SQDIFF_NORMED``
* ``CV_TM_CCORR``
* ``CV_TM_CCORR_NORMED``
* ``CV_TM_CCOEFF``
* ``CV_TM_CCOEFF_NORMED``
The following methods are supported for the ``CV_32F`` images for now:
* ``CV_TM_SQDIFF``
* ``CV_TM_CCORR``
.. seealso:: :ocv:func:`matchTemplate`
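A minimal sketch that computes a normalized cross-correlation map on the GPU and locates the best match on the host; the comparison method and the use of ``cv::minMaxLoc`` on the downloaded result are illustrative choices. ::

    cv::gpu::GpuMat d_image, d_templ;  // same type, e.g. CV_8UC1
    cv::gpu::GpuMat d_result;          // CV_32FC1 proximity map
    cv::gpu::matchTemplate(d_image, d_templ, d_result, CV_TM_CCORR_NORMED);

    cv::Mat h_result;
    d_result.download(h_result);

    double maxVal;
    cv::Point maxLoc;
    cv::minMaxLoc(h_result, 0, &maxVal, 0, &maxLoc);  // position of the best match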
gpu::Canny
-------------------
Finds edges in an image using the [Canny86]_ algorithm.
.. ocv:function:: void gpu::Canny(const GpuMat& image, GpuMat& edges, double low_thresh, double high_thresh, int apperture_size = 3, bool L2gradient = false)
.. ocv:function:: void gpu::Canny(const GpuMat& image, CannyBuf& buf, GpuMat& edges, double low_thresh, double high_thresh, int apperture_size = 3, bool L2gradient = false)
.. ocv:function:: void gpu::Canny(const GpuMat& dx, const GpuMat& dy, GpuMat& edges, double low_thresh, double high_thresh, bool L2gradient = false)
.. ocv:function:: void gpu::Canny(const GpuMat& dx, const GpuMat& dy, CannyBuf& buf, GpuMat& edges, double low_thresh, double high_thresh, bool L2gradient = false)
:param image: Single-channel 8-bit input image.
:param dx: First derivative of the image in the vertical direction. Supports only ``CV_32S`` type.
:param dy: First derivative of the image in the horizontal direction. Supports only ``CV_32S`` type.
:param edges: Output edge map. It has the same size and type as ``image`` .
:param low_thresh: First threshold for the hysteresis procedure.
:param high_thresh: Second threshold for the hysteresis procedure.
:param apperture_size: Aperture size for the :ocv:func:`Sobel` operator.
:param L2gradient: Flag indicating whether a more accurate :math:`L_2` norm :math:`=\sqrt{(dI/dx)^2 + (dI/dy)^2}` should be used to compute the image gradient magnitude ( ``L2gradient=true`` ), or a faster default :math:`L_1` norm :math:`=|dI/dx|+|dI/dy|` is enough ( ``L2gradient=false`` ).
:param buf: Optional buffer to avoid extra memory allocations (for many calls with the same sizes).
.. seealso:: :ocv:func:`Canny`
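A minimal sketch using the buffered overload so that repeated calls on same-sized images reuse the intermediate allocations; the thresholds are illustrative. ::

    cv::gpu::GpuMat d_image;  // single-channel 8-bit input
    cv::gpu::GpuMat d_edges;
    cv::gpu::CannyBuf buf;    // reused across calls with the same image size
    cv::gpu::Canny(d_image, buf, d_edges, 50.0 /*low_thresh*/, 100.0 /*high_thresh*/);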
gpu::bilateralFilter
--------------------
Performs bilateral filtering of the passed image.
.. ocv:function:: void gpu::bilateralFilter( const GpuMat& src, GpuMat& dst, int kernel_size, float sigma_color, float sigma_spatial, int borderMode=BORDER_DEFAULT, Stream& stream=Stream::Null() )
:param src: Source image. Supports only images with (channels != 2 && depth() != CV_8S && depth() != CV_32S && depth() != CV_64F).
:param dst: Destination image.
:param kernel_size: Kernel window size.
:param sigma_color: Filter sigma in the color space.
:param sigma_spatial: Filter sigma in the coordinate space.
:param borderMode: Border type. See :ocv:func:`borderInterpolate` for details. ``BORDER_REFLECT101`` , ``BORDER_REPLICATE`` , ``BORDER_CONSTANT`` , ``BORDER_REFLECT`` and ``BORDER_WRAP`` are supported for now.
:param stream: Stream for the asynchronous version.
.. seealso::
:ocv:func:`bilateralFilter`
gpu::blendLinear
-------------------
Performs linear blending of two images.
.. ocv:function:: void gpu::blendLinear(const GpuMat& img1, const GpuMat& img2, const GpuMat& weights1, const GpuMat& weights2, GpuMat& result, Stream& stream = Stream::Null())
:param img1: First image. Supports only ``CV_8U`` and ``CV_32F`` depth.
:param img2: Second image. Must have the same size and the same type as ``img1`` .
:param weights1: Weights for the first image. Must have the same size as ``img1`` . Supports only ``CV_32F`` type.
:param weights2: Weights for the second image. Must have the same size as ``img2`` . Supports only ``CV_32F`` type.
:param result: Destination image.
:param stream: Stream for the asynchronous version.
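A minimal sketch, assuming both images and both ``CV_32F`` weight maps have the same size and have already been uploaded to the GPU. ::

    cv::gpu::GpuMat d_img1, d_img2;          // same size and type, CV_8U or CV_32F depth
    cv::gpu::GpuMat d_weights1, d_weights2;  // CV_32FC1, same size as the images
    cv::gpu::GpuMat d_result;
    cv::gpu::blendLinear(d_img1, d_img2, d_weights1, d_weights2, d_result);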

@@ -48,31 +48,13 @@
#endif
#include "opencv2/core/gpumat.hpp"
#include "opencv2/gpufilters.hpp"
#include "opencv2/core/base.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/gpufilters.hpp"
namespace cv { namespace gpu {
enum { ALPHA_OVER, ALPHA_IN, ALPHA_OUT, ALPHA_ATOP, ALPHA_XOR, ALPHA_PLUS, ALPHA_OVER_PREMUL, ALPHA_IN_PREMUL, ALPHA_OUT_PREMUL,
ALPHA_ATOP_PREMUL, ALPHA_XOR_PREMUL, ALPHA_PLUS_PREMUL, ALPHA_PREMUL};
//! Composite two images using alpha opacity values contained in each image
//! Supports CV_8UC4, CV_16UC4, CV_32SC4 and CV_32FC4 types
CV_EXPORTS void alphaComp(const GpuMat& img1, const GpuMat& img2, GpuMat& dst, int alpha_op, Stream& stream = Stream::Null());
//! Does mean shift filtering on GPU.
CV_EXPORTS void meanShiftFiltering(const GpuMat& src, GpuMat& dst, int sp, int sr,
TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1),
Stream& stream = Stream::Null());
//! Does mean shift procedure on GPU.
CV_EXPORTS void meanShiftProc(const GpuMat& src, GpuMat& dstr, GpuMat& dstsp, int sp, int sr,
TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1),
Stream& stream = Stream::Null());
//! Does mean shift segmentation with elimination of small regions.
CV_EXPORTS void meanShiftSegmentation(const GpuMat& src, Mat& dst, int sp, int sr, int minsize,
TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1));
/////////////////////////// Color Processing ///////////////////////////
//! converts image from one color space to another
CV_EXPORTS void cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn = 0, Stream& stream = Stream::Null());
@@ -107,41 +89,65 @@ CV_EXPORTS void swapChannels(GpuMat& image, const int dstOrder[4], Stream& strea
//! Routines for correcting image color gamma
CV_EXPORTS void gammaCorrection(const GpuMat& src, GpuMat& dst, bool forward = true, Stream& stream = Stream::Null());
//! computes Harris cornerness criteria at each image pixel
CV_EXPORTS void cornerHarris(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, double k, int borderType = BORDER_REFLECT101);
CV_EXPORTS void cornerHarris(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, double k, int borderType = BORDER_REFLECT101);
CV_EXPORTS void cornerHarris(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize, double k,
int borderType = BORDER_REFLECT101, Stream& stream = Stream::Null());
enum { ALPHA_OVER, ALPHA_IN, ALPHA_OUT, ALPHA_ATOP, ALPHA_XOR, ALPHA_PLUS, ALPHA_OVER_PREMUL, ALPHA_IN_PREMUL, ALPHA_OUT_PREMUL,
ALPHA_ATOP_PREMUL, ALPHA_XOR_PREMUL, ALPHA_PLUS_PREMUL, ALPHA_PREMUL};
//! computes the minimum eigenvalue of the 2x2 derivative covariance matrix at each pixel (the cornerness criteria)
CV_EXPORTS void cornerMinEigenVal(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, int borderType=BORDER_REFLECT101);
CV_EXPORTS void cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, int borderType=BORDER_REFLECT101);
CV_EXPORTS void cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize,
int borderType=BORDER_REFLECT101, Stream& stream = Stream::Null());
//! Composite two images using alpha opacity values contained in each image
//! Supports CV_8UC4, CV_16UC4, CV_32SC4 and CV_32FC4 types
CV_EXPORTS void alphaComp(const GpuMat& img1, const GpuMat& img2, GpuMat& dst, int alpha_op, Stream& stream = Stream::Null());
struct CV_EXPORTS MatchTemplateBuf
{
Size user_block_size;
GpuMat imagef, templf;
std::vector<GpuMat> images;
std::vector<GpuMat> image_sums;
std::vector<GpuMat> image_sqsums;
};
////////////////////////////// Histogram ///////////////////////////////
//! computes the proximity map for the raster template and the image where the template is searched for
CV_EXPORTS void matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result, int method, Stream &stream = Stream::Null());
//! Compute levels with even distribution. levels will have 1 row and nLevels cols and CV_32SC1 type.
CV_EXPORTS void evenLevels(GpuMat& levels, int nLevels, int lowerLevel, int upperLevel);
//! computes the proximity map for the raster template and the image where the template is searched for
CV_EXPORTS void matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result, int method, MatchTemplateBuf &buf, Stream& stream = Stream::Null());
//! Calculates histogram with evenly distributed bins for a single-channel source.
//! Supports CV_8UC1, CV_16UC1 and CV_16SC1 source types.
//! Output hist will have one row and histSize cols and CV_32SC1 type.
CV_EXPORTS void histEven(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null());
CV_EXPORTS void histEven(const GpuMat& src, GpuMat& hist, GpuMat& buf, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null());
//! performs linear blending of two images
//! to avoid accuracy errors, the sum of weights shouldn't be very close to zero
CV_EXPORTS void blendLinear(const GpuMat& img1, const GpuMat& img2, const GpuMat& weights1, const GpuMat& weights2,
GpuMat& result, Stream& stream = Stream::Null());
//! Calculates histogram with evenly distributed bins for four-channel source.
//! All channels of source are processed separately.
//! Supports CV_8UC4, CV_16UC4 and CV_16SC4 source types.
//! Output hist[i] will have one row and histSize[i] cols and CV_32SC1 type.
CV_EXPORTS void histEven(const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream = Stream::Null());
CV_EXPORTS void histEven(const GpuMat& src, GpuMat hist[4], GpuMat& buf, int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream = Stream::Null());
//! Performs bilateral filtering of the passed image
CV_EXPORTS void bilateralFilter(const GpuMat& src, GpuMat& dst, int kernel_size, float sigma_color, float sigma_spatial,
int borderMode = BORDER_DEFAULT, Stream& stream = Stream::Null());
//! Calculates histogram with bins determined by levels array.
//! levels must have one row and CV_32SC1 type if source has integer type or CV_32FC1 otherwise.
//! Supports CV_8UC1, CV_16UC1, CV_16SC1 and CV_32FC1 source types.
//! Output hist will have one row and (levels.cols-1) cols and CV_32SC1 type.
CV_EXPORTS void histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, Stream& stream = Stream::Null());
CV_EXPORTS void histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, GpuMat& buf, Stream& stream = Stream::Null());
//! Calculates histogram with bins determined by levels array.
//! All levels must have one row and CV_32SC1 type if source has integer type or CV_32FC1 otherwise.
//! All channels of source are processed separately.
//! Supports CV_8UC4, CV_16UC4, CV_16SC4 and CV_32FC4 source types.
//! Output hist[i] will have one row and (levels[i].cols-1) cols and CV_32SC1 type.
CV_EXPORTS void histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], Stream& stream = Stream::Null());
CV_EXPORTS void histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], GpuMat& buf, Stream& stream = Stream::Null());
//! Calculates histogram for an 8u single-channel image
//! Output hist will have one row, 256 cols and CV_32SC1 type.
CV_EXPORTS void calcHist(const GpuMat& src, GpuMat& hist, Stream& stream = Stream::Null());
CV_EXPORTS void calcHist(const GpuMat& src, GpuMat& hist, GpuMat& buf, Stream& stream = Stream::Null());
//! normalizes the grayscale image brightness and contrast by normalizing its histogram
CV_EXPORTS void equalizeHist(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null());
CV_EXPORTS void equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, Stream& stream = Stream::Null());
CV_EXPORTS void equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, GpuMat& buf, Stream& stream = Stream::Null());
class CV_EXPORTS CLAHE : public cv::CLAHE
{
public:
using cv::CLAHE::apply;
virtual void apply(InputArray src, OutputArray dst, Stream& stream) = 0;
};
CV_EXPORTS Ptr<cv::gpu::CLAHE> createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8));
//////////////////////////////// Canny ////////////////////////////////
struct CV_EXPORTS CannyBuf
{
@@ -160,7 +166,7 @@ CV_EXPORTS void Canny(const GpuMat& image, CannyBuf& buf, GpuMat& edges, double
CV_EXPORTS void Canny(const GpuMat& dx, const GpuMat& dy, GpuMat& edges, double low_thresh, double high_thresh, bool L2gradient = false);
CV_EXPORTS void Canny(const GpuMat& dx, const GpuMat& dy, CannyBuf& buf, GpuMat& edges, double low_thresh, double high_thresh, bool L2gradient = false);
//! HoughLines
/////////////////////////// Hough Transform ////////////////////////////
struct HoughLinesBuf
{
@@ -172,13 +178,9 @@ CV_EXPORTS void HoughLines(const GpuMat& src, GpuMat& lines, float rho, float th
CV_EXPORTS void HoughLines(const GpuMat& src, GpuMat& lines, HoughLinesBuf& buf, float rho, float theta, int threshold, bool doSort = false, int maxLines = 4096);
CV_EXPORTS void HoughLinesDownload(const GpuMat& d_lines, OutputArray h_lines, OutputArray h_votes = noArray());
//! HoughLinesP
//! finds line segments in a black-and-white image using the probabilistic Hough transform
CV_EXPORTS void HoughLinesP(const GpuMat& image, GpuMat& lines, HoughLinesBuf& buf, float rho, float theta, int minLineLength, int maxLineGap, int maxLines = 4096);
//! HoughCircles
struct HoughCirclesBuf
{
GpuMat edges;
@@ -223,50 +225,21 @@ private:
CannyBuf cannyBuf_;
};
//! Compute levels with even distribution. levels will have 1 row and nLevels cols and CV_32SC1 type.
CV_EXPORTS void evenLevels(GpuMat& levels, int nLevels, int lowerLevel, int upperLevel);
//! Calculates histogram with evenly distributed bins for a single-channel source.
//! Supports CV_8UC1, CV_16UC1 and CV_16SC1 source types.
//! Output hist will have one row and histSize cols and CV_32SC1 type.
CV_EXPORTS void histEven(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null());
CV_EXPORTS void histEven(const GpuMat& src, GpuMat& hist, GpuMat& buf, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null());
//! Calculates histogram with evenly distributed bins for four-channel source.
//! All channels of source are processed separately.
//! Supports CV_8UC4, CV_16UC4 and CV_16SC4 source types.
//! Output hist[i] will have one row and histSize[i] cols and CV_32SC1 type.
CV_EXPORTS void histEven(const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream = Stream::Null());
CV_EXPORTS void histEven(const GpuMat& src, GpuMat hist[4], GpuMat& buf, int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream = Stream::Null());
//! Calculates a histogram with bins determined by the levels array.
//! levels must have one row and CV_32SC1 type if the source has an integer type, or CV_32FC1 otherwise.
//! Supports CV_8UC1, CV_16UC1, CV_16SC1 and CV_32FC1 source types.
//! Output hist will have one row, (levels.cols-1) cols, and CV_32SC1 type.
CV_EXPORTS void histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, Stream& stream = Stream::Null());
CV_EXPORTS void histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, GpuMat& buf, Stream& stream = Stream::Null());
//! Calculates histograms with bins determined by the levels arrays.
//! Each levels[i] must have one row and CV_32SC1 type if the source has an integer type, or CV_32FC1 otherwise.
//! Each channel of the source is processed separately.
//! Supports CV_8UC4, CV_16UC4, CV_16SC4 and CV_32FC4 source types.
//! Each output hist[i] will have one row, (levels[i].cols-1) cols, and CV_32SC1 type.
CV_EXPORTS void histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], Stream& stream = Stream::Null());
CV_EXPORTS void histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], GpuMat& buf, Stream& stream = Stream::Null());
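// Editorial usage sketch (not part of this header or PR): even-bin and custom-level histograms
// for a CV_8UC1 source. The helper name, bin count and range are illustrative only.
inline void exampleHistograms(const cv::gpu::GpuMat& gray8u)
{
    cv::gpu::GpuMat hist;                                              // 1 x 32, CV_32SC1
    cv::gpu::histEven(gray8u, hist, 32, 0, 256);

    cv::gpu::GpuMat levels;
    cv::gpu::evenLevels(levels, 33, 0, 256);                           // 33 levels -> 32 bins
    cv::gpu::GpuMat histByLevels;                                      // 1 x (levels.cols - 1), CV_32SC1
    cv::gpu::histRange(gray8u, histByLevels, levels);
}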
////////////////////////// Corners Detection ///////////////////////////
//! Calculates a histogram for an 8-bit single-channel image.
//! Output hist will have one row, 256 cols, and CV_32SC1 type.
CV_EXPORTS void calcHist(const GpuMat& src, GpuMat& hist, Stream& stream = Stream::Null());
CV_EXPORTS void calcHist(const GpuMat& src, GpuMat& hist, GpuMat& buf, Stream& stream = Stream::Null());
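// Editorial usage sketch (not part of this header or PR): full 256-bin histogram of a CV_8UC1
// image; the helper name is illustrative only.
inline void exampleCalcHist(const cv::gpu::GpuMat& gray8u)
{
    cv::gpu::GpuMat hist;                                              // 1 x 256, CV_32SC1
    cv::gpu::calcHist(gray8u, hist);
}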
//! computes Harris cornerness criteria at each image pixel
CV_EXPORTS void cornerHarris(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, double k, int borderType = BORDER_REFLECT101);
CV_EXPORTS void cornerHarris(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, double k, int borderType = BORDER_REFLECT101);
CV_EXPORTS void cornerHarris(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize, double k,
int borderType = BORDER_REFLECT101, Stream& stream = Stream::Null());
//! normalizes the grayscale image brightness and contrast by normalizing its histogram
CV_EXPORTS void equalizeHist(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null());
CV_EXPORTS void equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, Stream& stream = Stream::Null());
CV_EXPORTS void equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, GpuMat& buf, Stream& stream = Stream::Null());
//! computes the minimum eigenvalue of the 2x2 derivative covariance matrix at each pixel (the cornerness criterion)
CV_EXPORTS void cornerMinEigenVal(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, int borderType=BORDER_REFLECT101);
CV_EXPORTS void cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, int borderType=BORDER_REFLECT101);
CV_EXPORTS void cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize,
int borderType=BORDER_REFLECT101, Stream& stream = Stream::Null());
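// Editorial usage sketch (not part of this header or PR): Harris response and minimum-eigenvalue
// maps for the same image. The helper name, block size, aperture and k are illustrative only.
inline void exampleCorners(const cv::gpu::GpuMat& gray8u)
{
    cv::gpu::GpuMat harrisResponse, minEigenVal;
    cv::gpu::cornerHarris(gray8u, harrisResponse, 5, 3, 0.04);         // blockSize=5, Sobel ksize=3, k=0.04
    cv::gpu::cornerMinEigenVal(gray8u, minEigenVal, 5, 3);
}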
class CV_EXPORTS CLAHE : public cv::CLAHE
{
public:
using cv::CLAHE::apply;
virtual void apply(InputArray src, OutputArray dst, Stream& stream) = 0;
};
CV_EXPORTS Ptr<cv::gpu::CLAHE> createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8));
////////////////////////// Feature Detection ///////////////////////////
class CV_EXPORTS GoodFeaturesToTrackDetector_GPU
{
@ -315,6 +288,52 @@ inline GoodFeaturesToTrackDetector_GPU::GoodFeaturesToTrackDetector_GPU(int maxC
harrisK = harrisK_;
}
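// Editorial usage sketch (not part of this header or PR): detect up to maxCorners strong corners,
// mirroring the call pattern used in the perf tests below. The helper name and parameter values
// are illustrative only.
inline void exampleGoodFeatures(const cv::gpu::GpuMat& gray8u)
{
    cv::gpu::GoodFeaturesToTrackDetector_GPU detector(4000, 0.01, 3.0);
    cv::gpu::GpuMat corners;                                           // detected corner locations
    detector(gray8u, corners);
}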
///////////////////////////// Mean Shift //////////////////////////////
//! Does mean shift filtering on GPU.
CV_EXPORTS void meanShiftFiltering(const GpuMat& src, GpuMat& dst, int sp, int sr,
TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1),
Stream& stream = Stream::Null());
//! Does mean shift procedure on GPU.
CV_EXPORTS void meanShiftProc(const GpuMat& src, GpuMat& dstr, GpuMat& dstsp, int sp, int sr,
TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1),
Stream& stream = Stream::Null());
//! Does mean shift segmentation with elimination of small regions.
CV_EXPORTS void meanShiftSegmentation(const GpuMat& src, Mat& dst, int sp, int sr, int minsize,
TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1));
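// Editorial usage sketch (not part of this header or PR): mean shift filtering and segmentation
// on a CV_8UC4 image (the perf tests convert BGR to BGRA first). The helper name and radii are
// illustrative only.
inline void exampleMeanShift(const cv::gpu::GpuMat& bgra8u, cv::Mat& segmented)
{
    cv::gpu::GpuMat filtered;
    cv::gpu::meanShiftFiltering(bgra8u, filtered, 20, 20);             // spatial and color window radii
    cv::gpu::meanShiftSegmentation(bgra8u, segmented, 10, 10, 20);     // regions smaller than 20 px are merged
}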
/////////////////////////// Match Template ////////////////////////////
struct CV_EXPORTS MatchTemplateBuf
{
Size user_block_size;
GpuMat imagef, templf;
std::vector<GpuMat> images;
std::vector<GpuMat> image_sums;
std::vector<GpuMat> image_sqsums;
};
//! computes the proximity map for the raster template and the image where the template is searched for
CV_EXPORTS void matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result, int method, Stream &stream = Stream::Null());
//! computes the proximity map for the raster template and the image where the template is searched for
CV_EXPORTS void matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result, int method, MatchTemplateBuf &buf, Stream& stream = Stream::Null());
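// Editorial usage sketch (not part of this header or PR): template matching with and without a
// reusable MatchTemplateBuf. Assumes the cv::TM_* constants from imgproc are visible; the helper
// name and method choice are illustrative only.
inline void exampleMatchTemplate(const cv::gpu::GpuMat& image, const cv::gpu::GpuMat& templ)
{
    cv::gpu::GpuMat response;
    cv::gpu::matchTemplate(image, templ, response, cv::TM_CCORR_NORMED);

    cv::gpu::MatchTemplateBuf buf;                                     // reuse across repeated calls
    cv::gpu::matchTemplate(image, templ, response, cv::TM_CCORR_NORMED, buf);
}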
////////////////////////// Bilateral Filter ///////////////////////////
//! Performs bilateral filtering of the passed image
CV_EXPORTS void bilateralFilter(const GpuMat& src, GpuMat& dst, int kernel_size, float sigma_color, float sigma_spatial,
int borderMode = BORDER_DEFAULT, Stream& stream = Stream::Null());
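// Editorial usage sketch (not part of this header or PR): edge-preserving smoothing; the helper
// name, kernel size and sigmas are illustrative only.
inline void exampleBilateral(const cv::gpu::GpuMat& src)
{
    cv::gpu::GpuMat dst;
    cv::gpu::bilateralFilter(src, dst, 9, 30.0f, 7.0f);                // kernel_size, sigma_color, sigma_spatial
}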
///////////////////////////// Blending ////////////////////////////////
//! performs linear blending of two images
//! to avoid accuracy errors, the sum of weights shouldn't be very close to zero
CV_EXPORTS void blendLinear(const GpuMat& img1, const GpuMat& img2, const GpuMat& weights1, const GpuMat& weights2,
GpuMat& result, Stream& stream = Stream::Null());
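// Editorial usage sketch (not part of this header or PR): per-pixel linear blend with CV_32FC1
// weight maps; the helper name is illustrative only.
inline void exampleBlend(const cv::gpu::GpuMat& img1, const cv::gpu::GpuMat& img2,
                         const cv::gpu::GpuMat& weights1, const cv::gpu::GpuMat& weights2)
{
    cv::gpu::GpuMat blended;
    cv::gpu::blendLinear(img1, img2, weights1, weights2, blended);
}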
}} // namespace cv { namespace gpu {
#endif /* __OPENCV_GPUIMGPROC_HPP__ */

@ -0,0 +1,93 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
using namespace std;
using namespace testing;
using namespace perf;
//////////////////////////////////////////////////////////////////////
// BilateralFilter
DEF_PARAM_TEST(Sz_Depth_Cn_KernelSz, cv::Size, MatDepth, MatCn, int);
PERF_TEST_P(Sz_Depth_Cn_KernelSz, BilateralFilter,
Combine(GPU_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_32F),
GPU_CHANNELS_1_3,
Values(3, 5, 9)))
{
declare.time(60.0);
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
const int channels = GET_PARAM(2);
const int kernel_size = GET_PARAM(3);
const float sigma_color = 7;
const float sigma_spatial = 5;
const int borderMode = cv::BORDER_REFLECT101;
const int type = CV_MAKE_TYPE(depth, channels);
cv::Mat src(size, type);
declare.in(src, WARMUP_RNG);
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
TEST_CYCLE() cv::gpu::bilateralFilter(d_src, dst, kernel_size, sigma_color, sigma_spatial, borderMode);
GPU_SANITY_CHECK(dst);
}
else
{
cv::Mat dst;
TEST_CYCLE() cv::bilateralFilter(src, dst, kernel_size, sigma_color, sigma_spatial, borderMode);
CPU_SANITY_CHECK(dst);
}
}

@ -0,0 +1,86 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
using namespace std;
using namespace testing;
using namespace perf;
//////////////////////////////////////////////////////////////////////
// BlendLinear
PERF_TEST_P(Sz_Depth_Cn, BlendLinear,
Combine(GPU_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_32F),
GPU_CHANNELS_1_3_4))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
const int channels = GET_PARAM(2);
const int type = CV_MAKE_TYPE(depth, channels);
cv::Mat img1(size, type);
cv::Mat img2(size, type);
declare.in(img1, img2, WARMUP_RNG);
const cv::Mat weights1(size, CV_32FC1, cv::Scalar::all(0.5));
const cv::Mat weights2(size, CV_32FC1, cv::Scalar::all(0.5));
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_img1(img1);
const cv::gpu::GpuMat d_img2(img2);
const cv::gpu::GpuMat d_weights1(weights1);
const cv::gpu::GpuMat d_weights2(weights2);
cv::gpu::GpuMat dst;
TEST_CYCLE() cv::gpu::blendLinear(d_img1, d_img2, d_weights1, d_weights2, dst);
GPU_SANITY_CHECK(dst);
}
else
{
FAIL_NO_CPU();
}
}

@ -0,0 +1,87 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
using namespace std;
using namespace testing;
using namespace perf;
//////////////////////////////////////////////////////////////////////
// Canny
DEF_PARAM_TEST(Image_AppertureSz_L2gradient, string, int, bool);
PERF_TEST_P(Image_AppertureSz_L2gradient, Canny,
Combine(Values("perf/800x600.png", "perf/1280x1024.png", "perf/1680x1050.png"),
Values(3, 5),
Bool()))
{
const string fileName = GET_PARAM(0);
const int apperture_size = GET_PARAM(1);
const bool useL2gradient = GET_PARAM(2);
const cv::Mat image = readImage(fileName, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
const double low_thresh = 50.0;
const double high_thresh = 100.0;
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_image(image);
cv::gpu::GpuMat dst;
cv::gpu::CannyBuf d_buf;
TEST_CYCLE() cv::gpu::Canny(d_image, d_buf, dst, low_thresh, high_thresh, apperture_size, useL2gradient);
GPU_SANITY_CHECK(dst);
}
else
{
cv::Mat dst;
TEST_CYCLE() cv::Canny(image, dst, low_thresh, high_thresh, apperture_size, useL2gradient);
CPU_SANITY_CHECK(dst);
}
}

@ -0,0 +1,252 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
using namespace std;
using namespace testing;
using namespace perf;
//////////////////////////////////////////////////////////////////////
// CvtColor
DEF_PARAM_TEST(Sz_Depth_Code, cv::Size, MatDepth, CvtColorInfo);
PERF_TEST_P(Sz_Depth_Code, CvtColor,
Combine(GPU_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_32F),
Values(CvtColorInfo(4, 4, cv::COLOR_RGBA2BGRA),
CvtColorInfo(4, 1, cv::COLOR_BGRA2GRAY),
CvtColorInfo(1, 4, cv::COLOR_GRAY2BGRA),
CvtColorInfo(3, 3, cv::COLOR_BGR2XYZ),
CvtColorInfo(3, 3, cv::COLOR_XYZ2BGR),
CvtColorInfo(3, 3, cv::COLOR_BGR2YCrCb),
CvtColorInfo(3, 3, cv::COLOR_YCrCb2BGR),
CvtColorInfo(3, 3, cv::COLOR_BGR2YUV),
CvtColorInfo(3, 3, cv::COLOR_YUV2BGR),
CvtColorInfo(3, 3, cv::COLOR_BGR2HSV),
CvtColorInfo(3, 3, cv::COLOR_HSV2BGR),
CvtColorInfo(3, 3, cv::COLOR_BGR2HLS),
CvtColorInfo(3, 3, cv::COLOR_HLS2BGR),
CvtColorInfo(3, 3, cv::COLOR_BGR2Lab),
CvtColorInfo(3, 3, cv::COLOR_LBGR2Lab),
CvtColorInfo(3, 3, cv::COLOR_BGR2Luv),
CvtColorInfo(3, 3, cv::COLOR_LBGR2Luv),
CvtColorInfo(3, 3, cv::COLOR_Lab2BGR),
CvtColorInfo(3, 3, cv::COLOR_Lab2LBGR),
CvtColorInfo(3, 3, cv::COLOR_Luv2RGB),
CvtColorInfo(3, 3, cv::COLOR_Luv2LRGB))))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
const CvtColorInfo info = GET_PARAM(2);
cv::Mat src(size, CV_MAKETYPE(depth, info.scn));
cv::randu(src, 0, depth == CV_8U ? 255.0 : 1.0);
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
TEST_CYCLE() cv::gpu::cvtColor(d_src, dst, info.code, info.dcn);
GPU_SANITY_CHECK(dst, 1e-4);
}
else
{
cv::Mat dst;
TEST_CYCLE() cv::cvtColor(src, dst, info.code, info.dcn);
CPU_SANITY_CHECK(dst);
}
}
PERF_TEST_P(Sz_Depth_Code, CvtColorBayer,
Combine(GPU_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U),
Values(CvtColorInfo(1, 3, cv::COLOR_BayerBG2BGR),
CvtColorInfo(1, 3, cv::COLOR_BayerGB2BGR),
CvtColorInfo(1, 3, cv::COLOR_BayerRG2BGR),
CvtColorInfo(1, 3, cv::COLOR_BayerGR2BGR),
CvtColorInfo(1, 1, cv::COLOR_BayerBG2GRAY),
CvtColorInfo(1, 1, cv::COLOR_BayerGB2GRAY),
CvtColorInfo(1, 1, cv::COLOR_BayerRG2GRAY),
CvtColorInfo(1, 1, cv::COLOR_BayerGR2GRAY))))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
const CvtColorInfo info = GET_PARAM(2);
cv::Mat src(size, CV_MAKETYPE(depth, info.scn));
declare.in(src, WARMUP_RNG);
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
TEST_CYCLE() cv::gpu::cvtColor(d_src, dst, info.code, info.dcn);
GPU_SANITY_CHECK(dst);
}
else
{
cv::Mat dst;
TEST_CYCLE() cv::cvtColor(src, dst, info.code, info.dcn);
CPU_SANITY_CHECK(dst);
}
}
//////////////////////////////////////////////////////////////////////
// Demosaicing
CV_ENUM(DemosaicingCode,
cv::COLOR_BayerBG2BGR, cv::COLOR_BayerGB2BGR, cv::COLOR_BayerRG2BGR, cv::COLOR_BayerGR2BGR,
cv::COLOR_BayerBG2GRAY, cv::COLOR_BayerGB2GRAY, cv::COLOR_BayerRG2GRAY, cv::COLOR_BayerGR2GRAY,
cv::gpu::COLOR_BayerBG2BGR_MHT, cv::gpu::COLOR_BayerGB2BGR_MHT, cv::gpu::COLOR_BayerRG2BGR_MHT, cv::gpu::COLOR_BayerGR2BGR_MHT,
cv::gpu::COLOR_BayerBG2GRAY_MHT, cv::gpu::COLOR_BayerGB2GRAY_MHT, cv::gpu::COLOR_BayerRG2GRAY_MHT, cv::gpu::COLOR_BayerGR2GRAY_MHT)
DEF_PARAM_TEST(Sz_Code, cv::Size, DemosaicingCode);
PERF_TEST_P(Sz_Code, Demosaicing,
Combine(GPU_TYPICAL_MAT_SIZES,
DemosaicingCode::all()))
{
const cv::Size size = GET_PARAM(0);
const int code = GET_PARAM(1);
cv::Mat src(size, CV_8UC1);
declare.in(src, WARMUP_RNG);
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
TEST_CYCLE() cv::gpu::demosaicing(d_src, dst, code);
GPU_SANITY_CHECK(dst);
}
else
{
if (code >= cv::COLOR_COLORCVT_MAX)
{
FAIL_NO_CPU();
}
else
{
cv::Mat dst;
TEST_CYCLE() cv::cvtColor(src, dst, code);
CPU_SANITY_CHECK(dst);
}
}
}
//////////////////////////////////////////////////////////////////////
// SwapChannels
PERF_TEST_P(Sz, SwapChannels,
GPU_TYPICAL_MAT_SIZES)
{
const cv::Size size = GetParam();
cv::Mat src(size, CV_8UC4);
declare.in(src, WARMUP_RNG);
const int dstOrder[] = {2, 1, 0, 3};
if (PERF_RUN_GPU())
{
cv::gpu::GpuMat dst(src);
TEST_CYCLE() cv::gpu::swapChannels(dst, dstOrder);
GPU_SANITY_CHECK(dst);
}
else
{
FAIL_NO_CPU();
}
}
//////////////////////////////////////////////////////////////////////
// AlphaComp
CV_ENUM(AlphaOp, cv::gpu::ALPHA_OVER, cv::gpu::ALPHA_IN, cv::gpu::ALPHA_OUT, cv::gpu::ALPHA_ATOP, cv::gpu::ALPHA_XOR, cv::gpu::ALPHA_PLUS, cv::gpu::ALPHA_OVER_PREMUL, cv::gpu::ALPHA_IN_PREMUL, cv::gpu::ALPHA_OUT_PREMUL, cv::gpu::ALPHA_ATOP_PREMUL, cv::gpu::ALPHA_XOR_PREMUL, cv::gpu::ALPHA_PLUS_PREMUL, cv::gpu::ALPHA_PREMUL)
DEF_PARAM_TEST(Sz_Type_Op, cv::Size, MatType, AlphaOp);
PERF_TEST_P(Sz_Type_Op, AlphaComp,
Combine(GPU_TYPICAL_MAT_SIZES,
Values(CV_8UC4, CV_16UC4, CV_32SC4, CV_32FC4),
AlphaOp::all()))
{
const cv::Size size = GET_PARAM(0);
const int type = GET_PARAM(1);
const int alpha_op = GET_PARAM(2);
cv::Mat img1(size, type);
cv::Mat img2(size, type);
declare.in(img1, img2, WARMUP_RNG);
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_img1(img1);
const cv::gpu::GpuMat d_img2(img2);
cv::gpu::GpuMat dst;
TEST_CYCLE() cv::gpu::alphaComp(d_img1, d_img2, dst, alpha_op);
GPU_SANITY_CHECK(dst, 1e-3, ERROR_RELATIVE);
}
else
{
FAIL_NO_CPU();
}
}

@ -0,0 +1,137 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
using namespace std;
using namespace testing;
using namespace perf;
//////////////////////////////////////////////////////////////////////
// CornerHarris
DEF_PARAM_TEST(Image_Type_Border_BlockSz_ApertureSz, string, MatType, BorderMode, int, int);
PERF_TEST_P(Image_Type_Border_BlockSz_ApertureSz, CornerHarris,
Combine(Values<string>("gpu/stereobm/aloe-L.png"),
Values(CV_8UC1, CV_32FC1),
Values(BorderMode(cv::BORDER_REFLECT101), BorderMode(cv::BORDER_REPLICATE), BorderMode(cv::BORDER_REFLECT)),
Values(3, 5, 7),
Values(0, 3, 5, 7)))
{
const string fileName = GET_PARAM(0);
const int type = GET_PARAM(1);
const int borderMode = GET_PARAM(2);
const int blockSize = GET_PARAM(3);
const int apertureSize = GET_PARAM(4);
cv::Mat img = readImage(fileName, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
img.convertTo(img, type, type == CV_32F ? 1.0 / 255.0 : 1.0);
const double k = 0.5;
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_img(img);
cv::gpu::GpuMat dst;
cv::gpu::GpuMat d_Dx;
cv::gpu::GpuMat d_Dy;
cv::gpu::GpuMat d_buf;
TEST_CYCLE() cv::gpu::cornerHarris(d_img, dst, d_Dx, d_Dy, d_buf, blockSize, apertureSize, k, borderMode);
GPU_SANITY_CHECK(dst, 1e-4);
}
else
{
cv::Mat dst;
TEST_CYCLE() cv::cornerHarris(img, dst, blockSize, apertureSize, k, borderMode);
CPU_SANITY_CHECK(dst);
}
}
//////////////////////////////////////////////////////////////////////
// CornerMinEigenVal
PERF_TEST_P(Image_Type_Border_BlockSz_ApertureSz, CornerMinEigenVal,
Combine(Values<string>("gpu/stereobm/aloe-L.png"),
Values(CV_8UC1, CV_32FC1),
Values(BorderMode(cv::BORDER_REFLECT101), BorderMode(cv::BORDER_REPLICATE), BorderMode(cv::BORDER_REFLECT)),
Values(3, 5, 7),
Values(0, 3, 5, 7)))
{
const string fileName = GET_PARAM(0);
const int type = GET_PARAM(1);
const int borderMode = GET_PARAM(2);
const int blockSize = GET_PARAM(3);
const int apertureSize = GET_PARAM(4);
cv::Mat img = readImage(fileName, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
img.convertTo(img, type, type == CV_32F ? 1.0 / 255.0 : 1.0);
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_img(img);
cv::gpu::GpuMat dst;
cv::gpu::GpuMat d_Dx;
cv::gpu::GpuMat d_Dy;
cv::gpu::GpuMat d_buf;
TEST_CYCLE() cv::gpu::cornerMinEigenVal(d_img, dst, d_Dx, d_Dy, d_buf, blockSize, apertureSize, borderMode);
GPU_SANITY_CHECK(dst, 1e-4);
}
else
{
cv::Mat dst;
TEST_CYCLE() cv::cornerMinEigenVal(img, dst, blockSize, apertureSize, borderMode);
CPU_SANITY_CHECK(dst);
}
}

@ -0,0 +1,86 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
using namespace std;
using namespace testing;
using namespace perf;
//////////////////////////////////////////////////////
// GoodFeaturesToTrack
DEF_PARAM_TEST(Image_MinDistance, string, double);
PERF_TEST_P(Image_MinDistance, GoodFeaturesToTrack,
Combine(Values<string>("gpu/perf/aloe.png"),
Values(0.0, 3.0)))
{
const string fileName = GET_PARAM(0);
const double minDistance = GET_PARAM(1);
const cv::Mat image = readImage(fileName, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
const int maxCorners = 8000;
const double qualityLevel = 0.01;
if (PERF_RUN_GPU())
{
cv::gpu::GoodFeaturesToTrackDetector_GPU d_detector(maxCorners, qualityLevel, minDistance);
const cv::gpu::GpuMat d_image(image);
cv::gpu::GpuMat pts;
TEST_CYCLE() d_detector(d_image, pts);
GPU_SANITY_CHECK(pts);
}
else
{
cv::Mat pts;
TEST_CYCLE() cv::goodFeaturesToTrack(image, pts, maxCorners, qualityLevel, minDistance);
CPU_SANITY_CHECK(pts);
}
}

@ -0,0 +1,221 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
using namespace std;
using namespace testing;
using namespace perf;
//////////////////////////////////////////////////////////////////////
// HistEvenC1
PERF_TEST_P(Sz_Depth, HistEvenC1,
Combine(GPU_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_16S)))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
cv::Mat src(size, depth);
declare.in(src, WARMUP_RNG);
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
cv::gpu::GpuMat d_buf;
TEST_CYCLE() cv::gpu::histEven(d_src, dst, d_buf, 30, 0, 180);
GPU_SANITY_CHECK(dst);
}
else
{
const int hbins = 30;
const float hranges[] = {0.0f, 180.0f};
const int histSize[] = {hbins};
const float* ranges[] = {hranges};
const int channels[] = {0};
cv::Mat dst;
TEST_CYCLE() cv::calcHist(&src, 1, channels, cv::Mat(), dst, 1, histSize, ranges);
CPU_SANITY_CHECK(dst);
}
}
//////////////////////////////////////////////////////////////////////
// HistEvenC4
PERF_TEST_P(Sz_Depth, HistEvenC4,
Combine(GPU_TYPICAL_MAT_SIZES,
Values(CV_8U, CV_16U, CV_16S)))
{
const cv::Size size = GET_PARAM(0);
const int depth = GET_PARAM(1);
cv::Mat src(size, CV_MAKE_TYPE(depth, 4));
declare.in(src, WARMUP_RNG);
int histSize[] = {30, 30, 30, 30};
int lowerLevel[] = {0, 0, 0, 0};
int upperLevel[] = {180, 180, 180, 180};
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_hist[4];
cv::gpu::GpuMat d_buf;
TEST_CYCLE() cv::gpu::histEven(d_src, d_hist, d_buf, histSize, lowerLevel, upperLevel);
cv::Mat cpu_hist0, cpu_hist1, cpu_hist2, cpu_hist3;
d_hist[0].download(cpu_hist0);
d_hist[1].download(cpu_hist1);
d_hist[2].download(cpu_hist2);
d_hist[3].download(cpu_hist3);
SANITY_CHECK(cpu_hist0);
SANITY_CHECK(cpu_hist1);
SANITY_CHECK(cpu_hist2);
SANITY_CHECK(cpu_hist3);
}
else
{
FAIL_NO_CPU();
}
}
//////////////////////////////////////////////////////////////////////
// CalcHist
PERF_TEST_P(Sz, CalcHist,
GPU_TYPICAL_MAT_SIZES)
{
const cv::Size size = GetParam();
cv::Mat src(size, CV_8UC1);
declare.in(src, WARMUP_RNG);
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
TEST_CYCLE() cv::gpu::calcHist(d_src, dst);
GPU_SANITY_CHECK(dst);
}
else
{
FAIL_NO_CPU();
}
}
//////////////////////////////////////////////////////////////////////
// EqualizeHist
PERF_TEST_P(Sz, EqualizeHist,
GPU_TYPICAL_MAT_SIZES)
{
const cv::Size size = GetParam();
cv::Mat src(size, CV_8UC1);
declare.in(src, WARMUP_RNG);
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
cv::gpu::GpuMat d_hist;
cv::gpu::GpuMat d_buf;
TEST_CYCLE() cv::gpu::equalizeHist(d_src, dst, d_hist, d_buf);
GPU_SANITY_CHECK(dst);
}
else
{
cv::Mat dst;
TEST_CYCLE() cv::equalizeHist(src, dst);
CPU_SANITY_CHECK(dst);
}
}
//////////////////////////////////////////////////////////////////////
// CLAHE
DEF_PARAM_TEST(Sz_ClipLimit, cv::Size, double);
PERF_TEST_P(Sz_ClipLimit, CLAHE,
Combine(GPU_TYPICAL_MAT_SIZES,
Values(0.0, 40.0)))
{
const cv::Size size = GET_PARAM(0);
const double clipLimit = GET_PARAM(1);
cv::Mat src(size, CV_8UC1);
declare.in(src, WARMUP_RNG);
if (PERF_RUN_GPU())
{
cv::Ptr<cv::gpu::CLAHE> clahe = cv::gpu::createCLAHE(clipLimit);
cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat dst;
TEST_CYCLE() clahe->apply(d_src, dst);
GPU_SANITY_CHECK(dst);
}
else
{
cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(clipLimit);
cv::Mat dst;
TEST_CYCLE() clahe->apply(src, dst);
CPU_SANITY_CHECK(dst);
}
}

@ -0,0 +1,317 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
using namespace std;
using namespace testing;
using namespace perf;
//////////////////////////////////////////////////////////////////////
// HoughLines
namespace
{
struct Vec4iComparator
{
bool operator()(const cv::Vec4i& a, const cv::Vec4i& b) const
{
if (a[0] != b[0]) return a[0] < b[0];
else if(a[1] != b[1]) return a[1] < b[1];
else if(a[2] != b[2]) return a[2] < b[2];
else return a[3] < b[3];
}
};
struct Vec3fComparator
{
bool operator()(const cv::Vec3f& a, const cv::Vec3f& b) const
{
if(a[0] != b[0]) return a[0] < b[0];
else if(a[1] != b[1]) return a[1] < b[1];
else return a[2] < b[2];
}
};
struct Vec2fComparator
{
bool operator()(const cv::Vec2f& a, const cv::Vec2f& b) const
{
if(a[0] != b[0]) return a[0] < b[0];
else return a[1] < b[1];
}
};
}
PERF_TEST_P(Sz, HoughLines,
GPU_TYPICAL_MAT_SIZES)
{
declare.time(30.0);
const cv::Size size = GetParam();
const float rho = 1.0f;
const float theta = static_cast<float>(CV_PI / 180.0);
const int threshold = 300;
cv::Mat src(size, CV_8UC1, cv::Scalar::all(0));
cv::line(src, cv::Point(0, 100), cv::Point(src.cols, 100), cv::Scalar::all(255), 1);
cv::line(src, cv::Point(0, 200), cv::Point(src.cols, 200), cv::Scalar::all(255), 1);
cv::line(src, cv::Point(0, 400), cv::Point(src.cols, 400), cv::Scalar::all(255), 1);
cv::line(src, cv::Point(100, 0), cv::Point(100, src.rows), cv::Scalar::all(255), 1);
cv::line(src, cv::Point(200, 0), cv::Point(200, src.rows), cv::Scalar::all(255), 1);
cv::line(src, cv::Point(400, 0), cv::Point(400, src.rows), cv::Scalar::all(255), 1);
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_lines;
cv::gpu::HoughLinesBuf d_buf;
TEST_CYCLE() cv::gpu::HoughLines(d_src, d_lines, d_buf, rho, theta, threshold);
cv::Mat gpu_lines(d_lines.row(0));
cv::Vec2f* begin = gpu_lines.ptr<cv::Vec2f>(0);
cv::Vec2f* end = begin + gpu_lines.cols;
std::sort(begin, end, Vec2fComparator());
SANITY_CHECK(gpu_lines);
}
else
{
std::vector<cv::Vec2f> cpu_lines;
TEST_CYCLE() cv::HoughLines(src, cpu_lines, rho, theta, threshold);
SANITY_CHECK(cpu_lines);
}
}
//////////////////////////////////////////////////////////////////////
// HoughLinesP
DEF_PARAM_TEST_1(Image, std::string);
PERF_TEST_P(Image, HoughLinesP,
testing::Values("cv/shared/pic5.png", "stitching/a1.png"))
{
declare.time(30.0);
const std::string fileName = getDataPath(GetParam());
const float rho = 1.0f;
const float theta = static_cast<float>(CV_PI / 180.0);
const int threshold = 100;
const int minLineLength = 50;
const int maxLineGap = 5;
const cv::Mat image = cv::imread(fileName, cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
cv::Mat mask;
cv::Canny(image, mask, 50, 100);
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_mask(mask);
cv::gpu::GpuMat d_lines;
cv::gpu::HoughLinesBuf d_buf;
TEST_CYCLE() cv::gpu::HoughLinesP(d_mask, d_lines, d_buf, rho, theta, minLineLength, maxLineGap);
cv::Mat gpu_lines(d_lines);
cv::Vec4i* begin = gpu_lines.ptr<cv::Vec4i>();
cv::Vec4i* end = begin + gpu_lines.cols;
std::sort(begin, end, Vec4iComparator());
SANITY_CHECK(gpu_lines);
}
else
{
std::vector<cv::Vec4i> cpu_lines;
TEST_CYCLE() cv::HoughLinesP(mask, cpu_lines, rho, theta, threshold, minLineLength, maxLineGap);
SANITY_CHECK(cpu_lines);
}
}
//////////////////////////////////////////////////////////////////////
// HoughCircles
DEF_PARAM_TEST(Sz_Dp_MinDist, cv::Size, float, float);
PERF_TEST_P(Sz_Dp_MinDist, HoughCircles,
Combine(GPU_TYPICAL_MAT_SIZES,
Values(1.0f, 2.0f, 4.0f),
Values(1.0f)))
{
declare.time(30.0);
const cv::Size size = GET_PARAM(0);
const float dp = GET_PARAM(1);
const float minDist = GET_PARAM(2);
const int minRadius = 10;
const int maxRadius = 30;
const int cannyThreshold = 100;
const int votesThreshold = 15;
cv::Mat src(size, CV_8UC1, cv::Scalar::all(0));
cv::circle(src, cv::Point(100, 100), 20, cv::Scalar::all(255), -1);
cv::circle(src, cv::Point(200, 200), 25, cv::Scalar::all(255), -1);
cv::circle(src, cv::Point(200, 100), 25, cv::Scalar::all(255), -1);
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_circles;
cv::gpu::HoughCirclesBuf d_buf;
TEST_CYCLE() cv::gpu::HoughCircles(d_src, d_circles, d_buf, cv::HOUGH_GRADIENT, dp, minDist, cannyThreshold, votesThreshold, minRadius, maxRadius);
cv::Mat gpu_circles(d_circles);
cv::Vec3f* begin = gpu_circles.ptr<cv::Vec3f>(0);
cv::Vec3f* end = begin + gpu_circles.cols;
std::sort(begin, end, Vec3fComparator());
SANITY_CHECK(gpu_circles);
}
else
{
std::vector<cv::Vec3f> cpu_circles;
TEST_CYCLE() cv::HoughCircles(src, cpu_circles, cv::HOUGH_GRADIENT, dp, minDist, cannyThreshold, votesThreshold, minRadius, maxRadius);
SANITY_CHECK(cpu_circles);
}
}
//////////////////////////////////////////////////////////////////////
// GeneralizedHough
enum { GHT_POSITION = cv::GeneralizedHough::GHT_POSITION,
GHT_SCALE = cv::GeneralizedHough::GHT_SCALE,
GHT_ROTATION = cv::GeneralizedHough::GHT_ROTATION
};
CV_FLAGS(GHMethod, GHT_POSITION, GHT_SCALE, GHT_ROTATION);
DEF_PARAM_TEST(Method_Sz, GHMethod, cv::Size);
PERF_TEST_P(Method_Sz, GeneralizedHough,
Combine(Values(GHMethod(GHT_POSITION), GHMethod(GHT_POSITION | GHT_SCALE), GHMethod(GHT_POSITION | GHT_ROTATION), GHMethod(GHT_POSITION | GHT_SCALE | GHT_ROTATION)),
GPU_TYPICAL_MAT_SIZES))
{
declare.time(10);
const int method = GET_PARAM(0);
const cv::Size imageSize = GET_PARAM(1);
const cv::Mat templ = readImage("cv/shared/templ.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(templ.empty());
cv::Mat image(imageSize, CV_8UC1, cv::Scalar::all(0));
templ.copyTo(image(cv::Rect(50, 50, templ.cols, templ.rows)));
cv::RNG rng(123456789);
const int objCount = rng.uniform(5, 15);
for (int i = 0; i < objCount; ++i)
{
double scale = rng.uniform(0.7, 1.3);
bool rotate = 1 == rng.uniform(0, 2);
cv::Mat obj;
cv::resize(templ, obj, cv::Size(), scale, scale);
if (rotate)
obj = obj.t();
cv::Point pos;
pos.x = rng.uniform(0, image.cols - obj.cols);
pos.y = rng.uniform(0, image.rows - obj.rows);
cv::Mat roi = image(cv::Rect(pos, obj.size()));
cv::add(roi, obj, roi);
}
cv::Mat edges;
cv::Canny(image, edges, 50, 100);
cv::Mat dx, dy;
cv::Sobel(image, dx, CV_32F, 1, 0);
cv::Sobel(image, dy, CV_32F, 0, 1);
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_edges(edges);
const cv::gpu::GpuMat d_dx(dx);
const cv::gpu::GpuMat d_dy(dy);
cv::gpu::GpuMat posAndVotes;
cv::Ptr<cv::gpu::GeneralizedHough_GPU> d_hough = cv::gpu::GeneralizedHough_GPU::create(method);
if (method & GHT_ROTATION)
{
d_hough->set("maxAngle", 90.0);
d_hough->set("angleStep", 2.0);
}
d_hough->setTemplate(cv::gpu::GpuMat(templ));
TEST_CYCLE() d_hough->detect(d_edges, d_dx, d_dy, posAndVotes);
const cv::gpu::GpuMat positions(1, posAndVotes.cols, CV_32FC4, posAndVotes.data);
GPU_SANITY_CHECK(positions);
}
else
{
cv::Mat positions;
cv::Ptr<cv::GeneralizedHough> hough = cv::GeneralizedHough::create(method);
if (method & GHT_ROTATION)
{
hough->set("maxAngle", 90.0);
hough->set("angleStep", 2.0);
}
hough->setTemplate(templ);
TEST_CYCLE() hough->detect(edges, dx, dy, positions);
CPU_SANITY_CHECK(positions);
}
}

File diff suppressed because it is too large

@ -0,0 +1,131 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
using namespace std;
using namespace testing;
using namespace perf;
////////////////////////////////////////////////////////////////////////////////
// MatchTemplate8U
CV_ENUM(TemplateMethod, TM_SQDIFF, TM_SQDIFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_CCOEFF, TM_CCOEFF_NORMED)
DEF_PARAM_TEST(Sz_TemplateSz_Cn_Method, cv::Size, cv::Size, MatCn, TemplateMethod);
PERF_TEST_P(Sz_TemplateSz_Cn_Method, MatchTemplate8U,
Combine(GPU_TYPICAL_MAT_SIZES,
Values(cv::Size(5, 5), cv::Size(16, 16), cv::Size(30, 30)),
GPU_CHANNELS_1_3_4,
TemplateMethod::all()))
{
declare.time(300.0);
const cv::Size size = GET_PARAM(0);
const cv::Size templ_size = GET_PARAM(1);
const int cn = GET_PARAM(2);
const int method = GET_PARAM(3);
cv::Mat image(size, CV_MAKE_TYPE(CV_8U, cn));
cv::Mat templ(templ_size, CV_MAKE_TYPE(CV_8U, cn));
declare.in(image, templ, WARMUP_RNG);
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_image(image);
const cv::gpu::GpuMat d_templ(templ);
cv::gpu::GpuMat dst;
TEST_CYCLE() cv::gpu::matchTemplate(d_image, d_templ, dst, method);
GPU_SANITY_CHECK(dst, 1e-5, ERROR_RELATIVE);
}
else
{
cv::Mat dst;
TEST_CYCLE() cv::matchTemplate(image, templ, dst, method);
CPU_SANITY_CHECK(dst);
}
}
////////////////////////////////////////////////////////////////////////////////
// MatchTemplate32F
PERF_TEST_P(Sz_TemplateSz_Cn_Method, MatchTemplate32F,
Combine(GPU_TYPICAL_MAT_SIZES,
Values(cv::Size(5, 5), cv::Size(16, 16), cv::Size(30, 30)),
GPU_CHANNELS_1_3_4,
Values(TemplateMethod(cv::TM_SQDIFF), TemplateMethod(cv::TM_CCORR))))
{
declare.time(300.0);
const cv::Size size = GET_PARAM(0);
const cv::Size templ_size = GET_PARAM(1);
const int cn = GET_PARAM(2);
int method = GET_PARAM(3);
cv::Mat image(size, CV_MAKE_TYPE(CV_32F, cn));
cv::Mat templ(templ_size, CV_MAKE_TYPE(CV_32F, cn));
declare.in(image, templ, WARMUP_RNG);
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_image(image);
const cv::gpu::GpuMat d_templ(templ);
cv::gpu::GpuMat dst;
TEST_CYCLE() cv::gpu::matchTemplate(d_image, d_templ, dst, method);
GPU_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
}
else
{
cv::Mat dst;
TEST_CYCLE() cv::matchTemplate(image, templ, dst, method);
CPU_SANITY_CHECK(dst);
}
}

@ -0,0 +1,152 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
using namespace std;
using namespace testing;
using namespace perf;
//////////////////////////////////////////////////////////////////////
// MeanShiftFiltering
DEF_PARAM_TEST_1(Image, string);
PERF_TEST_P(Image, MeanShiftFiltering,
Values<string>("gpu/meanshift/cones.png"))
{
declare.time(300.0);
const cv::Mat img = readImage(GetParam());
ASSERT_FALSE(img.empty());
cv::Mat rgba;
cv::cvtColor(img, rgba, cv::COLOR_BGR2BGRA);
const int sp = 50;
const int sr = 50;
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(rgba);
cv::gpu::GpuMat dst;
TEST_CYCLE() cv::gpu::meanShiftFiltering(d_src, dst, sp, sr);
GPU_SANITY_CHECK(dst);
}
else
{
cv::Mat dst;
TEST_CYCLE() cv::pyrMeanShiftFiltering(img, dst, sp, sr);
CPU_SANITY_CHECK(dst);
}
}
//////////////////////////////////////////////////////////////////////
// MeanShiftProc
PERF_TEST_P(Image, MeanShiftProc,
Values<string>("gpu/meanshift/cones.png"))
{
declare.time(300.0);
const cv::Mat img = readImage(GetParam());
ASSERT_FALSE(img.empty());
cv::Mat rgba;
cv::cvtColor(img, rgba, cv::COLOR_BGR2BGRA);
const int sp = 50;
const int sr = 50;
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(rgba);
cv::gpu::GpuMat dstr;
cv::gpu::GpuMat dstsp;
TEST_CYCLE() cv::gpu::meanShiftProc(d_src, dstr, dstsp, sp, sr);
GPU_SANITY_CHECK(dstr);
GPU_SANITY_CHECK(dstsp);
}
else
{
FAIL_NO_CPU();
}
}
//////////////////////////////////////////////////////////////////////
// MeanShiftSegmentation
PERF_TEST_P(Image, MeanShiftSegmentation,
Values<string>("gpu/meanshift/cones.png"))
{
declare.time(300.0);
const cv::Mat img = readImage(GetParam());
ASSERT_FALSE(img.empty());
cv::Mat rgba;
cv::cvtColor(img, rgba, cv::COLOR_BGR2BGRA);
const int sp = 10;
const int sr = 10;
const int minsize = 20;
if (PERF_RUN_GPU())
{
const cv::gpu::GpuMat d_src(rgba);
cv::Mat dst;
TEST_CYCLE() cv::gpu::meanShiftSegmentation(d_src, dst, sp, sr, minsize);
GPU_SANITY_CHECK(dst);
}
else
{
FAIL_NO_CPU();
}
}

@ -51,6 +51,9 @@ void cv::gpu::blendLinear(const GpuMat&, const GpuMat&, const GpuMat&, const Gpu
#else
////////////////////////////////////////////////////////////////////////
// blendLinear
namespace cv { namespace gpu { namespace cudev
{
namespace blend

@ -0,0 +1,186 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
using namespace cv;
using namespace cv::gpu;
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
void cv::gpu::Canny(const GpuMat&, GpuMat&, double, double, int, bool) { throw_no_cuda(); }
void cv::gpu::Canny(const GpuMat&, CannyBuf&, GpuMat&, double, double, int, bool) { throw_no_cuda(); }
void cv::gpu::Canny(const GpuMat&, const GpuMat&, GpuMat&, double, double, bool) { throw_no_cuda(); }
void cv::gpu::Canny(const GpuMat&, const GpuMat&, CannyBuf&, GpuMat&, double, double, bool) { throw_no_cuda(); }
void cv::gpu::CannyBuf::create(const Size&, int) { throw_no_cuda(); }
void cv::gpu::CannyBuf::release() { throw_no_cuda(); }
#else /* !defined (HAVE_CUDA) */
void cv::gpu::CannyBuf::create(const Size& image_size, int apperture_size)
{
if (apperture_size > 0)
{
ensureSizeIsEnough(image_size, CV_32SC1, dx);
ensureSizeIsEnough(image_size, CV_32SC1, dy);
if (apperture_size != 3)
{
filterDX = createDerivFilter_GPU(CV_8UC1, CV_32S, 1, 0, apperture_size, BORDER_REPLICATE);
filterDY = createDerivFilter_GPU(CV_8UC1, CV_32S, 0, 1, apperture_size, BORDER_REPLICATE);
}
}
ensureSizeIsEnough(image_size, CV_32FC1, mag);
ensureSizeIsEnough(image_size, CV_32SC1, map);
ensureSizeIsEnough(1, image_size.area(), CV_16UC2, st1);
ensureSizeIsEnough(1, image_size.area(), CV_16UC2, st2);
}
void cv::gpu::CannyBuf::release()
{
dx.release();
dy.release();
mag.release();
map.release();
st1.release();
st2.release();
}
namespace canny
{
void calcMagnitude(PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, bool L2Grad);
void calcMagnitude(PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, bool L2Grad);
void calcMap(PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, PtrStepSzi map, float low_thresh, float high_thresh);
void edgesHysteresisLocal(PtrStepSzi map, ushort2* st1);
void edgesHysteresisGlobal(PtrStepSzi map, ushort2* st1, ushort2* st2);
void getEdges(PtrStepSzi map, PtrStepSzb dst);
}
namespace
{
void CannyCaller(const GpuMat& dx, const GpuMat& dy, CannyBuf& buf, GpuMat& dst, float low_thresh, float high_thresh)
{
using namespace canny;
buf.map.setTo(Scalar::all(0));
calcMap(dx, dy, buf.mag, buf.map, low_thresh, high_thresh);
edgesHysteresisLocal(buf.map, buf.st1.ptr<ushort2>());
edgesHysteresisGlobal(buf.map, buf.st1.ptr<ushort2>(), buf.st2.ptr<ushort2>());
getEdges(buf.map, dst);
}
}
void cv::gpu::Canny(const GpuMat& src, GpuMat& dst, double low_thresh, double high_thresh, int apperture_size, bool L2gradient)
{
CannyBuf buf;
Canny(src, buf, dst, low_thresh, high_thresh, apperture_size, L2gradient);
}
void cv::gpu::Canny(const GpuMat& src, CannyBuf& buf, GpuMat& dst, double low_thresh, double high_thresh, int apperture_size, bool L2gradient)
{
using namespace canny;
CV_Assert(src.type() == CV_8UC1);
if (!deviceSupports(SHARED_ATOMICS))
CV_Error(cv::Error::StsNotImplemented, "The device doesn't support shared atomics");
if( low_thresh > high_thresh )
std::swap( low_thresh, high_thresh);
dst.create(src.size(), CV_8U);
buf.create(src.size(), apperture_size);
if (apperture_size == 3)
{
Size wholeSize;
Point ofs;
src.locateROI(wholeSize, ofs);
GpuMat srcWhole(wholeSize, src.type(), src.datastart, src.step);
calcMagnitude(srcWhole, ofs.x, ofs.y, buf.dx, buf.dy, buf.mag, L2gradient);
}
else
{
buf.filterDX->apply(src, buf.dx, Rect(0, 0, src.cols, src.rows));
buf.filterDY->apply(src, buf.dy, Rect(0, 0, src.cols, src.rows));
calcMagnitude(buf.dx, buf.dy, buf.mag, L2gradient);
}
CannyCaller(buf.dx, buf.dy, buf, dst, static_cast<float>(low_thresh), static_cast<float>(high_thresh));
}
void cv::gpu::Canny(const GpuMat& dx, const GpuMat& dy, GpuMat& dst, double low_thresh, double high_thresh, bool L2gradient)
{
CannyBuf buf;
Canny(dx, dy, buf, dst, low_thresh, high_thresh, L2gradient);
}
void cv::gpu::Canny(const GpuMat& dx, const GpuMat& dy, CannyBuf& buf, GpuMat& dst, double low_thresh, double high_thresh, bool L2gradient)
{
using namespace canny;
CV_Assert(TargetArchs::builtWith(SHARED_ATOMICS) && DeviceInfo().supports(SHARED_ATOMICS));
CV_Assert(dx.type() == CV_32SC1 && dy.type() == CV_32SC1 && dx.size() == dy.size());
if( low_thresh > high_thresh )
std::swap( low_thresh, high_thresh);
dst.create(dx.size(), CV_8U);
buf.create(dx.size(), -1);
calcMagnitude(dx, dy, buf.mag, L2gradient);
CannyCaller(dx, dy, buf, dst, static_cast<float>(low_thresh), static_cast<float>(high_thresh));
}
#endif /* !defined (HAVE_CUDA) */
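A minimal usage sketch for the buffered Canny overload above (illustration only, not part of the commit; the function name, frame name, and threshold values are assumed):

#include "opencv2/gpuimgproc.hpp"

// Reusing one CannyBuf across frames avoids reallocating the dx/dy/mag/map buffers each call.
void detectEdges(const cv::gpu::GpuMat& gray8u, cv::gpu::GpuMat& edges, cv::gpu::CannyBuf& buf)
{
    // gray8u must be CV_8UC1; low/high thresholds are swapped internally if reversed
    cv::gpu::Canny(gray8u, buf, edges, 50.0, 150.0, 3, false);
}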

@ -48,10 +48,16 @@ using namespace cv::gpu;
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
void cv::gpu::cvtColor(const GpuMat&, GpuMat&, int, int, Stream&) { throw_no_cuda(); }
void cv::gpu::demosaicing(const GpuMat&, GpuMat&, int, int, Stream&) { throw_no_cuda(); }
void cv::gpu::swapChannels(GpuMat&, const int[], Stream&) { throw_no_cuda(); }
void cv::gpu::gammaCorrection(const GpuMat&, GpuMat&, bool, Stream&) { throw_no_cuda(); }
void cv::gpu::alphaComp(const GpuMat&, const GpuMat&, GpuMat&, int, Stream&) { throw_no_cuda(); }
#else /* !defined (HAVE_CUDA) */
#include "cvt_color_internal.h"
@ -1581,7 +1587,7 @@ namespace
(void)src;
(void)dst;
(void)st;
CV_Error( CV_StsBadFlag, "Unknown/unsupported color conversion code" );
CV_Error( cv::Error::StsBadFlag, "Unknown/unsupported color conversion code" );
#else
CV_Assert(src.type() == CV_8UC4 || src.type() == CV_16UC4);
@ -1676,6 +1682,9 @@ namespace
}
}
////////////////////////////////////////////////////////////////////////
// cvtColor
void cv::gpu::cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn, Stream& stream)
{
typedef void (*func_t)(const GpuMat& src, GpuMat& dst, int dcn, Stream& stream);
@ -1859,6 +1868,9 @@ void cv::gpu::cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn, Stream
func(src, dst, dcn, stream);
}
////////////////////////////////////////////////////////////////////////
// demosaicing
void cv::gpu::demosaicing(const GpuMat& src, GpuMat& dst, int code, int dcn, Stream& stream)
{
const int depth = src.depth();
@ -1927,6 +1939,9 @@ void cv::gpu::demosaicing(const GpuMat& src, GpuMat& dst, int code, int dcn, Str
}
}
////////////////////////////////////////////////////////////////////////
// swapChannels
void cv::gpu::swapChannels(GpuMat& image, const int dstOrder[4], Stream& s)
{
CV_Assert(image.type() == CV_8UC4);
@ -1945,6 +1960,9 @@ void cv::gpu::swapChannels(GpuMat& image, const int dstOrder[4], Stream& s)
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// gammaCorrection
void cv::gpu::gammaCorrection(const GpuMat& src, GpuMat& dst, bool forward, Stream& stream)
{
#if (CUDA_VERSION < 5000)
@ -1986,4 +2004,77 @@ void cv::gpu::gammaCorrection(const GpuMat& src, GpuMat& dst, bool forward, Stre
#endif
}
////////////////////////////////////////////////////////////////////////
// alphaComp
namespace
{
template <int DEPTH> struct NppAlphaCompFunc
{
typedef typename NPPTypeTraits<DEPTH>::npp_type npp_t;
typedef NppStatus (*func_t)(const npp_t* pSrc1, int nSrc1Step, const npp_t* pSrc2, int nSrc2Step, npp_t* pDst, int nDstStep, NppiSize oSizeROI, NppiAlphaOp eAlphaOp);
};
template <int DEPTH, typename NppAlphaCompFunc<DEPTH>::func_t func> struct NppAlphaComp
{
typedef typename NPPTypeTraits<DEPTH>::npp_type npp_t;
static void call(const GpuMat& img1, const GpuMat& img2, GpuMat& dst, NppiAlphaOp eAlphaOp, cudaStream_t stream)
{
NppStreamHandler h(stream);
NppiSize oSizeROI;
oSizeROI.width = img1.cols;
oSizeROI.height = img2.rows;
nppSafeCall( func(img1.ptr<npp_t>(), static_cast<int>(img1.step), img2.ptr<npp_t>(), static_cast<int>(img2.step),
dst.ptr<npp_t>(), static_cast<int>(dst.step), oSizeROI, eAlphaOp) );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
};
}
void cv::gpu::alphaComp(const GpuMat& img1, const GpuMat& img2, GpuMat& dst, int alpha_op, Stream& stream)
{
static const NppiAlphaOp npp_alpha_ops[] = {
NPPI_OP_ALPHA_OVER,
NPPI_OP_ALPHA_IN,
NPPI_OP_ALPHA_OUT,
NPPI_OP_ALPHA_ATOP,
NPPI_OP_ALPHA_XOR,
NPPI_OP_ALPHA_PLUS,
NPPI_OP_ALPHA_OVER_PREMUL,
NPPI_OP_ALPHA_IN_PREMUL,
NPPI_OP_ALPHA_OUT_PREMUL,
NPPI_OP_ALPHA_ATOP_PREMUL,
NPPI_OP_ALPHA_XOR_PREMUL,
NPPI_OP_ALPHA_PLUS_PREMUL,
NPPI_OP_ALPHA_PREMUL
};
typedef void (*func_t)(const GpuMat& img1, const GpuMat& img2, GpuMat& dst, NppiAlphaOp eAlphaOp, cudaStream_t stream);
static const func_t funcs[] =
{
NppAlphaComp<CV_8U, nppiAlphaComp_8u_AC4R>::call,
0,
NppAlphaComp<CV_16U, nppiAlphaComp_16u_AC4R>::call,
0,
NppAlphaComp<CV_32S, nppiAlphaComp_32s_AC4R>::call,
NppAlphaComp<CV_32F, nppiAlphaComp_32f_AC4R>::call
};
CV_Assert( img1.type() == CV_8UC4 || img1.type() == CV_16UC4 || img1.type() == CV_32SC4 || img1.type() == CV_32FC4 );
CV_Assert( img1.size() == img2.size() && img1.type() == img2.type() );
dst.create(img1.size(), img1.type());
const func_t func = funcs[img1.depth()];
func(img1, img2, dst, npp_alpha_ops[alpha_op], StreamAccessor::getStream(stream));
}
#endif /* !defined (HAVE_CUDA) */
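A minimal usage sketch for the alphaComp wrapper above (illustration only; the ALPHA_OVER constant is assumed to come from the gpuimgproc header, where the alpha_op codes index npp_alpha_ops[]):

#include "opencv2/gpuimgproc.hpp"

// Composites fg over bg; both inputs must share size and one of the asserted types
// (CV_8UC4 / CV_16UC4 / CV_32SC4 / CV_32FC4).
void composite(const cv::gpu::GpuMat& fg, const cv::gpu::GpuMat& bg, cv::gpu::GpuMat& out)
{
    cv::gpu::alphaComp(fg, bg, out, cv::gpu::ALPHA_OVER);
}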

@ -0,0 +1,149 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
using namespace cv;
using namespace cv::gpu;
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
void cv::gpu::cornerHarris(const GpuMat&, GpuMat&, int, int, double, int) { throw_no_cuda(); }
void cv::gpu::cornerHarris(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, int, double, int) { throw_no_cuda(); }
void cv::gpu::cornerHarris(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, int, double, int, Stream&) { throw_no_cuda(); }
void cv::gpu::cornerMinEigenVal(const GpuMat&, GpuMat&, int, int, int) { throw_no_cuda(); }
void cv::gpu::cornerMinEigenVal(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, int, int) { throw_no_cuda(); }
void cv::gpu::cornerMinEigenVal(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, int, int, Stream&) { throw_no_cuda(); }
#else /* !defined (HAVE_CUDA) */
namespace cv { namespace gpu { namespace cudev
{
namespace imgproc
{
void cornerHarris_gpu(int block_size, float k, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, cudaStream_t stream);
void cornerMinEigenVal_gpu(int block_size, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, cudaStream_t stream);
}
}}}
namespace
{
void extractCovData(const GpuMat& src, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize, int borderType, Stream& stream)
{
double scale = static_cast<double>(1 << ((ksize > 0 ? ksize : 3) - 1)) * blockSize;
if (ksize < 0)
scale *= 2.;
if (src.depth() == CV_8U)
scale *= 255.;
scale = 1./scale;
Dx.create(src.size(), CV_32F);
Dy.create(src.size(), CV_32F);
if (ksize > 0)
{
Sobel(src, Dx, CV_32F, 1, 0, buf, ksize, scale, borderType, -1, stream);
Sobel(src, Dy, CV_32F, 0, 1, buf, ksize, scale, borderType, -1, stream);
}
else
{
Scharr(src, Dx, CV_32F, 1, 0, buf, scale, borderType, -1, stream);
Scharr(src, Dy, CV_32F, 0, 1, buf, scale, borderType, -1, stream);
}
}
}
void cv::gpu::cornerHarris(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, double k, int borderType)
{
GpuMat Dx, Dy;
cornerHarris(src, dst, Dx, Dy, blockSize, ksize, k, borderType);
}
void cv::gpu::cornerHarris(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, double k, int borderType)
{
GpuMat buf;
cornerHarris(src, dst, Dx, Dy, buf, blockSize, ksize, k, borderType);
}
void cv::gpu::cornerHarris(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize, double k, int borderType, Stream& stream)
{
using namespace cv::gpu::cudev::imgproc;
CV_Assert(borderType == cv::BORDER_REFLECT101 || borderType == cv::BORDER_REPLICATE || borderType == cv::BORDER_REFLECT);
extractCovData(src, Dx, Dy, buf, blockSize, ksize, borderType, stream);
dst.create(src.size(), CV_32F);
cornerHarris_gpu(blockSize, static_cast<float>(k), Dx, Dy, dst, borderType, StreamAccessor::getStream(stream));
}
void cv::gpu::cornerMinEigenVal(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, int borderType)
{
GpuMat Dx, Dy;
cornerMinEigenVal(src, dst, Dx, Dy, blockSize, ksize, borderType);
}
void cv::gpu::cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, int borderType)
{
GpuMat buf;
cornerMinEigenVal(src, dst, Dx, Dy, buf, blockSize, ksize, borderType);
}
void cv::gpu::cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize, int borderType, Stream& stream)
{
using namespace ::cv::gpu::cudev::imgproc;
CV_Assert(borderType == cv::BORDER_REFLECT101 || borderType == cv::BORDER_REPLICATE || borderType == cv::BORDER_REFLECT);
extractCovData(src, Dx, Dy, buf, blockSize, ksize, borderType, stream);
dst.create(src.size(), CV_32F);
cornerMinEigenVal_gpu(blockSize, Dx, Dy, dst, borderType, StreamAccessor::getStream(stream));
}
#endif /* !defined (HAVE_CUDA) */
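A minimal usage sketch for the corner detectors above (illustration only; the parameter values are assumed):

#include "opencv2/gpuimgproc.hpp"

// The simple overloads allocate Dx/Dy and the filter buffer internally; the extended
// overloads let callers reuse them, as gftt.cpp does with cornerMinEigenVal.
void harrisResponse(const cv::gpu::GpuMat& gray8u, cv::gpu::GpuMat& response)
{
    // blockSize = 5, Sobel aperture = 3, k = 0.04, and one of the asserted border modes
    cv::gpu::cornerHarris(gray8u, response, 5, 3, 0.04, cv::BORDER_REFLECT101);
}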

@ -52,137 +52,6 @@ namespace cv { namespace gpu { namespace cudev
{
namespace imgproc
{
/////////////////////////////////// MeanShiftfiltering ///////////////////////////////////////////////
texture<uchar4, 2> tex_meanshift;
__device__ short2 do_mean_shift(int x0, int y0, unsigned char* out,
size_t out_step, int cols, int rows,
int sp, int sr, int maxIter, float eps)
{
int isr2 = sr*sr;
uchar4 c = tex2D(tex_meanshift, x0, y0 );
// iterate meanshift procedure
for( int iter = 0; iter < maxIter; iter++ )
{
int count = 0;
int s0 = 0, s1 = 0, s2 = 0, sx = 0, sy = 0;
float icount;
//mean shift: process pixels in window (p-sigmaSp)x(p+sigmaSp)
int minx = x0-sp;
int miny = y0-sp;
int maxx = x0+sp;
int maxy = y0+sp;
for( int y = miny; y <= maxy; y++)
{
int rowCount = 0;
for( int x = minx; x <= maxx; x++ )
{
uchar4 t = tex2D( tex_meanshift, x, y );
int norm2 = (t.x - c.x) * (t.x - c.x) + (t.y - c.y) * (t.y - c.y) + (t.z - c.z) * (t.z - c.z);
if( norm2 <= isr2 )
{
s0 += t.x; s1 += t.y; s2 += t.z;
sx += x; rowCount++;
}
}
count += rowCount;
sy += y*rowCount;
}
if( count == 0 )
break;
icount = 1.f/count;
int x1 = __float2int_rz(sx*icount);
int y1 = __float2int_rz(sy*icount);
s0 = __float2int_rz(s0*icount);
s1 = __float2int_rz(s1*icount);
s2 = __float2int_rz(s2*icount);
int norm2 = (s0 - c.x) * (s0 - c.x) + (s1 - c.y) * (s1 - c.y) + (s2 - c.z) * (s2 - c.z);
bool stopFlag = (x0 == x1 && y0 == y1) || (::abs(x1-x0) + ::abs(y1-y0) + norm2 <= eps);
x0 = x1; y0 = y1;
c.x = s0; c.y = s1; c.z = s2;
if( stopFlag )
break;
}
int base = (blockIdx.y * blockDim.y + threadIdx.y) * out_step + (blockIdx.x * blockDim.x + threadIdx.x) * 4 * sizeof(uchar);
*(uchar4*)(out + base) = c;
return make_short2((short)x0, (short)y0);
}
__global__ void meanshift_kernel(unsigned char* out, size_t out_step, int cols, int rows, int sp, int sr, int maxIter, float eps )
{
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
if( x0 < cols && y0 < rows )
do_mean_shift(x0, y0, out, out_step, cols, rows, sp, sr, maxIter, eps);
}
__global__ void meanshiftproc_kernel(unsigned char* outr, size_t outrstep,
unsigned char* outsp, size_t outspstep,
int cols, int rows,
int sp, int sr, int maxIter, float eps)
{
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
if( x0 < cols && y0 < rows )
{
int basesp = (blockIdx.y * blockDim.y + threadIdx.y) * outspstep + (blockIdx.x * blockDim.x + threadIdx.x) * 2 * sizeof(short);
*(short2*)(outsp + basesp) = do_mean_shift(x0, y0, outr, outrstep, cols, rows, sp, sr, maxIter, eps);
}
}
void meanShiftFiltering_gpu(const PtrStepSzb& src, PtrStepSzb dst, int sp, int sr, int maxIter, float eps, cudaStream_t stream)
{
dim3 grid(1, 1, 1);
dim3 threads(32, 8, 1);
grid.x = divUp(src.cols, threads.x);
grid.y = divUp(src.rows, threads.y);
cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>();
cudaSafeCall( cudaBindTexture2D( 0, tex_meanshift, src.data, desc, src.cols, src.rows, src.step ) );
meanshift_kernel<<< grid, threads, 0, stream >>>( dst.data, dst.step, dst.cols, dst.rows, sp, sr, maxIter, eps );
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
//cudaSafeCall( cudaUnbindTexture( tex_meanshift ) );
}
void meanShiftProc_gpu(const PtrStepSzb& src, PtrStepSzb dstr, PtrStepSzb dstsp, int sp, int sr, int maxIter, float eps, cudaStream_t stream)
{
dim3 grid(1, 1, 1);
dim3 threads(32, 8, 1);
grid.x = divUp(src.cols, threads.x);
grid.y = divUp(src.rows, threads.y);
cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>();
cudaSafeCall( cudaBindTexture2D( 0, tex_meanshift, src.data, desc, src.cols, src.rows, src.step ) );
meanshiftproc_kernel<<< grid, threads, 0, stream >>>( dstr.data, dstr.step, dstsp.data, dstsp.step, dstr.cols, dstr.rows, sp, sr, maxIter, eps );
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
//cudaSafeCall( cudaUnbindTexture( tex_meanshift ) );
}
/////////////////////////////////////////// Corner Harris /////////////////////////////////////////////////
texture<float, cudaTextureType2D, cudaReadModeElementType> harrisDxTex(0, cudaFilterModePoint, cudaAddressModeClamp);
@ -399,8 +268,7 @@ namespace cv { namespace gpu { namespace cudev
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
} // namespace imgproc
}}} // namespace cv { namespace gpu { namespace cudev {
}
}}}
#endif /* CUDA_DISABLER */
#endif

@ -0,0 +1,182 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace imgproc
{
texture<uchar4, 2> tex_meanshift;
__device__ short2 do_mean_shift(int x0, int y0, unsigned char* out,
size_t out_step, int cols, int rows,
int sp, int sr, int maxIter, float eps)
{
int isr2 = sr*sr;
uchar4 c = tex2D(tex_meanshift, x0, y0 );
// iterate meanshift procedure
for( int iter = 0; iter < maxIter; iter++ )
{
int count = 0;
int s0 = 0, s1 = 0, s2 = 0, sx = 0, sy = 0;
float icount;
//mean shift: process pixels in window (p-sigmaSp)x(p+sigmaSp)
int minx = x0-sp;
int miny = y0-sp;
int maxx = x0+sp;
int maxy = y0+sp;
for( int y = miny; y <= maxy; y++)
{
int rowCount = 0;
for( int x = minx; x <= maxx; x++ )
{
uchar4 t = tex2D( tex_meanshift, x, y );
int norm2 = (t.x - c.x) * (t.x - c.x) + (t.y - c.y) * (t.y - c.y) + (t.z - c.z) * (t.z - c.z);
if( norm2 <= isr2 )
{
s0 += t.x; s1 += t.y; s2 += t.z;
sx += x; rowCount++;
}
}
count += rowCount;
sy += y*rowCount;
}
if( count == 0 )
break;
icount = 1.f/count;
int x1 = __float2int_rz(sx*icount);
int y1 = __float2int_rz(sy*icount);
s0 = __float2int_rz(s0*icount);
s1 = __float2int_rz(s1*icount);
s2 = __float2int_rz(s2*icount);
int norm2 = (s0 - c.x) * (s0 - c.x) + (s1 - c.y) * (s1 - c.y) + (s2 - c.z) * (s2 - c.z);
bool stopFlag = (x0 == x1 && y0 == y1) || (::abs(x1-x0) + ::abs(y1-y0) + norm2 <= eps);
x0 = x1; y0 = y1;
c.x = s0; c.y = s1; c.z = s2;
if( stopFlag )
break;
}
int base = (blockIdx.y * blockDim.y + threadIdx.y) * out_step + (blockIdx.x * blockDim.x + threadIdx.x) * 4 * sizeof(uchar);
*(uchar4*)(out + base) = c;
return make_short2((short)x0, (short)y0);
}
__global__ void meanshift_kernel(unsigned char* out, size_t out_step, int cols, int rows, int sp, int sr, int maxIter, float eps )
{
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
if( x0 < cols && y0 < rows )
do_mean_shift(x0, y0, out, out_step, cols, rows, sp, sr, maxIter, eps);
}
void meanShiftFiltering_gpu(const PtrStepSzb& src, PtrStepSzb dst, int sp, int sr, int maxIter, float eps, cudaStream_t stream)
{
dim3 grid(1, 1, 1);
dim3 threads(32, 8, 1);
grid.x = divUp(src.cols, threads.x);
grid.y = divUp(src.rows, threads.y);
cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>();
cudaSafeCall( cudaBindTexture2D( 0, tex_meanshift, src.data, desc, src.cols, src.rows, src.step ) );
meanshift_kernel<<< grid, threads, 0, stream >>>( dst.data, dst.step, dst.cols, dst.rows, sp, sr, maxIter, eps );
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
__global__ void meanshiftproc_kernel(unsigned char* outr, size_t outrstep,
unsigned char* outsp, size_t outspstep,
int cols, int rows,
int sp, int sr, int maxIter, float eps)
{
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
if( x0 < cols && y0 < rows )
{
int basesp = (blockIdx.y * blockDim.y + threadIdx.y) * outspstep + (blockIdx.x * blockDim.x + threadIdx.x) * 2 * sizeof(short);
*(short2*)(outsp + basesp) = do_mean_shift(x0, y0, outr, outrstep, cols, rows, sp, sr, maxIter, eps);
}
}
void meanShiftProc_gpu(const PtrStepSzb& src, PtrStepSzb dstr, PtrStepSzb dstsp, int sp, int sr, int maxIter, float eps, cudaStream_t stream)
{
dim3 grid(1, 1, 1);
dim3 threads(32, 8, 1);
grid.x = divUp(src.cols, threads.x);
grid.y = divUp(src.rows, threads.y);
cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>();
cudaSafeCall( cudaBindTexture2D( 0, tex_meanshift, src.data, desc, src.cols, src.rows, src.step ) );
meanshiftproc_kernel<<< grid, threads, 0, stream >>>( dstr.data, dstr.step, dstsp.data, dstsp.step, dstr.cols, dstr.rows, sp, sr, maxIter, eps );
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
}
}}}
#endif
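A small self-contained check of the launch geometry used above (illustration only; the 640x480 frame size is assumed, and divUp mirrors the helper from opencv2/core/cuda/common.hpp):

#include <cstdio>

static int divUp(int total, int grain) { return (total + grain - 1) / grain; }

int main()
{
    const int cols = 640, rows = 480;   // assumed frame size
    // 32x8 thread blocks, one thread per pixel, guarded by the x0 < cols && y0 < rows check
    std::printf("grid = %d x %d blocks\n", divUp(cols, 32), divUp(rows, 8));  // 20 x 60
    return 0;
}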

@ -62,6 +62,12 @@ namespace cv { namespace gpu { namespace cudev
void cv::gpu::GoodFeaturesToTrackDetector_GPU::operator ()(const GpuMat& image, GpuMat& corners, const GpuMat& mask)
{
#ifndef HAVE_OPENCV_GPUARITHM
(void) image;
(void) corners;
(void) mask;
throw_no_cuda();
#else
using namespace cv::gpu::cudev::gfft;
CV_Assert(qualityLevel > 0 && minDistance >= 0 && maxCorners >= 0);
@ -75,7 +81,7 @@ void cv::gpu::GoodFeaturesToTrackDetector_GPU::operator ()(const GpuMat& image,
cornerMinEigenVal(image, eig_, Dx_, Dy_, buf_, blockSize, 3);
double maxVal = 0;
minMax(eig_, 0, &maxVal, GpuMat(), minMaxbuf_);
gpu::minMax(eig_, 0, &maxVal, GpuMat(), minMaxbuf_);
ensureSizeIsEnough(1, std::max(1000, static_cast<int>(image.size().area() * 0.05)), CV_32FC2, tmpCorners_);
@ -164,6 +170,7 @@ void cv::gpu::GoodFeaturesToTrackDetector_GPU::operator ()(const GpuMat& image,
corners.upload(Mat(1, static_cast<int>(tmp2.size()), CV_32FC2, &tmp2[0]));
}
#endif
}
#endif /* !defined (HAVE_CUDA) */
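A minimal usage sketch for the detector above (illustration only; the constructor arguments shown are assumed values). Note that without opencv_gpuarithm the operator() now compiles to the throw_no_cuda() stub, since it relies on gpu::minMax:

#include "opencv2/gpuimgproc.hpp"

void detectCorners(const cv::gpu::GpuMat& gray8u, cv::gpu::GpuMat& corners)
{
    // up to 1000 corners, quality level 0.01, minimum distance 10 pixels
    cv::gpu::GoodFeaturesToTrackDetector_GPU detector(1000, 0.01, 10.0);
    detector(gray8u, corners);   // corners becomes a 1xN CV_32FC2 row of points
}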

@ -47,113 +47,29 @@ using namespace cv::gpu;
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
void cv::gpu::meanShiftFiltering(const GpuMat&, GpuMat&, int, int, TermCriteria, Stream&) { throw_no_cuda(); }
void cv::gpu::meanShiftProc(const GpuMat&, GpuMat&, GpuMat&, int, int, TermCriteria, Stream&) { throw_no_cuda(); }
void cv::gpu::evenLevels(GpuMat&, int, int, int) { throw_no_cuda(); }
void cv::gpu::histEven(const GpuMat&, GpuMat&, int, int, int, Stream&) { throw_no_cuda(); }
void cv::gpu::histEven(const GpuMat&, GpuMat&, GpuMat&, int, int, int, Stream&) { throw_no_cuda(); }
void cv::gpu::histEven(const GpuMat&, GpuMat*, int*, int*, int*, Stream&) { throw_no_cuda(); }
void cv::gpu::histEven(const GpuMat&, GpuMat*, GpuMat&, int*, int*, int*, Stream&) { throw_no_cuda(); }
void cv::gpu::histRange(const GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::histRange(const GpuMat&, GpuMat&, const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::histRange(const GpuMat&, GpuMat*, const GpuMat*, Stream&) { throw_no_cuda(); }
void cv::gpu::histRange(const GpuMat&, GpuMat*, const GpuMat*, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::calcHist(const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::equalizeHist(const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::equalizeHist(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }
void cv::gpu::cornerHarris(const GpuMat&, GpuMat&, int, int, double, int) { throw_no_cuda(); }
void cv::gpu::cornerHarris(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, int, double, int) { throw_no_cuda(); }
void cv::gpu::cornerHarris(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, int, double, int, Stream&) { throw_no_cuda(); }
void cv::gpu::cornerMinEigenVal(const GpuMat&, GpuMat&, int, int, int) { throw_no_cuda(); }
void cv::gpu::cornerMinEigenVal(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, int, int) { throw_no_cuda(); }
void cv::gpu::cornerMinEigenVal(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, int, int, Stream&) { throw_no_cuda(); }
void cv::gpu::Canny(const GpuMat&, GpuMat&, double, double, int, bool) { throw_no_cuda(); }
void cv::gpu::Canny(const GpuMat&, CannyBuf&, GpuMat&, double, double, int, bool) { throw_no_cuda(); }
void cv::gpu::Canny(const GpuMat&, const GpuMat&, GpuMat&, double, double, bool) { throw_no_cuda(); }
void cv::gpu::Canny(const GpuMat&, const GpuMat&, CannyBuf&, GpuMat&, double, double, bool) { throw_no_cuda(); }
void cv::gpu::CannyBuf::create(const Size&, int) { throw_no_cuda(); }
void cv::gpu::CannyBuf::release() { throw_no_cuda(); }
cv::Ptr<cv::gpu::CLAHE> cv::gpu::createCLAHE(double, cv::Size) { throw_no_cuda(); return cv::Ptr<cv::gpu::CLAHE>(); }
void cv::gpu::alphaComp(const GpuMat&, const GpuMat&, GpuMat&, int, Stream&) { throw_no_cuda(); }
#else /* !defined (HAVE_CUDA) */
////////////////////////////////////////////////////////////////////////
// meanShiftFiltering_GPU
namespace cv { namespace gpu { namespace cudev
{
namespace imgproc
{
void meanShiftFiltering_gpu(const PtrStepSzb& src, PtrStepSzb dst, int sp, int sr, int maxIter, float eps, cudaStream_t stream);
}
}}}
void cv::gpu::meanShiftFiltering(const GpuMat& src, GpuMat& dst, int sp, int sr, TermCriteria criteria, Stream& stream)
{
using namespace ::cv::gpu::cudev::imgproc;
if( src.empty() )
CV_Error( cv::Error::StsBadArg, "The input image is empty" );
if( src.depth() != CV_8U || src.channels() != 4 )
CV_Error( cv::Error::StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported" );
dst.create( src.size(), CV_8UC4 );
if( !(criteria.type & TermCriteria::MAX_ITER) )
criteria.maxCount = 5;
int maxIter = std::min(std::max(criteria.maxCount, 1), 100);
float eps;
if( !(criteria.type & TermCriteria::EPS) )
eps = 1.f;
eps = (float)std::max(criteria.epsilon, 0.0);
meanShiftFiltering_gpu(src, dst, sp, sr, maxIter, eps, StreamAccessor::getStream(stream));
}
////////////////////////////////////////////////////////////////////////
// meanShiftProc_GPU
namespace cv { namespace gpu { namespace cudev
{
namespace imgproc
{
void meanShiftProc_gpu(const PtrStepSzb& src, PtrStepSzb dstr, PtrStepSzb dstsp, int sp, int sr, int maxIter, float eps, cudaStream_t stream);
}
}}}
void cv::gpu::meanShiftProc(const GpuMat& src, GpuMat& dstr, GpuMat& dstsp, int sp, int sr, TermCriteria criteria, Stream& stream)
{
using namespace ::cv::gpu::cudev::imgproc;
if( src.empty() )
CV_Error( cv::Error::StsBadArg, "The input image is empty" );
if( src.depth() != CV_8U || src.channels() != 4 )
CV_Error( cv::Error::StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported" );
dstr.create( src.size(), CV_8UC4 );
dstsp.create( src.size(), CV_16SC2 );
if( !(criteria.type & TermCriteria::MAX_ITER) )
criteria.maxCount = 5;
int maxIter = std::min(std::max(criteria.maxCount, 1), 100);
float eps;
if( !(criteria.type & TermCriteria::EPS) )
eps = 1.f;
eps = (float)std::max(criteria.epsilon, 0.0);
meanShiftProc_gpu(src, dstr, dstsp, sp, sr, maxIter, eps, StreamAccessor::getStream(stream));
}
////////////////////////////////////////////////////////////////////////
// Histogram
// NPP Histogram
namespace
{
@ -444,10 +360,12 @@ void cv::gpu::histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4
hist_callers[src.depth()](src, hist, levels, buf, StreamAccessor::getStream(stream));
}
////////////////////////////////////////////////////////////////////////
// calcHist
namespace hist
{
void histogram256(PtrStepSzb src, int* hist, cudaStream_t stream);
void equalizeHist(PtrStepSzb src, PtrStepSzb dst, const int* lut, cudaStream_t stream);
}
void cv::gpu::calcHist(const GpuMat& src, GpuMat& hist, Stream& stream)
@ -460,6 +378,14 @@ void cv::gpu::calcHist(const GpuMat& src, GpuMat& hist, Stream& stream)
hist::histogram256(src, hist.ptr<int>(), StreamAccessor::getStream(stream));
}
////////////////////////////////////////////////////////////////////////
// equalizeHist
namespace hist
{
void equalizeHist(PtrStepSzb src, PtrStepSzb dst, const int* lut, cudaStream_t stream);
}
void cv::gpu::equalizeHist(const GpuMat& src, GpuMat& dst, Stream& stream)
{
GpuMat hist;
@ -492,229 +418,6 @@ void cv::gpu::equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, GpuMat&
hist::equalizeHist(src, dst, lut.ptr<int>(), stream);
}
////////////////////////////////////////////////////////////////////////
// cornerHarris & minEgenVal
namespace cv { namespace gpu { namespace cudev
{
namespace imgproc
{
void cornerHarris_gpu(int block_size, float k, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, cudaStream_t stream);
void cornerMinEigenVal_gpu(int block_size, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, cudaStream_t stream);
}
}}}
namespace
{
void extractCovData(const GpuMat& src, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize, int borderType, Stream& stream)
{
double scale = static_cast<double>(1 << ((ksize > 0 ? ksize : 3) - 1)) * blockSize;
if (ksize < 0)
scale *= 2.;
if (src.depth() == CV_8U)
scale *= 255.;
scale = 1./scale;
Dx.create(src.size(), CV_32F);
Dy.create(src.size(), CV_32F);
if (ksize > 0)
{
Sobel(src, Dx, CV_32F, 1, 0, buf, ksize, scale, borderType, -1, stream);
Sobel(src, Dy, CV_32F, 0, 1, buf, ksize, scale, borderType, -1, stream);
}
else
{
Scharr(src, Dx, CV_32F, 1, 0, buf, scale, borderType, -1, stream);
Scharr(src, Dy, CV_32F, 0, 1, buf, scale, borderType, -1, stream);
}
}
}
void cv::gpu::cornerHarris(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, double k, int borderType)
{
GpuMat Dx, Dy;
cornerHarris(src, dst, Dx, Dy, blockSize, ksize, k, borderType);
}
void cv::gpu::cornerHarris(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, double k, int borderType)
{
GpuMat buf;
cornerHarris(src, dst, Dx, Dy, buf, blockSize, ksize, k, borderType);
}
void cv::gpu::cornerHarris(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize, double k, int borderType, Stream& stream)
{
using namespace cv::gpu::cudev::imgproc;
CV_Assert(borderType == cv::BORDER_REFLECT101 || borderType == cv::BORDER_REPLICATE || borderType == cv::BORDER_REFLECT);
extractCovData(src, Dx, Dy, buf, blockSize, ksize, borderType, stream);
dst.create(src.size(), CV_32F);
cornerHarris_gpu(blockSize, static_cast<float>(k), Dx, Dy, dst, borderType, StreamAccessor::getStream(stream));
}
void cv::gpu::cornerMinEigenVal(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, int borderType)
{
GpuMat Dx, Dy;
cornerMinEigenVal(src, dst, Dx, Dy, blockSize, ksize, borderType);
}
void cv::gpu::cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, int borderType)
{
GpuMat buf;
cornerMinEigenVal(src, dst, Dx, Dy, buf, blockSize, ksize, borderType);
}
void cv::gpu::cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize, int borderType, Stream& stream)
{
using namespace ::cv::gpu::cudev::imgproc;
CV_Assert(borderType == cv::BORDER_REFLECT101 || borderType == cv::BORDER_REPLICATE || borderType == cv::BORDER_REFLECT);
extractCovData(src, Dx, Dy, buf, blockSize, ksize, borderType, stream);
dst.create(src.size(), CV_32F);
cornerMinEigenVal_gpu(blockSize, Dx, Dy, dst, borderType, StreamAccessor::getStream(stream));
}
//////////////////////////////////////////////////////////////////////////////
// Canny
void cv::gpu::CannyBuf::create(const Size& image_size, int apperture_size)
{
if (apperture_size > 0)
{
ensureSizeIsEnough(image_size, CV_32SC1, dx);
ensureSizeIsEnough(image_size, CV_32SC1, dy);
if (apperture_size != 3)
{
filterDX = createDerivFilter_GPU(CV_8UC1, CV_32S, 1, 0, apperture_size, BORDER_REPLICATE);
filterDY = createDerivFilter_GPU(CV_8UC1, CV_32S, 0, 1, apperture_size, BORDER_REPLICATE);
}
}
ensureSizeIsEnough(image_size, CV_32FC1, mag);
ensureSizeIsEnough(image_size, CV_32SC1, map);
ensureSizeIsEnough(1, image_size.area(), CV_16UC2, st1);
ensureSizeIsEnough(1, image_size.area(), CV_16UC2, st2);
}
void cv::gpu::CannyBuf::release()
{
dx.release();
dy.release();
mag.release();
map.release();
st1.release();
st2.release();
}
namespace canny
{
void calcMagnitude(PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, bool L2Grad);
void calcMagnitude(PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, bool L2Grad);
void calcMap(PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, PtrStepSzi map, float low_thresh, float high_thresh);
void edgesHysteresisLocal(PtrStepSzi map, ushort2* st1);
void edgesHysteresisGlobal(PtrStepSzi map, ushort2* st1, ushort2* st2);
void getEdges(PtrStepSzi map, PtrStepSzb dst);
}
namespace
{
void CannyCaller(const GpuMat& dx, const GpuMat& dy, CannyBuf& buf, GpuMat& dst, float low_thresh, float high_thresh)
{
using namespace canny;
buf.map.setTo(Scalar::all(0));
calcMap(dx, dy, buf.mag, buf.map, low_thresh, high_thresh);
edgesHysteresisLocal(buf.map, buf.st1.ptr<ushort2>());
edgesHysteresisGlobal(buf.map, buf.st1.ptr<ushort2>(), buf.st2.ptr<ushort2>());
getEdges(buf.map, dst);
}
}
void cv::gpu::Canny(const GpuMat& src, GpuMat& dst, double low_thresh, double high_thresh, int apperture_size, bool L2gradient)
{
CannyBuf buf;
Canny(src, buf, dst, low_thresh, high_thresh, apperture_size, L2gradient);
}
void cv::gpu::Canny(const GpuMat& src, CannyBuf& buf, GpuMat& dst, double low_thresh, double high_thresh, int apperture_size, bool L2gradient)
{
using namespace canny;
CV_Assert(src.type() == CV_8UC1);
if (!deviceSupports(SHARED_ATOMICS))
CV_Error(cv::Error::StsNotImplemented, "The device doesn't support shared atomics");
if( low_thresh > high_thresh )
std::swap( low_thresh, high_thresh);
dst.create(src.size(), CV_8U);
buf.create(src.size(), apperture_size);
if (apperture_size == 3)
{
Size wholeSize;
Point ofs;
src.locateROI(wholeSize, ofs);
GpuMat srcWhole(wholeSize, src.type(), src.datastart, src.step);
calcMagnitude(srcWhole, ofs.x, ofs.y, buf.dx, buf.dy, buf.mag, L2gradient);
}
else
{
buf.filterDX->apply(src, buf.dx, Rect(0, 0, src.cols, src.rows));
buf.filterDY->apply(src, buf.dy, Rect(0, 0, src.cols, src.rows));
calcMagnitude(buf.dx, buf.dy, buf.mag, L2gradient);
}
CannyCaller(buf.dx, buf.dy, buf, dst, static_cast<float>(low_thresh), static_cast<float>(high_thresh));
}
void cv::gpu::Canny(const GpuMat& dx, const GpuMat& dy, GpuMat& dst, double low_thresh, double high_thresh, bool L2gradient)
{
CannyBuf buf;
Canny(dx, dy, buf, dst, low_thresh, high_thresh, L2gradient);
}
void cv::gpu::Canny(const GpuMat& dx, const GpuMat& dy, CannyBuf& buf, GpuMat& dst, double low_thresh, double high_thresh, bool L2gradient)
{
using namespace canny;
CV_Assert(TargetArchs::builtWith(SHARED_ATOMICS) && DeviceInfo().supports(SHARED_ATOMICS));
CV_Assert(dx.type() == CV_32SC1 && dy.type() == CV_32SC1 && dx.size() == dy.size());
if( low_thresh > high_thresh )
std::swap( low_thresh, high_thresh);
dst.create(dx.size(), CV_8U);
buf.create(dx.size(), -1);
calcMagnitude(dx, dy, buf.mag, L2gradient);
CannyCaller(dx, dy, buf, dst, static_cast<float>(low_thresh), static_cast<float>(high_thresh));
}
////////////////////////////////////////////////////////////////////////
// CLAHE
@ -793,7 +496,11 @@ namespace
}
else
{
#ifndef HAVE_OPENCV_GPUARITHM
throw_no_cuda();
#else
cv::gpu::copyMakeBorder(src, srcExt_, 0, tilesY_ - (src.rows % tilesY_), 0, tilesX_ - (src.cols % tilesX_), cv::BORDER_REFLECT_101, cv::Scalar(), s);
#endif
tileSize = cv::Size(srcExt_.cols / tilesX_, srcExt_.rows / tilesY_);
srcForLut = srcExt_;
@ -847,77 +554,4 @@ cv::Ptr<cv::gpu::CLAHE> cv::gpu::createCLAHE(double clipLimit, cv::Size tileGrid
return new CLAHE_Impl(clipLimit, tileGridSize.width, tileGridSize.height);
}
////////////////////////////////////////////////////////////////////////
// alphaComp
namespace
{
template <int DEPTH> struct NppAlphaCompFunc
{
typedef typename NPPTypeTraits<DEPTH>::npp_type npp_t;
typedef NppStatus (*func_t)(const npp_t* pSrc1, int nSrc1Step, const npp_t* pSrc2, int nSrc2Step, npp_t* pDst, int nDstStep, NppiSize oSizeROI, NppiAlphaOp eAlphaOp);
};
template <int DEPTH, typename NppAlphaCompFunc<DEPTH>::func_t func> struct NppAlphaComp
{
typedef typename NPPTypeTraits<DEPTH>::npp_type npp_t;
static void call(const GpuMat& img1, const GpuMat& img2, GpuMat& dst, NppiAlphaOp eAlphaOp, cudaStream_t stream)
{
NppStreamHandler h(stream);
NppiSize oSizeROI;
oSizeROI.width = img1.cols;
oSizeROI.height = img2.rows;
nppSafeCall( func(img1.ptr<npp_t>(), static_cast<int>(img1.step), img2.ptr<npp_t>(), static_cast<int>(img2.step),
dst.ptr<npp_t>(), static_cast<int>(dst.step), oSizeROI, eAlphaOp) );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
};
}
void cv::gpu::alphaComp(const GpuMat& img1, const GpuMat& img2, GpuMat& dst, int alpha_op, Stream& stream)
{
static const NppiAlphaOp npp_alpha_ops[] = {
NPPI_OP_ALPHA_OVER,
NPPI_OP_ALPHA_IN,
NPPI_OP_ALPHA_OUT,
NPPI_OP_ALPHA_ATOP,
NPPI_OP_ALPHA_XOR,
NPPI_OP_ALPHA_PLUS,
NPPI_OP_ALPHA_OVER_PREMUL,
NPPI_OP_ALPHA_IN_PREMUL,
NPPI_OP_ALPHA_OUT_PREMUL,
NPPI_OP_ALPHA_ATOP_PREMUL,
NPPI_OP_ALPHA_XOR_PREMUL,
NPPI_OP_ALPHA_PLUS_PREMUL,
NPPI_OP_ALPHA_PREMUL
};
typedef void (*func_t)(const GpuMat& img1, const GpuMat& img2, GpuMat& dst, NppiAlphaOp eAlphaOp, cudaStream_t stream);
static const func_t funcs[] =
{
NppAlphaComp<CV_8U, nppiAlphaComp_8u_AC4R>::call,
0,
NppAlphaComp<CV_16U, nppiAlphaComp_16u_AC4R>::call,
0,
NppAlphaComp<CV_32S, nppiAlphaComp_32s_AC4R>::call,
NppAlphaComp<CV_32F, nppiAlphaComp_32f_AC4R>::call
};
CV_Assert( img1.type() == CV_8UC4 || img1.type() == CV_16UC4 || img1.type() == CV_32SC4 || img1.type() == CV_32FC4 );
CV_Assert( img1.size() == img2.size() && img1.type() == img2.type() );
dst.create(img1.size(), img1.type());
const func_t func = funcs[img1.depth()];
func(img1, img2, dst, npp_alpha_ops[alpha_op], StreamAccessor::getStream(stream));
}
#endif /* !defined (HAVE_CUDA) */
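A minimal usage sketch for the histogram functions kept in this file (illustration only; the buffer names are assumed):

#include "opencv2/gpuimgproc.hpp"

// The buffered overload reuses hist (256-bin CV_32SC1) and buf across frames
// instead of allocating them on every call.
void equalize(const cv::gpu::GpuMat& gray8u, cv::gpu::GpuMat& dst,
              cv::gpu::GpuMat& hist, cv::gpu::GpuMat& buf)
{
    cv::gpu::equalizeHist(gray8u, dst, hist, buf);
}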

@ -45,7 +45,7 @@
using namespace cv;
using namespace cv::gpu;
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
#if !defined (HAVE_CUDA) || !defined (HAVE_OPENCV_GPUARITHM) || defined (CUDA_DISABLER)
void cv::gpu::matchTemplate(const GpuMat&, const GpuMat&, GpuMat&, int, Stream&) { throw_no_cuda(); }
@ -172,15 +172,15 @@ namespace
return;
}
ConvolveBuf convolve_buf;
gpu::ConvolveBuf convolve_buf;
convolve_buf.user_block_size = buf.user_block_size;
if (image.channels() == 1)
convolve(image.reshape(1), templ.reshape(1), result, true, convolve_buf, stream);
gpu::convolve(image.reshape(1), templ.reshape(1), result, true, convolve_buf, stream);
else
{
GpuMat result_;
convolve(image.reshape(1), templ.reshape(1), result_, true, convolve_buf, stream);
gpu::convolve(image.reshape(1), templ.reshape(1), result_, true, convolve_buf, stream);
extractFirstChannel_32F(result_, result, image.channels(), StreamAccessor::getStream(stream));
}
}
@ -216,9 +216,9 @@ namespace
matchTemplate_CCORR_8U(image, templ, result, buf, stream);
buf.image_sqsums.resize(1);
sqrIntegral(image.reshape(1), buf.image_sqsums[0], stream);
gpu::sqrIntegral(image.reshape(1), buf.image_sqsums[0], stream);
unsigned long long templ_sqsum = (unsigned long long)sqrSum(templ.reshape(1))[0];
unsigned long long templ_sqsum = (unsigned long long)gpu::sqrSum(templ.reshape(1))[0];
normalize_8U(templ.cols, templ.rows, buf.image_sqsums[0], templ_sqsum, result, image.channels(), StreamAccessor::getStream(stream));
}
@ -243,9 +243,9 @@ namespace
}
buf.image_sqsums.resize(1);
sqrIntegral(image.reshape(1), buf.image_sqsums[0], stream);
gpu::sqrIntegral(image.reshape(1), buf.image_sqsums[0], stream);
unsigned long long templ_sqsum = (unsigned long long)sqrSum(templ.reshape(1))[0];
unsigned long long templ_sqsum = (unsigned long long)gpu::sqrSum(templ.reshape(1))[0];
matchTemplate_CCORR_8U(image, templ, result, buf, stream);
matchTemplatePrepared_SQDIFF_8U(templ.cols, templ.rows, buf.image_sqsums[0], templ_sqsum, result, image.channels(), StreamAccessor::getStream(stream));
@ -256,9 +256,9 @@ namespace
const GpuMat& image, const GpuMat& templ, GpuMat& result, MatchTemplateBuf &buf, Stream& stream)
{
buf.image_sqsums.resize(1);
sqrIntegral(image.reshape(1), buf.image_sqsums[0], stream);
gpu::sqrIntegral(image.reshape(1), buf.image_sqsums[0], stream);
unsigned long long templ_sqsum = (unsigned long long)sqrSum(templ.reshape(1))[0];
unsigned long long templ_sqsum = (unsigned long long)gpu::sqrSum(templ.reshape(1))[0];
matchTemplate_CCORR_8U(image, templ, result, buf, stream);
matchTemplatePrepared_SQDIFF_NORMED_8U(templ.cols, templ.rows, buf.image_sqsums[0], templ_sqsum, result, image.channels(), StreamAccessor::getStream(stream));
@ -273,19 +273,19 @@ namespace
if (image.channels() == 1)
{
buf.image_sums.resize(1);
integral(image, buf.image_sums[0], stream);
gpu::integral(image, buf.image_sums[0], stream);
unsigned int templ_sum = (unsigned int)sum(templ)[0];
matchTemplatePrepared_CCOFF_8U(templ.cols, templ.rows, buf.image_sums[0], templ_sum, result, StreamAccessor::getStream(stream));
}
else
{
split(image, buf.images);
gpu::split(image, buf.images);
buf.image_sums.resize(buf.images.size());
for (int i = 0; i < image.channels(); ++i)
integral(buf.images[i], buf.image_sums[i], stream);
gpu::integral(buf.images[i], buf.image_sums[i], stream);
Scalar templ_sum = sum(templ);
Scalar templ_sum = gpu::sum(templ);
switch (image.channels())
{
@ -333,12 +333,12 @@ namespace
if (image.channels() == 1)
{
buf.image_sums.resize(1);
integral(image, buf.image_sums[0], stream);
gpu::integral(image, buf.image_sums[0], stream);
buf.image_sqsums.resize(1);
sqrIntegral(image, buf.image_sqsums[0], stream);
gpu::sqrIntegral(image, buf.image_sqsums[0], stream);
unsigned int templ_sum = (unsigned int)sum(templ)[0];
unsigned long long templ_sqsum = (unsigned long long)sqrSum(templ)[0];
unsigned int templ_sum = (unsigned int)gpu::sum(templ)[0];
unsigned long long templ_sqsum = (unsigned long long)gpu::sqrSum(templ)[0];
matchTemplatePrepared_CCOFF_NORMED_8U(
templ.cols, templ.rows, buf.image_sums[0], buf.image_sqsums[0],
@ -346,17 +346,17 @@ namespace
}
else
{
split(image, buf.images);
gpu::split(image, buf.images);
buf.image_sums.resize(buf.images.size());
buf.image_sqsums.resize(buf.images.size());
for (int i = 0; i < image.channels(); ++i)
{
integral(buf.images[i], buf.image_sums[i], stream);
sqrIntegral(buf.images[i], buf.image_sqsums[i], stream);
gpu::integral(buf.images[i], buf.image_sums[i], stream);
gpu::sqrIntegral(buf.images[i], buf.image_sqsums[i], stream);
}
Scalar templ_sum = sum(templ);
Scalar templ_sqsum = sqrSum(templ);
Scalar templ_sum = gpu::sum(templ);
Scalar templ_sqsum = gpu::sqrSum(templ);
switch (image.channels())
{
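The hunks above qualify the gpuarithm helpers explicitly. A minimal usage sketch of the public entry point (illustration only; TM_CCORR_NORMED from opencv2/imgproc is an assumed choice of method):

#include "opencv2/imgproc.hpp"
#include "opencv2/gpuimgproc.hpp"

// The helpers now route through gpu::integral/gpu::sqrIntegral/gpu::convolve, so a build
// without opencv_gpuarithm falls back to the throw_no_cuda() stub declared at the top.
void match(const cv::gpu::GpuMat& image, const cv::gpu::GpuMat& templ, cv::gpu::GpuMat& result)
{
    cv::gpu::matchTemplate(image, templ, result, cv::TM_CCORR_NORMED);
}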

@ -0,0 +1,128 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
using namespace cv;
using namespace cv::gpu;
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
void cv::gpu::meanShiftFiltering(const GpuMat&, GpuMat&, int, int, TermCriteria, Stream&) { throw_no_cuda(); }
void cv::gpu::meanShiftProc(const GpuMat&, GpuMat&, GpuMat&, int, int, TermCriteria, Stream&) { throw_no_cuda(); }
#else /* !defined (HAVE_CUDA) */
////////////////////////////////////////////////////////////////////////
// meanShiftFiltering_GPU
namespace cv { namespace gpu { namespace cudev
{
namespace imgproc
{
void meanShiftFiltering_gpu(const PtrStepSzb& src, PtrStepSzb dst, int sp, int sr, int maxIter, float eps, cudaStream_t stream);
}
}}}
void cv::gpu::meanShiftFiltering(const GpuMat& src, GpuMat& dst, int sp, int sr, TermCriteria criteria, Stream& stream)
{
using namespace ::cv::gpu::cudev::imgproc;
if( src.empty() )
CV_Error( cv::Error::StsBadArg, "The input image is empty" );
if( src.depth() != CV_8U || src.channels() != 4 )
CV_Error( cv::Error::StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported" );
dst.create( src.size(), CV_8UC4 );
if( !(criteria.type & TermCriteria::MAX_ITER) )
criteria.maxCount = 5;
int maxIter = std::min(std::max(criteria.maxCount, 1), 100);
float eps;
if( !(criteria.type & TermCriteria::EPS) )
eps = 1.f;
eps = (float)std::max(criteria.epsilon, 0.0);
meanShiftFiltering_gpu(src, dst, sp, sr, maxIter, eps, StreamAccessor::getStream(stream));
}
////////////////////////////////////////////////////////////////////////
// meanShiftProc_GPU
namespace cv { namespace gpu { namespace cudev
{
namespace imgproc
{
void meanShiftProc_gpu(const PtrStepSzb& src, PtrStepSzb dstr, PtrStepSzb dstsp, int sp, int sr, int maxIter, float eps, cudaStream_t stream);
}
}}}
void cv::gpu::meanShiftProc(const GpuMat& src, GpuMat& dstr, GpuMat& dstsp, int sp, int sr, TermCriteria criteria, Stream& stream)
{
using namespace ::cv::gpu::cudev::imgproc;
if( src.empty() )
CV_Error( cv::Error::StsBadArg, "The input image is empty" );
if( src.depth() != CV_8U || src.channels() != 4 )
CV_Error( cv::Error::StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported" );
dstr.create( src.size(), CV_8UC4 );
dstsp.create( src.size(), CV_16SC2 );
if( !(criteria.type & TermCriteria::MAX_ITER) )
criteria.maxCount = 5;
int maxIter = std::min(std::max(criteria.maxCount, 1), 100);
float eps;
if( !(criteria.type & TermCriteria::EPS) )
eps = 1.f;
eps = (float)std::max(criteria.epsilon, 0.0);
meanShiftProc_gpu(src, dstr, dstsp, sp, sr, maxIter, eps, StreamAccessor::getStream(stream));
}
#endif /* !defined (HAVE_CUDA) */
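A minimal usage sketch for the functions above (illustration only; the spatial/color radii and criterion values are assumed):

#include "opencv2/gpuimgproc.hpp"

void smooth(const cv::gpu::GpuMat& rgba8u, cv::gpu::GpuMat& filtered)
{
    // input must be CV_8UC4; maxCount is clamped to [1, 100] inside the wrapper
    cv::TermCriteria crit(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 5, 1.0);
    cv::gpu::meanShiftFiltering(rgba8u, filtered, 10, 20, crit);
}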

@ -45,9 +45,14 @@
#include "opencv2/gpuimgproc.hpp"
#include "opencv2/gpufilters.hpp"
#include "opencv2/gpuarithm.hpp"
#include "opencv2/core/private.hpp"
#include "opencv2/core/gpu_private.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_GPUARITHM
# include "opencv2/gpuarithm.hpp"
#endif
#endif /* __OPENCV_PRECOMP_H__ */

@ -0,0 +1,97 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#ifdef HAVE_CUDA
using namespace cvtest;
////////////////////////////////////////////////////////
// BilateralFilter
PARAM_TEST_CASE(BilateralFilter, cv::gpu::DeviceInfo, cv::Size, MatType)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
int kernel_size;
float sigma_color;
float sigma_spatial;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
kernel_size = 5;
sigma_color = 10.f;
sigma_spatial = 3.5f;
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(BilateralFilter, Accuracy)
{
cv::Mat src = randomMat(size, type);
src.convertTo(src, type);
cv::gpu::GpuMat dst;
cv::gpu::bilateralFilter(loadMat(src), dst, kernel_size, sigma_color, sigma_spatial);
cv::Mat dst_gold;
cv::bilateralFilter(src, dst_gold, kernel_size, sigma_color, sigma_spatial);
EXPECT_MAT_NEAR(dst_gold, dst, src.depth() == CV_32F ? 1e-3 : 1.0);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, BilateralFilter, testing::Combine(
ALL_DEVICES,
testing::Values(cv::Size(128, 128), cv::Size(113, 113), cv::Size(639, 481)),
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_32FC1), MatType(CV_32FC3))
));
#endif // HAVE_CUDA
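A minimal usage sketch of the function under test (illustration only; the parameter values mirror the fixture above):

#include "opencv2/gpuimgproc.hpp"

void smoothEdgesPreserving(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst)
{
    // kernel size 5, sigma_color 10, sigma_spatial 3.5 -- the values used by the test
    cv::gpu::bilateralFilter(src, dst, 5, 10.f, 3.5f);
}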

@ -0,0 +1,124 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#ifdef HAVE_CUDA
using namespace cvtest;
////////////////////////////////////////////////////////////////////////////
// Blend
namespace
{
template <typename T>
void blendLinearGold(const cv::Mat& img1, const cv::Mat& img2, const cv::Mat& weights1, const cv::Mat& weights2, cv::Mat& result_gold)
{
result_gold.create(img1.size(), img1.type());
int cn = img1.channels();
for (int y = 0; y < img1.rows; ++y)
{
const float* weights1_row = weights1.ptr<float>(y);
const float* weights2_row = weights2.ptr<float>(y);
const T* img1_row = img1.ptr<T>(y);
const T* img2_row = img2.ptr<T>(y);
T* result_gold_row = result_gold.ptr<T>(y);
for (int x = 0; x < img1.cols * cn; ++x)
{
float w1 = weights1_row[x / cn];
float w2 = weights2_row[x / cn];
result_gold_row[x] = static_cast<T>((img1_row[x] * w1 + img2_row[x] * w2) / (w1 + w2 + 1e-5f));
}
}
}
}
PARAM_TEST_CASE(Blend, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(Blend, Accuracy)
{
int depth = CV_MAT_DEPTH(type);
cv::Mat img1 = randomMat(size, type, 0.0, depth == CV_8U ? 255.0 : 1.0);
cv::Mat img2 = randomMat(size, type, 0.0, depth == CV_8U ? 255.0 : 1.0);
cv::Mat weights1 = randomMat(size, CV_32F, 0, 1);
cv::Mat weights2 = randomMat(size, CV_32F, 0, 1);
cv::gpu::GpuMat result;
cv::gpu::blendLinear(loadMat(img1, useRoi), loadMat(img2, useRoi), loadMat(weights1, useRoi), loadMat(weights2, useRoi), result);
cv::Mat result_gold;
if (depth == CV_8U)
blendLinearGold<uchar>(img1, img2, weights1, weights2, result_gold);
else
blendLinearGold<float>(img1, img2, weights1, weights2, result_gold);
EXPECT_MAT_NEAR(result_gold, result, CV_MAT_DEPTH(type) == CV_8U ? 1.0 : 1e-5);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Blend, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),
WHOLE_SUBMAT));
#endif // HAVE_CUDA

@ -0,0 +1,114 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#ifdef HAVE_CUDA
using namespace cvtest;
////////////////////////////////////////////////////////
// Canny
namespace
{
IMPLEMENT_PARAM_CLASS(AppertureSize, int)
IMPLEMENT_PARAM_CLASS(L2gradient, bool)
}
PARAM_TEST_CASE(Canny, cv::gpu::DeviceInfo, AppertureSize, L2gradient, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
int apperture_size;
bool useL2gradient;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
apperture_size = GET_PARAM(1);
useL2gradient = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(Canny, Accuracy)
{
cv::Mat img = readImage("stereobm/aloe-L.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
double low_thresh = 50.0;
double high_thresh = 100.0;
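// gpu::Canny needs shared-memory atomics; on devices without them the call
// is expected to fail with cv::Error::StsNotImplemented.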
if (!supportFeature(devInfo, cv::gpu::SHARED_ATOMICS))
{
try
{
cv::gpu::GpuMat edges;
cv::gpu::Canny(loadMat(img), edges, low_thresh, high_thresh, apperture_size, useL2gradient);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
}
}
else
{
cv::gpu::GpuMat edges;
cv::gpu::Canny(loadMat(img, useRoi), edges, low_thresh, high_thresh, apperture_size, useL2gradient);
cv::Mat edges_gold;
cv::Canny(img, edges_gold, low_thresh, high_thresh, apperture_size, useL2gradient);
EXPECT_MAT_SIMILAR(edges_gold, edges, 2e-2);
}
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Canny, testing::Combine(
ALL_DEVICES,
testing::Values(AppertureSize(3), AppertureSize(5)),
testing::Values(L2gradient(false), L2gradient(true)),
WHOLE_SUBMAT));
#endif // HAVE_CUDA

@ -0,0 +1,145 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#ifdef HAVE_CUDA
using namespace cvtest;
///////////////////////////////////////////////////////////////////////////////////////////////////////
// CornerHarris
namespace
{
IMPLEMENT_PARAM_CLASS(BlockSize, int);
IMPLEMENT_PARAM_CLASS(ApertureSize, int);
}
PARAM_TEST_CASE(CornerHarris, cv::gpu::DeviceInfo, MatType, BorderType, BlockSize, ApertureSize)
{
cv::gpu::DeviceInfo devInfo;
int type;
int borderType;
int blockSize;
int apertureSize;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
borderType = GET_PARAM(2);
blockSize = GET_PARAM(3);
apertureSize = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(CornerHarris, Accuracy)
{
cv::Mat src = readImageType("stereobm/aloe-L.png", type);
ASSERT_FALSE(src.empty());
double k = randomDouble(0.1, 0.9);
cv::gpu::GpuMat dst;
cv::gpu::cornerHarris(loadMat(src), dst, blockSize, apertureSize, k, borderType);
cv::Mat dst_gold;
cv::cornerHarris(src, dst_gold, blockSize, apertureSize, k, borderType);
EXPECT_MAT_NEAR(dst_gold, dst, 0.02);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CornerHarris, testing::Combine(
ALL_DEVICES,
testing::Values(MatType(CV_8UC1), MatType(CV_32FC1)),
testing::Values(BorderType(cv::BORDER_REFLECT101), BorderType(cv::BORDER_REPLICATE), BorderType(cv::BORDER_REFLECT)),
testing::Values(BlockSize(3), BlockSize(5), BlockSize(7)),
testing::Values(ApertureSize(0), ApertureSize(3), ApertureSize(5), ApertureSize(7))));
///////////////////////////////////////////////////////////////////////////////////////////////////////
// cornerMinEigen
PARAM_TEST_CASE(CornerMinEigen, cv::gpu::DeviceInfo, MatType, BorderType, BlockSize, ApertureSize)
{
cv::gpu::DeviceInfo devInfo;
int type;
int borderType;
int blockSize;
int apertureSize;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
borderType = GET_PARAM(2);
blockSize = GET_PARAM(3);
apertureSize = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(CornerMinEigen, Accuracy)
{
cv::Mat src = readImageType("stereobm/aloe-L.png", type);
ASSERT_FALSE(src.empty());
cv::gpu::GpuMat dst;
cv::gpu::cornerMinEigenVal(loadMat(src), dst, blockSize, apertureSize, borderType);
cv::Mat dst_gold;
cv::cornerMinEigenVal(src, dst_gold, blockSize, apertureSize, borderType);
EXPECT_MAT_NEAR(dst_gold, dst, 0.02);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CornerMinEigen, testing::Combine(
ALL_DEVICES,
testing::Values(MatType(CV_8UC1), MatType(CV_32FC1)),
testing::Values(BorderType(cv::BORDER_REFLECT101), BorderType(cv::BORDER_REPLICATE), BorderType(cv::BORDER_REFLECT)),
testing::Values(BlockSize(3), BlockSize(5), BlockSize(7)),
testing::Values(ApertureSize(0), ApertureSize(3), ApertureSize(5), ApertureSize(7))));
#endif // HAVE_CUDA

@ -0,0 +1,131 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#ifdef HAVE_CUDA
using namespace cvtest;
//////////////////////////////////////////////////////
// GoodFeaturesToTrack
namespace
{
IMPLEMENT_PARAM_CLASS(MinDistance, double)
}
PARAM_TEST_CASE(GoodFeaturesToTrack, cv::gpu::DeviceInfo, MinDistance)
{
cv::gpu::DeviceInfo devInfo;
double minDistance;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
minDistance = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(GoodFeaturesToTrack, Accuracy)
{
cv::Mat image = readImage("opticalflow/frame0.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
int maxCorners = 1000;
double qualityLevel = 0.01;
cv::gpu::GoodFeaturesToTrackDetector_GPU detector(maxCorners, qualityLevel, minDistance);
cv::gpu::GpuMat d_pts;
detector(loadMat(image), d_pts);
ASSERT_FALSE(d_pts.empty());
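// pts_mat wraps the vector's storage, so downloading into it fills pts directly.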
std::vector<cv::Point2f> pts(d_pts.cols);
cv::Mat pts_mat(1, d_pts.cols, CV_32FC2, (void*) &pts[0]);
d_pts.download(pts_mat);
std::vector<cv::Point2f> pts_gold;
cv::goodFeaturesToTrack(image, pts_gold, maxCorners, qualityLevel, minDistance);
ASSERT_EQ(pts_gold.size(), pts.size());
size_t mismatch = 0;
for (size_t i = 0; i < pts.size(); ++i)
{
cv::Point2i a = pts_gold[i];
cv::Point2i b = pts[i];
bool eq = std::abs(a.x - b.x) < 1 && std::abs(a.y - b.y) < 1;
if (!eq)
++mismatch;
}
double bad_ratio = static_cast<double>(mismatch) / pts.size();
ASSERT_LE(bad_ratio, 0.01);
}
GPU_TEST_P(GoodFeaturesToTrack, EmptyCorners)
{
int maxCorners = 1000;
double qualityLevel = 0.01;
cv::gpu::GoodFeaturesToTrackDetector_GPU detector(maxCorners, qualityLevel, minDistance);
cv::gpu::GpuMat src(100, 100, CV_8UC1, cv::Scalar::all(0));
cv::gpu::GpuMat corners(1, maxCorners, CV_32FC2);
detector(src, corners);
ASSERT_TRUE(corners.empty());
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, GoodFeaturesToTrack, testing::Combine(
ALL_DEVICES,
testing::Values(MinDistance(0.0), MinDistance(3.0))));
#endif // HAVE_CUDA

@ -0,0 +1,227 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#ifdef HAVE_CUDA
using namespace cvtest;
///////////////////////////////////////////////////////////////////////////////////////////////////////
// HistEven
struct HistEven : testing::TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(HistEven, Accuracy)
{
cv::Mat img = readImage("stereobm/aloe-L.png");
ASSERT_FALSE(img.empty());
cv::Mat hsv;
cv::cvtColor(img, hsv, cv::COLOR_BGR2HSV);
int hbins = 30;
float hranges[] = {0.0f, 180.0f};
std::vector<cv::gpu::GpuMat> srcs;
cv::gpu::split(loadMat(hsv), srcs);
cv::gpu::GpuMat hist;
cv::gpu::histEven(srcs[0], hist, hbins, (int)hranges[0], (int)hranges[1]);
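// Build the CPU reference with cv::calcHist on the H channel, then transpose and
// convert it to the single-row CV_32S layout produced by gpu::histEven.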
cv::MatND histnd;
int histSize[] = {hbins};
const float* ranges[] = {hranges};
int channels[] = {0};
cv::calcHist(&hsv, 1, channels, cv::Mat(), histnd, 1, histSize, ranges);
cv::Mat hist_gold = histnd;
hist_gold = hist_gold.t();
hist_gold.convertTo(hist_gold, CV_32S);
EXPECT_MAT_NEAR(hist_gold, hist, 0.0);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, HistEven, ALL_DEVICES);
///////////////////////////////////////////////////////////////////////////////////////////////////////
// CalcHist
namespace
{
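// CPU reference for gpu::calcHist: a plain 1 x 256 CV_32S histogram of pixel intensities.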
void calcHistGold(const cv::Mat& src, cv::Mat& hist)
{
hist.create(1, 256, CV_32SC1);
hist.setTo(cv::Scalar::all(0));
int* hist_row = hist.ptr<int>();
for (int y = 0; y < src.rows; ++y)
{
const uchar* src_row = src.ptr(y);
for (int x = 0; x < src.cols; ++x)
++hist_row[src_row[x]];
}
}
}
PARAM_TEST_CASE(CalcHist, cv::gpu::DeviceInfo, cv::Size)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(CalcHist, Accuracy)
{
cv::Mat src = randomMat(size, CV_8UC1);
cv::gpu::GpuMat hist;
cv::gpu::calcHist(loadMat(src), hist);
cv::Mat hist_gold;
calcHistGold(src, hist_gold);
EXPECT_MAT_NEAR(hist_gold, hist, 0.0);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CalcHist, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES));
///////////////////////////////////////////////////////////////////////////////////////////////////////
// EqualizeHist
PARAM_TEST_CASE(EqualizeHist, cv::gpu::DeviceInfo, cv::Size)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(EqualizeHist, Accuracy)
{
cv::Mat src = randomMat(size, CV_8UC1);
cv::gpu::GpuMat dst;
cv::gpu::equalizeHist(loadMat(src), dst);
cv::Mat dst_gold;
cv::equalizeHist(src, dst_gold);
EXPECT_MAT_NEAR(dst_gold, dst, 3.0);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, EqualizeHist, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES));
///////////////////////////////////////////////////////////////////////////////////////////////////////
// CLAHE
namespace
{
IMPLEMENT_PARAM_CLASS(ClipLimit, double)
}
PARAM_TEST_CASE(CLAHE, cv::gpu::DeviceInfo, cv::Size, ClipLimit)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
double clipLimit;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
clipLimit = GET_PARAM(2);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(CLAHE, Accuracy)
{
cv::Mat src = randomMat(size, CV_8UC1);
cv::Ptr<cv::gpu::CLAHE> clahe = cv::gpu::createCLAHE(clipLimit);
cv::gpu::GpuMat dst;
clahe->apply(loadMat(src), dst);
cv::Ptr<cv::CLAHE> clahe_gold = cv::createCLAHE(clipLimit);
cv::Mat dst_gold;
clahe_gold->apply(src, dst_gold);
ASSERT_MAT_NEAR(dst_gold, dst, 1.0);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CLAHE, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(0.0, 40.0)));
#endif // HAVE_CUDA

@ -1,890 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#ifdef HAVE_CUDA
using namespace cvtest;
///////////////////////////////////////////////////////////////////////////////////////////////////////
// HistEven
struct HistEven : testing::TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(HistEven, Accuracy)
{
cv::Mat img = readImage("stereobm/aloe-L.png");
ASSERT_FALSE(img.empty());
cv::Mat hsv;
cv::cvtColor(img, hsv, cv::COLOR_BGR2HSV);
int hbins = 30;
float hranges[] = {0.0f, 180.0f};
std::vector<cv::gpu::GpuMat> srcs;
cv::gpu::split(loadMat(hsv), srcs);
cv::gpu::GpuMat hist;
cv::gpu::histEven(srcs[0], hist, hbins, (int)hranges[0], (int)hranges[1]);
cv::MatND histnd;
int histSize[] = {hbins};
const float* ranges[] = {hranges};
int channels[] = {0};
cv::calcHist(&hsv, 1, channels, cv::Mat(), histnd, 1, histSize, ranges);
cv::Mat hist_gold = histnd;
hist_gold = hist_gold.t();
hist_gold.convertTo(hist_gold, CV_32S);
EXPECT_MAT_NEAR(hist_gold, hist, 0.0);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, HistEven, ALL_DEVICES);
///////////////////////////////////////////////////////////////////////////////////////////////////////
// CalcHist
namespace
{
void calcHistGold(const cv::Mat& src, cv::Mat& hist)
{
hist.create(1, 256, CV_32SC1);
hist.setTo(cv::Scalar::all(0));
int* hist_row = hist.ptr<int>();
for (int y = 0; y < src.rows; ++y)
{
const uchar* src_row = src.ptr(y);
for (int x = 0; x < src.cols; ++x)
++hist_row[src_row[x]];
}
}
}
PARAM_TEST_CASE(CalcHist, cv::gpu::DeviceInfo, cv::Size)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(CalcHist, Accuracy)
{
cv::Mat src = randomMat(size, CV_8UC1);
cv::gpu::GpuMat hist;
cv::gpu::calcHist(loadMat(src), hist);
cv::Mat hist_gold;
calcHistGold(src, hist_gold);
EXPECT_MAT_NEAR(hist_gold, hist, 0.0);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CalcHist, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES));
///////////////////////////////////////////////////////////////////////////////////////////////////////
// EqualizeHist
PARAM_TEST_CASE(EqualizeHist, cv::gpu::DeviceInfo, cv::Size)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(EqualizeHist, Accuracy)
{
cv::Mat src = randomMat(size, CV_8UC1);
cv::gpu::GpuMat dst;
cv::gpu::equalizeHist(loadMat(src), dst);
cv::Mat dst_gold;
cv::equalizeHist(src, dst_gold);
EXPECT_MAT_NEAR(dst_gold, dst, 3.0);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, EqualizeHist, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES));
///////////////////////////////////////////////////////////////////////////////////////////////////////
// CLAHE
namespace
{
IMPLEMENT_PARAM_CLASS(ClipLimit, double)
}
PARAM_TEST_CASE(CLAHE, cv::gpu::DeviceInfo, cv::Size, ClipLimit)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
double clipLimit;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
clipLimit = GET_PARAM(2);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(CLAHE, Accuracy)
{
cv::Mat src = randomMat(size, CV_8UC1);
cv::Ptr<cv::gpu::CLAHE> clahe = cv::gpu::createCLAHE(clipLimit);
cv::gpu::GpuMat dst;
clahe->apply(loadMat(src), dst);
cv::Ptr<cv::CLAHE> clahe_gold = cv::createCLAHE(clipLimit);
cv::Mat dst_gold;
clahe_gold->apply(src, dst_gold);
ASSERT_MAT_NEAR(dst_gold, dst, 1.0);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CLAHE, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(0.0, 40.0)));
////////////////////////////////////////////////////////
// Canny
namespace
{
IMPLEMENT_PARAM_CLASS(AppertureSize, int);
IMPLEMENT_PARAM_CLASS(L2gradient, bool);
}
PARAM_TEST_CASE(Canny, cv::gpu::DeviceInfo, AppertureSize, L2gradient, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
int apperture_size;
bool useL2gradient;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
apperture_size = GET_PARAM(1);
useL2gradient = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(Canny, Accuracy)
{
cv::Mat img = readImage("stereobm/aloe-L.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
double low_thresh = 50.0;
double high_thresh = 100.0;
if (!supportFeature(devInfo, cv::gpu::SHARED_ATOMICS))
{
try
{
cv::gpu::GpuMat edges;
cv::gpu::Canny(loadMat(img), edges, low_thresh, high_thresh, apperture_size, useL2gradient);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
}
}
else
{
cv::gpu::GpuMat edges;
cv::gpu::Canny(loadMat(img, useRoi), edges, low_thresh, high_thresh, apperture_size, useL2gradient);
cv::Mat edges_gold;
cv::Canny(img, edges_gold, low_thresh, high_thresh, apperture_size, useL2gradient);
EXPECT_MAT_SIMILAR(edges_gold, edges, 2e-2);
}
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Canny, testing::Combine(
ALL_DEVICES,
testing::Values(AppertureSize(3), AppertureSize(5)),
testing::Values(L2gradient(false), L2gradient(true)),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// MeanShift
struct MeanShift : testing::TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
cv::Mat img;
int spatialRad;
int colorRad;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
img = readImageType("meanshift/cones.png", CV_8UC4);
ASSERT_FALSE(img.empty());
spatialRad = 30;
colorRad = 30;
}
};
GPU_TEST_P(MeanShift, Filtering)
{
cv::Mat img_template;
if (supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_20))
img_template = readImage("meanshift/con_result.png");
else
img_template = readImage("meanshift/con_result_CC1X.png");
ASSERT_FALSE(img_template.empty());
cv::gpu::GpuMat d_dst;
cv::gpu::meanShiftFiltering(loadMat(img), d_dst, spatialRad, colorRad);
ASSERT_EQ(CV_8UC4, d_dst.type());
cv::Mat dst(d_dst);
cv::Mat result;
cv::cvtColor(dst, result, cv::COLOR_BGRA2BGR);
EXPECT_MAT_NEAR(img_template, result, 0.0);
}
GPU_TEST_P(MeanShift, Proc)
{
cv::FileStorage fs;
if (supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_20))
fs.open(std::string(cvtest::TS::ptr()->get_data_path()) + "meanshift/spmap.yaml", cv::FileStorage::READ);
else
fs.open(std::string(cvtest::TS::ptr()->get_data_path()) + "meanshift/spmap_CC1X.yaml", cv::FileStorage::READ);
ASSERT_TRUE(fs.isOpened());
cv::Mat spmap_template;
fs["spmap"] >> spmap_template;
ASSERT_FALSE(spmap_template.empty());
cv::gpu::GpuMat rmap_filtered;
cv::gpu::meanShiftFiltering(loadMat(img), rmap_filtered, spatialRad, colorRad);
cv::gpu::GpuMat rmap;
cv::gpu::GpuMat spmap;
cv::gpu::meanShiftProc(loadMat(img), rmap, spmap, spatialRad, colorRad);
ASSERT_EQ(CV_8UC4, rmap.type());
EXPECT_MAT_NEAR(rmap_filtered, rmap, 0.0);
EXPECT_MAT_NEAR(spmap_template, spmap, 0.0);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MeanShift, ALL_DEVICES);
////////////////////////////////////////////////////////////////////////////////
// MeanShiftSegmentation
namespace
{
IMPLEMENT_PARAM_CLASS(MinSize, int);
}
PARAM_TEST_CASE(MeanShiftSegmentation, cv::gpu::DeviceInfo, MinSize)
{
cv::gpu::DeviceInfo devInfo;
int minsize;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
minsize = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(MeanShiftSegmentation, Regression)
{
cv::Mat img = readImageType("meanshift/cones.png", CV_8UC4);
ASSERT_FALSE(img.empty());
std::ostringstream path;
path << "meanshift/cones_segmented_sp10_sr10_minsize" << minsize;
if (supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_20))
path << ".png";
else
path << "_CC1X.png";
cv::Mat dst_gold = readImage(path.str());
ASSERT_FALSE(dst_gold.empty());
cv::Mat dst;
cv::gpu::meanShiftSegmentation(loadMat(img), dst, 10, 10, minsize);
cv::Mat dst_rgb;
cv::cvtColor(dst, dst_rgb, cv::COLOR_BGRA2BGR);
EXPECT_MAT_SIMILAR(dst_gold, dst_rgb, 1e-3);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MeanShiftSegmentation, testing::Combine(
ALL_DEVICES,
testing::Values(MinSize(0), MinSize(4), MinSize(20), MinSize(84), MinSize(340), MinSize(1364))));
////////////////////////////////////////////////////////////////////////////
// Blend
namespace
{
template <typename T>
void blendLinearGold(const cv::Mat& img1, const cv::Mat& img2, const cv::Mat& weights1, const cv::Mat& weights2, cv::Mat& result_gold)
{
result_gold.create(img1.size(), img1.type());
int cn = img1.channels();
for (int y = 0; y < img1.rows; ++y)
{
const float* weights1_row = weights1.ptr<float>(y);
const float* weights2_row = weights2.ptr<float>(y);
const T* img1_row = img1.ptr<T>(y);
const T* img2_row = img2.ptr<T>(y);
T* result_gold_row = result_gold.ptr<T>(y);
for (int x = 0; x < img1.cols * cn; ++x)
{
float w1 = weights1_row[x / cn];
float w2 = weights2_row[x / cn];
result_gold_row[x] = static_cast<T>((img1_row[x] * w1 + img2_row[x] * w2) / (w1 + w2 + 1e-5f));
}
}
}
}
PARAM_TEST_CASE(Blend, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
useRoi = GET_PARAM(3);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(Blend, Accuracy)
{
int depth = CV_MAT_DEPTH(type);
cv::Mat img1 = randomMat(size, type, 0.0, depth == CV_8U ? 255.0 : 1.0);
cv::Mat img2 = randomMat(size, type, 0.0, depth == CV_8U ? 255.0 : 1.0);
cv::Mat weights1 = randomMat(size, CV_32F, 0, 1);
cv::Mat weights2 = randomMat(size, CV_32F, 0, 1);
cv::gpu::GpuMat result;
cv::gpu::blendLinear(loadMat(img1, useRoi), loadMat(img2, useRoi), loadMat(weights1, useRoi), loadMat(weights2, useRoi), result);
cv::Mat result_gold;
if (depth == CV_8U)
blendLinearGold<uchar>(img1, img2, weights1, weights2, result_gold);
else
blendLinearGold<float>(img1, img2, weights1, weights2, result_gold);
EXPECT_MAT_NEAR(result_gold, result, CV_MAT_DEPTH(type) == CV_8U ? 1.0 : 1e-5);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Blend, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),
WHOLE_SUBMAT));
////////////////////////////////////////////////////////////////////////////////
// MatchTemplate8U
CV_ENUM(TemplateMethod, TM_SQDIFF, TM_SQDIFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_CCOEFF, TM_CCOEFF_NORMED)
namespace
{
IMPLEMENT_PARAM_CLASS(TemplateSize, cv::Size);
}
PARAM_TEST_CASE(MatchTemplate8U, cv::gpu::DeviceInfo, cv::Size, TemplateSize, Channels, TemplateMethod)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
cv::Size templ_size;
int cn;
int method;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
templ_size = GET_PARAM(2);
cn = GET_PARAM(3);
method = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(MatchTemplate8U, Accuracy)
{
cv::Mat image = randomMat(size, CV_MAKETYPE(CV_8U, cn));
cv::Mat templ = randomMat(templ_size, CV_MAKETYPE(CV_8U, cn));
cv::gpu::GpuMat dst;
cv::gpu::matchTemplate(loadMat(image), loadMat(templ), dst, method);
cv::Mat dst_gold;
cv::matchTemplate(image, templ, dst_gold, method);
EXPECT_MAT_NEAR(dst_gold, dst, templ_size.area() * 1e-1);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate8U, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(TemplateSize(cv::Size(5, 5)), TemplateSize(cv::Size(16, 16)), TemplateSize(cv::Size(30, 30))),
testing::Values(Channels(1), Channels(3), Channels(4)),
TemplateMethod::all()));
////////////////////////////////////////////////////////////////////////////////
// MatchTemplate32F
PARAM_TEST_CASE(MatchTemplate32F, cv::gpu::DeviceInfo, cv::Size, TemplateSize, Channels, TemplateMethod)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
cv::Size templ_size;
int cn;
int method;
int n, m, h, w;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
templ_size = GET_PARAM(2);
cn = GET_PARAM(3);
method = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(MatchTemplate32F, Regression)
{
cv::Mat image = randomMat(size, CV_MAKETYPE(CV_32F, cn));
cv::Mat templ = randomMat(templ_size, CV_MAKETYPE(CV_32F, cn));
cv::gpu::GpuMat dst;
cv::gpu::matchTemplate(loadMat(image), loadMat(templ), dst, method);
cv::Mat dst_gold;
cv::matchTemplate(image, templ, dst_gold, method);
EXPECT_MAT_NEAR(dst_gold, dst, templ_size.area() * 1e-1);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate32F, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(TemplateSize(cv::Size(5, 5)), TemplateSize(cv::Size(16, 16)), TemplateSize(cv::Size(30, 30))),
testing::Values(Channels(1), Channels(3), Channels(4)),
testing::Values(TemplateMethod(cv::TM_SQDIFF), TemplateMethod(cv::TM_CCORR))));
////////////////////////////////////////////////////////////////////////////////
// MatchTemplateBlackSource
PARAM_TEST_CASE(MatchTemplateBlackSource, cv::gpu::DeviceInfo, TemplateMethod)
{
cv::gpu::DeviceInfo devInfo;
int method;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
method = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(MatchTemplateBlackSource, Accuracy)
{
cv::Mat image = readImage("matchtemplate/black.png");
ASSERT_FALSE(image.empty());
cv::Mat pattern = readImage("matchtemplate/cat.png");
ASSERT_FALSE(pattern.empty());
cv::gpu::GpuMat d_dst;
cv::gpu::matchTemplate(loadMat(image), loadMat(pattern), d_dst, method);
cv::Mat dst(d_dst);
double maxValue;
cv::Point maxLoc;
cv::minMaxLoc(dst, NULL, &maxValue, NULL, &maxLoc);
cv::Point maxLocGold = cv::Point(284, 12);
ASSERT_EQ(maxLocGold, maxLoc);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplateBlackSource, testing::Combine(
ALL_DEVICES,
testing::Values(TemplateMethod(cv::TM_CCOEFF_NORMED), TemplateMethod(cv::TM_CCORR_NORMED))));
////////////////////////////////////////////////////////////////////////////////
// MatchTemplate_CCOEF_NORMED
PARAM_TEST_CASE(MatchTemplate_CCOEF_NORMED, cv::gpu::DeviceInfo, std::pair<std::string, std::string>)
{
cv::gpu::DeviceInfo devInfo;
std::string imageName;
std::string patternName;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
imageName = GET_PARAM(1).first;
patternName = GET_PARAM(1).second;
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(MatchTemplate_CCOEF_NORMED, Accuracy)
{
cv::Mat image = readImage(imageName);
ASSERT_FALSE(image.empty());
cv::Mat pattern = readImage(patternName);
ASSERT_FALSE(pattern.empty());
cv::gpu::GpuMat d_dst;
cv::gpu::matchTemplate(loadMat(image), loadMat(pattern), d_dst, cv::TM_CCOEFF_NORMED);
cv::Mat dst(d_dst);
cv::Point minLoc, maxLoc;
double minVal, maxVal;
cv::minMaxLoc(dst, &minVal, &maxVal, &minLoc, &maxLoc);
cv::Mat dstGold;
cv::matchTemplate(image, pattern, dstGold, cv::TM_CCOEFF_NORMED);
double minValGold, maxValGold;
cv::Point minLocGold, maxLocGold;
cv::minMaxLoc(dstGold, &minValGold, &maxValGold, &minLocGold, &maxLocGold);
ASSERT_EQ(minLocGold, minLoc);
ASSERT_EQ(maxLocGold, maxLoc);
ASSERT_LE(maxVal, 1.0);
ASSERT_GE(minVal, -1.0);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate_CCOEF_NORMED, testing::Combine(
ALL_DEVICES,
testing::Values(std::make_pair(std::string("matchtemplate/source-0.png"), std::string("matchtemplate/target-0.png")))));
////////////////////////////////////////////////////////////////////////////////
// MatchTemplate_CanFindBigTemplate
struct MatchTemplate_CanFindBigTemplate : testing::TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF_NORMED)
{
cv::Mat scene = readImage("matchtemplate/scene.png");
ASSERT_FALSE(scene.empty());
cv::Mat templ = readImage("matchtemplate/template.png");
ASSERT_FALSE(templ.empty());
cv::gpu::GpuMat d_result;
cv::gpu::matchTemplate(loadMat(scene), loadMat(templ), d_result, cv::TM_SQDIFF_NORMED);
cv::Mat result(d_result);
double minVal;
cv::Point minLoc;
cv::minMaxLoc(result, &minVal, 0, &minLoc, 0);
ASSERT_GE(minVal, 0);
ASSERT_LT(minVal, 1e-3);
ASSERT_EQ(344, minLoc.x);
ASSERT_EQ(0, minLoc.y);
}
GPU_TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF)
{
cv::Mat scene = readImage("matchtemplate/scene.png");
ASSERT_FALSE(scene.empty());
cv::Mat templ = readImage("matchtemplate/template.png");
ASSERT_FALSE(templ.empty());
cv::gpu::GpuMat d_result;
cv::gpu::matchTemplate(loadMat(scene), loadMat(templ), d_result, cv::TM_SQDIFF);
cv::Mat result(d_result);
double minVal;
cv::Point minLoc;
cv::minMaxLoc(result, &minVal, 0, &minLoc, 0);
ASSERT_GE(minVal, 0);
ASSERT_EQ(344, minLoc.x);
ASSERT_EQ(0, minLoc.y);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate_CanFindBigTemplate, ALL_DEVICES);
///////////////////////////////////////////////////////////////////////////////////////////////////////
// CornerHarris
namespace
{
IMPLEMENT_PARAM_CLASS(BlockSize, int);
IMPLEMENT_PARAM_CLASS(ApertureSize, int);
}
PARAM_TEST_CASE(CornerHarris, cv::gpu::DeviceInfo, MatType, BorderType, BlockSize, ApertureSize)
{
cv::gpu::DeviceInfo devInfo;
int type;
int borderType;
int blockSize;
int apertureSize;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
borderType = GET_PARAM(2);
blockSize = GET_PARAM(3);
apertureSize = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(CornerHarris, Accuracy)
{
cv::Mat src = readImageType("stereobm/aloe-L.png", type);
ASSERT_FALSE(src.empty());
double k = randomDouble(0.1, 0.9);
cv::gpu::GpuMat dst;
cv::gpu::cornerHarris(loadMat(src), dst, blockSize, apertureSize, k, borderType);
cv::Mat dst_gold;
cv::cornerHarris(src, dst_gold, blockSize, apertureSize, k, borderType);
EXPECT_MAT_NEAR(dst_gold, dst, 0.02);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CornerHarris, testing::Combine(
ALL_DEVICES,
testing::Values(MatType(CV_8UC1), MatType(CV_32FC1)),
testing::Values(BorderType(cv::BORDER_REFLECT101), BorderType(cv::BORDER_REPLICATE), BorderType(cv::BORDER_REFLECT)),
testing::Values(BlockSize(3), BlockSize(5), BlockSize(7)),
testing::Values(ApertureSize(0), ApertureSize(3), ApertureSize(5), ApertureSize(7))));
///////////////////////////////////////////////////////////////////////////////////////////////////////
// cornerMinEigen
PARAM_TEST_CASE(CornerMinEigen, cv::gpu::DeviceInfo, MatType, BorderType, BlockSize, ApertureSize)
{
cv::gpu::DeviceInfo devInfo;
int type;
int borderType;
int blockSize;
int apertureSize;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
type = GET_PARAM(1);
borderType = GET_PARAM(2);
blockSize = GET_PARAM(3);
apertureSize = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(CornerMinEigen, Accuracy)
{
cv::Mat src = readImageType("stereobm/aloe-L.png", type);
ASSERT_FALSE(src.empty());
cv::gpu::GpuMat dst;
cv::gpu::cornerMinEigenVal(loadMat(src), dst, blockSize, apertureSize, borderType);
cv::Mat dst_gold;
cv::cornerMinEigenVal(src, dst_gold, blockSize, apertureSize, borderType);
EXPECT_MAT_NEAR(dst_gold, dst, 0.02);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CornerMinEigen, testing::Combine(
ALL_DEVICES,
testing::Values(MatType(CV_8UC1), MatType(CV_32FC1)),
testing::Values(BorderType(cv::BORDER_REFLECT101), BorderType(cv::BORDER_REPLICATE), BorderType(cv::BORDER_REFLECT)),
testing::Values(BlockSize(3), BlockSize(5), BlockSize(7)),
testing::Values(ApertureSize(0), ApertureSize(3), ApertureSize(5), ApertureSize(7))));
////////////////////////////////////////////////////////
// BilateralFilter
PARAM_TEST_CASE(BilateralFilter, cv::gpu::DeviceInfo, cv::Size, MatType)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
int kernel_size;
float sigma_color;
float sigma_spatial;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
kernel_size = 5;
sigma_color = 10.f;
sigma_spatial = 3.5f;
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(BilateralFilter, Accuracy)
{
cv::Mat src = randomMat(size, type);
src.convertTo(src, type);
cv::gpu::GpuMat dst;
cv::gpu::bilateralFilter(loadMat(src), dst, kernel_size, sigma_color, sigma_spatial);
cv::Mat dst_gold;
cv::bilateralFilter(src, dst_gold, kernel_size, sigma_color, sigma_spatial);
EXPECT_MAT_NEAR(dst_gold, dst, src.depth() == CV_32F ? 1e-3 : 1.0);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, BilateralFilter, testing::Combine(
ALL_DEVICES,
testing::Values(cv::Size(128, 128), cv::Size(113, 113), cv::Size(639, 481)),
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_32FC1), MatType(CV_32FC3))
));
#endif // HAVE_CUDA

@ -0,0 +1,305 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#ifdef HAVE_CUDA
using namespace cvtest;
////////////////////////////////////////////////////////////////////////////////
// MatchTemplate8U
CV_ENUM(TemplateMethod, cv::TM_SQDIFF, cv::TM_SQDIFF_NORMED, cv::TM_CCORR, cv::TM_CCORR_NORMED, cv::TM_CCOEFF, cv::TM_CCOEFF_NORMED)
#define ALL_TEMPLATE_METHODS testing::Values(TemplateMethod(cv::TM_SQDIFF), TemplateMethod(cv::TM_SQDIFF_NORMED), TemplateMethod(cv::TM_CCORR), TemplateMethod(cv::TM_CCORR_NORMED), TemplateMethod(cv::TM_CCOEFF), TemplateMethod(cv::TM_CCOEFF_NORMED))
namespace
{
IMPLEMENT_PARAM_CLASS(TemplateSize, cv::Size);
}
PARAM_TEST_CASE(MatchTemplate8U, cv::gpu::DeviceInfo, cv::Size, TemplateSize, Channels, TemplateMethod)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
cv::Size templ_size;
int cn;
int method;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
templ_size = GET_PARAM(2);
cn = GET_PARAM(3);
method = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(MatchTemplate8U, Accuracy)
{
cv::Mat image = randomMat(size, CV_MAKETYPE(CV_8U, cn));
cv::Mat templ = randomMat(templ_size, CV_MAKETYPE(CV_8U, cn));
cv::gpu::GpuMat dst;
cv::gpu::matchTemplate(loadMat(image), loadMat(templ), dst, method);
cv::Mat dst_gold;
cv::matchTemplate(image, templ, dst_gold, method);
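// The allowed error is proportional to the template area, since larger templates
// accumulate larger correlation sums.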
EXPECT_MAT_NEAR(dst_gold, dst, templ_size.area() * 1e-1);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate8U, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(TemplateSize(cv::Size(5, 5)), TemplateSize(cv::Size(16, 16)), TemplateSize(cv::Size(30, 30))),
testing::Values(Channels(1), Channels(3), Channels(4)),
ALL_TEMPLATE_METHODS));
////////////////////////////////////////////////////////////////////////////////
// MatchTemplate32F
PARAM_TEST_CASE(MatchTemplate32F, cv::gpu::DeviceInfo, cv::Size, TemplateSize, Channels, TemplateMethod)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
cv::Size templ_size;
int cn;
int method;
int n, m, h, w;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
templ_size = GET_PARAM(2);
cn = GET_PARAM(3);
method = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(MatchTemplate32F, Regression)
{
cv::Mat image = randomMat(size, CV_MAKETYPE(CV_32F, cn));
cv::Mat templ = randomMat(templ_size, CV_MAKETYPE(CV_32F, cn));
cv::gpu::GpuMat dst;
cv::gpu::matchTemplate(loadMat(image), loadMat(templ), dst, method);
cv::Mat dst_gold;
cv::matchTemplate(image, templ, dst_gold, method);
EXPECT_MAT_NEAR(dst_gold, dst, templ_size.area() * 1e-1);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate32F, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(TemplateSize(cv::Size(5, 5)), TemplateSize(cv::Size(16, 16)), TemplateSize(cv::Size(30, 30))),
testing::Values(Channels(1), Channels(3), Channels(4)),
testing::Values(TemplateMethod(cv::TM_SQDIFF), TemplateMethod(cv::TM_CCORR))));
////////////////////////////////////////////////////////////////////////////////
// MatchTemplateBlackSource
PARAM_TEST_CASE(MatchTemplateBlackSource, cv::gpu::DeviceInfo, TemplateMethod)
{
cv::gpu::DeviceInfo devInfo;
int method;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
method = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(MatchTemplateBlackSource, Accuracy)
{
cv::Mat image = readImage("matchtemplate/black.png");
ASSERT_FALSE(image.empty());
cv::Mat pattern = readImage("matchtemplate/cat.png");
ASSERT_FALSE(pattern.empty());
cv::gpu::GpuMat d_dst;
cv::gpu::matchTemplate(loadMat(image), loadMat(pattern), d_dst, method);
cv::Mat dst(d_dst);
double maxValue;
cv::Point maxLoc;
cv::minMaxLoc(dst, NULL, &maxValue, NULL, &maxLoc);
cv::Point maxLocGold = cv::Point(284, 12);
ASSERT_EQ(maxLocGold, maxLoc);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplateBlackSource, testing::Combine(
ALL_DEVICES,
testing::Values(TemplateMethod(cv::TM_CCOEFF_NORMED), TemplateMethod(cv::TM_CCORR_NORMED))));
////////////////////////////////////////////////////////////////////////////////
// MatchTemplate_CCOEF_NORMED
PARAM_TEST_CASE(MatchTemplate_CCOEF_NORMED, cv::gpu::DeviceInfo, std::pair<std::string, std::string>)
{
cv::gpu::DeviceInfo devInfo;
std::string imageName;
std::string patternName;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
imageName = GET_PARAM(1).first;
patternName = GET_PARAM(1).second;
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(MatchTemplate_CCOEF_NORMED, Accuracy)
{
cv::Mat image = readImage(imageName);
ASSERT_FALSE(image.empty());
cv::Mat pattern = readImage(patternName);
ASSERT_FALSE(pattern.empty());
cv::gpu::GpuMat d_dst;
cv::gpu::matchTemplate(loadMat(image), loadMat(pattern), d_dst, cv::TM_CCOEFF_NORMED);
cv::Mat dst(d_dst);
cv::Point minLoc, maxLoc;
double minVal, maxVal;
cv::minMaxLoc(dst, &minVal, &maxVal, &minLoc, &maxLoc);
cv::Mat dstGold;
cv::matchTemplate(image, pattern, dstGold, cv::TM_CCOEFF_NORMED);
double minValGold, maxValGold;
cv::Point minLocGold, maxLocGold;
cv::minMaxLoc(dstGold, &minValGold, &maxValGold, &minLocGold, &maxLocGold);
ASSERT_EQ(minLocGold, minLoc);
ASSERT_EQ(maxLocGold, maxLoc);
ASSERT_LE(maxVal, 1.0);
ASSERT_GE(minVal, -1.0);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate_CCOEF_NORMED, testing::Combine(
ALL_DEVICES,
testing::Values(std::make_pair(std::string("matchtemplate/source-0.png"), std::string("matchtemplate/target-0.png")))));
////////////////////////////////////////////////////////////////////////////////
// MatchTemplate_CanFindBigTemplate
struct MatchTemplate_CanFindBigTemplate : testing::TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF_NORMED)
{
cv::Mat scene = readImage("matchtemplate/scene.png");
ASSERT_FALSE(scene.empty());
cv::Mat templ = readImage("matchtemplate/template.png");
ASSERT_FALSE(templ.empty());
cv::gpu::GpuMat d_result;
cv::gpu::matchTemplate(loadMat(scene), loadMat(templ), d_result, cv::TM_SQDIFF_NORMED);
cv::Mat result(d_result);
double minVal;
cv::Point minLoc;
cv::minMaxLoc(result, &minVal, 0, &minLoc, 0);
ASSERT_GE(minVal, 0);
ASSERT_LT(minVal, 1e-3);
ASSERT_EQ(344, minLoc.x);
ASSERT_EQ(0, minLoc.y);
}
GPU_TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF)
{
cv::Mat scene = readImage("matchtemplate/scene.png");
ASSERT_FALSE(scene.empty());
cv::Mat templ = readImage("matchtemplate/template.png");
ASSERT_FALSE(templ.empty());
cv::gpu::GpuMat d_result;
cv::gpu::matchTemplate(loadMat(scene), loadMat(templ), d_result, cv::TM_SQDIFF);
cv::Mat result(d_result);
double minVal;
cv::Point minLoc;
cv::minMaxLoc(result, &minVal, 0, &minLoc, 0);
ASSERT_GE(minVal, 0);
ASSERT_EQ(344, minLoc.x);
ASSERT_EQ(0, minLoc.y);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate_CanFindBigTemplate, ALL_DEVICES);
#endif // HAVE_CUDA

@ -0,0 +1,174 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#ifdef HAVE_CUDA
using namespace cvtest;
////////////////////////////////////////////////////////////////////////////////
// MeanShift
struct MeanShift : testing::TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
cv::Mat img;
int spatialRad;
int colorRad;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
img = readImageType("meanshift/cones.png", CV_8UC4);
ASSERT_FALSE(img.empty());
spatialRad = 30;
colorRad = 30;
}
};
GPU_TEST_P(MeanShift, Filtering)
{
cv::Mat img_template;
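// Devices below compute capability 2.0 produce a slightly different result,
// so a separate CC1X golden image is used for them.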
if (supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_20))
img_template = readImage("meanshift/con_result.png");
else
img_template = readImage("meanshift/con_result_CC1X.png");
ASSERT_FALSE(img_template.empty());
cv::gpu::GpuMat d_dst;
cv::gpu::meanShiftFiltering(loadMat(img), d_dst, spatialRad, colorRad);
ASSERT_EQ(CV_8UC4, d_dst.type());
cv::Mat dst(d_dst);
cv::Mat result;
cv::cvtColor(dst, result, cv::COLOR_BGRA2BGR);
EXPECT_MAT_NEAR(img_template, result, 0.0);
}
GPU_TEST_P(MeanShift, Proc)
{
cv::FileStorage fs;
if (supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_20))
fs.open(std::string(cvtest::TS::ptr()->get_data_path()) + "meanshift/spmap.yaml", cv::FileStorage::READ);
else
fs.open(std::string(cvtest::TS::ptr()->get_data_path()) + "meanshift/spmap_CC1X.yaml", cv::FileStorage::READ);
ASSERT_TRUE(fs.isOpened());
cv::Mat spmap_template;
fs["spmap"] >> spmap_template;
ASSERT_FALSE(spmap_template.empty());
cv::gpu::GpuMat rmap_filtered;
cv::gpu::meanShiftFiltering(loadMat(img), rmap_filtered, spatialRad, colorRad);
cv::gpu::GpuMat rmap;
cv::gpu::GpuMat spmap;
cv::gpu::meanShiftProc(loadMat(img), rmap, spmap, spatialRad, colorRad);
ASSERT_EQ(CV_8UC4, rmap.type());
EXPECT_MAT_NEAR(rmap_filtered, rmap, 0.0);
EXPECT_MAT_NEAR(spmap_template, spmap, 0.0);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MeanShift, ALL_DEVICES);
////////////////////////////////////////////////////////////////////////////////
// MeanShiftSegmentation
namespace
{
IMPLEMENT_PARAM_CLASS(MinSize, int);
}
PARAM_TEST_CASE(MeanShiftSegmentation, cv::gpu::DeviceInfo, MinSize)
{
cv::gpu::DeviceInfo devInfo;
int minsize;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
minsize = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(MeanShiftSegmentation, Regression)
{
cv::Mat img = readImageType("meanshift/cones.png", CV_8UC4);
ASSERT_FALSE(img.empty());
std::ostringstream path;
path << "meanshift/cones_segmented_sp10_sr10_minsize" << minsize;
if (supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_20))
path << ".png";
else
path << "_CC1X.png";
cv::Mat dst_gold = readImage(path.str());
ASSERT_FALSE(dst_gold.empty());
cv::Mat dst;
cv::gpu::meanShiftSegmentation(loadMat(img), dst, 10, 10, minsize);
cv::Mat dst_rgb;
cv::cvtColor(dst, dst_rgb, cv::COLOR_BGRA2BGR);
EXPECT_MAT_SIMILAR(dst_gold, dst_rgb, 1e-3);
}
INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MeanShiftSegmentation, testing::Combine(
ALL_DEVICES,
testing::Values(MinSize(0), MinSize(4), MinSize(20), MinSize(84), MinSize(340), MinSize(1364))));
#endif // HAVE_CUDA