Merge pull request #3800 from vrabaud:cuda

Get CUDA code to compile with clang CUDA and without CUDA #3800

Changelist:
- there are some syntactic changes: `<< <` -> `<<<` in kernel launches. For some reason, I did not need to change every occurrence in the code (a minimal sketch of this and the next item follows this list).
- `::min` -> `std::min` in `__host__` code
- `modules/cudaimgproc/src/moments.cpp` needs to have its CUDA-only code inside the `#ifdef` guard (see the sketch after the moments.cpp hunk below)
- the signature of the `cv::cuda::swapChannels` stub in `modules/cudaimgproc/src/color.cpp` is not exactly the same as the declared C++ one (`const int[]` vs `const int[4]`)
- `cv::cuda::FarnebackOpticalFlow::create` needs to be explicit about which `FarnebackOpticalFlow` it returns (see the sketch after the farneback.cpp hunk below)
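
To make the first two items concrete, here is a minimal, self-contained sketch (hypothetical kernel and helper, not code from this PR) of what the host side looks like after the change: the launch is written as a single `<<< ... >>>` token sequence, and `min` is qualified as `std::min` so it resolves in `__host__` code under clang's CUDA front end as well as nvcc.

```cpp
// Minimal sketch (hypothetical kernel, not from this PR) of the two host-side
// patterns the changelist mentions: "<<<...>>>" launch syntax and std::min.
#include <algorithm>      // std::min
#include <cuda_runtime.h>

__global__ void scaleKernel(float* data, int n, float s)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] *= s;
}

void scaleOnDevice(float* d_data, int n, float s, cudaStream_t stream)
{
    const dim3 block(256);
    // std::min (not the global ::min) so the call resolves in __host__ code
    const dim3 grid(std::min((n + 255) / 256, 65535));
    // the launch must be spelled "<<<...>>>", not the split "<< <...>> >"
    scaleKernel<<<grid, block, 0, stream>>>(d_data, n, s);
}

int main()
{
    const int n = 1 << 20;
    float* d_data = nullptr;
    cudaMalloc(&d_data, n * sizeof(float));
    scaleOnDevice(d_data, n, 2.0f, /*stream=*/0);
    cudaDeviceSynchronize();
    cudaFree(d_data);
    return 0;
}
```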

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
Vincent Rabaud (committed via GitHub), commit 843b6ede6a, parent 80f1ca2442
22 changed files (number of changed lines in parentheses):

- `modules/cudaarithm/src/arithm.cpp` (2)
- `modules/cudaarithm/src/cuda/polar_cart.cu` (10)
- `modules/cudaarithm/src/element_operations.cpp` (5)
- `modules/cudaarithm/src/reductions.cpp` (4)
- `modules/cudacodec/src/cuda/nv12_to_rgb.cu` (4)
- `modules/cudaimgproc/src/color.cpp` (2)
- `modules/cudaimgproc/src/connectedcomponents.cpp` (4)
- `modules/cudaimgproc/src/cuda/canny.cu` (4)
- `modules/cudaimgproc/src/cuda/connectedcomponents.cu` (10)
- `modules/cudaimgproc/src/cuda/generalized_hough.cu` (4)
- `modules/cudaimgproc/src/cuda/hough_circles.cu` (2)
- `modules/cudaimgproc/src/cuda/hough_lines.cu` (2)
- `modules/cudaimgproc/src/cuda/hough_segments.cu` (2)
- `modules/cudaimgproc/src/cuda/moments.cu` (6)
- `modules/cudaimgproc/src/histogram.cpp` (1)
- `modules/cudaimgproc/src/moments.cpp` (14)
- `modules/cudaoptflow/src/cuda/nvidiaOpticalFlow.cu` (2)
- `modules/cudaoptflow/src/farneback.cpp` (2)
- `modules/cudaoptflow/src/precomp.hpp` (2)
- `modules/cudev/include/opencv2/cudev/grid/detail/minmaxloc.hpp` (4)
- `modules/hfs/src/cuda/gslic_seg_engine_gpu.cu` (18)
- `modules/hfs/src/cuda/magnitude.cu` (4)

@@ -54,6 +54,8 @@ void cv::cuda::mulAndScaleSpectrums(InputArray, InputArray, OutputArray, int, fl
void cv::cuda::dft(InputArray, OutputArray, Size, int, Stream&) { throw_no_cuda(); }
Ptr<DFT> cv::cuda::createDFT(Size, int) { throw_no_cuda(); return Ptr<DFT>(); }
Ptr<Convolution> cv::cuda::createConvolution(Size) { throw_no_cuda(); return Ptr<Convolution>(); }
#else /* !defined (HAVE_CUDA) */

@@ -289,9 +289,9 @@ namespace
const T scale = angleInDegrees ? static_cast<T>(CV_PI / 180.0) : static_cast<T>(1.0);
if (mag.empty())
-polarToCartImpl_<T, false> << <grid, block, 0, stream >> >(mag, angle, x, y, scale);
+polarToCartImpl_<T, false> <<<grid, block, 0, stream >>>(mag, angle, x, y, scale);
else
-polarToCartImpl_<T, true> << <grid, block, 0, stream >> >(mag, angle, x, y, scale);
+polarToCartImpl_<T, true> <<<grid, block, 0, stream >>>(mag, angle, x, y, scale);
}
template <typename T>
@@ -305,9 +305,9 @@ namespace
const T scale = angleInDegrees ? static_cast<T>(CV_PI / 180.0) : static_cast<T>(1.0);
if (mag.empty())
-polarToCartDstInterleavedImpl_<T, false> << <grid, block, 0, stream >> >(mag, angle, xy, scale);
+polarToCartDstInterleavedImpl_<T, false> <<<grid, block, 0, stream >>>(mag, angle, xy, scale);
else
-polarToCartDstInterleavedImpl_<T, true> << <grid, block, 0, stream >> >(mag, angle, xy, scale);
+polarToCartDstInterleavedImpl_<T, true> <<<grid, block, 0, stream >>>(mag, angle, xy, scale);
}
template <typename T>
@@ -320,7 +320,7 @@ namespace
const T scale = angleInDegrees ? static_cast<T>(CV_PI / 180.0) : static_cast<T>(1.0);
-polarToCartInterleavedImpl_<T> << <grid, block, 0, stream >> >(magAngle, xy, scale);
+polarToCartInterleavedImpl_<T> <<<grid, block, 0, stream >>>(magAngle, xy, scale);
}
}

@@ -84,8 +84,13 @@ void cv::cuda::magnitude(InputArray, InputArray, OutputArray, Stream&) { throw_n
void cv::cuda::magnitudeSqr(InputArray, OutputArray, Stream&) { throw_no_cuda(); }
void cv::cuda::magnitudeSqr(InputArray, InputArray, OutputArray, Stream&) { throw_no_cuda(); }
void cv::cuda::phase(InputArray, InputArray, OutputArray, bool, Stream&) { throw_no_cuda(); }
void cv::cuda::phase(InputArray, OutputArray, bool, Stream&) { throw_no_cuda(); }
void cv::cuda::cartToPolar(InputArray, InputArray, OutputArray, OutputArray, bool, Stream&) { throw_no_cuda(); }
void cv::cuda::cartToPolar(InputArray, OutputArray, OutputArray, bool, Stream&) { throw_no_cuda(); }
void cv::cuda::cartToPolar(InputArray, OutputArray, bool, Stream&) { throw_no_cuda(); }
void cv::cuda::polarToCart(InputArray, InputArray, OutputArray, OutputArray, bool, Stream&) { throw_no_cuda(); }
void cv::cuda::polarToCart(InputArray, InputArray, OutputArray, bool, Stream&) { throw_no_cuda(); }
void cv::cuda::polarToCart(InputArray, OutputArray, bool, Stream&) { throw_no_cuda(); }
#else

@@ -69,8 +69,10 @@ void cv::cuda::countNonZero(InputArray, OutputArray, Stream&) { throw_no_cuda();
void cv::cuda::reduce(InputArray, OutputArray, int, int, int, Stream&) { throw_no_cuda(); }
void cv::cuda::meanStdDev(InputArray, Scalar&, Scalar&) { throw_no_cuda(); }
void cv::cuda::meanStdDev(InputArray, OutputArray, InputArray, Stream&) { throw_no_cuda(); }
void cv::cuda::meanStdDev(InputArray, OutputArray, Stream&) { throw_no_cuda(); }
void cv::cuda::meanStdDev(InputArray, Scalar&, Scalar&, InputArray) { throw_no_cuda(); }
void cv::cuda::meanStdDev(InputArray, Scalar&, Scalar&) { throw_no_cuda(); }
void cv::cuda::rectStdDev(InputArray, InputArray, OutputArray, Rect, Stream&) { throw_no_cuda(); }

@@ -179,9 +179,9 @@ void nv12ToBgra(const GpuMat& decodedFrame, GpuMat& outFrame, int width, int hei
dim3 block(32, 8);
dim3 grid(divUp(width, 2 * block.x), divUp(height, block.y));
if (videoFullRangeFlag)
-NV12_to_BGRA<true> << <grid, block, 0, stream >> > (decodedFrame.ptr<uchar>(), decodedFrame.step, outFrame.ptr<uint>(), outFrame.step, width, height);
+NV12_to_BGRA<true> <<<grid, block, 0, stream >>> (decodedFrame.ptr<uchar>(), decodedFrame.step, outFrame.ptr<uint>(), outFrame.step, width, height);
else
-NV12_to_BGRA<false> << <grid, block, 0, stream >> > (decodedFrame.ptr<uchar>(), decodedFrame.step, outFrame.ptr<uint>(), outFrame.step, width, height);
+NV12_to_BGRA<false> <<<grid, block, 0, stream >>> (decodedFrame.ptr<uchar>(), decodedFrame.step, outFrame.ptr<uint>(), outFrame.step, width, height);
CV_CUDEV_SAFE_CALL(cudaGetLastError());
if (stream == 0)
CV_CUDEV_SAFE_CALL(cudaDeviceSynchronize());

@@ -51,7 +51,7 @@ void cv::cuda::cvtColor(InputArray, OutputArray, int, int, Stream&) { throw_no_c
void cv::cuda::demosaicing(InputArray, OutputArray, int, int, Stream&) { throw_no_cuda(); }
-void cv::cuda::swapChannels(InputOutputArray, const int[], Stream&) { throw_no_cuda(); }
+void cv::cuda::swapChannels(InputOutputArray, const int[4], Stream&) { throw_no_cuda(); }
void cv::cuda::gammaCorrection(InputArray, OutputArray, bool, Stream&) { throw_no_cuda(); }

@@ -9,8 +9,8 @@ using namespace cv::cuda;
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
-void cv::cuda::connectedComponents(InputArray img_, OutputArray labels_, int connectivity,
-int ltype, ConnectedComponentsAlgorithmsTypes ccltype) { throw_no_cuda(); }
+void cv::cuda::connectedComponents(InputArray, OutputArray, int, int, ConnectedComponentsAlgorithmsTypes) { throw_no_cuda(); }
+void cv::cuda::connectedComponents(InputArray, OutputArray, int, int) { throw_no_cuda(); }
#else /* !defined (HAVE_CUDA) */

@@ -428,7 +428,7 @@ namespace canny
cudaSafeCall( cudaMemsetAsync(d_counter, 0, sizeof(int), stream) );
const dim3 block(128);
-const dim3 grid(::min(count, 65535u), divUp(count, 65535), 1);
+const dim3 grid(std::min(count, 65535), divUp(count, 65535), 1);
edgesHysteresisGlobalKernel<<<grid, block, 0, stream>>>(map, st1, st2, d_counter, count);
cudaSafeCall( cudaGetLastError() );
@@ -439,7 +439,7 @@ namespace canny
cudaSafeCall( cudaMemcpyAsync(&count, d_counter, sizeof(int), cudaMemcpyDeviceToHost, stream) );
cudaSafeCall( cudaStreamSynchronize(stream) );
-count = min(count, map.cols * map.rows);
+count = std::min(count, map.cols * map.rows);
//std::swap(st1, st2);
short2* tmp = st1;

@@ -317,19 +317,19 @@ void BlockBasedKomuraEquivalence(const cv::cuda::GpuMat& img, cv::cuda::GpuMat&
grid_size = dim3((((img.cols + 1) / 2) - 1) / kblock_cols + 1, (((img.rows + 1) / 2) - 1) / kblock_rows + 1, 1);
block_size = dim3(kblock_cols, kblock_rows, 1);
-InitLabeling << <grid_size, block_size >> > (img, labels, last_pixel);
+InitLabeling <<<grid_size, block_size >>> (img, labels, last_pixel);
cudaSafeCall(cudaGetLastError());
-Compression << <grid_size, block_size >> > (labels);
+Compression <<<grid_size, block_size >>> (labels);
cudaSafeCall(cudaGetLastError());
-Merge << <grid_size, block_size >> > (labels, last_pixel);
+Merge <<<grid_size, block_size >>> (labels, last_pixel);
cudaSafeCall(cudaGetLastError());
-Compression << <grid_size, block_size >> > (labels);
+Compression <<<grid_size, block_size >>> (labels);
cudaSafeCall(cudaGetLastError());
-FinalLabeling << <grid_size, block_size >> > (img, labels);
+FinalLabeling <<<grid_size, block_size >>> (img, labels);
cudaSafeCall(cudaGetLastError());
if (last_pixel_allocated) {

@@ -302,7 +302,7 @@ namespace cv { namespace cuda { namespace device
int totalCount;
cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) );
-totalCount = ::min(totalCount, maxSize);
+totalCount = std::min(totalCount, maxSize);
return totalCount;
}
@@ -812,7 +812,7 @@ namespace cv { namespace cuda { namespace device
int totalCount;
cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) );
-totalCount = ::min(totalCount, maxSize);
+totalCount = std::min(totalCount, maxSize);
return totalCount;
}

@@ -238,7 +238,7 @@ namespace cv { namespace cuda { namespace device
cudaSafeCall( cudaMemcpyAsync(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost, stream) );
cudaSafeCall( cudaStreamSynchronize(stream) );
-totalCount = ::min(totalCount, maxCircles);
+totalCount = std::min(totalCount, maxCircles);
return totalCount;
}

@@ -189,7 +189,7 @@ namespace cv { namespace cuda { namespace device
cudaSafeCall( cudaStreamSynchronize(stream) );
-totalCount = ::min(totalCount, maxSize);
+totalCount = std::min(totalCount, maxSize);
if (doSort && totalCount > 0)
{

@@ -241,7 +241,7 @@ namespace cv { namespace cuda { namespace device
cudaSafeCall( cudaStreamSynchronize(stream) );
-totalCount = ::min(totalCount, maxSize);
+totalCount = std::min(totalCount, maxSize);
return totalCount;
}
}

@@ -139,7 +139,7 @@ template <typename TSrc, typename TMoments, int nMoments> struct momentsDispatch
static void call(const PtrStepSz<TSrc> src, PtrStepSz<TMoments> moments, const bool binary, const int offsetX, const cudaStream_t stream) {
dim3 blockSize(blockSizeX, blockSizeY);
dim3 gridSize = dim3(divUp(src.rows, blockSizeY));
-spatialMoments<TSrc, TMoments, false, false, nMoments> << <gridSize, blockSize, 0, stream >> > (src, binary, moments.ptr());
+spatialMoments<TSrc, TMoments, false, false, nMoments> <<<gridSize, blockSize, 0, stream >>> (src, binary, moments.ptr());
if (stream == 0)
cudaSafeCall(cudaStreamSynchronize(stream));
};
@@ -150,9 +150,9 @@ template <typename TSrc, int nMoments> struct momentsDispatcherChar {
dim3 blockSize(blockSizeX, blockSizeY);
dim3 gridSize = dim3(divUp(src.rows, blockSizeY));
if (offsetX)
-spatialMoments<TSrc, float, true, false, nMoments> << <gridSize, blockSize, 0, stream >> > (src, binary, moments.ptr(), offsetX);
+spatialMoments<TSrc, float, true, false, nMoments> <<<gridSize, blockSize, 0, stream >>> (src, binary, moments.ptr(), offsetX);
else
-spatialMoments<TSrc, float, true, true, nMoments> << <gridSize, blockSize, 0, stream >> > (src, binary, moments.ptr());
+spatialMoments<TSrc, float, true, true, nMoments> <<<gridSize, blockSize, 0, stream >>> (src, binary, moments.ptr());
if (stream == 0)
cudaSafeCall(cudaStreamSynchronize(stream));

@@ -48,6 +48,7 @@ using namespace cv::cuda;
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
void cv::cuda::calcHist(InputArray, OutputArray, Stream&) { throw_no_cuda(); }
void cv::cuda::calcHist(InputArray, InputArray, OutputArray, Stream&) { throw_no_cuda(); }
void cv::cuda::equalizeHist(InputArray, OutputArray, Stream&) { throw_no_cuda(); }

@@ -3,15 +3,10 @@
// of this distribution and at http://opencv.org/license.html.
#include "precomp.hpp"
-#include "cuda/moments.cuh"
using namespace cv;
using namespace cv::cuda;
-int cv::cuda::numMoments(const MomentsOrder order) {
-return order == MomentsOrder::FIRST_ORDER_MOMENTS ? device::imgproc::n1 : order == MomentsOrder::SECOND_ORDER_MOMENTS ? device::imgproc::n12 : device::imgproc::n123;
-}
template<typename T>
cv::Moments convertSpatialMomentsT(Mat spatialMoments, const MomentsOrder order) {
switch (order) {
@@ -32,10 +27,17 @@ cv::Moments cv::cuda::convertSpatialMoments(Mat spatialMoments, const MomentsOrd
}
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
+int cv::cuda::numMoments(MomentsOrder) { throw_no_cuda(); return 0; }
Moments cv::cuda::moments(InputArray src, const bool binary, const MomentsOrder order, const int momentsType) { throw_no_cuda(); }
-void spatialMoments(InputArray src, OutputArray moments, const bool binary, const MomentsOrder order, const int momentsType, Stream& stream) { throw_no_cuda(); }
+void cv::cuda::spatialMoments(InputArray src, OutputArray moments, const bool binary, const MomentsOrder order, const int momentsType, Stream& stream) { throw_no_cuda(); }
#else /* !defined (HAVE_CUDA) */
+#include "cuda/moments.cuh"
+int cv::cuda::numMoments(const MomentsOrder order) {
+return order == MomentsOrder::FIRST_ORDER_MOMENTS ? device::imgproc::n1 : order == MomentsOrder::SECOND_ORDER_MOMENTS ? device::imgproc::n12 : device::imgproc::n123;
+}
namespace cv { namespace cuda { namespace device { namespace imgproc {
template <typename TSrc, typename TMoments>
void moments(const PtrStepSzb src, PtrStepSzb moments, const bool binary, const int order, const int offsetX, const cudaStream_t stream);
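
The moments.cpp restructuring above follows the usual pattern in the cuda* modules: anything that needs CUDA-only headers or symbols sits behind the `HAVE_CUDA` check, while the no-CUDA build only sees `throw_no_cuda()` stubs with the same signatures. A minimal sketch of that split (hypothetical function and header names, not the actual OpenCV sources):

```cpp
// Sketch of the stub/implementation split (hypothetical names); the real
// guard in OpenCV is "#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)".
#include <stdexcept>

#if !defined(HAVE_CUDA) || defined(CUDA_DISABLER)

// No-CUDA build: keep the exact public signature, only report missing CUDA
// (OpenCV's stubs call throw_no_cuda() here).
int countMoments(int order)
{
    (void)order;
    throw std::runtime_error("the library was built without CUDA support");
}

#else  /* HAVE_CUDA */

// CUDA build: CUDA-only includes and the real definition live in this branch,
// so a toolchain without CUDA never has to parse them.
#include "cuda/device_moments.cuh"   // hypothetical device-side header

int countMoments(int order)
{
    // placeholder body; the real code maps the order to device-side counts
    return order;
}

#endif

int main()
{
    try { return countMoments(2); }
    catch (const std::exception&) { return 0; }   // the no-CUDA build lands here
}
```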

@@ -90,7 +90,7 @@ void FlowUpsample(void* srcDevPtr, uint32_t nSrcWidth, uint32_t nSrcPitch, uint3
dim3 blockDim(BLOCKDIM_X, BLOCKDIM_Y);
dim3 gridDim((nDstWidth + blockDim.x - 1) / blockDim.x, (nDstHeight + blockDim.y - 1) / blockDim.y);
-NearestNeighborFlowKernel << <gridDim, blockDim >> > (0, srcDevPtr, nSrcWidth, nSrcPitch, nSrcHeight,
+NearestNeighborFlowKernel <<<gridDim, blockDim >>> (0, srcDevPtr, nSrcWidth, nSrcPitch, nSrcHeight,
0, dstDevPtr, nDstWidth, nDstPitch, nDstHeight,
nScaleFactor);

@@ -47,7 +47,7 @@ using namespace cv::cuda;
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
-Ptr<FarnebackOpticalFlow> cv::cuda::FarnebackOpticalFlow::create(int, double, bool, int, int, int, double, int) { throw_no_cuda(); return Ptr<FarnebackOpticalFlow>(); }
+Ptr<cv::cuda::FarnebackOpticalFlow> cv::cuda::FarnebackOpticalFlow::create(int, double, bool, int, int, int, double, int) { throw_no_cuda(); return Ptr<FarnebackOpticalFlow>(); }
#else
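
The farneback.cpp change above qualifies the return type so it cannot be read as the wrong class: OpenCV declares both `cv::FarnebackOpticalFlow` (video module) and `cv::cuda::FarnebackOpticalFlow`, and with both namespaces in scope the unqualified name is ambiguous. A self-contained sketch of the same ambiguity with stand-in types (the names mirror OpenCV's, but this is not the actual header):

```cpp
// Stand-in types (not the OpenCV headers) showing why the return type of
// create() has to spell out which FarnebackOpticalFlow it means.
#include <memory>

namespace cv { struct FarnebackOpticalFlow {}; }
namespace cv { namespace cuda {
    struct FarnebackOpticalFlow
    {
        static std::shared_ptr<FarnebackOpticalFlow> create();
    };
}}

using namespace cv;
using namespace cv::cuda;

// Writing "std::shared_ptr<FarnebackOpticalFlow>" here would be ambiguous
// between cv::FarnebackOpticalFlow and cv::cuda::FarnebackOpticalFlow,
// so the template argument is fully qualified, as in the PR:
std::shared_ptr<cv::cuda::FarnebackOpticalFlow> cv::cuda::FarnebackOpticalFlow::create()
{
    return std::make_shared<cv::cuda::FarnebackOpticalFlow>();
}

int main()
{
    auto flow = cv::cuda::FarnebackOpticalFlow::create();
    return flow ? 0 : 1;
}
```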

@@ -52,7 +52,9 @@
#include "opencv2/video.hpp"
#include "opencv2/core/private.cuda.hpp"
+#if defined HAVE_CUDA
#include "opencv2/core/cuda/vec_traits.hpp"
+#endif
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_CUDALEGACY

@@ -148,8 +148,8 @@ namespace grid_minmaxloc_detail
block = dim3(Policy::block_size_x, Policy::block_size_y);
grid = dim3(divUp(cols, block.x * Policy::patch_size_x), divUp(rows, block.y * Policy::patch_size_y));
-grid.x = ::min(grid.x, block.x);
-grid.y = ::min(grid.y, block.y);
+grid.x = std::min(grid.x, block.x);
+grid.y = std::min(grid.y, block.y);
}
template <class Policy, class SrcPtr, typename ResType, class MaskPtr>

@@ -75,7 +75,7 @@ void SegEngineGPU::cvtImgSpace(Ptr<UChar4Image> inimg, Ptr<Float4Image> outimg)
dim3 blockSize(HFS_BLOCK_DIM, HFS_BLOCK_DIM);
dim3 gridSize = getGridSize(img_size, blockSize);
-cvtImgSpaceDevice << <gridSize, blockSize >> >(inimg_ptr, img_size, outimg_ptr);
+cvtImgSpaceDevice <<<gridSize, blockSize >>>(inimg_ptr, img_size, outimg_ptr);
}
void SegEngineGPU::initClusterCenters()
@@ -85,7 +85,7 @@ void SegEngineGPU::initClusterCenters()
dim3 blockSize(HFS_BLOCK_DIM, HFS_BLOCK_DIM);
dim3 gridSize = getGridSize(map_size, blockSize);
-initClusterCentersDevice << <gridSize, blockSize >> >
+initClusterCentersDevice <<<gridSize, blockSize >>>
(img_ptr, map_size, img_size, spixel_size, spixel_list);
}
@@ -98,7 +98,7 @@ void SegEngineGPU::findCenterAssociation()
dim3 blockSize(HFS_BLOCK_DIM, HFS_BLOCK_DIM);
dim3 gridSize = getGridSize(img_size, blockSize);
-findCenterAssociationDevice << <gridSize, blockSize >> >
+findCenterAssociationDevice <<<gridSize, blockSize >>>
(img_ptr, spixel_list, map_size, img_size,
spixel_size, slic_settings.coh_weight,
max_xy_dist, max_color_dist, idx_ptr);
@@ -116,13 +116,13 @@ void SegEngineGPU::updateClusterCenter()
dim3 blockSize(HFS_BLOCK_DIM, HFS_BLOCK_DIM);
dim3 gridSize(map_size.x, map_size.y, no_grid_per_center);
-updateClusterCenterDevice << <gridSize, blockSize >> >
+updateClusterCenterDevice <<<gridSize, blockSize >>>
(img_ptr, idx_ptr, map_size, img_size,
spixel_size, no_blocks_per_line, accum_map_ptr);
dim3 gridSize2(map_size.x, map_size.y);
-finalizeReductionResultDevice << <gridSize2, blockSize >> >
+finalizeReductionResultDevice <<<gridSize2, blockSize >>>
(accum_map_ptr, map_size, no_grid_per_center, spixel_list_ptr);
}
@@ -134,13 +134,13 @@ void SegEngineGPU::enforceConnectivity()
dim3 blockSize(HFS_BLOCK_DIM, HFS_BLOCK_DIM);
dim3 gridSize = getGridSize(img_size, blockSize);
-enforceConnectivityDevice << <gridSize, blockSize >> >
+enforceConnectivityDevice <<<gridSize, blockSize >>>
(idx_ptr, img_size, tmp_idx_ptr);
-enforceConnectivityDevice << <gridSize, blockSize >> >
+enforceConnectivityDevice <<<gridSize, blockSize >>>
(tmp_idx_ptr, img_size, idx_ptr);
-enforceConnectivityDevice1_2 << <gridSize, blockSize >> >
+enforceConnectivityDevice1_2 <<<gridSize, blockSize >>>
(idx_ptr, img_size, tmp_idx_ptr);
-enforceConnectivityDevice1_2 << <gridSize, blockSize >> >
+enforceConnectivityDevice1_2 <<<gridSize, blockSize >>>
(tmp_idx_ptr, img_size, idx_ptr);
}

@@ -194,7 +194,7 @@ void Magnitude::derrivativeXYGpu()
dim3 gridSize((int)ceil((float)img_size.x / (float)blockSize.x),
(int)ceil((float)img_size.y / (float)blockSize.y));
-derrivativeXYDevice << <gridSize, blockSize >> >
+derrivativeXYDevice <<<gridSize, blockSize >>>
(gray_ptr, dx_ptr, dy_ptr, mag_ptr, img_size);
}
@@ -209,7 +209,7 @@ void Magnitude::nonMaxSuppGpu()
dim3 gridSize((int)ceil((float)img_size.x / (float)blockSize.x),
(int)ceil((float)img_size.y / (float)blockSize.y));
-nonMaxSuppDevice << <gridSize, blockSize >> >
+nonMaxSuppDevice <<<gridSize, blockSize >>>
(nms_ptr, dx_ptr, dy_ptr, mag_ptr, img_size);
}
