added support to build without cuda.

pull/13383/head
Anatoly Baksheev 15 years ago
parent e1bd5aeadd
commit 2c84a66ec7
  1. CMakeLists.txt (16)
  2. cvconfig.h.cmake (3)
  3. modules/core/include/opencv2/core/types_c.h (4)
  4. modules/gpu/CMakeLists.txt (30)
  5. modules/gpu/include/opencv2/gpu/gpu.hpp (11)
  6. modules/gpu/src/cudastream.cpp (22)
  7. modules/gpu/src/initialization.cpp (12)
  8. modules/gpu/src/precomp.hpp (34)
  9. modules/gpu/src/stereobm_gpu.cpp (3)

@@ -282,6 +282,7 @@ endif()
set(WITH_TBB OFF CACHE BOOL "Include TBB support")
set(WITH_EIGEN2 ON CACHE BOOL "Include Eigen2 support")
set(WITH_CUDA OFF CACHE BOOL "Include NVidia Cuda Runtime support")
# ===================================================
# Macros that check if a module has been installed.
@@ -608,6 +609,15 @@ if (WITH_TBB)
endif()
endif()
############################### TBB ################################
if (WITH_CUDA)
find_package(CUDA)
if (CUDA_FOUND)
message(STATUS "CUDA detected.")
set(HAVE_CUDA 1)
endif()
endif()
############################## Eigen2 ##############################
@@ -1249,6 +1259,12 @@ else()
message(STATUS " Use TBB: NO")
endif()
if (HAVE_CUDA)
message(STATUS " Uue Cuda: YES")
else()
message(STATUS " Use Cuda: No")
endif()
if(HAVE_EIGEN2)
message(STATUS " Use Eigen2: YES")
else()

@@ -165,3 +165,6 @@
/* Qt bindings use OpenGL */
#cmakedefine HAVE_QT_OPENGL
/* NVidia Cuda Runtime API*/
#cmakedefine HAVE_CUDA
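Note on the template change above: when CMake sets HAVE_CUDA, the #cmakedefine line becomes a real #define HAVE_CUDA in the generated cvconfig.h; otherwise it is emitted commented out. A minimal sketch of how a source file can branch on it (the helper below is illustrative, not part of the patch):

    // Sketch only (not part of the patch): branching on the generated define.
    #include "cvconfig.h"            // defines HAVE_CUDA in a CUDA-enabled build

    #ifdef HAVE_CUDA
    #include <cuda_runtime_api.h>    // CUDA Runtime API
    #endif

    // Hypothetical helper: reports how many CUDA devices are usable.
    int availableCudaDevices()
    {
    #ifdef HAVE_CUDA
        int count = 0;
        if (cudaGetDeviceCount(&count) != cudaSuccess)
            return 0;                // treat a failed query as "no devices"
        return count;
    #else
        return 0;                    // CUDA-less build: always zero
    #endif
    }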

@@ -229,7 +229,9 @@ enum {
CV_StsParseError= -212, /* invalid syntax/structure of the parsed file */
CV_StsNotImplemented= -213, /* the requested function/feature is not implemented */
CV_StsBadMemBlock= -214, /* an allocated block has been corrupted */
CV_StsAssert= -215 /* assertion failed */
CV_StsAssert= -215, /* assertion failed */
CV_GpuNotFound= -216,
CV_GpuApiCallError= -217
};
/****************************************************************************************\
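Note: the two new status codes extend the existing CVStatus list, so GPU failures surface through the usual cv::Exception machinery. A hedged sketch of how calling code might tell them apart (only CV_GpuNotFound and CV_GpuApiCallError come from this patch; the handler is illustrative):

    // Sketch only: distinguishing the new GPU error codes at a call site.
    #include <opencv2/core/core.hpp>
    #include <iostream>

    void reportGpuError(const cv::Exception& e)   // hypothetical helper
    {
        if (e.code == CV_GpuNotFound)
            std::cerr << "OpenCV was built without CUDA support" << std::endl;
        else if (e.code == CV_GpuApiCallError)
            std::cerr << "CUDA Runtime API call failed: " << e.err << std::endl;
        else
            std::cerr << "Other OpenCV error (" << e.code << "): " << e.err << std::endl;
    }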

@@ -1,23 +1,12 @@
include(FindCUDA)
if (CUDA_FOUND)
include_directories(${CUDA_INCLUDE_DIRS})
link_directories(${CUDA_LIBRARIES})
#message ("CUDA_LIBRARIES = ${CUDA_LIBRARIES}")
#message ("CUDA_INCLUDE_DIRS = ${CUDA_INCLUDE_DIRS}")
#message ("CUDA_TARGET_LINK = ${CUDA_TARGET_LINK}")
set(name "gpu")
set(DEPS "opencv_core")
#CUDA_GENERATED_OUTPUT_DIR (Default CMAKE_CURRENT_BINARY_DIR)
set(the_target "opencv_${name}")
#====================================================================================
project(${the_target})
set(name "gpu")
set(DEPS "opencv_core")
project(opencv_${name})
add_definitions(-DCVAPI_EXPORTS)
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include"
@@ -42,16 +31,19 @@ if (CUDA_FOUND)
file(GLOB lib_hdrs "include/opencv2/${name}/*.h*")
source_group("Include" FILES ${lib_hdrs})
if (HAVE_CUDA)
include_directories(${CUDA_INCLUDE_DIRS})
link_directories(${CUDA_LIBRARIES})
if (UNIX OR APPLE)
set (CUDA_NVCC_FLAGS "-Xcompiler;-fPIC")
endif()
CUDA_COMPILE(cuda_objs ${lib_cuda})
#message ("lib cuda : ${cuda_objs}")
#CUDA_BUILD_CLEAN_TARGET()
endif()
set(the_target "opencv_${name}")
#message ("cuda_add_library : ${the_target} ${lib_srcs} ${lib_hdrs} ${lib_int_hdrs} ${lib_cuda} ${lib_cuda_hdrs}")
add_library(${the_target} ${lib_srcs} ${lib_hdrs} ${lib_int_hdrs} ${lib_cuda} ${lib_cuda_hdrs} ${cuda_objs})
if(PCHSupport_FOUND)
@@ -82,7 +74,6 @@ if (CUDA_FOUND)
)
# Add the required libraries for linking:
#message (" ++++ target_link_libraries = ${the_target} ${OPENCV_LINKER_LIBS} ${IPP_LIBS} ${DEPS} ${CUDA_LIBRARIES}")
target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${IPP_LIBS} ${DEPS} ${CUDA_LIBRARIES})
if(MSVC)
@@ -108,5 +99,4 @@ if (CUDA_FOUND)
DESTINATION include/opencv2/${name}
COMPONENT main)
endif()

@@ -216,6 +216,9 @@ namespace cv
class CudaStream
{
public:
static CudaStream empty();
CudaStream();
~CudaStream();
@@ -236,9 +239,13 @@ namespace cv
// converts matrix type, ex from float to uchar depending on type
void enqueueConvert(const GpuMat& src, GpuMat& dst, int type);
//CUstream_st& getStream();
struct Impl;
const Impl& getImpl() const;
private:
void *impl;
Impl *impl;
CudaStream(const CudaStream&);
CudaStream& operator=(const CudaStream&);
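Note: the header now exposes a forward-declared Impl instead of the old untyped void*, a pimpl that keeps cudaStream_t out of the public header while stream_access.hpp can still reach the raw handle via getImpl(). A rough sketch of the pattern (class and member names below are illustrative; only Impl and getImpl mirror the patch):

    // Sketch only: the pimpl shape used by the CudaStream change.
    class StreamSketch
    {
    public:
        struct Impl;                       // defined in the .cpp; assumed to wrap a cudaStream_t
        StreamSketch() : impl(0) {}
        const Impl& getImpl() const { return *impl; }  // internal accessor for stream_access.hpp
    private:
        Impl* impl;                        // typed pointer replaces the former void*
        StreamSketch(const StreamSketch&);             // non-copyable, as in the header
        StreamSketch& operator=(const StreamSketch&);
    };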

@@ -41,33 +41,37 @@
//M*/
#include "precomp.hpp"
#include "opencv2/gpu/stream_access.hpp"
using namespace cv;
using namespace cv::gpu;
cv::gpu::CudaStream::CudaStream() : impl( fastMalloc(sizeof(cudaStream_t)) )
cv::gpu::CudaStream::CudaStream() : impl( (Impl*)fastMalloc(sizeof(Impl)) )
{
cudaSafeCall( cudaStreamCreate((cudaStream_t*)impl) );
//cudaSafeCall( cudaStreamCreate( &impl->stream) );
}
cv::gpu::CudaStream::~CudaStream()
{
if (impl)
{
cudaSafeCall( cudaStreamDestroy( *(cudaStream_t*)impl ) );
cv::fastFree( impl );
}
}
bool cv::gpu::CudaStream::queryIfComplete()
{
cudaError_t err = cudaStreamQuery( *(cudaStream_t*)impl );
//cudaError_t err = cudaStreamQuery( *(cudaStream_t*)impl );
if (err == cudaSuccess)
return true;
//if (err == cudaSuccess)
// return true;
if (err == cudaErrorNotReady)
return false;
//if (err == cudaErrorNotReady)
// return false;
//cudaErrorInvalidResourceHandle
cudaSafeCall( err );
////cudaErrorInvalidResourceHandle
//cudaSafeCall( err );
return true;
}
void cv::gpu::CudaStream::waitForCompletion()

@@ -45,6 +45,16 @@
using namespace cv;
using namespace cv::gpu;
#ifndef HAVE_CUDA
CV_EXPORTS int cv::gpu::getCudaEnabledDeviceCount() { return 0; }
CV_EXPORTS string cv::gpu::getDeviceName(int /*device*/) { cudaSafeCall(0); return 0; }
CV_EXPORTS void cv::gpu::setDevice(int /*device*/) { cudaSafeCall(0); }
CV_EXPORTS void cv::gpu::getComputeCapability(int /*device*/, int* /*major*/, int* /*minor*/) { cudaSafeCall(0); }
CV_EXPORTS int cv::gpu::getNumberOfSMs(int /*device*/) { cudaSafeCall(0); return 0; }
#else
CV_EXPORTS int cv::gpu::getCudaEnabledDeviceCount()
{
int count;
@@ -79,3 +89,5 @@ CV_EXPORTS int cv::gpu::getNumberOfSMs(int device)
cudaSafeCall( cudaGetDeviceProperties( &prop, device ) );
return prop.multiProcessorCount;
}
#endif
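Note: with the stub block above, a build without CUDA still links: getCudaEnabledDeviceCount() simply returns 0, and the remaining stubs raise CV_GpuNotFound through cudaSafeCall(0). A hedged usage sketch (only the cv::gpu functions named in this hunk are assumed):

    // Sketch only: probing GPU availability at run time.
    #include <opencv2/gpu/gpu.hpp>
    #include <iostream>
    #include <string>

    int main()
    {
        int devices = cv::gpu::getCudaEnabledDeviceCount();   // 0 on a CUDA-less build
        if (devices == 0)
        {
            std::cout << "No CUDA support or no CUDA devices; using the CPU path" << std::endl;
            return 0;
        }
        // Only reached when the library was built with CUDA and a device is present.
        std::cout << devices << " CUDA device(s), first: "
                  << cv::gpu::getDeviceName(0) << std::endl;
        return 0;
    }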

@@ -57,41 +57,39 @@
#include "cuda_shared.hpp"
#ifndef HAVE_CUDA
#define cudaSafeCall(err) CV_Error(CV_GpuNotFound, "The library is compiled without GPU support")
#define cudaCallerSafeCall(err) CV_Error(CV_GpuNotFound, "The library is compiled without GPU support")
#else /* HAVE_CUDA */
#if _MSC_VER >= 1200
#pragma warning (disable : 4100 4211 4201 4408)
#endif
#include "cuda_runtime_api.h"
#define cudaCallerSafeCall(err) err;
#ifdef __GNUC__
#define cudaSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__, __func__)
#else
#define cudaSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__)
//inline void __cudaSafeCall( cudaError err, const char *file, const int line )
//{
// if( cudaSuccess != err)
// CV_Error_(CV_StsAssert, ("%s(%i) : Runtime API error : %s.\n", cudaGetErrorString(err)));
//}
#endif
namespace cv
{
namespace gpu
{
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
static inline void __cudaSafeCall( cudaError err, const char *file, const int line, const char *func = "")
{
if( cudaSuccess != err)
{
std::cerr << file << "(" << line << ") : cudaSafeCall() Runtime API error : " << cudaGetErrorString(err) << "\n";
exit(-1);
}
}
template<class T>
inline DevMem2D_<T> getDevMem(const GpuMat& mat)
{
return DevMem2D_<T>(mat.rows, mat.cols, mat.data, mat.step);
cv::error( cv::Exception(CV_GpuApiCallError, cudaGetErrorString(err), func, file, line) );
}
}
}
#endif /* HAVE_CUDA */
#endif
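Note: because the CUDA-less definitions discard the macro argument entirely, a statement such as cudaSafeCall(cudaMemcpy(...)) still compiles when no CUDA headers are present; the whole call collapses to CV_Error(CV_GpuNotFound, ...). With HAVE_CUDA, cudaSafeCall expands to __cudaSafeCall and checks the returned cudaError_t, while cudaCallerSafeCall just evaluates its argument. A small illustration (the wrapper function is made up and assumes it lives in a module source that includes precomp.hpp):

    // Sketch only: one source line, two build modes.
    // Assumes precomp.hpp (and, in a CUDA build, cuda_runtime_api.h) is already included.
    void copyToDeviceSketch(void* dst, const void* src, size_t bytes)   // hypothetical wrapper
    {
        // With HAVE_CUDA:    checks the cudaError_t returned by cudaMemcpy.
        // Without HAVE_CUDA: the argument is dropped by the preprocessor and
        //                    CV_Error(CV_GpuNotFound, ...) is raised instead.
        cudaSafeCall( cudaMemcpy(dst, src, bytes, cudaMemcpyHostToDevice) );
    }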

@@ -41,7 +41,6 @@
//M*/
#include "precomp.hpp"
#include <limits>
using namespace cv;
using namespace cv::gpu;
@@ -69,5 +68,5 @@ void StereoBM_GPU::operator() ( const GpuMat& left, const GpuMat& right, GpuMat&
DevMem2D disp = disparity;
DevMem2D_<uint> mssd = minSSD;
impl::stereoBM_GPU(left, right, disp, ndisp, mssd);
cudaCallerSafeCall( impl::stereoBM_GPU(left, right, disp, ndisp, mssd) );
}
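Note: wrapping the kernel-wrapper call in cudaCallerSafeCall means a CUDA-less build raises CV_GpuNotFound here instead of referencing impl::stereoBM_GPU at all, so callers can catch the condition and fall back to a CPU path. A hedged example (the default constructor and the exact operator() signature are assumptions based on the hunk header above):

    // Sketch only: guarding a StereoBM_GPU call in application code.
    #include <opencv2/gpu/gpu.hpp>
    #include <iostream>

    void matchSketch(const cv::gpu::GpuMat& left, const cv::gpu::GpuMat& right)
    {
        try
        {
            cv::gpu::StereoBM_GPU bm;              // assumed default-constructible
            cv::gpu::GpuMat disparity;
            bm(left, right, disparity);            // raises CV_GpuNotFound without CUDA
        }
        catch (const cv::Exception& e)
        {
            if (e.code == CV_GpuNotFound)
                std::cerr << "Built without CUDA: " << e.err << std::endl;
            else
                throw;                             // not a "no GPU support" condition
        }
    }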
