\section{Initialization and Information}

\cvCppFunc{gpu::getCudaEnabledDeviceCount}
Returns the number of installed CUDA-enabled devices. Use this function before any other GPU function call. If OpenCV is compiled without GPU support, this function returns 0.

\cvdefCpp{int getCudaEnabledDeviceCount();}

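For example, a minimal startup check (a sketch assuming the \texttt{opencv2/gpu/gpu.hpp} header of the GPU module) could look like this:

\begin{lstlisting}
#include <iostream>
#include <opencv2/gpu/gpu.hpp>

int main()
{
    // Returns 0 if no device is present or OpenCV was built without GPU support.
    int count = cv::gpu::getCudaEnabledDeviceCount();
    if (count == 0)
    {
        std::cout << "No CUDA-enabled devices available" << std::endl;
        return 0;
    }
    std::cout << count << " CUDA-enabled device(s) found" << std::endl;
    return 0;
}
\end{lstlisting}
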
\cvCppFunc{gpu::setDevice}
Sets a device and initializes it for the current thread. The call to this function can be omitted; in that case a default device is initialized on the first GPU usage.

\cvdefCpp{void setDevice(int device);}
\begin{description}
\cvarg{device}{System index of the GPU device, starting with 0.}
\end{description}

\cvCppFunc{gpu::getDevice}
Returns the current device index that was set by \texttt{gpu::setDevice} or initialized by default.

\cvdefCpp{int getDevice();}

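As an illustration, explicit device selection for the current thread might look like the following sketch (it assumes at least two CUDA-enabled devices are installed; \texttt{selectSecondDevice} is just an illustrative helper name):

\begin{lstlisting}
#include <opencv2/gpu/gpu.hpp>

using namespace cv;

void selectSecondDevice()
{
    // Bind the current thread to device 1 (the second GPU in the system).
    gpu::setDevice(1);

    // getDevice() now reports the index chosen above.
    CV_Assert(gpu::getDevice() == 1);
}
\end{lstlisting}
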
\cvclass{gpu::GpuFeature}\label{cpp.gpu.GpuFeature}
GPU compute features.

\begin{lstlisting}
enum GpuFeature
{
    COMPUTE_10, COMPUTE_11,
    COMPUTE_12, COMPUTE_13,
    COMPUTE_20, COMPUTE_21,
    ATOMICS, NATIVE_DOUBLE
};
\end{lstlisting}

\cvclass{gpu::DeviceInfo}
This class provides functionality for querying the properties of the specified GPU device.

\begin{lstlisting}
class CV_EXPORTS DeviceInfo
{
public:
    DeviceInfo();
    DeviceInfo(int device_id);

    string name() const;

    int majorVersion() const;
    int minorVersion() const;

    int multiProcessorCount() const;

    size_t freeMemory() const;
    size_t totalMemory() const;

    bool has(GpuFeature feature) const;
    bool isCompatible() const;
};
\end{lstlisting}

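A usage sketch that prints the main properties of a device (the \texttt{printDeviceInfo} helper name is illustrative; \texttt{device\_id} is expected to be a valid index below \texttt{gpu::getCudaEnabledDeviceCount()}):

\begin{lstlisting}
#include <iostream>
#include <opencv2/gpu/gpu.hpp>

using namespace cv;

void printDeviceInfo(int device_id)
{
    gpu::DeviceInfo info(device_id);

    std::cout << "Name:                " << info.name() << std::endl;
    std::cout << "Compute capability:  " << info.majorVersion() << "."
              << info.minorVersion() << std::endl;
    std::cout << "Multiprocessors:     " << info.multiProcessorCount() << std::endl;
    std::cout << "Memory (free/total): " << info.freeMemory() / (1024 * 1024)
              << " / " << info.totalMemory() / (1024 * 1024) << " MB" << std::endl;
}
\end{lstlisting}
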
\cvCppFunc{gpu::DeviceInfo::DeviceInfo}
Constructs a \texttt{DeviceInfo} object for the specified device. If the \texttt{device\_id} parameter is omitted, the object is constructed for the current device.

\cvdefCpp{DeviceInfo::DeviceInfo();\newline
DeviceInfo::DeviceInfo(int device\_id);}
\begin{description}
\cvarg{device\_id}{System index of the GPU device, starting with 0.}
\end{description}

\cvCppFunc{gpu::DeviceInfo::name}
Returns the device name.

\cvdefCpp{string DeviceInfo::name();}

\cvCppFunc{gpu::DeviceInfo::majorVersion}
Returns the major compute capability version.

\cvdefCpp{int DeviceInfo::majorVersion();}

\cvCppFunc{gpu::DeviceInfo::minorVersion}
Returns the minor compute capability version.

\cvdefCpp{int DeviceInfo::minorVersion();}

\cvCppFunc{gpu::DeviceInfo::multiProcessorCount}
Returns the number of streaming multiprocessors.

\cvdefCpp{int DeviceInfo::multiProcessorCount();}

\cvCppFunc{gpu::DeviceInfo::freeMemory}
Returns the amount of free memory in bytes.

\cvdefCpp{size\_t DeviceInfo::freeMemory();}

\cvCppFunc{gpu::DeviceInfo::totalMemory}
Returns the amount of total memory in bytes.

\cvdefCpp{size\_t DeviceInfo::totalMemory();}

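For instance, a rough guard that checks the reported free memory before allocating a large single-channel float buffer might be sketched as follows (\texttt{tryAllocate} is an illustrative helper; \texttt{gpu::GpuMat} is the GPU matrix type from the rest of the module):

\begin{lstlisting}
#include <opencv2/gpu/gpu.hpp>

using namespace cv;

// Allocates a rows x cols CV_32F buffer only if the current device
// reports enough free memory for it.
bool tryAllocate(gpu::GpuMat& buf, int rows, int cols)
{
    gpu::DeviceInfo info;  // current device
    size_t required = (size_t)rows * cols * sizeof(float);

    if (info.freeMemory() < required)
        return false;

    buf.create(rows, cols, CV_32F);
    return true;
}
\end{lstlisting}
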
\cvCppFunc{gpu::DeviceInfo::has}
Returns \texttt{true} if the device has the given GPU feature and \texttt{false} otherwise.

\cvdefCpp{bool DeviceInfo::has(GpuFeature feature);}
\begin{description}
\cvarg{feature}{Feature to be checked. See \hyperref[cpp.gpu.GpuFeature]{cv::gpu::GpuFeature}.}
\end{description}

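For example, a sketch that prefers double precision only when the current device reports native double support (\texttt{pickWorkingDepth} is an illustrative helper name):

\begin{lstlisting}
#include <opencv2/gpu/gpu.hpp>

using namespace cv;

// Chooses the matrix depth to work with on the current device.
int pickWorkingDepth()
{
    gpu::DeviceInfo info;

    // Use double precision only when the hardware supports it natively.
    if (info.has(gpu::NATIVE_DOUBLE))
        return CV_64F;
    return CV_32F;
}
\end{lstlisting}
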
\cvCppFunc{gpu::DeviceInfo::isCompatible}
Returns \texttt{true} if the GPU module can be run on the specified device and \texttt{false} otherwise.

\cvdefCpp{bool DeviceInfo::isCompatible();}

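A typical guard before taking a GPU code path might be sketched as follows (\texttt{processOnGpu} and \texttt{processOnCpu} are hypothetical user functions, not part of the module):

\begin{lstlisting}
#include <opencv2/gpu/gpu.hpp>

using namespace cv;

// Hypothetical user routines, implemented elsewhere.
void processOnGpu(const Mat& frame);
void processOnCpu(const Mat& frame);

void process(const Mat& frame)
{
    // Run on the GPU only if a device is present and the module contains
    // code it can execute; otherwise fall back to the CPU path.
    if (gpu::getCudaEnabledDeviceCount() > 0 && gpu::DeviceInfo().isCompatible())
        processOnGpu(frame);
    else
        processOnCpu(frame);
}
\end{lstlisting}
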
\cvclass{gpu::TargetArchs}
This class provides functionality (as a set of static methods) for checking which NVIDIA card architectures the GPU module was built for.

\bigskip

The following method checks whether the module was built with support for the given feature:
\cvdefCpp{static bool builtWith(GpuFeature feature);}
\begin{description}
\cvarg{feature}{Feature to be checked. See \hyperref[cpp.gpu.GpuFeature]{cv::gpu::GpuFeature}.}
\end{description}

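For instance, one might report at startup whether the build contains code that uses native double precision (an illustrative sketch):

\begin{lstlisting}
#include <iostream>
#include <opencv2/gpu/gpu.hpp>

using namespace cv;

void reportDoubleSupport()
{
    // True only if the module itself was compiled with NATIVE_DOUBLE support.
    if (gpu::TargetArchs::builtWith(gpu::NATIVE_DOUBLE))
        std::cout << "GPU module built with native double support" << std::endl;
    else
        std::cout << "GPU module built without native double support" << std::endl;
}
\end{lstlisting}
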
There is also a set of methods for checking whether the module contains intermediate (PTX) or binary GPU code for the given architecture(s):
\cvdefCpp{
static bool has(int major, int minor);\newline
static bool hasPtx(int major, int minor);\newline
static bool hasBin(int major, int minor);\newline
static bool hasEqualOrLessPtx(int major, int minor);\newline
static bool hasEqualOrGreater(int major, int minor);\newline
static bool hasEqualOrGreaterPtx(int major, int minor);\newline
static bool hasEqualOrGreaterBin(int major, int minor);}
\begin{description}
\cvarg{major}{Major compute capability version.}
\cvarg{minor}{Minor compute capability version.}
\end{description}

% By default GPU module is no compiled for devices with compute capability equal to 1.0. So if you run

According to the CUDA C Programming Guide Version 3.2: ``PTX code produced for some specific compute capability can always be compiled to binary code of greater or equal compute capability.''

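As an illustrative sketch (not the module's internal logic), this rule means a device can run the module if the build contains either binary code for the device's exact architecture or PTX code for an equal or lower compute capability:

\begin{lstlisting}
#include <opencv2/gpu/gpu.hpp>

using namespace cv;

// Illustrative check: the device can run the module if it has binary code
// for the exact architecture, or PTX code for an equal or lower one that
// can be JIT-compiled for the device.
bool deviceCanRunModule(int device_id)
{
    gpu::DeviceInfo info(device_id);
    int major = info.majorVersion(), minor = info.minorVersion();

    return gpu::TargetArchs::hasBin(major, minor) ||
           gpu::TargetArchs::hasEqualOrLessPtx(major, minor);
}
\end{lstlisting}
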