added resetDevice function, removed MultiGpuManager

pull/13383/head
Vladislav Vinogradov 14 years ago
parent b6c195d44c
commit f906c9b259
  1. modules/gpu/include/opencv2/gpu/gpu.hpp (32 changed lines)
  2. modules/gpu/src/initialization.cpp (35 changed lines)
  3. modules/gpu/src/multi_gpu_manager.cpp (151 changed lines)
  4. samples/gpu/multi.cpp (8 changed lines)
  5. samples/gpu/stereo_multi.cpp (20 changed lines)

@@ -64,6 +64,10 @@ namespace cv
CV_EXPORTS void setDevice(int device);
CV_EXPORTS int getDevice();
//! Explicitly destroys and cleans up all resources associated with the current device in the current process.
//! Any subsequent API call to this device will reinitialize the device.
CV_EXPORTS void resetDevice();
enum FeatureSet
{
FEATURE_SET_COMPUTE_10 = 10,
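A minimal usage sketch for the new call (not part of this patch; the device id, matrix size, and the add() workload are placeholders chosen for illustration):

#include <opencv2/core/core.hpp>
#include <opencv2/gpu/gpu.hpp>

int main()
{
    cv::gpu::setDevice(0);                 // make device 0 current (placeholder id)

    cv::Mat src(1000, 1000, CV_32F), dst;
    cv::gpu::GpuMat d_src(src), d_dst;     // allocates and uploads on the current device

    cv::gpu::add(d_src, d_src, d_dst);     // any GPU work on the current device
    d_dst.download(dst);

    // Release device memory before the reset: resetDevice() destroys every
    // resource allocated on the current device in this process.
    d_src.release();
    d_dst.release();

    cv::gpu::resetDevice();                // the next GPU call reinitializes the device
    return 0;
}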
@@ -132,34 +136,6 @@ namespace cv
int minorVersion_;
};
/////////////////////////// Multi GPU Manager //////////////////////////////
// Provides functionality for working with many GPUs
class CV_EXPORTS MultiGpuManager
{
public:
MultiGpuManager();
~MultiGpuManager();
// Must be called before any other GPU calls
void init();
// Makes the given GPU active
void gpuOn(int gpu_id);
// Finishes the piece of work on the current GPU
void gpuOff();
static const int BAD_GPU_ID = -1;
private:
void operator=(const MultiGpuManager&);
MultiGpuManager(const MultiGpuManager&);
class Impl;
Ptr<Impl> impl_;
};
//////////////////////////////// Error handling ////////////////////////
CV_EXPORTS void error(const char *error_string, const char *file, const int line, const char *func);

@@ -72,7 +72,7 @@ namespace
}
CV_EXPORTS bool cv::gpu::TargetArchs::builtWith(cv::gpu::FeatureSet feature_set)
bool cv::gpu::TargetArchs::builtWith(cv::gpu::FeatureSet feature_set)
{
#if defined (HAVE_CUDA)
return ::compareToSet(CUDA_ARCH_FEATURES, feature_set, std::greater_equal<int>());
@@ -83,13 +83,13 @@ CV_EXPORTS bool cv::gpu::TargetArchs::builtWith(cv::gpu::FeatureSet feature_set)
}
CV_EXPORTS bool cv::gpu::TargetArchs::has(int major, int minor)
bool cv::gpu::TargetArchs::has(int major, int minor)
{
return hasPtx(major, minor) || hasBin(major, minor);
}
CV_EXPORTS bool cv::gpu::TargetArchs::hasPtx(int major, int minor)
bool cv::gpu::TargetArchs::hasPtx(int major, int minor)
{
#if defined (HAVE_CUDA)
return ::compareToSet(CUDA_ARCH_PTX, major * 10 + minor, std::equal_to<int>());
@@ -101,7 +101,7 @@ CV_EXPORTS bool cv::gpu::TargetArchs::hasPtx(int major, int minor)
}
CV_EXPORTS bool cv::gpu::TargetArchs::hasBin(int major, int minor)
bool cv::gpu::TargetArchs::hasBin(int major, int minor)
{
#if defined (HAVE_CUDA)
return ::compareToSet(CUDA_ARCH_BIN, major * 10 + minor, std::equal_to<int>());
@@ -113,7 +113,7 @@ CV_EXPORTS bool cv::gpu::TargetArchs::hasBin(int major, int minor)
}
CV_EXPORTS bool cv::gpu::TargetArchs::hasEqualOrLessPtx(int major, int minor)
bool cv::gpu::TargetArchs::hasEqualOrLessPtx(int major, int minor)
{
#if defined (HAVE_CUDA)
return ::compareToSet(CUDA_ARCH_PTX, major * 10 + minor,
@@ -126,14 +126,14 @@ CV_EXPORTS bool cv::gpu::TargetArchs::hasEqualOrLessPtx(int major, int minor)
}
CV_EXPORTS bool cv::gpu::TargetArchs::hasEqualOrGreater(int major, int minor)
bool cv::gpu::TargetArchs::hasEqualOrGreater(int major, int minor)
{
return hasEqualOrGreaterPtx(major, minor) ||
hasEqualOrGreaterBin(major, minor);
}
CV_EXPORTS bool cv::gpu::TargetArchs::hasEqualOrGreaterPtx(int major, int minor)
bool cv::gpu::TargetArchs::hasEqualOrGreaterPtx(int major, int minor)
{
#if defined (HAVE_CUDA)
return ::compareToSet(CUDA_ARCH_PTX, major * 10 + minor,
@@ -146,7 +146,7 @@ CV_EXPORTS bool cv::gpu::TargetArchs::hasEqualOrGreaterPtx(int major, int minor)
}
CV_EXPORTS bool cv::gpu::TargetArchs::hasEqualOrGreaterBin(int major, int minor)
bool cv::gpu::TargetArchs::hasEqualOrGreaterBin(int major, int minor)
{
#if defined (HAVE_CUDA)
return ::compareToSet(CUDA_ARCH_BIN, major * 10 + minor,
@@ -161,9 +161,10 @@ CV_EXPORTS bool cv::gpu::TargetArchs::hasEqualOrGreaterBin(int major, int minor)
#if !defined (HAVE_CUDA)
CV_EXPORTS int cv::gpu::getCudaEnabledDeviceCount() { return 0; }
CV_EXPORTS void cv::gpu::setDevice(int) { throw_nogpu(); }
CV_EXPORTS int cv::gpu::getDevice() { throw_nogpu(); return 0; }
int cv::gpu::getCudaEnabledDeviceCount() { return 0; }
void cv::gpu::setDevice(int) { throw_nogpu(); }
int cv::gpu::getDevice() { throw_nogpu(); return 0; }
void cv::gpu::resetDevice() { throw_nogpu(); }
size_t cv::gpu::DeviceInfo::freeMemory() const { throw_nogpu(); return 0; }
size_t cv::gpu::DeviceInfo::totalMemory() const { throw_nogpu(); return 0; }
bool cv::gpu::DeviceInfo::supports(cv::gpu::FeatureSet) const { throw_nogpu(); return false; }
@@ -173,7 +174,7 @@ void cv::gpu::DeviceInfo::queryMemory(size_t&, size_t&) const { throw_nogpu(); }
#else /* !defined (HAVE_CUDA) */
CV_EXPORTS int cv::gpu::getCudaEnabledDeviceCount()
int cv::gpu::getCudaEnabledDeviceCount()
{
int count;
cudaSafeCall( cudaGetDeviceCount( &count ) );
@@ -181,13 +182,13 @@ CV_EXPORTS int cv::gpu::getCudaEnabledDeviceCount()
}
CV_EXPORTS void cv::gpu::setDevice(int device)
void cv::gpu::setDevice(int device)
{
cudaSafeCall( cudaSetDevice( device ) );
}
CV_EXPORTS int cv::gpu::getDevice()
int cv::gpu::getDevice()
{
int device;
cudaSafeCall( cudaGetDevice( &device ) );
@@ -195,6 +196,12 @@ CV_EXPORTS int cv::gpu::getDevice()
}
void cv::gpu::resetDevice()
{
cudaSafeCall( cudaDeviceReset() );
}
size_t cv::gpu::DeviceInfo::freeMemory() const
{
size_t free_memory, total_memory;

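Roughly what the new implementation amounts to when OpenCV is built with CUDA (a sketch only: OpenCV wraps the call in its internal cudaSafeCall macro, and the explicit error check below just stands in for that):

#include <cstdio>
#include <cuda_runtime.h>

void resetCurrentDevice()
{
    // Destroys all allocations and state of the current device in this process.
    cudaError_t err = cudaDeviceReset();
    if (err != cudaSuccess)
        std::fprintf(stderr, "cudaDeviceReset failed: %s\n", cudaGetErrorString(err));
}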
@@ -1,151 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#if !defined(HAVE_CUDA)
namespace cv { namespace gpu {
class MultiGpuManager::Impl {};
MultiGpuManager::MultiGpuManager() { throw_nogpu(); }
MultiGpuManager::~MultiGpuManager() {}
void MultiGpuManager::init() { throw_nogpu(); }
void MultiGpuManager::gpuOn(int) { throw_nogpu(); }
void MultiGpuManager::gpuOff() { throw_nogpu(); }
}}
#else
#include <vector>
#include <cuda.h>
#define cuSafeCall(expr) safeCall(expr, #expr, __FILE__, __LINE__)
using namespace std;
namespace cv { namespace gpu {
class MultiGpuManager::Impl
{
public:
Impl();
~Impl()
{
for (int i = 0; i < num_devices_; ++i)
cuSafeCall(cuCtxDestroy(contexts_[i]));
}
void gpuOn(int gpu_id)
{
if (gpu_id < 0 || gpu_id >= num_devices_)
CV_Error(CV_StsBadArg, "MultiGpuManager::gpuOn: GPU ID is out of range");
cuSafeCall(cuCtxPushCurrent(contexts_[gpu_id]));
}
void gpuOff()
{
CUcontext prev_context;
cuSafeCall(cuCtxPopCurrent(&prev_context));
}
private:
void safeCall(CUresult code, const char* expr, const char* file, int line)
{
if (code != CUDA_SUCCESS)
error(expr, file, line, "");
}
int num_devices_;
vector<CUcontext> contexts_;
};
MultiGpuManager::Impl::Impl(): num_devices_(0)
{
num_devices_ = getCudaEnabledDeviceCount();
contexts_.resize(num_devices_);
cuSafeCall(cuInit(0));
CUdevice device;
CUcontext prev_context;
for (int i = 0; i < num_devices_; ++i)
{
cuSafeCall(cuDeviceGet(&device, i));
cuSafeCall(cuCtxCreate(&contexts_[i], 0, device));
cuSafeCall(cuCtxPopCurrent(&prev_context));
}
}
MultiGpuManager::MultiGpuManager() {}
MultiGpuManager::~MultiGpuManager() {}
void MultiGpuManager::init()
{
impl_ = Ptr<Impl>(new Impl());
}
void MultiGpuManager::gpuOn(int gpu_id)
{
if (impl_.empty())
CV_Error(CV_StsNullPtr, "MultiGpuManager::gpuOn: must be initialized before any calls");
impl_->gpuOn(gpu_id);
}
void MultiGpuManager::gpuOff()
{
if (impl_.empty())
CV_Error(CV_StsNullPtr, "MultiGpuManager::gpuOff: must be initialized before any calls");
impl_->gpuOff();
}
}}
#endif

@@ -36,8 +36,6 @@ using namespace cv::gpu;
struct Worker { void operator()(int device_id) const; };
MultiGpuManager multi_gpu_mgr;
int main()
{
int num_devices = getCudaEnabledDeviceCount();
@@ -58,8 +56,6 @@ int main()
}
}
multi_gpu_mgr.init();
// Execute calculation in two threads using two GPUs
int devices[] = {0, 1};
parallel_do(devices, devices + 2, Worker());
@@ -70,7 +66,7 @@ int main()
void Worker::operator()(int device_id) const
{
multi_gpu_mgr.gpuOn(device_id);
setDevice(device_id);
Mat src(1000, 1000, CV_32F);
Mat dst;
@@ -95,8 +91,6 @@ void Worker::operator()(int device_id) const
// after context is extracted from the stack
d_src.release();
d_dst.release();
multi_gpu_mgr.gpuOff();
}
#endif
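The pattern the updated sample relies on, sketched here with plain C++11 threads (the real sample drives its Worker through a TBB-based parallel_do helper, and its actual workload differs):

#include <thread>
#include <opencv2/core/core.hpp>
#include <opencv2/gpu/gpu.hpp>

static void work(int device_id)
{
    cv::gpu::setDevice(device_id);          // bind this host thread to one GPU
    cv::Mat src(1000, 1000, CV_32F);
    cv::gpu::GpuMat d_src(src), d_dst;
    cv::gpu::add(d_src, d_src, d_dst);      // placeholder workload
    d_src.release();                        // free device memory while this device is still current
    d_dst.release();
}

int main()
{
    std::thread t0(work, 0), t1(work, 1);   // one host thread per GPU
    t0.join();
    t1.join();
    return 0;
}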

@@ -38,8 +38,6 @@ using namespace cv::gpu;
struct Worker { void operator()(int device_id) const; };
MultiGpuManager multi_gpu_mgr;
// GPUs data
GpuMat d_left[2];
GpuMat d_right[2];
@@ -89,43 +87,37 @@ int main(int argc, char** argv)
return -1;
}
multi_gpu_mgr.init();
// Split source images for processing on the GPU #0
multi_gpu_mgr.gpuOn(0);
setDevice(0);
d_left[0].upload(left.rowRange(0, left.rows / 2));
d_right[0].upload(right.rowRange(0, right.rows / 2));
bm[0] = new StereoBM_GPU();
multi_gpu_mgr.gpuOff();
// Split source images for processing on the GPU #1
multi_gpu_mgr.gpuOn(1);
setDevice(1);
d_left[1].upload(left.rowRange(left.rows / 2, left.rows));
d_right[1].upload(right.rowRange(right.rows / 2, right.rows));
bm[1] = new StereoBM_GPU();
multi_gpu_mgr.gpuOff();
// Execute calculation in two threads using two GPUs
int devices[] = {0, 1};
parallel_do(devices, devices + 2, Worker());
// Release the first GPU resources
multi_gpu_mgr.gpuOn(0);
setDevice(0);
imshow("GPU #0 result", Mat(d_result[0]));
d_left[0].release();
d_right[0].release();
d_result[0].release();
delete bm[0];
multi_gpu_mgr.gpuOff();
// Release the second GPU resources
multi_gpu_mgr.gpuOn(1);
setDevice(1);
imshow("GPU #1 result", Mat(d_result[1]));
d_left[1].release();
d_right[1].release();
d_result[1].release();
delete bm[1];
multi_gpu_mgr.gpuOff();
waitKey();
return 0;
@@ -134,15 +126,13 @@ int main(int argc, char** argv)
void Worker::operator()(int device_id) const
{
multi_gpu_mgr.gpuOn(device_id);
setDevice(device_id);
bm[device_id]->operator()(d_left[device_id], d_right[device_id],
d_result[device_id]);
std::cout << "GPU #" << device_id << " (" << DeviceInfo().name()
<< "): finished\n";
multi_gpu_mgr.gpuOff();
}
#endif
