|
|
|
@ -49,94 +49,210 @@ using namespace cv::gpu::device; |
|
|
|
|
|
|
|
|
|
namespace cv { namespace gpu { namespace bf_radius_match |
|
|
|
|
{ |
|
|
|
|
__device__ __forceinline__ void store(const int* sidx, const float* sdist, const unsigned int scount, int* trainIdx, float* distance, int& sglob_ind, const int tid) |
|
|
|
|
template <typename T> struct SingleTrain |
|
|
|
|
{ |
|
|
|
|
if (tid < scount) |
|
|
|
|
enum {USE_IMG_IDX = 0}; |
|
|
|
|
|
|
|
|
|
explicit SingleTrain(const DevMem2D_<T>& train_) : train(train_) |
|
|
|
|
{ |
|
|
|
|
trainIdx[sglob_ind + tid] = sidx[tid]; |
|
|
|
|
distance[sglob_ind + tid] = sdist[tid]; |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
if (tid == 0) |
|
|
|
|
sglob_ind += scount; |
|
|
|
|
} |
|
|
|
|
static __device__ __forceinline__ void store(const int* s_trainIdx, const int* s_imgIdx, const float* s_dist, unsigned int& s_count, int& s_globInd, |
|
|
|
|
int* trainIdx, int* imgIdx, float* distance, int maxCount) |
|
|
|
|
{ |
|
|
|
|
const int tid = threadIdx.y * blockDim.x + threadIdx.x; |
|
|
|
|
|
|
|
|
|
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int BLOCK_STACK, typename VecDiff, typename Dist, typename T, typename Mask> |
|
|
|
|
__global__ void radiusMatch(const PtrStep_<T> query, const DevMem2D_<T> train, const float maxDistance, const Mask mask, |
|
|
|
|
DevMem2Di trainIdx_, PtrStepf distance, unsigned int* nMatches) |
|
|
|
|
{ |
|
|
|
|
#if __CUDA_ARCH__ >= 120 |
|
|
|
|
if (tid < s_count && s_globInd + tid < maxCount) |
|
|
|
|
{ |
|
|
|
|
trainIdx[s_globInd + tid] = s_trainIdx[tid]; |
|
|
|
|
distance[s_globInd + tid] = s_dist[tid]; |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
typedef typename Dist::result_type result_type; |
|
|
|
|
typedef typename Dist::value_type value_type; |
|
|
|
|
if (tid == 0) |
|
|
|
|
{ |
|
|
|
|
s_globInd += s_count; |
|
|
|
|
s_count = 0; |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
__shared__ result_type smem[BLOCK_DIM_X * BLOCK_DIM_Y]; |
|
|
|
|
__shared__ int sidx[BLOCK_STACK]; |
|
|
|
|
__shared__ float sdist[BLOCK_STACK]; |
|
|
|
|
__shared__ unsigned int scount; |
|
|
|
|
__shared__ int sglob_ind; |
|
|
|
|
template <int BLOCK_STACK, typename Dist, typename VecDiff, typename Mask> |
|
|
|
|
__device__ __forceinline__ void loop(float maxDistance, Mask& mask, const VecDiff& vecDiff, |
|
|
|
|
int* s_trainIdx, int* s_imgIdx, float* s_dist, unsigned int& s_count, int& s_globInd, |
|
|
|
|
int* trainIdxRow, int* imgIdxRow, float* distanceRow, int maxCount, |
|
|
|
|
typename Dist::result_type* s_diffRow) const |
|
|
|
|
{ |
|
|
|
|
#if __CUDA_ARCH__ >= 120 |
|
|
|
|
|
|
|
|
|
const int queryIdx = blockIdx.x; |
|
|
|
|
const int tid = threadIdx.y * BLOCK_DIM_X + threadIdx.x; |
|
|
|
|
for (int i = 0; i < train.rows; i += blockDim.y) |
|
|
|
|
{ |
|
|
|
|
int trainIdx = i + threadIdx.y; |
|
|
|
|
|
|
|
|
|
if (tid == 0) |
|
|
|
|
if (trainIdx < train.rows && mask(blockIdx.x, trainIdx)) |
|
|
|
|
{ |
|
|
|
|
Dist dist; |
|
|
|
|
|
|
|
|
|
vecDiff.calc(train.ptr(trainIdx), train.cols, dist, s_diffRow, threadIdx.x); |
|
|
|
|
|
|
|
|
|
const typename Dist::result_type val = dist; |
|
|
|
|
|
|
|
|
|
if (threadIdx.x == 0 && val < maxDistance) |
|
|
|
|
{ |
|
|
|
|
unsigned int ind = atomicInc(&s_count, (unsigned int) -1); |
|
|
|
|
s_trainIdx[ind] = trainIdx; |
|
|
|
|
s_dist[ind] = val; |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
__syncthreads(); |
|
|
|
|
|
|
|
|
|
if (s_count >= BLOCK_STACK - blockDim.y) |
|
|
|
|
store(s_trainIdx, s_imgIdx, s_dist, s_count, s_globInd, trainIdxRow, imgIdxRow, distanceRow, maxCount); |
|
|
|
|
|
|
|
|
|
__syncthreads(); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
store(s_trainIdx, s_imgIdx, s_dist, s_count, s_globInd, trainIdxRow, imgIdxRow, distanceRow, maxCount); |
|
|
|
|
|
|
|
|
|
#endif |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
__device__ __forceinline__ int descLen() const |
|
|
|
|
{ |
|
|
|
|
scount = 0; |
|
|
|
|
sglob_ind = 0; |
|
|
|
|
return train.cols; |
|
|
|
|
} |
|
|
|
|
__syncthreads(); |
|
|
|
|
|
|
|
|
|
int* trainIdx_row = trainIdx_.ptr(queryIdx); |
|
|
|
|
float* distance_row = distance.ptr(queryIdx); |
|
|
|
|
const DevMem2D_<T> train; |
|
|
|
|
}; |
|
|
|
|
|
|
|
|
|
const VecDiff vecDiff(query.ptr(queryIdx), train.cols, (typename Dist::value_type*)smem, tid, threadIdx.x); |
|
|
|
|
|
|
|
|
|
typename Dist::result_type* sdiffRow = smem + BLOCK_DIM_X * threadIdx.y; |
|
|
|
|
template <typename T> struct TrainCollection |
|
|
|
|
{ |
|
|
|
|
enum {USE_IMG_IDX = 1}; |
|
|
|
|
|
|
|
|
|
TrainCollection(const DevMem2D_<T>* trainCollection_, int nImg_, int desclen_) : |
|
|
|
|
trainCollection(trainCollection_), nImg(nImg_), desclen(desclen_) |
|
|
|
|
{ |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
for (int trainIdx = threadIdx.y; trainIdx < train.rows; trainIdx += BLOCK_DIM_Y) |
|
|
|
|
static __device__ __forceinline__ void store(const int* s_trainIdx, const int* s_imgIdx, const float* s_dist, unsigned int& s_count, int& s_globInd, |
|
|
|
|
int* trainIdx, int* imgIdx, float* distance, int maxCount) |
|
|
|
|
{ |
|
|
|
|
if (mask(queryIdx, trainIdx)) |
|
|
|
|
const int tid = threadIdx.y * blockDim.x + threadIdx.x; |
|
|
|
|
|
|
|
|
|
if (tid < s_count && s_globInd + tid < maxCount) |
|
|
|
|
{ |
|
|
|
|
Dist dist; |
|
|
|
|
trainIdx[s_globInd + tid] = s_trainIdx[tid]; |
|
|
|
|
imgIdx[s_globInd + tid] = s_imgIdx[tid]; |
|
|
|
|
distance[s_globInd + tid] = s_dist[tid]; |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
if (tid == 0) |
|
|
|
|
{ |
|
|
|
|
s_globInd += s_count; |
|
|
|
|
s_count = 0; |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
const T* trainRow = train.ptr(trainIdx); |
|
|
|
|
|
|
|
|
|
vecDiff.calc(trainRow, train.cols, dist, sdiffRow, threadIdx.x); |
|
|
|
|
template <int BLOCK_STACK, typename Dist, typename VecDiff, typename Mask> |
|
|
|
|
__device__ void loop(float maxDistance, Mask& mask, const VecDiff& vecDiff, |
|
|
|
|
int* s_trainIdx, int* s_imgIdx, float* s_dist, unsigned int& s_count, int& s_globInd, |
|
|
|
|
int* trainIdxRow, int* imgIdxRow, float* distanceRow, int maxCount, |
|
|
|
|
typename Dist::result_type* s_diffRow) const |
|
|
|
|
{ |
|
|
|
|
#if __CUDA_ARCH__ >= 120 |
|
|
|
|
|
|
|
|
|
for (int imgIdx = 0; imgIdx < nImg; ++imgIdx) |
|
|
|
|
{ |
|
|
|
|
const DevMem2D_<T> train = trainCollection[imgIdx]; |
|
|
|
|
|
|
|
|
|
const typename Dist::result_type val = dist; |
|
|
|
|
mask.next(); |
|
|
|
|
|
|
|
|
|
if (threadIdx.x == 0 && val < maxDistance) |
|
|
|
|
for (int i = 0; i < train.rows; i += blockDim.y) |
|
|
|
|
{ |
|
|
|
|
unsigned int i = atomicInc(&scount, (unsigned int) -1); |
|
|
|
|
sidx[i] = trainIdx; |
|
|
|
|
sdist[i] = val; |
|
|
|
|
int trainIdx = i + threadIdx.y; |
|
|
|
|
|
|
|
|
|
if (trainIdx < train.rows && mask(blockIdx.x, trainIdx)) |
|
|
|
|
{ |
|
|
|
|
Dist dist; |
|
|
|
|
|
|
|
|
|
vecDiff.calc(train.ptr(trainIdx), desclen, dist, s_diffRow, threadIdx.x); |
|
|
|
|
|
|
|
|
|
const typename Dist::result_type val = dist; |
|
|
|
|
|
|
|
|
|
if (threadIdx.x == 0 && val < maxDistance) |
|
|
|
|
{ |
|
|
|
|
unsigned int ind = atomicInc(&s_count, (unsigned int) -1); |
|
|
|
|
s_trainIdx[ind] = trainIdx; |
|
|
|
|
s_imgIdx[ind] = imgIdx; |
|
|
|
|
s_dist[ind] = val; |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
__syncthreads(); |
|
|
|
|
|
|
|
|
|
if (s_count >= BLOCK_STACK - blockDim.y) |
|
|
|
|
store(s_trainIdx, s_imgIdx, s_dist, s_count, s_globInd, trainIdxRow, imgIdxRow, distanceRow, maxCount); |
|
|
|
|
|
|
|
|
|
__syncthreads(); |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
__syncthreads(); |
|
|
|
|
|
|
|
|
|
if (scount > BLOCK_STACK - BLOCK_DIM_Y) |
|
|
|
|
{ |
|
|
|
|
store(sidx, sdist, scount, trainIdx_row, distance_row, sglob_ind, tid); |
|
|
|
|
if (tid == 0) |
|
|
|
|
scount = 0; |
|
|
|
|
} |
|
|
|
|
__syncthreads(); |
|
|
|
|
store(s_trainIdx, s_imgIdx, s_dist, s_count, s_globInd, trainIdxRow, imgIdxRow, distanceRow, maxCount); |
|
|
|
|
|
|
|
|
|
#endif |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
__device__ __forceinline__ int descLen() const |
|
|
|
|
{ |
|
|
|
|
return desclen; |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
const DevMem2D_<T>* trainCollection; |
|
|
|
|
const int nImg; |
|
|
|
|
const int desclen; |
|
|
|
|
}; |
|
|
|
|
|
|
|
|
|
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int BLOCK_STACK, typename VecDiff, typename Dist, typename T, typename Train, typename Mask> |
|
|
|
|
__global__ void radiusMatch(const PtrStep_<T> query, const Train train, float maxDistance, const Mask mask, |
|
|
|
|
PtrStepi trainIdx, PtrStepi imgIdx, PtrStepf distance, int* nMatches, int maxCount) |
|
|
|
|
{ |
|
|
|
|
typedef typename Dist::result_type result_type; |
|
|
|
|
typedef typename Dist::value_type value_type; |
|
|
|
|
|
|
|
|
|
__shared__ result_type s_mem[BLOCK_DIM_X * BLOCK_DIM_Y]; |
|
|
|
|
|
|
|
|
|
__shared__ int s_trainIdx[BLOCK_STACK]; |
|
|
|
|
__shared__ int s_imgIdx[Train::USE_IMG_IDX ? BLOCK_STACK : 1]; |
|
|
|
|
__shared__ float s_dist[BLOCK_STACK]; |
|
|
|
|
__shared__ unsigned int s_count; |
|
|
|
|
|
|
|
|
|
__shared__ int s_globInd; |
|
|
|
|
|
|
|
|
|
if (threadIdx.x == 0 && threadIdx.y == 0) |
|
|
|
|
{ |
|
|
|
|
s_count = 0; |
|
|
|
|
s_globInd = 0; |
|
|
|
|
} |
|
|
|
|
__syncthreads(); |
|
|
|
|
|
|
|
|
|
store(sidx, sdist, scount, trainIdx_row, distance_row, sglob_ind, tid); |
|
|
|
|
const VecDiff vecDiff(query.ptr(blockIdx.x), train.descLen(), (typename Dist::value_type*)s_mem, threadIdx.y * BLOCK_DIM_X + threadIdx.x, threadIdx.x); |
|
|
|
|
|
|
|
|
|
if (tid == 0) |
|
|
|
|
nMatches[queryIdx] = sglob_ind; |
|
|
|
|
Mask m = mask; |
|
|
|
|
|
|
|
|
|
#endif |
|
|
|
|
train.template loop<BLOCK_STACK, Dist>(maxDistance, m, vecDiff, |
|
|
|
|
s_trainIdx, s_imgIdx, s_dist, s_count, s_globInd, |
|
|
|
|
trainIdx.ptr(blockIdx.x), imgIdx.ptr(blockIdx.x), distance.ptr(blockIdx.x), maxCount, |
|
|
|
|
s_mem + BLOCK_DIM_X * threadIdx.y); |
|
|
|
|
|
|
|
|
|
if (threadIdx.x == 0 && threadIdx.y == 0) |
|
|
|
|
nMatches[blockIdx.x] = s_globInd; |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
/////////////////////////////////////////////////////////////////////////////// |
|
|
|
|
// Radius Match kernel caller |
|
|
|
|
|
|
|
|
|
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int BLOCK_STACK, typename Dist, typename T, typename Mask> |
|
|
|
|
void radiusMatchSimple_caller(const DevMem2D_<T>& query, const DevMem2D_<T>& train, float maxDistance, const Mask& mask, |
|
|
|
|
const DevMem2Di& trainIdx, const DevMem2Df& distance, unsigned int* nMatches, |
|
|
|
|
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int BLOCK_STACK, typename Dist, typename T, typename Train, typename Mask> |
|
|
|
|
void radiusMatchSimple_caller(const DevMem2D_<T>& query, const Train& train, float maxDistance, const Mask& mask, |
|
|
|
|
const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int* nMatches, |
|
|
|
|
cudaStream_t stream) |
|
|
|
|
{ |
|
|
|
|
StaticAssert<BLOCK_STACK >= BLOCK_DIM_Y>::check(); |
|
|
|
@ -146,16 +262,16 @@ namespace cv { namespace gpu { namespace bf_radius_match |
|
|
|
|
const dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y, 1); |
|
|
|
|
|
|
|
|
|
radiusMatch<BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_STACK, VecDiffGlobal<BLOCK_DIM_X, T>, Dist, T> |
|
|
|
|
<<<grid, threads, 0, stream>>>(query, train, maxDistance, mask, trainIdx, distance, nMatches); |
|
|
|
|
<<<grid, threads, 0, stream>>>(query, train, maxDistance, mask, trainIdx, imgIdx, distance, nMatches, trainIdx.cols); |
|
|
|
|
cudaSafeCall( cudaGetLastError() ); |
|
|
|
|
|
|
|
|
|
if (stream == 0) |
|
|
|
|
cudaSafeCall( cudaDeviceSynchronize() ); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int BLOCK_STACK, int MAX_LEN, bool LEN_EQ_MAX_LEN, typename Dist, typename T, typename Mask> |
|
|
|
|
void radiusMatchCached_caller(const DevMem2D_<T>& query, const DevMem2D_<T>& train, float maxDistance, const Mask& mask, |
|
|
|
|
const DevMem2Di& trainIdx, const DevMem2Df& distance, unsigned int* nMatches, |
|
|
|
|
template <int BLOCK_DIM_X, int BLOCK_DIM_Y, int BLOCK_STACK, int MAX_LEN, bool LEN_EQ_MAX_LEN, typename Dist, typename T, typename Train, typename Mask> |
|
|
|
|
void radiusMatchCached_caller(const DevMem2D_<T>& query, const Train& train, float maxDistance, const Mask& mask, |
|
|
|
|
const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int* nMatches, |
|
|
|
|
cudaStream_t stream) |
|
|
|
|
{ |
|
|
|
|
StaticAssert<BLOCK_STACK >= BLOCK_DIM_Y>::check(); |
|
|
|
@ -167,7 +283,7 @@ namespace cv { namespace gpu { namespace bf_radius_match |
|
|
|
|
const dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y, 1); |
|
|
|
|
|
|
|
|
|
radiusMatch<BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_STACK, VecDiffCachedRegister<BLOCK_DIM_X, MAX_LEN, LEN_EQ_MAX_LEN, typename Dist::value_type>, Dist, T> |
|
|
|
|
<<<grid, threads, 0, stream>>>(query, train, maxDistance, mask, trainIdx, distance, nMatches); |
|
|
|
|
<<<grid, threads, 0, stream>>>(query, train, maxDistance, mask, trainIdx, imgIdx, distance, nMatches, trainIdx.cols); |
|
|
|
|
cudaSafeCall( cudaGetLastError() ); |
|
|
|
|
|
|
|
|
|
if (stream == 0) |
|
|
|
@ -177,58 +293,58 @@ namespace cv { namespace gpu { namespace bf_radius_match |
|
|
|
|
/////////////////////////////////////////////////////////////////////////////// |
|
|
|
|
// Radius Match Dispatcher |
|
|
|
|
|
|
|
|
|
template <typename Dist, typename T, typename Mask> |
|
|
|
|
void radiusMatchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, float maxDistance, const Mask& mask, |
|
|
|
|
const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, |
|
|
|
|
template <typename Dist, typename T, typename Train, typename Mask> |
|
|
|
|
void radiusMatchDispatcher(const DevMem2D_<T>& query, const Train& train, float maxDistance, const Mask& mask, |
|
|
|
|
const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, |
|
|
|
|
cudaStream_t stream) |
|
|
|
|
{ |
|
|
|
|
if (query.cols < 64) |
|
|
|
|
{ |
|
|
|
|
radiusMatchCached_caller<16, 16, 64, 64, false, Dist>( |
|
|
|
|
query, train, maxDistance, mask, |
|
|
|
|
static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Df>(distance), (unsigned int*)nMatches.data, |
|
|
|
|
static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data, |
|
|
|
|
stream); |
|
|
|
|
} |
|
|
|
|
else if (query.cols == 64) |
|
|
|
|
{ |
|
|
|
|
radiusMatchCached_caller<16, 16, 64, 64, true, Dist>( |
|
|
|
|
query, train, maxDistance, mask, |
|
|
|
|
static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Df>(distance), (unsigned int*)nMatches.data, |
|
|
|
|
static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data, |
|
|
|
|
stream); |
|
|
|
|
} |
|
|
|
|
else if (query.cols < 128) |
|
|
|
|
{ |
|
|
|
|
radiusMatchCached_caller<16, 16, 64, 128, false, Dist>( |
|
|
|
|
query, train, maxDistance, mask, |
|
|
|
|
static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Df>(distance), (unsigned int*)nMatches.data, |
|
|
|
|
static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data, |
|
|
|
|
stream); |
|
|
|
|
} |
|
|
|
|
else if (query.cols == 128) |
|
|
|
|
{ |
|
|
|
|
radiusMatchCached_caller<16, 16, 64, 128, true, Dist>( |
|
|
|
|
query, train, maxDistance, mask, |
|
|
|
|
static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Df>(distance), (unsigned int*)nMatches.data, |
|
|
|
|
static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data, |
|
|
|
|
stream); |
|
|
|
|
} |
|
|
|
|
else if (query.cols < 256) |
|
|
|
|
{ |
|
|
|
|
radiusMatchCached_caller<16, 16, 64, 256, false, Dist>( |
|
|
|
|
query, train, maxDistance, mask, |
|
|
|
|
static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Df>(distance), (unsigned int*)nMatches.data, |
|
|
|
|
static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data, |
|
|
|
|
stream); |
|
|
|
|
} |
|
|
|
|
else if (query.cols == 256) |
|
|
|
|
{ |
|
|
|
|
radiusMatchCached_caller<16, 16, 64, 256, true, Dist>( |
|
|
|
|
query, train, maxDistance, mask, |
|
|
|
|
static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Df>(distance), (unsigned int*)nMatches.data, |
|
|
|
|
static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data, |
|
|
|
|
stream); |
|
|
|
|
} |
|
|
|
|
else |
|
|
|
|
{ |
|
|
|
|
radiusMatchSimple_caller<16, 16, 64, Dist>( |
|
|
|
|
query, train, maxDistance, mask, |
|
|
|
|
static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Df>(distance), (unsigned int*)nMatches.data, |
|
|
|
|
static_cast<DevMem2Di>(trainIdx), static_cast<DevMem2Di>(imgIdx), static_cast<DevMem2Df>(distance), (int*)nMatches.data, |
|
|
|
|
stream); |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
@ -236,77 +352,163 @@ namespace cv { namespace gpu { namespace bf_radius_match |
|
|
|
|
/////////////////////////////////////////////////////////////////////////////// |
|
|
|
|
// Radius Match caller |
|
|
|
|
|
|
|
|
|
template <typename T> void radiusMatchL1_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, |
|
|
|
|
const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, |
|
|
|
|
template <typename T> void radiusMatchSingleL1_gpu(const DevMem2D& query, const DevMem2D& train_, float maxDistance, const DevMem2D& mask, |
|
|
|
|
const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, |
|
|
|
|
cudaStream_t stream) |
|
|
|
|
{ |
|
|
|
|
SingleTrain<T> train(static_cast< DevMem2D_<T> >(train_)); |
|
|
|
|
|
|
|
|
|
if (mask.data) |
|
|
|
|
{ |
|
|
|
|
radiusMatchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, SingleMask(mask), |
|
|
|
|
trainIdx, distance, nMatches, |
|
|
|
|
radiusMatchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, maxDistance, SingleMask(mask), |
|
|
|
|
trainIdx, DevMem2D(), distance, nMatches, |
|
|
|
|
stream); |
|
|
|
|
} |
|
|
|
|
else |
|
|
|
|
{ |
|
|
|
|
radiusMatchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, WithOutMask(), |
|
|
|
|
trainIdx, distance, nMatches, |
|
|
|
|
radiusMatchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, maxDistance, WithOutMask(), |
|
|
|
|
trainIdx, DevMem2D(), distance, nMatches, |
|
|
|
|
stream); |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
template void radiusMatchL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, cudaStream_t stream); |
|
|
|
|
//template void radiusMatchL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, cudaStream_t stream); |
|
|
|
|
template void radiusMatchL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, cudaStream_t stream); |
|
|
|
|
template void radiusMatchL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, cudaStream_t stream); |
|
|
|
|
template void radiusMatchL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, cudaStream_t stream); |
|
|
|
|
template void radiusMatchL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, cudaStream_t stream); |
|
|
|
|
template void radiusMatchSingleL1_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
//template void radiusMatchSingleL1_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
template void radiusMatchSingleL1_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
template void radiusMatchSingleL1_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
template void radiusMatchSingleL1_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
template void radiusMatchSingleL1_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
|
|
|
|
|
template <typename T> void radiusMatchL2_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, |
|
|
|
|
const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, |
|
|
|
|
template <typename T> void radiusMatchSingleL2_gpu(const DevMem2D& query, const DevMem2D& train_, float maxDistance, const DevMem2D& mask, |
|
|
|
|
const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, |
|
|
|
|
cudaStream_t stream) |
|
|
|
|
{ |
|
|
|
|
SingleTrain<T> train(static_cast< DevMem2D_<T> >(train_)); |
|
|
|
|
|
|
|
|
|
if (mask.data) |
|
|
|
|
{ |
|
|
|
|
radiusMatchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, SingleMask(mask), |
|
|
|
|
trainIdx, distance, nMatches, |
|
|
|
|
radiusMatchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, SingleMask(mask), |
|
|
|
|
trainIdx, DevMem2D(), distance, nMatches, |
|
|
|
|
stream); |
|
|
|
|
} |
|
|
|
|
else |
|
|
|
|
{ |
|
|
|
|
radiusMatchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, WithOutMask(), |
|
|
|
|
trainIdx, distance, nMatches, |
|
|
|
|
radiusMatchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, WithOutMask(), |
|
|
|
|
trainIdx, DevMem2D(), distance, nMatches, |
|
|
|
|
stream); |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
//template void radiusMatchL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, cudaStream_t stream); |
|
|
|
|
//template void radiusMatchL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, cudaStream_t stream); |
|
|
|
|
//template void radiusMatchL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, cudaStream_t stream); |
|
|
|
|
//template void radiusMatchL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, cudaStream_t stream); |
|
|
|
|
//template void radiusMatchL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, cudaStream_t stream); |
|
|
|
|
template void radiusMatchL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, cudaStream_t stream); |
|
|
|
|
//template void radiusMatchSingleL2_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
//template void radiusMatchSingleL2_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
//template void radiusMatchSingleL2_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
//template void radiusMatchSingleL2_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
//template void radiusMatchSingleL2_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
template void radiusMatchSingleL2_gpu<float >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
|
|
|
|
|
template <typename T> void radiusMatchHamming_gpu(const DevMem2D& query, const DevMem2D& train, float maxDistance, const DevMem2D& mask, |
|
|
|
|
const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, |
|
|
|
|
template <typename T> void radiusMatchSingleHamming_gpu(const DevMem2D& query, const DevMem2D& train_, float maxDistance, const DevMem2D& mask, |
|
|
|
|
const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, |
|
|
|
|
cudaStream_t stream) |
|
|
|
|
{ |
|
|
|
|
SingleTrain<T> train(static_cast< DevMem2D_<T> >(train_)); |
|
|
|
|
|
|
|
|
|
if (mask.data) |
|
|
|
|
{ |
|
|
|
|
radiusMatchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, SingleMask(mask), |
|
|
|
|
trainIdx, distance, nMatches, |
|
|
|
|
radiusMatchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, SingleMask(mask), |
|
|
|
|
trainIdx, DevMem2D(), distance, nMatches, |
|
|
|
|
stream); |
|
|
|
|
} |
|
|
|
|
else |
|
|
|
|
{ |
|
|
|
|
radiusMatchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, WithOutMask(), |
|
|
|
|
trainIdx, DevMem2D(), distance, nMatches, |
|
|
|
|
stream); |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
template void radiusMatchSingleHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
//template void radiusMatchSingleHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
template void radiusMatchSingleHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
//template void radiusMatchSingleHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
template void radiusMatchSingleHamming_gpu<int >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
|
|
|
|
|
template <typename T> void radiusMatchCollectionL1_gpu(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, |
|
|
|
|
const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, |
|
|
|
|
cudaStream_t stream) |
|
|
|
|
{ |
|
|
|
|
TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, query.cols); |
|
|
|
|
|
|
|
|
|
if (maskCollection.data) |
|
|
|
|
{ |
|
|
|
|
radiusMatchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, maxDistance, MaskCollection(maskCollection.data), |
|
|
|
|
trainIdx, imgIdx, distance, nMatches, |
|
|
|
|
stream); |
|
|
|
|
} |
|
|
|
|
else |
|
|
|
|
{ |
|
|
|
|
radiusMatchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), train, maxDistance, WithOutMask(), |
|
|
|
|
trainIdx, imgIdx, distance, nMatches, |
|
|
|
|
stream); |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
template void radiusMatchCollectionL1_gpu<uchar >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
//template void radiusMatchCollectionL1_gpu<schar >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
template void radiusMatchCollectionL1_gpu<ushort>(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
template void radiusMatchCollectionL1_gpu<short >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
template void radiusMatchCollectionL1_gpu<int >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
template void radiusMatchCollectionL1_gpu<float >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream); |
|
|
|
|
|
|
|
|
|
template <typename T> void radiusMatchCollectionL2_gpu(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, |
|
|
|
|
const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, |
|
|
|
|
cudaStream_t stream) |
|
|
|
|
{ |
|
|
|
|
TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, query.cols); |
|
|
|
|
|
|
|
|
|
if (maskCollection.data) |
|
|
|
|
{ |
|
|
|
|
radiusMatchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, MaskCollection(maskCollection.data), |
|
|
|
|
trainIdx, imgIdx, distance, nMatches, |
|
|
|
|
stream); |
|
|
|
|
} |
|
|
|
|
else |
|
|
|
|
{ |
|
|
|
|
radiusMatchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, WithOutMask(), |
|
|
|
|
trainIdx, imgIdx, distance, nMatches, |
|
|
|
|
stream); |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
// Explicit instantiations of the L2-distance collection radius-match entry
// point. Only float is enabled; the integer types are kept commented out,
// presumably to limit binary size / compile time — TODO confirm with callers.
//template void radiusMatchCollectionL2_gpu<uchar >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchCollectionL2_gpu<schar >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchCollectionL2_gpu<ushort>(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchCollectionL2_gpu<short >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchCollectionL2_gpu<int   >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void radiusMatchCollectionL2_gpu<float >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
|
|
|
|
|
|
|
|
|
// Radius match of query descriptors against a collection of train images
// using Hamming distance. Wraps the raw collection buffer in a typed
// TrainCollection and forwards to the masked or unmasked dispatcher path.
// Results (trainIdx / imgIdx / distance / per-query nMatches) are written
// on the given stream; asynchronous when a non-default stream is passed.
template <typename T> void radiusMatchCollectionHamming_gpu(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection,
    const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches,
    cudaStream_t stream)
{
    // Reinterpret the packed collection header as an array of typed train images.
    TrainCollection<T> train((DevMem2D_<T>*)trainCollection.ptr(), trainCollection.cols, query.cols);

    if (maskCollection.data)
    {
        radiusMatchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, MaskCollection(maskCollection.data),
            trainIdx, imgIdx, distance, nMatches,
            stream);
    }
    else
    {
        // Fixed: a bad merge had left a stale, unterminated dispatcher call here
        // (it cast the TrainCollection to DevMem2D_<T> and omitted imgIdx/stream),
        // which could not compile. Only the collection-aware call below is correct,
        // mirroring the masked branch and the L1/L2 collection variants.
        radiusMatchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), train, maxDistance, WithOutMask(),
            trainIdx, imgIdx, distance, nMatches,
            stream);
    }
}
|
|
|
|
|
|
|
|
|
// Explicit instantiations of the single-train Hamming radius-match entry
// point (defined earlier in this file). Only integer descriptor types make
// sense for Hamming distance; schar/short are kept commented out,
// presumably to limit binary size — TODO confirm with callers.
template void radiusMatchHamming_gpu<uchar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, cudaStream_t stream);
//template void radiusMatchHamming_gpu<schar >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, cudaStream_t stream);
template void radiusMatchHamming_gpu<ushort>(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, cudaStream_t stream);
//template void radiusMatchHamming_gpu<short >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, cudaStream_t stream);
template void radiusMatchHamming_gpu<int   >(const DevMem2D& queryDescs, const DevMem2D& trainDescs, float maxDistance, const DevMem2D& mask, const DevMem2D& trainIdx, const DevMem2D& nMatches, const DevMem2D& distance, cudaStream_t stream);
|
|
|
|
// Explicit instantiations of the Hamming collection radius-match entry
// point, matching the type set enabled for the single-train Hamming variant.
template void radiusMatchCollectionHamming_gpu<uchar >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchCollectionHamming_gpu<schar >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void radiusMatchCollectionHamming_gpu<ushort>(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
//template void radiusMatchCollectionHamming_gpu<short >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
template void radiusMatchCollectionHamming_gpu<int   >(const DevMem2D& query, const DevMem2D& trainCollection, float maxDistance, const DevMem2D_<PtrStep>& maskCollection, const DevMem2D& trainIdx, const DevMem2D& imgIdx, const DevMem2D& distance, const DevMem2D& nMatches, cudaStream_t stream);
|
|
|
|
}}} |
|
|
|
|