mirror of https://github.com/opencv/opencv.git
parent: edc9d4f951
commit: a10fed8fd1
11 changed files with 1913 additions and 6 deletions
@@ -0,0 +1,146 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
// Copyright (c) 2010, Paul Furgale, Chi Hay Tong
//
// The original code was written by Paul Furgale and Chi Hay Tong
// and later optimized and prepared for integration into OpenCV by Itseez.
//
//M*/

#include <thrust/sort.h>

#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/utility.hpp"

namespace cv { namespace gpu { namespace device
{
    namespace gfft
    {
        texture<float, cudaTextureType2D, cudaReadModeElementType> eigTex(0, cudaFilterModePoint, cudaAddressModeClamp);

        __device__ uint g_counter = 0;

        template <class Mask> __global__ void findCorners(float threshold, const Mask mask, float2* corners, uint max_count, int rows, int cols)
        {
        #if __CUDA_ARCH__ >= 110

            const int j = blockIdx.x * blockDim.x + threadIdx.x;
            const int i = blockIdx.y * blockDim.y + threadIdx.y;

            if (i > 0 && i < rows - 1 && j > 0 && j < cols - 1 && mask(i, j))
            {
                float val = tex2D(eigTex, j, i);

                if (val > threshold)
                {
                    float maxVal = val;

                    maxVal = ::fmax(tex2D(eigTex, j - 1, i - 1), maxVal);
                    maxVal = ::fmax(tex2D(eigTex, j    , i - 1), maxVal);
                    maxVal = ::fmax(tex2D(eigTex, j + 1, i - 1), maxVal);

                    maxVal = ::fmax(tex2D(eigTex, j - 1, i), maxVal);
                    maxVal = ::fmax(tex2D(eigTex, j + 1, i), maxVal);

                    maxVal = ::fmax(tex2D(eigTex, j - 1, i + 1), maxVal);
                    maxVal = ::fmax(tex2D(eigTex, j    , i + 1), maxVal);
                    maxVal = ::fmax(tex2D(eigTex, j + 1, i + 1), maxVal);

                    if (val == maxVal)
                    {
                        const uint ind = atomicInc(&g_counter, (uint)(-1));

                        if (ind < max_count)
                            corners[ind] = make_float2(j, i);
                    }
                }
            }

        #endif // __CUDA_ARCH__ >= 110
        }

        int findCorners_gpu(DevMem2Df eig, float threshold, DevMem2Db mask, float2* corners, int max_count)
        {
            void* counter_ptr;
            cudaSafeCall( cudaGetSymbolAddress(&counter_ptr, g_counter) );

            cudaSafeCall( cudaMemset(counter_ptr, 0, sizeof(uint)) );

            bindTexture(&eigTex, eig);

            dim3 block(16, 16);
            dim3 grid(divUp(eig.cols, block.x), divUp(eig.rows, block.y));

            if (mask.data)
                findCorners<<<grid, block>>>(threshold, SingleMask(mask), corners, max_count, eig.rows, eig.cols);
            else
                findCorners<<<grid, block>>>(threshold, WithOutMask(), corners, max_count, eig.rows, eig.cols);

            cudaSafeCall( cudaGetLastError() );

            cudaSafeCall( cudaDeviceSynchronize() );

            uint count;
            cudaSafeCall( cudaMemcpy(&count, counter_ptr, sizeof(uint), cudaMemcpyDeviceToHost) );

            return min(count, max_count);
        }

        class EigGreater
        {
        public:
            __device__ __forceinline__ bool operator()(float2 a, float2 b) const
            {
                return tex2D(eigTex, a.x, a.y) > tex2D(eigTex, b.x, b.y);
            }
        };


        void sortCorners_gpu(DevMem2Df eig, float2* corners, int count)
        {
            bindTexture(&eigTex, eig);

            thrust::device_ptr<float2> ptr(corners);

            thrust::sort(ptr, ptr + count, EigGreater());
        }
    } // namespace gfft
}}}
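
Note: the findCorners kernel above keeps a pixel only if its response read from eigTex exceeds the threshold and equals the maximum over its 3x3 neighbourhood, then appends it through the g_counter atomic. The following is a plain-CPU illustration of that acceptance test only, not part of the commit; "eig" is assumed to be a row-major response map and the (x, y) pair output stands in for make_float2(j, i).

#include <algorithm>
#include <utility>
#include <vector>

// CPU illustration of the same acceptance test as the findCorners kernel,
// for a row-major response map "eig" of size rows x cols.
static std::vector< std::pair<int, int> > findCornersReference(const std::vector<float>& eig,
                                                               int rows, int cols, float threshold)
{
    std::vector< std::pair<int, int> > corners;   // (x, y), like make_float2(j, i)

    for (int i = 1; i < rows - 1; ++i)
    {
        for (int j = 1; j < cols - 1; ++j)
        {
            const float val = eig[i * cols + j];
            if (val <= threshold)
                continue;

            // maximum over the 3x3 neighbourhood, mirroring the kernel's fmax chain
            float maxVal = val;
            for (int di = -1; di <= 1; ++di)
                for (int dj = -1; dj <= 1; ++dj)
                    if (di != 0 || dj != 0)
                        maxVal = std::max(maxVal, eig[(i + di) * cols + (j + dj)]);

            if (val == maxVal)                    // strict local maximum survives
                corners.push_back(std::make_pair(j, i));
        }
    }

    return corners;
}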
@@ -0,0 +1,599 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
// Copyright (c) 2010, Paul Furgale, Chi Hay Tong
//
// The original code was written by Paul Furgale and Chi Hay Tong
// and later optimized and prepared for integration into OpenCV by Itseez.
//
//M*/

#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/utility.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/limits.hpp"

namespace cv { namespace gpu { namespace device
{
    namespace pyrlk
    {
        __constant__ int c_cn;
        __constant__ float c_minEigThreshold;
        __constant__ int c_winSize_x;
        __constant__ int c_winSize_y;
        __constant__ int c_winSize_x_cn;
        __constant__ int c_halfWin_x;
        __constant__ int c_halfWin_y;
        __constant__ int c_iters;

        void loadConstants(int cn, float minEigThreshold, int2 winSize, int iters)
        {
            int2 halfWin = make_int2((winSize.x - 1) / 2, (winSize.y - 1) / 2);
            cudaSafeCall( cudaMemcpyToSymbol(c_cn, &cn, sizeof(int)) );
            cudaSafeCall( cudaMemcpyToSymbol(c_minEigThreshold, &minEigThreshold, sizeof(float)) );
            cudaSafeCall( cudaMemcpyToSymbol(c_winSize_x, &winSize.x, sizeof(int)) );
            cudaSafeCall( cudaMemcpyToSymbol(c_winSize_y, &winSize.y, sizeof(int)) );
            winSize.x *= cn;
            cudaSafeCall( cudaMemcpyToSymbol(c_winSize_x_cn, &winSize.x, sizeof(int)) );
            cudaSafeCall( cudaMemcpyToSymbol(c_halfWin_x, &halfWin.x, sizeof(int)) );
            cudaSafeCall( cudaMemcpyToSymbol(c_halfWin_y, &halfWin.y, sizeof(int)) );
            cudaSafeCall( cudaMemcpyToSymbol(c_iters, &iters, sizeof(int)) );
        }

        __global__ void calcSharrDeriv_vertical(const PtrStepb src, PtrStep<short> dx_buf, PtrStep<short> dy_buf, int rows, int colsn)
        {
            const int x = blockIdx.x * blockDim.x + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (y < rows && x < colsn)
            {
                const uchar src_val0 = src(y > 0 ? y - 1 : 1, x);
                const uchar src_val1 = src(y, x);
                const uchar src_val2 = src(y < rows - 1 ? y + 1 : rows - 2, x);

                dx_buf(y, x) = (src_val0 + src_val2) * 3 + src_val1 * 10;
                dy_buf(y, x) = src_val2 - src_val0;
            }
        }

        __global__ void calcSharrDeriv_horizontal(const PtrStep<short> dx_buf, const PtrStep<short> dy_buf, PtrStep<short> dIdx, PtrStep<short> dIdy, int rows, int cols)
        {
            const int x = blockIdx.x * blockDim.x + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            const int colsn = cols * c_cn;

            if (y < rows && x < colsn)
            {
                const short* dx_buf_row = dx_buf.ptr(y);
                const short* dy_buf_row = dy_buf.ptr(y);

                const int xr = x + c_cn < colsn ? x + c_cn : (cols - 2) * c_cn + x + c_cn - colsn;
                const int xl = x - c_cn >= 0 ? x - c_cn : c_cn + x;

                dIdx(y, x) = dx_buf_row[xr] - dx_buf_row[xl];
                dIdy(y, x) = (dy_buf_row[xr] + dy_buf_row[xl]) * 3 + dy_buf_row[x] * 10;
            }
        }

        void calcSharrDeriv_gpu(DevMem2Db src, DevMem2D_<short> dx_buf, DevMem2D_<short> dy_buf, DevMem2D_<short> dIdx, DevMem2D_<short> dIdy, int cn,
            cudaStream_t stream)
        {
            dim3 block(32, 8);
            dim3 grid(divUp(src.cols * cn, block.x), divUp(src.rows, block.y));

            calcSharrDeriv_vertical<<<grid, block, 0, stream>>>(src, dx_buf, dy_buf, src.rows, src.cols * cn);
            cudaSafeCall( cudaGetLastError() );

            calcSharrDeriv_horizontal<<<grid, block, 0, stream>>>(dx_buf, dy_buf, dIdx, dIdy, src.rows, src.cols);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

        #define W_BITS 14
        #define W_BITS1 14

        #define CV_DESCALE(x, n) (((x) + (1 << ((n)-1))) >> (n))

        __device__ int linearFilter(const PtrStepb& src, float2 pt, int x, int y)
        {
            int2 ipt;
            ipt.x = __float2int_rd(pt.x);
            ipt.y = __float2int_rd(pt.y);

            float a = pt.x - ipt.x;
            float b = pt.y - ipt.y;

            int iw00 = __float2int_rn((1.0f - a) * (1.0f - b) * (1 << W_BITS));
            int iw01 = __float2int_rn(a * (1.0f - b) * (1 << W_BITS));
            int iw10 = __float2int_rn((1.0f - a) * b * (1 << W_BITS));
            int iw11 = (1 << W_BITS) - iw00 - iw01 - iw10;

            const uchar* src_row = src.ptr(ipt.y + y) + ipt.x * c_cn;
            const uchar* src_row1 = src.ptr(ipt.y + y + 1) + ipt.x * c_cn;

            return CV_DESCALE(src_row[x] * iw00 + src_row[x + c_cn] * iw01 + src_row1[x] * iw10 + src_row1[x + c_cn] * iw11, W_BITS1 - 5);
        }

        __device__ int linearFilter(const PtrStep<short>& src, float2 pt, int x, int y)
        {
            int2 ipt;
            ipt.x = __float2int_rd(pt.x);
            ipt.y = __float2int_rd(pt.y);

            float a = pt.x - ipt.x;
            float b = pt.y - ipt.y;

            int iw00 = __float2int_rn((1.0f - a) * (1.0f - b) * (1 << W_BITS));
            int iw01 = __float2int_rn(a * (1.0f - b) * (1 << W_BITS));
            int iw10 = __float2int_rn((1.0f - a) * b * (1 << W_BITS));
            int iw11 = (1 << W_BITS) - iw00 - iw01 - iw10;

            const short* src_row = src.ptr(ipt.y + y) + ipt.x * c_cn;
            const short* src_row1 = src.ptr(ipt.y + y + 1) + ipt.x * c_cn;

            return CV_DESCALE(src_row[x] * iw00 + src_row[x + c_cn] * iw01 + src_row1[x] * iw10 + src_row1[x + c_cn] * iw11, W_BITS1);
        }

        __device__ void reduce(float& val1, float& val2, float& val3, float* smem1, float* smem2, float* smem3, int tid)
        {
            smem1[tid] = val1;
            smem2[tid] = val2;
            smem3[tid] = val3;
            __syncthreads();

            if (tid < 128)
            {
                smem1[tid] = val1 += smem1[tid + 128];
                smem2[tid] = val2 += smem2[tid + 128];
                smem3[tid] = val3 += smem3[tid + 128];
            }
            __syncthreads();

            if (tid < 64)
            {
                smem1[tid] = val1 += smem1[tid + 64];
                smem2[tid] = val2 += smem2[tid + 64];
                smem3[tid] = val3 += smem3[tid + 64];
            }
            __syncthreads();

            if (tid < 32)
            {
                volatile float* vmem1 = smem1;
                volatile float* vmem2 = smem2;
                volatile float* vmem3 = smem3;

                vmem1[tid] = val1 += vmem1[tid + 32];
                vmem2[tid] = val2 += vmem2[tid + 32];
                vmem3[tid] = val3 += vmem3[tid + 32];

                vmem1[tid] = val1 += vmem1[tid + 16];
                vmem2[tid] = val2 += vmem2[tid + 16];
                vmem3[tid] = val3 += vmem3[tid + 16];

                vmem1[tid] = val1 += vmem1[tid + 8];
                vmem2[tid] = val2 += vmem2[tid + 8];
                vmem3[tid] = val3 += vmem3[tid + 8];

                vmem1[tid] = val1 += vmem1[tid + 4];
                vmem2[tid] = val2 += vmem2[tid + 4];
                vmem3[tid] = val3 += vmem3[tid + 4];

                vmem1[tid] = val1 += vmem1[tid + 2];
                vmem2[tid] = val2 += vmem2[tid + 2];
                vmem3[tid] = val3 += vmem3[tid + 2];

                vmem1[tid] = val1 += vmem1[tid + 1];
                vmem2[tid] = val2 += vmem2[tid + 1];
                vmem3[tid] = val3 += vmem3[tid + 1];
            }
        }

        __device__ void reduce(float& val1, float& val2, float* smem1, float* smem2, int tid)
        {
            smem1[tid] = val1;
            smem2[tid] = val2;
            __syncthreads();

            if (tid < 128)
            {
                smem1[tid] = val1 += smem1[tid + 128];
                smem2[tid] = val2 += smem2[tid + 128];
            }
            __syncthreads();

            if (tid < 64)
            {
                smem1[tid] = val1 += smem1[tid + 64];
                smem2[tid] = val2 += smem2[tid + 64];
            }
            __syncthreads();

            if (tid < 32)
            {
                volatile float* vmem1 = smem1;
                volatile float* vmem2 = smem2;

                vmem1[tid] = val1 += vmem1[tid + 32];
                vmem2[tid] = val2 += vmem2[tid + 32];

                vmem1[tid] = val1 += vmem1[tid + 16];
                vmem2[tid] = val2 += vmem2[tid + 16];

                vmem1[tid] = val1 += vmem1[tid + 8];
                vmem2[tid] = val2 += vmem2[tid + 8];

                vmem1[tid] = val1 += vmem1[tid + 4];
                vmem2[tid] = val2 += vmem2[tid + 4];

                vmem1[tid] = val1 += vmem1[tid + 2];
                vmem2[tid] = val2 += vmem2[tid + 2];

                vmem1[tid] = val1 += vmem1[tid + 1];
                vmem2[tid] = val2 += vmem2[tid + 1];
            }
        }

        #define SCALE (1.0f / (1 << 20))

        template <int PATCH_X, int PATCH_Y, bool calcErr>
        __global__ void lkSparse(const PtrStepb I, const PtrStepb J, const PtrStep<short> dIdx, const PtrStep<short> dIdy,
            const float2* prevPts, float2* nextPts, uchar* status, float* err, const int level, const int rows, const int cols)
        {
            __shared__ float smem1[256];
            __shared__ float smem2[256];
            __shared__ float smem3[256];

            const int tid = threadIdx.y * blockDim.x + threadIdx.x;

            float2 prevPt = prevPts[blockIdx.x];
            prevPt.x *= (1.0f / (1 << level));
            prevPt.y *= (1.0f / (1 << level));

            prevPt.x -= c_halfWin_x;
            prevPt.y -= c_halfWin_y;

            if (prevPt.x < -c_winSize_x || prevPt.x >= cols || prevPt.y < -c_winSize_y || prevPt.y >= rows)
            {
                if (level == 0 && tid == 0)
                {
                    status[blockIdx.x] = 0;

                    if (calcErr)
                        err[blockIdx.x] = 0;
                }

                return;
            }

            // extract the patch from the first image, compute covariation matrix of derivatives

            float A11 = 0;
            float A12 = 0;
            float A22 = 0;

            int I_patch[PATCH_Y][PATCH_X];
            int dIdx_patch[PATCH_Y][PATCH_X];
            int dIdy_patch[PATCH_Y][PATCH_X];

            for (int y = threadIdx.y, i = 0; y < c_winSize_y; y += blockDim.y, ++i)
            {
                for (int x = threadIdx.x, j = 0; x < c_winSize_x_cn; x += blockDim.x, ++j)
                {
                    I_patch[i][j] = linearFilter(I, prevPt, x, y);

                    int ixval = linearFilter(dIdx, prevPt, x, y);
                    int iyval = linearFilter(dIdy, prevPt, x, y);

                    dIdx_patch[i][j] = ixval;
                    dIdy_patch[i][j] = iyval;

                    A11 += ixval * ixval;
                    A12 += ixval * iyval;
                    A22 += iyval * iyval;
                }
            }

            reduce(A11, A12, A22, smem1, smem2, smem3, tid);
            __syncthreads();

            A11 = smem1[0];
            A12 = smem2[0];
            A22 = smem3[0];

            A11 *= SCALE;
            A12 *= SCALE;
            A22 *= SCALE;

            {
                float D = A11 * A22 - A12 * A12;
                float minEig = (A22 + A11 - ::sqrtf((A11 - A22) * (A11 - A22) + 4.f * A12 * A12)) / (2 * c_winSize_x * c_winSize_y);

                if (calcErr && tid == 0)
                    err[blockIdx.x] = minEig;

                if (minEig < c_minEigThreshold || D < numeric_limits<float>::epsilon())
                {
                    if (level == 0 && tid == 0)
                        status[blockIdx.x] = 0;

                    return;
                }

                D = 1.f / D;

                A11 *= D;
                A12 *= D;
                A22 *= D;
            }

            float2 nextPt = nextPts[blockIdx.x];
            nextPt.x *= 2.f;
            nextPt.y *= 2.f;

            nextPt.x -= c_halfWin_x;
            nextPt.y -= c_halfWin_y;

            bool status_ = true;

            for (int k = 0; k < c_iters; ++k)
            {
                if (nextPt.x < -c_winSize_x || nextPt.x >= cols || nextPt.y < -c_winSize_y || nextPt.y >= rows)
                {
                    status_ = false;
                    break;
                }

                float b1 = 0;
                float b2 = 0;

                for (int y = threadIdx.y, i = 0; y < c_winSize_y; y += blockDim.y, ++i)
                {
                    for (int x = threadIdx.x, j = 0; x < c_winSize_x_cn; x += blockDim.x, ++j)
                    {
                        int diff = linearFilter(J, nextPt, x, y) - I_patch[i][j];

                        b1 += diff * dIdx_patch[i][j];
                        b2 += diff * dIdy_patch[i][j];
                    }
                }

                reduce(b1, b2, smem1, smem2, tid);
                __syncthreads();

                b1 = smem1[0];
                b2 = smem2[0];

                b1 *= SCALE;
                b2 *= SCALE;

                float2 delta;
                delta.x = A12 * b2 - A22 * b1;
                delta.y = A12 * b1 - A11 * b2;

                nextPt.x += delta.x;
                nextPt.y += delta.y;

                if (::fabs(delta.x) < 0.01f && ::fabs(delta.y) < 0.01f)
                    break;
            }

            if (tid == 0)
            {
                nextPt.x += c_halfWin_x;
                nextPt.y += c_halfWin_y;

                nextPts[blockIdx.x] = nextPt;
                status[blockIdx.x] = status_;
            }
        }

        template <int PATCH_X, int PATCH_Y>
        void lkSparse_caller(DevMem2Db I, DevMem2Db J, DevMem2D_<short> dIdx, DevMem2D_<short> dIdy,
            const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
            int level, dim3 block, cudaStream_t stream)
        {
            dim3 grid(ptcount);

            if (err)
            {
                cudaSafeCall( cudaFuncSetCacheConfig(lkSparse<PATCH_X, PATCH_Y, true>, cudaFuncCachePreferL1) );

                lkSparse<PATCH_X, PATCH_Y, true><<<grid, block>>>(I, J, dIdx, dIdy,
                    prevPts, nextPts, status, err, level, I.rows, I.cols);
            }
            else
            {
                cudaSafeCall( cudaFuncSetCacheConfig(lkSparse<PATCH_X, PATCH_Y, false>, cudaFuncCachePreferL1) );

                lkSparse<PATCH_X, PATCH_Y, false><<<grid, block>>>(I, J, dIdx, dIdy,
                    prevPts, nextPts, status, err, level, I.rows, I.cols);
            }

            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

        void lkSparse_gpu(DevMem2Db I, DevMem2Db J, DevMem2D_<short> dIdx, DevMem2D_<short> dIdy,
            const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
            int level, dim3 block, dim3 patch, cudaStream_t stream)
        {
            typedef void (*func_t)(DevMem2Db I, DevMem2Db J, DevMem2D_<short> dIdx, DevMem2D_<short> dIdy,
                const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
                int level, dim3 block, cudaStream_t stream);

            static const func_t funcs[5][5] =
            {
                {lkSparse_caller<1, 1>, lkSparse_caller<2, 1>, lkSparse_caller<3, 1>, lkSparse_caller<4, 1>, lkSparse_caller<5, 1>},
                {lkSparse_caller<1, 2>, lkSparse_caller<2, 2>, lkSparse_caller<3, 2>, lkSparse_caller<4, 2>, lkSparse_caller<5, 2>},
                {lkSparse_caller<1, 3>, lkSparse_caller<2, 3>, lkSparse_caller<3, 3>, lkSparse_caller<4, 3>, lkSparse_caller<5, 3>},
                {lkSparse_caller<1, 4>, lkSparse_caller<2, 4>, lkSparse_caller<3, 4>, lkSparse_caller<4, 4>, lkSparse_caller<5, 4>},
                {lkSparse_caller<1, 5>, lkSparse_caller<2, 5>, lkSparse_caller<3, 5>, lkSparse_caller<4, 5>, lkSparse_caller<5, 5>}
            };

            funcs[patch.y - 1][patch.x - 1](I, J, dIdx, dIdy,
                prevPts, nextPts, status, err, ptcount,
                level, block, stream);
        }

        template <bool calcErr>
        __global__ void lkDense(const PtrStepb I, const PtrStepb J, const PtrStep<short> dIdx, const PtrStep<short> dIdy,
            PtrStepf u, PtrStepf v, PtrStepf err, const int rows, const int cols)
        {
            const int x = blockIdx.x * blockDim.x + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (x >= cols || y >= rows)
                return;

            // extract the patch from the first image, compute covariation matrix of derivatives

            float A11 = 0;
            float A12 = 0;
            float A22 = 0;

            for (int i = 0; i < c_winSize_y; ++i)
            {
                for (int j = 0; j < c_winSize_x; ++j)
                {
                    int ixval = dIdx(y - c_halfWin_y + i, x - c_halfWin_x + j);
                    int iyval = dIdy(y - c_halfWin_y + i, x - c_halfWin_x + j);

                    A11 += ixval * ixval;
                    A12 += ixval * iyval;
                    A22 += iyval * iyval;
                }
            }

            A11 *= SCALE;
            A12 *= SCALE;
            A22 *= SCALE;

            {
                float D = A11 * A22 - A12 * A12;
                float minEig = (A22 + A11 - ::sqrtf((A11 - A22) * (A11 - A22) + 4.f * A12 * A12)) / (2 * c_winSize_x * c_winSize_y);

                if (calcErr)
                    err(y, x) = minEig;

                if (minEig < c_minEigThreshold || D < numeric_limits<float>::epsilon())
                    return;

                D = 1.f / D;

                A11 *= D;
                A12 *= D;
                A22 *= D;
            }

            float2 nextPt;
            nextPt.x = x - c_halfWin_x + u(y, x);
            nextPt.y = y - c_halfWin_y + v(y, x);

            for (int k = 0; k < c_iters; ++k)
            {
                if (nextPt.x < -c_winSize_x || nextPt.x >= cols || nextPt.y < -c_winSize_y || nextPt.y >= rows)
                    break;

                float b1 = 0;
                float b2 = 0;

                for (int i = 0; i < c_winSize_y; ++i)
                {
                    for (int j = 0; j < c_winSize_x; ++j)
                    {
                        int I_val = I(y - c_halfWin_y + i, x - c_halfWin_x + j);

                        int diff = linearFilter(J, nextPt, j, i) - CV_DESCALE(I_val * (1 << W_BITS), W_BITS1 - 5);

                        b1 += diff * dIdx(y - c_halfWin_y + i, x - c_halfWin_x + j);
                        b2 += diff * dIdy(y - c_halfWin_y + i, x - c_halfWin_x + j);
                    }
                }

                b1 *= SCALE;
                b2 *= SCALE;

                float2 delta;
                delta.x = A12 * b2 - A22 * b1;
                delta.y = A12 * b1 - A11 * b2;

                nextPt.x += delta.x;
                nextPt.y += delta.y;

                if (::fabs(delta.x) < 0.01f && ::fabs(delta.y) < 0.01f)
                    break;
            }

            u(y, x) = nextPt.x - x + c_halfWin_x;
            v(y, x) = nextPt.y - y + c_halfWin_y;
        }

        void lkDense_gpu(DevMem2Db I, DevMem2Db J, DevMem2D_<short> dIdx, DevMem2D_<short> dIdy,
            DevMem2Df u, DevMem2Df v, DevMem2Df* err, cudaStream_t stream)
        {
            dim3 block(32, 8);
            dim3 grid(divUp(I.cols, block.x), divUp(I.rows, block.y));

            if (err)
            {
                cudaSafeCall( cudaFuncSetCacheConfig(lkDense<true>, cudaFuncCachePreferL1) );

                lkDense<true><<<grid, block, 0, stream>>>(I, J, dIdx, dIdy, u, v, *err, I.rows, I.cols);
                cudaSafeCall( cudaGetLastError() );
            }
            else
            {
                cudaSafeCall( cudaFuncSetCacheConfig(lkDense<false>, cudaFuncCachePreferL1) );

                lkDense<false><<<grid, block, 0, stream>>>(I, J, dIdx, dIdy, u, v, PtrStepf(), I.rows, I.cols);
                cudaSafeCall( cudaGetLastError() );
            }

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }
    }
}}}
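
Note: the update implemented by lkSparse and lkDense above is the standard pyramidal Lucas-Kanade step. The window sums A11, A12, A22 form the 2x2 gradient (covariation) matrix G, b1 and b2 form the image-mismatch vector b, and each iteration moves the tracked point by delta = -G^{-1} b; tracking is abandoned when the window-normalised minimum eigenvalue falls below c_minEigThreshold or det(G) is near zero. The sums themselves are kept in W_BITS fixed point and rescaled by SCALE. In LaTeX form:

\[
G = \begin{pmatrix} \sum I_x^2 & \sum I_x I_y \\ \sum I_x I_y & \sum I_y^2 \end{pmatrix}
  = \begin{pmatrix} A_{11} & A_{12} \\ A_{12} & A_{22} \end{pmatrix},
\qquad
b = \begin{pmatrix} \sum (J - I)\, I_x \\ \sum (J - I)\, I_y \end{pmatrix}
  = \begin{pmatrix} b_1 \\ b_2 \end{pmatrix},
\qquad
\delta = -G^{-1} b,
\qquad
\lambda_{\min} = \frac{A_{11} + A_{22} - \sqrt{(A_{11} - A_{22})^2 + 4 A_{12}^2}}{2\, w_x w_y}.
\]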
@@ -0,0 +1,165 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"

using namespace std;
using namespace cv;
using namespace cv::gpu;

#if !defined (HAVE_CUDA)

void cv::gpu::GoodFeaturesToTrackDetector_GPU::operator ()(const GpuMat&, GpuMat&, const GpuMat&) { throw_nogpu(); }

#else /* !defined (HAVE_CUDA) */

namespace cv { namespace gpu { namespace device
{
    namespace gfft
    {
        int findCorners_gpu(DevMem2Df eig, float threshold, DevMem2Db mask, float2* corners, int max_count);
        void sortCorners_gpu(DevMem2Df eig, float2* corners, int count);
    }
}}}

void cv::gpu::GoodFeaturesToTrackDetector_GPU::operator ()(const GpuMat& image, GpuMat& corners, const GpuMat& mask)
{
    using namespace cv::gpu::device::gfft;

    CV_Assert(qualityLevel > 0 && minDistance >= 0 && maxCorners >= 0);
    CV_Assert(mask.empty() || (mask.type() == CV_8UC1 && mask.size() == image.size()));
    CV_Assert(TargetArchs::builtWith(GLOBAL_ATOMICS) && DeviceInfo().supports(GLOBAL_ATOMICS));

    ensureSizeIsEnough(image.size(), CV_32F, eig_);

    if (useHarrisDetector)
        cornerHarris(image, eig_, Dx_, Dy_, buf_, blockSize, 3, harrisK);
    else
        cornerMinEigenVal(image, eig_, Dx_, Dy_, buf_, blockSize, 3);

    double maxVal = 0;
    minMax(eig_, 0, &maxVal, GpuMat(), minMaxbuf_);

    ensureSizeIsEnough(1, std::max(1000, static_cast<int>(image.size().area() * 0.05)), CV_32FC2, tmpCorners_);

    int total = findCorners_gpu(eig_, static_cast<float>(maxVal * qualityLevel), mask, tmpCorners_.ptr<float2>(), tmpCorners_.cols);

    sortCorners_gpu(eig_, tmpCorners_.ptr<float2>(), total);

    if (minDistance < 1)
        tmpCorners_.colRange(0, maxCorners > 0 ? std::min(maxCorners, total) : total).copyTo(corners);
    else
    {
        vector<Point2f> tmp(total);
        Mat tmpMat(1, total, CV_32FC2, (void*)&tmp[0]);
        tmpCorners_.colRange(0, total).download(tmpMat);

        vector<Point2f> tmp2;
        tmp2.reserve(total);

        const int cell_size = cvRound(minDistance);
        const int grid_width = (image.cols + cell_size - 1) / cell_size;
        const int grid_height = (image.rows + cell_size - 1) / cell_size;

        std::vector< std::vector<Point2f> > grid(grid_width * grid_height);

        for (int i = 0; i < total; ++i)
        {
            Point2f p = tmp[i];

            bool good = true;

            int x_cell = static_cast<int>(p.x / cell_size);
            int y_cell = static_cast<int>(p.y / cell_size);

            int x1 = x_cell - 1;
            int y1 = y_cell - 1;
            int x2 = x_cell + 1;
            int y2 = y_cell + 1;

            // boundary check
            x1 = std::max(0, x1);
            y1 = std::max(0, y1);
            x2 = std::min(grid_width - 1, x2);
            y2 = std::min(grid_height - 1, y2);

            for (int yy = y1; yy <= y2; yy++)
            {
                for (int xx = x1; xx <= x2; xx++)
                {
                    vector<Point2f>& m = grid[yy * grid_width + xx];

                    if (!m.empty())
                    {
                        for(int j = 0; j < m.size(); j++)
                        {
                            float dx = p.x - m[j].x;
                            float dy = p.y - m[j].y;

                            if (dx * dx + dy * dy < minDistance * minDistance)
                            {
                                good = false;
                                goto break_out;
                            }
                        }
                    }
                }
            }

            break_out:

            if(good)
            {
                grid[y_cell * grid_width + x_cell].push_back(p);

                tmp2.push_back(p);

                if (maxCorners > 0 && tmp2.size() == maxCorners)
                    break;
            }
        }

        corners.upload(Mat(1, tmp2.size(), CV_32FC2, &tmp2[0]));
    }
}

#endif /* !defined (HAVE_CUDA) */
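
Note: a minimal host-side sketch of how the detector implemented above is driven, mirroring the sample at the end of this commit; the image file name is a placeholder and the constructor arguments (maxCorners, qualityLevel, minDistance) reuse the sample's values.

#include "opencv2/gpu/gpu.hpp"
#include "opencv2/highgui/highgui.hpp"

int main()
{
    // Placeholder input: any 8-bit single-channel image (imread flag 0 = grayscale).
    cv::Mat gray = cv::imread("frame.png", 0);

    // maxCorners = 4000, qualityLevel = 0.01, minDistance = 0, as in the sample below.
    cv::gpu::GoodFeaturesToTrackDetector_GPU detector(4000, 0.01, 0.0);

    cv::gpu::GpuMat d_gray(gray);
    cv::gpu::GpuMat d_corners;     // returned as a 1 x N row of CV_32FC2 points
    detector(d_gray, d_corners);   // optional third argument: a CV_8UC1 mask

    return 0;
}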
@@ -0,0 +1,295 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"

using namespace std;
using namespace cv;
using namespace cv::gpu;

#if !defined (HAVE_CUDA)

void cv::gpu::PyrLKOpticalFlow::sparse(const GpuMat&, const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat*) { throw_nogpu(); }
void cv::gpu::PyrLKOpticalFlow::dense(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, GpuMat*) { throw_nogpu(); }

#else /* !defined (HAVE_CUDA) */

namespace cv { namespace gpu { namespace device
{
    namespace pyrlk
    {
        void loadConstants(int cn, float minEigThreshold, int2 winSize, int iters);

        void calcSharrDeriv_gpu(DevMem2Db src, DevMem2D_<short> dx_buf, DevMem2D_<short> dy_buf, DevMem2D_<short> dIdx, DevMem2D_<short> dIdy, int cn,
            cudaStream_t stream = 0);

        void lkSparse_gpu(DevMem2Db I, DevMem2Db J, DevMem2D_<short> dIdx, DevMem2D_<short> dIdy,
            const float2* prevPts, float2* nextPts, uchar* status, float* err, int ptcount,
            int level, dim3 block, dim3 patch, cudaStream_t stream = 0);

        void lkDense_gpu(DevMem2Db I, DevMem2Db J, DevMem2D_<short> dIdx, DevMem2D_<short> dIdy,
            DevMem2Df u, DevMem2Df v, DevMem2Df* err, cudaStream_t stream = 0);
    }
}}}

void cv::gpu::PyrLKOpticalFlow::calcSharrDeriv(const GpuMat& src, GpuMat& dIdx, GpuMat& dIdy)
{
    using namespace cv::gpu::device::pyrlk;

    CV_Assert(src.rows > 1 && src.cols > 1);
    CV_Assert(src.depth() == CV_8U);

    const int cn = src.channels();

    ensureSizeIsEnough(src.size(), CV_MAKETYPE(CV_16S, cn), dx_calcBuf_);
    ensureSizeIsEnough(src.size(), CV_MAKETYPE(CV_16S, cn), dy_calcBuf_);

    const int colsn = src.cols * cn;

    calcSharrDeriv_gpu(src, dx_calcBuf_, dy_calcBuf_, dIdx, dIdy, cn);
}

void cv::gpu::PyrLKOpticalFlow::buildImagePyramid(const GpuMat& img0, vector<GpuMat>& pyr, bool withBorder)
{
    pyr.resize(maxLevel + 1);

    Size sz = img0.size();

    for (int level = 0; level <= maxLevel; ++level)
    {
        GpuMat temp;

        if (withBorder)
        {
            temp.create(sz.height + winSize.height * 2, sz.width + winSize.width * 2, img0.type());
            pyr[level] = temp(Rect(winSize.width, winSize.height, sz.width, sz.height));
        }
        else
        {
            ensureSizeIsEnough(sz, img0.type(), pyr[level]);
        }

        if (level == 0)
            img0.copyTo(pyr[level]);
        else
            pyrDown(pyr[level - 1], pyr[level]);

        if (withBorder)
            copyMakeBorder(pyr[level], temp, winSize.height, winSize.height, winSize.width, winSize.width, BORDER_REFLECT_101);

        sz = Size((sz.width + 1) / 2, (sz.height + 1) / 2);

        if (sz.width <= winSize.width || sz.height <= winSize.height)
        {
            maxLevel = level;
            break;
        }
    }
}

void cv::gpu::PyrLKOpticalFlow::sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts, GpuMat& status, GpuMat* err)
{
    using namespace cv::gpu::device::pyrlk;

    if (prevPts.empty())
    {
        nextPts.release();
        status.release();
        if (err) err->release();
        return;
    }

    derivLambda = std::min(std::max(derivLambda, 0.0), 1.0);

    iters = std::min(std::max(iters, 0), 100);

    const int cn = prevImg.channels();

    dim3 block;

    if (winSize.width * cn > 32)
    {
        block.x = 32;
        block.y = 8;
    }
    else
    {
        block.x = block.y = 16;
    }

    dim3 patch((winSize.width * cn + block.x - 1) / block.x, (winSize.height + block.y - 1) / block.y);

    CV_Assert(derivLambda >= 0);
    CV_Assert(maxLevel >= 0 && winSize.width > 2 && winSize.height > 2);
    CV_Assert(prevImg.size() == nextImg.size() && prevImg.type() == nextImg.type());
    CV_Assert(patch.x > 0 && patch.x < 6 && patch.y > 0 && patch.y < 6);
    CV_Assert(prevPts.rows == 1 && prevPts.type() == CV_32FC2);

    if (useInitialFlow)
        CV_Assert(nextPts.size() == prevPts.size() && nextPts.type() == CV_32FC2);
    else
        ensureSizeIsEnough(1, prevPts.cols, prevPts.type(), nextPts);

    GpuMat temp1 = (useInitialFlow ? nextPts : prevPts).reshape(1);
    GpuMat temp2 = nextPts.reshape(1);
    multiply(temp1, Scalar::all(1.0 / (1 << maxLevel) / 2.0), temp2);

    ensureSizeIsEnough(1, prevPts.cols, CV_8UC1, status);
    status.setTo(Scalar::all(1));

    if (err)
        ensureSizeIsEnough(1, prevPts.cols, CV_32FC1, *err);

    // build the image pyramids.
    // we pad each level with +/-winSize.{width|height}
    // pixels to simplify the further patch extraction.

    buildImagePyramid(prevImg, prevPyr_, true);
    buildImagePyramid(nextImg, nextPyr_, true);

    // dI/dx ~ Ix, dI/dy ~ Iy

    ensureSizeIsEnough(prevImg.rows + winSize.height * 2, prevImg.cols + winSize.width * 2, CV_MAKETYPE(CV_16S, cn), dx_buf_);
    ensureSizeIsEnough(prevImg.rows + winSize.height * 2, prevImg.cols + winSize.width * 2, CV_MAKETYPE(CV_16S, cn), dy_buf_);

    loadConstants(cn, minEigThreshold, make_int2(winSize.width, winSize.height), iters);

    for (int level = maxLevel; level >= 0; level--)
    {
        Size imgSize = prevPyr_[level].size();

        GpuMat dxWhole(imgSize.height + winSize.height * 2, imgSize.width + winSize.width * 2, dx_buf_.type(), dx_buf_.data, dx_buf_.step);
        GpuMat dyWhole(imgSize.height + winSize.height * 2, imgSize.width + winSize.width * 2, dy_buf_.type(), dy_buf_.data, dy_buf_.step);
        dxWhole.setTo(Scalar::all(0));
        dyWhole.setTo(Scalar::all(0));
        GpuMat dIdx = dxWhole(Rect(winSize.width, winSize.height, imgSize.width, imgSize.height));
        GpuMat dIdy = dyWhole(Rect(winSize.width, winSize.height, imgSize.width, imgSize.height));

        calcSharrDeriv(prevPyr_[level], dIdx, dIdy);

        lkSparse_gpu(prevPyr_[level], nextPyr_[level], dIdx, dIdy,
            prevPts.ptr<float2>(), nextPts.ptr<float2>(), status.ptr(), level == 0 && err ? err->ptr<float>() : 0, prevPts.cols,
            level, block, patch);
    }
}

void cv::gpu::PyrLKOpticalFlow::dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, GpuMat* err)
{
    using namespace cv::gpu::device::pyrlk;

    derivLambda = std::min(std::max(derivLambda, 0.0), 1.0);

    iters = std::min(std::max(iters, 0), 100);

    CV_Assert(prevImg.type() == CV_8UC1);
    CV_Assert(prevImg.size() == nextImg.size() && prevImg.type() == nextImg.type());
    CV_Assert(derivLambda >= 0);
    CV_Assert(maxLevel >= 0 && winSize.width > 2 && winSize.height > 2);

    if (useInitialFlow)
    {
        CV_Assert(u.size() == prevImg.size() && u.type() == CV_32FC1);
        CV_Assert(v.size() == prevImg.size() && v.type() == CV_32FC1);
    }
    else
    {
        u.create(prevImg.size(), CV_32FC1);
        v.create(prevImg.size(), CV_32FC1);

        u.setTo(Scalar::all(0));
        v.setTo(Scalar::all(0));
    }

    if (err)
        err->create(prevImg.size(), CV_32FC1);

    // build the image pyramids.
    // we pad each level with +/-winSize.{width|height}
    // pixels to simplify the further patch extraction.

    buildImagePyramid(prevImg, prevPyr_, true);
    buildImagePyramid(nextImg, nextPyr_, true);
    buildImagePyramid(u, uPyr_, false);
    buildImagePyramid(v, vPyr_, false);

    // dI/dx ~ Ix, dI/dy ~ Iy

    ensureSizeIsEnough(prevImg.rows + winSize.height * 2, prevImg.cols + winSize.width * 2, CV_16SC1, dx_buf_);
    ensureSizeIsEnough(prevImg.rows + winSize.height * 2, prevImg.cols + winSize.width * 2, CV_16SC1, dy_buf_);

    loadConstants(1, minEigThreshold, make_int2(winSize.width, winSize.height), iters);

    DevMem2Df derr = err ? *err : DevMem2Df();

    for (int level = maxLevel; level >= 0; level--)
    {
        Size imgSize = prevPyr_[level].size();

        GpuMat dxWhole(imgSize.height + winSize.height * 2, imgSize.width + winSize.width * 2, dx_buf_.type(), dx_buf_.data, dx_buf_.step);
        GpuMat dyWhole(imgSize.height + winSize.height * 2, imgSize.width + winSize.width * 2, dy_buf_.type(), dy_buf_.data, dy_buf_.step);
        dxWhole.setTo(Scalar::all(0));
        dyWhole.setTo(Scalar::all(0));
        GpuMat dIdx = dxWhole(Rect(winSize.width, winSize.height, imgSize.width, imgSize.height));
        GpuMat dIdy = dyWhole(Rect(winSize.width, winSize.height, imgSize.width, imgSize.height));

        calcSharrDeriv(prevPyr_[level], dIdx, dIdy);

        lkDense_gpu(prevPyr_[level], nextPyr_[level], dIdx, dIdy, uPyr_[level], vPyr_[level],
            level == 0 && err ? &derr : 0);

        if (level == 0)
        {
            uPyr_[0].copyTo(u);
            vPyr_[0].copyTo(v);
        }
        else
        {
            pyrUp(uPyr_[level], uPyr_[level - 1]);
            pyrUp(vPyr_[level], vPyr_[level - 1]);

            multiply(uPyr_[level - 1], Scalar::all(2), uPyr_[level - 1]);
            multiply(vPyr_[level - 1], Scalar::all(2), vPyr_[level - 1]);
        }
    }
}

#endif /* !defined (HAVE_CUDA) */
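
Note: the sparse path above is exercised directly by the sample that follows; condensed to its essentials, the call pattern is sketched below. Frame file names are placeholders, and the flow object is assumed to be tuned through member fields such as winSize, maxLevel and iters, which is how pyrlk.cpp reads them.

#include "opencv2/gpu/gpu.hpp"
#include "opencv2/highgui/highgui.hpp"

int main()
{
    // Placeholder inputs: two consecutive frames (BGR for tracking, grayscale for detection).
    cv::gpu::GpuMat d_frame0(cv::imread("frame0.png"));
    cv::gpu::GpuMat d_frame1(cv::imread("frame1.png"));
    cv::gpu::GpuMat d_frame0Gray(cv::imread("frame0.png", 0));

    // Seed points from the GPU good-features-to-track detector added in this commit.
    cv::gpu::GoodFeaturesToTrackDetector_GPU detector(4000, 0.01, 0.0);
    cv::gpu::GpuMat d_prevPts;
    detector(d_frame0Gray, d_prevPts);

    // Track the points from frame0 to frame1; status marks points that were found.
    cv::gpu::PyrLKOpticalFlow pyrLK;
    cv::gpu::GpuMat d_nextPts, d_status;
    pyrLK.sparse(d_frame0, d_frame1, d_prevPts, d_nextPts, d_status);  // optional sixth argument: per-point error

    return 0;
}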
@@ -0,0 +1,279 @@
#include <iostream>
#include <vector>

#include "cvconfig.h"
#include "opencv2/core/core.hpp"
#include "opencv2/core/opengl_interop.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/video/video.hpp"
#include "opencv2/gpu/gpu.hpp"

using namespace std;
using namespace cv;
using namespace cv::gpu;

void download(const GpuMat& d_mat, vector<Point2f>& vec)
{
    vec.resize(d_mat.cols);
    Mat mat(1, d_mat.cols, CV_32FC2, (void*)&vec[0]);
    d_mat.download(mat);
}

void download(const GpuMat& d_mat, vector<uchar>& vec)
{
    vec.resize(d_mat.cols);
    Mat mat(1, d_mat.cols, CV_8UC1, (void*)&vec[0]);
    d_mat.download(mat);
}

void drawArrows(Mat& frame, const vector<Point2f>& prevPts, const vector<Point2f>& nextPts, const vector<uchar>& status, Scalar line_color = Scalar(0, 0, 255))
{
    for (size_t i = 0; i < prevPts.size(); ++i)
    {
        if (status[i])
        {
            int line_thickness = 1;

            Point p = prevPts[i];
            Point q = nextPts[i];

            double angle = atan2((double) p.y - q.y, (double) p.x - q.x);

            double hypotenuse = sqrt( (double)(p.y - q.y)*(p.y - q.y) + (double)(p.x - q.x)*(p.x - q.x) );

            if (hypotenuse < 1.0)
                continue;

            // Here we lengthen the arrow by a factor of three.
            q.x = (int) (p.x - 3 * hypotenuse * cos(angle));
            q.y = (int) (p.y - 3 * hypotenuse * sin(angle));

            // Now we draw the main line of the arrow.
            line(frame, p, q, line_color, line_thickness);

            // Now draw the tips of the arrow. I do some scaling so that the
            // tips look proportional to the main line of the arrow.

            p.x = (int) (q.x + 9 * cos(angle + CV_PI / 4));
            p.y = (int) (q.y + 9 * sin(angle + CV_PI / 4));
            line(frame, p, q, line_color, line_thickness);

            p.x = (int) (q.x + 9 * cos(angle - CV_PI / 4));
            p.y = (int) (q.y + 9 * sin(angle - CV_PI / 4));
            line(frame, p, q, line_color, line_thickness);
        }
    }
}

#ifdef HAVE_OPENGL

struct DrawData
{
    GlTexture tex;
    GlArrays arr;
};

void drawCallback(void* userdata)
{
    DrawData* data = static_cast<DrawData*>(userdata);

    if (data->tex.empty() || data->arr.empty())
        return;

    static GlCamera camera;
    static bool init_camera = true;

    if (init_camera)
    {
        camera.setOrthoProjection(0.0, 1.0, 1.0, 0.0, 0.0, 1.0);
        camera.lookAt(Point3d(0.0, 0.0, 1.0), Point3d(0.0, 0.0, 0.0), Point3d(0.0, 1.0, 0.0));
        init_camera = false;
    }

    camera.setupProjectionMatrix();
    camera.setupModelViewMatrix();

    render(data->tex);
    render(data->arr, RenderMode::TRIANGLES);
}

#endif

template <typename T> inline T clamp (T x, T a, T b)
{
    return ((x) > (a) ? ((x) < (b) ? (x) : (b)) : (a));
}

template <typename T> inline T mapValue(T x, T a, T b, T c, T d)
{
    x = clamp(x, a, b);
    return c + (d - c) * (x - a) / (b - a);
}

void getFlowField(const Mat& u, const Mat& v, Mat& flowField)
{
    float maxDisplacement = 1.0f;

    for (int i = 0; i < u.rows; ++i)
    {
        const float* ptr_u = u.ptr<float>(i);
        const float* ptr_v = v.ptr<float>(i);

        for (int j = 0; j < u.cols; ++j)
        {
            float d = max(fabsf(ptr_u[j]), fabsf(ptr_v[j]));

            if (d > maxDisplacement)
                maxDisplacement = d;
        }
    }

    flowField.create(u.size(), CV_8UC4);

    for (int i = 0; i < flowField.rows; ++i)
    {
        const float* ptr_u = u.ptr<float>(i);
        const float* ptr_v = v.ptr<float>(i);

        Vec4b* row = flowField.ptr<Vec4b>(i);

        for (int j = 0; j < flowField.cols; ++j)
        {
            row[j][0] = 0;
            row[j][1] = static_cast<unsigned char> (mapValue (-ptr_v[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f));
            row[j][2] = static_cast<unsigned char> (mapValue ( ptr_u[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f));
            row[j][3] = 255;
        }
    }
}

int main(int argc, const char* argv[])
{
    const char* keys =
        "{ h | help | false | print help message }"
        "{ l | left | | specify left image }"
        "{ r | right | | specify right image }"
        "{ g | gray | false | use grayscale sources [PyrLK Sparse] }"
        "{ p | points | 4000 | specify points count [GoodFeatureToTrack] }";

    CommandLineParser cmd(argc, argv, keys);

    if (cmd.get<bool>("help"))
    {
        cout << "Usage: pyrlk_optical_flow [options]" << endl;
        cout << "Available options:" << endl;
        cmd.printParams();
        return 0;
    }

    string fname0 = cmd.get<string>("left");
    string fname1 = cmd.get<string>("right");

    if (fname0.empty() || fname1.empty())
    {
        cerr << "Missing input file names" << endl;
        return -1;
    }

    bool useGray = cmd.get<bool>("gray");
    int points = cmd.get<int>("points");

    Mat frame0 = imread(fname0);
    Mat frame1 = imread(fname1);

    if (frame0.empty() || frame1.empty())
    {
        cout << "Can't load input images" << endl;
        return -1;
    }

    namedWindow("PyrLK [Sparse]", WINDOW_NORMAL);
    namedWindow("PyrLK [Dense] Flow Field", WINDOW_NORMAL);

#ifdef HAVE_OPENGL
    namedWindow("PyrLK [Dense]", WINDOW_OPENGL);

    setGlDevice();
#endif

    cout << "Image size : " << frame0.cols << " x " << frame0.rows << endl;
    cout << "Points count : " << points << endl;

    cout << endl;

    Mat frame0Gray;
    cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
    Mat frame1Gray;
    cvtColor(frame1, frame1Gray, COLOR_BGR2GRAY);

    // goodFeaturesToTrack

    GoodFeaturesToTrackDetector_GPU detector(points, 0.01, 0.0);

    GpuMat d_frame0Gray(frame0Gray);
    GpuMat d_prevPts;

    detector(d_frame0Gray, d_prevPts);

    // Sparse

    PyrLKOpticalFlow d_pyrLK;

    GpuMat d_frame0(frame0);
    GpuMat d_frame1(frame1);
    GpuMat d_frame1Gray(frame1Gray);
    GpuMat d_nextPts;
    GpuMat d_status;

    d_pyrLK.sparse(useGray ? d_frame0Gray : d_frame0, useGray ? d_frame1Gray : d_frame1, d_prevPts, d_nextPts, d_status);

    // Draw arrows

    vector<Point2f> prevPts(d_prevPts.cols);
    download(d_prevPts, prevPts);

    vector<Point2f> nextPts(d_nextPts.cols);
    download(d_nextPts, nextPts);

    vector<uchar> status(d_status.cols);
    download(d_status, status);

    drawArrows(frame0, prevPts, nextPts, status, Scalar(255, 0, 0));

    imshow("PyrLK [Sparse]", frame0);

    // Dense

    GpuMat d_u;
    GpuMat d_v;

    d_pyrLK.dense(d_frame0Gray, d_frame1Gray, d_u, d_v);

    // Draw flow field

    Mat flowField;
    getFlowField(Mat(d_u), Mat(d_v), flowField);

    imshow("PyrLK [Dense] Flow Field", flowField);

#ifdef HAVE_OPENGL
    setOpenGlContext("PyrLK [Dense]");

    GpuMat d_vertex, d_colors;
    createOpticalFlowNeedleMap(d_u, d_v, d_vertex, d_colors);

    DrawData drawData;

    drawData.tex.copyFrom(d_frame0Gray);
    drawData.arr.setVertexArray(d_vertex);
    drawData.arr.setColorArray(d_colors, false);

    setOpenGlDrawCallback("PyrLK [Dense]", drawCallback, &drawData);
#endif

    waitKey();

    return 0;
}