parent
fe2e89df1b
commit
36e42084f0
5 changed files with 967 additions and 0 deletions
@@ -0,0 +1,414 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#if !defined CUDA_DISABLER

#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/reduce.hpp"

using namespace cv::gpu;
using namespace cv::gpu::device;

namespace optflowbm
{
    texture<uchar, cudaTextureType2D, cudaReadModeElementType> tex_prev(false, cudaFilterModePoint, cudaAddressModeClamp);
    texture<uchar, cudaTextureType2D, cudaReadModeElementType> tex_curr(false, cudaFilterModePoint, cudaAddressModeClamp);

    __device__ int cmpBlocks(int X1, int Y1, int X2, int Y2, int2 blockSize)
    {
        int s = 0;

        for (int y = 0; y < blockSize.y; ++y)
        {
            for (int x = 0; x < blockSize.x; ++x)
                s += ::abs(tex2D(tex_prev, X1 + x, Y1 + y) - tex2D(tex_curr, X2 + x, Y2 + y));
        }

        return s;
    }

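    // One thread per cell of the velocity field: the block at (X1, Y1) in the previous
    // frame is matched against candidate positions in the current frame, and cmpBlocks()
    // above scores each candidate as the sum of absolute differences (SAD) read through
    // the two bound textures.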
    __global__ void calcOptFlowBM(PtrStepSzf velx, PtrStepf vely, const int2 blockSize, const int2 shiftSize, const bool usePrevious,
                                  const int maxX, const int maxY, const int acceptLevel, const int escapeLevel,
                                  const short2* ss, const int ssCount)
    {
        const int j = blockIdx.x * blockDim.x + threadIdx.x;
        const int i = blockIdx.y * blockDim.y + threadIdx.y;

        if (i >= velx.rows || j >= velx.cols)
            return;

        const int X1 = j * shiftSize.x;
        const int Y1 = i * shiftSize.y;

        const int offX = usePrevious ? __float2int_rn(velx(i, j)) : 0;
        const int offY = usePrevious ? __float2int_rn(vely(i, j)) : 0;

        int X2 = X1 + offX;
        int Y2 = Y1 + offY;

        int dist = numeric_limits<int>::max();

        if (0 <= X2 && X2 <= maxX && 0 <= Y2 && Y2 <= maxY)
            dist = cmpBlocks(X1, Y1, X2, Y2, blockSize);

        int countMin = 1;
        int sumx = offX;
        int sumy = offY;

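        // If the initial guess is not already below acceptLevel, scan the candidate
        // offsets in ss[] (the host-built spiral scheme). Candidates tying the current
        // best SAD are accumulated in sumx/sumy and later averaged; if even the best
        // SAD stays above escapeLevel, the search result is discarded and the initial
        // offset kept.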
        if (dist > acceptLevel)
        {
            // do brute-force search
            for (int k = 0; k < ssCount; ++k)
            {
                const short2 ssVal = ss[k];

                const int dx = offX + ssVal.x;
                const int dy = offY + ssVal.y;

                X2 = X1 + dx;
                Y2 = Y1 + dy;

                if (0 <= X2 && X2 <= maxX && 0 <= Y2 && Y2 <= maxY)
                {
                    const int tmpDist = cmpBlocks(X1, Y1, X2, Y2, blockSize);
                    if (tmpDist < acceptLevel)
                    {
                        sumx = dx;
                        sumy = dy;
                        countMin = 1;
                        break;
                    }

                    if (tmpDist < dist)
                    {
                        dist = tmpDist;
                        sumx = dx;
                        sumy = dy;
                        countMin = 1;
                    }
                    else if (tmpDist == dist)
                    {
                        sumx += dx;
                        sumy += dy;
                        countMin++;
                    }
                }
            }

            if (dist > escapeLevel)
            {
                sumx = offX;
                sumy = offY;
                countMin = 1;
            }
        }

        velx(i, j) = static_cast<float>(sumx) / countMin;
        vely(i, j) = static_cast<float>(sumy) / countMin;
    }

    void calc(PtrStepSzb prev, PtrStepSzb curr, PtrStepSzf velx, PtrStepSzf vely, int2 blockSize, int2 shiftSize, bool usePrevious,
              int maxX, int maxY, int acceptLevel, int escapeLevel, const short2* ss, int ssCount, cudaStream_t stream)
    {
        bindTexture(&tex_prev, prev);
        bindTexture(&tex_curr, curr);

        const dim3 block(32, 8);
        const dim3 grid(divUp(velx.cols, block.x), divUp(vely.rows, block.y));

        calcOptFlowBM<<<grid, block, 0, stream>>>(velx, vely, blockSize, shiftSize, usePrevious,
                                                  maxX, maxY, acceptLevel, escapeLevel, ss, ssCount);
        cudaSafeCall( cudaGetLastError() );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }
}

/////////////////////////////////////////////////////////
// Fast approximate version

namespace optflowbm_fast
{
    enum
    {
        CTA_SIZE = 128,

        TILE_COLS = 128,
        TILE_ROWS = 32,

        STRIDE = CTA_SIZE
    };

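    // Each CUDA block processes one TILE_COLS x TILE_ROWS tile of the image with
    // CTA_SIZE cooperating threads: the threads stride (step STRIDE) over the
    // search_window * search_window candidate displacements kept in shared memory.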
    template <typename T> __device__ __forceinline__ int calcDist(T a, T b)
    {
        return ::abs(a - b);
    }

    template <class T> struct FastOptFlowBM
    {

        int search_radius;
        int block_radius;

        int search_window;
        int block_window;

        PtrStepSz<T> I0;
        PtrStep<T> I1;

        mutable PtrStepi buffer;

        FastOptFlowBM(int search_window_, int block_window_,
                      PtrStepSz<T> I0_, PtrStepSz<T> I1_,
                      PtrStepi buffer_) :
            search_radius(search_window_ / 2), block_radius(block_window_ / 2),
            search_window(search_window_), block_window(block_window_),
            I0(I0_), I1(I1_),
            buffer(buffer_)
        {
        }

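        // Sliding-window SAD maintenance: dist_sums[index] holds the current block SAD
        // for candidate displacement 'index'. Moving one pixel to the right replaces the
        // oldest column sum (slot 'first' in col_sums) with a freshly computed one; on
        // rows after the first, that new column sum is itself derived incrementally from
        // up_col_sums by adding the entering pixel pair and removing the leaving one.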
        __device__ __forceinline__ void initSums_BruteForce(int i, int j, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const
        {
            for (int index = threadIdx.x; index < search_window * search_window; index += STRIDE)
            {
                dist_sums[index] = 0;

                for (int tx = 0; tx < block_window; ++tx)
                    col_sums(tx, index) = 0;

                int y = index / search_window;
                int x = index - y * search_window;

                int ay = i;
                int ax = j;

                int by = i + y - search_radius;
                int bx = j + x - search_radius;

                for (int tx = -block_radius; tx <= block_radius; ++tx)
                {
                    int col_sum = 0;
                    for (int ty = -block_radius; ty <= block_radius; ++ty)
                    {
                        int dist = calcDist(I0(ay + ty, ax + tx), I1(by + ty, bx + tx));

                        dist_sums[index] += dist;
                        col_sum += dist;
                    }

                    col_sums(tx + block_radius, index) = col_sum;
                }

                up_col_sums(j, index) = col_sums(block_window - 1, index);
            }
        }

        __device__ __forceinline__ void shiftRight_FirstRow(int i, int j, int first, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const
        {
            for (int index = threadIdx.x; index < search_window * search_window; index += STRIDE)
            {
                int y = index / search_window;
                int x = index - y * search_window;

                int ay = i;
                int ax = j + block_radius;

                int by = i + y - search_radius;
                int bx = j + x - search_radius + block_radius;

                int col_sum = 0;

                for (int ty = -block_radius; ty <= block_radius; ++ty)
                    col_sum += calcDist(I0(ay + ty, ax), I1(by + ty, bx));

                dist_sums[index] += col_sum - col_sums(first, index);

                col_sums(first, index) = col_sum;
                up_col_sums(j, index) = col_sum;
            }
        }

        __device__ __forceinline__ void shiftRight_UpSums(int i, int j, int first, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const
        {
            int ay = i;
            int ax = j + block_radius;

            T a_up = I0(ay - block_radius - 1, ax);
            T a_down = I0(ay + block_radius, ax);

            for (int index = threadIdx.x; index < search_window * search_window; index += STRIDE)
            {
                int y = index / search_window;
                int x = index - y * search_window;

                int by = i + y - search_radius;
                int bx = j + x - search_radius + block_radius;

                T b_up = I1(by - block_radius - 1, bx);
                T b_down = I1(by + block_radius, bx);

                int col_sum = up_col_sums(j, index) + calcDist(a_down, b_down) - calcDist(a_up, b_up);

                dist_sums[index] += col_sum - col_sums(first, index);
                col_sums(first, index) = col_sum;
                up_col_sums(j, index) = col_sum;
            }
        }

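        // Block-wide arg-min over the candidate SADs: each thread finds its local best,
        // reduceKeyVal() reduces (distance, index) pairs across the CTA, and thread 0
        // converts the winning index back into an (x, y) displacement.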
        __device__ __forceinline__ void convolve_window(int i, int j, const int* dist_sums, float& velx, float& vely) const
        {
            int bestDist = numeric_limits<int>::max();
            int bestInd = -1;

            for (int index = threadIdx.x; index < search_window * search_window; index += STRIDE)
            {
                int curDist = dist_sums[index];
                if (curDist < bestDist)
                {
                    bestDist = curDist;
                    bestInd = index;
                }
            }

            __shared__ int cta_dist_buffer[CTA_SIZE];
            __shared__ int cta_ind_buffer[CTA_SIZE];

            reduceKeyVal<CTA_SIZE>(cta_dist_buffer, bestDist, cta_ind_buffer, bestInd, threadIdx.x, less<int>());

            if (threadIdx.x == 0)
            {
                int y = bestInd / search_window;
                int x = bestInd - y * search_window;

                velx = x - search_radius;
                vely = y - search_radius;
            }
        }

        __device__ __forceinline__ void operator()(PtrStepf velx, PtrStepf vely) const
        {
            int tbx = blockIdx.x * TILE_COLS;
            int tby = blockIdx.y * TILE_ROWS;

            int tex = ::min(tbx + TILE_COLS, I0.cols);
            int tey = ::min(tby + TILE_ROWS, I0.rows);

            PtrStepi col_sums;
            col_sums.data = buffer.ptr(I0.cols + blockIdx.x * block_window) + blockIdx.y * search_window * search_window;
            col_sums.step = buffer.step;

            PtrStepi up_col_sums;
            up_col_sums.data = buffer.data + blockIdx.y * search_window * search_window;
            up_col_sums.step = buffer.step;

            extern __shared__ int dist_sums[]; // search_window * search_window

            int first = 0;

            for (int i = tby; i < tey; ++i)
            {
                for (int j = tbx; j < tex; ++j)
                {
                    __syncthreads();

                    if (j == tbx)
                    {
                        initSums_BruteForce(i, j, dist_sums, col_sums, up_col_sums);
                        first = 0;
                    }
                    else
                    {
                        if (i == tby)
                            shiftRight_FirstRow(i, j, first, dist_sums, col_sums, up_col_sums);
                        else
                            shiftRight_UpSums(i, j, first, dist_sums, col_sums, up_col_sums);

                        first = (first + 1) % block_window;
                    }

                    __syncthreads();

                    convolve_window(i, j, dist_sums, velx(i, j), vely(i, j));
                }
            }
        }

    };

    template <typename T> __global__ void optflowbm_fast_kernel(const FastOptFlowBM<T> fbm, PtrStepf velx, PtrStepf vely)
    {
        fbm(velx, vely);
    }

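    // Scratch-buffer layout used above: rows [0, src_cols) hold up_col_sums (one row per
    // image column j), and the block_window rows starting at src_cols + blockIdx.x *
    // block_window hold that tile column's col_sums ring; each grid row (blockIdx.y)
    // owns its own search_window^2-wide column slice.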
    void get_buffer_size(int src_cols, int src_rows, int search_window, int block_window, int& buffer_cols, int& buffer_rows)
    {
        dim3 grid(divUp(src_cols, TILE_COLS), divUp(src_rows, TILE_ROWS));

        buffer_cols = search_window * search_window * grid.y;
        buffer_rows = src_cols + block_window * grid.x;
    }

    template <typename T>
    void calc(PtrStepSzb I0, PtrStepSzb I1, PtrStepSzf velx, PtrStepSzf vely, PtrStepi buffer, int search_window, int block_window, cudaStream_t stream)
    {
        FastOptFlowBM<T> fbm(search_window, block_window, I0, I1, buffer);

        dim3 block(CTA_SIZE, 1);
        dim3 grid(divUp(I0.cols, TILE_COLS), divUp(I0.rows, TILE_ROWS));

        size_t smem = search_window * search_window * sizeof(int);

        optflowbm_fast_kernel<<<grid, block, smem, stream>>>(fbm, velx, vely);
        cudaSafeCall( cudaGetLastError() );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }

    template void calc<uchar>(PtrStepSzb I0, PtrStepSzb I1, PtrStepSzf velx, PtrStepSzf vely, PtrStepi buffer, int search_window, int block_window, cudaStream_t stream);
}

#endif // !defined CUDA_DISABLER
@@ -0,0 +1,243 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp" |
||||||
|
|
||||||
|
using namespace std; |
||||||
|
using namespace cv; |
||||||
|
using namespace cv::gpu; |
||||||
|
|
||||||
|
#if !defined HAVE_CUDA || defined(CUDA_DISABLER) |
||||||
|
|
||||||
|
void cv::gpu::calcOpticalFlowBM(const GpuMat&, const GpuMat&, Size, Size, Size, bool, GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); } |
||||||
|
|
||||||
|
void cv::gpu::FastOpticalFlowBM::operator ()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, int, int, Stream&) { throw_nogpu(); } |
||||||
|
|
||||||
|
#else // HAVE_CUDA
|
||||||
|
|
||||||
|
namespace optflowbm |
||||||
|
{ |
||||||
|
void calc(PtrStepSzb prev, PtrStepSzb curr, PtrStepSzf velx, PtrStepSzf vely, int2 blockSize, int2 shiftSize, bool usePrevious, |
||||||
|
int maxX, int maxY, int acceptLevel, int escapeLevel, const short2* ss, int ssCount, cudaStream_t stream); |
||||||
|
} |
||||||
|
|
||||||
|
void cv::gpu::calcOpticalFlowBM(const GpuMat& prev, const GpuMat& curr, Size blockSize, Size shiftSize, Size maxRange, bool usePrevious, GpuMat& velx, GpuMat& vely, GpuMat& buf, Stream& st) |
||||||
|
{ |
||||||
|
CV_Assert( prev.type() == CV_8UC1 ); |
||||||
|
CV_Assert( curr.size() == prev.size() && curr.type() == prev.type() ); |
||||||
|
|
||||||
|
const Size velSize((prev.cols - blockSize.width + shiftSize.width) / shiftSize.width, |
||||||
|
(prev.rows - blockSize.height + shiftSize.height) / shiftSize.height); |
||||||
|
|
||||||
|
velx.create(velSize, CV_32FC1); |
||||||
|
vely.create(velSize, CV_32FC1); |
||||||
|
|
||||||
|
// scanning scheme coordinates
|
||||||
|
vector<short2> ss((2 * maxRange.width + 1) * (2 * maxRange.height + 1)); |
||||||
|
int ssCount = 0; |
||||||
|
|
||||||
|
// Calculate scanning scheme
|
||||||
|
const int minCount = std::min(maxRange.width, maxRange.height); |
||||||
|
|
||||||
|
// use spiral search pattern
|
||||||
|
//
|
||||||
|
// 9 10 11 12
|
||||||
|
// 8 1 2 13
|
||||||
|
// 7 * 3 14
|
||||||
|
// 6 5 4 15
|
||||||
|
//... 20 19 18 17
|
||||||
|
//
|
||||||
|
|
||||||
|
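    // For example, the first ring (i = 0) emits the eight offsets
    // (0,-1), (1,-1), (1,0), (1,1), (0,1), (-1,1), (-1,0), (-1,-1)
    // (with y growing downward), i.e. positions 1..8 in the diagram above,
    // before moving on to the next ring.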
for (int i = 0; i < minCount; ++i) |
||||||
|
{ |
||||||
|
// four cycles along sides
|
||||||
|
int x = -i - 1, y = x; |
||||||
|
|
||||||
|
// upper side
|
||||||
|
for (int j = -i; j <= i + 1; ++j, ++ssCount) |
||||||
|
{ |
||||||
|
ss[ssCount].x = ++x; |
||||||
|
ss[ssCount].y = y; |
||||||
|
} |
||||||
|
|
||||||
|
// right side
|
||||||
|
for (int j = -i; j <= i + 1; ++j, ++ssCount) |
||||||
|
{ |
||||||
|
ss[ssCount].x = x; |
||||||
|
ss[ssCount].y = ++y; |
||||||
|
} |
||||||
|
|
||||||
|
// bottom side
|
||||||
|
for (int j = -i; j <= i + 1; ++j, ++ssCount) |
||||||
|
{ |
||||||
|
ss[ssCount].x = --x; |
||||||
|
ss[ssCount].y = y; |
||||||
|
} |
||||||
|
|
||||||
|
// left side
|
||||||
|
for (int j = -i; j <= i + 1; ++j, ++ssCount) |
||||||
|
{ |
||||||
|
ss[ssCount].x = x; |
||||||
|
ss[ssCount].y = --y; |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// the rest part
|
||||||
|
if (maxRange.width < maxRange.height) |
||||||
|
{ |
||||||
|
const int xleft = -minCount; |
||||||
|
|
||||||
|
// cycle by neighbor rings
|
||||||
|
for (int i = minCount; i < maxRange.height; ++i) |
||||||
|
{ |
||||||
|
// two cycles by x
|
||||||
|
int y = -(i + 1); |
||||||
|
int x = xleft; |
||||||
|
|
||||||
|
// upper side
|
||||||
|
for (int j = -maxRange.width; j <= maxRange.width; ++j, ++ssCount, ++x) |
||||||
|
{ |
||||||
|
ss[ssCount].x = x; |
||||||
|
ss[ssCount].y = y; |
||||||
|
} |
||||||
|
|
||||||
|
x = xleft; |
||||||
|
y = -y; |
||||||
|
|
||||||
|
// bottom side
|
||||||
|
for (int j = -maxRange.width; j <= maxRange.width; ++j, ++ssCount, ++x) |
||||||
|
{ |
||||||
|
ss[ssCount].x = x; |
||||||
|
ss[ssCount].y = y; |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
else if (maxRange.width > maxRange.height) |
||||||
|
{ |
||||||
|
const int yupper = -minCount; |
||||||
|
|
||||||
|
// cycle by neighbor rings
|
||||||
|
for (int i = minCount; i < maxRange.width; ++i) |
||||||
|
{ |
||||||
|
// two cycles by y
|
||||||
|
int x = -(i + 1); |
||||||
|
int y = yupper; |
||||||
|
|
||||||
|
// left side
|
||||||
|
for (int j = -maxRange.height; j <= maxRange.height; ++j, ++ssCount, ++y) |
||||||
|
{ |
||||||
|
ss[ssCount].x = x; |
||||||
|
ss[ssCount].y = y; |
||||||
|
} |
||||||
|
|
||||||
|
y = yupper; |
||||||
|
x = -x; |
||||||
|
|
||||||
|
// right side
|
||||||
|
for (int j = -maxRange.height; j <= maxRange.height; ++j, ++ssCount, ++y) |
||||||
|
{ |
||||||
|
ss[ssCount].x = x; |
||||||
|
ss[ssCount].y = y; |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
const cudaStream_t stream = StreamAccessor::getStream(st); |
||||||
|
|
||||||
|
ensureSizeIsEnough(1, ssCount, CV_16SC2, buf); |
||||||
|
if (stream == 0) |
||||||
|
cudaSafeCall( cudaMemcpy(buf.data, &ss[0], ssCount * sizeof(short2), cudaMemcpyHostToDevice) ); |
||||||
|
else |
||||||
|
cudaSafeCall( cudaMemcpyAsync(buf.data, &ss[0], ssCount * sizeof(short2), cudaMemcpyHostToDevice, stream) ); |
||||||
|
|
||||||
|
const int maxX = prev.cols - blockSize.width; |
||||||
|
const int maxY = prev.rows - blockSize.height; |
||||||
|
|
||||||
|
const int SMALL_DIFF = 2; |
||||||
|
const int BIG_DIFF = 128; |
||||||
|
|
||||||
|
const int blSize = blockSize.area(); |
||||||
|
const int acceptLevel = blSize * SMALL_DIFF; |
||||||
|
const int escapeLevel = blSize * BIG_DIFF; |
||||||
|
|
||||||
|
optflowbm::calc(prev, curr, velx, vely, |
||||||
|
make_int2(blockSize.width, blockSize.height), make_int2(shiftSize.width, shiftSize.height), usePrevious, |
||||||
|
maxX, maxY, acceptLevel, escapeLevel, buf.ptr<short2>(), ssCount, stream); |
||||||
|
} |
||||||
|
|
||||||
|
namespace optflowbm_fast |
||||||
|
{ |
||||||
|
void get_buffer_size(int src_cols, int src_rows, int search_window, int block_window, int& buffer_cols, int& buffer_rows); |
||||||
|
|
||||||
|
template <typename T> |
||||||
|
void calc(PtrStepSzb I0, PtrStepSzb I1, PtrStepSzf velx, PtrStepSzf vely, PtrStepi buffer, int search_window, int block_window, cudaStream_t stream); |
||||||
|
} |
||||||
|
|
||||||
|
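// The inputs are padded by search_window/2 + block_window/2 pixels on every side with
// copyMakeBorder so the device code can read block and search neighbourhoods without
// bounds checks; I0_hdr/I1_hdr below are headers over the unpadded region inside the
// extended images, which is what the kernels actually index from.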
void cv::gpu::FastOpticalFlowBM::operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, int search_window, int block_window, Stream& stream)
{
    CV_Assert( I0.type() == CV_8UC1 );
    CV_Assert( I1.size() == I0.size() && I1.type() == I0.type() );

    int border_size = search_window / 2 + block_window / 2;
    Size esize = I0.size() + Size(border_size, border_size) * 2;

    ensureSizeIsEnough(esize, I0.type(), extended_I0);
    ensureSizeIsEnough(esize, I0.type(), extended_I1);

    copyMakeBorder(I0, extended_I0, border_size, border_size, border_size, border_size, cv::BORDER_DEFAULT, Scalar(), stream);
    copyMakeBorder(I1, extended_I1, border_size, border_size, border_size, border_size, cv::BORDER_DEFAULT, Scalar(), stream);

    GpuMat I0_hdr = extended_I0(Rect(Point2i(border_size, border_size), I0.size()));
    GpuMat I1_hdr = extended_I1(Rect(Point2i(border_size, border_size), I0.size()));

    int bcols, brows;
    optflowbm_fast::get_buffer_size(I0.cols, I0.rows, search_window, block_window, bcols, brows);

    ensureSizeIsEnough(brows, bcols, CV_32SC1, buffer);

    flowx.create(I0.size(), CV_32FC1);
    flowy.create(I0.size(), CV_32FC1);

    optflowbm_fast::calc<uchar>(I0_hdr, I1_hdr, flowx, flowy, buffer, search_window, block_window, StreamAccessor::getStream(stream));
}

#endif // HAVE_CUDA
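For reference, a minimal host-side sketch of how the two entry points added in this commit might be driven from the gpu module. The frame file names and the block/search sizes are illustrative only, and the public declarations (in gpu.hpp) are assumed to match the definitions above.

    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/gpu/gpu.hpp>

    int main()
    {
        // Two consecutive frames, loaded as 8-bit grayscale (flag 0); paths are placeholders.
        cv::Mat prev = cv::imread("frame0.png", 0);
        cv::Mat curr = cv::imread("frame1.png", 0);
        CV_Assert( !prev.empty() && !curr.empty() );

        cv::gpu::GpuMat d_prev(prev), d_curr(curr);

        // Full-search block matching: one velocity sample per 16x16 block, shifted by
        // one pixel, searching up to +/-16 pixels around each block position.
        cv::gpu::GpuMat velx, vely, buf;
        cv::gpu::calcOpticalFlowBM(d_prev, d_curr, cv::Size(16, 16), cv::Size(1, 1),
                                   cv::Size(16, 16), false, velx, vely, buf,
                                   cv::gpu::Stream::Null());

        // Fast approximate version: dense per-pixel flow with a 21x21 search window
        // and 7x7 blocks.
        cv::gpu::FastOpticalFlowBM fbm;
        cv::gpu::GpuMat flowx, flowy;
        fbm(d_prev, d_curr, flowx, flowy, 21, 7, cv::gpu::Stream::Null());

        return 0;
    }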