|
|
|
@@ -42,329 +42,150 @@
|
|
|
|
|
|
|
|
|
#include "precomp.hpp" |
|
|
|
|
|
|
|
|
|
using namespace cv;
using namespace cv::gpu;

#if !defined HAVE_CUDA || defined(CUDA_DISABLER)

class cv::gpu::FGDStatModel::Impl
{
};
|
|
|
|
|
|
|
|
|
cv::gpu::FGDStatModel::Params::Params() { throw_no_cuda(); } |
|
|
|
|
cv::gpu::FGDParams::FGDParams() { throw_no_cuda(); } |
|
|
|
|
|
|
|
|
|
cv::gpu::FGDStatModel::FGDStatModel(int) { throw_no_cuda(); } |
|
|
|
|
cv::gpu::FGDStatModel::FGDStatModel(const cv::gpu::GpuMat&, const Params&, int) { throw_no_cuda(); } |
|
|
|
|
cv::gpu::FGDStatModel::~FGDStatModel() {} |
|
|
|
|
void cv::gpu::FGDStatModel::create(const cv::gpu::GpuMat&, const Params&) { throw_no_cuda(); } |
|
|
|
|
void cv::gpu::FGDStatModel::release() {} |
|
|
|
|
int cv::gpu::FGDStatModel::update(const cv::gpu::GpuMat&) { throw_no_cuda(); return 0; } |
|
|
|
|
Ptr<gpu::BackgroundSubtractorFGD> cv::gpu::createBackgroundSubtractorFGD(const FGDParams&) { throw_no_cuda(); return Ptr<gpu::BackgroundSubtractorFGD>(); } |
|
|
|
|
|
|
|
|
|
#else |
|
|
|
|
|
|
|
|
|
#include "cuda/fgd.hpp" |
|
|
|
|
#include "opencv2/imgproc/imgproc_c.h" |
|
|
|
|
|
|
|
|
|
/////////////////////////////////////////////////////////////////////////
|
|
|
|
|
// FGDParams
|
|
|
|
|
|
|
|
|
|
namespace |
|
|
|
|
{ |
|
|
|
|
class BGPixelStat |
|
|
|
|
{ |
|
|
|
|
public: |
|
|
|
|
void create(cv::Size size, const cv::gpu::FGDStatModel::Params& params, int out_cn); |
|
|
|
|
void release(); |
|
|
|
|
|
|
|
|
|
void setTrained(); |
|
|
|
|
|
|
|
|
|
operator bgfg::BGPixelStat(); |
|
|
|
|
|
|
|
|
|
private: |
|
|
|
|
cv::gpu::GpuMat Pbc_; |
|
|
|
|
cv::gpu::GpuMat Pbcc_; |
|
|
|
|
cv::gpu::GpuMat is_trained_st_model_; |
|
|
|
|
cv::gpu::GpuMat is_trained_dyn_model_; |
|
|
|
|
|
|
|
|
|
cv::gpu::GpuMat ctable_Pv_; |
|
|
|
|
cv::gpu::GpuMat ctable_Pvb_; |
|
|
|
|
cv::gpu::GpuMat ctable_v_; |
|
|
|
|
|
|
|
|
|
cv::gpu::GpuMat cctable_Pv_; |
|
|
|
|
cv::gpu::GpuMat cctable_Pvb_; |
|
|
|
|
cv::gpu::GpuMat cctable_v1_; |
|
|
|
|
cv::gpu::GpuMat cctable_v2_; |
|
|
|
|
}; |
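// Allocates the per-pixel statistic buffers (Pbc/Pbcc probability maps, trained flags and the N2c/N2cc colour tables) and zero-initialises them.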
|
|
|
|
|
|
|
|
|
void BGPixelStat::create(cv::Size size, const cv::gpu::FGDStatModel::Params& params, int out_cn) |
|
|
|
|
{ |
|
|
|
|
cv::gpu::ensureSizeIsEnough(size, CV_32FC1, Pbc_); |
|
|
|
|
Pbc_.setTo(cv::Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
cv::gpu::ensureSizeIsEnough(size, CV_32FC1, Pbcc_); |
|
|
|
|
Pbcc_.setTo(cv::Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
cv::gpu::ensureSizeIsEnough(size, CV_8UC1, is_trained_st_model_); |
|
|
|
|
is_trained_st_model_.setTo(cv::Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
cv::gpu::ensureSizeIsEnough(size, CV_8UC1, is_trained_dyn_model_); |
|
|
|
|
is_trained_dyn_model_.setTo(cv::Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
cv::gpu::ensureSizeIsEnough(params.N2c * size.height, size.width, CV_32FC1, ctable_Pv_); |
|
|
|
|
ctable_Pv_.setTo(cv::Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
cv::gpu::ensureSizeIsEnough(params.N2c * size.height, size.width, CV_32FC1, ctable_Pvb_); |
|
|
|
|
ctable_Pvb_.setTo(cv::Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
cv::gpu::ensureSizeIsEnough(params.N2c * size.height, size.width, CV_8UC(out_cn), ctable_v_); |
|
|
|
|
ctable_v_.setTo(cv::Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
cv::gpu::ensureSizeIsEnough(params.N2cc * size.height, size.width, CV_32FC1, cctable_Pv_); |
|
|
|
|
cctable_Pv_.setTo(cv::Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
cv::gpu::ensureSizeIsEnough(params.N2cc * size.height, size.width, CV_32FC1, cctable_Pvb_); |
|
|
|
|
cctable_Pvb_.setTo(cv::Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
cv::gpu::ensureSizeIsEnough(params.N2cc * size.height, size.width, CV_8UC(out_cn), cctable_v1_); |
|
|
|
|
cctable_v1_.setTo(cv::Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
cv::gpu::ensureSizeIsEnough(params.N2cc * size.height, size.width, CV_8UC(out_cn), cctable_v2_); |
|
|
|
|
cctable_v2_.setTo(cv::Scalar::all(0)); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
void BGPixelStat::release() |
|
|
|
|
{ |
|
|
|
|
Pbc_.release(); |
|
|
|
|
Pbcc_.release(); |
|
|
|
|
is_trained_st_model_.release(); |
|
|
|
|
is_trained_dyn_model_.release(); |
|
|
|
|
|
|
|
|
|
ctable_Pv_.release(); |
|
|
|
|
ctable_Pvb_.release(); |
|
|
|
|
ctable_v_.release(); |
|
|
|
|
|
|
|
|
|
cctable_Pv_.release(); |
|
|
|
|
cctable_Pvb_.release(); |
|
|
|
|
cctable_v1_.release(); |
|
|
|
|
cctable_v2_.release(); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
void BGPixelStat::setTrained() |
|
|
|
|
{ |
|
|
|
|
is_trained_st_model_.setTo(cv::Scalar::all(1)); |
|
|
|
|
is_trained_dyn_model_.setTo(cv::Scalar::all(1)); |
|
|
|
|
} |
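// Packs the GpuMat buffers into the plain pointer/step struct consumed by the CUDA kernels.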
|
|
|
|
|
|
|
|
|
BGPixelStat::operator bgfg::BGPixelStat() |
|
|
|
|
{ |
|
|
|
|
bgfg::BGPixelStat stat; |
|
|
|
|
|
|
|
|
|
stat.rows_ = Pbc_.rows; |
|
|
|
|
|
|
|
|
|
stat.Pbc_data_ = Pbc_.data; |
|
|
|
|
stat.Pbc_step_ = Pbc_.step; |
|
|
|
|
|
|
|
|
|
stat.Pbcc_data_ = Pbcc_.data; |
|
|
|
|
stat.Pbcc_step_ = Pbcc_.step; |
|
|
|
|
|
|
|
|
|
stat.is_trained_st_model_data_ = is_trained_st_model_.data; |
|
|
|
|
stat.is_trained_st_model_step_ = is_trained_st_model_.step; |
|
|
|
|
|
|
|
|
|
stat.is_trained_dyn_model_data_ = is_trained_dyn_model_.data; |
|
|
|
|
stat.is_trained_dyn_model_step_ = is_trained_dyn_model_.step; |
|
|
|
|
|
|
|
|
|
stat.ctable_Pv_data_ = ctable_Pv_.data; |
|
|
|
|
stat.ctable_Pv_step_ = ctable_Pv_.step; |
|
|
|
|
stat.ctable_Pvb_data_ = ctable_Pvb_.data;
stat.ctable_Pvb_step_ = ctable_Pvb_.step;

stat.ctable_v_data_ = ctable_v_.data;
stat.ctable_v_step_ = ctable_v_.step;

stat.cctable_Pv_data_ = cctable_Pv_.data;
stat.cctable_Pv_step_ = cctable_Pv_.step;

stat.cctable_Pvb_data_ = cctable_Pvb_.data;
stat.cctable_Pvb_step_ = cctable_Pvb_.step;

stat.cctable_v1_data_ = cctable_v1_.data;
stat.cctable_v1_step_ = cctable_v1_.step;

stat.cctable_v2_data_ = cctable_v2_.data;
stat.cctable_v2_step_ = cctable_v2_.step;

return stat;
}

// Default parameters of foreground detection algorithm:
const int BGFG_FGD_LC = 128;
const int BGFG_FGD_N1C = 15;
const int BGFG_FGD_N2C = 25;

const int BGFG_FGD_LCC = 64;
const int BGFG_FGD_N1CC = 25;
const int BGFG_FGD_N2CC = 40;

// Background reference image update parameter:
const float BGFG_FGD_ALPHA_1 = 0.1f;

// stat model update parameter
// 0.002f ~ 1K frame(~45sec), 0.005 ~ 18sec (if 25fps and absolutely static BG)
const float BGFG_FGD_ALPHA_2 = 0.005f;

// start value for alpha parameter (to fast initiate statistic model)
const float BGFG_FGD_ALPHA_3 = 0.1f;

const float BGFG_FGD_DELTA = 2.0f;

const float BGFG_FGD_T = 0.9f;

const float BGFG_FGD_MINAREA = 15.0f;
}

cv::gpu::FGDParams::FGDParams()
{
Lc = BGFG_FGD_LC;
N1c = BGFG_FGD_N1C;
N2c = BGFG_FGD_N2C;

Lcc = BGFG_FGD_LCC;
N1cc = BGFG_FGD_N1CC;
N2cc = BGFG_FGD_N2CC;

delta = BGFG_FGD_DELTA;

alpha1 = BGFG_FGD_ALPHA_1;
alpha2 = BGFG_FGD_ALPHA_2;
alpha3 = BGFG_FGD_ALPHA_3;

T = BGFG_FGD_T;
minArea = BGFG_FGD_MINAREA;

is_obj_without_holes = true;
perform_morphing = 1;
}

class cv::gpu::FGDStatModel::Impl
{
public:
Impl(cv::gpu::GpuMat& background, cv::gpu::GpuMat& foreground, std::vector< std::vector<cv::Point> >& foreground_regions, int out_cn);
~Impl();

void create(const cv::gpu::GpuMat& firstFrame, const cv::gpu::FGDStatModel::Params& params);
void release();

int update(const cv::gpu::GpuMat& curFrame);

private:
Impl(const Impl&);
Impl& operator=(const Impl&);

int out_cn_;

cv::gpu::FGDStatModel::Params params_;

cv::gpu::GpuMat& background_;
cv::gpu::GpuMat& foreground_;
std::vector< std::vector<cv::Point> >& foreground_regions_;

cv::Mat h_foreground_;

cv::gpu::GpuMat prevFrame_;
cv::gpu::GpuMat Ftd_;
cv::gpu::GpuMat Fbd_;
BGPixelStat stat_;

cv::gpu::GpuMat hist_;
cv::gpu::GpuMat histBuf_;

cv::gpu::GpuMat countBuf_;

cv::gpu::GpuMat buf_;
cv::gpu::GpuMat filterBrd_;

cv::Ptr<cv::gpu::Filter> dilateFilter_;
cv::Ptr<cv::gpu::Filter> erodeFilter_;

CvMemStorage* storage_;
};

cv::gpu::FGDStatModel::Impl::Impl(cv::gpu::GpuMat& background, cv::gpu::GpuMat& foreground, std::vector< std::vector<cv::Point> >& foreground_regions, int out_cn) :
out_cn_(out_cn), background_(background), foreground_(foreground), foreground_regions_(foreground_regions)
{
CV_Assert( out_cn_ == 3 || out_cn_ == 4 );

storage_ = cvCreateMemStorage();
CV_Assert( storage_ != 0 );
}
|
|
|
|
|
|
|
|
|
cv::gpu::FGDStatModel::Impl::~Impl() |
|
|
|
|
{ |
|
|
|
|
cvReleaseMemStorage(&storage_); |
|
|
|
|
} |
|
|
|
|
/////////////////////////////////////////////////////////////////////////
|
|
|
|
|
// copyChannels
|
|
|
|
|
|
|
|
|
|
namespace |
|
|
|
|
{ |
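// Copies src into dst, converting the channel count with cvtColor when dst_cn differs from src.channels().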
|
|
|
|
void copyChannels(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst, int dst_cn = -1) |
|
|
|
|
void copyChannels(const GpuMat& src, GpuMat& dst, int dst_cn = -1) |
|
|
|
|
{ |
|
|
|
|
const int src_cn = src.channels(); |
|
|
|
|
|
|
|
|
|
if (dst_cn < 0) |
|
|
|
|
dst_cn = src_cn; |
|
|
|
|
|
|
|
|
|
cv::gpu::ensureSizeIsEnough(src.size(), CV_MAKE_TYPE(src.depth(), dst_cn), dst); |
|
|
|
|
gpu::ensureSizeIsEnough(src.size(), CV_MAKE_TYPE(src.depth(), dst_cn), dst); |
|
|
|
|
|
|
|
|
|
if (src_cn == dst_cn) |
|
|
|
|
{ |
|
|
|
|
src.copyTo(dst); |
|
|
|
|
} |
|
|
|
|
else |
|
|
|
|
{ |
|
|
|
|
static const int cvt_codes[4][4] = |
|
|
|
|
{ |
|
|
|
|
{-1, -1, cv::COLOR_GRAY2BGR, cv::COLOR_GRAY2BGRA}, |
|
|
|
|
{-1, -1, COLOR_GRAY2BGR, COLOR_GRAY2BGRA}, |
|
|
|
|
{-1, -1, -1, -1}, |
|
|
|
|
{cv::COLOR_BGR2GRAY, -1, -1, cv::COLOR_BGR2BGRA}, |
|
|
|
|
{cv::COLOR_BGRA2GRAY, -1, cv::COLOR_BGRA2BGR, -1} |
|
|
|
|
{COLOR_BGR2GRAY, -1, -1, COLOR_BGR2BGRA}, |
|
|
|
|
{COLOR_BGRA2GRAY, -1, COLOR_BGRA2BGR, -1} |
|
|
|
|
}; |
|
|
|
|
|
|
|
|
|
const int cvt_code = cvt_codes[src_cn - 1][dst_cn - 1]; |
|
|
|
|
CV_DbgAssert( cvt_code >= 0 ); |
|
|
|
|
|
|
|
|
|
cv::gpu::cvtColor(src, dst, cvt_code, dst_cn); |
|
|
|
|
gpu::cvtColor(src, dst, cvt_code, dst_cn); |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
void cv::gpu::FGDStatModel::Impl::create(const cv::gpu::GpuMat& firstFrame, const cv::gpu::FGDStatModel::Params& params) |
|
|
|
|
{ |
|
|
|
|
CV_Assert(firstFrame.type() == CV_8UC3 || firstFrame.type() == CV_8UC4); |
|
|
|
|
|
|
|
|
|
params_ = params; |
|
|
|
|
|
|
|
|
|
cv::gpu::ensureSizeIsEnough(firstFrame.size(), CV_8UC1, foreground_); |
|
|
|
|
|
|
|
|
|
copyChannels(firstFrame, background_, out_cn_); |
|
|
|
|
|
|
|
|
|
copyChannels(firstFrame, prevFrame_); |
|
|
|
|
|
|
|
|
|
cv::gpu::ensureSizeIsEnough(firstFrame.size(), CV_8UC1, Ftd_); |
|
|
|
|
cv::gpu::ensureSizeIsEnough(firstFrame.size(), CV_8UC1, Fbd_); |
|
|
|
|
|
|
|
|
|
stat_.create(firstFrame.size(), params_, out_cn_); |
|
|
|
|
bgfg::setBGPixelStat(stat_); |
|
|
|
|
|
|
|
|
|
if (params_.perform_morphing > 0) |
|
|
|
|
{ |
|
|
|
|
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(1 + params_.perform_morphing * 2, 1 + params_.perform_morphing * 2)); |
|
|
|
|
cv::Point anchor(params_.perform_morphing, params_.perform_morphing); |
|
|
|
|
|
|
|
|
|
dilateFilter_ = cv::gpu::createMorphologyFilter(cv::MORPH_DILATE, CV_8UC1, kernel, anchor); |
|
|
|
|
erodeFilter_ = cv::gpu::createMorphologyFilter(cv::MORPH_ERODE, CV_8UC1, kernel, anchor); |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
void cv::gpu::FGDStatModel::Impl::release() |
|
|
|
|
{ |
|
|
|
|
background_.release(); |
|
|
|
|
foreground_.release(); |
|
|
|
|
|
|
|
|
|
prevFrame_.release(); |
|
|
|
|
Ftd_.release(); |
|
|
|
|
Fbd_.release(); |
|
|
|
|
stat_.release(); |
|
|
|
|
|
|
|
|
|
hist_.release(); |
|
|
|
|
histBuf_.release(); |
|
|
|
|
|
|
|
|
|
countBuf_.release(); |
|
|
|
|
|
|
|
|
|
buf_.release(); |
|
|
|
|
filterBrd_.release(); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
/////////////////////////////////////////////////////////////////////////
|
|
|
|
|
// changeDetection
|
|
|
|
|
|
|
|
|
|
namespace |
|
|
|
|
{ |
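// Computes per-channel 256-bin histograms of the difference between the previous and the current frame on the GPU.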
|
|
|
|
void calcDiffHistogram(const cv::gpu::GpuMat& prevFrame, const cv::gpu::GpuMat& curFrame, cv::gpu::GpuMat& hist, cv::gpu::GpuMat& histBuf) |
|
|
|
|
void calcDiffHistogram(const GpuMat& prevFrame, const GpuMat& curFrame, GpuMat& hist, GpuMat& histBuf) |
|
|
|
|
{ |
|
|
|
|
typedef void (*func_t)(cv::gpu::PtrStepSzb prevFrame, cv::gpu::PtrStepSzb curFrame, unsigned int* hist0, unsigned int* hist1, unsigned int* hist2, unsigned int* partialBuf0, unsigned int* partialBuf1, unsigned int* partialBuf2, bool cc20, cudaStream_t stream); |
|
|
|
|
typedef void (*func_t)(PtrStepSzb prevFrame, PtrStepSzb curFrame, |
|
|
|
|
unsigned int* hist0, unsigned int* hist1, unsigned int* hist2, |
|
|
|
|
unsigned int* partialBuf0, unsigned int* partialBuf1, unsigned int* partialBuf2, |
|
|
|
|
bool cc20, cudaStream_t stream); |
|
|
|
|
static const func_t funcs[4][4] = |
|
|
|
|
{ |
|
|
|
|
{0,0,0,0}, |
|
|
|
|
{0,0,0,0}, |
|
|
|
|
{0,0,bgfg::calcDiffHistogram_gpu<uchar3, uchar3>,bgfg::calcDiffHistogram_gpu<uchar3, uchar4>}, |
|
|
|
|
{0,0,bgfg::calcDiffHistogram_gpu<uchar4, uchar3>,bgfg::calcDiffHistogram_gpu<uchar4, uchar4>} |
|
|
|
|
{0,0,fgd::calcDiffHistogram_gpu<uchar3, uchar3>,fgd::calcDiffHistogram_gpu<uchar3, uchar4>}, |
|
|
|
|
{0,0,fgd::calcDiffHistogram_gpu<uchar4, uchar3>,fgd::calcDiffHistogram_gpu<uchar4, uchar4>} |
|
|
|
|
}; |
|
|
|
|
|
|
|
|
|
hist.create(3, 256, CV_32SC1); |
|
|
|
|
histBuf.create(3, bgfg::PARTIAL_HISTOGRAM_COUNT * bgfg::HISTOGRAM_BIN_COUNT, CV_32SC1); |
|
|
|
|
histBuf.create(3, fgd::PARTIAL_HISTOGRAM_COUNT * fgd::HISTOGRAM_BIN_COUNT, CV_32SC1); |
|
|
|
|
|
|
|
|
|
funcs[prevFrame.channels() - 1][curFrame.channels() - 1]( |
|
|
|
|
prevFrame, curFrame, |
|
|
|
|
hist.ptr<unsigned int>(0), hist.ptr<unsigned int>(1), hist.ptr<unsigned int>(2), |
|
|
|
|
histBuf.ptr<unsigned int>(0), histBuf.ptr<unsigned int>(1), histBuf.ptr<unsigned int>(2), |
|
|
|
|
cv::gpu::deviceSupports(cv::gpu::FEATURE_SET_COMPUTE_20), 0); |
|
|
|
|
deviceSupports(FEATURE_SET_COMPUTE_20), 0); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
void calcRelativeVariance(unsigned int hist[3 * 256], double relativeVariance[3][bgfg::HISTOGRAM_BIN_COUNT]) |
|
|
|
|
void calcRelativeVariance(unsigned int hist[3 * 256], double relativeVariance[3][fgd::HISTOGRAM_BIN_COUNT]) |
|
|
|
|
{ |
|
|
|
|
std::memset(relativeVariance, 0, 3 * bgfg::HISTOGRAM_BIN_COUNT * sizeof(double)); |
|
|
|
|
std::memset(relativeVariance, 0, 3 * fgd::HISTOGRAM_BIN_COUNT * sizeof(double)); |
|
|
|
|
|
|
|
|
|
for (int thres = bgfg::HISTOGRAM_BIN_COUNT - 2; thres >= 0; --thres) |
|
|
|
|
for (int thres = fgd::HISTOGRAM_BIN_COUNT - 2; thres >= 0; --thres) |
|
|
|
|
{ |
|
|
|
|
cv::Vec3d sum(0.0, 0.0, 0.0); |
|
|
|
|
cv::Vec3d sqsum(0.0, 0.0, 0.0); |
|
|
|
|
cv::Vec3i count(0, 0, 0); |
|
|
|
|
Vec3d sum(0.0, 0.0, 0.0); |
|
|
|
|
Vec3d sqsum(0.0, 0.0, 0.0); |
|
|
|
|
Vec3i count(0, 0, 0); |
|
|
|
|
|
|
|
|
|
for (int j = thres; j < bgfg::HISTOGRAM_BIN_COUNT; ++j) |
|
|
|
|
for (int j = thres; j < fgd::HISTOGRAM_BIN_COUNT; ++j) |
|
|
|
|
{ |
|
|
|
|
sum[0] += static_cast<double>(j) * hist[j]; |
|
|
|
|
sqsum[0] += static_cast<double>(j * j) * hist[j]; |
|
|
|
@@ -383,7 +204,7 @@ namespace
|
|
|
|
count[1] = std::max(count[1], 1); |
|
|
|
|
count[2] = std::max(count[2], 1); |
|
|
|
|
|
|
|
|
|
cv::Vec3d my( |
|
|
|
|
Vec3d my( |
|
|
|
|
sum[0] / count[0], |
|
|
|
|
sum[1] / count[1], |
|
|
|
|
sum[2] / count[2] |
|
|
|
@@ -395,37 +216,39 @@ namespace
|
|
|
|
} |
|
|
|
|
} |
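// Builds a binary change mask by thresholding the per-channel frame difference with bestThres.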
|
|
|
|
|
|
|
|
|
void calcDiffThreshMask(const cv::gpu::GpuMat& prevFrame, const cv::gpu::GpuMat& curFrame, cv::Vec3d bestThres, cv::gpu::GpuMat& changeMask) |
|
|
|
|
void calcDiffThreshMask(const GpuMat& prevFrame, const GpuMat& curFrame, Vec3d bestThres, GpuMat& changeMask) |
|
|
|
|
{ |
|
|
|
|
typedef void (*func_t)(cv::gpu::PtrStepSzb prevFrame, cv::gpu::PtrStepSzb curFrame, uchar3 bestThres, cv::gpu::PtrStepSzb changeMask, cudaStream_t stream); |
|
|
|
|
typedef void (*func_t)(PtrStepSzb prevFrame, PtrStepSzb curFrame, uchar3 bestThres, PtrStepSzb changeMask, cudaStream_t stream); |
|
|
|
|
static const func_t funcs[4][4] = |
|
|
|
|
{ |
|
|
|
|
{0,0,0,0}, |
|
|
|
|
{0,0,0,0}, |
|
|
|
|
{0,0,bgfg::calcDiffThreshMask_gpu<uchar3, uchar3>,bgfg::calcDiffThreshMask_gpu<uchar3, uchar4>}, |
|
|
|
|
{0,0,bgfg::calcDiffThreshMask_gpu<uchar4, uchar3>,bgfg::calcDiffThreshMask_gpu<uchar4, uchar4>} |
|
|
|
|
{0,0,fgd::calcDiffThreshMask_gpu<uchar3, uchar3>,fgd::calcDiffThreshMask_gpu<uchar3, uchar4>}, |
|
|
|
|
{0,0,fgd::calcDiffThreshMask_gpu<uchar4, uchar3>,fgd::calcDiffThreshMask_gpu<uchar4, uchar4>} |
|
|
|
|
}; |
|
|
|
|
|
|
|
|
|
changeMask.setTo(cv::Scalar::all(0)); |
|
|
|
|
changeMask.setTo(Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
funcs[prevFrame.channels() - 1][curFrame.channels() - 1](prevFrame, curFrame, make_uchar3((uchar)bestThres[0], (uchar)bestThres[1], (uchar)bestThres[2]), changeMask, 0); |
|
|
|
|
funcs[prevFrame.channels() - 1][curFrame.channels() - 1](prevFrame, curFrame, |
|
|
|
|
make_uchar3((uchar)bestThres[0], (uchar)bestThres[1], (uchar)bestThres[2]), |
|
|
|
|
changeMask, 0); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
// Performs change detection for the foreground detection algorithm.
|
|
|
|
|
void changeDetection(const cv::gpu::GpuMat& prevFrame, const cv::gpu::GpuMat& curFrame, cv::gpu::GpuMat& changeMask, cv::gpu::GpuMat& hist, cv::gpu::GpuMat& histBuf) |
|
|
|
|
void changeDetection(const GpuMat& prevFrame, const GpuMat& curFrame, GpuMat& changeMask, GpuMat& hist, GpuMat& histBuf) |
|
|
|
|
{ |
|
|
|
|
calcDiffHistogram(prevFrame, curFrame, hist, histBuf); |
|
|
|
|
|
|
|
|
|
unsigned int histData[3 * 256]; |
|
|
|
|
cv::Mat h_hist(3, 256, CV_32SC1, histData); |
|
|
|
|
Mat h_hist(3, 256, CV_32SC1, histData); |
|
|
|
|
hist.download(h_hist); |
|
|
|
|
|
|
|
|
|
double relativeVariance[3][bgfg::HISTOGRAM_BIN_COUNT]; |
|
|
|
|
double relativeVariance[3][fgd::HISTOGRAM_BIN_COUNT]; |
|
|
|
|
calcRelativeVariance(histData, relativeVariance); |
|
|
|
|
|
|
|
|
|
// Find maximum:
|
|
|
|
|
cv::Vec3d bestThres(10.0, 10.0, 10.0); |
|
|
|
|
for (int i = 0; i < bgfg::HISTOGRAM_BIN_COUNT; ++i) |
|
|
|
|
Vec3d bestThres(10.0, 10.0, 10.0); |
|
|
|
|
for (int i = 0; i < fgd::HISTOGRAM_BIN_COUNT; ++i) |
|
|
|
|
{ |
|
|
|
|
bestThres[0] = std::max(bestThres[0], relativeVariance[0][i]); |
|
|
|
|
bestThres[1] = std::max(bestThres[1], relativeVariance[1][i]); |
|
|
|
@@ -441,12 +264,12 @@ namespace
|
|
|
|
|
|
|
|
|
namespace |
|
|
|
|
{ |
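// Classifies every pixel as foreground or background from the accumulated statistics, scales the mask to 0/255 and returns the number of foreground pixels.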
|
|
|
|
int bgfgClassification(const cv::gpu::GpuMat& prevFrame, const cv::gpu::GpuMat& curFrame, |
|
|
|
|
const cv::gpu::GpuMat& Ftd, const cv::gpu::GpuMat& Fbd, |
|
|
|
|
cv::gpu::GpuMat& foreground, cv::gpu::GpuMat& countBuf, |
|
|
|
|
const cv::gpu::FGDStatModel::Params& params, int out_cn) |
|
|
|
|
int bgfgClassification(const GpuMat& prevFrame, const GpuMat& curFrame, |
|
|
|
|
const GpuMat& Ftd, const GpuMat& Fbd, |
|
|
|
|
GpuMat& foreground, GpuMat& countBuf, |
|
|
|
|
const FGDParams& params, int out_cn) |
|
|
|
|
{ |
|
|
|
|
typedef void (*func_t)(cv::gpu::PtrStepSzb prevFrame, cv::gpu::PtrStepSzb curFrame, cv::gpu::PtrStepSzb Ftd, cv::gpu::PtrStepSzb Fbd, cv::gpu::PtrStepSzb foreground, |
|
|
|
|
typedef void (*func_t)(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, |
|
|
|
|
int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, cudaStream_t stream); |
|
|
|
|
static const func_t funcs[4][4][4] = |
|
|
|
|
{ |
|
|
|
@@ -458,24 +281,26 @@ namespace
|
|
|
|
}, |
|
|
|
|
{ |
|
|
|
|
{0,0,0,0}, {0,0,0,0}, |
|
|
|
|
{0,0,bgfg::bgfgClassification_gpu<uchar3, uchar3, uchar3>,bgfg::bgfgClassification_gpu<uchar3, uchar3, uchar4>}, |
|
|
|
|
{0,0,bgfg::bgfgClassification_gpu<uchar3, uchar4, uchar3>,bgfg::bgfgClassification_gpu<uchar3, uchar4, uchar4>} |
|
|
|
|
{0,0,fgd::bgfgClassification_gpu<uchar3, uchar3, uchar3>,fgd::bgfgClassification_gpu<uchar3, uchar3, uchar4>}, |
|
|
|
|
{0,0,fgd::bgfgClassification_gpu<uchar3, uchar4, uchar3>,fgd::bgfgClassification_gpu<uchar3, uchar4, uchar4>} |
|
|
|
|
}, |
|
|
|
|
{ |
|
|
|
|
{0,0,0,0}, {0,0,0,0}, |
|
|
|
|
{0,0,bgfg::bgfgClassification_gpu<uchar4, uchar3, uchar3>,bgfg::bgfgClassification_gpu<uchar4, uchar3, uchar4>}, |
|
|
|
|
{0,0,bgfg::bgfgClassification_gpu<uchar4, uchar4, uchar3>,bgfg::bgfgClassification_gpu<uchar4, uchar4, uchar4>} |
|
|
|
|
{0,0,fgd::bgfgClassification_gpu<uchar4, uchar3, uchar3>,fgd::bgfgClassification_gpu<uchar4, uchar3, uchar4>}, |
|
|
|
|
{0,0,fgd::bgfgClassification_gpu<uchar4, uchar4, uchar3>,fgd::bgfgClassification_gpu<uchar4, uchar4, uchar4>} |
|
|
|
|
} |
|
|
|
|
}; |
|
|
|
|
|
|
|
|
|
const int deltaC = cvRound(params.delta * 256 / params.Lc); |
|
|
|
|
const int deltaCC = cvRound(params.delta * 256 / params.Lcc); |
|
|
|
|
|
|
|
|
|
funcs[prevFrame.channels() - 1][curFrame.channels() - 1][out_cn - 1](prevFrame, curFrame, Ftd, Fbd, foreground, deltaC, deltaCC, params.alpha2, params.N1c, params.N1cc, 0); |
|
|
|
|
funcs[prevFrame.channels() - 1][curFrame.channels() - 1][out_cn - 1](prevFrame, curFrame, Ftd, Fbd, foreground, |
|
|
|
|
deltaC, deltaCC, params.alpha2, |
|
|
|
|
params.N1c, params.N1cc, 0); |
|
|
|
|
|
|
|
|
|
int count = cv::gpu::countNonZero(foreground, countBuf); |
|
|
|
|
int count = gpu::countNonZero(foreground, countBuf); |
|
|
|
|
|
|
|
|
|
cv::gpu::multiply(foreground, cv::Scalar::all(255), foreground); |
|
|
|
|
gpu::multiply(foreground, Scalar::all(255), foreground); |
|
|
|
|
|
|
|
|
|
return count; |
|
|
|
|
} |
|
|
|
@@ -486,20 +311,20 @@ namespace
|
|
|
|
|
|
|
|
|
namespace |
|
|
|
|
{ |
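// Pads src with a constant border, runs the morphology filter and writes the result back at the original size.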
|
|
|
|
void morphology(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& dst, cv::gpu::GpuMat& filterBrd, int brd, cv::Ptr<cv::gpu::Filter>& filter, cv::Scalar brdVal) |
|
|
|
|
void morphology(const GpuMat& src, GpuMat& dst, GpuMat& filterBrd, int brd, Ptr<gpu::Filter>& filter, Scalar brdVal) |
|
|
|
|
{ |
|
|
|
|
cv::gpu::copyMakeBorder(src, filterBrd, brd, brd, brd, brd, cv::BORDER_CONSTANT, brdVal); |
|
|
|
|
filter->apply(filterBrd(cv::Rect(brd, brd, src.cols, src.rows)), dst); |
|
|
|
|
gpu::copyMakeBorder(src, filterBrd, brd, brd, brd, brd, BORDER_CONSTANT, brdVal); |
|
|
|
|
filter->apply(filterBrd(Rect(brd, brd, src.cols, src.rows)), dst); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
void smoothForeground(cv::gpu::GpuMat& foreground, cv::gpu::GpuMat& filterBrd, cv::gpu::GpuMat& buf, |
|
|
|
|
cv::Ptr<cv::gpu::Filter>& erodeFilter, cv::Ptr<cv::gpu::Filter>& dilateFilter, |
|
|
|
|
const cv::gpu::FGDStatModel::Params& params) |
|
|
|
|
void smoothForeground(GpuMat& foreground, GpuMat& filterBrd, GpuMat& buf, |
|
|
|
|
Ptr<gpu::Filter>& erodeFilter, Ptr<gpu::Filter>& dilateFilter, |
|
|
|
|
const FGDParams& params) |
|
|
|
|
{ |
|
|
|
|
const int brd = params.perform_morphing; |
|
|
|
|
|
|
|
|
|
const cv::Scalar erodeBrdVal = cv::Scalar::all(UCHAR_MAX); |
|
|
|
|
const cv::Scalar dilateBrdVal = cv::Scalar::all(0); |
|
|
|
|
const Scalar erodeBrdVal = Scalar::all(UCHAR_MAX); |
|
|
|
|
const Scalar dilateBrdVal = Scalar::all(0); |
|
|
|
|
|
|
|
|
|
// MORPH_OPEN
|
|
|
|
|
morphology(foreground, buf, filterBrd, brd, erodeFilter, erodeBrdVal); |
|
|
|
@@ -516,28 +341,28 @@ namespace
|
|
|
|
|
|
|
|
|
namespace |
|
|
|
|
{ |
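// Converts the C-API contour sequence tree into the vector-of-points representation used by the rest of the pipeline.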
|
|
|
|
void seqToContours(CvSeq* _ccontours, CvMemStorage* storage, cv::OutputArrayOfArrays _contours) |
|
|
|
|
void seqToContours(CvSeq* _ccontours, CvMemStorage* storage, OutputArrayOfArrays _contours) |
|
|
|
|
{ |
|
|
|
|
cv::Seq<CvSeq*> all_contours(cvTreeToNodeSeq(_ccontours, sizeof(CvSeq), storage)); |
|
|
|
|
Seq<CvSeq*> all_contours(cvTreeToNodeSeq(_ccontours, sizeof(CvSeq), storage)); |
|
|
|
|
|
|
|
|
|
size_t total = all_contours.size(); |
|
|
|
|
|
|
|
|
|
_contours.create((int) total, 1, 0, -1, true); |
|
|
|
|
|
|
|
|
|
cv::SeqIterator<CvSeq*> it = all_contours.begin(); |
|
|
|
|
SeqIterator<CvSeq*> it = all_contours.begin(); |
|
|
|
|
for (size_t i = 0; i < total; ++i, ++it) |
|
|
|
|
{ |
|
|
|
|
CvSeq* c = *it; |
|
|
|
|
((CvContour*)c)->color = (int)i; |
|
|
|
|
_contours.create((int)c->total, 1, CV_32SC2, (int)i, true); |
|
|
|
|
cv::Mat ci = _contours.getMat((int)i); |
|
|
|
|
Mat ci = _contours.getMat((int)i); |
|
|
|
|
CV_Assert( ci.isContinuous() ); |
|
|
|
|
cvCvtSeqToArray(c, ci.data); |
|
|
|
|
} |
|
|
|
|
} |
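// Extracts connected foreground regions on the CPU, redraws the accepted contours into the mask, uploads it back to the GPU and returns the region count.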
|
|
|
|
|
|
|
|
|
int findForegroundRegions(cv::gpu::GpuMat& d_foreground, cv::Mat& h_foreground, std::vector< std::vector<cv::Point> >& foreground_regions, |
|
|
|
|
CvMemStorage* storage, const cv::gpu::FGDStatModel::Params& params) |
|
|
|
|
int findForegroundRegions(GpuMat& d_foreground, Mat& h_foreground, std::vector< std::vector<Point> >& foreground_regions, |
|
|
|
|
CvMemStorage* storage, const FGDParams& params) |
|
|
|
|
{ |
|
|
|
|
int region_count = 0; |
|
|
|
|
|
|
|
|
@@ -581,7 +406,7 @@ namespace
|
|
|
|
seqToContours(first_seq, storage, foreground_regions); |
|
|
|
|
h_foreground.setTo(0); |
|
|
|
|
|
|
|
|
|
cv::drawContours(h_foreground, foreground_regions, -1, cv::Scalar::all(255), -1); |
|
|
|
|
drawContours(h_foreground, foreground_regions, -1, Scalar::all(255), -1); |
|
|
|
|
|
|
|
|
|
d_foreground.upload(h_foreground); |
|
|
|
|
|
|
|
|
@@ -594,12 +419,12 @@ namespace
|
|
|
|
|
|
|
|
|
namespace |
|
|
|
|
{ |
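// Updates the per-pixel statistical model and the background reference image from the current classification results.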
|
|
|
|
void updateBackgroundModel(const cv::gpu::GpuMat& prevFrame, const cv::gpu::GpuMat& curFrame, const cv::gpu::GpuMat& Ftd, const cv::gpu::GpuMat& Fbd, |
|
|
|
|
const cv::gpu::GpuMat& foreground, cv::gpu::GpuMat& background, |
|
|
|
|
const cv::gpu::FGDStatModel::Params& params) |
|
|
|
|
void updateBackgroundModel(const GpuMat& prevFrame, const GpuMat& curFrame, const GpuMat& Ftd, const GpuMat& Fbd, |
|
|
|
|
const GpuMat& foreground, GpuMat& background, |
|
|
|
|
const FGDParams& params) |
|
|
|
|
{ |
|
|
|
|
typedef void (*func_t)(cv::gpu::PtrStepSzb prevFrame, cv::gpu::PtrStepSzb curFrame, cv::gpu::PtrStepSzb Ftd, cv::gpu::PtrStepSzb Fbd, |
|
|
|
|
cv::gpu::PtrStepSzb foreground, cv::gpu::PtrStepSzb background, |
|
|
|
|
typedef void (*func_t)(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, |
|
|
|
|
PtrStepSzb foreground, PtrStepSzb background, |
|
|
|
|
int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T, cudaStream_t stream); |
|
|
|
|
static const func_t funcs[4][4][4] = |
|
|
|
|
{ |
|
|
|
@@ -611,13 +436,13 @@ namespace
|
|
|
|
}, |
|
|
|
|
{ |
|
|
|
|
{0,0,0,0}, {0,0,0,0}, |
|
|
|
|
{0,0,bgfg::updateBackgroundModel_gpu<uchar3, uchar3, uchar3>,bgfg::updateBackgroundModel_gpu<uchar3, uchar3, uchar4>}, |
|
|
|
|
{0,0,bgfg::updateBackgroundModel_gpu<uchar3, uchar4, uchar3>,bgfg::updateBackgroundModel_gpu<uchar3, uchar4, uchar4>} |
|
|
|
|
{0,0,fgd::updateBackgroundModel_gpu<uchar3, uchar3, uchar3>,fgd::updateBackgroundModel_gpu<uchar3, uchar3, uchar4>}, |
|
|
|
|
{0,0,fgd::updateBackgroundModel_gpu<uchar3, uchar4, uchar3>,fgd::updateBackgroundModel_gpu<uchar3, uchar4, uchar4>} |
|
|
|
|
}, |
|
|
|
|
{ |
|
|
|
|
{0,0,0,0}, {0,0,0,0}, |
|
|
|
|
{0,0,bgfg::updateBackgroundModel_gpu<uchar4, uchar3, uchar3>,bgfg::updateBackgroundModel_gpu<uchar4, uchar3, uchar4>}, |
|
|
|
|
{0,0,bgfg::updateBackgroundModel_gpu<uchar4, uchar4, uchar3>,bgfg::updateBackgroundModel_gpu<uchar4, uchar4, uchar4>} |
|
|
|
|
{0,0,fgd::updateBackgroundModel_gpu<uchar4, uchar3, uchar3>,fgd::updateBackgroundModel_gpu<uchar4, uchar3, uchar4>}, |
|
|
|
|
{0,0,fgd::updateBackgroundModel_gpu<uchar4, uchar4, uchar3>,fgd::updateBackgroundModel_gpu<uchar4, uchar4, uchar4>} |
|
|
|
|
} |
|
|
|
|
}; |
|
|
|
|
|
|
|
|
@@ -626,126 +451,271 @@ namespace
|
|
|
|
|
|
|
|
|
funcs[prevFrame.channels() - 1][curFrame.channels() - 1][background.channels() - 1]( |
|
|
|
|
prevFrame, curFrame, Ftd, Fbd, foreground, background, |
|
|
|
|
deltaC, deltaCC, params.alpha1, params.alpha2, params.alpha3, params.N1c, params.N1cc, params.N2c, params.N2cc, params.T, |
|
|
|
|
deltaC, deltaCC, params.alpha1, params.alpha2, params.alpha3, |
|
|
|
|
params.N1c, params.N1cc, params.N2c, params.N2cc, params.T, |
|
|
|
|
0); |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
/////////////////////////////////////////////////////////////////////////
|
|
|
|
|
// Impl::update
|
|
|
|
|
|
|
|
|
|
int cv::gpu::FGDStatModel::Impl::update(const cv::gpu::GpuMat& curFrame) |
|
|
|
|
namespace |
|
|
|
|
{ |
|
|
|
|
CV_Assert(curFrame.type() == CV_8UC3 || curFrame.type() == CV_8UC4); |
|
|
|
|
CV_Assert(curFrame.size() == prevFrame_.size()); |
|
|
|
|
class BGPixelStat |
|
|
|
|
{ |
|
|
|
|
public: |
|
|
|
|
void create(Size size, const FGDParams& params); |
|
|
|
|
|
|
|
|
|
void setTrained(); |
|
|
|
|
|
|
|
|
|
operator fgd::BGPixelStat(); |
|
|
|
|
|
|
|
|
|
cvClearMemStorage(storage_); |
|
|
|
|
foreground_regions_.clear(); |
|
|
|
|
foreground_.setTo(cv::Scalar::all(0)); |
|
|
|
|
private: |
|
|
|
|
GpuMat Pbc_; |
|
|
|
|
GpuMat Pbcc_; |
|
|
|
|
GpuMat is_trained_st_model_; |
|
|
|
|
GpuMat is_trained_dyn_model_; |
|
|
|
|
|
|
|
|
|
GpuMat ctable_Pv_; |
|
|
|
|
GpuMat ctable_Pvb_; |
|
|
|
|
GpuMat ctable_v_; |
|
|
|
|
|
|
|
|
|
GpuMat cctable_Pv_; |
|
|
|
|
GpuMat cctable_Pvb_; |
|
|
|
|
GpuMat cctable_v1_; |
|
|
|
|
GpuMat cctable_v2_; |
|
|
|
|
}; |
|
|
|
|
|
|
|
|
|
changeDetection(prevFrame_, curFrame, Ftd_, hist_, histBuf_); |
|
|
|
|
changeDetection(background_, curFrame, Fbd_, hist_, histBuf_); |
|
|
|
|
void BGPixelStat::create(Size size, const FGDParams& params) |
|
|
|
|
{ |
|
|
|
|
gpu::ensureSizeIsEnough(size, CV_32FC1, Pbc_); |
|
|
|
|
Pbc_.setTo(Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
int FG_pixels_count = bgfgClassification(prevFrame_, curFrame, Ftd_, Fbd_, foreground_, countBuf_, params_, out_cn_); |
|
|
|
|
gpu::ensureSizeIsEnough(size, CV_32FC1, Pbcc_); |
|
|
|
|
Pbcc_.setTo(Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
if (params_.perform_morphing > 0) |
|
|
|
|
smoothForeground(foreground_, filterBrd_, buf_, erodeFilter_, dilateFilter_, params_); |
|
|
|
|
gpu::ensureSizeIsEnough(size, CV_8UC1, is_trained_st_model_); |
|
|
|
|
is_trained_st_model_.setTo(Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
int region_count = 0; |
|
|
|
|
if (params_.minArea > 0 || params_.is_obj_without_holes) |
|
|
|
|
region_count = findForegroundRegions(foreground_, h_foreground_, foreground_regions_, storage_, params_); |
|
|
|
|
gpu::ensureSizeIsEnough(size, CV_8UC1, is_trained_dyn_model_); |
|
|
|
|
is_trained_dyn_model_.setTo(Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
// Check ALL BG update condition:
|
|
|
|
|
const double BGFG_FGD_BG_UPDATE_TRESH = 0.5; |
|
|
|
|
if (static_cast<double>(FG_pixels_count) / Ftd_.size().area() > BGFG_FGD_BG_UPDATE_TRESH) |
|
|
|
|
stat_.setTrained(); |
|
|
|
|
gpu::ensureSizeIsEnough(params.N2c * size.height, size.width, CV_32FC1, ctable_Pv_); |
|
|
|
|
ctable_Pv_.setTo(Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
updateBackgroundModel(prevFrame_, curFrame, Ftd_, Fbd_, foreground_, background_, params_); |
|
|
|
|
gpu::ensureSizeIsEnough(params.N2c * size.height, size.width, CV_32FC1, ctable_Pvb_); |
|
|
|
|
ctable_Pvb_.setTo(Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
copyChannels(curFrame, prevFrame_); |
|
|
|
|
gpu::ensureSizeIsEnough(params.N2c * size.height, size.width, CV_8UC4, ctable_v_); |
|
|
|
|
ctable_v_.setTo(Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
return region_count; |
|
|
|
|
} |
|
|
|
|
gpu::ensureSizeIsEnough(params.N2cc * size.height, size.width, CV_32FC1, cctable_Pv_); |
|
|
|
|
cctable_Pv_.setTo(Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
namespace |
|
|
|
|
{ |
|
|
|
|
// Default parameters of foreground detection algorithm:
|
|
|
|
|
const int BGFG_FGD_LC = 128; |
|
|
|
|
const int BGFG_FGD_N1C = 15; |
|
|
|
|
const int BGFG_FGD_N2C = 25; |
|
|
|
|
gpu::ensureSizeIsEnough(params.N2cc * size.height, size.width, CV_32FC1, cctable_Pvb_); |
|
|
|
|
cctable_Pvb_.setTo(Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
const int BGFG_FGD_LCC = 64; |
|
|
|
|
const int BGFG_FGD_N1CC = 25; |
|
|
|
|
const int BGFG_FGD_N2CC = 40; |
|
|
|
|
gpu::ensureSizeIsEnough(params.N2cc * size.height, size.width, CV_8UC4, cctable_v1_); |
|
|
|
|
cctable_v1_.setTo(Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
// Background reference image update parameter:
|
|
|
|
|
const float BGFG_FGD_ALPHA_1 = 0.1f; |
|
|
|
|
gpu::ensureSizeIsEnough(params.N2cc * size.height, size.width, CV_8UC4, cctable_v2_); |
|
|
|
|
cctable_v2_.setTo(Scalar::all(0)); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
// stat model update parameter
|
|
|
|
|
// 0.002f ~ 1K frame(~45sec), 0.005 ~ 18sec (if 25fps and absolutely static BG)
|
|
|
|
|
const float BGFG_FGD_ALPHA_2 = 0.005f; |
|
|
|
|
void BGPixelStat::setTrained() |
|
|
|
|
{ |
|
|
|
|
is_trained_st_model_.setTo(Scalar::all(1)); |
|
|
|
|
is_trained_dyn_model_.setTo(Scalar::all(1)); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
// start value for alpha parameter (to fast initiate statistic model)
|
|
|
|
|
const float BGFG_FGD_ALPHA_3 = 0.1f; |
|
|
|
|
BGPixelStat::operator fgd::BGPixelStat() |
|
|
|
|
{ |
|
|
|
|
fgd::BGPixelStat stat; |
|
|
|
|
|
|
|
|
|
const float BGFG_FGD_DELTA = 2.0f; |
|
|
|
|
stat.rows_ = Pbc_.rows; |
|
|
|
|
|
|
|
|
|
const float BGFG_FGD_T = 0.9f; |
|
|
|
|
stat.Pbc_data_ = Pbc_.data; |
|
|
|
|
stat.Pbc_step_ = Pbc_.step; |
|
|
|
|
|
|
|
|
|
const float BGFG_FGD_MINAREA = 15.0f;
|
|
|
|
} |
|
|
|
|
stat.Pbcc_data_ = Pbcc_.data; |
|
|
|
|
stat.Pbcc_step_ = Pbcc_.step; |
|
|
|
|
|
|
|
|
|
cv::gpu::FGDStatModel::Params::Params() |
|
|
|
|
{ |
|
|
|
|
Lc = BGFG_FGD_LC; |
|
|
|
|
N1c = BGFG_FGD_N1C; |
|
|
|
|
N2c = BGFG_FGD_N2C; |
|
|
|
|
stat.is_trained_st_model_data_ = is_trained_st_model_.data; |
|
|
|
|
stat.is_trained_st_model_step_ = is_trained_st_model_.step; |
|
|
|
|
|
|
|
|
|
Lcc = BGFG_FGD_LCC; |
|
|
|
|
N1cc = BGFG_FGD_N1CC; |
|
|
|
|
N2cc = BGFG_FGD_N2CC; |
|
|
|
|
stat.is_trained_dyn_model_data_ = is_trained_dyn_model_.data; |
|
|
|
|
stat.is_trained_dyn_model_step_ = is_trained_dyn_model_.step; |
|
|
|
|
|
|
|
|
|
delta = BGFG_FGD_DELTA; |
|
|
|
|
stat.ctable_Pv_data_ = ctable_Pv_.data; |
|
|
|
|
stat.ctable_Pv_step_ = ctable_Pv_.step; |
|
|
|
|
|
|
|
|
|
alpha1 = BGFG_FGD_ALPHA_1; |
|
|
|
|
alpha2 = BGFG_FGD_ALPHA_2; |
|
|
|
|
alpha3 = BGFG_FGD_ALPHA_3; |
|
|
|
|
stat.ctable_Pvb_data_ = ctable_Pvb_.data; |
|
|
|
|
stat.ctable_Pvb_step_ = ctable_Pvb_.step; |
|
|
|
|
|
|
|
|
|
T = BGFG_FGD_T; |
|
|
|
|
minArea = BGFG_FGD_MINAREA; |
|
|
|
|
stat.ctable_v_data_ = ctable_v_.data; |
|
|
|
|
stat.ctable_v_step_ = ctable_v_.step; |
|
|
|
|
|
|
|
|
|
is_obj_without_holes = true; |
|
|
|
|
perform_morphing = 1; |
|
|
|
|
} |
|
|
|
|
stat.cctable_Pv_data_ = cctable_Pv_.data; |
|
|
|
|
stat.cctable_Pv_step_ = cctable_Pv_.step; |
|
|
|
|
|
|
|
|
|
cv::gpu::FGDStatModel::FGDStatModel(int out_cn) |
|
|
|
|
{ |
|
|
|
|
impl_.reset(new Impl(background, foreground, foreground_regions, out_cn)); |
|
|
|
|
} |
|
|
|
|
stat.cctable_Pvb_data_ = cctable_Pvb_.data; |
|
|
|
|
stat.cctable_Pvb_step_ = cctable_Pvb_.step; |
|
|
|
|
|
|
|
|
|
cv::gpu::FGDStatModel::FGDStatModel(const cv::gpu::GpuMat& firstFrame, const Params& params, int out_cn) |
|
|
|
|
{ |
|
|
|
|
impl_.reset(new Impl(background, foreground, foreground_regions, out_cn)); |
|
|
|
|
create(firstFrame, params); |
|
|
|
|
} |
|
|
|
|
stat.cctable_v1_data_ = cctable_v1_.data; |
|
|
|
|
stat.cctable_v1_step_ = cctable_v1_.step; |
|
|
|
|
|
|
|
|
|
cv::gpu::FGDStatModel::~FGDStatModel() |
|
|
|
|
{ |
|
|
|
|
} |
|
|
|
|
stat.cctable_v2_data_ = cctable_v2_.data; |
|
|
|
|
stat.cctable_v2_step_ = cctable_v2_.step; |
|
|
|
|
|
|
|
|
|
void cv::gpu::FGDStatModel::create(const cv::gpu::GpuMat& firstFrame, const Params& params) |
|
|
|
|
{ |
|
|
|
|
impl_->create(firstFrame, params); |
|
|
|
|
} |
|
|
|
|
return stat; |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
void cv::gpu::FGDStatModel::release() |
|
|
|
|
{ |
|
|
|
|
impl_->release();
}
|
|
|
|
class FGDImpl : public gpu::BackgroundSubtractorFGD |
|
|
|
|
{ |
|
|
|
|
public: |
|
|
|
|
explicit FGDImpl(const FGDParams& params); |
|
|
|
|
~FGDImpl(); |
|
|
|
|
|
|
|
|
|
void apply(InputArray image, OutputArray fgmask, double learningRate=-1); |
|
|
|
|
|
|
|
|
|
void getBackgroundImage(OutputArray backgroundImage) const; |
|
|
|
|
|
|
|
|
|
void getForegroundRegions(OutputArrayOfArrays foreground_regions); |
|
|
|
|
|
|
|
|
|
private: |
|
|
|
|
void initialize(const GpuMat& firstFrame); |
|
|
|
|
|
|
|
|
|
FGDParams params_; |
|
|
|
|
Size frameSize_; |
|
|
|
|
|
|
|
|
|
GpuMat background_; |
|
|
|
|
GpuMat foreground_; |
|
|
|
|
std::vector< std::vector<Point> > foreground_regions_; |
|
|
|
|
|
|
|
|
|
Mat h_foreground_; |
|
|
|
|
|
|
|
|
|
GpuMat prevFrame_; |
|
|
|
|
GpuMat Ftd_; |
|
|
|
|
GpuMat Fbd_; |
|
|
|
|
BGPixelStat stat_; |
|
|
|
|
|
|
|
|
|
GpuMat hist_; |
|
|
|
|
GpuMat histBuf_; |
|
|
|
|
|
|
|
|
|
GpuMat countBuf_; |
|
|
|
|
|
|
|
|
|
GpuMat buf_; |
|
|
|
|
GpuMat filterBrd_; |
|
|
|
|
|
|
|
|
|
Ptr<gpu::Filter> dilateFilter_; |
|
|
|
|
Ptr<gpu::Filter> erodeFilter_; |
|
|
|
|
|
|
|
|
|
CvMemStorage* storage_; |
|
|
|
|
}; |
|
|
|
|
|
|
|
|
|
FGDImpl::FGDImpl(const FGDParams& params) : params_(params), frameSize_(0, 0) |
|
|
|
|
{ |
|
|
|
|
storage_ = cvCreateMemStorage(); |
|
|
|
|
CV_Assert( storage_ != 0 ); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
FGDImpl::~FGDImpl() |
|
|
|
|
{ |
|
|
|
|
cvReleaseMemStorage(&storage_); |
|
|
|
|
} |
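// Per-frame pipeline: change detection against the previous frame and the background, foreground/background classification, optional morphological smoothing, region extraction and background model update.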
|
|
|
|
|
|
|
|
|
void FGDImpl::apply(InputArray _frame, OutputArray fgmask, double) |
|
|
|
|
{ |
|
|
|
|
GpuMat curFrame = _frame.getGpuMat(); |
|
|
|
|
|
|
|
|
|
if (curFrame.size() != frameSize_) |
|
|
|
|
{ |
|
|
|
|
initialize(curFrame); |
|
|
|
|
return; |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
CV_Assert( curFrame.type() == CV_8UC3 || curFrame.type() == CV_8UC4 ); |
|
|
|
|
CV_Assert( curFrame.size() == prevFrame_.size() ); |
|
|
|
|
|
|
|
|
|
cvClearMemStorage(storage_); |
|
|
|
|
foreground_regions_.clear(); |
|
|
|
|
foreground_.setTo(Scalar::all(0)); |
|
|
|
|
|
|
|
|
|
changeDetection(prevFrame_, curFrame, Ftd_, hist_, histBuf_); |
|
|
|
|
changeDetection(background_, curFrame, Fbd_, hist_, histBuf_); |
|
|
|
|
|
|
|
|
|
int FG_pixels_count = bgfgClassification(prevFrame_, curFrame, Ftd_, Fbd_, foreground_, countBuf_, params_, 4); |
|
|
|
|
|
|
|
|
|
if (params_.perform_morphing > 0) |
|
|
|
|
smoothForeground(foreground_, filterBrd_, buf_, erodeFilter_, dilateFilter_, params_); |
|
|
|
|
|
|
|
|
|
if (params_.minArea > 0 || params_.is_obj_without_holes) |
|
|
|
|
findForegroundRegions(foreground_, h_foreground_, foreground_regions_, storage_, params_); |
|
|
|
|
|
|
|
|
|
// Check ALL BG update condition:
|
|
|
|
|
const double BGFG_FGD_BG_UPDATE_TRESH = 0.5; |
|
|
|
|
if (static_cast<double>(FG_pixels_count) / Ftd_.size().area() > BGFG_FGD_BG_UPDATE_TRESH) |
|
|
|
|
stat_.setTrained(); |
|
|
|
|
|
|
|
|
|
updateBackgroundModel(prevFrame_, curFrame, Ftd_, Fbd_, foreground_, background_, params_); |
|
|
|
|
|
|
|
|
|
copyChannels(curFrame, prevFrame_, 4); |
|
|
|
|
|
|
|
|
|
foreground_.copyTo(fgmask); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
void FGDImpl::getBackgroundImage(OutputArray backgroundImage) const |
|
|
|
|
{ |
|
|
|
|
gpu::cvtColor(background_, backgroundImage, COLOR_BGRA2BGR); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
void FGDImpl::getForegroundRegions(OutputArrayOfArrays dst) |
|
|
|
|
{ |
|
|
|
|
size_t total = foreground_regions_.size(); |
|
|
|
|
|
|
|
|
|
dst.create((int) total, 1, 0, -1, true); |
|
|
|
|
|
|
|
|
|
for (size_t i = 0; i < total; ++i) |
|
|
|
|
{ |
|
|
|
|
std::vector<Point>& c = foreground_regions_[i]; |
|
|
|
|
|
|
|
|
|
dst.create((int) c.size(), 1, CV_32SC2, (int) i, true); |
|
|
|
|
Mat ci = dst.getMat((int) i); |
|
|
|
|
|
|
|
|
|
Mat(ci.size(), ci.type(), &c[0]).copyTo(ci); |
|
|
|
|
} |
|
|
|
|
} |
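// (Re)allocates the buffers for a new frame size, resets the pixel statistics and creates the morphology filters.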
|
|
|
|
|
|
|
|
|
void FGDImpl::initialize(const GpuMat& firstFrame) |
|
|
|
|
{ |
|
|
|
|
CV_Assert( firstFrame.type() == CV_8UC3 || firstFrame.type() == CV_8UC4 ); |
|
|
|
|
|
|
|
|
|
frameSize_ = firstFrame.size(); |
|
|
|
|
|
|
|
|
|
gpu::ensureSizeIsEnough(firstFrame.size(), CV_8UC1, foreground_); |
|
|
|
|
|
|
|
|
|
copyChannels(firstFrame, background_, 4); |
|
|
|
|
copyChannels(firstFrame, prevFrame_, 4); |
|
|
|
|
|
|
|
|
|
gpu::ensureSizeIsEnough(firstFrame.size(), CV_8UC1, Ftd_); |
|
|
|
|
gpu::ensureSizeIsEnough(firstFrame.size(), CV_8UC1, Fbd_); |
|
|
|
|
|
|
|
|
|
stat_.create(firstFrame.size(), params_); |
|
|
|
|
fgd::setBGPixelStat(stat_); |
|
|
|
|
|
|
|
|
|
if (params_.perform_morphing > 0) |
|
|
|
|
{ |
|
|
|
|
Mat kernel = getStructuringElement(MORPH_RECT, Size(1 + params_.perform_morphing * 2, 1 + params_.perform_morphing * 2)); |
|
|
|
|
Point anchor(params_.perform_morphing, params_.perform_morphing); |
|
|
|
|
|
|
|
|
|
dilateFilter_ = gpu::createMorphologyFilter(MORPH_DILATE, CV_8UC1, kernel, anchor); |
|
|
|
|
erodeFilter_ = gpu::createMorphologyFilter(MORPH_ERODE, CV_8UC1, kernel, anchor); |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
int cv::gpu::FGDStatModel::update(const cv::gpu::GpuMat& curFrame)
{
return impl_->update(curFrame);
}

Ptr<gpu::BackgroundSubtractorFGD> cv::gpu::createBackgroundSubtractorFGD(const FGDParams& params)
{
return new FGDImpl(params);
}
|
|
|
|
|
|
|
|
|
#endif // HAVE_CUDA
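// Usage sketch (illustrative only; assumes the gpubgsegm public header that declares
// BackgroundSubtractorFGD, FGDParams and createBackgroundSubtractorFGD is included by the caller):
//
//   cv::gpu::GpuMat d_frame, d_fgmask;
//   cv::Ptr<cv::gpu::BackgroundSubtractorFGD> fgd = cv::gpu::createBackgroundSubtractorFGD(cv::gpu::FGDParams());
//   for (;;)
//   {
//       // upload the next CV_8UC3 or CV_8UC4 video frame into d_frame, then:
//       fgd->apply(d_frame, d_fgmask);
//   }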
|
|
|
|
|