New KNN code - should compile now

pull/2233/head
unknown 11 years ago
parent afa62c4161
commit 14b1e8c7f1
  1. modules/video/doc/motion_analysis_and_object_tracking.rst (140 changed lines)
  2. modules/video/include/opencv2/video/background_segm.hpp (41 changed lines)
  3. modules/video/src/bgfg_KNN.cpp (651 changed lines)
  4. samples/cpp/bgfg_segm.cpp (2 changed lines)

@@ -596,7 +596,7 @@ Returns the number of gaussian components in the background model
BackgroundSubtractorMOG2::setNMixtures
--------------------------------------
Sets the number of gaussian components in the background model
Sets the number of gaussian components in the background model. The model needs to be reinitialized to reserve memory.
.. ocv:function:: void BackgroundSubtractorMOG2::setNMixtures(int nmixtures)
@@ -615,9 +615,23 @@ Sets the "background ratio" parameter of the algorithm
.. ocv:function:: void BackgroundSubtractorMOG2::setBackgroundRatio(double ratio)
BackgroundSubtractorMOG2::getVarThreshold
---------------------------------------------
Returns the variance threshold for the pixel-model match
.. ocv:function:: double BackgroundSubtractorMOG2::getVarThreshold() const
The main threshold on the squared Mahalanobis distance to decide if the sample is well described by the background model or not. Related to Cthr from the paper.
BackgroundSubtractorMOG2::setVarThreshold
---------------------------------------------
Sets the variance threshold for the pixel-model match
.. ocv:function:: void BackgroundSubtractorMOG2::setVarThreshold(double varThreshold)
BackgroundSubtractorMOG2::getVarThresholdGen
---------------------------------------------
Returns the variance scale factor for the pixel-model match
Returns the variance threshold for the pixel-model match used for new mixture component generation
.. ocv:function:: double BackgroundSubtractorMOG2::getVarThresholdGen() const
@@ -625,7 +639,7 @@ Threshold for the squared Mahalanobis distance that helps decide when a sample i
BackgroundSubtractorMOG2::setVarThresholdGen
---------------------------------------------
Sets the variance scale factor for the pixel-model match
Sets the variance threshold for the pixel-model match used for new mixture component generation
.. ocv:function:: void BackgroundSubtractorMOG2::setVarThresholdGen(double varThresholdGen)
@@ -700,6 +714,126 @@ Sets the shadow threshold
.. ocv:function:: void BackgroundSubtractorMOG2::setShadowThreshold(double threshold)
BackgroundSubtractorKNN
------------------------
K-nearest neighbours based Background/Foreground Segmentation Algorithm.
.. ocv:class:: BackgroundSubtractorKNN : public BackgroundSubtractor
The class implements the K-nearest neighbours background subtraction described in [Zivkovic2006]_. It is very efficient if the number of foreground pixels is low.
createBackgroundSubtractorKNN
--------------------------------------------------
Creates KNN Background Subtractor
.. ocv:function:: Ptr<BackgroundSubtractorKNN> createBackgroundSubtractorKNN( int history=500, double dist2Threshold=400.0, bool detectShadows=true )
:param history: Length of the history.
:param dist2Threshold: Threshold on the squared distance between the pixel and the sample to decide whether a pixel is close to that sample. This parameter does not affect the background update.
:param detectShadows: If true, the algorithm will detect shadows and mark them. It decreases the speed a bit, so if you do not need this feature, set the parameter to false.
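A minimal usage sketch (not part of this patch; the input file name is only illustrative): the factory is typically combined with ``BackgroundSubtractor::apply``, which fills an 8-bit mask where 0 marks background, 255 marks foreground, and the shadow value (127 by default) marks shadows when ``detectShadows`` is true. ::

    #include <opencv2/opencv.hpp>

    int main()
    {
        cv::VideoCapture cap("input.avi");   // hypothetical input video
        cv::Ptr<cv::BackgroundSubtractorKNN> knn =
            cv::createBackgroundSubtractorKNN(500, 400.0, true);

        cv::Mat frame, fgmask;
        while (cap.read(frame))
        {
            knn->apply(frame, fgmask);       // default learningRate = -1 (automatic)
            // fgmask now holds 0 (background), 255 (foreground), 127 (shadow)
        }
        return 0;
    }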
BackgroundSubtractorKNN::getHistory
--------------------------------------
Returns the number of last frames that affect the background model
.. ocv:function:: int BackgroundSubtractorKNN::getHistory() const
BackgroundSubtractorKNN::setHistory
--------------------------------------
Sets the number of last frames that affect the background model
.. ocv:function:: void BackgroundSubtractorKNN::setHistory(int history)
BackgroundSubtractorKNN::getNSamples
--------------------------------------
Returns the number of data samples in the background model
.. ocv:function:: int BackgroundSubtractorKNN::getNSamples() const
BackgroundSubtractorKNN::setNSamples
--------------------------------------
Sets the number of data samples in the background model. The model needs to be reinitialized to reserve memory.
.. ocv:function:: void BackgroundSubtractorKNN::setNSamples(int nN)
BackgroundSubtractorKNN::getDist2Threshold
---------------------------------------------
Returns the threshold on the squared distance between the pixel and the sample
.. ocv:function:: double BackgroundSubtractorKNN::getDist2Threshold() const
The threshold on the squared distance between the pixel and the sample to decide whether a pixel is close to a data sample.
BackgroundSubtractorKNN::setDist2Threshold
---------------------------------------------
Sets the threshold on the squared distance
.. ocv:function:: void BackgroundSubtractorKNN::setDist2Threshold(double dist2Threshold)
BackgroundSubtractorKNN::getkNNSamples
---------------------------------------------
Returns the k in the kNN. K is the number of samples that need to be within dist2Threshold in order to decide that the pixel matches the kNN background model.
.. ocv:function:: int BackgroundSubtractorKNN::getkNNSamples() const
BackgroundSubtractorKNN::setkNNSamples
---------------------------------------------
Sets the k in the kNN. How many nearest neighbours need to match.
.. ocv:function:: void BackgroundSubtractorKNN::setkNNSamples(int nKNN)
BackgroundSubtractorKNN::getDetectShadows
---------------------------------------------
Returns the shadow detection flag
.. ocv:function:: bool BackgroundSubtractorKNN::getDetectShadows() const
If true, the algorithm detects shadows and marks them. See createBackgroundSubtractorKNN for details.
BackgroundSubtractorKNN::setDetectShadows
---------------------------------------------
Enables or disables shadow detection
.. ocv:function:: void BackgroundSubtractorKNN::setDetectShadows(bool detectShadows)
BackgroundSubtractorKNN::getShadowValue
---------------------------------------------
Returns the shadow value
.. ocv:function:: int BackgroundSubtractorKNN::getShadowValue() const
Shadow value is the value used to mark shadows in the foreground mask. Default value is 127. Value 0 in the mask always means background, 255 means foreground.
BackgroundSubtractorKNN::setShadowValue
---------------------------------------------
Sets the shadow value
.. ocv:function:: void BackgroundSubtractorKNN::setShadowValue(int value)
BackgroundSubtractorKNN::getShadowThreshold
---------------------------------------------
Returns the shadow threshold
.. ocv:function:: double BackgroundSubtractorKNN::getShadowThreshold() const
A shadow is detected if a pixel is a darker version of the background. The shadow threshold (``Tau`` in the paper) is a threshold defining how much darker the shadow can be. ``Tau = 0.5`` means that if a pixel is more than twice as dark as the background, it is not a shadow. See Prati, Mikic, Trivedi and Cucchiara, *Detecting Moving Shadows...*, IEEE PAMI, 2003.
BackgroundSubtractorKNN::setShadowThreshold
---------------------------------------------
Sets the shadow threshold
.. ocv:function:: void BackgroundSubtractorKNN::setShadowThreshold(double threshold)
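For reference, the test controlled by ``Tau`` can be sketched as follows (a simplified 3-channel version of the shadow check in ``bgfg_KNN.cpp`` added by this commit; ``pixel``, ``bg`` and ``Tb`` are illustrative names). ::

    // True if 'pixel' looks like a shadow cast over the background sample 'bg'.
    static bool isShadow(const cv::Vec3f& pixel, const cv::Vec3f& bg,
                         float tau, float Tb)
    {
        float num = pixel.dot(bg);   // <I, B>
        float den = bg.dot(bg);      // <B, B>
        if (den == 0.f)
            return false;
        // brightness ratio a = <I,B>/<B,B>; a shadow must be darker (a <= 1)
        // but not darker than allowed by tau (a >= tau)
        if (num > den || num < tau * den)
            return false;
        float a = num / den;
        cv::Vec3f d = a * bg - pixel;
        // finally the colour distortion must stay below the distance threshold
        return d.dot(d) < Tb * a * a;
    }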
BackgroundSubtractorGMG
------------------------
Background Subtractor module based on the algorithm given in [Gold2012]_.

@@ -113,7 +113,7 @@ public:
CV_WRAP virtual void setHistory(int history) = 0;
CV_WRAP virtual int getNMixtures() const = 0;
CV_WRAP virtual void setNMixtures(int nmixtures) = 0;
CV_WRAP virtual void setNMixtures(int nmixtures) = 0;//needs reinitialization!
CV_WRAP virtual double getBackgroundRatio() const = 0;
CV_WRAP virtual void setBackgroundRatio(double ratio) = 0;
@@ -150,6 +150,45 @@ CV_EXPORTS_W Ptr<BackgroundSubtractorMOG2>
createBackgroundSubtractorMOG2(int history=500, double varThreshold=16,
bool detectShadows=true);
/*!
The class implements the K-nearest neighbours algorithm from:
"Efficient Adaptive Density Estimation per Image Pixel for the Task of Background Subtraction"
Z.Zivkovic, F. van der Heijden
Pattern Recognition Letters, vol. 27, no. 7, pages 773-780, 2006
http://www.zoranz.net/Publications/zivkovicPRL2006.pdf
Fast for small foreground objects. Results on the benchmark data are at http://www.changedetection.net.
*/
class CV_EXPORTS_W BackgroundSubtractorKNN : public BackgroundSubtractor
{
public:
CV_WRAP virtual int getHistory() const = 0;
CV_WRAP virtual void setHistory(int history) = 0;
CV_WRAP virtual int getNSamples() const = 0;
CV_WRAP virtual void setNSamples(int _nN) = 0;//needs reinitialization!
CV_WRAP virtual double getDist2Threshold() const = 0;
CV_WRAP virtual void setDist2Threshold(double _dist2Threshold) = 0;
CV_WRAP virtual int getkNNSamples() const = 0;
CV_WRAP virtual void setkNNSamples(int _nkNN) = 0;
CV_WRAP virtual bool getDetectShadows() const = 0;
CV_WRAP virtual void setDetectShadows(bool detectShadows) = 0;
CV_WRAP virtual int getShadowValue() const = 0;
CV_WRAP virtual void setShadowValue(int value) = 0;
CV_WRAP virtual double getShadowThreshold() const = 0;
CV_WRAP virtual void setShadowThreshold(double threshold) = 0;
};
CV_EXPORTS_W Ptr<BackgroundSubtractorKNN>
createBackgroundSubtractorKNN(int history=500, double dist2Threshold=400.0,
bool detectShadows=true);
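// Illustrative only (not part of this commit): since the parameters above are
// exposed through accessors, a subtractor can be tuned after creation, e.g.
//
//   Ptr<BackgroundSubtractorKNN> knn = createBackgroundSubtractorKNN();
//   knn->setkNNSamples(3);          // k nearest neighbours that must match
//   knn->setDist2Threshold(600.0);  // threshold on the squared distance
//   knn->setNSamples(10);           // needs reinitialization, as noted above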
/**
* Background Subtractor module. Takes a series of images and returns a sequence of mask (8UC1)
* images of the same size, where 255 indicates Foreground and 0 represents Background.

@@ -0,0 +1,651 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
//#include <math.h>
#include "precomp.hpp"
namespace cv
{
/*!
The class implements the following algorithm:
"Efficient Adaptive Density Estimation per Image Pixel for the Task of Background Subtraction"
Z.Zivkovic, F. van der Heijden
Pattern Recognition Letters, vol. 27, no. 7, pages 773-780, 2006
http://www.zoranz.net/Publications/zivkovicPRL2006.pdf
*/
// default parameters of gaussian background detection algorithm
static const int defaultHistory2 = 500; // Learning rate; alpha = 1/defaultHistory2
static const int defaultNsamples = 7; // number of samples saved in memory
static const float defaultDist2Threshold = 20.0f*20.0f;//threshold on distance from the sample
// additional parameters
static const unsigned char defaultnShadowDetection2 = (unsigned char)127; // value to use in the segmentation mask for shadows, set 0 not to do shadow detection
static const float defaultfTau = 0.5f; // Tau - shadow threshold, see the paper for explanation
class BackgroundSubtractorKNNImpl : public BackgroundSubtractorKNN
{
public:
//! the default constructor
BackgroundSubtractorKNNImpl()
{
frameSize = Size(0,0);
frameType = 0;
nframes = 0;
history = defaultHistory2;
//set parameters
// N - the number of samples stored in memory per model
nN = defaultNsamples;
//kNN - k nearest neighbour - number of NN for detecting background - default K=[0.1*nN]
nkNN=MAX(1,cvRound(0.1*nN*3+0.40));
//Tb - Threshold Tb*kernelwidth
fTb = defaultDist2Threshold;
// Shadow detection
bShadowDetection = 1;//turn on
nShadowDetection = defaultnShadowDetection2;
fTau = defaultfTau;// Tau - shadow threshold
name_ = "BackgroundSubtractor.KNN";
}
//! the full constructor that takes the length of the history,
// the distance threshold and the shadow detection flag
BackgroundSubtractorKNNImpl(int _history, float _dist2Threshold, bool _bShadowDetection=true)
{
frameSize = Size(0,0);
frameType = 0;
nframes = 0;
history = _history > 0 ? _history : defaultHistory2;
//set parameters
// N - the number of samples stored in memory per model
nN = defaultNsamples;
//kNN - k nearest neighbour - number of NN for detecting background - default K=[0.1*nN]
nkNN=MAX(1,cvRound(0.1*nN*3+0.40));
//Tb - Threshold Tb*kernelwidth
fTb = _dist2Threshold>0? _dist2Threshold : defaultDist2Threshold;
bShadowDetection = _bShadowDetection;
nShadowDetection = defaultnShadowDetection2;
fTau = defaultfTau;
name_ = "BackgroundSubtractor.KNN";
}
//! the destructor
~BackgroundSubtractorKNNImpl() {}
//! the update operator
void apply(InputArray image, OutputArray fgmask, double learningRate=-1);
//! computes a background image from the stored background samples
virtual void getBackgroundImage(OutputArray backgroundImage) const;
//! re-initialization method
void initialize(Size _frameSize, int _frameType)
{
frameSize = _frameSize;
frameType = _frameType;
nframes = 0;
int nchannels = CV_MAT_CN(frameType);
CV_Assert( nchannels <= CV_CN_MAX );
// Reserve memory for the model
int size=frameSize.height*frameSize.width;
// for each pixel we store nN samples in each of the 3 (short/mid/long term) models;
// each sample holds the pixel values + a flag (nchannels+1 bytes)
bgmodel.create( 1,(nN * 3) * (nchannels+1)* size,CV_8U);
//index through the three circular lists
aModelIndexShort.create(1,size,CV_8U);
aModelIndexMid.create(1,size,CV_8U);
aModelIndexLong.create(1,size,CV_8U);
//when to update next
nNextShortUpdate.create(1,size,CV_8U);
nNextMidUpdate.create(1,size,CV_8U);
nNextLongUpdate.create(1,size,CV_8U);
//Reset counters
nShortCounter = 0;
nMidCounter = 0;
nLongCounter = 0;
aModelIndexShort = Scalar::all(0);//random? //((m_nN)*rand())/(RAND_MAX+1);//0...m_nN-1
aModelIndexMid = Scalar::all(0);
aModelIndexLong = Scalar::all(0);
nNextShortUpdate = Scalar::all(0);
nNextMidUpdate = Scalar::all(0);
nNextLongUpdate = Scalar::all(0);
}
virtual AlgorithmInfo* info() const { return 0; }
virtual int getHistory() const { return history; }
virtual void setHistory(int _nframes) { history = _nframes; }
virtual int getNSamples() const { return nN; }
virtual void setNSamples(int _nN) { nN = _nN; }//needs reinitialization!
virtual int getkNNSamples() const { return nkNN; }
virtual void setkNNSamples(int _nkNN) { nkNN = _nkNN; }
virtual double getDist2Threshold() const { return fTb; }
virtual void setDist2Threshold(double _dist2Threshold) { fTb = (float)_dist2Threshold; }
virtual bool getDetectShadows() const { return bShadowDetection; }
virtual void setDetectShadows(bool detectshadows) { bShadowDetection = detectshadows; }
virtual int getShadowValue() const { return nShadowDetection; }
virtual void setShadowValue(int value) { nShadowDetection = (uchar)value; }
virtual double getShadowThreshold() const { return fTau; }
virtual void setShadowThreshold(double value) { fTau = (float)value; }
virtual void write(FileStorage& fs) const
{
fs << "name" << name_
<< "history" << history
<< "nsamples" << nN
<< "nKNN" << nkNN
<< "dist2Threshold" << fTb
<< "detectShadows" << (int)bShadowDetection
<< "shadowValue" << (int)nShadowDetection
<< "shadowThreshold" << fTau;
}
virtual void read(const FileNode& fn)
{
CV_Assert( (String)fn["name"] == name_ );
history = (int)fn["history"];
nN = (int)fn["nsamples"];
nkNN = (int)fn["nKNN"];
fTb = (float)fn["dist2Threshold"];
bShadowDetection = (int)fn["detectShadows"] != 0;
nShadowDetection = saturate_cast<uchar>((int)fn["shadowValue"]);
fTau = (float)fn["shadowThreshold"];
}
protected:
Size frameSize;
int frameType;
int nframes;
/////////////////////////
//very important parameters - things you will change
////////////////////////
int history;
//alpha=1/history - speed of update - if the time interval you want to average over is T
//set alpha=1/history. It is also useful at start to make T slowly increase
//from 1 until the desired T
float fTb;
//Tb - threshold on the squared distance from the sample used to decide if it is well described
//by the background model or not. A typical value could be 2 sigma
//and that is Tb=2*2*10*10 =400; where we take typical pixel level sigma=10
/////////////////////////
//less important parameters - things you might change but be careful
////////////////////////
int nN;//total number of samples
int nkNN;//number of NN for detecting background - default K=[0.1*nN]
//shadow detection parameters
bool bShadowDetection;//default 1 - do shadow detection
unsigned char nShadowDetection;//do shadow detection - insert this value as the detection result - 127 default value
float fTau;
// Tau - shadow threshold. The shadow is detected if the pixel is darker
//version of the background. Tau is a threshold on how much darker the shadow can be.
//Tau = 0.5 means that if a pixel is more than 2 times darker, then it is not a shadow.
//See: Prati, Mikic, Trivedi, Cucchiara, "Detecting Moving Shadows...", IEEE PAMI, 2003.
//model data
int nLongCounter;//circular counter
int nMidCounter;
int nShortCounter;
Mat bgmodel; // model data pixel values
Mat aModelIndexShort;// index into the models
Mat aModelIndexMid;
Mat aModelIndexLong;
Mat nNextShortUpdate;//random update points per model
Mat nNextMidUpdate;
Mat nNextLongUpdate;
String name_;
};
//{ to do - parallelization ...
//struct KNNInvoker....
CV_INLINE void
_cvUpdatePixelBackgroundNP( long pixel,const uchar* data, int nchannels, int m_nN,
uchar* m_aModel,
uchar* m_nNextLongUpdate,
uchar* m_nNextMidUpdate,
uchar* m_nNextShortUpdate,
uchar* m_aModelIndexLong,
uchar* m_aModelIndexMid,
uchar* m_aModelIndexShort,
int m_nLongCounter,
int m_nMidCounter,
int m_nShortCounter,
int m_nLongUpdate,
int m_nMidUpdate,
int m_nShortUpdate,
uchar include
)
{
// hold the offset
int ndata=1+nchannels;
long offsetLong = ndata * (pixel * m_nN * 3 + m_aModelIndexLong[pixel] + m_nN * 2);
long offsetMid = ndata * (pixel * m_nN * 3 + m_aModelIndexMid[pixel] + m_nN * 1);
long offsetShort = ndata * (pixel * m_nN * 3 + m_aModelIndexShort[pixel]);
// Long update?
if (m_nNextLongUpdate[pixel] == m_nLongCounter)
{
// add the oldest pixel from Mid to the list of values (for each color)
memcpy(&m_aModel[offsetLong],&m_aModel[offsetMid],ndata*sizeof(unsigned char));
// increase the index
m_aModelIndexLong[pixel] = (m_aModelIndexLong[pixel] >= (m_nN-1)) ? 0 : (m_aModelIndexLong[pixel] + 1);
};
if (m_nLongCounter == (m_nLongUpdate-1))
{
//m_nNextLongUpdate[pixel] = (uchar)(((m_nLongUpdate)*(rand()-1))/RAND_MAX);//0,...m_nLongUpdate-1;
m_nNextLongUpdate[pixel] = (uchar)( rand() % m_nLongUpdate );//0,...m_nLongUpdate-1;
};
// Mid update?
if (m_nNextMidUpdate[pixel] == m_nMidCounter)
{
// add this pixel to the list of values (for each color)
memcpy(&m_aModel[offsetMid],&m_aModel[offsetShort],ndata*sizeof(unsigned char));
// increase the index
m_aModelIndexMid[pixel] = (m_aModelIndexMid[pixel] >= (m_nN-1)) ? 0 : (m_aModelIndexMid[pixel] + 1);
};
if (m_nMidCounter == (m_nMidUpdate-1))
{
m_nNextMidUpdate[pixel] = (uchar)( rand() % m_nMidUpdate );
};
// Short update?
if (m_nNextShortUpdate[pixel] == m_nShortCounter)
{
// add this pixel to the list of values (for each color)
memcpy(&m_aModel[offsetShort],data,ndata*sizeof(unsigned char));
//set the include flag
m_aModel[offsetShort+nchannels]=include;
// increase the index
m_aModelIndexShort[pixel] = (m_aModelIndexShort[pixel] >= (m_nN-1)) ? 0 : (m_aModelIndexShort[pixel] + 1);
};
if (m_nShortCounter == (m_nShortUpdate-1))
{
m_nNextShortUpdate[pixel] = (uchar)( rand() % m_nShortUpdate );
};
};
CV_INLINE int
_cvCheckPixelBackgroundNP(long pixel,
const uchar* data, int nchannels,
int m_nN,
uchar* m_aModel,
float m_fTb,
int m_nkNN,
float tau,
int m_nShadowDetection,
uchar& include)
{
int Pbf = 0; // the total probability that this pixel is background
int Pb = 0; //background model probability
float dData[CV_CN_MAX];
//uchar& include=data[nchannels];
include=0;//do we include this pixel into background model?
int ndata=nchannels+1;
long posPixel = pixel * ndata * m_nN * 3;
// float k;
// now increase the probability for each pixel
for (int n = 0; n < m_nN*3; n++)
{
uchar* mean_m = &m_aModel[posPixel + n*ndata];
//calculate difference and distance
float dist2;
if( nchannels == 3 )
{
dData[0] = (float)mean_m[0] - data[0];
dData[1] = (float)mean_m[1] - data[1];
dData[2] = (float)mean_m[2] - data[2];
dist2 = dData[0]*dData[0] + dData[1]*dData[1] + dData[2]*dData[2];
}
else
{
dist2 = 0.f;
for( int c = 0; c < nchannels; c++ )
{
dData[c] = (float)mean_m[c] - data[c];
dist2 += dData[c]*dData[c];
}
}
if (dist2<m_fTb)
{
Pbf++;//all
//background only
//if(m_aModel[subPosPixel + nchannels])//indicator
if(mean_m[nchannels])//indicator
{
Pb++;
if (Pb >= m_nkNN)//Tb
{
include=1;//include
return 1;//background ->exit
};
}
};
};
//include?
if (Pbf>=m_nkNN)//m_nTbf)
{
include=1;
}
int Ps = 0; // the total probability that this pixel is background shadow
// Detected as moving object, perform shadow detection
if (m_nShadowDetection)
{
for (int n = 0; n < m_nN*3; n++)
{
//long subPosPixel = posPixel + n*ndata;
uchar* mean_m = &m_aModel[posPixel + n*ndata];
if(mean_m[nchannels])//check only background
{
float numerator = 0.0f;
float denominator = 0.0f;
for( int c = 0; c < nchannels; c++ )
{
numerator += (float)data[c] * mean_m[c];
denominator += (float)mean_m[c] * mean_m[c];
}
// no division by zero allowed
if( denominator == 0 )
return 0;
// if tau < a < 1 then also check the color distortion
if( numerator <= denominator && numerator >= tau*denominator )
{
float a = numerator / denominator;
float dist2a = 0.0f;
for( int c = 0; c < nchannels; c++ )
{
float dD= a*mean_m[c] - data[c];
dist2a += dD*dD;
}
if (dist2a<m_fTb*a*a)
{
Ps++;
if (Ps >= m_nkNN)//shadow
return 2;
};
};
};
};
}
return 0;
};
CV_INLINE void
icvUpdatePixelBackgroundNP(const Mat& _src, Mat& _dst,
Mat& _bgmodel,
Mat& _nNextLongUpdate,
Mat& _nNextMidUpdate,
Mat& _nNextShortUpdate,
Mat& _aModelIndexLong,
Mat& _aModelIndexMid,
Mat& _aModelIndexShort,
int& _nLongCounter,
int& _nMidCounter,
int& _nShortCounter,
int _nN,
float _fAlphaT,
float _fTb,
int _nkNN,
float _fTau,
int _bShadowDetection,
uchar nShadowDetection
)
{
int size=_src.rows*_src.cols;
int nchannels = CV_MAT_CN(_src.type());
const uchar* pDataCurrent=_src.ptr(0);
uchar* pDataOutput=_dst.ptr(0);
//model
uchar* m_aModel=_bgmodel.ptr(0);
uchar* m_nNextLongUpdate=_nNextLongUpdate.ptr(0);
uchar* m_nNextMidUpdate=_nNextMidUpdate.ptr(0);
uchar* m_nNextShortUpdate=_nNextShortUpdate.ptr(0);
uchar* m_aModelIndexLong=_aModelIndexLong.ptr(0);
uchar* m_aModelIndexMid=_aModelIndexMid.ptr(0);
uchar* m_aModelIndexShort=_aModelIndexShort.ptr(0);
//some constants
int m_nN=_nN;
float m_fAlphaT=_fAlphaT;
float m_fTb=_fTb;//Tb - threshold on the distance
float m_fTau=_fTau;
int m_nkNN=_nkNN;
int m_bShadowDetection=_bShadowDetection;
//recalculate update rates - in case alpha is changed
// calculate update parameters (using alpha)
int Kshort,Kmid,Klong;
//approximate exponential learning curve
Kshort=(int)(log(0.7)/log(1-m_fAlphaT))+1;//Kshort
Kmid=(int)(log(0.4)/log(1-m_fAlphaT))-Kshort+1;//Kmid
Klong=(int)(log(0.1)/log(1-m_fAlphaT))-Kshort-Kmid+1;//Klong
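// (these follow from (1-alpha)^K = 0.7, 0.4 and 0.1 respectively: Kshort is the
//  number of frames after which roughly 30% of the averaging weight comes from
//  new frames, and the mid/long models use the 0.4 and 0.1 levels)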
//refresh rates
int m_nShortUpdate = (Kshort/m_nN)+1;
int m_nMidUpdate = (Kmid/m_nN)+1;
int m_nLongUpdate = (Klong/m_nN)+1;
//int m_nShortUpdate = MAX((Kshort/m_nN),m_nN);
//int m_nMidUpdate = MAX((Kmid/m_nN),m_nN);
//int m_nLongUpdate = MAX((Klong/m_nN),m_nN);
//update counters for the refresh rate
int m_nLongCounter=_nLongCounter;
int m_nMidCounter=_nMidCounter;
int m_nShortCounter=_nShortCounter;
_nShortCounter++;//0,1,...,m_nShortUpdate-1
_nMidCounter++;
_nLongCounter++;
if (_nShortCounter >= m_nShortUpdate) _nShortCounter = 0;
if (_nMidCounter >= m_nMidUpdate) _nMidCounter = 0;
if (_nLongCounter >= m_nLongUpdate) _nLongCounter = 0;
//go through the image
for (long i=0;i<size;i++)
{
const uchar* data=pDataCurrent;
pDataCurrent=pDataCurrent+nchannels;
//update model+ background subtract
uchar include=0;
int result= _cvCheckPixelBackgroundNP(i, data, nchannels,
m_nN, m_aModel, m_fTb,m_nkNN, m_fTau,m_bShadowDetection,include);
_cvUpdatePixelBackgroundNP(i,data,nchannels,
m_nN, m_aModel,
m_nNextLongUpdate,
m_nNextMidUpdate,
m_nNextShortUpdate,
m_aModelIndexLong,
m_aModelIndexMid,
m_aModelIndexShort,
m_nLongCounter,
m_nMidCounter,
m_nShortCounter,
m_nLongUpdate,
m_nMidUpdate,
m_nShortUpdate,
include
);
switch (result)
{
case 0:
//foreground
(* pDataOutput)=255;
break;
case 1:
//background
(* pDataOutput)=0;
break;
case 2:
//shadow
(* pDataOutput)=nShadowDetection;
break;
}
pDataOutput++;
}
};
void BackgroundSubtractorKNNImpl::apply(InputArray _image, OutputArray _fgmask, double learningRate)
{
Mat image = _image.getMat();
bool needToInitialize = nframes == 0 || learningRate >= 1 || image.size() != frameSize || image.type() != frameType;
if( needToInitialize )
initialize(image.size(), image.type());
_fgmask.create( image.size(), CV_8U );
Mat fgmask = _fgmask.getMat();
++nframes;
learningRate = learningRate >= 0 && nframes > 1 ? learningRate : 1./std::min( 2*nframes, history );
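// automatic rate: during start-up (while 2*nframes < history) this equals
// 1/(2*nframes), which is larger than the steady-state 1/history, so the
// model adapts faster on the first frames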
CV_Assert(learningRate >= 0);
//parallel_for_(Range(0, image.rows),
// KNNInvoker(image, fgmask,
icvUpdatePixelBackgroundNP(image, fgmask,
bgmodel,
nNextLongUpdate,
nNextMidUpdate,
nNextShortUpdate,
aModelIndexLong,
aModelIndexMid,
aModelIndexShort,
nLongCounter,
nMidCounter,
nShortCounter,
nN,
(float)learningRate,
fTb,
nkNN,
fTau,
bShadowDetection,
nShadowDetection
);
};
void BackgroundSubtractorKNNImpl::getBackgroundImage(OutputArray backgroundImage) const
{
int nchannels = CV_MAT_CN(frameType);
//CV_Assert( nchannels == 3 );
Mat meanBackground(frameSize, CV_8UC3, Scalar::all(0));
int ndata=nchannels+1;
int modelstep=(ndata * nN * 3);
const uchar* pbgmodel=bgmodel.ptr(0);
for(int row=0; row<meanBackground.rows; row++)
{
for(int col=0; col<meanBackground.cols; col++)
{
for (int n = 0; n < nN*3; n++)
{
const uchar* mean_m = &pbgmodel[n*ndata];
if (mean_m[nchannels])
{
meanBackground.at<Vec3b>(row, col) = Vec3b(mean_m);
break;
}
}
pbgmodel=pbgmodel+modelstep;
}
}
switch(CV_MAT_CN(frameType))
{
case 1:
{
std::vector<Mat> channels;
split(meanBackground, channels);
channels[0].copyTo(backgroundImage);
break;
}
case 3:
{
meanBackground.copyTo(backgroundImage);
break;
}
default:
CV_Error(Error::StsUnsupportedFormat, "");
}
};
Ptr<BackgroundSubtractorKNN> createBackgroundSubtractorKNN(int _history, double _threshold2,bool _bShadowDetection)
{
return makePtr<BackgroundSubtractorKNNImpl>(_history, (float)_threshold2, _bShadowDetection);
};
};//namespace cv

@@ -52,7 +52,7 @@ int main(int argc, const char** argv)
namedWindow("foreground image", WINDOW_NORMAL);
namedWindow("mean background image", WINDOW_NORMAL);
Ptr<BackgroundSubtractor> bg_model = createBackgroundSubtractorMOG2();
Ptr<BackgroundSubtractor> bg_model = createBackgroundSubtractorKNN();
Mat img, fgmask, fgimg;
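// Illustrative sketch (not part of this patch): because both factories return a
// pointer usable as Ptr<BackgroundSubtractor>, the rest of the sample's
// per-frame loop keeps working unchanged, along the lines of
//
//   bg_model->apply(img, fgmask);           // update model and get foreground mask
//   bg_model->getBackgroundImage(fgimg);    // current background estimate for display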
