Merge pull request #680 from vpisarev:c2cpp_video_take2

commit 82d7292ee7
Andrey Kamaev, committed by OpenCV Buildbot
Changed files (number of lines changed):

 1. modules/contrib/src/facerec.cpp (2)
 2. modules/core/include/opencv2/core/internal.hpp (4)
 3. modules/gpu/perf/perf_video.cpp (30)
 4. modules/gpu/test/test_bgfg.cpp (14)
 5. modules/legacy/src/bgfg_gaussmix.cpp (18)
 6. modules/ml/src/ml_init.cpp (2)
 7. modules/nonfree/src/nonfree_init.cpp (2)
 8. modules/python/src2/cv2.cpp (4)
 9. modules/softcascade/src/softcascade_init.cpp (4)
10. modules/video/doc/motion_analysis_and_object_tracking.rst (192)
11. modules/video/include/opencv2/video/background_segm.hpp (250)
12. modules/video/include/opencv2/video/tracking.hpp (25)
13. modules/video/src/affineflow.cpp (850)
14. modules/video/src/bgfg_gaussmix.cpp (164)
15. modules/video/src/bgfg_gaussmix2.cpp (306)
16. modules/video/src/bgfg_gmg.cpp (250)
17. modules/video/src/camshift.cpp (314)
18. modules/video/src/compat_video.cpp (388)
19. modules/video/src/ecc.cpp (15)
20. modules/video/src/kalman.cpp (170)
21. modules/video/src/lkpyramid.cpp (1139)
22. modules/video/src/motempl.cpp (442)
23. modules/video/src/optflowgf.cpp (15)
24. modules/video/src/simpleflow.cpp (34)
25. modules/video/src/simpleflow.hpp (86)
26. modules/video/src/video_init.cpp (43)
27. modules/video/test/test_backgroundsubtractor_gbh.cpp (108)
28. modules/video/test/test_estimaterigid.cpp (2)
29. samples/cpp/bgfg_gmg.cpp (9)
30. samples/cpp/bgfg_segm.cpp (6)
31. samples/cpp/segment_objects.cpp (6)
32. samples/gpu/performance/tests.cpp (16)

modules/contrib/src/facerec.cpp

@@ -893,7 +893,7 @@ CV_INIT_ALGORITHM(LBPH, "FaceRecognizer.LBPH",
 bool initModule_contrib()
 {
-    Ptr<Algorithm> efaces = createEigenfaces(), ffaces = createFisherfaces(), lbph = createLBPH();
+    Ptr<Algorithm> efaces = createEigenfaces_hidden(), ffaces = createFisherfaces_hidden(), lbph = createLBPH_hidden();
     return efaces->info() != 0 && ffaces->info() != 0 && lbph->info() != 0;
 }

modules/core/include/opencv2/core/internal.hpp

@@ -254,14 +254,14 @@ namespace cv
 } //namespace cv

 #define CV_INIT_ALGORITHM(classname, algname, memberinit) \
-    static ::cv::Algorithm* create##classname() \
+    static ::cv::Algorithm* create##classname##_hidden() \
     { \
         return new classname; \
     } \
     \
     static ::cv::AlgorithmInfo& classname##_info() \
     { \
-        static ::cv::AlgorithmInfo classname##_info_var(algname, create##classname); \
+        static ::cv::AlgorithmInfo classname##_info_var(algname, create##classname##_hidden); \
         return classname##_info_var; \
     } \
     \

modules/gpu/perf/perf_video.cpp

@@ -632,10 +632,10 @@ PERF_TEST_P(Video_Cn_LearningRate, Video_MOG,
     }
     else
     {
-        cv::BackgroundSubtractorMOG mog;
+        cv::Ptr<cv::BackgroundSubtractor> mog = cv::createBackgroundSubtractorMOG();
         cv::Mat foreground;

-        mog(frame, foreground, learningRate);
+        mog->apply(frame, foreground, learningRate);

         for (int i = 0; i < 10; ++i)
         {
@@ -653,7 +653,7 @@ PERF_TEST_P(Video_Cn_LearningRate, Video_MOG,
             }

             startTimer(); next();
-            mog(frame, foreground, learningRate);
+            mog->apply(frame, foreground, learningRate);
             stopTimer();
         }
@@ -731,12 +731,12 @@ PERF_TEST_P(Video_Cn, Video_MOG2,
     }
     else
     {
-        cv::BackgroundSubtractorMOG2 mog2;
-        mog2.set("detectShadows", false);
+        cv::Ptr<cv::BackgroundSubtractor> mog2 = cv::createBackgroundSubtractorMOG2();
+        mog2->set("detectShadows", false);
         cv::Mat foreground;

-        mog2(frame, foreground);
+        mog2->apply(frame, foreground);

         for (int i = 0; i < 10; ++i)
         {
@@ -754,7 +754,7 @@ PERF_TEST_P(Video_Cn, Video_MOG2,
             }

             startTimer(); next();
-            mog2(frame, foreground);
+            mog2->apply(frame, foreground);
             stopTimer();
         }
@@ -815,7 +815,7 @@ PERF_TEST_P(Video_Cn, Video_MOG2GetBackgroundImage,
     }
     else
     {
-        cv::BackgroundSubtractorMOG2 mog2;
+        cv::Ptr<cv::BackgroundSubtractor> mog2 = cv::createBackgroundSubtractorMOG2();
         cv::Mat foreground;

         for (int i = 0; i < 10; ++i)
@@ -833,12 +833,12 @@ PERF_TEST_P(Video_Cn, Video_MOG2GetBackgroundImage,
                 cv::swap(temp, frame);
             }

-            mog2(frame, foreground);
+            mog2->apply(frame, foreground);
         }

         cv::Mat background;

-        TEST_CYCLE() mog2.getBackgroundImage(background);
+        TEST_CYCLE() mog2->getBackgroundImage(background);

         CPU_SANITY_CHECK(background);
     }
@@ -923,11 +923,11 @@ PERF_TEST_P(Video_Cn_MaxFeatures, Video_GMG,
         cv::Mat foreground;
         cv::Mat zeros(frame.size(), CV_8UC1, cv::Scalar::all(0));

-        cv::BackgroundSubtractorGMG gmg;
-        gmg.set("maxFeatures", maxFeatures);
-        gmg.initialize(frame.size(), 0.0, 255.0);
+        cv::Ptr<cv::BackgroundSubtractor> gmg = cv::createBackgroundSubtractorGMG();
+        gmg->set("maxFeatures", maxFeatures);
+        //gmg.initialize(frame.size(), 0.0, 255.0);

-        gmg(frame, foreground);
+        gmg->apply(frame, foreground);

         for (int i = 0; i < 150; ++i)
         {
@@ -950,7 +950,7 @@ PERF_TEST_P(Video_Cn_MaxFeatures, Video_GMG,
             }

             startTimer(); next();
-            gmg(frame, foreground);
+            gmg->apply(frame, foreground);
             stopTimer();
         }
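
These perf tests show the migration pattern this PR applies everywhere: concrete subtractor classes are no longer constructed directly; a factory returns a cv::Ptr, updates go through apply() instead of operator(), and parameters reachable only through the base pointer are still set via the string-based Algorithm API. A small before/after sketch against the post-patch API:

    #include <opencv2/video/background_segm.hpp>

    int main()
    {
        cv::Mat frame(240, 320, CV_8UC3, cv::Scalar::all(0)), foreground;

        // Before this patch:
        //   cv::BackgroundSubtractorMOG2 mog2;
        //   mog2.set("detectShadows", false);
        //   mog2(frame, foreground);
        // After:
        cv::Ptr<cv::BackgroundSubtractor> mog2 = cv::createBackgroundSubtractorMOG2();
        mog2->set("detectShadows", false); // string-based Algorithm setter still works
        mog2->apply(frame, foreground);    // apply() replaces operator()
        return 0;
    }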

modules/gpu/test/test_bgfg.cpp

@@ -269,8 +269,8 @@ GPU_TEST_P(MOG2, Update)
     mog2.bShadowDetection = detectShadow;
     cv::gpu::GpuMat foreground = createMat(frame.size(), CV_8UC1, useRoi);

-    cv::BackgroundSubtractorMOG2 mog2_gold;
-    mog2_gold.set("detectShadows", detectShadow);
+    cv::Ptr<cv::BackgroundSubtractorMOG2> mog2_gold = cv::createBackgroundSubtractorMOG2();
+    mog2_gold->setDetectShadows(detectShadow);
     cv::Mat foreground_gold;

     for (int i = 0; i < 10; ++i)
@@ -287,7 +287,7 @@ GPU_TEST_P(MOG2, Update)
         mog2(loadMat(frame, useRoi), foreground);

-        mog2_gold(frame, foreground_gold);
+        mog2_gold->apply(frame, foreground_gold);

         if (detectShadow)
         {
@@ -314,8 +314,8 @@ GPU_TEST_P(MOG2, getBackgroundImage)
     mog2.bShadowDetection = detectShadow;
     cv::gpu::GpuMat foreground;

-    cv::BackgroundSubtractorMOG2 mog2_gold;
-    mog2_gold.set("detectShadows", detectShadow);
+    cv::Ptr<cv::BackgroundSubtractorMOG2> mog2_gold = cv::createBackgroundSubtractorMOG2();
+    mog2_gold->setDetectShadows(detectShadow);
     cv::Mat foreground_gold;

     for (int i = 0; i < 10; ++i)
@@ -325,14 +325,14 @@ GPU_TEST_P(MOG2, getBackgroundImage)
         mog2(loadMat(frame, useRoi), foreground);

-        mog2_gold(frame, foreground_gold);
+        mog2_gold->apply(frame, foreground_gold);
     }

     cv::gpu::GpuMat background = createMat(frame.size(), frame.type(), useRoi);
     mog2.getBackgroundImage(background);

     cv::Mat background_gold;
-    mog2_gold.getBackgroundImage(background_gold);
+    mog2_gold->getBackgroundImage(background_gold);

     ASSERT_MAT_NEAR(background_gold, background, 0);
 }
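
Where the test keeps the concrete interface (cv::Ptr<cv::BackgroundSubtractorMOG2> rather than the plain base pointer), it can use the new typed accessors instead of string lookups; a small sketch:

    #include <opencv2/video/background_segm.hpp>

    int main()
    {
        cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 = cv::createBackgroundSubtractorMOG2();
        mog2->setDetectShadows(false); // replaces mog2.set("detectShadows", false)
        mog2->setHistory(300);         // typed, checked at compile time
        return mog2->getDetectShadows() ? 1 : 0;
    }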

modules/legacy/src/bgfg_gaussmix.cpp

@@ -50,7 +50,7 @@ icvReleaseGaussianBGModel( CvGaussBGModel** bg_model )
     if( *bg_model )
     {
-        delete (cv::BackgroundSubtractorMOG*)((*bg_model)->mog);
+        delete (cv::Ptr<cv::BackgroundSubtractor>*)((*bg_model)->mog);
         cvReleaseImage( &(*bg_model)->background );
         cvReleaseImage( &(*bg_model)->foreground );
         memset( *bg_model, 0, sizeof(**bg_model) );
@@ -65,10 +65,10 @@ icvUpdateGaussianBGModel( IplImage* curr_frame, CvGaussBGModel* bg_model, doubl
 {
     cv::Mat image = cv::cvarrToMat(curr_frame), mask = cv::cvarrToMat(bg_model->foreground);

-    cv::BackgroundSubtractorMOG* mog = (cv::BackgroundSubtractorMOG*)(bg_model->mog);
+    cv::Ptr<cv::BackgroundSubtractor>* mog = (cv::Ptr<cv::BackgroundSubtractor>*)(bg_model->mog);
     CV_Assert(mog != 0);

-    (*mog)(image, mask, learningRate);
+    (*mog)->apply(image, mask, learningRate);

     bg_model->countFrames++;
     return 0;
@@ -105,13 +105,11 @@ cvCreateGaussianBGModel( IplImage* first_frame, CvGaussBGStatModelParams* parame
     bg_model->params = params;

-    cv::BackgroundSubtractorMOG* mog =
-        new cv::BackgroundSubtractorMOG(params.win_size,
-                                        params.n_gauss,
-                                        params.bg_threshold,
-                                        params.variance_init);
-    bg_model->mog = mog;
+    cv::Ptr<cv::BackgroundSubtractor> mog = cv::createBackgroundSubtractorMOG(params.win_size, params.n_gauss,
+                                                                              params.bg_threshold);
+    cv::Ptr<cv::BackgroundSubtractor>* pmog = new cv::Ptr<cv::BackgroundSubtractor>;
+    *pmog = mog;
+    bg_model->mog = pmog;

     CvSize sz = cvGetSize(first_frame);
     bg_model->background = cvCreateImage(sz, IPL_DEPTH_8U, first_frame->nChannels);
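
The legacy C struct stores the subtractor through an untyped field, so the reference-counted cv::Ptr itself has to live on the heap: create allocates a cv::Ptr and stashes its address, update casts the void* back and dereferences twice, and release deletes the cv::Ptr to drop the reference. A sketch of the same ownership pattern with std::shared_ptr and hypothetical names:

    #include <memory>

    // Hypothetical stand-ins for the legacy C struct and the C++ algorithm.
    struct Subtractor { /* ... */ };
    struct LegacyModel { void* impl; };

    // Create: heap-allocate the smart pointer itself so a void* can carry it.
    void legacyCreate(LegacyModel* m)
    {
        m->impl = new std::shared_ptr<Subtractor>(std::make_shared<Subtractor>());
    }

    // Use: cast back to the smart-pointer type, then dereference twice.
    Subtractor& legacyGet(LegacyModel* m)
    {
        return **static_cast<std::shared_ptr<Subtractor>*>(m->impl);
    }

    // Release: deleting the heap-allocated shared_ptr drops the last reference.
    void legacyRelease(LegacyModel* m)
    {
        delete static_cast<std::shared_ptr<Subtractor>*>(m->impl);
        m->impl = nullptr;
    }

    int main()
    {
        LegacyModel m = { nullptr };
        legacyCreate(&m);
        legacyGet(&m);
        legacyRelease(&m);
        return 0;
    }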

modules/ml/src/ml_init.cpp

@@ -56,7 +56,7 @@ CV_INIT_ALGORITHM(EM, "StatModel.EM",
 bool initModule_ml(void)
 {
-    Ptr<Algorithm> em = createEM();
+    Ptr<Algorithm> em = createEM_hidden();
     return em->info() != 0;
 }

modules/nonfree/src/nonfree_init.cpp

@@ -67,7 +67,7 @@ CV_INIT_ALGORITHM(SIFT, "Feature2D.SIFT",
 bool initModule_nonfree(void)
 {
-    Ptr<Algorithm> sift = createSIFT(), surf = createSURF();
+    Ptr<Algorithm> sift = createSIFT_hidden(), surf = createSURF_hidden();
     return sift->info() != 0 && surf->info() != 0;
 }

modules/python/src2/cv2.cpp

@@ -128,6 +128,10 @@ typedef Ptr<FeatureDetector> Ptr_FeatureDetector;
 typedef Ptr<DescriptorExtractor> Ptr_DescriptorExtractor;
 typedef Ptr<Feature2D> Ptr_Feature2D;
 typedef Ptr<DescriptorMatcher> Ptr_DescriptorMatcher;
+typedef Ptr<BackgroundSubtractor> Ptr_BackgroundSubtractor;
+typedef Ptr<BackgroundSubtractorMOG> Ptr_BackgroundSubtractorMOG;
+typedef Ptr<BackgroundSubtractorMOG2> Ptr_BackgroundSubtractorMOG2;
+typedef Ptr<BackgroundSubtractorGMG> Ptr_BackgroundSubtractorGMG;

 typedef Ptr<cv::softcascade::ChannelFeatureBuilder> Ptr_ChannelFeatureBuilder;

modules/softcascade/src/softcascade_init.cpp

@@ -58,8 +58,8 @@ CV_INIT_ALGORITHM(SCascade, "CascadeDetector.SCascade",
 bool initModule_softcascade(void)
 {
-    Ptr<Algorithm> sc = createSCascade();
-    Ptr<Algorithm> sc1 = createDetector();
+    Ptr<Algorithm> sc = createSCascade_hidden();
+    Ptr<Algorithm> sc1 = createDetector_hidden();
     return (sc1->info() != 0) && (sc->info() != 0);
 }

modules/video/doc/motion_analysis_and_object_tracking.rst

@@ -158,7 +158,6 @@ findTransformECC
 Finds the geometric transform (warp) between two images in terms of the ECC criterion [EP08]_.

 .. ocv:function:: double findTransformECC( InputArray templateImage, InputArray inputImage, InputOutputArray warpMatrix, int motionType=MOTION_AFFINE, TermCriteria criteria=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 50, 0.001))

-.. ocv:cfunction:: double cvFindTransformECC( const CvArr* templateImage, const CvArr* inputImage, CvMat* warpMatrix, const int motionType, const CvTermCriteria criteria)

     :param templateImage: single-channel template image; ``CV_8U`` or ``CV_32F`` array.
@@ -167,9 +166,13 @@ Finds the geometric transform (warp) between two images in terms of the ECC crit
     :param warpMatrix: floating-point :math:`2\times 3` or :math:`3\times 3` mapping matrix (warp).

     :param motionType: parameter, specifying the type of motion:

         * **MOTION_TRANSLATION** sets a translational motion model; ``warpMatrix`` is :math:`2\times 3` with the first :math:`2\times 2` part being the unity matrix and the rest two parameters being estimated.

         * **MOTION_EUCLIDEAN** sets a Euclidean (rigid) transformation as motion model; three parameters are estimated; ``warpMatrix`` is :math:`2\times 3`.

         * **MOTION_AFFINE** sets an affine motion model (DEFAULT); six parameters are estimated; ``warpMatrix`` is :math:`2\times 3`.

         * **MOTION_HOMOGRAPHY** sets a homography as a motion model; eight parameters are estimated; ``warpMatrix`` is :math:`3\times 3`.

     :param criteria: parameter, specifying the termination criteria of the ECC algorithm; ``criteria.epsilon`` defines the threshold of the increment in the correlation coefficient between two iterations (a negative ``criteria.epsilon`` makes ``criteria.maxcount`` the only termination criterion). Default values are shown in the declaration above.
@@ -177,13 +180,13 @@ Finds the geometric transform (warp) between two images in terms of the ECC crit
 The function estimates the optimum transformation (``warpMatrix``) with respect to ECC criterion ([EP08]_), that is

-..math::
+.. math::

     \texttt{warpMatrix} = \arg\max_{W} \texttt{ECC}(\texttt{templateImage}(x,y),\texttt{inputImage}(x',y'))

 where

-..math::
+.. math::

     \begin{bmatrix} x' \\ y' \end{bmatrix} = W \cdot \begin{bmatrix} x \\ y \\ 1 \end{bmatrix}
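
Since the C wrapper cvFindTransformECC is removed by this PR (see the tracking.hpp hunk below), alignment goes through the C++ function alone. A minimal sketch, with placeholder image paths:

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/video/tracking.hpp>

    int main()
    {
        cv::Mat templ = cv::imread("template.png", 0); // single-channel, CV_8U
        cv::Mat input = cv::imread("input.png", 0);

        // 2x3 identity warp as the initial guess for MOTION_AFFINE.
        cv::Mat warp = cv::Mat::eye(2, 3, CV_32F);

        double rho = cv::findTransformECC(
            templ, input, warp, cv::MOTION_AFFINE,
            cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 50, 0.001));

        // warp now maps template coordinates (x, y) to input coordinates (x', y');
        // rho is the final correlation coefficient.
        cv::Mat aligned;
        cv::warpAffine(input, aligned, warp, templ.size(),
                       cv::INTER_LINEAR + cv::WARP_INVERSE_MAP);
        return rho > 0 ? 0 : 1;
    }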
@@ -479,7 +482,7 @@ Base class for background/foreground segmentation. ::
     {
     public:
         virtual ~BackgroundSubtractor();
-        virtual void operator()(InputArray image, OutputArray fgmask, double learningRate=0);
+        virtual void apply(InputArray image, OutputArray fgmask, double learningRate=0);
         virtual void getBackgroundImage(OutputArray backgroundImage) const;
     };
@@ -487,11 +490,11 @@ Base class for background/foreground segmentation. ::
 The class is only used to define the common interface for the whole family of background/foreground segmentation algorithms.

-BackgroundSubtractor::operator()
---------------------------------
+BackgroundSubtractor::apply
+---------------------------
 Computes a foreground mask.

-.. ocv:function:: void BackgroundSubtractor::operator()(InputArray image, OutputArray fgmask, double learningRate=0)
+.. ocv:function:: void BackgroundSubtractor::apply(InputArray image, OutputArray fgmask, double learningRate=-1)

 .. ocv:pyfunction:: cv2.BackgroundSubtractor.apply(image[, fgmask[, learningRate]]) -> fgmask
@@ -499,6 +502,7 @@ Computes a foreground mask.
     :param fgmask: The output foreground mask as an 8-bit binary image.

+    :param learningRate: The value between 0 and 1 that indicates how fast the background model is learnt. A negative value makes the algorithm use an automatically chosen learning rate. 0 means that the background model is not updated at all; 1 means that the background model is completely reinitialized from the last frame.

 BackgroundSubtractor::getBackgroundImage
 ----------------------------------------
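
The newly documented learningRate parameter gives per-call control over model adaptation; a short sketch of the useful regimes, assuming the post-patch API:

    #include <opencv2/video/background_segm.hpp>

    int main()
    {
        cv::Ptr<cv::BackgroundSubtractor> bg = cv::createBackgroundSubtractorMOG2();
        cv::Mat frame(240, 320, CV_8UC3, cv::Scalar::all(0)), fgmask;

        bg->apply(frame, fgmask);       // default -1: automatically chosen rate
        bg->apply(frame, fgmask, 0);    // 0: model frozen, only classify
        bg->apply(frame, fgmask, 1);    // 1: model reinitialized from this frame
        bg->apply(frame, fgmask, 0.01); // fixed, slow adaptation
        return 0;
    }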
@@ -517,20 +521,16 @@ BackgroundSubtractorMOG
 Gaussian Mixture-based Background/Foreground Segmentation Algorithm.

-The class implements the algorithm described in P. KadewTraKuPong and R. Bowden, *An improved adaptive background mixture model for real-time tracking with shadow detection*, Proc. 2nd European Workshop on Advanced Video-Based Surveillance Systems, 2001: http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
+The class implements the algorithm described in [KB2001]_.

-BackgroundSubtractorMOG::BackgroundSubtractorMOG
-------------------------------------------------
-The constructors.
+createBackgroundSubtractorMOG
+-----------------------------
+Creates mixture-of-gaussian background subtractor

-.. ocv:function:: BackgroundSubtractorMOG::BackgroundSubtractorMOG()
-
-.. ocv:function:: BackgroundSubtractorMOG::BackgroundSubtractorMOG(int history, int nmixtures, double backgroundRatio, double noiseSigma=0)
+.. ocv:function:: Ptr<BackgroundSubtractorMOG> createBackgroundSubtractorMOG(int history=200, int nmixtures=5, double backgroundRatio=0.7, double noiseSigma=0)

-.. ocv:pyfunction:: cv2.BackgroundSubtractorMOG([history, nmixtures, backgroundRatio[, noiseSigma]]) -> <BackgroundSubtractorMOG object>
+.. ocv:pyfunction:: cv2.createBackgroundSubtractorMOG([history, nmixtures, backgroundRatio, noiseSigma]) -> <BackgroundSubtractorMOG object>

     :param history: Length of the history.
@@ -538,106 +538,156 @@ The constructors.
     :param backgroundRatio: Background ratio.

-    :param noiseSigma: Noise strength.
-
-Default constructor sets all parameters to default values.
-
-BackgroundSubtractorMOG::operator()
------------------------------------
-Updates the background model and returns the foreground mask
-
-.. ocv:function:: void BackgroundSubtractorMOG::operator()(InputArray image, OutputArray fgmask, double learningRate=0)
-
-Parameters are the same as in :ocv:funcx:`BackgroundSubtractor::operator()`
-
-BackgroundSubtractorMOG2
-------------------------
-Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
-
-.. ocv:class:: BackgroundSubtractorMOG2 : public BackgroundSubtractor
-
-Here are important members of the class that control the algorithm, which you can set after constructing the class instance:
-
-    .. ocv:member:: int nmixtures
-
-        Maximum allowed number of mixture components. Actual number is determined dynamically per pixel.
-
-    .. ocv:member:: float backgroundRatio
-
-        Threshold defining whether the component is significant enough to be included into the background model ( corresponds to ``TB=1-cf`` from the paper??which paper??). ``cf=0.1 => TB=0.9`` is default. For ``alpha=0.001``, it means that the mode should exist for approximately 105 frames before it is considered foreground.
-
-    .. ocv:member:: float varThresholdGen
-
-        Threshold for the squared Mahalanobis distance that helps decide when a sample is close to the existing components (corresponds to ``Tg``). If it is not close to any component, a new component is generated. ``3 sigma => Tg=3*3=9`` is default. A smaller ``Tg`` value generates more components. A higher ``Tg`` value may result in a small number of components but they can grow too large.
-
-    .. ocv:member:: float fVarInit
-
-        Initial variance for the newly generated components. It affects the speed of adaptation. The parameter value is based on your estimate of the typical standard deviation from the images. OpenCV uses 15 as a reasonable value.
-
-    .. ocv:member:: float fVarMin
-
-        Parameter used to further control the variance.
-
-    .. ocv:member:: float fVarMax
-
-        Parameter used to further control the variance.
-
-    .. ocv:member:: float fCT
-
-        Complexity reduction parameter. This parameter defines the number of samples needed to accept to prove the component exists. ``CT=0.05`` is a default value for all the samples. By setting ``CT=0`` you get an algorithm very similar to the standard Stauffer&Grimson algorithm.
-
-    .. ocv:member:: uchar nShadowDetection
-
-        The value for marking shadow pixels in the output foreground mask. Default value is 127.
-
-    .. ocv:member:: float fTau
-
-        Shadow threshold. The shadow is detected if the pixel is a darker version of the background. ``Tau`` is a threshold defining how much darker the shadow can be. ``Tau= 0.5`` means that if a pixel is more than twice darker then it is not shadow. See Prati,Mikic,Trivedi,Cucchiarra, *Detecting Moving Shadows...*, IEEE PAMI,2003.
-
-The class implements the Gaussian mixture model background subtraction described in:
-
-  * Z.Zivkovic, *Improved adaptive Gausian mixture model for background subtraction*, International Conference Pattern Recognition, UK, August, 2004, http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf. The code is very fast and performs also shadow detection. Number of Gausssian components is adapted per pixel.
-
-  * Z.Zivkovic, F. van der Heijden, *Efficient Adaptive Density Estimapion per Image Pixel for the Task of Background Subtraction*, Pattern Recognition Letters, vol. 27, no. 7, pages 773-780, 2006. The algorithm similar to the standard Stauffer&Grimson algorithm with additional selection of the number of the Gaussian components based on: Z.Zivkovic, F.van der Heijden, Recursive unsupervised learning of finite mixture models, IEEE Trans. on Pattern Analysis and Machine Intelligence, vol.26, no.5, pages 651-656, 2004.
-
-BackgroundSubtractorMOG2::BackgroundSubtractorMOG2
---------------------------------------------------
-The constructors.
-
-.. ocv:function:: BackgroundSubtractorMOG2::BackgroundSubtractorMOG2()
-
-.. ocv:function:: BackgroundSubtractorMOG2::BackgroundSubtractorMOG2( int history, float varThreshold, bool bShadowDetection=true )
-
-    :param history: Length of the history.
-
-    :param varThreshold: Threshold on the squared Mahalanobis distance to decide whether it is well described by the background model (see Cthr??). This parameter does not affect the background update. A typical value could be 4 sigma, that is, ``varThreshold=4*4=16;`` (see Tb??).
-
-    :param bShadowDetection: Parameter defining whether shadow detection should be enabled (``true`` or ``false``).
-
-BackgroundSubtractorMOG2::operator()
-------------------------------------
-Updates the background model and computes the foreground mask
-
-.. ocv:function:: void BackgroundSubtractorMOG2::operator()(InputArray image, OutputArray fgmask, double learningRate=-1)
-
-See :ocv:funcx:`BackgroundSubtractor::operator()`.
-
-BackgroundSubtractorMOG2::getBackgroundImage
---------------------------------------------
-Returns background image
-
-.. ocv:function:: void BackgroundSubtractorMOG2::getBackgroundImage(OutputArray backgroundImage)
-
-See :ocv:func:`BackgroundSubtractor::getBackgroundImage`.
+    :param noiseSigma: Noise strength (standard deviation of the brightness or each color channel). 0 means some automatic value.
+
+BackgroundSubtractorMOG2
+------------------------
+Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
+
+.. ocv:class:: BackgroundSubtractorMOG2 : public BackgroundSubtractor
+
+The class implements the Gaussian mixture model background subtraction described in [Zivkovic2004]_ and [Zivkovic2006]_ .
+
+createBackgroundSubtractorMOG2
+------------------------------
+Creates MOG2 Background Subtractor
+
+.. ocv:function:: Ptr<BackgroundSubtractorMOG2> createBackgroundSubtractorMOG2( int history=500, double varThreshold=16, bool detectShadows=true )
+
+    :param history: Length of the history.
+
+    :param varThreshold: Threshold on the squared Mahalanobis distance between the pixel and the model to decide whether a pixel is well described by the background model. This parameter does not affect the background update.
+
+    :param detectShadows: If true, the algorithm will detect shadows and mark them. It decreases the speed a bit, so if you do not need this feature, set the parameter to false.
+
+BackgroundSubtractorMOG2::getHistory
+------------------------------------
+Returns the number of last frames that affect the background model
+
+.. ocv:function:: int BackgroundSubtractorMOG2::getHistory() const
+
+BackgroundSubtractorMOG2::setHistory
+------------------------------------
+Sets the number of last frames that affect the background model
+
+.. ocv:function:: void BackgroundSubtractorMOG2::setHistory(int history)
+
+BackgroundSubtractorMOG2::getNMixtures
+--------------------------------------
+Returns the number of gaussian components in the background model
+
+.. ocv:function:: int BackgroundSubtractorMOG2::getNMixtures() const
+
+BackgroundSubtractorMOG2::setNMixtures
+--------------------------------------
+Sets the number of gaussian components in the background model
+
+.. ocv:function:: void BackgroundSubtractorMOG2::setNMixtures(int nmixtures)
+
+BackgroundSubtractorMOG2::getBackgroundRatio
+--------------------------------------------
+Returns the "background ratio" parameter of the algorithm
+
+.. ocv:function:: double BackgroundSubtractorMOG2::getBackgroundRatio() const
+
+If a foreground pixel keeps semi-constant value for about ``backgroundRatio*history`` frames, it's considered background and added to the model as a center of a new component. It corresponds to ``TB`` parameter in the paper.
+
+BackgroundSubtractorMOG2::setBackgroundRatio
+--------------------------------------------
+Sets the "background ratio" parameter of the algorithm
+
+.. ocv:function:: void BackgroundSubtractorMOG2::setBackgroundRatio(double ratio)
+
+BackgroundSubtractorMOG2::getVarThresholdGen
+--------------------------------------------
+Returns the variance scale factor for the pixel-model match
+
+.. ocv:function:: double BackgroundSubtractorMOG2::getVarThresholdGen() const
+
+Threshold for the squared Mahalanobis distance that helps decide when a sample is close to the existing components (corresponds to ``Tg`` in the paper). If a pixel is not close to any component, it is considered foreground or added as a new component. ``3 sigma => Tg=3*3=9`` is default. A smaller ``Tg`` value generates more components. A higher ``Tg`` value may result in a small number of components but they can grow too large.
+
+BackgroundSubtractorMOG2::setVarThresholdGen
+--------------------------------------------
+Sets the variance scale factor for the pixel-model match
+
+.. ocv:function:: void BackgroundSubtractorMOG2::setVarThresholdGen(double varThresholdGen)
+
+BackgroundSubtractorMOG2::getVarInit
+------------------------------------
+Returns the initial variance of each gaussian component
+
+.. ocv:function:: double BackgroundSubtractorMOG2::getVarInit() const
+
+BackgroundSubtractorMOG2::setVarInit
+------------------------------------
+Sets the initial variance of each gaussian component
+
+.. ocv:function:: void BackgroundSubtractorMOG2::setVarInit(double varInit)
+
+BackgroundSubtractorMOG2::getComplexityReductionThreshold
+---------------------------------------------------------
+Returns the complexity reduction threshold
+
+.. ocv:function:: double BackgroundSubtractorMOG2::getComplexityReductionThreshold() const
+
+This parameter defines the number of samples needed to accept to prove the component exists. ``CT=0.05`` is a default value for all the samples. By setting ``CT=0`` you get an algorithm very similar to the standard Stauffer&Grimson algorithm.
+
+BackgroundSubtractorMOG2::setComplexityReductionThreshold
+---------------------------------------------------------
+Sets the complexity reduction threshold
+
+.. ocv:function:: void BackgroundSubtractorMOG2::setComplexityReductionThreshold(double ct)
+
+BackgroundSubtractorMOG2::getDetectShadows
+------------------------------------------
+Returns the shadow detection flag
+
+.. ocv:function:: bool BackgroundSubtractorMOG2::getDetectShadows() const
+
+If true, the algorithm detects shadows and marks them. See createBackgroundSubtractorMOG2 for details.
+
+BackgroundSubtractorMOG2::setDetectShadows
+------------------------------------------
+Enables or disables shadow detection
+
+.. ocv:function:: void BackgroundSubtractorMOG2::setDetectShadows(bool detectShadows)
+
+BackgroundSubtractorMOG2::getShadowValue
+----------------------------------------
+Returns the shadow value
+
+.. ocv:function:: int BackgroundSubtractorMOG2::getShadowValue() const
+
+Shadow value is the value used to mark shadows in the foreground mask. Default value is 127. Value 0 in the mask always means background, 255 means foreground.
+
+BackgroundSubtractorMOG2::setShadowValue
+----------------------------------------
+Sets the shadow value
+
+.. ocv:function:: void BackgroundSubtractorMOG2::setShadowValue(int value)
+
+BackgroundSubtractorMOG2::getShadowThreshold
+--------------------------------------------
+Returns the shadow threshold
+
+.. ocv:function:: double BackgroundSubtractorMOG2::getShadowThreshold() const
+
+A shadow is detected if pixel is a darker version of the background. The shadow threshold (``Tau`` in the paper) is a threshold defining how much darker the shadow can be. ``Tau= 0.5`` means that if a pixel is more than twice darker then it is not shadow. See Prati, Mikic, Trivedi and Cucchiarra, *Detecting Moving Shadows...*, IEEE PAMI, 2003.
+
+BackgroundSubtractorMOG2::setShadowThreshold
+--------------------------------------------
+Sets the shadow threshold
+
+.. ocv:function:: void BackgroundSubtractorMOG2::setShadowThreshold(double threshold)

 calcOpticalFlowSF
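
Putting the new MOG2 accessors together: shadows are marked in the mask with getShadowValue() (127 by default), while certain foreground stays 255, so the two can be separated with simple compares. A hedged sketch against the post-patch API:

    #include <opencv2/video/background_segm.hpp>

    int main()
    {
        cv::Ptr<cv::BackgroundSubtractorMOG2> bg =
            cv::createBackgroundSubtractorMOG2(500, 16, true); // history, varThreshold, detectShadows
        bg->setShadowThreshold(0.5); // pixels at most twice darker count as shadow

        cv::Mat frame(240, 320, CV_8UC3, cv::Scalar::all(0)), fgmask;
        bg->apply(frame, fgmask);

        cv::Mat foregroundOnly = (fgmask == 255);               // certain foreground
        cv::Mat shadowsOnly = (fgmask == bg->getShadowValue()); // shadow pixels
        return 0;
    }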
@@ -756,10 +806,16 @@ Releases all inner buffers.
 .. [Davis97] Davis, J.W. and Bobick, A.F. “The Representation and Recognition of Action Using Temporal Templates”, CVPR97, 1997

+.. [EP08] Evangelidis, G.D. and Psarakis, E.Z. "Parametric Image Alignment using Enhanced Correlation Coefficient Maximization", IEEE Transactions on PAMI, vol. 32, no. 10, 2008
+
 .. [Farneback2003] Gunnar Farneback, Two-frame motion estimation based on polynomial expansion, Lecture Notes in Computer Science, 2003, (2749), 363-370.

 .. [Horn81] Berthold K.P. Horn and Brian G. Schunck. Determining Optical Flow. Artificial Intelligence, 17, pp. 185-203, 1981.

+.. [KB2001] P. KadewTraKuPong and R. Bowden. "An improved adaptive background mixture model for real-time tracking with shadow detection", Proc. 2nd European Workshop on Advanced Video-Based Surveillance Systems, 2001: http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
+
+.. [Javier2012] Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
+
 .. [Lucas81] Lucas, B., and Kanade, T. An Iterative Image Registration Technique with an Application to Stereo Vision, Proc. of 7th International Joint Conference on Artificial Intelligence (IJCAI), pp. 674-679.

 .. [Welch95] Greg Welch and Gary Bishop “An Introduction to the Kalman Filter”, 1995
@@ -768,6 +824,6 @@ Releases all inner buffers.
 .. [Zach2007] C. Zach, T. Pock and H. Bischof. "A Duality Based Approach for Realtime TV-L1 Optical Flow", In Proceedings of Pattern Recognition (DAGM), Heidelberg, Germany, pp. 214-223, 2007

-.. [Javier2012] Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
+.. [Zivkovic2004] Z. Zivkovic. *Improved adaptive Gaussian mixture model for background subtraction*, International Conference Pattern Recognition, UK, August, 2004, http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf. The code is very fast and performs also shadow detection. The number of Gaussian components is adapted per pixel.

-.. [EP08] Evangelidis, G.D. and Psarakis E.Z. "Parametric Image Alignment using Enhanced Correlation Coefficient Maximization", IEEE Transactions on PAMI, vol. 32, no. 10, 2008
+.. [Zivkovic2006] Z. Zivkovic, F. van der Heijden. "Efficient Adaptive Density Estimation per Image Pixel for the Task of Background Subtraction", Pattern Recognition Letters, vol. 27, no. 7, pages 773-780, 2006.

modules/video/include/opencv2/video/background_segm.hpp

@@ -12,6 +12,7 @@
 //
 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
 // Third party copyrights are property of their respective owners.
 //
 // Redistribution and use in source and binary forms, with or without modification,
@@ -57,14 +58,11 @@ namespace cv
 class CV_EXPORTS_W BackgroundSubtractor : public Algorithm
 {
 public:
-    //! the virtual destructor
-    virtual ~BackgroundSubtractor();
     //! the update operator that takes the next video frame and returns the current foreground mask as 8-bit binary image.
-    CV_WRAP_AS(apply) virtual void operator()(InputArray image, OutputArray fgmask,
-                                              double learningRate=0);
+    CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate=-1) = 0;

     //! computes a background image
-    virtual void getBackgroundImage(OutputArray backgroundImage) const;
+    CV_WRAP virtual void getBackgroundImage(OutputArray backgroundImage) const = 0;
 };
@@ -81,32 +79,23 @@ public:
 class CV_EXPORTS_W BackgroundSubtractorMOG : public BackgroundSubtractor
 {
 public:
-    //! the default constructor
-    CV_WRAP BackgroundSubtractorMOG();
-    //! the full constructor that takes the length of the history, the number of gaussian mixtures, the background ratio parameter and the noise strength
-    CV_WRAP BackgroundSubtractorMOG(int history, int nmixtures, double backgroundRatio, double noiseSigma=0);
-    //! the destructor
-    virtual ~BackgroundSubtractorMOG();
-    //! the update operator
-    virtual void operator()(InputArray image, OutputArray fgmask, double learningRate=0);
-
-    //! re-initiaization method
-    virtual void initialize(Size frameSize, int frameType);
-
-    virtual AlgorithmInfo* info() const;
-
-protected:
-    Size frameSize;
-    int frameType;
-    Mat bgmodel;
-    int nframes;
-    int history;
-    int nmixtures;
-    double varThreshold;
-    double backgroundRatio;
-    double noiseSigma;
+    CV_WRAP virtual int getHistory() const = 0;
+    CV_WRAP virtual void setHistory(int nframes) = 0;
+
+    CV_WRAP virtual int getNMixtures() const = 0;
+    CV_WRAP virtual void setNMixtures(int nmix) = 0;
+
+    CV_WRAP virtual double getBackgroundRatio() const = 0;
+    CV_WRAP virtual void setBackgroundRatio(double backgroundRatio) = 0;
+
+    CV_WRAP virtual double getNoiseSigma() const = 0;
+    CV_WRAP virtual void setNoiseSigma(double noiseSigma) = 0;
 };

+CV_EXPORTS_W Ptr<BackgroundSubtractorMOG>
+    createBackgroundSubtractorMOG(int history=200, int nmixtures=5,
+                                  double backgroundRatio=0.7, double noiseSigma=0);
+
 /*!
  The class implements the following algorithm:
@@ -114,82 +103,51 @@ protected:
  Z.Zivkovic
  International Conference Pattern Recognition, UK, August, 2004.
  http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf
 */
-class CV_EXPORTS BackgroundSubtractorMOG2 : public BackgroundSubtractor
+class CV_EXPORTS_W BackgroundSubtractorMOG2 : public BackgroundSubtractor
 {
 public:
-    //! the default constructor
-    BackgroundSubtractorMOG2();
-    //! the full constructor that takes the length of the history, the number of gaussian mixtures, the background ratio parameter and the noise strength
-    BackgroundSubtractorMOG2(int history, float varThreshold, bool bShadowDetection=true);
-    //! the destructor
-    virtual ~BackgroundSubtractorMOG2();
-    //! the update operator
-    virtual void operator()(InputArray image, OutputArray fgmask, double learningRate=-1);
-
-    //! computes a background image which are the mean of all background gaussians
-    virtual void getBackgroundImage(OutputArray backgroundImage) const;
-
-    //! re-initiaization method
-    virtual void initialize(Size frameSize, int frameType);
-
-    virtual AlgorithmInfo* info() const;
-
-protected:
-    Size frameSize;
-    int frameType;
-    Mat bgmodel;
-    Mat bgmodelUsedModes;//keep track of number of modes per pixel
-    int nframes;
-    int history;
-    int nmixtures;
-    //! here it is the maximum allowed number of mixture components.
-    //! Actual number is determined dynamically per pixel
-    double varThreshold;
-    // threshold on the squared Mahalanobis distance to decide if it is well described
-    // by the background model or not. Related to Cthr from the paper.
-    // This does not influence the update of the background. A typical value could be 4 sigma
-    // and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
-
-    /////////////////////////
-    // less important parameters - things you might change but be carefull
-    ////////////////////////
-    float backgroundRatio;
-    // corresponds to fTB=1-cf from the paper
-    // TB - threshold when the component becomes significant enough to be included into
-    // the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.
-    // For alpha=0.001 it means that the mode should exist for approximately 105 frames before
-    // it is considered foreground
-    // float noiseSigma;
-    float varThresholdGen;
-    //correspondts to Tg - threshold on the squared Mahalan. dist. to decide
-    //when a sample is close to the existing components. If it is not close
-    //to any a new component will be generated. I use 3 sigma => Tg=3*3=9.
-    //Smaller Tg leads to more generated components and higher Tg might make
-    //lead to small number of components but they can grow too large
-    float fVarInit;
-    float fVarMin;
-    float fVarMax;
-    //initial variance for the newly generated components.
-    //It will will influence the speed of adaptation. A good guess should be made.
-    //A simple way is to estimate the typical standard deviation from the images.
-    //I used here 10 as a reasonable value
-    // min and max can be used to further control the variance
-    float fCT;//CT - complexity reduction prior
-    //this is related to the number of samples needed to accept that a component
-    //actually exists. We use CT=0.05 of all the samples. By setting CT=0 you get
-    //the standard Stauffer&Grimson algorithm (maybe not exact but very similar)
-
-    //shadow detection parameters
-    bool bShadowDetection;//default 1 - do shadow detection
-    unsigned char nShadowDetection;//do shadow detection - insert this value as the detection result - 127 default value
-    float fTau;
-    // Tau - shadow threshold. The shadow is detected if the pixel is darker
-    //version of the background. Tau is a threshold on how much darker the shadow can be.
-    //Tau= 0.5 means that if pixel is more than 2 times darker then it is not shadow
-    //See: Prati,Mikic,Trivedi,Cucchiarra,"Detecting Moving Shadows...",IEEE PAMI,2003.
+    CV_WRAP virtual int getHistory() const = 0;
+    CV_WRAP virtual void setHistory(int history) = 0;
+
+    CV_WRAP virtual int getNMixtures() const = 0;
+    CV_WRAP virtual void setNMixtures(int nmixtures) = 0;
+
+    CV_WRAP virtual double getBackgroundRatio() const = 0;
+    CV_WRAP virtual void setBackgroundRatio(double ratio) = 0;
+
+    CV_WRAP virtual double getVarThreshold() const = 0;
+    CV_WRAP virtual void setVarThreshold(double varThreshold) = 0;
+
+    CV_WRAP virtual double getVarThresholdGen() const = 0;
+    CV_WRAP virtual void setVarThresholdGen(double varThresholdGen) = 0;
+
+    CV_WRAP virtual double getVarInit() const = 0;
+    CV_WRAP virtual void setVarInit(double varInit) = 0;
+
+    CV_WRAP virtual double getVarMin() const = 0;
+    CV_WRAP virtual void setVarMin(double varMin) = 0;
+
+    CV_WRAP virtual double getVarMax() const = 0;
+    CV_WRAP virtual void setVarMax(double varMax) = 0;
+
+    CV_WRAP virtual double getComplexityReductionThreshold() const = 0;
+    CV_WRAP virtual void setComplexityReductionThreshold(double ct) = 0;
+
+    CV_WRAP virtual bool getDetectShadows() const = 0;
+    CV_WRAP virtual void setDetectShadows(bool detectShadows) = 0;
+
+    CV_WRAP virtual int getShadowValue() const = 0;
+    CV_WRAP virtual void setShadowValue(int value) = 0;
+
+    CV_WRAP virtual double getShadowThreshold() const = 0;
+    CV_WRAP virtual void setShadowThreshold(double threshold) = 0;
 };

+CV_EXPORTS_W Ptr<BackgroundSubtractorMOG2>
+    createBackgroundSubtractorMOG2(int history=500, double varThreshold=16,
+                                   bool detectShadows=true);
+
 /**
  * Background Subtractor module. Takes a series of images and returns a sequence of mask (8UC1)
  * images of the same size, where 255 indicates Foreground and 0 represents Background.
@@ -197,66 +155,44 @@ protected:
  * Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere,
 * A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012.
 */
-class CV_EXPORTS BackgroundSubtractorGMG: public cv::BackgroundSubtractor
+class CV_EXPORTS_W BackgroundSubtractorGMG : public BackgroundSubtractor
 {
 public:
-    BackgroundSubtractorGMG();
-    virtual ~BackgroundSubtractorGMG();
-    virtual AlgorithmInfo* info() const;
-
-    /**
-     * Validate parameters and set up data structures for appropriate image size.
-     * Must call before running on data.
-     * @param frameSize input frame size
-     * @param min       minimum value taken on by pixels in image sequence. Usually 0
-     * @param max       maximum value taken on by pixels in image sequence. e.g. 1.0 or 255
-     */
-    void initialize(cv::Size frameSize, double min, double max);
-
-    /**
-     * Performs single-frame background subtraction and builds up a statistical background image
-     * model.
-     * @param image Input image
-     * @param fgmask Output mask image representing foreground and background pixels
-     */
-    virtual void operator()(InputArray image, OutputArray fgmask, double learningRate=-1.0);
-
-    /**
-     * Releases all inner buffers.
-     */
-    void release();
-
-    //! Total number of distinct colors to maintain in histogram.
-    int maxFeatures;
-    //! Set between 0.0 and 1.0, determines how quickly features are "forgotten" from histograms.
-    double learningRate;
-    //! Number of frames of video to use to initialize histograms.
-    int numInitializationFrames;
-    //! Number of discrete levels in each channel to be used in histograms.
-    int quantizationLevels;
-    //! Prior probability that any given pixel is a background pixel. A sensitivity parameter.
-    double backgroundPrior;
-    //! Value above which pixel is determined to be FG.
-    double decisionThreshold;
-    //! Smoothing radius, in pixels, for cleaning up FG image.
-    int smoothingRadius;
-    //! Perform background model update
-    bool updateBackgroundModel;
-
-private:
-    double maxVal_;
-    double minVal_;
-
-    cv::Size frameSize_;
-    int frameNum_;
-
-    cv::Mat_<int> nfeatures_;
-    cv::Mat_<unsigned int> colors_;
-    cv::Mat_<float> weights_;
-
-    cv::Mat buf_;
+    CV_WRAP virtual int getMaxFeatures() const = 0;
+    CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0;
+
+    CV_WRAP virtual double getDefaultLearningRate() const = 0;
+    CV_WRAP virtual void setDefaultLearningRate(double lr) = 0;
+
+    CV_WRAP virtual int getNumFrames() const = 0;
+    CV_WRAP virtual void setNumFrames(int nframes) = 0;
+
+    CV_WRAP virtual int getQuantizationLevels() const = 0;
+    CV_WRAP virtual void setQuantizationLevels(int nlevels) = 0;
+
+    CV_WRAP virtual double getBackgroundPrior() const = 0;
+    CV_WRAP virtual void setBackgroundPrior(double bgprior) = 0;
+
+    CV_WRAP virtual int getSmoothingRadius() const = 0;
+    CV_WRAP virtual void setSmoothingRadius(int radius) = 0;
+
+    CV_WRAP virtual double getDecisionThreshold() const = 0;
+    CV_WRAP virtual void setDecisionThreshold(double thresh) = 0;
+
+    CV_WRAP virtual bool getUpdateBackgroundModel() const = 0;
+    CV_WRAP virtual void setUpdateBackgroundModel(bool update) = 0;
+
+    CV_WRAP virtual double getMinVal() const = 0;
+    CV_WRAP virtual void setMinVal(double val) = 0;
+
+    CV_WRAP virtual double getMaxVal() const = 0;
+    CV_WRAP virtual void setMaxVal(double val) = 0;
 };

+CV_EXPORTS_W Ptr<BackgroundSubtractorGMG> createBackgroundSubtractorGMG(int initializationFrames=120,
+                                                                        double decisionThreshold=0.8);
+
 }

 #endif
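
For GMG the explicit initialize(frameSize, min, max) call from the old class is gone (the updated perf test simply comments it out), and the value range hints are now exposed through setMinVal()/setMaxVal(). A small sketch of the new interface:

    #include <opencv2/video/background_segm.hpp>

    int main()
    {
        // 120 initialization frames, decision threshold 0.8 (the declared defaults).
        cv::Ptr<cv::BackgroundSubtractorGMG> gmg =
            cv::createBackgroundSubtractorGMG(120, 0.8);
        gmg->setMaxFeatures(64);       // replaces the public maxFeatures field
        gmg->setQuantizationLevels(16);

        cv::Mat frame(240, 320, CV_8UC3, cv::Scalar::all(0)), fgmask;
        gmg->apply(frame, fgmask);     // replaces operator()
        return 0;
    }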

modules/video/include/opencv2/video/tracking.hpp

@@ -219,23 +219,6 @@ CVAPI(const CvMat*) cvKalmanCorrect( CvKalman* kalman, const CvMat* measurement
 #define cvKalmanUpdateByMeasurement cvKalmanCorrect

-/****************************************************************************************\
-*                              Image Alignment (ECC algorithm)                           *
-\****************************************************************************************/
-enum
-{
-    MOTION_TRANSLATION,
-    MOTION_EUCLIDEAN,
-    MOTION_AFFINE,
-    MOTION_HOMOGRAPHY
-};
-
-/* Estimate the geometric transformation between 2 images (area-based alignment) */
-CVAPI(double) cvFindTransformECC (const CvArr* templateImage, const CvArr* inputImage,
-                                  CvMat* warpMatrix,
-                                  const int motionType,
-                                  const CvTermCriteria criteria);
-
 #ifdef __cplusplus
 }
@@ -341,6 +324,14 @@ CV_EXPORTS_W void calcOpticalFlowFarneback( InputArray prev, InputArray next,
 CV_EXPORTS_W Mat estimateRigidTransform( InputArray src, InputArray dst,
                                          bool fullAffine);

+enum
+{
+    MOTION_TRANSLATION=0,
+    MOTION_EUCLIDEAN=1,
+    MOTION_AFFINE=2,
+    MOTION_HOMOGRAPHY=3
+};
+
 //! estimates the best-fit Translation, Euclidean, Affine or Perspective Transformation
 // with respect to Enhanced Correlation Coefficient criterion that maps one image to
 // another (area-based alignment)

modules/video/src/affineflow.cpp (new file)

@@ -0,0 +1,850 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
// to be moved to legacy
static int icvMinimalPyramidSize( CvSize imgSize )
{
return cvAlign(imgSize.width,8) * imgSize.height / 3;
}
static void
icvInitPyramidalAlgorithm( const CvMat* imgA, const CvMat* imgB,
CvMat* pyrA, CvMat* pyrB,
int level, CvTermCriteria * criteria,
int max_iters, int flags,
uchar *** imgI, uchar *** imgJ,
int **step, CvSize** size,
double **scale, cv::AutoBuffer<uchar>* buffer )
{
const int ALIGN = 8;
int pyrBytes, bufferBytes = 0, elem_size;
int level1 = level + 1;
int i;
CvSize imgSize, levelSize;
*imgI = *imgJ = 0;
*step = 0;
*scale = 0;
*size = 0;
/* check input arguments */
if( ((flags & CV_LKFLOW_PYR_A_READY) != 0 && !pyrA) ||
((flags & CV_LKFLOW_PYR_B_READY) != 0 && !pyrB) )
CV_Error( CV_StsNullPtr, "Some of the precomputed pyramids are missing" );
if( level < 0 )
CV_Error( CV_StsOutOfRange, "The number of pyramid levels is negative" );
switch( criteria->type )
{
case CV_TERMCRIT_ITER:
criteria->epsilon = 0.f;
break;
case CV_TERMCRIT_EPS:
criteria->max_iter = max_iters;
break;
case CV_TERMCRIT_ITER | CV_TERMCRIT_EPS:
break;
default:
assert( 0 );
CV_Error( CV_StsBadArg, "Invalid termination criteria" );
}
/* compare squared values */
criteria->epsilon *= criteria->epsilon;
/* set pointers and step for every level */
pyrBytes = 0;
imgSize = cvGetSize(imgA);
elem_size = CV_ELEM_SIZE(imgA->type);
levelSize = imgSize;
for( i = 1; i < level1; i++ )
{
levelSize.width = (levelSize.width + 1) >> 1;
levelSize.height = (levelSize.height + 1) >> 1;
int tstep = cvAlign(levelSize.width,ALIGN) * elem_size;
pyrBytes += tstep * levelSize.height;
}
assert( pyrBytes <= imgSize.width * imgSize.height * elem_size * 4 / 3 );
/* buffer_size = <size for patches> + <size for pyramids> */
bufferBytes = (int)((level1 >= 0) * ((pyrA->data.ptr == 0) +
(pyrB->data.ptr == 0)) * pyrBytes +
(sizeof(imgI[0][0]) * 2 + sizeof(step[0][0]) +
sizeof(size[0][0]) + sizeof(scale[0][0])) * level1);
buffer->allocate( bufferBytes );
*imgI = (uchar **) (uchar*)(*buffer);
*imgJ = *imgI + level1;
*step = (int *) (*imgJ + level1);
*scale = (double *) (*step + level1);
*size = (CvSize *)(*scale + level1);
imgI[0][0] = imgA->data.ptr;
imgJ[0][0] = imgB->data.ptr;
step[0][0] = imgA->step;
scale[0][0] = 1;
size[0][0] = imgSize;
if( level > 0 )
{
uchar *bufPtr = (uchar *) (*size + level1);
uchar *ptrA = pyrA->data.ptr;
uchar *ptrB = pyrB->data.ptr;
if( !ptrA )
{
ptrA = bufPtr;
bufPtr += pyrBytes;
}
if( !ptrB )
ptrB = bufPtr;
levelSize = imgSize;
/* build pyramids for both frames */
for( i = 1; i <= level; i++ )
{
int levelBytes;
CvMat prev_level, next_level;
levelSize.width = (levelSize.width + 1) >> 1;
levelSize.height = (levelSize.height + 1) >> 1;
size[0][i] = levelSize;
step[0][i] = cvAlign( levelSize.width, ALIGN ) * elem_size;
scale[0][i] = scale[0][i - 1] * 0.5;
levelBytes = step[0][i] * levelSize.height;
imgI[0][i] = (uchar *) ptrA;
ptrA += levelBytes;
if( !(flags & CV_LKFLOW_PYR_A_READY) )
{
prev_level = cvMat( size[0][i-1].height, size[0][i-1].width, CV_8UC1 );
next_level = cvMat( size[0][i].height, size[0][i].width, CV_8UC1 );
cvSetData( &prev_level, imgI[0][i-1], step[0][i-1] );
cvSetData( &next_level, imgI[0][i], step[0][i] );
cvPyrDown( &prev_level, &next_level );
}
imgJ[0][i] = (uchar *) ptrB;
ptrB += levelBytes;
if( !(flags & CV_LKFLOW_PYR_B_READY) )
{
prev_level = cvMat( size[0][i-1].height, size[0][i-1].width, CV_8UC1 );
next_level = cvMat( size[0][i].height, size[0][i].width, CV_8UC1 );
cvSetData( &prev_level, imgJ[0][i-1], step[0][i-1] );
cvSetData( &next_level, imgJ[0][i], step[0][i] );
cvPyrDown( &prev_level, &next_level );
}
}
}
}
/* compute dI/dx and dI/dy */
static void
icvCalcIxIy_32f( const float* src, int src_step, float* dstX, float* dstY, int dst_step,
CvSize src_size, const float* smooth_k, float* buffer0 )
{
int src_width = src_size.width, dst_width = src_size.width-2;
int x, height = src_size.height - 2;
float* buffer1 = buffer0 + src_width;
src_step /= sizeof(src[0]);
dst_step /= sizeof(dstX[0]);
for( ; height--; src += src_step, dstX += dst_step, dstY += dst_step )
{
const float* src2 = src + src_step;
const float* src3 = src + src_step*2;
for( x = 0; x < src_width; x++ )
{
float t0 = (src3[x] + src[x])*smooth_k[0] + src2[x]*smooth_k[1];
float t1 = src3[x] - src[x];
buffer0[x] = t0; buffer1[x] = t1;
}
for( x = 0; x < dst_width; x++ )
{
float t0 = buffer0[x+2] - buffer0[x];
float t1 = (buffer1[x] + buffer1[x+2])*smooth_k[0] + buffer1[x+1]*smooth_k[1];
dstX[x] = t0; dstY[x] = t1;
}
}
}
#undef CV_8TO32F
#define CV_8TO32F(a) (a)
static const void*
icvAdjustRect( const void* srcptr, int src_step, int pix_size,
CvSize src_size, CvSize win_size,
CvPoint ip, CvRect* pRect )
{
CvRect rect;
const char* src = (const char*)srcptr;
if( ip.x >= 0 )
{
src += ip.x*pix_size;
rect.x = 0;
}
else
{
rect.x = -ip.x;
if( rect.x > win_size.width )
rect.x = win_size.width;
}
if( ip.x + win_size.width < src_size.width )
rect.width = win_size.width;
else
{
rect.width = src_size.width - ip.x - 1;
if( rect.width < 0 )
{
src += rect.width*pix_size;
rect.width = 0;
}
assert( rect.width <= win_size.width );
}
if( ip.y >= 0 )
{
src += ip.y * src_step;
rect.y = 0;
}
else
rect.y = -ip.y;
if( ip.y + win_size.height < src_size.height )
rect.height = win_size.height;
else
{
rect.height = src_size.height - ip.y - 1;
if( rect.height < 0 )
{
src += rect.height*src_step;
rect.height = 0;
}
}
*pRect = rect;
return src - rect.x*pix_size;
}
static CvStatus CV_STDCALL icvGetRectSubPix_8u32f_C1R
( const uchar* src, int src_step, CvSize src_size,
float* dst, int dst_step, CvSize win_size, CvPoint2D32f center )
{
CvPoint ip;
float a12, a22, b1, b2;
float a, b;
double s = 0;
int i, j;
center.x -= (win_size.width-1)*0.5f;
center.y -= (win_size.height-1)*0.5f;
ip.x = cvFloor( center.x );
ip.y = cvFloor( center.y );
if( win_size.width <= 0 || win_size.height <= 0 )
return CV_BADRANGE_ERR;
a = center.x - ip.x;
b = center.y - ip.y;
a = MAX(a,0.0001f);
a12 = a*(1.f-b);
a22 = a*b;
b1 = 1.f - b;
b2 = b;
s = (1. - a)/a;
src_step /= sizeof(src[0]);
dst_step /= sizeof(dst[0]);
if( 0 <= ip.x && ip.x + win_size.width < src_size.width &&
0 <= ip.y && ip.y + win_size.height < src_size.height )
{
// extracted rectangle is totally inside the image
src += ip.y * src_step + ip.x;
#if 0
if( icvCopySubpix_8u32f_C1R_p &&
icvCopySubpix_8u32f_C1R_p( src, src_step, dst,
dst_step*sizeof(dst[0]), win_size, a, b ) >= 0 )
return CV_OK;
#endif
for( ; win_size.height--; src += src_step, dst += dst_step )
{
float prev = (1 - a)*(b1*CV_8TO32F(src[0]) + b2*CV_8TO32F(src[src_step]));
for( j = 0; j < win_size.width; j++ )
{
float t = a12*CV_8TO32F(src[j+1]) + a22*CV_8TO32F(src[j+1+src_step]);
dst[j] = prev + t;
prev = (float)(t*s);
}
}
}
else
{
CvRect r;
src = (const uchar*)icvAdjustRect( src, src_step*sizeof(*src),
sizeof(*src), src_size, win_size,ip, &r);
for( i = 0; i < win_size.height; i++, dst += dst_step )
{
const uchar *src2 = src + src_step;
if( i < r.y || i >= r.height )
src2 -= src_step;
for( j = 0; j < r.x; j++ )
{
float s0 = CV_8TO32F(src[r.x])*b1 +
CV_8TO32F(src2[r.x])*b2;
dst[j] = (float)(s0);
}
if( j < r.width )
{
float prev = (1 - a)*(b1*CV_8TO32F(src[j]) + b2*CV_8TO32F(src2[j]));
for( ; j < r.width; j++ )
{
float t = a12*CV_8TO32F(src[j+1]) + a22*CV_8TO32F(src2[j+1]);
dst[j] = prev + t;
prev = (float)(t*s);
}
}
for( ; j < win_size.width; j++ )
{
float s0 = CV_8TO32F(src[r.width])*b1 +
CV_8TO32F(src2[r.width])*b2;
dst[j] = (float)(s0);
}
if( i < r.height )
src = src2;
}
}
return CV_OK;
}
#define ICV_32F8U(x) ((uchar)cvRound(x))
#define ICV_DEF_GET_QUADRANGLE_SUB_PIX_FUNC( flavor, srctype, dsttype, \
worktype, cast_macro, cvt ) \
static CvStatus CV_STDCALL \
icvGetQuadrangleSubPix_##flavor##_C1R \
( const srctype * src, int src_step, CvSize src_size, \
dsttype *dst, int dst_step, CvSize win_size, const float *matrix ) \
{ \
int x, y; \
double dx = (win_size.width - 1)*0.5; \
double dy = (win_size.height - 1)*0.5; \
double A11 = matrix[0], A12 = matrix[1], A13 = matrix[2]-A11*dx-A12*dy; \
double A21 = matrix[3], A22 = matrix[4], A23 = matrix[5]-A21*dx-A22*dy; \
\
src_step /= sizeof(srctype); \
dst_step /= sizeof(dsttype); \
\
for( y = 0; y < win_size.height; y++, dst += dst_step ) \
{ \
double xs = A12*y + A13; \
double ys = A22*y + A23; \
double xe = A11*(win_size.width-1) + A12*y + A13; \
double ye = A21*(win_size.width-1) + A22*y + A23; \
\
if( (unsigned)(cvFloor(xs)-1) < (unsigned)(src_size.width - 3) && \
(unsigned)(cvFloor(ys)-1) < (unsigned)(src_size.height - 3) && \
(unsigned)(cvFloor(xe)-1) < (unsigned)(src_size.width - 3) && \
(unsigned)(cvFloor(ye)-1) < (unsigned)(src_size.height - 3)) \
{ \
for( x = 0; x < win_size.width; x++ ) \
{ \
int ixs = cvFloor( xs ); \
int iys = cvFloor( ys ); \
const srctype *ptr = src + src_step*iys + ixs; \
double a = xs - ixs, b = ys - iys, a1 = 1.f - a; \
worktype p0 = cvt(ptr[0])*a1 + cvt(ptr[1])*a; \
worktype p1 = cvt(ptr[src_step])*a1 + cvt(ptr[src_step+1])*a;\
xs += A11; \
ys += A21; \
\
dst[x] = cast_macro(p0 + b * (p1 - p0)); \
} \
} \
else \
{ \
for( x = 0; x < win_size.width; x++ ) \
{ \
int ixs = cvFloor( xs ), iys = cvFloor( ys ); \
double a = xs - ixs, b = ys - iys, a1 = 1.f - a; \
const srctype *ptr0, *ptr1; \
worktype p0, p1; \
xs += A11; ys += A21; \
\
if( (unsigned)iys < (unsigned)(src_size.height-1) ) \
ptr0 = src + src_step*iys, ptr1 = ptr0 + src_step; \
else \
ptr0 = ptr1 = src + (iys < 0 ? 0 : src_size.height-1)*src_step; \
\
if( (unsigned)ixs < (unsigned)(src_size.width-1) ) \
{ \
p0 = cvt(ptr0[ixs])*a1 + cvt(ptr0[ixs+1])*a; \
p1 = cvt(ptr1[ixs])*a1 + cvt(ptr1[ixs+1])*a; \
} \
else \
{ \
ixs = ixs < 0 ? 0 : src_size.width - 1; \
p0 = cvt(ptr0[ixs]); p1 = cvt(ptr1[ixs]); \
} \
dst[x] = cast_macro(p0 + b * (p1 - p0)); \
} \
} \
} \
\
return CV_OK; \
}
ICV_DEF_GET_QUADRANGLE_SUB_PIX_FUNC( 8u32f, uchar, float, double, CV_CAST_32F, CV_8TO32F )
/* Affine tracking algorithm */
CV_IMPL void
cvCalcAffineFlowPyrLK( const void* arrA, const void* arrB,
void* pyrarrA, void* pyrarrB,
const CvPoint2D32f * featuresA,
CvPoint2D32f * featuresB,
float *matrices, int count,
CvSize winSize, int level,
char *status, float *error,
CvTermCriteria criteria, int flags )
{
const int MAX_ITERS = 100;
cv::AutoBuffer<char> _status;
cv::AutoBuffer<uchar> buffer;
cv::AutoBuffer<uchar> pyr_buffer;
CvMat stubA, *imgA = (CvMat*)arrA;
CvMat stubB, *imgB = (CvMat*)arrB;
CvMat pstubA, *pyrA = (CvMat*)pyrarrA;
CvMat pstubB, *pyrB = (CvMat*)pyrarrB;
static const float smoothKernel[] = { 0.09375, 0.3125, 0.09375 }; /* 3/32, 10/32, 3/32 */
int bufferBytes = 0;
uchar **imgI = 0;
uchar **imgJ = 0;
int *step = 0;
double *scale = 0;
CvSize* size = 0;
float *patchI;
float *patchJ;
float *Ix;
float *Iy;
int i, j, k, l;
CvSize patchSize = cvSize( winSize.width * 2 + 1, winSize.height * 2 + 1 );
int patchLen = patchSize.width * patchSize.height;
int patchStep = patchSize.width * sizeof( patchI[0] );
CvSize srcPatchSize = cvSize( patchSize.width + 2, patchSize.height + 2 );
int srcPatchLen = srcPatchSize.width * srcPatchSize.height;
int srcPatchStep = srcPatchSize.width * sizeof( patchI[0] );
CvSize imgSize;
float eps = (float)MIN(winSize.width, winSize.height);
imgA = cvGetMat( imgA, &stubA );
imgB = cvGetMat( imgB, &stubB );
if( CV_MAT_TYPE( imgA->type ) != CV_8UC1 )
CV_Error( CV_StsUnsupportedFormat, "" );
if( !CV_ARE_TYPES_EQ( imgA, imgB ))
CV_Error( CV_StsUnmatchedFormats, "" );
if( !CV_ARE_SIZES_EQ( imgA, imgB ))
CV_Error( CV_StsUnmatchedSizes, "" );
if( imgA->step != imgB->step )
CV_Error( CV_StsUnmatchedSizes, "imgA and imgB must have equal steps" );
if( !matrices )
CV_Error( CV_StsNullPtr, "" );
imgSize = cvGetMatSize( imgA );
if( pyrA )
{
pyrA = cvGetMat( pyrA, &pstubA );
if( pyrA->step*pyrA->height < icvMinimalPyramidSize( imgSize ) )
CV_Error( CV_StsBadArg, "pyramid A has insufficient size" );
}
else
{
pyrA = &pstubA;
pyrA->data.ptr = 0;
}
if( pyrB )
{
pyrB = cvGetMat( pyrB, &pstubB );
if( pyrB->step*pyrB->height < icvMinimalPyramidSize( imgSize ) )
CV_Error( CV_StsBadArg, "pyramid B has insufficient size" );
}
else
{
pyrB = &pstubB;
pyrB->data.ptr = 0;
}
if( count == 0 )
return;
/* check input arguments */
if( !featuresA || !featuresB || !matrices )
CV_Error( CV_StsNullPtr, "" );
if( winSize.width <= 1 || winSize.height <= 1 )
CV_Error( CV_StsOutOfRange, "the search window is too small" );
if( count < 0 )
CV_Error( CV_StsOutOfRange, "" );
icvInitPyramidalAlgorithm( imgA, imgB,
pyrA, pyrB, level, &criteria, MAX_ITERS, flags,
&imgI, &imgJ, &step, &size, &scale, &pyr_buffer );
/* buffer_size = <size for patches> + <extra solver workspace> */
bufferBytes = (srcPatchLen + patchLen*3)*sizeof(patchI[0]) + (36*2 + 6)*sizeof(double);
buffer.allocate(bufferBytes);
if( !status )
{
_status.allocate(count);
status = _status;
}
patchI = (float *)(uchar*)buffer;
patchJ = patchI + srcPatchLen;
Ix = patchJ + patchLen;
Iy = Ix + patchLen;
if( status )
memset( status, 1, count );
if( !(flags & CV_LKFLOW_INITIAL_GUESSES) )
{
memcpy( featuresB, featuresA, count * sizeof( featuresA[0] ));
for( i = 0; i < count * 4; i += 4 )
{
matrices[i] = matrices[i + 3] = 1.f;
matrices[i + 1] = matrices[i + 2] = 0.f;
}
}
for( i = 0; i < count; i++ )
{
featuresB[i].x = (float)(featuresB[i].x * scale[level] * 0.5);
featuresB[i].y = (float)(featuresB[i].y * scale[level] * 0.5);
}
/* do processing from top pyramid level (smallest image)
to the bottom (original image) */
for( l = level; l >= 0; l-- )
{
CvSize levelSize = size[l];
int levelStep = step[l];
/* find flow for each given point at the particular level */
for( i = 0; i < count; i++ )
{
CvPoint2D32f u;
float Av[6];
double G[36];
double meanI = 0, meanJ = 0;
int x, y;
int pt_status = status[i];
CvMat mat;
if( !pt_status )
continue;
Av[0] = matrices[i*4];
Av[1] = matrices[i*4+1];
Av[3] = matrices[i*4+2];
Av[4] = matrices[i*4+3];
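/* upscale the current guess to this (finer) pyramid level:
   point coordinates double from level l+1 to level l */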
Av[2] = featuresB[i].x += featuresB[i].x;
Av[5] = featuresB[i].y += featuresB[i].y;
u.x = (float) (featuresA[i].x * scale[l]);
u.y = (float) (featuresA[i].y * scale[l]);
if( u.x < -eps || u.x >= levelSize.width+eps ||
u.y < -eps || u.y >= levelSize.height+eps ||
icvGetRectSubPix_8u32f_C1R( imgI[l], levelStep,
levelSize, patchI, srcPatchStep, srcPatchSize, u ) < 0 )
{
/* point is outside the image. take the next */
if( l == 0 )
status[i] = 0;
continue;
}
icvCalcIxIy_32f( patchI, srcPatchStep, Ix, Iy,
(srcPatchSize.width-2)*sizeof(patchI[0]), srcPatchSize,
smoothKernel, patchJ );
/* repack patchI (remove borders) */
for( k = 0; k < patchSize.height; k++ )
memcpy( patchI + k * patchSize.width,
patchI + (k + 1) * srcPatchSize.width + 1, patchStep );
memset( G, 0, sizeof( G ));
/* calculate G matrix */
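/* G is the symmetric 6x6 Gauss-Newton normal matrix over the patch;
   the 6 unknowns are the translation update (eta[0], eta[1]) and the
   2x2 affine deformation update (eta[2]..eta[5]). Only the upper
   triangle is accumulated here; it is mirrored below the diagonal. */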
for( y = -winSize.height, k = 0; y <= winSize.height; y++ )
{
for( x = -winSize.width; x <= winSize.width; x++, k++ )
{
double ixix = ((double) Ix[k]) * Ix[k];
double ixiy = ((double) Ix[k]) * Iy[k];
double iyiy = ((double) Iy[k]) * Iy[k];
double xx, xy, yy;
G[0] += ixix;
G[1] += ixiy;
G[2] += x * ixix;
G[3] += y * ixix;
G[4] += x * ixiy;
G[5] += y * ixiy;
// G[6] == G[1]
G[7] += iyiy;
// G[8] == G[4]
// G[9] == G[5]
G[10] += x * iyiy;
G[11] += y * iyiy;
xx = x * x;
xy = x * y;
yy = y * y;
// G[12] == G[2]
// G[13] == G[8] == G[4]
G[14] += xx * ixix;
G[15] += xy * ixix;
G[16] += xx * ixiy;
G[17] += xy * ixiy;
// G[18] == G[3]
// G[19] == G[9]
// G[20] == G[15]
G[21] += yy * ixix;
// G[22] == G[17]
G[23] += yy * ixiy;
// G[24] == G[4]
// G[25] == G[10]
// G[26] == G[16]
// G[27] == G[22]
G[28] += xx * iyiy;
G[29] += xy * iyiy;
// G[30] == G[5]
// G[31] == G[11]
// G[32] == G[17]
// G[33] == G[23]
// G[34] == G[29]
G[35] += yy * iyiy;
meanI += patchI[k];
}
}
meanI /= patchSize.width*patchSize.height;
G[8] = G[4];
G[9] = G[5];
G[22] = G[17];
// fill part of G below its diagonal
for( y = 1; y < 6; y++ )
for( x = 0; x < y; x++ )
G[y * 6 + x] = G[x * 6 + y];
cvInitMatHeader( &mat, 6, 6, CV_64FC1, G );
if( cvInvert( &mat, &mat, CV_SVD ) < 1e-4 )
{
/* bad matrix. take the next point */
if( l == 0 )
status[i] = 0;
continue;
}
for( j = 0; j < criteria.max_iter; j++ )
{
double b[6] = {0,0,0,0,0,0}, eta[6];
double t0, t1, s = 0;
if( Av[2] < -eps || Av[2] >= levelSize.width+eps ||
Av[5] < -eps || Av[5] >= levelSize.height+eps ||
icvGetQuadrangleSubPix_8u32f_C1R( imgJ[l], levelStep,
levelSize, patchJ, patchStep, patchSize, Av ) < 0 )
{
pt_status = 0;
break;
}
for( y = -winSize.height, k = 0, meanJ = 0; y <= winSize.height; y++ )
for( x = -winSize.width; x <= winSize.width; x++, k++ )
meanJ += patchJ[k];
meanJ = meanJ / (patchSize.width * patchSize.height) - meanI;
for( y = -winSize.height, k = 0; y <= winSize.height; y++ )
{
for( x = -winSize.width; x <= winSize.width; x++, k++ )
{
double t = patchI[k] - patchJ[k] + meanJ;
double ixt = Ix[k] * t;
double iyt = Iy[k] * t;
s += t;
b[0] += ixt;
b[1] += iyt;
b[2] += x * ixt;
b[3] += y * ixt;
b[4] += x * iyt;
b[5] += y * iyt;
}
}
for( k = 0; k < 6; k++ )
eta[k] = G[k*6]*b[0] + G[k*6+1]*b[1] + G[k*6+2]*b[2] +
G[k*6+3]*b[3] + G[k*6+4]*b[4] + G[k*6+5]*b[5];
Av[2] = (float)(Av[2] + Av[0] * eta[0] + Av[1] * eta[1]);
Av[5] = (float)(Av[5] + Av[3] * eta[0] + Av[4] * eta[1]);
t0 = Av[0] * (1 + eta[2]) + Av[1] * eta[4];
t1 = Av[0] * eta[3] + Av[1] * (1 + eta[5]);
Av[0] = (float)t0;
Av[1] = (float)t1;
t0 = Av[3] * (1 + eta[2]) + Av[4] * eta[4];
t1 = Av[3] * eta[3] + Av[4] * (1 + eta[5]);
Av[3] = (float)t0;
Av[4] = (float)t1;
if( eta[0] * eta[0] + eta[1] * eta[1] < criteria.epsilon )
break;
}
if( pt_status != 0 || l == 0 )
{
status[i] = (char)pt_status;
featuresB[i].x = Av[2];
featuresB[i].y = Av[5];
matrices[i*4] = Av[0];
matrices[i*4+1] = Av[1];
matrices[i*4+2] = Av[3];
matrices[i*4+3] = Av[4];
}
if( pt_status && l == 0 && error )
{
/* calc error */
double err = 0;
for( y = 0, k = 0; y < patchSize.height; y++ )
{
for( x = 0; x < patchSize.width; x++, k++ )
{
double t = patchI[k] - patchJ[k] + meanJ;
err += t * t;
}
}
error[i] = (float)std::sqrt(err);
}
}
}
}
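A minimal usage sketch for cvCalcAffineFlowPyrLK as defined above (synthetic frames and a single illustrative feature point; NULL pyramid arguments are handled via the internal stubs shown in the code):

// a sketch, not part of this patch; assumes the video module's C API headers
cv::Mat prev(240, 320, CV_8UC1), next;
cv::randu(prev, 0, 255);
prev.copyTo(next);                                  // identical frames => near-identity flow
CvMat prevHdr = prev, nextHdr = next;
CvPoint2D32f ptA[] = { cvPoint2D32f(160.f, 120.f) };
CvPoint2D32f ptB[1];
float matrices[4];                                  // one 2x2 affine matrix per point
char status[1];
cvCalcAffineFlowPyrLK( &prevHdr, &nextHdr, 0, 0, ptA, ptB, matrices, 1,
                       cvSize(15, 15), 3, status, 0,
                       cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 30, 0.01), 0 );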

@ -7,9 +7,11 @@
// copy or use the software. // copy or use the software.
// //
// //
// Intel License Agreement // License Agreement
// For Open Source Computer Vision Library
// //
// Copyright (C) 2000, Intel Corporation, all rights reserved. // Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -22,7 +24,7 @@
// this list of conditions and the following disclaimer in the documentation // this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution. // and/or other materials provided with the distribution.
// //
// * The name of Intel Corporation may not be used to endorse or promote products // * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission. // derived from this software without specific prior written permission.
// //
// This software is provided by the copyright holders and contributors "as is" and // This software is provided by the copyright holders and contributors "as is" and
@ -58,15 +60,6 @@
namespace cv namespace cv
{ {
BackgroundSubtractor::~BackgroundSubtractor() {}
void BackgroundSubtractor::operator()(InputArray, OutputArray, double)
{
}
void BackgroundSubtractor::getBackgroundImage(OutputArray) const
{
}
static const int defaultNMixtures = 5; static const int defaultNMixtures = 5;
static const int defaultHistory = 200; static const int defaultHistory = 200;
static const double defaultBackgroundRatio = 0.7; static const double defaultBackgroundRatio = 0.7;
@ -74,55 +67,108 @@ static const double defaultVarThreshold = 2.5*2.5;
static const double defaultNoiseSigma = 30*0.5; static const double defaultNoiseSigma = 30*0.5;
static const double defaultInitialWeight = 0.05; static const double defaultInitialWeight = 0.05;
BackgroundSubtractorMOG::BackgroundSubtractorMOG() class BackgroundSubtractorMOGImpl : public BackgroundSubtractorMOG
{ {
frameSize = Size(0,0); public:
frameType = 0; //! the default constructor
BackgroundSubtractorMOGImpl()
nframes = 0; {
nmixtures = defaultNMixtures; frameSize = Size(0,0);
history = defaultHistory; frameType = 0;
varThreshold = defaultVarThreshold;
backgroundRatio = defaultBackgroundRatio; nframes = 0;
noiseSigma = defaultNoiseSigma; nmixtures = defaultNMixtures;
} history = defaultHistory;
varThreshold = defaultVarThreshold;
backgroundRatio = defaultBackgroundRatio;
noiseSigma = defaultNoiseSigma;
name_ = "BackgroundSubtractor.MOG";
}
// the full constructor that takes the length of the history,
// the number of gaussian mixtures, the background ratio parameter and the noise strength
BackgroundSubtractorMOGImpl(int _history, int _nmixtures, double _backgroundRatio, double _noiseSigma=0)
{
frameSize = Size(0,0);
frameType = 0;
nframes = 0;
nmixtures = std::min(_nmixtures > 0 ? _nmixtures : defaultNMixtures, 8);
history = _history > 0 ? _history : defaultHistory;
varThreshold = defaultVarThreshold;
backgroundRatio = std::min(_backgroundRatio > 0 ? _backgroundRatio : 0.95, 1.);
noiseSigma = _noiseSigma <= 0 ? defaultNoiseSigma : _noiseSigma;
}
BackgroundSubtractorMOG::BackgroundSubtractorMOG(int _history, int _nmixtures, //! the update operator
double _backgroundRatio, virtual void apply(InputArray image, OutputArray fgmask, double learningRate=0);
double _noiseSigma)
{
frameSize = Size(0,0);
frameType = 0;
nframes = 0;
nmixtures = std::min(_nmixtures > 0 ? _nmixtures : defaultNMixtures, 8);
history = _history > 0 ? _history : defaultHistory;
varThreshold = defaultVarThreshold;
backgroundRatio = std::min(_backgroundRatio > 0 ? _backgroundRatio : 0.95, 1.);
noiseSigma = _noiseSigma <= 0 ? defaultNoiseSigma : _noiseSigma;
}
BackgroundSubtractorMOG::~BackgroundSubtractorMOG() //! re-initialization method
{ virtual void initialize(Size _frameSize, int _frameType)
} {
frameSize = _frameSize;
frameType = _frameType;
nframes = 0;
int nchannels = CV_MAT_CN(frameType);
CV_Assert( CV_MAT_DEPTH(frameType) == CV_8U );
// for each gaussian mixture of each pixel bg model we store ...
// the mixture sort key (w/sum_of_variances), the mixture weight (w),
// the mean (nchannels values) and
// the diagonal covariance matrix (another nchannels values)
bgmodel.create( 1, frameSize.height*frameSize.width*nmixtures*(2 + 2*nchannels), CV_32F );
bgmodel = Scalar::all(0);
}
virtual AlgorithmInfo* info() const { return 0; }
void BackgroundSubtractorMOG::initialize(Size _frameSize, int _frameType) virtual void getBackgroundImage(OutputArray) const
{ {
frameSize = _frameSize; CV_Error( CV_StsNotImplemented, "" );
frameType = _frameType; }
nframes = 0;
virtual int getHistory() const { return history; }
int nchannels = CV_MAT_CN(frameType); virtual void setHistory(int _nframes) { history = _nframes; }
CV_Assert( CV_MAT_DEPTH(frameType) == CV_8U );
virtual int getNMixtures() const { return nmixtures; }
// for each gaussian mixture of each pixel bg model we store ... virtual void setNMixtures(int nmix) { nmixtures = nmix; }
// the mixture sort key (w/sum_of_variances), the mixture weight (w),
// the mean (nchannels values) and virtual double getBackgroundRatio() const { return backgroundRatio; }
// the diagonal covariance matrix (another nchannels values) virtual void setBackgroundRatio(double _backgroundRatio) { backgroundRatio = _backgroundRatio; }
bgmodel.create( 1, frameSize.height*frameSize.width*nmixtures*(2 + 2*nchannels), CV_32F );
bgmodel = Scalar::all(0); virtual double getNoiseSigma() const { return noiseSigma; }
} virtual void setNoiseSigma(double _noiseSigma) { noiseSigma = _noiseSigma; }
virtual void write(FileStorage& fs) const
{
fs << "name" << name_
<< "history" << history
<< "nmixtures" << nmixtures
<< "backgroundRatio" << backgroundRatio
<< "noiseSigma" << noiseSigma;
}
virtual void read(const FileNode& fn)
{
CV_Assert( (std::string)fn["name"] == name_ );
history = (int)fn["history"];
nmixtures = (int)fn["nmixtures"];
backgroundRatio = (double)fn["backgroundRatio"];
noiseSigma = (double)fn["noiseSigma"];
}
protected:
Size frameSize;
int frameType;
Mat bgmodel;
int nframes;
int history;
int nmixtures;
double varThreshold;
double backgroundRatio;
double noiseSigma;
std::string name_;
};
template<typename VT> struct MixData template<typename VT> struct MixData
@ -391,7 +437,7 @@ static void process8uC3( const Mat& image, Mat& fgmask, double learningRate,
} }
} }
void BackgroundSubtractorMOG::operator()(InputArray _image, OutputArray _fgmask, double learningRate) void BackgroundSubtractorMOGImpl::apply(InputArray _image, OutputArray _fgmask, double learningRate)
{ {
Mat image = _image.getMat(); Mat image = _image.getMat();
bool needToInitialize = nframes == 0 || learningRate >= 1 || image.size() != frameSize || image.type() != frameType; bool needToInitialize = nframes == 0 || learningRate >= 1 || image.size() != frameSize || image.type() != frameType;
@ -415,6 +461,12 @@ void BackgroundSubtractorMOG::operator()(InputArray _image, OutputArray _fgmask,
CV_Error( CV_StsUnsupportedFormat, "Only 1- and 3-channel 8-bit images are supported in BackgroundSubtractorMOG" ); CV_Error( CV_StsUnsupportedFormat, "Only 1- and 3-channel 8-bit images are supported in BackgroundSubtractorMOG" );
} }
Ptr<BackgroundSubtractorMOG> createBackgroundSubtractorMOG(int history, int nmixtures,
double backgroundRatio, double noiseSigma)
{
return new BackgroundSubtractorMOGImpl(history, nmixtures, backgroundRatio, noiseSigma);
}
} }
/* End of file. */ /* End of file. */
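With this patch the MOG model is created through a factory and its parameters are reached via accessors; a minimal usage sketch (the image file name is illustrative):

cv::Ptr<cv::BackgroundSubtractorMOG> mog =
    cv::createBackgroundSubtractorMOG(200, 5, 0.7, 15);
mog->setNMixtures(4);                    // parameters are now set via accessors
cv::Mat frame = cv::imread("frame0.png"), fgmask;
mog->apply(frame, fgmask, 0.01);         // replaces the old operator() call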

@ -7,9 +7,11 @@
// copy or use the software. // copy or use the software.
// //
// //
// Intel License Agreement // License Agreement
// For Open Source Computer Vision Library
// //
// Copyright (C) 2000, Intel Corporation, all rights reserved. // Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -22,7 +24,7 @@
// this list of conditions and the following disclaimer in the documentation // this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution. // and/or other materials provided with the distribution.
// //
// * The name of Intel Corporation may not be used to endorse or promote products // * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission. // derived from this software without specific prior written permission.
// //
// This software is provided by the copyright holders and contributors "as is" and // This software is provided by the copyright holders and contributors "as is" and
@ -114,6 +116,213 @@ static const float defaultfCT2 = 0.05f; // complexity reduction prior constant 0
static const unsigned char defaultnShadowDetection2 = (unsigned char)127; // value to use in the segmentation mask for shadows, set 0 not to do shadow detection static const unsigned char defaultnShadowDetection2 = (unsigned char)127; // value to use in the segmentation mask for shadows, set 0 not to do shadow detection
static const float defaultfTau = 0.5f; // Tau - shadow threshold, see the paper for explanation static const float defaultfTau = 0.5f; // Tau - shadow threshold, see the paper for explanation
class BackgroundSubtractorMOG2Impl : public BackgroundSubtractorMOG2
{
public:
//! the default constructor
BackgroundSubtractorMOG2Impl()
{
frameSize = Size(0,0);
frameType = 0;
nframes = 0;
history = defaultHistory2;
varThreshold = defaultVarThreshold2;
bShadowDetection = 1;
nmixtures = defaultNMixtures2;
backgroundRatio = defaultBackgroundRatio2;
fVarInit = defaultVarInit2;
fVarMax = defaultVarMax2;
fVarMin = defaultVarMin2;
varThresholdGen = defaultVarThresholdGen2;
fCT = defaultfCT2;
nShadowDetection = defaultnShadowDetection2;
fTau = defaultfTau;
}
//! the full constructor that takes the length of the history,
// the variance threshold and the shadow detection flag
BackgroundSubtractorMOG2Impl(int _history, float _varThreshold, bool _bShadowDetection=true)
{
frameSize = Size(0,0);
frameType = 0;
nframes = 0;
history = _history > 0 ? _history : defaultHistory2;
varThreshold = (_varThreshold>0)? _varThreshold : defaultVarThreshold2;
bShadowDetection = _bShadowDetection;
nmixtures = defaultNMixtures2;
backgroundRatio = defaultBackgroundRatio2;
fVarInit = defaultVarInit2;
fVarMax = defaultVarMax2;
fVarMin = defaultVarMin2;
varThresholdGen = defaultVarThresholdGen2;
fCT = defaultfCT2;
nShadowDetection = defaultnShadowDetection2;
fTau = defaultfTau;
name_ = "BackgroundSubtractor.MOG2";
}
//! the destructor
~BackgroundSubtractorMOG2Impl() {}
//! the update operator
void apply(InputArray image, OutputArray fgmask, double learningRate=-1);
//! computes a background image which is the mean of all background gaussians
virtual void getBackgroundImage(OutputArray backgroundImage) const;
//! re-initialization method
void initialize(Size _frameSize, int _frameType)
{
frameSize = _frameSize;
frameType = _frameType;
nframes = 0;
int nchannels = CV_MAT_CN(frameType);
CV_Assert( nchannels <= CV_CN_MAX );
// for each gaussian mixture of each pixel bg model we store ...
// the mixture weight (w),
// the mean (nchannels values) and
// the covariance
bgmodel.create( 1, frameSize.height*frameSize.width*nmixtures*(2 + nchannels), CV_32F );
//make the array for keeping track of the used modes per pixel - all zeros at start
bgmodelUsedModes.create(frameSize,CV_8U);
bgmodelUsedModes = Scalar::all(0);
}
virtual AlgorithmInfo* info() const { return 0; }
virtual int getHistory() const { return history; }
virtual void setHistory(int _nframes) { history = _nframes; }
virtual int getNMixtures() const { return nmixtures; }
virtual void setNMixtures(int nmix) { nmixtures = nmix; }
virtual double getBackgroundRatio() const { return backgroundRatio; }
virtual void setBackgroundRatio(double _backgroundRatio) { backgroundRatio = (float)_backgroundRatio; }
virtual double getVarThreshold() const { return varThreshold; }
virtual void setVarThreshold(double _varThreshold) { varThreshold = _varThreshold; }
virtual double getVarThresholdGen() const { return varThresholdGen; }
virtual void setVarThresholdGen(double _varThresholdGen) { varThresholdGen = (float)_varThresholdGen; }
virtual double getVarInit() const { return fVarInit; }
virtual void setVarInit(double varInit) { fVarInit = (float)varInit; }
virtual double getVarMin() const { return fVarMin; }
virtual void setVarMin(double varMin) { fVarMin = (float)varMin; }
virtual double getVarMax() const { return fVarMax; }
virtual void setVarMax(double varMax) { fVarMax = (float)varMax; }
virtual double getComplexityReductionThreshold() const { return fCT; }
virtual void setComplexityReductionThreshold(double ct) { fCT = (float)ct; }
virtual bool getDetectShadows() const { return bShadowDetection; }
virtual void setDetectShadows(bool detectshadows) { bShadowDetection = detectshadows; }
virtual int getShadowValue() const { return nShadowDetection; }
virtual void setShadowValue(int value) { nShadowDetection = (uchar)value; }
virtual double getShadowThreshold() const { return fTau; }
virtual void setShadowThreshold(double value) { fTau = (float)value; }
virtual void write(FileStorage& fs) const
{
fs << "name" << name_
<< "history" << history
<< "nmixtures" << nmixtures
<< "backgroundRatio" << backgroundRatio
<< "varThreshold" << varThreshold
<< "varThresholdGen" << varThresholdGen
<< "varInit" << fVarInit
<< "varMin" << fVarMin
<< "varMax" << fVarMax
<< "complexityReductionThreshold" << fCT
<< "detectShadows" << (int)bShadowDetection
<< "shadowValue" << (int)nShadowDetection
<< "shadowThreshold" << fTau;
}
virtual void read(const FileNode& fn)
{
CV_Assert( (std::string)fn["name"] == name_ );
history = (int)fn["history"];
nmixtures = (int)fn["nmixtures"];
backgroundRatio = (float)fn["backgroundRatio"];
varThreshold = (double)fn["varThreshold"];
varThresholdGen = (float)fn["varThresholdGen"];
fVarInit = (float)fn["varInit"];
fVarMin = (float)fn["varMin"];
fVarMax = (float)fn["varMax"];
fCT = (float)fn["complexityReductionThreshold"];
bShadowDetection = (int)fn["detectShadows"] != 0;
nShadowDetection = saturate_cast<uchar>((int)fn["shadowValue"]);
fTau = (float)fn["shadowThreshold"];
}
protected:
Size frameSize;
int frameType;
Mat bgmodel;
Mat bgmodelUsedModes;//keep track of number of modes per pixel
int nframes;
int history;
int nmixtures;
//! the maximum allowed number of mixture components;
//! the actual number is determined dynamically per pixel
double varThreshold;
// threshold on the squared Mahalanobis distance to decide if it is well described
// by the background model or not. Related to Cthr from the paper.
// This does not influence the update of the background. A typical value could be 4 sigma
// and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
/////////////////////////
// less important parameters - things you might change but be careful
////////////////////////
float backgroundRatio;
// corresponds to TB=1-cf from the paper: the threshold at which a component
// becomes significant enough to be included into the background model.
// With cf=0.1, TB=0.9. For alpha=0.001 it means that the mode should exist for
// approximately 105 frames before it is considered foreground.
// float noiseSigma;
float varThresholdGen;
// corresponds to Tg - threshold on the squared Mahalanobis distance to decide
// when a sample is close to one of the existing components. If it is not close
// to any, a new component is generated. With 3 sigma, Tg=3*3=9.
// Smaller Tg leads to more generated components; higher Tg may lead to a
// small number of components, but they can grow too large.
float fVarInit;
float fVarMin;
float fVarMax;
// initial variance for the newly generated components;
// it will influence the speed of adaptation. A good guess should be made:
// a simple way is to estimate the typical standard deviation from the images
// (10 is used here as a reasonable value).
// fVarMin and fVarMax can be used to further control the variance.
float fCT;//CT - complexity reduction prior
//this is related to the number of samples needed to accept that a component
//actually exists. We use CT=0.05 of all the samples. By setting CT=0 you get
//the standard Stauffer&Grimson algorithm (maybe not exact but very similar)
//shadow detection parameters
bool bShadowDetection; // whether to do shadow detection (default: true)
unsigned char nShadowDetection; // value inserted into the mask where a shadow is detected (default: 127)
float fTau;
// Tau - shadow threshold. A pixel is detected as a shadow if it is a darker
// version of the background. Tau is a threshold on how much darker the shadow
// can be: Tau = 0.5 means that a pixel more than 2 times darker is not a shadow.
// See: Prati, Mikic, Trivedi, Cucchiara, "Detecting Moving Shadows...", IEEE PAMI, 2003.
std::string name_;
};
struct GaussBGStatModel2Params struct GaussBGStatModel2Params
{ {
//image info //image info
@ -248,8 +457,9 @@ detectShadowGMM(const float* data, int nchannels, int nmodes,
//IEEE Trans. on Pattern Analysis and Machine Intelligence, vol.26, no.5, pages 651-656, 2004 //IEEE Trans. on Pattern Analysis and Machine Intelligence, vol.26, no.5, pages 651-656, 2004
//http://www.zoranz.net/Publications/zivkovic2004PAMI.pdf //http://www.zoranz.net/Publications/zivkovic2004PAMI.pdf
struct MOG2Invoker class MOG2Invoker : public ParallelLoopBody
{ {
public:
MOG2Invoker(const Mat& _src, Mat& _dst, MOG2Invoker(const Mat& _src, Mat& _dst,
GMM* _gmm, float* _mean, GMM* _gmm, float* _mean,
uchar* _modesUsed, uchar* _modesUsed,
@ -280,9 +490,9 @@ struct MOG2Invoker
cvtfunc = src->depth() != CV_32F ? getConvertFunc(src->depth(), CV_32F) : 0; cvtfunc = src->depth() != CV_32F ? getConvertFunc(src->depth(), CV_32F) : 0;
} }
void operator()(const BlockedRange& range) const void operator()(const Range& range) const
{ {
int y0 = range.begin(), y1 = range.end(); int y0 = range.start, y1 = range.end;
int ncols = src->cols, nchannels = src->channels(); int ncols = src->cols, nchannels = src->channels();
AutoBuffer<float> buf(src->cols*nchannels); AutoBuffer<float> buf(src->cols*nchannels);
float alpha1 = 1.f - alphaT; float alpha1 = 1.f - alphaT;
@ -479,75 +689,7 @@ struct MOG2Invoker
BinaryFunc cvtfunc; BinaryFunc cvtfunc;
}; };
BackgroundSubtractorMOG2::BackgroundSubtractorMOG2() void BackgroundSubtractorMOG2Impl::apply(InputArray _image, OutputArray _fgmask, double learningRate)
{
frameSize = Size(0,0);
frameType = 0;
nframes = 0;
history = defaultHistory2;
varThreshold = defaultVarThreshold2;
bShadowDetection = 1;
nmixtures = defaultNMixtures2;
backgroundRatio = defaultBackgroundRatio2;
fVarInit = defaultVarInit2;
fVarMax = defaultVarMax2;
fVarMin = defaultVarMin2;
varThresholdGen = defaultVarThresholdGen2;
fCT = defaultfCT2;
nShadowDetection = defaultnShadowDetection2;
fTau = defaultfTau;
}
BackgroundSubtractorMOG2::BackgroundSubtractorMOG2(int _history, float _varThreshold, bool _bShadowDetection)
{
frameSize = Size(0,0);
frameType = 0;
nframes = 0;
history = _history > 0 ? _history : defaultHistory2;
varThreshold = (_varThreshold>0)? _varThreshold : defaultVarThreshold2;
bShadowDetection = _bShadowDetection;
nmixtures = defaultNMixtures2;
backgroundRatio = defaultBackgroundRatio2;
fVarInit = defaultVarInit2;
fVarMax = defaultVarMax2;
fVarMin = defaultVarMin2;
varThresholdGen = defaultVarThresholdGen2;
fCT = defaultfCT2;
nShadowDetection = defaultnShadowDetection2;
fTau = defaultfTau;
}
BackgroundSubtractorMOG2::~BackgroundSubtractorMOG2()
{
}
void BackgroundSubtractorMOG2::initialize(Size _frameSize, int _frameType)
{
frameSize = _frameSize;
frameType = _frameType;
nframes = 0;
int nchannels = CV_MAT_CN(frameType);
CV_Assert( nchannels <= CV_CN_MAX );
// for each gaussian mixture of each pixel bg model we store ...
// the mixture weight (w),
// the mean (nchannels values) and
// the covariance
bgmodel.create( 1, frameSize.height*frameSize.width*nmixtures*(2 + nchannels), CV_32F );
//make the array for keeping track of the used modes per pixel - all zeros at start
bgmodelUsedModes.create(frameSize,CV_8U);
bgmodelUsedModes = Scalar::all(0);
}
void BackgroundSubtractorMOG2::operator()(InputArray _image, OutputArray _fgmask, double learningRate)
{ {
Mat image = _image.getMat(); Mat image = _image.getMat();
bool needToInitialize = nframes == 0 || learningRate >= 1 || image.size() != frameSize || image.type() != frameType; bool needToInitialize = nframes == 0 || learningRate >= 1 || image.size() != frameSize || image.type() != frameType;
@ -562,18 +704,19 @@ void BackgroundSubtractorMOG2::operator()(InputArray _image, OutputArray _fgmask
learningRate = learningRate >= 0 && nframes > 1 ? learningRate : 1./std::min( 2*nframes, history ); learningRate = learningRate >= 0 && nframes > 1 ? learningRate : 1./std::min( 2*nframes, history );
CV_Assert(learningRate >= 0); CV_Assert(learningRate >= 0);
parallel_for(BlockedRange(0, image.rows), parallel_for_(Range(0, image.rows),
MOG2Invoker(image, fgmask, MOG2Invoker(image, fgmask,
(GMM*)bgmodel.data, (GMM*)bgmodel.data,
(float*)(bgmodel.data + sizeof(GMM)*nmixtures*image.rows*image.cols), (float*)(bgmodel.data + sizeof(GMM)*nmixtures*image.rows*image.cols),
bgmodelUsedModes.data, nmixtures, (float)learningRate, bgmodelUsedModes.data, nmixtures, (float)learningRate,
(float)varThreshold, (float)varThreshold,
backgroundRatio, varThresholdGen, backgroundRatio, varThresholdGen,
fVarInit, fVarMin, fVarMax, float(-learningRate*fCT), fTau, fVarInit, fVarMin, fVarMax, float(-learningRate*fCT), fTau,
bShadowDetection, nShadowDetection)); bShadowDetection, nShadowDetection),
image.total()/(double)(1 << 16));
} }
void BackgroundSubtractorMOG2::getBackgroundImage(OutputArray backgroundImage) const void BackgroundSubtractorMOG2Impl::getBackgroundImage(OutputArray backgroundImage) const
{ {
int nchannels = CV_MAT_CN(frameType); int nchannels = CV_MAT_CN(frameType);
CV_Assert( nchannels == 3 ); CV_Assert( nchannels == 3 );
@ -626,6 +769,13 @@ void BackgroundSubtractorMOG2::getBackgroundImage(OutputArray backgroundImage) c
} }
} }
Ptr<BackgroundSubtractorMOG2> createBackgroundSubtractorMOG2(int _history, double _varThreshold,
bool _bShadowDetection)
{
return new BackgroundSubtractorMOG2Impl(_history, (float)_varThreshold, _bShadowDetection);
}
} }
/* End of file. */ /* End of file. */
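Likewise for MOG2; a minimal sketch against the new interface (the video file name is illustrative):

cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 =
    cv::createBackgroundSubtractorMOG2(500, 16, true);
mog2->setShadowValue(127);                 // shadows are marked with 127 in the mask
cv::VideoCapture cap("video.avi");
cv::Mat frame, fgmask, bgimg;
while (cap.read(frame))
{
    mog2->apply(frame, fgmask);            // learningRate=-1 picks the rate automatically
    mog2->getBackgroundImage(bgimg);       // mean of the background gaussians (3-channel input only)
}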

@ -8,8 +8,10 @@
// //
// //
// License Agreement // License Agreement
// For Open Source Computer Vision Library
// //
// Copyright (C) 2000, Intel Corporation, all rights reserved. // Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -22,7 +24,7 @@
// this list of conditions and the following disclaimer in the documentation // this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution. // and/or other materials provided with the distribution.
// //
// * The name of Intel Corporation may not be used to endorse or promote products // * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission. // derived from this software without specific prior written permission.
// //
// This software is provided by the copyright holders and contributors "as is" and // This software is provided by the copyright holders and contributors "as is" and
@ -48,36 +50,166 @@
#include "precomp.hpp" #include "precomp.hpp"
cv::BackgroundSubtractorGMG::BackgroundSubtractorGMG() namespace cv
{ {
/*
* Default Parameter Values. Override with algorithm "set" method.
*/
maxFeatures = 64;
learningRate = 0.025;
numInitializationFrames = 120;
quantizationLevels = 16;
backgroundPrior = 0.8;
decisionThreshold = 0.8;
smoothingRadius = 7;
updateBackgroundModel = true;
}
cv::BackgroundSubtractorGMG::~BackgroundSubtractorGMG() class BackgroundSubtractorGMGImpl : public BackgroundSubtractorGMG
{ {
} public:
BackgroundSubtractorGMGImpl()
{
/*
* Default Parameter Values. Override with algorithm "set" method.
*/
maxFeatures = 64;
learningRate = 0.025;
numInitializationFrames = 120;
quantizationLevels = 16;
backgroundPrior = 0.8;
decisionThreshold = 0.8;
smoothingRadius = 7;
updateBackgroundModel = true;
minVal_ = maxVal_ = 0;
name_ = "BackgroundSubtractor.GMG";
}
~BackgroundSubtractorGMGImpl()
{
}
virtual AlgorithmInfo* info() const { return 0; }
/**
* Validate parameters and set up data structures for appropriate image size.
* Must call before running on data.
* @param frameSize input frame size
* @param min minimum value taken on by pixels in image sequence. Usually 0
* @param max maximum value taken on by pixels in image sequence. e.g. 1.0 or 255
*/
void initialize(Size frameSize, double minVal, double maxVal);
/**
* Performs single-frame background subtraction and builds up a statistical background image
* model.
* @param image Input image
* @param fgmask Output mask image representing foreground and background pixels
*/
virtual void apply(InputArray image, OutputArray fgmask, double learningRate=-1.0);
/**
* Releases all inner buffers.
*/
void release();
virtual int getMaxFeatures() const { return maxFeatures; }
virtual void setMaxFeatures(int _maxFeatures) { maxFeatures = _maxFeatures; }
virtual double getDefaultLearningRate() const { return learningRate; }
virtual void setDefaultLearningRate(double lr) { learningRate = lr; }
virtual int getNumFrames() const { return numInitializationFrames; }
virtual void setNumFrames(int nframes) { numInitializationFrames = nframes; }
virtual int getQuantizationLevels() const { return quantizationLevels; }
virtual void setQuantizationLevels(int nlevels) { quantizationLevels = nlevels; }
virtual double getBackgroundPrior() const { return backgroundPrior; }
virtual void setBackgroundPrior(double bgprior) { backgroundPrior = bgprior; }
virtual int getSmoothingRadius() const { return smoothingRadius; }
virtual void setSmoothingRadius(int radius) { smoothingRadius = radius; }
virtual double getDecisionThreshold() const { return decisionThreshold; }
virtual void setDecisionThreshold(double thresh) { decisionThreshold = thresh; }
virtual bool getUpdateBackgroundModel() const { return updateBackgroundModel; }
virtual void setUpdateBackgroundModel(bool update) { updateBackgroundModel = update; }
virtual double getMinVal() const { return minVal_; }
virtual void setMinVal(double val) { minVal_ = val; }
void cv::BackgroundSubtractorGMG::initialize(cv::Size frameSize, double min, double max) virtual double getMaxVal() const { return maxVal_; }
virtual void setMaxVal(double val) { maxVal_ = val; }
virtual void getBackgroundImage(OutputArray) const
{
CV_Error( CV_StsNotImplemented, "" );
}
virtual void write(FileStorage& fs) const
{
fs << "name" << name_
<< "maxFeatures" << maxFeatures
<< "defaultLearningRate" << learningRate
<< "numFrames" << numInitializationFrames
<< "quantizationLevels" << quantizationLevels
<< "backgroundPrior" << backgroundPrior
<< "decisionThreshold" << decisionThreshold
<< "smoothingRadius" << smoothingRadius
<< "updateBackgroundModel" << (int)updateBackgroundModel;
// we do not save minVal_ & maxVal_, since they depend on the image type.
}
virtual void read(const FileNode& fn)
{
CV_Assert( (std::string)fn["name"] == name_ );
maxFeatures = (int)fn["maxFeatures"];
learningRate = (double)fn["defaultLearningRate"];
numInitializationFrames = (int)fn["numFrames"];
quantizationLevels = (int)fn["quantizationLevels"];
backgroundPrior = (double)fn["backgroundPrior"];
smoothingRadius = (int)fn["smoothingRadius"];
decisionThreshold = (double)fn["decisionThreshold"];
updateBackgroundModel = (int)fn["updateBackgroundModel"] != 0;
minVal_ = maxVal_ = 0;
frameSize_ = Size();
}
//! Total number of distinct colors to maintain in histogram.
int maxFeatures;
//! Set between 0.0 and 1.0, determines how quickly features are "forgotten" from histograms.
double learningRate;
//! Number of frames of video to use to initialize histograms.
int numInitializationFrames;
//! Number of discrete levels in each channel to be used in histograms.
int quantizationLevels;
//! Prior probability that any given pixel is a background pixel. A sensitivity parameter.
double backgroundPrior;
//! Value above which pixel is determined to be FG.
double decisionThreshold;
//! Smoothing radius, in pixels, for cleaning up FG image.
int smoothingRadius;
//! Perform background model update
bool updateBackgroundModel;
private:
double maxVal_;
double minVal_;
Size frameSize_;
int frameNum_;
std::string name_;
Mat_<int> nfeatures_;
Mat_<unsigned int> colors_;
Mat_<float> weights_;
Mat buf_;
};
void BackgroundSubtractorGMGImpl::initialize(Size frameSize, double minVal, double maxVal)
{ {
CV_Assert(min < max); CV_Assert(minVal < maxVal);
CV_Assert(maxFeatures > 0); CV_Assert(maxFeatures > 0);
CV_Assert(learningRate >= 0.0 && learningRate <= 1.0); CV_Assert(learningRate >= 0.0 && learningRate <= 1.0);
CV_Assert(numInitializationFrames >= 1); CV_Assert(numInitializationFrames >= 1);
CV_Assert(quantizationLevels >= 1 && quantizationLevels <= 255); CV_Assert(quantizationLevels >= 1 && quantizationLevels <= 255);
CV_Assert(backgroundPrior >= 0.0 && backgroundPrior <= 1.0); CV_Assert(backgroundPrior >= 0.0 && backgroundPrior <= 1.0);
minVal_ = min; minVal_ = minVal;
maxVal_ = max; maxVal_ = maxVal;
frameSize_ = frameSize; frameSize_ = frameSize;
frameNum_ = 0; frameNum_ = 0;
@ -86,7 +218,7 @@ void cv::BackgroundSubtractorGMG::initialize(cv::Size frameSize, double min, dou
colors_.create(frameSize_.area(), maxFeatures); colors_.create(frameSize_.area(), maxFeatures);
weights_.create(frameSize_.area(), maxFeatures); weights_.create(frameSize_.area(), maxFeatures);
nfeatures_.setTo(cv::Scalar::all(0)); nfeatures_.setTo(Scalar::all(0));
} }
namespace namespace
@ -181,10 +313,10 @@ namespace
} }
}; };
class GMG_LoopBody : public cv::ParallelLoopBody class GMG_LoopBody : public ParallelLoopBody
{ {
public: public:
GMG_LoopBody(const cv::Mat& frame, const cv::Mat& fgmask, const cv::Mat_<int>& nfeatures, const cv::Mat_<unsigned int>& colors, const cv::Mat_<float>& weights, GMG_LoopBody(const Mat& frame, const Mat& fgmask, const Mat_<int>& nfeatures, const Mat_<unsigned int>& colors, const Mat_<float>& weights,
int maxFeatures, double learningRate, int numInitializationFrames, int quantizationLevels, double backgroundPrior, double decisionThreshold, int maxFeatures, double learningRate, int numInitializationFrames, int quantizationLevels, double backgroundPrior, double decisionThreshold,
double maxVal, double minVal, int frameNum, bool updateBackgroundModel) : double maxVal, double minVal, int frameNum, bool updateBackgroundModel) :
frame_(frame), fgmask_(fgmask), nfeatures_(nfeatures), colors_(colors), weights_(weights), frame_(frame), fgmask_(fgmask), nfeatures_(nfeatures), colors_(colors), weights_(weights),
@ -194,16 +326,16 @@ namespace
{ {
} }
void operator() (const cv::Range& range) const; void operator() (const Range& range) const;
private: private:
cv::Mat frame_; Mat frame_;
mutable cv::Mat_<uchar> fgmask_; mutable Mat_<uchar> fgmask_;
mutable cv::Mat_<int> nfeatures_; mutable Mat_<int> nfeatures_;
mutable cv::Mat_<unsigned int> colors_; mutable Mat_<unsigned int> colors_;
mutable cv::Mat_<float> weights_; mutable Mat_<float> weights_;
int maxFeatures_; int maxFeatures_;
double learningRate_; double learningRate_;
@ -218,7 +350,7 @@ namespace
int frameNum_; int frameNum_;
}; };
void GMG_LoopBody::operator() (const cv::Range& range) const void GMG_LoopBody::operator() (const Range& range) const
{ {
typedef unsigned int (*func_t)(const void* src_, int x, int cn, double minVal, double maxVal, int quantizationLevels); typedef unsigned int (*func_t)(const void* src_, int x, int cn, double minVal, double maxVal, int quantizationLevels);
static const func_t funcs[] = static const func_t funcs[] =
@ -296,7 +428,7 @@ namespace
} }
} }
void cv::BackgroundSubtractorGMG::operator ()(InputArray _frame, OutputArray _fgmask, double newLearningRate) void BackgroundSubtractorGMGImpl::apply(InputArray _frame, OutputArray _fgmask, double newLearningRate)
{ {
Mat frame = _frame.getMat(); Mat frame = _frame.getMat();
@ -310,7 +442,16 @@ void cv::BackgroundSubtractorGMG::operator ()(InputArray _frame, OutputArray _fg
} }
if (frame.size() != frameSize_) if (frame.size() != frameSize_)
initialize(frame.size(), 0.0, frame.depth() == CV_8U ? 255.0 : frame.depth() == CV_16U ? std::numeric_limits<ushort>::max() : 1.0); {
double minval = minVal_;
double maxval = maxVal_;
if( minVal_ == 0 && maxVal_ == 0 )
{
minval = 0;
maxval = frame.depth() == CV_8U ? 255.0 : frame.depth() == CV_16U ? std::numeric_limits<ushort>::max() : 1.0;
}
initialize(frame.size(), minval, maxval);
}
_fgmask.create(frameSize_, CV_8UC1); _fgmask.create(frameSize_, CV_8UC1);
Mat fgmask = _fgmask.getMat(); Mat fgmask = _fgmask.getMat();
@ -323,19 +464,58 @@ void cv::BackgroundSubtractorGMG::operator ()(InputArray _frame, OutputArray _fg
if (smoothingRadius > 0) if (smoothingRadius > 0)
{ {
medianBlur(fgmask, buf_, smoothingRadius); medianBlur(fgmask, buf_, smoothingRadius);
cv::swap(fgmask, buf_); swap(fgmask, buf_);
} }
// keep track of how many frames we have processed // keep track of how many frames we have processed
++frameNum_; ++frameNum_;
} }
void cv::BackgroundSubtractorGMG::release() void BackgroundSubtractorGMGImpl::release()
{ {
frameSize_ = cv::Size(); frameSize_ = Size();
nfeatures_.release(); nfeatures_.release();
colors_.release(); colors_.release();
weights_.release(); weights_.release();
buf_.release(); buf_.release();
} }
Ptr<BackgroundSubtractorGMG> createBackgroundSubtractorGMG(int initializationFrames, double decisionThreshold)
{
Ptr<BackgroundSubtractorGMG> bgfg = new BackgroundSubtractorGMGImpl;
bgfg->setNumFrames(initializationFrames);
bgfg->setDecisionThreshold(decisionThreshold);
return bgfg;
}
/*
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(BackgroundSubtractorGMG, "BackgroundSubtractor.GMG",
obj.info()->addParam(obj, "maxFeatures", obj.maxFeatures,false,0,0,
"Maximum number of features to store in histogram. Harsh enforcement of sparsity constraint.");
obj.info()->addParam(obj, "learningRate", obj.learningRate,false,0,0,
"Adaptation rate of histogram. Close to 1, slow adaptation. Close to 0, fast adaptation, features forgotten quickly.");
obj.info()->addParam(obj, "initializationFrames", obj.numInitializationFrames,false,0,0,
"Number of frames to use to initialize histograms of pixels.");
obj.info()->addParam(obj, "quantizationLevels", obj.quantizationLevels,false,0,0,
"Number of discrete colors to be used in histograms. Up-front quantization.");
obj.info()->addParam(obj, "backgroundPrior", obj.backgroundPrior,false,0,0,
"Prior probability that each individual pixel is a background pixel.");
obj.info()->addParam(obj, "smoothingRadius", obj.smoothingRadius,false,0,0,
"Radius of smoothing kernel to filter noise from FG mask image.");
obj.info()->addParam(obj, "decisionThreshold", obj.decisionThreshold,false,0,0,
"Threshold for FG decision rule. Pixel is FG if posterior probability exceeds threshold.");
obj.info()->addParam(obj, "updateBackgroundModel", obj.updateBackgroundModel,false,0,0,
"Perform background model update.");
obj.info()->addParam(obj, "minVal", obj.minVal_,false,0,0,
"Minimum of the value range (mostly for regression testing)");
obj.info()->addParam(obj, "maxVal", obj.maxVal_,false,0,0,
"Maximum of the value range (mostly for regression testing)");
);
*/
}
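A sketch of the GMG counterpart (camera index 0 is illustrative); note that the value range is inferred from the frame depth on the first apply() unless setMinVal/setMaxVal are used:

cv::Ptr<cv::BackgroundSubtractorGMG> gmg =
    cv::createBackgroundSubtractorGMG(120, 0.8);
gmg->setMaxFeatures(64);
gmg->setSmoothingRadius(7);
cv::VideoCapture cap(0);
cv::Mat frame, fgmask;
while (cap.read(frame))
    gmg->apply(frame, fgmask);   // builds the model over the first 120 frames, then segments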

@ -7,10 +7,11 @@
// copy or use the software. // copy or use the software.
// //
// //
// Intel License Agreement // License Agreement
// For Open Source Computer Vision Library // For Open Source Computer Vision Library
// //
// Copyright (C) 2000, Intel Corporation, all rights reserved. // Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -40,285 +41,156 @@
//M*/ //M*/
#include "precomp.hpp" #include "precomp.hpp"
int cv::meanShift( InputArray _probImage, Rect& window, TermCriteria criteria )
/*F///////////////////////////////////////////////////////////////////////////////////////
// Name: cvMeanShift
// Purpose: MeanShift algorithm
// Context:
// Parameters:
// imgProb - 2D object probability distribution
// windowIn - CvRect of CAMSHIFT Window intial size
// numIters - If CAMSHIFT iterates this many times, stop
// windowOut - Location, height and width of converged CAMSHIFT window
// len - If != NULL, return equivalent len
// width - If != NULL, return equivalent width
// Returns:
// Number of iterations CAMSHIFT took to converge
// Notes:
//F*/
CV_IMPL int
cvMeanShift( const void* imgProb, CvRect windowIn,
CvTermCriteria criteria, CvConnectedComp* comp )
{ {
CvMoments moments; Mat mat = _probImage.getMat();
int i = 0, eps; Rect cur_rect = window;
CvMat stub, *mat = (CvMat*)imgProb;
CvMat cur_win;
CvRect cur_rect = windowIn;
if( comp )
comp->rect = windowIn;
moments.m00 = moments.m10 = moments.m01 = 0;
mat = cvGetMat( mat, &stub );
if( CV_MAT_CN( mat->type ) > 1 ) CV_Assert( mat.channels() == 1 );
CV_Error( CV_BadNumChannels, cvUnsupportedFormat );
if( windowIn.height <= 0 || windowIn.width <= 0 ) if( window.height <= 0 || window.width <= 0 )
CV_Error( CV_StsBadArg, "Input window has non-positive sizes" ); CV_Error( CV_StsBadArg, "Input window has non-positive sizes" );
windowIn = cv::Rect(windowIn) & cv::Rect(0, 0, mat->cols, mat->rows); window = window & Rect(0, 0, mat.cols, mat.rows);
criteria = cvCheckTermCriteria( criteria, 1., 100 ); double eps = (criteria.type & TermCriteria::EPS) ? std::max(criteria.epsilon, 0.) : 1.;
eps = cvRound( criteria.epsilon * criteria.epsilon ); eps = cvRound(eps*eps);
int i, niters = (criteria.type & TermCriteria::MAX_ITER) ? std::max(criteria.maxCount, 1) : 100;
for( i = 0; i < criteria.max_iter; i++ ) for( i = 0; i < niters; i++ )
{ {
int dx, dy, nx, ny; cur_rect = cur_rect & Rect(0, 0, mat.cols, mat.rows);
double inv_m00; if( cur_rect == Rect() )
cur_rect = cv::Rect(cur_rect) & cv::Rect(0, 0, mat->cols, mat->rows);
if( cv::Rect(cur_rect) == cv::Rect() )
{ {
cur_rect.x = mat->cols/2; cur_rect.x = mat.cols/2;
cur_rect.y = mat->rows/2; cur_rect.y = mat.rows/2;
} }
cur_rect.width = MAX(cur_rect.width, 1); cur_rect.width = std::max(cur_rect.width, 1);
cur_rect.height = MAX(cur_rect.height, 1); cur_rect.height = std::max(cur_rect.height, 1);
cvGetSubRect( mat, &cur_win, cur_rect ); Moments m = moments(mat(cur_rect));
cvMoments( &cur_win, &moments );
/* Calculating center of mass */ // Calculating center of mass
if( fabs(moments.m00) < DBL_EPSILON ) if( fabs(m.m00) < DBL_EPSILON )
break; break;
inv_m00 = moments.inv_sqrt_m00*moments.inv_sqrt_m00; int dx = cvRound( m.m10/m.m00 - window.width*0.5 );
dx = cvRound( moments.m10 * inv_m00 - windowIn.width*0.5 ); int dy = cvRound( m.m01/m.m00 - window.height*0.5 );
dy = cvRound( moments.m01 * inv_m00 - windowIn.height*0.5 );
nx = cur_rect.x + dx; int nx = std::min(std::max(cur_rect.x + dx, 0), mat.cols - cur_rect.width);
ny = cur_rect.y + dy; int ny = std::min(std::max(cur_rect.y + dy, 0), mat.rows - cur_rect.height);
if( nx < 0 )
nx = 0;
else if( nx + cur_rect.width > mat->cols )
nx = mat->cols - cur_rect.width;
if( ny < 0 )
ny = 0;
else if( ny + cur_rect.height > mat->rows )
ny = mat->rows - cur_rect.height;
dx = nx - cur_rect.x; dx = nx - cur_rect.x;
dy = ny - cur_rect.y; dy = ny - cur_rect.y;
cur_rect.x = nx; cur_rect.x = nx;
cur_rect.y = ny; cur_rect.y = ny;
/* Check for coverage centers mass & window */ // Check for coverage centers mass & window
if( dx*dx + dy*dy < eps ) if( dx*dx + dy*dy < eps )
break; break;
} }
if( comp ) window = cur_rect;
{
comp->rect = cur_rect;
comp->area = (float)moments.m00;
}
return i; return i;
} }
/*F/////////////////////////////////////////////////////////////////////////////////////// cv::RotatedRect cv::CamShift( InputArray _probImage, Rect& window,
// Name: cvCamShift TermCriteria criteria )
// Purpose: CAMSHIFT algorithm
// Context:
// Parameters:
// imgProb - 2D object probability distribution
// windowIn - CvRect of CAMSHIFT Window intial size
// criteria - criteria of stop finding window
// windowOut - Location, height and width of converged CAMSHIFT window
// orientation - If != NULL, return distribution orientation
// len - If != NULL, return equivalent len
// width - If != NULL, return equivalent width
// area - sum of all elements in result window
// Returns:
// Number of iterations CAMSHIFT took to converge
// Notes:
//F*/
CV_IMPL int
cvCamShift( const void* imgProb, CvRect windowIn,
CvTermCriteria criteria,
CvConnectedComp* _comp,
CvBox2D* box )
{ {
const int TOLERANCE = 10; const int TOLERANCE = 10;
CvMoments moments; Mat mat = _probImage.getMat();
double m00 = 0, m10, m01, mu20, mu11, mu02, inv_m00;
double a, b, c, xc, yc;
double rotate_a, rotate_c;
double theta = 0, square;
double cs, sn;
double length = 0, width = 0;
int itersUsed = 0;
CvConnectedComp comp;
CvMat cur_win, stub, *mat = (CvMat*)imgProb;
comp.rect = windowIn;
mat = cvGetMat( mat, &stub ); meanShift( mat, window, criteria );
itersUsed = cvMeanShift( mat, windowIn, criteria, &comp ); window.x -= TOLERANCE;
windowIn = comp.rect; if( window.x < 0 )
window.x = 0;
windowIn.x -= TOLERANCE; window.y -= TOLERANCE;
if( windowIn.x < 0 ) if( window.y < 0 )
windowIn.x = 0; window.y = 0;
windowIn.y -= TOLERANCE; window.width += 2 * TOLERANCE;
if( windowIn.y < 0 ) if( window.x + window.width > mat.cols )
windowIn.y = 0; window.width = mat.cols - window.x;
windowIn.width += 2 * TOLERANCE; window.height += 2 * TOLERANCE;
if( windowIn.x + windowIn.width > mat->width ) if( window.y + window.height > mat.rows )
windowIn.width = mat->width - windowIn.x; window.height = mat.rows - window.y;
windowIn.height += 2 * TOLERANCE; // Calculating moments in new center mass
if( windowIn.y + windowIn.height > mat->height ) Moments m = moments( mat(window) );
windowIn.height = mat->height - windowIn.y;
cvGetSubRect( mat, &cur_win, windowIn ); double m00 = m.m00, m10 = m.m10, m01 = m.m01;
double mu11 = m.mu11, mu20 = m.mu20, mu02 = m.mu02;
/* Calculating moments in new center mass */
cvMoments( &cur_win, &moments );
m00 = moments.m00;
m10 = moments.m10;
m01 = moments.m01;
mu11 = moments.mu11;
mu20 = moments.mu20;
mu02 = moments.mu02;
if( fabs(m00) < DBL_EPSILON ) if( fabs(m00) < DBL_EPSILON )
return -1; return RotatedRect();
inv_m00 = 1. / m00; double inv_m00 = 1. / m00;
xc = cvRound( m10 * inv_m00 + windowIn.x ); int xc = cvRound( m10 * inv_m00 + window.x );
yc = cvRound( m01 * inv_m00 + windowIn.y ); int yc = cvRound( m01 * inv_m00 + window.y );
a = mu20 * inv_m00; double a = mu20 * inv_m00, b = mu11 * inv_m00, c = mu02 * inv_m00;
b = mu11 * inv_m00;
c = mu02 * inv_m00;
/* Calculating width & height */ // Calculating width & height
square = sqrt( 4 * b * b + (a - c) * (a - c) ); double square = std::sqrt( 4 * b * b + (a - c) * (a - c) );
/* Calculating orientation */ // Calculating orientation
theta = atan2( 2 * b, a - c + square ); double theta = atan2( 2 * b, a - c + square );
/* Calculating width & length of figure */ // Calculating width & length of figure
cs = cos( theta ); double cs = cos( theta );
sn = sin( theta ); double sn = sin( theta );
rotate_a = cs * cs * mu20 + 2 * cs * sn * mu11 + sn * sn * mu02; double rotate_a = cs * cs * mu20 + 2 * cs * sn * mu11 + sn * sn * mu02;
rotate_c = sn * sn * mu20 - 2 * cs * sn * mu11 + cs * cs * mu02; double rotate_c = sn * sn * mu20 - 2 * cs * sn * mu11 + cs * cs * mu02;
length = sqrt( rotate_a * inv_m00 ) * 4; double length = std::sqrt( rotate_a * inv_m00 ) * 4;
width = sqrt( rotate_c * inv_m00 ) * 4; double width = std::sqrt( rotate_c * inv_m00 ) * 4;
/* In case, when tetta is 0 or 1.57... the Length & Width may be exchanged */ // If theta is 0 or pi/2, the length & width may be exchanged
if( length < width ) if( length < width )
{ {
double t; std::swap( length, width );
std::swap( cs, sn );
CV_SWAP( length, width, t );
CV_SWAP( cs, sn, t );
theta = CV_PI*0.5 - theta; theta = CV_PI*0.5 - theta;
} }
/* Saving results */ // Saving results
if( _comp || box ) int _xc = cvRound( xc );
{ int _yc = cvRound( yc );
int t0, t1;
int _xc = cvRound( xc );
int _yc = cvRound( yc );
t0 = cvRound( fabs( length * cs )); int t0 = cvRound( fabs( length * cs ));
t1 = cvRound( fabs( width * sn )); int t1 = cvRound( fabs( width * sn ));
t0 = MAX( t0, t1 ) + 2; t0 = MAX( t0, t1 ) + 2;
comp.rect.width = MIN( t0, (mat->width - _xc) * 2 ); window.width = MIN( t0, (mat.cols - _xc) * 2 );
t0 = cvRound( fabs( length * sn )); t0 = cvRound( fabs( length * sn ));
t1 = cvRound( fabs( width * cs )); t1 = cvRound( fabs( width * cs ));
t0 = MAX( t0, t1 ) + 2; t0 = MAX( t0, t1 ) + 2;
comp.rect.height = MIN( t0, (mat->height - _yc) * 2 ); window.height = MIN( t0, (mat.rows - _yc) * 2 );
comp.rect.x = MAX( 0, _xc - comp.rect.width / 2 ); window.x = MAX( 0, _xc - window.width / 2 );
comp.rect.y = MAX( 0, _yc - comp.rect.height / 2 ); window.y = MAX( 0, _yc - window.height / 2 );
comp.rect.width = MIN( mat->width - comp.rect.x, comp.rect.width );
comp.rect.height = MIN( mat->height - comp.rect.y, comp.rect.height );
comp.area = (float) m00;
}
if( _comp )
*_comp = comp;
if( box )
{
box->size.height = (float)length;
box->size.width = (float)width;
box->angle = (float)((CV_PI*0.5+theta)*180./CV_PI);
while(box->angle < 0)
box->angle += 360;
while(box->angle >= 360)
box->angle -= 360;
if(box->angle >= 180)
box->angle -= 180;
box->center = cvPoint2D32f( comp.rect.x + comp.rect.width*0.5f,
comp.rect.y + comp.rect.height*0.5f);
}
return itersUsed;
}
window.width = MIN( mat.cols - window.x, window.width );
window.height = MIN( mat.rows - window.y, window.height );
cv::RotatedRect cv::CamShift( InputArray _probImage, Rect& window, RotatedRect box;
TermCriteria criteria ) box.size.height = (float)length;
{ box.size.width = (float)width;
CvConnectedComp comp; box.angle = (float)((CV_PI*0.5+theta)*180./CV_PI);
CvBox2D box; while(box.angle < 0)
box.angle += 360;
box.center.x = box.center.y = 0; box.angle = 0; box.size.width = box.size.height = 0; while(box.angle >= 360)
comp.rect.x = comp.rect.y = comp.rect.width = comp.rect.height = 0; box.angle -= 360;
if(box.angle >= 180)
box.angle -= 180;
box.center = Point2f( window.x + window.width*0.5f, window.y + window.height*0.5f);
Mat probImage = _probImage.getMat(); return box;
CvMat c_probImage = probImage;
cvCamShift(&c_probImage, window, (CvTermCriteria)criteria, &comp, &box);
window = comp.rect;
return RotatedRect(Point2f(box.center), Size2f(box.size), box.angle);
}
int cv::meanShift( InputArray _probImage, Rect& window, TermCriteria criteria )
{
CvConnectedComp comp;
Mat probImage = _probImage.getMat();
CvMat c_probImage = probImage;
int iters = cvMeanShift(&c_probImage, window, (CvTermCriteria)criteria, &comp );
window = comp.rect;
return iters;
} }
/* End of file. */ /* End of file. */
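A minimal usage sketch of the reworked C++ entry points, assuming a precomputed CV_8U histogram back-projection (backproj and trackWindow are illustrative names, not part of the patch):

// Sketch: object tracking via the new C++ API (illustrative identifiers).
#include <opencv2/video/tracking.hpp>

cv::RotatedRect trackOnce( const cv::Mat& backproj, cv::Rect& trackWindow )
{
    cv::TermCriteria crit( cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1. );

    // CamShift refines trackWindow in place and returns center, size and angle;
    // plain mean shift would instead return the iteration count:
    //     int iters = cv::meanShift( backproj, trackWindow, crit );
    return cv::CamShift( backproj, trackWindow, crit );
}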

@ -0,0 +1,388 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
/////////////////////////// Meanshift & CAMShift ///////////////////////////
CV_IMPL int
cvMeanShift( const void* imgProb, CvRect windowIn,
CvTermCriteria criteria, CvConnectedComp* comp )
{
cv::Mat img = cv::cvarrToMat(imgProb);
cv::Rect window = windowIn;
int iters = cv::meanShift(img, window, criteria);
if( comp )
{
comp->rect = window;
comp->area = cvRound(cv::sum(img(window))[0]);
}
return iters;
}
CV_IMPL int
cvCamShift( const void* imgProb, CvRect windowIn,
CvTermCriteria criteria,
CvConnectedComp* comp,
CvBox2D* box )
{
cv::Mat img = cv::cvarrToMat(imgProb);
cv::Rect window = windowIn;
cv::RotatedRect rr = cv::CamShift(img, window, criteria);
if( comp )
{
comp->rect = window;
cv::Rect roi = rr.boundingRect() & cv::Rect(0, 0, img.cols, img.rows);
comp->area = cvRound(cv::sum(img(roi))[0]);
}
if( box )
*box = rr;
return rr.size.width*rr.size.height > 0.f ? 1 : -1;
}
///////////////////////// Motion Templates ////////////////////////////
CV_IMPL void
cvUpdateMotionHistory( const void* silhouette, void* mhimg,
double timestamp, double mhi_duration )
{
cv::Mat silh = cv::cvarrToMat(silhouette), mhi = cv::cvarrToMat(mhimg);
cv::updateMotionHistory(silh, mhi, timestamp, mhi_duration);
}
CV_IMPL void
cvCalcMotionGradient( const CvArr* mhimg, CvArr* maskimg,
CvArr* orientation,
double delta1, double delta2,
int aperture_size )
{
cv::Mat mhi = cv::cvarrToMat(mhimg);
const cv::Mat mask = cv::cvarrToMat(maskimg), orient = cv::cvarrToMat(orientation);
cv::calcMotionGradient(mhi, mask, orient, delta1, delta2, aperture_size);
}
CV_IMPL double
cvCalcGlobalOrientation( const void* orientation, const void* maskimg, const void* mhimg,
double curr_mhi_timestamp, double mhi_duration )
{
cv::Mat mhi = cv::cvarrToMat(mhimg);
cv::Mat mask = cv::cvarrToMat(maskimg), orient = cv::cvarrToMat(orientation);
return cv::calcGlobalOrientation(orient, mask, mhi, curr_mhi_timestamp, mhi_duration);
}
CV_IMPL CvSeq*
cvSegmentMotion( const CvArr* mhimg, CvArr* segmaskimg, CvMemStorage* storage,
double timestamp, double segThresh )
{
cv::Mat mhi = cv::cvarrToMat(mhimg);
const cv::Mat segmask = cv::cvarrToMat(segmaskimg);
std::vector<cv::Rect> brs;
cv::segmentMotion(mhi, segmask, brs, timestamp, segThresh);
CvSeq* seq = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvConnectedComp), storage);
CvConnectedComp comp;
memset(&comp, 0, sizeof(comp));
for( size_t i = 0; i < brs.size(); i++ )
{
cv::Rect roi = brs[i];
float compLabel = (float)(i+1);
int x, y, area = 0;
cv::Mat part = segmask(roi);
for( y = 0; y < roi.height; y++ )
{
const float* partptr = part.ptr<float>(y);
for( x = 0; x < roi.width; x++ )
area += partptr[x] == compLabel;
}
comp.value = cv::Scalar(compLabel);
comp.rect = roi;
comp.area = area;
cvSeqPush(seq, &comp);
}
return seq;
}
///////////////////////////////// Kalman ///////////////////////////////
CV_IMPL CvKalman*
cvCreateKalman( int DP, int MP, int CP )
{
CvKalman *kalman = 0;
if( DP <= 0 || MP <= 0 )
CV_Error( CV_StsOutOfRange,
"state and measurement vectors must have positive number of dimensions" );
if( CP < 0 )
CP = DP;
/* allocating memory for the structure */
kalman = (CvKalman *)cvAlloc( sizeof( CvKalman ));
memset( kalman, 0, sizeof(*kalman));
kalman->DP = DP;
kalman->MP = MP;
kalman->CP = CP;
kalman->state_pre = cvCreateMat( DP, 1, CV_32FC1 );
cvZero( kalman->state_pre );
kalman->state_post = cvCreateMat( DP, 1, CV_32FC1 );
cvZero( kalman->state_post );
kalman->transition_matrix = cvCreateMat( DP, DP, CV_32FC1 );
cvSetIdentity( kalman->transition_matrix );
kalman->process_noise_cov = cvCreateMat( DP, DP, CV_32FC1 );
cvSetIdentity( kalman->process_noise_cov );
kalman->measurement_matrix = cvCreateMat( MP, DP, CV_32FC1 );
cvZero( kalman->measurement_matrix );
kalman->measurement_noise_cov = cvCreateMat( MP, MP, CV_32FC1 );
cvSetIdentity( kalman->measurement_noise_cov );
kalman->error_cov_pre = cvCreateMat( DP, DP, CV_32FC1 );
kalman->error_cov_post = cvCreateMat( DP, DP, CV_32FC1 );
cvZero( kalman->error_cov_post );
kalman->gain = cvCreateMat( DP, MP, CV_32FC1 );
if( CP > 0 )
{
kalman->control_matrix = cvCreateMat( DP, CP, CV_32FC1 );
cvZero( kalman->control_matrix );
}
kalman->temp1 = cvCreateMat( DP, DP, CV_32FC1 );
kalman->temp2 = cvCreateMat( MP, DP, CV_32FC1 );
kalman->temp3 = cvCreateMat( MP, MP, CV_32FC1 );
kalman->temp4 = cvCreateMat( MP, DP, CV_32FC1 );
kalman->temp5 = cvCreateMat( MP, 1, CV_32FC1 );
#if 1
kalman->PosterState = kalman->state_pre->data.fl;
kalman->PriorState = kalman->state_post->data.fl;
kalman->DynamMatr = kalman->transition_matrix->data.fl;
kalman->MeasurementMatr = kalman->measurement_matrix->data.fl;
kalman->MNCovariance = kalman->measurement_noise_cov->data.fl;
kalman->PNCovariance = kalman->process_noise_cov->data.fl;
kalman->KalmGainMatr = kalman->gain->data.fl;
kalman->PriorErrorCovariance = kalman->error_cov_pre->data.fl;
kalman->PosterErrorCovariance = kalman->error_cov_post->data.fl;
#endif
return kalman;
}
CV_IMPL void
cvReleaseKalman( CvKalman** _kalman )
{
CvKalman *kalman;
if( !_kalman )
CV_Error( CV_StsNullPtr, "" );
kalman = *_kalman;
if( !kalman )
return;
/* freeing the memory */
cvReleaseMat( &kalman->state_pre );
cvReleaseMat( &kalman->state_post );
cvReleaseMat( &kalman->transition_matrix );
cvReleaseMat( &kalman->control_matrix );
cvReleaseMat( &kalman->measurement_matrix );
cvReleaseMat( &kalman->process_noise_cov );
cvReleaseMat( &kalman->measurement_noise_cov );
cvReleaseMat( &kalman->error_cov_pre );
cvReleaseMat( &kalman->gain );
cvReleaseMat( &kalman->error_cov_post );
cvReleaseMat( &kalman->temp1 );
cvReleaseMat( &kalman->temp2 );
cvReleaseMat( &kalman->temp3 );
cvReleaseMat( &kalman->temp4 );
cvReleaseMat( &kalman->temp5 );
memset( kalman, 0, sizeof(*kalman));
/* deallocating the structure */
cvFree( _kalman );
}
CV_IMPL const CvMat*
cvKalmanPredict( CvKalman* kalman, const CvMat* control )
{
if( !kalman )
CV_Error( CV_StsNullPtr, "" );
/* update the state */
/* x'(k) = A*x(k) */
cvMatMulAdd( kalman->transition_matrix, kalman->state_post, 0, kalman->state_pre );
if( control && kalman->CP > 0 )
/* x'(k) = x'(k) + B*u(k) */
cvMatMulAdd( kalman->control_matrix, control, kalman->state_pre, kalman->state_pre );
/* update error covariance matrices */
/* temp1 = A*P(k) */
cvMatMulAdd( kalman->transition_matrix, kalman->error_cov_post, 0, kalman->temp1 );
/* P'(k) = temp1*At + Q */
cvGEMM( kalman->temp1, kalman->transition_matrix, 1, kalman->process_noise_cov, 1,
kalman->error_cov_pre, CV_GEMM_B_T );
/* handle the case when there will be measurement before the next predict */
cvCopy(kalman->state_pre, kalman->state_post);
return kalman->state_pre;
}
CV_IMPL const CvMat*
cvKalmanCorrect( CvKalman* kalman, const CvMat* measurement )
{
if( !kalman || !measurement )
CV_Error( CV_StsNullPtr, "" );
/* temp2 = H*P'(k) */
cvMatMulAdd( kalman->measurement_matrix, kalman->error_cov_pre, 0, kalman->temp2 );
/* temp3 = temp2*Ht + R */
cvGEMM( kalman->temp2, kalman->measurement_matrix, 1,
kalman->measurement_noise_cov, 1, kalman->temp3, CV_GEMM_B_T );
/* temp4 = inv(temp3)*temp2 = Kt(k) */
cvSolve( kalman->temp3, kalman->temp2, kalman->temp4, CV_SVD );
/* K(k) */
cvTranspose( kalman->temp4, kalman->gain );
/* temp5 = z(k) - H*x'(k) */
cvGEMM( kalman->measurement_matrix, kalman->state_pre, -1, measurement, 1, kalman->temp5 );
/* x(k) = x'(k) + K(k)*temp5 */
cvMatMulAdd( kalman->gain, kalman->temp5, kalman->state_pre, kalman->state_post );
/* P(k) = P'(k) - K(k)*temp2 */
cvGEMM( kalman->gain, kalman->temp2, -1, kalman->error_cov_pre, 1,
kalman->error_cov_post, 0 );
return kalman->state_post;
}
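The retained C entry points above still follow the textbook predict/correct cycle; a minimal sketch using only those calls (the measurement value is illustrative):

// Sketch: one Kalman predict/correct step through the compatibility C API.
#include <opencv2/video/tracking.hpp>

void kalmanStepOnce()
{
    CvKalman* kf = cvCreateKalman( 2, 1, 0 );  // state [x, dx], scalar measurement

    cvKalmanPredict( kf, 0 );                  // x'(k) = A*x(k); P'(k) = A*P(k)*A^T + Q

    float zval = 1.5f;                         // illustrative measurement
    CvMat z = cvMat( 1, 1, CV_32FC1, &zval );
    cvKalmanCorrect( kf, &z );                 // x(k) = x'(k) + K(k)*(z(k) - H*x'(k))

    cvReleaseKalman( &kf );
}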
///////////////////////////////////// Optical Flow ////////////////////////////////
CV_IMPL void
cvCalcOpticalFlowPyrLK( const void* arrA, const void* arrB,
void* /*pyrarrA*/, void* /*pyrarrB*/,
const CvPoint2D32f * featuresA,
CvPoint2D32f * featuresB,
int count, CvSize winSize, int level,
char *status, float *error,
CvTermCriteria criteria, int flags )
{
if( count <= 0 )
return;
CV_Assert( featuresA && featuresB );
cv::Mat A = cv::cvarrToMat(arrA), B = cv::cvarrToMat(arrB);
cv::Mat ptA(count, 1, CV_32FC2, (void*)featuresA);
cv::Mat ptB(count, 1, CV_32FC2, (void*)featuresB);
cv::Mat st, err;
if( status )
st = cv::Mat(count, 1, CV_8U, (void*)status);
if( error )
err = cv::Mat(count, 1, CV_32F, (void*)error);
cv::calcOpticalFlowPyrLK( A, B, ptA, ptB, st,
error ? cv::_OutputArray(err) : cv::noArray(),
winSize, level, criteria, flags);
}
CV_IMPL void cvCalcOpticalFlowFarneback(
const CvArr* _prev, const CvArr* _next,
CvArr* _flow, double pyr_scale, int levels,
int winsize, int iterations, int poly_n,
double poly_sigma, int flags )
{
cv::Mat prev = cv::cvarrToMat(_prev), next = cv::cvarrToMat(_next);
cv::Mat flow = cv::cvarrToMat(_flow);
CV_Assert( flow.size() == prev.size() && flow.type() == CV_32FC2 );
cv::calcOpticalFlowFarneback( prev, next, flow, pyr_scale, levels,
winsize, iterations, poly_n, poly_sigma, flags );
}
CV_IMPL int
cvEstimateRigidTransform( const CvArr* arrA, const CvArr* arrB, CvMat* arrM, int full_affine )
{
cv::Mat matA = cv::cvarrToMat(arrA), matB = cv::cvarrToMat(arrB);
const cv::Mat matM0 = cv::cvarrToMat(arrM);
cv::Mat matM = cv::estimateRigidTransform(matA, matB, full_affine != 0);
if( matM.empty() )
{
matM = cv::cvarrToMat(arrM);
matM.setTo(cv::Scalar::all(0));
return 0;
}
matM.convertTo(matM0, matM0.type());
return 1;
}
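For contrast, the same operation through the C++ API, with the differing failure conventions made explicit (a sketch; the point sets are illustrative):

// Sketch: cv::estimateRigidTransform returns an empty Mat on failure,
// whereas the C wrapper above zeroes the output matrix and returns 0.
#include <opencv2/video/tracking.hpp>
#include <vector>

bool estimateRigid( const std::vector<cv::Point2f>& srcPts,
                    const std::vector<cv::Point2f>& dstPts,
                    cv::Mat& M )
{
    M = cv::estimateRigidTransform( srcPts, dstPts, false );  // 2x3, CV_64F
    return !M.empty();
}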

@ -305,23 +305,8 @@ static void update_warping_matrix_ECC (Mat& map_matrix, const Mat& update, const
        mapPtr[3] = (float) sin(new_theta);
        mapPtr[1] = -mapPtr[3];
    }
}
CV_IMPL double cvFindTransformECC (const CvArr* _image1, const CvArr* _image2,
CvMat* _map_matrix,
const int motionType,
const CvTermCriteria _criteria)
{
Mat image1 = cvarrToMat(_image1);
Mat image2 = cvarrToMat(_image2);
Mat map_matrix = cvarrToMat(_map_matrix);
double cc = cv::findTransformECC(image1, image2, map_matrix, motionType,
TermCriteria(TermCriteria::EPS+TermCriteria::COUNT, _criteria.max_iter, _criteria.epsilon));
return cc;
}
double cv::findTransformECC(InputArray templateImage,
                            InputArray inputImage,

@ -40,176 +40,6 @@
//M*/

#include "precomp.hpp"
CV_IMPL CvKalman*
cvCreateKalman( int DP, int MP, int CP )
{
CvKalman *kalman = 0;
if( DP <= 0 || MP <= 0 )
CV_Error( CV_StsOutOfRange,
"state and measurement vectors must have positive number of dimensions" );
if( CP < 0 )
CP = DP;
/* allocating memory for the structure */
kalman = (CvKalman *)cvAlloc( sizeof( CvKalman ));
memset( kalman, 0, sizeof(*kalman));
kalman->DP = DP;
kalman->MP = MP;
kalman->CP = CP;
kalman->state_pre = cvCreateMat( DP, 1, CV_32FC1 );
cvZero( kalman->state_pre );
kalman->state_post = cvCreateMat( DP, 1, CV_32FC1 );
cvZero( kalman->state_post );
kalman->transition_matrix = cvCreateMat( DP, DP, CV_32FC1 );
cvSetIdentity( kalman->transition_matrix );
kalman->process_noise_cov = cvCreateMat( DP, DP, CV_32FC1 );
cvSetIdentity( kalman->process_noise_cov );
kalman->measurement_matrix = cvCreateMat( MP, DP, CV_32FC1 );
cvZero( kalman->measurement_matrix );
kalman->measurement_noise_cov = cvCreateMat( MP, MP, CV_32FC1 );
cvSetIdentity( kalman->measurement_noise_cov );
kalman->error_cov_pre = cvCreateMat( DP, DP, CV_32FC1 );
kalman->error_cov_post = cvCreateMat( DP, DP, CV_32FC1 );
cvZero( kalman->error_cov_post );
kalman->gain = cvCreateMat( DP, MP, CV_32FC1 );
if( CP > 0 )
{
kalman->control_matrix = cvCreateMat( DP, CP, CV_32FC1 );
cvZero( kalman->control_matrix );
}
kalman->temp1 = cvCreateMat( DP, DP, CV_32FC1 );
kalman->temp2 = cvCreateMat( MP, DP, CV_32FC1 );
kalman->temp3 = cvCreateMat( MP, MP, CV_32FC1 );
kalman->temp4 = cvCreateMat( MP, DP, CV_32FC1 );
kalman->temp5 = cvCreateMat( MP, 1, CV_32FC1 );
#if 1
kalman->PosterState = kalman->state_pre->data.fl;
kalman->PriorState = kalman->state_post->data.fl;
kalman->DynamMatr = kalman->transition_matrix->data.fl;
kalman->MeasurementMatr = kalman->measurement_matrix->data.fl;
kalman->MNCovariance = kalman->measurement_noise_cov->data.fl;
kalman->PNCovariance = kalman->process_noise_cov->data.fl;
kalman->KalmGainMatr = kalman->gain->data.fl;
kalman->PriorErrorCovariance = kalman->error_cov_pre->data.fl;
kalman->PosterErrorCovariance = kalman->error_cov_post->data.fl;
#endif
return kalman;
}
CV_IMPL void
cvReleaseKalman( CvKalman** _kalman )
{
CvKalman *kalman;
if( !_kalman )
CV_Error( CV_StsNullPtr, "" );
kalman = *_kalman;
if( !kalman )
return;
/* freeing the memory */
cvReleaseMat( &kalman->state_pre );
cvReleaseMat( &kalman->state_post );
cvReleaseMat( &kalman->transition_matrix );
cvReleaseMat( &kalman->control_matrix );
cvReleaseMat( &kalman->measurement_matrix );
cvReleaseMat( &kalman->process_noise_cov );
cvReleaseMat( &kalman->measurement_noise_cov );
cvReleaseMat( &kalman->error_cov_pre );
cvReleaseMat( &kalman->gain );
cvReleaseMat( &kalman->error_cov_post );
cvReleaseMat( &kalman->temp1 );
cvReleaseMat( &kalman->temp2 );
cvReleaseMat( &kalman->temp3 );
cvReleaseMat( &kalman->temp4 );
cvReleaseMat( &kalman->temp5 );
memset( kalman, 0, sizeof(*kalman));
/* deallocating the structure */
cvFree( _kalman );
}
CV_IMPL const CvMat*
cvKalmanPredict( CvKalman* kalman, const CvMat* control )
{
if( !kalman )
CV_Error( CV_StsNullPtr, "" );
/* update the state */
/* x'(k) = A*x(k) */
cvMatMulAdd( kalman->transition_matrix, kalman->state_post, 0, kalman->state_pre );
if( control && kalman->CP > 0 )
/* x'(k) = x'(k) + B*u(k) */
cvMatMulAdd( kalman->control_matrix, control, kalman->state_pre, kalman->state_pre );
/* update error covariance matrices */
/* temp1 = A*P(k) */
cvMatMulAdd( kalman->transition_matrix, kalman->error_cov_post, 0, kalman->temp1 );
/* P'(k) = temp1*At + Q */
cvGEMM( kalman->temp1, kalman->transition_matrix, 1, kalman->process_noise_cov, 1,
kalman->error_cov_pre, CV_GEMM_B_T );
/* handle the case when there will be measurement before the next predict */
cvCopy(kalman->state_pre, kalman->state_post);
return kalman->state_pre;
}
CV_IMPL const CvMat*
cvKalmanCorrect( CvKalman* kalman, const CvMat* measurement )
{
if( !kalman || !measurement )
CV_Error( CV_StsNullPtr, "" );
/* temp2 = H*P'(k) */
cvMatMulAdd( kalman->measurement_matrix, kalman->error_cov_pre, 0, kalman->temp2 );
/* temp3 = temp2*Ht + R */
cvGEMM( kalman->temp2, kalman->measurement_matrix, 1,
kalman->measurement_noise_cov, 1, kalman->temp3, CV_GEMM_B_T );
/* temp4 = inv(temp3)*temp2 = Kt(k) */
cvSolve( kalman->temp3, kalman->temp2, kalman->temp4, CV_SVD );
/* K(k) */
cvTranspose( kalman->temp4, kalman->gain );
/* temp5 = z(k) - H*x'(k) */
cvGEMM( kalman->measurement_matrix, kalman->state_pre, -1, measurement, 1, kalman->temp5 );
/* x(k) = x'(k) + K(k)*temp5 */
cvMatMulAdd( kalman->gain, kalman->temp5, kalman->state_pre, kalman->state_post );
/* P(k) = P'(k) - K(k)*temp2 */
cvGEMM( kalman->gain, kalman->temp2, -1, kalman->error_cov_pre, 1,
kalman->error_cov_post, 0 );
return kalman->state_post;
}
namespace cv
{

File diff suppressed because it is too large

@ -41,34 +41,23 @@
#include "precomp.hpp"

void cv::updateMotionHistory( InputArray _silhouette, InputOutputArray _mhi,
                              double timestamp, double duration )
{
    Mat silh = _silhouette.getMat(), mhi = _mhi.getMat();

    CV_Assert( silh.type() == CV_8U && mhi.type() == CV_32F );
    CV_Assert( silh.size() == mhi.size() );

    Size size = silh.size();
    if( silh.isContinuous() && mhi.isContinuous() )
    {
        size.width *= size.height;
        size.height = 1;
    }

    float ts = (float)timestamp;
    float delbound = (float)(timestamp - duration);
    int x, y;

#if CV_SSE2
    volatile bool useSIMD = cv::checkHardwareSupport(CV_CPU_SSE2);
@ -76,8 +65,8 @@ cvUpdateMotionHistory( const void* silhouette, void* mhimg,
    for( y = 0; y < size.height; y++ )
    {
        const uchar* silhData = silh.ptr<uchar>(y);
        float* mhiData = mhi.ptr<float>(y);
        x = 0;

#if CV_SSE2
@ -117,26 +106,21 @@ cvUpdateMotionHistory( const void* silhouette, void* mhimg,
}

void cv::calcMotionGradient( InputArray _mhi, OutputArray _mask,
                             OutputArray _orientation,
                             double delta1, double delta2,
                             int aperture_size )
{
    static int runcase = 0; runcase++;

    Mat mhi = _mhi.getMat();
    Size size = mhi.size();

    _mask.create(size, CV_8U);
    _orientation.create(size, CV_32F);

    Mat mask = _mask.getMat();
    Mat orient = _orientation.getMat();

    if( aperture_size < 3 || aperture_size > 7 || (aperture_size & 1) == 0 )
        CV_Error( CV_StsOutOfRange, "aperture_size must be 3, 5 or 7" );
@ -144,343 +128,247 @@ cvCalcMotionGradient( const CvArr* mhiimg, CvArr* maskimg,
    if( delta1 <= 0 || delta2 <= 0 )
        CV_Error( CV_StsOutOfRange, "both delta's must be positive" );

    if( mhi.type() != CV_32FC1 )
        CV_Error( CV_StsUnsupportedFormat,
                  "MHI must be single-channel floating-point images" );

    if( orient.data == mhi.data )
    {
        _orientation.release();
        _orientation.create(size, CV_32F);
        orient = _orientation.getMat();
    }

    if( delta1 > delta2 )
        std::swap(delta1, delta2);

    float gradient_epsilon = 1e-4f * aperture_size * aperture_size;
    float min_delta = (float)delta1;
    float max_delta = (float)delta2;

    Mat dX_min, dY_max;

    // calc Dx and Dy
    Sobel( mhi, dX_min, CV_32F, 1, 0, aperture_size, 1, 0, BORDER_REPLICATE );
    Sobel( mhi, dY_max, CV_32F, 0, 1, aperture_size, 1, 0, BORDER_REPLICATE );

    int x, y;

    if( mhi.isContinuous() && orient.isContinuous() && mask.isContinuous() )
    {
        size.width *= size.height;
        size.height = 1;
    }

    // calc gradient
    for( y = 0; y < size.height; y++ )
    {
        const float* dX_min_row = dX_min.ptr<float>(y);
        const float* dY_max_row = dY_max.ptr<float>(y);
        float* orient_row = orient.ptr<float>(y);
        uchar* mask_row = mask.ptr<uchar>(y);

        fastAtan2(dY_max_row, dX_min_row, orient_row, size.width, true);

        // make orientation zero where the gradient is very small
        for( x = 0; x < size.width; x++ )
        {
            float dY = dY_max_row[x];
            float dX = dX_min_row[x];

            if( std::abs(dX) < gradient_epsilon && std::abs(dY) < gradient_epsilon )
            {
                mask_row[x] = (uchar)0;
                orient_row[x] = 0.f;
            }
            else
                mask_row[x] = (uchar)1;
        }
    }

    erode( mhi, dX_min, noArray(), Point(-1,-1), (aperture_size-1)/2, BORDER_REPLICATE );
    dilate( mhi, dY_max, noArray(), Point(-1,-1), (aperture_size-1)/2, BORDER_REPLICATE );

    // mask off pixels which have little motion difference in their neighborhood
    for( y = 0; y < size.height; y++ )
    {
        const float* dX_min_row = dX_min.ptr<float>(y);
        const float* dY_max_row = dY_max.ptr<float>(y);
        float* orient_row = orient.ptr<float>(y);
        uchar* mask_row = mask.ptr<uchar>(y);

        for( x = 0; x < size.width; x++ )
        {
            float d0 = dY_max_row[x] - dX_min_row[x];

            if( mask_row[x] == 0 || d0 < min_delta || max_delta < d0 )
            {
                mask_row[x] = (uchar)0;
                orient_row[x] = 0.f;
            }
        }
    }
}

double cv::calcGlobalOrientation( InputArray _orientation, InputArray _mask,
                                  InputArray _mhi, double /*timestamp*/,
                                  double duration )
{
    Mat orient = _orientation.getMat(), mask = _mask.getMat(), mhi = _mhi.getMat();
    Size size = mhi.size();

    CV_Assert( mask.type() == CV_8U && orient.type() == CV_32F && mhi.type() == CV_32F );
    CV_Assert( mask.size() == size && orient.size() == size );
    CV_Assert( duration > 0 );

    int histSize = 12;
    float _ranges[] = { 0.f, 360.f };
    const float* ranges = _ranges;
    Mat hist;

    // calculate histogram of different orientation values
    calcHist(&orient, 1, 0, mask, hist, 1, &histSize, &ranges);

    // find the maximum index (the dominant orientation)
    Point baseOrientPt;
    minMaxLoc(hist, 0, 0, 0, &baseOrientPt);
    float fbaseOrient = (baseOrientPt.x + baseOrientPt.y)*360.f/histSize;

    // override timestamp with the maximum value in MHI
    double timestamp = 0;
    minMaxLoc( mhi, 0, &timestamp, 0, 0, mask );

    // find the shift relative to the dominant orientation as weighted sum of relative angles
    float a = (float)(254. / 255. / duration);
    float b = (float)(1. - timestamp * a);
    float delbound = (float)(timestamp - duration);

    if( mhi.isContinuous() && mask.isContinuous() && orient.isContinuous() )
    {
        size.width *= size.height;
        size.height = 1;
    }

    /*
     a = 254/(255*dt)
     b = 1 - t*a = 1 - 254*t/(255*dur) =
     (255*dt - 254*t)/(255*dt) =
     (dt - (t - dt)*254)/(255*dt);
     --------------------------------------------------------
     ax + b = 254*x/(255*dt) + (dt - (t - dt)*254)/(255*dt) =
     (254*x + dt - (t - dt)*254)/(255*dt) =
     ((x - (t - dt))*254 + dt)/(255*dt) =
     (((x - (t - dt))/dt)*254 + 1)/255 = (((x - low_time)/dt)*254 + 1)/255
    */
    float shiftOrient = 0, shiftWeight = 0;
    for( int y = 0; y < size.height; y++ )
    {
        const float* mhiptr = mhi.ptr<float>(y);
        const float* oriptr = orient.ptr<float>(y);
        const uchar* maskptr = mask.ptr<uchar>(y);

        for( int x = 0; x < size.width; x++ )
        {
            if( maskptr[x] != 0 && mhiptr[x] > delbound )
            {
                /*
                 orient in 0..360, base_orient in 0..360
                 -> (rel_angle = orient - base_orient) in -360..360.
                 rel_angle is translated to -180..180
                */
                float weight = mhiptr[x] * a + b;
                float relAngle = oriptr[x] - fbaseOrient;

                relAngle += (relAngle < -180 ? 360 : 0);
                relAngle += (relAngle > 180 ? -360 : 0);

                if( fabs(relAngle) < 45 )
                {
                    shiftOrient += weight * relAngle;
                    shiftWeight += weight;
                }
            }
        }
    }

    // add the dominant orientation and the relative shift
    if( shiftWeight == 0 )
        shiftWeight = 0.01f;

    fbaseOrient += shiftOrient / shiftWeight;
    fbaseOrient -= (fbaseOrient < 360 ? 0 : 360);
    fbaseOrient += (fbaseOrient >= 0 ? 0 : 360);

    return fbaseOrient;
}

void cv::segmentMotion(InputArray _mhi, OutputArray _segmask,
                       std::vector<Rect>& boundingRects,
                       double timestamp, double segThresh)
{
    Mat mhi = _mhi.getMat();

    _segmask.create(mhi.size(), CV_32F);
    Mat segmask = _segmask.getMat();
    segmask = Scalar::all(0);

    CV_Assert( mhi.type() == CV_32F );
    CV_Assert( segThresh >= 0 );

    Mat mask = Mat::zeros( mhi.rows + 2, mhi.cols + 2, CV_8UC1 );

    int x, y;

    // protect zero mhi pixels from floodfill.
    for( y = 0; y < mhi.rows; y++ )
    {
        const float* mhiptr = mhi.ptr<float>(y);
        uchar* maskptr = mask.ptr<uchar>(y+1) + 1;

        for( x = 0; x < mhi.cols; x++ )
        {
            if( mhiptr[x] == 0 )
                maskptr[x] = 1;
        }
    }

    float ts = (float)timestamp;
    float comp_idx = 1.f;

    for( y = 0; y < mhi.rows; y++ )
    {
        float* mhiptr = mhi.ptr<float>(y);
        uchar* maskptr = mask.ptr<uchar>(y+1) + 1;

        for( x = 0; x < mhi.cols; x++ )
        {
            if( mhiptr[x] == ts && maskptr[x] == 0 )
            {
                Rect cc;
                floodFill( mhi, mask, Point(x,y), Scalar::all(0),
                           &cc, Scalar::all(segThresh), Scalar::all(segThresh),
                           FLOODFILL_MASK_ONLY + 2*256 + 4 );

                for( int y1 = 0; y1 < cc.height; y1++ )
                {
                    float* segmaskptr = segmask.ptr<float>(cc.y + y1) + cc.x;
                    uchar* maskptr1 = mask.ptr<uchar>(cc.y + y1 + 1) + cc.x + 1;

                    for( int x1 = 0; x1 < cc.width; x1++ )
                    {
                        if( maskptr1[x1] > 1 )
                        {
                            maskptr1[x1] = 1;
                            segmaskptr[x1] = comp_idx;
                        }
                    }
                }
                comp_idx += 1.f;
                boundingRects.push_back(cc);
            }
        }
    }
}

/* End of file. */
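Taken together, the converted functions form the usual motion-template pipeline; a minimal per-frame sketch (silhouette extraction omitted; the constants are illustrative):

// Sketch: one motion-template update step with the new C++ API.
#include <opencv2/video/tracking.hpp>
#include <vector>

double motionStep( const cv::Mat& silh, cv::Mat& mhi, double timestamp )
{
    const double MHI_DURATION = 1.0;  // illustrative history window, in seconds

    cv::updateMotionHistory( silh, mhi, timestamp, MHI_DURATION );

    cv::Mat mask, orient;
    cv::calcMotionGradient( mhi, mask, orient, 0.5, 0.05, 3 );

    // dominant motion direction of the whole frame, in degrees
    double angle = cv::calcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION );

    // note: segmentMotion floodfills the MHI, so it runs last here
    cv::Mat segmask;
    std::vector<cv::Rect> regions;
    cv::segmentMotion( mhi, segmask, regions, timestamp, MHI_DURATION*0.5 );

    return angle;
}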

@ -644,18 +644,3 @@ void cv::calcOpticalFlowFarneback( InputArray _prev0, InputArray _next0,
        prevFlow = flow;
    }
}
CV_IMPL void cvCalcOpticalFlowFarneback(
const CvArr* _prev, const CvArr* _next,
CvArr* _flow, double pyr_scale, int levels,
int winsize, int iterations, int poly_n,
double poly_sigma, int flags )
{
cv::Mat prev = cv::cvarrToMat(_prev), next = cv::cvarrToMat(_next);
cv::Mat flow = cv::cvarrToMat(_flow);
CV_Assert( flow.size() == prev.size() && flow.type() == CV_32FC2 );
cv::calcOpticalFlowFarneback( prev, next, flow, pyr_scale, levels,
winsize, iterations, poly_n, poly_sigma, flags );
}

@ -41,7 +41,6 @@
//M*/

#include "precomp.hpp"
#include "simpleflow.hpp"
//
// 2D dense optical flow algorithm from the following paper:
@ -54,6 +53,39 @@
namespace cv
{
static const uchar MASK_TRUE_VALUE = (uchar)255;
inline static float dist(const Vec3b& p1, const Vec3b& p2) {
return (float)((p1[0] - p2[0]) * (p1[0] - p2[0]) +
(p1[1] - p2[1]) * (p1[1] - p2[1]) +
(p1[2] - p2[2]) * (p1[2] - p2[2]));
}
inline static float dist(const Vec2f& p1, const Vec2f& p2) {
return (p1[0] - p2[0]) * (p1[0] - p2[0]) +
(p1[1] - p2[1]) * (p1[1] - p2[1]);
}
inline static float dist(const Point2f& p1, const Point2f& p2) {
return (p1.x - p2.x) * (p1.x - p2.x) +
(p1.y - p2.y) * (p1.y - p2.y);
}
inline static float dist(float x1, float y1, float x2, float y2) {
return (x1 - x2) * (x1 - x2) +
(y1 - y2) * (y1 - y2);
}
inline static int dist(int x1, int y1, int x2, int y2) {
return (x1 - x2) * (x1 - x2) +
(y1 - y2) * (y1 - y2);
}
template<class T>
inline static T min(T t1, T t2, T t3) {
return (t1 <= t2 && t1 <= t3) ? t1 : min(t2, t3);
}
static void removeOcclusions(const Mat& flow,
                             const Mat& flow_inv,
                             float occ_thr,

@ -1,86 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_SIMPLEFLOW_H__
#define __OPENCV_SIMPLEFLOW_H__
#include <vector>
#define MASK_TRUE_VALUE 255
#define UNKNOWN_FLOW_THRESH 1e9
namespace cv {
inline static float dist(const Vec3b& p1, const Vec3b& p2) {
return (float)((p1[0] - p2[0]) * (p1[0] - p2[0]) +
(p1[1] - p2[1]) * (p1[1] - p2[1]) +
(p1[2] - p2[2]) * (p1[2] - p2[2]));
}
inline static float dist(const Vec2f& p1, const Vec2f& p2) {
return (p1[0] - p2[0]) * (p1[0] - p2[0]) +
(p1[1] - p2[1]) * (p1[1] - p2[1]);
}
inline static float dist(const Point2f& p1, const Point2f& p2) {
return (p1.x - p2.x) * (p1.x - p2.x) +
(p1.y - p2.y) * (p1.y - p2.y);
}
inline static float dist(float x1, float y1, float x2, float y2) {
return (x1 - x2) * (x1 - x2) +
(y1 - y2) * (y1 - y2);
}
inline static int dist(int x1, int y1, int x2, int y2) {
return (x1 - x2) * (x1 - x2) +
(y1 - y2) * (y1 - y2);
}
template<class T>
inline static T min(T t1, T t2, T t3) {
return (t1 <= t2 && t1 <= t3) ? t1 : min(t2, t3);
}
}
#endif

@ -46,50 +46,9 @@
namespace cv
{
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(BackgroundSubtractorMOG, "BackgroundSubtractor.MOG",
obj.info()->addParam(obj, "history", obj.history);
obj.info()->addParam(obj, "nmixtures", obj.nmixtures);
obj.info()->addParam(obj, "backgroundRatio", obj.backgroundRatio);
obj.info()->addParam(obj, "noiseSigma", obj.noiseSigma));
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(BackgroundSubtractorMOG2, "BackgroundSubtractor.MOG2",
obj.info()->addParam(obj, "history", obj.history);
obj.info()->addParam(obj, "nmixtures", obj.nmixtures);
obj.info()->addParam(obj, "varThreshold", obj.varThreshold);
obj.info()->addParam(obj, "detectShadows", obj.bShadowDetection));
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(BackgroundSubtractorGMG, "BackgroundSubtractor.GMG",
obj.info()->addParam(obj, "maxFeatures", obj.maxFeatures,false,0,0,
"Maximum number of features to store in histogram. Harsh enforcement of sparsity constraint.");
obj.info()->addParam(obj, "learningRate", obj.learningRate,false,0,0,
"Adaptation rate of histogram. Close to 1, slow adaptation. Close to 0, fast adaptation, features forgotten quickly.");
obj.info()->addParam(obj, "initializationFrames", obj.numInitializationFrames,false,0,0,
"Number of frames to use to initialize histograms of pixels.");
obj.info()->addParam(obj, "quantizationLevels", obj.quantizationLevels,false,0,0,
"Number of discrete colors to be used in histograms. Up-front quantization.");
obj.info()->addParam(obj, "backgroundPrior", obj.backgroundPrior,false,0,0,
"Prior probability that each individual pixel is a background pixel.");
obj.info()->addParam(obj, "smoothingRadius", obj.smoothingRadius,false,0,0,
"Radius of smoothing kernel to filter noise from FG mask image.");
obj.info()->addParam(obj, "decisionThreshold", obj.decisionThreshold,false,0,0,
"Threshold for FG decision rule. Pixel is FG if posterior probability exceeds threshold.");
obj.info()->addParam(obj, "updateBackgroundModel", obj.updateBackgroundModel,false,0,0,
"Perform background model update."));
bool initModule_video(void)
{
    bool all = true;
    all &= !BackgroundSubtractorMOG_info_auto.name().empty();
    all &= !BackgroundSubtractorMOG2_info_auto.name().empty();
    all &= !BackgroundSubtractorGMG_info_auto.name().empty();
    return all;
}

bool initModule_video(void)
{
    return true;
}

}
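With the string-keyed Algorithm registry gone from this module, instances come from factory functions and typed setters; a short sketch of the replacement pattern (parameter values are illustrative):

// Sketch: factories and typed setters replace Algorithm::create + set("...").
#include <opencv2/video/background_segm.hpp>

void makeSubtractors()
{
    cv::Ptr<cv::BackgroundSubtractor> mog  = cv::createBackgroundSubtractorMOG();
    cv::Ptr<cv::BackgroundSubtractor> mog2 = cv::createBackgroundSubtractorMOG2();

    cv::Ptr<cv::BackgroundSubtractorGMG> gmg =
        cv::createBackgroundSubtractorGMG( 120, 0.8 );  // initFrames, decisionThreshold
    gmg->setSmoothingRadius( 7 );                       // was gmg->set("smoothingRadius", 7)
}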

@ -37,8 +37,7 @@ void CV_BackgroundSubtractorTest::run(int)
    int width = 2 + ((unsigned int)rng)%98; //!< Mat will be 2 to 100 in width and height
    int height = 2 + ((unsigned int)rng)%98;

    Ptr<BackgroundSubtractorGMG> fgbg = createBackgroundSubtractorGMG();
    Mat fgmask;

    if (fgbg.empty())
@ -47,19 +46,13 @@ void CV_BackgroundSubtractorTest::run(int)
    /**
     * Set a few parameters
     */
    fgbg->setSmoothingRadius(7);
    fgbg->setDecisionThreshold(0.7);
    fgbg->setNumFrames(120);

    /**
     * Generate bounds for the values in the matrix for each type
     */
    double maxd = 0, mind = 0;
@ -69,34 +62,34 @@ void CV_BackgroundSubtractorTest::run(int)
    if (type == CV_8U)
    {
        uchar half = UCHAR_MAX/2;
        maxd = (unsigned char)rng.uniform(half+32, UCHAR_MAX);
        mind = (unsigned char)rng.uniform(0, half-32);
    }
    else if (type == CV_8S)
    {
        maxd = (char)rng.uniform(32, CHAR_MAX);
        mind = (char)rng.uniform(CHAR_MIN, -32);
    }
    else if (type == CV_16U)
    {
        ushort half = USHRT_MAX/2;
        maxd = (unsigned int)rng.uniform(half+32, USHRT_MAX);
        mind = (unsigned int)rng.uniform(0, half-32);
    }
    else if (type == CV_16S)
    {
        maxd = rng.uniform(32, SHRT_MAX);
        mind = rng.uniform(SHRT_MIN, -32);
    }
    else if (type == CV_32S)
    {
        maxd = rng.uniform(32, INT_MAX);
        mind = rng.uniform(INT_MIN, -32);
    }
    else if (type == CV_32F)
    {
        maxd = rng.uniform(32.0f, FLT_MAX);
        mind = rng.uniform(-FLT_MAX, -32.0f);
    }
    else if (type == CV_64F)
    {
@ -104,60 +97,22 @@ void CV_BackgroundSubtractorTest::run(int)
        mind = rng.uniform(-DBL_MAX, -32.0);
    }

    fgbg->setMinVal(mind);
    fgbg->setMaxVal(maxd);

    Mat simImage = Mat::zeros(height, width, channelsAndType);
    int numLearningFrames = 120;
    for (int i = 0; i < numLearningFrames; ++i)
    {
        /**
         * Generate simulated "image" for any type. Values always confined to upper half of range.
         */
        rng.fill(simImage, RNG::UNIFORM, (mind + maxd)*0.5, maxd);

        /**
         * Feed simulated images into background subtractor
         */
        fgbg->apply(simImage,fgmask);
        Mat fullbg = Mat::zeros(simImage.rows, simImage.cols, CV_8U);

        //! fgmask should be entirely background during training
@ -166,22 +121,9 @@ void CV_BackgroundSubtractorTest::run(int)
        ts->set_failed_test_info( code );
    }

    //! generate last image, distinct from training images
    rng.fill(simImage, RNG::UNIFORM, mind, maxd);

    fgbg->apply(simImage,fgmask);
    //! now fgmask should be entirely foreground
    Mat fullfg = 255*Mat::ones(simImage.rows, simImage.cols, CV_8U);
    code = cvtest::cmpEps2( ts, fgmask, fullfg, 255, false, "The final foreground mask" );

@ -153,7 +153,7 @@ bool CV_RigidTransform_Test::testImage()
    Mat aff_est = estimateRigidTransform(img, rotated, true);

    const double thres = 0.033;
    if (norm(aff_est, aff, NORM_INF) > thres)
    {
        ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);

@ -33,16 +33,13 @@ int main(int argc, char** argv)
    setUseOptimized(true);
    setNumThreads(8);

    Ptr<BackgroundSubtractor> fgbg = createBackgroundSubtractorGMG(20, 0.7);
    if (fgbg.empty())
    {
        std::cerr << "Failed to create BackgroundSubtractor.GMG Algorithm." << std::endl;
        return -1;
    }

    VideoCapture cap;
    if (argc > 1)
        cap.open(argv[1]);
@ -66,9 +63,9 @@ int main(int argc, char** argv)
        if (frame.empty())
            break;

        fgbg->apply(frame, fgmask);

        frame.convertTo(segm, CV_8U, 0.5);
        add(frame, Scalar(100, 100, 0), segm, fgmask);

        imshow("FG Segmentation", segm);

@ -52,7 +52,7 @@ int main(int argc, const char** argv)
    namedWindow("foreground image", CV_WINDOW_NORMAL);
    namedWindow("mean background image", CV_WINDOW_NORMAL);

    Ptr<BackgroundSubtractor> bg_model = createBackgroundSubtractorMOG2();

    Mat img, fgmask, fgimg;
@ -69,13 +69,13 @@ int main(int argc, const char** argv)
        fgimg.create(img.size(), img.type());

        //update the model
        bg_model->apply(img, fgmask, update_bg_model ? -1 : 0);

        fgimg = Scalar::all(0);
        img.copyTo(fgimg, fgmask);

        Mat bgimg;
        bg_model->getBackgroundImage(bgimg);

        imshow("image", img);
        imshow("foreground mask", fgmask);

@ -87,15 +87,15 @@ int main(int argc, char** argv)
    namedWindow("video", 1);
    namedWindow("segmented", 1);

    Ptr<BackgroundSubtractorMOG> bgsubtractor = createBackgroundSubtractorMOG();
    bgsubtractor->setNoiseSigma(10);

    for(;;)
    {
        cap >> tmp_frame;
        if( !tmp_frame.data )
            break;

        bgsubtractor->apply(tmp_frame, bgmask, update_bg_model ? -1 : 0);
        //CvMat _bgmask = bgmask;
        //cvSegmentFGMask(&_bgmask);

        refineSegments(tmp_frame, bgmask, out_frame);
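The third apply() argument used throughout these samples is the learning rate: -1 lets the subtractor choose its own rate and 0 freezes the background model. A small sketch of the shared loop (cap is any opened VideoCapture):

// Sketch: the common sample loop; -1 = automatic learning rate, 0 = frozen model.
#include <opencv2/video/background_segm.hpp>
#include <opencv2/highgui/highgui.hpp>

void subtractLoop( cv::VideoCapture& cap, bool update_bg_model )
{
    cv::Ptr<cv::BackgroundSubtractor> bg = cv::createBackgroundSubtractorMOG2();
    cv::Mat frame, fgmask;

    while( cap.read(frame) )
        bg->apply( frame, fgmask, update_bg_model ? -1 : 0 );
}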

@ -1324,10 +1324,10 @@ TEST(MOG)
    cv::Mat frame;
    cap >> frame;

    cv::Ptr<cv::BackgroundSubtractor> mog = cv::createBackgroundSubtractorMOG();
    cv::Mat foreground;

    mog->apply(frame, foreground, 0.01);

    while (!TestSystem::instance().stop())
    {
@ -1335,7 +1335,7 @@ TEST(MOG)
        TestSystem::instance().cpuOn();

        mog->apply(frame, foreground, 0.01);

        TestSystem::instance().cpuOff();
    }
@ -1375,12 +1375,12 @@ TEST(MOG2)
    cv::Mat frame;
    cap >> frame;

    cv::Ptr<cv::BackgroundSubtractor> mog2 = cv::createBackgroundSubtractorMOG2();
    cv::Mat foreground;
    cv::Mat background;

    mog2->apply(frame, foreground);
    mog2->getBackgroundImage(background);

    while (!TestSystem::instance().stop())
    {
@ -1388,8 +1388,8 @@ TEST(MOG2)
        TestSystem::instance().cpuOn();

        mog2->apply(frame, foreground);
        mog2->getBackgroundImage(background);

        TestSystem::instance().cpuOff();
    }
