added GMG background segmentation algorithm by Andrew Godbehere, ticket #2065

pull/2/head
Vadim Pisarevsky 13 years ago
parent 35344569bf
commit e4b58ebff5
  1. modules/video/include/opencv2/video/background_segm.hpp (260 lines changed)
  2. modules/video/src/bgfg_gmg.cpp (480 lines changed)
  3. modules/video/src/precomp.hpp (3 lines changed)
  4. modules/video/src/video_init.cpp (17 lines changed)
  5. modules/video/test/test_backgroundsubtractor_gbh.cpp (201 lines changed)
  6. modules/video/test/test_precomp.hpp (1 line changed)
  7. samples/cpp/bgfg_gmg.cpp (97 lines changed)

@@ -44,7 +44,7 @@
#define __OPENCV_BACKGROUND_SEGM_HPP__
#include "opencv2/core/core.hpp"
#include <list>
namespace cv
{
@@ -189,7 +189,263 @@ protected:
//Tau= 0.5 means that if a pixel is more than 2 times darker, then it is not shadow
//See: Prati, Mikic, Trivedi, Cucchiara, "Detecting Moving Shadows...", IEEE PAMI, 2003.
};
/**
* Background Subtractor module. Takes a series of images and returns a sequence of mask (8UC1)
* images of the same size, where 255 indicates Foreground and 0 represents Background.
* This class implements an algorithm described in "Visual Tracking of Human Visitors under
* Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere,
* A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012.
*/
class CV_EXPORTS BackgroundSubtractorGMG: public cv::BackgroundSubtractor
{
private:
/**
* A general flexible datatype.
*
* Used internally to make the background subtraction algorithm robust to any input Mat type.
* Datatype can be char, unsigned char, int, unsigned int, long int, float, or double.
*/
union flexitype{
char c;
uchar uc;
int i;
unsigned int ui;
long int li;
float f;
double d;
flexitype(){d = 0.0;} //!< Default constructor, set all bits of the union to 0.
flexitype(char cval){c = cval;} //!< Char type constructor
bool operator ==(flexitype& rhs)
{
return d == rhs.d;
}
//! Char type assignment operator
flexitype& operator =(char cval){
if (this->c == cval){return *this;}
c = cval; return *this;
}
flexitype(unsigned char ucval){uc = ucval;} //!< unsigned char type constructor
//! unsigned char type assignment operator
flexitype& operator =(unsigned char ucval){
if (this->uc == ucval){return *this;}
uc = ucval; return *this;
}
flexitype(int ival){i = ival;} //!< int type constructor
//! int type assignment operator
flexitype& operator =(int ival){
if (this->i == ival){return *this;}
i = ival; return *this;
}
flexitype(unsigned int uival){ui = uival;} //!< unsigned int type constructor
//! unsigned int type assignment operator
flexitype& operator =(unsigned int uival){
if (this->ui == uival){return *this;}
ui = uival; return *this;
}
flexitype(float fval){f = fval;} //!< float type constructor
//! float type assignment operator
flexitype& operator =(float fval){
if (this->f == fval){return *this;}
f = fval; return *this;
}
flexitype(long int lival){li = lival;} //!< long int type constructor
//! long int type assignment operator
flexitype& operator =(long int lival){
if (this->li == lival){return *this;}
li = lival; return *this;
}
flexitype(double dval){d=dval;} //!< double type constructor
//! double type assignment operator
flexitype& operator =(double dval){
if (this->d == dval){return *this;}
d = dval; return *this;
}
};
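// A sketch of how the union is used: a single value is written and later read back through the
// member that matches the image depth, e.g.
//   flexitype v((uchar)255);   // constructed from an 8-bit value, stored in .uc
//   double span = v.uc - 0.0;  // read back through .uc when the image depth is CV_8U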
/**
* Used internally to represent a single feature in a histogram.
* Feature is a color and an associated likelihood (weight in the histogram).
*/
struct HistogramFeatureGMG
{
/**
* Default constructor.
* Initializes likelihood of feature to 0, color remains uninitialized.
*/
HistogramFeatureGMG(){likelihood = 0.0;}
/**
* Copy constructor.
* Required to use HistogramFeatureGMG in a std::vector
* @see operator =()
*/
HistogramFeatureGMG(const HistogramFeatureGMG& orig){
color = orig.color; likelihood = orig.likelihood;
}
/**
* Assignment operator.
* Required to use HistogramFeatureGMG in a std::vector
*/
HistogramFeatureGMG& operator =(const HistogramFeatureGMG& orig){
color = orig.color; likelihood = orig.likelihood; return *this;
}
/**
* Tests equality of histogram features.
* Equality is tested only by matching the color (feature), not the likelihood.
* This operator is used to look up an observed feature in a histogram.
*/
bool operator ==(HistogramFeatureGMG &rhs);
//! Regardless of the image datatype, the color is quantized, mapped to integers and stored as a vector (one entry per channel).
vector<size_t> color;
//! Represents the weight of feature in the histogram.
float likelihood;
friend class PixelModelGMG;
};
/**
* Representation of the statistical model of a single pixel for use in the background subtraction
* algorithm.
*/
class PixelModelGMG
{
public:
PixelModelGMG();
virtual ~PixelModelGMG();
/**
* Incorporate the last observed feature into the statistical model.
*
* @param learningRate The adaptation parameter for the histogram. -1.0 to use the default. Value
* should be between 0.0 and 1.0; the higher the value, the faster the
* adaptation. 1.0 is the limiting case, where adaptation is so fast that the model has no memory.
*/
void insertFeature(double learningRate = -1.0);
/**
* Record the most recently observed feature, so that it can later be incorporated into the
* statistical model with insertFeature().
*
* @param feature The feature (color) just observed.
*/
void setLastObservedFeature(BackgroundSubtractorGMG::HistogramFeatureGMG feature);
/**
* Set the upper limit for the number of features to store in the histogram. Use to adjust
* memory requirements.
*
* @param max size_t representing the max number of features.
*/
void setMaxFeatures(size_t max) {
maxFeatures = max; histogram.resize(max); histogram.clear();
}
/**
* Normalize the histogram, so sum of weights of all features = 1.0
*/
void normalizeHistogram();
/**
* Return the weight of a feature in the histogram. If the feature is not represented in the
* histogram, the weight returned is 0.0.
*/
double getLikelihood(HistogramFeatureGMG f);
PixelModelGMG& operator *=(const float &rhs);
//friend class BackgroundSubtractorGMG;
//friend class HistogramFeatureGMG;
protected:
size_t numFeatures; //!< number of features in histogram
size_t maxFeatures; //!< max allowable features in histogram
std::list<HistogramFeatureGMG> histogram; //!< represents the histogram as a list of features
HistogramFeatureGMG lastObservedFeature;
//!< store last observed feature in case we need to add it to histogram
};
public:
BackgroundSubtractorGMG();
virtual ~BackgroundSubtractorGMG();
virtual AlgorithmInfo* info() const;
/**
* Performs single-frame background subtraction and builds up a statistical background image
* model.
* @param image Input image
* @param fgmask Output mask image representing foreground and background pixels
* @param learningRate Optional new learning rate in [0.0, 1.0]; pass -1.0 to keep the current value
*/
virtual void operator()(InputArray image, OutputArray fgmask, double learningRate=-1.0);
/**
* Validate parameters and set up data structures for the appropriate image type. Must be called
* before processing any frames.
* @param image One sample image from dataset
* @param min minimum value taken on by pixels in image sequence. Usually 0
* @param max maximum value taken on by pixels in image sequence. e.g. 1.0 or 255
*/
void initializeType(InputArray image, flexitype min, flexitype max);
/**
* Selectively update the background model. Only update background model for pixels identified
* as background.
* @param mask Mask image same size as images in sequence. Must be 8UC1 matrix, 255 for foreground
* and 0 for background.
*/
void updateBackgroundModel(InputArray mask);
/**
* Retrieve the greyscale image representing the probability that each pixel is foreground given
* the current estimated background model. Values are 0.0 (black) to 1.0 (white).
* @param img The 32FC1 image representing per-pixel probabilities that the pixel is foreground.
*/
void getPosteriorImage(OutputArray img);
protected:
//! Total number of distinct colors to maintain in histogram.
int maxFeatures;
//! Set between 0.0 and 1.0, determines how quickly features are "forgotten" from histograms.
double learningRate;
//! Number of frames of video to use to initialize histograms.
int numInitializationFrames;
//! Number of discrete levels in each channel to be used in histograms.
int quantizationLevels;
//! Prior probability that any given pixel is a background pixel. A sensitivity parameter.
double backgroundPrior;
double decisionThreshold; //!< value above which pixel is determined to be FG.
int smoothingRadius; //!< smoothing radius, in pixels, for cleaning up FG image.
flexitype maxVal, minVal;
/*
* General Parameters
*/
size_t imWidth; //!< width of image.
size_t imHeight; //!< height of image.
size_t numPixels;
int imageDepth; //!< Depth of image, e.g. CV_8U
unsigned int numChannels; //!< Number of channels in image.
bool isDataInitialized;
//!< After general parameters are set, data structures must be initialized.
size_t elemSize; //!< store image mat element sizes
size_t elemSize1;
/*
* Data Structures
*/
vector<PixelModelGMG> pixels; //!< Probabilistic background models for each pixel in image.
int frameNum; //!< Frame number counter, used to count frames in training mode.
Mat posteriorImage; //!< Posterior probability image.
Mat fgMaskImage; //!< Foreground mask image.
};
bool initModule_BackgroundSubtractorGMG(void);
}
#endif
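In typical use the class is driven through four calls: initializeType() once on a sample frame, operator() on every frame, updateBackgroundModel() with a foreground mask (or an all-zero mask while training), and optionally getPosteriorImage(). A minimal sketch of that sequence, modeled on the sample at the end of this commit and assuming an 8-bit camera feed with values in 0..255:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    VideoCapture cap(0); // default camera
    Ptr<BackgroundSubtractorGMG> fgbg =
        Algorithm::create<BackgroundSubtractorGMG>("BackgroundSubtractor.GMG");
    Mat frame, fgmask;
    bool first = true;
    for (;;)
    {
        cap >> frame;
        if (frame.empty())
            break;
        if (first)
        {
            fgbg->initializeType(frame, 0, 255); // 8-bit input range
            first = false;
        }
        (*fgbg)(frame, fgmask);                                        // per-frame segmentation
        fgbg->updateBackgroundModel(Mat::zeros(frame.size(), CV_8U));  // treat every pixel as background
        imshow("foreground mask", fgmask);
        if ((waitKey(30) & 255) == 27)
            break;
    }
    return 0;
}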

@@ -0,0 +1,480 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/*
* This class implements an algorithm described in "Visual Tracking of Human Visitors under
* Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere,
* A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012.
*
* Prepared and integrated by Andrew B. Godbehere.
*/
#include "precomp.hpp"
using namespace std;
namespace cv
{
BackgroundSubtractorGMG::BackgroundSubtractorGMG()
{
/*
* Default Parameter Values. Override with algorithm "set" method.
*/
maxFeatures = 64;
learningRate = 0.025;
numInitializationFrames = 120;
quantizationLevels = 16;
backgroundPrior = 0.8;
decisionThreshold = 0.8;
smoothingRadius = 7;
}
void BackgroundSubtractorGMG::initializeType(InputArray _image,flexitype min, flexitype max)
{
minVal = min;
maxVal = max;
if (minVal == maxVal)
{
CV_Error_(CV_StsBadArg,("minVal and maxVal cannot be the same."));
}
/*
* Parameter validation
*/
if (maxFeatures <= 0)
{
CV_Error_(CV_StsBadArg,
("maxFeatures parameter must be 1 or greater. Instead, it is %d.",maxFeatures));
}
if (learningRate < 0.0 || learningRate > 1.0)
{
CV_Error_(CV_StsBadArg,
("learningRate parameter must be in the range [0.0,1.0]. Instead, it is %f.",
learningRate));
}
if (numInitializationFrames < 1)
{
CV_Error_(CV_StsBadArg,
("numInitializationFrames must be at least 1. Instead, it is %d.",
numInitializationFrames));
}
if (quantizationLevels < 1)
{
CV_Error_(CV_StsBadArg,
("quantizationLevels must be at least 1 (preferably more). Instead it is %d.",
quantizationLevels));
}
if (backgroundPrior < 0.0 || backgroundPrior > 1.0)
{
CV_Error_(CV_StsBadArg,
("backgroundPrior must be a probability, between 0.0 and 1.0. Instead it is %f.",
backgroundPrior));
}
/*
* Detect and accommodate the image depth
*/
Mat image = _image.getMat();
imageDepth = image.depth(); // 32f, 8u, etc.
numChannels = image.channels();
/*
* Color quantization [0 | | | | max] --> [0 | | max]
* (0) Use double as intermediary to convert all types to int.
* (i) Shift min to 0,
* (ii) max/(num intervals) = factor. x/factor * factor = quantized result, after integer operation.
*/
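/*
 * Worked example of the mapping implemented in operator() below: with an 8-bit channel,
 * minVal.uc = 0, maxVal.uc = 255 and quantizationLevels = 16, an observed value of 200 maps to
 * (size_t)((200-0)*16/255) = 12, so values 192..207 all fall into histogram bin 12.
 */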
/*
* Data Structure Initialization
*/
Size imsize = image.size();
imWidth = imsize.width;
imHeight = imsize.height;
numPixels = imWidth*imHeight;
pixels.resize(numPixels);
frameNum = 0;
// used to iterate through matrix of type unknown at compile time
elemSize = image.elemSize();
elemSize1 = image.elemSize1();
vector<PixelModelGMG>::iterator pixel;
vector<PixelModelGMG>::iterator pixel_end = pixels.end();
for (pixel = pixels.begin(); pixel != pixel_end; ++pixel)
{
pixel->setMaxFeatures(maxFeatures);
}
fgMaskImage = Mat::zeros(imHeight,imWidth,CV_8UC1); // 8-bit unsigned mask. 255 for FG, 0 for BG
posteriorImage = Mat::zeros(imHeight,imWidth,CV_32FC1); // float for storing probabilities. Can be viewed directly with imshow.
isDataInitialized = true;
}
void BackgroundSubtractorGMG::operator()(InputArray _image, OutputArray _fgmask, double newLearningRate)
{
if (!isDataInitialized)
{
CV_Error(CV_StsError,"BackgroundSubstractorGMG has not been initialized. Call initialize() first.\n");
}
/*
* Update learning rate parameter, if desired
*/
if (newLearningRate != -1.0)
{
if (newLearningRate < 0.0 || newLearningRate > 1.0)
{
CV_Error(CV_StsOutOfRange,"Learning rate for Operator () must be between 0.0 and 1.0.\n");
}
this->learningRate = newLearningRate;
}
Mat image = _image.getMat();
_fgmask.create(Size(imWidth,imHeight),CV_8U); // Size(width,height)
fgMaskImage = _fgmask.getMat(); // 8-bit unsigned mask. 255 for FG, 0 for BG
/*
* Iterate over pixels in image
*/
// Read the raw data at each pixel (1-4 channels; integer or floating-point depth).
// Each channel is quantized and the result is stored as a feature (a vector of quantized levels),
// so the rest of the algorithm is independent of the input Mat type.
// Channels are stored sequentially; Mat::elemSize() and Mat::elemSize1() give the per-pixel and per-channel strides.
vector<PixelModelGMG>::iterator pixel;
vector<PixelModelGMG>::iterator pixel_end = pixels.end();
size_t i;
//#pragma omp parallel
for (i = 0, pixel=pixels.begin(); pixel != pixel_end; ++i,++pixel)
{
HistogramFeatureGMG newFeature;
newFeature.color.clear();
for (size_t c = 0; c < numChannels; ++c)
{
/*
* Perform quantization. in each channel. (color-min)*(levels)/(max-min).
* Shifts min to 0 and scales, finally casting to an int.
*/
size_t quantizedColor;
// pixel at data+elemSize*i. Individual channel c at data+elemSize*i+elemSize1*c
if (imageDepth == CV_8U)
{
uchar *color = (uchar*)(image.data+elemSize*i+elemSize1*c);
quantizedColor = (size_t)((double)(*color-minVal.uc)*quantizationLevels/(maxVal.uc-minVal.uc));
}
else if (imageDepth == CV_8S)
{
char *color = (char*)(image.data+elemSize*i+elemSize1*c);
quantizedColor = (size_t)((double)(*color-minVal.c)*quantizationLevels/(maxVal.c-minVal.c));
}
else if (imageDepth == CV_16U)
{
ushort *color = (ushort*)(image.data+elemSize*i+elemSize1*c); // CV_16U elements are 16 bits wide
quantizedColor = (size_t)((double)(*color-minVal.ui)*quantizationLevels/(maxVal.ui-minVal.ui));
}
else if (imageDepth == CV_16S)
{
short *color = (short*)(image.data+elemSize*i+elemSize1*c); // CV_16S elements are 16 bits wide
quantizedColor = (size_t)((double)(*color-minVal.i)*quantizationLevels/(maxVal.i-minVal.i));
}
else if (imageDepth == CV_32F)
{
float *color = (float*)(image.data+elemSize*i+elemSize1*c);
quantizedColor = (size_t)((double)(*color-minVal.f)*quantizationLevels/(maxVal.f-minVal.f));
}
else if (imageDepth == CV_32S)
{
int *color = (int*)(image.data+elemSize*i+elemSize1*c); // CV_32S elements are 32-bit ints
quantizedColor = (size_t)((double)(*color-minVal.li)*quantizationLevels/(maxVal.li-minVal.li));
}
else if (imageDepth == CV_64F)
{
double *color = (double*)(image.data+elemSize*i+elemSize1*c);
quantizedColor = (size_t)((double)(*color-minVal.d)*quantizationLevels/(maxVal.d-minVal.d));
}
newFeature.color.push_back(quantizedColor);
}
// now that the feature is ready for use, put it in the histogram
if (frameNum > numInitializationFrames) // typical operation
{
newFeature.likelihood = learningRate;
/*
* (1) Query histogram to find posterior probability of feature under model.
*/
float likelihood = (float)pixel->getLikelihood(newFeature);
// see Godbehere, Matsukawa, Goldberg (2012) for reasoning behind this implementation of Bayes rule
float posterior = (likelihood*backgroundPrior)/(likelihood*backgroundPrior+(1-likelihood)*(1-backgroundPrior));
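// In symbols, with H = likelihood of the observed feature under the histogram and p = backgroundPrior:
// P(BG | feature) = H*p / (H*p + (1-H)*(1-p)), i.e. the foreground likelihood is approximated by (1-H).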
/*
* (2) feed posterior probability into the posterior image
*/
int row,col;
col = i%imWidth;
row = (i-col)/imWidth;
posteriorImage.at<float>(row,col) = (1.0-posterior);
}
pixel->setLastObservedFeature(newFeature);
}
/*
* (3) Perform filtering and threshold operations to yield final mask image.
*
* Two options: (1) morphological open/close, as before; (2) median filtering, which Jon Barron suggests is effective for removing noise.
*/
Mat thresholdedPosterior;
threshold(posteriorImage,thresholdedPosterior,decisionThreshold,1.0,THRESH_BINARY);
thresholdedPosterior.convertTo(fgMaskImage,CV_8U,255); // convert image to integer space for further filtering and mask creation
medianBlur(fgMaskImage,fgMaskImage,smoothingRadius);
fgMaskImage.copyTo(_fgmask);
++frameNum; // keep track of how many frames we have processed
}
void BackgroundSubtractorGMG::getPosteriorImage(OutputArray _img)
{
_img.create(Size(imWidth,imHeight),CV_32F);
Mat img = _img.getMat();
posteriorImage.copyTo(img);
}
void BackgroundSubtractorGMG::updateBackgroundModel(InputArray _mask)
{
CV_Assert(_mask.size() == Size(imWidth,imHeight)); // mask should be same size as image
Mat maskImg = _mask.getMat();
//#pragma omp parallel
for (size_t i = 0; i < imHeight; ++i)
{
//#pragma omp parallel
for (size_t j = 0; j < imWidth; ++j)
{
if (frameNum <= numInitializationFrames + 1)
{
// insert previously observed feature into the histogram. -1.0 parameter indicates training.
pixels[i*imWidth+j].insertFeature(-1.0);
if (frameNum >= numInitializationFrames+1) // training is done, normalize
{
pixels[i*imWidth+j].normalizeHistogram();
}
}
// if mask is 0, pixel is identified as a background pixel, so update histogram.
else if (maskImg.at<uchar>(i,j) == 0)
{
pixels[i*imWidth+j].insertFeature(learningRate); // updates the histogram for the next iteration.
}
}
}
}
BackgroundSubtractorGMG::~BackgroundSubtractorGMG()
{
}
BackgroundSubtractorGMG::PixelModelGMG::PixelModelGMG()
{
numFeatures = 0;
maxFeatures = 0;
}
BackgroundSubtractorGMG::PixelModelGMG::~PixelModelGMG()
{
}
void BackgroundSubtractorGMG::PixelModelGMG::setLastObservedFeature(HistogramFeatureGMG f)
{
this->lastObservedFeature = f;
}
double BackgroundSubtractorGMG::PixelModelGMG::getLikelihood(BackgroundSubtractorGMG::HistogramFeatureGMG f)
{
std::list<HistogramFeatureGMG>::iterator feature = histogram.begin();
std::list<HistogramFeatureGMG>::iterator feature_end = histogram.end();
for (feature = histogram.begin(); feature != feature_end; ++feature)
{
// comparing only feature color, not likelihood. See equality operator for HistogramFeatureGMG
if (f == *feature)
{
return feature->likelihood;
}
}
return 0.0; // not in histogram, so return 0.
}
void BackgroundSubtractorGMG::PixelModelGMG::insertFeature(double learningRate)
{
std::list<HistogramFeatureGMG>::iterator feature;
std::list<HistogramFeatureGMG>::iterator swap_end;
std::list<HistogramFeatureGMG>::iterator last_feature = histogram.end();
/*
* If feature is in histogram already, add the weights, and move feature to front.
* If there are too many features, remove the end feature and push new feature to beginning
*/
if (learningRate == -1.0) // then, this is a training-mode update.
{
/*
* (1) Check if feature already represented in histogram
*/
lastObservedFeature.likelihood = 1.0;
for (feature = histogram.begin(); feature != last_feature; ++feature)
{
if (lastObservedFeature == *feature) // feature in histogram
{
feature->likelihood += lastObservedFeature.likelihood;
// now, move feature to beginning of list and break the loop
HistogramFeatureGMG tomove = *feature;
histogram.erase(feature);
histogram.push_front(tomove);
return;
}
}
if (numFeatures == maxFeatures)
{
histogram.pop_back(); // discard oldest feature
histogram.push_front(lastObservedFeature);
}
else
{
histogram.push_front(lastObservedFeature);
++numFeatures;
}
}
else
{
/*
* (1) Scale entire histogram by scaling factor
* (2) Scale input feature.
* (3) Check if feature already represented. If so, simply add.
* (4) If feature is not represented and the histogram is full, discard the oldest feature, insert the new feature, and re-normalize.
*/
*this *= (1.0-learningRate);
lastObservedFeature.likelihood = learningRate;
for (feature = histogram.begin(); feature != last_feature; ++feature)
{
if (lastObservedFeature == *feature) // feature in histogram
{
lastObservedFeature.likelihood += feature->likelihood;
histogram.erase(feature);
histogram.push_front(lastObservedFeature);
return; // done with the update.
}
}
if (numFeatures == maxFeatures)
{
histogram.pop_back(); // discard oldest feature
histogram.push_front(lastObservedFeature);
normalizeHistogram();
}
else
{
histogram.push_front(lastObservedFeature);
++numFeatures;
}
}
}
BackgroundSubtractorGMG::PixelModelGMG& BackgroundSubtractorGMG::PixelModelGMG::operator *=(const float &rhs)
{
/*
* Used to scale histogram by a constant factor
*/
list<HistogramFeatureGMG>::iterator feature;
list<HistogramFeatureGMG>::iterator last_feature = histogram.end();
for (feature = histogram.begin(); feature != last_feature; ++feature)
{
feature->likelihood *= rhs;
}
return *this;
}
void BackgroundSubtractorGMG::PixelModelGMG::normalizeHistogram()
{
/*
* First, calculate the total weight in the histogram
*/
list<HistogramFeatureGMG>::iterator feature;
list<HistogramFeatureGMG>::iterator last_feature = histogram.end();
double total = 0.0;
for (feature = histogram.begin(); feature != last_feature; ++feature)
{
total += feature->likelihood;
}
/*
* Then, if weight is not 0, divide every feature by the total likelihood to re-normalize.
*/
for (feature = histogram.begin(); feature != last_feature; ++feature)
{
if (total != 0.0)
feature->likelihood /= total;
}
}
bool BackgroundSubtractorGMG::HistogramFeatureGMG::operator ==(HistogramFeatureGMG &rhs)
{
CV_Assert(color.size() == rhs.color.size());
std::vector<size_t>::iterator color_a;
std::vector<size_t>::iterator color_b;
std::vector<size_t>::iterator color_a_end = this->color.end();
std::vector<size_t>::iterator color_b_end = rhs.color.end();
for (color_a = color.begin(),color_b =rhs.color.begin();color_a!=color_a_end;++color_a,++color_b)
{
if (*color_a != *color_b)
{
return false;
}
}
return true;
}
}

@@ -52,6 +52,9 @@
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/core/internal.hpp"
#include <list>
#include <stdint.h>
#ifdef HAVE_TEGRA_OPTIMIZATION
#include "opencv2/video/video_tegra.hpp"
#endif

@@ -64,11 +64,28 @@ CV_INIT_ALGORITHM(BackgroundSubtractorMOG2, "BackgroundSubtractor.MOG2",
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(BackgroundSubtractorGMG, "BackgroundSubtractor.GMG",
obj.info()->addParam(obj, "maxFeatures", obj.maxFeatures,false,0,0,
"Maximum number of features to store in histogram. Harsh enforcement of sparsity constraint.");
obj.info()->addParam(obj, "learningRate", obj.learningRate,false,0,0,
"Adaptation rate of histogram. Close to 1, slow adaptation. Close to 0, fast adaptation, features forgotten quickly.");
obj.info()->addParam(obj, "initializationFrames", obj.numInitializationFrames,false,0,0,
"Number of frames to use to initialize histograms of pixels.");
obj.info()->addParam(obj, "quantizationLevels", obj.quantizationLevels,false,0,0,
"Number of discrete colors to be used in histograms. Up-front quantization.");
obj.info()->addParam(obj, "backgroundPrior", obj.backgroundPrior,false,0,0,
"Prior probability that each individual pixel is a background pixel.");
obj.info()->addParam(obj, "smoothingRadius", obj.smoothingRadius,false,0,0,
"Radius of smoothing kernel to filter noise from FG mask image.");
obj.info()->addParam(obj, "decisionThreshold", obj.decisionThreshold,false,0,0,
"Threshold for FG decision rule. Pixel is FG if posterior probability exceeds threshold."));
bool initModule_video(void)
{
bool all = true;
all &= !BackgroundSubtractorMOG_info_auto.name().empty();
all &= !BackgroundSubtractorMOG2_info_auto.name().empty();
all &= !BackgroundSubtractorGMG_info_auto.name().empty();
return all;
}
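Because the class is registered through CV_INIT_ALGORITHM, its parameters are exposed through the generic cv::Algorithm get/set interface; this is how both the test and the sample in this commit configure it. A brief sketch, using the parameter names registered above:

Ptr<BackgroundSubtractorGMG> fgbg =
    Algorithm::create<BackgroundSubtractorGMG>("BackgroundSubtractor.GMG");
fgbg->set("maxFeatures", 64);
fgbg->set("quantizationLevels", 16);
fgbg->set("decisionThreshold", 0.8);
double thresh = fgbg->getDouble("decisionThreshold"); // read a value back through the same interface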

@@ -0,0 +1,201 @@
/*
* BackgroundSubtractorGBH_test.cpp
*
* Created on: Jun 14, 2012
* Author: andrewgodbehere
*/
#include "test_precomp.hpp"
using namespace cv;
class CV_BackgroundSubtractorTest : public cvtest::BaseTest
{
public:
CV_BackgroundSubtractorTest();
protected:
void run(int);
};
CV_BackgroundSubtractorTest::CV_BackgroundSubtractorTest()
{
}
/**
* This test checks the following:
* (i) BackgroundSubtractorGMG can operate with matrices of various types and sizes
* (ii) Training mode returns empty fgmask
* (iii) End of training mode, and anomalous frame yields every pixel detected as FG
*/
void CV_BackgroundSubtractorTest::run(int)
{
int code = cvtest::TS::OK;
RNG& rng = ts->get_rng();
int type = ((unsigned int)rng)%7; //!< pick a random type, 0 - 6, defined in types_c.h
int channels = 1 + ((unsigned int)rng)%4; //!< random number of channels from 1 to 4.
int channelsAndType = CV_MAKETYPE(type,channels);
int width = 2 + ((unsigned int)rng)%98; //!< Mat will be 2 to 100 in width and height
int height = 2 + ((unsigned int)rng)%98;
Ptr<BackgroundSubtractorGMG> fgbg =
Algorithm::create<BackgroundSubtractorGMG>("BackgroundSubtractor.GMG");
Mat fgmask;
if (fgbg == NULL)
CV_Error(CV_StsError,"Failed to create Algorithm\n");
/**
* Set a few parameters
*/
fgbg->set("smoothingRadius",7);
fgbg->set("decisionThreshold",0.7);
fgbg->set("initializationFrames",120);
/**
* Generate bounds for the values in the matrix for each type
*/
uchar maxuc,minuc = 0;
char maxc,minc = 0;
uint maxui,minui = 0;
int maxi,mini = 0;
long int maxli,minli = 0;
float maxf,minf = 0.0;
double maxd,mind = 0.0;
/**
* Max value for simulated images picked randomly in upper half of type range
* Min value for simulated images picked randomly in lower half of type range
*/
if (type == CV_8U)
{
unsigned char half = UCHAR_MAX/2;
maxuc = (unsigned char)rng.uniform(half+32,UCHAR_MAX);
minuc = (unsigned char)rng.uniform(0,half-32);
}
else if (type == CV_8S)
{
char half = CHAR_MAX/2 + CHAR_MIN/2;
maxc = (char)rng.uniform(half+32,CHAR_MAX);
minc = (char)rng.uniform(CHAR_MIN,half-32);
}
else if (type == CV_16U)
{
uint half = UINT_MAX/2;
maxui = (unsigned int)rng.uniform((int)half+32,UINT_MAX);
minui = (unsigned int)rng.uniform(0,(int)half-32);
}
else if (type == CV_16S)
{
int half = INT_MAX/2 + INT_MIN/2;
maxi = rng.uniform(half+32,INT_MAX);
mini = rng.uniform(INT_MIN,half-32);
}
else if (type == CV_32S)
{
long int half = LONG_MAX/2 + LONG_MIN/2;
maxli = rng.uniform((int)half+32,(int)LONG_MAX);
minli = rng.uniform((int)LONG_MIN,(int)half-32);
}
else if (type == CV_32F)
{
float half = FLT_MAX/2.0 + FLT_MIN/2.0;
maxf = rng.uniform(half+(float)32.0*FLT_EPSILON,FLT_MAX);
minf = rng.uniform(FLT_MIN,half-(float)32.0*FLT_EPSILON);
}
else if (type == CV_64F)
{
double half = DBL_MAX/2.0 + DBL_MIN/2.0;
maxd = rng.uniform(half+(double)32.0*DBL_EPSILON,DBL_MAX);
mind = rng.uniform(DBL_MIN,half-(double)32.0*DBL_EPSILON);
}
Mat simImage = Mat::zeros(height,width,channelsAndType);
const uint numLearningFrames = 120;
for (uint i = 0; i < numLearningFrames; ++i)
{
/**
* Generate simulated "image" for any type. Values always confined to upper half of range.
*/
if (type == CV_8U)
{
rng.fill(simImage,RNG::UNIFORM,(unsigned char)(minuc/2+maxuc/2),maxuc);
if (i == 0)
fgbg->initializeType(simImage,minuc,maxuc);
}
else if (type == CV_8S)
{
rng.fill(simImage,RNG::UNIFORM,(char)(minc/2+maxc/2),maxc);
if (i==0)
fgbg->initializeType(simImage,minc,maxc);
}
else if (type == CV_16U)
{
rng.fill(simImage,RNG::UNIFORM,(unsigned int)(minui/2+maxui/2),maxui);
if (i==0)
fgbg->initializeType(simImage,minui,maxui);
}
else if (type == CV_16S)
{
rng.fill(simImage,RNG::UNIFORM,(int)(mini/2+maxi/2),maxi);
if (i==0)
fgbg->initializeType(simImage,mini,maxi);
}
else if (type == CV_32F)
{
rng.fill(simImage,RNG::UNIFORM,(float)(minf/2.0+maxf/2.0),maxf);
if (i==0)
fgbg->initializeType(simImage,minf,maxf);
}
else if (type == CV_32S)
{
rng.fill(simImage,RNG::UNIFORM,(long int)(minli/2+maxli/2),maxli);
if (i==0)
fgbg->initializeType(simImage,minli,maxli);
}
else if (type == CV_64F)
{
rng.fill(simImage,RNG::UNIFORM,(double)(mind/2.0+maxd/2.0),maxd);
if (i==0)
fgbg->initializeType(simImage,mind,maxd);
}
/**
* Feed simulated images into background subtractor
*/
(*fgbg)(simImage,fgmask);
Mat fullbg = Mat::zeros(Size(simImage.cols,simImage.rows),CV_8U);
fgbg->updateBackgroundModel(fullbg);
//! fgmask should be entirely background during training
code = cvtest::cmpEps2( ts, fgmask, fullbg, 0, false, "The training foreground mask" );
if (code < 0)
ts->set_failed_test_info( code );
}
//! generate last image, distinct from training images
if (type == CV_8U)
rng.fill(simImage,RNG::UNIFORM,minuc,minuc);
else if (type == CV_8S)
rng.fill(simImage,RNG::UNIFORM,minc,minc);
else if (type == CV_16U)
rng.fill(simImage,RNG::UNIFORM,minui,minui);
else if (type == CV_16S)
rng.fill(simImage,RNG::UNIFORM,mini,mini);
else if (type == CV_32F)
rng.fill(simImage,RNG::UNIFORM,minf,minf);
else if (type == CV_32S)
rng.fill(simImage,RNG::UNIFORM,minli,minli);
else if (type == CV_64F)
rng.fill(simImage,RNG::UNIFORM,mind,mind);
(*fgbg)(simImage,fgmask);
//! now fgmask should be entirely foreground
Mat fullfg = 255*Mat::ones(Size(simImage.cols,simImage.rows),CV_8U);
code = cvtest::cmpEps2( ts, fgmask, fullfg, 255, false, "The final foreground mask" );
if (code < 0)
{
ts->set_failed_test_info( code );
}
}
TEST(VIDEO_BGSUBGMG, accuracy) { CV_BackgroundSubtractorTest test; test.safe_run(); }

@@ -9,6 +9,7 @@
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/video/tracking.hpp"
#include "opencv2/video/background_segm.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>

@@ -0,0 +1,97 @@
/*
* FGBGTest.cpp
*
* Created on: May 7, 2012
* Author: Andrew B. Godbehere
*/
#include <opencv2/opencv.hpp>
#include <iostream>
#include <sstream>
using namespace cv;
static void help()
{
std::cout <<
"\nA program demonstrating the use and capabilities of a particular BackgroundSubtraction\n"
"algorithm described in A. Godbehere, A. Matsukawa, K. Goldberg, \n"
"\"Visual Tracking of Human Visitors under Variable-Lighting Conditions for a Responsive\n"
"Audio Art Installation\", American Control Conference, 2012, used in an interactive\n"
"installation at the Contemporary Jewish Museum in San Francisco, CA from March 31 through\n"
"July 31, 2011.\n"
"Call:\n"
"./BackgroundSubtractorGMG_sample\n"
"Using OpenCV version " << CV_VERSION << "\n"<<std::endl;
}
int main(int argc, char** argv)
{
help();
setUseOptimized(true);
setNumThreads(8);
Ptr<BackgroundSubtractorGMG> fgbg = Algorithm::create<BackgroundSubtractorGMG>("BackgroundSubtractor.GMG");
if (fgbg == NULL)
{
CV_Error(CV_StsError,"Failed to create Algorithm\n");
}
fgbg->set("smoothingRadius",7);
fgbg->set("decisionThreshold",0.7);
VideoCapture cap;
if( argc > 1 )
cap.open(argv[1]);
else
cap.open(0);
if (!cap.isOpened())
{
std::cout << "error: cannot read video. Try moving video file to sample directory.\n";
return -1;
}
Mat img, downimg, downimg2, fgmask, upfgmask, posterior, upposterior;
bool first = true;
namedWindow("posterior");
namedWindow("fgmask");
namedWindow("FG Segmentation");
int i = 0;
for (;;)
{
std::stringstream txt;
txt << "frame: ";
txt << i++;
cap >> img;
putText(img,txt.str(),Point(20,40),FONT_HERSHEY_SIMPLEX,0.8,Scalar(1.0,0.0,0.0));
resize(img,downimg,Size(160,120),0,0,INTER_NEAREST); // Size(cols, rows) or Size(width,height)
if (first)
{
fgbg->initializeType(downimg,0,255);
first = false;
}
if (img.empty())
{
return 0;
}
(*fgbg)(downimg,fgmask);
fgbg->updateBackgroundModel(Mat::zeros(120,160,CV_8U));
fgbg->getPosteriorImage(posterior);
resize(fgmask,upfgmask,Size(640,480),0,0,INTER_NEAREST);
Mat coloredFG = Mat::zeros(480,640,CV_8UC3);
coloredFG.setTo(Scalar(100,100,0),upfgmask);
resize(posterior,upposterior,Size(640,480),0,0,INTER_NEAREST);
imshow("posterior",upposterior);
imshow("fgmask",upfgmask);
resize(img, downimg2, Size(640, 480),0,0,INTER_LINEAR);
imshow("FG Segmentation",downimg2 + coloredFG);
int c = waitKey(30);
if( c == 'q' || c == 'Q' || (c & 255) == 27 )
break;
}
}