Merge branch 'master' of https://github.com/opencv/opencv_contrib into pr723
@@ -0,0 +1,30 @@
<!--
If you have a question rather than reporting a bug, please go to http://answers.opencv.org, where you will get much faster responses.
If you need further assistance please read [How To Contribute](https://github.com/opencv/opencv/wiki/How_to_contribute).

This is a template helping you to create an issue which can be processed as quickly as possible. This is the bug reporting section for the OpenCV library.
-->

##### System information (version)
<!-- Example
- OpenCV => 3.1
- Operating System / Platform => Windows 64 Bit
- Compiler => Visual Studio 2015
-->

- OpenCV => :grey_question:
- Operating System / Platform => :grey_question:
- Compiler => :grey_question:

##### Detailed description

<!-- your description -->

##### Steps to reproduce

<!-- to add a code example, fence it with triple backticks and an optional file extension
```.cpp
// C++ code example
```
or attach it as a .txt or .zip file
-->
@@ -0,0 +1,9 @@
<!-- Please use this line to close one or multiple issues when this pull request gets merged.
You can add another line right under the first one:
resolves #1234
resolves #1235
-->

### This pull request changes

<!-- Please describe what your pull request is changing -->
@@ -1,3 +1,3 @@
## Contributing guidelines

All guidelines for contributing to the OpenCV repository can be found at [`How to contribute guideline`](https://github.com/Itseez/opencv/wiki/How_to_contribute).
All guidelines for contributing to the OpenCV repository can be found at [`How to contribute guideline`](https://github.com/opencv/opencv/wiki/How_to_contribute).

@@ -1,2 +1,2 @@
set(the_description "ArUco Marker Detection")
ocv_define_module(aruco opencv_core opencv_imgproc opencv_calib3d WRAP python)
ocv_define_module(aruco opencv_core opencv_imgproc opencv_calib3d WRAP python java)

@@ -0,0 +1,24 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#include "test_precomp.hpp"
#include <opencv2/aruco/charuco.hpp>

TEST(CV_ArucoDrawMarker, regression_1226)
{
    int squares_x = 7;
    int squares_y = 5;
    int bwidth = 1600;
    int bheight = 1200;

    cv::Ptr<cv::aruco::Dictionary> dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50);
    cv::Ptr<cv::aruco::CharucoBoard> board = cv::aruco::CharucoBoard::create(squares_x, squares_y, 1.0, 0.75, dict);
    cv::Size sz(bwidth, bheight);
    cv::Mat mat;

    ASSERT_NO_THROW(
    {
        board->draw(sz, mat, 0, 1);
    });
}
@@ -1,2 +1,2 @@
set(the_description "Background Segmentation Algorithms")
ocv_define_module(bgsegm opencv_core opencv_imgproc opencv_video opencv_highgui WRAP python)
ocv_define_module(bgsegm opencv_core opencv_imgproc opencv_video WRAP python)

@@ -1,5 +1,10 @@
Improved Background-Foreground Segmentation Methods
===================================================

1. Adaptive Background Mixture Model for Real-time Tracking
2. Visual Tracking of Human Visitors under Variable-Lighting Conditions.
This algorithm combines statistical background image estimation with per-pixel Bayesian segmentation. It was introduced by Andrew B. Godbehere, Akihiro Matsukawa, and Ken Goldberg in 2012 [1]. As described in the paper, the system ran a successful interactive audio art installation called “Are We There Yet?” from March 31 to July 31, 2011 at the Contemporary Jewish Museum in San Francisco, California.

It uses the first few frames (120 by default) for background modelling and employs a probabilistic foreground segmentation algorithm that identifies possible foreground objects using Bayesian inference. The estimates are adaptive: newer observations are weighted more heavily than older ones to accommodate variable illumination. Several morphological filtering operations, such as closing and opening, are applied to remove unwanted noise. You will see a black window during the first few frames, while the background model is initializing.

References
----------
[1]: A.B. Godbehere, A. Matsukawa, K. Goldberg. Visual tracking of human visitors under variable-lighting conditions for a responsive audio art installation. American Control Conference (2012), pp. 4305–4312
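
For orientation, here is a minimal usage sketch of the GMG subtractor described above (illustrative only, not part of this patch; the arguments 20 and 0.7 simply mirror the values used by the sample added below, and the webcam index 0 is an arbitrary choice):

```cpp
#include "opencv2/bgsegm.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"

int main()
{
    cv::VideoCapture cap(0);                 // default camera; a video file path works the same way
    if (!cap.isOpened())
        return -1;

    // 20 initialization frames, 0.7 decision threshold (values taken from the bgsegm sample)
    cv::Ptr<cv::BackgroundSubtractor> gmg =
        cv::bgsegm::createBackgroundSubtractorGMG(20, 0.7);

    cv::Mat frame, fgmask;
    for (;;)
    {
        cap >> frame;
        if (frame.empty())
            break;

        gmg->apply(frame, fgmask);           // mask stays black until the initialization frames are consumed

        cv::imshow("GMG foreground mask", fgmask);
        if ((cv::waitKey(30) & 255) == 27)   // ESC quits
            break;
    }
    return 0;
}
```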
@@ -0,0 +1,104 @@
#include "opencv2/bgsegm.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include <opencv2/core/utility.hpp>
#include <iostream>

using namespace cv;
using namespace cv::bgsegm;

const String about =
    "\nA program demonstrating the use and capabilities of different background subtraction algorithms\n"
    "Using OpenCV version " + String(CV_VERSION) +
    "\nPress q or ESC to exit\n";

const String keys =
    "{help h usage ? | | print this message }"
    "{vid | | path to a video file }"
    "{algo | GMG | name of the algorithm (GMG, CNT, KNN, MOG, MOG2) }"
    ;

static Ptr<BackgroundSubtractor> createBGSubtractorByName(const String& algoName)
{
    Ptr<BackgroundSubtractor> algo;
    if(algoName == String("GMG"))
        algo = createBackgroundSubtractorGMG(20, 0.7);
    else if(algoName == String("CNT"))
        algo = createBackgroundSubtractorCNT();
    else if(algoName == String("KNN"))
        algo = createBackgroundSubtractorKNN();
    else if(algoName == String("MOG"))
        algo = createBackgroundSubtractorMOG();
    else if(algoName == String("MOG2"))
        algo = createBackgroundSubtractorMOG2();

    return algo;
}

int main(int argc, char** argv)
{
    setUseOptimized(true);
    setNumThreads(8);

    CommandLineParser parser(argc, argv, keys);
    parser.about(about);
    parser.printMessage();
    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }

    String videoPath = parser.get<String>("vid");
    String algoName = parser.get<String>("algo");

    if (!parser.check())
    {
        parser.printErrors();
        return 0;
    }

    Ptr<BackgroundSubtractor> bgfs = createBGSubtractorByName(algoName);
    if (!bgfs)
    {
        std::cerr << "Failed to create " << algoName << " background subtractor" << std::endl;
        return -1;
    }

    VideoCapture cap;
    // open the given video file if one was passed, otherwise fall back to the default camera
    if (!videoPath.empty())
        cap.open(videoPath);
    else
        cap.open(0);

    if (!cap.isOpened())
    {
        std::cerr << "Cannot read video. Try moving video file to sample directory." << std::endl;
        return -1;
    }

    Mat frame, fgmask, segm;

    namedWindow("FG Segmentation", WINDOW_NORMAL);

    for (;;)
    {
        cap >> frame;

        if (frame.empty())
            break;

        bgfs->apply(frame, fgmask);

        frame.convertTo(segm, CV_8U, 0.5);
        add(frame, Scalar(100, 100, 0), segm, fgmask);

        imshow("FG Segmentation", segm);

        int c = waitKey(30);
        if (c == 'q' || c == 'Q' || (c & 255) == 27)
            break;
    }

    return 0;
}
@@ -1,81 +0,0 @@
/*
 * FGBGTest.cpp
 *
 *  Created on: May 7, 2012
 *      Author: Andrew B. Godbehere
 */

#include "opencv2/bgsegm.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include <opencv2/core/utility.hpp>
#include <iostream>

using namespace cv;
using namespace cv::bgsegm;

static void help()
{
    std::cout <<
    "\nA program demonstrating the use and capabilities of a particular BackgroundSubtraction\n"
    "algorithm described in A. Godbehere, A. Matsukawa, K. Goldberg, \n"
    "\"Visual Tracking of Human Visitors under Variable-Lighting Conditions for a Responsive\n"
    "Audio Art Installation\", American Control Conference, 2012, used in an interactive\n"
    "installation at the Contemporary Jewish Museum in San Francisco, CA from March 31 through\n"
    "July 31, 2011.\n"
    "Call:\n"
    "./BackgroundSubtractorGMG_sample\n"
    "Using OpenCV version " << CV_VERSION << "\n" << std::endl;
}

int main(int argc, char** argv)
{
    help();

    setUseOptimized(true);
    setNumThreads(8);

    Ptr<BackgroundSubtractor> fgbg = createBackgroundSubtractorGMG(20, 0.7);
    if (!fgbg)
    {
        std::cerr << "Failed to create BackgroundSubtractor.GMG Algorithm." << std::endl;
        return -1;
    }

    VideoCapture cap;
    if (argc > 1)
        cap.open(argv[1]);
    else
        cap.open(0);

    if (!cap.isOpened())
    {
        std::cerr << "Cannot read video. Try moving video file to sample directory." << std::endl;
        return -1;
    }

    Mat frame, fgmask, segm;

    namedWindow("FG Segmentation", WINDOW_NORMAL);

    for (;;)
    {
        cap >> frame;

        if (frame.empty())
            break;

        fgbg->apply(frame, fgmask);

        frame.convertTo(segm, CV_8U, 0.5);
        add(frame, Scalar(100, 100, 0), segm, fgmask);

        imshow("FG Segmentation", segm);

        int c = waitKey(30);
        if (c == 'q' || c == 'Q' || (c & 255) == 27)
            break;
    }

    return 0;
}
@@ -0,0 +1,421 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                        (3-clause BSD License)
//                     For BackgroundSubtractorCNT
//              (Background Subtraction based on Counting)
//
// Copyright (C) 2016, Sagi Zeevi (www.theimpossiblecode.com), all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/


#include "precomp.hpp" |
||||
#include <functional> |
||||
|
||||
namespace cv |
||||
{ |
||||
namespace bgsegm |
||||
{ |
||||
|
||||
class BackgroundSubtractorCNTImpl: public BackgroundSubtractorCNT |
||||
{ |
||||
public: |
||||
|
||||
BackgroundSubtractorCNTImpl(int minStability, |
||||
bool useHistory, |
||||
int maxStability, |
||||
bool isParallel); |
||||
|
||||
// BackgroundSubtractor interface
|
||||
virtual void apply(InputArray image, OutputArray fgmask, double learningRate); |
||||
virtual void getBackgroundImage(OutputArray backgroundImage) const; |
||||
|
||||
int getMinPixelStability() const; |
||||
void setMinPixelStability(int value); |
||||
|
||||
int getMaxPixelStability() const; |
||||
void setMaxPixelStability(int value); |
||||
|
||||
bool getUseHistory() const; |
||||
void setUseHistory(bool value); |
||||
|
||||
bool getIsParallel() const; |
||||
void setIsParallel(bool value); |
||||
|
||||
//! the destructor
|
||||
virtual ~BackgroundSubtractorCNTImpl() {} |
||||
|
||||
private: |
||||
int minPixelStability; |
||||
int maxPixelStability; |
||||
int threshold; |
||||
bool useHistory; |
||||
bool isParallel; |
||||
// These 3 commented expressed in 1 'data' for faster single access
|
||||
// Mat_<int> stability; // data[0] => Candidate for historyStability if pixel is ~same as in prevFrame
|
||||
// Mat_<int> history; // data[1] => Color which got most hits for the past maxPixelStability frames
|
||||
// Mat_<int> historyStability; // data[2] => How many hits this pixel got for the color in history
|
||||
// Mat_<int> background; // data[3] => Current background as detected by algorithm
|
||||
Mat_<Vec4i> data; |
||||
Mat prevFrame; |
||||
Mat fgMaskPrev; |
||||
}; |
||||
|
||||
BackgroundSubtractorCNTImpl::BackgroundSubtractorCNTImpl(int minStability, |
||||
bool _useHistory, |
||||
int maxStability, |
||||
bool _isParallel) |
||||
: minPixelStability(minStability), |
||||
maxPixelStability(maxStability), |
||||
threshold(5), |
||||
useHistory(_useHistory), |
||||
isParallel(_isParallel) |
||||
{ |
||||
} |
||||
|
||||
void BackgroundSubtractorCNTImpl::getBackgroundImage(OutputArray _backgroundImage) const |
||||
{ |
||||
CV_Assert(! data.empty()); |
||||
|
||||
_backgroundImage.create(prevFrame.size(), CV_8U); // OutputArray usage requires this step
|
||||
Mat backgroundImage = _backgroundImage.getMat(); |
||||
|
||||
// mixChannels requires same types to mix,
|
||||
// so imixing with tmp Mat and conerting
|
||||
Mat_<int> tmp(prevFrame.rows, prevFrame.cols); |
||||
int from_bg_model_to_user[] = {3, 0}; |
||||
mixChannels(&data, 1, &tmp, 1, from_bg_model_to_user, 1); |
||||
tmp.convertTo(backgroundImage, CV_8U); |
||||
} |
||||
|
||||
int BackgroundSubtractorCNTImpl::getMinPixelStability() const |
||||
{ |
||||
return minPixelStability; |
||||
} |
||||
|
||||
void BackgroundSubtractorCNTImpl::setMinPixelStability(int value) |
||||
{ |
||||
CV_Assert(value > 0 && value < maxPixelStability); |
||||
minPixelStability = value; |
||||
} |
||||
|
||||
int BackgroundSubtractorCNTImpl::getMaxPixelStability() const |
||||
{ |
||||
return maxPixelStability; |
||||
} |
||||
|
||||
void BackgroundSubtractorCNTImpl::setMaxPixelStability(int value) |
||||
{ |
||||
CV_Assert(value > minPixelStability); |
||||
maxPixelStability = value; |
||||
} |
||||
|
||||
bool BackgroundSubtractorCNTImpl::getUseHistory() const |
||||
{ |
||||
return useHistory; |
||||
} |
||||
|
||||
void BackgroundSubtractorCNTImpl::setUseHistory(bool value) |
||||
{ |
||||
useHistory = value; |
||||
} |
||||
|
||||
bool BackgroundSubtractorCNTImpl::getIsParallel() const |
||||
{ |
||||
return isParallel; |
||||
} |
||||
|
||||
void BackgroundSubtractorCNTImpl::setIsParallel(bool value) |
||||
{ |
||||
isParallel = value; |
||||
} |
||||
|
||||
class CNTFunctor |
||||
{ |
||||
public: |
||||
virtual void operator()(Vec4i &vec, uchar currColor, uchar prevColor, uchar &fgMaskPixelRef) = 0; |
||||
//! the destructor
|
||||
virtual ~CNTFunctor() {} |
||||
}; |
||||
|
||||
struct BGSubtractPixel : public CNTFunctor |
||||
{ |
||||
BGSubtractPixel(int _minPixelStability, int _threshold, |
||||
const Mat &_frame, const Mat &_prevFrame, Mat &_fgMask) |
||||
: minPixelStability(_minPixelStability), |
||||
threshold(_threshold), |
||||
frame(_frame), |
||||
prevFrame(_prevFrame), |
||||
fgMask(_fgMask) |
||||
{} |
||||
|
||||
//! the destructor
|
||||
virtual ~BGSubtractPixel() {} |
||||
|
||||
void operator()(Vec4i &vec, uchar currColor, uchar prevColor, uchar &fgMaskPixelRef) |
||||
{ |
||||
int &stabilityRef = vec[0]; |
||||
int &bgImgRef = vec[3]; |
||||
if (abs(currColor - prevColor) < threshold) |
||||
{ |
||||
++stabilityRef; |
||||
if (stabilityRef == minPixelStability) |
||||
{ // bg
|
||||
--stabilityRef; |
||||
bgImgRef = prevColor; |
||||
} |
||||
else |
||||
{ // fg
|
||||
fgMaskPixelRef = 255; |
||||
} |
||||
} |
||||
else |
||||
{ // fg
|
||||
stabilityRef = 0; |
||||
fgMaskPixelRef = 255; |
||||
} |
||||
} |
||||
|
||||
int minPixelStability; |
||||
int threshold; |
||||
const Mat &frame; |
||||
const Mat &prevFrame; |
||||
Mat &fgMask; |
||||
}; |
||||
|
||||
struct BGSubtractPixelWithHistory : public CNTFunctor |
||||
{ |
||||
BGSubtractPixelWithHistory(int _minPixelStability, int _maxPixelStability, int _threshold, |
||||
const Mat &_frame, const Mat &_prevFrame, Mat &_fgMask) |
||||
: minPixelStability(_minPixelStability), |
||||
maxPixelStability(_maxPixelStability), |
||||
threshold(_threshold), |
||||
thresholdHistory(30), |
||||
frame(_frame), |
||||
prevFrame(_prevFrame), |
||||
fgMask(_fgMask) |
||||
{} |
||||
|
||||
//! the destructor
|
||||
virtual ~BGSubtractPixelWithHistory() {} |
||||
|
||||
void incrStability(int &histStabilityRef) |
||||
{ |
||||
if (histStabilityRef < maxPixelStability) |
||||
{ |
||||
++histStabilityRef; |
||||
} |
||||
} |
||||
|
||||
void decrStability(int &histStabilityRef) |
||||
{ |
||||
if (histStabilityRef > 0) |
||||
{ |
||||
--histStabilityRef; |
||||
} |
||||
} |
||||
|
||||
void operator()(Vec4i &vec, uchar currColor, uchar prevColor, uchar &fgMaskPixelRef) |
||||
{ |
||||
int &stabilityRef = vec[0]; |
||||
int &historyColorRef = vec[1]; |
||||
int &histStabilityRef = vec[2]; |
||||
int &bgImgRef = vec[3]; |
||||
if (abs(currColor - historyColorRef) < thresholdHistory) |
||||
{ // No change compared to history - this is maybe a background
|
||||
stabilityRef = 0; |
||||
incrStability(histStabilityRef); |
||||
if (histStabilityRef <= minPixelStability) |
||||
{ |
||||
fgMaskPixelRef = 255; |
||||
} |
||||
else |
||||
{ |
||||
bgImgRef = historyColorRef; |
||||
} |
||||
} |
||||
else if (abs(currColor - prevColor) < threshold) |
||||
{ // No change compared to prev - this is maybe a background
|
||||
incrStability(stabilityRef); |
||||
if (stabilityRef > minPixelStability) |
||||
{ // Stable color - this is maybe a background
|
||||
if (stabilityRef >= histStabilityRef) |
||||
{ |
||||
historyColorRef = currColor; |
||||
histStabilityRef = stabilityRef; |
||||
bgImgRef = historyColorRef; |
||||
} |
||||
else |
||||
{ // Stable but different from stable history - this is a foreground
|
||||
decrStability(histStabilityRef); |
||||
fgMaskPixelRef = 255; |
||||
} |
||||
} |
||||
else |
||||
{ // This is FG.
|
||||
fgMaskPixelRef = 255; |
||||
} |
||||
} |
||||
else |
||||
{ // Color changed - this is defently a foreground
|
||||
stabilityRef = 0; |
||||
decrStability(histStabilityRef); |
||||
fgMaskPixelRef = 255; |
||||
} |
||||
|
||||
} |
||||
|
||||
int minPixelStability; |
||||
int maxPixelStability; |
||||
int threshold; |
||||
int thresholdHistory; |
||||
const Mat &frame; |
||||
const Mat &prevFrame; |
||||
Mat &fgMask; |
||||
}; |
||||
|
||||
class CNTInvoker : public ParallelLoopBody |
||||
{ |
||||
public: |
||||
CNTInvoker(Mat_<Vec4i> &_data, Mat &_img, Mat &_prevFrame, Mat &_fgMask, CNTFunctor &_functor) |
||||
: data(_data), img(_img), prevFrame(_prevFrame), fgMask(_fgMask), functor(_functor) |
||||
{ |
||||
} |
||||
|
||||
// Iterate rows
|
||||
void operator()(const Range& range) const |
||||
{ |
||||
for (int r = range.start; r < range.end; ++r) |
||||
{ |
||||
Vec4i* row = data.ptr<Vec4i>(r); |
||||
uchar* frameRow = img.ptr<uchar>(r); |
||||
uchar* prevFrameRow = prevFrame.ptr<uchar>(r); |
||||
uchar* fgMaskRow = fgMask.ptr<uchar>(r); |
||||
for (int c = 0; c < data.cols; ++c) |
||||
{ |
||||
functor(row[c], frameRow[c], prevFrameRow[c], fgMaskRow[c]); |
||||
} |
||||
} |
||||
} |
||||
|
||||
private: |
||||
Mat_<Vec4i> &data; |
||||
Mat &img; |
||||
Mat &prevFrame; |
||||
Mat &fgMask; |
||||
CNTFunctor &functor; |
||||
}; |
||||
|
||||
void BackgroundSubtractorCNTImpl::apply(InputArray image, OutputArray _fgmask, double learningRate) |
||||
{ |
||||
CV_Assert(image.depth() == CV_8U); |
||||
|
||||
Mat frameIn = image.getMat(); |
||||
if(frameIn.channels() != 1) |
||||
cvtColor(frameIn, frameIn, COLOR_BGR2GRAY); |
||||
|
||||
_fgmask.create(image.size(), CV_8U); // OutputArray usage requires this step
|
||||
Mat fgMask = _fgmask.getMat(); |
||||
|
||||
bool needToInitialize = data.empty() || learningRate >= 1 || frameIn.size() != prevFrame.size(); |
||||
|
||||
Mat frame = frameIn.clone(); |
||||
|
||||
if (needToInitialize) |
||||
{ // Usually done only once
|
||||
data = Mat_<Vec4i>::zeros(frame.rows, frame.cols); |
||||
prevFrame = frame; |
||||
|
||||
// mixChannels requires same types to mix,
|
||||
// so imixing with tmp Mat and conerting
|
||||
Mat tmp; |
||||
prevFrame.convertTo(tmp, CV_32S); |
||||
int from_gray_to_history_color[] = {0,1}; |
||||
mixChannels(&tmp, 1, &data, 1, from_gray_to_history_color, 1); |
||||
} |
||||
|
||||
fgMask = Scalar(0); |
||||
CNTFunctor *functor; |
||||
if (useHistory && learningRate) |
||||
{ |
||||
double scaleMaxStability = 1.0; |
||||
if (learningRate > 0 && learningRate < 1.0) |
||||
{ |
||||
scaleMaxStability = learningRate; |
||||
} |
||||
functor = new BGSubtractPixelWithHistory(minPixelStability, int(maxPixelStability * scaleMaxStability), |
||||
threshold, frame, prevFrame, fgMask); |
||||
} |
||||
else |
||||
{ |
||||
functor = new BGSubtractPixel(minPixelStability, threshold*3, frame, prevFrame, fgMask); |
||||
} |
||||
|
||||
if (isParallel) |
||||
{ |
||||
parallel_for_(Range(0, frame.rows), |
||||
CNTInvoker(data, frame, prevFrame, fgMask, *functor)); |
||||
} |
||||
else |
||||
{ |
||||
for (int r = 0; r < data.rows; ++r) |
||||
{ |
||||
Vec4i* row = data.ptr<Vec4i>(r); |
||||
uchar* frameRow = frame.ptr<uchar>(r); |
||||
uchar* prevFrameRow = prevFrame.ptr<uchar>(r); |
||||
uchar* fgMaskRow = fgMask.ptr<uchar>(r); |
||||
for (int c = 0; c < data.cols; ++c) |
||||
{ |
||||
(*functor)(row[c], frameRow[c], prevFrameRow[c], fgMaskRow[c]); |
||||
} |
||||
} |
||||
} |
||||
|
||||
delete functor; |
||||
|
||||
prevFrame = frame; |
||||
} |
||||
|
||||
|
||||
Ptr<BackgroundSubtractorCNT> createBackgroundSubtractorCNT(int minPixelStability, bool useHistory, int maxStability, bool isParallel) |
||||
{ |
||||
return makePtr<BackgroundSubtractorCNTImpl>(minPixelStability, useHistory, maxStability, isParallel); |
||||
} |
||||
|
||||
} |
||||
} |
||||
|
||||
/* End of file. */ |
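
For quick orientation, a minimal standalone usage sketch of the new subtractor follows (illustrative only, not part of the patch; it assumes the factory's default arguments, which the sample earlier in this patch also relies on, and uses webcam index 0 arbitrarily):

```cpp
#include "opencv2/bgsegm.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"

int main()
{
    cv::VideoCapture cap(0);
    if (!cap.isOpened())
        return -1;

    // Default parameters, as in the createBGSubtractorByName("CNT") branch of the sample above.
    cv::Ptr<cv::bgsegm::BackgroundSubtractorCNT> cnt =
        cv::bgsegm::createBackgroundSubtractorCNT();

    cv::Mat frame, fgmask;
    for (;;)
    {
        cap >> frame;
        if (frame.empty())
            break;

        cnt->apply(frame, fgmask);   // per-pixel counting decides background vs. foreground
        cv::imshow("CNT foreground mask", fgmask);
        if ((cv::waitKey(30) & 255) == 27)
            break;
    }
    return 0;
}
```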
@@ -1,126 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
//    Fangfang Bai, fangfang@multicorewareinc.com
//    Jin Ma,       jin@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors as is and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "perf_precomp.hpp"

#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core/ocl.hpp"

#ifdef HAVE_OPENCV_OCL

#include "opencv2/ocl.hpp"

using namespace std::tr1;
using namespace cv;
using namespace perf;

namespace cvtest {
namespace ocl {

///////////////////////// Retina ////////////////////////

typedef tuple<bool, int, double, double> RetinaParams;
typedef TestBaseWithParam<RetinaParams> RetinaFixture;

#define OCL_TEST_CYCLE() for(; startTimer(), next(); cv::ocl::finish(), stopTimer())

PERF_TEST_P(RetinaFixture, Retina,
            ::testing::Combine(testing::Bool(), testing::Values((int)cv::bioinspired::RETINA_COLOR_BAYER),
                               testing::Values(1.0, 0.5), testing::Values(10.0, 5.0)))
{
    if (!cv::ocl::haveOpenCL())
        throw TestBase::PerfSkipTestException();

    RetinaParams params = GetParam();
    bool colorMode = get<0>(params), useLogSampling = false;
    int colorSamplingMethod = get<1>(params);
    double reductionFactor = get<2>(params), samplingStrength = get<3>(params);

    Mat input = cv::imread(cvtest::TS::ptr()->get_data_path() + "shared/lena.png", colorMode);
    ASSERT_FALSE(input.empty());

    Mat gold_parvo, gold_magno;

    if (getSelectedImpl() == "plain")
    {
        Ptr<bioinspired::Retina> gold_retina = bioinspired::createRetina(
            input.size(), colorMode, colorSamplingMethod,
            useLogSampling, reductionFactor, samplingStrength);

        TEST_CYCLE()
        {
            gold_retina->run(input);

            gold_retina->getParvo(gold_parvo);
            gold_retina->getMagno(gold_magno);
        }
    }
    else if (getSelectedImpl() == "ocl")
    {
        cv::ocl::oclMat ocl_input(input), ocl_parvo, ocl_magno;

        Ptr<cv::bioinspired::Retina> ocl_retina = cv::bioinspired::createRetina_OCL(
            input.size(), colorMode, colorSamplingMethod, useLogSampling,
            reductionFactor, samplingStrength);

        OCL_TEST_CYCLE()
        {
            ocl_retina->run(ocl_input);

            ocl_retina->getParvo(ocl_parvo);
            ocl_retina->getMagno(ocl_magno);
        }
    }
    else
        CV_TEST_FAIL_NO_IMPL();

    SANITY_CHECK_NOTHING();
}

} } // namespace cvtest::ocl

#endif // HAVE_OPENCV_OCL
@@ -0,0 +1,47 @@
#include "../perf_precomp.hpp"
#include "opencv2/ts/ocl_perf.hpp"

using namespace std::tr1;
using namespace cv;
using namespace perf;

namespace cvtest {
namespace ocl {

///////////////////////// Retina ////////////////////////

typedef tuple<bool, int, double, double> RetinaParams;
typedef TestBaseWithParam<RetinaParams> RetinaFixture;

OCL_PERF_TEST_P(RetinaFixture, Retina,
                ::testing::Combine(testing::Bool(), testing::Values((int)cv::bioinspired::RETINA_COLOR_BAYER),
                                   testing::Values(1.0, 0.5), testing::Values(10.0, 5.0)))
{
    RetinaParams params = GetParam();
    bool colorMode = get<0>(params), useLogSampling = false;
    int colorSamplingMethod = get<1>(params);
    float reductionFactor = static_cast<float>(get<2>(params));
    float samplingStrength = static_cast<float>(get<3>(params));

    Mat input = imread(getDataPath("cv/shared/lena.png"), colorMode);
    ASSERT_FALSE(input.empty());

    UMat ocl_parvo, ocl_magno;

    {
        Ptr<cv::bioinspired::Retina> retina = cv::bioinspired::Retina::create(
            input.size(), colorMode, colorSamplingMethod, useLogSampling,
            reductionFactor, samplingStrength);

        OCL_TEST_CYCLE()
        {
            retina->run(input);
            retina->getParvo(ocl_parvo);
            retina->getMagno(ocl_magno);
        }
    }

    SANITY_CHECK_NOTHING();
}

} } // namespace cvtest::ocl
@@ -1,91 +0,0 @@
//============================================================================
// Name        : retinademo.cpp
// Author      : Alexandre Benoit, benoit.alexandre.vision@gmail.com
// Version     : 0.1
// Copyright   : LISTIC/GIPSA French Labs, May 2015
// Description : Gipsa/LISTIC Labs quick retina demo in C++, Ansi-style
//============================================================================

// include bioinspired module and OpenCV core utilities
#include "opencv2/bioinspired.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
#include <cstring>

// main function
int main(int argc, char* argv[]) {

    // basic input arguments checking
    if (argc>1)
    {
        std::cout<<"****************************************************"<<std::endl;
        std::cout<<"* Retina demonstration : demonstrates the use of is a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
        std::cout<<"* This retina model allows spatio-temporal image processing (applied on a webcam sequences)."<<std::endl;
        std::cout<<"* As a summary, these are the retina model properties:"<<std::endl;
        std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
        std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
        std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
        std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
        std::cout<<"* for more information, reer to the following papers :"<<std::endl;
        std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
        std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
        std::cout<<"* => reports comments/remarks at benoit.alexandre.vision@gmail.com"<<std::endl;
        std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
        std::cout<<"****************************************************"<<std::endl;
        std::cout<<" NOTE : this program generates the default retina parameters file 'RetinaDefaultParameters.xml'"<<std::endl;
        std::cout<<" => you can use this to fine tune parameters and load them if you save to file 'RetinaSpecificParameters.xml'"<<std::endl;

        if (strcmp(argv[1], "help")==0){
            std::cout<<"No help provided for now, please test the retina Demo for a more complete program"<<std::endl;
        }
    }

    std::string inputMediaType=argv[1];
    // declare the retina input buffer.
    cv::Mat inputFrame;
    // setup webcam reader and grab a first frame to get its size
    cv::VideoCapture videoCapture(0);
    videoCapture>>inputFrame;

    // allocate a retina instance with input size equal to the one of the loaded image
    cv::Ptr<cv::bioinspired::Retina> myRetina = cv::bioinspired::createRetina(inputFrame.size());

    /* retina parameters management methods use sample
       -> save current (here default) retina parameters to a xml file (you may use it only one time to get the file and modify it)
    */
    myRetina->write("RetinaDefaultParameters.xml");

    // -> load parameters if file exists
    myRetina->setup("RetinaSpecificParameters.xml");

    // reset all retina buffers (open your eyes)
    myRetina->clearBuffers();

    // declare retina output buffers
    cv::Mat retinaOutput_parvo;
    cv::Mat retinaOutput_magno;

    //main processing loop
    bool stillProcess=true;
    while(stillProcess){

        // if using video stream, then, grabbing a new frame, else, input remains the same
        if (videoCapture.isOpened())
            videoCapture>>inputFrame;
        else
            stillProcess=false;
        // run retina filter
        myRetina->run(inputFrame);
        // Retrieve and display retina output
        myRetina->getParvo(retinaOutput_parvo);
        myRetina->getMagno(retinaOutput_magno);
        cv::imshow("retina input", inputFrame);
        cv::imshow("Retina Parvo", retinaOutput_parvo);
        cv::imshow("Retina Magno", retinaOutput_magno);

        cv::waitKey(5);
    }

}
@@ -1,2 +1,2 @@
set(the_description "Custom Calibration Pattern")
ocv_define_module(ccalib opencv_core opencv_imgproc opencv_calib3d opencv_features2d WRAP python)
ocv_define_module(ccalib opencv_core opencv_imgproc opencv_calib3d opencv_features2d opencv_highgui WRAP python)

@@ -1,5 +0,0 @@
#ifndef __OPENCV_CNN_3DOBJ_CONFIG_HPP__
#define __OPENCV_CNN_3DOBJ_CONFIG_HPP__
// HAVE CAFFE
#cmakedefine HAVE_CAFFE
#endif
@@ -1,21 +0,0 @@
cmake_minimum_required(VERSION 2.8)
SET(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g -ggdb ")
SET(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall")
project(cnn_3dobj)
find_package(OpenCV REQUIRED)
set(SOURCES_generator demo_sphereview_data.cpp)
include_directories(${OpenCV_INCLUDE_DIRS})
add_executable(sphereview_test ${SOURCES_generator})
target_link_libraries(sphereview_test opencv_core opencv_imgproc opencv_highgui opencv_cnn_3dobj opencv_xfeatures2d)

set(SOURCES_classifier demo_classify.cpp)
add_executable(classify_test ${SOURCES_classifier})
target_link_libraries(classify_test opencv_core opencv_imgproc opencv_highgui opencv_cnn_3dobj opencv_xfeatures2d)

set(SOURCES_modelanalysis demo_model_analysis.cpp)
add_executable(model_test ${SOURCES_modelanalysis})
target_link_libraries(model_test opencv_core opencv_imgproc opencv_highgui opencv_cnn_3dobj opencv_xfeatures2d)

set(SOURCES_video demo_video.cpp)
add_executable(video_test ${SOURCES_video})
target_link_libraries(video_test opencv_core opencv_imgproc opencv_highgui opencv_cnn_3dobj opencv_xfeatures2d)