Merge pull request #899 from mshabunin:pr718

pull/904/head
Alexander Alekhin 8 years ago
commit 88742e0ea7
  1. 107
      modules/datasets/include/opencv2/datasets/track_alov.hpp
  2. 384
      modules/datasets/src/track_alov.cpp
  3. 2
      modules/tracking/CMakeLists.txt
  4. 7
      modules/tracking/doc/tracking.bib
  5. 405
      modules/tracking/include/opencv2/tracking/tracker.hpp
  6. 70
      modules/tracking/perf/perf_Tracker.cpp
  7. 217
      modules/tracking/samples/goturnTracker.cpp
  8. 191
      modules/tracking/src/gtrTracker.cpp
  9. 76
      modules/tracking/src/gtrTracker.hpp
  10. 146
      modules/tracking/src/gtrUtils.cpp
  11. 61
      modules/tracking/src/gtrUtils.hpp
  12. 1
      modules/tracking/src/tracker.cpp
  13. 14
      modules/tracking/test/test_trackerOPE.cpp
  14. 14
      modules/tracking/test/test_trackerSRE.cpp
  15. 16
      modules/tracking/test/test_trackerTRE.cpp

@ -0,0 +1,107 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2014, Itseez Inc, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Itseez Inc or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_DATASETS_TRACK_ALOV_HPP
#define OPENCV_DATASETS_TRACK_ALOV_HPP
#include <string>
#include <vector>
#include "opencv2/datasets/dataset.hpp"
#include "opencv2/datasets/util.hpp"
using namespace std;
namespace cv
{
namespace datasets
{
//! @addtogroup datasets_track
//! @{
//One dataset entry: a single video frame plus its ground-truth bounding box.
struct TRACK_alovObj : public Object
{
int id; //1-based frame number within the video
std::string imagePath; //absolute path to the frame image on disk
vector <Point2f> gtbb; //ground-truth box as 4 corner points (annotation order)
};
//Names of the 14 ALOV300++ challenge sections (also the directory names on disk)
const string sectionNames[] = { "01-Light", "02-SurfaceCover", "03-Specularity", "04-Transparency", "05-Shape", "06-MotionSmoothness", "07-MotionCoherence",
"08-Clutter", "09-Confusion", "10-LowContrast", "11-Occlusion", "12-MovingCamera", "13-ZoomingCamera", "14-LongDuration" };
//Number of videos in each section, same order as sectionNames (sums to 314)
const int sectionSizes[] = { 33, 15, 18, 20, 24, 22, 12, 15, 37, 23, 34, 22, 29, 10 };
//Abstract interface to the ALOV300++ tracking dataset.
//Datasets (videos) and frames are addressed with 1-based IDs throughout.
class CV_EXPORTS TRACK_alov : public Dataset
{
public:
//Factory: returns the concrete implementation
static Ptr<TRACK_alov> create();
//Load every frame found on disk and attach annotations where present
virtual void load(const std::string &path) = 0;
//Load only frames with annotations (~every 5-th frame)
virtual void loadAnnotatedOnly(const std::string &path) = 0;
//Number of loaded videos (datasets)
virtual int getDatasetsNum() = 0;
//Number of frames in dataset id (1..N); negative on bad id
virtual int getDatasetLength(int id) = 0;
//Select the dataset used by getNextFrame/getNextGT; false on bad id
virtual bool initDataset(int id) = 0;
//Sequential access: read the next frame of the active dataset
virtual bool getNextFrame(Mat &frame) = 0;
//Ground truth of the frame most recently returned by getNextFrame
virtual vector <Point2f> getNextGT() = 0;
//Get frame/GT by datasetID (1..N) frameID (1..K)
virtual bool getFrame(Mat &frame, int datasetID, int frameID) = 0;
virtual vector <Point2f> getGT(int datasetID, int frameID) = 0;
protected:
vector <vector <Ptr<TRACK_alovObj> > > data; //per-dataset frame objects
int activeDatasetID; //1-based ID selected by initDataset
int frameCounter; //frames already served by getNextFrame
};
//! @}
}
}
#endif

@ -0,0 +1,384 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2014, Itseez Inc, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Itseez Inc or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/datasets/track_alov.hpp"
#include <sys/stat.h>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
using namespace std;
namespace cv
{
namespace datasets
{
//Concrete implementation of the TRACK_alov interface.
class TRACK_alovImpl : public TRACK_alov
{
public:
//Constructor: start with the first dataset active and no frames served
TRACK_alovImpl()
{
activeDatasetID = 1;
frameCounter = 0;
}
//Destructor
virtual ~TRACK_alovImpl() {}
//Load Dataset
virtual void load(const string &path);
virtual void loadAnnotatedOnly(const std::string &path);
protected:
virtual int getDatasetsNum();
virtual int getDatasetLength(int id);
virtual bool initDataset(int id);
virtual bool getNextFrame(Mat &frame);
virtual bool getFrame(Mat &frame, int datasetID, int frameID);
virtual vector <Point2f> getNextGT();
virtual vector <Point2f> getGT(int datasetID, int frameID);
//Workers behind load()/loadAnnotatedOnly()
void loadDataset(const string &path);
void loadDatasetAnnotatedOnly(const string &path);
//Path builders for frame images and annotation (*.ann) files
string fullFramePath(string rootPath, int sectionID, int videoID, int frameID);
string fullAnnoPath(string rootPath, int sectionID, int videoID);
};
//Load all frames found on disk; annotations are merged in where available.
void TRACK_alovImpl::load(const string &path)
{
loadDataset(path);
}
//Load only frames that carry an annotation line in the *.ann files.
void TRACK_alovImpl::loadAnnotatedOnly(const string &path)
{
loadDatasetAnnotatedOnly(path);
}
//Build the path of one frame image:
//  <root>/imagedata++/<section>/<section>_videoVVVVV/FFFFFFFF.jpg
//where the video number is zero-padded to 5 digits and the frame number
//to 8. snprintf with a width specifier replaces the original sprintf +
//manual padding loops, whose "5 - strlen(...)" arithmetic underflowed
//(unsigned wrap) whenever the printed number was wider than the field.
string TRACK_alovImpl::fullFramePath(string rootPath, int sectionID, int videoID, int frameID)
{
    char videoNum[16];
    snprintf(videoNum, sizeof(videoNum), "%05u", (unsigned)(videoID + 1)); //videoID is 0-based internally
    char frameNum[16];
    snprintf(frameNum, sizeof(frameNum), "%08u", (unsigned)frameID);
    string out = rootPath + "/imagedata++/" + sectionNames[sectionID] + "/" + sectionNames[sectionID] + "_video";
    out += videoNum;
    out += "/";
    out += frameNum;
    out += ".jpg";
    return out;
}
//Build the path of a video's annotation file:
//  <root>/alov300++_rectangleAnnotation_full/<section>/<section>_videoVVVVV.ann
//with the video number zero-padded to 5 digits. Uses snprintf with a
//width specifier instead of the original sprintf + manual padding loop,
//whose "5 - strlen(...)" unsigned arithmetic underflowed for numbers
//wider than the field.
string TRACK_alovImpl::fullAnnoPath(string rootPath, int sectionID, int videoID)
{
    char videoNum[16];
    snprintf(videoNum, sizeof(videoNum), "%05u", (unsigned)(videoID + 1)); //videoID is 0-based internally
    string out = rootPath + "/alov300++_rectangleAnnotation_full/" + sectionNames[sectionID] + "/" + sectionNames[sectionID] + "_video";
    out += videoNum;
    out += ".ann";
    return out;
}
//Cheap existence probe: a path exists iff stat() succeeds on it.
inline bool fileExists(const std::string& name)
{
    struct stat info;
    const bool present = (stat(name.c_str(), &info) == 0);
    return present;
}
void TRACK_alovImpl::loadDataset(const string &rootPath)
{
vector <int> datasetsLengths;
printf("ALOV300++ Dataset Initialization...\n");
//Load frames
//Loop for all sections of ALOV300++ (14 sections)
for (int i = 0; i < 14; i++)
{
//Loop for all videos in section
for (int k = 0; k < sectionSizes[i]; k++)
{
vector <Ptr<TRACK_alovObj> > objects;
//Make a list of datasets lengths
int currFrameID = 0;
for (;;)
{
currFrameID++;
string fullPath = fullFramePath(rootPath, i, k, currFrameID);
if (!fileExists(fullPath))
break;
//Make ALOV300++ Object
Ptr<TRACK_alovObj> currObj(new TRACK_alovObj);
currObj->imagePath = fullPath;
currObj->id = currFrameID;
currObj->gtbb.push_back(Point2d(0, 0));
currObj->gtbb.push_back(Point2d(0, 0));
currObj->gtbb.push_back(Point2d(0, 0));
currObj->gtbb.push_back(Point2d(0, 0));
//Add object to storage
objects.push_back(currObj);
}
datasetsLengths.push_back(currFrameID - 1);
data.push_back(objects);
}
}
//Load annotations
//Loop for all sections of ALOV300++ (14 sections)
int currDatasetID = 0;
for (int i = 0; i < 14; i++)
{
//Loop for all videos in section
for (int k = 0; k < sectionSizes[i]; k++)
{
currDatasetID++;
//Open dataset's ground truth (annotation) file
string annoPath = fullAnnoPath(rootPath, i, k);
ifstream annoList(annoPath.c_str());
if (!annoList.is_open())
{
printf("Error: Can't open annotation file *.ANN!!!\n");
break;
}
//Ground Truth data
int n = 0;
double x1 = 0, y1 = 0,
x2 = 0, y2 = 0,
x3 = 0, y3 = 0,
x4 = 0, y4 = 0;
do
{
//Make ALOV300++ Object
string tmp;
getline(annoList, tmp);
std::istringstream in(tmp);
in >> n >> x1 >> y1 >> x2 >> y2 >> x3 >> y3 >> x4 >> y4;
Ptr<TRACK_alovObj> currObj = data[currDatasetID-1][n-1];
currObj->gtbb.clear();
currObj->gtbb.push_back(Point2d(x1, y1));
currObj->gtbb.push_back(Point2d(x2, y2));
currObj->gtbb.push_back(Point2d(x3, y3));
currObj->gtbb.push_back(Point2d(x4, y4));
} while (annoList.good());
}
}
return;
}
void TRACK_alovImpl::loadDatasetAnnotatedOnly(const string &rootPath)
{
vector <int> datasetsLengths;
int currDatasetID = 0;
printf("ALOV300++ Annotated Dataset Initialization...\n");
//Loop for all sections of ALOV300++ (14 sections)
for (int i = 0; i < 14; i++)
{
//Loop for all videos in section
for (int k = 0; k < sectionSizes[i]; k++)
{
vector <Ptr<TRACK_alovObj> > objects;
currDatasetID++;
//Open dataset's ground truth (annotation) file
string annoPath = fullAnnoPath(rootPath, i, k);
ifstream annoList(annoPath.c_str());
if (!annoList.is_open())
{
printf("Error: Can't open annotation file *.ANN!!!\n");
break;
}
int framesNum = 0;
do
{
//Make ALOV300++ Object
Ptr<TRACK_alovObj> currObj(new TRACK_alovObj);
string tmp;
framesNum++;
//Ground Truth data
int n = 0;
double x1 = 0, y1 = 0,
x2 = 0, y2 = 0,
x3 = 0, y3 = 0,
x4 = 0, y4 = 0;
getline(annoList, tmp);
std::istringstream in(tmp);
in >> n >> x1 >> y1 >> x2 >> y2 >> x3 >> y3 >> x4 >> y4;
currObj->gtbb.push_back(Point2d(x1, y1));
currObj->gtbb.push_back(Point2d(x2, y2));
currObj->gtbb.push_back(Point2d(x3, y3));
currObj->gtbb.push_back(Point2d(x4, y4));
string fullPath = fullFramePath(rootPath, i, k, n);
if (!fileExists(fullPath))
break;
currObj->imagePath = fullPath;
currObj->id = n;
//Add object to storage
objects.push_back(currObj);
} while (annoList.good());
datasetsLengths.push_back(framesNum-1);
data.push_back(objects);
}
}
return;
}
//Number of videos (datasets) currently loaded.
int TRACK_alovImpl::getDatasetsNum()
{
    return static_cast<int>(data.size());
}
//Number of frames stored for dataset id (1-based); -1 with a diagnostic
//when id is out of range.
int TRACK_alovImpl::getDatasetLength(int id)
{
    if (id <= 0 || id > (int)data.size())
    {
        printf("Dataset ID is out of range...\nAllowed IDs are: 1~%d\n", (int)data.size());
        return -1;
    }
    return (int)(data[id - 1].size());
}
//Select the dataset used by getNextFrame/getNextGT. An out-of-range id
//prints a diagnostic and leaves the active dataset unchanged.
bool TRACK_alovImpl::initDataset(int id)
{
    if (id <= 0 || id > (int)data.size())
    {
        printf("Dataset ID is out of range...\nAllowed IDs are: 1~%d\n", (int)data.size());
        return false;
    }
    activeDatasetID = id;
    return true;
}
//Sequential access: decode the next frame of the active dataset.
//Returns false once every stored frame has been served or the image
//cannot be read.
bool TRACK_alovImpl::getNextFrame(Mat &frame)
{
    const vector <Ptr<TRACK_alovObj> > &frames = data[activeDatasetID - 1];
    if (frameCounter >= (int)frames.size())
        return false;
    frame = imread(frames[frameCounter]->imagePath);
    ++frameCounter;
    return !frame.empty();
}
//Random access: decode frame frameID (1-based) of dataset datasetID
//(1-based). Validates both indices; the original only checked the upper
//bound of frameID and indexed out of range for frameID < 1 or a bad
//datasetID.
bool TRACK_alovImpl::getFrame(Mat &frame, int datasetID, int frameID)
{
    if (datasetID < 1 || datasetID > (int)data.size())
        return false;
    if (frameID < 1 || frameID > (int)data[datasetID - 1].size())
        return false;
    frame = imread(data[datasetID - 1][frameID - 1]->imagePath);
    return !frame.empty();
}
//Factory: hand back the concrete implementation behind the interface.
Ptr<TRACK_alov> TRACK_alov::create()
{
    return makePtr<TRACK_alovImpl>();
}
//Ground truth of the frame most recently returned by getNextFrame().
//Calling this before any frame was read (frameCounter == 0) made the
//original index data[...][-1]; return an empty box in that case instead.
vector <Point2f> TRACK_alovImpl::getNextGT()
{
    if (frameCounter < 1 || frameCounter > (int)data[activeDatasetID - 1].size())
        return vector <Point2f>();
    Ptr <TRACK_alovObj> currObj = data[activeDatasetID - 1][frameCounter - 1];
    return currObj->gtbb;
}
//Ground truth of frame frameID (1-based) in dataset datasetID (1-based).
//Validates both indices — the original indexed blindly and read out of
//range for bad IDs — and returns an empty box on invalid input.
vector <Point2f> TRACK_alovImpl::getGT(int datasetID, int frameID)
{
    if (datasetID < 1 || datasetID > (int)data.size())
        return vector <Point2f>();
    if (frameID < 1 || frameID > (int)data[datasetID - 1].size())
        return vector <Point2f>();
    return data[datasetID - 1][frameID - 1]->gtbb;
}
}
}

@ -1,2 +1,2 @@
set(the_description "Tracking API")
ocv_define_module(tracking opencv_imgproc opencv_core opencv_video opencv_highgui opencv_plot OPTIONAL opencv_datasets WRAP python)
ocv_define_module(tracking opencv_imgproc opencv_core opencv_video opencv_highgui opencv_dnn opencv_plot OPTIONAL opencv_datasets WRAP python)

@ -93,3 +93,10 @@
keywords={computer vision;feature extraction;image colour analysis;image representation;image sequences;adaptive color attributes;benchmark color sequences;color features;color representations;computer vision;image description;real-time visual tracking;tracking-by-detection framework;Color;Computational modeling;Covariance matrices;Image color analysis;Kernel;Target tracking;Visualization;Adaptive Dimensionality Reduction;Appearance Model;Color Features;Visual Tracking},
doi={10.1109/CVPR.2014.143},
}
@inproceedings{GOTURN,
title={Learning to Track at 100 FPS with Deep Regression Networks},
author={Held, David and Thrun, Sebastian and Savarese, Silvio},
booktitle = {European Conference on Computer Vision (ECCV)},
year = {2016}
}

@ -57,7 +57,7 @@
/*
* Partially based on:
* ====================================================================================================================
* - [AAM] S. Salti, A. Cavallaro, L. Di Stefano, Adaptive Appearance Modeling for Video Tracking: Survey and Evaluation
* - [AAM] S. Salti, A. Cavallaro, L. Di Stefano, Adaptive Appearance Modeling for Video Tracking: Survey and Evaluation
* - [AMVOT] X. Li, W. Hu, C. Shen, Z. Zhang, A. Dick, A. van den Hengel, A Survey of Appearance Models in Visual Object Tracking
*
* This Tracking API has been designed with PlantUML. If you modify this API please change UML files under modules/tracking/doc/uml
@ -200,7 +200,7 @@ class CV_EXPORTS TrackerFeatureSet
bool blockAddTrackerFeature;
std::vector<std::pair<String, Ptr<TrackerFeature> > > features; //list of features
std::vector<Mat> responses; //list of response after compute
std::vector<Mat> responses; //list of response after compute
};
@ -567,7 +567,7 @@ class CV_EXPORTS_W Tracker : public virtual Algorithm
Ptr<TrackerModel> getModel()
{
return model;
return model;
}
protected:
@ -806,7 +806,7 @@ class CV_EXPORTS TrackerSamplerCSC : public TrackerSamplerAlgorithm
Params();
float initInRad; //!< radius for gathering positive instances during init
float trackInPosRad; //!< radius for gathering positive instances during tracking
float searchWinSize; //!< size of search window
float searchWinSize; //!< size of search window
int initMaxNegNum; //!< # negative samples to use during init
int trackMaxPosNum; //!< # positive samples to use during training
int trackMaxNegNum; //!< # negative samples to use during training
@ -1090,12 +1090,12 @@ class CV_EXPORTS TrackerMIL : public Tracker
{
Params();
//parameters for sampler
float samplerInitInRadius; //!< radius for gathering positive instances during init
float samplerInitInRadius; //!< radius for gathering positive instances during init
int samplerInitMaxNegNum; //!< # negative samples to use during init
float samplerSearchWinSize; //!< size of search window
float samplerTrackInRadius; //!< radius for gathering positive instances during tracking
int samplerTrackMaxPosNum; //!< # positive samples to use during tracking
int samplerTrackMaxNegNum; //!< # negative samples to use during tracking
int samplerTrackMaxPosNum; //!< # positive samples to use during tracking
int samplerTrackMaxNegNum; //!< # negative samples to use during tracking
int featureSetNumFeatures; //!< # features
void read( const FileNode& fn );
@ -1205,56 +1205,85 @@ class CV_EXPORTS TrackerTLD : public Tracker
class CV_EXPORTS TrackerKCF : public Tracker
{
public:
/**
* \brief Feature type to be used in the tracking grayscale, colornames, compressed color-names
* The modes available now:
- "GRAY" -- Use grayscale values as the feature
- "CN" -- Color-names feature
*/
enum MODE {
GRAY = (1u << 0),
CN = (1u << 1),
CUSTOM = (1u << 2)
};
struct CV_EXPORTS Params
{
/**
* \brief Constructor
*/
Params();
/**
* \brief Read parameters from file, currently unused
*/
void read(const FileNode& /*fn*/);
/**
* \brief Read parameters from file, currently unused
*/
void write(FileStorage& /*fs*/) const;
double sigma; //!< gaussian kernel bandwidth
double lambda; //!< regularization
double interp_factor; //!< linear interpolation factor for adaptation
double output_sigma_factor; //!< spatial bandwidth (proportional to target)
double pca_learning_rate; //!< compression learning rate
bool resize; //!< activate the resize feature to improve the processing speed
bool split_coeff; //!< split the training coefficients into two matrices
bool wrap_kernel; //!< wrap around the kernel values
bool compress_feature; //!< activate the pca method to compress the features
int max_patch_size; //!< threshold for the ROI size
int compressed_size; //!< feature size after compression
unsigned int desc_pca; //!< compressed descriptors of TrackerKCF::MODE
unsigned int desc_npca; //!< non-compressed descriptors of TrackerKCF::MODE
};
virtual void setFeatureExtractor(void(*)(const Mat, const Rect, Mat&), bool pca_func = false);
/** @brief Constructor
@param parameters KCF parameters TrackerKCF::Params
*/
BOILERPLATE_CODE("KCF", TrackerKCF);
/**
* \brief Feature type to be used in the tracking grayscale, colornames, compressed color-names
* The modes available now:
- "GRAY" -- Use grayscale values as the feature
- "CN" -- Color-names feature
*/
enum MODE {
GRAY = (1u << 0),
CN = (1u << 1),
CUSTOM = (1u << 2)
};
struct CV_EXPORTS Params
{
/**
* \brief Constructor
*/
Params();
/**
* \brief Read parameters from file, currently unused
*/
void read(const FileNode& /*fn*/);
/**
* \brief Read parameters from file, currently unused
*/
void write(FileStorage& /*fs*/) const;
double sigma; //!< gaussian kernel bandwidth
double lambda; //!< regularization
double interp_factor; //!< linear interpolation factor for adaptation
double output_sigma_factor; //!< spatial bandwidth (proportional to target)
double pca_learning_rate; //!< compression learning rate
bool resize; //!< activate the resize feature to improve the processing speed
bool split_coeff; //!< split the training coefficients into two matrices
bool wrap_kernel; //!< wrap around the kernel values
bool compress_feature; //!< activate the pca method to compress the features
int max_patch_size; //!< threshold for the ROI size
int compressed_size; //!< feature size after compression
unsigned int desc_pca; //!< compressed descriptors of TrackerKCF::MODE
unsigned int desc_npca; //!< non-compressed descriptors of TrackerKCF::MODE
};
virtual void setFeatureExtractor(void(*)(const Mat, const Rect, Mat&), bool pca_func = false);
/** @brief Constructor
@param parameters KCF parameters TrackerKCF::Params
*/
BOILERPLATE_CODE("KCF", TrackerKCF);
};
/** @brief GOTURN (@cite GOTURN) is kind of trackers based on Convolutional Neural Networks (CNN). While taking all advantages of CNN trackers,
* GOTURN is much faster due to offline training without online fine-tuning nature.
* GOTURN tracker addresses the problem of single target tracking: given a bounding box label of an object in the first frame of the video,
* we track that object through the rest of the video. NOTE: Current method of GOTURN does not handle occlusions; however, it is fairly
* robust to viewpoint changes, lighting changes, and deformations.
* Inputs of GOTURN are two RGB patches representing Target and Search patches resized to 227x227.
* Outputs of GOTURN are predicted bounding box coordinates, relative to Search patch coordinate system, in format X1,Y1,X2,Y2.
* Original paper is here: <http://davheld.github.io/GOTURN/GOTURN.pdf>
* As long as original authors implementation: <https://github.com/davheld/GOTURN#train-the-tracker>
* Implementation of training algorithm is placed in separately here due to 3d-party dependencies:
* <https://github.com/Auron-X/GOTURN_Training_Toolkit>
* GOTURN architecture goturn.prototxt and trained model goturn.caffemodel are accessible on opencv_extra GitHub repository.
*/
//Tracker implementing GOTURN (CNN-based, see @cite GOTURN above).
class CV_EXPORTS TrackerGOTURN : public Tracker
{
public:
//Parameter block; currently empty — kept for API symmetry with the other
//trackers and for forward compatibility.
struct CV_EXPORTS Params
{
Params();
//Read parameters from file (no parameters defined yet)
void read(const FileNode& /*fn*/);
//Write parameters to file (no parameters defined yet)
void write(FileStorage& /*fs*/) const;
};
/** @brief Constructor
@param parameters GOTURN parameters TrackerGOTURN::Params
*/
BOILERPLATE_CODE("GOTURN", TrackerGOTURN);
};
/************************************ MultiTracker Class ---By Laksono Kurnianggoro---) ************************************/
@ -1266,103 +1295,103 @@ class CV_EXPORTS_W MultiTracker
{
public:
/**
* \brief Constructor.
* In the case of trackerType is given, it will be set as the default algorithm for all trackers.
* @param trackerType the name of the tracker algorithm to be used
*/
CV_WRAP MultiTracker(const String& trackerType = "");
/**
* \brief Destructor
*/
~MultiTracker();
/**
* \brief Add a new object to be tracked.
* The defaultAlgorithm will be used the newly added tracker.
* @param image input image
* @param boundingBox a rectangle represents ROI of the tracked object
*/
CV_WRAP bool add(const Mat& image, const Rect2d& boundingBox);
/**
* \brief Add a new object to be tracked.
* @param trackerType the name of the tracker algorithm to be used
* @param image input image
* @param boundingBox a rectangle represents ROI of the tracked object
*/
CV_WRAP bool add(const String& trackerType, const Mat& image, const Rect2d& boundingBox);
/**
* \brief Add a set of objects to be tracked.
* @param trackerType the name of the tracker algorithm to be used
* @param image input image
* @param boundingBox list of the tracked objects
*/
CV_WRAP bool add(const String& trackerType, const Mat& image, std::vector<Rect2d> boundingBox);
/**
* \brief Add a set of objects to be tracked using the defaultAlgorithm tracker.
* @param image input image
* @param boundingBox list of the tracked objects
*/
CV_WRAP bool add(const Mat& image, std::vector<Rect2d> boundingBox);
/**
* \brief Update the current tracking status.
* The result will be saved in the internal storage.
* @param image input image
*/
bool update(const Mat& image);
//!< storage for the tracked objects, each object corresponds to one tracker algorithm.
std::vector<Rect2d> objects;
/**
* \brief Update the current tracking status.
* @param image input image
* @param boundingBox the tracking result, represent a list of ROIs of the tracked objects.
*/
CV_WRAP bool update(const Mat& image, CV_OUT std::vector<Rect2d> & boundingBox);
/**
* \brief Constructor.
* In the case of trackerType is given, it will be set as the default algorithm for all trackers.
* @param trackerType the name of the tracker algorithm to be used
*/
CV_WRAP MultiTracker(const String& trackerType = "");
/**
* \brief Destructor
*/
~MultiTracker();
/**
* \brief Add a new object to be tracked.
* The defaultAlgorithm will be used the newly added tracker.
* @param image input image
* @param boundingBox a rectangle represents ROI of the tracked object
*/
CV_WRAP bool add(const Mat& image, const Rect2d& boundingBox);
/**
* \brief Add a new object to be tracked.
* @param trackerType the name of the tracker algorithm to be used
* @param image input image
* @param boundingBox a rectangle represents ROI of the tracked object
*/
CV_WRAP bool add(const String& trackerType, const Mat& image, const Rect2d& boundingBox);
/**
* \brief Add a set of objects to be tracked.
* @param trackerType the name of the tracker algorithm to be used
* @param image input image
* @param boundingBox list of the tracked objects
*/
CV_WRAP bool add(const String& trackerType, const Mat& image, std::vector<Rect2d> boundingBox);
/**
* \brief Add a set of objects to be tracked using the defaultAlgorithm tracker.
* @param image input image
* @param boundingBox list of the tracked objects
*/
CV_WRAP bool add(const Mat& image, std::vector<Rect2d> boundingBox);
/**
* \brief Update the current tracking status.
* The result will be saved in the internal storage.
* @param image input image
*/
bool update(const Mat& image);
//!< storage for the tracked objects, each object corresponds to one tracker algorithm.
std::vector<Rect2d> objects;
/**
* \brief Update the current tracking status.
* @param image input image
* @param boundingBox the tracking result, represent a list of ROIs of the tracked objects.
*/
CV_WRAP bool update(const Mat& image, CV_OUT std::vector<Rect2d> & boundingBox);
protected:
//!< storage for the tracker algorithms.
std::vector< Ptr<Tracker> > trackerList;
//!< storage for the tracker algorithms.
std::vector< Ptr<Tracker> > trackerList;
//!< default algorithm for the tracking method.
String defaultAlgorithm;
//!< default algorithm for the tracking method.
String defaultAlgorithm;
};
class ROISelector {
public:
Rect2d select(Mat img, bool fromCenter = true);
Rect2d select(const cv::String& windowName, Mat img, bool showCrossair = true, bool fromCenter = true);
void select(const cv::String& windowName, Mat img, std::vector<Rect2d> & boundingBox, bool fromCenter = true);
Rect2d select(Mat img, bool fromCenter = true);
Rect2d select(const cv::String& windowName, Mat img, bool showCrossair = true, bool fromCenter = true);
void select(const cv::String& windowName, Mat img, std::vector<Rect2d> & boundingBox, bool fromCenter = true);
struct handlerT{
// basic parameters
bool isDrawing;
Rect2d box;
Mat image;
struct handlerT{
// basic parameters
bool isDrawing;
Rect2d box;
Mat image;
// parameters for drawing from the center
bool drawFromCenter;
Point2f center;
// parameters for drawing from the center
bool drawFromCenter;
Point2f center;
// initializer list
handlerT() : isDrawing(false), drawFromCenter(true) {};
}selectorParams;
// initializer list
handlerT() : isDrawing(false), drawFromCenter(true) {};
}selectorParams;
// to store the tracked objects
std::vector<handlerT> objects;
// to store the tracked objects
std::vector<handlerT> objects;
private:
static void mouseHandler(int event, int x, int y, int flags, void *param);
void opencv_mouse_callback(int event, int x, int y, int, void *param);
static void mouseHandler(int event, int x, int y, int flags, void *param);
void opencv_mouse_callback(int event, int x, int y, int, void *param);
// save the keypressed characted
int key;
// save the keypressed characted
int key;
};
Rect2d CV_EXPORTS_W selectROI(Mat img, bool fromCenter = true);
@ -1379,45 +1408,45 @@ void CV_EXPORTS_W selectROI(const cv::String& windowName, Mat img, std::vector<R
class CV_EXPORTS MultiTracker_Alt
{
public:
/** @brief Constructor for Multitracker
*/
MultiTracker_Alt()
{
targetNum = 0;
}
/** @brief Add a new target to a tracking-list and initialize the tracker with a know bounding box that surrounding the target
@param image The initial frame
@param boundingBox The initial boundig box of target
@param tracker_algorithm_name Multi-tracker algorithm name
@return True if new target initialization went succesfully, false otherwise
*/
bool addTarget(const Mat& image, const Rect2d& boundingBox, String tracker_algorithm_name);
/** @brief Update all trackers from the tracking-list, find a new most likely bounding boxes for the targets
@param image The current frame
@return True means that all targets were located and false means that tracker couldn't locate one of the targets in
current frame. Note, that latter *does not* imply that tracker has failed, maybe target is indeed
missing from the frame (say, out of sight)
*/
bool update(const Mat& image);
/** @brief Current number of targets in tracking-list
*/
int targetNum;
/** @brief Trackers list for Multi-Object-Tracker
*/
std::vector <Ptr<Tracker> > trackers;
/** @brief Bounding Boxes list for Multi-Object-Tracker
*/
std::vector <Rect2d> boundingBoxes;
/** @brief List of randomly generated colors for bounding boxes display
*/
std::vector<Scalar> colors;
/** @brief Constructor for Multitracker
*/
MultiTracker_Alt()
{
targetNum = 0;
}
/** @brief Add a new target to a tracking-list and initialize the tracker with a know bounding box that surrounding the target
@param image The initial frame
@param boundingBox The initial boundig box of target
@param tracker_algorithm_name Multi-tracker algorithm name
@return True if new target initialization went succesfully, false otherwise
*/
bool addTarget(const Mat& image, const Rect2d& boundingBox, String tracker_algorithm_name);
/** @brief Update all trackers from the tracking-list, find a new most likely bounding boxes for the targets
@param image The current frame
@return True means that all targets were located and false means that tracker couldn't locate one of the targets in
current frame. Note, that latter *does not* imply that tracker has failed, maybe target is indeed
missing from the frame (say, out of sight)
*/
bool update(const Mat& image);
/** @brief Current number of targets in tracking-list
*/
int targetNum;
/** @brief Trackers list for Multi-Object-Tracker
*/
std::vector <Ptr<Tracker> > trackers;
/** @brief Bounding Boxes list for Multi-Object-Tracker
*/
std::vector <Rect2d> boundingBoxes;
/** @brief List of randomly generated colors for bounding boxes display
*/
std::vector<Scalar> colors;
};
/** @brief Multi Object Tracker for TLD. TLD is a novel tracking framework that explicitly decomposes
@ -1436,17 +1465,17 @@ occlusions, object absence etc.
class CV_EXPORTS MultiTrackerTLD : public MultiTracker_Alt
{
public:
    /** @brief Update all trackers from the tracking-list, find a new most likely bounding boxes for the targets by
    optimized update method using some techniques to speedup calculations specifically for MO TLD. The only limitation
    is that all target bounding boxes should have approximately same aspect ratios. Speed boost is around 20%

    @param image The current frame.

    @return True means that all targets were located and false means that tracker couldn't locate one of the targets in
    current frame. Note, that latter *does not* imply that tracker has failed, maybe target is indeed
    missing from the frame (say, out of sight)
    */
    bool update_opt(const Mat& image);
};
//! @}

@ -343,3 +343,73 @@ PERF_TEST_P(tracking, tld, testing::Combine(TESTSET_NAMES, SEGMENTS))
SANITY_CHECK( bbs_mat, 15, ERROR_RELATIVE );
}
//Performance test for the GOTURN tracker: run one segment of a dataset video
//and sanity-check the produced bounding boxes against stored results.
PERF_TEST_P(tracking, GOTURN, testing::Combine(TESTSET_NAMES, SEGMENTS))
{
    string video = get<0>(GetParam());
    int segmentId = get<1>(GetParam());

    //Dataset metadata: start frame and file-name pattern of the sequence
    int startFrame;
    string prefix;
    string suffix;
    string datasetMeta = getDataPath(TRACKING_DIR + "/" + video + "/" + video + ".yml");
    checkData(datasetMeta, startFrame, prefix, suffix);
    int gtStartFrame = startFrame;

    //Load ground-truth bounding boxes for the whole video
    vector<Rect> gtBBs;
    string gtFile = getDataPath(TRACKING_DIR + "/" + video + "/gt.txt");
    if (!getGroundTruth(gtFile, gtBBs))
        FAIL() << "Ground truth file " << gtFile << " can not be read" << endl;
    int bbCounter = (int)gtBBs.size();

    Mat frame;
    bool initialized = false;
    vector<Rect> bbs;

    Ptr<Tracker> tracker = Tracker::create("GOTURN");
    int numSegments = (sizeof(SEGMENTS) / sizeof(int));
    int endFrame = 0;
    getSegment(segmentId, numSegments, bbCounter, startFrame, endFrame);

    //Initial bounding box comes from the ground truth at the segment start
    Rect currentBBi = gtBBs[startFrame - gtStartFrame];
    Rect2d currentBB(currentBBi);

    TEST_CYCLE_N(1)
    {
        VideoCapture c;
        c.open(getDataPath(TRACKING_DIR + "/" + video + "/" + FOLDER_IMG + "/" + video + ".webm"));
        c.set(CAP_PROP_POS_FRAMES, startFrame);
        for (int frameCounter = startFrame; frameCounter < endFrame; frameCounter++)
        {
            c >> frame;
            if (frame.empty())
            {
                break;
            }
            if (!initialized)
            {
                //First frame: seed the tracker with the ground-truth box
                if (!tracker->init(frame, currentBB))
                {
                    FAIL() << "Could not initialize tracker" << endl;
                    return;
                }
                initialized = true;
            }
            else if (initialized)
            {
                tracker->update(frame, currentBB);
            }
            bbs.push_back(currentBB);
        }
    }
    //save the bounding boxes in a Mat
    Mat bbs_mat((int)bbs.size(), 4, CV_32F);
    getMatOfRects(bbs, bbs_mat);
    SANITY_CHECK(bbs_mat, 15, ERROR_RELATIVE);
}

@ -0,0 +1,217 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
//Demo of GOTURN tracker
//In order to use GOTURN tracker, GOTURN architecture goturn.prototxt and goturn.caffemodel are required to exist in root folder.
//There are 2 ways to get caffemodel:
//1 - Train you own GOTURN model using <https://github.com/Auron-X/GOTURN_Training_Toolkit>
//2 - Download pretrained caffemodel from <https://github.com/opencv/opencv_extra>
#include "opencv2/datasets/track_alov.hpp"
#include <opencv2/core/utility.hpp>
#include <opencv2/tracking.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
using namespace std;
using namespace cv;
using namespace cv::datasets;
#define NUM_TEST_FRAMES 1000
static Mat image;
static bool paused;
static bool selectObjects = false;
static bool startSelection = false;
Rect2d boundingBox;
static const char* keys =
{ "{@dataset_path |true| Dataset path }"
"{@dataset_id |1| Dataset ID }"
};
//Mouse callback used to draw the initial bounding box: press to set the
//origin, drag to preview the rectangle, release to finalize the selection
//and unpause the video.
static void onMouse(int event, int x, int y, int, void*)
{
    if (!selectObjects)
    {
        switch (event)
        {
        case EVENT_LBUTTONDOWN:
            //set origin of the bounding box
            startSelection = true;
            boundingBox.x = x;
            boundingBox.y = y;
            boundingBox.width = boundingBox.height = 0;
            break;
        case EVENT_LBUTTONUP:
            //set width and height of the bounding box
            boundingBox.width = std::abs(x - boundingBox.x);
            boundingBox.height = std::abs(y - boundingBox.y);
            //anchor the box at the top-left corner so that dragging in any
            //direction (not only down-right) yields a correctly placed box
            if (x < boundingBox.x)
                boundingBox.x = x;
            if (y < boundingBox.y)
                boundingBox.y = y;
            paused = false;
            selectObjects = true;
            startSelection = false;
            break;
        case EVENT_MOUSEMOVE:
            if (startSelection && !selectObjects)
            {
                //draw the bounding box
                Mat currentFrame;
                image.copyTo(currentFrame);
                rectangle(currentFrame, Point((int)boundingBox.x, (int)boundingBox.y), Point(x, y), Scalar(255, 0, 0), 2, 1);
                imshow("GOTURN Tracking", currentFrame);
            }
            break;
        }
    }
}
//Print usage information and the hot-key summary for the demo.
static void help()
{
    static const char* const usage =
        "\nThis example is a simple demo of GOTURN tracking on ALOV300++ dataset"
        "ALOV dataset contains videos with ID range: 1~314\n"
        "-- pause video [p] and draw a bounding boxes around the targets to start the tracker\n"
        "Example:\n"
        "./goturnTracker <dataset_path> <dataset_id>\n";
    static const char* const hotkeys =
        "\n\nHot keys: \n"
        "\tq - quit the program\n"
        "\tp - pause video\n";
    cout << usage << endl;
    cout << hotkeys;
}
//Entry point: load one ALOV300++ video, let the user draw the initial box
//(see onMouse), then run the GOTURN tracker frame by frame, drawing the
//predicted box and printing per-frame and average timings.
int main(int argc, char *argv[])
{
CommandLineParser parser(argc, argv, keys);
string datasetRootPath = parser.get<string>(0);
int datasetID = parser.get<int>(1);
if (datasetRootPath.empty())
{
help();
return -1;
}
Mat frame;
paused = false;
namedWindow("GOTURN Tracking", 0);
setMouseCallback("GOTURN Tracking", onMouse, 0);
//Create GOTURN tracker
Ptr<Tracker> tracker = Tracker::create("GOTURN");
//Load and init full ALOV300++ dataset with a given datasetID, as alternative you can use loadAnnotatedOnly(..)
//to load only frames with labeled ground truth ~ every 5-th frame
Ptr<cv::datasets::TRACK_alov> dataset = TRACK_alov::create();
dataset->load(datasetRootPath);
dataset->initDataset(datasetID);
//Read first frame
dataset->getNextFrame(frame);
frame.copyTo(image);
rectangle(image, boundingBox, Scalar(255, 0, 0), 2, 1);
imshow("GOTURN Tracking", image);
bool initialized = false;
//Start paused so the user can draw the initial bounding box (onMouse
//flips paused to false once the selection is finished)
paused = true;
int frameCounter = 0;
//Time measurement (whole-run timer)
int64 e3 = getTickCount();
for (;;)
{
if (!paused)
{
//Time measurement (per-frame timer)
int64 e1 = getTickCount();
//Advance to the next frame only after the tracker was initialized;
//the first frame is reused for initialization
if (initialized){
if (!dataset->getNextFrame(frame))
break;
frame.copyTo(image);
}
if (!initialized && selectObjects)
{
//Initialize the tracker and add targets
if (!tracker->init(frame, boundingBox))
{
cout << "Tracker Init Error!!!";
return 0;
}
rectangle(frame, boundingBox, Scalar(0, 0, 255), 2, 1);
initialized = true;
}
else if (initialized)
{
//Update all targets
if (tracker->update(frame, boundingBox))
{
rectangle(frame, boundingBox, Scalar(0, 0, 255), 2, 1);
}
}
imshow("GOTURN Tracking", frame);
frameCounter++;
//Time measurement: report per-frame latency
int64 e2 = getTickCount();
double t1 = (e2 - e1) / getTickFrequency();
cout << frameCounter << "\tframe : " << t1 * 1000.0 << "ms" << endl;
}
char c = (char)waitKey(2);
if (c == 'q')
break;
if (c == 'p')
paused = !paused;
}
//Time measurement: overall averages (pause time is included in t2)
int64 e4 = getTickCount();
double t2 = (e4 - e3) / getTickFrequency();
cout << "Average Time for Frame: " << t2 * 1000.0 / frameCounter << "ms" << endl;
cout << "Average FPS: " << 1.0 / t2*frameCounter << endl;
waitKey(0);
return 0;
}

@ -0,0 +1,191 @@
/*///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "gtrTracker.hpp"
namespace cv
{
//Default GOTURN parameters (the tracker currently exposes no tunable settings)
TrackerGOTURN::Params::Params(){}
//No parameters to deserialize yet; kept for Tracker interface symmetry
void TrackerGOTURN::Params::read(const cv::FileNode& /*fn*/){}
//No parameters to serialize yet; kept for Tracker interface symmetry
void TrackerGOTURN::Params::write(cv::FileStorage& /*fs*/) const {}
//Factory: build the concrete GOTURN tracker implementation
Ptr<TrackerGOTURN> TrackerGOTURN::createTracker(const TrackerGOTURN::Params &parameters)
{
return Ptr<gtr::TrackerGOTURNImpl>(new gtr::TrackerGOTURNImpl(parameters));
}
namespace gtr
{
//Minimal TrackerModel for GOTURN: stores only the previous frame and its
//bounding box, which is all the context the regression network needs.
class TrackerGOTURNModel : public TrackerModel{
public:
TrackerGOTURNModel(TrackerGOTURN::Params){}
//Last predicted (or initial) bounding box
Rect2d getBoundingBox(){ return boundingBox_; }
//NOTE(review): method name is misspelled ("Bouding"); kept as-is because callers use it
void setBoudingBox(Rect2d boundingBox){ boundingBox_ = boundingBox; }
//Previous frame, kept as GOTURN's "target" context (deep copy on set)
Mat getImage(){ return image_; }
void setImage(const Mat& image){ image.copyTo(image_); }
protected:
Rect2d boundingBox_;
Mat image_;
//Model estimation/update are no-ops: state is set explicitly by the tracker
void modelEstimationImpl(const std::vector<Mat>&){}
void modelUpdateImpl(){}
};
/** Construct the GOTURN tracker implementation with the given parameters.
 *  The tracker starts uninitialized; init() must be called before update().
 */
TrackerGOTURNImpl::TrackerGOTURNImpl(const TrackerGOTURN::Params &parameters) :
    params(parameters)
{
    isInit = false;
}
//Read tracker parameters from a file node (no-op while Params is empty)
void TrackerGOTURNImpl::read(const cv::FileNode& fn)
{
params.read(fn);
}
//Write tracker parameters to file storage (no-op while Params is empty)
void TrackerGOTURNImpl::write(cv::FileStorage& fs) const
{
params.write(fs);
}
bool TrackerGOTURNImpl::initImpl(const Mat& image, const Rect2d& boundingBox)
{
//Make a simple model from frame and bounding box
model = Ptr<TrackerGOTURNModel>(new TrackerGOTURNModel(params));
((TrackerGOTURNModel*)static_cast<TrackerModel*>(model))->setImage(image);
((TrackerGOTURNModel*)static_cast<TrackerModel*>(model))->setBoudingBox(boundingBox);
//Load GOTURN architecture from *.prototxt and pretrained weights from *.caffemodel
String modelTxt = "goturn.prototxt";
String modelBin = "goturn.caffemodel";
Ptr<dnn::Importer> importer;
try //Import GOTURN model
{
importer = dnn::createCaffeImporter(modelTxt, modelBin);
}
catch (const cv::Exception &err) //Importer can throw errors, we will catch them
{
std::cerr << err.msg << std::endl;
}
if (!importer)
{
cvError(CV_StsError, "cv::gtr::InitImpl", "GOTURN network loading error...", "gtrTracker.cpp", 117);
}
importer->populateNet(net);
importer.release(); //We don't need importer anymore
return true;
}
//Predict a new bounding box on the current frame.
//GOTURN regresses the new box from a pair of crops: the "target" patch from
//the previous frame (centered on the previous box, 2x its size) and a
//same-sized "search" patch from the current frame.
bool TrackerGOTURNImpl::updateImpl(const Mat& image, Rect2d& boundingBox)
{
int INPUT_SIZE = 227; //Network input resolution; both patches are resized to this
//Using prevFrame & prevBB from model and curFrame GOTURN calculating curBB
Mat curFrame = image.clone();
Mat prevFrame = ((TrackerGOTURNModel*)static_cast<TrackerModel*>(model))->getImage();
Rect2d prevBB = ((TrackerGOTURNModel*)static_cast<TrackerModel*>(model))->getBoundingBox();
Rect2d curBB;
float padTargetPatch = 2.0; //Crop side = padTargetPatch * previous box side
Rect2f searchPatchRect, targetPatchRect;
Point2f currCenter, prevCenter;
Mat prevFramePadded, curFramePadded;
Mat searchPatch, targetPatch;
//Center of the previous bounding box
prevCenter.x = (float)(prevBB.x + prevBB.width / 2);
prevCenter.y = (float)(prevBB.y + prevBB.height / 2);
targetPatchRect.width = (float)(prevBB.width*padTargetPatch);
targetPatchRect.height = (float)(prevBB.height*padTargetPatch);
//The "+ targetPatchRect.width/height" terms translate the rect into the
//padded-image coordinate frame: copyMakeBorder below adds exactly that much
//border on each side, so crops near the image border stay inside the Mat
targetPatchRect.x = (float)(prevCenter.x - prevBB.width*padTargetPatch / 2.0 + targetPatchRect.width);
targetPatchRect.y = (float)(prevCenter.y - prevBB.height*padTargetPatch / 2.0 + targetPatchRect.height);
copyMakeBorder(prevFrame, prevFramePadded, (int)targetPatchRect.height, (int)targetPatchRect.height, (int)targetPatchRect.width, (int)targetPatchRect.width, BORDER_REPLICATE);
targetPatch = prevFramePadded(targetPatchRect).clone();
//Search patch: same rectangle, taken from the (padded) current frame
copyMakeBorder(curFrame, curFramePadded, (int)targetPatchRect.height, (int)targetPatchRect.height, (int)targetPatchRect.width, (int)targetPatchRect.width, BORDER_REPLICATE);
searchPatch = curFramePadded(targetPatchRect).clone();
//Preprocess
//Resize
resize(targetPatch, targetPatch, Size(INPUT_SIZE, INPUT_SIZE));
resize(searchPatch, searchPatch, Size(INPUT_SIZE, INPUT_SIZE));
//Mean Subtract
targetPatch = targetPatch - 128;
searchPatch = searchPatch - 128;
//Convert to Float type
targetPatch.convertTo(targetPatch, CV_32F);
searchPatch.convertTo(searchPatch, CV_32F);
//Feed both patches to the two input blobs and run the network
dnn::Blob targetBlob = dnn::Blob(targetPatch);
dnn::Blob searchBlob = dnn::Blob(searchPatch);
net.setBlob(".data1", targetBlob);
net.setBlob(".data2", searchBlob);
net.forward();
//"scale" blob holds 4 values; presumably (x1, y1, x2, y2) corners in
//INPUT_SIZE patch coordinates -- TODO confirm against the GOTURN model
dnn::Blob res = net.getBlob("scale");
Mat resMat = res.matRefConst().reshape(1, 1);
//Map the regressed corners back to unpadded frame coordinates;
//the "- targetPatchRect.width/height" terms undo the padding offset above
curBB.x = targetPatchRect.x + (resMat.at<float>(0) * targetPatchRect.width / INPUT_SIZE) - targetPatchRect.width;
curBB.y = targetPatchRect.y + (resMat.at<float>(1) * targetPatchRect.height / INPUT_SIZE) - targetPatchRect.height;
curBB.width = (resMat.at<float>(2) - resMat.at<float>(0)) * targetPatchRect.width / INPUT_SIZE;
curBB.height = (resMat.at<float>(3) - resMat.at<float>(1)) * targetPatchRect.height / INPUT_SIZE;
//Predicted BB
boundingBox = curBB;
//Set new model image and BB from current frame
((TrackerGOTURNModel*)static_cast<TrackerModel*>(model))->setImage(curFrame);
((TrackerGOTURNModel*)static_cast<TrackerModel*>(model))->setBoudingBox(curBB);
return true;
}
}
}

@ -0,0 +1,76 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_GOTURN_TRACKER
#define OPENCV_GOTURN_TRACKER
#include "precomp.hpp"
#include "opencv2/video/tracking.hpp"
#include "opencv2/dnn.hpp"
#include "gtrUtils.hpp"
#include "opencv2/imgproc.hpp"
#include<algorithm>
#include<limits.h>
namespace cv
{
namespace gtr
{
//Concrete GOTURN tracker: wraps a Caffe-trained regression network (loaded
//through the dnn module) behind the generic Tracker interface.
class TrackerGOTURNImpl : public TrackerGOTURN
{
public:
//parameters currently carry no settings; default-constructed by default
TrackerGOTURNImpl(const TrackerGOTURN::Params &parameters = TrackerGOTURN::Params());
//Read/write parameters from/to file storage (no-ops while Params is empty)
void read(const FileNode& fn);
void write(FileStorage& fs) const;
//Load the network and store the first frame/box in the model
bool initImpl(const Mat& image, const Rect2d& boundingBox);
//Regress the new bounding box for the given frame
bool updateImpl(const Mat& image, Rect2d& boundingBox);
TrackerGOTURN::Params params;
//GOTURN regression network (populated in initImpl)
dnn::Net net;
};
}
}
#endif

@ -0,0 +1,146 @@
/*///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "gtrUtils.hpp"
namespace cv
{
namespace gtr
{
/** Draw one sample from a Laplace(m, b) distribution.
 *  @param b scale (diversity) parameter; b == 0 collapses the distribution to m
 *  @param m location parameter (mean/median)
 *  @return a Laplace-distributed pseudo-random value (uses rand(); seed with srand())
 *
 *  Inverse-CDF sampling: |X - m| / b ~ Exp(1), with a fair coin for the sign.
 */
double generateRandomLaplacian(double b, double m)
{
    double t = (double)rand() / (RAND_MAX);
    //Map rand() to (0, 1]: the previous version could draw n == 0, making
    //log(n) = -inf and returning an infinite (or NaN, for b == 0) offset
    double n = ((double)rand() + 1.0) / ((double)RAND_MAX + 1.0);
    if (t > 0.5)
        return m + b*log(n);
    else
        return m - b*log(n);
}
/** Convert an ALOV300++ four-point annotation to an axis-aligned bounding box.
 *  Points 0 and 1 span the horizontal extent; points 0 and 2 the vertical one.
 */
Rect2f anno2rect(vector<Point2f> annoBB)
{
    const float left = min(annoBB[0].x, annoBB[1].x);
    const float top = min(annoBB[0].y, annoBB[2].y);
    const float bbWidth = (float)fabs(annoBB[0].x - annoBB[1].x);
    const float bbHeight = (float)fabs(annoBB[0].y - annoBB[2].y);
    return Rect2f(left, top, bbWidth, bbHeight);
}
//Generate samplesInFrame training samples from a pair of consecutive frames.
//Each sample pairs the (fixed) target patch cropped around the previous box
//with a randomly perturbed search patch from the current frame, plus the
//ground-truth box expressed in search-patch coordinates.
vector <TrainingSample> gatherFrameSamples(Mat prevFrame, Mat currFrame, Rect2f prevBB, Rect2f currBB)
{
vector <TrainingSample> trainingSamples;
Point2f currCenter, prevCenter;
Rect2f targetPatchRect, searchPatchRect;
Mat targetPatch, searchPatch;
Mat prevFramePadded, currFramePadded;
//Crop Target Patch
//Padding
//Previous frame GTBBs center
prevCenter.x = prevBB.x + prevBB.width / 2;
prevCenter.y = prevBB.y + prevBB.height / 2;
targetPatchRect.width = (float)(prevBB.width*padTarget);
targetPatchRect.height = (float)(prevBB.height*padTarget);
//The "+ targetPatchRect.width/height" terms translate the rect into the
//padded-image frame: copyMakeBorder adds exactly that much border per side
targetPatchRect.x = (float)(prevCenter.x - prevBB.width*padTarget / 2.0 + targetPatchRect.width);
targetPatchRect.y = (float)(prevCenter.y - prevBB.height*padTarget / 2.0 + targetPatchRect.height);
copyMakeBorder(prevFrame, prevFramePadded, (int)targetPatchRect.height, (int)targetPatchRect.height, (int)targetPatchRect.width, (int)targetPatchRect.width, BORDER_REPLICATE);
targetPatch = prevFramePadded(targetPatchRect);
for (int i = 0; i < samplesInFrame; i++)
{
TrainingSample sample;
//Current frame GTBBs center
currCenter.x = (float)(currBB.x + currBB.width / 2.0);
currCenter.y = (float)(currBB.y + currBB.height / 2.0);
//Generate and add random Laplacian distribution (Scaling from target size)
//dx/dy shift the search region; ds rescales it (centered around 1)
double dx, dy, ds;
dx = generateRandomLaplacian(bX, 0)*prevBB.width;
dy = generateRandomLaplacian(bY, 0)*prevBB.height;
ds = generateRandomLaplacian(bS, 1);
//Limit coefficients: shift at most one box size, scale within [Ymin, Ymax]
dx = min(dx, (double)prevBB.width);
dx = max(dx, (double)-prevBB.width);
dy = min(dy, (double)prevBB.height);
dy = max(dy, (double)-prevBB.height);
ds = min(ds, Ymax);
ds = max(ds, Ymin);
searchPatchRect.width = (float)(prevBB.width*padSearch*ds);
searchPatchRect.height =(float)(prevBB.height*padSearch*ds);
//Same padded-frame translation trick as for the target patch above
searchPatchRect.x = (float)(currCenter.x + dx - searchPatchRect.width / 2.0 + searchPatchRect.width);
searchPatchRect.y = (float)(currCenter.y + dy - searchPatchRect.height / 2.0 + searchPatchRect.height);
copyMakeBorder(currFrame, currFramePadded, (int)searchPatchRect.height, (int)searchPatchRect.height, (int)searchPatchRect.width, (int)searchPatchRect.width, BORDER_REPLICATE);
searchPatch = currFramePadded(searchPatchRect);
//Calculate Relative GTBB in search patch
Rect2f relGTBB;
relGTBB.width = currBB.width;
relGTBB.height = currBB.height;
relGTBB.x = currBB.x - searchPatchRect.x + searchPatchRect.width;
relGTBB.y = currBB.y - searchPatchRect.y + searchPatchRect.height;
//Link to the sample struct (clone: patches view the padded Mats)
sample.targetPatch = targetPatch.clone();
sample.searchPatch = searchPatch.clone();
sample.targetBB = relGTBB;
trainingSamples.push_back(sample);
}
return trainingSamples;
}
}
}

@ -0,0 +1,61 @@
#ifndef OPENCV_GTR_UTILS
#define OPENCV_GTR_UTILS
#include "precomp.hpp"
#include <vector>
#include "opencv2/highgui.hpp"
#include <opencv2/datasets/track_alov.hpp>
namespace cv
{
namespace gtr
{
//Constants controlling training-sample generation for the GOTURN tracker.
//Number of samples in batch
const int samplesInBatch = 50;
//Number of samples to mine from video frame
const int samplesInFrame = 10;
//Number of samples to mine from still image
const int samplesInImage = 10;
//Padding coefficients for Target/Search Region
//(patch side = bounding-box side * pad; see gatherFrameSamples)
const double padTarget = 2.0;
const double padSearch = 2.0;
//Scale parameters for Laplace distribution for Translation/Scale
//(translation offsets are relative to bounding-box size)
const double bX = 1.0/10;
const double bY = 1.0/10;
const double bS = 1.0/15;
//Limits of scale changes
const double Ymax = 1.4;
const double Ymin = 0.6;
//Lower boundary constraints for random samples (sample should include X% of target BB)
//NOTE(review): not referenced by the sampling code visible here -- confirm usage
const double minX = 0.5;
const double minY = 0.5;
//Structure of sample for training
struct TrainingSample
{
//Crop of the previous frame centered on the previous bounding box
Mat targetPatch;
//Randomly perturbed crop of the current frame
Mat searchPatch;
//Output bounding box on search patch
Rect2f targetBB;
};
//Laplacian distribution: draw one Laplace(m, b) sample (uses rand())
double generateRandomLaplacian(double b, double m);
//Convert ALOV300++ anno coordinates to Rectangle BB
Rect2f anno2rect(vector<Point2f> annoBB);
//Gather samples from random video frame
vector <TrainingSample> gatherFrameSamples(Mat prevFrame, Mat currFrame, Rect2f prevBB, Rect2f currBB);
}
}
#endif

@ -111,6 +111,7 @@ Ptr<Tracker> Tracker::create( const String& trackerType )
BOILERPLATE_CODE("MEDIANFLOW",TrackerMedianFlow);
BOILERPLATE_CODE("TLD",TrackerTLD);
BOILERPLATE_CODE("KCF",TrackerKCF);
BOILERPLATE_CODE("GOTURN", TrackerGOTURN);
return Ptr<Tracker>();
}

@ -422,6 +422,20 @@ TEST_P(OPE_Overlap, TLD)
RecordProperty( "ratioSuccess", test.getRatioSucc() );
}
//GOTURN one-pass evaluation (OPE): center-distance metric
TEST_P(OPE_Distance, GOTURN)
{
TrackerOPETest test(Tracker::create("GOTURN"), TrackerOPETest::DISTANCE, dataset, threshold);
test.run();
RecordProperty("ratioSuccess", test.getRatioSucc());
}
//GOTURN one-pass evaluation (OPE): overlap metric
TEST_P(OPE_Overlap, GOTURN)
{
TrackerOPETest test(Tracker::create("GOTURN"), TrackerOPETest::OVERLAP, dataset, threshold);
test.run();
RecordProperty("ratioSuccess", test.getRatioSucc());
}
INSTANTIATE_TEST_CASE_P( Tracking, OPE_Distance, testing::Combine( TESTSET_NAMES, LOCATION_ERROR_THRESHOLD ) );
INSTANTIATE_TEST_CASE_P( Tracking, OPE_Overlap, testing::Combine( TESTSET_NAMES, OVERLAP_THRESHOLD ) );

@ -529,6 +529,20 @@ TEST_P(SRE_Overlap, TLD)
RecordProperty( "ratioSuccess", test.getRatioSucc() );
}
//GOTURN spatial robustness evaluation (SRE): center-distance metric,
//initial bounding box perturbed by the parameterized shift
TEST_P(SRE_Distance, GOTURN)
{
TrackerSRETest test(Tracker::create("GOTURN"), TrackerSRETest::DISTANCE, dataset, shift, threshold);
test.run();
RecordProperty("ratioSuccess", test.getRatioSucc());
}
//GOTURN spatial robustness evaluation (SRE): overlap metric
TEST_P(SRE_Overlap, GOTURN)
{
TrackerSRETest test(Tracker::create("GOTURN"), TrackerSRETest::OVERLAP, dataset, shift, threshold);
test.run();
RecordProperty("ratioSuccess", test.getRatioSucc());
}
INSTANTIATE_TEST_CASE_P( Tracking, SRE_Distance, testing::Combine( TESTSET_NAMES, SPATIAL_SHIFTS, LOCATION_ERROR_THRESHOLD ) );
INSTANTIATE_TEST_CASE_P( Tracking, SRE_Overlap, testing::Combine( TESTSET_NAMES, SPATIAL_SHIFTS, OVERLAP_THRESHOLD ) );

@ -389,7 +389,7 @@ void TrackerTRETest::checkDataTest()
gt2.close();
if( segmentIdx == ( sizeof ( SEGMENTS)/sizeof(int) ) )
endFrame = (int)bbs.size();
endFrame = (int)bbs.size();
}
void TrackerTRETest::run()
@ -499,6 +499,20 @@ TEST_P(TRE_Overlap, TLD)
RecordProperty( "ratioSuccess", test.getRatioSucc() );
}
//GOTURN temporal robustness evaluation (TRE): center-distance metric,
//tracking started from the parameterized video segment
TEST_P(TRE_Distance, GOTURN)
{
TrackerTRETest test(Tracker::create("GOTURN"), TrackerTRETest::DISTANCE, dataset, threshold, segment);
test.run();
RecordProperty("ratioSuccess", test.getRatioSucc());
}
//GOTURN temporal robustness evaluation (TRE): overlap metric
TEST_P(TRE_Overlap, GOTURN)
{
TrackerTRETest test(Tracker::create("GOTURN"), TrackerTRETest::OVERLAP, dataset, threshold, segment);
test.run();
RecordProperty("ratioSuccess", test.getRatioSucc());
}
INSTANTIATE_TEST_CASE_P( Tracking, TRE_Distance, testing::Combine( TESTSET_NAMES, SEGMENTS, LOCATION_ERROR_THRESHOLD ) );
INSTANTIATE_TEST_CASE_P( Tracking, TRE_Overlap, testing::Combine( TESTSET_NAMES, SEGMENTS, OVERLAP_THRESHOLD ) );

Loading…
Cancel
Save