// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
//#define DEBUG_TEST
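// NOTE (assumption): defining DEBUG_TEST appears to enable highgui-based visualization of
// tracking results inside test_trackers.impl.hpp, which is why the header is included conditionally.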
#ifdef DEBUG_TEST
#include <opencv2/highgui.hpp>
#endif
namespace opencv_test { namespace {
//using namespace cv::tracking;
#define TESTSET_NAMES testing::Values("david", "dudek", "faceocc2")
const string TRACKING_DIR = "tracking";
const string FOLDER_IMG = "data";
const string FOLDER_OMIT_INIT = "initOmit";
#include "test_trackers.impl.hpp"
//[TESTDATA]
PARAM_TEST_CASE(DistanceAndOverlap, string)
{
    string dataset;
    virtual void SetUp()
    {
        dataset = GET_PARAM(0);
    }
};
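// NOTE (assumption): per the TrackerTest template in test_trackers.impl.hpp, the arguments below
// appear to be (tracker, dataset name, distance threshold in pixels, overlap threshold,
// initial bounding-box transform).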
TEST_P(DistanceAndOverlap, MIL)
{
    TrackerTest<Tracker, Rect> test(TrackerMIL::create(), dataset, 30, .65f, NoTransform);
    test.run();
}
TEST_P(DistanceAndOverlap, Shifted_Data_MIL)
{
    TrackerTest<Tracker, Rect> test(TrackerMIL::create(), dataset, 30, .6f, CenterShiftLeft);
    test.run();
}
/***************************************************************************************/
//Tests with scaled initial window
TEST_P(DistanceAndOverlap, Scaled_Data_MIL)
{
    TrackerTest<Tracker, Rect> test(TrackerMIL::create(), dataset, 30, .7f, Scale_1_1);
    test.run();
}
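// GOTURN loads a Caffe model from the dnn test data; note that its thresholds (35, .35f) are
// considerably looser than the MIL ones above, consistent with the low-accuracy TODO in
// TEST(GOTURN, accuracy) further below.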
TEST_P(DistanceAndOverlap, GOTURN)
{
    std::string model = cvtest::findDataFile("dnn/gsoc2016-goturn/goturn.prototxt");
    std::string weights = cvtest::findDataFile("dnn/gsoc2016-goturn/goturn.caffemodel", false);
    cv::TrackerGOTURN::Params params;
    params.modelTxt = model;
    params.modelBin = weights;
    TrackerTest<Tracker, Rect> test(TrackerGOTURN::create(params), dataset, 35, .35f, NoTransform);
    test.run();
}
INSTANTIATE_TEST_CASE_P(Tracking, DistanceAndOverlap, TESTSET_NAMES);
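// checkIOU: intersection-over-union of two rectangles,
//   IoU = |r0 & r1| / (|r0| + |r1| - |r0 & r1|).
// Worked example: two 100x100 boxes offset by 50 px horizontally share a 50x100 intersection,
// so IoU = 5000 / (10000 + 10000 - 5000) = 1/3.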
static bool checkIOU(const Rect& r0, const Rect& r1, double threshold)
{
    int interArea = (r0 & r1).area();
    double iouVal = (interArea * 1.0) / (r0.area() + r1.area() - interArea);
    if (iouVal > threshold)
        return true;
    else
    {
        std::cout << "Unmatched IOU: expected IOU value (" << iouVal << ") > the IOU threshold (" << threshold << ")! Box 0 is "
                  << r0 << ", and Box 1 is " << r1 << std::endl;
        return false;
    }
}
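// checkTrackingAccuracy: initializes the tracker on the first frame of the tracking/bag test
// sequence and requires the predicted box on each of the next five frames to reach the given
// IoU against the corresponding reference box.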
static void checkTrackingAccuracy(cv::Ptr<Tracker>& tracker, double iouThreshold = 0.7)
{
    // Template image
    Mat img0 = imread(findDataFile("tracking/bag/00000001.jpg"), 1);

    // Tracking image sequence.
    std::vector<Mat> imgs;
    imgs.push_back(imread(findDataFile("tracking/bag/00000002.jpg"), 1));
    imgs.push_back(imread(findDataFile("tracking/bag/00000003.jpg"), 1));
    imgs.push_back(imread(findDataFile("tracking/bag/00000004.jpg"), 1));
    imgs.push_back(imread(findDataFile("tracking/bag/00000005.jpg"), 1));
    imgs.push_back(imread(findDataFile("tracking/bag/00000006.jpg"), 1));

    cv::Rect roi(325, 164, 100, 100);
    std::vector<Rect> targetRois;
    targetRois.push_back(cv::Rect(278, 133, 99, 104));
    targetRois.push_back(cv::Rect(293, 88, 93, 110));
    targetRois.push_back(cv::Rect(287, 76, 89, 116));
    targetRois.push_back(cv::Rect(297, 74, 82, 122));
    targetRois.push_back(cv::Rect(311, 83, 78, 125));

    tracker->init(img0, roi);
    CV_Assert(targetRois.size() == imgs.size());

    for (int i = 0; i < (int)imgs.size(); i++)
    {
        bool res = tracker->update(imgs[i], roi);
        ASSERT_TRUE(res);
        ASSERT_TRUE(checkIOU(roi, targetRois[i], iouThreshold)) << cv::format("Fail at img %d.", i);
    }
}
TEST(GOTURN, accuracy)
{
    std::string model = cvtest::findDataFile("dnn/gsoc2016-goturn/goturn.prototxt");
    std::string weights = cvtest::findDataFile("dnn/gsoc2016-goturn/goturn.caffemodel", false);
    cv::TrackerGOTURN::Params params;
    params.modelTxt = model;
    params.modelBin = weights;
    cv::Ptr<Tracker> tracker = TrackerGOTURN::create(params);
    // TODO: GOTURN has low accuracy. Consider removing this API in 5.x.
    checkTrackingAccuracy(tracker, 0.08);
}
TEST(DaSiamRPN, accuracy)
{
    std::string model = cvtest::findDataFile("dnn/onnx/models/dasiamrpn_model.onnx", false);
    std::string kernel_r1 = cvtest::findDataFile("dnn/onnx/models/dasiamrpn_kernel_r1.onnx", false);
    std::string kernel_cls1 = cvtest::findDataFile("dnn/onnx/models/dasiamrpn_kernel_cls1.onnx", false);
    cv::TrackerDaSiamRPN::Params params;
    params.model = model;
    params.kernel_r1 = kernel_r1;
    params.kernel_cls1 = kernel_cls1;
    cv::Ptr<Tracker> tracker = TrackerDaSiamRPN::create(params);
    checkTrackingAccuracy(tracker, 0.7);
}
TEST(NanoTrack, accuracy_NanoTrack_V1)
{
    std::string backbonePath = cvtest::findDataFile("dnn/onnx/models/nanotrack_backbone_sim.onnx", false);
    std::string neckheadPath = cvtest::findDataFile("dnn/onnx/models/nanotrack_head_sim.onnx", false);
    cv::TrackerNano::Params params;
    params.backbone = backbonePath;
    params.neckhead = neckheadPath;
    cv::Ptr<Tracker> tracker = TrackerNano::create(params);
    checkTrackingAccuracy(tracker);
}
TEST(NanoTrack, accuracy_NanoTrack_V2)
{
    std::string backbonePath = cvtest::findDataFile("dnn/onnx/models/nanotrack_backbone_sim_v2.onnx", false);
    std::string neckheadPath = cvtest::findDataFile("dnn/onnx/models/nanotrack_head_sim_v2.onnx", false);
    cv::TrackerNano::Params params;
    params.backbone = backbonePath;
    params.neckhead = neckheadPath;
    cv::Ptr<Tracker> tracker = TrackerNano::create(params);
    checkTrackingAccuracy(tracker, 0.69);
}
TEST(vittrack, accuracy_vittrack)
{
    std::string model = cvtest::findDataFile("dnn/onnx/models/vitTracker.onnx");
    cv::TrackerVit::Params params;
    params.net = model;
    cv::Ptr<Tracker> tracker = TrackerVit::create(params);
    // NOTE: Test threshold was reduced from 0.67 (libjpeg-turbo) to 0.66 (libjpeg 9f),
    // because libjpeg and libjpeg-turbo produce slightly different images.
    checkTrackingAccuracy(tracker, 0.66);
}
}} // namespace opencv_test::