mirror of https://github.com/opencv/opencv.git
commit 33dde339fe
8 changed files with 329 additions and 356 deletions
@@ -0,0 +1,6 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#include "perf_precomp.hpp"
#include "../test/test_common.impl.hpp"  // shared with accuracy tests
@@ -0,0 +1,294 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Used in accuracy and perf tests as the content of a .cpp file
// Note: don't use "precomp.hpp" here
#include "opencv2/ts.hpp" |
||||
#include "opencv2/ts/ts_perf.hpp" |
||||
#include "opencv2/core/utility.hpp" |
||||
#include "opencv2/core/ocl.hpp" |
||||
|
||||
#include "opencv2/dnn.hpp" |
||||
#include "test_common.hpp" |
||||
|
||||
#include <opencv2/core/utils/configuration.private.hpp> |
||||
#include <opencv2/core/utils/logger.hpp> |
||||
|
||||
namespace cv { namespace dnn { |
||||
CV__DNN_INLINE_NS_BEGIN |
||||
|
||||
void PrintTo(const cv::dnn::Backend& v, std::ostream* os) |
||||
{ |
||||
switch (v) { |
||||
case DNN_BACKEND_DEFAULT: *os << "DEFAULT"; return; |
||||
case DNN_BACKEND_HALIDE: *os << "HALIDE"; return; |
||||
case DNN_BACKEND_INFERENCE_ENGINE: *os << "DLIE"; return; |
||||
case DNN_BACKEND_VKCOM: *os << "VKCOM"; return; |
||||
case DNN_BACKEND_OPENCV: *os << "OCV"; return; |
||||
    } // no "default:" here, so the compiler warns about unhandled enum values
*os << "DNN_BACKEND_UNKNOWN(" << (int)v << ")"; |
||||
} |
||||
|
||||
void PrintTo(const cv::dnn::Target& v, std::ostream* os) |
||||
{ |
||||
switch (v) { |
||||
case DNN_TARGET_CPU: *os << "CPU"; return; |
||||
case DNN_TARGET_OPENCL: *os << "OCL"; return; |
||||
case DNN_TARGET_OPENCL_FP16: *os << "OCL_FP16"; return; |
||||
case DNN_TARGET_MYRIAD: *os << "MYRIAD"; return; |
||||
case DNN_TARGET_VULKAN: *os << "VULKAN"; return; |
||||
case DNN_TARGET_FPGA: *os << "FPGA"; return; |
||||
    } // no "default:" here, so the compiler warns about unhandled enum values
*os << "DNN_TARGET_UNKNOWN(" << (int)v << ")"; |
||||
} |
||||
|
||||
void PrintTo(const tuple<cv::dnn::Backend, cv::dnn::Target> v, std::ostream* os) |
||||
{ |
||||
PrintTo(get<0>(v), os); |
||||
*os << "/"; |
||||
PrintTo(get<1>(v), os); |
||||
} |
||||
|
||||
CV__DNN_INLINE_NS_END |
||||
}} // namespace
|
||||
|
||||
|
||||
|
||||
namespace opencv_test { |
||||
|
||||
void normAssert( |
||||
cv::InputArray ref, cv::InputArray test, const char *comment /*= ""*/, |
||||
double l1 /*= 0.00001*/, double lInf /*= 0.0001*/) |
||||
{ |
||||
double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total(); |
||||
EXPECT_LE(normL1, l1) << comment; |
||||
|
||||
double normInf = cvtest::norm(ref, test, cv::NORM_INF); |
||||
EXPECT_LE(normInf, lInf) << comment; |
||||
} |
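
// Illustrative usage sketch (hypothetical test, not taken from this commit): normAssert()
// bounds the mean per-element L1 difference by 'l1' and the max absolute difference by 'lInf'.
TEST(DNN_CommonExample, normAssert_usage)
{
    cv::Mat ref(1, 10, CV_32FC1, cv::Scalar(0.5));
    cv::Mat out(1, 10, CV_32FC1, cv::Scalar(0.500004));
    // mean L1 diff and max diff are both about 4e-6 here, so both expectations pass
    normAssert(ref, out, "toy comparison", /*l1*/ 1e-5, /*lInf*/ 1e-4);
}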

std::vector<cv::Rect2d> matToBoxes(const cv::Mat& m)
{
    EXPECT_EQ(m.type(), CV_32FC1);
    EXPECT_EQ(m.dims, 2);
    EXPECT_EQ(m.cols, 4);

    std::vector<cv::Rect2d> boxes(m.rows);
    for (int i = 0; i < m.rows; ++i)
    {
        CV_Assert(m.row(i).isContinuous());
        const float* data = m.ptr<float>(i);
        double l = data[0], t = data[1], r = data[2], b = data[3];
        boxes[i] = cv::Rect2d(l, t, r - l, b - t);
    }
    return boxes;
}

void normAssertDetections(
        const std::vector<int>& refClassIds,
        const std::vector<float>& refScores,
        const std::vector<cv::Rect2d>& refBoxes,
        const std::vector<int>& testClassIds,
        const std::vector<float>& testScores,
        const std::vector<cv::Rect2d>& testBoxes,
        const char *comment /*= ""*/, double confThreshold /*= 0.0*/,
        double scores_diff /*= 1e-5*/, double boxes_iou_diff /*= 1e-4*/)
{
    std::vector<bool> matchedRefBoxes(refBoxes.size(), false);
    for (int i = 0; i < testBoxes.size(); ++i)
    {
        double testScore = testScores[i];
        if (testScore < confThreshold)
            continue;

        int testClassId = testClassIds[i];
        const cv::Rect2d& testBox = testBoxes[i];
        bool matched = false;
        for (int j = 0; j < refBoxes.size() && !matched; ++j)
        {
            if (!matchedRefBoxes[j] && testClassId == refClassIds[j] &&
                std::abs(testScore - refScores[j]) < scores_diff)
            {
                double interArea = (testBox & refBoxes[j]).area();
                double iou = interArea / (testBox.area() + refBoxes[j].area() - interArea);
                if (std::abs(iou - 1.0) < boxes_iou_diff)
                {
                    matched = true;
                    matchedRefBoxes[j] = true;
                }
            }
        }
        if (!matched)
            std::cout << cv::format("Unmatched prediction: class %d score %f box ",
                                    testClassId, testScore) << testBox << std::endl;
        EXPECT_TRUE(matched) << comment;
    }

    // Check unmatched reference detections.
    for (int i = 0; i < refBoxes.size(); ++i)
    {
        if (!matchedRefBoxes[i] && refScores[i] > confThreshold)
        {
            std::cout << cv::format("Unmatched reference: class %d score %f box ",
                                    refClassIds[i], refScores[i]) << refBoxes[i] << std::endl;
            EXPECT_LE(refScores[i], confThreshold) << comment;
        }
    }
}

// For SSD-based object detection networks which produce an output of shape 1x1xNx7,
// where N is the number of detections and every detection is represented by
// a vector [batchId, classId, confidence, left, top, right, bottom].
void normAssertDetections(
        cv::Mat ref, cv::Mat out, const char *comment /*= ""*/,
        double confThreshold /*= 0.0*/, double scores_diff /*= 1e-5*/,
        double boxes_iou_diff /*= 1e-4*/)
{
    CV_Assert(ref.total() % 7 == 0);
    CV_Assert(out.total() % 7 == 0);
    ref = ref.reshape(1, ref.total() / 7);
    out = out.reshape(1, out.total() / 7);

    cv::Mat refClassIds, testClassIds;
    ref.col(1).convertTo(refClassIds, CV_32SC1);
    out.col(1).convertTo(testClassIds, CV_32SC1);
    std::vector<float> refScores(ref.col(2)), testScores(out.col(2));
    std::vector<cv::Rect2d> refBoxes = matToBoxes(ref.colRange(3, 7));
    std::vector<cv::Rect2d> testBoxes = matToBoxes(out.colRange(3, 7));
    normAssertDetections(refClassIds, refScores, refBoxes, testClassIds, testScores,
                         testBoxes, comment, confThreshold, scores_diff, boxes_iou_diff);
}
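
// Illustrative usage sketch (hypothetical test, not taken from this commit): a detection blob
// in the 1x1xNx7 layout described above can be checked against a reference blob directly.
TEST(DNN_CommonExample, normAssertDetections_usage)
{
    // two fake detections: [batchId, classId, confidence, left, top, right, bottom]
    float data[2 * 7] = {
        0, 1, 0.9f, 0.10f, 0.10f, 0.50f, 0.60f,
        0, 3, 0.2f, 0.40f, 0.30f, 0.80f, 0.90f
    };
    cv::Mat ref(1, 14, CV_32FC1, data);
    cv::Mat out = ref.clone();
    // detections with confidence below confThreshold (the second row) are ignored by the check
    normAssertDetections(ref, out, "toy detections", /*confThreshold*/ 0.5);
}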

bool readFileInMemory(const std::string& filename, std::string& content)
{
    std::ios::openmode mode = std::ios::in | std::ios::binary;
    std::ifstream ifs(filename.c_str(), mode);
    if (!ifs.is_open())
        return false;

    content.clear();

    ifs.seekg(0, std::ios::end);
    content.reserve(ifs.tellg());
    ifs.seekg(0, std::ios::beg);

    content.assign((std::istreambuf_iterator<char>(ifs)),
                   std::istreambuf_iterator<char>());

    return true;
}
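
// Illustrative usage sketch (hypothetical test, the file name is made up): readFileInMemory()
// is typically used to exercise the in-memory model importers, e.g.
// cv::dnn::readNetFromCaffe(proto.c_str(), proto.size()).
TEST(DNN_CommonExample, readFileInMemory_usage)
{
    std::string proto;
    if (!readFileInMemory("example.prototxt", proto))
        return;  // nothing to verify when the sample file is absent
    EXPECT_FALSE(proto.empty());
}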


testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTargets(
        bool withInferenceEngine /*= true*/,
        bool withHalide /*= false*/,
        bool withCpuOCV /*= true*/,
        bool withVkCom /*= true*/
)
{
#ifdef HAVE_INF_ENGINE
    bool withVPU = validateVPUType();
#endif

    std::vector< tuple<Backend, Target> > targets;
    std::vector< Target > available;
    if (withHalide)
    {
        available = getAvailableTargets(DNN_BACKEND_HALIDE);
        for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
            targets.push_back(make_tuple(DNN_BACKEND_HALIDE, *i));
    }
#ifdef HAVE_INF_ENGINE
    if (withInferenceEngine)
    {
        available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE);
        for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
        {
            if (*i == DNN_TARGET_MYRIAD && !withVPU)
                continue;
            targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE, *i));
        }
    }
#else
    CV_UNUSED(withInferenceEngine);
#endif
    if (withVkCom)
    {
        available = getAvailableTargets(DNN_BACKEND_VKCOM);
        for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
            targets.push_back(make_tuple(DNN_BACKEND_VKCOM, *i));
    }
    {
        available = getAvailableTargets(DNN_BACKEND_OPENCV);
        for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
        {
            if (!withCpuOCV && *i == DNN_TARGET_CPU)
                continue;
            targets.push_back(make_tuple(DNN_BACKEND_OPENCV, *i));
        }
    }
    if (targets.empty())  // validate at least CPU mode
        targets.push_back(make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU));
    return testing::ValuesIn(targets);
}
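
// Illustrative usage sketch (hypothetical fixture, not taken from this commit): the generator
// above is meant to feed a value-parameterized GoogleTest suite, and the PrintTo() overloads
// defined earlier make the generated test names read like "OCV/CPU" or "DLIE/MYRIAD".
class DNN_CommonExample_Net : public testing::TestWithParam< tuple<Backend, Target> >
{
public:
    Backend backend;
    Target target;
    DNN_CommonExample_Net()
    {
        backend = get<0>(GetParam());
        target = get<1>(GetParam());
    }
};

TEST_P(DNN_CommonExample_Net, smoke)
{
    cv::dnn::Net net;  // a real test would load a model here
    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);
}

INSTANTIATE_TEST_CASE_P(/**/, DNN_CommonExample_Net, dnnBackendsAndTargets());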


#ifdef HAVE_INF_ENGINE
static std::string getTestInferenceEngineVPUType()
{
    static std::string param_vpu_type = utils::getConfigurationParameterString("OPENCV_TEST_DNN_IE_VPU_TYPE", "");
    return param_vpu_type;
}

static bool validateVPUType_()
{
    std::string test_vpu_type = getTestInferenceEngineVPUType();
    if (test_vpu_type == "DISABLED" || test_vpu_type == "disabled")
    {
        return false;
    }

    std::vector<Target> available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE);
    bool have_vpu_target = false;
    for (std::vector<Target>::const_iterator i = available.begin(); i != available.end(); ++i)
    {
        if (*i == DNN_TARGET_MYRIAD)
        {
            have_vpu_target = true;
            break;
        }
    }

    if (test_vpu_type.empty())
    {
        if (have_vpu_target)
        {
            CV_LOG_INFO(NULL, "OpenCV-DNN-Test: VPU type for testing is not specified via 'OPENCV_TEST_DNN_IE_VPU_TYPE' parameter.")
        }
    }
    else
    {
        if (!have_vpu_target)
        {
            CV_LOG_FATAL(NULL, "OpenCV-DNN-Test: 'OPENCV_TEST_DNN_IE_VPU_TYPE' parameter requires VPU of type = '" << test_vpu_type << "', but VPU is not detected. STOP.");
            exit(1);
        }
        std::string dnn_vpu_type = getInferenceEngineVPUType();
        if (dnn_vpu_type != test_vpu_type)
        {
            CV_LOG_FATAL(NULL, "OpenCV-DNN-Test: 'testing' and 'detected' VPU types mismatch: '" << test_vpu_type << "' vs '" << dnn_vpu_type << "'. STOP.");
            exit(1);
        }
    }
    return true;
}

bool validateVPUType()
{
    static bool result = validateVPUType_();
    return result;
}
#endif // HAVE_INF_ENGINE

} // namespace