fixed tests (call resetDevice if there was a GPU failure)

pull/328/head
Vladislav Vinogradov 12 years ago
parent 0773ab4d07
commit b7e6b5af1b
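
The central change in this commit is wrapping the gpu test bodies (the many TEST_P -> GPU_TEST_P renames below) so that a CUDA failure in one test resets the device instead of leaving a broken context for the tests that follow. The snippet below is only an illustrative sketch of that pattern, not the code actually added to modules/gpu/test/utility.hpp; the helper name runWithDeviceReset and the exact error codes checked are assumptions.

// Sketch only: assumes OpenCV 2.4.x with the gpu module; runWithDeviceReset is a made-up name.
#include <opencv2/core/core.hpp>
#include <opencv2/gpu/gpu.hpp>

template <typename Body>
void runWithDeviceReset(Body body)
{
    try
    {
        body();
    }
    catch (const cv::Exception& e)
    {
        // A failed CUDA call usually leaves the context unusable, so reset the
        // device before rethrowing; the next test then starts from a clean state.
        if (e.code == CV_GpuApiCallError || e.code == CV_GpuNotSupported)
            cv::gpu::resetDevice();
        throw;
    }
}

A GPU_TEST_P-style macro would presumably route the user-written test body through a wrapper like this while still letting gtest record the failure.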
  1. modules/gpu/perf/perf_core.cpp (868 lines changed)
  2. modules/gpu/perf/perf_imgproc.cpp (45 lines changed)
  3. modules/gpu/perf/perf_objdetect.cpp (3 lines changed)
  4. modules/gpu/test/interpolation.hpp (5 lines changed)
  5. modules/gpu/test/nvidia/TestHaarCascadeApplication.cpp (97 lines changed)
  6. modules/gpu/test/nvidia/main_nvidia.cpp (72 lines changed)
  7. modules/gpu/test/test_bgfg.cpp (405 lines changed)
  8. modules/gpu/test/test_calib3d.cpp (18 lines changed)
  9. modules/gpu/test/test_color.cpp (712 lines changed)
  10. modules/gpu/test/test_copy_make_border.cpp (11 lines changed)
  11. modules/gpu/test/test_core.cpp (675 lines changed)
  12. modules/gpu/test/test_denoising.cpp (13 lines changed)
  13. modules/gpu/test/test_features2d.cpp (229 lines changed)
  14. modules/gpu/test/test_filters.cpp (129 lines changed)
  15. modules/gpu/test/test_global_motion.cpp (2 lines changed)
  16. modules/gpu/test/test_gpumat.cpp (20 lines changed)
  17. modules/gpu/test/test_hough.cpp (12 lines changed)
  18. modules/gpu/test/test_imgproc.cpp (305 lines changed)
  19. modules/gpu/test/test_labeling.cpp (12 lines changed)
  20. modules/gpu/test/test_nvidia.cpp (36 lines changed)
  21. modules/gpu/test/test_objdetect.cpp (15 lines changed)
  22. modules/gpu/test/test_optflow.cpp (404 lines changed)
  23. modules/gpu/test/test_precomp.hpp (1 line changed)
  24. modules/gpu/test/test_pyramids.cpp (4 lines changed)
  25. modules/gpu/test/test_remap.cpp (2 lines changed)
  26. modules/gpu/test/test_resize.cpp (8 lines changed)
  27. modules/gpu/test/test_threshold.cpp (2 lines changed)
  28. modules/gpu/test/test_video.cpp (776 lines changed)
  29. modules/gpu/test/test_warp_affine.cpp (16 lines changed)
  30. modules/gpu/test/test_warp_perspective.cpp (21 lines changed)
  31. modules/gpu/test/utility.cpp (117 lines changed)
  32. modules/gpu/test/utility.hpp (72 lines changed)
  33. modules/ts/include/opencv2/ts/ts.hpp (2 lines changed)
  34. modules/ts/src/ts_perf.cpp (8 lines changed)

File diff suppressed because it is too large (modules/gpu/perf/perf_core.cpp)

@ -581,13 +581,12 @@ PERF_TEST_P(Sz, ImgProc_CalcHist, GPU_TYPICAL_MAT_SIZES)
{
cv::gpu::GpuMat d_src(src);
cv::gpu::GpuMat d_hist;
cv::gpu::GpuMat d_buf;
cv::gpu::calcHist(d_src, d_hist, d_buf);
cv::gpu::calcHist(d_src, d_hist);
TEST_CYCLE()
{
cv::gpu::calcHist(d_src, d_hist, d_buf);
cv::gpu::calcHist(d_src, d_hist);
}
GPU_SANITY_CHECK(d_hist);
@ -1706,10 +1705,30 @@ PERF_TEST_P(Sz_Depth_Cn, ImgProc_ImagePyramidGetLayer, Combine(GPU_TYPICAL_MAT_S
}
}
namespace {
struct Vec3fComparator
{
bool operator()(const cv::Vec3f& a, const cv::Vec3f b) const
{
if(a[0] != b[0]) return a[0] < b[0];
else if(a[1] != b[1]) return a[1] < b[1];
else return a[2] < b[2];
}
};
struct Vec2fComparator
{
bool operator()(const cv::Vec2f& a, const cv::Vec2f b) const
{
if(a[0] != b[0]) return a[0] < b[0];
else return a[1] < b[1];
}
};
}
//////////////////////////////////////////////////////////////////////
// HoughLines
PERF_TEST_P(Sz, DISABLED_ImgProc_HoughLines, GPU_TYPICAL_MAT_SIZES)
PERF_TEST_P(Sz, ImgProc_HoughLines, GPU_TYPICAL_MAT_SIZES)
{
declare.time(30.0);
@ -1744,7 +1763,11 @@ PERF_TEST_P(Sz, DISABLED_ImgProc_HoughLines, GPU_TYPICAL_MAT_SIZES)
cv::gpu::HoughLines(d_src, d_lines, d_buf, rho, theta, threshold);
}
GPU_SANITY_CHECK(d_lines);
cv::Mat h_lines(d_lines);
cv::Vec2f* begin = (cv::Vec2f*)(h_lines.ptr<char>(0));
cv::Vec2f* end = (cv::Vec2f*)(h_lines.ptr<char>(0) + (h_lines.cols) * 2 * sizeof(float));
std::sort(begin, end, Vec2fComparator());
SANITY_CHECK(h_lines);
}
else
{
@ -1756,7 +1779,8 @@ PERF_TEST_P(Sz, DISABLED_ImgProc_HoughLines, GPU_TYPICAL_MAT_SIZES)
cv::HoughLines(src, lines, rho, theta, threshold);
}
CPU_SANITY_CHECK(lines);
std::sort(lines.begin(), lines.end(), Vec2fComparator());
SANITY_CHECK(lines);
}
}
@ -1804,7 +1828,11 @@ PERF_TEST_P(Sz_Dp_MinDist, ImgProc_HoughCircles, Combine(GPU_TYPICAL_MAT_SIZES,
cv::gpu::HoughCircles(d_src, d_circles, d_buf, CV_HOUGH_GRADIENT, dp, minDist, cannyThreshold, votesThreshold, minRadius, maxRadius);
}
GPU_SANITY_CHECK(d_circles);
cv::Mat h_circles(d_circles);
cv::Vec3f* begin = (cv::Vec3f*)(h_circles.ptr<char>(0));
cv::Vec3f* end = (cv::Vec3f*)(h_circles.ptr<char>(0) + (h_circles.cols) * 3 * sizeof(float));
std::sort(begin, end, Vec3fComparator());
SANITY_CHECK(h_circles);
}
else
{
@ -1817,7 +1845,8 @@ PERF_TEST_P(Sz_Dp_MinDist, ImgProc_HoughCircles, Combine(GPU_TYPICAL_MAT_SIZES,
cv::HoughCircles(src, circles, CV_HOUGH_GRADIENT, dp, minDist, cannyThreshold, votesThreshold, minRadius, maxRadius);
}
CPU_SANITY_CHECK(circles);
std::sort(circles.begin(), circles.end(), Vec3fComparator());
SANITY_CHECK(circles);
}
}
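
Why the hunks above sort the Hough output before the sanity check: the GPU and CPU code paths can return the same lines or circles in a different order, so the results need a canonical ordering before they are compared against stored regression data. A minimal standalone sketch of that idea (plain std::vector instead of a downloaded GpuMat, values invented for illustration):

#include <algorithm>
#include <vector>
#include <opencv2/core/core.hpp>

// Same ordering as the Vec2fComparator added above: sort by rho, then by theta.
struct Vec2fComparator
{
    bool operator()(const cv::Vec2f& a, const cv::Vec2f& b) const
    {
        if (a[0] != b[0]) return a[0] < b[0];
        return a[1] < b[1];
    }
};

int main()
{
    // Pretend these came back from cv::gpu::HoughLines in device order.
    std::vector<cv::Vec2f> lines;
    lines.push_back(cv::Vec2f(30.0f, 1.2f));
    lines.push_back(cv::Vec2f(10.0f, 0.5f));
    lines.push_back(cv::Vec2f(10.0f, 0.3f));

    // A deterministic order lets SANITY_CHECK compare like with like
    // across runs and across devices.
    std::sort(lines.begin(), lines.end(), Vec2fComparator());
    return 0;
}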

@ -89,7 +89,6 @@ PERF_TEST_P(HOG, CalTech, Values<string>("gpu/caltech/image_00000009_0.png", "gp
SANITY_CHECK(found_locations);
}
///////////////////////////////////////////////////////////////
// HaarClassifier
@ -181,4 +180,4 @@ PERF_TEST_P(ImageAndCascade, ObjDetect_LBPClassifier,
}
}
} // namespace
} // namespace

@ -42,6 +42,9 @@
#ifndef __OPENCV_TEST_INTERPOLATION_HPP__
#define __OPENCV_TEST_INTERPOLATION_HPP__
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
template <typename T> T readVal(const cv::Mat& src, int y, int x, int c, int border_type, cv::Scalar borderVal = cv::Scalar())
{
if (border_type == cv::BORDER_CONSTANT)
@ -113,7 +116,7 @@ template <typename T> struct CubicInterpolator
for (float cx = xmin; cx <= xmax; cx += 1.0f)
{
const float w = bicubicCoeff(x - cx) * bicubicCoeff(y - cy);
sum += w * readVal<T>(src, cvFloor(cy), cvFloor(cx), c, border_type, borderVal);
sum += w * readVal<T>(src, (int) floorf(cy), (int) floorf(cx), c, border_type, borderVal);
wsum += w;
}
}

@ -13,10 +13,50 @@
#include <float.h>
#if defined(__GNUC__) && !defined(__APPLE__)
#if defined(__GNUC__) && !defined(__APPLE__) && !defined(__arm__)
#include <fpu_control.h>
#endif
namespace
{
// http://www.christian-seiler.de/projekte/fpmath/
class FpuControl
{
public:
FpuControl();
~FpuControl();
private:
#if defined(__GNUC__) && !defined(__APPLE__) && !defined(__arm__)
fpu_control_t fpu_oldcw, fpu_cw;
#elif defined(_WIN32) && !defined(_WIN64)
unsigned int fpu_oldcw, fpu_cw;
#endif
};
FpuControl::FpuControl()
{
#if defined(__GNUC__) && !defined(__APPLE__) && !defined(__arm__)
_FPU_GETCW(fpu_oldcw);
fpu_cw = (fpu_oldcw & ~_FPU_EXTENDED & ~_FPU_DOUBLE & ~_FPU_SINGLE) | _FPU_SINGLE;
_FPU_SETCW(fpu_cw);
#elif defined(_WIN32) && !defined(_WIN64)
_controlfp_s(&fpu_cw, 0, 0);
fpu_oldcw = fpu_cw;
_controlfp_s(&fpu_cw, _PC_24, _MCW_PC);
#endif
}
FpuControl::~FpuControl()
{
#if defined(__GNUC__) && !defined(__APPLE__) && !defined(__arm__)
_FPU_SETCW(fpu_oldcw);
#elif defined(_WIN32) && !defined(_WIN64)
_controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC);
#endif
}
}
#include "TestHaarCascadeApplication.h"
#include "NCVHaarObjectDetection.hpp"
@ -47,12 +87,8 @@ bool TestHaarCascadeApplication::init()
return true;
}
bool TestHaarCascadeApplication::process()
{
#if defined(__APPLE)
return true;
#endif
NCVStatus ncvStat;
bool rcode = false;
@ -205,44 +241,19 @@ bool TestHaarCascadeApplication::process()
}
ncvAssertReturn(cudaSuccess == cudaStreamSynchronize(0), false);
#if !defined(__APPLE__)
#if defined(__GNUC__)
//http://www.christian-seiler.de/projekte/fpmath/
fpu_control_t fpu_oldcw, fpu_cw;
_FPU_GETCW(fpu_oldcw); // store old cw
fpu_cw = (fpu_oldcw & ~_FPU_EXTENDED & ~_FPU_DOUBLE & ~_FPU_SINGLE) | _FPU_SINGLE;
_FPU_SETCW(fpu_cw);
// calculations here
ncvStat = ncvApplyHaarClassifierCascade_host(
h_integralImage, h_rectStdDev, h_pixelMask,
detectionsOnThisScale_h,
haar, h_HaarStages, h_HaarNodes, h_HaarFeatures, false,
searchRoiU, 1, 1.0f);
ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
_FPU_SETCW(fpu_oldcw); // restore old cw
#else
#ifndef _WIN64
Ncv32u fpu_oldcw, fpu_cw;
_controlfp_s(&fpu_cw, 0, 0);
fpu_oldcw = fpu_cw;
_controlfp_s(&fpu_cw, _PC_24, _MCW_PC);
#endif
ncvStat = ncvApplyHaarClassifierCascade_host(
h_integralImage, h_rectStdDev, h_pixelMask,
detectionsOnThisScale_h,
haar, h_HaarStages, h_HaarNodes, h_HaarFeatures, false,
searchRoiU, 1, 1.0f);
ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
#ifndef _WIN64
_controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC);
#endif
#endif
{
// calculations here
FpuControl fpu;
(void) fpu;
ncvStat = ncvApplyHaarClassifierCascade_host(
h_integralImage, h_rectStdDev, h_pixelMask,
detectionsOnThisScale_h,
haar, h_HaarStages, h_HaarNodes, h_HaarFeatures, false,
searchRoiU, 1, 1.0f);
ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
}
#endif
NCV_SKIP_COND_END
int devId;
@ -302,4 +313,4 @@ bool TestHaarCascadeApplication::deinit()
return true;
}
#endif /* CUDA_DISABLER */
#endif /* CUDA_DISABLER */

@ -25,7 +25,7 @@
#include "NCVAutoTestLister.hpp"
#include "NCVTestSourceProvider.hpp"
#include <main_test_nvidia.h>
#include "main_test_nvidia.h"
static std::string path;
@ -97,7 +97,7 @@ void generateRectStdDevTests(NCVAutoTestLister &testLister, NCVTestSourceProvide
template <class T>
void generateResizeTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<T> &src)
{
for (Ncv32u i=1; i<480; i+=3)
for (Ncv32u i=2; i<10; ++i)
{
char testName[80];
sprintf(testName, "TestResize_VGA_s%d", i);
@ -105,7 +105,7 @@ void generateResizeTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<T>
testLister.add(new TestResize<T>(testName, src, 640, 480, i, false));
}
for (Ncv32u i=1; i<1080; i+=5)
for (Ncv32u i=2; i<10; ++i)
{
char testName[80];
sprintf(testName, "TestResize_1080_s%d", i);
@ -117,7 +117,7 @@ void generateResizeTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<T>
void generateNPPSTVectorTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<Ncv32u> &src, Ncv32u maxLength)
{
//compaction
for (Ncv32f _i=256.0; _i<maxLength; _i*=1.1f)
for (Ncv32f _i=256.0; _i<maxLength; _i*=1.5f)
{
Ncv32u i = (Ncv32u)_i;
char testName[80];
@ -132,13 +132,13 @@ void generateNPPSTVectorTests(NCVAutoTestLister &testLister, NCVTestSourceProvid
testLister.add(new TestCompact(testName, src, i, 0xC001C0DE, 0));
testLister.add(new TestCompact(testName, src, i, 0xC001C0DE, 100));
}
for (Ncv32u i=256*256-256; i<256*256+257; i++)
for (Ncv32u i=256*256-10; i<256*256+10; i++)
{
char testName[80];
sprintf(testName, "Compaction%d", i);
testLister.add(new TestCompact(testName, src, i, 0xFFFFFFFF, 40));
}
for (Ncv32u i=256*256*256-10; i<256*256*256+10; i++)
for (Ncv32u i=256*256*256-2; i<256*256*256+2; i++)
{
char testName[80];
sprintf(testName, "Compaction%d", i);
@ -212,7 +212,7 @@ void generateDrawRectsTests(NCVAutoTestLister &testLister,
void generateVectorTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<Ncv32u> &src, Ncv32u maxLength)
{
//growth
for (Ncv32f _i=10.0; _i<maxLength; _i*=1.1f)
for (Ncv32f _i=10.0; _i<maxLength; _i*=1.5f)
{
Ncv32u i = (Ncv32u)_i;
char testName[80];
@ -253,16 +253,16 @@ void generateHaarApplicationTests(NCVAutoTestLister &testLister, NCVTestSourcePr
Ncv32u maxWidth, Ncv32u maxHeight)
{
(void)maxHeight;
for (Ncv32u i=20; i<512; i+=11)
for (Ncv32u i=100; i<512; i+=41)
{
for (Ncv32u j=20; j<128; j+=5)
for (Ncv32u j=100; j<128; j+=25)
{
char testName[80];
sprintf(testName, "HaarAppl%d_%d", i, j);
testLister.add(new TestHaarCascadeApplication(testName, src, path + "haarcascade_frontalface_alt.xml", j, i));
}
}
for (Ncv32f _i=20.0; _i<maxWidth; _i*=1.1f)
for (Ncv32f _i=20.0; _i<maxWidth; _i*=1.5f)
{
Ncv32u i = (Ncv32u)_i;
char testName[80];
@ -276,6 +276,8 @@ static void devNullOutput(const std::string& msg)
(void)msg;
}
}
bool nvidia_NPPST_Integral_Image(const std::string& test_data_path, OutputLevel outputLevel)
{
path = test_data_path.c_str();
@ -283,17 +285,15 @@ bool nvidia_NPPST_Integral_Image(const std::string& test_data_path, OutputLevel
NCVAutoTestLister testListerII("NPPST Integral Image", outputLevel);
NCVTestSourceProvider<Ncv8u> testSrcRandom_8u(2010, 0, 255, 4096, 4096);
NCVTestSourceProvider<Ncv32f> testSrcRandom_32f(2010, -1.0f, 1.0f, 4096, 4096);
NCVTestSourceProvider<Ncv8u> testSrcRandom_8u(2010, 0, 255, 2048, 2048);
NCVTestSourceProvider<Ncv32f> testSrcRandom_32f(2010, -1.0f, 1.0f, 2048, 2048);
generateIntegralTests<Ncv8u, Ncv32u>(testListerII, testSrcRandom_8u, 4096, 4096);
generateIntegralTests<Ncv32f, Ncv32f>(testListerII, testSrcRandom_32f, 4096, 4096);
generateIntegralTests<Ncv8u, Ncv32u>(testListerII, testSrcRandom_8u, 2048, 2048);
generateIntegralTests<Ncv32f, Ncv32f>(testListerII, testSrcRandom_32f, 2048, 2048);
return testListerII.invoke();
}
}
bool nvidia_NPPST_Squared_Integral_Image(const std::string& test_data_path, OutputLevel outputLevel)
{
path = test_data_path;
@ -301,9 +301,9 @@ bool nvidia_NPPST_Squared_Integral_Image(const std::string& test_data_path, Outp
NCVAutoTestLister testListerSII("NPPST Squared Integral Image", outputLevel);
NCVTestSourceProvider<Ncv8u> testSrcRandom_8u(2010, 0, 255, 4096, 4096);
NCVTestSourceProvider<Ncv8u> testSrcRandom_8u(2010, 0, 255, 2048, 2048);
generateSquaredIntegralTests(testListerSII, testSrcRandom_8u, 4096, 4096);
generateSquaredIntegralTests(testListerSII, testSrcRandom_8u, 2048, 2048);
return testListerSII.invoke();
}
@ -315,9 +315,9 @@ bool nvidia_NPPST_RectStdDev(const std::string& test_data_path, OutputLevel outp
NCVAutoTestLister testListerRStdDev("NPPST RectStdDev", outputLevel);
NCVTestSourceProvider<Ncv8u> testSrcRandom_8u(2010, 0, 255, 4096, 4096);
NCVTestSourceProvider<Ncv8u> testSrcRandom_8u(2010, 0, 255, 2048, 2048);
generateRectStdDevTests(testListerRStdDev, testSrcRandom_8u, 4096, 4096);
generateRectStdDevTests(testListerRStdDev, testSrcRandom_8u, 2048, 2048);
return testListerRStdDev.invoke();
}
@ -329,8 +329,8 @@ bool nvidia_NPPST_Resize(const std::string& test_data_path, OutputLevel outputLe
NCVAutoTestLister testListerResize("NPPST Resize", outputLevel);
NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, 0xFFFFFFFF, 4096, 4096);
NCVTestSourceProvider<Ncv64u> testSrcRandom_64u(2010, 0, -1, 4096, 4096);
NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, 0xFFFFFFFF, 2048, 2048);
NCVTestSourceProvider<Ncv64u> testSrcRandom_64u(2010, 0, -1, 2048, 2048);
generateResizeTests(testListerResize, testSrcRandom_32u);
generateResizeTests(testListerResize, testSrcRandom_64u);
@ -345,9 +345,9 @@ bool nvidia_NPPST_Vector_Operations(const std::string& test_data_path, OutputLev
NCVAutoTestLister testListerNPPSTVectorOperations("NPPST Vector Operations", outputLevel);
NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, 0xFFFFFFFF, 4096, 4096);
NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, 0xFFFFFFFF, 2048, 2048);
generateNPPSTVectorTests(testListerNPPSTVectorOperations, testSrcRandom_32u, 4096*4096);
generateNPPSTVectorTests(testListerNPPSTVectorOperations, testSrcRandom_32u, 2048*2048);
return testListerNPPSTVectorOperations.invoke();
}
@ -359,8 +359,8 @@ bool nvidia_NPPST_Transpose(const std::string& test_data_path, OutputLevel outpu
NCVAutoTestLister testListerTranspose("NPPST Transpose", outputLevel);
NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, 0xFFFFFFFF, 4096, 4096);
NCVTestSourceProvider<Ncv64u> testSrcRandom_64u(2010, 0, -1, 4096, 4096);
NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, 0xFFFFFFFF, 2048, 2048);
NCVTestSourceProvider<Ncv64u> testSrcRandom_64u(2010, 0, -1, 2048, 2048);
generateTransposeTests(testListerTranspose, testSrcRandom_32u);
generateTransposeTests(testListerTranspose, testSrcRandom_64u);
@ -375,9 +375,9 @@ bool nvidia_NCV_Vector_Operations(const std::string& test_data_path, OutputLevel
NCVAutoTestLister testListerVectorOperations("Vector Operations", outputLevel);
NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, 0xFFFFFFFF, 4096, 4096);
NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, 0xFFFFFFFF, 2048, 2048);
generateVectorTests(testListerVectorOperations, testSrcRandom_32u, 4096*4096);
generateVectorTests(testListerVectorOperations, testSrcRandom_32u, 2048*2048);
return testListerVectorOperations.invoke();
@ -404,7 +404,7 @@ bool nvidia_NCV_Haar_Cascade_Application(const std::string& test_data_path, Outp
NCVTestSourceProvider<Ncv8u> testSrcFacesVGA_8u(path + "group_1_640x480_VGA.pgm");
generateHaarApplicationTests(testListerHaarAppl, testSrcFacesVGA_8u, 1280, 720);
generateHaarApplicationTests(testListerHaarAppl, testSrcFacesVGA_8u, 640, 480);
return testListerHaarAppl.invoke();
}
@ -416,9 +416,9 @@ bool nvidia_NCV_Hypotheses_Filtration(const std::string& test_data_path, OutputL
NCVAutoTestLister testListerHypFiltration("Hypotheses Filtration", outputLevel);
NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, 0xFFFFFFFF, 4096, 4096);
NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, 0xFFFFFFFF, 2048, 2048);
generateHypothesesFiltrationTests(testListerHypFiltration, testSrcRandom_32u, 1024);
generateHypothesesFiltrationTests(testListerHypFiltration, testSrcRandom_32u, 512);
return testListerHypFiltration.invoke();
}
@ -430,13 +430,13 @@ bool nvidia_NCV_Visualization(const std::string& test_data_path, OutputLevel out
NCVAutoTestLister testListerVisualize("Visualization", outputLevel);
NCVTestSourceProvider<Ncv8u> testSrcRandom_8u(2010, 0, 255, 4096, 4096);
NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, RAND_MAX, 4096, 4096);
NCVTestSourceProvider<Ncv8u> testSrcRandom_8u(2010, 0, 255, 2048, 2048);
NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, RAND_MAX, 2048, 2048);
generateDrawRectsTests(testListerVisualize, testSrcRandom_8u, testSrcRandom_32u, 4096, 4096);
generateDrawRectsTests(testListerVisualize, testSrcRandom_32u, testSrcRandom_32u, 4096, 4096);
generateDrawRectsTests(testListerVisualize, testSrcRandom_8u, testSrcRandom_32u, 2048, 2048);
generateDrawRectsTests(testListerVisualize, testSrcRandom_32u, testSrcRandom_32u, 2048, 2048);
return testListerVisualize.invoke();
}
#endif /* CUDA_DISABLER */
#endif /* CUDA_DISABLER */

@ -0,0 +1,405 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#ifdef HAVE_CUDA
//////////////////////////////////////////////////////
// FGDStatModel
namespace cv
{
template<> void Ptr<CvBGStatModel>::delete_obj()
{
cvReleaseBGStatModel(&obj);
}
}
PARAM_TEST_CASE(FGDStatModel, cv::gpu::DeviceInfo, std::string, Channels)
{
cv::gpu::DeviceInfo devInfo;
std::string inputFile;
int out_cn;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + GET_PARAM(1);
out_cn = GET_PARAM(2);
}
};
GPU_TEST_P(FGDStatModel, Update)
{
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
IplImage ipl_frame = frame;
cv::Ptr<CvBGStatModel> model(cvCreateFGDStatModel(&ipl_frame));
cv::gpu::GpuMat d_frame(frame);
cv::gpu::FGDStatModel d_model(out_cn);
d_model.create(d_frame);
cv::Mat h_background;
cv::Mat h_foreground;
cv::Mat h_background3;
cv::Mat backgroundDiff;
cv::Mat foregroundDiff;
for (int i = 0; i < 5; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
ipl_frame = frame;
int gold_count = cvUpdateBGStatModel(&ipl_frame, model);
d_frame.upload(frame);
int count = d_model.update(d_frame);
ASSERT_EQ(gold_count, count);
cv::Mat gold_background(model->background);
cv::Mat gold_foreground(model->foreground);
if (out_cn == 3)
d_model.background.download(h_background3);
else
{
d_model.background.download(h_background);
cv::cvtColor(h_background, h_background3, cv::COLOR_BGRA2BGR);
}
d_model.foreground.download(h_foreground);
ASSERT_MAT_NEAR(gold_background, h_background3, 1.0);
ASSERT_MAT_NEAR(gold_foreground, h_foreground, 0.0);
}
}
INSTANTIATE_TEST_CASE_P(GPU_Video, FGDStatModel, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi")),
testing::Values(Channels(3), Channels(4))));
//////////////////////////////////////////////////////
// MOG
namespace
{
IMPLEMENT_PARAM_CLASS(UseGray, bool)
IMPLEMENT_PARAM_CLASS(LearningRate, double)
}
PARAM_TEST_CASE(MOG, cv::gpu::DeviceInfo, std::string, UseGray, LearningRate, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
std::string inputFile;
bool useGray;
double learningRate;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + GET_PARAM(1);
useGray = GET_PARAM(2);
learningRate = GET_PARAM(3);
useRoi = GET_PARAM(4);
}
};
GPU_TEST_P(MOG, Update)
{
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
cv::gpu::MOG_GPU mog;
cv::gpu::GpuMat foreground = createMat(frame.size(), CV_8UC1, useRoi);
cv::BackgroundSubtractorMOG mog_gold;
cv::Mat foreground_gold;
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (useGray)
{
cv::Mat temp;
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
cv::swap(temp, frame);
}
mog(loadMat(frame, useRoi), foreground, (float)learningRate);
mog_gold(frame, foreground_gold, learningRate);
ASSERT_MAT_NEAR(foreground_gold, foreground, 0.0);
}
}
INSTANTIATE_TEST_CASE_P(GPU_Video, MOG, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi")),
testing::Values(UseGray(true), UseGray(false)),
testing::Values(LearningRate(0.0), LearningRate(0.01)),
WHOLE_SUBMAT));
//////////////////////////////////////////////////////
// MOG2
PARAM_TEST_CASE(MOG2, cv::gpu::DeviceInfo, std::string, UseGray, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
std::string inputFile;
bool useGray;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + GET_PARAM(1);
useGray = GET_PARAM(2);
useRoi = GET_PARAM(3);
}
};
GPU_TEST_P(MOG2, Update)
{
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
cv::gpu::MOG2_GPU mog2;
cv::gpu::GpuMat foreground = createMat(frame.size(), CV_8UC1, useRoi);
cv::BackgroundSubtractorMOG2 mog2_gold;
cv::Mat foreground_gold;
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (useGray)
{
cv::Mat temp;
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
cv::swap(temp, frame);
}
mog2(loadMat(frame, useRoi), foreground);
mog2_gold(frame, foreground_gold);
double norm = cv::norm(foreground_gold, cv::Mat(foreground), cv::NORM_L1);
norm /= foreground_gold.size().area();
ASSERT_LE(norm, 0.09);
}
}
GPU_TEST_P(MOG2, getBackgroundImage)
{
if (useGray)
return;
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cv::gpu::MOG2_GPU mog2;
cv::gpu::GpuMat foreground;
cv::BackgroundSubtractorMOG2 mog2_gold;
cv::Mat foreground_gold;
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
mog2(loadMat(frame, useRoi), foreground);
mog2_gold(frame, foreground_gold);
}
cv::gpu::GpuMat background = createMat(frame.size(), frame.type(), useRoi);
mog2.getBackgroundImage(background);
cv::Mat background_gold;
mog2_gold.getBackgroundImage(background_gold);
ASSERT_MAT_NEAR(background_gold, background, 0);
}
INSTANTIATE_TEST_CASE_P(GPU_Video, MOG2, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi")),
testing::Values(UseGray(true), UseGray(false)),
WHOLE_SUBMAT));
//////////////////////////////////////////////////////
// VIBE
PARAM_TEST_CASE(VIBE, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
};
GPU_TEST_P(VIBE, Accuracy)
{
const cv::gpu::DeviceInfo devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
const cv::Size size = GET_PARAM(1);
const int type = GET_PARAM(2);
const bool useRoi = GET_PARAM(3);
const cv::Mat fullfg(size, CV_8UC1, cv::Scalar::all(255));
cv::Mat frame = randomMat(size, type, 0.0, 100);
cv::gpu::GpuMat d_frame = loadMat(frame, useRoi);
cv::gpu::VIBE_GPU vibe;
cv::gpu::GpuMat d_fgmask = createMat(size, CV_8UC1, useRoi);
vibe.initialize(d_frame);
for (int i = 0; i < 20; ++i)
vibe(d_frame, d_fgmask);
frame = randomMat(size, type, 160, 255);
d_frame = loadMat(frame, useRoi);
vibe(d_frame, d_fgmask);
// now fgmask should be entirely foreground
ASSERT_MAT_NEAR(fullfg, d_fgmask, 0);
}
INSTANTIATE_TEST_CASE_P(GPU_Video, VIBE, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4)),
WHOLE_SUBMAT));
//////////////////////////////////////////////////////
// GMG
PARAM_TEST_CASE(GMG, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels, UseRoi)
{
};
GPU_TEST_P(GMG, Accuracy)
{
const cv::gpu::DeviceInfo devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
const cv::Size size = GET_PARAM(1);
const int depth = GET_PARAM(2);
const int channels = GET_PARAM(3);
const bool useRoi = GET_PARAM(4);
const int type = CV_MAKE_TYPE(depth, channels);
const cv::Mat zeros(size, CV_8UC1, cv::Scalar::all(0));
const cv::Mat fullfg(size, CV_8UC1, cv::Scalar::all(255));
cv::Mat frame = randomMat(size, type, 0, 100);
cv::gpu::GpuMat d_frame = loadMat(frame, useRoi);
cv::gpu::GMG_GPU gmg;
gmg.numInitializationFrames = 5;
gmg.smoothingRadius = 0;
gmg.initialize(d_frame.size(), 0, 255);
cv::gpu::GpuMat d_fgmask = createMat(size, CV_8UC1, useRoi);
for (int i = 0; i < gmg.numInitializationFrames; ++i)
{
gmg(d_frame, d_fgmask);
// fgmask should be entirely background during training
ASSERT_MAT_NEAR(zeros, d_fgmask, 0);
}
frame = randomMat(size, type, 160, 255);
d_frame = loadMat(frame, useRoi);
gmg(d_frame, d_fgmask);
// now fgmask should be entirely foreground
ASSERT_MAT_NEAR(fullfg, d_fgmask, 0);
}
INSTANTIATE_TEST_CASE_P(GPU_Video, GMG, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8U), MatType(CV_16U), MatType(CV_32F)),
testing::Values(Channels(1), Channels(3), Channels(4)),
WHOLE_SUBMAT));
#endif // HAVE_CUDA

@ -43,8 +43,6 @@
#ifdef HAVE_CUDA
namespace {
//////////////////////////////////////////////////////////////////////////
// StereoBM
@ -60,7 +58,7 @@ struct StereoBM : testing::TestWithParam<cv::gpu::DeviceInfo>
}
};
TEST_P(StereoBM, Regression)
GPU_TEST_P(StereoBM, Regression)
{
cv::Mat left_image = readImage("stereobm/aloe-L.png", cv::IMREAD_GRAYSCALE);
cv::Mat right_image = readImage("stereobm/aloe-R.png", cv::IMREAD_GRAYSCALE);
@ -95,7 +93,7 @@ struct StereoBeliefPropagation : testing::TestWithParam<cv::gpu::DeviceInfo>
}
};
TEST_P(StereoBeliefPropagation, Regression)
GPU_TEST_P(StereoBeliefPropagation, Regression)
{
cv::Mat left_image = readImage("stereobp/aloe-L.png");
cv::Mat right_image = readImage("stereobp/aloe-R.png");
@ -133,7 +131,7 @@ struct StereoConstantSpaceBP : testing::TestWithParam<cv::gpu::DeviceInfo>
}
};
TEST_P(StereoConstantSpaceBP, Regression)
GPU_TEST_P(StereoConstantSpaceBP, Regression)
{
cv::Mat left_image = readImage("csstereobp/aloe-L.png");
cv::Mat right_image = readImage("csstereobp/aloe-R.png");
@ -177,7 +175,7 @@ struct TransformPoints : testing::TestWithParam<cv::gpu::DeviceInfo>
}
};
TEST_P(TransformPoints, Accuracy)
GPU_TEST_P(TransformPoints, Accuracy)
{
cv::Mat src = randomMat(cv::Size(1000, 1), CV_32FC3, 0, 10);
cv::Mat rvec = randomMat(cv::Size(3, 1), CV_32F, 0, 1);
@ -225,7 +223,7 @@ struct ProjectPoints : testing::TestWithParam<cv::gpu::DeviceInfo>
}
};
TEST_P(ProjectPoints, Accuracy)
GPU_TEST_P(ProjectPoints, Accuracy)
{
cv::Mat src = randomMat(cv::Size(1000, 1), CV_32FC3, 0, 10);
cv::Mat rvec = randomMat(cv::Size(3, 1), CV_32F, 0, 1);
@ -275,7 +273,7 @@ struct SolvePnPRansac : testing::TestWithParam<cv::gpu::DeviceInfo>
}
};
TEST_P(SolvePnPRansac, Accuracy)
GPU_TEST_P(SolvePnPRansac, Accuracy)
{
cv::Mat object = randomMat(cv::Size(5000, 1), CV_32FC3, 0, 100);
cv::Mat camera_mat = randomMat(cv::Size(3, 3), CV_32F, 0.5, 1);
@ -324,7 +322,7 @@ PARAM_TEST_CASE(ReprojectImageTo3D, cv::gpu::DeviceInfo, cv::Size, MatDepth, Use
}
};
TEST_P(ReprojectImageTo3D, Accuracy)
GPU_TEST_P(ReprojectImageTo3D, Accuracy)
{
cv::Mat disp = randomMat(size, depth, 5.0, 30.0);
cv::Mat Q = randomMat(cv::Size(4, 4), CV_32FC1, 0.1, 1.0);
@ -344,6 +342,4 @@ INSTANTIATE_TEST_CASE_P(GPU_Calib3D, ReprojectImageTo3D, testing::Combine(
testing::Values(MatDepth(CV_8U), MatDepth(CV_16S)),
WHOLE_SUBMAT));
} // namespace
#endif // HAVE_CUDA

File diff suppressed because it is too large (modules/gpu/test/test_color.cpp)

@ -43,9 +43,10 @@
#ifdef HAVE_CUDA
namespace {
IMPLEMENT_PARAM_CLASS(Border, int)
namespace
{
IMPLEMENT_PARAM_CLASS(Border, int)
}
PARAM_TEST_CASE(CopyMakeBorder, cv::gpu::DeviceInfo, cv::Size, MatType, Border, BorderType, UseRoi)
{
@ -69,7 +70,7 @@ PARAM_TEST_CASE(CopyMakeBorder, cv::gpu::DeviceInfo, cv::Size, MatType, Border,
}
};
TEST_P(CopyMakeBorder, Accuracy)
GPU_TEST_P(CopyMakeBorder, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Scalar val = randomScalar(0, 255);
@ -99,6 +100,4 @@ INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CopyMakeBorder, testing::Combine(
ALL_BORDER_TYPES,
WHOLE_SUBMAT));
} // namespace
#endif // HAVE_CUDA

File diff suppressed because it is too large (modules/gpu/test/test_core.cpp)

@ -69,7 +69,7 @@ PARAM_TEST_CASE(BilateralFilter, cv::gpu::DeviceInfo, cv::Size, MatType)
}
};
TEST_P(BilateralFilter, Accuracy)
GPU_TEST_P(BilateralFilter, Accuracy)
{
cv::Mat src = randomMat(size, type);
@ -105,7 +105,7 @@ struct BruteForceNonLocalMeans: testing::TestWithParam<cv::gpu::DeviceInfo>
}
};
TEST_P(BruteForceNonLocalMeans, Regression)
GPU_TEST_P(BruteForceNonLocalMeans, Regression)
{
using cv::gpu::GpuMat;
@ -134,8 +134,6 @@ TEST_P(BruteForceNonLocalMeans, Regression)
INSTANTIATE_TEST_CASE_P(GPU_Denoising, BruteForceNonLocalMeans, ALL_DEVICES);
////////////////////////////////////////////////////////
// Fast Force Non local means
@ -150,7 +148,7 @@ struct FastNonLocalMeans: testing::TestWithParam<cv::gpu::DeviceInfo>
}
};
TEST_P(FastNonLocalMeans, Regression)
GPU_TEST_P(FastNonLocalMeans, Regression)
{
using cv::gpu::GpuMat;
@ -167,8 +165,8 @@ TEST_P(FastNonLocalMeans, Regression)
fnlmd.labMethod(GpuMat(bgr), dbgr, 20, 10);
#if 0
//dumpImage("denoising/fnlm_denoised_lena_bgr.png", cv::Mat(dbgr));
//dumpImage("denoising/fnlm_denoised_lena_gray.png", cv::Mat(dgray));
dumpImage("denoising/fnlm_denoised_lena_bgr.png", cv::Mat(dbgr));
dumpImage("denoising/fnlm_denoised_lena_gray.png", cv::Mat(dgray));
#endif
cv::Mat bgr_gold = readImage("denoising/fnlm_denoised_lena_bgr.png", cv::IMREAD_COLOR);
@ -181,5 +179,4 @@ TEST_P(FastNonLocalMeans, Regression)
INSTANTIATE_TEST_CASE_P(GPU_Denoising, FastNonLocalMeans, ALL_DEVICES);
#endif // HAVE_CUDA

@ -43,118 +43,122 @@
#ifdef HAVE_CUDA
namespace {
bool keyPointsEquals(const cv::KeyPoint& p1, const cv::KeyPoint& p2)
namespace
{
const double maxPtDif = 1.0;
const double maxSizeDif = 1.0;
const double maxAngleDif = 2.0;
const double maxResponseDif = 0.1;
double dist = cv::norm(p1.pt - p2.pt);
if (dist < maxPtDif &&
fabs(p1.size - p2.size) < maxSizeDif &&
abs(p1.angle - p2.angle) < maxAngleDif &&
abs(p1.response - p2.response) < maxResponseDif &&
p1.octave == p2.octave &&
p1.class_id == p2.class_id)
bool keyPointsEquals(const cv::KeyPoint& p1, const cv::KeyPoint& p2)
{
return true;
}
const double maxPtDif = 1.0;
const double maxSizeDif = 1.0;
const double maxAngleDif = 2.0;
const double maxResponseDif = 0.1;
return false;
}
double dist = cv::norm(p1.pt - p2.pt);
struct KeyPointLess : std::binary_function<cv::KeyPoint, cv::KeyPoint, bool>
{
bool operator()(const cv::KeyPoint& kp1, const cv::KeyPoint& kp2) const
{
return kp1.pt.y < kp2.pt.y || (kp1.pt.y == kp2.pt.y && kp1.pt.x < kp2.pt.x);
}
};
if (dist < maxPtDif &&
fabs(p1.size - p2.size) < maxSizeDif &&
abs(p1.angle - p2.angle) < maxAngleDif &&
abs(p1.response - p2.response) < maxResponseDif &&
p1.octave == p2.octave &&
p1.class_id == p2.class_id)
{
return true;
}
testing::AssertionResult assertKeyPointsEquals(const char* gold_expr, const char* actual_expr, std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual)
{
if (gold.size() != actual.size())
{
return testing::AssertionFailure() << "KeyPoints size mistmach\n"
<< "\"" << gold_expr << "\" : " << gold.size() << "\n"
<< "\"" << actual_expr << "\" : " << actual.size();
return false;
}
std::sort(actual.begin(), actual.end(), KeyPointLess());
std::sort(gold.begin(), gold.end(), KeyPointLess());
struct KeyPointLess : std::binary_function<cv::KeyPoint, cv::KeyPoint, bool>
{
bool operator()(const cv::KeyPoint& kp1, const cv::KeyPoint& kp2) const
{
return kp1.pt.y < kp2.pt.y || (kp1.pt.y == kp2.pt.y && kp1.pt.x < kp2.pt.x);
}
};
for (size_t i = 0; i < gold.size(); ++i)
testing::AssertionResult assertKeyPointsEquals(const char* gold_expr, const char* actual_expr, std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual)
{
const cv::KeyPoint& p1 = gold[i];
const cv::KeyPoint& p2 = actual[i];
if (gold.size() != actual.size())
{
return testing::AssertionFailure() << "KeyPoints size mistmach\n"
<< "\"" << gold_expr << "\" : " << gold.size() << "\n"
<< "\"" << actual_expr << "\" : " << actual.size();
}
if (!keyPointsEquals(p1, p2))
std::sort(actual.begin(), actual.end(), KeyPointLess());
std::sort(gold.begin(), gold.end(), KeyPointLess());
for (size_t i = 0; i < gold.size(); ++i)
{
return testing::AssertionFailure() << "KeyPoints differ at " << i << "\n"
<< "\"" << gold_expr << "\" vs \"" << actual_expr << "\" : \n"
<< "pt : " << testing::PrintToString(p1.pt) << " vs " << testing::PrintToString(p2.pt) << "\n"
<< "size : " << p1.size << " vs " << p2.size << "\n"
<< "angle : " << p1.angle << " vs " << p2.angle << "\n"
<< "response : " << p1.response << " vs " << p2.response << "\n"
<< "octave : " << p1.octave << " vs " << p2.octave << "\n"
<< "class_id : " << p1.class_id << " vs " << p2.class_id;
const cv::KeyPoint& p1 = gold[i];
const cv::KeyPoint& p2 = actual[i];
if (!keyPointsEquals(p1, p2))
{
return testing::AssertionFailure() << "KeyPoints differ at " << i << "\n"
<< "\"" << gold_expr << "\" vs \"" << actual_expr << "\" : \n"
<< "pt : " << testing::PrintToString(p1.pt) << " vs " << testing::PrintToString(p2.pt) << "\n"
<< "size : " << p1.size << " vs " << p2.size << "\n"
<< "angle : " << p1.angle << " vs " << p2.angle << "\n"
<< "response : " << p1.response << " vs " << p2.response << "\n"
<< "octave : " << p1.octave << " vs " << p2.octave << "\n"
<< "class_id : " << p1.class_id << " vs " << p2.class_id;
}
}
return ::testing::AssertionSuccess();
}
return ::testing::AssertionSuccess();
}
#define ASSERT_KEYPOINTS_EQ(gold, actual) EXPECT_PRED_FORMAT2(assertKeyPointsEquals, gold, actual);
#define ASSERT_KEYPOINTS_EQ(gold, actual) EXPECT_PRED_FORMAT2(assertKeyPointsEquals, gold, actual);
int getMatchedPointsCount(std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual)
{
std::sort(actual.begin(), actual.end(), KeyPointLess());
std::sort(gold.begin(), gold.end(), KeyPointLess());
int getMatchedPointsCount(std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual)
{
std::sort(actual.begin(), actual.end(), KeyPointLess());
std::sort(gold.begin(), gold.end(), KeyPointLess());
int validCount = 0;
int validCount = 0;
for (size_t i = 0; i < gold.size(); ++i)
{
const cv::KeyPoint& p1 = gold[i];
const cv::KeyPoint& p2 = actual[i];
for (size_t i = 0; i < gold.size(); ++i)
{
const cv::KeyPoint& p1 = gold[i];
const cv::KeyPoint& p2 = actual[i];
if (keyPointsEquals(p1, p2))
++validCount;
}
if (keyPointsEquals(p1, p2))
++validCount;
return validCount;
}
return validCount;
}
int getMatchedPointsCount(const std::vector<cv::KeyPoint>& keypoints1, const std::vector<cv::KeyPoint>& keypoints2, const std::vector<cv::DMatch>& matches)
{
int validCount = 0;
int getMatchedPointsCount(const std::vector<cv::KeyPoint>& keypoints1, const std::vector<cv::KeyPoint>& keypoints2, const std::vector<cv::DMatch>& matches)
{
int validCount = 0;
for (size_t i = 0; i < matches.size(); ++i)
{
const cv::DMatch& m = matches[i];
for (size_t i = 0; i < matches.size(); ++i)
{
const cv::DMatch& m = matches[i];
const cv::KeyPoint& p1 = keypoints1[m.queryIdx];
const cv::KeyPoint& p2 = keypoints2[m.trainIdx];
const cv::KeyPoint& p1 = keypoints1[m.queryIdx];
const cv::KeyPoint& p2 = keypoints2[m.trainIdx];
if (keyPointsEquals(p1, p2))
++validCount;
}
if (keyPointsEquals(p1, p2))
++validCount;
return validCount;
}
return validCount;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// SURF
IMPLEMENT_PARAM_CLASS(SURF_HessianThreshold, double)
IMPLEMENT_PARAM_CLASS(SURF_Octaves, int)
IMPLEMENT_PARAM_CLASS(SURF_OctaveLayers, int)
IMPLEMENT_PARAM_CLASS(SURF_Extended, bool)
IMPLEMENT_PARAM_CLASS(SURF_Upright, bool)
namespace
{
IMPLEMENT_PARAM_CLASS(SURF_HessianThreshold, double)
IMPLEMENT_PARAM_CLASS(SURF_Octaves, int)
IMPLEMENT_PARAM_CLASS(SURF_OctaveLayers, int)
IMPLEMENT_PARAM_CLASS(SURF_Extended, bool)
IMPLEMENT_PARAM_CLASS(SURF_Upright, bool)
}
PARAM_TEST_CASE(SURF, cv::gpu::DeviceInfo, SURF_HessianThreshold, SURF_Octaves, SURF_OctaveLayers, SURF_Extended, SURF_Upright)
{
@ -178,7 +182,7 @@ PARAM_TEST_CASE(SURF, cv::gpu::DeviceInfo, SURF_HessianThreshold, SURF_Octaves,
}
};
TEST_P(SURF, Detector)
GPU_TEST_P(SURF, Detector)
{
cv::Mat image = readImage("features2d/aloe.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
@ -226,7 +230,7 @@ TEST_P(SURF, Detector)
}
}
TEST_P(SURF, Detector_Masked)
GPU_TEST_P(SURF, Detector_Masked)
{
cv::Mat image = readImage("features2d/aloe.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
@ -277,7 +281,7 @@ TEST_P(SURF, Detector_Masked)
}
}
TEST_P(SURF, Descriptor)
GPU_TEST_P(SURF, Descriptor)
{
cv::Mat image = readImage("features2d/aloe.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
@ -328,7 +332,7 @@ TEST_P(SURF, Descriptor)
int matchedCount = getMatchedPointsCount(keypoints, keypoints, matches);
double matchedRatio = static_cast<double>(matchedCount) / keypoints.size();
EXPECT_GT(matchedRatio, 0.35);
EXPECT_GT(matchedRatio, 0.6);
}
}
@ -343,8 +347,11 @@ INSTANTIATE_TEST_CASE_P(GPU_Features2D, SURF, testing::Combine(
/////////////////////////////////////////////////////////////////////////////////////////////////
// FAST
IMPLEMENT_PARAM_CLASS(FAST_Threshold, int)
IMPLEMENT_PARAM_CLASS(FAST_NonmaxSupression, bool)
namespace
{
IMPLEMENT_PARAM_CLASS(FAST_Threshold, int)
IMPLEMENT_PARAM_CLASS(FAST_NonmaxSupression, bool)
}
PARAM_TEST_CASE(FAST, cv::gpu::DeviceInfo, FAST_Threshold, FAST_NonmaxSupression)
{
@ -362,7 +369,7 @@ PARAM_TEST_CASE(FAST, cv::gpu::DeviceInfo, FAST_Threshold, FAST_NonmaxSupression
}
};
TEST_P(FAST, Accuracy)
GPU_TEST_P(FAST, Accuracy)
{
cv::Mat image = readImage("features2d/aloe.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
@ -402,14 +409,17 @@ INSTANTIATE_TEST_CASE_P(GPU_Features2D, FAST, testing::Combine(
/////////////////////////////////////////////////////////////////////////////////////////////////
// ORB
IMPLEMENT_PARAM_CLASS(ORB_FeaturesCount, int)
IMPLEMENT_PARAM_CLASS(ORB_ScaleFactor, float)
IMPLEMENT_PARAM_CLASS(ORB_LevelsCount, int)
IMPLEMENT_PARAM_CLASS(ORB_EdgeThreshold, int)
IMPLEMENT_PARAM_CLASS(ORB_firstLevel, int)
IMPLEMENT_PARAM_CLASS(ORB_WTA_K, int)
IMPLEMENT_PARAM_CLASS(ORB_PatchSize, int)
IMPLEMENT_PARAM_CLASS(ORB_BlurForDescriptor, bool)
namespace
{
IMPLEMENT_PARAM_CLASS(ORB_FeaturesCount, int)
IMPLEMENT_PARAM_CLASS(ORB_ScaleFactor, float)
IMPLEMENT_PARAM_CLASS(ORB_LevelsCount, int)
IMPLEMENT_PARAM_CLASS(ORB_EdgeThreshold, int)
IMPLEMENT_PARAM_CLASS(ORB_firstLevel, int)
IMPLEMENT_PARAM_CLASS(ORB_WTA_K, int)
IMPLEMENT_PARAM_CLASS(ORB_PatchSize, int)
IMPLEMENT_PARAM_CLASS(ORB_BlurForDescriptor, bool)
}
CV_ENUM(ORB_ScoreType, cv::ORB::HARRIS_SCORE, cv::ORB::FAST_SCORE)
@ -443,7 +453,7 @@ PARAM_TEST_CASE(ORB, cv::gpu::DeviceInfo, ORB_FeaturesCount, ORB_ScaleFactor, OR
}
};
TEST_P(ORB, Accuracy)
GPU_TEST_P(ORB, Accuracy)
{
cv::Mat image = readImage("features2d/aloe.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
@ -505,8 +515,11 @@ INSTANTIATE_TEST_CASE_P(GPU_Features2D, ORB, testing::Combine(
/////////////////////////////////////////////////////////////////////////////////////////////////
// BruteForceMatcher
IMPLEMENT_PARAM_CLASS(DescriptorSize, int)
IMPLEMENT_PARAM_CLASS(UseMask, bool)
namespace
{
IMPLEMENT_PARAM_CLASS(DescriptorSize, int)
IMPLEMENT_PARAM_CLASS(UseMask, bool)
}
PARAM_TEST_CASE(BruteForceMatcher, cv::gpu::DeviceInfo, NormCode, DescriptorSize, UseMask)
{
@ -568,7 +581,7 @@ PARAM_TEST_CASE(BruteForceMatcher, cv::gpu::DeviceInfo, NormCode, DescriptorSize
}
};
TEST_P(BruteForceMatcher, Match_Single)
GPU_TEST_P(BruteForceMatcher, Match_Single)
{
cv::gpu::BruteForceMatcher_GPU_base matcher(
cv::gpu::BruteForceMatcher_GPU_base::DistType((normCode -2) / 2));
@ -596,7 +609,7 @@ TEST_P(BruteForceMatcher, Match_Single)
ASSERT_EQ(0, badCount);
}
TEST_P(BruteForceMatcher, Match_Collection)
GPU_TEST_P(BruteForceMatcher, Match_Collection)
{
cv::gpu::BruteForceMatcher_GPU_base matcher(
cv::gpu::BruteForceMatcher_GPU_base::DistType((normCode -2) / 2));
@ -651,7 +664,7 @@ TEST_P(BruteForceMatcher, Match_Collection)
ASSERT_EQ(0, badCount);
}
TEST_P(BruteForceMatcher, KnnMatch_2_Single)
GPU_TEST_P(BruteForceMatcher, KnnMatch_2_Single)
{
cv::gpu::BruteForceMatcher_GPU_base matcher(
cv::gpu::BruteForceMatcher_GPU_base::DistType((normCode -2) / 2));
@ -691,7 +704,7 @@ TEST_P(BruteForceMatcher, KnnMatch_2_Single)
ASSERT_EQ(0, badCount);
}
TEST_P(BruteForceMatcher, KnnMatch_3_Single)
GPU_TEST_P(BruteForceMatcher, KnnMatch_3_Single)
{
cv::gpu::BruteForceMatcher_GPU_base matcher(
cv::gpu::BruteForceMatcher_GPU_base::DistType((normCode -2) / 2));
@ -731,7 +744,7 @@ TEST_P(BruteForceMatcher, KnnMatch_3_Single)
ASSERT_EQ(0, badCount);
}
TEST_P(BruteForceMatcher, KnnMatch_2_Collection)
GPU_TEST_P(BruteForceMatcher, KnnMatch_2_Collection)
{
cv::gpu::BruteForceMatcher_GPU_base matcher(
cv::gpu::BruteForceMatcher_GPU_base::DistType((normCode -2) / 2));
@ -794,7 +807,7 @@ TEST_P(BruteForceMatcher, KnnMatch_2_Collection)
ASSERT_EQ(0, badCount);
}
TEST_P(BruteForceMatcher, KnnMatch_3_Collection)
GPU_TEST_P(BruteForceMatcher, KnnMatch_3_Collection)
{
cv::gpu::BruteForceMatcher_GPU_base matcher(
cv::gpu::BruteForceMatcher_GPU_base::DistType((normCode -2) / 2));
@ -857,7 +870,7 @@ TEST_P(BruteForceMatcher, KnnMatch_3_Collection)
ASSERT_EQ(0, badCount);
}
TEST_P(BruteForceMatcher, RadiusMatch_Single)
GPU_TEST_P(BruteForceMatcher, RadiusMatch_Single)
{
cv::gpu::BruteForceMatcher_GPU_base matcher(
cv::gpu::BruteForceMatcher_GPU_base::DistType((normCode -2) / 2));
@ -907,7 +920,7 @@ TEST_P(BruteForceMatcher, RadiusMatch_Single)
}
}
TEST_P(BruteForceMatcher, RadiusMatch_Collection)
GPU_TEST_P(BruteForceMatcher, RadiusMatch_Collection)
{
cv::gpu::BruteForceMatcher_GPU_base matcher(
cv::gpu::BruteForceMatcher_GPU_base::DistType((normCode -2) / 2));
@ -993,6 +1006,4 @@ INSTANTIATE_TEST_CASE_P(GPU_Features2D, BruteForceMatcher, testing::Combine(
testing::Values(DescriptorSize(57), DescriptorSize(64), DescriptorSize(83), DescriptorSize(128), DescriptorSize(179), DescriptorSize(256), DescriptorSize(304)),
testing::Values(UseMask(false), UseMask(true))));
} // namespace
#endif // HAVE_CUDA

@ -43,27 +43,30 @@
#ifdef HAVE_CUDA
namespace {
IMPLEMENT_PARAM_CLASS(KSize, cv::Size)
cv::Mat getInnerROI(cv::InputArray m_, cv::Size ksize)
namespace
{
cv::Mat m = getMat(m_);
cv::Rect roi(ksize.width, ksize.height, m.cols - 2 * ksize.width, m.rows - 2 * ksize.height);
return m(roi);
}
IMPLEMENT_PARAM_CLASS(KSize, cv::Size)
IMPLEMENT_PARAM_CLASS(Anchor, cv::Point)
IMPLEMENT_PARAM_CLASS(Deriv_X, int)
IMPLEMENT_PARAM_CLASS(Deriv_Y, int)
IMPLEMENT_PARAM_CLASS(Iterations, int)
cv::Mat getInnerROI(cv::InputArray m, int ksize)
{
return getInnerROI(m, cv::Size(ksize, ksize));
cv::Mat getInnerROI(cv::InputArray m_, cv::Size ksize)
{
cv::Mat m = getMat(m_);
cv::Rect roi(ksize.width, ksize.height, m.cols - 2 * ksize.width, m.rows - 2 * ksize.height);
return m(roi);
}
cv::Mat getInnerROI(cv::InputArray m, int ksize)
{
return getInnerROI(m, cv::Size(ksize, ksize));
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// Blur
IMPLEMENT_PARAM_CLASS(Anchor, cv::Point)
PARAM_TEST_CASE(Blur, cv::gpu::DeviceInfo, cv::Size, MatType, KSize, Anchor, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
@ -86,7 +89,7 @@ PARAM_TEST_CASE(Blur, cv::gpu::DeviceInfo, cv::Size, MatType, KSize, Anchor, Use
}
};
TEST_P(Blur, Accuracy)
GPU_TEST_P(Blur, Accuracy)
{
cv::Mat src = randomMat(size, type);
@ -110,36 +113,39 @@ INSTANTIATE_TEST_CASE_P(GPU_Filter, Blur, testing::Combine(
/////////////////////////////////////////////////////////////////////////////////////////////////
// Sobel
IMPLEMENT_PARAM_CLASS(Deriv_X, int)
IMPLEMENT_PARAM_CLASS(Deriv_Y, int)
PARAM_TEST_CASE(Sobel, cv::gpu::DeviceInfo, cv::Size, MatType, KSize, Deriv_X, Deriv_Y, BorderType, UseRoi)
PARAM_TEST_CASE(Sobel, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels, KSize, Deriv_X, Deriv_Y, BorderType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
int depth;
int cn;
cv::Size ksize;
int dx;
int dy;
int borderType;
bool useRoi;
int type;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
ksize = GET_PARAM(3);
dx = GET_PARAM(4);
dy = GET_PARAM(5);
borderType = GET_PARAM(6);
useRoi = GET_PARAM(7);
depth = GET_PARAM(2);
cn = GET_PARAM(3);
ksize = GET_PARAM(4);
dx = GET_PARAM(5);
dy = GET_PARAM(6);
borderType = GET_PARAM(7);
useRoi = GET_PARAM(8);
cv::gpu::setDevice(devInfo.deviceID());
type = CV_MAKE_TYPE(depth, cn);
}
};
TEST_P(Sobel, Accuracy)
GPU_TEST_P(Sobel, Accuracy)
{
if (dx == 0 && dy == 0)
return;
@ -152,13 +158,14 @@ TEST_P(Sobel, Accuracy)
cv::Mat dst_gold;
cv::Sobel(src, dst_gold, -1, dx, dy, ksize.width, 1.0, 0.0, borderType);
EXPECT_MAT_NEAR(dst_gold, dst, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.1);
EXPECT_MAT_NEAR(getInnerROI(dst_gold, ksize), getInnerROI(dst, ksize), CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.1);
}
INSTANTIATE_TEST_CASE_P(GPU_Filter, Sobel, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),
testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_16S), MatDepth(CV_32F)),
IMAGE_CHANNELS,
testing::Values(KSize(cv::Size(3, 3)), KSize(cv::Size(5, 5)), KSize(cv::Size(7, 7))),
testing::Values(Deriv_X(0), Deriv_X(1), Deriv_X(2)),
testing::Values(Deriv_Y(0), Deriv_Y(1), Deriv_Y(2)),
@ -171,31 +178,37 @@ INSTANTIATE_TEST_CASE_P(GPU_Filter, Sobel, testing::Combine(
/////////////////////////////////////////////////////////////////////////////////////////////////
// Scharr
PARAM_TEST_CASE(Scharr, cv::gpu::DeviceInfo, cv::Size, MatType, Deriv_X, Deriv_Y, BorderType, UseRoi)
PARAM_TEST_CASE(Scharr, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels, Deriv_X, Deriv_Y, BorderType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
int depth;
int cn;
int dx;
int dy;
int borderType;
bool useRoi;
int type;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
dx = GET_PARAM(3);
dy = GET_PARAM(4);
borderType = GET_PARAM(5);
useRoi = GET_PARAM(6);
depth = GET_PARAM(2);
cn = GET_PARAM(3);
dx = GET_PARAM(4);
dy = GET_PARAM(5);
borderType = GET_PARAM(6);
useRoi = GET_PARAM(7);
cv::gpu::setDevice(devInfo.deviceID());
type = CV_MAKE_TYPE(depth, cn);
}
};
TEST_P(Scharr, Accuracy)
GPU_TEST_P(Scharr, Accuracy)
{
if (dx + dy != 1)
return;
@ -208,13 +221,14 @@ TEST_P(Scharr, Accuracy)
cv::Mat dst_gold;
cv::Scharr(src, dst_gold, -1, dx, dy, 1.0, 0.0, borderType);
EXPECT_MAT_NEAR(dst_gold, dst, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.1);
EXPECT_MAT_NEAR(getInnerROI(dst_gold, cv::Size(3, 3)), getInnerROI(dst, cv::Size(3, 3)), CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.1);
}
INSTANTIATE_TEST_CASE_P(GPU_Filter, Scharr, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),
testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_16S), MatDepth(CV_32F)),
IMAGE_CHANNELS,
testing::Values(Deriv_X(0), Deriv_X(1)),
testing::Values(Deriv_Y(0), Deriv_Y(1)),
testing::Values(BorderType(cv::BORDER_REFLECT101),
@ -226,29 +240,35 @@ INSTANTIATE_TEST_CASE_P(GPU_Filter, Scharr, testing::Combine(
/////////////////////////////////////////////////////////////////////////////////////////////////
// GaussianBlur
PARAM_TEST_CASE(GaussianBlur, cv::gpu::DeviceInfo, cv::Size, MatType, KSize, BorderType, UseRoi)
PARAM_TEST_CASE(GaussianBlur, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels, KSize, BorderType, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
cv::Size size;
int type;
int depth;
int cn;
cv::Size ksize;
int borderType;
bool useRoi;
int type;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
size = GET_PARAM(1);
type = GET_PARAM(2);
ksize = GET_PARAM(3);
borderType = GET_PARAM(4);
useRoi = GET_PARAM(5);
depth = GET_PARAM(2);
cn = GET_PARAM(3);
ksize = GET_PARAM(4);
borderType = GET_PARAM(5);
useRoi = GET_PARAM(6);
cv::gpu::setDevice(devInfo.deviceID());
type = CV_MAKE_TYPE(depth, cn);
}
};
TEST_P(GaussianBlur, Accuracy)
GPU_TEST_P(GaussianBlur, Accuracy)
{
cv::Mat src = randomMat(size, type);
double sigma1 = randomDouble(0.1, 1.0);
@ -281,7 +301,8 @@ TEST_P(GaussianBlur, Accuracy)
INSTANTIATE_TEST_CASE_P(GPU_Filter, GaussianBlur, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),
testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_16S), MatDepth(CV_32F)),
IMAGE_CHANNELS,
testing::Values(KSize(cv::Size(3, 3)),
KSize(cv::Size(5, 5)),
KSize(cv::Size(7, 7)),
@ -326,7 +347,7 @@ PARAM_TEST_CASE(Laplacian, cv::gpu::DeviceInfo, cv::Size, MatType, KSize, UseRoi
}
};
TEST_P(Laplacian, Accuracy)
GPU_TEST_P(Laplacian, Accuracy)
{
cv::Mat src = randomMat(size, type);
@ -349,8 +370,6 @@ INSTANTIATE_TEST_CASE_P(GPU_Filter, Laplacian, testing::Combine(
/////////////////////////////////////////////////////////////////////////////////////////////////
// Erode
IMPLEMENT_PARAM_CLASS(Iterations, int)
PARAM_TEST_CASE(Erode, cv::gpu::DeviceInfo, cv::Size, MatType, Anchor, Iterations, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
@ -373,7 +392,7 @@ PARAM_TEST_CASE(Erode, cv::gpu::DeviceInfo, cv::Size, MatType, Anchor, Iteration
}
};
TEST_P(Erode, Accuracy)
GPU_TEST_P(Erode, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Mat kernel = cv::Mat::ones(3, 3, CV_8U);
@ -422,7 +441,7 @@ PARAM_TEST_CASE(Dilate, cv::gpu::DeviceInfo, cv::Size, MatType, Anchor, Iteratio
}
};
TEST_P(Dilate, Accuracy)
GPU_TEST_P(Dilate, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Mat kernel = cv::Mat::ones(3, 3, CV_8U);
@ -476,7 +495,7 @@ PARAM_TEST_CASE(MorphEx, cv::gpu::DeviceInfo, cv::Size, MatType, MorphOp, Anchor
}
};
TEST_P(MorphEx, Accuracy)
GPU_TEST_P(MorphEx, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Mat kernel = cv::Mat::ones(3, 3, CV_8U);
@ -530,7 +549,7 @@ PARAM_TEST_CASE(Filter2D, cv::gpu::DeviceInfo, cv::Size, MatType, KSize, Anchor,
}
};
TEST_P(Filter2D, Accuracy)
GPU_TEST_P(Filter2D, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Mat kernel = randomMat(cv::Size(ksize.width, ksize.height), CV_32FC1, 0.0, 1.0);
@ -553,6 +572,4 @@ INSTANTIATE_TEST_CASE_P(GPU_Filter, Filter2D, testing::Combine(
testing::Values(BorderType(cv::BORDER_REFLECT101), BorderType(cv::BORDER_REPLICATE), BorderType(cv::BORDER_CONSTANT), BorderType(cv::BORDER_REFLECT)),
WHOLE_SUBMAT));
} // namespace
#endif // HAVE_CUDA

@ -51,7 +51,7 @@ struct CompactPoints : testing::TestWithParam<gpu::DeviceInfo>
virtual void SetUp() { gpu::setDevice(GetParam().deviceID()); }
};
TEST_P(CompactPoints, CanCompactizeSmallInput)
GPU_TEST_P(CompactPoints, CanCompactizeSmallInput)
{
Mat src0(1, 3, CV_32FC2);
src0.at<Point2f>(0,0) = Point2f(0,0);

@ -44,8 +44,6 @@
#ifdef HAVE_CUDA
namespace {
////////////////////////////////////////////////////////////////////////////////
// SetTo
@ -67,7 +65,7 @@ PARAM_TEST_CASE(SetTo, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
}
};
TEST_P(SetTo, Zero)
GPU_TEST_P(SetTo, Zero)
{
cv::Scalar zero = cv::Scalar::all(0);
@ -77,7 +75,7 @@ TEST_P(SetTo, Zero)
EXPECT_MAT_NEAR(cv::Mat::zeros(size, type), mat, 0.0);
}
TEST_P(SetTo, SameVal)
GPU_TEST_P(SetTo, SameVal)
{
cv::Scalar val = cv::Scalar::all(randomDouble(0.0, 255.0));
@ -102,7 +100,7 @@ TEST_P(SetTo, SameVal)
}
}
TEST_P(SetTo, DifferentVal)
GPU_TEST_P(SetTo, DifferentVal)
{
cv::Scalar val = randomScalar(0.0, 255.0);
@ -127,7 +125,7 @@ TEST_P(SetTo, DifferentVal)
}
}
TEST_P(SetTo, Masked)
GPU_TEST_P(SetTo, Masked)
{
cv::Scalar val = randomScalar(0.0, 255.0);
cv::Mat mat_gold = randomMat(size, type);
@ -184,7 +182,7 @@ PARAM_TEST_CASE(CopyTo, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
}
};
TEST_P(CopyTo, WithOutMask)
GPU_TEST_P(CopyTo, WithOutMask)
{
cv::Mat src = randomMat(size, type);
@ -195,7 +193,7 @@ TEST_P(CopyTo, WithOutMask)
EXPECT_MAT_NEAR(src, dst, 0.0);
}
TEST_P(CopyTo, Masked)
GPU_TEST_P(CopyTo, Masked)
{
cv::Mat src = randomMat(size, type);
cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
@ -255,7 +253,7 @@ PARAM_TEST_CASE(ConvertTo, cv::gpu::DeviceInfo, cv::Size, MatDepth, MatDepth, Us
}
};
TEST_P(ConvertTo, WithOutScaling)
GPU_TEST_P(ConvertTo, WithOutScaling)
{
cv::Mat src = randomMat(size, depth1);
@ -285,7 +283,7 @@ TEST_P(ConvertTo, WithOutScaling)
}
}
TEST_P(ConvertTo, WithScaling)
GPU_TEST_P(ConvertTo, WithScaling)
{
cv::Mat src = randomMat(size, depth1);
double a = randomDouble(0.0, 1.0);
@ -324,6 +322,4 @@ INSTANTIATE_TEST_CASE_P(GPU_GpuMat, ConvertTo, testing::Combine(
ALL_DEPTH,
WHOLE_SUBMAT));
} // namespace
#endif // HAVE_CUDA

@ -43,8 +43,6 @@
#ifdef HAVE_CUDA
namespace {
///////////////////////////////////////////////////////////////////////////////////////////////////////
// HoughLines
@ -79,7 +77,7 @@ PARAM_TEST_CASE(HoughLines, cv::gpu::DeviceInfo, cv::Size, UseRoi)
}
};
TEST_P(HoughLines, Accuracy)
GPU_TEST_P(HoughLines, Accuracy)
{
const cv::gpu::DeviceInfo devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
@ -87,7 +85,7 @@ TEST_P(HoughLines, Accuracy)
const bool useRoi = GET_PARAM(2);
const float rho = 1.0f;
const float theta = 1.5f * CV_PI / 180.0f;
const float theta = (float) (1.5 * CV_PI / 180.0);
const int threshold = 100;
cv::Mat src(size, CV_8UC1);
@ -124,7 +122,7 @@ PARAM_TEST_CASE(HoughCircles, cv::gpu::DeviceInfo, cv::Size, UseRoi)
}
};
TEST_P(HoughCircles, Accuracy)
GPU_TEST_P(HoughCircles, Accuracy)
{
const cv::gpu::DeviceInfo devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
@ -188,7 +186,7 @@ PARAM_TEST_CASE(GeneralizedHough, cv::gpu::DeviceInfo, UseRoi)
{
};
TEST_P(GeneralizedHough, POSITION)
GPU_TEST_P(GeneralizedHough, POSITION)
{
const cv::gpu::DeviceInfo devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
@ -251,6 +249,4 @@ INSTANTIATE_TEST_CASE_P(GPU_ImgProc, GeneralizedHough, testing::Combine(
ALL_DEVICES,
WHOLE_SUBMAT));
} // namespace
#endif // HAVE_CUDA

@ -43,8 +43,6 @@
#ifdef HAVE_CUDA
namespace {
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Integral
@ -64,7 +62,7 @@ PARAM_TEST_CASE(Integral, cv::gpu::DeviceInfo, cv::Size, UseRoi)
}
};
TEST_P(Integral, Accuracy)
GPU_TEST_P(Integral, Accuracy)
{
cv::Mat src = randomMat(size, CV_8UC1);
@ -97,7 +95,7 @@ struct HistEven : testing::TestWithParam<cv::gpu::DeviceInfo>
}
};
TEST_P(HistEven, Accuracy)
GPU_TEST_P(HistEven, Accuracy)
{
cv::Mat img = readImage("stereobm/aloe-L.png");
ASSERT_FALSE(img.empty());
@ -132,18 +130,21 @@ INSTANTIATE_TEST_CASE_P(GPU_ImgProc, HistEven, ALL_DEVICES);
///////////////////////////////////////////////////////////////////////////////////////////////////////
// CalcHist
void calcHistGold(const cv::Mat& src, cv::Mat& hist)
namespace
{
hist.create(1, 256, CV_32SC1);
hist.setTo(cv::Scalar::all(0));
int* hist_row = hist.ptr<int>();
for (int y = 0; y < src.rows; ++y)
void calcHistGold(const cv::Mat& src, cv::Mat& hist)
{
const uchar* src_row = src.ptr(y);
hist.create(1, 256, CV_32SC1);
hist.setTo(cv::Scalar::all(0));
for (int x = 0; x < src.cols; ++x)
++hist_row[src_row[x]];
int* hist_row = hist.ptr<int>();
for (int y = 0; y < src.rows; ++y)
{
const uchar* src_row = src.ptr(y);
for (int x = 0; x < src.cols; ++x)
++hist_row[src_row[x]];
}
}
}
@ -162,7 +163,7 @@ PARAM_TEST_CASE(CalcHist, cv::gpu::DeviceInfo, cv::Size)
}
};
TEST_P(CalcHist, Accuracy)
GPU_TEST_P(CalcHist, Accuracy)
{
cv::Mat src = randomMat(size, CV_8UC1);
@ -196,7 +197,7 @@ PARAM_TEST_CASE(EqualizeHist, cv::gpu::DeviceInfo, cv::Size)
}
};
TEST_P(EqualizeHist, Accuracy)
GPU_TEST_P(EqualizeHist, Accuracy)
{
cv::Mat src = randomMat(size, CV_8UC1);
@ -230,7 +231,7 @@ PARAM_TEST_CASE(ColumnSum, cv::gpu::DeviceInfo, cv::Size)
}
};
TEST_P(ColumnSum, Accuracy)
GPU_TEST_P(ColumnSum, Accuracy)
{
cv::Mat src = randomMat(size, CV_32FC1);
@ -264,8 +265,11 @@ INSTANTIATE_TEST_CASE_P(GPU_ImgProc, ColumnSum, testing::Combine(
////////////////////////////////////////////////////////
// Canny
IMPLEMENT_PARAM_CLASS(AppertureSize, int);
IMPLEMENT_PARAM_CLASS(L2gradient, bool);
namespace
{
IMPLEMENT_PARAM_CLASS(AppertureSize, int);
IMPLEMENT_PARAM_CLASS(L2gradient, bool);
}
PARAM_TEST_CASE(Canny, cv::gpu::DeviceInfo, AppertureSize, L2gradient, UseRoi)
{
@ -285,7 +289,7 @@ PARAM_TEST_CASE(Canny, cv::gpu::DeviceInfo, AppertureSize, L2gradient, UseRoi)
}
};
TEST_P(Canny, Accuracy)
GPU_TEST_P(Canny, Accuracy)
{
cv::Mat img = readImage("stereobm/aloe-L.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(img.empty());
@ -313,7 +317,7 @@ TEST_P(Canny, Accuracy)
cv::Mat edges_gold;
cv::Canny(img, edges_gold, low_thresh, high_thresh, apperture_size, useL2gradient);
EXPECT_MAT_SIMILAR(edges_gold, edges, 1e-2);
EXPECT_MAT_SIMILAR(edges_gold, edges, 2e-2);
}
}
@ -349,7 +353,7 @@ struct MeanShift : testing::TestWithParam<cv::gpu::DeviceInfo>
}
};
TEST_P(MeanShift, Filtering)
GPU_TEST_P(MeanShift, Filtering)
{
cv::Mat img_template;
if (supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_20))
@ -371,7 +375,7 @@ TEST_P(MeanShift, Filtering)
EXPECT_MAT_NEAR(img_template, result, 0.0);
}
TEST_P(MeanShift, Proc)
GPU_TEST_P(MeanShift, Proc)
{
cv::FileStorage fs;
if (supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_20))
@ -402,7 +406,10 @@ INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MeanShift, ALL_DEVICES);
////////////////////////////////////////////////////////////////////////////////
// MeanShiftSegmentation
IMPLEMENT_PARAM_CLASS(MinSize, int);
namespace
{
IMPLEMENT_PARAM_CLASS(MinSize, int);
}
PARAM_TEST_CASE(MeanShiftSegmentation, cv::gpu::DeviceInfo, MinSize)
{
@ -418,7 +425,7 @@ PARAM_TEST_CASE(MeanShiftSegmentation, cv::gpu::DeviceInfo, MinSize)
}
};
TEST_P(MeanShiftSegmentation, Regression)
GPU_TEST_P(MeanShiftSegmentation, Regression)
{
cv::Mat img = readImageType("meanshift/cones.png", CV_8UC4);
ASSERT_FALSE(img.empty());
@ -448,26 +455,29 @@ INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MeanShiftSegmentation, testing::Combine(
////////////////////////////////////////////////////////////////////////////
// Blend
template <typename T>
void blendLinearGold(const cv::Mat& img1, const cv::Mat& img2, const cv::Mat& weights1, const cv::Mat& weights2, cv::Mat& result_gold)
namespace
{
result_gold.create(img1.size(), img1.type());
int cn = img1.channels();
for (int y = 0; y < img1.rows; ++y)
template <typename T>
void blendLinearGold(const cv::Mat& img1, const cv::Mat& img2, const cv::Mat& weights1, const cv::Mat& weights2, cv::Mat& result_gold)
{
const float* weights1_row = weights1.ptr<float>(y);
const float* weights2_row = weights2.ptr<float>(y);
const T* img1_row = img1.ptr<T>(y);
const T* img2_row = img2.ptr<T>(y);
T* result_gold_row = result_gold.ptr<T>(y);
result_gold.create(img1.size(), img1.type());
for (int x = 0; x < img1.cols * cn; ++x)
int cn = img1.channels();
for (int y = 0; y < img1.rows; ++y)
{
float w1 = weights1_row[x / cn];
float w2 = weights2_row[x / cn];
result_gold_row[x] = static_cast<T>((img1_row[x] * w1 + img2_row[x] * w2) / (w1 + w2 + 1e-5f));
const float* weights1_row = weights1.ptr<float>(y);
const float* weights2_row = weights2.ptr<float>(y);
const T* img1_row = img1.ptr<T>(y);
const T* img2_row = img2.ptr<T>(y);
T* result_gold_row = result_gold.ptr<T>(y);
for (int x = 0; x < img1.cols * cn; ++x)
{
float w1 = weights1_row[x / cn];
float w2 = weights2_row[x / cn];
result_gold_row[x] = static_cast<T>((img1_row[x] * w1 + img2_row[x] * w2) / (w1 + w2 + 1e-5f));
}
}
}
}
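For reference, blendLinearGold above evaluates, per pixel (y, x) and channel c, a normalized weighted average; the 1e-5f term guards against division by zero where both weight maps vanish:

\mathrm{result}(y,x,c) \;=\; \frac{I_1(y,x,c)\,w_1(y,x) + I_2(y,x,c)\,w_2(y,x)}{w_1(y,x) + w_2(y,x) + 10^{-5}}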
@ -490,7 +500,7 @@ PARAM_TEST_CASE(Blend, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
}
};
TEST_P(Blend, Accuracy)
GPU_TEST_P(Blend, Accuracy)
{
int depth = CV_MAT_DEPTH(type);
@ -520,47 +530,50 @@ INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Blend, testing::Combine(
////////////////////////////////////////////////////////
// Convolve
void convolveDFT(const cv::Mat& A, const cv::Mat& B, cv::Mat& C, bool ccorr = false)
namespace
{
// reallocate the output array if needed
C.create(std::abs(A.rows - B.rows) + 1, std::abs(A.cols - B.cols) + 1, A.type());
cv::Size dftSize;
// compute the size of DFT transform
dftSize.width = cv::getOptimalDFTSize(A.cols + B.cols - 1);
dftSize.height = cv::getOptimalDFTSize(A.rows + B.rows - 1);
// allocate temporary buffers and initialize them with 0s
cv::Mat tempA(dftSize, A.type(), cv::Scalar::all(0));
cv::Mat tempB(dftSize, B.type(), cv::Scalar::all(0));
// copy A and B to the top-left corners of tempA and tempB, respectively
cv::Mat roiA(tempA, cv::Rect(0, 0, A.cols, A.rows));
A.copyTo(roiA);
cv::Mat roiB(tempB, cv::Rect(0, 0, B.cols, B.rows));
B.copyTo(roiB);
// now transform the padded A & B in-place;
// use "nonzeroRows" hint for faster processing
cv::dft(tempA, tempA, 0, A.rows);
cv::dft(tempB, tempB, 0, B.rows);
// multiply the spectrums;
// the function handles packed spectrum representations well
cv::mulSpectrums(tempA, tempB, tempA, 0, ccorr);
// transform the product back from the frequency domain.
// Even though all the result rows will be non-zero,
// you need only the first C.rows of them, and thus you
// pass nonzeroRows == C.rows
cv::dft(tempA, tempA, cv::DFT_INVERSE + cv::DFT_SCALE, C.rows);
// now copy the result back to C.
tempA(cv::Rect(0, 0, C.cols, C.rows)).copyTo(C);
}
void convolveDFT(const cv::Mat& A, const cv::Mat& B, cv::Mat& C, bool ccorr = false)
{
// reallocate the output array if needed
C.create(std::abs(A.rows - B.rows) + 1, std::abs(A.cols - B.cols) + 1, A.type());
cv::Size dftSize;
// compute the size of DFT transform
dftSize.width = cv::getOptimalDFTSize(A.cols + B.cols - 1);
dftSize.height = cv::getOptimalDFTSize(A.rows + B.rows - 1);
// allocate temporary buffers and initialize them with 0s
cv::Mat tempA(dftSize, A.type(), cv::Scalar::all(0));
cv::Mat tempB(dftSize, B.type(), cv::Scalar::all(0));
// copy A and B to the top-left corners of tempA and tempB, respectively
cv::Mat roiA(tempA, cv::Rect(0, 0, A.cols, A.rows));
A.copyTo(roiA);
cv::Mat roiB(tempB, cv::Rect(0, 0, B.cols, B.rows));
B.copyTo(roiB);
// now transform the padded A & B in-place;
// use "nonzeroRows" hint for faster processing
cv::dft(tempA, tempA, 0, A.rows);
cv::dft(tempB, tempB, 0, B.rows);
// multiply the spectrums;
// the function handles packed spectrum representations well
cv::mulSpectrums(tempA, tempB, tempA, 0, ccorr);
// transform the product back from the frequency domain.
// Even though all the result rows will be non-zero,
// you need only the first C.rows of them, and thus you
// pass nonzeroRows == C.rows
cv::dft(tempA, tempA, cv::DFT_INVERSE + cv::DFT_SCALE, C.rows);
// now copy the result back to C.
tempA(cv::Rect(0, 0, C.cols, C.rows)).copyTo(C);
}
IMPLEMENT_PARAM_CLASS(KSize, int);
IMPLEMENT_PARAM_CLASS(Ccorr, bool);
IMPLEMENT_PARAM_CLASS(KSize, int);
IMPLEMENT_PARAM_CLASS(Ccorr, bool);
}
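The convolveDFT gold routine above is the standard frequency-domain evaluation: both inputs are zero-padded to the optimal DFT size, their spectra are multiplied (with the second spectrum conjugated when ccorr is true, via the last argument of cv::mulSpectrums), and the inverse transform is cropped to the valid region. In formula form:

C_{\mathrm{conv}} = \mathcal{F}^{-1}\!\left(\mathcal{F}(A_{\mathrm{pad}}) \cdot \mathcal{F}(B_{\mathrm{pad}})\right),
\qquad
C_{\mathrm{ccorr}} = \mathcal{F}^{-1}\!\left(\mathcal{F}(A_{\mathrm{pad}}) \cdot \overline{\mathcal{F}(B_{\mathrm{pad}})}\right),

both cropped to the top-left (|r_A - r_B| + 1) \times (|c_A - c_B| + 1) block.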
PARAM_TEST_CASE(Convolve, cv::gpu::DeviceInfo, cv::Size, KSize, Ccorr)
{
@ -580,7 +593,7 @@ PARAM_TEST_CASE(Convolve, cv::gpu::DeviceInfo, cv::Size, KSize, Ccorr)
}
};
TEST_P(Convolve, Accuracy)
GPU_TEST_P(Convolve, Accuracy)
{
cv::Mat src = randomMat(size, CV_32FC1, 0.0, 100.0);
cv::Mat kernel = randomMat(cv::Size(ksize, ksize), CV_32FC1, 0.0, 1.0);
@ -606,7 +619,10 @@ INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Convolve, testing::Combine(
CV_ENUM(TemplateMethod, cv::TM_SQDIFF, cv::TM_SQDIFF_NORMED, cv::TM_CCORR, cv::TM_CCORR_NORMED, cv::TM_CCOEFF, cv::TM_CCOEFF_NORMED)
#define ALL_TEMPLATE_METHODS testing::Values(TemplateMethod(cv::TM_SQDIFF), TemplateMethod(cv::TM_SQDIFF_NORMED), TemplateMethod(cv::TM_CCORR), TemplateMethod(cv::TM_CCORR_NORMED), TemplateMethod(cv::TM_CCOEFF), TemplateMethod(cv::TM_CCOEFF_NORMED))
IMPLEMENT_PARAM_CLASS(TemplateSize, cv::Size);
namespace
{
IMPLEMENT_PARAM_CLASS(TemplateSize, cv::Size);
}
PARAM_TEST_CASE(MatchTemplate8U, cv::gpu::DeviceInfo, cv::Size, TemplateSize, Channels, TemplateMethod)
{
@ -628,7 +644,7 @@ PARAM_TEST_CASE(MatchTemplate8U, cv::gpu::DeviceInfo, cv::Size, TemplateSize, Ch
}
};
TEST_P(MatchTemplate8U, Accuracy)
GPU_TEST_P(MatchTemplate8U, Accuracy)
{
cv::Mat image = randomMat(size, CV_MAKETYPE(CV_8U, cn));
cv::Mat templ = randomMat(templ_size, CV_MAKETYPE(CV_8U, cn));
@ -674,7 +690,7 @@ PARAM_TEST_CASE(MatchTemplate32F, cv::gpu::DeviceInfo, cv::Size, TemplateSize, C
}
};
TEST_P(MatchTemplate32F, Regression)
GPU_TEST_P(MatchTemplate32F, Regression)
{
cv::Mat image = randomMat(size, CV_MAKETYPE(CV_32F, cn));
cv::Mat templ = randomMat(templ_size, CV_MAKETYPE(CV_32F, cn));
@ -712,7 +728,7 @@ PARAM_TEST_CASE(MatchTemplateBlackSource, cv::gpu::DeviceInfo, TemplateMethod)
}
};
TEST_P(MatchTemplateBlackSource, Accuracy)
GPU_TEST_P(MatchTemplateBlackSource, Accuracy)
{
cv::Mat image = readImage("matchtemplate/black.png");
ASSERT_FALSE(image.empty());
@ -757,7 +773,7 @@ PARAM_TEST_CASE(MatchTemplate_CCOEF_NORMED, cv::gpu::DeviceInfo, std::pair<std::
}
};
TEST_P(MatchTemplate_CCOEF_NORMED, Accuracy)
GPU_TEST_P(MatchTemplate_CCOEF_NORMED, Accuracy)
{
cv::Mat image = readImage(imageName);
ASSERT_FALSE(image.empty());
@ -806,7 +822,7 @@ struct MatchTemplate_CanFindBigTemplate : testing::TestWithParam<cv::gpu::Device
}
};
TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF_NORMED)
GPU_TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF_NORMED)
{
cv::Mat scene = readImage("matchtemplate/scene.png");
ASSERT_FALSE(scene.empty());
@ -829,7 +845,7 @@ TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF_NORMED)
ASSERT_EQ(0, minLoc.y);
}
TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF)
GPU_TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF)
{
cv::Mat scene = readImage("matchtemplate/scene.png");
ASSERT_FALSE(scene.empty());
@ -879,7 +895,7 @@ PARAM_TEST_CASE(MulSpectrums, cv::gpu::DeviceInfo, cv::Size, DftFlags)
}
};
TEST_P(MulSpectrums, Simple)
GPU_TEST_P(MulSpectrums, Simple)
{
cv::gpu::GpuMat c;
cv::gpu::mulSpectrums(loadMat(a), loadMat(b), c, flag, false);
@ -890,7 +906,7 @@ TEST_P(MulSpectrums, Simple)
EXPECT_MAT_NEAR(c_gold, c, 1e-2);
}
TEST_P(MulSpectrums, Scaled)
GPU_TEST_P(MulSpectrums, Scaled)
{
float scale = 1.f / size.area();
@ -924,31 +940,34 @@ struct Dft : testing::TestWithParam<cv::gpu::DeviceInfo>
}
};
void testC2C(const std::string& hint, int cols, int rows, int flags, bool inplace)
namespace
{
SCOPED_TRACE(hint);
void testC2C(const std::string& hint, int cols, int rows, int flags, bool inplace)
{
SCOPED_TRACE(hint);
cv::Mat a = randomMat(cv::Size(cols, rows), CV_32FC2, 0.0, 10.0);
cv::Mat a = randomMat(cv::Size(cols, rows), CV_32FC2, 0.0, 10.0);
cv::Mat b_gold;
cv::dft(a, b_gold, flags);
cv::Mat b_gold;
cv::dft(a, b_gold, flags);
cv::gpu::GpuMat d_b;
cv::gpu::GpuMat d_b_data;
if (inplace)
{
d_b_data.create(1, a.size().area(), CV_32FC2);
d_b = cv::gpu::GpuMat(a.rows, a.cols, CV_32FC2, d_b_data.ptr(), a.cols * d_b_data.elemSize());
}
cv::gpu::dft(loadMat(a), d_b, cv::Size(cols, rows), flags);
cv::gpu::GpuMat d_b;
cv::gpu::GpuMat d_b_data;
if (inplace)
{
d_b_data.create(1, a.size().area(), CV_32FC2);
d_b = cv::gpu::GpuMat(a.rows, a.cols, CV_32FC2, d_b_data.ptr(), a.cols * d_b_data.elemSize());
}
cv::gpu::dft(loadMat(a), d_b, cv::Size(cols, rows), flags);
EXPECT_TRUE(!inplace || d_b.ptr() == d_b_data.ptr());
ASSERT_EQ(CV_32F, d_b.depth());
ASSERT_EQ(2, d_b.channels());
EXPECT_MAT_NEAR(b_gold, cv::Mat(d_b), rows * cols * 1e-4);
EXPECT_TRUE(!inplace || d_b.ptr() == d_b_data.ptr());
ASSERT_EQ(CV_32F, d_b.depth());
ASSERT_EQ(2, d_b.channels());
EXPECT_MAT_NEAR(b_gold, cv::Mat(d_b), rows * cols * 1e-4);
}
}
TEST_P(Dft, C2C)
GPU_TEST_P(Dft, C2C)
{
int cols = randomInt(2, 100);
int rows = randomInt(2, 100);
@ -973,43 +992,46 @@ TEST_P(Dft, C2C)
}
}
void testR2CThenC2R(const std::string& hint, int cols, int rows, bool inplace)
namespace
{
SCOPED_TRACE(hint);
void testR2CThenC2R(const std::string& hint, int cols, int rows, bool inplace)
{
SCOPED_TRACE(hint);
cv::Mat a = randomMat(cv::Size(cols, rows), CV_32FC1, 0.0, 10.0);
cv::Mat a = randomMat(cv::Size(cols, rows), CV_32FC1, 0.0, 10.0);
cv::gpu::GpuMat d_b, d_c;
cv::gpu::GpuMat d_b_data, d_c_data;
if (inplace)
{
if (a.cols == 1)
{
d_b_data.create(1, (a.rows / 2 + 1) * a.cols, CV_32FC2);
d_b = cv::gpu::GpuMat(a.rows / 2 + 1, a.cols, CV_32FC2, d_b_data.ptr(), a.cols * d_b_data.elemSize());
}
else
cv::gpu::GpuMat d_b, d_c;
cv::gpu::GpuMat d_b_data, d_c_data;
if (inplace)
{
d_b_data.create(1, a.rows * (a.cols / 2 + 1), CV_32FC2);
d_b = cv::gpu::GpuMat(a.rows, a.cols / 2 + 1, CV_32FC2, d_b_data.ptr(), (a.cols / 2 + 1) * d_b_data.elemSize());
if (a.cols == 1)
{
d_b_data.create(1, (a.rows / 2 + 1) * a.cols, CV_32FC2);
d_b = cv::gpu::GpuMat(a.rows / 2 + 1, a.cols, CV_32FC2, d_b_data.ptr(), a.cols * d_b_data.elemSize());
}
else
{
d_b_data.create(1, a.rows * (a.cols / 2 + 1), CV_32FC2);
d_b = cv::gpu::GpuMat(a.rows, a.cols / 2 + 1, CV_32FC2, d_b_data.ptr(), (a.cols / 2 + 1) * d_b_data.elemSize());
}
d_c_data.create(1, a.size().area(), CV_32F);
d_c = cv::gpu::GpuMat(a.rows, a.cols, CV_32F, d_c_data.ptr(), a.cols * d_c_data.elemSize());
}
d_c_data.create(1, a.size().area(), CV_32F);
d_c = cv::gpu::GpuMat(a.rows, a.cols, CV_32F, d_c_data.ptr(), a.cols * d_c_data.elemSize());
}
cv::gpu::dft(loadMat(a), d_b, cv::Size(cols, rows), 0);
cv::gpu::dft(d_b, d_c, cv::Size(cols, rows), cv::DFT_REAL_OUTPUT | cv::DFT_SCALE);
cv::gpu::dft(loadMat(a), d_b, cv::Size(cols, rows), 0);
cv::gpu::dft(d_b, d_c, cv::Size(cols, rows), cv::DFT_REAL_OUTPUT | cv::DFT_SCALE);
EXPECT_TRUE(!inplace || d_b.ptr() == d_b_data.ptr());
EXPECT_TRUE(!inplace || d_c.ptr() == d_c_data.ptr());
ASSERT_EQ(CV_32F, d_c.depth());
ASSERT_EQ(1, d_c.channels());
EXPECT_TRUE(!inplace || d_b.ptr() == d_b_data.ptr());
EXPECT_TRUE(!inplace || d_c.ptr() == d_c_data.ptr());
ASSERT_EQ(CV_32F, d_c.depth());
ASSERT_EQ(1, d_c.channels());
cv::Mat c(d_c);
EXPECT_MAT_NEAR(a, c, rows * cols * 1e-5);
cv::Mat c(d_c);
EXPECT_MAT_NEAR(a, c, rows * cols * 1e-5);
}
}
TEST_P(Dft, R2CThenC2R)
GPU_TEST_P(Dft, R2CThenC2R)
{
int cols = randomInt(2, 100);
int rows = randomInt(2, 100);
@ -1036,8 +1058,11 @@ INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Dft, ALL_DEVICES);
///////////////////////////////////////////////////////////////////////////////////////////////////////
// CornerHarris
IMPLEMENT_PARAM_CLASS(BlockSize, int);
IMPLEMENT_PARAM_CLASS(ApertureSize, int);
namespace
{
IMPLEMENT_PARAM_CLASS(BlockSize, int);
IMPLEMENT_PARAM_CLASS(ApertureSize, int);
}
PARAM_TEST_CASE(CornerHarris, cv::gpu::DeviceInfo, MatType, BorderType, BlockSize, ApertureSize)
{
@ -1059,7 +1084,7 @@ PARAM_TEST_CASE(CornerHarris, cv::gpu::DeviceInfo, MatType, BorderType, BlockSiz
}
};
TEST_P(CornerHarris, Accuracy)
GPU_TEST_P(CornerHarris, Accuracy)
{
cv::Mat src = readImageType("stereobm/aloe-L.png", type);
ASSERT_FALSE(src.empty());
@ -1105,7 +1130,7 @@ PARAM_TEST_CASE(CornerMinEigen, cv::gpu::DeviceInfo, MatType, BorderType, BlockS
}
};
TEST_P(CornerMinEigen, Accuracy)
GPU_TEST_P(CornerMinEigen, Accuracy)
{
cv::Mat src = readImageType("stereobm/aloe-L.png", type);
ASSERT_FALSE(src.empty());
@ -1126,6 +1151,4 @@ INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CornerMinEigen, testing::Combine(
testing::Values(BlockSize(3), BlockSize(5), BlockSize(7)),
testing::Values(ApertureSize(0), ApertureSize(3), ApertureSize(5), ApertureSize(7))));
} // namespace
#endif // HAVE_CUDA
@ -43,8 +43,8 @@
#ifdef HAVE_CUDA
namespace {
namespace
{
struct GreedyLabeling
{
struct dot
@ -82,7 +82,7 @@ namespace {
int cc = -1;
int* dist_labels = (int*)labels.data;
int pitch = labels.step1();
int pitch = (int) labels.step1();
unsigned char* source = (unsigned char*)image.data;
int width = image.cols;
@ -166,7 +166,7 @@ struct Labeling : testing::TestWithParam<cv::gpu::DeviceInfo>
}
};
TEST_P(Labeling, ConnectedComponents)
GPU_TEST_P(Labeling, DISABLED_ConnectedComponents)
{
cv::Mat image;
cvtColor(loat_image(), image, CV_BGR2GRAY);
@ -186,11 +186,11 @@ TEST_P(Labeling, ConnectedComponents)
cv::gpu::connectivityMask(cv::gpu::GpuMat(image), mask, cv::Scalar::all(0), cv::Scalar::all(2));
ASSERT_NO_THROW(cv::gpu::labelComponents(mask, components));
cv::gpu::labelComponents(mask, components);
host.checkCorrectness(cv::Mat(components));
}
INSTANTIATE_TEST_CASE_P(ConnectedComponents, Labeling, ALL_DEVICES);
INSTANTIATE_TEST_CASE_P(GPU_ConnectedComponents, Labeling, ALL_DEVICES);
#endif // HAVE_CUDA
@ -41,11 +41,9 @@
#include "test_precomp.hpp"
#if defined HAVE_CUDA
OutputLevel nvidiaTestOutputLevel = OutputLevelNone;
#endif
#ifdef HAVE_CUDA
#if defined HAVE_CUDA && !defined(CUDA_DISABLER)
OutputLevel nvidiaTestOutputLevel = OutputLevelNone;
using namespace cvtest;
using namespace testing;
@ -69,77 +67,77 @@ struct NVidiaTest : TestWithParam<cv::gpu::DeviceInfo>
struct NPPST : NVidiaTest {};
struct NCV : NVidiaTest {};
//TEST_P(NPPST, Integral)
//{
// bool res = nvidia_NPPST_Integral_Image(path, nvidiaTestOutputLevel);
GPU_TEST_P(NPPST, Integral)
{
bool res = nvidia_NPPST_Integral_Image(_path, nvidiaTestOutputLevel);
// ASSERT_TRUE(res);
//}
ASSERT_TRUE(res);
}
TEST_P(NPPST, SquaredIntegral)
GPU_TEST_P(NPPST, SquaredIntegral)
{
bool res = nvidia_NPPST_Squared_Integral_Image(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
TEST_P(NPPST, RectStdDev)
GPU_TEST_P(NPPST, RectStdDev)
{
bool res = nvidia_NPPST_RectStdDev(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
TEST_P(NPPST, Resize)
GPU_TEST_P(NPPST, Resize)
{
bool res = nvidia_NPPST_Resize(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
TEST_P(NPPST, VectorOperations)
GPU_TEST_P(NPPST, VectorOperations)
{
bool res = nvidia_NPPST_Vector_Operations(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
TEST_P(NPPST, Transpose)
GPU_TEST_P(NPPST, Transpose)
{
bool res = nvidia_NPPST_Transpose(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
TEST_P(NCV, VectorOperations)
GPU_TEST_P(NCV, VectorOperations)
{
bool res = nvidia_NCV_Vector_Operations(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
TEST_P(NCV, HaarCascadeLoader)
GPU_TEST_P(NCV, HaarCascadeLoader)
{
bool res = nvidia_NCV_Haar_Cascade_Loader(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
TEST_P(NCV, HaarCascadeApplication)
GPU_TEST_P(NCV, HaarCascadeApplication)
{
bool res = nvidia_NCV_Haar_Cascade_Application(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
TEST_P(NCV, HypothesesFiltration)
GPU_TEST_P(NCV, HypothesesFiltration)
{
bool res = nvidia_NCV_Hypotheses_Filtration(_path, nvidiaTestOutputLevel);
ASSERT_TRUE(res);
}
TEST_P(NCV, Visualization)
GPU_TEST_P(NCV, Visualization)
{
// this functionality isn't used in the gpu module
bool res = nvidia_NCV_Visualization(_path, nvidiaTestOutputLevel);
@ -43,8 +43,6 @@
#ifdef HAVE_CUDA
namespace {
//#define DUMP
struct HOG : testing::TestWithParam<cv::gpu::DeviceInfo>, cv::gpu::HOGDescriptor
@ -176,7 +174,7 @@ struct HOG : testing::TestWithParam<cv::gpu::DeviceInfo>, cv::gpu::HOGDescriptor
};
// disabled while resize is not fixed
TEST_P(HOG, DISABLED_Detect)
GPU_TEST_P(HOG, Detect)
{
cv::Mat img_rgb = readImage("hog/road.png");
ASSERT_FALSE(img_rgb.empty());
@ -201,7 +199,7 @@ TEST_P(HOG, DISABLED_Detect)
f.close();
}
TEST_P(HOG, GetDescriptors)
GPU_TEST_P(HOG, GetDescriptors)
{
// Load image (e.g. train data, composed from windows)
cv::Mat img_rgb = readImage("hog/train_data.png");
@ -288,6 +286,7 @@ TEST_P(HOG, GetDescriptors)
INSTANTIATE_TEST_CASE_P(GPU_ObjDetect, HOG, ALL_DEVICES);
//============== caltech hog tests =====================//
struct CalTech : public ::testing::TestWithParam<std::tr1::tuple<cv::gpu::DeviceInfo, std::string> >
{
cv::gpu::DeviceInfo devInfo;
@ -303,7 +302,7 @@ struct CalTech : public ::testing::TestWithParam<std::tr1::tuple<cv::gpu::Device
}
};
TEST_P(CalTech, HOG)
GPU_TEST_P(CalTech, HOG)
{
cv::gpu::GpuMat d_img(img);
cv::Mat markedImage(img.clone());
@ -350,7 +349,7 @@ PARAM_TEST_CASE(LBP_Read_classifier, cv::gpu::DeviceInfo, int)
}
};
TEST_P(LBP_Read_classifier, Accuracy)
GPU_TEST_P(LBP_Read_classifier, Accuracy)
{
cv::gpu::CascadeClassifier_GPU classifier;
std::string classifierXmlPath = std::string(cvtest::TS::ptr()->get_data_path()) + "lbpcascade/lbpcascade_frontalface.xml";
@ -372,7 +371,7 @@ PARAM_TEST_CASE(LBP_classify, cv::gpu::DeviceInfo, int)
}
};
TEST_P(LBP_classify, Accuracy)
GPU_TEST_P(LBP_classify, Accuracy)
{
std::string classifierXmlPath = std::string(cvtest::TS::ptr()->get_data_path()) + "lbpcascade/lbpcascade_frontalface.xml";
std::string imagePath = std::string(cvtest::TS::ptr()->get_data_path()) + "lbpcascade/er.png";
@ -422,6 +421,4 @@ TEST_P(LBP_classify, Accuracy)
INSTANTIATE_TEST_CASE_P(GPU_ObjDetect, LBP_classify,
testing::Combine(ALL_DEVICES, testing::Values<int>(0)));
} // namespace
#endif // HAVE_CUDA
@ -0,0 +1,404 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"
#ifdef HAVE_CUDA
//////////////////////////////////////////////////////
// BroxOpticalFlow
//#define BROX_DUMP
struct BroxOpticalFlow : testing::TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(BroxOpticalFlow, Regression)
{
cv::Mat frame0 = readImageType("opticalflow/frame0.png", CV_32FC1);
ASSERT_FALSE(frame0.empty());
cv::Mat frame1 = readImageType("opticalflow/frame1.png", CV_32FC1);
ASSERT_FALSE(frame1.empty());
cv::gpu::BroxOpticalFlow brox(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/,
10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/);
cv::gpu::GpuMat u;
cv::gpu::GpuMat v;
brox(loadMat(frame0), loadMat(frame1), u, v);
std::string fname(cvtest::TS::ptr()->get_data_path());
if (devInfo.majorVersion() >= 2)
fname += "opticalflow/brox_optical_flow_cc20.bin";
else
fname += "opticalflow/brox_optical_flow.bin";
#ifndef BROX_DUMP
std::ifstream f(fname.c_str(), std::ios_base::binary);
int rows, cols;
f.read((char*) &rows, sizeof(rows));
f.read((char*) &cols, sizeof(cols));
cv::Mat u_gold(rows, cols, CV_32FC1);
for (int i = 0; i < u_gold.rows; ++i)
f.read(u_gold.ptr<char>(i), u_gold.cols * sizeof(float));
cv::Mat v_gold(rows, cols, CV_32FC1);
for (int i = 0; i < v_gold.rows; ++i)
f.read(v_gold.ptr<char>(i), v_gold.cols * sizeof(float));
EXPECT_MAT_NEAR(u_gold, u, 0);
EXPECT_MAT_NEAR(v_gold, v, 0);
#else
std::ofstream f(fname.c_str(), std::ios_base::binary);
f.write((char*) &u.rows, sizeof(u.rows));
f.write((char*) &u.cols, sizeof(u.cols));
cv::Mat h_u(u);
cv::Mat h_v(v);
for (int i = 0; i < u.rows; ++i)
f.write(h_u.ptr<char>(i), u.cols * sizeof(float));
for (int i = 0; i < v.rows; ++i)
f.write(h_v.ptr<char>(i), v.cols * sizeof(float));
#endif
}
GPU_TEST_P(BroxOpticalFlow, OpticalFlowNan)
{
cv::Mat frame0 = readImageType("opticalflow/frame0.png", CV_32FC1);
ASSERT_FALSE(frame0.empty());
cv::Mat frame1 = readImageType("opticalflow/frame1.png", CV_32FC1);
ASSERT_FALSE(frame1.empty());
cv::Mat r_frame0, r_frame1;
cv::resize(frame0, r_frame0, cv::Size(1380,1000));
cv::resize(frame1, r_frame1, cv::Size(1380,1000));
cv::gpu::BroxOpticalFlow brox(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/,
5 /*inner_iterations*/, 150 /*outer_iterations*/, 10 /*solver_iterations*/);
cv::gpu::GpuMat u;
cv::gpu::GpuMat v;
brox(loadMat(r_frame0), loadMat(r_frame1), u, v);
cv::Mat h_u, h_v;
u.download(h_u);
v.download(h_v);
EXPECT_TRUE(cv::checkRange(h_u));
EXPECT_TRUE(cv::checkRange(h_v));
};
INSTANTIATE_TEST_CASE_P(GPU_Video, BroxOpticalFlow, ALL_DEVICES);
//////////////////////////////////////////////////////
// GoodFeaturesToTrack
namespace
{
IMPLEMENT_PARAM_CLASS(MinDistance, double)
}
PARAM_TEST_CASE(GoodFeaturesToTrack, cv::gpu::DeviceInfo, MinDistance)
{
cv::gpu::DeviceInfo devInfo;
double minDistance;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
minDistance = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(GoodFeaturesToTrack, Accuracy)
{
cv::Mat image = readImage("opticalflow/frame0.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
int maxCorners = 1000;
double qualityLevel = 0.01;
cv::gpu::GoodFeaturesToTrackDetector_GPU detector(maxCorners, qualityLevel, minDistance);
cv::gpu::GpuMat d_pts;
detector(loadMat(image), d_pts);
ASSERT_FALSE(d_pts.empty());
std::vector<cv::Point2f> pts(d_pts.cols);
cv::Mat pts_mat(1, d_pts.cols, CV_32FC2, (void*) &pts[0]);
d_pts.download(pts_mat);
std::vector<cv::Point2f> pts_gold;
cv::goodFeaturesToTrack(image, pts_gold, maxCorners, qualityLevel, minDistance);
ASSERT_EQ(pts_gold.size(), pts.size());
size_t mistmatch = 0;
for (size_t i = 0; i < pts.size(); ++i)
{
cv::Point2i a = pts_gold[i];
cv::Point2i b = pts[i];
bool eq = std::abs(a.x - b.x) < 1 && std::abs(a.y - b.y) < 1;
if (!eq)
++mistmatch;
}
double bad_ratio = static_cast<double>(mistmatch) / pts.size();
ASSERT_LE(bad_ratio, 0.01);
}
GPU_TEST_P(GoodFeaturesToTrack, EmptyCorners)
{
int maxCorners = 1000;
double qualityLevel = 0.01;
cv::gpu::GoodFeaturesToTrackDetector_GPU detector(maxCorners, qualityLevel, minDistance);
cv::gpu::GpuMat src(100, 100, CV_8UC1, cv::Scalar::all(0));
cv::gpu::GpuMat corners(1, maxCorners, CV_32FC2);
detector(src, corners);
ASSERT_TRUE(corners.empty());
}
INSTANTIATE_TEST_CASE_P(GPU_Video, GoodFeaturesToTrack, testing::Combine(
ALL_DEVICES,
testing::Values(MinDistance(0.0), MinDistance(3.0))));
//////////////////////////////////////////////////////
// PyrLKOpticalFlow
namespace
{
IMPLEMENT_PARAM_CLASS(UseGray, bool)
}
PARAM_TEST_CASE(PyrLKOpticalFlow, cv::gpu::DeviceInfo, UseGray)
{
cv::gpu::DeviceInfo devInfo;
bool useGray;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
useGray = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(PyrLKOpticalFlow, Sparse)
{
cv::Mat frame0 = readImage("opticalflow/frame0.png", useGray ? cv::IMREAD_GRAYSCALE : cv::IMREAD_COLOR);
ASSERT_FALSE(frame0.empty());
cv::Mat frame1 = readImage("opticalflow/frame1.png", useGray ? cv::IMREAD_GRAYSCALE : cv::IMREAD_COLOR);
ASSERT_FALSE(frame1.empty());
cv::Mat gray_frame;
if (useGray)
gray_frame = frame0;
else
cv::cvtColor(frame0, gray_frame, cv::COLOR_BGR2GRAY);
std::vector<cv::Point2f> pts;
cv::goodFeaturesToTrack(gray_frame, pts, 1000, 0.01, 0.0);
cv::gpu::GpuMat d_pts;
cv::Mat pts_mat(1, (int) pts.size(), CV_32FC2, (void*) &pts[0]);
d_pts.upload(pts_mat);
cv::gpu::PyrLKOpticalFlow pyrLK;
cv::gpu::GpuMat d_nextPts;
cv::gpu::GpuMat d_status;
pyrLK.sparse(loadMat(frame0), loadMat(frame1), d_pts, d_nextPts, d_status);
std::vector<cv::Point2f> nextPts(d_nextPts.cols);
cv::Mat nextPts_mat(1, d_nextPts.cols, CV_32FC2, (void*) &nextPts[0]);
d_nextPts.download(nextPts_mat);
std::vector<unsigned char> status(d_status.cols);
cv::Mat status_mat(1, d_status.cols, CV_8UC1, (void*) &status[0]);
d_status.download(status_mat);
std::vector<cv::Point2f> nextPts_gold;
std::vector<unsigned char> status_gold;
cv::calcOpticalFlowPyrLK(frame0, frame1, pts, nextPts_gold, status_gold, cv::noArray());
ASSERT_EQ(nextPts_gold.size(), nextPts.size());
ASSERT_EQ(status_gold.size(), status.size());
size_t mistmatch = 0;
for (size_t i = 0; i < nextPts.size(); ++i)
{
cv::Point2i a = nextPts[i];
cv::Point2i b = nextPts_gold[i];
if (status[i] != status_gold[i])
{
++mistmatch;
continue;
}
if (status[i])
{
bool eq = std::abs(a.x - b.x) <= 1 && std::abs(a.y - b.y) <= 1;
if (!eq)
++mistmatch;
}
}
double bad_ratio = static_cast<double>(mistmatch) / nextPts.size();
ASSERT_LE(bad_ratio, 0.01);
}
INSTANTIATE_TEST_CASE_P(GPU_Video, PyrLKOpticalFlow, testing::Combine(
ALL_DEVICES,
testing::Values(UseGray(true), UseGray(false))));
//////////////////////////////////////////////////////
// FarnebackOpticalFlow
namespace
{
IMPLEMENT_PARAM_CLASS(PyrScale, double)
IMPLEMENT_PARAM_CLASS(PolyN, int)
CV_FLAGS(FarnebackOptFlowFlags, 0, cv::OPTFLOW_FARNEBACK_GAUSSIAN)
IMPLEMENT_PARAM_CLASS(UseInitFlow, bool)
}
PARAM_TEST_CASE(FarnebackOpticalFlow, cv::gpu::DeviceInfo, PyrScale, PolyN, FarnebackOptFlowFlags, UseInitFlow)
{
cv::gpu::DeviceInfo devInfo;
double pyrScale;
int polyN;
int flags;
bool useInitFlow;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
pyrScale = GET_PARAM(1);
polyN = GET_PARAM(2);
flags = GET_PARAM(3);
useInitFlow = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
}
};
GPU_TEST_P(FarnebackOpticalFlow, Accuracy)
{
cv::Mat frame0 = readImage("opticalflow/rubberwhale1.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty());
cv::Mat frame1 = readImage("opticalflow/rubberwhale2.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
double polySigma = polyN <= 5 ? 1.1 : 1.5;
cv::gpu::FarnebackOpticalFlow farn;
farn.pyrScale = pyrScale;
farn.polyN = polyN;
farn.polySigma = polySigma;
farn.flags = flags;
cv::gpu::GpuMat d_flowx, d_flowy;
farn(loadMat(frame0), loadMat(frame1), d_flowx, d_flowy);
cv::Mat flow;
if (useInitFlow)
{
cv::Mat flowxy[] = {cv::Mat(d_flowx), cv::Mat(d_flowy)};
cv::merge(flowxy, 2, flow);
farn.flags |= cv::OPTFLOW_USE_INITIAL_FLOW;
farn(loadMat(frame0), loadMat(frame1), d_flowx, d_flowy);
}
cv::calcOpticalFlowFarneback(
frame0, frame1, flow, farn.pyrScale, farn.numLevels, farn.winSize,
farn.numIters, farn.polyN, farn.polySigma, farn.flags);
std::vector<cv::Mat> flowxy;
cv::split(flow, flowxy);
EXPECT_MAT_SIMILAR(flowxy[0], d_flowx, 0.1);
EXPECT_MAT_SIMILAR(flowxy[1], d_flowy, 0.1);
}
INSTANTIATE_TEST_CASE_P(GPU_Video, FarnebackOpticalFlow, testing::Combine(
ALL_DEVICES,
testing::Values(PyrScale(0.3), PyrScale(0.5), PyrScale(0.8)),
testing::Values(PolyN(5), PolyN(7)),
testing::Values(FarnebackOptFlowFlags(0), FarnebackOptFlowFlags(cv::OPTFLOW_FARNEBACK_GAUSSIAN)),
testing::Values(UseInitFlow(false), UseInitFlow(true))));
#endif // HAVE_CUDA
@ -51,6 +51,7 @@
#define __OPENCV_TEST_PRECOMP_HPP__
#include <cmath>
#include <ctime>
#include <cstdio>
#include <iostream>
#include <fstream>
@ -64,7 +64,7 @@ PARAM_TEST_CASE(PyrDown, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
}
};
TEST_P(PyrDown, Accuracy)
GPU_TEST_P(PyrDown, Accuracy)
{
cv::Mat src = randomMat(size, type);
@ -104,7 +104,7 @@ PARAM_TEST_CASE(PyrUp, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
}
};
TEST_P(PyrUp, Accuracy)
GPU_TEST_P(PyrUp, Accuracy)
{
cv::Mat src = randomMat(size, type);
@ -152,7 +152,7 @@ PARAM_TEST_CASE(Remap, cv::gpu::DeviceInfo, cv::Size, MatType, Interpolation, Bo
}
};
TEST_P(Remap, Accuracy)
GPU_TEST_P(Remap, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Scalar val = randomScalar(0.0, 255.0);
@ -136,7 +136,7 @@ PARAM_TEST_CASE(Resize, cv::gpu::DeviceInfo, cv::Size, MatType, double, Interpol
}
};
TEST_P(Resize, Accuracy)
GPU_TEST_P(Resize, Accuracy)
{
cv::Mat src = randomMat(size, type);
@ -157,8 +157,8 @@ INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Resize, testing::Combine(
testing::Values(Interpolation(cv::INTER_NEAREST), Interpolation(cv::INTER_LINEAR), Interpolation(cv::INTER_CUBIC)),
WHOLE_SUBMAT));
/////////////////
PARAM_TEST_CASE(ResizeSameAsHost, cv::gpu::DeviceInfo, cv::Size, MatType, double, Interpolation, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
@ -182,7 +182,7 @@ PARAM_TEST_CASE(ResizeSameAsHost, cv::gpu::DeviceInfo, cv::Size, MatType, double
};
// downscaling only: used for classifiers
TEST_P(ResizeSameAsHost, Accuracy)
GPU_TEST_P(ResizeSameAsHost, Accuracy)
{
cv::Mat src = randomMat(size, type);
@ -224,7 +224,7 @@ PARAM_TEST_CASE(ResizeNPP, cv::gpu::DeviceInfo, MatType, double, Interpolation)
}
};
TEST_P(ResizeNPP, Accuracy)
GPU_TEST_P(ResizeNPP, Accuracy)
{
cv::Mat src = readImageType("stereobp/aloe-L.png", type);
ASSERT_FALSE(src.empty());
@ -66,7 +66,7 @@ PARAM_TEST_CASE(Threshold, cv::gpu::DeviceInfo, cv::Size, MatType, ThreshOp, Use
}
};
TEST_P(Threshold, Accuracy)
GPU_TEST_P(Threshold, Accuracy)
{
cv::Mat src = randomMat(size, type);
double maxVal = randomDouble(20.0, 127.0);
@ -41,739 +41,47 @@
#include "test_precomp.hpp"
#ifdef HAVE_CUDA
//#define DUMP
//////////////////////////////////////////////////////
// BroxOpticalFlow
#define BROX_OPTICAL_FLOW_DUMP_FILE "opticalflow/brox_optical_flow.bin"
#define BROX_OPTICAL_FLOW_DUMP_FILE_CC20 "opticalflow/brox_optical_flow_cc20.bin"
struct BroxOpticalFlow : testing::TestWithParam<cv::gpu::DeviceInfo>
{
cv::gpu::DeviceInfo devInfo;
virtual void SetUp()
{
devInfo = GetParam();
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(BroxOpticalFlow, Regression)
{
cv::Mat frame0 = readImageType("opticalflow/frame0.png", CV_32FC1);
ASSERT_FALSE(frame0.empty());
cv::Mat frame1 = readImageType("opticalflow/frame1.png", CV_32FC1);
ASSERT_FALSE(frame1.empty());
cv::gpu::BroxOpticalFlow brox(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/,
10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/);
cv::gpu::GpuMat u;
cv::gpu::GpuMat v;
brox(loadMat(frame0), loadMat(frame1), u, v);
#ifndef DUMP
std::string fname(cvtest::TS::ptr()->get_data_path());
if (devInfo.majorVersion() >= 2)
fname += BROX_OPTICAL_FLOW_DUMP_FILE_CC20;
else
fname += BROX_OPTICAL_FLOW_DUMP_FILE;
std::ifstream f(fname.c_str(), std::ios_base::binary);
int rows, cols;
f.read((char*)&rows, sizeof(rows));
f.read((char*)&cols, sizeof(cols));
cv::Mat u_gold(rows, cols, CV_32FC1);
for (int i = 0; i < u_gold.rows; ++i)
f.read(u_gold.ptr<char>(i), u_gold.cols * sizeof(float));
cv::Mat v_gold(rows, cols, CV_32FC1);
for (int i = 0; i < v_gold.rows; ++i)
f.read(v_gold.ptr<char>(i), v_gold.cols * sizeof(float));
EXPECT_MAT_NEAR(u_gold, u, 0);
EXPECT_MAT_NEAR(v_gold, v, 0);
#else
std::string fname(cvtest::TS::ptr()->get_data_path());
if (devInfo.majorVersion() >= 2)
fname += BROX_OPTICAL_FLOW_DUMP_FILE_CC20;
else
fname += BROX_OPTICAL_FLOW_DUMP_FILE;
std::ofstream f(fname.c_str(), std::ios_base::binary);
f.write((char*)&u.rows, sizeof(u.rows));
f.write((char*)&u.cols, sizeof(u.cols));
cv::Mat h_u(u);
cv::Mat h_v(v);
for (int i = 0; i < u.rows; ++i)
f.write(h_u.ptr<char>(i), u.cols * sizeof(float));
for (int i = 0; i < v.rows; ++i)
f.write(h_v.ptr<char>(i), v.cols * sizeof(float));
#endif
}
INSTANTIATE_TEST_CASE_P(GPU_Video, BroxOpticalFlow, ALL_DEVICES);
//////////////////////////////////////////////////////
// GoodFeaturesToTrack
IMPLEMENT_PARAM_CLASS(MinDistance, double)
PARAM_TEST_CASE(GoodFeaturesToTrack, cv::gpu::DeviceInfo, MinDistance)
{
cv::gpu::DeviceInfo devInfo;
double minDistance;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
minDistance = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(GoodFeaturesToTrack, Accuracy)
{
cv::Mat image = readImage("opticalflow/frame0.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(image.empty());
int maxCorners = 1000;
double qualityLevel = 0.01;
cv::gpu::GoodFeaturesToTrackDetector_GPU detector(maxCorners, qualityLevel, minDistance);
if (!supportFeature(devInfo, cv::gpu::GLOBAL_ATOMICS))
{
try
{
cv::gpu::GpuMat d_pts;
detector(loadMat(image), d_pts);
}
catch (const cv::Exception& e)
{
ASSERT_EQ(CV_StsNotImplemented, e.code);
}
}
else
{
cv::gpu::GpuMat d_pts;
detector(loadMat(image), d_pts);
std::vector<cv::Point2f> pts(d_pts.cols);
cv::Mat pts_mat(1, d_pts.cols, CV_32FC2, (void*)&pts[0]);
d_pts.download(pts_mat);
std::vector<cv::Point2f> pts_gold;
cv::goodFeaturesToTrack(image, pts_gold, maxCorners, qualityLevel, minDistance);
ASSERT_EQ(pts_gold.size(), pts.size());
size_t mistmatch = 0;
for (size_t i = 0; i < pts.size(); ++i)
{
cv::Point2i a = pts_gold[i];
cv::Point2i b = pts[i];
bool eq = std::abs(a.x - b.x) < 1 && std::abs(a.y - b.y) < 1;
if (!eq)
++mistmatch;
}
double bad_ratio = static_cast<double>(mistmatch) / pts.size();
ASSERT_LE(bad_ratio, 0.01);
}
}
TEST_P(GoodFeaturesToTrack, EmptyCorners)
{
int maxCorners = 1000;
double qualityLevel = 0.01;
cv::gpu::GoodFeaturesToTrackDetector_GPU detector(maxCorners, qualityLevel, minDistance);
cv::gpu::GpuMat src(100, 100, CV_8UC1, cv::Scalar::all(0));
cv::gpu::GpuMat corners(1, maxCorners, CV_32FC2);
detector(src, corners);
ASSERT_TRUE( corners.empty() );
}
INSTANTIATE_TEST_CASE_P(GPU_Video, GoodFeaturesToTrack, testing::Combine(
ALL_DEVICES,
testing::Values(MinDistance(0.0), MinDistance(3.0))));
//////////////////////////////////////////////////////
// PyrLKOpticalFlow
IMPLEMENT_PARAM_CLASS(UseGray, bool)
PARAM_TEST_CASE(PyrLKOpticalFlow, cv::gpu::DeviceInfo, UseGray)
{
cv::gpu::DeviceInfo devInfo;
bool useGray;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
useGray = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(PyrLKOpticalFlow, Sparse)
{
cv::Mat frame0 = readImage("opticalflow/frame0.png", useGray ? cv::IMREAD_GRAYSCALE : cv::IMREAD_COLOR);
ASSERT_FALSE(frame0.empty());
cv::Mat frame1 = readImage("opticalflow/frame1.png", useGray ? cv::IMREAD_GRAYSCALE : cv::IMREAD_COLOR);
ASSERT_FALSE(frame1.empty());
cv::Mat gray_frame;
if (useGray)
gray_frame = frame0;
else
cv::cvtColor(frame0, gray_frame, cv::COLOR_BGR2GRAY);
std::vector<cv::Point2f> pts;
cv::goodFeaturesToTrack(gray_frame, pts, 1000, 0.01, 0.0);
cv::gpu::GpuMat d_pts;
cv::Mat pts_mat(1, (int)pts.size(), CV_32FC2, (void*)&pts[0]);
d_pts.upload(pts_mat);
cv::gpu::PyrLKOpticalFlow pyrLK;
cv::gpu::GpuMat d_nextPts;
cv::gpu::GpuMat d_status;
pyrLK.sparse(loadMat(frame0), loadMat(frame1), d_pts, d_nextPts, d_status);
std::vector<cv::Point2f> nextPts(d_nextPts.cols);
cv::Mat nextPts_mat(1, d_nextPts.cols, CV_32FC2, (void*)&nextPts[0]);
d_nextPts.download(nextPts_mat);
std::vector<unsigned char> status(d_status.cols);
cv::Mat status_mat(1, d_status.cols, CV_8UC1, (void*)&status[0]);
d_status.download(status_mat);
std::vector<cv::Point2f> nextPts_gold;
std::vector<unsigned char> status_gold;
cv::calcOpticalFlowPyrLK(frame0, frame1, pts, nextPts_gold, status_gold, cv::noArray());
ASSERT_EQ(nextPts_gold.size(), nextPts.size());
ASSERT_EQ(status_gold.size(), status.size());
size_t mistmatch = 0;
for (size_t i = 0; i < nextPts.size(); ++i)
{
cv::Point2i a = nextPts[i];
cv::Point2i b = nextPts_gold[i];
if (status[i] != status_gold[i])
{
++mistmatch;
continue;
}
if (status[i])
{
bool eq = std::abs(a.x - b.x) <= 1 && std::abs(a.y - b.y) <= 1;
if (!eq)
++mistmatch;
}
}
double bad_ratio = static_cast<double>(mistmatch) / nextPts.size();
ASSERT_LE(bad_ratio, 0.01);
}
INSTANTIATE_TEST_CASE_P(GPU_Video, PyrLKOpticalFlow, testing::Combine(
ALL_DEVICES,
testing::Values(UseGray(true), UseGray(false))));
#if defined(HAVE_CUDA) && defined(HAVE_NVCUVID)
//////////////////////////////////////////////////////
// FarnebackOpticalFlow
IMPLEMENT_PARAM_CLASS(PyrScale, double)
IMPLEMENT_PARAM_CLASS(PolyN, int)
CV_FLAGS(FarnebackOptFlowFlags, 0, cv::OPTFLOW_FARNEBACK_GAUSSIAN)
IMPLEMENT_PARAM_CLASS(UseInitFlow, bool)
PARAM_TEST_CASE(FarnebackOpticalFlow, cv::gpu::DeviceInfo, PyrScale, PolyN, FarnebackOptFlowFlags, UseInitFlow)
{
cv::gpu::DeviceInfo devInfo;
double pyrScale;
int polyN;
int flags;
bool useInitFlow;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
pyrScale = GET_PARAM(1);
polyN = GET_PARAM(2);
flags = GET_PARAM(3);
useInitFlow = GET_PARAM(4);
cv::gpu::setDevice(devInfo.deviceID());
}
};
TEST_P(FarnebackOpticalFlow, Accuracy)
{
cv::Mat frame0 = readImage("opticalflow/rubberwhale1.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty());
cv::Mat frame1 = readImage("opticalflow/rubberwhale2.png", cv::IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
double polySigma = polyN <= 5 ? 1.1 : 1.5;
cv::gpu::FarnebackOpticalFlow calc;
calc.pyrScale = pyrScale;
calc.polyN = polyN;
calc.polySigma = polySigma;
calc.flags = flags;
cv::gpu::GpuMat d_flowx, d_flowy;
calc(loadMat(frame0), loadMat(frame1), d_flowx, d_flowy);
cv::Mat flow;
if (useInitFlow)
{
cv::Mat flowxy[] = {cv::Mat(d_flowx), cv::Mat(d_flowy)};
cv::merge(flowxy, 2, flow);
}
if (useInitFlow)
{
calc.flags |= cv::OPTFLOW_USE_INITIAL_FLOW;
calc(loadMat(frame0), loadMat(frame1), d_flowx, d_flowy);
}
cv::calcOpticalFlowFarneback(
frame0, frame1, flow, calc.pyrScale, calc.numLevels, calc.winSize,
calc.numIters, calc.polyN, calc.polySigma, calc.flags);
std::vector<cv::Mat> flowxy;
cv::split(flow, flowxy);
EXPECT_MAT_SIMILAR(flowxy[0], d_flowx, 0.1);
EXPECT_MAT_SIMILAR(flowxy[1], d_flowy, 0.1);
}
INSTANTIATE_TEST_CASE_P(GPU_Video, FarnebackOpticalFlow, testing::Combine(
ALL_DEVICES,
testing::Values(PyrScale(0.3), PyrScale(0.5), PyrScale(0.8)),
testing::Values(PolyN(5), PolyN(7)),
testing::Values(FarnebackOptFlowFlags(0), FarnebackOptFlowFlags(cv::OPTFLOW_FARNEBACK_GAUSSIAN)),
testing::Values(UseInitFlow(false), UseInitFlow(true))));
struct OpticalFlowNan : public BroxOpticalFlow {};
TEST_P(OpticalFlowNan, Regression)
{
cv::Mat frame0 = readImageType("opticalflow/frame0.png", CV_32FC1);
ASSERT_FALSE(frame0.empty());
cv::Mat r_frame0, r_frame1;
cv::resize(frame0, r_frame0, cv::Size(1380,1000));
cv::Mat frame1 = readImageType("opticalflow/frame1.png", CV_32FC1);
ASSERT_FALSE(frame1.empty());
cv::resize(frame1, r_frame1, cv::Size(1380,1000));
cv::gpu::BroxOpticalFlow brox(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/,
5 /*inner_iterations*/, 150 /*outer_iterations*/, 10 /*solver_iterations*/);
cv::gpu::GpuMat u;
cv::gpu::GpuMat v;
brox(loadMat(r_frame0), loadMat(r_frame1), u, v);
cv::Mat h_u, h_v;
u.download(h_u);
v.download(h_v);
EXPECT_TRUE(cv::checkRange(h_u));
EXPECT_TRUE(cv::checkRange(h_v));
};
INSTANTIATE_TEST_CASE_P(GPU_Video, OpticalFlowNan, ALL_DEVICES);
//////////////////////////////////////////////////////
// FGDStatModel
namespace cv
{
template<> void Ptr<CvBGStatModel>::delete_obj()
{
cvReleaseBGStatModel(&obj);
}
}
PARAM_TEST_CASE(FGDStatModel, cv::gpu::DeviceInfo, std::string, Channels)
{
cv::gpu::DeviceInfo devInfo;
std::string inputFile;
int out_cn;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + GET_PARAM(1);
out_cn = GET_PARAM(2);
}
};
TEST_P(FGDStatModel, Update)
{
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
IplImage ipl_frame = frame;
cv::Ptr<CvBGStatModel> model(cvCreateFGDStatModel(&ipl_frame));
cv::gpu::GpuMat d_frame(frame);
cv::gpu::FGDStatModel d_model(out_cn);
d_model.create(d_frame);
cv::Mat h_background;
cv::Mat h_foreground;
cv::Mat h_background3;
cv::Mat backgroundDiff;
cv::Mat foregroundDiff;
for (int i = 0; i < 5; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
ipl_frame = frame;
int gold_count = cvUpdateBGStatModel(&ipl_frame, model);
d_frame.upload(frame);
int count = d_model.update(d_frame);
ASSERT_EQ(gold_count, count);
cv::Mat gold_background(model->background);
cv::Mat gold_foreground(model->foreground);
if (out_cn == 3)
d_model.background.download(h_background3);
else
{
d_model.background.download(h_background);
cv::cvtColor(h_background, h_background3, cv::COLOR_BGRA2BGR);
}
d_model.foreground.download(h_foreground);
ASSERT_MAT_NEAR(gold_background, h_background3, 1.0);
ASSERT_MAT_NEAR(gold_foreground, h_foreground, 0.0);
}
}
INSTANTIATE_TEST_CASE_P(GPU_Video, FGDStatModel, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi")),
testing::Values(Channels(3), Channels(4))));
//////////////////////////////////////////////////////
// MOG
IMPLEMENT_PARAM_CLASS(LearningRate, double)
// VideoReader
PARAM_TEST_CASE(MOG, cv::gpu::DeviceInfo, std::string, UseGray, LearningRate, UseRoi)
PARAM_TEST_CASE(VideoReader, cv::gpu::DeviceInfo, std::string)
{
cv::gpu::DeviceInfo devInfo;
std::string inputFile;
bool useGray;
double learningRate;
bool useRoi;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + GET_PARAM(1);
useGray = GET_PARAM(2);
learningRate = GET_PARAM(3);
useRoi = GET_PARAM(4);
}
};
TEST_P(MOG, Update)
{
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
cv::gpu::MOG_GPU mog;
cv::gpu::GpuMat foreground = createMat(frame.size(), CV_8UC1, useRoi);
cv::BackgroundSubtractorMOG mog_gold;
cv::Mat foreground_gold;
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (useGray)
{
cv::Mat temp;
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
cv::swap(temp, frame);
}
mog(loadMat(frame, useRoi), foreground, (float)learningRate);
mog_gold(frame, foreground_gold, learningRate);
ASSERT_MAT_NEAR(foreground_gold, foreground, 0.0);
}
}
INSTANTIATE_TEST_CASE_P(GPU_Video, MOG, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi")),
testing::Values(UseGray(true), UseGray(false)),
testing::Values(LearningRate(0.0), LearningRate(0.01)),
WHOLE_SUBMAT));
//////////////////////////////////////////////////////
// MOG2
PARAM_TEST_CASE(MOG2, cv::gpu::DeviceInfo, std::string, UseGray, UseRoi)
{
cv::gpu::DeviceInfo devInfo;
std::string inputFile;
bool useGray;
bool useRoi;
inputFile = GET_PARAM(1);
virtual void SetUp()
{
devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + GET_PARAM(1);
useGray = GET_PARAM(2);
useRoi = GET_PARAM(3);
inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + inputFile;
}
};
TEST_P(MOG2, Update)
{
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
cv::gpu::MOG2_GPU mog2;
cv::gpu::GpuMat foreground = createMat(frame.size(), CV_8UC1, useRoi);
cv::BackgroundSubtractorMOG2 mog2_gold;
cv::Mat foreground_gold;
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
if (useGray)
{
cv::Mat temp;
cv::cvtColor(frame, temp, cv::COLOR_BGR2GRAY);
cv::swap(temp, frame);
}
mog2(loadMat(frame, useRoi), foreground);
mog2_gold(frame, foreground_gold);
double norm = cv::norm(foreground_gold, cv::Mat(foreground), cv::NORM_L1);
norm /= foreground_gold.size().area();
ASSERT_LE(norm, 0.09);
}
}
TEST_P(MOG2, getBackgroundImage)
GPU_TEST_P(VideoReader, Regression)
{
if (useGray)
return;
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cv::gpu::MOG2_GPU mog2;
cv::gpu::GpuMat foreground;
cv::gpu::VideoReader_GPU reader(inputFile);
ASSERT_TRUE(reader.isOpened());
cv::BackgroundSubtractorMOG2 mog2_gold;
cv::Mat foreground_gold;
cv::gpu::GpuMat frame;
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_TRUE(reader.read(frame));
ASSERT_FALSE(frame.empty());
mog2(loadMat(frame, useRoi), foreground);
mog2_gold(frame, foreground_gold);
}
cv::gpu::GpuMat background = createMat(frame.size(), frame.type(), useRoi);
mog2.getBackgroundImage(background);
cv::Mat background_gold;
mog2_gold.getBackgroundImage(background_gold);
ASSERT_MAT_NEAR(background_gold, background, 0);
}
INSTANTIATE_TEST_CASE_P(GPU_Video, MOG2, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi")),
testing::Values(UseGray(true), UseGray(false)),
WHOLE_SUBMAT));
//////////////////////////////////////////////////////
// VIBE
PARAM_TEST_CASE(VIBE, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
{
};
TEST_P(VIBE, Accuracy)
{
const cv::gpu::DeviceInfo devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
const cv::Size size = GET_PARAM(1);
const int type = GET_PARAM(2);
const bool useRoi = GET_PARAM(3);
const cv::Mat fullfg(size, CV_8UC1, cv::Scalar::all(255));
cv::Mat frame = randomMat(size, type, 0.0, 100);
cv::gpu::GpuMat d_frame = loadMat(frame, useRoi);
cv::gpu::VIBE_GPU vibe;
cv::gpu::GpuMat d_fgmask = createMat(size, CV_8UC1, useRoi);
vibe.initialize(d_frame);
for (int i = 0; i < 20; ++i)
vibe(d_frame, d_fgmask);
frame = randomMat(size, type, 160, 255);
d_frame = loadMat(frame, useRoi);
vibe(d_frame, d_fgmask);
// now fgmask should be entirely foreground
ASSERT_MAT_NEAR(fullfg, d_fgmask, 0);
}
INSTANTIATE_TEST_CASE_P(GPU_Video, VIBE, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4)),
WHOLE_SUBMAT));
//////////////////////////////////////////////////////
// GMG
PARAM_TEST_CASE(GMG, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels, UseRoi)
{
};
TEST_P(GMG, Accuracy)
{
const cv::gpu::DeviceInfo devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
const cv::Size size = GET_PARAM(1);
const int depth = GET_PARAM(2);
const int channels = GET_PARAM(3);
const bool useRoi = GET_PARAM(4);
const int type = CV_MAKE_TYPE(depth, channels);
const cv::Mat zeros(size, CV_8UC1, cv::Scalar::all(0));
const cv::Mat fullfg(size, CV_8UC1, cv::Scalar::all(255));
cv::Mat frame = randomMat(size, type, 0, 100);
cv::gpu::GpuMat d_frame = loadMat(frame, useRoi);
cv::gpu::GMG_GPU gmg;
gmg.numInitializationFrames = 5;
gmg.smoothingRadius = 0;
gmg.initialize(d_frame.size(), 0, 255);
cv::gpu::GpuMat d_fgmask = createMat(size, CV_8UC1, useRoi);
for (int i = 0; i < gmg.numInitializationFrames; ++i)
{
gmg(d_frame, d_fgmask);
// fgmask should be entirely background during training
ASSERT_MAT_NEAR(zeros, d_fgmask, 0);
}
frame = randomMat(size, type, 160, 255);
d_frame = loadMat(frame, useRoi);
gmg(d_frame, d_fgmask);
// now fgmask should be entirely foreground
ASSERT_MAT_NEAR(fullfg, d_fgmask, 0);
reader.close();
ASSERT_FALSE(reader.isOpened());
}
INSTANTIATE_TEST_CASE_P(GPU_Video, GMG, testing::Combine(
INSTANTIATE_TEST_CASE_P(GPU_Video, VideoReader, testing::Combine(
ALL_DEVICES,
DIFFERENT_SIZES,
testing::Values(MatType(CV_8U), MatType(CV_16U), MatType(CV_32F)),
testing::Values(Channels(1), Channels(3), Channels(4)),
WHOLE_SUBMAT));
testing::Values(std::string("768x576.avi"), std::string("1920x1080.avi"))));
//////////////////////////////////////////////////////
// VideoWriter
@ -785,8 +93,6 @@ PARAM_TEST_CASE(VideoWriter, cv::gpu::DeviceInfo, std::string)
cv::gpu::DeviceInfo devInfo;
std::string inputFile;
std::string outputFile;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
@ -794,17 +100,17 @@ PARAM_TEST_CASE(VideoWriter, cv::gpu::DeviceInfo, std::string)
cv::gpu::setDevice(devInfo.deviceID());
inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + inputFile;
outputFile = cv::tempfile(".avi");
inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + std::string("video/") + inputFile;
}
};
TEST_P(VideoWriter, Regression)
GPU_TEST_P(VideoWriter, Regression)
{
std::string outputFile = cv::tempfile(".avi");
const double FPS = 25.0;
cv::VideoCapture reader(inputFile);
ASSERT_TRUE( reader.isOpened() );
ASSERT_TRUE(reader.isOpened());
cv::gpu::VideoWriter_GPU d_writer;
@ -828,12 +134,12 @@ TEST_P(VideoWriter, Regression)
d_writer.close();
reader.open(outputFile);
ASSERT_TRUE( reader.isOpened() );
ASSERT_TRUE(reader.isOpened());
for (int i = 0; i < 5; ++i)
{
reader >> frame;
ASSERT_FALSE( frame.empty() );
ASSERT_FALSE(frame.empty());
}
}
@ -843,44 +149,4 @@ INSTANTIATE_TEST_CASE_P(GPU_Video, VideoWriter, testing::Combine(
#endif // WIN32
//////////////////////////////////////////////////////
// VideoReader
PARAM_TEST_CASE(VideoReader, cv::gpu::DeviceInfo, std::string)
{
cv::gpu::DeviceInfo devInfo;
std::string inputFile;
virtual void SetUp()
{
devInfo = GET_PARAM(0);
inputFile = GET_PARAM(1);
cv::gpu::setDevice(devInfo.deviceID());
inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + inputFile;
}
};
TEST_P(VideoReader, Regression)
{
cv::gpu::VideoReader_GPU reader(inputFile);
ASSERT_TRUE( reader.isOpened() );
cv::gpu::GpuMat frame;
for (int i = 0; i < 10; ++i)
{
ASSERT_TRUE( reader.read(frame) );
ASSERT_FALSE( frame.empty() );
}
reader.close();
ASSERT_FALSE( reader.isOpened() );
}
INSTANTIATE_TEST_CASE_P(GPU_Video, VideoReader, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi"), std::string("1920x1080.avi"))));
#endif // HAVE_CUDA
#endif // defined(HAVE_CUDA) && defined(HAVE_NVCUVID)
@ -48,6 +48,7 @@ namespace
cv::Mat createTransfomMatrix(cv::Size srcSize, double angle)
{
cv::Mat M(2, 3, CV_64FC1);
M.at<double>(0, 0) = std::cos(angle); M.at<double>(0, 1) = -std::sin(angle); M.at<double>(0, 2) = srcSize.width / 2;
M.at<double>(1, 0) = std::sin(angle); M.at<double>(1, 1) = std::cos(angle); M.at<double>(1, 2) = 0.0;
@ -74,22 +75,23 @@ PARAM_TEST_CASE(BuildWarpAffineMaps, cv::gpu::DeviceInfo, cv::Size, Inverse)
}
};
TEST_P(BuildWarpAffineMaps, Accuracy)
GPU_TEST_P(BuildWarpAffineMaps, Accuracy)
{
cv::Mat M = createTransfomMatrix(size, CV_PI / 4);
cv::Mat src = randomMat(randomSize(200, 400), CV_8UC1);
cv::gpu::GpuMat xmap, ymap;
cv::gpu::buildWarpAffineMaps(M, inverse, size, xmap, ymap);
int interpolation = cv::INTER_NEAREST;
int borderMode = cv::BORDER_CONSTANT;
int flags = interpolation;
if (inverse)
flags |= cv::WARP_INVERSE_MAP;
cv::Mat src = randomMat(randomSize(200, 400), CV_8UC1);
cv::Mat dst;
cv::remap(src, dst, cv::Mat(xmap), cv::Mat(ymap), interpolation, borderMode);
int flags = interpolation;
if (inverse)
flags |= cv::WARP_INVERSE_MAP;
cv::Mat dst_gold;
cv::warpAffine(src, dst_gold, M, size, flags, borderMode);
@ -199,7 +201,7 @@ PARAM_TEST_CASE(WarpAffine, cv::gpu::DeviceInfo, cv::Size, MatType, Inverse, Int
}
};
TEST_P(WarpAffine, Accuracy)
GPU_TEST_P(WarpAffine, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Mat M = createTransfomMatrix(size, CV_PI / 3);
@ -247,7 +249,7 @@ PARAM_TEST_CASE(WarpAffineNPP, cv::gpu::DeviceInfo, MatType, Inverse, Interpolat
}
};
TEST_P(WarpAffineNPP, Accuracy)
GPU_TEST_P(WarpAffineNPP, Accuracy)
{
cv::Mat src = readImageType("stereobp/aloe-L.png", type);
cv::Mat M = createTransfomMatrix(src.size(), CV_PI / 4);
@ -48,6 +48,7 @@ namespace
cv::Mat createTransfomMatrix(cv::Size srcSize, double angle)
{
cv::Mat M(3, 3, CV_64FC1);
M.at<double>(0, 0) = std::cos(angle); M.at<double>(0, 1) = -std::sin(angle); M.at<double>(0, 2) = srcSize.width / 2;
M.at<double>(1, 0) = std::sin(angle); M.at<double>(1, 1) = std::cos(angle); M.at<double>(1, 2) = 0.0;
M.at<double>(2, 0) = 0.0 ; M.at<double>(2, 1) = 0.0 ; M.at<double>(2, 2) = 1.0;
@ -75,21 +76,25 @@ PARAM_TEST_CASE(BuildWarpPerspectiveMaps, cv::gpu::DeviceInfo, cv::Size, Inverse
}
};
TEST_P(BuildWarpPerspectiveMaps, Accuracy)
GPU_TEST_P(BuildWarpPerspectiveMaps, Accuracy)
{
cv::Mat M = createTransfomMatrix(size, CV_PI / 4);
cv::gpu::GpuMat xmap, ymap;
cv::gpu::buildWarpPerspectiveMaps(M, inverse, size, xmap, ymap);
cv::Mat src = randomMat(randomSize(200, 400), CV_8UC1);
cv::Mat dst;
cv::remap(src, dst, cv::Mat(xmap), cv::Mat(ymap), cv::INTER_NEAREST, cv::BORDER_CONSTANT);
int flags = cv::INTER_NEAREST;
int interpolation = cv::INTER_NEAREST;
int borderMode = cv::BORDER_CONSTANT;
int flags = interpolation;
if (inverse)
flags |= cv::WARP_INVERSE_MAP;
cv::Mat dst;
cv::remap(src, dst, cv::Mat(xmap), cv::Mat(ymap), interpolation, borderMode);
cv::Mat dst_gold;
cv::warpPerspective(src, dst_gold, M, size, flags, cv::BORDER_CONSTANT);
cv::warpPerspective(src, dst_gold, M, size, flags, borderMode);
EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}
@ -199,7 +204,7 @@ PARAM_TEST_CASE(WarpPerspective, cv::gpu::DeviceInfo, cv::Size, MatType, Inverse
}
};
TEST_P(WarpPerspective, Accuracy)
GPU_TEST_P(WarpPerspective, Accuracy)
{
cv::Mat src = randomMat(size, type);
cv::Mat M = createTransfomMatrix(size, CV_PI / 3);
@ -247,7 +252,7 @@ PARAM_TEST_CASE(WarpPerspectiveNPP, cv::gpu::DeviceInfo, MatType, Inverse, Inter
}
};
TEST_P(WarpPerspectiveNPP, Accuracy)
GPU_TEST_P(WarpPerspectiveNPP, Accuracy)
{
cv::Mat src = readImageType("stereobp/aloe-L.png", type);
cv::Mat M = createTransfomMatrix(src.size(), CV_PI / 4);

@ -67,7 +67,7 @@ double randomDouble(double minVal, double maxVal)
Size randomSize(int minVal, int maxVal)
{
return cv::Size(randomInt(minVal, maxVal), randomInt(minVal, maxVal));
return Size(randomInt(minVal, maxVal), randomInt(minVal, maxVal));
}
Scalar randomScalar(double minVal, double maxVal)
@ -83,7 +83,7 @@ Mat randomMat(Size size, int type, double minVal, double maxVal)
//////////////////////////////////////////////////////////////////////
// GpuMat create
cv::gpu::GpuMat createMat(cv::Size size, int type, bool useRoi)
GpuMat createMat(Size size, int type, bool useRoi)
{
Size size0 = size;
@ -122,21 +122,13 @@ Mat readImageType(const std::string& fname, int type)
if (CV_MAT_CN(type) == 4)
{
Mat temp;
cvtColor(src, temp, cv::COLOR_BGR2BGRA);
cvtColor(src, temp, COLOR_BGR2BGRA);
swap(src, temp);
}
src.convertTo(src, CV_MAT_DEPTH(type), CV_MAT_DEPTH(type) == CV_32F ? 1.0 / 255.0 : 1.0);
return src;
}
//////////////////////////////////////////////////////////////////////
// Image dumping
void dumpImage(const std::string& fileName, const cv::Mat& image)
{
cv::imwrite(TS::ptr()->get_data_path() + fileName, image);
}
//////////////////////////////////////////////////////////////////////
// Gpu devices
@ -156,7 +148,7 @@ void DeviceManager::load(int i)
devices_.clear();
devices_.reserve(1);
ostringstream msg;
std::ostringstream msg;
if (i < 0 || i >= getCudaEnabledDeviceCount())
{
@ -195,21 +187,39 @@ void DeviceManager::loadAll()
//////////////////////////////////////////////////////////////////////
// Additional assertion
Mat getMat(InputArray arr)
namespace
{
if (arr.kind() == _InputArray::GPU_MAT)
template <typename T, typename OutT> std::string printMatValImpl(const Mat& m, Point p)
{
Mat m;
arr.getGpuMat().download(m);
return m;
const int cn = m.channels();
std::ostringstream ostr;
ostr << "(";
p.x /= cn;
ostr << static_cast<OutT>(m.at<T>(p.y, p.x * cn));
for (int c = 1; c < m.channels(); ++c)
{
ostr << ", " << static_cast<OutT>(m.at<T>(p.y, p.x * cn + c));
}
ostr << ")";
return ostr.str();
}
return arr.getMat();
}
std::string printMatVal(const Mat& m, Point p)
{
typedef std::string (*func_t)(const Mat& m, Point p);
double checkNorm(InputArray m1, InputArray m2)
{
return norm(getMat(m1), getMat(m2), NORM_INF);
static const func_t funcs[] =
{
printMatValImpl<uchar, int>, printMatValImpl<schar, int>, printMatValImpl<ushort, int>, printMatValImpl<short, int>,
printMatValImpl<int, int>, printMatValImpl<float, float>, printMatValImpl<double, double>
};
return funcs[m.depth()](m, p);
}
}
void minMaxLocGold(const Mat& src, double* minVal_, double* maxVal_, Point* minLoc_, Point* maxLoc_, const Mat& mask)
@ -229,8 +239,8 @@ void minMaxLocGold(const Mat& src, double* minVal_, double* maxVal_, Point* minL
for (int y = 0; y < src.rows; ++y)
{
const schar* src_row = src.ptr<signed char>(y);
const uchar* mask_row = mask.empty() ? 0 : mask.ptr<unsigned char>(y);
const schar* src_row = src.ptr<schar>(y);
const uchar* mask_row = mask.empty() ? 0 : mask.ptr<uchar>(y);
for (int x = 0; x < src.cols; ++x)
{
@ -260,42 +270,19 @@ void minMaxLocGold(const Mat& src, double* minVal_, double* maxVal_, Point* minL
if (maxLoc_) *maxLoc_ = maxLoc;
}
namespace
Mat getMat(InputArray arr)
{
template <typename T, typename OutT> std::string printMatValImpl(const Mat& m, Point p)
if (arr.kind() == _InputArray::GPU_MAT)
{
const int cn = m.channels();
ostringstream ostr;
ostr << "(";
p.x /= cn;
ostr << static_cast<OutT>(m.at<T>(p.y, p.x * cn));
for (int c = 1; c < m.channels(); ++c)
{
ostr << ", " << static_cast<OutT>(m.at<T>(p.y, p.x * cn + c));
}
ostr << ")";
return ostr.str();
Mat m;
arr.getGpuMat().download(m);
return m;
}
std::string printMatVal(const Mat& m, Point p)
{
typedef std::string (*func_t)(const Mat& m, Point p);
static const func_t funcs[] =
{
printMatValImpl<uchar, int>, printMatValImpl<schar, int>, printMatValImpl<ushort, int>, printMatValImpl<short, int>,
printMatValImpl<int, int>, printMatValImpl<float, float>, printMatValImpl<double, double>
};
return funcs[m.depth()](m, p);
}
return arr.getMat();
}
testing::AssertionResult assertMatNear(const char* expr1, const char* expr2, const char* eps_expr, cv::InputArray m1_, cv::InputArray m2_, double eps)
AssertionResult assertMatNear(const char* expr1, const char* expr2, const char* eps_expr, InputArray m1_, InputArray m2_, double eps)
{
Mat m1 = getMat(m1_);
Mat m2 = getMat(m2_);
@ -344,18 +331,6 @@ double checkSimilarity(InputArray m1, InputArray m2)
//////////////////////////////////////////////////////////////////////
// Helper structs for value-parameterized tests
vector<MatDepth> depths(int depth_start, int depth_end)
{
vector<MatDepth> v;
v.reserve((depth_end - depth_start + 1));
for (int depth = depth_start; depth <= depth_end; ++depth)
v.push_back(depth);
return v;
}
vector<MatType> types(int depth_start, int depth_end, int cn_start, int cn_end)
{
vector<MatType> v;
@ -366,7 +341,7 @@ vector<MatType> types(int depth_start, int depth_end, int cn_start, int cn_end)
{
for (int cn = cn_start; cn <= cn_end; ++cn)
{
v.push_back(CV_MAKETYPE(depth, cn));
v.push_back(MatType(CV_MAKE_TYPE(depth, cn)));
}
}
@ -401,6 +376,14 @@ void PrintTo(const Inverse& inverse, std::ostream* os)
(*os) << "direct";
}
//////////////////////////////////////////////////////////////////////
// Other
void dumpImage(const std::string& fileName, const Mat& image)
{
imwrite(TS::ptr()->get_data_path() + fileName, image);
}
void showDiff(InputArray gold_, InputArray actual_, double eps)
{
Mat gold = getMat(gold_);

@ -39,8 +39,14 @@
//
//M*/
#ifndef __OPENCV_TEST_UTILITY_HPP__
#define __OPENCV_TEST_UTILITY_HPP__
#ifndef __OPENCV_GPU_TEST_UTILITY_HPP__
#define __OPENCV_GPU_TEST_UTILITY_HPP__
#include "opencv2/core/core.hpp"
#include "opencv2/core/gpumat.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/ts/ts.hpp"
#include "opencv2/ts/ts_perf.hpp"
//////////////////////////////////////////////////////////////////////
// random generators
@ -66,11 +72,6 @@ cv::Mat readImage(const std::string& fileName, int flags = cv::IMREAD_COLOR);
//! read image from testdata folder and convert it to specified type
cv::Mat readImageType(const std::string& fname, int type);
//////////////////////////////////////////////////////////////////////
// Image dumping
void dumpImage(const std::string& fileName, const cv::Mat& image);
//////////////////////////////////////////////////////////////////////
// Gpu devices
@ -96,12 +97,10 @@ private:
//////////////////////////////////////////////////////////////////////
// Additional assertion
cv::Mat getMat(cv::InputArray arr);
double checkNorm(cv::InputArray m1, cv::InputArray m2);
void minMaxLocGold(const cv::Mat& src, double* minVal_, double* maxVal_ = 0, cv::Point* minLoc_ = 0, cv::Point* maxLoc_ = 0, const cv::Mat& mask = cv::Mat());
cv::Mat getMat(cv::InputArray arr);
testing::AssertionResult assertMatNear(const char* expr1, const char* expr2, const char* eps_expr, cv::InputArray m1, cv::InputArray m2, double eps);
#define EXPECT_MAT_NEAR(m1, m2, eps) EXPECT_PRED_FORMAT3(assertMatNear, m1, m2, eps)
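Illustrative usage, not part of this patch: because assertMatNear routes both arguments through getMat(), which downloads GPU_MAT inputs, EXPECT_MAT_NEAR can compare a host cv::Mat against a cv::gpu::GpuMat directly inside a test body, with no explicit download. A minimal sketch with hypothetical variables:

    // Sketch only: 'gold' is a hypothetical CPU reference, 'd_result' a hypothetical GPU output.
    cv::Mat gold = cv::Mat::ones(4, 4, CV_8UC1);
    cv::gpu::GpuMat d_result(gold);        // stands in for the output of the code under test
    EXPECT_MAT_NEAR(gold, d_result, 0.0);  // getMat() downloads d_result before the comparison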
@ -164,6 +163,45 @@ double checkSimilarity(cv::InputArray m1, cv::InputArray m2);
//////////////////////////////////////////////////////////////////////
// Helper structs for value-parameterized tests
#define GPU_TEST_P(test_case_name, test_name) \
class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
: public test_case_name { \
public: \
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \
virtual void TestBody(); \
private: \
void UnsafeTestBody(); \
static int AddToRegistry() { \
::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
GetTestCasePatternHolder<test_case_name>(\
#test_case_name, __FILE__, __LINE__)->AddTestPattern(\
#test_case_name, \
#test_name, \
new ::testing::internal::TestMetaFactory< \
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>()); \
return 0; \
} \
static int gtest_registering_dummy_; \
GTEST_DISALLOW_COPY_AND_ASSIGN_(\
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \
}; \
int GTEST_TEST_CLASS_NAME_(test_case_name, \
test_name)::gtest_registering_dummy_ = \
GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody() \
{ \
try \
{ \
UnsafeTestBody(); \
} \
catch (...) \
{ \
cv::gpu::resetDevice(); \
throw; \
} \
} \
void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::UnsafeTestBody()
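Illustrative usage, not part of this patch: a hypothetical parameterized test written against the new macro. Relative to TEST_P only the registration macro changes (as in the WarpAffine/WarpPerspective Accuracy tests above), but an uncaught GPU failure now resets the device before the exception propagates, so the remaining tests start from a clean CUDA context. A minimal sketch, with hypothetical fixture and test names:

    // Hypothetical fixture and test, shown only to illustrate the GPU_TEST_P pattern.
    PARAM_TEST_CASE(HypotheticalGpuTest, cv::gpu::DeviceInfo, UseRoi)
    {
        cv::gpu::DeviceInfo devInfo;
        bool useRoi;

        virtual void SetUp()
        {
            devInfo = GET_PARAM(0);
            useRoi = GET_PARAM(1);
            cv::gpu::setDevice(devInfo.deviceID());
        }
    };

    GPU_TEST_P(HypotheticalGpuTest, Accuracy)
    {
        // If anything below throws (e.g. a CUDA error surfacing as cv::Exception),
        // the generated TestBody() calls cv::gpu::resetDevice() and rethrows.
        cv::Mat src = randomMat(randomSize(100, 200), CV_8UC1);
        cv::gpu::GpuMat d_dst = createMat(src.size(), src.type(), useRoi);
        d_dst.upload(src);
        EXPECT_MAT_NEAR(src, d_dst, 0.0);
    }

    INSTANTIATE_TEST_CASE_P(GPU_Sanity, HypotheticalGpuTest, testing::Combine(ALL_DEVICES, WHOLE_SUBMAT));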
#define PARAM_TEST_CASE(name, ...) struct name : testing::TestWithParam< std::tr1::tuple< __VA_ARGS__ > >
#define GET_PARAM(k) std::tr1::get< k >(GetParam())
@ -178,11 +216,8 @@ namespace cv { namespace gpu
using perf::MatDepth;
//! return vector with depths from specified range.
std::vector<MatDepth> depths(int depth_start, int depth_end);
#define ALL_DEPTH testing::Values(MatDepth(CV_8U), MatDepth(CV_8S), MatDepth(CV_16U), MatDepth(CV_16S), MatDepth(CV_32S), MatDepth(CV_32F), MatDepth(CV_64F))
#define DEPTHS(depth_start, depth_end) testing::ValuesIn(depths(depth_start, depth_end))
#define DEPTH_PAIRS testing::Values(std::make_pair(MatDepth(CV_8U), MatDepth(CV_8U)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_16U)), \
std::make_pair(MatDepth(CV_8U), MatDepth(CV_16S)), \
@ -237,8 +272,6 @@ private:
void PrintTo(const UseRoi& useRoi, std::ostream* os);
#define WHOLE testing::Values(UseRoi(false))
#define SUBMAT testing::Values(UseRoi(true))
#define WHOLE_SUBMAT testing::Values(UseRoi(false), UseRoi(true))
// Direct/Inverse
@ -253,7 +286,9 @@ public:
private:
bool val_;
};
void PrintTo(const Inverse& useRoi, std::ostream* os);
#define DIRECT_INVERSE testing::Values(Inverse(false), Inverse(true))
// Param class
@ -291,6 +326,7 @@ CV_FLAGS(WarpFlags, cv::INTER_NEAREST, cv::INTER_LINEAR, cv::INTER_CUBIC, cv::WA
//////////////////////////////////////////////////////////////////////
// Other
void dumpImage(const std::string& fileName, const cv::Mat& image);
void showDiff(cv::InputArray gold, cv::InputArray actual, double eps);
#endif // __OPENCV_TEST_UTILITY_HPP__
#endif // __OPENCV_GPU_TEST_UTILITY_HPP__

@ -1,7 +1,7 @@
#ifndef __OPENCV_GTESTCV_HPP__
#define __OPENCV_GTESTCV_HPP__
#if HAVE_CVCONFIG_H
#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
#ifndef GTEST_CREATE_SHARED_LIBRARY

@ -1,5 +1,9 @@
#include "precomp.hpp"
#ifdef HAVE_CUDA
#include "opencv2/core/gpumat.hpp"
#endif
#ifdef ANDROID
# include <sys/time.h>
#endif
@ -1160,6 +1164,10 @@ void TestBase::RunPerfTestBody()
catch(cv::Exception e)
{
metrics.terminationReason = performance_metrics::TERM_EXCEPTION;
#ifdef HAVE_CUDA
if (e.code == CV_GpuApiCallError)
cv::gpu::resetDevice();
#endif
FAIL() << "Expected: PerfTestBody() doesn't throw an exception.\n Actual: it throws cv::Exception:\n " << e.what();
}
catch(std::exception e)
