Merge remote-tracking branch 'upstream/3.4' into merge-3.4

Branch: pull/14320/head
Author: Alexander Alekhin, 6 years ago
Commit: 4635356435

53 changed files (changed line count in parentheses):

  1. apps/traincascade/cascadeclassifier.cpp (1)
  2. cmake/OpenCVDetectDirectX.cmake (5)
  3. cmake/OpenCVDetectOpenCL.cmake (7)
  4. cmake/OpenCVUtils.cmake (19)
  5. doc/CMakeLists.txt (11)
  6. doc/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/py_histogram_backprojection.markdown (2)
  7. modules/calib3d/misc/python/test/test_calibration.py (0)
  8. modules/core/include/opencv2/core/private.hpp (11)
  9. modules/core/include/opencv2/core/utils/allocator_stats.hpp (29)
  10. modules/core/include/opencv2/core/utils/allocator_stats.impl.hpp (117)
  11. modules/core/src/alloc.cpp (78)
  12. modules/core/src/ocl.cpp (34)
  13. modules/core/src/system.cpp (2)
  14. modules/dnn/include/opencv2/dnn/dnn.hpp (10)
  15. modules/dnn/include/opencv2/dnn/version.hpp (2)
  16. modules/dnn/misc/python/test/test_dnn.py (0)
  17. modules/dnn/src/dnn.cpp (227)
  18. modules/dnn/src/layers/elementwise_layers.cpp (12)
  19. modules/dnn/src/layers/flatten_layer.cpp (4)
  20. modules/dnn/src/layers/padding_layer.cpp (49)
  21. modules/dnn/src/layers/pooling_layer.cpp (2)
  22. modules/dnn/src/tensorflow/tf_importer.cpp (4)
  23. modules/dnn/test/test_backends.cpp (24)
  24. modules/dnn/test/test_caffe_importer.cpp (17)
  25. modules/dnn/test/test_darknet_importer.cpp (7)
  26. modules/dnn/test/test_halide_layers.cpp (13)
  27. modules/dnn/test/test_onnx_importer.cpp (31)
  28. modules/dnn/test/test_tf_importer.cpp (56)
  29. modules/dnn/test/test_torch_importer.cpp (1)
  30. modules/features2d/misc/python/test/test_feature_homography.py (0)
  31. modules/ml/misc/python/test/test_digits.py (0)
  32. modules/ml/misc/python/test/test_goodfeatures.py (0)
  33. modules/ml/misc/python/test/test_letter_recog.py (0)
  34. modules/objdetect/misc/python/test/test_facedetect.py (0)
  35. modules/objdetect/misc/python/test/test_peopledetect.py (0)
  36. modules/python/CMakeLists.txt (2)
  37. modules/python/test/CMakeLists.txt (32)
  38. modules/python/test/test.py (35)
  39. modules/stitching/misc/python/test/test_stitching.py (0)
  40. modules/ts/include/opencv2/ts.hpp (62)
  41. modules/ts/include/opencv2/ts/ts_ext.hpp (12)
  42. modules/ts/include/opencv2/ts/ts_perf.hpp (8)
  43. modules/ts/misc/testlog_parser.py (1)
  44. modules/ts/src/precomp.hpp (2)
  45. modules/ts/src/ts.cpp (108)
  46. modules/ts/src/ts_perf.cpp (14)
  47. modules/ts/src/ts_tags.cpp (471)
  48. modules/ts/src/ts_tags.hpp (26)
  49. modules/video/misc/python/test/test_lk_homography.py (0)
  50. modules/video/misc/python/test/test_lk_track.py (0)
  51. modules/videoio/misc/python/test/test_videoio.py (0)
  52. modules/videoio/src/cap_ffmpeg_impl.hpp (5)
  53. platforms/scripts/valgrind.supp (7)

@ -341,6 +341,7 @@ int CvCascadeClassifier::fillPassedSamples( int first, int count, bool isPositiv
{
getcount++;
printf("%s current samples: %d\r", isPositive ? "POS":"NEG", getcount);
fflush(stdout);
break;
}
}

@ -22,4 +22,9 @@ if(WIN32)
set(HAVE_D3D11 ON)
set(HAVE_D3D10 ON)
set(HAVE_D3D9 ON)
if(HAVE_OPENCL AND WITH_OPENCL_D3D11_NV AND EXISTS "${OPENCL_INCLUDE_DIR}/CL/cl_d3d11_ext.h")
set(HAVE_OPENCL_D3D11_NV ON)
endif()
endif()

@ -11,10 +11,6 @@ mark_as_advanced(OPENCL_INCLUDE_DIR OPENCL_LIBRARY)
if(OPENCL_FOUND)
if(WITH_OPENCL_D3D11_NV AND EXISTS "${OPENCL_INCLUDE_DIR}/CL/cl_d3d11_ext.h")
set(HAVE_OPENCL_D3D11_NV ON)
endif()
if(OPENCL_LIBRARY)
set(HAVE_OPENCL_STATIC ON)
set(OPENCL_LIBRARIES "${OPENCL_LIBRARY}")
@ -82,4 +78,7 @@ if(OPENCL_FOUND)
list(APPEND OPENCL_INCLUDE_DIRS "${CLAMDBLAS_INCLUDE_DIR}")
endif()
endif()
# check WITH_OPENCL_D3D11_NV is located in OpenCVDetectDirectX.cmake file
endif()

@ -1769,3 +1769,22 @@ macro(ocv_git_describe var_name path)
set(${var_name} "unknown")
endif()
endmacro()
# ocv_update_file(filepath content [VERBOSE])
# - write content to file
# - will not change modification time in case when file already exists and content has not changed
function(ocv_update_file filepath content)
if(EXISTS "${filepath}")
file(READ "${filepath}" actual_content)
else()
set(actual_content "")
endif()
if("${actual_content}" STREQUAL "${content}")
if(";${ARGN};" MATCHES ";VERBOSE;")
message(STATUS "${filepath} contains the same content")
endif()
else()
file(WRITE "${filepath}" "${content}")
endif()
endfunction()

@ -15,7 +15,7 @@ if(DOXYGEN_FOUND)
# not documented modules list
set(blacklist "${DOXYGEN_BLACKLIST}")
list(APPEND blacklist "ts" "java_bindings_generator" "java" "python_bindings_generator" "python2" "python3" "js" "world")
list(APPEND blacklist "ts" "world")
unset(CMAKE_DOXYGEN_TUTORIAL_CONTRIB_ROOT)
unset(CMAKE_DOXYGEN_TUTORIAL_JS_ROOT)
@ -38,7 +38,16 @@ if(DOXYGEN_FOUND)
set(refs_extra)
set(deps)
foreach(m ${OPENCV_MODULES_MAIN} ${OPENCV_MODULES_EXTRA})
set(the_module "${m}")
if(NOT the_module MATCHES "^opencv_")
set(the_module "opencv_${m}")
endif()
list(FIND blacklist ${m} _pos)
if(NOT EXISTS "${OPENCV_MODULE_${the_module}_LOCATION}/include"
AND NOT EXISTS "${OPENCV_MODULE_${the_module}_LOCATION}/doc"
)
set(_pos -2) # blacklist
endif()
if(${_pos} EQUAL -1)
list(APPEND CMAKE_DOXYGEN_ENABLED_SECTIONS "HAVE_opencv_${m}")
# include folder

@ -15,7 +15,7 @@ histograms**.
**What is it actually in simple words?** It is used for image segmentation or finding objects of
interest in an image. In simple words, it creates an image of the same size (but single channel) as
that of our input image, where each pixel corresponds to the probability of that pixel belonging to
our object. In more simpler worlds, the output image will have our object of interest in more white
our object. In more simpler words, the output image will have our object of interest in more white
compared to remaining part. Well, that is an intuitive explanation. (I can't make it more simpler).
Histogram Backprojection is used with camshift algorithm etc.

@ -141,13 +141,20 @@ namespace cv
{
CV_EXPORTS void scalarToRawData(const cv::Scalar& s, void* buf, int type, int unroll_to = 0);
//! Allocate all memory buffers which will not be freed, ease filtering memcheck issues
//! Allocate memory buffers which will not be freed, ease filtering memcheck issues. Uses fastMalloc() call.
CV_EXPORTS void* allocSingletonBuffer(size_t size);
//! Allocate all memory buffers which will not be freed, ease filtering memcheck issues
//! Allocate memory buffers which will not be freed, ease filtering memcheck issues. Uses fastMalloc() call
template <typename T> static inline
T* allocSingleton(size_t count = 1) { return static_cast<T*>(allocSingletonBuffer(sizeof(T) * count)); }
//! Allocate memory buffers which will not be freed, ease filtering memcheck issues. Uses generic malloc() call.
CV_EXPORTS void* allocSingletonNewBuffer(size_t size);
//! Allocate memory buffers which will not be freed, ease filtering memcheck issues. Uses generic malloc() call.
template <typename T> static inline
T* allocSingletonNew() { return new(allocSingletonNewBuffer(sizeof(T))) T(); }
} // namespace
#if 1 // TODO: Remove in OpenCV 4.x

@ -0,0 +1,29 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_CORE_ALLOCATOR_STATS_HPP
#define OPENCV_CORE_ALLOCATOR_STATS_HPP
#include "../cvdef.h"
namespace cv { namespace utils {
class AllocatorStatisticsInterface
{
protected:
AllocatorStatisticsInterface() {}
virtual ~AllocatorStatisticsInterface() {}
public:
virtual uint64_t getCurrentUsage() const = 0;
virtual uint64_t getTotalUsage() const = 0;
virtual uint64_t getNumberOfAllocations() const = 0;
virtual uint64_t getPeakUsage() const = 0;
/** set peak usage = current usage */
virtual void resetPeakUsage() = 0;
};
}} // namespace
#endif // OPENCV_CORE_ALLOCATOR_STATS_HPP
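
For context, a minimal sketch of how these counters could be queried from application code. It assumes the accessor declared later in this patch (cv::getAllocatorStatistics() in alloc.cpp; an analogous cv::ocl::getOpenCLAllocatorStatistics() is added in ocl.cpp) is made visible via a forward declaration, and that CPU-side tracking is only collected when OPENCV_ALLOC_ENABLE_STATISTICS is defined at build time (it is commented out by default in alloc.cpp below).

// --- usage sketch, not part of this commit ---
#include <cstdio>
#include <opencv2/core.hpp>
#include <opencv2/core/utils/allocator_stats.hpp>

// Accessor as declared in alloc.cpp in this patch; a real application
// would need this declaration exposed through a header it can include.
namespace cv {
CV_EXPORTS cv::utils::AllocatorStatisticsInterface& getAllocatorStatistics();
}

int main()
{
    cv::Mat m(1024, 1024, CV_8UC3); // allocation goes through fastMalloc()

    cv::utils::AllocatorStatisticsInterface& s = cv::getAllocatorStatistics();
    // Counters stay at zero unless OPENCV_ALLOC_ENABLE_STATISTICS was defined when building core.
    std::printf("curr=%llu peak=%llu total=%llu allocs=%llu\n",
                (unsigned long long)s.getCurrentUsage(),
                (unsigned long long)s.getPeakUsage(),
                (unsigned long long)s.getTotalUsage(),
                (unsigned long long)s.getNumberOfAllocations());
    s.resetPeakUsage(); // peak := current
    return 0;
}
// --- end of sketch ---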

@ -0,0 +1,117 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_CORE_ALLOCATOR_STATS_IMPL_HPP
#define OPENCV_CORE_ALLOCATOR_STATS_IMPL_HPP
#include "./allocator_stats.hpp"
#ifdef CV_CXX11
#include <atomic>
#endif
namespace cv { namespace utils {
#ifdef CV__ALLOCATOR_STATS_LOG
namespace {
#endif
class AllocatorStatistics : public AllocatorStatisticsInterface
{
protected:
#ifdef CV_CXX11
std::atomic<long long> curr, total, total_allocs, peak;
#else
volatile long long curr, total, total_allocs, peak; // overflow is possible, CV_XADD operates with 'int' only
#endif
public:
AllocatorStatistics()
#ifndef CV_CXX11
: curr(0), total(0), total_allocs(0), peak(0)
#endif
{}
~AllocatorStatistics() CV_OVERRIDE {}
// AllocatorStatisticsInterface
#ifdef CV_CXX11
uint64_t getCurrentUsage() const CV_OVERRIDE { return (uint64_t)curr.load(); }
uint64_t getTotalUsage() const CV_OVERRIDE { return (uint64_t)total.load(); }
uint64_t getNumberOfAllocations() const CV_OVERRIDE { return (uint64_t)total_allocs.load(); }
uint64_t getPeakUsage() const CV_OVERRIDE { return (uint64_t)peak.load(); }
/** set peak usage = current usage */
void resetPeakUsage() CV_OVERRIDE { peak.store(curr.load()); }
// Controller interface
void onAllocate(size_t sz)
{
#ifdef CV__ALLOCATOR_STATS_LOG
CV__ALLOCATOR_STATS_LOG(cv::format("allocate: %lld (curr=%lld)", (long long int)sz, (long long int)curr.load()));
#endif
long long new_curr = curr.fetch_add((long long)sz) + (long long)sz;
// peak = std::max((uint64_t)peak, new_curr);
auto prev_peak = peak.load();
while (prev_peak < new_curr)
{
if (peak.compare_exchange_weak(prev_peak, new_curr))
break;
}
// end of peak = max(...)
total += (long long)sz;
total_allocs++;
}
void onFree(size_t sz)
{
#ifdef CV__ALLOCATOR_STATS_LOG
CV__ALLOCATOR_STATS_LOG(cv::format("free: %lld (curr=%lld)", (long long int)sz, (long long int)curr.load()));
#endif
curr -= (long long)sz;
}
#else
uint64_t getCurrentUsage() const CV_OVERRIDE { return (uint64_t)curr; }
uint64_t getTotalUsage() const CV_OVERRIDE { return (uint64_t)total; }
uint64_t getNumberOfAllocations() const CV_OVERRIDE { return (uint64_t)total_allocs; }
uint64_t getPeakUsage() const CV_OVERRIDE { return (uint64_t)peak; }
void resetPeakUsage() CV_OVERRIDE { peak = curr; }
// Controller interface
void onAllocate(size_t sz)
{
#ifdef CV__ALLOCATOR_STATS_LOG
CV__ALLOCATOR_STATS_LOG(cv::format("allocate: %lld (curr=%lld)", (long long int)sz, (long long int)curr));
#endif
uint64_t new_curr = (uint64_t)CV_XADD(&curr, (uint64_t)sz) + sz;
peak = std::max((uint64_t)peak, new_curr); // non-thread safe
//CV_XADD(&total, (uint64_t)sz); // overflow with int, non-reliable...
total += sz;
CV_XADD(&total_allocs, (uint64_t)1);
}
void onFree(size_t sz)
{
#ifdef CV__ALLOCATOR_STATS_LOG
CV__ALLOCATOR_STATS_LOG(cv::format("free: %lld (curr=%lld)", (long long int)sz, (long long int)curr));
#endif
CV_XADD(&curr, (uint64_t)-sz);
}
#endif
};
#ifdef CV__ALLOCATOR_STATS_LOG
} // namespace
#endif
}} // namespace
#endif // OPENCV_CORE_ALLOCATOR_STATS_IMPL_HPP
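
The C++11 branch above raises the peak counter without a mutex by retrying compare_exchange_weak until the stored peak is at least the new current usage; a standalone sketch of that idiom:

// --- standalone illustration, not part of this commit ---
#include <atomic>
#include <cassert>

// peak = max(peak, new_curr), lock-free: compare_exchange_weak reloads
// prev_peak on failure, so the loop exits as soon as the stored peak is
// already >= new_curr (possibly raised by another thread).
static void updatePeak(std::atomic<long long>& peak, long long new_curr)
{
    long long prev_peak = peak.load();
    while (prev_peak < new_curr)
    {
        if (peak.compare_exchange_weak(prev_peak, new_curr))
            break;
    }
}

int main()
{
    std::atomic<long long> peak(0);
    updatePeak(peak, 100);
    updatePeak(peak, 40);        // a lower value leaves the peak untouched
    assert(peak.load() == 100);
    return 0;
}
// --- end of illustration ---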

@ -42,12 +42,29 @@
#include "precomp.hpp"
#include <opencv2/core/utils/logger.defines.hpp>
#undef CV_LOG_STRIP_LEVEL
#define CV_LOG_STRIP_LEVEL CV_LOG_LEVEL_VERBOSE + 1
#include <opencv2/core/utils/logger.hpp>
#define CV__ALLOCATOR_STATS_LOG(...) CV_LOG_VERBOSE(NULL, 0, "alloc.cpp: " << __VA_ARGS__)
#include "opencv2/core/utils/allocator_stats.impl.hpp"
#undef CV__ALLOCATOR_STATS_LOG
//#define OPENCV_ALLOC_ENABLE_STATISTICS
#define OPENCV_ALLOC_STATISTICS_LIMIT 4096 // don't track buffers less than N bytes
#ifdef HAVE_POSIX_MEMALIGN
#include <stdlib.h>
#elif defined HAVE_MALLOC_H
#include <malloc.h>
#endif
#ifdef OPENCV_ALLOC_ENABLE_STATISTICS
#include <map>
#endif
namespace cv {
static void* OutOfMemoryError(size_t size)
@ -55,8 +72,21 @@ static void* OutOfMemoryError(size_t size)
CV_Error_(CV_StsNoMem, ("Failed to allocate %llu bytes", (unsigned long long)size));
}
CV_EXPORTS cv::utils::AllocatorStatisticsInterface& getAllocatorStatistics();
static cv::utils::AllocatorStatistics allocator_stats;
cv::utils::AllocatorStatisticsInterface& getAllocatorStatistics()
{
return allocator_stats;
}
void* fastMalloc( size_t size )
#ifdef OPENCV_ALLOC_ENABLE_STATISTICS
static inline
void* fastMalloc_(size_t size)
#else
void* fastMalloc(size_t size)
#endif
{
#ifdef HAVE_POSIX_MEMALIGN
void* ptr = NULL;
@ -80,7 +110,12 @@ void* fastMalloc( size_t size )
#endif
}
#ifdef OPENCV_ALLOC_ENABLE_STATISTICS
static inline
void fastFree_(void* ptr)
#else
void fastFree(void* ptr)
#endif
{
#if defined HAVE_POSIX_MEMALIGN || defined HAVE_MEMALIGN
free(ptr);
@ -95,6 +130,47 @@ void fastFree(void* ptr)
#endif
}
#ifdef OPENCV_ALLOC_ENABLE_STATISTICS
static
Mutex& getAllocationStatisticsMutex()
{
static Mutex* p_alloc_mutex = allocSingletonNew<Mutex>();
CV_Assert(p_alloc_mutex);
return *p_alloc_mutex;
}
static std::map<void*, size_t> allocated_buffers; // guarded by getAllocationStatisticsMutex()
void* fastMalloc(size_t size)
{
void* res = fastMalloc_(size);
if (res && size >= OPENCV_ALLOC_STATISTICS_LIMIT)
{
cv::AutoLock lock(getAllocationStatisticsMutex());
allocated_buffers.insert(std::make_pair(res, size));
allocator_stats.onAllocate(size);
}
return res;
}
void fastFree(void* ptr)
{
{
cv::AutoLock lock(getAllocationStatisticsMutex());
std::map<void*, size_t>::iterator i = allocated_buffers.find(ptr);
if (i != allocated_buffers.end())
{
size_t size = i->second;
allocator_stats.onFree(size);
allocated_buffers.erase(i);
}
}
fastFree_(ptr);
}
#endif // OPENCV_ALLOC_ENABLE_STATISTICS
} // namespace
CV_IMPL void* cvAlloc( size_t size )

@ -54,6 +54,9 @@
#include <opencv2/core/utils/configuration.private.hpp>
#include <opencv2/core/utils/logger.defines.hpp>
#undef CV_LOG_STRIP_LEVEL
#define CV_LOG_STRIP_LEVEL CV_LOG_LEVEL_DEBUG + 1
#include <opencv2/core/utils/logger.hpp>
#include "opencv2/core/ocl_genbase.hpp"
@ -63,6 +66,10 @@
#include "opencv2/core/utils/filesystem.hpp"
#include "opencv2/core/utils/filesystem.private.hpp"
#define CV__ALLOCATOR_STATS_LOG(...) CV_LOG_VERBOSE(NULL, 0, "OpenCL allocator: " << __VA_ARGS__)
#include "opencv2/core/utils/allocator_stats.impl.hpp"
#undef CV__ALLOCATOR_STATS_LOG
#define CV_OPENCL_ALWAYS_SHOW_BUILD_LOG 0
#define CV_OPENCL_SHOW_RUN_KERNELS 0
@ -132,6 +139,14 @@ namespace cv { namespace ocl {
void release() { if( CV_XADD(&refcount, -1) == 1 && !cv::__termination) delete this; } \
int refcount
static cv::utils::AllocatorStatistics opencl_allocator_stats;
CV_EXPORTS cv::utils::AllocatorStatisticsInterface& getOpenCLAllocatorStatistics();
cv::utils::AllocatorStatisticsInterface& getOpenCLAllocatorStatistics()
{
return opencl_allocator_stats;
}
#ifndef HAVE_OPENCL
#define CV_OPENCL_NO_SUPPORT() CV_Error(cv::Error::OpenCLApiCallError, "OpenCV build without OpenCL support")
namespace {
@ -4534,15 +4549,17 @@ class OpenCLAllocator CV_FINAL : public MatAllocator
mutable OpenCLSVMBufferPoolImpl bufferPoolSVM;
#endif
public:
enum AllocatorFlags
{
ALLOCATOR_FLAGS_BUFFER_POOL_USED = 1 << 0,
ALLOCATOR_FLAGS_BUFFER_POOL_HOST_PTR_USED = 1 << 1
ALLOCATOR_FLAGS_BUFFER_POOL_HOST_PTR_USED = 1 << 1,
#ifdef HAVE_OPENCL_SVM
,ALLOCATOR_FLAGS_BUFFER_POOL_SVM_USED = 1 << 2
ALLOCATOR_FLAGS_BUFFER_POOL_SVM_USED = 1 << 2,
#endif
ALLOCATOR_FLAGS_EXTERNAL_BUFFER = 1 << 3 // convertFromBuffer()
};
public:
OpenCLAllocator()
: bufferPool(0),
bufferPoolHostPtr(CL_MEM_ALLOC_HOST_PTR)
@ -4648,6 +4665,7 @@ public:
u->allocatorFlags_ = allocatorFlags;
CV_DbgAssert(!u->tempUMat()); // for bufferPool.release() consistency in deallocate()
u->markHostCopyObsolete(true);
opencl_allocator_stats.onAllocate(u->size);
return u;
}
@ -4757,6 +4775,7 @@ public:
}
if (!!(accessFlags & ACCESS_WRITE))
u->markHostCopyObsolete(true);
opencl_allocator_stats.onAllocate(u->size);
return true;
}
@ -4809,6 +4828,13 @@ public:
void deallocate_(UMatData* u) const
{
CV_Assert(u);
CV_Assert(u->handle);
if ((u->allocatorFlags_ & ALLOCATOR_FLAGS_EXTERNAL_BUFFER) == 0)
{
opencl_allocator_stats.onFree(u->size);
}
#ifdef _WIN32
if (cv::__termination) // process is not in consistent state (after ExitProcess call) and terminating
return; // avoid any OpenCL calls
@ -5790,7 +5816,7 @@ void convertFromBuffer(void* cl_mem_buffer, size_t step, int rows, int cols, int
// attach clBuffer to UMatData
dst.u = new UMatData(getOpenCLAllocator());
dst.u->data = 0;
dst.u->allocatorFlags_ = 0; // not allocated from any OpenCV buffer pool
dst.u->allocatorFlags_ = OpenCLAllocator::ALLOCATOR_FLAGS_EXTERNAL_BUFFER; // not allocated from any OpenCV buffer pool
dst.u->flags = static_cast<UMatData::MemoryFlag>(0);
dst.u->handle = cl_mem_buffer;
dst.u->origdata = 0;

@ -71,6 +71,8 @@ static bool param_dumpErrors = utils::getConfigurationParameterBool("OPENCV_DUMP
);
void* allocSingletonBuffer(size_t size) { return fastMalloc(size); }
void* allocSingletonNewBuffer(size_t size) { return malloc(size); }
} // namespace cv

@ -381,6 +381,16 @@ CV__DNN_INLINE_NS_BEGIN
/** Returns true if there are no layers in the network. */
CV_WRAP bool empty() const;
/** @brief Dump net to String
* @returns String with structure, hyperparameters, backend, target and fusion
* To see correct backend, target and fusion run after forward().
*/
CV_WRAP String dump();
/** @brief Dump net structure, hyperparameters, backend, target and fusion to dot file
* @param path path to output file with .dot extension
* @see dump()
*/
CV_WRAP void dumpToFile(const String& path);
/** @brief Adds new layer to the net.
* @param name unique name of the adding layer.
* @param type typename of the adding layer (type must be registered in LayerRegister).
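
A minimal sketch of how the new dump API could be used; the model file names are placeholders, and per the note above dump() reports the final backend/target/fusion only after forward() has run.

// --- usage sketch, not part of this commit ---
#include <opencv2/dnn.hpp>
#include <iostream>

int main()
{
    // Placeholder model files; any net loaded via the readNet*() functions works the same way.
    cv::dnn::Net net = cv::dnn::readNetFromCaffe("deploy.prototxt", "model.caffemodel");
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);

    cv::Mat blob = cv::dnn::blobFromImage(cv::Mat::zeros(224, 224, CV_8UC3));
    net.setInput(blob);
    net.forward(); // run once so the reported backend/target/fusion are correct

    std::cout << net.dump() << std::endl; // Graphviz dot text of the network
    net.dumpToFile("net.dot");            // same representation written to a file
    return 0;
}
// --- end of sketch ---

The .dot output can then be rendered with Graphviz, for example: dot -Tpng net.dot -o net.png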

@ -6,7 +6,7 @@
#define OPENCV_DNN_VERSION_HPP
/// Use with major OpenCV version only.
#define OPENCV_DNN_API_VERSION 20190122
#define OPENCV_DNN_API_VERSION 20190412
#if !defined CV_DOXYGEN && !defined CV_DNN_DONT_ADD_INLINE_NS
#define CV__DNN_INLINE_NS __CV_CAT(dnn4_v, OPENCV_DNN_API_VERSION)

@ -48,6 +48,7 @@
#include <algorithm>
#include <iostream>
#include <sstream>
#include <fstream>
#include <iterator>
#include <numeric>
#include <opencv2/dnn/shape_utils.hpp>
@ -1179,12 +1180,6 @@ struct Net::Impl
continue;
currLayer->unsetAttached();
Ptr<PoolingLayer> poolingLayer = currLayer.dynamicCast<PoolingLayer>();
if( !poolingLayer.empty() )
{
poolingLayer->computeMaxIdx = true;
}
}
layersTimings.clear();
@ -2145,30 +2140,11 @@ struct Net::Impl
}
}
}
// the optimization #2. if there is no layer that takes max pooling layer's computed
// max indices (and only some semantical segmentation networks might need this;
// many others only take the maximum values), then we switch the max pooling
// layer to the faster operating mode.
Ptr<PoolingLayer> poolingLayer = ld.layerInstance.dynamicCast<PoolingLayer>();
if( !poolingLayer.empty() && !ld.consumers.empty() )
{
size_t i = 0, nconsumers = ld.consumers.size();
for( ; i < nconsumers; i++ )
if( ld.consumers[i].oid > 0 )
break;
// if there is no layer that takes the second output pin of the pooling layer
// on input then we don't need to compute the indices
if( i >= nconsumers )
{
poolingLayer->computeMaxIdx = false;
printf_(("\tsimplified pooling layer %s\n", poolingLayer->name.c_str()));
}
}
if (preferableBackend != DNN_BACKEND_OPENCV)
continue; // Go to the next layer.
// the optimization #3. if there is concat layer that concatenates channels
// the optimization #2. if there is concat layer that concatenates channels
// from the inputs together (i.e. axis == 1) then we make the inputs of
// the concat layer to write to the concatenation output buffer
// (and so we eliminate the concatenation layer, because the channels
@ -3022,6 +2998,205 @@ int Net::getLayerId(const String &layer)
return impl->getLayerId(layer);
}
String Net::dump()
{
CV_Assert(!empty());
std::ostringstream out;
std::map<int, LayerData>& map = impl->layers;
int prefBackend = impl->preferableBackend;
std::vector<std::vector<int> > skippedLayers;
std::vector<int> skipId;
std::vector<int> allLayers(map.size(), -1);
int idPrev = -1;
Ptr<BackendNode> prevNode;
for (std::map<int, LayerData>::reverse_iterator rit = map.rbegin(); rit != map.rend(); ++rit)
{
std::map<int, Ptr<BackendNode> >::iterator itBackend = rit->second.backendNodes.find(prefBackend);
if (prefBackend == DNN_BACKEND_OPENCV || itBackend == rit->second.backendNodes.end() ||
itBackend->second.empty())
{
if (rit->second.skip)
skipId.push_back(rit->first);
else if (!skipId.empty())
{
if (prefBackend == DNN_BACKEND_OPENCV || prevNode.empty())
skipId.push_back(rit->first);
else if (idPrev != -1)
skipId.push_back(idPrev);
std::sort(skipId.begin(), skipId.end());
for (int i = 0; i < skipId.size(); i++) {
allLayers[skipId[i]] = skippedLayers.size();
}
skippedLayers.push_back(skipId);
skipId.clear();
}
}
else
{
if (itBackend->second == prevNode)
skipId.push_back(idPrev);
else if (!skipId.empty())
{
skipId.push_back(idPrev);
std::sort(skipId.begin(), skipId.end());
for (int i = 0; i < skipId.size(); i++) {
allLayers[skipId[i]] = skippedLayers.size();
}
skippedLayers.push_back(skipId);
skipId.clear();
}
idPrev = rit->first;
prevNode = itBackend->second;
}
}
String colors[] = {"#ffffb3", "#fccde5", "#8dd3c7", "#bebada", "#80b1d3", "#fdb462"};
String backend;
switch (prefBackend) {
case DNN_BACKEND_DEFAULT: backend = "DEFAULT/"; break;
case DNN_BACKEND_HALIDE: backend = "HALIDE/"; break;
case DNN_BACKEND_INFERENCE_ENGINE: backend = "DLIE/"; break;
case DNN_BACKEND_OPENCV: backend = "OCV/"; break;
}
out << "digraph G {" << '\n';
// Add nodes
for (std::map<int, LayerData>::iterator it = map.begin(); it != map.end(); ++it)
{
String name = it->second.params.name;
if (allLayers[it->first] == -1 && !name.empty()) {
out << " " << "\"" << name << "\"" << " [label=\"";
skipId.clear();
skipId.push_back(it->first);
}
else if (name.empty() || it->first != skippedLayers[allLayers[it->first]][0])
continue;
else { // first node in cluster : it->first == skippedLayers[allLayers[it->first]][0]
int cluster = allLayers[it->first];
out << " " << "\"" << "cluster_" << cluster << "\"" << " [label=\"{";
skipId = skippedLayers[allLayers[it->first]]; // vertices in current cluster
}
for (int i = 0; i < skipId.size(); i++)
{
LayerParams& lp = map[skipId[i]].params;
if (!lp.name.empty()) {
if (i > 0) {
out << " | ";
}
out << lp.name << "\\n" << lp.type << "\\n";
if (lp.has("kernel_size")) {
DictValue size = lp.get("kernel_size");
out << "kernel (HxW): " << size << " x " << size << "\\l";
} else if (lp.has("kernel_h") && lp.has("kernel_w")) {
DictValue h = lp.get("kernel_h");
DictValue w = lp.get("kernel_w");
out << "kernel (HxW): " << h << " x " << w << "\\l";
}
if (lp.has("stride")) {
DictValue stride = lp.get("stride");
out << "stride (HxW): " << stride << " x " << stride << "\\l";
} else if (lp.has("stride_h") && lp.has("stride_w")) {
DictValue h = lp.get("stride_h");
DictValue w = lp.get("stride_w");
out << "stride (HxW): " << h << " x " << w << "\\l";
}
if (lp.has("dilation")) {
DictValue dilation = lp.get("dilation");
out << "dilation (HxW): " << dilation << " x " << dilation << "\\l";
} else if (lp.has("dilation_h") && lp.has("dilation_w")) {
DictValue h = lp.get("dilation_h");
DictValue w = lp.get("dilation_w");
out << "dilation (HxW): " << h << " x " << w << "\\l";
}
if (lp.has("pad")) {
DictValue pad = lp.get("pad");
out << "pad (LxTxRxB): " << pad << " x " << pad << " x " << pad << " x " << pad << "\\l";
} else if (lp.has("pad_l") && lp.has("pad_t") && lp.has("pad_r") && lp.has("pad_b")) {
DictValue l = lp.get("pad_l");
DictValue t = lp.get("pad_t");
DictValue r = lp.get("pad_r");
DictValue b = lp.get("pad_b");
out << "pad (LxTxRxB): " << l << " x " << t << " x " << r << " x " << b << "\\l";
}
else if (lp.has("pooled_w") || lp.has("pooled_h")) {
DictValue h = lp.get("pooled_h");
DictValue w = lp.get("pooled_w");
out << "pad (HxW): " << h << " x " << w << "\\l";
}
if (lp.has("pool")) {
out << "pool: " << lp.get("pool") << "\\l";
}
if (lp.has("global_pooling")) {
out << "global_pooling: " << lp.get("global_pooling") << "\\l";
}
if (lp.has("group")) {
out << "group: " << lp.get("group") << "\\l";
}
}
}
if (!it->second.outputBlobs.empty())
out << "output: " << it->second.outputBlobs[0].size << "\\l";
Ptr<BackendNode> layerBackend = it->second.backendNodes[prefBackend];
out << (!layerBackend.empty() ? backend : "OCV/");
int colorId = 0;
switch (it->second.layerInstance->preferableTarget) {
case DNN_TARGET_CPU: out << "CPU\\n"; colorId = layerBackend.empty() ? 0 : 5; break;
case DNN_TARGET_OPENCL: out << "OCL\\n"; colorId = 1; break;
case DNN_TARGET_OPENCL_FP16: out << "OCL_FP16\\n"; colorId = 2; break;
case DNN_TARGET_MYRIAD: out << "MYRIAD\\n"; colorId = 3; break;
case DNN_TARGET_FPGA: out << "FPGA\\n"; colorId = 4; break;
}
out << ((skipId.size() == 1)? "\" " : " }\" ");
out << "fillcolor=\"" << colors[colorId] << "\" ";
out << "style=filled ";
out << "shape=" << ((skipId.size() == 1)? "box" : "record") << "]" << '\n';
}
out << '\n';
// Add edges
int inputsSize = impl->netInputLayer->outNames.size();
for (std::map<int, LayerData>::iterator it = map.begin(); it != map.end(); ++it)
{
if (allLayers[it->first] == -1) // node
{
for (int i = 0; i < it->second.consumers.size(); i++)
{
int outId = it->second.consumers[i].lid;
if (it == map.begin() && inputsSize > 1)
out << " " << "\"" << it->second.name << "_" << i << "\"" << " -> ";
else
out << " " << "\"" << it->second.name << "\"" << " -> ";
if (allLayers[outId] == -1) // node
out << "\"" << map[outId].name << "\"" << '\n';
else // cluster
out << "\"" << "cluster_" << allLayers[outId] << "\"" << '\n';
}
}
else if (it->first == skippedLayers[allLayers[it->first]].back()) // edges from last layer in cluster
{
for (int i = 0; i < it->second.consumers.size(); i++)
{
int outId = it->second.consumers[i].lid;
if (allLayers[outId] == -1) { // node
out << " " << "\"" << "cluster_" << allLayers[it->first] << "\"" << " -> ";
out << "\"" << map[outId].name << "\"" << '\n';
}
else if (allLayers[outId] != allLayers[it->first]) { // another cluster
out << " " << "\"" << "cluster_" << allLayers[it->first] << "\"" << " -> ";
out << "\"" << "cluster_" << allLayers[outId] << "\"" << '\n';
}
}
}
}
out << "}";
return out.str();
}
void Net::dumpToFile(const String& path) {
std::ofstream file(path.c_str());
file << dump();
file.close();
}
Ptr<Layer> Net::getLayer(LayerId layerId)
{
LayerData &ld = impl->getLayerData(layerId);

@ -265,8 +265,11 @@ struct ReLUFunctor
bool supportBackend(int backendId, int)
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
return slope >= 0 || !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
#endif
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
backendId == DNN_BACKEND_INFERENCE_ENGINE ||
backendId == DNN_BACKEND_VKCOM;
}
@ -793,8 +796,11 @@ struct AbsValFunctor
bool supportBackend(int backendId, int)
{
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
backendId == DNN_BACKEND_INFERENCE_ENGINE;
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
return !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
#endif
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const

@ -159,8 +159,8 @@ public:
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
ieLayer.setType("Flatten");
ieLayer.getParameters()["axis"] = _startAxis;
ieLayer.getParameters()["end_axis"] = _endAxis;
ieLayer.getParameters()["axis"] = (size_t)_startAxis;
ieLayer.getParameters()["end_axis"] = _endAxis; // Do not cast to size_t because it might be negative.
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));

@ -12,6 +12,7 @@ Implementation of padding layer, which adds paddings to input blob.
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
#include <vector>
namespace cv
@ -68,28 +69,36 @@ public:
// Compute dstRanges.
const MatSize& inpShape = inputs[0].size;
dstRanges.resize(paddings.size());
int offset = 0;
if (inputDims != -1 && inputs[0].dims != inputDims)
{
dstRanges.insert(dstRanges.begin(), Range::all());
offset = 1;
paddings.insert(paddings.begin(), std::make_pair(0, 0));
}
dstRanges.resize(paddings.size());
for (int i = 0; i < paddings.size(); ++i)
{
dstRanges[offset + i].start = paddings[i].first;
dstRanges[offset + i].end = paddings[i].first + inpShape[offset + i];
dstRanges[i].start = paddings[i].first;
dstRanges[i].end = paddings[i].first + inpShape[i];
}
// Add the rest of dimensions.
for (int i = dstRanges.size(); i < inputs[0].dims; ++i)
{
dstRanges.push_back(Range::all());
paddings.push_back(std::make_pair(0, 0));
}
inputDims = -1; // Next time paddings are filled for all the dimensions.
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
(preferableTarget != DNN_TARGET_MYRIAD ||
(dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0));
#endif
return backendId == DNN_BACKEND_OPENCV ||
(backendId == DNN_BACKEND_HALIDE && haveHalide() && dstRanges.size() == 4);
}
@ -109,7 +118,7 @@ public:
{
std::vector<float> paddingValue_fp32(1, paddingValue);
std::vector<int16_t> paddingValue_fp16(1);
convertFp16(paddingValue_fp32, paddingValue_fp16);
cv::convertFp16(paddingValue_fp32, paddingValue_fp16);
outputs[0].setTo(paddingValue_fp16[0]);
}
else
@ -173,6 +182,32 @@ public:
return Ptr<BackendNode>();
}
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine::Builder::Layer ieLayer(name);
ieLayer.setName(name);
ieLayer.setType("Pad");
std::vector<int> begins(paddings.size(), 0), ends(paddings.size(), 0);
for (int i = 0; i < paddings.size(); ++i)
{
begins[i] = paddings[i].first;
ends[i] = paddings[i].second;
}
ieLayer.getParameters()["pads_begin"] = begins;
ieLayer.getParameters()["pads_end"] = ends;
ieLayer.getParameters()["pad_mode"] = paddingType;
if (paddingType == "constant")
ieLayer.getParameters()["pad_value"] = paddingValue;
ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
return Ptr<BackendNode>();
}
private:
std::vector<std::pair<int, int> > paddings; // Pairs pad before, pad after.
std::vector<Range> dstRanges;

@ -141,7 +141,7 @@ public:
#ifdef HAVE_OPENCL
poolOp.release();
#endif
computeMaxIdx = type == MAX;
computeMaxIdx = type == MAX && outputs.size() == 2;
}
virtual bool supportBackend(int backendId) CV_OVERRIDE

@ -1509,8 +1509,8 @@ void TFImporter::populateNet(Net dstNet)
if (layerParams.blobs.size() == 2)
CV_Error(Error::StsNotImplemented, "Cannot determine number "
"of parameters for batch normalization layer.");
mean = Mat::zeros(1, layerParams.blobs[3].total(), CV_32F);
std = Mat::ones(1, layerParams.blobs[3].total(), CV_32F);
mean = Mat::zeros(1, layerParams.blobs[2].total(), CV_32F);
std = Mat::ones(1, layerParams.blobs[2].total(), CV_32F);
// Add an extra layer: Mean-Variance normalization
LayerParams mvnParams;

@ -98,6 +98,7 @@ public:
TEST_P(DNNTestNetwork, AlexNet)
{
applyTestTag(CV_TEST_TAG_MEMORY_1GB);
processNet("dnn/bvlc_alexnet.caffemodel", "dnn/bvlc_alexnet.prototxt",
Size(227, 227), "prob",
target == DNN_TARGET_OPENCL ? "dnn/halide_scheduler_opencl_alexnet.yml" :
@ -106,6 +107,7 @@ TEST_P(DNNTestNetwork, AlexNet)
TEST_P(DNNTestNetwork, ResNet_50)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
processNet("dnn/ResNet-50-model.caffemodel", "dnn/ResNet-50-deploy.prototxt",
Size(224, 224), "prob",
target == DNN_TARGET_OPENCL ? "dnn/halide_scheduler_opencl_resnet_50.yml" :
@ -122,12 +124,14 @@ TEST_P(DNNTestNetwork, SqueezeNet_v1_1)
TEST_P(DNNTestNetwork, GoogLeNet)
{
applyTestTag(target == DNN_TARGET_CPU ? "" : CV_TEST_TAG_MEMORY_512MB);
processNet("dnn/bvlc_googlenet.caffemodel", "dnn/bvlc_googlenet.prototxt",
Size(224, 224), "prob");
}
TEST_P(DNNTestNetwork, Inception_5h)
{
applyTestTag(CV_TEST_TAG_MEMORY_512MB);
double l1 = default_l1, lInf = default_lInf;
if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_CPU || target == DNN_TARGET_OPENCL))
{
@ -142,6 +146,7 @@ TEST_P(DNNTestNetwork, Inception_5h)
TEST_P(DNNTestNetwork, ENet)
{
applyTestTag(target == DNN_TARGET_CPU ? "" : CV_TEST_TAG_MEMORY_512MB);
if ((backend == DNN_BACKEND_INFERENCE_ENGINE) ||
(backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("");
@ -153,6 +158,7 @@ TEST_P(DNNTestNetwork, ENet)
TEST_P(DNNTestNetwork, MobileNet_SSD_Caffe)
{
applyTestTag(CV_TEST_TAG_MEMORY_512MB);
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
Mat sample = imread(findDataFile("dnn/street.png", false));
@ -184,6 +190,7 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_Caffe_Different_Width_Height)
TEST_P(DNNTestNetwork, MobileNet_SSD_v1_TensorFlow)
{
applyTestTag(target == DNN_TARGET_CPU ? "" : CV_TEST_TAG_MEMORY_512MB);
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
Mat sample = imread(findDataFile("dnn/street.png", false));
@ -214,6 +221,7 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_v1_TensorFlow_Different_Width_Height)
TEST_P(DNNTestNetwork, MobileNet_SSD_v2_TensorFlow)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
Mat sample = imread(findDataFile("dnn/street.png", false));
@ -226,6 +234,8 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_v2_TensorFlow)
TEST_P(DNNTestNetwork, SSD_VGG16)
{
applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB),
CV_TEST_TAG_DEBUG_VERYLONG);
if (backend == DNN_BACKEND_HALIDE && target == DNN_TARGET_CPU)
throw SkipTestException("");
double scoreThreshold = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0325 : 0.0;
@ -238,6 +248,8 @@ TEST_P(DNNTestNetwork, SSD_VGG16)
TEST_P(DNNTestNetwork, OpenPose_pose_coco)
{
applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB),
CV_TEST_TAG_DEBUG_VERYLONG);
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
@ -254,6 +266,8 @@ TEST_P(DNNTestNetwork, OpenPose_pose_coco)
TEST_P(DNNTestNetwork, OpenPose_pose_mpi)
{
applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB),
CV_TEST_TAG_DEBUG_VERYLONG);
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
@ -270,6 +284,7 @@ TEST_P(DNNTestNetwork, OpenPose_pose_mpi)
TEST_P(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages)
{
applyTestTag(CV_TEST_TAG_LONG, CV_TEST_TAG_MEMORY_1GB);
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
@ -289,12 +304,7 @@ TEST_P(DNNTestNetwork, OpenFace)
#if INF_ENGINE_VER_MAJOR_EQ(2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is disabled for Myriad targets");
#elif INF_ENGINE_VER_MAJOR_GE(2019010000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX target");
#else
#elif INF_ENGINE_VER_MAJOR_EQ(2018030000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
throw SkipTestException("Test has been fixed in OpenVINO 2018R4");
#endif
@ -318,6 +328,7 @@ TEST_P(DNNTestNetwork, opencv_face_detector)
TEST_P(DNNTestNetwork, Inception_v2_SSD_TensorFlow)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
@ -335,6 +346,7 @@ TEST_P(DNNTestNetwork, Inception_v2_SSD_TensorFlow)
TEST_P(DNNTestNetwork, DenseNet_121)
{
applyTestTag(CV_TEST_TAG_MEMORY_512MB);
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
// Reference output values are in range [-3.807, 4.605]

@ -112,6 +112,8 @@ TEST(Test_Caffe, read_googlenet)
typedef testing::TestWithParam<tuple<bool, Target> > Reproducibility_AlexNet;
TEST_P(Reproducibility_AlexNet, Accuracy)
{
Target targetId = get<1>(GetParam());
applyTestTag(targetId == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
bool readFromMemory = get<0>(GetParam());
Net net;
{
@ -132,7 +134,6 @@ TEST_P(Reproducibility_AlexNet, Accuracy)
ASSERT_FALSE(net.empty());
}
int targetId = get<1>(GetParam());
const float l1 = 1e-5;
const float lInf = (targetId == DNN_TARGET_OPENCL_FP16) ? 3e-3 : 1e-4;
@ -151,9 +152,9 @@ TEST_P(Reproducibility_AlexNet, Accuracy)
INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_AlexNet, Combine(testing::Bool(),
Values(DNN_TARGET_CPU, DNN_TARGET_OPENCL, DNN_TARGET_OPENCL_FP16)));
#if !defined(_WIN32) || defined(_WIN64)
TEST(Reproducibility_FCN, Accuracy)
{
applyTestTag(CV_TEST_TAG_LONG, CV_TEST_TAG_MEMORY_2GB);
Net net;
{
const string proto = findDataFile("dnn/fcn8s-heavy-pascal.prototxt", false);
@ -179,10 +180,10 @@ TEST(Reproducibility_FCN, Accuracy)
normAssert(ref, out);
}
#endif
TEST(Reproducibility_SSD, Accuracy)
{
applyTestTag(CV_TEST_TAG_MEMORY_512MB);
Net net;
{
const string proto = findDataFile("dnn/ssd_vgg16.prototxt", false);
@ -264,10 +265,11 @@ INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_MobileNet_SSD,
typedef testing::TestWithParam<Target> Reproducibility_ResNet50;
TEST_P(Reproducibility_ResNet50, Accuracy)
{
Target targetId = GetParam();
applyTestTag(targetId == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
Net net = readNetFromCaffe(findDataFile("dnn/ResNet-50-deploy.prototxt", false),
findDataFile("dnn/ResNet-50-model.caffemodel", false));
int targetId = GetParam();
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(targetId);
@ -330,6 +332,7 @@ INSTANTIATE_TEST_CASE_P(/**/, Reproducibility_SqueezeNet_v1_1,
TEST(Reproducibility_AlexNet_fp16, Accuracy)
{
applyTestTag(CV_TEST_TAG_MEMORY_512MB);
const float l1 = 1e-5;
const float lInf = 3e-3;
@ -375,6 +378,7 @@ TEST(Reproducibility_GoogLeNet_fp16, Accuracy)
// https://github.com/richzhang/colorization
TEST_P(Test_Caffe_nets, Colorization)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
checkBackend();
Mat inp = blobFromNPY(_tf("colorization_inp.npy"));
@ -405,6 +409,7 @@ TEST_P(Test_Caffe_nets, Colorization)
TEST_P(Test_Caffe_nets, DenseNet_121)
{
applyTestTag(CV_TEST_TAG_MEMORY_512MB);
checkBackend();
const string proto = findDataFile("dnn/DenseNet_121.prototxt", false);
const string model = findDataFile("dnn/DenseNet_121.caffemodel", false);
@ -520,6 +525,8 @@ INSTANTIATE_TEST_CASE_P(Test_Caffe, opencv_face_detector,
TEST_P(Test_Caffe_nets, FasterRCNN_vgg16)
{
applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB));
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("Test is disabled for DLIE OpenCL targets"); // very slow
@ -536,6 +543,7 @@ TEST_P(Test_Caffe_nets, FasterRCNN_vgg16)
TEST_P(Test_Caffe_nets, FasterRCNN_zf)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
if ((backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16) ||
(backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
@ -547,6 +555,7 @@ TEST_P(Test_Caffe_nets, FasterRCNN_zf)
TEST_P(Test_Caffe_nets, RFCN)
{
applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_2GB));
if ((backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16) ||
(backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
throw SkipTestException("");

@ -78,6 +78,7 @@ TEST(Test_Darknet, read_yolo_voc)
TEST(Test_Darknet, read_yolo_voc_stream)
{
applyTestTag(CV_TEST_TAG_MEMORY_1GB);
Mat ref;
Mat sample = imread(_tf("dog416.png"));
Mat inp = blobFromImage(sample, 1.0/255, Size(416, 416), Scalar(), true, false);
@ -267,6 +268,8 @@ public:
TEST_P(Test_Darknet_nets, YoloVoc)
{
applyTestTag(CV_TEST_TAG_LONG, CV_TEST_TAG_MEMORY_1GB);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
throw SkipTestException("Test is disabled");
@ -305,6 +308,8 @@ TEST_P(Test_Darknet_nets, YoloVoc)
TEST_P(Test_Darknet_nets, TinyYoloVoc)
{
applyTestTag(CV_TEST_TAG_MEMORY_512MB);
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
@ -339,6 +344,8 @@ TEST_P(Test_Darknet_nets, TinyYoloVoc)
TEST_P(Test_Darknet_nets, YOLOv3)
{
applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB));
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)

@ -561,12 +561,6 @@ TEST_P(ReLU, Accuracy)
float negativeSlope = get<0>(GetParam());
Backend backendId = get<0>(get<1>(GetParam()));
Target targetId = get<1>(get<1>(GetParam()));
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE
&& negativeSlope < 0
)
throw SkipTestException("Test is disabled");
#endif
LayerParams lp;
lp.set("negative_slope", negativeSlope);
@ -589,13 +583,6 @@ TEST_P(NoParamActivation, Accuracy)
LayerParams lp;
lp.type = get<0>(GetParam());
lp.name = "testLayer";
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE
&& lp.type == "AbsVal"
)
throw SkipTestException("Test is disabled");
#endif
testInPlaceActivation(lp, backendId, targetId);
}
INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, NoParamActivation, Combine(

@ -217,6 +217,7 @@ INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets());
class Test_ONNX_nets : public Test_ONNX_layers {};
TEST_P(Test_ONNX_nets, Alexnet)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
const String model = _tf("models/alexnet.onnx");
Net net = readNetFromONNX(model);
@ -270,31 +271,30 @@ TEST_P(Test_ONNX_nets, Googlenet)
TEST_P(Test_ONNX_nets, CaffeNet)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
testONNXModels("caffenet", pb);
}
TEST_P(Test_ONNX_nets, RCNN_ILSVRC13)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
// Reference output values are in range [-4.992, -1.161]
testONNXModels("rcnn_ilsvrc13", pb, 0.0045);
}
#ifdef OPENCV_32BIT_CONFIGURATION
TEST_P(Test_ONNX_nets, DISABLED_VGG16) // memory usage >2Gb
#else
TEST_P(Test_ONNX_nets, VGG16)
#endif
{
applyTestTag(CV_TEST_TAG_MEMORY_6GB); // > 2.3Gb
// output range: [-69; 72], after Softmax [0; 0.96]
testONNXModels("vgg16", pb, default_l1, default_lInf, true);
}
#ifdef OPENCV_32BIT_CONFIGURATION
TEST_P(Test_ONNX_nets, DISABLED_VGG16_bn) // memory usage >2Gb
#else
TEST_P(Test_ONNX_nets, VGG16_bn)
#endif
{
applyTestTag(CV_TEST_TAG_MEMORY_6GB); // > 2.3Gb
// output range: [-16; 27], after Softmax [0; 0.67]
const double lInf = (target == DNN_TARGET_MYRIAD) ? 0.038 : default_lInf;
testONNXModels("vgg16-bn", pb, default_l1, lInf, true);
@ -302,23 +302,30 @@ TEST_P(Test_ONNX_nets, VGG16_bn)
TEST_P(Test_ONNX_nets, ZFNet)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
testONNXModels("zfnet512", pb);
}
TEST_P(Test_ONNX_nets, ResNet18v1)
{
applyTestTag(CV_TEST_TAG_MEMORY_512MB);
// output range: [-16; 22], after Softmax [0, 0.51]
testONNXModels("resnet18v1", pb, default_l1, default_lInf, true);
}
TEST_P(Test_ONNX_nets, ResNet50v1)
{
applyTestTag(CV_TEST_TAG_MEMORY_512MB);
// output range: [-67; 75], after Softmax [0, 0.98]
testONNXModels("resnet50v1", pb, default_l1, default_lInf, true);
}
TEST_P(Test_ONNX_nets, ResNet101_DUC_HDC)
{
applyTestTag(CV_TEST_TAG_VERYLONG);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("Test is disabled for DLIE targets");
@ -334,6 +341,8 @@ TEST_P(Test_ONNX_nets, ResNet101_DUC_HDC)
TEST_P(Test_ONNX_nets, TinyYolov2)
{
applyTestTag(CV_TEST_TAG_MEMORY_512MB);
if (cvtest::skipUnstableTests)
throw SkipTestException("Skip unstable test");
#if defined(INF_ENGINE_RELEASE)
@ -347,6 +356,7 @@ TEST_P(Test_ONNX_nets, TinyYolov2)
)
throw SkipTestException("Test is disabled for MyriadX");
#endif
// output range: [-11; 8]
double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.017 : default_l1;
double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.14 : default_lInf;
@ -367,6 +377,7 @@ TEST_P(Test_ONNX_nets, MobileNet_v2)
TEST_P(Test_ONNX_nets, LResNet100E_IR)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
(target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_OPENCL || target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
@ -379,7 +390,7 @@ TEST_P(Test_ONNX_nets, LResNet100E_IR)
lInf = 0.035;
}
else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_CPU) {
l1 = 4.5e-5;
l1 = 4.6e-5;
lInf = 1.9e-4;
}
testONNXModels("LResNet100E_IR", pb, l1, lInf);
@ -419,6 +430,8 @@ TEST_P(Test_ONNX_nets, Inception_v2)
TEST_P(Test_ONNX_nets, DenseNet121)
{
applyTestTag(CV_TEST_TAG_MEMORY_512MB);
// output range: [-87; 138], after Softmax [0; 1]
testONNXModels("densenet121", pb, default_l1, default_lInf, true);
}

@ -140,10 +140,6 @@ TEST_P(Test_TensorFlow_layers, padding)
TEST_P(Test_TensorFlow_layers, padding_same)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("Test is disabled for DLIE");
#endif
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
@ -251,10 +247,6 @@ TEST_P(Test_TensorFlow_layers, reshape)
TEST_P(Test_TensorFlow_layers, flatten)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("Test is disabled for DLIE");
#endif
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2
@ -267,11 +259,6 @@ TEST_P(Test_TensorFlow_layers, flatten)
TEST_P(Test_TensorFlow_layers, unfused_flatten)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("Test is disabled for DLIE");
#endif
runTensorFlowNet("unfused_flatten");
runTensorFlowNet("unfused_flatten_unknown_batch");
}
@ -320,11 +307,14 @@ class Test_TensorFlow_nets : public DNNTestLayer {};
TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
{
checkBackend();
if ((backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU) ||
(backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("");
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
)
throw SkipTestException("Test is disabled for MyriadX");
#endif
checkBackend();
std::string netPath = findDataFile("dnn/ssd_mobilenet_v1_coco.pb", false);
std::string netConfig = findDataFile("dnn/ssd_mobilenet_v1_coco.pbtxt", false);
std::string imgPath = findDataFile("dnn/street.png", false);
@ -333,34 +323,24 @@ TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
resize(imread(imgPath), inp, Size(300, 300));
inp = blobFromImage(inp, 1.0f / 127.5, Size(), Scalar(127.5, 127.5, 127.5), true);
std::vector<String> outNames(3);
outNames[0] = "concat";
outNames[1] = "concat_1";
outNames[2] = "detection_out";
std::vector<Mat> refs(outNames.size());
for (int i = 0; i < outNames.size(); ++i)
{
std::string path = findDataFile("dnn/tensorflow/ssd_mobilenet_v1_coco." + outNames[i] + ".npy", false);
refs[i] = blobFromNPY(path);
}
Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/ssd_mobilenet_v1_coco.detection_out.npy", false));
Net net = readNetFromTensorflow(netPath, netConfig);
net.setPreferableBackend(backend);
net.setPreferableTarget(target);
net.setInput(inp);
Mat out = net.forward();
std::vector<Mat> output;
net.forward(output, outNames);
normAssert(refs[0].reshape(1, 1), output[0].reshape(1, 1), "", 1e-5, 1.5e-4);
normAssert(refs[1].reshape(1, 1), output[1].reshape(1, 1), "", 1e-5, 3e-4);
normAssertDetections(refs[2], output[2], "", 0.2);
double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0043 : default_l1;
double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.037 : default_lInf;
normAssertDetections(ref, out, "", 0.2, scoreDiff, iouDiff);
}
TEST_P(Test_TensorFlow_nets, Inception_v2_SSD)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
@ -426,6 +406,7 @@ TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD)
TEST_P(Test_TensorFlow_nets, Faster_RCNN)
{
applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB)); // FIXIT split test
static std::string names[] = {"faster_rcnn_inception_v2_coco_2018_01_28",
"faster_rcnn_resnet50_coco_2018_01_28"};
@ -521,6 +502,8 @@ TEST_P(Test_TensorFlow_nets, opencv_face_detector_uint8)
// np.save('east_text_detection.geometry.npy', geometry)
TEST_P(Test_TensorFlow_nets, EAST_text_detection)
{
applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is disabled for Myriad targets");
@ -597,10 +580,6 @@ TEST_P(Test_TensorFlow_layers, fp16_weights)
TEST_P(Test_TensorFlow_layers, fp16_padding_same)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
throw SkipTestException("Test is disabled for DLIE");
#endif
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
@ -695,6 +674,7 @@ TEST(Test_TensorFlow, two_inputs)
TEST(Test_TensorFlow, Mask_RCNN)
{
applyTestTag(CV_TEST_TAG_MEMORY_1GB);
std::string proto = findDataFile("dnn/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt", false);
std::string model = findDataFile("dnn/mask_rcnn_inception_v2_coco_2018_01_28.pb", false);

@ -345,6 +345,7 @@ static void normAssertSegmentation(const Mat& ref, const Mat& test)
TEST_P(Test_Torch_nets, ENet_accuracy)
{
applyTestTag(target == DNN_TARGET_CPU ? "" : CV_TEST_TAG_MEMORY_512MB);
checkBackend();
if (backend == DNN_BACKEND_INFERENCE_ENGINE ||
(backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))

@ -18,6 +18,8 @@ endif()
add_subdirectory(bindings)
add_subdirectory(test)
if(NOT OPENCV_SKIP_PYTHON_LOADER)
include("./python_loader.cmake")
message(STATUS "OpenCV Python: during development append to PYTHONPATH: ${CMAKE_BINARY_DIR}/python_loader")

@ -0,0 +1,32 @@
set(MODULE_NAME "python_tests")
set(OPENCV_MODULE_IS_PART_OF_WORLD FALSE)
ocv_add_module(${MODULE_NAME} INTERNAL)
set(OPENCV_PYTHON_TESTS_CONFIG_FILE_DIR "${OpenCV_BINARY_DIR}" CACHE INTERNAL "")
set(OPENCV_PYTHON_TESTS_CONFIG_FILE "${OPENCV_PYTHON_TESTS_CONFIG_FILE_DIR}/opencv_python_tests.cfg" CACHE INTERNAL "")
# get list of modules to wrap
set(OPENCV_PYTHON_MODULES)
foreach(m ${OPENCV_MODULES_BUILD})
if(";${OPENCV_MODULE_${m}_WRAPPERS};" MATCHES ";python.*;" AND HAVE_${m})
list(APPEND OPENCV_PYTHON_MODULES ${m})
#message(STATUS "\t${m}")
endif()
endforeach()
file(RELATIVE_PATH __loc_relative "${OPENCV_PYTHON_TESTS_CONFIG_FILE_DIR}" "${CMAKE_CURRENT_LIST_DIR}")
set(opencv_tests_locations "${__loc_relative}")
foreach(m ${OPENCV_PYTHON_MODULES})
set(__loc "${OPENCV_MODULE_${m}_LOCATION}/misc/python/test")
if(EXISTS "${__loc}")
file(RELATIVE_PATH __loc_relative "${OPENCV_PYTHON_TESTS_CONFIG_FILE_DIR}" "${__loc}")
list(APPEND opencv_tests_locations "${__loc_relative}")
endif()
endforeach(m)
string(REPLACE ";" "\n" opencv_tests_locations_ "${opencv_tests_locations}")
ocv_update_file("${OPENCV_PYTHON_TESTS_CONFIG_FILE}" "${opencv_tests_locations_}")
#
# TODO: Install rules (with test data?)
#

@ -1,4 +1,9 @@
#!/usr/bin/env python
'''
Location of tests:
- <opencv_src>/modules/python/test
- <opencv_src>/modules/<module>/misc/python/test/
'''
from __future__ import print_function
@ -20,7 +25,35 @@ from tests_common import NewOpenCVTests
basedir = os.path.abspath(os.path.dirname(__file__))
def load_tests(loader, tests, pattern):
tests.addTests(loader.discover(basedir, pattern=os.environ.get('OPENCV_PYTEST_FILTER', 'test_') + '*.py'))
cwd = os.getcwd()
config_file = 'opencv_python_tests.cfg'
locations = [cwd, basedir]
if os.path.exists(config_file):
with open(config_file, 'r') as f:
locations += [str(s).strip() for s in f.readlines()]
else:
print('WARNING: OpenCV tests config file ({}) is missing, running subset of tests'.format(config_file))
tests_pattern = os.environ.get('OPENCV_PYTEST_FILTER', 'test_') + '*.py'
if tests_pattern != 'test_*.py':
print('Tests filter: {}'.format(tests_pattern))
processed = set()
for l in locations:
if not os.path.isabs(l):
l = os.path.normpath(os.path.join(cwd, l))
if l in processed:
continue
processed.add(l)
print('Discovering python tests from: {}'.format(l))
sys_path_modify = l not in sys.path
if sys_path_modify:
sys.path.append(l) # Hack python loader
discovered_tests = loader.discover(l, pattern=tests_pattern, top_level_dir=l)
print(' found {} tests'.format(discovered_tests.countTestCases()))
tests.addTests(loader.discover(l, pattern=tests_pattern))
if sys_path_modify:
sys.path.remove(l)
return tests
if __name__ == '__main__':

@ -51,6 +51,44 @@
# endif
#endif
// most part of OpenCV tests are fit into 200Mb limit, but some tests are not:
// Note: due memory fragmentation real limits are usually lower on 20-25% (400Mb memory usage goes into mem_1Gb class)
#define CV_TEST_TAG_MEMORY_512MB "mem_512mb" // used memory: 200..512Mb - enabled by default
#define CV_TEST_TAG_MEMORY_1GB "mem_1gb" // used memory: 512Mb..1Gb - enabled by default
#define CV_TEST_TAG_MEMORY_2GB "mem_2gb" // used memory: 1..2Gb - enabled by default on 64-bit configuration (32-bit - disabled)
#define CV_TEST_TAG_MEMORY_6GB "mem_6gb" // used memory: 2..6Gb - disabled by default
#define CV_TEST_TAG_MEMORY_14GB "mem_14gb" // used memory: 6..14Gb - disabled by default
// Large / huge video streams or complex workloads
#define CV_TEST_TAG_LONG "long" // 5+ seconds on modern desktop machine (single thread)
#define CV_TEST_TAG_VERYLONG "verylong" // 20+ seconds on modern desktop machine (single thread)
// Large / huge video streams or complex workloads for debug builds
#define CV_TEST_TAG_DEBUG_LONG "debug_long" // 10+ seconds on modern desktop machine (single thread)
#define CV_TEST_TAG_DEBUG_VERYLONG "debug_verylong" // 40+ seconds on modern desktop machine (single thread)
// Lets skip processing of high resolution images via instrumentation tools (valgrind/coverage/sanitizers).
// It is enough to run lower resolution (VGA: 640x480) tests.
#define CV_TEST_TAG_SIZE_HD "size_hd" // 720p+, enabled
#define CV_TEST_TAG_SIZE_FULLHD "size_fullhd" // 1080p+, enabled (disable these tests for valgrind/coverage run)
#define CV_TEST_TAG_SIZE_4K "size_4k" // 2160p+, enabled (disable these tests for valgrind/coverage run)
// Other misc test tags
#define CV_TEST_TAG_TYPE_64F "type_64f" // CV_64F, enabled (disable these tests on low power embedded devices)
// Kernel-based image processing
#define CV_TEST_TAG_FILTER_SMALL "filter_small" // Filtering with kernels <= 3x3
#define CV_TEST_TAG_FILTER_MEDIUM "filter_medium" // Filtering with kernels: 3x3 < kernel <= 5x5
#define CV_TEST_TAG_FILTER_LARGE "filter_large" // Filtering with kernels: 5x5 < kernel <= 9x9
#define CV_TEST_TAG_FILTER_HUGE "filter_huge" // Filtering with kernels: > 9x9
// Other test categories
#define CV_TEST_TAG_OPENCL "opencl" // Tests with OpenCL
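As a usage sketch (the test name is made up; applyTestTag and the tag macros are the ones declared in this header), a test that allocates roughly 1.5Gb and runs for tens of seconds would declare its footprint up front:

TEST(Sample, heavy_case)    // hypothetical test
{
    // Throws SkipTestException if any of these tags (or a tag they imply)
    // is in the active skip list, so the expensive body is never reached.
    applyTestTag(CV_TEST_TAG_MEMORY_2GB, CV_TEST_TAG_VERYLONG);
    // ... actual test body ...
}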
#ifdef WINRT
#pragma warning(disable:4447) // Disable warning 'main' signature found without threading model
#endif
@ -150,6 +188,30 @@ public:
SkipTestException(const cv::String& message) : dummy(0) { this->msg = message; }
};
/** Apply a tag to the current test
Corresponding implied tags are applied automatically (for example, 4K => FHD => HD => VGA).
If the tag is in the skip list, SkipTestException is thrown.
*/
void applyTestTag(const std::string& tag);
/** Run postponed checks of the applied test tags
If any applied tag is in the skip list, SkipTestException is thrown.
*/
void checkTestTags();
void applyTestTag_(const std::string& tag);
static inline void applyTestTag(const std::string& tag1, const std::string& tag2)
{ applyTestTag_(tag1); applyTestTag_(tag2); checkTestTags(); }
static inline void applyTestTag(const std::string& tag1, const std::string& tag2, const std::string& tag3)
{ applyTestTag_(tag1); applyTestTag_(tag2); applyTestTag_(tag3); checkTestTags(); }
static inline void applyTestTag(const std::string& tag1, const std::string& tag2, const std::string& tag3, const std::string& tag4)
{ applyTestTag_(tag1); applyTestTag_(tag2); applyTestTag_(tag3); applyTestTag_(tag4); checkTestTags(); }
class TS;
int64 readSeed(const char* str);

@ -13,6 +13,9 @@ void checkIppStatus();
extern bool skipUnstableTests;
extern bool runBigDataTests;
extern int testThreads;
void testSetUp();
void testTearDown();
}
// check for required "opencv_test" namespace
@ -24,13 +27,8 @@ extern int testThreads;
#define CV__TEST_INIT \
CV__TEST_NAMESPACE_CHECK \
fflush(stdout); fflush(stderr); \
cv::ipp::setIppStatus(0); \
cv::theRNG().state = cvtest::param_seed; \
cv::setNumThreads(cvtest::testThreads);
#define CV__TEST_CLEANUP \
fflush(stdout); fflush(stderr); \
::cvtest::checkIppStatus();
::cvtest::testSetUp();
#define CV__TEST_CLEANUP ::cvtest::testTearDown();
#define CV__TEST_BODY_IMPL(name) \
{ \
CV__TRACE_APP_FUNCTION_NAME(name); \

@ -527,7 +527,15 @@ void PrintTo(const Size& sz, ::std::ostream* os);
{ \
CV__TEST_NAMESPACE_CHECK \
CV__TRACE_APP_FUNCTION_NAME("PERF_TEST: " name); \
try { \
::cvtest::testSetUp(); \
RunPerfTestBody(); \
} \
catch (cvtest::SkipTestException& e) \
{ \
printf("[ SKIP ] %s\n", e.what()); \
} \
::cvtest::testTearDown(); \
}
#define PERF_PROXY_NAMESPACE_NAME_(test_case_name, test_name) \

@ -51,6 +51,7 @@ class TestInfo(object):
self.parseLongMetric(xmlnode, "stddev");
self.parseFloatMetric(xmlnode, "gstddev");
self.parseFloatMetric(xmlnode, "time");
self.parseLongMetric(xmlnode, "total_memory_usage");
def parseLongMetric(self, xmlnode, name, default = 0):
if name in self.properties:

@ -1,4 +1,6 @@
#include "opencv2/ts.hpp"
#include <opencv2/core/utils/logger.hpp>
#include "opencv2/core/utility.hpp"
#include "opencv2/core/private.hpp"
#ifdef GTEST_LINKED_AS_SHARED_LIBRARY

@ -91,11 +91,31 @@
#include "opencv2/core/opencl/opencl_info.hpp"
#include "opencv2/core/utils/allocator_stats.hpp"
namespace cv { namespace ocl {
cv::utils::AllocatorStatisticsInterface& getOpenCLAllocatorStatistics();
}}
#endif // HAVE_OPENCL
#include "opencv2/core/utility.hpp"
#include "opencv2/core/utils/allocator_stats.hpp"
namespace cv {
CV_EXPORTS cv::utils::AllocatorStatisticsInterface& getAllocatorStatistics();
}
#include "opencv_tests_config.hpp"
#include "ts_tags.hpp"
#if defined(__GNUC__) && defined(__linux__)
extern "C" {
size_t malloc_peak(void) __attribute__((weak));
void malloc_reset_peak(void) __attribute__((weak));
} // extern "C"
#else // stubs
static size_t (*malloc_peak)(void) = 0;
static void (*malloc_reset_peak)(void) = 0;
#endif
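A note on the weak-symbol mechanism above (a general sketch of the linking behaviour, not OpenCV-specific documentation): on GCC/Linux a weak declaration with no definition anywhere in the process resolves to a null address, while an external memory profiler (linked in or LD_PRELOAD-ed) that defines malloc_peak()/malloc_reset_peak() supplies real addresses. The guards in testSetUp()/testTearDown() below therefore behave like:

if (malloc_peak)            // non-null only when a profiler defines the symbol
    malloc_reset_peak();    // otherwise the profiling branch is skipped entirely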
namespace opencv_test {
bool required_opencv_test_namespace = false; // compilation check for non-refactored tests
}
@ -726,6 +746,85 @@ bool skipUnstableTests = false;
bool runBigDataTests = false;
int testThreads = 0;
static size_t memory_usage_base = 0;
static uint64_t memory_usage_base_opencv = 0;
#ifdef HAVE_OPENCL
static uint64_t memory_usage_base_opencl = 0;
#endif
void testSetUp()
{
cv::ipp::setIppStatus(0);
cv::theRNG().state = cvtest::param_seed;
cv::setNumThreads(cvtest::testThreads);
if (malloc_peak) // if memory profiler is available
{
malloc_reset_peak();
memory_usage_base = malloc_peak(); // equal to malloc_current()
}
{
cv::utils::AllocatorStatisticsInterface& ocv_stats = cv::getAllocatorStatistics();
ocv_stats.resetPeakUsage();
memory_usage_base_opencv = ocv_stats.getCurrentUsage();
}
#ifdef HAVE_OPENCL
{
cv::utils::AllocatorStatisticsInterface& ocl_stats = cv::ocl::getOpenCLAllocatorStatistics();
ocl_stats.resetPeakUsage();
memory_usage_base_opencl = ocl_stats.getCurrentUsage();
}
#endif
checkTestTags();
}
void testTearDown()
{
::cvtest::checkIppStatus();
uint64_t memory_usage = 0;
uint64_t ocv_memory_usage = 0, ocv_peak = 0;
if (malloc_peak) // if memory profiler is available
{
size_t peak = malloc_peak();
memory_usage = peak - memory_usage_base;
CV_LOG_INFO(NULL, "Memory_usage (malloc): " << memory_usage << " (base=" << memory_usage_base << ")");
}
{
// core/src/alloc.cpp: #define OPENCV_ALLOC_ENABLE_STATISTICS
// tracks large buffers allocated via fastAlloc()
// (not always accurate with heavy 3rdparty usage, e.g. protobuf)
cv::utils::AllocatorStatisticsInterface& ocv_stats = cv::getAllocatorStatistics();
ocv_peak = ocv_stats.getPeakUsage();
ocv_memory_usage = ocv_peak - memory_usage_base_opencv;
CV_LOG_INFO(NULL, "Memory_usage (OpenCV): " << ocv_memory_usage << " (base=" << memory_usage_base_opencv << " current=" << ocv_stats.getCurrentUsage() << ")");
if (memory_usage == 0) // external profiler has higher priority (and accuracy)
memory_usage = ocv_memory_usage;
}
#ifdef HAVE_OPENCL
uint64_t ocl_memory_usage = 0, ocl_peak = 0;
{
cv::utils::AllocatorStatisticsInterface& ocl_stats = cv::ocl::getOpenCLAllocatorStatistics();
ocl_peak = ocl_stats.getPeakUsage();
ocl_memory_usage = ocl_peak - memory_usage_base_opencl;
CV_LOG_INFO(NULL, "Memory_usage (OpenCL): " << ocl_memory_usage << " (base=" << memory_usage_base_opencl << " current=" << ocl_stats.getCurrentUsage() << ")");
::testing::Test::RecordProperty("ocl_memory_usage",
cv::format("%llu", (unsigned long long)ocl_memory_usage));
}
#else
uint64_t ocl_memory_usage = 0;
#endif
if (malloc_peak // external memory profiler is available
|| ocv_peak > 0 // or enabled OpenCV builtin allocation statistics
)
{
CV_LOG_INFO(NULL, "Memory usage total: " << (memory_usage + ocl_memory_usage));
::testing::Test::RecordProperty("memory_usage",
cv::format("%llu", (unsigned long long)memory_usage));
::testing::Test::RecordProperty("total_memory_usage",
cv::format("%llu", (unsigned long long)(memory_usage + ocl_memory_usage)));
}
}
void parseCustomOptions(int argc, char **argv)
{
const char * const command_line_keys =
@ -735,7 +834,9 @@ void parseCustomOptions(int argc, char **argv)
"{ skip_unstable |false |skip unstable tests }"
"{ test_bigdata |false |run BigData tests (>=2Gb) }"
"{ test_require_data |false |fail on missing non-required test data instead of skip}"
"{ h help |false |print help info }";
CV_TEST_TAGS_PARAMS
"{ h help |false |print help info }"
;
cv::CommandLineParser parser(argc, argv, command_line_keys);
if (parser.get<bool>("help"))
@ -759,8 +860,9 @@ void parseCustomOptions(int argc, char **argv)
skipUnstableTests = parser.get<bool>("skip_unstable");
runBigDataTests = parser.get<bool>("test_bigdata");
checkTestData = parser.get<bool>("test_require_data");
}
activateTestTags(parser);
}
static bool isDirectory(const std::string& path)
{

@ -1,5 +1,7 @@
#include "precomp.hpp"
#include "ts_tags.hpp"
#include <map>
#include <iostream>
#include <fstream>
@ -999,6 +1001,8 @@ void TestBase::Init(const std::vector<std::string> & availableImpls,
"{ perf_cuda_info_only |false |print an information about system and an available CUDA devices and then exit.}"
#endif
"{ skip_unstable |false |skip unstable tests }"
CV_TEST_TAGS_PARAMS
;
cv::CommandLineParser args(argc, argv, command_line_keys);
@ -1145,6 +1149,8 @@ void TestBase::Init(const std::vector<std::string> & availableImpls,
::testing::AddGlobalTestEnvironment(new PerfValidationEnvironment());
}
activateTestTags(args);
if (!args.check())
{
args.printErrors();
@ -1869,14 +1875,15 @@ void TestBase::SetUp()
currentIter = (unsigned int)-1;
timeLimit = timeLimitDefault;
times.clear();
metrics.terminationReason = performance_metrics::TERM_SKIP_TEST;
}
void TestBase::TearDown()
{
if (metrics.terminationReason == performance_metrics::TERM_SKIP_TEST)
{
LOGI("\tTest was skipped");
GTEST_SUCCEED() << "Test was skipped";
//LOGI("\tTest was skipped");
//GTEST_SUCCEED() << "Test was skipped";
}
else
{
@ -1975,6 +1982,7 @@ std::string TestBase::getDataPath(const std::string& relativePath)
void TestBase::RunPerfTestBody()
{
metrics.clear();
try
{
#ifdef CV_COLLECT_IMPL_DATA
@ -1990,7 +1998,7 @@ void TestBase::RunPerfTestBody()
catch(const SkipTestException&)
{
metrics.terminationReason = performance_metrics::TERM_SKIP_TEST;
return;
throw;
}
catch(const PerfSkipTestException&)
{

@ -0,0 +1,471 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "precomp.hpp"
#include "ts_tags.hpp"
namespace cvtest {
static bool printTestTag = false;
static std::vector<std::string> currentDirectTestTags, currentImpliedTestTags;
static std::vector<const ::testing::TestInfo*> skipped_tests;
static std::vector<std::string>& getTestTagsSkipList()
{
static std::vector<std::string> testSkipWithTags;
static bool initialized = false;
if (!initialized)
{
#if OPENCV_32BIT_CONFIGURATION
testSkipWithTags.push_back(CV_TEST_TAG_MEMORY_2GB);
#else
testSkipWithTags.push_back(CV_TEST_TAG_MEMORY_6GB);
#endif
testSkipWithTags.push_back(CV_TEST_TAG_VERYLONG);
#if defined(_DEBUG)
testSkipWithTags.push_back(CV_TEST_TAG_DEBUG_VERYLONG);
#endif
initialized = true;
}
return testSkipWithTags;
}
static std::vector<std::string>& getTestTagsForceList()
{
static std::vector<std::string> getTestTagsForceList;
return getTestTagsForceList;
}
static std::vector<std::string>& getTestTagsRequiredList()
{
static std::vector<std::string> getTestTagsRequiredList;
return getTestTagsRequiredList;
}
class TestTagsListener: public ::testing::EmptyTestEventListener
{
public:
void OnTestProgramStart(const ::testing::UnitTest& /*unit_test*/) CV_OVERRIDE
{
{
const std::vector<std::string>& tags = getTestTagsRequiredList();
std::ostringstream os, os_direct;
for (size_t i = 0; i < tags.size(); i++)
{
os << (i == 0 ? "'" : ", '") << tags[i] << "'";
os_direct << (i == 0 ? "" : ",") << tags[i];
}
std::string tags_str = os.str();
if (!tags.empty())
std::cout << "TEST: Run tests with tags: " << tags_str << std::endl;
::testing::Test::RecordProperty("test_tags", os_direct.str());
}
{
const std::vector<std::string>& tags = getTestTagsSkipList();
std::ostringstream os, os_direct;
for (size_t i = 0; i < tags.size(); i++)
{
os << (i == 0 ? "'" : ", '") << tags[i] << "'";
os_direct << (i == 0 ? "" : ",") << tags[i];
}
std::string tags_str = os.str();
if (!tags.empty())
std::cout << "TEST: Skip tests with tags: " << tags_str << std::endl;
::testing::Test::RecordProperty("test_tags_skip", os_direct.str());
}
{
const std::vector<std::string>& tags = getTestTagsForceList();
std::ostringstream os, os_direct;
for (size_t i = 0; i < tags.size(); i++)
{
os << (i == 0 ? "'" : ", '") << tags[i] << "'";
os_direct << (i == 0 ? "" : ",") << tags[i];
}
std::string tags_str = os.str();
if (!tags.empty())
std::cout << "TEST: Force tests with tags: " << tags_str << std::endl;
::testing::Test::RecordProperty("test_tags_force", os_direct.str());
}
}
void OnTestStart(const ::testing::TestInfo& test_info) CV_OVERRIDE
{
currentDirectTestTags.clear();
currentImpliedTestTags.clear();
const char* value_param_ = test_info.value_param();
if (value_param_)
{
std::string value_param(value_param_);
if (value_param.find("CV_64F") != std::string::npos
|| (value_param.find("64F") != std::string::npos
&& value_param.find(" 64F") != std::string::npos
&& value_param.find(",64F") != std::string::npos
&& value_param.find("(64F") != std::string::npos
)
)
applyTestTag_(CV_TEST_TAG_TYPE_64F);
if (value_param.find("1280x720") != std::string::npos)
applyTestTag_(CV_TEST_TAG_SIZE_HD);
if (value_param.find("1920x1080") != std::string::npos)
applyTestTag_(CV_TEST_TAG_SIZE_FULLHD);
if (value_param.find("3840x2160") != std::string::npos)
applyTestTag_(CV_TEST_TAG_SIZE_4K);
}
}
void OnTestEnd(const ::testing::TestInfo& /*test_info*/) CV_OVERRIDE
{
if (currentDirectTestTags.empty() && currentImpliedTestTags.empty())
{
if (printTestTag) std::cout << "[ TAGS ] No tags" << std::endl;
return;
}
std::ostringstream os;
std::ostringstream os_direct;
std::ostringstream os_implied;
{
const std::vector<std::string>& tags = currentDirectTestTags;
for (size_t i = 0; i < tags.size(); i++)
{
os << (i == 0 ? "" : ", ") << tags[i];
os_direct << (i == 0 ? "" : ",") << tags[i];
}
}
if (!currentImpliedTestTags.empty())
{
os << " (implied tags: ";
const std::vector<std::string>& tags = currentImpliedTestTags;
for (size_t i = 0; i < tags.size(); i++)
{
os << (i == 0 ? "" : ", ") << tags[i];
os_implied << (i == 0 ? "" : ",") << tags[i];
}
os << ")";
}
if (printTestTag) std::cout << "[ TAGS ] " << os.str() << std::endl;
::testing::Test::RecordProperty("tags", os_direct.str());
::testing::Test::RecordProperty("tags_implied", os_implied.str());
}
void OnTestIterationEnd(const ::testing::UnitTest& /*unit_test*/, int /*iteration*/) CV_OVERRIDE
{
if (!skipped_tests.empty())
{
std::cout << "[ SKIP ] " << skipped_tests.size() << " tests via tags" << std::endl;
}
skipped_tests.clear();
}
void OnTestProgramEnd(const ::testing::UnitTest& /*unit_test*/) CV_OVERRIDE
{
/*if (!skipped_tests.empty())
{
for (size_t i = 0; i < skipped_tests.size(); i++)
{
const ::testing::TestInfo* test_info = skipped_tests[i];
if (!test_info) continue;
std::cout << "- " << test_info->test_case_name() << "." << test_info->name() << std::endl;
}
}*/
}
};
static bool isTestTagForced(const std::string& testTag)
{
const std::vector<std::string>& forceTags = getTestTagsForceList();
for (size_t i = 0; i < forceTags.size(); ++i)
{
const std::string& forceTag = forceTags[i];
if (testTag == forceTag
|| (testTag.size() >= forceTag.size()
&& forceTag[forceTag.size() - 1] == '*'
&& forceTag.substr(0, forceTag.size() - 1) == testTag.substr(0, forceTag.size() - 1)
)
)
{
return true;
}
}
return false;
}
static bool isTestTagSkipped(const std::string& testTag, CV_OUT std::string& skippedByTag)
{
skippedByTag.clear();
const std::vector<std::string>& skipTags = getTestTagsSkipList();
for (size_t i = 0; i < skipTags.size(); ++i)
{
const std::string& skipTag = skipTags[i];
if (testTag == skipTag
|| (testTag.size() >= skipTag.size()
&& skipTag[skipTag.size() - 1] == '*'
&& skipTag.substr(0, skipTag.size() - 1) == testTag.substr(0, skipTag.size() - 1)
)
)
{
skippedByTag = skipTag;
return true;
}
}
return false;
}
void checkTestTags()
{
std::string skipTag;
const std::vector<std::string>& testTags = currentDirectTestTags;
{
const std::vector<std::string>& tags = getTestTagsRequiredList();
if (!tags.empty())
{
size_t found = 0;
for (size_t i = 0; i < tags.size(); ++i)
{
const std::string& tag = tags[i];
for (size_t j = 0; j < testTags.size(); ++j)
{
const std::string& testTag = testTags[j];
if (testTag == tag
|| (testTag.size() >= tag.size()
&& tag[tag.size() - 1] == '*'
&& tag.substr(0, tag.size() - 1) == testTag.substr(0, tag.size() - 1)
)
)
{
found++;
break;
}
}
}
if (found != tags.size())
{
skipped_tests.push_back(::testing::UnitTest::GetInstance()->current_test_info());
throw SkipTestException("Test tags don't pass required tags list (--test_tag parameter)");
}
}
}
for (size_t i = 0; i < testTags.size(); ++i)
{
const std::string& testTag = testTags[i];
if (isTestTagForced(testTag))
return;
}
for (size_t i = 0; i < testTags.size(); ++i)
{
const std::string& testTag = testTags[i];
if (isTestTagSkipped(testTag, skipTag))
{
skipped_tests.push_back(::testing::UnitTest::GetInstance()->current_test_info());
throw SkipTestException("Test with tag '" + testTag + "' is skipped ('" + skipTag + "' is in skip list)");
}
}
const std::vector<std::string>& testTagsImplied = currentImpliedTestTags;
for (size_t i = 0; i < testTagsImplied.size(); ++i)
{
const std::string& testTag = testTagsImplied[i];
if (isTestTagSkipped(testTag, skipTag))
{
skipped_tests.push_back(::testing::UnitTest::GetInstance()->current_test_info());
throw SkipTestException("Test with tag '" + testTag + "' is skipped ('" + skipTag + "' is in skip list)");
}
}
}
static bool applyTestTagImpl(const std::string& tag, bool direct = false)
{
CV_Assert(!tag.empty());
std::vector<std::string>& testTags = direct ? currentDirectTestTags : currentImpliedTestTags;
for (size_t i = 0; i < testTags.size(); ++i)
{
const std::string& testTag = testTags[i];
if (tag == testTag)
{
return false; // already exists, skip
}
}
testTags.push_back(tag);
// Tags implies logic
if (tag == CV_TEST_TAG_MEMORY_14GB)
applyTestTagImpl(CV_TEST_TAG_MEMORY_6GB);
if (tag == CV_TEST_TAG_MEMORY_6GB)
applyTestTagImpl(CV_TEST_TAG_MEMORY_2GB);
if (tag == CV_TEST_TAG_MEMORY_2GB)
applyTestTagImpl(CV_TEST_TAG_MEMORY_1GB);
if (tag == CV_TEST_TAG_MEMORY_1GB)
applyTestTagImpl(CV_TEST_TAG_MEMORY_512MB);
if (tag == CV_TEST_TAG_VERYLONG)
{
applyTestTagImpl(CV_TEST_TAG_DEBUG_VERYLONG);
applyTestTagImpl(CV_TEST_TAG_LONG);
}
else if (tag == CV_TEST_TAG_DEBUG_VERYLONG)
{
applyTestTagImpl(CV_TEST_TAG_DEBUG_LONG);
}
else if (tag == CV_TEST_TAG_LONG)
{
applyTestTagImpl(CV_TEST_TAG_DEBUG_LONG);
}
if (tag == CV_TEST_TAG_SIZE_4K)
applyTestTagImpl(CV_TEST_TAG_SIZE_FULLHD);
if (tag == CV_TEST_TAG_SIZE_FULLHD)
applyTestTagImpl(CV_TEST_TAG_SIZE_HD);
return true;
}
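To make the implication chains above concrete (this only restates the branches, it adds no behaviour):

// applyTestTagImpl("mem_14gb")  -> also records mem_6gb, mem_2gb, mem_1gb, mem_512mb
// applyTestTagImpl("verylong")  -> also records debug_verylong, debug_long, long
// applyTestTagImpl("size_4k")   -> also records size_fullhd, size_hd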
void applyTestTag(const std::string& tag)
{
if (tag.empty()) return;
if (!applyTestTagImpl(tag, true))
return;
checkTestTags();
}
void applyTestTag_(const std::string& tag)
{
if (tag.empty()) return;
if (!applyTestTagImpl(tag, true))
return;
}
static std::vector<std::string> parseStringList(const std::string& s)
{
std::vector<std::string> result;
size_t start_pos = 0;
while (start_pos != std::string::npos)
{
while (start_pos < s.size() && s[start_pos] == ' ')
start_pos++;
const size_t pos_ = s.find(',', start_pos);
size_t pos = (pos_ == std::string::npos ? s.size() : pos_);
while (pos > start_pos && s[pos - 1] == ' ')
pos--;
if (pos > start_pos)
{
const std::string one_piece(s, start_pos, pos - start_pos);
result.push_back(one_piece);
}
start_pos = (pos_ == std::string::npos ? pos_ : pos_ + 1);
}
return result;
}
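A quick behavioural sketch of the parser above (the input string is illustrative):

// parseStringList("mem_6gb, verylong , ,debug_long")
//   -> { "mem_6gb", "verylong", "debug_long" }   // whitespace trimmed, empty items dropped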
void activateTestTags(const cv::CommandLineParser& parser)
{
std::string test_tag_skip = parser.get<std::string>("test_tag_skip");
if (!test_tag_skip.empty())
{
const std::vector<std::string> tag_list = parseStringList(test_tag_skip);
if (!tag_list.empty())
{
std::vector<std::string>& skipTags = getTestTagsSkipList();
for (size_t k = 0; k < tag_list.size(); ++k)
{
const std::string& tag = tag_list[k];
bool found = false;
for (size_t i = 0; i < skipTags.size(); ++i)
{
if (tag == skipTags[i])
{
found = true;
break;
}
}
if (!found)
skipTags.push_back(tag);
}
}
}
std::string test_tag_enable = parser.get<std::string>("test_tag_enable");
if (!test_tag_enable.empty())
{
const std::vector<std::string> tag_list = parseStringList(test_tag_enable);
if (!tag_list.empty())
{
std::vector<std::string>& skipTags = getTestTagsSkipList();
for (size_t k = 0; k < tag_list.size(); ++k)
{
const std::string& tag = tag_list[k];
bool found = false;
for (size_t i = 0; i < skipTags.size(); ++i)
{
if (tag == skipTags[i])
{
skipTags.erase(skipTags.begin() + i);
found = true;
}
}
if (!found)
{
std::cerr << "Can't re-enable tag '" << tag << "' - it is not in the skip list" << std::endl;
}
}
}
}
std::string test_tag_force = parser.get<std::string>("test_tag_force");
if (!test_tag_force.empty())
{
const std::vector<std::string> tag_list = parseStringList(test_tag_force);
if (!tag_list.empty())
{
std::vector<std::string>& forceTags = getTestTagsForceList();
for (size_t k = 0; k < tag_list.size(); ++k)
{
const std::string& tag = tag_list[k];
bool found = false;
for (size_t i = 0; i < forceTags.size(); ++i)
{
if (tag == forceTags[i])
{
found = true;
break;
}
}
if (!found)
forceTags.push_back(tag);
}
}
}
std::string test_tag = parser.get<std::string>("test_tag");
if (!test_tag.empty())
{
const std::vector<std::string> tag_list = parseStringList(test_tag);
if (!tag_list.empty())
{
std::vector<std::string>& requiredTags = getTestTagsRequiredList();
for (size_t k = 0; k < tag_list.size(); ++k)
{
const std::string& tag = tag_list[k];
bool found = false;
for (size_t i = 0; i < requiredTags.size(); ++i)
{
if (tag == requiredTags[i])
{
found = true;
break;
}
}
if (!found)
requiredTags.push_back(tag);
}
}
}
printTestTag = parser.get<bool>("test_tag_print");
::testing::UnitTest::GetInstance()->listeners().Append(new TestTagsListener());
}
} // namespace

@ -0,0 +1,26 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_TS_SRC_TAGS_HPP
#define OPENCV_TS_SRC_TAGS_HPP
// [all | test_tag] - (test_tag_skip - test_tag_enable) + test_tag_force
#define CV_TEST_TAGS_PARAMS \
"{ test_tag | |run tests with specified 'tag' markers only (comma ',' separated list) }" \
"{ test_tag_skip | |skip tests with 'tag' markers (comma ',' separated list) }" \
"{ test_tag_enable | |don't skip tests with 'tag' markers (comma ',' separated list) }" \
"{ test_tag_force | |force running of tests with 'tag' markers (comma ',' separated list) }" \
"{ test_tag_print | false |print assigned tags for each test }" \
// TODO
// "{ test_tag_file | |read test tags assignment }" \
namespace cvtest {
void activateTestTags(const cv::CommandLineParser& parser);
} // namespace
#endif // OPENCV_TS_SRC_TAGS_HPP
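Putting the selection formula at the top of this header into practice, some hypothetical invocations (the binary name is illustrative; the flags are the ones defined above) could be:

./opencv_test_core --test_tag_skip=mem_2gb,verylong          (additionally skip heavy tests)
./opencv_test_core --test_tag_enable=mem_6gb                 (remove a tag from the default skip list)
./opencv_test_core --test_tag=opencl --test_tag_print=true   (run only tests that carry the 'opencl' tag and print tags)
./opencv_test_core --test_tag_force=debug_verylong           (tests carrying this tag run even if another of their tags is in the skip list)

Note that --test_tag takes a comma-separated list and a test must carry all of the listed tags directly to be run.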

@ -1664,8 +1664,13 @@ static AVStream *icv_add_video_stream_FFMPEG(AVFormatContext *oc,
#endif
#if LIBAVCODEC_BUILD >= CALC_FFMPEG_VERSION(52, 42, 0)
#if defined(_MSC_VER)
AVRational avg_frame_rate = {frame_rate, frame_rate_base};
st->avg_frame_rate = avg_frame_rate;
#else
st->avg_frame_rate = (AVRational){frame_rate, frame_rate_base};
#endif
#endif
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(55, 20, 0)
st->time_base = c->time_base;
#endif
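For context on the _MSC_VER guard above: (AVRational){frame_rate, frame_rate_base} is a C99 compound literal, which GCC/Clang accept as an extension in C++ but MSVC does not, hence the named temporary. The same portable pattern in isolation (a sketch using the int num/den fields of FFmpeg's AVRational):

AVRational fps;
fps.num = frame_rate;       // numerator
fps.den = frame_rate_base;  // denominator
st->avg_frame_rate = fps;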

@ -19,6 +19,13 @@
fun:_ZN2cv20allocSingletonBufferEm
}
{
OpenCV-SingletonNewBuffer
Memcheck:Leak
...
fun:_ZN2cv23allocSingletonNewBufferEm
}
{
OpenCV-getStdAllocator
Memcheck:Leak
