Branch: pull/11678/head
Author: Kuang Fangjun, 7 years ago
Parent: 40a61ec64e
Commit: 9ae28415ec
25 changed files:
 1. CMakeLists.txt (10 lines changed)
 2. cmake/FindCUDA.cmake (12 lines changed)
 3. cmake/OpenCVCompilerOptions.cmake (2 lines changed)
 4. cmake/OpenCVFindLibsPerf.cmake (2 lines changed)
 5. cmake/OpenCVModule.cmake (6 lines changed)
 6. cmake/OpenCVPCHSupport.cmake (2 lines changed)
 7. doc/tutorials/viz/launching_viz/launching_viz.markdown (2 lines changed)
 8. modules/dnn/include/opencv2/dnn.hpp (6 lines changed)
 9. modules/dnn/include/opencv2/dnn/all_layers.hpp (16 lines changed)
10. modules/dnn/include/opencv2/dnn/dnn.hpp (16 lines changed)
11. modules/dnn/misc/quantize_face_detector.py (4 lines changed)
12. modules/dnn/src/dnn.cpp (6 lines changed)
13. modules/dnn/src/halide_scheduler.cpp (2 lines changed)
14. modules/dnn/src/layers/convolution_layer.cpp (2 lines changed)
15. modules/dnn/src/layers/detection_output_layer.cpp (2 lines changed)
16. modules/dnn/src/layers/eltwise_layer.cpp (2 lines changed)
17. modules/dnn/src/layers/prior_box_layer.cpp (6 lines changed)
18. modules/dnn/src/ocl4dnn/src/ocl4dnn_conv_spatial.cpp (2 lines changed)
19. modules/dnn/src/op_inf_engine.cpp (4 lines changed)
20. modules/dnn/src/tensorflow/graph.proto (2 lines changed)
21. modules/dnn/src/torch/torch_importer.cpp (6 lines changed)
22. modules/dnn/test/test_darknet_importer.cpp (4 lines changed)
23. modules/dnn/test/test_torch_importer.cpp (2 lines changed)
24. modules/viz/CMakeLists.txt (2 lines changed)
25. modules/viz/include/opencv2/viz/widgets.hpp (2 lines changed)

@@ -32,7 +32,7 @@ endif()
option(ENABLE_PIC "Generate position independent code (necessary for shared libraries)" TRUE)
set(CMAKE_POSITION_INDEPENDENT_CODE ${ENABLE_PIC})
-# Following block can break build in case of cross-compilng
+# Following block can break build in case of cross-compiling
# but CMAKE_CROSSCOMPILING variable will be set only on project(OpenCV) command
# so we will try to detect cross-compiling by the presence of CMAKE_TOOLCHAIN_FILE
if(NOT DEFINED CMAKE_INSTALL_PREFIX)
@@ -43,17 +43,17 @@ if(NOT DEFINED CMAKE_INSTALL_PREFIX)
else()
set(CMAKE_INSTALL_PREFIX "/usr/local" CACHE PATH "Installation Directory")
endif()
-else(NOT CMAKE_TOOLCHAIN_FILE)
+else()
#Android: set output folder to ${CMAKE_BINARY_DIR}
-set( LIBRARY_OUTPUT_PATH_ROOT ${CMAKE_BINARY_DIR} CACHE PATH "root for library output, set this to change where android libs are compiled to" )
+set(LIBRARY_OUTPUT_PATH_ROOT ${CMAKE_BINARY_DIR} CACHE PATH "root for library output, set this to change where android libs are compiled to" )
# any cross-compiling
set(CMAKE_INSTALL_PREFIX "${CMAKE_BINARY_DIR}/install" CACHE PATH "Installation Directory")
-endif(NOT CMAKE_TOOLCHAIN_FILE)
+endif()
endif()
if(CMAKE_SYSTEM_NAME MATCHES WindowsPhone OR CMAKE_SYSTEM_NAME MATCHES WindowsStore)
set(WINRT TRUE)
-endif(CMAKE_SYSTEM_NAME MATCHES WindowsPhone OR CMAKE_SYSTEM_NAME MATCHES WindowsStore)
+endif()
if(WINRT)
add_definitions(-DWINRT -DNO_GETENV)

@@ -1042,7 +1042,7 @@ function(CUDA_COMPUTE_BUILD_PATH path build_path)
# Only deal with CMake style paths from here on out
file(TO_CMAKE_PATH "${path}" bpath)
if (IS_ABSOLUTE "${bpath}")
-# Absolute paths are generally unnessary, especially if something like
+# Absolute paths are generally unnecessary, especially if something like
# file(GLOB_RECURSE) is used to pick up the files.
string(FIND "${bpath}" "${CMAKE_CURRENT_BINARY_DIR}" _binary_dir_pos)
@@ -1065,7 +1065,7 @@ function(CUDA_COMPUTE_BUILD_PATH path build_path)
# Avoid spaces
string(REPLACE " " "_" bpath "${bpath}")
-# Strip off the filename. I wait until here to do it, since removin the
+# Strip off the filename. I wait until here to do it, since removing the
# basename can make a path that looked like path/../basename turn into
# path/.. (notice the trailing slash).
get_filename_component(bpath "${bpath}" PATH)
@@ -1362,7 +1362,7 @@ macro(CUDA_WRAP_SRCS cuda_target format generated_files)
# Bring in the dependencies. Creates a variable CUDA_NVCC_DEPEND #######
cuda_include_nvcc_dependencies(${cmake_dependency_file})
-# Convience string for output ###########################################
+# Convenience string for output ###########################################
if(CUDA_BUILD_EMULATION)
set(cuda_build_type "Emulation")
else()
@@ -1563,7 +1563,7 @@ macro(CUDA_ADD_LIBRARY cuda_target)
${_cmake_options} ${_cuda_shared_flag}
OPTIONS ${_options} )
-# Compute the file name of the intermedate link file used for separable
+# Compute the file name of the intermediate link file used for separable
# compilation.
CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME(link_file ${cuda_target} "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}")
@@ -1607,7 +1607,7 @@ macro(CUDA_ADD_EXECUTABLE cuda_target)
# Create custom commands and targets for each file.
CUDA_WRAP_SRCS( ${cuda_target} OBJ _generated_files ${_sources} OPTIONS ${_options} )
-# Compute the file name of the intermedate link file used for separable
+# Compute the file name of the intermediate link file used for separable
# compilation.
CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME(link_file ${cuda_target} "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}")
@@ -1723,7 +1723,7 @@ endmacro()
###############################################################################
###############################################################################
macro(CUDA_BUILD_CLEAN_TARGET)
-# Call this after you add all your CUDA targets, and you will get a convience
+# Call this after you add all your CUDA targets, and you will get a convenience
# target. You should also make clean after running this target to get the
# build system to generate all the code again.

@@ -1,5 +1,5 @@
if("${CMAKE_CXX_COMPILER};${CMAKE_C_COMPILER};${CMAKE_CXX_COMPILER_LAUNCHER}" MATCHES "ccache")
-set(CMAKE_COMPILER_IS_CCACHE 1) # FIXIT Avoid setting of CMAKE_ variables
+set(CMAKE_COMPILER_IS_CCACHE 1) # TODO: FIXIT Avoid setting of CMAKE_ variables
set(OPENCV_COMPILER_IS_CCACHE 1)
endif()
function(access_CMAKE_COMPILER_IS_CCACHE)

@@ -43,7 +43,7 @@ endif(WITH_IPP_A)
if(WITH_CUDA)
include("${OpenCV_SOURCE_DIR}/cmake/OpenCVDetectCUDA.cmake")
if(NOT HAVE_CUDA)
-message(WARNING "OpenCV is not able to find/confidure CUDA SDK (required by WITH_CUDA).
+message(WARNING "OpenCV is not able to find/configure CUDA SDK (required by WITH_CUDA).
CUDA support will be disabled in OpenCV build.
To eliminate this warning remove WITH_CUDA=ON CMake configuration option.
")

@@ -455,7 +455,7 @@ function(__ocv_sort_modules_by_deps __lst)
set(${__lst} "${result};${result_extra}" PARENT_SCOPE)
endfunction()
-# resolve dependensies
+# resolve dependencies
function(__ocv_resolve_dependencies)
foreach(m ${OPENCV_MODULES_DISABLED_USER})
set(HAVE_${m} OFF CACHE INTERNAL "Module ${m} will not be built in current configuration")
@@ -727,7 +727,7 @@ macro(ocv_set_module_sources)
endif()
endforeach()
-# the hacky way to embeed any files into the OpenCV without modification of its build system
+# the hacky way to embed any files into the OpenCV without modification of its build system
if(COMMAND ocv_get_module_external_sources)
ocv_get_module_external_sources()
endif()
@@ -958,7 +958,7 @@ macro(_ocv_create_module)
target_compile_definitions(${the_module} PRIVATE CVAPI_EXPORTS)
endif()
-# For dynamic link numbering convenions
+# For dynamic link numbering conventions
if(NOT ANDROID)
# Android SDK build scripts can include only .so files into final .apk
# As result we should not set version properties for Android

@@ -383,7 +383,7 @@ MACRO(ADD_NATIVE_PRECOMPILED_HEADER _targetName _input)
# For Xcode, cmake needs my patch to process
# GCC_PREFIX_HEADER and GCC_PRECOMPILE_PREFIX_HEADER as target properties
-# When buiding out of the tree, precompiled may not be located
+# When building out of the tree, precompiled may not be located
# Use full path instead.
GET_FILENAME_COMPONENT(fullPath ${_input} ABSOLUTE)

@@ -37,7 +37,7 @@ Here is the general structure of the program:
the same with **myWindow**. If the name does not exist, a new window is created.
@code{.cpp}
/// Access window via its name
-viz::Viz3d sameWindow = viz::get("Viz Demo");
+viz::Viz3d sameWindow = viz::getWindowByName("Viz Demo");
@endcode
- Start a controlled event loop. Once it starts, **wasStopped** is set to false. Inside the while
loop, in each iteration, **spinOnce** is called to prevent event loop from completely stopping.
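For context, the renamed accessor keeps the create-or-retrieve semantics the tutorial describes. A minimal sketch of the full pattern (window name and loop taken from the tutorial text above):

    #include <opencv2/viz.hpp>

    int main()
    {
        cv::viz::Viz3d myWindow("Viz Demo");        // created on first access
        cv::viz::Viz3d sameWindow = cv::viz::getWindowByName("Viz Demo"); // same window
        while (!sameWindow.wasStopped())
            sameWindow.spinOnce(1, true);           // keep the event loop running
        return 0;
    }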

@@ -42,7 +42,7 @@
#ifndef OPENCV_DNN_HPP
#define OPENCV_DNN_HPP
-// This is an umbrealla header to include into you project.
+// This is an umbrella header to include into you project.
// We are free to change headers layout in dnn subfolder, so please include
// this header for future compatibility
@@ -52,10 +52,10 @@
This module contains:
- API for new layers creation, layers are building bricks of neural networks;
- set of built-in most-useful Layers;
-- API to constuct and modify comprehensive neural networks from layers;
+- API to construct and modify comprehensive neural networks from layers;
- functionality for loading serialized networks models from different frameworks.
-Functionality of this module is designed only for forward pass computations (i. e. network testing).
+Functionality of this module is designed only for forward pass computations (i.e. network testing).
A network training is in principle not supported.
@}
*/
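A sketch of the forward-pass-only workflow this module description promises (model and image file names below are placeholders):

    #include <opencv2/dnn.hpp>
    #include <opencv2/imgcodecs.hpp>

    int main()
    {
        cv::dnn::Net net = cv::dnn::readNetFromCaffe("deploy.prototxt", "model.caffemodel");
        cv::Mat img  = cv::imread("input.jpg");
        cv::Mat blob = cv::dnn::blobFromImage(img);  // 4D NCHW input blob
        net.setInput(blob);
        cv::Mat out = net.forward();                 // inference only; training is unsupported
        return 0;
    }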

@@ -58,7 +58,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
You can use both API, but factory API is less convenient for native C++ programming and basically designed for use inside importers (see @ref readNetFromCaffe(), @ref readNetFromTorch(), @ref readNetFromTensorflow()).
Built-in layers partially reproduce functionality of corresponding Caffe and Torch7 layers.
-In partuclar, the following layers and Caffe importer were tested to reproduce <a href="http://caffe.berkeleyvision.org/tutorial/layers.html">Caffe</a> functionality:
+In particular, the following layers and Caffe importer were tested to reproduce <a href="http://caffe.berkeleyvision.org/tutorial/layers.html">Caffe</a> functionality:
- Convolution
- Deconvolution
- Pooling
@@ -108,13 +108,13 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
@f$W_{x?} \in R^{N_h \times N_x}@f$, @f$W_{h?} \in R^{N_h \times N_h}@f$, @f$b_? \in R^{N_h}@f$.
For simplicity and performance purposes we use @f$ W_x = [W_{xi}; W_{xf}; W_{xo}, W_{xg}] @f$
-(i.e. @f$W_x@f$ is vertical contacentaion of @f$ W_{x?} @f$), @f$ W_x \in R^{4N_h \times N_x} @f$.
+(i.e. @f$W_x@f$ is vertical concatenation of @f$ W_{x?} @f$), @f$ W_x \in R^{4N_h \times N_x} @f$.
The same for @f$ W_h = [W_{hi}; W_{hf}; W_{ho}, W_{hg}], W_h \in R^{4N_h \times N_h} @f$
and for @f$ b = [b_i; b_f, b_o, b_g]@f$, @f$b \in R^{4N_h} @f$.
-@param Wh is matrix defining how previous output is transformed to internal gates (i.e. according to abovemtioned notation is @f$ W_h @f$)
-@param Wx is matrix defining how current input is transformed to internal gates (i.e. according to abovemtioned notation is @f$ W_x @f$)
-@param b is bias vector (i.e. according to abovemtioned notation is @f$ b @f$)
+@param Wh is matrix defining how previous output is transformed to internal gates (i.e. according to above mentioned notation is @f$ W_h @f$)
+@param Wx is matrix defining how current input is transformed to internal gates (i.e. according to above mentioned notation is @f$ W_x @f$)
+@param b is bias vector (i.e. according to above mentioned notation is @f$ b @f$)
*/
CV_DEPRECATED virtual void setWeights(const Mat &Wh, const Mat &Wx, const Mat &b) = 0;
@@ -148,7 +148,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
* If setUseTimstampsDim() is set to true then @p input[0] should has at least two dimensions with the following shape: [`T`, `N`, `[data dims]`],
* where `T` specifies number of timestamps, `N` is number of independent streams (i.e. @f$ x_{t_0 + t}^{stream} @f$ is stored inside @p input[0][t, stream, ...]).
*
-* If setUseTimstampsDim() is set to fase then @p input[0] should contain single timestamp, its shape should has form [`N`, `[data dims]`] with at least one dimension.
+* If setUseTimstampsDim() is set to false then @p input[0] should contain single timestamp, its shape should has form [`N`, `[data dims]`] with at least one dimension.
* (i.e. @f$ x_{t}^{stream} @f$ is stored inside @p input[0][stream, ...]).
*/
@@ -550,7 +550,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
* dst(x, y, c) = \frac{ src(x, y, c) }{norm(c)}
* @f]
*
-* Where `x, y` - spatial cooridnates, `c` - channel.
+* Where `x, y` - spatial coordinates, `c` - channel.
*
* An every sample in the batch is normalized separately. Optionally,
* output is scaled by the trained parameters.
@@ -565,7 +565,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
};
/**
-* @brief Resize input 4-dimensional blob by nearest neghbor strategy.
+* @brief Resize input 4-dimensional blob by nearest neighbor strategy.
*
* Layer is used to support TensorFlow's resize_nearest_neighbor op.
*/

@@ -87,7 +87,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
/** @brief This class provides all data needed to initialize layer.
*
-* It includes dictionary with scalar params (which can be readed by using Dict interface),
+* It includes dictionary with scalar params (which can be read by using Dict interface),
* blob params #blobs and optional meta information: #name and #type of layer instance.
*/
class CV_EXPORTS LayerParams : public Dict
@@ -138,7 +138,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
* Initialize wrapper from another one. It'll wrap the same host CPU
* memory and mustn't allocate memory on device(i.e. GPU). It might
* has different shape. Use in case of CPU memory reusing for reuse
-* associented memory on device too.
+* associated memory on device too.
*/
BackendWrapper(const Ptr<BackendWrapper>& base, const MatShape& shape);
@@ -346,7 +346,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
/** @brief Create a network from Intel's Model Optimizer intermediate representation.
* @param[in] xml XML configuration file with network's topology.
* @param[in] bin Binary file with trained weights.
-* Networks imported from Intel's Model Optimizer are lauched in Intel's Inference Engine
+* Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine
* backend.
*/
CV_WRAP static Net readFromModelOptimizer(const String& xml, const String& bin);
@@ -402,8 +402,8 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
/** @brief Connects #@p outNum output of the first layer to #@p inNum input of the second layer.
* @param outLayerId identifier of the first layer
-* @param inpLayerId identifier of the second layer
* @param outNum number of the first layer output
+* @param inpLayerId identifier of the second layer
* @param inpNum number of the second layer input
*/
void connect(int outLayerId, int outNum, int inpLayerId, int inpNum);
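A sketch of how the four arguments line up when wiring layers by hand (layer names are hypothetical, and this assumes a layer type registered in the dnn factory, such as Identity):

    cv::dnn::Net net;
    cv::dnn::LayerParams lp;
    int first  = net.addLayer("first",  "Identity", lp);
    int second = net.addLayer("second", "Identity", lp);
    // output #0 of `first` feeds input #0 of `second`
    net.connect(first /*outLayerId*/, 0 /*outNum*/, second /*inpLayerId*/, 0 /*inpNum*/);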
@@ -564,7 +564,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
*/
CV_WRAP int getLayersCount(const String& layerType) const;
-/** @brief Computes bytes number which are requered to store
+/** @brief Computes bytes number which are required to store
* all weights and intermediate blobs for model.
* @param netInputShapes vector of shapes for all net inputs.
* @param weights output parameter to store resulting bytes for weights.
@@ -584,7 +584,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
const MatShape& netInputShape,
CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
-/** @brief Computes bytes number which are requered to store
+/** @brief Computes bytes number which are required to store
* all weights and intermediate blobs for each layer.
* @param netInputShapes vector of shapes for all net inputs.
* @param layerIds output vector to save layer IDs.
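For orientation, the single-shape overload shown above is typically called like this, given a Net net from one of the importers (the NCHW input shape here is a made-up example):

    cv::dnn::MatShape inputShape = {1, 3, 224, 224};
    size_t weights = 0, blobs = 0;
    net.getMemoryConsumption(inputShape, weights, blobs);
    // weights + blobs estimates the model's total memory footprint in bytes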
@@ -727,7 +727,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
* @param[in] xml XML configuration file with network's topology.
* @param[in] bin Binary file with trained weights.
* @returns Net object.
-* Networks imported from Intel's Model Optimizer are lauched in Intel's Inference Engine
+* Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine
* backend.
*/
CV_EXPORTS_W Net readNetFromModelOptimizer(const String &xml, const String &bin);
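Usage mirrors the other importers; the file names below are placeholders:

    cv::dnn::Net ieNet = cv::dnn::readNetFromModelOptimizer("model.xml", "model.bin");
    // such a net runs on the Inference Engine backend, per the note above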
@@ -745,7 +745,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
* @details if @p crop is true, input image is resized so one side after resize is equal to corresponding
* dimension in @p size and another one is equal or larger. Then, crop from the center is performed.
* If @p crop is false, direct resize without cropping and preserving aspect ratio is performed.
-* @returns 4-dimansional Mat with NCHW dimensions order.
+* @returns 4-dimensional Mat with NCHW dimensions order.
*/
CV_EXPORTS_W Mat blobFromImage(InputArray image, double scalefactor=1.0, const Size& size = Size(),
const Scalar& mean = Scalar(), bool swapRB=true, bool crop=true);
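An example of the full parameter list from the signature above (image name and values are illustrative only):

    cv::Mat img  = cv::imread("input.jpg");
    cv::Mat blob = cv::dnn::blobFromImage(img,
                                          1.0 / 255.0,      // scalefactor
                                          cv::Size(300, 300),
                                          cv::Scalar(),     // mean: nothing subtracted
                                          true,             // swapRB: BGR -> RGB
                                          false);           // crop: plain resize, no center crop
    // blob is a 4-dimensional NCHW Mat, per the @returns note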

@@ -223,9 +223,9 @@ with tf.Session() as sess:
# By default, float16 weights are stored in repeated tensor's field called
# `half_val`. It has type int32 with leading zeros for unused bytes.
-# This type is encoded by Varint that means only 7 bits are used for value
+# This type is encoded by Variant that means only 7 bits are used for value
# representation but the last one is indicated the end of encoding. This way
-# float16 might takes 1 or 2 or 3 bytes depends on value. To impove compression,
+# float16 might takes 1 or 2 or 3 bytes depends on value. To improve compression,
# we replace all `half_val` values to `tensor_content` using only 2 bytes for everyone.
for node in graph_def.node:
if 'value' in node.attr:

@@ -541,7 +541,7 @@ public:
{
// if dst already has been allocated with total(shape) elements,
-// it won't be recrreated and pointer of dst.data remains the same.
+// it won't be recreated and pointer of dst.data remains the same.
dst.create(shape, use_half ? CV_16S : CV_32F);
addHost(lp, dst);
}
@@ -1520,7 +1520,7 @@ struct Net::Impl
}
}
-// fuse convlution layer followed by eltwise + relu
+// fuse convolution layer followed by eltwise + relu
if ( IS_DNN_OPENCL_TARGET(preferableTarget) )
{
Ptr<EltwiseLayer> nextEltwiseLayer;
@@ -1649,7 +1649,7 @@ struct Net::Impl
// the optimization #3. if there is concat layer that concatenates channels
// from the inputs together (i.e. axis == 1) then we make the inputs of
-// the concat layer to write to the concatetion output buffer
+// the concat layer to write to the concatenation output buffer
// (and so we eliminate the concatenation layer, because the channels
// are concatenated implicitly).
Ptr<ConcatLayer> concatLayer = ld.layerInstance.dynamicCast<ConcatLayer>();

@@ -242,7 +242,7 @@ bool HalideScheduler::process(Ptr<BackendNode>& node)
std::map<std::string, Halide::Func> funcsMap; // Scheduled functions.
// For every function, from top to bottom, we try to find a scheduling node.
// Scheduling is successful (return true) if for the first function (top)
-// node is respresented.
+// node is represented.
CV_Assert(!node.empty());
std::vector<Halide::Func>& funcs = node.dynamicCast<HalideBackendNode>()->funcs;
for (int i = funcs.size() - 1; i >= 0; --i)

@@ -676,7 +676,7 @@ public:
int j0 = std::max(0, (-in_j + dilation_w-1)/dilation_w);
int j1 = std::min(kernel_w, (width - in_j + dilation_w-1)/dilation_w);
-// here some non-continous sub-row of the row will not be
+// here some non-continuous sub-row of the row will not be
// filled from the tensor; we need to make sure that the uncovered
// elements are explicitly set to 0's. the easiest way is to
// set all the elements to 0's before the loop.

@@ -110,7 +110,7 @@ public:
float _nmsThreshold;
int _topK;
-// Whenever predicted bounding boxes are respresented in YXHW instead of XYWH layout.
+// Whenever predicted bounding boxes are represented in YXHW instead of XYWH layout.
bool _locPredTransposed;
// It's true whenever predicted bounding boxes and proposals are normalized to [0, 1].
bool _bboxesNormalized;

@@ -79,7 +79,7 @@ public:
else if (operation == "max")
op = MAX;
else
-CV_Error(cv::Error::StsBadArg, "Unknown operaticon type \"" + operation + "\"");
+CV_Error(cv::Error::StsBadArg, "Unknown operation type \"" + operation + "\"");
}
if (params.has("coeff"))

@@ -366,7 +366,7 @@ public:
kernel.set(13, (int)_imageWidth);
kernel.run(1, &nthreads, NULL, false);
-// clip the prior's coordidate such that it is within [0, 1]
+// clip the prior's coordinate such that it is within [0, 1]
if (_clip)
{
Mat mat = outputs[0].getMat(ACCESS_READ);
@@ -442,7 +442,7 @@ public:
}
}
}
-// clip the prior's coordidate such that it is within [0, 1]
+// clip the prior's coordinate such that it is within [0, 1]
if (_clip)
{
int _outChannelSize = _layerHeight * _layerWidth * _numPriors * 4;
@@ -565,7 +565,7 @@ private:
std::vector<float> _variance;
std::vector<float> _offsetsX;
std::vector<float> _offsetsY;
-// Precomputed final widhts and heights based on aspect ratios or explicit sizes.
+// Precomputed final widths and heights based on aspect ratios or explicit sizes.
std::vector<float> _boxWidths;
std::vector<float> _boxHeights;

@@ -709,7 +709,7 @@ bool OCL4DNNConvSpatial<Dtype>::swizzleWeight(const UMat &weight,
return false;
}
} else {
-// assumption: kernel dimesion is 2
+// assumption: kernel dimension is 2
Mat weightMat = weight.getMat(ACCESS_READ);
Dtype* cpu_weight = (Dtype *)weightMat.ptr<float>();
Mat swizzledWeightMat;

@@ -288,7 +288,7 @@ void InfEngineBackendNet::init(int targetId)
}
for (const InferenceEngine::DataPtr& out : l->outData)
{
-// TODO: Replace to uniquness assertion.
+// TODO: Replace to uniqueness assertion.
if (internalOutputs.find(out->name) == internalOutputs.end())
internalOutputs[out->name] = out;
}
@@ -305,7 +305,7 @@ void InfEngineBackendNet::init(int targetId)
// Add all outputs.
for (const InferenceEngine::DataPtr& out : l->outData)
{
-// TODO: Replace to uniquness assertion.
+// TODO: Replace to uniqueness assertion.
if (unconnectedOuts.find(out->name) == unconnectedOuts.end())
unconnectedOuts[out->name] = out;
}

@@ -86,7 +86,7 @@ message NodeDef {
// | ( ("gpu" | "cpu") ":" ([1-9][0-9]* | "*") )
//
// Valid values for this string include:
-// * "@other/node" (colocate with "other/node")
+// * "@other/node" (collocate with "other/node")
// * "/job:worker/replica:0/task:1/gpu:3" (full specification)
// * "/job:worker/gpu:3" (partial specification)
// * "" (no specification)

@@ -311,11 +311,11 @@ struct TorchImporter
int numModules = curModule->modules.size();
readTorchObject(index);
-if (tensors.count(index)) //tensor was readed
+if (tensors.count(index)) //tensor was read
{
tensorParams.insert(std::make_pair(key, std::make_pair(index, tensors[index])));
}
-else if (storages.count(index)) //storage was readed
+else if (storages.count(index)) //storage was read
{
Mat &matStorage = storages[index];
Mat matCasted;
@@ -399,7 +399,7 @@ struct TorchImporter
size_t requireElems = (size_t)offset + (size_t)steps[0] * (size_t)sizes[0];
size_t storageElems = storages[indexStorage].total();
if (requireElems > storageElems)
-CV_Error(Error::StsBadSize, "Storage has insufficent number of elemements for requested Tensor");
+CV_Error(Error::StsBadSize, "Storage has insufficient number of elements for requested Tensor");
//convert sizes
AutoBuffer<int, 4> isizes(ndims);

@@ -143,7 +143,7 @@ TEST_P(Test_Darknet_nets, YoloVoc)
std::vector<float> confidences(3);
std::vector<Rect2d> boxes(3);
classIds[0] = 6; confidences[0] = 0.750469f; boxes[0] = Rect2d(0.577374, 0.127391, 0.325575, 0.173418); // a car
-classIds[1] = 1; confidences[1] = 0.780879f; boxes[1] = Rect2d(0.270762, 0.264102, 0.461713, 0.48131); // a bycicle
+classIds[1] = 1; confidences[1] = 0.780879f; boxes[1] = Rect2d(0.270762, 0.264102, 0.461713, 0.48131); // a bicycle
classIds[2] = 11; confidences[2] = 0.901615f; boxes[2] = Rect2d(0.1386, 0.338509, 0.282737, 0.60028); // a dog
double scoreDiff = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 7e-3 : 8e-5;
double iouDiff = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 0.013 : 3e-5;
@@ -182,7 +182,7 @@ TEST_P(Test_Darknet_nets, YOLOv3)
std::vector<float> confidences(3);
std::vector<Rect2d> boxes(3);
classIds[0] = 7; confidences[0] = 0.952983f; boxes[0] = Rect2d(0.614622, 0.150257, 0.286747, 0.138994); // a truck
-classIds[1] = 1; confidences[1] = 0.987908f; boxes[1] = Rect2d(0.150913, 0.221933, 0.591342, 0.524327); // a bycicle
+classIds[1] = 1; confidences[1] = 0.987908f; boxes[1] = Rect2d(0.150913, 0.221933, 0.591342, 0.524327); // a bicycle
classIds[2] = 16; confidences[2] = 0.998836f; boxes[2] = Rect2d(0.160024, 0.389964, 0.257861, 0.553752); // a dog (COCO)
double scoreDiff = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 4e-3 : 8e-5;
double iouDiff = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 0.011 : 3e-5;

@@ -250,7 +250,7 @@ TEST_P(Test_Torch_nets, ENet_accuracy)
Mat out = net.forward();
Mat ref = blobFromNPY(_tf("torch_enet_prob.npy", false));
// Due to numerical instability in Pooling-Unpooling layers (indexes jittering)
-// thresholds for ENet must be changed. Accuracy of resuults was checked on
+// thresholds for ENet must be changed. Accuracy of results was checked on
// Cityscapes dataset and difference in mIOU with Torch is 10E-4%
normAssert(ref, out, "", 0.00044, 0.44);

@@ -19,7 +19,7 @@ if(NOT BUILD_SHARED_LIBS)
endif()
endforeach()
if(_conflicts)
-message(STATUS "Disabling VIZ module due conflicts with VTK dependencies: ${_conflicts}")
+message(STATUS "Disabling VIZ module due to conflicts with VTK dependencies: ${_conflicts}")
ocv_module_disable(viz)
endif()
endif()

@@ -506,7 +506,7 @@ namespace cv
};
/////////////////////////////////////////////////////////////////////////////
-/// Compond widgets
+/// Compound widgets
/** @brief This 3D Widget represents a coordinate system. :
*/
