Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/20830/head
Alexander Alekhin 4 years ago
commit 03a08435e2
  1. modules/core/src/system.cpp (7 changed lines)
  2. modules/dnn/misc/python/test/test_dnn.py (6 changed lines)
  3. modules/dnn/src/tensorflow/tf_graph_simplifier.cpp (41 changed lines)
  4. modules/dnn/src/tensorflow/tf_graph_simplifier.hpp (2 changed lines)
  5. modules/dnn/src/tensorflow/tf_importer.cpp (4 changed lines)
  6. modules/features2d/src/blobdetector.cpp (23 changed lines)
  7. modules/features2d/test/test_blobdetector.cpp (1 changed line)

@@ -1786,6 +1786,13 @@ static void WINAPI opencv_fls_destructor(void* pData)
#endif // CV_USE_FLS
#endif // _WIN32
static TlsAbstraction* const g_force_initialization_of_TlsAbstraction
#if defined __GNUC__
__attribute__((unused))
#endif
= getTlsAbstraction();
#else // OPENCV_DISABLE_THREAD_SUPPORT
// no threading (OPENCV_DISABLE_THREAD_SUPPORT=ON)
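For readers unfamiliar with the idiom in this hunk: the file-scope constant is initialized by calling getTlsAbstraction(), which forces the TLS abstraction to be constructed during static initialization rather than lazily on first use, and the GCC-only attribute suppresses the unused-variable warning. A minimal standalone sketch of the same pattern, using illustrative names that are not OpenCV's:

#include <cstdio>

struct Subsystem
{
    Subsystem() { std::puts("subsystem initialized early"); }
};

static Subsystem* getSubsystem()
{
    static Subsystem instance;  // constructed on first call
    return &instance;
}

// Unused constant whose initializer forces the call during static initialization
// of this translation unit; the attribute silences -Wunused-variable on GCC/Clang.
static Subsystem* const g_force_initialization_of_Subsystem
#if defined __GNUC__
__attribute__((unused))
#endif
= getSubsystem();

int main()
{
    getSubsystem();  // already initialized by the time main() runs
    return 0;
}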

@@ -391,20 +391,22 @@ class dnn_test(NewOpenCVTests):
raise unittest.SkipTest("Missing DNN test files (dnn/onnx/data/{input/output}_hidden_lstm.npy). "
"Verify OPENCV_DNN_TEST_DATA_PATH configuration parameter.")
net = cv.dnn.readNet(model)
input = np.load(input_file)
# We have to expand the shape of the input tensor because the Python bindings cut 3D tensors to 2D.
# This should be fixed in the future; see https://github.com/opencv/opencv/issues/19091.
# Please remove `expand_dims` once that is resolved.
input = np.expand_dims(input, axis=3)
gold_output = np.load(output_file)
net.setInput(input)
for backend, target in self.dnnBackendsAndTargets:
printParams(backend, target)
net = cv.dnn.readNet(model)
net.setPreferableBackend(backend)
net.setPreferableTarget(target)
net.setInput(input)
real_output = net.forward()
normAssert(self, real_output, gold_output, "", getDefaultThreshold(target))
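The test now loads the network inside the backend/target loop instead of reusing a single instance, so every configuration starts from a freshly read model with the input set after backend selection. A rough C++ equivalent of that loop structure; the model path, input shape, and configuration list below are placeholders rather than values from the test:

#include <opencv2/dnn.hpp>
#include <opencv2/core.hpp>
#include <string>
#include <utility>
#include <vector>

int main()
{
    const std::string model = "model.onnx";  // placeholder model path
    cv::Mat input(std::vector<int>{1, 10, 4, 1}, CV_32F, cv::Scalar(0));  // placeholder blob

    const std::vector<std::pair<int, int>> configs = {
        { cv::dnn::DNN_BACKEND_OPENCV, cv::dnn::DNN_TARGET_CPU },
    };

    for (const auto& cfg : configs)
    {
        // Re-read the network for each configuration, as the updated test does,
        // then pick backend/target before setting the input and running forward.
        cv::dnn::Net net = cv::dnn::readNet(model);
        net.setPreferableBackend(cfg.first);
        net.setPreferableTarget(cfg.second);
        net.setInput(input);
        cv::Mat out = net.forward();
        (void)out;
    }
    return 0;
}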

@@ -19,6 +19,16 @@ CV__DNN_INLINE_NS_BEGIN
using ::google::protobuf::RepeatedField;
using ::google::protobuf::MapPair;
static Mat getTensorContentRef_(const tensorflow::TensorProto& tensor);
static inline
bool isAlignedMat(const Mat& m)
{
int depth = m.depth();
int alignment = CV_ELEM_SIZE1(depth);
return (((size_t)m.data) & (alignment - 1)) == 0;
}
class TFNodeWrapper : public ImportNodeWrapper
{
public:
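The isAlignedMat helper added above treats the per-element size as the required alignment and tests the low bits of Mat::data; this matters because tensor_content() is backed by a std::string, so a Mat wrapped around it may start at a byte offset that is not a multiple of the element size. A small self-contained demonstration of the same check on a deliberately misaligned buffer (the buffer and offsets are made up for illustration):

#include <opencv2/core.hpp>
#include <iostream>

static bool isAligned(const cv::Mat& m)
{
    const size_t alignment = CV_ELEM_SIZE1(m.depth());  // bytes per element, e.g. 4 for CV_32F
    return (reinterpret_cast<size_t>(m.data) & (alignment - 1)) == 0;
}

int main()
{
    alignas(16) unsigned char buffer[32] = {};

    cv::Mat aligned(1, 4, CV_32F, buffer);         // data pointer is 16-byte aligned
    cv::Mat misaligned(1, 4, CV_32F, buffer + 1);  // data pointer is off by one byte

    std::cout << "aligned:    " << isAligned(aligned) << "\n";     // prints 1
    std::cout << "misaligned: " << isAligned(misaligned) << "\n";  // prints 0
    return 0;
}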
@@ -719,8 +729,19 @@ public:
{
if (!negativeScales)
{
Mat scales = getTensorContent(inputNodes[1]->attr().at("value").tensor(), /*copy*/false);
scales *= -1;
Mat scalesRef = getTensorContentRef_(inputNodes[1]->attr().at("value").tensor());
// FIXME: This breaks the const guarantees of tensor() by writing to scalesRef
if (isAlignedMat(scalesRef))
{
scalesRef *= -1;
}
else
{
Mat scales = scalesRef.clone() * -1;
CV_Assert(scalesRef.isContinuous());
CV_Assert(scales.isContinuous());
memcpy(scalesRef.data, scales.data, scales.total() * scales.elemSize());
}
}
}
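The rewritten branch above negates the scale tensor in place only when its protobuf-owned storage happens to be aligned; otherwise it clones to aligned memory, negates the copy, and memcpy's the resulting bytes back into the original buffer. A sketch of that fallback on a plain float buffer, under the assumption that the result must be written back into storage we do not own:

#include <opencv2/core.hpp>
#include <cstring>

int main()
{
    // Stand-in for externally owned tensor storage; offset by one byte to force the
    // misaligned path (a protobuf std::string payload can be misaligned the same way).
    alignas(16) unsigned char storage[64] = {};
    cv::Mat scalesRef(1, 4, CV_32F, storage + 1);

    const size_t alignment = CV_ELEM_SIZE1(scalesRef.depth());
    const bool aligned = (reinterpret_cast<size_t>(scalesRef.data) & (alignment - 1)) == 0;

    if (aligned)
    {
        scalesRef *= -1;  // safe to negate in place through the Mat header
    }
    else
    {
        cv::Mat scales = scalesRef.clone() * -1;  // clone() copies the bytes to aligned memory
        CV_Assert(scalesRef.isContinuous() && scales.isContinuous());
        std::memcpy(scalesRef.data, scales.data, scales.total() * scales.elemSize());
    }
    return 0;
}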
@@ -832,7 +853,8 @@ void RemoveIdentityOps(tensorflow::GraphDef& net)
}
}
Mat getTensorContent(const tensorflow::TensorProto &tensor, bool copy)
// NB: returned Mat::data pointer may be unaligned
Mat getTensorContentRef_(const tensorflow::TensorProto& tensor)
{
const std::string& content = tensor.tensor_content();
Mat m;
@@ -904,7 +926,18 @@ Mat getTensorContent(const tensorflow::TensorProto &tensor, bool copy)
CV_Error(Error::StsError, "Tensor's data type is not supported");
break;
}
return copy ? m.clone() : m;
return m;
}
Mat getTensorContent(const tensorflow::TensorProto& tensor, bool forceCopy)
{
// If necessary, clone m to get an aligned data pointer
Mat m = getTensorContentRef_(tensor);
if (forceCopy || !isAlignedMat(m))
return m.clone();
else
return m;
}
void releaseTensor(tensorflow::TensorProto* tensor)
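With this split, getTensorContentRef_ wraps the protobuf-owned bytes with no copy (and therefore possibly no alignment), while getTensorContent clones whenever the caller forces a copy or the wrapped pointer is unaligned. Building a real tensorflow::TensorProto here would drag in protobuf, so the sketch below mirrors the same decision over a plain std::string payload, which is what tensor_content() returns; the function names are analogues rather than OpenCV's:

#include <opencv2/core.hpp>
#include <string>

// Zero-copy view over the payload; the resulting data pointer may be unaligned.
static cv::Mat getContentRef_(const std::string& content)
{
    return cv::Mat(1, (int)(content.size() / sizeof(float)), CV_32F,
                   const_cast<char*>(content.data()));
}

static bool isAlignedMat(const cv::Mat& m)
{
    const size_t alignment = CV_ELEM_SIZE1(m.depth());
    return (reinterpret_cast<size_t>(m.data) & (alignment - 1)) == 0;
}

// Copy by default; reuse the zero-copy view only when no copy is forced AND it is aligned.
static cv::Mat getContent(const std::string& content, bool forceCopy = true)
{
    cv::Mat m = getContentRef_(content);
    return (forceCopy || !isAlignedMat(m)) ? m.clone() : m;
}

int main()
{
    const std::string payload(16, '\0');  // bytes for 4 floats
    cv::Mat view = getContent(payload, /*forceCopy=*/false);
    CV_Assert(view.total() == 4);
    return 0;
}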

@@ -21,7 +21,7 @@ void RemoveIdentityOps(tensorflow::GraphDef& net);
void simplifySubgraphs(tensorflow::GraphDef& net);
Mat getTensorContent(const tensorflow::TensorProto &tensor, bool copy = true);
Mat getTensorContent(const tensorflow::TensorProto& tensor, bool forceCopy = true);
void releaseTensor(tensorflow::TensorProto* tensor);

@@ -124,8 +124,10 @@ void parseTensor(const tensorflow::TensorProto &tensor, Mat &dstBlob)
}
dstBlob.create(shape, CV_32F);
CV_Assert(dstBlob.isContinuous());
Mat tensorContent = getTensorContent(tensor, /*no copy*/false);
CV_Assert(tensorContent.isContinuous());
int size = tensorContent.total();
CV_Assert(size == (int)dstBlob.total());
@@ -2671,8 +2673,10 @@ void TFImporter::kernelFromTensor(const tensorflow::TensorProto &tensor, Mat &ds
out_c = shape[0]; input_c = shape[1];
dstBlob.create(shape, CV_32F);
CV_Assert(dstBlob.isContinuous());
Mat tensorContent = getTensorContent(tensor, /*no copy*/false);
CV_Assert(tensorContent.isContinuous());
int size = tensorContent.total();
CV_Assert(size == (int)dstBlob.total());

@@ -44,6 +44,8 @@
#include <iterator>
#include <limits>
#include <opencv2/core/utils/logger.hpp>
// Requires CMake flag: DEBUG_opencv_features2d=ON
//#define DEBUG_BLOB_DETECTOR
@@ -317,6 +319,19 @@ void SimpleBlobDetectorImpl::detect(InputArray image, std::vector<cv::KeyPoint>&
CV_Error(Error::StsUnsupportedFormat, "Blob detector only supports 8-bit images!");
}
CV_CheckGT(params.thresholdStep, 0.0f, "");
if (params.minThreshold + params.thresholdStep >= params.maxThreshold)
{
// https://github.com/opencv/opencv/issues/6667
CV_LOG_ONCE_INFO(NULL, "SimpleBlobDetector: params.minDistBetweenBlobs is ignored for case with single threshold");
#if 0 // OpenCV 5.0
CV_CheckEQ(params.minRepeatability, 1u, "Incompatible parameters for case with single threshold");
#else
if (params.minRepeatability != 1)
CV_LOG_WARNING(NULL, "SimpleBlobDetector: params.minRepeatability=" << params.minRepeatability << " is incompatible for case with single threshold. Empty result is expected.");
#endif
}
std::vector < std::vector<Center> > centers;
for (double thresh = params.minThreshold; thresh < params.maxThreshold; thresh += params.thresholdStep)
{
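The new guard covers the degenerate configuration where only one threshold level fits between minThreshold and maxThreshold: blobs cannot repeat across levels, so minRepeatability (which defaults to 2) must be set to 1 for any detections to survive. A short C++ sketch of such a single-threshold setup, mirroring the bug_6667 regression test further below; the input image is a synthetic placeholder:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/features2d.hpp>
#include <vector>

int main()
{
    // White background with one dark blob, just so there is something to detect.
    cv::Mat img(256, 256, CV_8UC1, cv::Scalar(255));
    cv::circle(img, cv::Point(128, 128), 20, cv::Scalar(0), cv::FILLED);

    cv::SimpleBlobDetector::Params params;
    params.minThreshold = 250;
    params.maxThreshold = 260;
    // With the default thresholdStep of 10, minThreshold + thresholdStep >= maxThreshold,
    // so only a single threshold level is evaluated; keep minRepeatability at 1.
    params.minRepeatability = 1;

    std::vector<cv::KeyPoint> keypoints;
    cv::Ptr<cv::SimpleBlobDetector> detector = cv::SimpleBlobDetector::create(params);
    detector->detect(img, keypoints);
    return 0;
}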
@@ -325,19 +340,13 @@ void SimpleBlobDetectorImpl::detect(InputArray image, std::vector<cv::KeyPoint>&
std::vector < Center > curCenters;
findBlobs(grayscaleImage, binarizedImage, curCenters);
if(params.maxThreshold - params.minThreshold <= params.thresholdStep) {
// if the difference between min and max threshold is less than the threshold step
// we're only going to enter the loop once, so we need to add curCenters
// to ensure we still use minDistBetweenBlobs
centers.push_back(curCenters);
}
std::vector < std::vector<Center> > newCenters;
for (size_t i = 0; i < curCenters.size(); i++)
{
bool isNew = true;
for (size_t j = 0; j < centers.size(); j++)
{
double dist = norm(centers[j][centers[j].size() / 2 ].location - curCenters[i].location);
double dist = norm(centers[j][ centers[j].size() / 2 ].location - curCenters[i].location);
isNew = dist >= params.minDistBetweenBlobs && dist >= centers[j][ centers[j].size() / 2 ].radius && dist >= curCenters[i].radius;
if (!isNew)
{

@@ -12,6 +12,7 @@ TEST(Features2d_BlobDetector, bug_6667)
SimpleBlobDetector::Params params;
params.minThreshold = 250;
params.maxThreshold = 260;
params.minRepeatability = 1; // https://github.com/opencv/opencv/issues/6667
std::vector<KeyPoint> keypoints;
Ptr<SimpleBlobDetector> detector = SimpleBlobDetector::create(params);
