Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/20339/head
Alexander Alekhin 4 years ago
commit 8fad85edda
  1. cmake/OpenCVDetectInferenceEngine.cmake (4 changed lines)
  2. modules/core/include/opencv2/core/utils/filesystem.private.hpp (4 changed lines)
  3. modules/dnn/src/ie_ngraph.cpp (51 changed lines)
  4. modules/dnn/src/layers/batch_norm_layer.cpp (16 changed lines)
  5. modules/dnn/src/onnx/onnx_importer.cpp (17 changed lines)
  6. modules/dnn/src/op_inf_engine.hpp (5 changed lines)
  7. modules/dnn/test/test_backends.cpp (10 changed lines)
  8. modules/dnn/test/test_ie_models.cpp (9 changed lines)
  9. modules/dnn/test/test_onnx_importer.cpp (1 changed line)
  10. modules/dnn/test/test_torch_importer.cpp (9 changed lines)
  11. samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp (2 changed lines)
  12. samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp (6 changed lines)
  13. samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp (8 changed lines)
  14. samples/python/camera_calibration_show_extrinsics.py (2 changed lines)
  15. samples/python/gaussian_mix.py (2 changed lines)
  16. samples/python/hist.py (2 changed lines)
  17. samples/python/lk_homography.py (6 changed lines)
  18. samples/python/lk_track.py (2 changed lines)
  19. samples/python/video_v4l2.py (4 changed lines)

@@ -138,8 +138,8 @@ if(INF_ENGINE_TARGET)
math(EXPR INF_ENGINE_RELEASE "${InferenceEngine_VERSION_MAJOR} * 1000000 + ${InferenceEngine_VERSION_MINOR} * 10000 + ${InferenceEngine_VERSION_PATCH} * 100")
endif()
if(NOT INF_ENGINE_RELEASE)
message(WARNING "InferenceEngine version has not been set, 2021.3 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.")
set(INF_ENGINE_RELEASE "2021030000")
message(WARNING "InferenceEngine version has not been set, 2021.4 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.")
set(INF_ENGINE_RELEASE "2021040000")
endif()
set(INF_ENGINE_RELEASE "${INF_ENGINE_RELEASE}" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2020.1.0.2 -> 2020010002)")
set_target_properties(${INF_ENGINE_TARGET} PROPERTIES
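Editor's note: the default bump above follows the YYYYAABBCC arithmetic from the math(EXPR ...) line. A small illustrative C++ check (not part of the build; packIEVersion is a hypothetical helper name) of how 2021.4.0 maps to the new 2021040000 default:

```cpp
// Illustrative only: packs an Inference Engine version the same way the
// CMake expression above does (major * 1000000 + minor * 10000 + patch * 100).
constexpr long long packIEVersion(long long major, long long minor, long long patch)
{
    return major * 1000000 + minor * 10000 + patch * 100;
}

static_assert(packIEVersion(2021, 4, 0) == 2021040000LL, "2021.4.0 -> 2021040000");
static_assert(packIEVersion(2021, 3, 0) == 2021030000LL, "2021.3.0 -> 2021030000");

int main() { return 0; }
```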

@@ -16,8 +16,8 @@
# define OPENCV_HAVE_FILESYSTEM_SUPPORT 1
# elif defined(__APPLE__)
# include <TargetConditionals.h>
# if (defined(TARGET_OS_OSX) && TARGET_OS_OSX) || (!defined(TARGET_OS_OSX) && !TARGET_OS_IPHONE)
# define OPENCV_HAVE_FILESYSTEM_SUPPORT 1 // OSX only
# if (defined(TARGET_OS_OSX) && TARGET_OS_OSX) || (defined(TARGET_OS_IOS) && TARGET_OS_IOS)
# define OPENCV_HAVE_FILESYSTEM_SUPPORT 1 // OSX, iOS only
# endif
# else
/* unknown */

@@ -657,7 +657,11 @@ void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net)
try
{
InferenceEngine::IExtensionPtr extension =
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2021_4)
std::make_shared<InferenceEngine::Extension>(libName);
#else
InferenceEngine::make_so_pointer<InferenceEngine::IExtension>(libName);
#endif
ie.AddExtension(extension, "CPU");
CV_LOG_INFO(NULL, "DNN-IE: Loaded extension plugin: " << libName);
@@ -1005,35 +1009,54 @@ void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlo
reqWrapper->req.SetInput(inpBlobs);
reqWrapper->req.SetOutput(outBlobs);
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2021_4)
InferenceEngine::InferRequest infRequest = reqWrapper->req;
NgraphReqWrapper* wrapperPtr = reqWrapper.get();
CV_Assert(wrapperPtr && "Internal error");
#else
InferenceEngine::IInferRequest::Ptr infRequestPtr = reqWrapper->req;
infRequestPtr->SetUserData(reqWrapper.get(), 0);
CV_Assert(infRequestPtr);
InferenceEngine::IInferRequest& infRequest = *infRequestPtr.get();
infRequest.SetUserData(reqWrapper.get(), 0);
#endif
infRequestPtr->SetCompletionCallback(
[](InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status)
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2021_4)
// do NOT capture 'reqWrapper' (smart ptr) in the lambda callback
infRequest.SetCompletionCallback<std::function<void(InferenceEngine::InferRequest, InferenceEngine::StatusCode)>>(
[wrapperPtr](InferenceEngine::InferRequest /*request*/, InferenceEngine::StatusCode status)
#else
infRequest.SetCompletionCallback(
[](InferenceEngine::IInferRequest::Ptr requestPtr, InferenceEngine::StatusCode status)
#endif
{
CV_LOG_DEBUG(NULL, "DNN(nGraph): completionCallback(" << (int)status << ")");
#if !INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2021_4)
CV_Assert(requestPtr);
InferenceEngine::IInferRequest& request = *requestPtr.get();
NgraphReqWrapper* wrapper;
request->GetUserData((void**)&wrapper, 0);
CV_Assert(wrapper && "Internal error");
NgraphReqWrapper* wrapperPtr;
request.GetUserData((void**)&wrapperPtr, 0);
CV_Assert(wrapperPtr && "Internal error");
#endif
NgraphReqWrapper& wrapper = *wrapperPtr;
size_t processedOutputs = 0;
try
{
for (; processedOutputs < wrapper->outProms.size(); ++processedOutputs)
for (; processedOutputs < wrapper.outProms.size(); ++processedOutputs)
{
const std::string& name = wrapper->outsNames[processedOutputs];
Mat m = ngraphBlobToMat(wrapper->req.GetBlob(name));
const std::string& name = wrapper.outsNames[processedOutputs];
Mat m = ngraphBlobToMat(wrapper.req.GetBlob(name));
try
{
CV_Assert(status == InferenceEngine::StatusCode::OK);
wrapper->outProms[processedOutputs].setValue(m.clone());
wrapper.outProms[processedOutputs].setValue(m.clone());
}
catch (...)
{
try {
wrapper->outProms[processedOutputs].setException(std::current_exception());
wrapper.outProms[processedOutputs].setException(std::current_exception());
} catch(...) {
CV_LOG_ERROR(NULL, "DNN: Exception occurred during async inference exception propagation");
}
@@ -1043,16 +1066,16 @@ void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlo
catch (...)
{
std::exception_ptr e = std::current_exception();
for (; processedOutputs < wrapper->outProms.size(); ++processedOutputs)
for (; processedOutputs < wrapper.outProms.size(); ++processedOutputs)
{
try {
wrapper->outProms[processedOutputs].setException(e);
wrapper.outProms[processedOutputs].setException(e);
} catch(...) {
CV_LOG_ERROR(NULL, "DNN: Exception occurred during async inference exception propagation");
}
}
}
wrapper->isReady = true;
wrapper.isReady = true;
}
);
}
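Editor's note: the 2021.4 branch above deliberately hands a raw NgraphReqWrapper* to the completion callback instead of capturing the reqWrapper smart pointer (see the "do NOT capture" comment). A minimal standalone C++ sketch, not Inference Engine code, of why capturing the owning shared_ptr inside a callback stored by the owned object would keep the object alive forever:

```cpp
#include <functional>
#include <iostream>
#include <memory>

// Sketch: a request object stores its own completion callback. If that
// callback captured a shared_ptr to its owner, the owner would hold a
// reference to itself and could never be destroyed.
struct Request
{
    std::function<void(int /*status*/)> onDone;
};

struct Wrapper
{
    Request req;
    bool isReady = false;
};

int main()
{
    auto wrapper = std::make_shared<Wrapper>();

    // OK: capture a raw pointer; the callback does not own the wrapper.
    Wrapper* wrapperPtr = wrapper.get();
    wrapper->req.onDone = [wrapperPtr](int status)
    {
        std::cout << "status=" << status << "\n";
        wrapperPtr->isReady = true;
    };

    // NOT OK (ownership cycle): capturing 'wrapper' by value would store a
    // shared_ptr<Wrapper> inside Wrapper::req, so use_count never reaches 0.
    // wrapper->req.onDone = [wrapper](int) { /* ... */ };

    wrapper->req.onDone(0);
    std::cout << std::boolalpha << wrapper->isReady << "\n";  // true
    return 0;
}
```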

@@ -35,6 +35,7 @@ namespace dnn
class BatchNormLayerImpl CV_FINAL : public BatchNormLayer
{
public:
Mat origin_weights, origin_bias;
Mat weights_, bias_;
UMat umat_weight, umat_bias;
mutable int dims;
@@ -88,11 +89,11 @@ public:
const float* weightsData = hasWeights ? blobs[weightsBlobIndex].ptr<float>() : 0;
const float* biasData = hasBias ? blobs[biasBlobIndex].ptr<float>() : 0;
weights_.create(1, (int)n, CV_32F);
bias_.create(1, (int)n, CV_32F);
origin_weights.create(1, (int)n, CV_32F);
origin_bias.create(1, (int)n, CV_32F);
float* dstWeightsData = weights_.ptr<float>();
float* dstBiasData = bias_.ptr<float>();
float* dstWeightsData = origin_weights.ptr<float>();
float* dstBiasData = origin_bias.ptr<float>();
for (size_t i = 0; i < n; ++i)
{
@@ -100,15 +101,12 @@ public:
dstWeightsData[i] = w;
dstBiasData[i] = (hasBias ? biasData[i] : 0.0f) - w * meanData[i] * varMeanScale;
}
// We will use blobs to store origin weights and bias to restore them in case of reinitialization.
weights_.copyTo(blobs[0].reshape(1, 1));
bias_.copyTo(blobs[1].reshape(1, 1));
}
virtual void finalize(InputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE
{
blobs[0].reshape(1, 1).copyTo(weights_);
blobs[1].reshape(1, 1).copyTo(bias_);
origin_weights.reshape(1, 1).copyTo(weights_);
origin_bias.reshape(1, 1).copyTo(bias_);
}
void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
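Editor's note: the loop that fills dstWeightsData/dstBiasData folds the batch-norm statistics into a per-channel scale and shift, which finalize() now restores from origin_weights/origin_bias instead of overwriting blobs. A hedged sketch of the standard folding (the exact variance scaling term used by OpenCV is not visible in this hunk):

```cpp
#include <cmath>
#include <cstdio>
#include <vector>

// Standard batch-norm folding into per-channel scale/shift:
//   y = gamma * (x - mean) / sqrt(var + eps) + beta
//     = scale * x + shift,  with scale = gamma / sqrt(var + eps)
//                                 shift = beta - scale * mean
int main()
{
    const float eps = 1e-5f;
    std::vector<float> gamma = {1.0f, 0.5f}, beta = {0.0f, 1.0f};
    std::vector<float> mean  = {0.2f, -0.1f}, var  = {1.0f, 4.0f};

    for (size_t i = 0; i < gamma.size(); ++i)
    {
        float scale = gamma[i] / std::sqrt(var[i] + eps);
        float shift = beta[i] - scale * mean[i];
        std::printf("channel %zu: scale=%f shift=%f\n", i, scale, shift);
    }
    return 0;
}
```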

@@ -1954,6 +1954,23 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
addConstant(layerParams.name, concatenated[0]);
return;
}
else
{
for (int i = 0; i < node_proto.input_size(); ++i)
{
if (constBlobs.find(node_proto.input(i)) != constBlobs.end())
{
LayerParams constParams;
constParams.name = node_proto.input(i);
constParams.type = "Const";
constParams.blobs.push_back(getBlob(node_proto, i));
opencv_onnx::NodeProto proto;
proto.add_output(constParams.name);
addLayer(constParams, proto);
}
}
}
}
else if (layer_type == "Resize")
{

@@ -30,10 +30,11 @@
#define INF_ENGINE_RELEASE_2021_1 2021010000
#define INF_ENGINE_RELEASE_2021_2 2021020000
#define INF_ENGINE_RELEASE_2021_3 2021030000
#define INF_ENGINE_RELEASE_2021_4 2021040000
#ifndef INF_ENGINE_RELEASE
#warning("IE version have not been provided via command-line. Using 2021.3 by default")
#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2021_3
#warning("IE version have not been provided via command-line. Using 2021.4 by default")
#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2021_4
#endif
#define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
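Editor's note: the comparison macros (like INF_ENGINE_VER_MAJOR_GT above) divide the packed YYYYAABBCC value by 10000, so only the year and minor-release digits take part in the comparison. A tiny self-contained check:

```cpp
// Patch/build digits (BBCC) are dropped by the integer division.
static_assert(2021040000 / 10000 == 202104, "2021.4.x.x compares as 202104");
static_assert((2021040000 / 10000) > (2021030000 / 10000),
              "2021.4 compares greater than 2021.3");

int main() { return 0; }
```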

@@ -204,7 +204,7 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_Caffe)
Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 300), Scalar(127.5, 127.5, 127.5), false);
float scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 1.5e-2 : 0.0;
float iouDiff = (target == DNN_TARGET_MYRIAD) ? 0.063 : 0.0;
float detectionConfThresh = (target == DNN_TARGET_MYRIAD) ? 0.252 : FLT_MIN;
float detectionConfThresh = (target == DNN_TARGET_MYRIAD) ? 0.262 : FLT_MIN;
processNet("dnn/MobileNetSSD_deploy.caffemodel", "dnn/MobileNetSSD_deploy.prototxt",
inp, "detection_out", "", scoreDiff, iouDiff, detectionConfThresh);
expectNoFallbacksFromIE(net);
@@ -359,8 +359,8 @@ TEST_P(DNNTestNetwork, OpenPose_pose_coco)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
const float l1 = (target == DNN_TARGET_MYRIAD) ? 0.0056 : 0.0;
const float lInf = (target == DNN_TARGET_MYRIAD) ? 0.072 : 0.0;
const float l1 = (target == DNN_TARGET_MYRIAD) ? 0.009 : 0.0;
const float lInf = (target == DNN_TARGET_MYRIAD) ? 0.09 : 0.0;
processNet("dnn/openpose_pose_coco.caffemodel", "dnn/openpose_pose_coco.prototxt",
Size(46, 46), "", "", l1, lInf);
expectNoFallbacksFromIE(net);
@@ -380,8 +380,8 @@ TEST_P(DNNTestNetwork, OpenPose_pose_mpi)
#endif
// output range: [-0.001, 0.97]
const float l1 = (target == DNN_TARGET_MYRIAD) ? 0.012 : 0.0;
const float lInf = (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.16 : 0.0;
const float l1 = (target == DNN_TARGET_MYRIAD) ? 0.02 : 0.0;
const float lInf = (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.2 : 0.0;
processNet("dnn/openpose_pose_mpi.caffemodel", "dnn/openpose_pose_mpi.prototxt",
Size(46, 46), "", "", l1, lInf);
expectNoFallbacksFromIE(net);

@@ -307,6 +307,15 @@ TEST_P(DNNTestOpenVINO, models)
ASSERT_FALSE(backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) <<
"Inference Engine backend is required";
#if INF_ENGINE_VER_MAJOR_EQ(2021040000)
if (targetId == DNN_TARGET_MYRIAD && (
modelName == "person-detection-retail-0013" || // ncDeviceOpen:1013 Failed to find booted device after boot
modelName == "age-gender-recognition-retail-0013" // ncDeviceOpen:1013 Failed to find booted device after boot
)
)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#if INF_ENGINE_VER_MAJOR_GE(2020020000)
if (targetId == DNN_TARGET_MYRIAD && backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{

@@ -349,6 +349,7 @@ TEST_P(Test_ONNX_layers, Concatenation)
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
}
testONNXModels("concatenation");
testONNXModels("concat_const_blobs");
}
TEST_P(Test_ONNX_layers, Eltwise3D)

@@ -290,9 +290,14 @@ TEST_P(Test_Torch_layers, net_padding)
TEST_P(Test_Torch_layers, net_non_spatial)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021030000)
#if defined(INF_ENGINE_RELEASE) && ( \
INF_ENGINE_VER_MAJOR_EQ(2021030000) || \
INF_ENGINE_VER_MAJOR_EQ(2021040000) \
)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // crash
// 2021.3: crash
// 2021.4: [ GENERAL_ERROR ] AssertionFailed: !out.networkInputs.empty()
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)

@@ -89,7 +89,7 @@ void MatchingMethod( int, void* )
//! [create_result_matrix]
/// Create the result matrix
int result_cols = img.cols - templ.cols + 1;
int result_cols = img.cols - templ.cols + 1;
int result_rows = img.rows - templ.rows + 1;
result.create( result_rows, result_cols, CV_32FC1 );

@@ -72,18 +72,18 @@ void Hist_and_Backproj(int, void* )
//! [initialize]
int histSize = MAX( bins, 2 );
float hue_range[] = { 0, 180 };
const float* ranges = { hue_range };
const float* ranges[] = { hue_range };
//! [initialize]
//! [Get the Histogram and normalize it]
Mat hist;
calcHist( &hue, 1, 0, Mat(), hist, 1, &histSize, &ranges, true, false );
calcHist( &hue, 1, 0, Mat(), hist, 1, &histSize, ranges, true, false );
normalize( hist, hist, 0, 255, NORM_MINMAX, -1, Mat() );
//! [Get the Histogram and normalize it]
//! [Get Backprojection]
Mat backproj;
calcBackProject( &hue, 1, 0, hist, backproj, &ranges, 1, true );
calcBackProject( &hue, 1, 0, hist, backproj, ranges, 1, true );
//! [Get Backprojection]
//! [Draw the backproj]
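Editor's note: the tutorial fix above declares ranges as an array of pointers because calcHist() and calcBackProject() take one range array per histogram dimension (const float**), so the array is passed directly rather than via &ranges. A minimal standalone example of the corrected calling pattern:

```cpp
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>

// Minimal sketch of the fixed pattern: 'ranges' is an array of per-channel
// range pointers, not a single pointer.
int main()
{
    cv::Mat gray(64, 64, CV_8UC1, cv::Scalar(128));

    int histSize = 16;
    float grayRange[] = { 0, 256 };            // upper bound exclusive
    const float* ranges[] = { grayRange };     // one pointer per channel

    cv::Mat hist;
    cv::calcHist(&gray, 1, 0, cv::Mat(), hist, 1, &histSize, ranges,
                 /*uniform=*/true, /*accumulate=*/false);

    std::cout << "hist bins: " << hist.rows << std::endl;  // 16
    return 0;
}
```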

@@ -37,7 +37,7 @@ int main(int argc, char** argv)
//! [Set the ranges ( for B,G,R) )]
float range[] = { 0, 256 }; //the upper boundary is exclusive
const float* histRange = { range };
const float* histRange[] = { range };
//! [Set the ranges ( for B,G,R) )]
//! [Set histogram param]
@@ -46,9 +46,9 @@ int main(int argc, char** argv)
//! [Compute the histograms]
Mat b_hist, g_hist, r_hist;
calcHist( &bgr_planes[0], 1, 0, Mat(), b_hist, 1, &histSize, &histRange, uniform, accumulate );
calcHist( &bgr_planes[1], 1, 0, Mat(), g_hist, 1, &histSize, &histRange, uniform, accumulate );
calcHist( &bgr_planes[2], 1, 0, Mat(), r_hist, 1, &histSize, &histRange, uniform, accumulate );
calcHist( &bgr_planes[0], 1, 0, Mat(), b_hist, 1, &histSize, histRange, uniform, accumulate );
calcHist( &bgr_planes[1], 1, 0, Mat(), g_hist, 1, &histSize, histRange, uniform, accumulate );
calcHist( &bgr_planes[2], 1, 0, Mat(), r_hist, 1, &histSize, histRange, uniform, accumulate );
//! [Compute the histograms]
//! [Draw the histograms for B, G and R]

@@ -188,7 +188,7 @@ def main():
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_aspect("equal")
ax.set_aspect("auto")
cam_width = args.cam_width
cam_height = args.cam_height

@@ -32,7 +32,7 @@ def draw_gaussain(img, mean, cov, color):
w, u, _vt = cv.SVDecomp(cov)
ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi)
s1, s2 = np.sqrt(w)*3.0
cv.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv.LINE_AA)
cv.ellipse(img, (int(x), int(y)), (int(s1), int(s2)), ang, 0, 360, color, 1, cv.LINE_AA)
def main():

@@ -48,7 +48,7 @@ def hist_lines(im):
cv.normalize(hist_item,hist_item,0,255,cv.NORM_MINMAX)
hist=np.int32(np.around(hist_item))
for x,y in enumerate(hist):
cv.line(h,(x,0),(x,y),(255,255,255))
cv.line(h,(x,0),(x,y[0]),(255,255,255))
y = np.flipud(h)
return y

@@ -77,8 +77,8 @@ class App:
for (x0, y0), (x1, y1), good in zip(self.p0[:,0], self.p1[:,0], status[:,0]):
if good:
cv.line(vis, (x0, y0), (x1, y1), (0, 128, 0))
cv.circle(vis, (x1, y1), 2, (red, green)[good], -1)
cv.line(vis, (int(x0), int(y0)), (int(x1), int(y1)), (0, 128, 0))
cv.circle(vis, (int(x1), int(y1)), 2, (red, green)[good], -1)
draw_str(vis, (20, 20), 'track count: %d' % len(self.p1))
if self.use_ransac:
draw_str(vis, (20, 40), 'RANSAC')
@@ -86,7 +86,7 @@ class App:
p = cv.goodFeaturesToTrack(frame_gray, **feature_params)
if p is not None:
for x, y in p[:,0]:
cv.circle(vis, (x, y), 2, green, -1)
cv.circle(vis, (int(x), int(y)), 2, green, -1)
draw_str(vis, (20, 20), 'feature count: %d' % len(p))
cv.imshow('lk_homography', vis)

@@ -65,7 +65,7 @@ class App:
if len(tr) > self.track_len:
del tr[0]
new_tracks.append(tr)
cv.circle(vis, (x, y), 2, (0, 255, 0), -1)
cv.circle(vis, (int(x), int(y)), 2, (0, 255, 0), -1)
self.tracks = new_tracks
cv.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0))
draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks))

@@ -30,7 +30,7 @@ def main():
color = (0, 255, 0)
cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_AUTOFOCUS, False) # Known bug: https://github.com/opencv/opencv/pull/5474
cap.set(cv.CAP_PROP_AUTOFOCUS, 0) # Known bug: https://github.com/opencv/opencv/pull/5474
cv.namedWindow("Video")
@@ -67,7 +67,7 @@ def main():
break
elif k == ord('g'):
convert_rgb = not convert_rgb
cap.set(cv.CAP_PROP_CONVERT_RGB, convert_rgb)
cap.set(cv.CAP_PROP_CONVERT_RGB, 1 if convert_rgb else 0)
print('Done')
