Merge remote-tracking branch 'upstream/3.4' into merge-3.4

pull/18556/head
Author: Alexander Alekhin
Commit: 1b443219ed
15 changed files:
  1. modules/dnn/perf/perf_net.cpp (18 changed lines)
  2. modules/dnn/src/dnn.cpp (68 changed lines)
  3. modules/dnn/src/layers/convolution_layer.cpp (10 changed lines)
  4. modules/dnn/test/test_common.impl.hpp (4 changed lines)
  5. modules/dnn/test/test_darknet_importer.cpp (5 changed lines)
  6. modules/dnn/test/test_layers.cpp (38 changed lines)
  7. modules/dnn/test/test_torch_importer.cpp (4 changed lines)
  8. platforms/winpack_dldt/2020.4/20201005-dldt-fix-cldnn-compilation.patch (12 changed lines)
  9. platforms/winpack_dldt/2020.4/patch.config.py (1 changed line)
  10. platforms/winpack_dldt/2021.1/20200413-dldt-pdb.patch (14 changed lines)
  11. platforms/winpack_dldt/2021.1/20200604-dldt-disable-multidevice.patch (13 changed lines)
  12. platforms/winpack_dldt/2021.1/20201005-dldt-disable-unused-targets.patch (178 changed lines)
  13. platforms/winpack_dldt/2021.1/patch.config.py (3 changed lines)
  14. platforms/winpack_dldt/2021.1/sysroot.config.py (56 changed lines)
  15. platforms/winpack_dldt/build_package.py (4 changed lines)

@@ -111,6 +111,10 @@ PERF_TEST_P_(DNNTestNetwork, ENet)
if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU) ||
(backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("");
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2021010000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
throw SkipTestException("");
#endif
processNet("dnn/Enet-model-best.net", "", "enet.yml",
Mat(cv::Size(512, 256), CV_32FC3));
}
@@ -202,6 +206,10 @@ PERF_TEST_P_(DNNTestNetwork, YOLOv3)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
throw SkipTestException("Test is disabled in OpenVINO 2020.4");
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021010000) // nGraph compilation failure
if (target == DNN_TARGET_MYRIAD)
throw SkipTestException("");
#endif
Mat sample = imread(findDataFile("dnn/dog416.png"));
cvtColor(sample, sample, COLOR_BGR2RGB);
@@ -214,7 +222,7 @@ PERF_TEST_P_(DNNTestNetwork, YOLOv4)
{
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
if (target == DNN_TARGET_MYRIAD)
if (target == DNN_TARGET_MYRIAD) // not enough resources
throw SkipTestException("");
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2020040000) // nGraph compilation failure
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
@@ -233,6 +241,10 @@ PERF_TEST_P_(DNNTestNetwork, YOLOv4_tiny)
{
if (backend == DNN_BACKEND_HALIDE)
throw SkipTestException("");
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021010000) // nGraph compilation failure
if (target == DNN_TARGET_MYRIAD)
throw SkipTestException("");
#endif
Mat sample = imread(findDataFile("dnn/dog416.png"));
cvtColor(sample, sample, COLOR_BGR2RGB);
Mat inp;
@@ -263,6 +275,10 @@ PERF_TEST_P_(DNNTestNetwork, Inception_v2_Faster_RCNN)
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019020000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
throw SkipTestException("Test is disabled in OpenVINO 2019R2");
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021010000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is disabled in OpenVINO 2021.1 / MYRIAD");
#endif
if (backend == DNN_BACKEND_HALIDE ||
(backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU) ||

@@ -2654,12 +2654,15 @@ struct Net::Impl : public detail::NetImplBase
// OpenCL: fuse convolution layer followed by eltwise + relu
// CUDA: fuse convolution layer followed by eltwise (and optional activation)
if ((IS_DNN_OPENCL_TARGET(preferableTarget) || IS_DNN_CUDA_TARGET(preferableTarget)) &&
ld.layerInstance->type == "Convolution" )
while (nextData &&
(IS_DNN_OPENCL_TARGET(preferableTarget) || IS_DNN_CUDA_TARGET(preferableTarget)) &&
ld.layerInstance->type == "Convolution"
) // semantic of 'if'
{
Ptr<EltwiseLayer> nextEltwiseLayer;
if( nextData )
nextEltwiseLayer = nextData->layerInstance.dynamicCast<EltwiseLayer>();
Ptr<EltwiseLayer> nextEltwiseLayer = nextData->layerInstance.dynamicCast<EltwiseLayer>();
if (nextEltwiseLayer.empty())
break;
#ifdef HAVE_CUDA
// CUDA backend supports fusion with eltwise sum (without variable channels)
// `nextEltwiseLayer` is reset if eltwise layer doesn't have a compatible configuration for fusion
@@ -2675,7 +2678,37 @@ struct Net::Impl : public detail::NetImplBase
nextEltwiseLayer = Ptr<EltwiseLayer>();
}
#endif
if (!nextEltwiseLayer.empty() && nextData && nextData->inputBlobsId.size() == 2)
if (pinsToKeep.count(lpNext) != 0)
break;
if (nextData->inputBlobsId.size() != 2)
break;
if (!nextData->params.has("operation") || toLowerCase(nextData->params.get<String>("operation")) == "sum")
{
if (nextData->params.has("coeff"))
{
DictValue paramCoeff = nextData->params.get("coeff");
int n = paramCoeff.size();
bool isCoeffOneOne = (n == 2);
for (int i = 0; isCoeffOneOne && i < n; i++)
{
float c = paramCoeff.get<float>(i);
isCoeffOneOne &= (c == 1.0f);
}
if (!isCoeffOneOne)
{
CV_LOG_DEBUG(NULL, "DNN/OpenCL: fusion of 'Sum' without coeffs (or {1.0, 1.0}) is supported only");
break;
}
}
}
else
{
CV_LOG_DEBUG(NULL, "DNN/OpenCL: fusion with eltwise operation is not supported: " << nextData->params.get<String>("operation"));
break;
}
{
LayerData *eltwiseData = nextData;
@@ -2732,11 +2765,13 @@ struct Net::Impl : public detail::NetImplBase
// we need to check them separately; hence, the fuse variables
bool fuse_eltwise = false, fuse_activation = false;
Ptr<PowerLayer> activ_power;
if (IS_DNN_OPENCL_TARGET(preferableTarget) && !nextFusabeleActivLayer.empty() &&
nextData &&
(!nextData->type.compare("ReLU") ||
!nextData->type.compare("ChannelsPReLU") ||
!nextData->type.compare("Power")) &&
(!nextData->type.compare("Power") && (activ_power = nextFusabeleActivLayer.dynamicCast<PowerLayer>()) && activ_power->scale == 1.0f)
) &&
currLayer->setActivation(nextFusabeleActivLayer))
{
fuse_eltwise = true;
@@ -2868,6 +2903,8 @@ struct Net::Impl : public detail::NetImplBase
}
}
}
break;
}
}
@@ -3107,11 +3144,11 @@ struct Net::Impl : public detail::NetImplBase
Ptr<Layer> layer = ld.layerInstance;
TickMeter tm;
tm.start();
if( !ld.skip )
{
TickMeter tm;
tm.start();
std::map<int, Ptr<BackendNode> >::iterator it = ld.backendNodes.find(preferableBackend);
if (preferableBackend == DNN_BACKEND_OPENCV || it == ld.backendNodes.end() || it->second.empty())
{
@@ -3320,12 +3357,15 @@ struct Net::Impl : public detail::NetImplBase
CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
}
}
tm.stop();
int64 t = tm.getTimeTicks();
layersTimings[ld.id] = (t > 0) ? t : t + 1; // zero for skipped layers only
}
else
tm.reset();
tm.stop();
layersTimings[ld.id] = tm.getTimeTicks();
{
layersTimings[ld.id] = 0;
}
ld.flag = 1;
}
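
For context, here is a minimal standalone sketch (not part of the commit) of the topology the fusion logic above targets: a Convolution feeding an Eltwise "sum" that also receives a bypass input, followed by a ReLU. It uses the same public cv::dnn graph-building calls as the fusion tests in test_layers.cpp; the layer names, shapes and parameter values are illustrative assumptions.

#include <opencv2/dnn.hpp>
#include <opencv2/core.hpp>
#include <iostream>
#include <vector>

int main()
{
    using namespace cv;
    using namespace cv::dnn;

    const int C = 3, H = 8, W = 8;

    // 1x1 convolution that preserves the channel count, so its output shape
    // matches the bypass tensor required by the eltwise "sum" fusion path.
    LayerParams convParams;
    convParams.name = "conv";
    convParams.type = "Convolution";
    convParams.set("kernel_size", 1);
    convParams.set("num_output", C);
    convParams.set("bias_term", false);
    int wshape[] = {C, C, 1, 1};
    convParams.blobs.push_back(Mat(4, wshape, CV_32F, Scalar(0.1)));

    // Only "sum" without coeffs (or with coeffs {1.0, 1.0}) passes the fusion check above.
    LayerParams eltwiseParams;
    eltwiseParams.name = "add";
    eltwiseParams.type = "Eltwise";
    eltwiseParams.set("operation", "sum");

    LayerParams reluParams;
    reluParams.name = "relu";
    reluParams.type = "ReLU";

    Net net;
    int convId    = net.addLayer(convParams.name, convParams.type, convParams);
    int eltwiseId = net.addLayer(eltwiseParams.name, eltwiseParams.type, eltwiseParams);
    net.addLayerToPrev(reluParams.name, reluParams.type, reluParams);
    net.connect(0, 0, convId, 0);          // network input -> convolution
    net.connect(convId, 0, eltwiseId, 0);  // convolution -> eltwise (first input)
    net.connect(0, 0, eltwiseId, 1);       // network input -> eltwise (bypass, second input)

    int ishape[] = {1, C, H, W};
    net.setInput(Mat(4, ishape, CV_32F, Scalar(1.0)));
    net.setPreferableBackend(DNN_BACKEND_OPENCV);
    net.setPreferableTarget(DNN_TARGET_OPENCL);  // fusion is attempted on OpenCL/CUDA targets;
                                                 // OpenCV falls back to CPU if OpenCL is unavailable

    Mat out = net.forward();
    std::cout << "output dims: " << out.dims << ", elements: " << out.total() << std::endl;

    // Per-layer timings after the timing change above: a value of 0 marks a
    // skipped (e.g. fused-away) layer, non-zero values are measured tick counts.
    std::vector<double> timings;
    net.getPerfProfile(timings);
    std::cout << "layers reported: " << timings.size() << std::endl;
    return 0;
}

With fusion applied, the eltwise and activation layers are expected to report zero time through getPerfProfile, which is exactly the "zero for skipped layers only" convention the updated timing code introduces.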

@@ -48,6 +48,8 @@
#include "../ie_ngraph.hpp"
#include "../op_vkcom.hpp"
#include <opencv2/core/utils/logger.hpp>
#include "opencv2/core/hal/hal.hpp"
#include "opencv2/core/hal/intrin.hpp"
#include <iostream>
@@ -436,6 +438,14 @@ public:
Ptr<PowerLayer> activ_power = activ.dynamicCast<PowerLayer>();
if (!activ_power.empty())
{
if (activ_power->scale != 1.0f) // not supported well by implementation, #17964
{
// FIXIT no way to check number of blobs (like, eltwise input)
CV_LOG_DEBUG(NULL, "DNN/OpenCL: can't configure Power activation (scale != 1.0f)");
activ.release();
newActiv = false;
return false;
}
if (activ_power->scale != 1.f || activ_power->shift != 0.f)
{
const int outCh = blobs[0].size[0];
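
The new guard above rejects OpenCL fusion of a Power activation whose scale differs from 1.0 (issue #17964). A tiny, hypothetical snippet showing what such an activation's parameters look like through the public LayerParams interface (the parameter names power/scale/shift follow the Power layer convention; the values are made up):

#include <opencv2/dnn.hpp>
#include <iostream>

int main()
{
    // Power computes (shift + scale * x) ^ power; with scale != 1.0f the
    // fusion code above releases the activation and the layer runs unfused.
    cv::dnn::LayerParams powerParams;
    powerParams.name = "act";
    powerParams.type = "Power";
    powerParams.set("power", 1.0f);
    powerParams.set("scale", 0.5f);  // != 1.0f: rejected by the check above
    powerParams.set("shift", 0.0f);
    std::cout << "scale = " << powerParams.get<float>("scale") << std::endl;
    return 0;
}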

@@ -67,10 +67,10 @@ void normAssert(
double l1 /*= 0.00001*/, double lInf /*= 0.0001*/)
{
double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total();
EXPECT_LE(normL1, l1) << comment;
EXPECT_LE(normL1, l1) << comment << " |ref| = " << cvtest::norm(ref, cv::NORM_INF);
double normInf = cvtest::norm(ref, test, cv::NORM_INF);
EXPECT_LE(normInf, lInf) << comment;
EXPECT_LE(normInf, lInf) << comment << " |ref| = " << cvtest::norm(ref, cv::NORM_INF);
}
std::vector<cv::Rect2d> matToBoxes(const cv::Mat& m)
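
For reference, a small standalone example (not part of the commit) of the two quantities normAssert compares, and of the |ref| value the updated failure messages now print. It uses cv::norm, which for these inputs yields the same values as the cvtest::norm helper used above.

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Mat ref  = (cv::Mat_<float>(1, 3) << 1.0f, 2.0f, 3.0f);
    cv::Mat test = (cv::Mat_<float>(1, 3) << 1.1f, 2.0f, 3.0f);

    // Mean absolute difference (L1 norm of the difference divided by the element
    // count) and maximum absolute difference (Inf norm), as checked by normAssert.
    double normL1  = cv::norm(ref, test, cv::NORM_L1) / ref.total();  // ~0.1 / 3 = ~0.033
    double normInf = cv::norm(ref, test, cv::NORM_INF);               // ~0.1
    double refInf  = cv::norm(ref, cv::NORM_INF);                     // the "|ref|" printed on failure: 3

    std::cout << "normL1=" << normL1
              << " normInf=" << normInf
              << " |ref|=" << refInf << std::endl;
    return 0;
}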

@@ -656,6 +656,11 @@ TEST_P(Test_Darknet_nets, YOLOv4_tiny)
target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB
);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021010000) // nGraph compilation failure
if (target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
const double confThreshold = 0.5;
// batchId, classId, confidence, left, top, right, bottom
const int N0 = 2;

@@ -2264,10 +2264,6 @@ TEST_P(ConvolutionActivationFusion, Accuracy)
Backend backendId = get<0>(get<2>(GetParam()));
Target targetId = get<1>(get<2>(GetParam()));
// bug: https://github.com/opencv/opencv/issues/17964
if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
Net net;
int convId = net.addLayer(convParams.name, convParams.type, convParams);
int activId = net.addLayerToPrev(activationParams.name, activationParams.type, activationParams);
@@ -2280,7 +2276,7 @@ TEST_P(ConvolutionActivationFusion, Accuracy)
expectedFusedLayers.push_back(activId); // all activations are fused
else if (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)
{
if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6" || actType == "TanH" || actType == "Power")
if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6" || actType == "TanH" /*|| actType == "Power"*/)
expectedFusedLayers.push_back(activId);
}
}
@@ -2390,21 +2386,6 @@ TEST_P(ConvolutionEltwiseActivationFusion, Accuracy)
Backend backendId = get<0>(get<4>(GetParam()));
Target targetId = get<1>(get<4>(GetParam()));
// bug: https://github.com/opencv/opencv/issues/17945
if ((eltwiseOp != "sum" || weightedEltwise) && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
// bug: https://github.com/opencv/opencv/issues/17953
if (eltwiseOp == "sum" && actType == "ChannelsPReLU" && bias_term == false &&
backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
{
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
}
// bug: https://github.com/opencv/opencv/issues/17964
if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
Net net;
int convId = net.addLayer(convParams.name, convParams.type, convParams);
int eltwiseId = net.addLayer(eltwiseParams.name, eltwiseParams.type, eltwiseParams);
@@ -2421,7 +2402,9 @@ TEST_P(ConvolutionEltwiseActivationFusion, Accuracy)
expectedFusedLayers.push_back(activId); // activation is fused with eltwise layer
else if (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)
{
if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "Power")
if (eltwiseOp == "sum" && !weightedEltwise &&
(actType == "ReLU" || actType == "ChannelsPReLU" /*|| actType == "Power"*/)
)
{
expectedFusedLayers.push_back(eltwiseId);
expectedFusedLayers.push_back(activId);
@@ -2483,17 +2466,6 @@ TEST_P(ConvolutionActivationEltwiseFusion, Accuracy)
Backend backendId = get<0>(get<4>(GetParam()));
Target targetId = get<1>(get<4>(GetParam()));
// bug: https://github.com/opencv/opencv/issues/17964
if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
// bug: https://github.com/opencv/opencv/issues/17953
if (actType == "ChannelsPReLU" && bias_term == false &&
backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
{
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL);
}
Net net;
int convId = net.addLayer(convParams.name, convParams.type, convParams);
int activId = net.addLayer(activationParams.name, activationParams.type, activationParams);
@@ -2510,7 +2482,7 @@ TEST_P(ConvolutionActivationEltwiseFusion, Accuracy)
expectedFusedLayers.push_back(activId); // activation fused with convolution
else if (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)
{
if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6" || actType == "TanH" || actType == "Power")
if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6" || actType == "TanH" /*|| actType == "Power"*/)
expectedFusedLayers.push_back(activId); // activation fused with convolution
}
}

@@ -409,6 +409,10 @@ TEST_P(Test_Torch_nets, ENet_accuracy)
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
throw SkipTestException("");
}
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2021010000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
#endif
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
{

@@ -0,0 +1,12 @@
diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/common/tensor_type.h b/inference-engine/thirdparty/clDNN/kernel_selector/common/tensor_type.h
index 3dbdfd0b..6b04b910 100644
--- a/inference-engine/thirdparty/clDNN/kernel_selector/common/tensor_type.h
+++ b/inference-engine/thirdparty/clDNN/kernel_selector/common/tensor_type.h
@@ -15,6 +15,7 @@
#pragma once
+#include <stdexcept>
#include "common_types.h"
#include "common_tools.h"
#include <vector>

@@ -1,3 +1,4 @@
applyPatch('20200701-dldt-disable-unused-targets.patch')
applyPatch('20200413-dldt-pdb.patch')
applyPatch('20200604-dldt-disable-multidevice.patch')
applyPatch('20201005-dldt-fix-cldnn-compilation.patch')

@@ -0,0 +1,14 @@
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1f981ed2..90eb500a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -22,6 +22,9 @@ endif()
project(OpenVINO)
+set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /Zi /FS")
+set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /DEBUG /OPT:REF /OPT:ICF")
+
set(OpenVINO_MAIN_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(IE_MAIN_SOURCE_DIR ${OpenVINO_MAIN_SOURCE_DIR}/inference-engine)
list(APPEND CMAKE_MODULE_PATH "${OpenVINO_MAIN_SOURCE_DIR}/cmake")

@@ -0,0 +1,13 @@
diff --git a/inference-engine/src/CMakeLists.txt b/inference-engine/src/CMakeLists.txt
index 0ba0dd78..7d34e7cb 100644
--- a/inference-engine/src/CMakeLists.txt
+++ b/inference-engine/src/CMakeLists.txt
@@ -26,7 +26,7 @@ endif()
add_subdirectory(hetero_plugin)
-add_subdirectory(multi_device)
+#add_subdirectory(multi_device)
add_subdirectory(transformations)

@@ -0,0 +1,178 @@
diff --git a/inference-engine/CMakeLists.txt b/inference-engine/CMakeLists.txt
index 7f45ab02..a7bac7e9 100644
--- a/inference-engine/CMakeLists.txt
+++ b/inference-engine/CMakeLists.txt
@@ -70,7 +70,7 @@ if(ENABLE_TESTS)
add_subdirectory(tests)
endif()
-add_subdirectory(tools)
+#add_subdirectory(tools)
function(ie_build_samples)
# samples should be build with the same flags as from OpenVINO package,
@@ -89,7 +89,7 @@ endfunction()
# gflags and format_reader targets are kept inside of samples directory and
# they must be built even if samples build is disabled (required for tests and tools).
-ie_build_samples()
+#ie_build_samples()
file(GLOB_RECURSE SAMPLES_SOURCES samples/*.cpp samples/*.hpp samples/*.h)
add_cpplint_target(sample_cpplint
@@ -180,7 +180,7 @@ endif()
# Developer package
#
-ie_developer_export_targets(format_reader)
+#ie_developer_export_targets(format_reader)
ie_developer_export_targets(${NGRAPH_LIBRARIES})
# for Template plugin
@@ -188,7 +188,7 @@ if(NGRAPH_INTERPRETER_ENABLE)
ie_developer_export_targets(ngraph_backend interpreter_backend)
endif()
-ie_developer_export()
+#ie_developer_export()
configure_file(
"${IE_MAIN_SOURCE_DIR}/cmake/developer_package_config.cmake.in"
diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt
index 9ab88898..8badb591 100644
--- a/inference-engine/src/inference_engine/CMakeLists.txt
+++ b/inference-engine/src/inference_engine/CMakeLists.txt
@@ -118,7 +118,7 @@ add_cpplint_target(${TARGET_NAME}_plugin_api_cpplint FOR_SOURCES ${plugin_api_sr
# Create common base object library
-add_library(${TARGET_NAME}_common_obj OBJECT
+add_library(${TARGET_NAME}_common_obj OBJECT EXCLUDE_FROM_ALL
${IE_BASE_SOURCE_FILES})
target_compile_definitions(${TARGET_NAME}_common_obj PRIVATE IMPLEMENT_INFERENCE_ENGINE_API)
@@ -132,7 +132,7 @@ target_include_directories(${TARGET_NAME}_common_obj SYSTEM PRIVATE
# Create object library
-add_library(${TARGET_NAME}_obj OBJECT
+add_library(${TARGET_NAME}_obj OBJECT EXCLUDE_FROM_ALL
${LIBRARY_SRC}
${LIBRARY_HEADERS}
${PUBLIC_HEADERS})
@@ -183,7 +183,7 @@ ie_register_plugins(MAIN_TARGET ${TARGET_NAME}
# Static library used for unit tests which are always built
-add_library(${TARGET_NAME}_s STATIC
+add_library(${TARGET_NAME}_s STATIC EXCLUDE_FROM_ALL
$<TARGET_OBJECTS:${TARGET_NAME}_obj>
$<TARGET_OBJECTS:${TARGET_NAME}_common_obj>
$<TARGET_OBJECTS:${TARGET_NAME}_legacy_obj>
diff --git a/inference-engine/src/legacy_api/CMakeLists.txt b/inference-engine/src/legacy_api/CMakeLists.txt
index ed87a073..b30e6671 100644
--- a/inference-engine/src/legacy_api/CMakeLists.txt
+++ b/inference-engine/src/legacy_api/CMakeLists.txt
@@ -26,7 +26,7 @@ endif()
# Create object library
-add_library(${TARGET_NAME}_obj OBJECT
+add_library(${TARGET_NAME}_obj OBJECT EXCLUDE_FROM_ALL
${LIBRARY_SRC}
${PUBLIC_HEADERS})
diff --git a/inference-engine/src/mkldnn_plugin/CMakeLists.txt b/inference-engine/src/mkldnn_plugin/CMakeLists.txt
index 166818cd..6c1e8e36 100644
--- a/inference-engine/src/mkldnn_plugin/CMakeLists.txt
+++ b/inference-engine/src/mkldnn_plugin/CMakeLists.txt
@@ -193,7 +193,7 @@ cross_compiled_file(${TARGET_NAME}
# add test object library
-add_library(${TARGET_NAME}_obj OBJECT ${SOURCES} ${HEADERS})
+add_library(${TARGET_NAME}_obj OBJECT EXCLUDE_FROM_ALL ${SOURCES} ${HEADERS})
target_include_directories(${TARGET_NAME}_obj PRIVATE $<TARGET_PROPERTY:inference_engine_preproc_s,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:inference_engine_lp_transformations,INTERFACE_INCLUDE_DIRECTORIES>
diff --git a/inference-engine/src/preprocessing/CMakeLists.txt b/inference-engine/src/preprocessing/CMakeLists.txt
index f4fed72a..9cedd6b5 100644
--- a/inference-engine/src/preprocessing/CMakeLists.txt
+++ b/inference-engine/src/preprocessing/CMakeLists.txt
@@ -124,7 +124,7 @@ endif()
# Create object library
-add_library(${TARGET_NAME}_obj OBJECT
+add_library(${TARGET_NAME}_obj OBJECT EXCLUDE_FROM_ALL
${LIBRARY_SRC}
${LIBRARY_HEADERS})
@@ -175,7 +175,7 @@ add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}
# Static library used for unit tests which are always built
-add_library(${TARGET_NAME}_s STATIC
+add_library(${TARGET_NAME}_s STATIC EXCLUDE_FROM_ALL
$<TARGET_OBJECTS:${TARGET_NAME}_obj>)
set_ie_threading_interface_for(${TARGET_NAME}_s)
diff --git a/inference-engine/src/vpu/common/CMakeLists.txt b/inference-engine/src/vpu/common/CMakeLists.txt
index b291d5b4..74ab8287 100644
--- a/inference-engine/src/vpu/common/CMakeLists.txt
+++ b/inference-engine/src/vpu/common/CMakeLists.txt
@@ -57,7 +57,7 @@ add_common_target("vpu_common_lib" FALSE)
# Unit tests support for graph transformer
if(WIN32)
- add_common_target("vpu_common_lib_test_static" TRUE)
+ #add_common_target("vpu_common_lib_test_static" TRUE)
else()
add_library("vpu_common_lib_test_static" ALIAS "vpu_common_lib")
endif()
diff --git a/inference-engine/src/vpu/graph_transformer/CMakeLists.txt b/inference-engine/src/vpu/graph_transformer/CMakeLists.txt
index a4543745..807b8e36 100644
--- a/inference-engine/src/vpu/graph_transformer/CMakeLists.txt
+++ b/inference-engine/src/vpu/graph_transformer/CMakeLists.txt
@@ -65,7 +65,7 @@ add_graph_transformer_target("vpu_graph_transformer" FALSE)
# Unit tests support for graph transformer
if(WIN32)
- add_graph_transformer_target("vpu_graph_transformer_test_static" TRUE)
+ #add_graph_transformer_target("vpu_graph_transformer_test_static" TRUE)
else()
add_library("vpu_graph_transformer_test_static" ALIAS "vpu_graph_transformer")
endif()
diff --git a/inference-engine/thirdparty/CMakeLists.txt b/inference-engine/thirdparty/CMakeLists.txt
index a2550bfa..10ce316f 100644
--- a/inference-engine/thirdparty/CMakeLists.txt
+++ b/inference-engine/thirdparty/CMakeLists.txt
@@ -56,13 +56,13 @@ function(build_with_lto)
endfunction()
ie_build_pugixml()
- add_subdirectory(stb_lib)
+ #add_subdirectory(stb_lib)
add_subdirectory(ade)
add_subdirectory(fluid/modules/gapi)
target_include_directories(pugixml INTERFACE "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/pugixml/src>")
- set_target_properties(pugixml ade fluid stb_image
+ set_target_properties(pugixml ade fluid
PROPERTIES FOLDER thirdparty)
# developer package
diff --git a/inference-engine/thirdparty/pugixml/CMakeLists.txt b/inference-engine/thirdparty/pugixml/CMakeLists.txt
index 8bcb2801..380fb468 100644
--- a/inference-engine/thirdparty/pugixml/CMakeLists.txt
+++ b/inference-engine/thirdparty/pugixml/CMakeLists.txt
@@ -41,7 +41,7 @@ if(BUILD_SHARED_LIBS)
else()
add_library(pugixml STATIC ${SOURCES})
if (MSVC)
- add_library(pugixml_mt STATIC ${SOURCES})
+ #add_library(pugixml_mt STATIC ${SOURCES})
#if (WIN32)
# set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT")
# set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MTd")

@@ -0,0 +1,3 @@
applyPatch('20201005-dldt-disable-unused-targets.patch')
applyPatch('20200413-dldt-pdb.patch')
applyPatch('20200604-dldt-disable-multidevice.patch')

@@ -0,0 +1,56 @@
sysroot_bin_dir = prepare_dir(self.sysrootdir / 'bin')
copytree(self.build_dir / 'install', self.sysrootdir / 'ngraph')
#rm_one(self.sysrootdir / 'ngraph' / 'lib' / 'ngraph.dll')
build_config = 'Release' if not self.config.build_debug else 'Debug'
build_bin_dir = self.build_dir / 'bin' / 'intel64' / build_config
def copy_bin(name):
global build_bin_dir, sysroot_bin_dir
copytree(build_bin_dir / name, sysroot_bin_dir / name)
dll_suffix = 'd' if self.config.build_debug else ''
def copy_dll(name):
global copy_bin, dll_suffix
copy_bin(name + dll_suffix + '.dll')
copy_bin(name + dll_suffix + '.pdb')
copy_bin('cache.json')
copy_dll('clDNNPlugin')
copy_dll('HeteroPlugin')
copy_dll('inference_engine')
copy_dll('inference_engine_ir_reader')
copy_dll('inference_engine_legacy')
copy_dll('inference_engine_transformations') # runtime
copy_dll('inference_engine_lp_transformations') # runtime
copy_dll('MKLDNNPlugin') # runtime
copy_dll('myriadPlugin') # runtime
#copy_dll('MultiDevicePlugin') # runtime, not used
copy_dll('ngraph')
copy_bin('plugins.xml')
copytree(self.build_dir / 'bin' / 'intel64' / 'pcie-ma248x.elf', sysroot_bin_dir / 'pcie-ma248x.elf')
copytree(self.build_dir / 'bin' / 'intel64' / 'usb-ma2x8x.mvcmd', sysroot_bin_dir / 'usb-ma2x8x.mvcmd')
copytree(self.build_dir / 'bin' / 'intel64' / 'usb-ma2450.mvcmd', sysroot_bin_dir / 'usb-ma2450.mvcmd')
copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb' / 'bin', sysroot_bin_dir)
copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb', self.sysrootdir / 'tbb')
sysroot_ie_dir = prepare_dir(self.sysrootdir / 'deployment_tools' / 'inference_engine')
sysroot_ie_lib_dir = prepare_dir(sysroot_ie_dir / 'lib' / 'intel64')
copytree(self.srcdir / 'inference-engine' / 'include', sysroot_ie_dir / 'include')
if not self.config.build_debug:
copytree(self.build_dir / 'install' / 'lib' / 'ngraph.lib', sysroot_ie_lib_dir / 'ngraph.lib')
copytree(build_bin_dir / 'inference_engine.lib', sysroot_ie_lib_dir / 'inference_engine.lib')
copytree(build_bin_dir / 'inference_engine_ir_reader.lib', sysroot_ie_lib_dir / 'inference_engine_ir_reader.lib')
copytree(build_bin_dir / 'inference_engine_legacy.lib', sysroot_ie_lib_dir / 'inference_engine_legacy.lib')
else:
copytree(self.build_dir / 'install' / 'lib' / 'ngraphd.lib', sysroot_ie_lib_dir / 'ngraphd.lib')
copytree(build_bin_dir / 'inference_engined.lib', sysroot_ie_lib_dir / 'inference_engined.lib')
copytree(build_bin_dir / 'inference_engine_ir_readerd.lib', sysroot_ie_lib_dir / 'inference_engine_ir_readerd.lib')
copytree(build_bin_dir / 'inference_engine_legacyd.lib', sysroot_ie_lib_dir / 'inference_engine_legacyd.lib')
sysroot_license_dir = prepare_dir(self.sysrootdir / 'etc' / 'licenses')
copytree(self.srcdir / 'LICENSE', sysroot_license_dir / 'dldt-LICENSE')
copytree(self.srcdir / 'ngraph/LICENSE', sysroot_license_dir / 'ngraph-LICENSE')
copytree(self.sysrootdir / 'tbb/LICENSE', sysroot_license_dir / 'tbb-LICENSE')

@@ -443,8 +443,8 @@ class Builder:
def main():
dldt_src_url = 'https://github.com/openvinotoolkit/openvino'
dldt_src_commit = '2020.4'
dldt_release = '2020040000'
dldt_src_commit = '2021.1'
dldt_release = '2021010000'
build_cache_dir_default = os.environ.get('BUILD_CACHE_DIR', '.build_cache')
build_subst_drive = os.environ.get('BUILD_SUBST_DRIVE', None)
