Merge remote-tracking branch 'upstream/3.4' into merge-3.4

Branch: pull/20423/head
Author: Alexander Alekhin, 4 years ago
Commit: 39b91c97f0
Changed files (12):
 1. CMakeLists.txt (2 changed lines)
 2. modules/core/src/parallel.cpp (2 changed lines)
 3. modules/core/src/system.cpp (22 changed lines)
 4. modules/dnn/src/dnn.cpp (29 changed lines)
 5. modules/dnn/src/ie_ngraph.cpp (97 changed lines)
 6. modules/dnn/src/ie_ngraph.hpp (10 changed lines)
 7. modules/dnn/src/onnx/onnx_importer.cpp (39 changed lines)
 8. modules/dnn/test/test_ie_models.cpp (98 changed lines)
 9. modules/dnn/test/test_onnx_importer.cpp (1 changed line)
10. modules/python/src2/cv2.cpp (14 changed lines)
11. platforms/winpack_dldt/build_package.py (10 changed lines)
12. samples/cpp/grabcut.cpp (37 changed lines)

CMakeLists.txt
@ -659,7 +659,7 @@ if(UNIX)
elseif(EMSCRIPTEN)
# no need to link to system libs with emscripten
elseif(QNXNTO)
# no need to link to system libs with QNX
set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} m)
else()
set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} dl m pthread rt)
endif()

modules/core/src/parallel.cpp
@ -56,7 +56,7 @@
#undef abs
#endif
#if defined __linux__ || defined __APPLE__ || defined __GLIBC__ \
#if defined __unix__ || defined __APPLE__ || defined __GLIBC__ \
|| defined __HAIKU__ || defined __EMSCRIPTEN__ || defined __FreeBSD__ \
|| defined __OpenBSD__
#include <unistd.h>

modules/core/src/system.cpp
@ -116,10 +116,14 @@ void* allocSingletonNewBuffer(size_t size) { return malloc(size); }
#include <cstdlib> // std::abort
#endif
#if defined __ANDROID__ || defined __linux__ || defined __FreeBSD__ || defined __OpenBSD__ || defined __HAIKU__ || defined __Fuchsia__
#if defined __ANDROID__ || defined __unix__ || defined __FreeBSD__ || defined __OpenBSD__ || defined __HAIKU__ || defined __Fuchsia__
# include <unistd.h>
# include <fcntl.h>
#if defined __QNXNTO__
# include <sys/elf.h>
#else
# include <elf.h>
#endif
#if defined __ANDROID__ || defined __linux__
# include <linux/auxvec.h>
#endif
@ -130,7 +134,7 @@ void* allocSingletonNewBuffer(size_t size) { return malloc(size); }
#endif
#if (defined __ppc64__ || defined __PPC64__) && defined __linux__
#if (defined __ppc64__ || defined __PPC64__) && defined __unix__
# include "sys/auxv.h"
# ifndef AT_HWCAP2
# define AT_HWCAP2 26
@ -233,7 +237,7 @@ std::wstring GetTempFileNameWinRT(std::wstring prefix)
#include "omp.h"
#endif
#if defined __linux__ || defined __APPLE__ || defined __EMSCRIPTEN__ || defined __FreeBSD__ || defined __GLIBC__ || defined __HAIKU__
#if defined __unix__ || defined __APPLE__ || defined __EMSCRIPTEN__ || defined __FreeBSD__ || defined __GLIBC__ || defined __HAIKU__
#include <unistd.h>
#include <stdio.h>
#include <sys/types.h>
@ -600,7 +604,7 @@ struct HWFeatures
have[CV_CPU_MSA] = true;
#endif
#if (defined __ppc64__ || defined __PPC64__) && defined __linux__
#if (defined __ppc64__ || defined __PPC64__) && defined __unix__
unsigned int hwcap = getauxval(AT_HWCAP);
if (hwcap & PPC_FEATURE_HAS_VSX) {
hwcap = getauxval(AT_HWCAP2);
@ -814,12 +818,12 @@ int64 getTickCount(void)
LARGE_INTEGER counter;
QueryPerformanceCounter( &counter );
return (int64)counter.QuadPart;
#elif defined __linux || defined __linux__
#elif defined __MACH__ && defined __APPLE__
return (int64)mach_absolute_time();
#elif defined __unix__
struct timespec tp;
clock_gettime(CLOCK_MONOTONIC, &tp);
return (int64)tp.tv_sec*1000000000 + tp.tv_nsec;
#elif defined __MACH__ && defined __APPLE__
return (int64)mach_absolute_time();
#else
struct timeval tv;
gettimeofday(&tv, NULL);
@ -833,8 +837,6 @@ double getTickFrequency(void)
LARGE_INTEGER freq;
QueryPerformanceFrequency(&freq);
return (double)freq.QuadPart;
#elif defined __linux || defined __linux__
return 1e9;
#elif defined __MACH__ && defined __APPLE__
static double freq = 0;
if( freq == 0 )
@ -844,6 +846,8 @@ double getTickFrequency(void)
freq = sTimebaseInfo.denom*1e9/sTimebaseInfo.numer;
}
return freq;
#elif defined __unix__
return 1e9;
#else
return 1e6;
#endif
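The getTickCount()/getTickFrequency() hunks above fold the Linux-specific branches into a generic __unix__ branch (clock_gettime with CLOCK_MONOTONIC, 1e9 ticks per second) while keeping mach_absolute_time() for Apple. As a reminder of how callers are meant to pair these two public cv:: functions, here is a minimal timing sketch (not part of this patch):

```cpp
#include <opencv2/core.hpp>
#include <cstdint>
#include <iostream>

int main()
{
    int64_t start = cv::getTickCount();   // ticks from the monotonic source selected above
    // ... workload to measure ...
    double seconds = (cv::getTickCount() - start) / cv::getTickFrequency();
    std::cout << "elapsed: " << seconds << " s" << std::endl;
    return 0;
}
```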

modules/dnn/src/dnn.cpp
@ -2078,7 +2078,10 @@ struct Net::Impl : public detail::NetImplBase
Ptr<InfEngineNgraphNode> ieNode = node.dynamicCast<InfEngineNgraphNode>();
CV_Assert(!ieNode.empty());
ieNode->net->reset();
CV_Assert(ieNode->net);
InfEngineNgraphNet& ienet = *ieNode->net;
ienet.reset();
for (it = layers.begin(); it != layers.end(); ++it)
{
@ -2095,16 +2098,26 @@ struct Net::Impl : public detail::NetImplBase
{
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
dataPtr->setName(ld.name);
auto it = ienet.outputsDesc.find(ld.name);
if (it != ienet.outputsDesc.end())
{
const InferenceEngine::TensorDesc& descriptor = it->second;
InferenceEngine::DataPtr dataPtr = ngraphDataOutputNode(ld.outputBlobsWrappers[i], descriptor, ld.name);
dataPtr->setName(ld.name);
}
else
{
InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
dataPtr->setName(ld.name);
}
}
}
ieNode->net->addBlobs(ld.inputBlobsWrappers);
ieNode->net->addBlobs(ld.outputBlobsWrappers);
ienet.addBlobs(ld.inputBlobsWrappers);
ienet.addBlobs(ld.outputBlobsWrappers);
ld.skip = true;
}
layers[lastLayerId].skip = false;
ieNode->net->init((Target)preferableTarget);
ienet.init((Target)preferableTarget);
return;
}
@ -4198,8 +4211,8 @@ void Net::forward(OutputArrayOfArrays outputBlobs,
matvec.push_back(impl->getBlob(pins[i]));
}
std::vector<Mat> & outputvec = *(std::vector<Mat> *)outputBlobs.getObj();
outputvec = matvec;
outputBlobs.create((int)matvec.size(), 1, CV_32F/*FIXIT*/, -1); // allocate vector
outputBlobs.assign(matvec);
}
void Net::forward(std::vector<std::vector<Mat> >& outputBlobs,
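The forward() hunk above replaces the direct cast of outputBlobs.getObj() with _OutputArray::create() plus assign(), so the output container is allocated through the OutputArrayOfArrays interface instead of being assumed to be a std::vector<Mat>. For reference, a small caller-side sketch of this API; the model path and input blob are placeholders:

```cpp
#include <opencv2/dnn.hpp>
#include <string>
#include <vector>

void runAllOutputs(const std::string& modelPath, const cv::Mat& blob)
{
    cv::dnn::Net net = cv::dnn::readNet(modelPath);              // placeholder model path
    net.setInput(blob);
    std::vector<cv::String> names = net.getUnconnectedOutLayersNames();
    std::vector<cv::Mat> outs;                                   // filled via the create()/assign() path above
    net.forward(outs, names);                                    // one Mat per requested output
}
```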

modules/dnn/src/ie_ngraph.cpp
@ -792,21 +792,32 @@ void NgraphBackendLayer::forward(InputArrayOfArrays inputs, OutputArrayOfArrays
}
static InferenceEngine::Layout estimateLayout(const Mat& m)
static InferenceEngine::Layout estimateLayout(int dims)
{
if (m.dims == 4)
if (dims == 4)
return InferenceEngine::Layout::NCHW;
else if (m.dims == 3)
else if (dims == 3)
return InferenceEngine::Layout::CHW;
else if (m.dims == 2)
else if (dims == 2)
return InferenceEngine::Layout::NC;
else if (m.dims == 1)
else if (dims == 1)
return InferenceEngine::Layout::C;
else if (m.dims == 5)
else if (dims == 5)
return InferenceEngine::Layout::NCDHW;
else
return InferenceEngine::Layout::ANY;
}
static inline
InferenceEngine::Layout estimateLayout(size_t dims)
{
return estimateLayout((int)dims);
}
static inline
InferenceEngine::Layout estimateLayout(const Mat& m)
{
return estimateLayout(m.dims);
}
static InferenceEngine::DataPtr wrapToInfEngineDataNode(const Mat& m, const std::string& name = "")
{
@ -842,6 +853,7 @@ InferenceEngine::Blob::Ptr wrapToNgraphBlob(const Mat& m, InferenceEngine::Layou
NgraphBackendWrapper::NgraphBackendWrapper(int targetId, const cv::Mat& m)
: BackendWrapper(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, targetId)
, host((Mat*)&m)
{
dataPtr = wrapToInfEngineDataNode(m);
blob = wrapToNgraphBlob(m, estimateLayout(m));
@ -893,7 +905,11 @@ InferenceEngine::Blob::Ptr copyBlob(const InferenceEngine::Blob::Ptr& blob)
copy = InferenceEngine::make_shared_blob<uint8_t>(description);
}
else
CV_Error(Error::StsNotImplemented, "Unsupported blob precision");
{
std::ostringstream msg;
msg << precision;
CV_Error_(Error::StsNotImplemented, ("Unsupported blob precision: %s", msg.str().c_str()));
}
copy->allocate();
return copy;
}
@ -906,6 +922,66 @@ InferenceEngine::DataPtr ngraphDataNode(const Ptr<BackendWrapper>& ptr)
return p->dataPtr;
}
static
InferenceEngine::Blob::Ptr reallocateBlob(Mat &m, const InferenceEngine::TensorDesc& description)
{
auto dims = description.getDims();
auto layout = estimateLayout(dims.size());
MatShape matShape(dims.begin(), dims.end());
if (description.getPrecision() == InferenceEngine::Precision::FP32)
{
m.create(matShape, CV_32FC1);
return InferenceEngine::make_shared_blob<float>(
{description.getPrecision(), dims, layout}, (float*)m.data);
}
else if (description.getPrecision() == InferenceEngine::Precision::I32)
{
m.create(matShape, CV_32SC1);
return InferenceEngine::make_shared_blob<int>(
{description.getPrecision(), dims, layout}, (int*)m.data);
}
else if (description.getPrecision() == InferenceEngine::Precision::U8)
{
m.create(matShape, CV_8UC1);
return InferenceEngine::make_shared_blob<uchar>(
{description.getPrecision(), dims, layout}, (uchar*)m.data);
}
std::ostringstream msg;
msg << "Unsupported IE precision: " << description.getPrecision();
CV_Error(Error::StsNotImplemented, msg.str());
}
InferenceEngine::DataPtr ngraphDataOutputNode(
const Ptr<BackendWrapper>& ptr,
const InferenceEngine::TensorDesc& description,
const std::string name)
{
CV_Assert(!ptr.empty());
Ptr<NgraphBackendWrapper> p = ptr.dynamicCast<NgraphBackendWrapper>();
CV_Assert(!p.empty());
NgraphBackendWrapper& w = *p;
const InferenceEngine::TensorDesc& blobDesc = w.blob.get()->getTensorDesc();
auto dims = description.getDims();
bool reallocate = false;
if (blobDesc.getPrecision() != description.getPrecision())
{
reallocate = true;
CV_LOG_WARNING(NULL, "Reallocate output '" << name << "' blob due to wrong precision: " << blobDesc.getPrecision() << " => " << description.getPrecision() << " ndims=" << dims.size());
}
if (dims.size() != blobDesc.getDims().size())
{
reallocate = true;
CV_LOG_WARNING(NULL, "Reallocate output '" << name << "' blob due to wrong dims: " << blobDesc.getDims().size() << " => " << dims.size());
}
if (reallocate)
{
auto layout = estimateLayout(dims.size());
w.dataPtr = InferenceEngine::DataPtr(new InferenceEngine::Data(name,
{description.getPrecision(), dims, layout}));
w.blob = reallocateBlob(*w.host, description);
}
return w.dataPtr;
}
void forwardNgraph(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
Ptr<BackendNode>& node, bool isAsync)
@ -921,6 +997,13 @@ void InfEngineNgraphNet::reset()
allBlobs.clear();
infRequests.clear();
isInit = false;
outputsDesc.clear();
for (const auto& it : cnn.getOutputsInfo())
{
const std::string& name = it.first;
outputsDesc.insert({name, it.second->getTensorDesc()});
}
}
void InfEngineNgraphNet::addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs)

modules/dnn/src/ie_ngraph.hpp
@ -54,7 +54,8 @@ public:
void setNodePtr(std::shared_ptr<ngraph::Node>* ptr);
void reset();
private:
//private:
detail::NetImplBase& netImpl_;
void release();
@ -89,6 +90,8 @@ private:
bool hasNetOwner;
std::vector<std::string> requestedOutputs;
std::unordered_set<std::shared_ptr<ngraph::Node>> unconnectedNodes;
std::map<std::string, InferenceEngine::TensorDesc> outputsDesc;
};
class InfEngineNgraphNode : public BackendNode
@ -121,12 +124,17 @@ public:
virtual void copyToHost() CV_OVERRIDE;
virtual void setHostDirty() CV_OVERRIDE;
Mat* host;
InferenceEngine::DataPtr dataPtr;
InferenceEngine::Blob::Ptr blob;
AsyncArray futureMat;
};
InferenceEngine::DataPtr ngraphDataNode(const Ptr<BackendWrapper>& ptr);
InferenceEngine::DataPtr ngraphDataOutputNode(
const Ptr<BackendWrapper>& ptr,
const InferenceEngine::TensorDesc& description,
const std::string name);
// This is a fake class to run networks from Model Optimizer. Objects of that
// class simulate responses of layers are imported by OpenCV and supported by

modules/dnn/src/onnx/onnx_importer.cpp
@ -1425,6 +1425,45 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
}
int outCn = layerParams.blobs.empty() ? outShapes[node_proto.input(1)][0] : layerParams.blobs[0].size[0];
layerParams.set("num_output", outCn);
// Check for asymmetric padding in Conv2D
if (layerParams.has("pad"))
{
bool asymmetricPadding = false;
DictValue pads = layerParams.get("pad");
const int dims = pads.size() / 2;
for (int i = 0; i < dims; ++i)
{
if (pads.get<int>(i) != pads.get<int>(i + dims))
{
asymmetricPadding = true;
break;
}
}
if (asymmetricPadding && pads.size() == 4) // [pad_t, pad_l, pad_b, pad_r]
{
layerParams.erase("pad");
// No paddings required for N, C axis
std::vector<int> paddings(4, 0);
// Add paddings for H, W axis
for (int i = 0; i < dims; ++i)
{
paddings.push_back(pads.get<int>(i));
paddings.push_back(pads.get<int>(dims + i));
}
LayerParams padLp;
padLp.name = layerParams.name + "/pad";
padLp.type = "Padding";
padLp.set("paddings", DictValue::arrayInt(&paddings[0], paddings.size()));
opencv_onnx::NodeProto proto;
proto.add_input(node_proto.input(0));
proto.add_output(padLp.name);
addLayer(padLp, proto);
node_proto.set_input(0, padLp.name);
}
}
}
else if (layer_type == "ConvTranspose")
{
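The asymmetric-padding branch added above rewrites the ONNX pads attribute ([begin_0..begin_{d-1}, end_0..end_{d-1}]) into an explicit Padding layer with per-axis (before, after) pairs, leaving the N and C axes unpadded. A standalone sketch of just that index arithmetic, with an illustrative helper name and a 2D example:

```cpp
#include <cstdio>
#include <vector>

// Hypothetical helper mirroring the pad-splitting rule above:
// pads = [pad_top, pad_left, pad_bottom, pad_right] for a 2D convolution.
static std::vector<int> splitAsymmetricPads(const std::vector<int>& pads)
{
    const int dims = (int)pads.size() / 2;
    std::vector<int> paddings(4, 0);          // no padding on the N and C axes
    for (int i = 0; i < dims; ++i)
    {
        paddings.push_back(pads[i]);          // "before" value for this spatial axis
        paddings.push_back(pads[dims + i]);   // "after" value for this spatial axis
    }
    return paddings;
}

int main()
{
    std::vector<int> paddings = splitAsymmetricPads({1, 1, 2, 2});
    for (int v : paddings)
        std::printf("%d ", v);                // prints: 0 0 0 0 1 2 1 2
    std::printf("\n");
    return 0;
}
```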

modules/dnn/test/test_ie_models.cpp
@ -112,6 +112,25 @@ static const std::map<std::string, OpenVINOModelTestCaseInfo>& getOpenVINOTestMo
"intel/age-gender-recognition-retail-0013/FP16/age-gender-recognition-retail-0013",
"intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013"
}},
#endif
#if INF_ENGINE_RELEASE >= 2021020000
// OMZ: 2020.2
{ "face-detection-0105", {
"intel/face-detection-0105/FP32/face-detection-0105",
"intel/face-detection-0105/FP16/face-detection-0105"
}},
{ "face-detection-0106", {
"intel/face-detection-0106/FP32/face-detection-0106",
"intel/face-detection-0106/FP16/face-detection-0106"
}},
#endif
#if INF_ENGINE_RELEASE >= 2021040000
// OMZ: 2021.4
{ "person-vehicle-bike-detection-2004", {
"intel/person-vehicle-bike-detection-2004/FP32/person-vehicle-bike-detection-2004",
"intel/person-vehicle-bike-detection-2004/FP16/person-vehicle-bike-detection-2004"
//"intel/person-vehicle-bike-detection-2004/FP16-INT8/person-vehicle-bike-detection-2004"
}},
#endif
};
@ -145,10 +164,22 @@ inline static std::string getOpenVINOModel(const std::string &modelName, bool is
static inline void genData(const InferenceEngine::TensorDesc& desc, Mat& m, Blob::Ptr& dataPtr)
{
const std::vector<size_t>& dims = desc.getDims();
m.create(std::vector<int>(dims.begin(), dims.end()), CV_32F);
randu(m, -1, 1);
dataPtr = make_shared_blob<float>(desc, (float*)m.data);
if (desc.getPrecision() == InferenceEngine::Precision::FP32)
{
m.create(std::vector<int>(dims.begin(), dims.end()), CV_32F);
randu(m, -1, 1);
dataPtr = make_shared_blob<float>(desc, (float*)m.data);
}
else if (desc.getPrecision() == InferenceEngine::Precision::I32)
{
m.create(std::vector<int>(dims.begin(), dims.end()), CV_32S);
randu(m, -100, 100);
dataPtr = make_shared_blob<int>(desc, (int*)m.data);
}
else
{
FAIL() << "Unsupported precision: " << desc.getPrecision();
}
}
void runIE(Target target, const std::string& xmlPath, const std::string& binPath,
@ -254,7 +285,16 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
BlobMap inputBlobs;
for (auto& it : net.getInputsInfo())
{
genData(it.second->getTensorDesc(), inputsMap[it.first], inputBlobs[it.first]);
const InferenceEngine::TensorDesc& desc = it.second->getTensorDesc();
genData(desc, inputsMap[it.first], inputBlobs[it.first]);
if (cvtest::debugLevel > 0)
{
const std::vector<size_t>& dims = desc.getDims();
std::cout << "Input: '" << it.first << "' precison=" << desc.getPrecision() << " dims=" << dims.size() << " [";
for (auto d : dims)
std::cout << " " << d;
std::cout << "] ocv_mat=" << inputsMap[it.first].size << " of " << typeToString(inputsMap[it.first].type()) << std::endl;
}
}
infRequest.SetInput(inputBlobs);
@ -263,7 +303,16 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
BlobMap outputBlobs;
for (auto& it : net.getOutputsInfo())
{
genData(it.second->getTensorDesc(), outputsMap[it.first], outputBlobs[it.first]);
const InferenceEngine::TensorDesc& desc = it.second->getTensorDesc();
genData(desc, outputsMap[it.first], outputBlobs[it.first]);
if (cvtest::debugLevel > 0)
{
const std::vector<size_t>& dims = desc.getDims();
std::cout << "Output: '" << it.first << "' precison=" << desc.getPrecision() << " dims=" << dims.size() << " [";
for (auto d : dims)
std::cout << " " << d;
std::cout << "] ocv_mat=" << outputsMap[it.first].size << " of " << typeToString(outputsMap[it.first].type()) << std::endl;
}
}
infRequest.SetOutput(outputBlobs);
@ -284,6 +333,12 @@ void runCV(Backend backendId, Target targetId, const std::string& xmlPath, const
net.setPreferableTarget(targetId);
std::vector<String> outNames = net.getUnconnectedOutLayersNames();
if (cvtest::debugLevel > 0)
{
std::cout << "OpenCV output names: " << outNames.size() << std::endl;
for (auto name : outNames)
std::cout << "- " << name << std::endl;
}
std::vector<Mat> outs;
net.forward(outs, outNames);
@ -307,13 +362,26 @@ TEST_P(DNNTestOpenVINO, models)
ASSERT_FALSE(backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) <<
"Inference Engine backend is required";
#if INF_ENGINE_VER_MAJOR_EQ(2021040000)
if (targetId == DNN_TARGET_MYRIAD && (
modelName == "person-detection-retail-0013" || // ncDeviceOpen:1013 Failed to find booted device after boot
modelName == "age-gender-recognition-retail-0013" // ncDeviceOpen:1013 Failed to find booted device after boot
#if INF_ENGINE_VER_MAJOR_GE(2021030000)
if (targetId == DNN_TARGET_MYRIAD && (false
|| modelName == "person-detection-retail-0013" // ncDeviceOpen:1013 Failed to find booted device after boot
|| modelName == "age-gender-recognition-retail-0013" // ncDeviceOpen:1013 Failed to find booted device after boot
|| modelName == "face-detection-0105" // get_element_type() must be called on a node with exactly one output
|| modelName == "face-detection-0106" // get_element_type() must be called on a node with exactly one output
|| modelName == "person-vehicle-bike-detection-2004" // 2021.4+: ncDeviceOpen:1013 Failed to find booted device after boot
)
)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
if (targetId == DNN_TARGET_OPENCL && (false
|| modelName == "face-detection-0106" // Operation: 2278 of type ExperimentalDetectronPriorGridGenerator(op::v6) is not supported
)
)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
if (targetId == DNN_TARGET_OPENCL_FP16 && (false
|| modelName == "face-detection-0106" // Operation: 2278 of type ExperimentalDetectronPriorGridGenerator(op::v6) is not supported
)
)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#if INF_ENGINE_VER_MAJOR_GE(2020020000)
@ -352,6 +420,8 @@ TEST_P(DNNTestOpenVINO, models)
if (targetId == DNN_TARGET_HDDL)
releaseHDDLPlugin();
EXPECT_NO_THROW(runIE(targetId, xmlPath, binPath, inputsMap, ieOutputsMap)) << "runIE";
if (targetId == DNN_TARGET_MYRIAD)
resetMyriadDevice();
EXPECT_NO_THROW(runCV(backendId, targetId, xmlPath, binPath, inputsMap, cvOutputsMap)) << "runCV";
double eps = 0;
@ -359,6 +429,14 @@ TEST_P(DNNTestOpenVINO, models)
if (targetId == DNN_TARGET_CPU && checkHardwareSupport(CV_CPU_AVX_512F))
eps = 1e-5;
#endif
#if INF_ENGINE_VER_MAJOR_GE(2021030000)
if (targetId == DNN_TARGET_CPU && modelName == "face-detection-0105")
eps = 2e-4;
#endif
#if INF_ENGINE_VER_MAJOR_GE(2021040000)
if (targetId == DNN_TARGET_CPU && modelName == "person-vehicle-bike-detection-2004")
eps = 1e-6;
#endif
EXPECT_EQ(ieOutputsMap.size(), cvOutputsMap.size());
for (auto& srcIt : ieOutputsMap)

modules/dnn/test/test_onnx_importer.cpp
@ -112,6 +112,7 @@ TEST_P(Test_ONNX_layers, MaxPooling_2)
TEST_P(Test_ONNX_layers, Convolution)
{
testONNXModels("convolution");
testONNXModels("conv_asymmetric_pads");
}
TEST_P(Test_ONNX_layers, Convolution_variable_weight)

modules/python/src2/cv2.cpp
@ -2081,15 +2081,23 @@ static void OnChange(int pos, void *param)
}
#ifdef HAVE_OPENCV_HIGHGUI
// workaround for #20408, use nullptr, set value later
static int _createTrackbar(const String &trackbar_name, const String &window_name, int value, int count,
TrackbarCallback onChange, PyObject* py_callback_info)
{
int n = createTrackbar(trackbar_name, window_name, NULL, count, onChange, py_callback_info);
setTrackbarPos(trackbar_name, window_name, value);
return n;
}
static PyObject *pycvCreateTrackbar(PyObject*, PyObject *args)
{
PyObject *on_change;
char* trackbar_name;
char* window_name;
int *value = new int;
int value;
int count;
if (!PyArg_ParseTuple(args, "ssiiO", &trackbar_name, &window_name, value, &count, &on_change))
if (!PyArg_ParseTuple(args, "ssiiO", &trackbar_name, &window_name, &value, &count, &on_change))
return NULL;
if (!PyCallable_Check(on_change)) {
PyErr_SetString(PyExc_TypeError, "on_change must be callable");
@ -2108,7 +2116,7 @@ static PyObject *pycvCreateTrackbar(PyObject*, PyObject *args)
{
registered_callbacks.insert(std::pair<std::string, PyObject*>(name, py_callback_info));
}
ERRWRAP2(createTrackbar(trackbar_name, window_name, value, count, OnChange, py_callback_info));
ERRWRAP2(_createTrackbar(trackbar_name, window_name, value, count, OnChange, py_callback_info));
Py_RETURN_NONE;
}
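The workaround above (for issue #20408) no longer hands the trackbar a heap-allocated int: it creates the trackbar with a null value pointer and applies the initial position afterwards via setTrackbarPos(). A rough equivalent of the same pattern in plain C++ highgui, assuming an illustrative window named "demo":

```cpp
#include <opencv2/highgui.hpp>

static void onChange(int pos, void* /*userdata*/)
{
    (void)pos;  // react to the new slider position here
}

int main()
{
    cv::namedWindow("demo");
    const int count = 100, initialValue = 50;
    // No value pointer: highgui keeps the position internally.
    cv::createTrackbar("threshold", "demo", nullptr, count, onChange, nullptr);
    cv::setTrackbarPos("threshold", "demo", initialValue);  // set the start value separately
    cv::waitKey(0);
    return 0;
}
```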

platforms/winpack_dldt/build_package.py
@ -189,7 +189,10 @@ class BuilderDLDT:
if self.srcdir is None:
self.srcdir = prepare_dir(self.outdir / 'sources', clean=clean_src_dir)
self.build_dir = prepare_dir(self.outdir / 'build', clean=self.config.clean_dldt)
self.sysrootdir = prepare_dir(self.outdir / 'sysroot', clean=self.config.clean_dldt)
self.sysrootdir = prepare_dir(self.outdir / 'sysroot', clean=self.config.clean_dldt or self.config.clean_dldt_sysroot)
if not (self.config.clean_dldt or self.config.clean_dldt_sysroot):
_ = prepare_dir(self.sysrootdir / 'bin', clean=True) # always clean sysroot/bin (package files)
_ = prepare_dir(self.sysrootdir / 'etc', clean=True) # always clean sysroot/etc (package files)
if self.config.build_subst_drive:
if os.path.exists(self.config.build_subst_drive + ':\\'):
@ -485,8 +488,9 @@ def main():
parser.add_argument('--cmake_option', action='append', help='Append OpenCV CMake option')
parser.add_argument('--cmake_option_dldt', action='append', help='Append CMake option for DLDT project')
parser.add_argument('--clean_dldt', action='store_true', help='Clear DLDT build and sysroot directories')
parser.add_argument('--clean_opencv', action='store_true', help='Clear OpenCV build directory')
parser.add_argument('--clean_dldt', action='store_true', help='Clean DLDT build and sysroot directories')
parser.add_argument('--clean_dldt_sysroot', action='store_true', help='Clean DLDT sysroot directories')
parser.add_argument('--clean_opencv', action='store_true', help='Clean OpenCV build directory')
parser.add_argument('--build_debug', action='store_true', help='Build debug binaries')
parser.add_argument('--build_tests', action='store_true', help='Build OpenCV tests')

samples/cpp/grabcut.cpp
@ -107,12 +107,14 @@ void GCApplication::showImage() const
Mat res;
Mat binMask;
if( !isInitialized )
image->copyTo( res );
else
{
getBinMask( mask, binMask );
image->copyTo( res, binMask );
image->copyTo( res );
if( isInitialized ){
getBinMask( mask, binMask);
Mat black (binMask.rows, binMask.cols, CV_8UC3, cv::Scalar(0,0,0));
black.setTo(Scalar::all(255), binMask);
addWeighted(black, 0.5, res, 0.5, 0.0, res);
}
vector<Point>::const_iterator it;
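Instead of copying the image through the mask, the updated showImage() now blends a white overlay over the masked region at 50% opacity, so the unsegmented background stays visible. A rough standalone sketch of that overlay step, assuming image is a BGR frame and binMask is a CV_8U mask (the helper name is made up):

```cpp
#include <opencv2/core.hpp>

// Return a copy of image with the binMask region highlighted at 50% opacity.
cv::Mat highlightMask(const cv::Mat& image, const cv::Mat& binMask)
{
    cv::Mat res;
    image.copyTo(res);
    cv::Mat black(binMask.rows, binMask.cols, CV_8UC3, cv::Scalar(0, 0, 0));
    black.setTo(cv::Scalar::all(255), binMask);        // white where the mask is set
    cv::addWeighted(black, 0.5, res, 0.5, 0.0, res);   // 50/50 blend highlights the foreground
    return res;
}
```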
@ -201,24 +203,39 @@ void GCApplication::mouseClick( int event, int x, int y, int flags, void* )
case EVENT_LBUTTONUP:
if( rectState == IN_PROCESS )
{
rect = Rect( Point(rect.x, rect.y), Point(x,y) );
rectState = SET;
setRectInMask();
CV_Assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() );
if(rect.x == x || rect.y == y){
rectState = NOT_SET;
}
else{
rect = Rect( Point(rect.x, rect.y), Point(x,y) );
rectState = SET;
setRectInMask();
CV_Assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() );
}
showImage();
}
if( lblsState == IN_PROCESS )
{
setLblsInMask(flags, Point(x,y), false);
lblsState = SET;
nextIter();
showImage();
}
else{
if(rectState == SET){
nextIter();
showImage();
}
}
break;
case EVENT_RBUTTONUP:
if( prLblsState == IN_PROCESS )
{
setLblsInMask(flags, Point(x,y), true);
prLblsState = SET;
}
if(rectState == SET){
nextIter();
showImage();
}
break;
