Intel Inference Engine deep learning backend (#10608)
* Intel Inference Engine deep learning backend.
* OpenFace network using Inference Engine backend.
parent
292dfc2d72
commit
10e1de74d2
26 changed files with 1379 additions and 49 deletions
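
The patch adds a DNN_BACKEND_INFERENCE_ENGINE backend identifier, a CMake detection script for the Inference Engine, the op_inf_engine backend sources, and an OpenPose Python sample that can switch to the new backend. For orientation before the diffs, here is a minimal C++ usage sketch (not part of the patch; the model and image file names are placeholders):

// Minimal sketch: running a cv::dnn network on the new Inference Engine backend.
// "pose.prototxt", "pose.caffemodel" and "person.jpg" are placeholder names.
#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::dnn::Net net = cv::dnn::readNetFromCaffe("pose.prototxt", "pose.caffemodel");
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);

    cv::Mat img = cv::imread("person.jpg");
    cv::Mat blob = cv::dnn::blobFromImage(img, 1.0 / 255, cv::Size(368, 368),
                                          cv::Scalar(), /*swapRB=*/false, /*crop=*/false);
    net.setInput(blob);
    cv::Mat out = net.forward();  // heatmaps, as in the Python sample below
    return 0;
}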
@@ -0,0 +1,59 @@
# The script detects Intel(R) Inference Engine installation
#
# Parameters:
# INTEL_CVSDK_DIR - Path to Inference Engine root folder
# IE_PLUGINS_PATH - Path to folder with Inference Engine plugins
#
# On return this will define:
#
# HAVE_INF_ENGINE - True if Intel Inference Engine was found
# INF_ENGINE_INCLUDE_DIRS - Inference Engine include folder
# INF_ENGINE_LIBRARIES - Inference Engine libraries and its dependencies
#
macro(ie_fail)
  set(HAVE_INF_ENGINE FALSE)
  return()
endmacro()

if(NOT INF_ENGINE_ROOT_DIR OR NOT EXISTS "${INF_ENGINE_ROOT_DIR}/inference_engine/include/inference_engine.hpp")
  set(ie_root_paths "${INF_ENGINE_ROOT_DIR}")
  if(DEFINED ENV{INTEL_CVSDK_DIR})
    list(APPEND ie_root_paths "$ENV{INTEL_CVSDK_DIR}")
  endif()

  if(WITH_INF_ENGINE AND NOT ie_root_paths)
    list(APPEND ie_root_paths "/opt/intel/deeplearning_deploymenttoolkit/deployment_tools")
  endif()

  find_path(INF_ENGINE_ROOT_DIR inference_engine/include/inference_engine.hpp PATHS ${ie_root_paths})
endif()

set(INF_ENGINE_INCLUDE_DIRS "${INF_ENGINE_ROOT_DIR}/inference_engine/include" CACHE PATH "Path to Inference Engine include directory")

if(NOT INF_ENGINE_ROOT_DIR
    OR NOT EXISTS "${INF_ENGINE_ROOT_DIR}"
    OR NOT EXISTS "${INF_ENGINE_INCLUDE_DIRS}"
    OR NOT EXISTS "${INF_ENGINE_INCLUDE_DIRS}/inference_engine.hpp"
)
  ie_fail()
endif()

set(INF_ENGINE_LIBRARIES "")
foreach(lib inference_engine mklml_intel iomp5)
  find_library(${lib}
    NAMES ${lib}
    HINTS ${IE_PLUGINS_PATH}
    HINTS "$ENV{IE_PLUGINS_PATH}"
    HINTS ${INF_ENGINE_ROOT_DIR}/external/mklml_lnx/lib
  )
  if(NOT ${lib})
    ie_fail()
  endif()
  list(APPEND INF_ENGINE_LIBRARIES ${${lib}})
endforeach()

set(HAVE_INF_ENGINE TRUE)

include_directories(${INF_ENGINE_INCLUDE_DIRS})
list(APPEND OPENCV_LINKER_LIBS ${INF_ENGINE_LIBRARIES})
add_definitions(-DHAVE_INF_ENGINE)
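
On success the script defines HAVE_INF_ENGINE as a CMake variable and as a compile definition, which is what gates every Inference Engine code path in the sources below. A stripped-down illustration of that guard pattern (assumed, for orientation only):

// Illustration only: code compiled against <inference_engine.hpp> solely when
// the detection script above succeeded and -DHAVE_INF_ENGINE was added.
#ifdef HAVE_INF_ENGINE
#include <inference_engine.hpp>
#endif

// Hypothetical helper, analogous to haveInfEngine() declared later in the patch.
bool inferenceEngineCompiledIn()
{
#ifdef HAVE_INF_ENGINE
    return true;
#else
    return false;
#endif
}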
@@ -0,0 +1,360 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#include "precomp.hpp"
#include "op_inf_engine.hpp"
#include <opencv2/dnn/shape_utils.hpp>

namespace cv { namespace dnn {

#ifdef HAVE_INF_ENGINE

InfEngineBackendNode::InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& _layer)
    : BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(_layer) {}

void InfEngineBackendNode::connect(std::vector<Ptr<BackendWrapper> >& inputs,
                                   std::vector<Ptr<BackendWrapper> >& outputs)
{
    layer->insData.resize(inputs.size());
    for (int i = 0; i < inputs.size(); ++i)
    {
        InferenceEngine::DataPtr dataPtr = infEngineDataNode(inputs[i]);
        layer->insData[i] = InferenceEngine::DataWeakPtr(dataPtr);
        dataPtr->inputTo[layer->name] = layer;
    }

    CV_Assert(!outputs.empty());

    layer->outData.resize(1);
    InferenceEngine::DataPtr dataPtr = infEngineDataNode(outputs[0]);
    dataPtr->name = layer->name;
    layer->outData[0] = dataPtr;
    dataPtr->creatorLayer = InferenceEngine::CNNLayerWeakPtr(layer);
}

static std::vector<Ptr<InfEngineBackendWrapper> >
infEngineWrappers(const std::vector<Ptr<BackendWrapper> >& ptrs)
{
    std::vector<Ptr<InfEngineBackendWrapper> > wrappers(ptrs.size());
    for (int i = 0; i < ptrs.size(); ++i)
    {
        CV_Assert(!ptrs[i].empty());
        wrappers[i] = ptrs[i].dynamicCast<InfEngineBackendWrapper>();
        CV_Assert(!wrappers[i].empty());
    }
    return wrappers;
}

static InferenceEngine::DataPtr wrapToInfEngineDataNode(const Mat& m, const std::string& name = "")
{
    std::vector<size_t> reversedShape(&m.size[0], &m.size[0] + m.dims);
    std::reverse(reversedShape.begin(), reversedShape.end());
    return InferenceEngine::DataPtr(
        new InferenceEngine::Data(name, reversedShape, InferenceEngine::Precision::FP32)
    );
}

InferenceEngine::TBlob<float>::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape)
{
    return InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
                                                    shape, (float*)m.data);
}

InferenceEngine::TBlob<float>::Ptr wrapToInfEngineBlob(const Mat& m)
{
    std::vector<size_t> reversedShape(&m.size[0], &m.size[0] + m.dims);
    std::reverse(reversedShape.begin(), reversedShape.end());
    return wrapToInfEngineBlob(m, reversedShape);
}

InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr)
{
    CV_Assert(!ptr.empty());
    Ptr<InfEngineBackendWrapper> p = ptr.dynamicCast<InfEngineBackendWrapper>();
    CV_Assert(!p.empty());
    return p->dataPtr;
}

InfEngineBackendWrapper::InfEngineBackendWrapper(int targetId, const cv::Mat& m)
    : BackendWrapper(DNN_BACKEND_INFERENCE_ENGINE, targetId)
{
    dataPtr = wrapToInfEngineDataNode(m);
    blob = wrapToInfEngineBlob(m);
}

InfEngineBackendWrapper::~InfEngineBackendWrapper()
{

}

void InfEngineBackendWrapper::copyToHost()
{

}

void InfEngineBackendWrapper::setHostDirty()
{

}

void InfEngineBackendNet::Release() noexcept
{
    layers.clear();
    inputs.clear();
    outputs.clear();
}

InferenceEngine::Precision InfEngineBackendNet::getPrecision() noexcept
{
    return InferenceEngine::Precision::FP32;
}

// Assume that the network's outputs are unconnected blobs.
void InfEngineBackendNet::getOutputsInfo(InferenceEngine::OutputsDataMap &outputs_) noexcept
{
    if (outputs.empty())
    {
        for (const auto& l : layers)
        {
            // Add all outputs.
            for (const InferenceEngine::DataPtr& out : l->outData)
            {
                // TODO: Replace with a uniqueness assertion.
                if (outputs.find(out->name) == outputs.end())
                    outputs[out->name] = out;
            }
            // Remove internally connected outputs.
            for (const InferenceEngine::DataWeakPtr& inp : l->insData)
            {
                outputs.erase(InferenceEngine::DataPtr(inp)->name);
            }
        }
        CV_Assert(layers.empty() || !outputs.empty());
    }
    outBlobs.clear();
    for (const auto& it : outputs)
    {
        CV_Assert(allBlobs.find(it.first) != allBlobs.end());
        outBlobs[it.first] = allBlobs[it.first];
    }
    outputs_ = outputs;
}

// Returns input references that aren't connected to internal outputs.
void InfEngineBackendNet::getInputsInfo(InferenceEngine::InputsDataMap &inputs_) noexcept
{
    if (inputs.empty())
    {
        std::map<std::string, InferenceEngine::DataPtr> internalOutputs;
        for (const auto& l : layers)
        {
            for (const InferenceEngine::DataWeakPtr& ptr : l->insData)
            {
                InferenceEngine::DataPtr inp(ptr);
                if (internalOutputs.find(inp->name) == internalOutputs.end())
                {
                    InferenceEngine::InputInfo::Ptr inpInfo(new InferenceEngine::InputInfo());
                    inpInfo->setInputData(inp);
                    if (inputs.find(inp->name) == inputs.end())
                        inputs[inp->name] = inpInfo;
                }
            }
            for (const InferenceEngine::DataPtr& out : l->outData)
            {
                // TODO: Replace with a uniqueness assertion.
                if (internalOutputs.find(out->name) == internalOutputs.end())
                    internalOutputs[out->name] = out;
            }
        }
        CV_Assert(layers.empty() || !inputs.empty());
    }
    inpBlobs.clear();
    for (const auto& it : inputs)
    {
        CV_Assert(allBlobs.find(it.first) != allBlobs.end());
        inpBlobs[it.first] = allBlobs[it.first];
    }
    inputs_ = inputs;
}

InferenceEngine::InputInfo::Ptr InfEngineBackendNet::getInput(const std::string &inputName) noexcept
{
    getInputsInfo(inputs);
    const auto& it = inputs.find(inputName);
    CV_Assert(it != inputs.end());
    return it->second;
}

void InfEngineBackendNet::getName(char *pName, size_t len) noexcept
{
    CV_Error(Error::StsNotImplemented, "");
}

size_t InfEngineBackendNet::layerCount() noexcept
{
    return layers.size();
}

InferenceEngine::DataPtr& InfEngineBackendNet::getData(const char *dname) noexcept
{
    CV_Error(Error::StsNotImplemented, "");
    return outputs.begin()->second;  // Just return something.
}

void InfEngineBackendNet::addLayer(const InferenceEngine::CNNLayerPtr &layer) noexcept
{
    layers.push_back(layer);
    inputs.clear();
    outputs.clear();
}

InferenceEngine::StatusCode
InfEngineBackendNet::addOutput(const std::string &layerName, size_t outputIndex,
                               InferenceEngine::ResponseDesc *resp) noexcept
{
    CV_Error(Error::StsNotImplemented, "");
    return InferenceEngine::StatusCode::OK;
}

InferenceEngine::StatusCode
InfEngineBackendNet::getLayerByName(const char *layerName, InferenceEngine::CNNLayerPtr &out,
                                    InferenceEngine::ResponseDesc *resp) noexcept
{
    CV_Error(Error::StsNotImplemented, "");
    return InferenceEngine::StatusCode::OK;
}

void InfEngineBackendNet::setTargetDevice(InferenceEngine::TargetDevice device) noexcept
{
    if (device != InferenceEngine::TargetDevice::eCPU)
        CV_Error(Error::StsNotImplemented, "");
}

InferenceEngine::TargetDevice InfEngineBackendNet::getTargetDevice() noexcept
{
    return InferenceEngine::TargetDevice::eCPU;
}

InferenceEngine::StatusCode InfEngineBackendNet::setBatchSize(const size_t size) noexcept
{
    CV_Error(Error::StsNotImplemented, "");
    return InferenceEngine::StatusCode::OK;
}

size_t InfEngineBackendNet::getBatchSize() const noexcept
{
    CV_Error(Error::StsNotImplemented, "");
    return 0;
}

void InfEngineBackendNet::initEngine()
{
    CV_Assert(!isInitialized());
    engine = InferenceEngine::InferenceEnginePluginPtr("libMKLDNNPlugin.so");
    InferenceEngine::ResponseDesc resp;
    InferenceEngine::StatusCode status = engine->LoadNetwork(*this, &resp);
    if (status != InferenceEngine::StatusCode::OK)
        CV_Error(Error::StsAssert, resp.msg);
}

bool InfEngineBackendNet::isInitialized()
{
    return (bool)engine;
}

void InfEngineBackendNet::addBlobs(const std::vector<Ptr<BackendWrapper> >& ptrs)
{
    auto wrappers = infEngineWrappers(ptrs);
    for (const auto& wrapper : wrappers)
    {
        allBlobs[wrapper->dataPtr->name] = wrapper->blob;
    }
}

void InfEngineBackendNet::forward()
{
    InferenceEngine::ResponseDesc resp;
    InferenceEngine::StatusCode status = engine->Infer(inpBlobs, outBlobs, &resp);
    if (status != InferenceEngine::StatusCode::OK)
        CV_Error(Error::StsAssert, resp.msg);
}

static inline Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
{
    // NOTE: Inference Engine sizes are reversed.
    std::vector<int> size(blob->dims().begin(), blob->dims().end());
    std::reverse(size.begin(), size.end());
    return Mat(size, CV_32F, (void*)blob->buffer());
}

void fuseConvWeights(const std::shared_ptr<InferenceEngine::ConvolutionLayer>& conv,
                     const Mat& w, const Mat& b)
{
    // Get the convolution's weights. Clone the data because Inference Engine can host it
    // and conv->_weights->allocate() below will deallocate it.
    Mat originWeights = infEngineBlobToMat(conv->_weights).clone();

    // Create a new weights blob.
    conv->_weights = InferenceEngine::make_shared_blob<float>(
        InferenceEngine::Precision::FP32, conv->_weights->dims());
    conv->_weights->allocate();

    // Convolution weights have OIHW data layout.
    // (conv(I) + b1) * w + b2
    // w * conv(I) + b1 * w + b2
    Mat fusedWeights = infEngineBlobToMat(conv->_weights);

    const int numChannels = fusedWeights.size[0];
    // Mat weights = blobs[0].reshape(1, 1);
    // Mat bias = hasBias ? blobs[1].reshape(1, 1) : Mat();
    CV_Assert(numChannels == w.total());
    CV_Assert(b.empty() || numChannels == b.total());
    for (int i = 0; i < numChannels; ++i)
    {
        cv::multiply(slice(originWeights, i), w.at<float>(i), slice(fusedWeights, i));
    }
    if (conv->_biases)
    {
        // The same for biases.
        Mat originBiases = infEngineBlobToMat(conv->_biases).clone();

        conv->_biases = InferenceEngine::make_shared_blob<float>(
            InferenceEngine::Precision::FP32, conv->_biases->dims());
        conv->_biases->allocate();
        Mat fusedBiases = infEngineBlobToMat(conv->_biases);

        cv::multiply(w.reshape(1, fusedBiases.dims, &fusedBiases.size[0]), originBiases, fusedBiases);
        if (!b.empty())
            cv::add(fusedBiases, b.reshape(1, fusedBiases.dims, &fusedBiases.size[0]), fusedBiases);
    }
    else
        conv->_biases = wrapToInfEngineBlob(b);
}

#endif  // HAVE_INF_ENGINE

bool haveInfEngine()
{
#ifdef HAVE_INF_ENGINE
    return true;
#else
    return false;
#endif  // HAVE_INF_ENGINE
}

void forwardInfEngine(Ptr<BackendNode>& node)
{
    CV_Assert(haveInfEngine());
#ifdef HAVE_INF_ENGINE
    CV_Assert(!node.empty());
    Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
    CV_Assert(!ieNode.empty());
    ieNode->net->forward();
#endif  // HAVE_INF_ENGINE
}

}}  // namespace dnn, namespace cv
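
wrapToInfEngineBlob and infEngineBlobToMat bridge the two libraries' shape conventions: OpenCV dnn blobs are N x C x H x W, while, as the note in infEngineBlobToMat says, Inference Engine keeps the sizes in reverse order, so shapes are reversed on every hand-off. A standalone sketch of that conversion (illustrative; uses only cv::Mat, no Inference Engine headers):

// Sketch of the dimension reversal performed by wrapToInfEngineBlob above.
#include <opencv2/core.hpp>
#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
    const int dims[] = {1, 3, 368, 368};              // OpenCV blob layout: N x C x H x W
    cv::Mat blob(4, dims, CV_32F, cv::Scalar(0));

    std::vector<size_t> ieShape(&blob.size[0], &blob.size[0] + blob.dims);
    std::reverse(ieShape.begin(), ieShape.end());     // becomes {368, 368, 3, 1}

    for (size_t d : ieShape)
        std::printf("%zu ", d);                       // prints: 368 368 3 1
    std::printf("\n");
    return 0;
}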
@@ -0,0 +1,122 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#ifndef __OPENCV_DNN_OP_INF_ENGINE_HPP__
#define __OPENCV_DNN_OP_INF_ENGINE_HPP__

#include "precomp.hpp"

#ifdef HAVE_INF_ENGINE
#include <inference_engine.hpp>
#endif  // HAVE_INF_ENGINE

namespace cv { namespace dnn {

#ifdef HAVE_INF_ENGINE

class InfEngineBackendNet : public InferenceEngine::ICNNNetwork
{
public:
    virtual void Release() noexcept;

    virtual InferenceEngine::Precision getPrecision() noexcept;

    virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) noexcept;

    virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) noexcept;

    virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) noexcept;

    virtual void getName(char *pName, size_t len) noexcept;

    virtual size_t layerCount() noexcept;

    virtual InferenceEngine::DataPtr& getData(const char *dname) noexcept;

    virtual void addLayer(const InferenceEngine::CNNLayerPtr &layer) noexcept;

    virtual InferenceEngine::StatusCode addOutput(const std::string &layerName,
                                                  size_t outputIndex = 0,
                                                  InferenceEngine::ResponseDesc *resp = nullptr) noexcept;

    virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
                                                       InferenceEngine::CNNLayerPtr &out,
                                                       InferenceEngine::ResponseDesc *resp) noexcept;

    virtual void setTargetDevice(InferenceEngine::TargetDevice device) noexcept;

    virtual InferenceEngine::TargetDevice getTargetDevice() noexcept;

    virtual InferenceEngine::StatusCode setBatchSize(const size_t size) noexcept;

    virtual size_t getBatchSize() const noexcept;

    void initEngine();

    void addBlobs(const std::vector<Ptr<BackendWrapper> >& wrappers);

    void forward();

    bool isInitialized();

private:
    std::vector<InferenceEngine::CNNLayerPtr> layers;
    InferenceEngine::InputsDataMap inputs;
    InferenceEngine::OutputsDataMap outputs;
    InferenceEngine::BlobMap inpBlobs;
    InferenceEngine::BlobMap outBlobs;
    InferenceEngine::BlobMap allBlobs;
    InferenceEngine::InferenceEnginePluginPtr engine;
};

class InfEngineBackendNode : public BackendNode
{
public:
    InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& layer);

    void connect(std::vector<Ptr<BackendWrapper> >& inputs,
                 std::vector<Ptr<BackendWrapper> >& outputs);

    InferenceEngine::CNNLayerPtr layer;
    // Inference Engine network object that allows us to obtain the outputs of this layer.
    Ptr<InfEngineBackendNet> net;
};

class InfEngineBackendWrapper : public BackendWrapper
{
public:
    InfEngineBackendWrapper(int targetId, const Mat& m);

    ~InfEngineBackendWrapper();

    virtual void copyToHost();

    virtual void setHostDirty();

    InferenceEngine::DataPtr dataPtr;
    InferenceEngine::TBlob<float>::Ptr blob;
};

InferenceEngine::TBlob<float>::Ptr wrapToInfEngineBlob(const Mat& m);

InferenceEngine::TBlob<float>::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape);

InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);

// Fuses convolution weights and biases with channel-wise scales and shifts.
void fuseConvWeights(const std::shared_ptr<InferenceEngine::ConvolutionLayer>& conv,
                     const Mat& w, const Mat& b = Mat());

#endif  // HAVE_INF_ENGINE

bool haveInfEngine();

void forwardInfEngine(Ptr<BackendNode>& node);

}}  // namespace dnn, namespace cv

#endif  // __OPENCV_DNN_OP_INF_ENGINE_HPP__
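
fuseConvWeights folds a channel-wise scale w and shift b into an existing convolution using the identity noted in the .cpp above: (conv(I) + b1) * w + b2 = w * conv(I) + (b1 * w + b2), i.e. each output channel's weights are multiplied by w[c] and the bias becomes b1[c] * w[c] + b2[c]. A toy numeric sketch of that per-channel arithmetic (illustrative only; plain C++, no Inference Engine types):

// Per-channel fusion arithmetic behind fuseConvWeights, on made-up numbers.
#include <cstdio>

int main()
{
    const int numChannels = 2;
    const float b1[numChannels] = {0.5f, -1.0f};  // original convolution biases
    const float w [numChannels] = {2.0f,  3.0f};  // channel-wise scales being fused
    const float b2[numChannels] = {1.0f,  0.0f};  // channel-wise shifts being fused

    for (int c = 0; c < numChannels; ++c)
    {
        const float fusedBias = b1[c] * w[c] + b2[c];
        std::printf("channel %d: multiply weights by %.1f, fused bias = %.1f\n",
                    c, w[c], fusedBias);          // bias = 2.0 for channel 0, -3.0 for channel 1
    }
    return 0;
}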
@@ -0,0 +1,105 @@
# To use the Inference Engine backend, specify the location of its plugins:
# export LD_LIBRARY_PATH=/opt/intel/deeplearning_deploymenttoolkit/deployment_tools/external/mklml_lnx/lib:$LD_LIBRARY_PATH
import cv2 as cv
import numpy as np
import argparse

parser = argparse.ArgumentParser(
    description='This script demonstrates the OpenPose human pose estimation network '
                'from the https://github.com/CMU-Perceptual-Computing-Lab/openpose project using OpenCV. '
                'The sample and model are simplified and can be used for a single person in the frame.')
parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera')
parser.add_argument('--proto', help='Path to .prototxt')
parser.add_argument('--model', help='Path to .caffemodel')
parser.add_argument('--dataset', help='Specify what kind of model was trained. '
                                      'It could be (COCO, MPI) depending on the dataset.')
parser.add_argument('--thr', default=0.1, type=float, help='Threshold value for pose parts heat map')
parser.add_argument('--width', default=368, type=int, help='Resize input to specific width.')
parser.add_argument('--height', default=368, type=int, help='Resize input to specific height.')
parser.add_argument('--inf_engine', action='store_true',
                    help='Enable Intel Inference Engine computational backend. '
                         'Check that the plugins folder is in the LD_LIBRARY_PATH environment variable')

args = parser.parse_args()

if args.dataset == 'COCO':
    BODY_PARTS = { "Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
                   "LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
                   "RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "REye": 14,
                   "LEye": 15, "REar": 16, "LEar": 17, "Background": 18 }

    POSE_PAIRS = [ ["Neck", "RShoulder"], ["Neck", "LShoulder"], ["RShoulder", "RElbow"],
                   ["RElbow", "RWrist"], ["LShoulder", "LElbow"], ["LElbow", "LWrist"],
                   ["Neck", "RHip"], ["RHip", "RKnee"], ["RKnee", "RAnkle"], ["Neck", "LHip"],
                   ["LHip", "LKnee"], ["LKnee", "LAnkle"], ["Neck", "Nose"], ["Nose", "REye"],
                   ["REye", "REar"], ["Nose", "LEye"], ["LEye", "LEar"] ]
else:
    assert(args.dataset == 'MPI')
    BODY_PARTS = { "Head": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
                   "LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
                   "RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "Chest": 14,
                   "Background": 15 }

    POSE_PAIRS = [ ["Head", "Neck"], ["Neck", "RShoulder"], ["RShoulder", "RElbow"],
                   ["RElbow", "RWrist"], ["Neck", "LShoulder"], ["LShoulder", "LElbow"],
                   ["LElbow", "LWrist"], ["Neck", "Chest"], ["Chest", "RHip"], ["RHip", "RKnee"],
                   ["RKnee", "RAnkle"], ["Chest", "LHip"], ["LHip", "LKnee"], ["LKnee", "LAnkle"] ]

inWidth = args.width
inHeight = args.height

net = cv.dnn.readNetFromCaffe(args.proto, args.model)
if args.inf_engine:
    net.setPreferableBackend(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE)

cap = cv.VideoCapture(args.input if args.input else 0)

while cv.waitKey(1) < 0:
    hasFrame, frame = cap.read()
    if not hasFrame:
        cv.waitKey()
        break

    frameWidth = frame.shape[1]
    frameHeight = frame.shape[0]
    inp = cv.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight),
                               (0, 0, 0), swapRB=False, crop=False)
    net.setInput(inp)
    out = net.forward()

    assert(len(BODY_PARTS) == out.shape[1])

    points = []
    for i in range(len(BODY_PARTS)):
        # Slice the heatmap of the corresponding body part.
        heatMap = out[0, i, :, :]

        # Originally, we try to find all the local maximums. To simplify the sample
        # we just find a global one. However, only a single pose at a time
        # can be detected this way.
        _, conf, _, point = cv.minMaxLoc(heatMap)
        x = (frameWidth * point[0]) / out.shape[3]
        y = (frameHeight * point[1]) / out.shape[2]

        # Add a point if its confidence is higher than the threshold.
        points.append((x, y) if conf > args.thr else None)

    for pair in POSE_PAIRS:
        partFrom = pair[0]
        partTo = pair[1]
        assert(partFrom in BODY_PARTS)
        assert(partTo in BODY_PARTS)

        idFrom = BODY_PARTS[partFrom]
        idTo = BODY_PARTS[partTo]

        if points[idFrom] and points[idTo]:
            cv.line(frame, points[idFrom], points[idTo], (0, 255, 0), 3)
            cv.ellipse(frame, points[idFrom], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)
            cv.ellipse(frame, points[idTo], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)

    t, _ = net.getPerfProfile()
    freq = cv.getTickFrequency() / 1000
    cv.putText(frame, '%.2fms' % (t / freq), (10, 20), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))

    cv.imshow('OpenPose using OpenCV', frame)
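
The sample maps each heatmap's global maximum back to frame coordinates by scaling the peak position by frameWidth / heatmapWidth and frameHeight / heatmapHeight. The same step in C++, on a synthetic heatmap (the 46x46 map and 640x480 frame sizes are assumptions for illustration):

// Heatmap peak to frame coordinates, mirroring the minMaxLoc step in the sample.
#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    cv::Mat heatMap(46, 46, CV_32F, cv::Scalar(0));   // one sliced confidence map
    heatMap.at<float>(20, 30) = 0.9f;                 // synthetic peak at x=30, y=20

    double conf;
    cv::Point peak;
    cv::minMaxLoc(heatMap, 0, &conf, 0, &peak);

    const int frameWidth = 640, frameHeight = 480;
    const double x = double(frameWidth)  * peak.x / heatMap.cols;
    const double y = double(frameHeight) * peak.y / heatMap.rows;
    std::printf("conf=%.2f at (%.1f, %.1f)\n", conf, x, y);
    return 0;
}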