@@ -35,6 +35,7 @@ static bool DNN_IE_SERIALIZE = utils::getConfigurationParameterBool("OPENCV_DNN_
static std::string kDefaultInpLayerName = "opencv_ngraph_empty_inp_layer_name";
static constexpr const char* kOpenCVLayersType = "opencv_ngraph_layer";

#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2022_1)
static std::string shapesToStr(const std::vector<Mat>& mats)
{
    std::ostringstream shapes;
@@ -62,6 +63,7 @@ static void strToShapes(const std::string& str, std::vector<std::vector<size_t>
            ss >> shapes[i][j];
    }
}
#endif  // OpenVINO < 2022.1

static std::vector<Ptr<NgraphBackendWrapper> >
ngraphWrappers(const std::vector<Ptr<BackendWrapper> >& ptrs)
@@ -76,6 +78,61 @@ ngraphWrappers(const std::vector<Ptr<BackendWrapper> >& ptrs)
    return wrappers;
}
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
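// OpenVINO >= 2022.1: layers that OpenVINO itself cannot run are wrapped into a
// custom ov::op::Op whose evaluate() forwards to the OpenCV Layer implementation,
// so unsupported layers keep working inside the compiled model.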
class NgraphCustomOp: public ov::op::Op {
public:
    OPENVINO_OP(kOpenCVLayersType);

    NgraphCustomOp(const ngraph::OutputVector& inputs, Ptr<Layer>& cvLayer, const std::vector<Mat>& outputs, const std::vector<Mat>& internals):
        Op(inputs), cvLayer(cvLayer), outputs(outputs), internals(internals)
    {
        constructor_validate_and_infer_types();
    }

    void validate_and_infer_types() override
    {
        set_output_size(outputs.size());
        for (int i = 0; i < outputs.size(); ++i)
        {
            ov::PartialShape shape;
            for (int j = 0; j < outputs[i].dims; ++j) {
                shape.push_back(outputs[i].size[j]);
            }
            set_output_type(i, get_input_element_type(0), shape);
        }
    }

    std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& new_args) const override
    {
        return std::make_shared<NgraphCustomOp>(new_args, cvLayer, outputs, internals);
    }

    bool has_evaluate() const override {
        return true;
    }

    bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override {
        std::vector<Mat> inpMats, outMats;
        infEngineBlobsToMats(inputs, inpMats);
        infEngineBlobsToMats(outputs, outMats);
        try
        {
            cvLayer->forward(inpMats, outMats, internals);
            return true;
        }
        catch (...)
        {
            return false;
        }
    }

    Ptr<Layer>& cvLayer;
    std::vector<Mat> outputs, internals;
};
#else
class NgraphCustomOp: public ngraph::op::Op {
public:
    const ngraph::NodeTypeInfo& get_type_info() const override
@@ -324,7 +381,7 @@ public:
#endif
};
#endif  // OpenVINO >= 2022.1

InfEngineNgraphNode::InfEngineNgraphNode(std::shared_ptr<ngraph::Node>&& _node)
    : BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), node(std::move(_node)) {}
@@ -337,15 +394,6 @@ InfEngineNgraphNode::InfEngineNgraphNode(const std::vector<Ptr<BackendNode> >& n
                                         std::vector<Mat>& outputs, std::vector<Mat>& internals)
    : BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), cvLayer(cvLayer_)
{
    std::ostringstream oss;
    oss << (size_t)cvLayer.get();
    std::map<std::string, InferenceEngine::Parameter> params = {
        {"impl", oss.str()},
        {"outputs", shapesToStr(outputs)},
        {"internals", shapesToStr(internals)}
    };
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_3)
    ngraph::OutputVector inp_nodes;
#else
@@ -353,7 +401,19 @@ InfEngineNgraphNode::InfEngineNgraphNode(const std::vector<Ptr<BackendNode> >& n
#endif
    for (const auto& node : nodes)
        inp_nodes.emplace_back(node.dynamicCast<InfEngineNgraphNode>()->node);

#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
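    // OpenVINO >= 2022.1: the custom op takes the OpenCV layer and its output and
    // internal Mats directly; the string-serialized parameter map below is only
    // needed for older releases.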
    node = std::make_shared<NgraphCustomOp>(inp_nodes, cvLayer, outputs, internals);
#else
    std::ostringstream oss;
    oss << (size_t)cvLayer.get();
    std::map<std::string, InferenceEngine::Parameter> params = {
        {"impl", oss.str()},
        {"outputs", shapesToStr(outputs)},
        {"internals", shapesToStr(internals)}
    };
    node = std::make_shared<NgraphCustomOp>(inp_nodes, params);
#endif

    CV_Assert(!cvLayer->name.empty());
    setName(cvLayer->name);
@@ -383,7 +443,7 @@ void InfEngineNgraphNet::addOutput(const Ptr<InfEngineNgraphNode>& node)
    CV_Assert(node);
    CV_Assert(node->node);
    const std::string& name = node->node->get_friendly_name();
    requestedOutputs.insert({name, node});
    requestedOutputs.insert({name, node.get()});
}

void InfEngineNgraphNet::setNodePtr(std::shared_ptr<ngraph::Node>* ptr) {
@@ -457,6 +517,9 @@ void InfEngineNgraphNet::createNet(Target targetId) {
        CV_LOG_DEBUG(NULL, "DNN/NGRAPH: Add 'Result' output: " << output_node_it->first);
        CV_Assert(output_node_it->second);
        auto out = std::make_shared<ngraph::op::Result>(output_node_it->second->node);
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
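        // OpenVINO >= 2022.1 addresses tensors by name, and tensors of multi-output
        // nodes get a port suffix, hence the ".0" appended below.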
        out->set_friendly_name(output_node_it->first + (output_node_it->second->node->get_output_size() == 1 ? "" : ".0"));
#endif
        outs.push_back(out);
    }
    CV_Assert_N(!inputs_vec.empty(), !outs.empty());
@@ -504,12 +567,20 @@ void InfEngineNgraphNet::createNet(Target targetId) {
        }
    }
}

#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2022_1)
static inline
InferenceEngine::Layout estimateLayout(size_t dims);
#endif

void InfEngineNgraphNet::init(Target targetId)
{
    if (!hasNetOwner)
    {
        if (targetId == DNN_TARGET_OPENCL_FP16)
        {
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
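            // OpenVINO >= 2022.1 ships a transformation pass for the FP32 -> FP16
            // conversion, replacing the manual constant conversion below.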
            ov::pass::ConvertFP32ToFP16().run_on_model(ngraph_function);
#else
            auto nodes = ngraph_function->get_ordered_ops();
            for (auto& node : nodes)
            {
@@ -533,6 +604,7 @@ void InfEngineNgraphNet::init(Target targetId)
                }
            }
            ngraph_function->validate_nodes_and_infer_types();
#endif  // OpenVINO >= 2022.1
        }
        cnn = InferenceEngine::CNNNetwork(ngraph_function);
@@ -580,20 +652,45 @@ void InfEngineNgraphNet::init(Target targetId)
            CV_Error(Error::StsNotImplemented, "Unknown target");
    };

    if (!hasNetOwner) {
        for (size_t i = 0; i < ngraph_function->get_output_size(); ++i) {
            auto node = ngraph_function->output(i).get_node();
            for (size_t j = 0; j < node->get_input_size(); ++j) {
                std::string name = node->input_value(j).get_node()->get_friendly_name();
                auto iter = requestedOutputs.find(name);
                if (iter != requestedOutputs.end()) {
                    requestedOutputs.erase(iter);
                    cnn.addOutput(name);
                }
            }
        }
    }

#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
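    // OpenVINO >= 2022.1: input/output element types are configured through the
    // PrePostProcessor API instead of per-blob InputInfo/Data settings.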
    auto model = cnn.getFunction();
    ov::preprocess::PrePostProcessor ppp(model);
    int i = 0;
    for (const auto& inp : model->inputs()) {  // TODO: not sure why but ngraph_function->inputs() here causes segfault.
        const std::string& name = inp.get_node()->get_friendly_name();
        auto blobIt = allBlobs.find(name);
        CV_Assert(blobIt != allBlobs.end());
        auto srcT = blobIt->second.get_element_type();
        if (srcT != inp.get_node()->get_element_type()) {
            ppp.input(i++).tensor().set_element_type(srcT);
        }
    }

    i = 0;
    for (const auto& it : model->outputs())
    {
        const std::string& name = it.get_node()->get_friendly_name();
        auto blobIt = allBlobs.find(name);
        CV_Assert(blobIt != allBlobs.end());
        const auto& src = blobIt->second;

        // A workaround for single dimension outputs for which OpenCV allocates a 2d Mat.
        // For example, face-detection-0105 has a Result of shape {200} while the output blob is {200, 1}.
        auto outShape = it.get_partial_shape().get_max_shape();
        if (outShape != src.get_shape()) {
            size_t sz = std::accumulate(outShape.begin(), outShape.end(), (size_t)1, std::multiplies<size_t>());
            CV_Assert(sz == src.get_size());
            allBlobs[name] = ov::Tensor(src.get_element_type(), outShape, src.data());
        }

        ppp.output(i++).tensor().set_element_type(ov::element::f32);  // Should be always FP32
    }

    ppp.build();
#else
    for (const auto& it : cnn.getInputsInfo())
    {
        const std::string& name = it.first;
@@ -607,8 +704,16 @@ void InfEngineNgraphNet::init(Target targetId)
        const std::string& name = it.first;
        auto blobIt = allBlobs.find(name);
        CV_Assert(blobIt != allBlobs.end());
        InferenceEngine::TensorDesc& desc = blobIt->second->getTensorDesc();
        auto outShape = it.second->getDims();
        if (outShape != desc.getDims()) {
            desc.reshape(outShape, estimateLayout(outShape.size()));
        }
        it.second->setPrecision(blobIt->second->getTensorDesc().getPrecision());  // Should be always FP32
    }
#endif  // OpenVINO >= 2022.1

    initPlugin(cnn);
}
@@ -660,6 +765,9 @@ void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net)
            const std::string& libName = candidates[i];
            try
            {
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
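                // The 2.0 API loads an extension library for all devices with a
                // single add_extension() call; no device name is needed.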
                ie.add_extension(libName);
#else
                InferenceEngine::IExtensionPtr extension =
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2021_4)
                    std::make_shared<InferenceEngine::Extension>(libName);
@@ -668,6 +776,7 @@ void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net)
#endif
                ie.AddExtension(extension, "CPU");
#endif
                CV_LOG_INFO(NULL, "DNN-IE: Loaded extension plugin: " << libName);
                found = true;
                break;
@@ -678,6 +787,7 @@ void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net)
        {
            CV_LOG_WARNING(NULL, "DNN-IE: Can't load extension plugin (extra layers for some networks). Specify path via OPENCV_DNN_IE_EXTRA_PLUGIN_PATH parameter");
        }
#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2022_1)
        // Some networks can work without the extra-layers library;
        // OpenCV layers are registered as the fallback extensions.
        try
@@ -688,12 +798,17 @@ void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net)
        {
            CV_LOG_INFO(NULL, "DNN-IE: Can't register OpenCV custom layers nGraph extension: " << e.what());
        }
#endif  // OpenVINO < 2022.1

#ifndef _WIN32
        // Limit the number of CPU threads.
        if (device_name == "CPU")
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
            ie.set_property(device_name, ov::inference_num_threads(getNumThreads()));
#else
            ie.SetConfig({{
                InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, format("%d", getNumThreads()),
            }}, device_name);
#endif  // OpenVINO >= 2022.1
#endif

#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2021_2)
        if (device_name.find("GPU") == 0)
@@ -706,9 +821,13 @@ void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net)
            if (!cache_path.empty() && cache_path != "disabled")
            {
                CV_LOG_INFO(NULL, "OpenCV/nGraph: using GPU kernels cache: " << cache_path);
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
                ie.set_property(device_name, ov::cache_dir(cache_path));
#else
                ie.SetConfig({{
                    InferenceEngine::PluginConfigParams::KEY_CACHE_DIR, cache_path,
                }}, device_name);
#endif  // OpenVINO >= 2022.1
            }
        }
#endif
@@ -716,9 +835,9 @@ void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net)
        std::map<std::string, std::string> config;
        if (device_name == "MYRIAD" || device_name == "HDDL") {
#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2020_4)
            config.emplace("MYRIAD_DETECT_NETWORK_BATCH", CONFIG_VALUE(NO));
            config.emplace("MYRIAD_DETECT_NETWORK_BATCH", "NO");
#else
            config.emplace("VPU_DETECT_NETWORK_BATCH", CONFIG_VALUE(NO));
            config.emplace("VPU_DETECT_NETWORK_BATCH", "NO");
#endif
        }
@@ -757,16 +876,17 @@ bool NgraphBackendLayer::getMemoryShapes(const std::vector<MatShape> &inputs,
                                         std::vector<MatShape> &outputs,
                                         std::vector<MatShape> &internals) const
{
    InferenceEngine::ICNNNetwork::InputShapes inShapes = t_net.getInputShapes();
    InferenceEngine::ICNNNetwork::InputShapes::iterator itr;
    auto ngraphFunction = t_net.getFunction();
    bool equal_flag = true;
    size_t i = 0;
    for (itr = inShapes.begin(); itr != inShapes.end(); ++itr)
    std::map<std::string, std::vector<size_t> > inShapes;
    int i = 0;
    for (const auto& inp : ngraphFunction->get_parameters())
    {
        InferenceEngine::SizeVector currentInShape(inputs[i].begin(), inputs[i].end());
        if (itr->second != currentInShape)
        std::vector<size_t> oldShape = inp->get_shape();
        std::vector<size_t> newShape(inputs[i].begin(), inputs[i].end());
        inShapes.insert({inp->get_friendly_name(), newShape});
        if (oldShape != newShape)
        {
            itr->second = currentInShape;
            equal_flag = false;
        }
        i++;
@@ -777,7 +897,18 @@ bool NgraphBackendLayer::getMemoryShapes(const std::vector<MatShape> &inputs,
        InferenceEngine::CNNNetwork curr_t_net(t_net);
        curr_t_net.reshape(inShapes);
    }
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
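    // OpenVINO >= 2022.1: look the output shape up on the model's Result nodes
    // instead of going through getOutputsInfo().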
    std::vector<size_t> dims;
    for (const auto& it : ngraphFunction->outputs()) {
        if (it.get_node()->get_friendly_name() == name) {
            dims = it.get_partial_shape().get_max_shape();
        }
    }
    if (dims.empty())
        CV_Error(Error::StsError, format("Unable to find result with name %s", name.c_str()));
#else
    std::vector<size_t> dims = t_net.getOutputsInfo()[name]->getDims();
#endif
    outputs.push_back(MatShape(dims.begin(), dims.end()));
    return false;
}
@@ -795,6 +926,21 @@ void NgraphBackendLayer::forward(InputArrayOfArrays inputs, OutputArrayOfArrays
    CV_Error(Error::StsInternal, "Choose Inference Engine as a preferable backend.");
}

#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
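// Wrap a Mat into an ov::Tensor that shares the Mat's memory: no copy is made,
// so the Mat must stay alive while the tensor is in use.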
ov::Tensor wrapToNgraphBlob(const Mat& m) {
    std::vector<size_t> shape = getShape<size_t>(m);
    if (m.type() == CV_32F)
        return ov::Tensor(ov::element::f32, shape, m.data);
    else if (m.type() == CV_8U)
        return ov::Tensor(ov::element::u8, shape, m.data);
    else if (m.type() == CV_32SC1)
        return ov::Tensor(ov::element::i32, shape, m.data);
    else
        CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", typeToString(m.type()).c_str()));
}
#else

static InferenceEngine::Layout estimateLayout(int dims)
{
@@ -823,19 +969,6 @@ InferenceEngine::Layout estimateLayout(const Mat& m)
    return estimateLayout(m.dims);
}

static InferenceEngine::DataPtr wrapToInfEngineDataNode(const Mat& m, const std::string& name = "")
{
    std::vector<size_t> shape = getShape<size_t>(m);
    if (m.type() == CV_32F)
        return InferenceEngine::DataPtr(new InferenceEngine::Data(name,
            {InferenceEngine::Precision::FP32, shape, estimateLayout(m)}));
    else if (m.type() == CV_8U)
        return InferenceEngine::DataPtr(new InferenceEngine::Data(name,
            {InferenceEngine::Precision::U8, shape, estimateLayout(m)}));
    else
        CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", typeToString(m.type()).c_str()));
}

InferenceEngine::Blob::Ptr wrapToNgraphBlob(const Mat& m, const std::vector<size_t>& shape,
                                            InferenceEngine::Layout layout)
{
@@ -845,6 +978,9 @@ InferenceEngine::Blob::Ptr wrapToNgraphBlob(const Mat& m, const std::vector<size
    else if (m.type() == CV_8U)
        return InferenceEngine::make_shared_blob<uint8_t>(
            {InferenceEngine::Precision::U8, shape, layout}, (uint8_t*)m.data);
    else if (m.type() == CV_32SC1)
        return InferenceEngine::make_shared_blob<int32_t>(
            {InferenceEngine::Precision::I32, shape, layout}, (int32_t*)m.data);
    else
        CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", typeToString(m.type()).c_str()));
}
@@ -855,12 +991,15 @@ InferenceEngine::Blob::Ptr wrapToNgraphBlob(const Mat& m, InferenceEngine::Layou
    return wrapToNgraphBlob(m, shape, layout);
}

InferenceEngine::Blob::Ptr wrapToNgraphBlob(const Mat& m) { return wrapToNgraphBlob(m, estimateLayout(m)); }
#endif  // OpenVINO >= 2022.1

NgraphBackendWrapper::NgraphBackendWrapper(int targetId, const cv::Mat& m)
    : BackendWrapper(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, targetId)
    , host((Mat*)&m)
{
    dataPtr = wrapToInfEngineDataNode(m);
    blob = wrapToNgraphBlob(m, estimateLayout(m));
    blob = wrapToNgraphBlob(m);
}

NgraphBackendWrapper::NgraphBackendWrapper(Ptr<BackendWrapper> wrapper)
@@ -868,8 +1007,7 @@ NgraphBackendWrapper::NgraphBackendWrapper(Ptr<BackendWrapper> wrapper)
{
    Ptr<NgraphBackendWrapper> ieWrapper = wrapper.dynamicCast<NgraphBackendWrapper>();
    CV_Assert(!ieWrapper.empty());
    InferenceEngine::DataPtr srcData = ieWrapper->dataPtr;
    dataPtr = InferenceEngine::DataPtr(new InferenceEngine::Data(srcData->getName(), srcData->getTensorDesc()));
    name = ieWrapper->name;
    blob = ieWrapper->blob;
}
@@ -895,6 +1033,12 @@ void NgraphBackendWrapper::setHostDirty()
    //CV_Error(Error::StsNotImplemented, "");
}

#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
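// Allocate a fresh tensor of the same element type and shape; the async path uses
// this to detach inference inputs/outputs from user-owned memory.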
ov::Tensor copyBlob(const ov::Tensor& blob)
{
    return ov::Tensor(blob.get_element_type(), blob.get_shape());
}
#else
InferenceEngine::Blob::Ptr copyBlob(const InferenceEngine::Blob::Ptr& blob)
{
    InferenceEngine::Blob::Ptr copy;
@@ -918,88 +1062,13 @@ InferenceEngine::Blob::Ptr copyBlob(const InferenceEngine::Blob::Ptr& blob)
    return copy;
}

InferenceEngine::DataPtr ngraphDataNode(const Ptr<BackendWrapper>& ptr)
{
    CV_Assert(!ptr.empty());
    Ptr<NgraphBackendWrapper> p = ptr.dynamicCast<NgraphBackendWrapper>();
    CV_Assert(!p.empty());
    return p->dataPtr;
}

static
InferenceEngine::Blob::Ptr reallocateBlob(Mat& m, const InferenceEngine::TensorDesc& description)
{
    auto dims = description.getDims();
    auto layout = estimateLayout(dims.size());
    MatShape matShape(dims.begin(), dims.end());
    if (description.getPrecision() == InferenceEngine::Precision::FP32)
    {
        m.create(matShape, CV_32FC1);
        return InferenceEngine::make_shared_blob<float>(
            {description.getPrecision(), dims, layout}, (float*)m.data);
    }
    else if (description.getPrecision() == InferenceEngine::Precision::I32)
    {
        m.create(matShape, CV_32SC1);
        return InferenceEngine::make_shared_blob<int>(
            {description.getPrecision(), dims, layout}, (int*)m.data);
    }
    else if (description.getPrecision() == InferenceEngine::Precision::U8)
    {
        m.create(matShape, CV_8UC1);
        return InferenceEngine::make_shared_blob<uchar>(
            {description.getPrecision(), dims, layout}, (uchar*)m.data);
    }
    std::ostringstream msg;
    msg << "Unsupported IE precision: " << description.getPrecision();
    CV_Error(Error::StsNotImplemented, msg.str());
}

InferenceEngine::DataPtr ngraphDataOutputNode(
    const Ptr<BackendWrapper>& ptr,
    const InferenceEngine::TensorDesc& description,
    const std::string name)
{
    CV_Assert(!ptr.empty());
    Ptr<NgraphBackendWrapper> p = ptr.dynamicCast<NgraphBackendWrapper>();
    CV_Assert(!p.empty());
    NgraphBackendWrapper& w = *p;
    const InferenceEngine::TensorDesc& blobDesc = w.blob.get()->getTensorDesc();
    auto dims = description.getDims();
    bool reallocate = false;
    if (blobDesc.getPrecision() != description.getPrecision())
    {
        reallocate = true;
        CV_LOG_WARNING(NULL, "Reallocate output '" << name << "' blob due to wrong precision: " << blobDesc.getPrecision() << " => " << description.getPrecision() << "  ndims=" << dims.size());
    }
    if (dims.size() != blobDesc.getDims().size())
    {
        reallocate = true;
        CV_LOG_WARNING(NULL, "Reallocate output '" << name << "' blob due to wrong dims: " << blobDesc.getDims().size() << " => " << dims.size());
    }
    if (reallocate)
    {
        auto layout = estimateLayout(dims.size());
        w.dataPtr = InferenceEngine::DataPtr(new InferenceEngine::Data(name,
            {description.getPrecision(), dims, layout}));
        w.blob = reallocateBlob(*w.host, description);
    }
    return w.dataPtr;
}
#endif  // OpenVINO < 2022.1
void InfEngineNgraphNet::reset()
{
    allBlobs.clear();
    infRequests.clear();
    isInit = false;
    outputsDesc.clear();
    for (const auto& it : cnn.getOutputsInfo())
    {
        const std::string& name = it.first;
        outputsDesc.insert({name, it.second->getTensorDesc()});
    }
}

void InfEngineNgraphNet::addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs)
@@ -1007,7 +1076,7 @@ void InfEngineNgraphNet::addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& p
    auto wrappers = ngraphWrappers(ptrs);
    for (const auto& wrapper : wrappers)
    {
        std::string name = wrapper->dataPtr->getName();
        std::string name = wrapper->name;
        name = name.empty() ? kDefaultInpLayerName : name;
        allBlobs.insert({name, wrapper->blob});
    }
@@ -1022,25 +1091,8 @@ void InfEngineNgraphNet::NgraphReqWrapper::makePromises(const std::vector<Ptr<Ba
    for (int i = 0; i < outs.size(); ++i)
    {
        outs[i]->futureMat = outProms[i].getArrayResult();
        outsNames[i] = outs[i]->dataPtr->getName();
    }
}

Mat ngraphBlobToMat(const InferenceEngine::Blob::Ptr& blob)
{
    std::vector<size_t> dims = blob->getTensorDesc().getDims();
    std::vector<int> size(dims.begin(), dims.end());
    auto precision = blob->getTensorDesc().getPrecision();
    int type = -1;
    switch (precision)
    {
        case InferenceEngine::Precision::FP32: type = CV_32F; break;
        case InferenceEngine::Precision::U8: type = CV_8U; break;
        default:
            CV_Error(Error::StsNotImplemented, "Unsupported blob precision");
        outsNames[i] = outs[i]->name;
    }
    return Mat(size, type, (void*)blob->buffer());
}

void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers, bool isAsync)
@@ -1070,6 +1122,25 @@ void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlo
        }
        infRequests.push_back(reqWrapper);

#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
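        // OpenVINO >= 2022.1: bind the cached tensors to the request by position,
        // following the compiled model's input/output order.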
        int i = 0;
        for (const auto& it : netExec.inputs())
        {
            const std::string& name = it.get_node()->get_friendly_name();
            auto blobIt = allBlobs.find(name);
            CV_Assert(blobIt != allBlobs.end());
            reqWrapper->req.set_input_tensor(i++, isAsync ? copyBlob(blobIt->second) : blobIt->second);
        }
        i = 0;
        for (const auto& it : netExec.outputs())
        {
            const std::string& name = it.get_node()->get_friendly_name();
            auto blobIt = allBlobs.find(name);
            CV_Assert(blobIt != allBlobs.end());
            reqWrapper->req.set_output_tensor(i++, isAsync ? copyBlob(blobIt->second) : blobIt->second);
        }
#else
        InferenceEngine::BlobMap inpBlobs, outBlobs;
        for (const auto& it : cnn.getInputsInfo())
        {
@@ -1087,6 +1158,53 @@ void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlo
        }
        reqWrapper->req.SetInput(inpBlobs);
        reqWrapper->req.SetOutput(outBlobs);
#endif

#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
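        // The 2.0 API reports asynchronous completion via set_callback(), which
        // receives an std::exception_ptr instead of a status code.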
        if (isAsync) {
            bool* isReady = &reqWrapper->isReady;
            auto* promises = &reqWrapper->outProms;
            auto* req = &reqWrapper->req;
            reqWrapper->req.set_callback([isReady, promises, req](std::exception_ptr ex) {
                CV_LOG_DEBUG(NULL, "DNN(nGraph): completionCallback()");

                size_t processedOutputs = 0;
                try
                {
                    for (; processedOutputs < promises->size(); ++processedOutputs)
                    {
                        Mat m = infEngineBlobToMat(req->get_output_tensor(processedOutputs));
                        try
                        {
                            (*promises)[processedOutputs].setValue(m.clone());
                        }
                        catch (...)
                        {
                            try {
                                (*promises)[processedOutputs].setException(std::current_exception());
                            } catch (...) {
                                CV_LOG_ERROR(NULL, "DNN: Exception occurred during async inference exception propagation");
                            }
                        }
                    }
                }
                catch (...)
                {
                    std::exception_ptr e = std::current_exception();
                    for (; processedOutputs < promises->size(); ++processedOutputs)
                    {
                        try {
                            (*promises)[processedOutputs].setException(e);
                        } catch (...) {
                            CV_LOG_ERROR(NULL, "DNN: Exception occurred during async inference exception propagation");
                        }
                    }
                }
                *isReady = true;
            });
        }
#else  // OpenVINO >= 2022.1
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2021_4)
        InferenceEngine::InferRequest infRequest = reqWrapper->req;
@@ -1125,7 +1243,7 @@ void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlo
                for (; processedOutputs < wrapper.outProms.size(); ++processedOutputs)
                {
                    const std::string& name = wrapper.outsNames[processedOutputs];
                    Mat m = ngraphBlobToMat(wrapper.req.GetBlob(name));
                    Mat m = infEngineBlobToMat(wrapper.req.GetBlob(name));

                    try
                    {
@@ -1157,8 +1275,34 @@ void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlo
                wrapper.isReady = true;
            }
        );
#endif  // OpenVINO >= 2022.1
    }

#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
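    // OpenVINO >= 2022.1 async path: copy user data into the request's own input
    // tensors and launch start_async(); the callback above publishes the results.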
    if (isAsync)
    {
        // Copy actual data to infer request's input blobs.
        int i = 0;
        for (const auto& it : cnn.getFunction()->get_parameters())
        {
            const std::string& name = it->get_friendly_name();
            auto blobIt = allBlobs.find(name);
            Mat srcMat = infEngineBlobToMat(blobIt->second);
            Mat dstMat = infEngineBlobToMat(reqWrapper->req.get_input_tensor(i++));
            srcMat.copyTo(dstMat);
        }

        // Set up promises for the output blob wrappers.
        reqWrapper->makePromises(outBlobsWrappers);

        reqWrapper->isReady = false;
        reqWrapper->req.start_async();
    }
    else
    {
        reqWrapper->req.infer();
    }
#else
    if (isAsync)
    {
        // Copy actual data to infer request's input blobs.
@@ -1166,8 +1310,8 @@ void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlo
        {
            const std::string& name = it.first;
            auto blobIt = allBlobs.find(name);
            Mat srcMat = ngraphBlobToMat(blobIt->second);
            Mat dstMat = ngraphBlobToMat(reqWrapper->req.GetBlob(name));
            Mat srcMat = infEngineBlobToMat(blobIt->second);
            Mat dstMat = infEngineBlobToMat(reqWrapper->req.GetBlob(name));
            srcMat.copyTo(dstMat);
        }
@@ -1181,6 +1325,7 @@ void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlo
    {
        reqWrapper->req.Infer();
    }
#endif  // OpenVINO >= 2022.1
}
#endif