@@ -39,6 +39,7 @@
 typedef struct OVOptions {
     char *device_type;
     int nireq;
+    uint8_t async;
     int batch_size;
     int input_resizable;
 } OVOptions;
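
The new async field is a user-visible toggle, so it would normally be exposed through the backend's AVOption table in the same file. A minimal sketch of such a declaration (the exact table contents and defaults are assumptions, not part of this hunk):

    #define OFFSET(x) offsetof(OVContext, x)
    #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
    static const AVOption dnn_openvino_options[] = {
        { "device", "device to run model",     OFFSET(options.device_type), AV_OPT_TYPE_STRING, { .str = "CPU" }, 0, 0, FLAGS },
        // hypothetical entry: async inference enabled by default
        { "async",  "use DNN async inference", OFFSET(options.async),       AV_OPT_TYPE_BOOL,   { .i64 = 1 },     0, 1, FLAGS },
        { NULL }
    };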
@@ -271,14 +272,14 @@ static void infer_completion_callback(void *args)
             av_log(ctx, AV_LOG_ERROR, "detect filter needs to provide post proc\n");
             return;
         }
-        ov_model->model->detect_post_proc(task->out_frame, &output, 1, ov_model->model->filter_ctx);
+        ov_model->model->detect_post_proc(task->in_frame, &output, 1, ov_model->model->filter_ctx);
         break;
     case DFT_ANALYTICS_CLASSIFY:
         if (!ov_model->model->classify_post_proc) {
             av_log(ctx, AV_LOG_ERROR, "classify filter needs to provide post proc\n");
             return;
         }
-        ov_model->model->classify_post_proc(task->out_frame, &output, request->inferences[i]->bbox_index, ov_model->model->filter_ctx);
+        ov_model->model->classify_post_proc(task->in_frame, &output, request->inferences[i]->bbox_index, ov_model->model->filter_ctx);
         break;
     default:
         av_assert0(!"should not reach here");
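
The out_frame to in_frame switch reflects how the analytics filters consume results: detection and classification attach AVDetectionBBox side data to the frame flowing through the filter, so the post-proc callback must annotate the input frame. For reference, a downstream consumer would read those results back roughly like this (a hypothetical helper using the public side-data API, not code from this patch):

    #include "libavutil/detection_bbox.h"
    #include "libavutil/log.h"

    // Sketch: dump the detection boxes that detect_post_proc attached
    // to the frame as AV_FRAME_DATA_DETECTION_BBOXES side data.
    static void log_detections(void *log_ctx, const AVFrame *frame)
    {
        const AVDetectionBBoxHeader *header;
        AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DETECTION_BBOXES);
        if (!sd)
            return;
        header = (const AVDetectionBBoxHeader *)sd->data;
        for (uint32_t i = 0; i < header->nb_bboxes; i++) {
            const AVDetectionBBox *bbox = av_get_detection_bbox(header, i);
            av_log(log_ctx, AV_LOG_INFO, "bbox: x=%d y=%d w=%d h=%d\n",
                   bbox->x, bbox->y, bbox->w, bbox->h);
        }
    }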
@@ -758,55 +759,6 @@ err:
 }
 
-DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
-{
-    OVModel *ov_model = model->model;
-    OVContext *ctx = &ov_model->ctx;
-    TaskItem task;
-    OVRequestItem *request;
-
-    if (ff_check_exec_params(ctx, DNN_OV, model->func_type, exec_params) != 0) {
-        return DNN_ERROR;
-    }
-
-    if (model->func_type == DFT_ANALYTICS_CLASSIFY) {
-        // Once we add async support for tensorflow backend and native backend,
-        // we'll combine the two sync/async functions in dnn_interface.h to
-        // simplify the code in filter, and async will be an option within backends.
-        // so, do not support now, and classify filter will not call this function.
-        return DNN_ERROR;
-    }
-
-    if (ctx->options.batch_size > 1) {
-        avpriv_report_missing_feature(ctx, "batch mode for sync execution");
-        return DNN_ERROR;
-    }
-
-    if (!ov_model->exe_network) {
-        if (init_model_ov(ov_model, exec_params->input_name, exec_params->output_names[0]) != DNN_SUCCESS) {
-            av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
-            return DNN_ERROR;
-        }
-    }
-
-    if (ff_dnn_fill_task(&task, exec_params, ov_model, 0, 1) != DNN_SUCCESS) {
-        return DNN_ERROR;
-    }
-
-    if (extract_inference_from_task(ov_model->model->func_type, &task, ov_model->inference_queue, exec_params) != DNN_SUCCESS) {
-        av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
-        return DNN_ERROR;
-    }
-
-    request = ff_safe_queue_pop_front(ov_model->request_queue);
-    if (!request) {
-        av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
-        return DNN_ERROR;
-    }
-
-    return execute_model_ov(request, ov_model->inference_queue);
-}
-
-DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
+DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
 {
     OVModel *ov_model = model->model;
     OVContext *ctx = &ov_model->ctx;
@@ -831,7 +783,8 @@ DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel *model, DNNExecBasePa
         return DNN_ERROR;
     }
 
-    if (ff_dnn_fill_task(task, exec_params, ov_model, 1, 1) != DNN_SUCCESS) {
+    if (ff_dnn_fill_task(task, exec_params, ov_model, ctx->options.async, 1) != DNN_SUCCESS) {
+        av_freep(&task);
         return DNN_ERROR;
     }
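
The added av_freep matters because of ownership: at this point the task has been heap-allocated but not yet pushed onto task_queue, so on a fill failure the caller still owns it; once the later ff_queue_push_back succeeds, the queue takes ownership and subsequent error paths must not free it. The pattern in isolation (a sketch with a hypothetical payload type; Queue/ff_queue_push_back are the helpers from libavfilter/dnn/queue.h):

    // Hypothetical helper illustrating the alloc -> fill -> enqueue rule.
    typedef struct Payload { int id; } Payload;

    static int submit(Queue *queue, int id)
    {
        Payload *p = av_malloc(sizeof(*p));
        if (!p)
            return AVERROR(ENOMEM);
        p->id = id;                   // the "fill" step; on failure, free locally
        if (ff_queue_push_back(queue, p) < 0) {
            av_freep(&p);             // queue never took ownership
            return AVERROR(ENOMEM);
        }
        return 0;                     // queue owns p now; do not free on later errors
    }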
@@ -846,26 +799,48 @@ DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel *model, DNNExecBasePa
         return DNN_ERROR;
     }
 
-    while (ff_queue_size(ov_model->inference_queue) >= ctx->options.batch_size) {
-        request = ff_safe_queue_pop_front(ov_model->request_queue);
-        if (!request) {
-            av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
-            return DNN_ERROR;
-        }
-
-        ret = execute_model_ov(request, ov_model->inference_queue);
-        if (ret != DNN_SUCCESS) {
-            return ret;
-        }
-    }
+    if (ctx->options.async) {
+        while (ff_queue_size(ov_model->inference_queue) >= ctx->options.batch_size) {
+            request = ff_safe_queue_pop_front(ov_model->request_queue);
+            if (!request) {
+                av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
+                return DNN_ERROR;
+            }
+
+            ret = execute_model_ov(request, ov_model->inference_queue);
+            if (ret != DNN_SUCCESS) {
+                return ret;
+            }
+        }
+
+        return DNN_SUCCESS;
+    }
+    else {
+        if (model->func_type == DFT_ANALYTICS_CLASSIFY) {
+            // Classification filter has not been completely
+            // tested with the sync mode. So, do not support now.
+            avpriv_report_missing_feature(ctx, "classify for sync execution");
+            return DNN_ERROR;
+        }
+
+        if (ctx->options.batch_size > 1) {
+            avpriv_report_missing_feature(ctx, "batch mode for sync execution");
+            return DNN_ERROR;
+        }
+
+        request = ff_safe_queue_pop_front(ov_model->request_queue);
+        if (!request) {
+            av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
+            return DNN_ERROR;
+        }
+
+        ret = execute_model_ov(request, ov_model->inference_queue);
+        if (ret != DNN_SUCCESS) {
+            return ret;
+        }
+    }
 
     return DNN_SUCCESS;
 }
 
-DNNAsyncStatusType ff_dnn_get_async_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
+DNNAsyncStatusType ff_dnn_get_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
 {
     OVModel *ov_model = model->model;
-    return ff_dnn_get_async_result_common(ov_model->task_queue, in, out);
+    return ff_dnn_get_result_common(ov_model->task_queue, in, out);
 }
 
 DNNReturnType ff_dnn_flush_ov(const DNNModel *model)
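
With sync and async folded into one entry point, a filter drives the backend through a single execute/get-result pair and picks the mode via the async option. A rough sketch of that calling pattern, assuming the ff_dnn_execute_model/ff_dnn_get_result wrappers from dnn_filter_common and a hypothetical filter context (error handling trimmed):

    // Sketch: how a vf_dnn_* filter might drive the unified interface.
    static int filter_frame(AVFilterLink *inlink, AVFrame *in)
    {
        AVFilterContext *filter_ctx = inlink->dst;
        DnnDetectContext *ctx = filter_ctx->priv;   // hypothetical filter context
        AVFrame *in_frame = NULL, *out_frame = NULL;

        // For analytics filters the result lands on the input frame's
        // side data, so no separate output frame is passed.
        if (ff_dnn_execute_model(&ctx->dnnctx, in, NULL) != DNN_SUCCESS) {
            av_frame_free(&in);
            return AVERROR(EIO);
        }

        // Drain whatever has completed; in sync mode the result is ready
        // immediately, in async mode it may only arrive on a later call.
        while (ff_dnn_get_result(&ctx->dnnctx, &in_frame, &out_frame) == DAST_SUCCESS) {
            int ret = ff_filter_frame(inlink->dst->outputs[0], in_frame);
            if (ret < 0)
                return ret;
        }
        return 0;
    }

Either way the filter code stays identical; only the option value changes which branch of ff_dnn_execute_model_ov runs.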