@@ -37,6 +37,7 @@
 typedef struct OVOptions {
     char *device_type;
     int nireq;
+    int batch_size;
 } OVOptions;
 
 typedef struct OVContext {
@@ -70,7 +71,8 @@ typedef struct TaskItem {
 typedef struct RequestItem {
     ie_infer_request_t *infer_request;
-    TaskItem *task;
+    TaskItem **tasks;
+    int task_count;
     ie_complete_call_back_t callback;
 } RequestItem;
@@ -83,6 +85,7 @@ typedef struct RequestItem {
 static const AVOption dnn_openvino_options[] = {
     { "device", "device to run model", OFFSET(options.device_type), AV_OPT_TYPE_STRING, { .str = "CPU" }, 0, 0, FLAGS },
     { "nireq", "number of request", OFFSET(options.nireq), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
+    { "batch_size", "batch size per request", OFFSET(options.batch_size), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 1000, FLAGS },
     { NULL }
 };
@@ -100,7 +103,19 @@ static DNNDataType precision_to_datatype(precision_e precision)
     }
 }
 
-static DNNReturnType fill_model_input_ov(OVModel *ov_model, TaskItem *task, RequestItem *request)
+static int get_datatype_size(DNNDataType dt)
+{
+    switch (dt)
+    {
+    case DNN_FLOAT:
+        return sizeof(float);
+    default:
+        av_assert0(!"not supported yet.");
+        return 1;
+    }
+}
+
+static DNNReturnType fill_model_input_ov(OVModel *ov_model, RequestItem *request)
 {
     dimensions_t dims;
     precision_e precision;
@@ -109,6 +124,7 @@ static DNNReturnType fill_model_input_ov(OVModel *ov_model, TaskItem *task, Requ
     IEStatusCode status;
     DNNData input;
     ie_blob_t *input_blob = NULL;
+    TaskItem *task = request->tasks[0];
 
     status = ie_infer_request_get_blob(request->infer_request, task->input_name, &input_blob);
     if (status != OK) {
@@ -134,12 +150,19 @@ static DNNReturnType fill_model_input_ov(OVModel *ov_model, TaskItem *task, Requ
     input.channels = dims.dims[1];
     input.data = blob_buffer.buffer;
     input.dt = precision_to_datatype(precision);
-    if (task->do_ioproc) {
-        if (ov_model->model->pre_proc != NULL) {
-            ov_model->model->pre_proc(task->in_frame, &input, ov_model->model->filter_ctx);
-        } else {
-            proc_from_frame_to_dnn(task->in_frame, &input, ctx);
-        }
-    }
+    av_assert0(request->task_count <= dims.dims[0]);
+    for (int i = 0; i < request->task_count; ++i) {
+        task = request->tasks[i];
+        if (task->do_ioproc) {
+            if (ov_model->model->pre_proc != NULL) {
+                ov_model->model->pre_proc(task->in_frame, &input, ov_model->model->filter_ctx);
+            } else {
+                proc_from_frame_to_dnn(task->in_frame, &input, ctx);
+            }
+        }
+        input.data = (uint8_t *)input.data
+                     + input.width * input.height * input.channels * get_datatype_size(input.dt);
+    }
 
     ie_blob_free(&input_blob);
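The rewritten fill_model_input_ov() above treats the single input blob as batch_size consecutive frame-sized slices: each queued task is preprocessed into its own slice, and input.data is advanced by width * height * channels * get_datatype_size(input.dt) bytes per task. A minimal standalone sketch of that slice arithmetic; the toy blob_view_t type and fill_slice() stub are hypothetical and not part of the patch:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy stand-in for the backend's input description; only the layout math matters here. */
    typedef struct {
        int width, height, channels;
        size_t elem_size;   /* e.g. sizeof(float) for DNN_FLOAT */
        uint8_t *data;      /* start of the whole batched blob */
    } blob_view_t;

    /* Pretend "preprocess one frame into its blob slice" step. */
    static void fill_slice(uint8_t *dst, size_t bytes, int task_index)
    {
        memset(dst, task_index, bytes);   /* real code would convert/scale pixels */
    }

    int main(void)
    {
        int batch_size = 4;
        blob_view_t in = { .width = 8, .height = 8, .channels = 3, .elem_size = sizeof(float) };
        size_t slice_bytes = (size_t)in.width * in.height * in.channels * in.elem_size;

        in.data = malloc(slice_bytes * batch_size);   /* one blob holds the whole batch */
        if (!in.data)
            return 1;

        uint8_t *p = in.data;
        for (int i = 0; i < batch_size; i++) {
            fill_slice(p, slice_bytes, i);   /* task i lands in slice i */
            p += slice_bytes;                /* same advance as input.data in the patch */
        }

        printf("blob bytes: %zu (%d slices of %zu)\n",
               slice_bytes * batch_size, batch_size, slice_bytes);
        free(in.data);
        return 0;
    }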
@@ -152,7 +175,7 @@ static void infer_completion_callback(void *args)
     precision_e precision;
     IEStatusCode status;
     RequestItem *request = args;
-    TaskItem *task = request->task;
+    TaskItem *task = request->tasks[0];
     ie_blob_t *output_blob = NULL;
     ie_blob_buffer_t blob_buffer;
     DNNData output;
@@ -194,41 +217,56 @@ static void infer_completion_callback(void *args)
     output.width = dims.dims[3];
     output.dt = precision_to_datatype(precision);
     output.data = blob_buffer.buffer;
-    if (task->do_ioproc) {
-        if (task->ov_model->model->post_proc != NULL) {
-            task->ov_model->model->post_proc(task->out_frame, &output, task->ov_model->model->filter_ctx);
-        } else {
-            proc_from_dnn_to_frame(task->out_frame, &output, ctx);
-        }
-    } else {
-        task->out_frame->width = output.width;
-        task->out_frame->height = output.height;
-    }
+    av_assert0(request->task_count <= dims.dims[0]);
+    av_assert0(request->task_count >= 1);
+    for (int i = 0; i < request->task_count; ++i) {
+        task = request->tasks[i];
+        if (task->do_ioproc) {
+            if (task->ov_model->model->post_proc != NULL) {
+                task->ov_model->model->post_proc(task->out_frame, &output, task->ov_model->model->filter_ctx);
+            } else {
+                proc_from_dnn_to_frame(task->out_frame, &output, ctx);
+            }
+        } else {
+            task->out_frame->width = output.width;
+            task->out_frame->height = output.height;
+        }
+        task->done = 1;
+        output.data = (uint8_t *)output.data
+                      + output.width * output.height * output.channels * get_datatype_size(output.dt);
+    }
     ie_blob_free(&output_blob);
 
+    request->task_count = 0;
+
     if (task->async) {
-        request->task = NULL;
         if (ff_safe_queue_push_back(task->ov_model->request_queue, request) < 0) {
             av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
             return;
         }
     }
-
-    task->done = 1;
 }
 
-static DNNReturnType execute_model_ov(TaskItem *task, RequestItem *request)
+static DNNReturnType execute_model_ov(RequestItem *request)
 {
     IEStatusCode status;
+    DNNReturnType ret;
+    TaskItem *task = request->tasks[0];
     OVContext *ctx = &task->ov_model->ctx;
-    DNNReturnType ret = fill_model_input_ov(task->ov_model, task, request);
-    if (ret != DNN_SUCCESS) {
-        return ret;
-    }
 
     if (task->async) {
-        request->task = task;
+        if (request->task_count < ctx->options.batch_size) {
+            if (ff_safe_queue_push_front(task->ov_model->request_queue, request) < 0) {
+                av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
+                return DNN_ERROR;
+            }
+            return DNN_SUCCESS;
+        }
+        ret = fill_model_input_ov(task->ov_model, request);
+        if (ret != DNN_SUCCESS) {
+            return ret;
+        }
         status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
         if (status != OK) {
             av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
@@ -241,12 +279,15 @@ static DNNReturnType execute_model_ov(TaskItem *task, RequestItem *request)
         }
         return DNN_SUCCESS;
     } else {
+        ret = fill_model_input_ov(task->ov_model, request);
+        if (ret != DNN_SUCCESS) {
+            return ret;
+        }
         status = ie_infer_request_infer(request->infer_request);
         if (status != OK) {
             av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference\n");
             return DNN_ERROR;
         }
-        request->task = task;
         infer_completion_callback(request);
         return task->done ? DNN_SUCCESS : DNN_ERROR;
     }
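execute_model_ov() now follows a collect-then-submit policy for async execution: a request holding fewer than batch_size tasks is pushed back to the front of request_queue without starting inference, so the next incoming frame lands in the same request; only a full request is filled and handed to the engine. A toy mirror of that policy, with illustrative names that are not from the patch:

    #include <stdio.h>

    /* Simplified, hypothetical mirror of the submission decision in execute_model_ov(). */
    typedef struct {
        int task_count;
        int batch_size;
    } toy_request_t;

    /* Returns 1 when the request was submitted, 0 when it was parked for more tasks. */
    static int push_task_and_maybe_submit(toy_request_t *req)
    {
        req->task_count++;                 /* ff_dnn_execute_model_async_ov() appends one task */
        if (req->task_count < req->batch_size)
            return 0;                      /* parked: pushed back to the front of the queue */
        printf("submitting batch of %d\n", req->task_count);
        req->task_count = 0;               /* the completion callback resets the count */
        return 1;
    }

    int main(void)
    {
        toy_request_t req = { .task_count = 0, .batch_size = 3 };
        for (int frame = 0; frame < 7; frame++)
            printf("frame %d -> %s\n", frame,
                   push_task_and_maybe_submit(&req) ? "inference started" : "buffered");
        return 0;
    }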
@@ -319,6 +360,7 @@ static DNNReturnType get_output_ov(void *model, const char *input_name, int inpu
     RequestItem request;
     AVFrame *in_frame = av_frame_alloc();
     AVFrame *out_frame = NULL;
+    TaskItem *ptask = &task;
 
     if (!in_frame) {
         av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input frame\n");
@@ -343,8 +385,10 @@ static DNNReturnType get_output_ov(void *model, const char *input_name, int inpu
     task.ov_model = ov_model;
 
     request.infer_request = ov_model->infer_request;
+    request.task_count = 1;
+    request.tasks = &ptask;
 
-    ret = execute_model_ov(&task, &request);
+    ret = execute_model_ov(&request);
     *output_width = out_frame->width;
     *output_height = out_frame->height;
@@ -393,6 +437,24 @@ DNNModel *ff_dnn_load_model_ov(const char *model_filename, const char *options,
     if (status != OK)
         goto err;
 
+    // batch size
+    if (ctx->options.batch_size <= 0) {
+        ctx->options.batch_size = 1;
+    }
+
+    if (ctx->options.batch_size > 1) {
+        input_shapes_t input_shapes;
+        status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
+        if (status != OK)
+            goto err;
+        for (int i = 0; i < input_shapes.shape_num; i++)
+            input_shapes.shapes[i].shape.dims[0] = ctx->options.batch_size;
+        status = ie_network_reshape(ov_model->network, input_shapes);
+        ie_network_input_shapes_free(&input_shapes);
+        if (status != OK)
+            goto err;
+    }
+
     status = ie_core_load_network(ov_model->core, ov_model->network, ctx->options.device_type, &config, &ov_model->exe_network);
     if (status != OK) {
         av_log(ctx, AV_LOG_ERROR, "Failed to init OpenVINO model\n");
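When batch_size > 1 the network is reshaped before ie_core_load_network() so that the leading (batch) dimension of every input equals the configured batch size; the blob that fill_model_input_ov() later obtains is therefore batch_size frames large. As a rough worked example (assuming a hypothetical 1x3x224x224 FP32 input, which this patch does not define): batch_size=4 turns the shape into 4x3x224x224, so the input blob grows to 4 * 3 * 224 * 224 * 4 bytes, roughly 2.3 MB instead of roughly 0.6 MB for a single frame.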
@@ -426,17 +488,24 @@ DNNModel *ff_dnn_load_model_ov(const char *model_filename, const char *options,
     }
 
     for (int i = 0; i < ctx->options.nireq; i++) {
-        ie_infer_request_t *request;
         RequestItem *item = av_mallocz(sizeof(*item));
         if (!item) {
             goto err;
         }
-        status = ie_exec_network_create_infer_request(ov_model->exe_network, &request);
+
+        status = ie_exec_network_create_infer_request(ov_model->exe_network, &item->infer_request);
         if (status != OK) {
             av_freep(&item);
             goto err;
         }
-        item->infer_request = request;
+
+        item->tasks = av_malloc_array(ctx->options.batch_size, sizeof(*item->tasks));
+        if (!item->tasks) {
+            av_freep(&item);
+            goto err;
+        }
+        item->task_count = 0;
+
         item->callback.completeCallBackFunc = infer_completion_callback;
         item->callback.args = item;
         if (ff_safe_queue_push_back(ov_model->request_queue, item) < 0) {
@@ -469,6 +538,7 @@ DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, const char *input_n
     OVContext *ctx = &ov_model->ctx;
     TaskItem task;
     RequestItem request;
+    TaskItem *ptask = &task;
 
     if (!in_frame) {
         av_log(ctx, AV_LOG_ERROR, "in frame is NULL when execute model.\n");
@@ -487,6 +557,11 @@ DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, const char *input_n
         return DNN_ERROR;
     }
 
+    if (ctx->options.batch_size > 1) {
+        av_log(ctx, AV_LOG_ERROR, "do not support batch mode for sync execution.\n");
+        return DNN_ERROR;
+    }
+
     task.done = 0;
     task.do_ioproc = 1;
     task.async = 0;
@@ -497,8 +572,10 @@ DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, const char *input_n
     task.ov_model = ov_model;
 
     request.infer_request = ov_model->infer_request;
+    request.task_count = 1;
+    request.tasks = &ptask;
 
-    return execute_model_ov(&task, &request);
+    return execute_model_ov(&request);
 }
 
 DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel *model, const char *input_name, AVFrame *in_frame,
@@ -545,7 +622,8 @@ DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel *model, const char *i
         return DNN_ERROR;
     }
 
-    return execute_model_ov(task, request);
+    request->tasks[request->task_count++] = task;
+    return execute_model_ov(request);
 }
 
 DNNAsyncStatusType ff_dnn_get_async_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
@@ -569,6 +647,48 @@ DNNAsyncStatusType ff_dnn_get_async_result_ov(const DNNModel *model, AVFrame **i
     return DAST_SUCCESS;
 }
 
+DNNReturnType ff_dnn_flush_ov(const DNNModel *model)
+{
+    OVModel *ov_model = (OVModel *)model->model;
+    OVContext *ctx = &ov_model->ctx;
+    RequestItem *request;
+    IEStatusCode status;
+    DNNReturnType ret;
+
+    request = ff_safe_queue_pop_front(ov_model->request_queue);
+    if (!request) {
+        av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
+        return DNN_ERROR;
+    }
+
+    if (request->task_count == 0) {
+        // no pending task need to flush
+        if (ff_safe_queue_push_back(ov_model->request_queue, request) < 0) {
+            av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
+            return DNN_ERROR;
+        }
+        return DNN_SUCCESS;
+    }
+
+    ret = fill_model_input_ov(ov_model, request);
+    if (ret != DNN_SUCCESS) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
+        return ret;
+    }
+    status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
+    if (status != OK) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
+        return DNN_ERROR;
+    }
+    status = ie_infer_request_infer_async(request->infer_request);
+    if (status != OK) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
+        return DNN_ERROR;
+    }
+
+    return DNN_SUCCESS;
+}
+
 void ff_dnn_free_model_ov(DNNModel **model)
 {
     if (*model) {
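ff_dnn_flush_ov() exists because batching can leave the last request only partially filled at the end of a stream: execute_model_ov() keeps parking such a request until it reaches batch_size tasks, so without a flush the trailing frames would never be submitted. The implied call pattern on the filter side (the exact sequence is an assumption here, not spelled out by the patch) is to call ff_dnn_execute_model_async_ov() once per frame, call ff_dnn_flush_ov() once the input is exhausted, and then drain the remaining completed frames with ff_dnn_get_async_result_ov().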
@@ -578,12 +698,15 @@ void ff_dnn_free_model_ov(DNNModel **model)
             if (item && item->infer_request) {
                 ie_infer_request_free(&item->infer_request);
             }
+            av_freep(&item->tasks);
             av_freep(&item);
         }
         ff_safe_queue_destroy(ov_model->request_queue);
 
         while (ff_queue_size(ov_model->task_queue) != 0) {
             TaskItem *item = ff_queue_pop_front(ov_model->task_queue);
+            av_frame_free(&item->in_frame);
+            av_frame_free(&item->out_frame);
             av_freep(&item);
         }
         ff_queue_destroy(ov_model->task_queue);