@@ -915,45 +915,19 @@ static DNNReturnType fill_model_input_tf(TFModel *tf_model, TFRequestItem *reque
     return DNN_SUCCESS;
 }
 
-static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *inference_queue)
-{
-    TFModel *tf_model;
-    TFContext *ctx;
-    TFInferRequest *infer_request;
-    InferenceItem *inference;
-    TaskItem *task;
+static void infer_completion_callback(void *args) {
+    TFRequestItem *request = args;
+    InferenceItem *inference = request->inference;
+    TaskItem *task = inference->task;
     DNNData *outputs;
-
-    inference = ff_queue_peek_front(inference_queue);
-    task = inference->task;
-    tf_model = task->model;
-    ctx = &tf_model->ctx;
-
-    if (task->async) {
-        avpriv_report_missing_feature(ctx, "Async execution not supported");
-        return DNN_ERROR;
-    } else {
-        if (fill_model_input_tf(tf_model, request) != DNN_SUCCESS) {
-            return DNN_ERROR;
-        }
-
-        infer_request = request->infer_request;
-        TF_SessionRun(tf_model->session, NULL,
-                      infer_request->tf_input, &infer_request->input_tensor, 1,
-                      infer_request->tf_outputs, infer_request->output_tensors,
-                      task->nb_output, NULL, 0, NULL,
-                      tf_model->status);
-        if (TF_GetCode(tf_model->status) != TF_OK) {
-            tf_free_request(infer_request);
-            av_log(ctx, AV_LOG_ERROR, "Failed to run session when executing model\n");
-            return DNN_ERROR;
-        }
+    TFInferRequest *infer_request = request->infer_request;
+    TFModel *tf_model = task->model;
+    TFContext *ctx = &tf_model->ctx;
 
     outputs = av_malloc_array(task->nb_output, sizeof(*outputs));
     if (!outputs) {
-        tf_free_request(infer_request);
         av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *outputs\n");
-        return DNN_ERROR;
+        goto err;
     }
 
     for (uint32_t i = 0; i < task->nb_output; ++i) {
@@ -980,20 +954,59 @@ static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *inference_q
     case DFT_ANALYTICS_DETECT:
         if (!tf_model->model->detect_post_proc) {
             av_log(ctx, AV_LOG_ERROR, "Detect filter needs provide post proc\n");
-            return DNN_ERROR;
+            return;
         }
         tf_model->model->detect_post_proc(task->out_frame, outputs, task->nb_output, tf_model->model->filter_ctx);
         break;
     default:
-        tf_free_request(infer_request);
         av_log(ctx, AV_LOG_ERROR, "Tensorflow backend does not support this kind of dnn filter now\n");
-        return DNN_ERROR;
+        goto err;
     }
     task->inference_done++;
+err:
     tf_free_request(infer_request);
     av_freep(&outputs);
 
-    ff_safe_queue_push_back(tf_model->request_queue, request);
+    if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
+        av_freep(&request->infer_request);
+        av_freep(&request);
+        av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
+    }
+}
+
+static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *inference_queue)
+{
+    TFModel *tf_model;
+    TFContext *ctx;
+    TFInferRequest *infer_request;
+    InferenceItem *inference;
+    TaskItem *task;
+
+    inference = ff_queue_peek_front(inference_queue);
+    task = inference->task;
+    tf_model = task->model;
+    ctx = &tf_model->ctx;
+
+    if (task->async) {
+        avpriv_report_missing_feature(ctx, "Async execution not supported");
+        return DNN_ERROR;
+    } else {
+        if (fill_model_input_tf(tf_model, request) != DNN_SUCCESS) {
+            return DNN_ERROR;
+        }
+
+        infer_request = request->infer_request;
+        TF_SessionRun(tf_model->session, NULL,
+                      infer_request->tf_input, &infer_request->input_tensor, 1,
+                      infer_request->tf_outputs, infer_request->output_tensors,
+                      task->nb_output, NULL, 0, NULL,
+                      tf_model->status);
+        if (TF_GetCode(tf_model->status) != TF_OK) {
+            tf_free_request(infer_request);
+            av_log(ctx, AV_LOG_ERROR, "Failed to run session when executing model\n");
+            return DNN_ERROR;
+        }
+        infer_completion_callback(request);
         return (task->inference_done == task->inference_todo) ? DNN_SUCCESS : DNN_ERROR;
     }
 }