@@ -90,7 +90,7 @@ static const AVOption dnn_tensorflow_options[] = {
AVFILTER_DEFINE_CLASS(dnn_tensorflow);
static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *lltask_queue);
static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue);
static void infer_completion_callback(void *args);
static inline void destroy_request_item(TFRequestItem **arg);
@@ -152,9 +152,10 @@ static TFInferRequest *tf_create_inference_request(void)
 *
 * @param request pointer to the TFRequestItem for inference
 * @retval DNN_SUCCESS if execution is successful
 * @retval DNN_ERROR if execution fails
 * @retval AVERROR(EINVAL) if request is NULL
 * @retval DNN_GENERIC_ERROR if execution fails
 */
static DNNReturnType tf_start_inference(void *args)
static int tf_start_inference(void *args)
{
    TFRequestItem *request = args;
    TFInferRequest *infer_request = request->infer_request;
@@ -164,7 +165,7 @@ static DNNReturnType tf_start_inference(void *args)
    if (!request) {
        av_log(&tf_model->ctx, AV_LOG_ERROR, "TFRequestItem is NULL\n");
        return DNN_ERROR;
        return AVERROR(EINVAL);
    }
    TF_SessionRun(tf_model->session, NULL,
@@ -178,7 +179,7 @@ static DNNReturnType tf_start_inference(void *args)
        if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
            destroy_request_item(&request);
        }
        return DNN_ERROR;
        return DNN_GENERIC_ERROR;
    }
    return DNN_SUCCESS;
}
@@ -202,14 +203,14 @@ static inline void destroy_request_item(TFRequestItem **arg) {
    av_freep(arg);
}
static DNNReturnType extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
{
    TFModel *tf_model = task->model;
    TFContext *ctx = &tf_model->ctx;
    LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
    if (!lltask) {
        av_log(ctx, AV_LOG_ERROR, "Unable to allocate space for LastLevelTaskItem\n");
        return DNN_ERROR;
        return AVERROR(ENOMEM);
    }
    task->inference_todo = 1;
    task->inference_done = 0;
@@ -217,7 +218,7 @@ static DNNReturnType extract_lltask_from_task(TaskItem *task, Queue *lltask_queu
    if (ff_queue_push_back(lltask_queue, lltask) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to push back lltask_queue.\n");
        av_freep(&lltask);
        return DNN_ERROR;
        return AVERROR(ENOMEM);
    }
    return DNN_SUCCESS;
}
@@ -277,7 +278,7 @@ static TF_Tensor *allocate_input_tensor(const DNNData *input)
                             input_dims[1] * input_dims[2] * input_dims[3] * size);
}
static DNNReturnType get_input_tf(void *model, DNNData *input, const char *input_name)
static int get_input_tf(void *model, DNNData *input, const char *input_name)
{
    TFModel *tf_model = model;
    TFContext *ctx = &tf_model->ctx;
@@ -288,7 +289,7 @@ static DNNReturnType get_input_tf(void *model, DNNData *input, const char *input
    tf_output.oper = TF_GraphOperationByName(tf_model->graph, input_name);
    if (!tf_output.oper) {
        av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
        return DNN_ERROR;
        return AVERROR(EINVAL);
    }
    tf_output.index = 0;
@@ -300,7 +301,7 @@ static DNNReturnType get_input_tf(void *model, DNNData *input, const char *input
    if (TF_GetCode(status) != TF_OK) {
        TF_DeleteStatus(status);
        av_log(ctx, AV_LOG_ERROR, "Failed to get input tensor shape: number of dimension incorrect\n");
        return DNN_ERROR;
        return DNN_GENERIC_ERROR;
    }
    TF_DeleteStatus(status);
@@ -313,10 +314,10 @@ static DNNReturnType get_input_tf(void *model, DNNData *input, const char *input
    return DNN_SUCCESS;
}
static DNNReturnType get_output_tf(void *model, const char *input_name, int input_width, int input_height,
static int get_output_tf(void *model, const char *input_name, int input_width, int input_height,
                                   const char *output_name, int *output_width, int *output_height)
{
    DNNReturnType ret;
    int ret;
    TFModel *tf_model = model;
    TFContext *ctx = &tf_model->ctx;
    TaskItem task;
@@ -329,20 +330,21 @@ static DNNReturnType get_output_tf(void *model, const char *input_name, int inpu
        .out_frame = NULL,
    };
    if (ff_dnn_fill_gettingoutput_task(&task, &exec_params, tf_model, input_height, input_width, ctx) != DNN_SUCCESS) {
    ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, tf_model, input_height, input_width, ctx);
    if (ret != DNN_SUCCESS) {
        goto err;
    }
    if (extract_lltask_from_task(&task, tf_model->lltask_queue) != DNN_SUCCESS) {
    ret = extract_lltask_from_task(&task, tf_model->lltask_queue);
    if (ret != DNN_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
        ret = DNN_ERROR;
        goto err;
    }
    request = ff_safe_queue_pop_front(tf_model->request_queue);
    if (!request) {
        av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
        ret = DNN_ERROR;
        ret = AVERROR(EINVAL);
        goto err;
    }
@@ -386,7 +388,7 @@ static int hex_to_data(uint8_t *data, const char *p)
    return len;
}
static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename)
static int load_tf_model(TFModel *tf_model, const char *model_filename)
{
    TFContext *ctx = &tf_model->ctx;
    TF_Buffer *graph_def;
@@ -407,7 +409,7 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
        */
        if (strncmp(tf_model->ctx.options.sess_config, "0x", 2) != 0) {
            av_log(ctx, AV_LOG_ERROR, "sess_config should start with '0x'\n");
            return DNN_ERROR;
            return AVERROR(EINVAL);
        }
        config = tf_model->ctx.options.sess_config + 2;
        sess_config_length = hex_to_data(NULL, config);
@@ -415,11 +417,11 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
        sess_config = av_mallocz(sess_config_length + AV_INPUT_BUFFER_PADDING_SIZE);
        if (!sess_config) {
            av_log(ctx, AV_LOG_ERROR, "failed to allocate memory\n");
            return DNN_ERROR;
            return AVERROR(ENOMEM);
        }
        if (hex_to_data(sess_config, config) < 0) {
            av_log(ctx, AV_LOG_ERROR, "failed to convert hex to data\n");
            return DNN_ERROR;
            return AVERROR(EINVAL);
        }
    }
@@ -427,7 +429,7 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
    if (!graph_def) {
        av_log(ctx, AV_LOG_ERROR, "Failed to read model \"%s\" graph\n", model_filename);
        av_freep(&sess_config);
        return DNN_ERROR;
        return AVERROR(EINVAL);
    }
    tf_model->graph = TF_NewGraph();
    tf_model->status = TF_NewStatus();
@@ -440,7 +442,7 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
        TF_DeleteStatus(tf_model->status);
        av_log(ctx, AV_LOG_ERROR, "Failed to import serialized graph to model graph\n");
        av_freep(&sess_config);
        return DNN_ERROR;
        return DNN_GENERIC_ERROR;
    }
    init_op = TF_GraphOperationByName(tf_model->graph, "init");
@@ -455,7 +457,7 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
            TF_DeleteSessionOptions(sess_opts);
            av_log(ctx, AV_LOG_ERROR, "Failed to set config for sess options with %s\n",
                   tf_model->ctx.options.sess_config);
            return DNN_ERROR;
            return DNN_GENERIC_ERROR;
        }
    }
@@ -466,7 +468,7 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
        TF_DeleteGraph(tf_model->graph);
        TF_DeleteStatus(tf_model->status);
        av_log(ctx, AV_LOG_ERROR, "Failed to create new session with model graph\n");
        return DNN_ERROR;
        return DNN_GENERIC_ERROR;
    }
    // Run initialization operation with name "init" if it is present in graph
@@ -481,7 +483,7 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
            TF_DeleteGraph(tf_model->graph);
            TF_DeleteStatus(tf_model->status);
            av_log(ctx, AV_LOG_ERROR, "Failed to run session when initializing\n");
            return DNN_ERROR;
            return DNN_GENERIC_ERROR;
        }
    }
@@ -490,7 +492,7 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
#define NAME_BUFFER_SIZE 256
static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_op, TF_Operation **cur_op,
static int add_conv_layer(TFModel *tf_model, TF_Operation *transpose_op, TF_Operation **cur_op,
                                    ConvolutionalParams *params, const int layer)
{
    TFContext *ctx = &tf_model->ctx;
@@ -594,7 +596,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
        break;
    default:
        avpriv_report_missing_feature(ctx, "convolutional activation function %d", params->activation);
        return DNN_ERROR;
        return AVERROR(ENOSYS);
    }
    input.oper = *cur_op;
    TF_AddInput(op_desc, input);
@@ -609,10 +611,10 @@ err:
    TF_DeleteTensor(kernel_tensor);
    TF_DeleteTensor(biases_tensor);
    av_log(ctx, AV_LOG_ERROR, "Failed to add conv layer %d\n", layer);
    return DNN_ERROR;
    return DNN_GENERIC_ERROR;
}
static DNNReturnType add_depth_to_space_layer(TFModel *tf_model, TF_Operation **cur_op,
static int add_depth_to_space_layer(TFModel *tf_model, TF_Operation **cur_op,
                                              DepthToSpaceParams *params, const int layer)
{
    TFContext *ctx = &tf_model->ctx;
@@ -630,13 +632,13 @@ static DNNReturnType add_depth_to_space_layer(TFModel *tf_model, TF_Operation **
    *cur_op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to add depth_to_space to layer %d\n", layer);
        return DNN_ERROR;
        return DNN_GENERIC_ERROR;
    }
    return DNN_SUCCESS;
}
static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
static int add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
                                   LayerPadParams *params, const int layer)
{
    TFContext *ctx = &tf_model->ctx;
@@ -666,13 +668,13 @@ static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
    if (TF_GetCode(tf_model->status) != TF_OK) {
        TF_DeleteTensor(tensor);
        av_log(ctx, AV_LOG_ERROR, "Failed to set value for pad of layer %d\n", layer);
        return DNN_ERROR;
        return DNN_GENERIC_ERROR;
    }
    op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK) {
        TF_DeleteTensor(tensor);
        av_log(ctx, AV_LOG_ERROR, "Failed to add pad to layer %d\n", layer);
        return DNN_ERROR;
        return DNN_GENERIC_ERROR;
    }
    op_desc = TF_NewOperation(tf_model->graph, "MirrorPad", "mirror_pad");
@@ -688,13 +690,13 @@ static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
    if (TF_GetCode(tf_model->status) != TF_OK) {
        TF_DeleteTensor(tensor);
        av_log(ctx, AV_LOG_ERROR, "Failed to add mirror_pad to layer %d\n", layer);
        return DNN_ERROR;
        return DNN_GENERIC_ERROR;
    }
    return DNN_SUCCESS;
}
static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
static int add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
                                       DnnLayerMaximumParams *params, const int layer)
{
    TFContext *ctx = &tf_model->ctx;
@@ -716,13 +718,13 @@ static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
    if (TF_GetCode(tf_model->status) != TF_OK) {
        TF_DeleteTensor(tensor);
        av_log(ctx, AV_LOG_ERROR, "Failed to set value for maximum/y of layer %d", layer);
        return DNN_ERROR;
        return DNN_GENERIC_ERROR;
    }
    op = TF_FinishOperation(op_desc, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK) {
        TF_DeleteTensor(tensor);
        av_log(ctx, AV_LOG_ERROR, "Failed to add maximum/y to layer %d\n", layer);
        return DNN_ERROR;
        return DNN_GENERIC_ERROR;
    }
    snprintf(name_buffer, NAME_BUFFER_SIZE, "maximum%d", layer);
@@ -737,13 +739,13 @@ static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
    if (TF_GetCode(tf_model->status) != TF_OK) {
        TF_DeleteTensor(tensor);
        av_log(ctx, AV_LOG_ERROR, "Failed to add maximum to layer %d\n", layer);
        return DNN_ERROR;
        return DNN_GENERIC_ERROR;
    }
    return DNN_SUCCESS;
}
static DNNReturnType load_native_model(TFModel *tf_model, const char *model_filename)
static int load_native_model(TFModel *tf_model, const char *model_filename)
{
    TFContext *ctx = &tf_model->ctx;
    int32_t layer;
@@ -755,14 +757,14 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
    int32_t *transpose_perm;
    int64_t transpose_perm_shape[] = {4};
    int64_t input_shape[] = {1, -1, -1, -1};
    DNNReturnType layer_add_res;
    int layer_add_res;
    DNNModel *model = NULL;
    NativeModel *native_model;
    model = ff_dnn_load_model_native(model_filename, DFT_PROCESS_FRAME, NULL, NULL);
    if (!model) {
        av_log(ctx, AV_LOG_ERROR, "Failed to load native model\n");
        return DNN_ERROR;
        return AVERROR(EINVAL);
    }
    native_model = model->model;
@@ -775,7 +777,7 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
        TF_DeleteGraph(tf_model->graph); \
        TF_DeleteStatus(tf_model->status); \
        av_log(ctx, AV_LOG_ERROR, "Failed to set value or add operator to layer\n"); \
        return DNN_ERROR; \
        return DNN_GENERIC_ERROR; \
    }
    op_desc = TF_NewOperation(tf_model->graph, "Placeholder", "x");
@@ -942,19 +944,21 @@ err:
    return NULL;
}
static DNNReturnType fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
    DNNData input;
    LastLevelTaskItem *lltask;
    TaskItem *task;
    TFInferRequest *infer_request;
    TFContext *ctx = &tf_model->ctx;
    int ret = 0;
    lltask = ff_queue_pop_front(tf_model->lltask_queue);
    av_assert0(lltask);
    task = lltask->task;
    request->lltask = lltask;
    if (get_input_tf(tf_model, &input, task->input_name) != DNN_SUCCESS) {
    ret = get_input_tf(tf_model, &input, task->input_name);
    if (ret != DNN_SUCCESS) {
        goto err;
    }
@@ -965,12 +969,14 @@ static DNNReturnType fill_model_input_tf(TFModel *tf_model, TFRequestItem *reque
    infer_request->tf_input = av_malloc(sizeof(TF_Output));
    if (!infer_request->tf_input) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
        ret = AVERROR(ENOMEM);
        goto err;
    }
    infer_request->tf_input->oper = TF_GraphOperationByName(tf_model->graph, task->input_name);
    if (!infer_request->tf_input->oper) {
        av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", task->input_name);
        ret = DNN_GENERIC_ERROR;
        goto err;
    }
    infer_request->tf_input->index = 0;
@@ -978,6 +984,7 @@ static DNNReturnType fill_model_input_tf(TFModel *tf_model, TFRequestItem *reque
    infer_request->input_tensor = allocate_input_tensor(&input);
    if (!infer_request->input_tensor) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
        ret = AVERROR(ENOMEM);
        goto err;
    }
    input.data = (float *)TF_TensorData(infer_request->input_tensor);
@@ -1003,12 +1010,14 @@ static DNNReturnType fill_model_input_tf(TFModel *tf_model, TFRequestItem *reque
    infer_request->tf_outputs = av_malloc_array(task->nb_output, sizeof(TF_Output));
    if (infer_request->tf_outputs == NULL) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *tf_outputs\n");
        ret = AVERROR(ENOMEM);
        goto err;
    }
    infer_request->output_tensors = av_calloc(task->nb_output, sizeof(*infer_request->output_tensors));
    if (!infer_request->output_tensors) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output tensor\n");
        ret = AVERROR(ENOMEM);
        goto err;
    }
@@ -1017,6 +1026,7 @@ static DNNReturnType fill_model_input_tf(TFModel *tf_model, TFRequestItem *reque
        infer_request->tf_outputs[i].oper = TF_GraphOperationByName(tf_model->graph, task->output_names[i]);
        if (!infer_request->tf_outputs[i].oper) {
            av_log(ctx, AV_LOG_ERROR, "Could not find output \"%s\" in model\n", task->output_names[i]);
            ret = DNN_GENERIC_ERROR;
            goto err;
        }
        infer_request->tf_outputs[i].index = 0;
@@ -1025,7 +1035,7 @@ static DNNReturnType fill_model_input_tf(TFModel *tf_model, TFRequestItem *reque
    return DNN_SUCCESS;
err:
    tf_free_request(infer_request);
    return DNN_ERROR;
    return ret;
}
static void infer_completion_callback(void *args) {
@@ -1086,12 +1096,13 @@ err:
    }
}
static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
{
    TFModel *tf_model;
    TFContext *ctx;
    LastLevelTaskItem *lltask;
    TaskItem *task;
    int ret = 0;
    if (ff_queue_size(lltask_queue) == 0) {
        destroy_request_item(&request);
@@ -1103,7 +1114,8 @@ static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *lltask_queu
    tf_model = task->model;
    ctx = &tf_model->ctx;
    if (fill_model_input_tf(tf_model, request) != DNN_SUCCESS) {
    ret = fill_model_input_tf(tf_model, request);
    if (ret != DNN_SUCCESS) {
        goto err;
    }
@@ -1112,58 +1124,64 @@ static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *lltask_queu
            goto err;
        }
        return DNN_SUCCESS;
    } else {
        if (tf_start_inference(request) != DNN_SUCCESS) {
    }
    else {
        ret = tf_start_inference(request);
        if (ret != DNN_SUCCESS) {
            goto err;
        }
        infer_completion_callback(request);
        return (task->inference_done == task->inference_todo) ? DNN_SUCCESS : DNN_ERROR;
        return (task->inference_done == task->inference_todo) ? DNN_SUCCESS : DNN_GENERIC_ERROR;
    }
err:
    tf_free_request(request->infer_request);
    if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
        destroy_request_item(&request);
    }
    return DNN_ERROR;
    return ret;
}
DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params)
int ff_dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params)
{
    TFModel *tf_model = model->model;
    TFContext *ctx = &tf_model->ctx;
    TaskItem *task;
    TFRequestItem *request;
    int ret = 0;
    if (ff_check_exec_params(ctx, DNN_TF, model->func_type, exec_params) != 0) {
        return DNN_ERROR;
    ret = ff_check_exec_params(ctx, DNN_TF, model->func_type, exec_params);
    if (ret != 0) {
        return ret;
    }
    task = av_malloc(sizeof(*task));
    if (!task) {
        av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
        return DNN_ERROR;
        return AVERROR(ENOMEM);
    }
    if (ff_dnn_fill_task(task, exec_params, tf_model, ctx->options.async, 1) != DNN_SUCCESS) {
    ret = ff_dnn_fill_task(task, exec_params, tf_model, ctx->options.async, 1);
    if (ret != DNN_SUCCESS) {
        av_freep(&task);
        return DNN_ERROR;
        return ret;
    }
    if (ff_queue_push_back(tf_model->task_queue, task) < 0) {
        av_freep(&task);
        av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
        return DNN_ERROR;
        return AVERROR(ENOMEM);
    }
    if (extract_lltask_from_task(task, tf_model->lltask_queue) != DNN_SUCCESS) {
    ret = extract_lltask_from_task(task, tf_model->lltask_queue);
    if (ret != DNN_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
        return DNN_ERROR;
        return ret;
    }
    request = ff_safe_queue_pop_front(tf_model->request_queue);
    if (!request) {
        av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
        return DNN_ERROR;
        return AVERROR(EINVAL);
    }
    return execute_model_tf(request, tf_model->lltask_queue);
}
@@ -1174,12 +1192,12 @@ DNNAsyncStatusType ff_dnn_get_result_tf(const DNNModel *model, AVFrame **in, AVF
    return ff_dnn_get_result_common(tf_model->task_queue, in, out);
}
DNNReturnType ff_dnn_flush_tf(const DNNModel *model)
int ff_dnn_flush_tf(const DNNModel *model)
{
    TFModel *tf_model = model->model;
    TFContext *ctx = &tf_model->ctx;
    TFRequestItem *request;
    DNNReturnType ret;
    int ret;
    if (ff_queue_size(tf_model->lltask_queue) == 0) {
        // no pending task need to flush
@@ -1189,7 +1207,7 @@ DNNReturnType ff_dnn_flush_tf(const DNNModel *model)
    request = ff_safe_queue_pop_front(tf_model->request_queue);
    if (!request) {
        av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
        return DNN_ERROR;
        return AVERROR(EINVAL);
    }
    ret = fill_model_input_tf(tf_model, request);