@@ -25,6 +25,7 @@
#define AVFILTER_DNN_DNN_BACKEND_COMMON_H

#include "../dnn_interface.h"
#include "libavutil/thread.h"

// Common AVOption entries shared by the DNN backends.
// NOTE(review): assumes OFFSET() and FLAGS are defined by the including
// backend before this macro is expanded — they are not defined here.
#define DNN_BACKEND_COMMON_OPTIONS \
    { "nireq",           "number of request",             OFFSET(options.nireq),           AV_OPT_TYPE_INT,    { .i64 = 0 },     0, INT_MAX, FLAGS },
@@ -49,6 +50,34 @@ typedef struct InferenceItem {
uint32_t bbox_index ;
} InferenceItem ;
/**
 * Common Async Execution Mechanism for the DNN Backends.
 */
typedef struct DNNAsyncExecModule {
    /**
     * Synchronous inference function for the backend
     * with corresponding request item as the argument.
     */
    DNNReturnType (*start_inference)(void *request);

    /**
     * Completion Callback for the backend.
     * Expected argument type of callback must match that
     * of the inference function.
     */
    void (*callback)(void *args);

    /**
     * Argument for the execution functions.
     * i.e. Request item for the backend.
     */
    void *args;

#if HAVE_PTHREAD_CANCEL
    // Handle and attributes of the thread spawned by
    // ff_dnn_start_inference_async() and joined by
    // ff_dnn_async_module_cleanup().
    pthread_t thread_id;
    pthread_attr_t thread_attr;
#endif
} DNNAsyncExecModule;
// Sanity-check exec_params for the given backend/function type before
// starting execution. NOTE(review): exact checks and return convention are
// not visible in this header — confirm against the .c implementation.
int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params);
/**
@ -66,4 +95,31 @@ int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func
 */
DNNReturnType ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc);
/**
 * Join the Async Execution thread and set module pointers to NULL.
 *
 * @param async_module pointer to DNNAsyncExecModule module
 *
 * @retval DNN_SUCCESS if successful
 * @retval DNN_ERROR if async_module is NULL
 */
DNNReturnType ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module);
/**
 * Start asynchronous inference routine for the TensorFlow
 * model on a detached thread. It calls the completion callback
 * after the inference completes. Completion callback and inference
 * function must be set before calling this function.
 *
 * If POSIX threads aren't supported, the execution rolls back
 * to synchronous mode, calling completion callback after inference.
 *
 * @param ctx pointer to the backend context
 * @param async_module pointer to DNNAsyncExecModule module
 *
 * @retval DNN_SUCCESS on the start of async inference.
 * @retval DNN_ERROR in case async inference cannot be started
 */
DNNReturnType ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_module);
# endif