lavfi/dnn_backend_tf: Separate function for Completion Callback

This commit rearranges the existing code to create a separate function
for the completion callback in execute_model_tf.

Signed-off-by: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
branch: pull/359/head
Shubhanshu Saxena, 4 years ago; committed by Guo Yejun
parent b849228ae0
commit 84e4e60fdc
1 changed file: libavfilter/dnn/dnn_backend_tf.c (91 lines changed)

@@ -915,45 +915,19 @@ static DNNReturnType fill_model_input_tf(TFModel *tf_model, TFRequestItem *request)
     return DNN_SUCCESS;
 }
 
-static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *inference_queue)
-{
-    TFModel *tf_model;
-    TFContext *ctx;
-    TFInferRequest *infer_request;
-    InferenceItem *inference;
-    TaskItem *task;
+static void infer_completion_callback(void *args) {
+    TFRequestItem *request = args;
+    InferenceItem *inference = request->inference;
+    TaskItem *task = inference->task;
     DNNData *outputs;
-
-    inference = ff_queue_peek_front(inference_queue);
-    task = inference->task;
-    tf_model = task->model;
-    ctx = &tf_model->ctx;
-
-    if (task->async) {
-        avpriv_report_missing_feature(ctx, "Async execution not supported");
-        return DNN_ERROR;
-    } else {
-        if (fill_model_input_tf(tf_model, request) != DNN_SUCCESS) {
-            return DNN_ERROR;
-        }
-
-        infer_request = request->infer_request;
-        TF_SessionRun(tf_model->session, NULL,
-                      infer_request->tf_input, &infer_request->input_tensor, 1,
-                      infer_request->tf_outputs, infer_request->output_tensors,
-                      task->nb_output, NULL, 0, NULL,
-                      tf_model->status);
-        if (TF_GetCode(tf_model->status) != TF_OK) {
-            tf_free_request(infer_request);
-            av_log(ctx, AV_LOG_ERROR, "Failed to run session when executing model\n");
-            return DNN_ERROR;
-        }
+    TFInferRequest *infer_request = request->infer_request;
+    TFModel *tf_model = task->model;
+    TFContext *ctx = &tf_model->ctx;
 
     outputs = av_malloc_array(task->nb_output, sizeof(*outputs));
     if (!outputs) {
-        tf_free_request(infer_request);
         av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *outputs\n");
-        return DNN_ERROR;
+        goto err;
     }
 
     for (uint32_t i = 0; i < task->nb_output; ++i) {
@@ -980,20 +954,59 @@ static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *inference_queue)
     case DFT_ANALYTICS_DETECT:
         if (!tf_model->model->detect_post_proc) {
             av_log(ctx, AV_LOG_ERROR, "Detect filter needs provide post proc\n");
-            return DNN_ERROR;
+            return;
         }
         tf_model->model->detect_post_proc(task->out_frame, outputs, task->nb_output, tf_model->model->filter_ctx);
         break;
     default:
-        tf_free_request(infer_request);
         av_log(ctx, AV_LOG_ERROR, "Tensorflow backend does not support this kind of dnn filter now\n");
-        return DNN_ERROR;
+        goto err;
     }
     task->inference_done++;
+err:
     tf_free_request(infer_request);
     av_freep(&outputs);
 
-    ff_safe_queue_push_back(tf_model->request_queue, request);
+    if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
+        av_freep(&request->infer_request);
+        av_freep(&request);
+        av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
+    }
+}
+
+static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *inference_queue)
+{
+    TFModel *tf_model;
+    TFContext *ctx;
+    TFInferRequest *infer_request;
+    InferenceItem *inference;
+    TaskItem *task;
+
+    inference = ff_queue_peek_front(inference_queue);
+    task = inference->task;
+    tf_model = task->model;
+    ctx = &tf_model->ctx;
+
+    if (task->async) {
+        avpriv_report_missing_feature(ctx, "Async execution not supported");
+        return DNN_ERROR;
+    } else {
+        if (fill_model_input_tf(tf_model, request) != DNN_SUCCESS) {
+            return DNN_ERROR;
+        }
+
+        infer_request = request->infer_request;
+        TF_SessionRun(tf_model->session, NULL,
+                      infer_request->tf_input, &infer_request->input_tensor, 1,
+                      infer_request->tf_outputs, infer_request->output_tensors,
+                      task->nb_output, NULL, 0, NULL,
+                      tf_model->status);
+        if (TF_GetCode(tf_model->status) != TF_OK) {
+            tf_free_request(infer_request);
+            av_log(ctx, AV_LOG_ERROR, "Failed to run session when executing model\n");
+            return DNN_ERROR;
+        }
+
+        infer_completion_callback(request);
+
         return (task->inference_done == task->inference_todo) ? DNN_SUCCESS : DNN_ERROR;
     }
 }
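
Note: the payoff of this rearrangement is that the completion logic now has the standard void (*)(void *) callback shape, so the synchronous path can call it directly (as execute_model_tf now does with infer_completion_callback(request)), while a later asynchronous path could invoke the same function once TF_SessionRun finishes on a worker thread. Below is a minimal standalone sketch of that pattern; every name in it is illustrative, not FFmpeg API.

/* Standalone sketch of the separated-completion-callback pattern.
 * All names below are hypothetical stand-ins, not FFmpeg API. */
#include <stdio.h>

typedef struct Request {
    int result; /* stands in for the inference output tensors */
    int done;   /* stands in for task->inference_done         */
} Request;

/* Completion callback with the same void (*)(void *) shape as
 * infer_completion_callback: post-process the result, then do
 * the bookkeeping that marks the inference as finished. */
static void completion_callback(void *args)
{
    Request *request = args;
    printf("post-processing result %d\n", request->result);
    request->done = 1;
}

/* Synchronous path, like execute_model_tf after this commit:
 * run inference, then invoke the callback directly. An async
 * executor would instead invoke it when the run completes. */
static int execute_model(Request *request)
{
    request->result = 42;         /* stands in for TF_SessionRun() */
    completion_callback(request);
    return request->done ? 0 : -1;
}

int main(void)
{
    Request request = {0, 0};
    return execute_model(&request);
}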
