lavfi/dnn_backend_openvino.c: Fix Memory Leak in execute_model_ov

If execution inside execute_model_ov fails, push the RequestItem back to
the request_queue before returning the error. If pushing it back fails,
release the allocated memory instead.

Signed-off-by: Shubhanshu Saxena <shubhanshu.e01@gmail.com>

commit f73943d514 (parent 301d275301)
Authored by Shubhanshu Saxena, committed by Guo Yejun

diff --git a/libavfilter/dnn/dnn_backend_openvino.c b/libavfilter/dnn/dnn_backend_openvino.c
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -448,12 +448,12 @@ static DNNReturnType execute_model_ov(RequestItem *request, Queue *inferenceq)
         status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
         if (status != OK) {
             av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
-            return DNN_ERROR;
+            goto err;
         }
         status = ie_infer_request_infer_async(request->infer_request);
         if (status != OK) {
             av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
-            return DNN_ERROR;
+            goto err;
         }
         return DNN_SUCCESS;
     } else {
@@ -464,11 +464,17 @@ static DNNReturnType execute_model_ov(RequestItem *request, Queue *inferenceq)
         status = ie_infer_request_infer(request->infer_request);
         if (status != OK) {
             av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference\n");
-            return DNN_ERROR;
+            goto err;
         }
         infer_completion_callback(request);
         return (task->inference_done == task->inference_todo) ? DNN_SUCCESS : DNN_ERROR;
     }
+err:
+    if (ff_safe_queue_push_back(ov_model->request_queue, request) < 0) {
+        ie_infer_request_free(&request->infer_request);
+        av_freep(&request);
+    }
+    return DNN_ERROR;
 }
 
 static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input_name)
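
For context on why the item has to go back on the queue: callers of execute_model_ov pop a RequestItem off the model's request_queue before calling it, so an early error return that simply drops the item leaks both the RequestItem and its infer_request. Below is a minimal caller-side sketch of that contract; it is illustrative only, and the surrounding variables (ov_model, the exact queue field names) are assumptions rather than a copy of the FFmpeg caller.

/* Caller-side sketch (illustrative): a RequestItem is taken from the model's
 * safe queue before execution and must end up either back on that queue or
 * fully released. */
RequestItem *request = ff_safe_queue_pop_front(ov_model->request_queue);
if (!request)
    return DNN_ERROR;

if (execute_model_ov(request, ov_model->inference_queue) != DNN_SUCCESS) {
    /* With this patch, execute_model_ov() has already pushed the request back
     * onto request_queue, or freed request->infer_request and the RequestItem
     * itself if the push-back failed, so the caller does not leak anything. */
    return DNN_ERROR;
}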
