diff --git a/cmake/FindONNX.cmake b/cmake/FindONNX.cmake index b2c79a9031..956eeafedb 100644 --- a/cmake/FindONNX.cmake +++ b/cmake/FindONNX.cmake @@ -32,6 +32,14 @@ if(ORT_LIB AND ORT_INCLUDE) HAVE_ONNX_DML ) + # Check CoreML Execution Provider availability + get_filename_component(coreml_dir ${ONNXRT_ROOT_DIR}/include/onnxruntime/core/providers/coreml ABSOLUTE) + detect_onxxrt_ep( + coreml_provider_factory.h + ${coreml_dir} + HAVE_ONNX_COREML + ) + set(HAVE_ONNX TRUE) # For CMake output only set(ONNX_LIBRARIES "${ORT_LIB}" CACHE STRING "ONNX Runtime libraries") diff --git a/modules/gapi/CMakeLists.txt b/modules/gapi/CMakeLists.txt index b3dce577dc..85831500c3 100644 --- a/modules/gapi/CMakeLists.txt +++ b/modules/gapi/CMakeLists.txt @@ -165,6 +165,7 @@ set(gapi_srcs # ONNX backend src/backends/onnx/gonnxbackend.cpp src/backends/onnx/dml_ep.cpp + src/backends/onnx/coreml_ep.cpp # Render backend src/backends/render/grenderocv.cpp diff --git a/modules/gapi/include/opencv2/gapi/infer/bindings_onnx.hpp b/modules/gapi/include/opencv2/gapi/infer/bindings_onnx.hpp index c418c0d496..fb2376ece8 100644 --- a/modules/gapi/include/opencv2/gapi/infer/bindings_onnx.hpp +++ b/modules/gapi/include/opencv2/gapi/infer/bindings_onnx.hpp @@ -39,6 +39,9 @@ public: GAPI_WRAP PyParams& cfgAddExecutionProvider(ep::DirectML ep); + GAPI_WRAP + PyParams& cfgAddExecutionProvider(ep::CoreML ep); + GAPI_WRAP PyParams& cfgAddExecutionProvider(ep::CUDA ep); diff --git a/modules/gapi/include/opencv2/gapi/infer/onnx.hpp b/modules/gapi/include/opencv2/gapi/infer/onnx.hpp index ae160ac3e5..ec5950718e 100644 --- a/modules/gapi/include/opencv2/gapi/infer/onnx.hpp +++ b/modules/gapi/include/opencv2/gapi/infer/onnx.hpp @@ -32,6 +32,65 @@ namespace onnx { */ namespace ep { +/** + * @brief This structure provides functions + * that fill inference options for ONNX CoreML Execution Provider. 
+ * Please follow https://onnxruntime.ai/docs/execution-providers/CoreML-ExecutionProvider.html#coreml-execution-provider + */ +struct GAPI_EXPORTS_W_SIMPLE CoreML { + /** @brief Class constructor. + + Constructs CoreML parameters. + + */ + GAPI_WRAP + CoreML() = default; + + /** @brief Limit CoreML Execution Provider to run on CPU only. + + This function is used to limit CoreML to run on CPU only. + Please follow: https://onnxruntime.ai/docs/execution-providers/CoreML-ExecutionProvider.html#coreml_flag_use_cpu_only + + @return reference to this parameter structure. + */ + GAPI_WRAP + CoreML& cfgUseCPUOnly() { + use_cpu_only = true; + return *this; + } + + /** @brief Enable CoreML EP to run on a subgraph in the body of a control flow ONNX operator (i.e. a Loop, Scan or If operator). + + This function is used to enable CoreML EP to run on + a subgraph of a control flow of ONNX operation. + Please follow: https://onnxruntime.ai/docs/execution-providers/CoreML-ExecutionProvider.html#coreml_flag_enable_on_subgraph + + @return reference to this parameter structure. + */ + GAPI_WRAP + CoreML& cfgEnableOnSubgraph() { + enable_on_subgraph = true; + return *this; + } + + /** @brief Enable CoreML EP to run only on Apple Neural Engine. + + This function is used to enable CoreML EP to run only on Apple Neural Engine. + Please follow: https://onnxruntime.ai/docs/execution-providers/CoreML-ExecutionProvider.html#coreml_flag_only_enable_device_with_ane + + @return reference to this parameter structure. + */ + GAPI_WRAP + CoreML& cfgEnableOnlyNeuralEngine() { + enable_only_ane = true; + return *this; + } + + bool use_cpu_only = false; + bool enable_on_subgraph = false; + bool enable_only_ane = false; +}; + /** * @brief This structure provides functions * that fill inference options for CUDA Execution Provider. 
@@ -205,6 +264,7 @@ public: using EP = cv::util::variant< cv::util::monostate , OpenVINO , DirectML + , CoreML , CUDA , TensorRT>; @@ -496,6 +556,20 @@ public: /** @brief Adds execution provider for runtime. + The function is used to add ONNX Runtime CoreML Execution Provider options. + + @param ep CoreML Execution Provider options. + @see cv::gapi::onnx::ep::CoreML. + + @return the reference on modified object. + */ + Params& cfgAddExecutionProvider(ep::CoreML&& ep) { + desc.execution_providers.emplace_back(std::move(ep)); + return *this; + } + + /** @brief Adds execution provider for runtime. + The function is used to add ONNX Runtime CUDA Execution Provider options. @param ep CUDA Execution Provider options. @@ -582,6 +656,11 @@ public: desc.execution_providers.emplace_back(std::move(ep)); } + /** @see onnx::Params::cfgAddExecutionProvider. */ + void cfgAddExecutionProvider(ep::CoreML&& ep) { + desc.execution_providers.emplace_back(std::move(ep)); + } + /** @see onnx::Params::cfgAddExecutionProvider. 
*/ void cfgAddExecutionProvider(ep::CUDA&& ep) { desc.execution_providers.emplace_back(std::move(ep)); diff --git a/modules/gapi/misc/python/pyopencv_gapi.hpp b/modules/gapi/misc/python/pyopencv_gapi.hpp index a13b8e545d..91faa4b2fc 100644 --- a/modules/gapi/misc/python/pyopencv_gapi.hpp +++ b/modules/gapi/misc/python/pyopencv_gapi.hpp @@ -31,6 +31,7 @@ using map_string_and_vector_float = std::map<std::string, std::vector<float>>; using map_int_and_double = std::map<int, double>; using ep_OpenVINO = cv::gapi::onnx::ep::OpenVINO; using ep_DirectML = cv::gapi::onnx::ep::DirectML; +using ep_CoreML = cv::gapi::onnx::ep::CoreML; using ep_CUDA = cv::gapi::onnx::ep::CUDA; using ep_TensorRT = cv::gapi::onnx::ep::TensorRT; diff --git a/modules/gapi/src/backends/onnx/bindings_onnx.cpp b/modules/gapi/src/backends/onnx/bindings_onnx.cpp index b41ec7b1b1..0703f1753d 100644 --- a/modules/gapi/src/backends/onnx/bindings_onnx.cpp +++ b/modules/gapi/src/backends/onnx/bindings_onnx.cpp @@ -33,6 +33,12 @@ cv::gapi::onnx::PyParams::cfgAddExecutionProvider(cv::gapi::onnx::ep::DirectML e return *this; } +cv::gapi::onnx::PyParams& +cv::gapi::onnx::PyParams::cfgAddExecutionProvider(cv::gapi::onnx::ep::CoreML ep) { + m_priv->cfgAddExecutionProvider(std::move(ep)); + return *this; +} + cv::gapi::onnx::PyParams& cv::gapi::onnx::PyParams::cfgAddExecutionProvider(cv::gapi::onnx::ep::CUDA ep) { m_priv->cfgAddExecutionProvider(std::move(ep)); diff --git a/modules/gapi/src/backends/onnx/coreml_ep.cpp b/modules/gapi/src/backends/onnx/coreml_ep.cpp new file mode 100644 index 0000000000..3c9507863d --- /dev/null +++ b/modules/gapi/src/backends/onnx/coreml_ep.cpp @@ -0,0 +1,50 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2023 Intel Corporation + +#include "backends/onnx/coreml_ep.hpp" +#include "logger.hpp" + +#ifdef HAVE_ONNX +#include <onnxruntime_cxx_api.h> + +#ifdef HAVE_ONNX_COREML +#include "../providers/coreml/coreml_provider_factory.h" + +void cv::gimpl::onnx::addCoreMLExecutionProvider(Ort::SessionOptions *session_options, + const cv::gapi::onnx::ep::CoreML &coreml_ep) { + uint32_t flags = 0u; + if (coreml_ep.use_cpu_only) { + flags |= COREML_FLAG_USE_CPU_ONLY; + } + + if (coreml_ep.enable_on_subgraph) { + flags |= COREML_FLAG_ENABLE_ON_SUBGRAPH; + } + + if (coreml_ep.enable_only_ane) { + flags |= COREML_FLAG_ONLY_ENABLE_DEVICE_WITH_ANE; + } + + try { + OrtSessionOptionsAppendExecutionProvider_CoreML(*session_options, flags); + } catch (const std::exception &e) { + std::stringstream ss; + ss << "ONNX Backend: Failed to enable CoreML" + << " Execution Provider: " << e.what(); + cv::util::throw_error(std::runtime_error(ss.str())); + } +} + +#else // HAVE_ONNX_COREML + +void cv::gimpl::onnx::addCoreMLExecutionProvider(Ort::SessionOptions*, + const cv::gapi::onnx::ep::CoreML&) { + util::throw_error(std::runtime_error("G-API has been compiled with ONNXRT" + " without CoreML support")); +} + +#endif // HAVE_ONNX_COREML +#endif // HAVE_ONNX diff --git a/modules/gapi/src/backends/onnx/coreml_ep.hpp b/modules/gapi/src/backends/onnx/coreml_ep.hpp new file mode 100644 index 0000000000..ddc2baeae9 --- /dev/null +++ b/modules/gapi/src/backends/onnx/coreml_ep.hpp @@ -0,0 +1,23 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2023 Intel Corporation + +#ifndef OPENCV_GAPI_COREML_EP_HPP +#define OPENCV_GAPI_COREML_EP_HPP + +#include "opencv2/gapi/infer/onnx.hpp" +#ifdef HAVE_ONNX + +#include <onnxruntime_cxx_api.h> + +namespace cv { +namespace gimpl { +namespace onnx { +void addCoreMLExecutionProvider(Ort::SessionOptions *session_options, + const cv::gapi::onnx::ep::CoreML &coreml_ep); +}}} + +#endif // HAVE_ONNX +#endif // OPENCV_GAPI_COREML_EP_HPP diff --git a/modules/gapi/src/backends/onnx/gonnxbackend.cpp b/modules/gapi/src/backends/onnx/gonnxbackend.cpp index c552b8b0e6..e2ddd3ea59 100644 --- a/modules/gapi/src/backends/onnx/gonnxbackend.cpp +++ b/modules/gapi/src/backends/onnx/gonnxbackend.cpp @@ -10,6 +10,7 @@ #ifdef HAVE_ONNX #include "backends/onnx/dml_ep.hpp" +#include "backends/onnx/coreml_ep.hpp" #include <ade/util/algorithm.hpp> // any_of #include <ade/util/zip_range.hpp> @@ -211,6 +212,12 @@ static void addExecutionProvider(Ort::SessionOptions *session_options, addDMLExecutionProvider(session_options, dml_ep); break; } + case ep::EP::index_of<ep::CoreML>(): { + GAPI_LOG_INFO(NULL, "CoreML Execution Provider is added."); + const auto &coreml_ep = cv::util::get<ep::CoreML>(execution_provider); + addCoreMLExecutionProvider(session_options, coreml_ep); + break; + } case ep::EP::index_of<ep::CUDA>(): { GAPI_LOG_INFO(NULL, "CUDA Execution Provider is added."); const auto &cuda_ep = cv::util::get<ep::CUDA>(execution_provider);