diff --git a/examples/YOLOv8-ONNXRuntime-CPP/CMakeLists.txt b/examples/YOLOv8-ONNXRuntime-CPP/CMakeLists.txt
index 494a6f1c..86232ccb 100644
--- a/examples/YOLOv8-ONNXRuntime-CPP/CMakeLists.txt
+++ b/examples/YOLOv8-ONNXRuntime-CPP/CMakeLists.txt
@@ -11,20 +11,22 @@ set(CMAKE_CXX_EXTENSIONS ON)
set(CMAKE_INCLUDE_CURRENT_DIR ON)
-# OpenCV
+# -------------- OpenCV ------------------#
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
# -------------- Compile CUDA for FP16 inference if needed ------------------#
option(USE_CUDA "Enable CUDA support" ON)
-if (USE_CUDA)
+if (NOT APPLE AND USE_CUDA)
find_package(CUDA REQUIRED)
include_directories(${CUDA_INCLUDE_DIRS})
add_definitions(-DUSE_CUDA)
+else ()
+ set(USE_CUDA OFF)
endif ()
-# ONNXRUNTIME
+# -------------- ONNXRUNTIME ------------------#
# Set ONNXRUNTIME_VERSION
set(ONNXRUNTIME_VERSION 1.15.1)
@@ -84,3 +86,11 @@ endif ()
# Download https://raw.githubusercontent.com/ultralytics/ultralytics/main/ultralytics/cfg/datasets/coco.yaml
# and put it in the same folder of the executable file
configure_file(coco.yaml ${CMAKE_CURRENT_BINARY_DIR}/coco.yaml COPYONLY)
+
+# Copy the yolov8n.onnx model file to the same folder as the executable
+configure_file(yolov8n.onnx ${CMAKE_CURRENT_BINARY_DIR}/yolov8n.onnx COPYONLY)
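+# Note: configure_file() requires yolov8n.onnx to exist next to this CMakeLists.txt
+# at configure time, so export the model (see the README) before running CMake.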
+
+# Create a folder named "images" next to the executable; the example reads its test images from it
+add_custom_command(TARGET ${PROJECT_NAME} POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/images
+)
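+# (POST_BUILD commands run on every build; "cmake -E make_directory" is a no-op
+# when the images folder already exists.)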
diff --git a/examples/YOLOv8-ONNXRuntime-CPP/README.md b/examples/YOLOv8-ONNXRuntime-CPP/README.md
index 91fb3bc5..f70127ff 100644
--- a/examples/YOLOv8-ONNXRuntime-CPP/README.md
+++ b/examples/YOLOv8-ONNXRuntime-CPP/README.md
@@ -1,14 +1,19 @@
-# YOLOv8 OnnxRuntime C++
+<h1 align="center">YOLOv8 OnnxRuntime C++</h1>
+
This example demonstrates how to perform inference using YOLOv8 in C++ with ONNX Runtime and OpenCV's API.
-## Benefits
+## Benefits ✨
- Friendly for deployment in the industrial sector.
- Faster than OpenCV's DNN inference on both CPU and GPU.
- Supports FP32 and FP16 CUDA acceleration.
-## Exporting YOLOv8 Models
+## Exporting YOLOv8 Models 📦
To export YOLOv8 models, use the following Python script:
@@ -28,25 +33,50 @@ Alternatively, you can use the following command for exporting the model in the
yolo export model=yolov8n.pt opset=12 simplify=True dynamic=False format=onnx imgsz=640,640
```
-## Download COCO.yaml file
+## Download COCO.yaml file 📂
In order to run the example, you also need to download coco.yaml. You can download the file manually from [here](https://raw.githubusercontent.com/ultralytics/ultralytics/main/ultralytics/cfg/datasets/coco.yaml)
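+
+One way to fetch it from the command line:
+
+```console
+curl -o coco.yaml https://raw.githubusercontent.com/ultralytics/ultralytics/main/ultralytics/cfg/datasets/coco.yaml
+```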
-## Dependencies
+## Dependencies ⚙️
-| Dependency | Version |
-| -------------------------------- | ------------- |
-| Onnxruntime(linux,windows,macos) | >=1.14.1 |
-| OpenCV | >=4.0.0 |
-| C++ | >=17 |
-| Cmake | >=3.5 |
-| Cuda (Optional) | >=11.4,\<12.0 |
-| cuDNN (Cuda required) | =8 |
+| Dependency                           | Version        |
+| ------------------------------------ | -------------- |
+| ONNX Runtime (Linux, Windows, macOS) | >=1.14.1       |
+| OpenCV                               | >=4.0.0        |
+| C++ Standard                         | >=17           |
+| CMake                                | >=3.5          |
+| CUDA (optional)                      | >=11.4, \<12.0 |
+| cuDNN (requires CUDA)                | =8             |
Note: The dependency on C++17 is due to the usage of the C++17 filesystem feature.
+
Note (2): Due to ONNX Runtime, we need to use CUDA 11 and cuDNN 8. Keep in mind that this requirement might change in the future.
-## Usage
+## Build 🛠️
+
+1. Clone the repository to your local machine.
+2. Navigate to the root directory of the repository.
+3. Create a build directory and navigate to it:
+
+```console
+mkdir build && cd build
+```
+
+4. Run CMake to generate the build files:
+
+```console
+cmake ..
+```
+
+5. Build the project:
+
+```console
+make
+```
+
+6. The built executable should now be located in the `build` directory.
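+
+To build without CUDA on a machine where it is unavailable, turn off the `USE_CUDA` option defined in the project's CMakeLists.txt when generating the build files (on macOS it is disabled automatically):
+
+```console
+cmake -DUSE_CUDA=OFF ..
+make
+```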
+
+## Usage 🚀
```c++
// CPU inference
diff --git a/examples/YOLOv8-ONNXRuntime-CPP/main.cpp b/examples/YOLOv8-ONNXRuntime-CPP/main.cpp
index 2619ba57..00abec8a 100644
--- a/examples/YOLOv8-ONNXRuntime-CPP/main.cpp
+++ b/examples/YOLOv8-ONNXRuntime-CPP/main.cpp
@@ -1,4 +1,5 @@
#include <iostream>
+#include <iomanip>
+#include <sstream>
+#include <cmath>
#include "inference.h"
#include <filesystem>
#include <fstream>
@@ -18,16 +19,31 @@ void file_iterator(DCSP_CORE *&p) {
cv::Scalar color(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256));
cv::rectangle(img, re.box, color, 3);
- std::string label = p->classes[re.classId] + " " + std::to_string(re.confidence);
+
+ // Truncate the confidence to two decimals and format the on-image label text
+ // (std::to_string ignores stream precision, so use a string stream instead).
+ float confidence = std::floor(re.confidence * 100) / 100;
+ std::ostringstream labelStream;
+ labelStream << p->classes[re.classId] << " " << std::fixed << std::setprecision(2) << confidence;
+ std::string label = labelStream.str();
+
+ // Size the filled background from the rendered text metrics so it matches the
+ // label exactly, instead of guessing a fixed per-character width.
+ int baseline = 0;
+ cv::Size textSize = cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.75, 2, &baseline);
+ cv::rectangle(
+ img,
+ cv::Point(re.box.x, re.box.y - textSize.height - 10),
+ cv::Point(re.box.x + textSize.width, re.box.y),
+ color,
+ cv::FILLED
+ );
+
cv::putText(
img,
label,
cv::Point(re.box.x, re.box.y - 5),
cv::FONT_HERSHEY_SIMPLEX,
0.75,
- color,
+ cv::Scalar(0, 0, 0),
2
);
}
std::cout << "Press any key to exit" << std::endl;
cv::imshow("Result of Detection", img);