diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml
index 216250fba9..4dd8aa38b0 100644
--- a/.github/workflows/links.yml
+++ b/.github/workflows/links.yml
@@ -1,6 +1,7 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Continuous Integration (CI) GitHub Actions tests broken link checker using https://github.com/lycheeverse/lychee
# Ignores the following status codes to reduce false positives:
+# - 401(Vimeo, 'unauthorized')
# - 403(OpenVINO, 'forbidden')
# - 429(Instagram, 'too many requests')
# - 500(Zenodo, 'cached')
@@ -38,7 +39,7 @@ jobs:
--scheme https \
--timeout 60 \
--insecure \
- --accept 403,429,500,502,999 \
+ --accept 401,403,429,500,502,999 \
--exclude-all-private \
--exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' \
--exclude-path docs/zh \
@@ -68,7 +69,7 @@ jobs:
--scheme https \
--timeout 60 \
--insecure \
- --accept 429,999 \
+ --accept 401,403,429,500,502,999 \
--exclude-all-private \
--exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' \
--exclude-path '**/ci.yaml' \
diff --git a/docs/en/guides/nvidia-jetson.md b/docs/en/guides/nvidia-jetson.md
index d715b086e9..a361121ab9 100644
--- a/docs/en/guides/nvidia-jetson.md
+++ b/docs/en/guides/nvidia-jetson.md
@@ -23,7 +23,7 @@ This comprehensive guide provides a detailed walkthrough for deploying Ultralyti
!!! Note

-    This guide has been tested with both [Seeed Studio reComputer J4012](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) which is based on NVIDIA Jetson Orin NX 16GB running the latest stable JetPack release of [JP5.1.3](https://developer.nvidia.com/embedded/jetpack-sdk-513) and [Seeed Studio reComputer J1020 v2](https://www.seeedstudio.com/reComputer-J1020-v2-p-5498.html) which is based on NVIDIA Jetson Nano 4GB running JetPack release of [JP4.6.1](https://developer.nvidia.com/embedded/jetpack-sdk-461). It is expected to work across all the NVIDIA Jetson hardware lineup including latest and legacy.
+    This guide has been tested with [Seeed Studio reComputer J4012](https://www.seeedstudio.com/reComputer-J4012-p-5586.html), which is based on NVIDIA Jetson Orin NX 16GB running the latest stable JetPack release [JP6.0](https://developer.nvidia.com/embedded/jetpack-sdk-60) as well as JetPack release [JP5.1.3](https://developer.nvidia.com/embedded/jetpack-sdk-513), and with [Seeed Studio reComputer J1020 v2](https://www.seeedstudio.com/reComputer-J1020-v2-p-5498.html), which is based on NVIDIA Jetson Nano 4GB running JetPack release [JP4.6.1](https://developer.nvidia.com/embedded/jetpack-sdk-461). It is expected to work across the entire NVIDIA Jetson hardware lineup, including the latest and legacy devices.

## What is NVIDIA Jetson?

@@ -61,32 +61,111 @@ The first step after getting your hands on an NVIDIA Jetson device is to flash N
For methods 3 and 4 above, after flashing the system and booting the device, please enter "sudo apt update && sudo apt install nvidia-jetpack -y" on the device terminal to install all the remaining JetPack components needed.

-## Run on JetPack 5.x
+## JetPack Support Based on Jetson Device

-If you own a Jetson Xavier NX, AGX Xavier, AGX Orin, Orin Nano or Orin NX which supports JetPack 5.x, you can continue to follow this guide. However, if you have a legacy device such as Jetson Nano, please skip to [Run on JetPack 4.x](#run-on-jetpack-4x).
+The table below highlights the NVIDIA JetPack versions supported by different NVIDIA Jetson devices.
-### Set Up Ultralytics
+| | JetPack 4 | JetPack 5 | JetPack 6 |
+| ----------------- | --------- | --------- | --------- |
+| Jetson Nano | ✅ | ❌ | ❌ |
+| Jetson TX2 | ✅ | ❌ | ❌ |
+| Jetson Xavier NX | ✅ | ✅ | ❌ |
+| Jetson AGX Xavier | ✅ | ✅ | ❌ |
+| Jetson AGX Orin | ❌ | ✅ | ✅ |
+| Jetson Orin NX | ❌ | ✅ | ✅ |
+| Jetson Orin Nano | ❌ | ✅ | ✅ |

-There are two ways of setting up Ultralytics package on NVIDIA Jetson to build your next Computer Vision project. You can use either of them.
+## Quick Start with Docker

-- [Start with Docker](#start-with-docker)
-- [Start without Docker](#start-without-docker)
+The fastest way to get started with Ultralytics YOLOv8 on NVIDIA Jetson is to run with pre-built Docker images for Jetson. Refer to the table above and choose the JetPack version matching the Jetson device you own.

-#### Start with Docker
+=== "JetPack 4"

-The fastest way to get started with Ultralytics YOLOv8 on NVIDIA Jetson is to run with pre-built docker image for Jetson.
+    ```bash
+    t=ultralytics/ultralytics:latest-jetson-jetpack4
+    sudo docker pull $t && sudo docker run -it --ipc=host --runtime=nvidia $t
+    ```
+
+=== "JetPack 5"
+
+    ```bash
+    t=ultralytics/ultralytics:latest-jetson-jetpack5
+    sudo docker pull $t && sudo docker run -it --ipc=host --runtime=nvidia $t
+    ```
+
+=== "JetPack 6"
+
+    ```bash
+    t=ultralytics/ultralytics:latest-jetson-jetpack6
+    sudo docker pull $t && sudo docker run -it --ipc=host --runtime=nvidia $t
+    ```
+
+After this is done, skip to [Use TensorRT on NVIDIA Jetson section](#use-tensorrt-on-nvidia-jetson).
+
+## Start with Native Installation
+
+For a native installation without Docker, please refer to the steps below.
+
+### Run on JetPack 6.x
+
+#### Install Ultralytics Package
+
+Here we will install the Ultralytics package on the Jetson with optional dependencies so that we can export the PyTorch models to other formats. We will mainly focus on [NVIDIA TensorRT exports](../integrations/tensorrt.md) because TensorRT will make sure we can get the maximum performance out of the Jetson devices.
+
+1. Update the packages list, install pip and upgrade it to the latest version
+
+    ```bash
+    sudo apt update
+    sudo apt install python3-pip -y
+    pip install -U pip
+    ```
+
+2. Install the `ultralytics` pip package with optional dependencies
+
+    ```bash
+    pip install ultralytics[export]
+    ```
+
+3. Reboot the device
+
+    ```bash
+    sudo reboot
+    ```
+
+#### Install PyTorch and Torchvision

-Execute the below command to pull the Docker container and run on Jetson. This is based on [l4t-pytorch](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/l4t-pytorch) docker image which contains PyTorch and Torchvision in a Python3 environment.
+The above Ultralytics installation will install Torch and Torchvision. However, these two packages installed via pip are not compatible with the ARM64-based Jetson platform. Therefore, we need to manually install a pre-built PyTorch pip wheel and compile/install Torchvision from source.
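+
+!!! Tip
+
+    Not sure which JetPack release the device is running? On most JetPack installs you can confirm it before downloading any wheels. These are standard NVIDIA Jetson commands, shown here as a convenience rather than an official step of this guide:
+
+    ```bash
+    # Print the L4T (Linux for Tegra) release the board was flashed with
+    cat /etc/nv_tegra_release
+
+    # Show the installed JetPack meta-package version, if available
+    apt-cache show nvidia-jetpack
+    ```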
+
+Install `torch 2.3.0` and `torchvision 0.18` according to JP6.0

```bash
-t=ultralytics/ultralytics:latest-jetson-jetpack5 && sudo docker pull $t && sudo docker run -it --ipc=host --runtime=nvidia $t
+sudo apt-get install libopenmpi-dev libopenblas-base libomp-dev -y
+pip install https://github.com/ultralytics/assets/releases/download/v0.0.0/torch-2.3.0-cp310-cp310-linux_aarch64.whl
+pip install https://github.com/ultralytics/assets/releases/download/v0.0.0/torchvision-0.18.0a0+6043bc2-cp310-cp310-linux_aarch64.whl
```

-After this is done, skip to [Use TensorRT on NVIDIA Jetson section](#use-tensorrt-on-nvidia-jetson).
+Visit the [PyTorch for Jetson page](https://forums.developer.nvidia.com/t/pytorch-for-jetson/72048) to access all the different versions of PyTorch for different JetPack versions. For a more detailed list of PyTorch and Torchvision compatibility, visit the [PyTorch and Torchvision compatibility page](https://github.com/pytorch/vision).

-#### Start without Docker
+#### Install `onnxruntime-gpu`

-##### Install Ultralytics Package
+The [onnxruntime-gpu](https://pypi.org/project/onnxruntime-gpu/) package hosted on PyPI does not have `aarch64` binaries for the Jetson, so we need to install this package manually. It is needed for some of the exports.
+
+All the `onnxruntime-gpu` packages corresponding to different JetPack and Python versions are listed [here](https://elinux.org/Jetson_Zoo#ONNX_Runtime). Here we will download and install `onnxruntime-gpu 1.18.0` with `Python3.10` support.
+
+```bash
+wget https://nvidia.box.com/shared/static/48dtuob7meiw6ebgfsfqakc9vse62sg4.whl -O onnxruntime_gpu-1.18.0-cp310-cp310-linux_aarch64.whl
+pip install onnxruntime_gpu-1.18.0-cp310-cp310-linux_aarch64.whl
+```
+
+!!! Note
+
+    Installing `onnxruntime-gpu` will automatically change the numpy version to the latest, so we need to reinstall numpy `1.23.5` to fix an issue by executing:
+
+    `pip install numpy==1.23.5`
+
+### Run on JetPack 5.x
+
+#### Install Ultralytics Package

Here we will install Ultralytics package on the Jetson with optional dependencies so that we can export the PyTorch models to other different formats. We will mainly focus on [NVIDIA TensorRT exports](../integrations/tensorrt.md) because TensorRT will make sure we can get the maximum performance out of the Jetson devices.
@@ -110,7 +189,7 @@ Here we will install Ultralytics package on the Jetson with optional dependencie
    sudo reboot
    ```

-##### Install PyTorch and Torchvision
+#### Install PyTorch and Torchvision

The above ultralytics installation will install Torch and Torchvision. However, these 2 packages installed via pip are not compatible to run on Jetson platform which is based on ARM64 architecture. Therefore, we need to manually install pre-built PyTorch pip wheel and compile/ install Torchvision from source.
@@ -140,11 +219,11 @@ The above ultralytics installation will install Torch and Torchvision. However,
Visit the [PyTorch for Jetson page](https://forums.developer.nvidia.com/t/pytorch-for-jetson/72048) to access all different versions of PyTorch for different JetPack versions. For a more detailed list on the PyTorch, Torchvision compatibility, visit the [PyTorch and Torchvision compatibility page](https://github.com/pytorch/vision).

-##### Install `onnxruntime-gpu`
+#### Install `onnxruntime-gpu`

The [onnxruntime-gpu](https://pypi.org/project/onnxruntime-gpu/) package hosted in PyPI does not have `aarch64` binaries for the Jetson. So we need to manually install this package.
This package is needed for some of the exports. -All different `onnxruntime-gpu` packages corresponding to different JetPack and Python versions are listed [here](https://elinux.org/Jetson_Zoo#ONNX_Runtime). However, here we will download and install `onnxruntime-gpu 1.17.0` with `Python3.8` support for the JetPack we are using for this guide. +All different `onnxruntime-gpu` packages corresponding to different JetPack and Python versions are listed [here](https://elinux.org/Jetson_Zoo#ONNX_Runtime). However, here we will download and install `onnxruntime-gpu 1.17.0` with `Python3.8` support. ```bash wget https://nvidia.box.com/shared/static/zostg6agm00fb6t5uisw51qi6kpcuwzd.whl -O onnxruntime_gpu-1.17.0-cp38-cp38-linux_aarch64.whl @@ -157,16 +236,6 @@ pip install onnxruntime_gpu-1.17.0-cp38-cp38-linux_aarch64.whl `pip install numpy==1.23.5` -## Run on JetPack 4.x - -Here we support to run Ultralytics on legacy hardware such as the Jetson Nano. Currently we use Docker to achieve this. - -Execute the below command to pull the Docker container and run on Jetson. This is based on [l4t-cuda](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/l4t-cuda) docker image which contains CUDA in a L4T environment. - -```bash -t=ultralytics/ultralytics:latest-jetson-jetpack4 && sudo docker pull $t && sudo docker run -it --ipc=host --runtime=nvidia $t -``` - ## Use TensorRT on NVIDIA Jetson Out of all the model export formats supported by Ultralytics, TensorRT delivers the best inference performance when working with NVIDIA Jetson devices and our recommendation is to use TensorRT with Jetson. We also have a detailed document on TensorRT [here](../integrations/tensorrt.md). @@ -372,7 +441,7 @@ Congratulations on successfully setting up YOLOv8 on your NVIDIA Jetson! For fur ### How do I deploy Ultralytics YOLOv8 on NVIDIA Jetson devices? -Deploying Ultralytics YOLOv8 on NVIDIA Jetson devices is a straightforward process. First, flash your Jetson device with the NVIDIA JetPack SDK. Then, either use a pre-built Docker image for quick setup or manually install the required packages. Detailed steps for each approach can be found in sections [Start with Docker](#start-with-docker) and [Start without Docker](#start-without-docker). +Deploying Ultralytics YOLOv8 on NVIDIA Jetson devices is a straightforward process. First, flash your Jetson device with the NVIDIA JetPack SDK. Then, either use a pre-built Docker image for quick setup or manually install the required packages. Detailed steps for each approach can be found in sections [Quick Start with Docker](#quick-start-with-docker) and [Start with Native Installation](#start-with-native-installation). ### What performance benchmarks can I expect from YOLOv8 models on NVIDIA Jetson devices? diff --git a/docs/en/integrations/openvino.md b/docs/en/integrations/openvino.md index 37f63b4338..32dd031224 100644 --- a/docs/en/integrations/openvino.md +++ b/docs/en/integrations/openvino.md @@ -59,13 +59,14 @@ Export a YOLOv8n model to OpenVINO format and run inference with the exported mo ## Arguments -| Key | Value | Description | -| -------- | ------------ | ---------------------------------------------------- | -| `format` | `'openvino'` | format to export to | -| `imgsz` | `640` | image size as scalar or (h, w) list, i.e. 
(640, 480) | -| `half` | `False` | FP16 quantization | -| `int8` | `False` | INT8 quantization | -| `batch` | `1` | batch size for inference | +| Key | Value | Description | +| --------- | ------------ | ---------------------------------------------------- | +| `format` | `'openvino'` | format to export to | +| `imgsz` | `640` | image size as scalar or (h, w) list, i.e. (640, 480) | +| `half` | `False` | FP16 quantization | +| `int8` | `False` | INT8 quantization | +| `batch` | `1` | batch size for inference | +| `dynamic` | `False` | allows dynamic input sizes | ## Benefits of OpenVINO diff --git a/docs/en/integrations/ray-tune.md b/docs/en/integrations/ray-tune.md index 34f685609a..9b2d25fb06 100644 --- a/docs/en/integrations/ray-tune.md +++ b/docs/en/integrations/ray-tune.md @@ -165,7 +165,7 @@ You can plot the history of reported metrics for each trial to see how the metri ```python import matplotlib.pyplot as plt -for result in result_grid: +for i, result in enumerate(result_grid): plt.plot( result.metrics_dataframe["training_iteration"], result.metrics_dataframe["mean_accuracy"], diff --git a/docs/en/modes/benchmark.md b/docs/en/modes/benchmark.md index 9b4ebd5cf3..3329a5e821 100644 --- a/docs/en/modes/benchmark.md +++ b/docs/en/modes/benchmark.md @@ -92,7 +92,7 @@ Benchmarks will attempt to run automatically on all possible export formats belo | [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - | | [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | | [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | -| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch`, `dynamic` | | [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | | [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | | [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | diff --git a/docs/en/modes/export.md b/docs/en/modes/export.md index cd4b532050..eff8d49387 100644 --- a/docs/en/modes/export.md +++ b/docs/en/modes/export.md @@ -82,7 +82,7 @@ This table details the configurations and options available for exporting YOLO m | `optimize` | `bool` | `False` | Applies optimization for mobile devices when exporting to TorchScript, potentially reducing model size and improving performance. | | `half` | `bool` | `False` | Enables FP16 (half-precision) quantization, reducing model size and potentially speeding up inference on supported hardware. | | `int8` | `bool` | `False` | Activates INT8 quantization, further compressing the model and speeding up inference with minimal accuracy loss, primarily for edge devices. | -| `dynamic` | `bool` | `False` | Allows dynamic input sizes for ONNX and TensorRT exports, enhancing flexibility in handling varying image dimensions. | +| `dynamic` | `bool` | `False` | Allows dynamic input sizes for ONNX, TensorRT and OpenVINO exports, enhancing flexibility in handling varying image dimensions. 
| | `simplify` | `bool` | `False` | Simplifies the model graph for ONNX exports with `onnxslim`, potentially improving performance and compatibility. | | `opset` | `int` | `None` | Specifies the ONNX opset version for compatibility with different ONNX parsers and runtimes. If not set, uses the latest supported version. | | `workspace` | `float` | `4.0` | Sets the maximum workspace size in GiB for TensorRT optimizations, balancing memory usage and performance. | @@ -100,7 +100,7 @@ Available YOLOv8 export formats are in the table below. You can export to any fo | [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - | | [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | | [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | -| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch`, `dynamic` | | [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | | [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | | [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | diff --git a/docs/en/tasks/classify.md b/docs/en/tasks/classify.md index 111e66f2ba..8658652751 100644 --- a/docs/en/tasks/classify.md +++ b/docs/en/tasks/classify.md @@ -170,7 +170,7 @@ Available YOLOv8-cls export formats are in the table below. You can export to an | [PyTorch](https://pytorch.org/) | - | `yolov8n-cls.pt` | ✅ | - | | [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n-cls.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | | [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n-cls.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | -| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n-cls_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n-cls_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch`, `dynamic` | | [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n-cls.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | | [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n-cls.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | | [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n-cls_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | diff --git a/docs/en/tasks/detect.md b/docs/en/tasks/detect.md index 2acf08d1a0..32d9a1065b 100644 --- a/docs/en/tasks/detect.md +++ b/docs/en/tasks/detect.md @@ -172,7 +172,7 @@ Available YOLOv8 export formats are in the table below. 
You can export to any fo | [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - | | [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | | [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | -| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch`, `dynamic` | | [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | | [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | | [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | diff --git a/docs/en/tasks/obb.md b/docs/en/tasks/obb.md index 4c7b972539..4ce2bb35f4 100644 --- a/docs/en/tasks/obb.md +++ b/docs/en/tasks/obb.md @@ -193,7 +193,7 @@ Available YOLOv8-obb export formats are in the table below. You can export to an | [PyTorch](https://pytorch.org/) | - | `yolov8n-obb.pt` | ✅ | - | | [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n-obb.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | | [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n-obb.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | -| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n-obb_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n-obb_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch`, `dynamic` | | [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n-obb.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | | [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n-obb.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | | [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n-obb_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | diff --git a/docs/en/tasks/pose.md b/docs/en/tasks/pose.md index b78d0c8bf0..ba3e58090a 100644 --- a/docs/en/tasks/pose.md +++ b/docs/en/tasks/pose.md @@ -206,7 +206,7 @@ Available YOLOv8-pose export formats are in the table below. 
You can export to a | [PyTorch](https://pytorch.org/) | - | `yolov8n-pose.pt` | ✅ | - | | [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n-pose.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | | [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n-pose.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | -| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n-pose_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n-pose_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch`, `dynamic` | | [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n-pose.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | | [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n-pose.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | | [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n-pose_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | diff --git a/docs/en/tasks/segment.md b/docs/en/tasks/segment.md index 5e3a126d23..2c168c734e 100644 --- a/docs/en/tasks/segment.md +++ b/docs/en/tasks/segment.md @@ -177,7 +177,7 @@ Available YOLOv8-seg export formats are in the table below. You can export to an | [PyTorch](https://pytorch.org/) | - | `yolov8n-seg.pt` | ✅ | - | | [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n-seg.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | | [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n-seg.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | -| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n-seg_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n-seg_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch`, `dynamic` | | [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n-seg.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | | [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n-seg.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | | [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n-seg_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | diff --git a/docs/en/usage/cfg.md b/docs/en/usage/cfg.md index 9630e1fb0f..ec5bc64a86 100644 --- a/docs/en/usage/cfg.md +++ b/docs/en/usage/cfg.md @@ -223,7 +223,7 @@ Export settings for YOLO models encompass configurations and options related to | `optimize` | `bool` | `False` | Applies optimization for mobile devices when exporting to TorchScript, potentially reducing model size and improving performance. | | `half` | `bool` | `False` | Enables FP16 (half-precision) quantization, reducing model size and potentially speeding up inference on supported hardware. | | `int8` | `bool` | `False` | Activates INT8 quantization, further compressing the model and speeding up inference with minimal accuracy loss, primarily for edge devices. | -| `dynamic` | `bool` | `False` | Allows dynamic input sizes for ONNX and TensorRT exports, enhancing flexibility in handling varying image dimensions. | +| `dynamic` | `bool` | `False` | Allows dynamic input sizes for ONNX, TensorRT and OpenVINO exports, enhancing flexibility in handling varying image dimensions. | | `simplify` | `bool` | `False` | Simplifies the model graph for ONNX exports, potentially improving performance and compatibility. 
| | `opset` | `int` | `None` | Specifies the ONNX opset version for compatibility with different ONNX parsers and runtimes. If not set, uses the latest supported version. | | `workspace` | `float` | `4.0` | Sets the maximum workspace size in GB for TensorRT optimizations, balancing memory usage and performance. | diff --git a/docs/en/usage/cli.md b/docs/en/usage/cli.md index b78b031ab0..a083c6835d 100644 --- a/docs/en/usage/cli.md +++ b/docs/en/usage/cli.md @@ -174,7 +174,7 @@ Available YOLOv8 export formats are in the table below. You can export to any fo | [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - | | [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | | [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | -| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch`, `dynamic` | | [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | | [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | | [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | diff --git a/docs/mkdocs_github_authors.yaml b/docs/mkdocs_github_authors.yaml index 49d50284e1..a99a784223 100644 --- a/docs/mkdocs_github_authors.yaml +++ b/docs/mkdocs_github_authors.yaml @@ -14,6 +14,7 @@ 49699333+dependabot[bot]@users.noreply.github.com: dependabot 52826299+Chayanonjackal@users.noreply.github.com: Chayanonjackal 53246858+hasanghaffari93@users.noreply.github.com: hasanghaffari93 +60036186+mfloto@users.noreply.github.com: mfloto 61612323+Laughing-q@users.noreply.github.com: Laughing-q 62214284+Burhan-Q@users.noreply.github.com: Burhan-Q 68285002+Kayzwer@users.noreply.github.com: Kayzwer diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py index 313c06fd06..4b6386f0d7 100644 --- a/ultralytics/__init__.py +++ b/ultralytics/__init__.py @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license -__version__ = "8.2.73" +__version__ = "8.2.74" import os diff --git a/ultralytics/cfg/trackers/botsort.yaml b/ultralytics/cfg/trackers/botsort.yaml index 0c66dc6ccb..01cebb6478 100644 --- a/ultralytics/cfg/trackers/botsort.yaml +++ b/ultralytics/cfg/trackers/botsort.yaml @@ -7,8 +7,8 @@ track_low_thresh: 0.1 # threshold for the second association new_track_thresh: 0.6 # threshold for init new track if the detection does not match any tracks track_buffer: 30 # buffer to calculate the time when to remove tracks match_thresh: 0.8 # threshold for matching tracks +fuse_score: True # Whether to fuse confidence scores with the iou distances before matching # min_box_area: 10 # threshold for min box areas(for tracker evaluation, not used for now) -# mot20: False # for tracker evaluation(not used for now) # BoT-SORT settings gmc_method: sparseOptFlow # method of global motion compensation diff --git a/ultralytics/cfg/trackers/bytetrack.yaml b/ultralytics/cfg/trackers/bytetrack.yaml index 29d352c6a7..49ab3f697b 100644 --- a/ultralytics/cfg/trackers/bytetrack.yaml +++ b/ultralytics/cfg/trackers/bytetrack.yaml @@ -7,5 +7,5 @@ track_low_thresh: 0.1 # threshold for the second 
association new_track_thresh: 0.6 # threshold for init new track if the detection does not match any tracks track_buffer: 30 # buffer to calculate the time when to remove tracks match_thresh: 0.8 # threshold for matching tracks +fuse_score: True # Whether to fuse confidence scores with the iou distances before matching # min_box_area: 10 # threshold for min box areas(for tracker evaluation, not used for now) -# mot20: False # for tracker evaluation(not used for now) diff --git a/ultralytics/trackers/bot_sort.py b/ultralytics/trackers/bot_sort.py index 31d5e1bec4..862e217d24 100644 --- a/ultralytics/trackers/bot_sort.py +++ b/ultralytics/trackers/bot_sort.py @@ -179,9 +179,8 @@ class BOTSORT(BYTETracker): dists = matching.iou_distance(tracks, detections) dists_mask = dists > self.proximity_thresh - # TODO: mot20 - # if not self.args.mot20: - dists = matching.fuse_score(dists, detections) + if self.args.fuse_score: + dists = matching.fuse_score(dists, detections) if self.args.with_reid and self.encoder is not None: emb_dists = matching.embedding_distance(tracks, detections) / 2.0 diff --git a/ultralytics/trackers/byte_tracker.py b/ultralytics/trackers/byte_tracker.py index e0e5bd618a..7b4dc00f08 100644 --- a/ultralytics/trackers/byte_tracker.py +++ b/ultralytics/trackers/byte_tracker.py @@ -375,9 +375,8 @@ class BYTETracker: def get_dists(self, tracks, detections): """Calculates the distance between tracks and detections using IoU and fuses scores.""" dists = matching.iou_distance(tracks, detections) - # TODO: mot20 - # if not self.args.mot20: - dists = matching.fuse_score(dists, detections) + if self.args.fuse_score: + dists = matching.fuse_score(dists, detections) return dists def multi_predict(self, tracks):
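
As a usage note for the tracker change above, here is a minimal sketch of how the new `fuse_score` option can be exercised end to end. The file name `custom_bytetrack.yaml` is hypothetical; the `fuse_score` key itself is the one added to the tracker YAMLs in this diff.

```python
from ultralytics import YOLO

# Load an official Ultralytics detection model
model = YOLO("yolov8n.pt")

# Track with a custom tracker config. custom_bytetrack.yaml is assumed to be a
# local copy of ultralytics/cfg/trackers/bytetrack.yaml with `fuse_score: False`,
# which skips fusing detection confidences into the IoU distance matrix before
# matching (the `if self.args.fuse_score:` branch added in get_dists above).
results = model.track(source="path/to/video.mp4", tracker="custom_bytetrack.yaml")
```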