Merge branch 'main' into benchmark-format-args

Branch: benchmark-format-args
Authored by Ultralytics Assistant 2 months ago, committed by GitHub
commit a7cd01a7c8
46 files changed (lines changed in parentheses):

1. .github/workflows/format.yml (6)
2. .github/workflows/publish.yml (14)
3. CONTRIBUTING.md (10)
4. README.md (14)
5. README.zh-CN.md (14)
6. docs/README.md (4)
7. docs/coming_soon_template.md (2)
8. docs/en/datasets/index.md (4)
9. docs/en/datasets/segment/coco8-seg.md (2)
10. docs/en/datasets/segment/package-seg.md (2)
11. docs/en/guides/object-blurring.md (4)
12. docs/en/guides/object-counting.md (2)
13. docs/en/guides/optimizing-openvino-latency-vs-throughput-modes.md (8)
14. docs/en/guides/raspberry-pi.md (4)
15. docs/en/guides/security-alarm-system.md (2)
16. docs/en/guides/streamlit-live-inference.md (4)
17. docs/en/guides/triton-inference-server.md (4)
18. docs/en/help/contributing.md (10)
19. docs/en/hub/datasets.md (2)
20. docs/en/hub/index.md (4)
21. docs/en/hub/inference-api.md (2)
22. docs/en/hub/integrations.md (2)
23. docs/en/hub/models.md (4)
24. docs/en/hub/quickstart.md (6)
25. docs/en/integrations/edge-tpu.md (2)
26. docs/en/integrations/openvino.md (86)
27. docs/en/modes/export.md (10)
28. docs/en/modes/index.md (2)
29. docs/en/modes/train.md (6)
30. docs/en/modes/val.md (4)
31. docs/en/tasks/classify.md (2)
32. docs/en/tasks/detect.md (2)
33. docs/en/tasks/obb.md (3)
34. docs/en/tasks/pose.md (3)
35. docs/en/tasks/segment.md (3)
36. docs/en/usage/cli.md (2)
37. docs/en/yolov5/tutorials/train_custom_data.md (2)
38. examples/README.md (2)
39. pyproject.toml (4)
40. tests/test_cli.py (2)
41. ultralytics/__init__.py (2)
42. ultralytics/cfg/models/README.md (8)
43. ultralytics/engine/trainer.py (42)
44. ultralytics/models/sam/predict.py (12)
45. ultralytics/trackers/README.md (2)
46. ultralytics/utils/plotting.py (56)

@@ -38,12 +38,12 @@ jobs:
Join the Ultralytics community where it suits you best. For real-time chat, head to [Discord](https://ultralytics.com/discord) 🎧. Prefer in-depth discussions? Check out [Discourse](https://community.ultralytics.com). Or dive into threads on our [Subreddit](https://reddit.com/r/ultralytics) to share knowledge with the community.
## Install
## Upgrade
Pip install the `ultralytics` package including all [requirements](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) in a [**Python>=3.8**](https://www.python.org/) environment with [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/).
Upgrade to the latest `ultralytics` package including all [requirements](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) in a [**Python>=3.8**](https://www.python.org/) environment with [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/) to verify your issue is not already resolved in the latest version:
```bash
pip install ultralytics
pip install -U ultralytics
```
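A quick way to confirm the upgraded version before re-testing a bug is the package's built-in environment check (a minimal sketch using the exported `checks` helper):

```python
import ultralytics

# Prints the installed ultralytics version plus Python, torch, and CUDA details
ultralytics.checks()
```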
## Environments

@@ -85,6 +85,12 @@ jobs:
if publish:
    print('Ready to publish new version to PyPI ✅.')
id: check_pypi
- name: Publish to PyPI
continue-on-error: true
if: (github.event_name == 'push' || github.event.inputs.pypi == 'true') && steps.check_pypi.outputs.increment == 'True'
run: |
python -m build
python -m twine upload dist/* -u __token__ -p ${{ secrets.PYPI_TOKEN }}
- name: Publish new tag
if: (github.event_name == 'push' || github.event.inputs.pypi == 'true') && steps.check_pypi.outputs.increment == 'True'
run: |
@@ -100,14 +106,6 @@ jobs:
run: |
curl -s "https://raw.githubusercontent.com/ultralytics/actions/main/utils/summarize_release.py" | python -
shell: bash
- name: Publish to PyPI
continue-on-error: true
if: (github.event_name == 'push' || github.event.inputs.pypi == 'true') && steps.check_pypi.outputs.increment == 'True'
env:
PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
run: |
python -m build
python -m twine upload dist/* -u __token__ -p $PYPI_TOKEN
- name: Extract PR Details
env:
GH_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}

@@ -25,7 +25,7 @@ Welcome! We're thrilled that you're considering contributing to our [Ultralytics
## Code of Conduct
To ensure a welcoming and inclusive environment for everyone, all contributors must adhere to our [Code of Conduct](https://docs.ultralytics.com/help/code_of_conduct). Respect, kindness, and professionalism are at the heart of our community.
To ensure a welcoming and inclusive environment for everyone, all contributors must adhere to our [Code of Conduct](https://docs.ultralytics.com/help/code_of_conduct/). Respect, kindness, and professionalism are at the heart of our community.
## Contributing via Pull Requests
@@ -45,7 +45,7 @@ We greatly appreciate contributions in the form of pull requests. To make the re
### CLA Signing
Before we can merge your pull request, you must sign our [Contributor License Agreement (CLA)](https://docs.ultralytics.com/help/CLA). This legal agreement ensures that your contributions are properly licensed, allowing the project to continue being distributed under the AGPL-3.0 license.
Before we can merge your pull request, you must sign our [Contributor License Agreement (CLA)](https://docs.ultralytics.com/help/CLA/). This legal agreement ensures that your contributions are properly licensed, allowing the project to continue being distributed under the AGPL-3.0 license.
After submitting your pull request, the CLA bot will guide you through the signing process. To sign the CLA, simply add a comment in your PR stating:
@@ -117,11 +117,11 @@ def example_small_function(arg1: int, arg2: int = 4) -> bool:
### GitHub Actions CI Tests
All pull requests must pass the GitHub Actions [Continuous Integration](https://docs.ultralytics.com/help/CI) (CI) tests before they can be merged. These tests include linting, unit tests, and other checks to ensure that your changes meet the project's quality standards. Review the CI output and address any issues that arise.
All pull requests must pass the GitHub Actions [Continuous Integration](https://docs.ultralytics.com/help/CI/) (CI) tests before they can be merged. These tests include linting, unit tests, and other checks to ensure that your changes meet the project's quality standards. Review the CI output and address any issues that arise.
## Reporting Bugs
We highly value bug reports as they help us maintain the quality of our projects. When reporting a bug, please provide a [Minimum Reproducible Example](https://docs.ultralytics.com/help/minimum_reproducible_example)—a simple, clear code example that consistently reproduces the issue. This allows us to quickly identify and resolve the problem.
We highly value bug reports as they help us maintain the quality of our projects. When reporting a bug, please provide a [Minimum Reproducible Example](https://docs.ultralytics.com/help/minimum_reproducible_example/)—a simple, clear code example that consistently reproduces the issue. This allows us to quickly identify and resolve the problem.
## License
@@ -163,4 +163,4 @@ the project's quality standards. Review the CI output and fix any issues. For de
### How do I report a bug in Ultralytics YOLO repositories?
To report a bug, provide a clear and concise [Minimum Reproducible Example](https://docs.ultralytics.com/help/minimum_reproducible_example) along with your bug report. This helps developers quickly identify and fix the issue. Ensure your example is minimal yet sufficient to replicate the problem. For more detailed steps on reporting bugs, refer to the [Reporting Bugs](#reporting-bugs) section.
To report a bug, provide a clear and concise [Minimum Reproducible Example](https://docs.ultralytics.com/help/minimum_reproducible_example/) along with your bug report. This helps developers quickly identify and fix the issue. Ensure your example is minimal yet sufficient to replicate the problem. For more detailed steps on reporting bugs, refer to the [Reporting Bugs](#reporting-bugs) section.

@@ -47,7 +47,7 @@ To request an Enterprise License please complete the form at [Ultralytics Licens
## <div align="center">Documentation</div>
See below for a quickstart installation and usage example, and see the [YOLOv8 Docs](https://docs.ultralytics.com) for full documentation on training, validation, prediction and deployment.
See below for a quickstart installation and usage example, and see the [YOLOv8 Docs](https://docs.ultralytics.com/) for full documentation on training, validation, prediction and deployment.
<details open>
<summary>Install</summary>
@@ -60,7 +60,7 @@ Pip install the ultralytics package including all [requirements](https://github.
pip install ultralytics
```
For alternative installation methods including [Conda](https://anaconda.org/conda-forge/ultralytics), [Docker](https://hub.docker.com/r/ultralytics/ultralytics), and Git, please refer to the [Quickstart Guide](https://docs.ultralytics.com/quickstart).
For alternative installation methods including [Conda](https://anaconda.org/conda-forge/ultralytics), [Docker](https://hub.docker.com/r/ultralytics/ultralytics), and Git, please refer to the [Quickstart Guide](https://docs.ultralytics.com/quickstart/).
[![Conda Version](https://img.shields.io/conda/vn/conda-forge/ultralytics?logo=condaforge)](https://anaconda.org/conda-forge/ultralytics) [![Docker Image Version](https://img.shields.io/docker/v/ultralytics/ultralytics?sort=semver&logo=docker)](https://hub.docker.com/r/ultralytics/ultralytics)
@@ -77,7 +77,7 @@ YOLOv8 may be used directly in the Command Line Interface (CLI) with a `yolo` co
yolo predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg'
```
`yolo` can be used for a variety of tasks and modes and accepts additional arguments, i.e. `imgsz=640`. See the YOLOv8 [CLI Docs](https://docs.ultralytics.com/usage/cli) for examples.
`yolo` can be used for a variety of tasks and modes and accepts additional arguments, i.e. `imgsz=640`. See the YOLOv8 [CLI Docs](https://docs.ultralytics.com/usage/cli/) for examples.
### Python
@@ -97,7 +97,7 @@ results = model("https://ultralytics.com/images/bus.jpg") # predict on an image
path = model.export(format="onnx") # export the model to ONNX format
```
See YOLOv8 [Python Docs](https://docs.ultralytics.com/usage/python) for more examples.
See YOLOv8 [Python Docs](https://docs.ultralytics.com/usage/python/) for more examples.
</details>
@@ -116,7 +116,7 @@ Ultralytics provides interactive notebooks for YOLOv8, covering training, valida
## <div align="center">Models</div>
YOLOv8 [Detect](https://docs.ultralytics.com/tasks/detect), [Segment](https://docs.ultralytics.com/tasks/segment) and [Pose](https://docs.ultralytics.com/tasks/pose) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco) dataset are available here, as well as YOLOv8 [Classify](https://docs.ultralytics.com/tasks/classify) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet) dataset. [Track](https://docs.ultralytics.com/modes/track) mode is available for all Detect, Segment and Pose models.
YOLOv8 [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/) and [Pose](https://docs.ultralytics.com/tasks/pose/) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset are available here, as well as YOLOv8 [Classify](https://docs.ultralytics.com/tasks/classify/) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) dataset. [Track](https://docs.ultralytics.com/modes/track/) mode is available for all Detect, Segment and Pose models.
<img width="1024" src="https://raw.githubusercontent.com/ultralytics/assets/main/im/banner-tasks.png" alt="Ultralytics YOLO supported tasks">
@@ -227,7 +227,7 @@ See [Classification Docs](https://docs.ultralytics.com/tasks/classify/) for usag
## <div align="center">Integrations</div>
Our key integrations with leading AI platforms extend the functionality of Ultralytics' offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics, in collaboration with [Roboflow](https://roboflow.com/?ref=ultralytics), ClearML, [Comet](https://bit.ly/yolov8-readme-comet), Neural Magic and [OpenVINO](https://docs.ultralytics.com/integrations/openvino), can optimize your AI workflow.
Our key integrations with leading AI platforms extend the functionality of Ultralytics' offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics, in collaboration with [Roboflow](https://roboflow.com/?ref=ultralytics), ClearML, [Comet](https://bit.ly/yolov8-readme-comet), Neural Magic and [OpenVINO](https://docs.ultralytics.com/integrations/openvino/), can optimize your AI workflow.
<br>
<a href="https://ultralytics.com/hub" target="_blank">
@@ -262,7 +262,7 @@ Experience seamless AI with [Ultralytics HUB](https://www.ultralytics.com/hub)
## <div align="center">Contribute</div>
We love your input! YOLOv5 and YOLOv8 would not be possible without help from our community. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing) to get started, and fill out our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you 🙏 to all our contributors!
We love your input! YOLOv5 and YOLOv8 would not be possible without help from our community. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you 🙏 to all our contributors!
<!-- SVG image from https://opencollective.com/ultralytics/contributors.svg?width=990 -->

@@ -49,7 +49,7 @@
## <div align="center">Documentation</div>
See below for quickstart installation and usage examples, and see the [YOLOv8 Docs](https://docs.ultralytics.com) for full documentation on training, validation, prediction, and deployment.
See below for quickstart installation and usage examples, and see the [YOLOv8 Docs](https://docs.ultralytics.com/) for full documentation on training, validation, prediction, and deployment.
<details open>
<summary>Install</summary>
@@ -62,7 +62,7 @@
pip install ultralytics
```
For alternative installation methods including [Conda](https://anaconda.org/conda-forge/ultralytics), [Docker](https://hub.docker.com/r/ultralytics/ultralytics), and Git, please refer to the [Quickstart Guide](https://docs.ultralytics.com/quickstart).
For alternative installation methods including [Conda](https://anaconda.org/conda-forge/ultralytics), [Docker](https://hub.docker.com/r/ultralytics/ultralytics), and Git, please refer to the [Quickstart Guide](https://docs.ultralytics.com/quickstart/).
[![Conda Version](https://img.shields.io/conda/vn/conda-forge/ultralytics?logo=condaforge)](https://anaconda.org/conda-forge/ultralytics) [![Docker Image Version](https://img.shields.io/docker/v/ultralytics/ultralytics?sort=semver&logo=docker)](https://hub.docker.com/r/ultralytics/ultralytics)
@@ -79,7 +79,7 @@ YOLOv8 may be used directly in the Command Line Interface (CLI) by simply entering `yolo`
yolo predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg'
```
`yolo` can be used for a variety of tasks and modes and accepts additional arguments, e.g. `imgsz=640`. See the YOLOv8 [CLI Docs](https://docs.ultralytics.com/usage/cli) for examples.
`yolo` can be used for a variety of tasks and modes and accepts additional arguments, e.g. `imgsz=640`. See the YOLOv8 [CLI Docs](https://docs.ultralytics.com/usage/cli/) for examples.
### Python
@@ -99,7 +99,7 @@ results = model("https://ultralytics.com/images/bus.jpg") # predict on an image
success = model.export(format="onnx") # export the model to ONNX format
```
See the YOLOv8 [Python Docs](https://docs.ultralytics.com/usage/python) for more examples.
See the YOLOv8 [Python Docs](https://docs.ultralytics.com/usage/python/) for more examples.
</details>
@@ -118,7 +118,7 @@ Ultralytics provides interactive notebooks for YOLOv8, covering training, validation, tracking
## <div align="center">Models</div>
YOLOv8 [Detect](https://docs.ultralytics.com/tasks/detect), [Segment](https://docs.ultralytics.com/tasks/segment) and [Pose](https://docs.ultralytics.com/tasks/pose) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco) dataset are available here, as well as YOLOv8 [Classify](https://docs.ultralytics.com/tasks/classify) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet) dataset. [Track](https://docs.ultralytics.com/modes/track) mode is available for all Detect, Segment, and Pose models.
YOLOv8 [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/) and [Pose](https://docs.ultralytics.com/tasks/pose/) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset are available here, as well as YOLOv8 [Classify](https://docs.ultralytics.com/tasks/classify/) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) dataset. [Track](https://docs.ultralytics.com/modes/track/) mode is available for all Detect, Segment, and Pose models.
<img width="1024" src="https://raw.githubusercontent.com/ultralytics/assets/main/im/banner-tasks.png" alt="Ultralytics YOLO supported tasks">
@@ -229,7 +229,7 @@ Ultralytics provides interactive notebooks for YOLOv8, covering training, validation, tracking
## <div align="center">Integrations</div>
Our key integrations with leading AI platforms extend the functionality of Ultralytics' offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics, in collaboration with [Roboflow](https://roboflow.com/?ref=ultralytics), ClearML, [Comet](https://bit.ly/yolov8-readme-comet), Neural Magic, and [OpenVINO](https://docs.ultralytics.com/integrations/openvino), can optimize your AI workflow.
Our key integrations with leading AI platforms extend the functionality of Ultralytics' offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics, in collaboration with [Roboflow](https://roboflow.com/?ref=ultralytics), ClearML, [Comet](https://bit.ly/yolov8-readme-comet), Neural Magic, and [OpenVINO](https://docs.ultralytics.com/integrations/openvino/), can optimize your AI workflow.
<br>
<a href="https://ultralytics.com/hub" target="_blank">
@@ -264,7 +264,7 @@ Ultralytics provides interactive notebooks for YOLOv8, covering training, validation, tracking
## <div align="center">Contribute</div>
We love your input! YOLOv5 and YOLOv8 would not be possible without help from our community. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing) to get started, and fill out our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you 🙏 to all our contributors!
We love your input! YOLOv5 and YOLOv8 would not be possible without help from our community. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you 🙏 to all our contributors!
<!-- SVG image from https://opencollective.com/ultralytics/contributors.svg?width=990 -->

@@ -3,7 +3,7 @@
# 📚 Ultralytics Docs
[Ultralytics](https://www.ultralytics.com/) Docs are the gateway to understanding and utilizing our cutting-edge machine learning tools. These documents are deployed to [https://docs.ultralytics.com](https://docs.ultralytics.com) for your convenience.
[Ultralytics](https://www.ultralytics.com/) Docs are the gateway to understanding and utilizing our cutting-edge machine learning tools. These documents are deployed to [https://docs.ultralytics.com](https://docs.ultralytics.com/) for your convenience.
[![pages-build-deployment](https://github.com/ultralytics/docs/actions/workflows/pages/pages-build-deployment/badge.svg)](https://github.com/ultralytics/docs/actions/workflows/pages/pages-build-deployment)
[![Check Broken links](https://github.com/ultralytics/docs/actions/workflows/links.yml/badge.svg)](https://github.com/ultralytics/docs/actions/workflows/links.yml)
@@ -113,7 +113,7 @@ Choose a hosting provider and deployment method for your MkDocs documentation:
## 💡 Contribute
We cherish the community's input as it drives Ultralytics open-source initiatives. Dive into the [Contributing Guide](https://docs.ultralytics.com/help/contributing) and share your thoughts via our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey). A heartfelt thank you 🙏 to each contributor!
We cherish the community's input as it drives Ultralytics open-source initiatives. Dive into the [Contributing Guide](https://docs.ultralytics.com/help/contributing/) and share your thoughts via our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey). A heartfelt thank you 🙏 to each contributor!
![Ultralytics open-source contributors](https://github.com/ultralytics/docs/releases/download/0/ultralytics-open-source-contributors.avif)

@@ -27,7 +27,7 @@ Your feedback shapes our future releases. Share your thoughts and suggestions [h
## Thank You, Community! 🌍
Your [contributions](https://docs.ultralytics.com/help/contributing) inspire our continuous [innovation](https://github.com/ultralytics/ultralytics). Stay tuned for the big reveal of what's next in AI and ML at Ultralytics!
Your [contributions](https://docs.ultralytics.com/help/contributing/) inspire our continuous [innovation](https://github.com/ultralytics/ultralytics). Stay tuned for the big reveal of what's next in AI and ML at Ultralytics!
---

@@ -123,7 +123,7 @@ Contributing a new dataset involves several steps to ensure that it aligns well
5. **Create a `data.yaml` File**: In your dataset's root directory, create a `data.yaml` file that describes the dataset, classes, and other necessary information.
6. **Optimize Images (Optional)**: If you want to reduce the size of the dataset for more efficient processing, you can optimize the images using the code below. This is not required, but recommended for smaller dataset sizes and faster download speeds.
7. **Zip Dataset**: Compress the entire dataset folder into a zip file.
8. **Document and PR**: Create a documentation page describing your dataset and how it fits into the existing framework. After that, submit a Pull Request (PR). Refer to [Ultralytics Contribution Guidelines](https://docs.ultralytics.com/help/contributing) for more details on how to submit a PR.
8. **Document and PR**: Create a documentation page describing your dataset and how it fits into the existing framework. After that, submit a Pull Request (PR). Refer to [Ultralytics Contribution Guidelines](https://docs.ultralytics.com/help/contributing/) for more details on how to submit a PR.
### Example Code to Optimize and Zip a Dataset
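The code sample under this heading is not shown in this excerpt; as a rough sketch of the idea using helpers that ship with the `ultralytics` package (treat the dataset path as a placeholder):

```python
from pathlib import Path

from ultralytics.data.utils import compress_one_image
from ultralytics.utils.downloads import zip_directory

path = "path/to/dataset"  # placeholder: your dataset root directory

# Optimize each image in place to reduce dataset size (optional)
for f in Path(path).rglob("*.jpg"):
    compress_one_image(f)

# Compress the dataset directory into 'path/to/dataset.zip'
zip_directory(path)
```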
@@ -175,7 +175,7 @@ Contributing a new dataset involves several steps:
5. **Create a `data.yaml` File**: Include dataset descriptions, classes, and other relevant information.
6. **Optimize Images (Optional)**: Reduce dataset size for efficiency.
7. **Zip Dataset**: Compress the dataset into a zip file.
8. **Document and PR**: Describe your dataset and submit a Pull Request following [Ultralytics Contribution Guidelines](https://docs.ultralytics.com/help/contributing).
8. **Document and PR**: Describe your dataset and submit a Pull Request following [Ultralytics Contribution Guidelines](https://docs.ultralytics.com/help/contributing/).
Visit [Contribute New Datasets](#contribute-new-datasets) for a comprehensive guide.

@@ -113,7 +113,7 @@ For a thorough explanation of available arguments and configuration options, you
### Why is the COCO8-Seg dataset important for model development and debugging?
The **COCO8-Seg dataset** is ideal for its manageability and diversity within a small size. It consists of only 8 images, providing a quick way to test and debug segmentation models or new detection approaches without the overhead of larger datasets. This makes it an efficient tool for sanity checks and pipeline error identification before committing to extensive training on large datasets. Learn more about dataset formats [here](https://docs.ultralytics.com/datasets/segment).
The **COCO8-Seg dataset** is ideal for its manageability and diversity within a small size. It consists of only 8 images, providing a quick way to test and debug segmentation models or new detection approaches without the overhead of larger datasets. This makes it an efficient tool for sanity checks and pipeline error identification before committing to extensive training on large datasets. Learn more about dataset formats [here](https://docs.ultralytics.com/datasets/segment/).
### Where can I find the YAML configuration file for the COCO8-Seg dataset?

@@ -136,7 +136,7 @@ This structure ensures a balanced dataset for thorough model training, validatio
### Why should I use Ultralytics YOLOv8 with the Package Segmentation Dataset?
Ultralytics YOLOv8 provides state-of-the-art accuracy and speed for real-time object detection and segmentation tasks. Using it with the Package Segmentation Dataset allows you to leverage YOLOv8's capabilities for precise package segmentation. This combination is especially beneficial for industries like logistics and warehouse automation, where accurate package identification is critical. For more information, check out our [page on YOLOv8 segmentation](https://docs.ultralytics.com/models/yolov8).
Ultralytics YOLOv8 provides state-of-the-art accuracy and speed for real-time object detection and segmentation tasks. Using it with the Package Segmentation Dataset allows you to leverage YOLOv8's capabilities for precise package segmentation. This combination is especially beneficial for industries like logistics and warehouse automation, where accurate package identification is critical. For more information, check out our [page on YOLOv8 segmentation](https://docs.ultralytics.com/models/yolov8/).
### How can I access and use the package-seg.yaml file for the Package Segmentation Dataset?

@@ -132,8 +132,8 @@ For more detailed applications, check the [advantages of object blurring section
### Can I use Ultralytics YOLOv8 to blur faces in a video for privacy reasons?
Yes, Ultralytics YOLOv8 can be configured to detect and blur faces in videos to protect privacy. By training or using a pre-trained model to specifically recognize faces, the detection results can be processed with OpenCV to apply a blur effect. Refer to our guide on [object detection with YOLOv8](https://docs.ultralytics.com/models/yolov8) and modify the code to target face detection.
Yes, Ultralytics YOLOv8 can be configured to detect and blur faces in videos to protect privacy. By training or using a pre-trained model to specifically recognize faces, the detection results can be processed with OpenCV to apply a blur effect. Refer to our guide on [object detection with YOLOv8](https://docs.ultralytics.com/models/yolov8/) and modify the code to target face detection.
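To make the detect-then-blur flow concrete, here is a minimal sketch; the default weights and blur kernel size are assumptions, and a face-trained model would replace `yolov8n.pt` in practice:

```python
import cv2

from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # assumption: substitute a face-trained detection model
im = cv2.imread("image.jpg")

# Blur every detected bounding-box region with OpenCV
for x1, y1, x2, y2 in model(im)[0].boxes.xyxy.int().tolist():
    im[y1:y2, x1:x2] = cv2.blur(im[y1:y2, x1:x2], (35, 35))

cv2.imwrite("image_blurred.jpg", im)
```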
### How does YOLOv8 compare to other object detection models like Faster R-CNN for object blurring?
Ultralytics YOLOv8 typically outperforms models like Faster R-CNN in terms of speed, making it more suitable for real-time applications. While both models offer accurate detection, YOLOv8's architecture is optimized for rapid inference, which is critical for tasks like real-time object blurring. Learn more about the technical differences and performance metrics in our [YOLOv8 documentation](https://docs.ultralytics.com/models/yolov8).
Ultralytics YOLOv8 typically outperforms models like Faster R-CNN in terms of speed, making it more suitable for real-time applications. While both models offer accurate detection, YOLOv8's architecture is optimized for rapid inference, which is critical for tasks like real-time object blurring. Learn more about the technical differences and performance metrics in our [YOLOv8 documentation](https://docs.ultralytics.com/models/yolov8/).

@@ -349,7 +349,7 @@ Ultralytics YOLOv8 provides several advantages over other object detection model
3. **Ease of Integration:** YOLOv8 offers seamless integration with various platforms and devices, including mobile and edge devices, which is crucial for modern AI applications.
4. **Flexibility:** Supports various tasks like object detection, segmentation, and tracking with configurable models to meet specific use-case requirements.
Check out Ultralytics [YOLOv8 Documentation](https://docs.ultralytics.com/models/yolov8) for a deeper dive into its features and performance comparisons.
Check out Ultralytics [YOLOv8 Documentation](https://docs.ultralytics.com/models/yolov8/) for a deeper dive into its features and performance comparisons.
### Can I use YOLOv8 for advanced applications like crowd analysis and traffic management?

@@ -121,8 +121,8 @@ Using OpenVINO's high-level performance hints and multi-device modes can help st
Yes, Ultralytics YOLO models are highly versatile and can be integrated with various AI frameworks. Options include:
- **TensorRT:** For NVIDIA GPU optimization, follow the [TensorRT integration guide](https://docs.ultralytics.com/integrations/tensorrt).
- **CoreML:** For Apple devices, refer to our [CoreML export instructions](https://docs.ultralytics.com/integrations/coreml).
- **TensorFlow.js:** For web and Node.js apps, see the [TF.js conversion guide](https://docs.ultralytics.com/integrations/tfjs).
- **TensorRT:** For NVIDIA GPU optimization, follow the [TensorRT integration guide](https://docs.ultralytics.com/integrations/tensorrt/).
- **CoreML:** For Apple devices, refer to our [CoreML export instructions](https://docs.ultralytics.com/integrations/coreml/).
- **TensorFlow.js:** For web and Node.js apps, see the [TF.js conversion guide](https://docs.ultralytics.com/integrations/tfjs/).
Explore more integrations on the [Ultralytics Integrations page](https://docs.ultralytics.com/integrations).
Explore more integrations on the [Ultralytics Integrations page](https://docs.ultralytics.com/integrations/).

@@ -94,7 +94,7 @@ Here we will install Ultralytics package on the Raspberry Pi with optional depen
## Use NCNN on Raspberry Pi
Out of all the model export formats supported by Ultralytics, [NCNN](https://docs.ultralytics.com/integrations/ncnn) delivers the best inference performance when working with Raspberry Pi devices because NCNN is highly optimized for mobile/embedded platforms (such as ARM architecture). Therefore, our recommendation is to use NCNN with Raspberry Pi.
Out of all the model export formats supported by Ultralytics, [NCNN](https://docs.ultralytics.com/integrations/ncnn/) delivers the best inference performance when working with Raspberry Pi devices because NCNN is highly optimized for mobile/embedded platforms (such as ARM architecture). Therefore, our recommendation is to use NCNN with Raspberry Pi.
## Convert Model to NCNN and Run Inference
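As a sketch of the conversion and inference flow this section introduces (standard Ultralytics export API, using the nano model referenced elsewhere on the page):

```python
from ultralytics import YOLO

# Export the PyTorch model to NCNN format (creates 'yolov8n_ncnn_model')
model = YOLO("yolov8n.pt")
model.export(format="ncnn")

# Reload the exported NCNN model and run inference
ncnn_model = YOLO("yolov8n_ncnn_model")
results = ncnn_model("https://ultralytics.com/images/bus.jpg")
```

Exporting once and reloading the NCNN directory keeps the ARM-optimized runtime on the inference path.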
@@ -132,7 +132,7 @@ The YOLOv8n model in PyTorch format is converted to NCNN to run inference with t
!!! tip
For more details about supported export options, visit the [Ultralytics documentation page on deployment options](https://docs.ultralytics.com/guides/model-deployment-options).
For more details about supported export options, visit the [Ultralytics documentation page on deployment options](https://docs.ultralytics.com/guides/model-deployment-options/).
## Raspberry Pi 5 vs Raspberry Pi 4 YOLOv8 Benchmarks

@@ -193,7 +193,7 @@ Running Ultralytics YOLOv8 on a standard setup typically requires around 5GB of
### What makes Ultralytics YOLOv8 different from other object detection models like Faster R-CNN or SSD?
Ultralytics YOLOv8 provides an edge over models like Faster R-CNN or SSD with its real-time detection capabilities and higher accuracy. Its unique architecture allows it to process images much faster without compromising on precision, making it ideal for time-sensitive applications like security alarm systems. For a comprehensive comparison of object detection models, you can explore our [guide](https://docs.ultralytics.com/models).
Ultralytics YOLOv8 provides an edge over models like Faster R-CNN or SSD with its real-time detection capabilities and higher accuracy. Its unique architecture allows it to process images much faster without compromising on precision, making it ideal for time-sensitive applications like security alarm systems. For a comprehensive comparison of object detection models, you can explore our [guide](https://docs.ultralytics.com/models/).
### How can I reduce the frequency of false positives in my security system using Ultralytics YOLOv8?

@@ -152,7 +152,7 @@ Real-time object detection using Streamlit and Ultralytics YOLOv8 can be applied
- **Retail**: Customer counting, shelf management, and more.
- **Wildlife and Agriculture**: Monitoring animals and crop conditions.
For more in-depth use cases and examples, explore [Ultralytics Solutions](https://docs.ultralytics.com/solutions).
For more in-depth use cases and examples, explore [Ultralytics Solutions](https://docs.ultralytics.com/solutions/).
### How does Ultralytics YOLOv8 compare to other object detection models like YOLOv5 and RCNNs?
@@ -162,4 +162,4 @@ Ultralytics YOLOv8 provides several enhancements over prior models like YOLOv5 a
- **Ease of Use**: Simplified interfaces and deployment.
- **Resource Efficiency**: Optimized for better speed with minimal computational requirements.
For a comprehensive comparison, check [Ultralytics YOLOv8 Documentation](https://docs.ultralytics.com/models/yolov8) and related blog posts discussing model performance.
For a comprehensive comparison, check [Ultralytics YOLOv8 Documentation](https://docs.ultralytics.com/models/yolov8/) and related blog posts discussing model performance.

@@ -147,7 +147,7 @@ By following the above steps, you can deploy and run Ultralytics YOLOv8 models e
### How do I set up Ultralytics YOLOv8 with NVIDIA Triton Inference Server?
Setting up [Ultralytics YOLOv8](https://docs.ultralytics.com/models/yolov8) with [NVIDIA Triton Inference Server](https://developer.nvidia.com/triton-inference-server) involves a few key steps:
Setting up [Ultralytics YOLOv8](https://docs.ultralytics.com/models/yolov8/) with [NVIDIA Triton Inference Server](https://developer.nvidia.com/triton-inference-server) involves a few key steps:
1. **Export YOLOv8 to ONNX format**:
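The export snippet for this step is truncated in this excerpt; a minimal sketch, assuming dynamic input shapes are wanted for serving:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")

# Export to ONNX with dynamic input shapes for Triton serving
onnx_file = model.export(format="onnx", dynamic=True)
```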
@@ -258,7 +258,7 @@ For an in-depth guide on setting up and running Triton Server with YOLOv8, refer
### How does Ultralytics YOLOv8 compare to TensorFlow and PyTorch models for deployment?
[Ultralytics YOLOv8](https://docs.ultralytics.com/models/yolov8) offers several unique advantages compared to TensorFlow and PyTorch models for deployment:
[Ultralytics YOLOv8](https://docs.ultralytics.com/models/yolov8/) offers several unique advantages compared to TensorFlow and PyTorch models for deployment:
- **Real-time Performance**: Optimized for real-time object detection tasks, YOLOv8 provides state-of-the-art accuracy and speed, making it ideal for applications requiring live video analytics.
- **Ease of Use**: YOLOv8 integrates seamlessly with Triton Inference Server and supports diverse export formats (ONNX, TensorRT, CoreML), making it flexible for various deployment scenarios.

@@ -25,7 +25,7 @@ Welcome! We're thrilled that you're considering contributing to our [Ultralytics
## Code of Conduct
To ensure a welcoming and inclusive environment for everyone, all contributors must adhere to our [Code of Conduct](https://docs.ultralytics.com/help/code_of_conduct). Respect, kindness, and professionalism are at the heart of our community.
To ensure a welcoming and inclusive environment for everyone, all contributors must adhere to our [Code of Conduct](https://docs.ultralytics.com/help/code_of_conduct/). Respect, kindness, and professionalism are at the heart of our community.
## Contributing via Pull Requests
@@ -45,7 +45,7 @@ We greatly appreciate contributions in the form of pull requests. To make the re
### CLA Signing
Before we can merge your pull request, you must sign our [Contributor License Agreement (CLA)](https://docs.ultralytics.com/help/CLA). This legal agreement ensures that your contributions are properly licensed, allowing the project to continue being distributed under the AGPL-3.0 license.
Before we can merge your pull request, you must sign our [Contributor License Agreement (CLA)](https://docs.ultralytics.com/help/CLA/). This legal agreement ensures that your contributions are properly licensed, allowing the project to continue being distributed under the AGPL-3.0 license.
After submitting your pull request, the CLA bot will guide you through the signing process. To sign the CLA, simply add a comment in your PR stating:
@@ -119,11 +119,11 @@ When adding new functions or classes, please include [Google-style docstrings](h
### GitHub Actions CI Tests
All pull requests must pass the GitHub Actions [Continuous Integration](https://docs.ultralytics.com/help/CI) (CI) tests before they can be merged. These tests include linting, unit tests, and other checks to ensure that your changes meet the project's quality standards. Review the CI output and address any issues that arise.
All pull requests must pass the GitHub Actions [Continuous Integration](https://docs.ultralytics.com/help/CI/) (CI) tests before they can be merged. These tests include linting, unit tests, and other checks to ensure that your changes meet the project's quality standards. Review the CI output and address any issues that arise.
## Reporting Bugs
We highly value bug reports as they help us maintain the quality of our projects. When reporting a bug, please provide a [Minimum Reproducible Example](https://docs.ultralytics.com/help/minimum_reproducible_example)—a simple, clear code example that consistently reproduces the issue. This allows us to quickly identify and resolve the problem.
We highly value bug reports as they help us maintain the quality of our projects. When reporting a bug, please provide a [Minimum Reproducible Example](https://docs.ultralytics.com/help/minimum_reproducible_example/)—a simple, clear code example that consistently reproduces the issue. This allows us to quickly identify and resolve the problem.
## License
@@ -165,4 +165,4 @@ the project's quality standards. Review the CI output and fix any issues. For de
### How do I report a bug in Ultralytics YOLO repositories?
To report a bug, provide a clear and concise [Minimum Reproducible Example](https://docs.ultralytics.com/help/minimum_reproducible_example) along with your bug report. This helps developers quickly identify and fix the issue. Ensure your example is minimal yet sufficient to replicate the problem. For more detailed steps on reporting bugs, refer to the [Reporting Bugs](#reporting-bugs) section.
To report a bug, provide a clear and concise [Minimum Reproducible Example](https://docs.ultralytics.com/help/minimum_reproducible_example/) along with your bug report. This helps developers quickly identify and fix the issue. Ensure your example is minimal yet sufficient to replicate the problem. For more detailed steps on reporting bugs, refer to the [Reporting Bugs](#reporting-bugs) section.

@@ -26,7 +26,7 @@ Once uploaded, datasets can be immediately utilized for model training. This int
Before you upload a dataset to [Ultralytics HUB](https://www.ultralytics.com/hub), make sure to **place your dataset YAML file inside the dataset root directory** and that **your dataset YAML, directory and ZIP have the same name**, as shown in the example below, and then zip the dataset directory.
For example, if your dataset is called "coco8", as our [COCO8](https://docs.ultralytics.com/datasets/detect/coco8) example dataset, then you should have a `coco8.yaml` inside your `coco8/` directory, which will create a `coco8.zip` when zipped:
For example, if your dataset is called "coco8", as our [COCO8](https://docs.ultralytics.com/datasets/detect/coco8/) example dataset, then you should have a `coco8.yaml` inside your `coco8/` directory, which will create a `coco8.zip` when zipped:
```bash
zip -r coco8.zip coco8

@@ -120,7 +120,7 @@ Ultralytics HUB allows you to manage and organize your projects efficiently. You
Ultralytics HUB offers seamless integrations with various platforms to enhance your machine learning workflows. Some key integrations include:
- **Roboflow:** For dataset management and model training. Learn more on the [Integrations](integrations.md) page.
- **Google Colab:** Efficiently train models using Google Colab's cloud-based environment. Detailed steps are available in the [Google Colab](https://docs.ultralytics.com/integrations/google-colab) section.
- **Weights & Biases:** For enhanced experiment tracking and visualization. Explore the [Weights & Biases](https://docs.ultralytics.com/integrations/weights-biases) integration.
- **Google Colab:** Efficiently train models using Google Colab's cloud-based environment. Detailed steps are available in the [Google Colab](https://docs.ultralytics.com/integrations/google-colab/) section.
- **Weights & Biases:** For enhanced experiment tracking and visualization. Explore the [Weights & Biases](https://docs.ultralytics.com/integrations/weights-biases/) integration.
For a complete list of integrations, refer to the [Integrations](integrations.md) page.

@@ -39,7 +39,7 @@ To use the [Ultralytics HUB](https://www.ultralytics.com/hub) Dedicated Inferenc
!!! tip
Choose the region with the lowest latency for the best performance as described in the [documentation](https://docs.ultralytics.com/reference/hub/google/__init__).
Choose the region with the lowest latency for the best performance as described in the [documentation](https://docs.ultralytics.com/reference/hub/google/__init__/).
To shut down the dedicated endpoint, click on the **Stop Endpoint** button.

@@ -124,4 +124,4 @@ Your feedback shapes our future releases. Share your thoughts and suggestions [h
## Thank You, Community! 🌍
Your [contributions](https://docs.ultralytics.com/help/contributing) inspire our continuous [innovation](https://github.com/ultralytics/ultralytics). Stay tuned for the big reveal of what's next in AI and ML at Ultralytics!
Your [contributions](https://docs.ultralytics.com/help/contributing/) inspire our continuous [innovation](https://github.com/ultralytics/ultralytics). Stay tuned for the big reveal of what's next in AI and ML at Ultralytics!

@@ -66,9 +66,9 @@ In this step, you have to choose the project in which you want to create your mo
!!! info
You can read more about the available [YOLOv8](https://docs.ultralytics.com/models/yolov8) (and [YOLOv5](https://docs.ultralytics.com/models/yolov5)) architectures in our documentation.
You can read more about the available [YOLOv8](https://docs.ultralytics.com/models/yolov8/) (and [YOLOv5](https://docs.ultralytics.com/models/yolov5/)) architectures in our documentation.
By default, your model will use a pre-trained model (trained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco) dataset) to reduce training time. You can change this behavior and tweak your model's configuration by opening the **Advanced Model Configuration** accordion.
By default, your model will use a pre-trained model (trained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset) to reduce training time. You can change this behavior and tweak your model's configuration by opening the **Advanced Model Configuration** accordion.
![Ultralytics HUB screenshot of the Train Model dialog with an arrow pointing to the Advanced Model Configuration accordion](https://github.com/ultralytics/docs/releases/download/0/ultralytics-hub-train-model-dialog-2.avif)

@@ -54,7 +54,7 @@ You can upload a dataset directly from the [Home](https://hub.ultralytics.com/ho
![Ultralytics HUB screenshot of the Home page with an arrow pointing to the Upload Dataset card](https://github.com/ultralytics/docs/releases/download/0/ultralytics-hub-upload-dataset-card.avif)
Read more about [datasets](https://docs.ultralytics.com/hub/datasets).
Read more about [datasets](https://docs.ultralytics.com/hub/datasets/).
### Create Project
@@ -62,7 +62,7 @@ You can create a project directly from the [Home](https://hub.ultralytics.com/ho
![Ultralytics HUB screenshot of the Home page with an arrow pointing to the Create Project card](https://github.com/ultralytics/docs/releases/download/0/hub-create-project-card.avif)
Read more about [projects](https://docs.ultralytics.com/hub/projects).
Read more about [projects](https://docs.ultralytics.com/hub/projects/).
### Train Model
@@ -70,7 +70,7 @@ You can train a model directly from the [Home](https://hub.ultralytics.com/home)
![Ultralytics HUB screenshot of the Home page with an arrow pointing to the Train Model card](https://github.com/ultralytics/docs/releases/download/0/ultralytics-hub-train-model-card.avif)
Read more about [models](https://docs.ultralytics.com/hub/models).
Read more about [models](https://docs.ultralytics.com/hub/models/).
## Feedback

@@ -113,7 +113,7 @@ However, for in-depth instructions on deploying your TFLite Edge TPU models, tak
In this guide, we've learned how to export Ultralytics YOLOv8 models to TFLite Edge TPU format. By following the steps mentioned above, you can increase the speed and power of your computer vision applications.
For further details on usage, visit the [Edge TPU official website](https://cloud.google.com/edge-tpu).
For further details on usage, visit the [Edge TPU official website](https://cloud.google.com/tpu).
Also, for more information on other Ultralytics YOLOv8 integrations, please visit our [integration guide page](index.md). There, you'll discover valuable resources and insights.

@@ -251,6 +251,92 @@ Benchmarks below run on 13th Gen Intel® Core® i7-13700H CPU at FP32 precision.
| YOLOv8x | ONNX | ✅ | 260.4 | 0.6650 | 526.66 |
| YOLOv8x | OpenVINO | ✅ | 260.6 | 0.6619 | 158.73 |
### Intel Ultra 7 155H Meteor Lake CPU
The Intel® Ultra™ 7 155H represents a new benchmark in high-performance computing, designed to cater to the most demanding users, from gamers to content creators. The Ultra™ 7 155H is not just a CPU; it integrates a powerful GPU and an advanced NPU (Neural Processing Unit) within a single chip, offering a comprehensive solution for diverse computing needs.
This hybrid architecture allows the Ultra™ 7 155H to excel in both traditional CPU tasks and GPU-accelerated workloads, while the NPU enhances AI-driven processes, enabling faster and more efficient machine learning operations. This makes the Ultra™ 7 155H a versatile choice for applications requiring high-performance graphics, complex computations, and AI inference.
The Ultra™ 7 series includes multiple models, each offering different levels of performance, with the 'H' designation indicating a high-power variant suitable for laptops and compact devices. Early benchmarks have highlighted the exceptional performance of the Ultra™ 7 155H, particularly in multitasking environments, where the combined power of the CPU, GPU, and NPU leads to remarkable efficiency and speed.
As part of Intel's commitment to cutting-edge technology, the Ultra™ 7 155H is designed to meet the needs of future computing, with more models expected to be released. The availability of the Ultra™ 7 155H varies by region, and it continues to receive praise for its integration of three powerful processing units in a single chip, setting new standards in computing performance.
Benchmarks below run on Intel® Ultra™ 7 155H at FP32 and INT8 precision.
!!! tip "Benchmarks"
=== "Integrated Intel® Arc™ GPU"
| Model | Format | Precision | Status | Size (MB) | metrics/mAP50-95(B) | Inference time (ms/im) |
| ------- | ----------- | --------- | ------ | --------- | ------------------- | ---------------------- |
| YOLOv8n | PyTorch | FP32 | ✅ | 6.2 | 0.6381 | 35.95 |
| YOLOv8n | OpenVINO | FP32 | ✅ | 12.3 | 0.6117 | 8.32 |
| YOLOv8n | OpenVINO | INT8 | ✅ | 3.6 | 0.5791 | 9.88 |
| YOLOv8s | PyTorch | FP32 | ✅ | 21.5 | 0.6967 | 79.72 |
| YOLOv8s | OpenVINO | FP32 | ✅ | 42.9 | 0.7136 | 13.37 |
| YOLOv8s | OpenVINO | INT8 | ✅ | 11.2 | 0.7086 | 9.96 |
| YOLOv8m | PyTorch | FP32 | ✅ | 49.7 | 0.737 | 202.05 |
| YOLOv8m | OpenVINO | FP32 | ✅ | 99.1 | 0.7331 | 28.07 |
| YOLOv8m | OpenVINO | INT8 | ✅ | 25.5 | 0.7259 | 21.11 |
| YOLOv8l | PyTorch | FP32 | ✅ | 83.7 | 0.7769 | 393.37 |
| YOLOv8l | OpenVINO | FP32 | ✅ | 167.0 | 0.0 | 52.73 |
| YOLOv8l | OpenVINO | INT8 | ✅ | 42.6 | 0.7861 | 28.11 |
| YOLOv8x | PyTorch | FP32 | ✅ | 130.5 | 0.7759 | 610.71 |
| YOLOv8x | OpenVINO | FP32 | ✅ | 260.6 | 0.748 | 73.51 |
| YOLOv8x | OpenVINO | INT8 | ✅ | 66.0 | 0.8085 | 51.71 |
<div align="center">
<img width="800" src="https://github.com/ultralytics/docs/releases/download/0/intel-ultra-gpu.avif" alt="Intel Core Ultra GPU benchmarks">
</div>
=== "Intel® Meteor Lake CPU"
| Model | Format | Precision | Status | Size (MB) | metrics/mAP50-95(B) | Inference time (ms/im) |
| ------- | ----------- | --------- | ------ | --------- | ------------------- | ---------------------- |
| YOLOv8n | PyTorch | FP32 | ✅ | 6.2 | 0.6381 | 34.69 |
| YOLOv8n | OpenVINO | FP32 | ✅ | 12.3 | 0.6092 | 39.06 |
| YOLOv8n | OpenVINO | INT8 | ✅ | 3.6 | 0.5968 | 18.37 |
| YOLOv8s | PyTorch | FP32 | ✅ | 21.5 | 0.6967 | 79.9 |
| YOLOv8s | OpenVINO | FP32 | ✅ | 42.9 | 0.7136 | 82.6 |
| YOLOv8s | OpenVINO | INT8 | ✅ | 11.2 | 0.7083 | 29.51 |
| YOLOv8m | PyTorch | FP32 | ✅ | 49.7 | 0.737 | 202.43 |
| YOLOv8m | OpenVINO | FP32 | ✅ | 99.1 | 0.728 | 181.27 |
| YOLOv8m | OpenVINO | INT8 | ✅ | 25.5 | 0.7285 | 51.25 |
| YOLOv8l | PyTorch | FP32 | ✅ | 83.7 | 0.7769 | 385.87 |
| YOLOv8l | OpenVINO | FP32 | ✅ | 167.0 | 0.7551 | 347.75 |
| YOLOv8l | OpenVINO | INT8 | ✅ | 42.6 | 0.7675 | 91.66 |
| YOLOv8x | PyTorch | FP32 | ✅ | 130.5 | 0.7759 | 603.63 |
| YOLOv8x | OpenVINO | FP32 | ✅ | 260.6 | 0.7479 | 516.39 |
| YOLOv8x | OpenVINO | INT8 | ✅ | 66.0 | 0.8119 | 142.42 |
<div align="center">
<img width="800" src="https://github.com/ultralytics/docs/releases/download/0/intel-ultra-cpu.avif" alt="Intel Core Ultra CPU benchmarks">
</div>
=== "Integrated Intel® AI Boost NPU"
| Model | Format | Precision | Status | Size (MB) | metrics/mAP50-95(B) | Inference time (ms/im) |
| ------- | ----------- | --------- | ------ | --------- | ------------------- | ---------------------- |
| YOLOv8n | PyTorch | FP32 | ✅ | 6.2 | 0.6381 | 36.98 |
| YOLOv8n | OpenVINO | FP32 | ✅ | 12.3 | 0.6103 | 16.68 |
| YOLOv8n | OpenVINO | INT8 | ✅ | 3.6 | 0.5941 | 14.6 |
| YOLOv8s | PyTorch | FP32 | ✅ | 21.5 | 0.6967 | 79.76 |
| YOLOv8s | OpenVINO | FP32 | ✅ | 42.9 | 0.7144 | 32.89 |
| YOLOv8s | OpenVINO | INT8 | ✅ | 11.2 | 0.7062 | 26.13 |
| YOLOv8m | PyTorch | FP32 | ✅ | 49.7 | 0.737 | 201.44 |
| YOLOv8m | OpenVINO | FP32 | ✅ | 99.1 | 0.7284 | 54.4 |
| YOLOv8m | OpenVINO | INT8 | ✅ | 25.5 | 0.7268 | 30.76 |
| YOLOv8l | PyTorch | FP32 | ✅ | 83.7 | 0.7769 | 385.46 |
| YOLOv8l | OpenVINO | FP32 | ✅ | 167.0 | 0.7539 | 80.1 |
| YOLOv8l | OpenVINO | INT8 | ✅ | 42.6 | 0.7508 | 52.25 |
| YOLOv8x | PyTorch | FP32 | ✅ | 130.5 | 0.7759 | 609.4 |
| YOLOv8x | OpenVINO | FP32 | ✅ | 260.6 | 0.7637 | 104.79 |
| YOLOv8x | OpenVINO | INT8 | ✅ | 66.0 | 0.8077 | 64.96 |
<div align="center">
<img width="800" src="https://github.com/ultralytics/docs/releases/download/0/intel-ultra-npu.avif" alt="Intel Core Ultra NPU benchmarks">
</div>
## Reproduce Our Results
To reproduce the Ultralytics benchmarks above on all export [formats](../modes/export.md) run this code:
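The referenced code is cut off in this excerpt; a minimal sketch using the `benchmark` utility, with dataset, image size, and device chosen to match the tables above (treat these values as assumptions):

```python
from ultralytics.utils.benchmarks import benchmark

# Benchmark YOLOv8n across all export formats at FP32 on CPU
benchmark(model="yolov8n.pt", data="coco8.yaml", imgsz=640, half=False, device="cpu")
```

Results vary with hardware and the exact package versions installed, so expect numbers to differ from the tables.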

@@ -46,7 +46,7 @@ Here are some of the standout functionalities:
## Usage Examples
Export a YOLOv8n model to a different format like ONNX or TensorRT. See Arguments section below for a full list of export arguments.
Export a YOLOv8n model to a different format like ONNX or TensorRT. See the Arguments section below for a full list of export arguments.
!!! example
@@ -112,7 +112,7 @@ Exporting a YOLOv8 model to ONNX format is straightforward with Ultralytics. It
yolo export model=path/to/best.pt format=onnx # export custom trained model
```
For more details on the process, including advanced options like handling different input sizes, refer to the [ONNX](../integrations/onnx.md) section.
For more details on the process, including advanced options like handling different input sizes, refer to the [ONNX section](../integrations/onnx.md).
### What are the benefits of using TensorRT for model export?
@@ -122,7 +122,7 @@ Using TensorRT for model export offers significant performance improvements. YOL
- **Speed:** Achieve faster inference through advanced optimizations.
- **Compatibility:** Integrate smoothly with NVIDIA hardware.
To learn more about integrating TensorRT, see the [TensorRT](../integrations/tensorrt.md) integration guide.
To learn more about integrating TensorRT, see the [TensorRT integration guide](../integrations/tensorrt.md).
### How do I enable INT8 quantization when exporting my YOLOv8 model?
@@ -145,7 +145,7 @@ INT8 quantization is an excellent way to compress the model and speed up inferen
yolo export model=yolov8n.pt format=onnx int8=True # export model with INT8 quantization
```
INT8 quantization can be applied to various formats, such as TensorRT and CoreML. More details can be found in the [Export](../modes/export.md) section.
INT8 quantization can be applied to various formats, such as TensorRT and CoreML. More details can be found in the [Export section](../modes/export.md).
### Why is dynamic input size important when exporting models?
@@ -182,4 +182,4 @@ Understanding and configuring export arguments is crucial for optimizing model p
- **`optimize:`** Applies specific optimizations for mobile or constrained environments.
- **`int8:`** Enables INT8 quantization, highly beneficial for edge deployments.
For a detailed list and explanations of all the export arguments, visit the [Export Arguments](#arguments) section.
For a detailed list and explanations of all the export arguments, visit the [Export Arguments section](#arguments).
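A short sketch combining a few of these arguments (the format and values are illustrative, not recommendations):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")

# Export to OpenVINO with INT8 quantization for an edge deployment
model.export(format="openvino", int8=True)
```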

@@ -68,7 +68,7 @@ Track mode is used for tracking objects in real-time using a YOLOv8 model. In th
## [Benchmark](benchmark.md)
Benchmark mode is used to profile the speed and accuracy of various export formats for YOLOv8. The benchmarks provide information on the size of the exported format, its `mAP50-95` metrics (for object detection, segmentation and pose) or `accuracy_top5` metrics (for classification), and the inference time in milliseconds per image across various export formats like ONNX, OpenVINO, TensorRT and others. This information can help users choose the optimal export format for their specific use case based on their requirements for speed and accuracy.
Benchmark mode is used to profile the speed and accuracy of various export formats for YOLOv8. The benchmarks provide information on the size of the exported format, its `mAP50-95` metrics (for object detection, segmentation, and pose) or `accuracy_top5` metrics (for classification), and the inference time in milliseconds per image across various formats like ONNX, OpenVINO, TensorRT, and others. This information can help users choose the optimal export format for their specific use case based on their requirements for speed and accuracy.
[Benchmark Examples](benchmark.md){ .md-button }

@@ -123,14 +123,14 @@ To enable training on Apple M1 and M2 chips, you should specify 'mps' as your de
# Load a model
model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training)
# Train the model with 2 GPUs
# Train the model with MPS
results = model.train(data="coco8.yaml", epochs=100, imgsz=640, device="mps")
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model using GPUs 0 and 1
# Start training from a pretrained *.pt model using MPS
yolo detect train data=coco8.yaml model=yolov8n.pt epochs=100 imgsz=640 device=mps
```
@@ -169,7 +169,7 @@ Below is an example of how to resume an interrupted training using Python and vi
By setting `resume=True`, the `train` function will continue training from where it left off, using the state stored in the 'path/to/last.pt' file. If the `resume` argument is omitted or set to `False`, the `train` function will start a new training session.
Remember that checkpoints are saved at the end of every epoch by default, or at fixed interval using the `save_period` argument, so you must complete at least 1 epoch to resume a training run.
Remember that checkpoints are saved at the end of every epoch by default, or at fixed intervals using the `save_period` argument, so you must complete at least 1 epoch to resume a training run.
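For reference, a minimal sketch of the resume call described above (the checkpoint path is a placeholder):

```python
from ultralytics import YOLO

# Load the last saved checkpoint and continue the interrupted run
model = YOLO("path/to/last.pt")
results = model.train(resume=True)
```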
## Train Settings

@@ -47,7 +47,7 @@ These are the notable functionalities offered by YOLOv8's Val mode:
## Usage Examples
Validate trained YOLOv8n model accuracy on the COCO8 dataset. No argument need to passed as the `model` retains its training `data` and arguments as model attributes. See Arguments section below for a full list of export arguments.
Validate trained YOLOv8n model accuracy on the COCO8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. See Arguments section below for a full list of export arguments.
!!! example
@@ -165,7 +165,7 @@ These benefits ensure that your models are evaluated thoroughly and can be optim
### Can I validate my YOLOv8 model using a custom dataset?
Yes, you can validate your YOLOv8 model using a custom dataset. Specify the `data` argument with the path to your dataset configuration file. This file should include paths to the validation data, class names, and other relevant details.
Yes, you can validate your YOLOv8 model using a [custom dataset](https://docs.ultralytics.com/datasets/). Specify the `data` argument with the path to your dataset configuration file. This file should include paths to the validation data, class names, and other relevant details.
Example in Python:
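A minimal sketch, assuming a hypothetical dataset configuration file at `path/to/custom_dataset.yaml`:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # load an official or custom-trained model
metrics = model.val(data="path/to/custom_dataset.yaml")  # validate on the custom dataset
print(metrics.box.map)  # mAP50-95 for object detection
```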

@ -84,7 +84,7 @@ YOLO classification dataset format can be found in detail in the [Dataset Guide]
## Val
Validate trained YOLOv8n-cls model accuracy on the MNIST160 dataset. No argument need to passed as the `model` retains its training `data` and arguments as model attributes.
Validate trained YOLOv8n-cls model accuracy on the MNIST160 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes.
!!! example

@ -83,7 +83,7 @@ YOLO detection dataset format can be found in detail in the [Dataset Guide](../d
## Val
Validate trained YOLOv8n model accuracy on the COCO8 dataset. No argument need to passed as the `model` retains its training `data` and arguments as model attributes.
Validate trained YOLOv8n model accuracy on the COCO8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes.
!!! example

@ -104,8 +104,7 @@ OBB dataset format can be found in detail in the [Dataset Guide](../datasets/obb
## Val
Validate trained YOLOv8n-obb model accuracy on the DOTA8 dataset. No argument need to passed as the `model`
retains its training `data` and arguments as model attributes.
Validate trained YOLOv8n-obb model accuracy on the DOTA8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes.
!!! example

@ -117,8 +117,7 @@ YOLO pose dataset format can be found in detail in the [Dataset Guide](../datase
## Val
Validate trained YOLOv8n-pose model accuracy on the COCO128-pose dataset. No argument need to passed as the `model`
retains its training `data` and arguments as model attributes.
Validate trained YOLOv8n-pose model accuracy on the COCO128-pose dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes.
!!! example

@ -84,8 +84,7 @@ YOLO segmentation dataset format can be found in detail in the [Dataset Guide](.
## Val
Validate trained YOLOv8n-seg model accuracy on the COCO128-seg dataset. No argument need to passed as the `model`
retains its training `data` and arguments as model attributes.
Validate trained YOLOv8n-seg model accuracy on the COCO128-seg dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes.
!!! example

@ -109,7 +109,7 @@ Train YOLOv8n on the COCO8 dataset for 100 epochs at image size 640. For a full
## Val
Validate trained YOLOv8n model accuracy on the COCO8 dataset. No argument need to passed as the `model` retains its training `data` and arguments as model attributes.
Validate trained YOLOv8n model accuracy on the COCO8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes.
!!! example

@ -42,7 +42,7 @@ YOLOv5 models must be trained on labelled data in order to learn classes of obje
Your model will learn by example. Training on images similar to the ones it will see in the wild is of the utmost importance. Ideally, you will collect a wide variety of images from the same configuration (camera, angle, lighting, etc.) as you will ultimately deploy your project.
If this is not possible, you can start from [a public dataset](https://universe.roboflow.com/?ref=ultralytics) to train your initial model and then [sample images from the wild during inference](https://blog.roboflow.com/computer-vision-active-learning-tips/?ref=ultralytics) to improve your dataset and model iteratively.
If this is not possible, you can start from [a public dataset](https://universe.roboflow.com/?ref=ultralytics) to train your initial model and then [sample images from the wild during inference](https://blog.roboflow.com/what-is-active-learning/?ref=ultralytics) to improve your dataset and model iteratively.
### 1.2 Create Labels

@ -32,6 +32,6 @@ We greatly appreciate contributions from the community, including examples, appl
- Avoids adding large files or dependencies unless they are absolutely necessary for the example.
- Contributors should be willing to provide support for their examples and address related issues.
For more detailed information and guidance on contributing, please visit our [contribution documentation](https://docs.ultralytics.com/help/contributing).
For more detailed information and guidance on contributing, please visit our [contribution documentation](https://docs.ultralytics.com/help/contributing/).
If you encounter any questions or concerns regarding these guidelines, feel free to open a PR or an issue in the repository, and we will assist you in the contribution process.

@ -26,11 +26,11 @@ build-backend = "setuptools.build_meta"
[project]
name = "ultralytics"
dynamic = ["version"]
description = "Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification."
description = "Ultralytics YOLO for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification."
readme = "README.md"
requires-python = ">=3.8"
license = { "text" = "AGPL-3.0" }
keywords = ["machine-learning", "deep-learning", "computer-vision", "ML", "DL", "AI", "YOLO", "YOLOv3", "YOLOv5", "YOLOv8", "HUB", "Ultralytics"]
keywords = ["machine-learning", "deep-learning", "computer-vision", "ML", "DL", "AI", "YOLO", "YOLOv3", "YOLOv5", "YOLOv8", "YOLOv9", "YOLOv10", "HUB", "Ultralytics"]
authors = [
{ name = "Glenn Jocher" },
{ name = "Ayush Chaurasia" },

@ -101,7 +101,7 @@ def test_mobilesam():
model.predict(source, points=[900, 370], labels=[1])
# Predict a segment based on a box prompt
model.predict(source, bboxes=[439, 437, 524, 709])
model.predict(source, bboxes=[439, 437, 524, 709], save=True)
# Predict all
# model(source)
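For context, a minimal sketch of the setup these prompt calls assume; the `mobile_sam.pt` weights name and the `source` path are placeholders:

```python
from ultralytics import SAM

model = SAM("mobile_sam.pt")  # load MobileSAM weights
source = "path/to/image.jpg"  # hypothetical test image
model.predict(source, bboxes=[439, 437, 524, 709], save=True)  # box-prompted segment, saved to disk
```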

@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
__version__ = "8.2.93"
__version__ = "8.2.94"
import os

@ -4,11 +4,11 @@ Welcome to the [Ultralytics](https://www.ultralytics.com/) Models directory! Her
These model configurations cover a wide range of scenarios, from simple object detection to more complex tasks like instance segmentation and object tracking. They are also designed to run efficiently on a variety of hardware platforms, from CPUs to GPUs. Whether you are a seasoned machine learning practitioner or just getting started with YOLO, this directory provides a great starting point for your custom model development needs.
To get started, simply browse through the models in this directory and find one that best suits your needs. Once you've selected a model, you can use the provided `*.yaml` file to train and deploy your custom YOLO model with ease. See full details at the Ultralytics [Docs](https://docs.ultralytics.com/models), and if you need help or have any questions, feel free to reach out to the Ultralytics team for support. So, don't wait, start creating your custom YOLO model now!
To get started, simply browse through the models in this directory and find one that best suits your needs. Once you've selected a model, you can use the provided `*.yaml` file to train and deploy your custom YOLO model with ease. See full details at the Ultralytics [Docs](https://docs.ultralytics.com/models/), and if you need help or have any questions, feel free to reach out to the Ultralytics team for support. So, don't wait, start creating your custom YOLO model now!
### Usage
Model `*.yaml` files may be used directly in the [Command Line Interface (CLI)](https://docs.ultralytics.com/usage/cli) with a `yolo` command:
Model `*.yaml` files may be used directly in the [Command Line Interface (CLI)](https://docs.ultralytics.com/usage/cli/) with a `yolo` command:
```bash
# Train a YOLOv8n model using the coco8 dataset for 100 epochs
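# Representative command (a sketch; assumes the standard YOLO CLI argument syntax)
yolo detect train data=coco8.yaml model=yolov8n.yaml epochs=100
```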
@ -35,7 +35,7 @@ model.train(data="coco8.yaml", epochs=100)
## Pre-trained Model Architectures
Ultralytics supports many model architectures. Visit [Ultralytics Models](https://docs.ultralytics.com/models) to view detailed information and usage. Any of these models can be used by loading their configurations or pretrained checkpoints if available.
Ultralytics supports many model architectures. Visit [Ultralytics Models](https://docs.ultralytics.com/models/) to view detailed information and usage. Any of these models can be used by loading their configurations or pretrained checkpoints if available.
## Contribute New Models
@ -43,6 +43,6 @@ Have you trained a new YOLO variant or achieved state-of-the-art performance wit
By contributing to this section, you're helping us offer a wider array of model choices and configurations to the community. It's a fantastic way to share your knowledge and expertise while making the Ultralytics YOLO ecosystem even more versatile.
To get started, please consult our [Contributing Guide](https://docs.ultralytics.com/help/contributing) for step-by-step instructions on how to submit a Pull Request (PR) 🛠. Your contributions are eagerly awaited!
To get started, please consult our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) for step-by-step instructions on how to submit a Pull Request (PR) 🛠. Your contributions are eagerly awaited!
Let's join hands to extend the range and capabilities of the Ultralytics YOLO models 🙏!

@ -28,7 +28,6 @@ from ultralytics.utils import (
DEFAULT_CFG,
LOCAL_RANK,
LOGGER,
MACOS,
RANK,
TQDM,
__version__,
@ -409,13 +408,17 @@ class BaseTrainer:
break
# Log
mem = f"{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G" # (GB)
loss_len = self.tloss.shape[0] if len(self.tloss.shape) else 1
losses = self.tloss if loss_len > 1 else torch.unsqueeze(self.tloss, 0)
if RANK in {-1, 0}:
loss_length = self.tloss.shape[0] if len(self.tloss.shape) else 1
pbar.set_description(
("%11s" * 2 + "%11.4g" * (2 + loss_len))
% (f"{epoch + 1}/{self.epochs}", mem, *losses, batch["cls"].shape[0], batch["img"].shape[-1])
("%11s" * 2 + "%11.4g" * (2 + loss_length))
% (
f"{epoch + 1}/{self.epochs}",
f"{self._get_memory():.3g}G", # (GB) GPU memory util
*(self.tloss if loss_length > 1 else torch.unsqueeze(self.tloss, 0)), # losses
batch["cls"].shape[0], # batch size, i.e. 8
batch["img"].shape[-1], # imgsz, i.e 640
)
)
self.run_callbacks("on_batch_end")
if self.args.plots and ni in self.plot_idx:
@ -453,11 +456,7 @@ class BaseTrainer:
self.scheduler.last_epoch = self.epoch # do not move
self.stop |= epoch >= self.epochs # stop if exceeded epochs
self.run_callbacks("on_fit_epoch_end")
gc.collect()
if MACOS:
torch.mps.empty_cache() # clear unified memory at end of epoch, may help MPS' management of 'unlimited' virtual memory
else:
torch.cuda.empty_cache() # clear GPU memory at end of epoch, may help reduce CUDA out of memory errors
self._clear_memory()
# Early Stopping
if RANK != -1: # if DDP training
@ -478,14 +477,29 @@ class BaseTrainer:
if self.args.plots:
self.plot_metrics()
self.run_callbacks("on_train_end")
self._clear_memory()
self.run_callbacks("teardown")
def _get_memory(self):
"""Get accelerator memory utilization in GB."""
if self.device.type == "mps":
memory = torch.mps.driver_allocated_memory()
elif self.device.type == "cpu":
memory = 0
else:
memory = torch.cuda.memory_reserved()
return memory / 1e9
def _clear_memory(self):
"""Clear accelerator memory on different platforms."""
gc.collect()
if MACOS:
if self.device.type == "mps":
torch.mps.empty_cache()
elif self.device.type == "cpu":
return
else:
torch.cuda.empty_cache()
self.run_callbacks("teardown")
def read_results_csv(self):
"""Read results.csv into a dict using pandas."""
import pandas as pd # scope for faster 'import ultralytics'

@ -450,16 +450,18 @@ class Predictor(BasePredictor):
results = []
for masks, orig_img, img_path in zip([pred_masks], orig_imgs, self.batch[0]):
if pred_bboxes is not None:
pred_bboxes = ops.scale_boxes(img.shape[2:], pred_bboxes.float(), orig_img.shape, padding=False)
cls = torch.arange(len(pred_masks), dtype=torch.int32, device=pred_masks.device)
pred_bboxes = torch.cat([pred_bboxes, pred_scores[:, None], cls[:, None]], dim=-1)
if len(masks) == 0:
masks = None
else:
masks = ops.scale_masks(masks[None].float(), orig_img.shape[:2], padding=False)[0]
masks = masks > self.model.mask_threshold # to bool
if pred_bboxes is not None:
pred_bboxes = ops.scale_boxes(img.shape[2:], pred_bboxes.float(), orig_img.shape, padding=False)
else:
pred_bboxes = batched_mask_to_box(masks)
# NOTE: SAM models do not return cls info. This `cls` here is just a placeholder for consistency.
cls = torch.arange(len(pred_masks), dtype=torch.int32, device=pred_masks.device)
pred_bboxes = torch.cat([pred_bboxes, pred_scores[:, None], cls[:, None]], dim=-1)
results.append(Results(orig_img, path=img_path, names=names, masks=masks, boxes=pred_bboxes))
# Reset segment-all mode.
self.segment_all = False

@ -309,6 +309,6 @@ Are you proficient in multi-object tracking and have successfully implemented or
By contributing to this section, you help expand the scope of tracking solutions available within the Ultralytics YOLO framework, adding another layer of functionality and utility for the community.
To initiate your contribution, please refer to our [Contributing Guide](https://docs.ultralytics.com/help/contributing) for comprehensive instructions on submitting a Pull Request (PR) 🛠. We are excited to see what you bring to the table!
To initiate your contribution, please refer to our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) for comprehensive instructions on submitting a Pull Request (PR) 🛠. We are excited to see what you bring to the table!
Together, let's enhance the tracking capabilities of the Ultralytics YOLO ecosystem 🙏!

@ -20,7 +20,7 @@ from ultralytics.utils.files import increment_path
class Colors:
"""
Ultralytics default color palette https://ultralytics.com/.
Ultralytics color palette https://docs.ultralytics.com/reference/utils/plotting/#ultralytics.utils.plotting.Colors.
This class provides methods to work with the Ultralytics color palette, including converting hex color codes to
RGB values.
@ -29,6 +29,60 @@ class Colors:
palette (list of tuple): List of RGB color values.
n (int): The number of colors in the palette.
pose_palette (np.ndarray): A specific color palette array with dtype np.uint8.
## Ultralytics Color Palette
| Index | Color | HEX | RGB |
|-------|-------------------------------------------------------------------|-----------|-------------------|
| 0 | <i class="fa-solid fa-square fa-2xl" style="color: #042aff;"></i> | `#042aff` | (4, 42, 255) |
| 1 | <i class="fa-solid fa-square fa-2xl" style="color: #0bdbeb;"></i> | `#0bdbeb` | (11, 219, 235) |
| 2 | <i class="fa-solid fa-square fa-2xl" style="color: #f3f3f3;"></i> | `#f3f3f3` | (243, 243, 243) |
| 3 | <i class="fa-solid fa-square fa-2xl" style="color: #00dfb7;"></i> | `#00dfb7` | (0, 223, 183) |
| 4 | <i class="fa-solid fa-square fa-2xl" style="color: #111f68;"></i> | `#111f68` | (17, 31, 104) |
| 5 | <i class="fa-solid fa-square fa-2xl" style="color: #ff6fdd;"></i> | `#ff6fdd` | (255, 111, 221) |
| 6 | <i class="fa-solid fa-square fa-2xl" style="color: #ff444f;"></i> | `#ff444f` | (255, 68, 79) |
| 7 | <i class="fa-solid fa-square fa-2xl" style="color: #cced00;"></i> | `#cced00` | (204, 237, 0) |
| 8 | <i class="fa-solid fa-square fa-2xl" style="color: #00f344;"></i> | `#00f344` | (0, 243, 68) |
| 9 | <i class="fa-solid fa-square fa-2xl" style="color: #bd00ff;"></i> | `#bd00ff` | (189, 0, 255) |
| 10 | <i class="fa-solid fa-square fa-2xl" style="color: #00b4ff;"></i> | `#00b4ff` | (0, 180, 255) |
| 11 | <i class="fa-solid fa-square fa-2xl" style="color: #dd00ba;"></i> | `#dd00ba` | (221, 0, 186) |
| 12 | <i class="fa-solid fa-square fa-2xl" style="color: #00ffff;"></i> | `#00ffff` | (0, 255, 255) |
| 13 | <i class="fa-solid fa-square fa-2xl" style="color: #26c000;"></i> | `#26c000` | (38, 192, 0) |
| 14 | <i class="fa-solid fa-square fa-2xl" style="color: #01ffb3;"></i> | `#01ffb3` | (1, 255, 179) |
| 15 | <i class="fa-solid fa-square fa-2xl" style="color: #7d24ff;"></i> | `#7d24ff` | (125, 36, 255) |
| 16 | <i class="fa-solid fa-square fa-2xl" style="color: #7b0068;"></i> | `#7b0068` | (123, 0, 104) |
| 17 | <i class="fa-solid fa-square fa-2xl" style="color: #ff1b6c;"></i> | `#ff1b6c` | (255, 27, 108) |
| 18 | <i class="fa-solid fa-square fa-2xl" style="color: #fc6d2f;"></i> | `#fc6d2f` | (252, 109, 47) |
| 19 | <i class="fa-solid fa-square fa-2xl" style="color: #a2ff0b;"></i> | `#a2ff0b` | (162, 255, 11) |
## Pose Color Palette
| Index | Color | HEX | RGB |
|-------|-------------------------------------------------------------------|-----------|-------------------|
| 0 | <i class="fa-solid fa-square fa-2xl" style="color: #ff8000;"></i> | `#ff8000` | (255, 128, 0) |
| 1 | <i class="fa-solid fa-square fa-2xl" style="color: #ff9933;"></i> | `#ff9933` | (255, 153, 51) |
| 2 | <i class="fa-solid fa-square fa-2xl" style="color: #ffb266;"></i> | `#ffb266` | (255, 178, 102) |
| 3 | <i class="fa-solid fa-square fa-2xl" style="color: #e6e600;"></i> | `#e6e600` | (230, 230, 0) |
| 4 | <i class="fa-solid fa-square fa-2xl" style="color: #ff99ff;"></i> | `#ff99ff` | (255, 153, 255) |
| 5 | <i class="fa-solid fa-square fa-2xl" style="color: #99ccff;"></i> | `#99ccff` | (153, 204, 255) |
| 6 | <i class="fa-solid fa-square fa-2xl" style="color: #ff66ff;"></i> | `#ff66ff` | (255, 102, 255) |
| 7 | <i class="fa-solid fa-square fa-2xl" style="color: #ff33ff;"></i> | `#ff33ff` | (255, 51, 255) |
| 8 | <i class="fa-solid fa-square fa-2xl" style="color: #66b2ff;"></i> | `#66b2ff` | (102, 178, 255) |
| 9 | <i class="fa-solid fa-square fa-2xl" style="color: #3399ff;"></i> | `#3399ff` | (51, 153, 255) |
| 10 | <i class="fa-solid fa-square fa-2xl" style="color: #ff9999;"></i> | `#ff9999` | (255, 153, 153) |
| 11 | <i class="fa-solid fa-square fa-2xl" style="color: #ff6666;"></i> | `#ff6666` | (255, 102, 102) |
| 12 | <i class="fa-solid fa-square fa-2xl" style="color: #ff3333;"></i> | `#ff3333` | (255, 51, 51) |
| 13 | <i class="fa-solid fa-square fa-2xl" style="color: #99ff99;"></i> | `#99ff99` | (153, 255, 153) |
| 14 | <i class="fa-solid fa-square fa-2xl" style="color: #66ff66;"></i> | `#66ff66` | (102, 255, 102) |
| 15 | <i class="fa-solid fa-square fa-2xl" style="color: #33ff33;"></i> | `#33ff33` | (51, 255, 51) |
| 16 | <i class="fa-solid fa-square fa-2xl" style="color: #00ff00;"></i> | `#00ff00` | (0, 255, 0) |
| 17 | <i class="fa-solid fa-square fa-2xl" style="color: #0000ff;"></i> | `#0000ff` | (0, 0, 255) |
| 18 | <i class="fa-solid fa-square fa-2xl" style="color: #ff0000;"></i> | `#ff0000` | (255, 0, 0) |
| 19 | <i class="fa-solid fa-square fa-2xl" style="color: #ffffff;"></i> | `#ffffff` | (255, 255, 255) |
!!! note "Ultralytics Brand Colors"
For Ultralytics brand colors see [https://www.ultralytics.com/brand](https://www.ultralytics.com/brand). Please use the official Ultralytics colors for all marketing materials.
"""
def __init__(self):

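As a usage note, a minimal sketch of the palette accessor, assuming the module-level `colors` singleton that this class backs:

```python
from ultralytics.utils.plotting import colors

print(colors(0))  # RGB tuple for palette index 0, e.g. (4, 42, 255)
print(colors(0, bgr=True))  # same color in BGR order for OpenCV drawing
```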