`ultralytics 8.3.0` YOLO11 Models Release (#16539)

Signed-off-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Laughing-q <1185102784@qq.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Branch: pull/16545/head · Tag: v8.3.0
Glenn Jocher committed 2 months ago via GitHub
parent efb0c17881
commit 6e43d1e1e5
Changed files (50):

1. .github/workflows/ci.yaml (29 changes)
2. .github/workflows/merge-main-into-prs.yml (1 change)
3. CITATION.cff (8 changes)
4. README.md (114 changes)
5. README.zh-CN.md (200 changes)
6. docker/Dockerfile (8 changes)
7. docker/Dockerfile-arm64 (2 changes)
8. docker/Dockerfile-conda (2 changes)
9. docker/Dockerfile-cpu (8 changes)
10. docker/Dockerfile-jetson-jetpack4 (4 changes)
11. docker/Dockerfile-jetson-jetpack5 (4 changes)
12. docker/Dockerfile-jetson-jetpack6 (4 changes)
13. docker/Dockerfile-python (8 changes)
14. docker/Dockerfile-runner (2 changes)
15. docs/en/datasets/pose/hand-keypoints.md (67 changes)
16. docs/en/index.md (3 changes)
17. docs/en/macros/augmentation-args.md (39 changes)
18. docs/en/models/index.md (17 changes)
19. docs/en/models/yolo11.md (228 changes)
20. docs/en/models/yolov8.md (2 changes)
21. docs/en/reference/nn/modules/block.md (24 changes)
22. docs/mkdocs_github_authors.yaml (3 changes)
23. examples/YOLOv8-ONNXRuntime-CPP/inference.cpp (6 changes)
24. examples/tutorial.ipynb (2 changes)
25. mkdocs.yml (1 change)
26. tests/__init__.py (4 changes)
27. tests/conftest.py (2 changes)
28. tests/test_cuda.py (2 changes)
29. tests/test_engine.py (10 changes)
30. tests/test_explorer.py (6 changes)
31. tests/test_integrations.py (18 changes)
32. tests/test_python.py (22 changes)
33. tests/test_solutions.py (6 changes)
34. ultralytics/__init__.py (3 changes)
35. ultralytics/cfg/default.yaml (1 change)
36. ultralytics/cfg/models/11/yolo11-cls.yaml (30 changes)
37. ultralytics/cfg/models/11/yolo11-obb.yaml (47 changes)
38. ultralytics/cfg/models/11/yolo11-pose.yaml (48 changes)
39. ultralytics/cfg/models/11/yolo11-seg.yaml (47 changes)
40. ultralytics/cfg/models/11/yolo11.yaml (47 changes)
41. ultralytics/data/augment.py (181 changes)
42. ultralytics/engine/trainer.py (9 changes)
43. ultralytics/nn/modules/__init__.py (8 changes)
44. ultralytics/nn/modules/block.py (235 changes)
45. ultralytics/nn/modules/conv.py (3 changes)
46. ultralytics/nn/modules/head.py (11 changes)
47. ultralytics/nn/tasks.py (27 changes)
48. ultralytics/utils/downloads.py (5 changes)
49. ultralytics/utils/loss.py (3 changes)
50. ultralytics/utils/torch_utils.py (2 changes)

diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
@@ -100,7 +100,7 @@ jobs:
       matrix:
         os: [ubuntu-latest, windows-latest, macos-14]
         python-version: ["3.11"]
-        model: [yolov8n]
+        model: [yolo11n]
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-python@v5
@@ -116,24 +116,27 @@ jobs:
         run: |
           yolo checks
           pip list
+      - name: Benchmark DetectionModel
+        shell: bash
+        run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}.pt' imgsz=160 verbose=0.309
       - name: Benchmark ClassificationModel
         shell: bash
-        run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-cls.pt' imgsz=160 verbose=0.166
+        run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-cls.pt' imgsz=160 verbose=0.249
       - name: Benchmark YOLOWorld DetectionModel
         shell: bash
-        run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/yolov8s-worldv2.pt' imgsz=160 verbose=0.318
+        run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/yolov8s-worldv2.pt' imgsz=160 verbose=0.337
       - name: Benchmark SegmentationModel
         shell: bash
-        run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-seg.pt' imgsz=160 verbose=0.279
+        run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-seg.pt' imgsz=160 verbose=0.195
       - name: Benchmark PoseModel
         shell: bash
-        run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-pose.pt' imgsz=160 verbose=0.183
+        run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-pose.pt' imgsz=160 verbose=0.197
       - name: Benchmark OBBModel
         shell: bash
-        run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-obb.pt' imgsz=160 verbose=0.472
+        run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-obb.pt' imgsz=160 verbose=0.597
       - name: Benchmark YOLOv10Model
         shell: bash
-        run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/yolov10n.pt' imgsz=160 verbose=0.178
+        run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/yolov10n.pt' imgsz=160 verbose=0.205
       - name: Merge Coverage Reports
         run: |
           coverage xml -o coverage-benchmarks.xml
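The float passed as `verbose` in these steps acts as a minimum-metric floor: the benchmark fails if the model scores below it. A minimal Python sketch of the equivalent call, assuming the `benchmark` helper in `ultralytics.utils.benchmarks` accepts the same keyword arguments as this CLI entry point (verify against your installed version):

```python
from ultralytics.utils.benchmarks import benchmark

# Benchmark YOLO11n across export formats at imgsz=160, mirroring the
# CI step above. Passing a float for `verbose` is assumed to set the
# same minimum-metric floor that the CLI invocation uses.
benchmark(model="yolo11n.pt", imgsz=160, verbose=0.309)
```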
@@ -251,17 +254,17 @@ jobs:
       - name: Pytest tests
        run: pytest --slow tests/
       - name: Benchmark ClassificationModel
-        run: python -m ultralytics.cfg.__init__ benchmark model='yolov8n-cls.pt' imgsz=160 verbose=0.166
+        run: python -m ultralytics.cfg.__init__ benchmark model='yolo11n-cls.pt' imgsz=160 verbose=0.249
       - name: Benchmark YOLOWorld DetectionModel
-        run: python -m ultralytics.cfg.__init__ benchmark model='yolov8s-worldv2.pt' imgsz=160 verbose=0.318
+        run: python -m ultralytics.cfg.__init__ benchmark model='yolo11s-worldv2.pt' imgsz=160 verbose=0.337
       - name: Benchmark SegmentationModel
-        run: python -m ultralytics.cfg.__init__ benchmark model='yolov8n-seg.pt' imgsz=160 verbose=0.267
+        run: python -m ultralytics.cfg.__init__ benchmark model='yolo11n-seg.pt' imgsz=160 verbose=0.195
       - name: Benchmark PoseModel
-        run: python -m ultralytics.cfg.__init__ benchmark model='yolov8n-pose.pt' imgsz=160 verbose=0.179
+        run: python -m ultralytics.cfg.__init__ benchmark model='yolo11n-pose.pt' imgsz=160 verbose=0.197
       - name: Benchmark OBBModel
-        run: python -m ultralytics.cfg.__init__ benchmark model='yolov8n-obb.pt' imgsz=160 verbose=0.472
+        run: python -m ultralytics.cfg.__init__ benchmark model='yolo11n-obb.pt' imgsz=160 verbose=0.597
       - name: Benchmark YOLOv10Model
-        run: python -m ultralytics.cfg.__init__ benchmark model='yolov10n.pt' imgsz=160 verbose=0.178
+        run: python -m ultralytics.cfg.__init__ benchmark model='yolov10n.pt' imgsz=160 verbose=0.205
       - name: Benchmark Summary
         run: |
           cat benchmarks.log

diff --git a/.github/workflows/merge-main-into-prs.yml b/.github/workflows/merge-main-into-prs.yml
@@ -85,4 +85,3 @@ jobs:
             print(f"Branches updated: {updated_branches}")
             print(f"Branches already up-to-date: {up_to_date_branches}")
             print(f"Total errors: {errors}")

diff --git a/CITATION.cff b/CITATION.cff
@@ -11,14 +11,14 @@ authors:
     family-names: Jocher
     affiliation: Ultralytics
     orcid: 'https://orcid.org/0000-0001-5950-6979'
-  - given-names: Ayush
-    family-names: Chaurasia
-    affiliation: Ultralytics
-    orcid: 'https://orcid.org/0000-0002-7603-6750'
   - family-names: Qiu
     given-names: Jing
     affiliation: Ultralytics
     orcid: 'https://orcid.org/0000-0003-3783-7069'
+  - given-names: Ayush
+    family-names: Chaurasia
+    affiliation: Ultralytics
+    orcid: 'https://orcid.org/0000-0002-7603-6750'
 repository-code: 'https://github.com/ultralytics/ultralytics'
 url: 'https://ultralytics.com'
 license: AGPL-3.0

diff --git a/README.md b/README.md
@@ -8,7 +8,7 @@
 <div>
   <a href="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml"><img src="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml/badge.svg" alt="Ultralytics CI"></a>
-  <a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="Ultralytics YOLOv8 Citation"></a>
+  <a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="Ultralytics YOLO Citation"></a>
   <a href="https://hub.docker.com/r/ultralytics/ultralytics"><img src="https://img.shields.io/docker/pulls/ultralytics/ultralytics?logo=docker" alt="Ultralytics Docker Pulls"></a>
   <a href="https://ultralytics.com/discord"><img alt="Ultralytics Discord" src="https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue"></a>
   <a href="https://community.ultralytics.com"><img alt="Ultralytics Forums" src="https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue"></a>
@@ -20,13 +20,13 @@
 </div>
 <br>

-[Ultralytics](https://www.ultralytics.com/) [YOLOv8](https://github.com/ultralytics/ultralytics) is a cutting-edge, state-of-the-art (SOTA) model that builds upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and flexibility. YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection and tracking, instance segmentation, image classification and pose estimation tasks.
+[Ultralytics](https://www.ultralytics.com/) [YOLO11](https://github.com/ultralytics/ultralytics) is a cutting-edge, state-of-the-art (SOTA) model that builds upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and flexibility. YOLO11 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection and tracking, instance segmentation, image classification and pose estimation tasks.

-We hope that the resources here will help you get the most out of YOLOv8. Please browse the YOLOv8 <a href="https://docs.ultralytics.com/">Docs</a> for details, raise an issue on <a href="https://github.com/ultralytics/ultralytics/issues/new/choose">GitHub</a> for support, questions, or discussions, become a member of the Ultralytics <a href="https://ultralytics.com/discord">Discord</a>, <a href="https://reddit.com/r/ultralytics">Reddit</a> and <a href="https://community.ultralytics.com">Forums</a>!
+We hope that the resources here will help you get the most out of YOLO. Please browse the Ultralytics <a href="https://docs.ultralytics.com/">Docs</a> for details, raise an issue on <a href="https://github.com/ultralytics/ultralytics/issues/new/choose">GitHub</a> for support, questions, or discussions, become a member of the Ultralytics <a href="https://ultralytics.com/discord">Discord</a>, <a href="https://reddit.com/r/ultralytics">Reddit</a> and <a href="https://community.ultralytics.com">Forums</a>!

 To request an Enterprise License please complete the form at [Ultralytics Licensing](https://www.ultralytics.com/license).

-<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/yolo-comparison-plots.png" alt="YOLOv8 performance plots"></a>
+<img width="100%" src="https://github.com/user-attachments/assets/a311a4ed-bbf2-43b5-8012-5f183a28a845" alt="YOLO11 performance plots"></a>

 <div align="center">
   <a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="Ultralytics GitHub"></a>
@@ -47,7 +47,7 @@ To request an Enterprise License please complete the form at [Ultralytics Licens
 ## <div align="center">Documentation</div>

-See below for a quickstart installation and usage example, and see the [YOLOv8 Docs](https://docs.ultralytics.com/) for full documentation on training, validation, prediction and deployment.
+See below for a quickstart install and usage examples, and see our [Docs](https://docs.ultralytics.com/) for full documentation on training, validation, prediction and deployment.

 <details open>
 <summary>Install</summary>
@@ -71,23 +71,23 @@ For alternative installation methods including [Conda](https://anaconda.org/cond
 ### CLI

-YOLOv8 may be used directly in the Command Line Interface (CLI) with a `yolo` command:
+YOLO may be used directly in the Command Line Interface (CLI) with a `yolo` command:

 ```bash
-yolo predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg'
+yolo predict model=yolo11n.pt source='https://ultralytics.com/images/bus.jpg'
 ```

-`yolo` can be used for a variety of tasks and modes and accepts additional arguments, i.e. `imgsz=640`. See the YOLOv8 [CLI Docs](https://docs.ultralytics.com/usage/cli/) for examples.
+`yolo` can be used for a variety of tasks and modes and accepts additional arguments, i.e. `imgsz=640`. See the YOLO [CLI Docs](https://docs.ultralytics.com/usage/cli/) for examples.

 ### Python

-YOLOv8 may also be used directly in a Python environment, and accepts the same [arguments](https://docs.ultralytics.com/usage/cfg/) as in the CLI example above:
+YOLO may also be used directly in a Python environment, and accepts the same [arguments](https://docs.ultralytics.com/usage/cfg/) as in the CLI example above:

 ```python
 from ultralytics import YOLO

 # Load a model
-model = YOLO("yolov8n.pt")
+model = YOLO("yolo11n.pt")

 # Train the model
 train_results = model.train(
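This hunk is cut off inside the `model.train(` call; the complete quickstart example can be reconstructed from the matching README.zh-CN.md hunk later in this commit (comments translated to English):

```python
from ultralytics import YOLO

# Load a COCO-pretrained YOLO11n model
model = YOLO("yolo11n.pt")

# Train the model
train_results = model.train(
    data="coco8.yaml",  # path to dataset YAML
    epochs=100,  # number of training epochs
    imgsz=640,  # training image size
    device="cpu",  # device to run on, i.e. device=0 or device=0,1,2,3 or device=cpu
)

# Evaluate model performance on the validation set
metrics = model.val()

# Perform object detection on an image
results = model("path/to/image.jpg")
results[0].show()

# Export the model to ONNX format
path = model.export(format="onnx")  # returns the path to the exported model
```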
@@ -108,26 +108,13 @@ results[0].show()
 path = model.export(format="onnx")  # return path to exported model
 ```

-See YOLOv8 [Python Docs](https://docs.ultralytics.com/usage/python/) for more examples.
+See YOLO [Python Docs](https://docs.ultralytics.com/usage/python/) for more examples.

 </details>

-### Notebooks
-
-Ultralytics provides interactive notebooks for YOLOv8, covering training, validation, tracking, and more. Each notebook is paired with a [YouTube](https://www.youtube.com/ultralytics?sub_confirmation=1) tutorial, making it easy to learn and implement advanced YOLOv8 features.
-
-| Docs | Notebook | YouTube |
-| ---- | -------- | :-----: |
-| <a href="https://docs.ultralytics.com/modes/">YOLOv8 Train, Val, Predict and Export Modes</a> | <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | <a href="https://youtu.be/j8uQc0qB91s"><center><img width=30% src="https://raw.githubusercontent.com/ultralytics/assets/main/social/logo-social-youtube-rect.png" alt="Ultralytics Youtube Video"></center></a> |
-| <a href="https://docs.ultralytics.com/hub/quickstart/">Ultralytics HUB QuickStart</a> | <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/hub.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | <a href="https://youtu.be/lveF9iCMIzc"><center><img width=30% src="https://raw.githubusercontent.com/ultralytics/assets/main/social/logo-social-youtube-rect.png" alt="Ultralytics Youtube Video"></center></a> |
-| <a href="https://docs.ultralytics.com/modes/track/">YOLOv8 Multi-Object Tracking in Videos</a> | <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/object_tracking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | <a href="https://youtu.be/hHyHmOtmEgs"><center><img width=30% src="https://raw.githubusercontent.com/ultralytics/assets/main/social/logo-social-youtube-rect.png" alt="Ultralytics Youtube Video"></center></a> |
-| <a href="https://docs.ultralytics.com/guides/object-counting/">YOLOv8 Object Counting in Videos</a> | <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/object_counting.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | <a href="https://youtu.be/Ag2e-5_NpS0"><center><img width=30% src="https://raw.githubusercontent.com/ultralytics/assets/main/social/logo-social-youtube-rect.png" alt="Ultralytics Youtube Video"></center></a> |
-| <a href="https://docs.ultralytics.com/guides/heatmaps/">YOLOv8 Heatmaps in Videos</a> | <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/heatmaps.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | <a href="https://youtu.be/4ezde5-nZZw"><center><img width=30% src="https://raw.githubusercontent.com/ultralytics/assets/main/social/logo-social-youtube-rect.png" alt="Ultralytics Youtube Video"></center></a> |
-| <a href="https://docs.ultralytics.com/datasets/explorer/">Ultralytics Datasets Explorer with SQL and OpenAI Integration 🚀 New</a> | <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/docs/en/datasets/explorer/explorer.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | <a href="https://youtu.be/3VryynorQeo"><center><img width=30% src="https://raw.githubusercontent.com/ultralytics/assets/main/social/logo-social-youtube-rect.png" alt="Ultralytics Youtube Video"></center></a> |
-
 ## <div align="center">Models</div>

-YOLOv8 [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/) and [Pose](https://docs.ultralytics.com/tasks/pose/) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset are available here, as well as YOLOv8 [Classify](https://docs.ultralytics.com/tasks/classify/) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) dataset. [Track](https://docs.ultralytics.com/modes/track/) mode is available for all Detect, Segment and Pose models.
+YOLO11 [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/) and [Pose](https://docs.ultralytics.com/tasks/pose/) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset are available here, as well as YOLO11 [Classify](https://docs.ultralytics.com/tasks/classify/) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) dataset. [Track](https://docs.ultralytics.com/modes/track/) mode is available for all Detect, Segment and Pose models.

 <img width="1024" src="https://raw.githubusercontent.com/ultralytics/assets/main/im/banner-tasks.png" alt="Ultralytics YOLO supported tasks">
@@ -137,13 +124,13 @@ All [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cf
 See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for usage examples with these models trained on [COCO](https://docs.ultralytics.com/datasets/detect/coco/), which include 80 pre-trained classes.

-| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
-| ----- | --------------------- | -------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
-| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt) | 640 | 37.3 | 80.4 | 0.99 | 3.2 | 8.7 |
-| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s.pt) | 640 | 44.9 | 128.4 | 1.20 | 11.2 | 28.6 |
-| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m.pt) | 640 | 50.2 | 234.7 | 1.83 | 25.9 | 78.9 |
-| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l.pt) | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 |
-| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x.pt) | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 |
+| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>Tesla T4 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
+| ----- | --------------------- | -------------------- | ------------------------------ | --------------------------------------- | ------------------ | ----------------- |
+| [YOLO11n](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt) | 640 | 39.5 | 56.12 ± 0.82 ms | 1.55 ± 0.01 ms | 2.6 | 6.5 |
+| [YOLO11s](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s.pt) | 640 | 47.0 | 90.01 ± 1.17 ms | 2.46 ± 0.00 ms | 9.4 | 21.5 |
+| [YOLO11m](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m.pt) | 640 | 51.5 | 183.20 ± 2.04 ms | 4.70 ± 0.06 ms | 20.1 | 68.0 |
+| [YOLO11l](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l.pt) | 640 | 53.4 | 238.64 ± 1.39 ms | 6.16 ± 0.08 ms | 25.3 | 86.9 |
+| [YOLO11x](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x.pt) | 640 | 54.7 | 462.78 ± 6.66 ms | 11.31 ± 0.24 ms | 56.9 | 194.9 |

 - **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](https://cocodataset.org/) dataset. <br>Reproduce by `yolo val detect data=coco.yaml device=0`
 - **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val detect data=coco.yaml batch=1 device=0|cpu`
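The reproduce commands in these notes use the CLI; the same validation can be run from Python, for example (a sketch assuming a local GPU visible as `device=0`):

```python
from ultralytics import YOLO

# Validate COCO-pretrained YOLO11n, mirroring
# `yolo val detect data=coco.yaml device=0` from the notes above.
model = YOLO("yolo11n.pt")
metrics = model.val(data="coco.yaml", device=0)
print(metrics.box.map)  # mAP50-95
```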
@@ -154,13 +141,13 @@ See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for usage examp
 See [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for usage examples with these models trained on [COCO-Seg](https://docs.ultralytics.com/datasets/segment/coco/), which include 80 pre-trained classes.

-| Model | size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
-| ----- | --------------------- | -------------------- | --------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
-| [YOLOv8n-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-seg.pt) | 640 | 36.7 | 30.5 | 96.1 | 1.21 | 3.4 | 12.6 |
-| [YOLOv8s-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-seg.pt) | 640 | 44.6 | 36.8 | 155.7 | 1.47 | 11.8 | 42.6 |
-| [YOLOv8m-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-seg.pt) | 640 | 49.9 | 40.8 | 317.0 | 2.18 | 27.3 | 110.2 |
-| [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 |
-| [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 |
+| Model | size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>Tesla T4 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
+| ----- | --------------------- | -------------------- | --------------------- | ------------------------------ | --------------------------------------- | ------------------ | ----------------- |
+| [YOLO11n-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-seg.pt) | 640 | 38.9 | 32.0 | 65.90 ± 1.14 ms | 1.84 ± 0.00 ms | 2.9 | 10.4 |
+| [YOLO11s-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-seg.pt) | 640 | 46.6 | 37.8 | 117.56 ± 4.89 ms | 2.94 ± 0.01 ms | 10.1 | 35.5 |
+| [YOLO11m-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-seg.pt) | 640 | 51.5 | 41.5 | 281.63 ± 1.16 ms | 6.31 ± 0.09 ms | 22.4 | 123.3 |
+| [YOLO11l-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-seg.pt) | 640 | 53.4 | 42.9 | 344.16 ± 3.17 ms | 7.78 ± 0.16 ms | 27.6 | 142.2 |
+| [YOLO11x-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-seg.pt) | 640 | 54.7 | 43.8 | 664.50 ± 3.24 ms | 15.75 ± 0.67 ms | 62.1 | 319.0 |

 - **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](https://cocodataset.org/) dataset. <br>Reproduce by `yolo val segment data=coco-seg.yaml device=0`
 - **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val segment data=coco-seg.yaml batch=1 device=0|cpu`
@@ -171,14 +158,13 @@ See [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for usage e
 See [Pose Docs](https://docs.ultralytics.com/tasks/pose/) for usage examples with these models trained on [COCO-Pose](https://docs.ultralytics.com/datasets/pose/coco/), which include 1 pre-trained class, person.

-| Model | size<br><sup>(pixels) | mAP<sup>pose<br>50-95 | mAP<sup>pose<br>50 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
-| ----- | --------------------- | --------------------- | ------------------ | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
-| [YOLOv8n-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-pose.pt) | 640 | 50.4 | 80.1 | 131.8 | 1.18 | 3.3 | 9.2 |
-| [YOLOv8s-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-pose.pt) | 640 | 60.0 | 86.2 | 233.2 | 1.42 | 11.6 | 30.2 |
-| [YOLOv8m-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-pose.pt) | 640 | 65.0 | 88.8 | 456.3 | 2.00 | 26.4 | 81.0 |
-| [YOLOv8l-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-pose.pt) | 640 | 67.6 | 90.0 | 784.5 | 2.59 | 44.4 | 168.6 |
-| [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-pose.pt) | 640 | 69.2 | 90.2 | 1607.1 | 3.73 | 69.4 | 263.2 |
-| [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-pose-p6.pt) | 1280 | 71.6 | 91.2 | 4088.7 | 10.04 | 99.1 | 1066.4 |
+| Model | size<br><sup>(pixels) | mAP<sup>pose<br>50-95 | mAP<sup>pose<br>50 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>Tesla T4 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
+| ----- | --------------------- | --------------------- | ------------------ | ------------------------------ | --------------------------------------- | ------------------ | ----------------- |
+| [YOLO11n-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-pose.pt) | 640 | 50.0 | 81.0 | 52.40 ± 0.51 ms | 1.72 ± 0.01 ms | 2.9 | 7.6 |
+| [YOLO11s-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-pose.pt) | 640 | 58.9 | 86.3 | 90.54 ± 0.59 ms | 2.57 ± 0.00 ms | 9.9 | 23.2 |
+| [YOLO11m-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-pose.pt) | 640 | 64.9 | 89.4 | 187.28 ± 0.77 ms | 4.94 ± 0.05 ms | 20.9 | 71.7 |
+| [YOLO11l-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-pose.pt) | 640 | 66.1 | 89.9 | 247.69 ± 1.10 ms | 6.42 ± 0.13 ms | 26.2 | 90.7 |
+| [YOLO11x-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-pose.pt) | 640 | 69.5 | 91.1 | 487.97 ± 13.91 ms | 12.06 ± 0.20 ms | 58.8 | 203.3 |

 - **mAP<sup>val</sup>** values are for single-model single-scale on [COCO Keypoints val2017](https://cocodataset.org/) dataset. <br>Reproduce by `yolo val pose data=coco-pose.yaml device=0`
 - **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val pose data=coco-pose.yaml batch=1 device=0|cpu`
@@ -189,13 +175,13 @@ See [Pose Docs](https://docs.ultralytics.com/tasks/pose/) for usage examples wit
 See [OBB Docs](https://docs.ultralytics.com/tasks/obb/) for usage examples with these models trained on [DOTAv1](https://docs.ultralytics.com/datasets/obb/dota-v2/#dota-v10/), which include 15 pre-trained classes.

-| Model | size<br><sup>(pixels) | mAP<sup>test<br>50 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
-| ----- | --------------------- | ------------------ | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
-| [YOLOv8n-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-obb.pt) | 1024 | 78.0 | 204.77 | 3.57 | 3.1 | 23.3 |
-| [YOLOv8s-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-obb.pt) | 1024 | 79.5 | 424.88 | 4.07 | 11.4 | 76.3 |
-| [YOLOv8m-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-obb.pt) | 1024 | 80.5 | 763.48 | 7.61 | 26.4 | 208.6 |
-| [YOLOv8l-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-obb.pt) | 1024 | 80.7 | 1278.42 | 11.83 | 44.5 | 433.8 |
-| [YOLOv8x-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-obb.pt) | 1024 | 81.36 | 1759.10 | 13.23 | 69.5 | 676.7 |
+| Model | size<br><sup>(pixels) | mAP<sup>test<br>50 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>Tesla T4 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
+| ----- | --------------------- | ------------------ | ------------------------------ | --------------------------------------- | ------------------ | ----------------- |
+| [YOLO11n-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-obb.pt) | 1024 | 78.4 | 117.56 ± 0.80 ms | 4.43 ± 0.01 ms | 2.7 | 17.2 |
+| [YOLO11s-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-obb.pt) | 1024 | 79.5 | 219.41 ± 4.00 ms | 5.13 ± 0.02 ms | 9.7 | 57.5 |
+| [YOLO11m-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-obb.pt) | 1024 | 80.9 | 562.81 ± 2.87 ms | 10.07 ± 0.38 ms | 20.9 | 183.5 |
+| [YOLO11l-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-obb.pt) | 1024 | 81.0 | 712.49 ± 4.98 ms | 13.46 ± 0.55 ms | 26.2 | 232.0 |
+| [YOLO11x-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-obb.pt) | 1024 | 81.3 | 1408.63 ± 7.67 ms | 28.59 ± 0.96 ms | 58.8 | 520.2 |

 - **mAP<sup>test</sup>** values are for single-model multiscale on [DOTAv1](https://captain-whu.github.io/DOTA/index.html) dataset. <br>Reproduce by `yolo val obb data=DOTAv1.yaml device=0 split=test` and submit merged results to [DOTA evaluation](https://captain-whu.github.io/DOTA/evaluation.html).
 - **Speed** averaged over DOTAv1 val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val obb data=DOTAv1.yaml batch=1 device=0|cpu`
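Unlike the axis-aligned detectors above, OBB models return oriented boxes. A minimal prediction sketch; the `results[0].obb` accessor and its `xyxyxyxy` polygon attribute are assumptions based on the Ultralytics results API, and the image path is a placeholder:

```python
from ultralytics import YOLO

# Predict oriented bounding boxes with a DOTAv1-pretrained model.
model = YOLO("yolo11n-obb.pt")
results = model("path/to/aerial_image.jpg")
print(results[0].obb.xyxyxyxy)  # oriented boxes as 4-point polygons
```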
@@ -206,13 +192,13 @@ See [OBB Docs](https://docs.ultralytics.com/tasks/obb/) for usage examples with
 See [Classification Docs](https://docs.ultralytics.com/tasks/classify/) for usage examples with these models trained on [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/), which include 1000 pretrained classes.

-| Model | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) at 640 |
-| ----- | --------------------- | ---------------- | ---------------- | ------------------------------ | ----------------------------------- | ------------------ | ------------------------ |
-| [YOLOv8n-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-cls.pt) | 224 | 69.0 | 88.3 | 12.9 | 0.31 | 2.7 | 4.3 |
-| [YOLOv8s-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-cls.pt) | 224 | 73.8 | 91.7 | 23.4 | 0.35 | 6.4 | 13.5 |
-| [YOLOv8m-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-cls.pt) | 224 | 76.8 | 93.5 | 85.4 | 0.62 | 17.0 | 42.7 |
-| [YOLOv8l-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-cls.pt) | 224 | 78.3 | 94.2 | 163.0 | 0.87 | 37.5 | 99.7 |
-| [YOLOv8x-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-cls.pt) | 224 | 79.0 | 94.6 | 232.0 | 1.01 | 57.4 | 154.8 |
+| Model | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>Tesla T4 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) at 640 |
+| ----- | --------------------- | ---------------- | ---------------- | ------------------------------ | --------------------------------------- | ------------------ | ------------------------ |
+| [YOLO11n-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-cls.pt) | 224 | 70.0 | 89.4 | 5.03 ± 0.32 ms | 1.10 ± 0.01 ms | 1.6 | 3.3 |
+| [YOLO11s-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-cls.pt) | 224 | 75.4 | 92.7 | 7.89 ± 0.18 ms | 1.34 ± 0.01 ms | 5.5 | 12.1 |
+| [YOLO11m-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-cls.pt) | 224 | 77.3 | 93.9 | 17.17 ± 0.40 ms | 1.95 ± 0.00 ms | 10.4 | 39.3 |
+| [YOLO11l-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-cls.pt) | 224 | 78.3 | 94.3 | 23.17 ± 0.29 ms | 2.76 ± 0.00 ms | 12.9 | 49.4 |
+| [YOLO11x-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-cls.pt) | 224 | 79.5 | 94.9 | 41.41 ± 0.94 ms | 3.82 ± 0.00 ms | 28.4 | 110.4 |

 - **acc** values are model accuracies on the [ImageNet](https://www.image-net.org/) dataset validation set. <br>Reproduce by `yolo val classify data=path/to/ImageNet device=0`
 - **Speed** averaged over ImageNet val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu`
@@ -245,18 +231,18 @@ Our key integrations with leading AI platforms extend the functionality of Ultra
 | Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW |
 | :------: | :-----------: | :----------: | :----------------: |
-| Label and export your custom datasets directly to YOLOv8 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv8 using [ClearML](https://clear.ml/) (open-source!) | Free forever, [Comet](https://bit.ly/yolov8-readme-comet) lets you save YOLOv8 models, resume training, and interactively visualize and debug predictions | Run YOLOv8 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |
+| Label and export your custom datasets directly to YOLO11 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLO11 using [ClearML](https://clear.ml/) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLO11 models, resume training, and interactively visualize and debug predictions | Run YOLO11 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |

 ## <div align="center">Ultralytics HUB</div>

-Experience seamless AI with [Ultralytics HUB](https://www.ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://www.ultralytics.com/app-install). Start your journey for **Free** now!
+Experience seamless AI with [Ultralytics HUB](https://www.ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLO11 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://www.ultralytics.com/app-install). Start your journey for **Free** now!

 <a href="https://ultralytics.com/hub" target="_blank">
   <img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png" alt="Ultralytics HUB preview image"></a>

 ## <div align="center">Contribute</div>

-We love your input! YOLOv5 and YOLOv8 would not be possible without help from our community. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you 🙏 to all our contributors!
+We love your input! Ultralytics YOLO would not be possible without help from our community. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you 🙏 to all our contributors!

 <!-- SVG image from https://opencollective.com/ultralytics/contributors.svg?width=990 -->

diff --git a/README.zh-CN.md b/README.zh-CN.md
@@ -8,25 +8,25 @@
 <div>
   <a href="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml"><img src="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml/badge.svg" alt="Ultralytics CI"></a>
-  <a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="YOLOv8 Citation"></a>
-  <a href="https://hub.docker.com/r/ultralytics/ultralytics"><img src="https://img.shields.io/docker/pulls/ultralytics/ultralytics?logo=docker" alt="Docker Pulls"></a>
-  <a href="https://ultralytics.com/discord"><img alt="Discord" src="https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue"></a>
+  <a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="Ultralytics YOLO Citation"></a>
+  <a href="https://hub.docker.com/r/ultralytics/ultralytics"><img src="https://img.shields.io/docker/pulls/ultralytics/ultralytics?logo=docker" alt="Ultralytics Docker Pulls"></a>
+  <a href="https://ultralytics.com/discord"><img alt="Ultralytics Discord" src="https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue"></a>
   <a href="https://community.ultralytics.com"><img alt="Ultralytics Forums" src="https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue"></a>
   <a href="https://reddit.com/r/ultralytics"><img alt="Ultralytics Reddit" src="https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue"></a>
   <br>
-  <a href="https://console.paperspace.com/github/ultralytics/ultralytics"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run on Gradient"></a>
-  <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
-  <a href="https://www.kaggle.com/ultralytics/yolov8"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
+  <a href="https://console.paperspace.com/github/ultralytics/ultralytics"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run Ultralytics on Gradient"></a>
+  <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open Ultralytics In Colab"></a>
+  <a href="https://www.kaggle.com/ultralytics/yolov8"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open Ultralytics In Kaggle"></a>
 </div>
 <br>

-[Ultralytics](https://www.ultralytics.com/) [YOLOv8](https://github.com/ultralytics/ultralytics) 是一款前沿、最先进(SOTA)的模型,基于先前 YOLO 版本的成功,引入了新功能和改进,进一步提升性能和灵活性。YOLOv8 设计快速、准确且易于使用,使其成为各种物体检测与跟踪、实例分割、图像分类和姿态估计任务的绝佳选择。
+[Ultralytics](https://www.ultralytics.com/) [YOLO11](https://github.com/ultralytics/ultralytics) 是一个尖端的、最先进(SOTA)的模型,基于之前 YOLO 版本的成功,并引入了新功能和改进以进一步提升性能和灵活性。YOLO11 被设计得快速、准确且易于使用,是进行广泛对象检测和跟踪、实例分割、图像分类和姿态估计任务的理想选择。

-我们希望这里的资源能帮助您充分利用 YOLOv8。请浏览 YOLOv8 的<a href="https://docs.ultralytics.com/">文档</a>了解详情,如需支持、提问或讨论,请在 <a href="https://github.com/ultralytics/ultralytics/issues/new/choose">GitHub</a> 上提出问题,成为 Ultralytics <a href="https://ultralytics.com/discord">Discord</a>、<a href="https://reddit.com/r/ultralytics">Reddit</a> 和 <a href="https://community.ultralytics.com">论坛</a> 的成员!
+我们希望这里的资源能帮助你充分利用 YOLO。请浏览 Ultralytics <a href="https://docs.ultralytics.com/">文档</a> 以获取详细信息,在 <a href="https://github.com/ultralytics/ultralytics/issues/new/choose">GitHub</a> 上提出问题或讨论,成为 Ultralytics <a href="https://ultralytics.com/discord">Discord</a>、<a href="https://reddit.com/r/ultralytics">Reddit</a> 和 <a href="https://community.ultralytics.com">论坛</a> 的成员!

-如需申请企业许可,请在 [Ultralytics Licensing](https://www.ultralytics.com/license) 处填写表格。
+想申请企业许可证,请完成 [Ultralytics Licensing](https://www.ultralytics.com/license) 上的表单。

-<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/yolo-comparison-plots.png" alt="YOLOv8 performance plots"></a>
+<img width="100%" src="https://github.com/user-attachments/assets/a311a4ed-bbf2-43b5-8012-5f183a28a845" alt="YOLO11 performance plots"></a>

 <div align="center">
   <a href="https://github.com/ultralytics"><img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="Ultralytics GitHub"></a>
@@ -45,16 +45,14 @@
 </div>
 </div>

-以下是提供的内容的中文翻译:

 ## <div align="center">文档</div>

-请参阅下面的快速安装和使用示例,以及 [YOLOv8 文档](https://docs.ultralytics.com/) 上有关训练、验证、预测和部署的完整文档。
+请参阅下方的快速开始安装和使用示例,并查看我们的 [文档](https://docs.ultralytics.com/) 以获取有关训练、验证、预测和部署的完整文档。

 <details open>
 <summary>安装</summary>

-使用Pip在一个[**Python>=3.8**](https://www.python.org/)环境中安装`ultralytics`包,此环境还需包含[**PyTorch>=1.8**](https://pytorch.org/get-started/locally/)。这也会安装所有必要的[依赖项](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml)。
+在 [**Python>=3.8**](https://www.python.org/) 环境中使用 [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/) 通过 pip 安装包含所有[依赖项](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) 的 ultralytics 包。

 [![PyPI - Version](https://img.shields.io/pypi/v/ultralytics?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics/) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics/)
@ -62,168 +60,154 @@
pip install ultralytics pip install ultralytics
``` ```
如需使用包括[Conda](https://anaconda.org/conda-forge/ultralytics),[Docker](https://hub.docker.com/r/ultralytics/ultralytics)和Git在内的其他安装方法,请参考[快速入门指南](https://docs.ultralytics.com/quickstart/)。 有关其他安装方法,包括 [Conda](https://anaconda.org/conda-forge/ultralytics)、[Docker](https://hub.docker.com/r/ultralytics/ultralytics) 和 Git,请参阅 [快速开始指南](https://docs.ultralytics.com/quickstart/)。
[![Conda Version](https://img.shields.io/conda/vn/conda-forge/ultralytics?logo=condaforge)](https://anaconda.org/conda-forge/ultralytics) [![Docker Image Version](https://img.shields.io/docker/v/ultralytics/ultralytics?sort=semver&logo=docker)](https://hub.docker.com/r/ultralytics/ultralytics) [![Conda Version](https://img.shields.io/conda/vn/conda-forge/ultralytics?logo=condaforge)](https://anaconda.org/conda-forge/ultralytics) [![Docker Image Version](https://img.shields.io/docker/v/ultralytics/ultralytics?sort=semver&logo=docker)](https://hub.docker.com/r/ultralytics/ultralytics)
</details> </details>
<details open> <details open>
<summary>Usage</summary> <summary>使用</summary>
### CLI ### CLI
YOLOv8 可以在命令行界面(CLI)中直接使用,只需输入 `yolo` 命令: YOLO 可以直接在命令行接口(CLI)中使用 `yolo` 命令:
```bash ```bash
yolo predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg' yolo predict model=yolo11n.pt source='https://ultralytics.com/images/bus.jpg'
``` ```
`yolo`用于各种任务和模式,并接受其他参数,例如 `imgsz=640`。查看 YOLOv8 [CLI 文档](https://docs.ultralytics.com/usage/cli/)以获取示例。 `yolo`以用于各种任务和模式,并接受额外参数,例如 `imgsz=640`。请参阅 YOLO [CLI 文档](https://docs.ultralytics.com/usage/cli/) 以获取示例。
### Python ### Python
YOLOv8 也可以在 Python 环境中直接使用,并接受与上述 CLI 示例中相同的[参数](https://docs.ultralytics.com/usage/cfg/): YOLO 也可以直接在 Python 环境中使用,并接受与上述 CLI 示例中相同的[参数](https://docs.ultralytics.com/usage/cfg/):
```python ```python
from ultralytics import YOLO from ultralytics import YOLO
# 加载模型 # 加载模型
model = YOLO("yolov8n.pt") model = YOLO("yolo11n.pt")
# 训练模型 # 训练模型
train_results = model.train( train_results = model.train(
data="coco8.yaml", # 数据配置文件的路径 data="coco8.yaml", # 数据集 YAML 路径
epochs=100, # 训练的轮数 epochs=100, # 训练轮次
imgsz=640, # 训练图像大小 imgsz=640, # 训练图像尺寸
device="cpu", # 运行设备,例如 device=0 或 device=0,1,2,3 或 device=cpu device="cpu", # 运行设备,例如 device=0 或 device=0,1,2,3 或 device=cpu
) )
# 在验证集上评估模型性能 # 评估模型在验证集上的性能
metrics = model.val() metrics = model.val()
# 对图像进行目标检测 # 在图像上执行对象检测
results = model("path/to/image.jpg") results = model("path/to/image.jpg")
results[0].show() results[0].show()
# 将模型导出为 ONNX 格式 # 将模型导出为 ONNX 格式
path = model.export(format="onnx") # 返回导出模型路径 path = model.export(format="onnx") # 返回导出模型路径
``` ```
查看 YOLOv8 [Python 文档](https://docs.ultralytics.com/usage/python/)以获取更多示例。 请参阅 YOLO [Python 文档](https://docs.ultralytics.com/usage/python/) 以获取更多示例。
</details> </details>
### Notebooks

Ultralytics provides interactive notebooks for YOLOv8, covering training, validation, tracking, and more. Each notebook is paired with a [YouTube](https://www.youtube.com/ultralytics?sub_confirmation=1) tutorial, making it easy to learn and implement advanced YOLOv8 features.

| Docs | Notebook | YouTube |
| ---- | -------- | :-----: |
| <a href="https://docs.ultralytics.com/modes/">YOLOv8 Train, Val, Predict and Export Modes</a> | <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | <a href="https://youtu.be/j8uQc0qB91s"><center><img width=30% src="https://raw.githubusercontent.com/ultralytics/assets/main/social/logo-social-youtube-rect.png" alt="Ultralytics Youtube Video"></center></a> |
| <a href="https://docs.ultralytics.com/hub/quickstart/">Ultralytics HUB Quickstart</a> | <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/hub.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | <a href="https://youtu.be/lveF9iCMIzc"><center><img width=30% src="https://raw.githubusercontent.com/ultralytics/assets/main/social/logo-social-youtube-rect.png" alt="Ultralytics Youtube Video"></center></a> |
| <a href="https://docs.ultralytics.com/modes/track/">Multi-Object Tracking in Videos with YOLOv8</a> | <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/object_tracking.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | <a href="https://youtu.be/hHyHmOtmEgs"><center><img width=30% src="https://raw.githubusercontent.com/ultralytics/assets/main/social/logo-social-youtube-rect.png" alt="Ultralytics Youtube Video"></center></a> |
| <a href="https://docs.ultralytics.com/guides/object-counting/">Object Counting in Videos with YOLOv8</a> | <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/object_counting.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | <a href="https://youtu.be/Ag2e-5_NpS0"><center><img width=30% src="https://raw.githubusercontent.com/ultralytics/assets/main/social/logo-social-youtube-rect.png" alt="Ultralytics Youtube Video"></center></a> |
| <a href="https://docs.ultralytics.com/guides/heatmaps/">Heatmaps in Videos with YOLOv8</a> | <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/heatmaps.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | <a href="https://youtu.be/4ezde5-nZZw"><center><img width=30% src="https://raw.githubusercontent.com/ultralytics/assets/main/social/logo-social-youtube-rect.png" alt="Ultralytics Youtube Video"></center></a> |
| <a href="https://docs.ultralytics.com/datasets/explorer/">Ultralytics Datasets Explorer with SQL and OpenAI Integration 🚀 New</a> | <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/docs/en/datasets/explorer/explorer.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> | <a href="https://youtu.be/3VryynorQeo"><center><img width=30% src="https://raw.githubusercontent.com/ultralytics/assets/main/social/logo-social-youtube-rect.png" alt="Ultralytics Youtube Video"></center></a> |
## <div align="center">Models</div>

YOLO11 [Detection](https://docs.ultralytics.com/tasks/detect/), [Segmentation](https://docs.ultralytics.com/tasks/segment/) and [Pose](https://docs.ultralytics.com/tasks/pose/) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset are available here, as well as YOLO11 [Classification](https://docs.ultralytics.com/tasks/classify/) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) dataset. All Detection, Segmentation and Pose models support [Tracking](https://docs.ultralytics.com/modes/track/) mode.

<img width="1024" src="https://raw.githubusercontent.com/ultralytics/assets/main/im/banner-tasks.png" alt="Ultralytics YOLO supported tasks">

All [models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
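As a minimal sketch of what "first use" means in practice (assuming the `ultralytics` package is installed and the machine has internet access):

```python
from ultralytics import YOLO

# The first call fetches yolo11n.pt from the latest Ultralytics release
# if it is not already cached locally, then loads the weights.
model = YOLO("yolo11n.pt")
print(model.task)  # "detect"
```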
<details open><summary>Detection (COCO)</summary>

See the [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for usage examples with these models trained on [COCO](https://docs.ultralytics.com/datasets/detect/coco/), which include 80 pretrained classes.
| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>Tesla T4 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
| ----- | --------------------- | -------------------- | ------------------------------ | --------------------------------------- | ------------------ | ----------------- |
| [YOLO11n](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt) | 640 | 39.5 | 56.12 ± 0.82 | 1.55 ± 0.01 | 2.6 | 6.5 |
| [YOLO11s](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s.pt) | 640 | 47.0 | 90.01 ± 1.17 | 2.46 ± 0.00 | 9.4 | 21.5 |
| [YOLO11m](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m.pt) | 640 | 51.5 | 183.20 ± 2.04 | 4.70 ± 0.06 | 20.1 | 68.0 |
| [YOLO11l](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l.pt) | 640 | 53.4 | 238.64 ± 1.39 | 6.16 ± 0.08 | 25.3 | 86.9 |
| [YOLO11x](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x.pt) | 640 | 54.7 | 462.78 ± 6.66 | 11.31 ± 0.24 | 56.9 | 194.9 |
- **mAP<sup>val</sup>** values are for single-model single-scale on the [COCO val2017](https://cocodataset.org/) dataset. <br>Reproduce with `yolo val detect data=coco.yaml device=0`, or see the Python sketch below.
- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce with `yolo val detect data=coco.yaml batch=1 device=0|cpu`
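The same check can be run from Python instead of the CLI; a minimal sketch, assuming a CUDA device and a local (or auto-downloadable) COCO dataset:

```python
from ultralytics import YOLO

# Validate a pretrained detection model on COCO
model = YOLO("yolo11n.pt")
metrics = model.val(data="coco.yaml", device=0)
print(metrics.box.map)  # mAP50-95, comparable to the table above
```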
</details>

<details><summary>Segmentation (COCO)</summary>

See the [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for usage examples with these models trained on [COCO-Seg](https://docs.ultralytics.com/datasets/segment/coco/), which include 80 pretrained classes.
| Model | size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>Tesla T4 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
| ----- | --------------------- | -------------------- | --------------------- | ------------------------------ | --------------------------------------- | ------------------ | ----------------- |
| [YOLO11n-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-seg.pt) | 640 | 38.9 | 32.0 | 65.90 ± 1.14 | 1.84 ± 0.00 | 2.9 | 10.4 |
| [YOLO11s-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-seg.pt) | 640 | 46.6 | 37.8 | 117.56 ± 4.89 | 2.94 ± 0.01 | 10.1 | 35.5 |
| [YOLO11m-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-seg.pt) | 640 | 51.5 | 41.5 | 281.63 ± 1.16 | 6.31 ± 0.09 | 22.4 | 123.3 |
| [YOLO11l-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-seg.pt) | 640 | 53.4 | 42.9 | 344.16 ± 3.17 | 7.78 ± 0.16 | 27.6 | 142.2 |
| [YOLO11x-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-seg.pt) | 640 | 54.7 | 43.8 | 664.50 ± 3.24 | 15.75 ± 0.67 | 62.1 | 319.0 |
- **mAP<sup>val</sup>** values are for single-model single-scale on the [COCO val2017](https://cocodataset.org/) dataset. <br>Reproduce with `yolo val segment data=coco-seg.yaml device=0`
- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce with `yolo val segment data=coco-seg.yaml batch=1 device=0|cpu`

</details>
<details><summary>Pose (COCO)</summary>

See the [Pose Docs](https://docs.ultralytics.com/tasks/pose/) for usage examples with these models trained on [COCO-Pose](https://docs.ultralytics.com/datasets/pose/coco/), which include 1 pretrained class (person).
| Model | size<br><sup>(pixels) | mAP<sup>pose<br>50-95 | mAP<sup>pose<br>50 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>Tesla T4 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
| ----- | --------------------- | --------------------- | ------------------ | ------------------------------ | --------------------------------------- | ------------------ | ----------------- |
| [YOLO11n-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-pose.pt) | 640 | 50.0 | 81.0 | 52.40 ± 0.51 | 1.72 ± 0.01 | 2.9 | 7.6 |
| [YOLO11s-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-pose.pt) | 640 | 58.9 | 86.3 | 90.54 ± 0.59 | 2.57 ± 0.00 | 9.9 | 23.2 |
| [YOLO11m-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-pose.pt) | 640 | 64.9 | 89.4 | 187.28 ± 0.77 | 4.94 ± 0.05 | 20.9 | 71.7 |
| [YOLO11l-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-pose.pt) | 640 | 66.1 | 89.9 | 247.69 ± 1.10 | 6.42 ± 0.13 | 26.2 | 90.7 |
| [YOLO11x-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-pose.pt) | 640 | 69.5 | 91.1 | 487.97 ± 13.91 | 12.06 ± 0.20 | 58.8 | 203.3 |
- **mAP<sup>val</sup>** values are for single-model single-scale on the [COCO Keypoints val2017](https://cocodataset.org/) dataset. <br>Reproduce with `yolo val pose data=coco-pose.yaml device=0`
- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce with `yolo val pose data=coco-pose.yaml batch=1 device=0|cpu`

</details>
<details><summary>OBB (DOTAv1)</summary>

See the [OBB Docs](https://docs.ultralytics.com/tasks/obb/) for usage examples with these models trained on [DOTAv1](https://docs.ultralytics.com/datasets/obb/dota-v2/#dota-v10/), which include 15 pretrained classes.
| Model | size<br><sup>(pixels) | mAP<sup>test<br>50 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>Tesla T4 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
| ----- | --------------------- | ------------------ | ------------------------------ | --------------------------------------- | ------------------ | ----------------- |
| [YOLO11n-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-obb.pt) | 1024 | 78.4 | 117.56 ± 0.80 | 4.43 ± 0.01 | 2.7 | 17.2 |
| [YOLO11s-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-obb.pt) | 1024 | 79.5 | 219.41 ± 4.00 | 5.13 ± 0.02 | 9.7 | 57.5 |
| [YOLO11m-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-obb.pt) | 1024 | 80.9 | 562.81 ± 2.87 | 10.07 ± 0.38 | 20.9 | 183.5 |
| [YOLO11l-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-obb.pt) | 1024 | 81.0 | 712.49 ± 4.98 | 13.46 ± 0.55 | 26.2 | 232.0 |
| [YOLO11x-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-obb.pt) | 1024 | 81.3 | 1408.63 ± 7.67 | 28.59 ± 0.96 | 58.8 | 520.2 |
- **mAP<sup>test</sup>** values are for single-model multiscale on the [DOTAv1](https://captain-whu.github.io/DOTA/index.html) dataset. <br>Reproduce with `yolo val obb data=DOTAv1.yaml device=0 split=test` and submit merged results to the [DOTA evaluation](https://captain-whu.github.io/DOTA/evaluation.html) server.
- **Speed** averaged over DOTAv1 val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce with `yolo val obb data=DOTAv1.yaml batch=1 device=0|cpu`

</details>
<details><summary>Classification (ImageNet)</summary>

See the [Classification Docs](https://docs.ultralytics.com/tasks/classify/) for usage examples with these models trained on [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/), which include 1000 pretrained classes.
| Model | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>Tesla T4 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) at 640 |
| ----- | --------------------- | ---------------- | ---------------- | ------------------------------ | --------------------------------------- | ------------------ | ------------------------ |
| [YOLO11n-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-cls.pt) | 224 | 70.0 | 89.4 | 5.03 ± 0.32 | 1.10 ± 0.01 | 1.6 | 3.3 |
| [YOLO11s-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-cls.pt) | 224 | 75.4 | 92.7 | 7.89 ± 0.18 | 1.34 ± 0.01 | 5.5 | 12.1 |
| [YOLO11m-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-cls.pt) | 224 | 77.3 | 93.9 | 17.17 ± 0.40 | 1.95 ± 0.00 | 10.4 | 39.3 |
| [YOLO11l-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-cls.pt) | 224 | 78.3 | 94.3 | 23.17 ± 0.29 | 2.76 ± 0.00 | 12.9 | 49.4 |
| [YOLO11x-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-cls.pt) | 224 | 79.5 | 94.9 | 41.41 ± 0.94 | 3.82 ± 0.00 | 28.4 | 110.4 |
- **acc** values are model accuracies on the [ImageNet](https://www.image-net.org/) dataset validation set. <br>Reproduce with `yolo val classify data=path/to/ImageNet device=0`
- **Speed** averaged over ImageNet val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce with `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu`

</details>
## <div align="center">Integrations</div>

Our key integrations with leading AI platforms extend the functionality of Ultralytics' offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics, in collaboration with [Roboflow](https://roboflow.com/?ref=ultralytics), ClearML, [Comet](https://bit.ly/yolov8-readme-comet), Neural Magic and [OpenVINO](https://docs.ultralytics.com/integrations/openvino/), can optimize your AI workflow.

<br>
<a href="https://ultralytics.com/hub" target="_blank">
@ -245,36 +229,36 @@ Ultralytics provides interactive notebooks for YOLOv8 covering training, validation, …
<img src="https://github.com/ultralytics/assets/raw/main/partners/logo-neuralmagic.png" width="10%" alt="NeuralMagic logo"></a>
</div>

| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW |
| :------: | :-----------: | :---------: | :----------------: |
| Label and export your custom datasets directly to YOLO11 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLO11 using [ClearML](https://clear.ml/) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLO11 models, resume training, and interactively visualize and debug predictions | Run YOLO11 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |
## <div align="center">Ultralytics HUB</div>

Experience seamless AI with [Ultralytics HUB](https://www.ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLO11 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://www.ultralytics.com/app-install). Start your journey for **free** now!

<a href="https://ultralytics.com/hub" target="_blank">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png" alt="Ultralytics HUB preview image"></a>
## <div align="center">Contribute</div>

We love your input! Ultralytics YOLO would not be possible without help from our community. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you 🙏 to all our contributors!

<!-- SVG image from https://opencollective.com/ultralytics/contributors.svg?width=990 -->
<a href="https://github.com/ultralytics/ultralytics/graphs/contributors">
<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/image-contributors.png" alt="Ultralytics open-source contributors"></a>
## <div align="center">License</div>

Ultralytics offers two licensing options to accommodate diverse use cases:

- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/license) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for more details.
- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://www.ultralytics.com/license).

## <div align="center">Contact</div>

For Ultralytics bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues). Become a member of the Ultralytics [Discord](https://discord.com/invite/ultralytics), [Reddit](https://www.reddit.com/r/ultralytics/), or [Forums](https://community.ultralytics.com/) for asking questions, sharing projects, learning discussions, or for help with all things Ultralytics!

<br>
<div align="center">

@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Builds ultralytics/ultralytics:latest image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
# Image is CUDA-optimized for YOLO11 single/multi-GPU training and inference

# Start FROM PyTorch image https://hub.docker.com/r/pytorch/pytorch or nvcr.io/nvidia/pytorch:23.03-py3
FROM pytorch/pytorch:2.3.1-cuda12.1-cudnn8-runtime
@ -36,7 +36,7 @@ WORKDIR /ultralytics
# Copy contents and configure git
COPY . .
RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .

# Install pip packages
RUN python3 -m pip install --upgrade pip wheel
@ -45,8 +45,8 @@ RUN pip install -e ".[export]" "tensorrt-cu12==10.1.0" "albumentations>=1.4.6" c
# Run exports to AutoInstall packages
# Edge TPU export fails the first time so is run twice here
RUN yolo export model=tmp/yolo11n.pt format=edgetpu imgsz=32 || yolo export model=tmp/yolo11n.pt format=edgetpu imgsz=32
RUN yolo export model=tmp/yolo11n.pt format=ncnn imgsz=32
# Requires <= Python 3.10, bug with paddlepaddle==2.5.0 https://github.com/PaddlePaddle/X2Paddle/issues/991
RUN pip install "paddlepaddle>=2.6.0" x2paddle
# Fix error: `np.bool` was a deprecated alias for the builtin `bool` segmentation error in Tests

@ -31,7 +31,7 @@ WORKDIR /ultralytics
# Copy contents and configure git
COPY . .
RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .

# Install pip packages
RUN python3 -m pip install --upgrade pip wheel

@ -23,7 +23,7 @@ RUN apt-get update && \
&& rm -rf /var/lib/apt/lists/*

# Copy contents
ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .

# Install conda packages
# mkl required to fix 'OSError: libmkl_intel_lp64.so.2: cannot open shared object file: No such file or directory'

@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Builds ultralytics/ultralytics:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLO11 deployments

# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
FROM ubuntu:23.10
@ -29,15 +29,15 @@ WORKDIR /ultralytics
# Copy contents and configure git
COPY . .
RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .

# Install pip packages
RUN python3 -m pip install --upgrade pip wheel
RUN pip install -e ".[export]" --extra-index-url https://download.pytorch.org/whl/cpu

# Run exports to AutoInstall packages
RUN yolo export model=tmp/yolo11n.pt format=edgetpu imgsz=32
RUN yolo export model=tmp/yolo11n.pt format=ncnn imgsz=32
# Requires Python<=3.10, bug with paddlepaddle==2.5.0 https://github.com/PaddlePaddle/X2Paddle/issues/991
# RUN pip install "paddlepaddle>=2.6.0" x2paddle

@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Builds ultralytics/ultralytics:jetson-jetpack4 image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
# Supports JetPack4.x for YOLO11 on Jetson Nano, TX2, Xavier NX, AGX Xavier

# Start FROM https://catalog.ngc.nvidia.com/orgs/nvidia/containers/l4t-cuda
FROM nvcr.io/nvidia/l4t-cuda:10.2.460-runtime
@ -35,7 +35,7 @@ WORKDIR /ultralytics
# Copy contents and configure git
COPY . .
RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .

# Download onnxruntime-gpu 1.8.0 and tensorrt 8.2.0.6
# Other versions can be seen in https://elinux.org/Jetson_Zoo and https://forums.developer.nvidia.com/t/pytorch-for-jetson/72048

@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Builds ultralytics/ultralytics:jetson-jetson-jetpack5 image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
# Supports JetPack5.x for YOLO11 on Jetson Xavier NX, AGX Xavier, AGX Orin, Orin Nano and Orin NX

# Start FROM https://catalog.ngc.nvidia.com/orgs/nvidia/containers/l4t-pytorch
FROM nvcr.io/nvidia/l4t-pytorch:r35.2.1-pth2.0-py3
@ -31,7 +31,7 @@ WORKDIR /ultralytics
# Copy contents and configure git
COPY . .
RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .

# Remove opencv-python from Ultralytics dependencies as it conflicts with opencv-python installed in base image
RUN sed -i '/opencv-python/d' pyproject.toml

@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Builds ultralytics/ultralytics:jetson-jetpack6 image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
# Supports JetPack6.x for YOLO11 on Jetson AGX Orin, Orin NX and Orin Nano Series

# Start FROM https://catalog.ngc.nvidia.com/orgs/nvidia/containers/l4t-jetpack
FROM nvcr.io/nvidia/l4t-jetpack:r36.3.0
@ -28,7 +28,7 @@ WORKDIR /ultralytics
# Copy contents and configure git
COPY . .
RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .

# Download onnxruntime-gpu 1.18.0 from https://elinux.org/Jetson_Zoo and https://forums.developer.nvidia.com/t/pytorch-for-jetson/72048
ADD https://nvidia.box.com/shared/static/48dtuob7meiw6ebgfsfqakc9vse62sg4.whl onnxruntime_gpu-1.18.0-cp310-cp310-linux_aarch64.whl

@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Builds ultralytics/ultralytics:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics
# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLO11 deployments

# Use official Python base image for reproducibility (3.11.10 for export and 3.12.6 for inference)
FROM python:3.11.10-slim-bookworm
@ -29,15 +29,15 @@ WORKDIR /ultralytics
# Copy contents and configure git
COPY . .
RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config
ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt .

# Install pip packages
RUN python3 -m pip install --upgrade pip wheel
RUN pip install -e ".[export]" --extra-index-url https://download.pytorch.org/whl/cpu

# Run exports to AutoInstall packages
RUN yolo export model=tmp/yolo11n.pt format=edgetpu imgsz=32
RUN yolo export model=tmp/yolo11n.pt format=ncnn imgsz=32
# Requires Python<=3.10, bug with paddlepaddle==2.5.0 https://github.com/PaddlePaddle/X2Paddle/issues/991
RUN pip install "paddlepaddle>=2.6.0" x2paddle

@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Builds GitHub actions CI runner image for deployment to DockerHub https://hub.docker.com/r/ultralytics/ultralytics
# Image is CUDA-optimized for YOLO11 single/multi-GPU training and inference tests

# Start FROM Ultralytics GPU image
FROM ultralytics/ultralytics:latest

@ -106,3 +106,70 @@ If you use the hand-keypoints dataset in your research or development work, plea
The images were collected and used under the respective licenses provided by each platform and are distributed under the [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-nc-sa/4.0/).

We would also like to acknowledge the creator of this dataset, [Rion Dsilva](https://www.linkedin.com/in/rion-dsilva-043464229/), for his great contribution to Vision AI research.
## FAQ
### How do I train a YOLOv8 model on the Hand Keypoints dataset?
To train a YOLOv8 model on the Hand Keypoints dataset, you can use either Python or the command line interface (CLI). Here's an example for training a YOLOv8n-pose model for 100 epochs with an image size of 640:
!!! Example
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolov8n-pose.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="hand-keypoints.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo pose train data=hand-keypoints.yaml model=yolov8n-pose.pt epochs=100 imgsz=640
```
For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
### What are the key features of the Hand Keypoints dataset?
The Hand Keypoints dataset is designed for advanced pose estimation tasks and includes several key features:
- **Large Dataset**: Contains 26,768 images with hand keypoint annotations.
- **YOLOv8 Compatibility**: Ready for use with YOLOv8 models.
- **21 Keypoints**: Detailed hand pose representation, including wrist and finger joints.
For more details, you can explore the [Hand Keypoints Dataset](#introduction) section.
### What applications can benefit from using the Hand Keypoints dataset?
The Hand Keypoints dataset can be applied in various fields, including:
- **Gesture Recognition**: Enhancing human-computer interaction.
- **AR/VR Controls**: Improving user experience in augmented and virtual reality.
- **Robotic Manipulation**: Enabling precise control of robotic hands.
- **Healthcare**: Analyzing hand movements for medical diagnostics.
- **Animation**: Capturing motion for realistic animations.
- **Biometric Authentication**: Enhancing security systems.
For more information, refer to the [Applications](#applications) section.
### How is the Hand Keypoints dataset structured?
The Hand Keypoints dataset is divided into two subsets:
1. **Train**: Contains 18,776 images for training pose estimation models.
2. **Val**: Contains 7,992 images for validation purposes during model training.
This structure ensures a comprehensive training and validation process. For more details, see the [Dataset Structure](#dataset-structure) section.
### How do I use the dataset YAML file for training?
The dataset configuration is defined in a YAML file, which includes paths, classes, and other relevant information. The `hand-keypoints.yaml` file can be found at [hand-keypoints.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/hand-keypoints.yaml).
To use this YAML file for training, specify it in your training script or CLI command as shown in the training example above. For more details, refer to the [Dataset YAML](#dataset-yaml) section.
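If you want to sanity-check the configuration programmatically before training, here is a minimal sketch, assuming a local copy of the file and the standard Ultralytics pose dataset schema (`kpt_shape`, `names`, and split paths):

```python
import yaml

# Inspect the dataset configuration before launching a training run
with open("hand-keypoints.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["kpt_shape"])  # keypoint shape, e.g. [21, 3] for 21 keypoints with (x, y, visibility)
print(cfg["names"])  # class names, e.g. {0: "hand"}
```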

@ -58,7 +58,7 @@ Explore the YOLOv8 Docs, a comprehensive resource designed to help you understan
- **Predict** new images and videos with YOLOv8 &nbsp; [:octicons-image-16: Predict on Images](modes/predict.md){ .md-button }
- **Train** a new YOLOv8 model on your own custom dataset &nbsp; [:fontawesome-solid-brain: Train a Model](modes/train.md){ .md-button }
- **Tasks** YOLOv8 tasks like segment, classify, pose and track &nbsp; [:material-magnify-expand: Explore Tasks](tasks/index.md){ .md-button }
- **[YOLO11](models/yolo11.md) NEW 🚀**: Ultralytics' latest SOTA models &nbsp; [:material-magnify-expand: Explore YOLO11](models/yolo11.md){ .md-button }

<p align="center">
<br>
@ -84,6 +84,7 @@ Explore the YOLOv8 Docs, a comprehensive resource designed to help you understan
- [YOLOv8](https://github.com/ultralytics/ultralytics) is the latest version of YOLO by Ultralytics. As a cutting-edge, state-of-the-art (SOTA) model, YOLOv8 builds on the success of previous versions, introducing new features and improvements for enhanced performance, flexibility, and efficiency. YOLOv8 supports a full range of vision AI tasks, including [detection](tasks/detect.md), [segmentation](tasks/segment.md), [pose estimation](tasks/pose.md), [tracking](modes/track.md), and [classification](tasks/classify.md). This versatility allows users to leverage YOLOv8's capabilities across diverse applications and domains.
- [YOLOv9](models/yolov9.md) introduces innovative methods like Programmable Gradient Information (PGI) and the Generalized Efficient Layer Aggregation Network (GELAN).
- [YOLOv10](models/yolov10.md) is created by researchers from [Tsinghua University](https://www.tsinghua.edu.cn/en/) using the [Ultralytics](https://www.ultralytics.com/) [Python package](https://pypi.org/project/ultralytics/). This version provides real-time [object detection](tasks/detect.md) advancements by introducing an End-to-End head that eliminates Non-Maximum Suppression (NMS) requirements.
- **[YOLO11](models/yolo11.md) NEW 🚀**: Ultralytics' latest YOLO models delivering state-of-the-art (SOTA) performance across multiple tasks.
## YOLO Licenses: How is Ultralytics YOLO licensed?

@ -1,19 +1,20 @@
| Argument          | Type    | Default       | Range         | Description |
| ----------------- | ------- | ------------- | ------------- | ----------- |
| `hsv_h`           | `float` | `0.015`       | `0.0 - 1.0`   | Adjusts the hue of the image by a fraction of the color wheel, introducing color variability. Helps the model generalize across different lighting conditions. |
| `hsv_s`           | `float` | `0.7`         | `0.0 - 1.0`   | Alters the saturation of the image by a fraction, affecting the intensity of colors. Useful for simulating different environmental conditions. |
| `hsv_v`           | `float` | `0.4`         | `0.0 - 1.0`   | Modifies the value (brightness) of the image by a fraction, helping the model to perform well under various lighting conditions. |
| `degrees`         | `float` | `0.0`         | `-180 - +180` | Rotates the image randomly within the specified degree range, improving the model's ability to recognize objects at various orientations. |
| `translate`       | `float` | `0.1`         | `0.0 - 1.0`   | Translates the image horizontally and vertically by a fraction of the image size, aiding in learning to detect partially visible objects. |
| `scale`           | `float` | `0.5`         | `>=0.0`       | Scales the image by a gain factor, simulating objects at different distances from the camera. |
| `shear`           | `float` | `0.0`         | `-180 - +180` | Shears the image by a specified degree, mimicking the effect of objects being viewed from different angles. |
| `perspective`     | `float` | `0.0`         | `0.0 - 0.001` | Applies a random perspective transformation to the image, enhancing the model's ability to understand objects in 3D space. |
| `flipud`          | `float` | `0.0`         | `0.0 - 1.0`   | Flips the image upside down with the specified probability, increasing the data variability without affecting the object's characteristics. |
| `fliplr`          | `float` | `0.5`         | `0.0 - 1.0`   | Flips the image left to right with the specified probability, useful for learning symmetrical objects and increasing dataset diversity. |
| `bgr`             | `float` | `0.0`         | `0.0 - 1.0`   | Flips the image channels from RGB to BGR with the specified probability, useful for increasing robustness to incorrect channel ordering. |
| `mosaic`          | `float` | `1.0`         | `0.0 - 1.0`   | Combines four training images into one, simulating different scene compositions and object interactions. Highly effective for complex scene understanding. |
| `mixup`           | `float` | `0.0`         | `0.0 - 1.0`   | Blends two images and their labels, creating a composite image. Enhances the model's ability to generalize by introducing label noise and visual variability. |
| `copy_paste`      | `float` | `0.0`         | `0.0 - 1.0`   | Copies objects from one image and pastes them onto another, useful for increasing object instances and learning object occlusion. |
| `copy_paste_mode` | `str`   | `flip`        | -             | Selects the Copy-Paste augmentation method from the available options (`"flip"`, `"mixup"`). |
| `auto_augment`    | `str`   | `randaugment` | -             | Automatically applies a predefined augmentation policy (`randaugment`, `autoaugment`, `augmix`), optimizing for classification tasks by diversifying the visual features. |
| `erasing`         | `float` | `0.4`         | `0.0 - 0.9`   | Randomly erases a portion of the image during classification training, encouraging the model to focus on less obvious features for recognition. |
| `crop_fraction`   | `float` | `1.0`         | `0.1 - 1.0`   | Crops the classification image to a fraction of its size to emphasize central features and adapt to object scales, reducing background distractions. |
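All of these augmentation settings are plain `train()` arguments; a minimal sketch follows (the values below are illustrative, not tuned recommendations):

```python
from ultralytics import YOLO

model = YOLO("yolo11n-seg.pt")

# Augmentation hyperparameters are passed alongside the usual training arguments
model.train(
    data="coco8-seg.yaml",
    epochs=10,
    hsv_h=0.015,  # hue jitter
    degrees=10.0,  # random rotation range in degrees
    mosaic=1.0,  # mosaic probability
    copy_paste=0.1,  # copy-paste probability (requires segment labels)
    copy_paste_mode="flip",  # copy-paste variant: "flip" or "mixup"
)
```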

@ -17,16 +17,17 @@ Here are some of the key models supported:
3. **[YOLOv5](yolov5.md)**: An improved version of the YOLO architecture by Ultralytics, offering better performance and speed trade-offs compared to previous versions.
4. **[YOLOv6](yolov6.md)**: Released by [Meituan](https://about.meituan.com/) in 2022, and in use in many of the company's autonomous delivery robots.
5. **[YOLOv7](yolov7.md)**: Updated YOLO models released in 2022 by the authors of YOLOv4.
6. **[YOLOv8](yolov8.md)**: A major release of the YOLO family, featuring enhanced capabilities such as [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation), pose/keypoints estimation, and classification.
7. **[YOLOv9](yolov9.md)**: An experimental model trained on the Ultralytics [YOLOv5](yolov5.md) codebase implementing Programmable Gradient Information (PGI).
8. **[YOLOv10](yolov10.md)**: By Tsinghua University, featuring NMS-free training and efficiency-accuracy driven architecture, delivering state-of-the-art performance and latency.
9. **[YOLO11](yolo11.md) NEW 🚀**: Ultralytics' latest YOLO models delivering state-of-the-art (SOTA) performance across multiple tasks.
10. **[Segment Anything Model (SAM)](sam.md)**: Meta's original Segment Anything Model (SAM).
11. **[Segment Anything Model 2 (SAM2)](sam-2.md)**: The next generation of Meta's Segment Anything Model (SAM) for videos and images.
12. **[Mobile Segment Anything Model (MobileSAM)](mobile-sam.md)**: MobileSAM for mobile applications, by Kyung Hee University.
13. **[Fast Segment Anything Model (FastSAM)](fast-sam.md)**: FastSAM by Image & Video Analysis Group, Institute of Automation, Chinese Academy of Sciences.
14. **[YOLO-NAS](yolo-nas.md)**: YOLO Neural Architecture Search (NAS) Models.
15. **[Realtime Detection Transformers (RT-DETR)](rtdetr.md)**: Baidu's PaddlePaddle Realtime Detection [Transformer](https://www.ultralytics.com/glossary/transformer) (RT-DETR) models.
16. **[YOLO-World](yolo-world.md)**: Real-time Open Vocabulary Object Detection models from Tencent AI Lab.
<p align="center"> <p align="center">
<br> <br>

@ -0,0 +1,228 @@
---
comments: true
description: Discover YOLO11, the latest advancement in state-of-the-art object detection, offering unmatched accuracy and efficiency for diverse computer vision tasks.
keywords: YOLO11, state-of-the-art object detection, YOLO series, Ultralytics, computer vision, AI, machine learning, deep learning
---
# Ultralytics YOLO11
## Overview
YOLO11 is the latest iteration in the Ultralytics YOLO series of real-time object detectors, redefining what's possible with cutting-edge accuracy, speed, and efficiency. Building upon the impressive advancements of previous YOLO versions, YOLO11 introduces significant improvements in architecture and training methods, making it a versatile choice for a wide range of computer vision tasks.
![Ultralytics YOLO11 Comparison Plots](https://github.com/user-attachments/assets/a311a4ed-bbf2-43b5-8012-5f183a28a845)
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/live/rfI5vOo3-_A?si=pRdMeLLus0ryYZP7"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> Ultralytics YOLO11 Announcement at YOLO Vision 2024 (#YV24)
</p>
## Key Features
- **Enhanced Feature Extraction:** YOLO11 employs an improved backbone and neck architecture, which enhances [feature extraction](https://www.ultralytics.com/glossary/feature-extraction) capabilities for more precise object detection and complex task performance.
- **Optimized for Efficiency and Speed:** YOLO11 introduces refined architectural designs and optimized training pipelines, delivering faster processing speeds and maintaining an optimal balance between accuracy and performance.
- **Greater Accuracy with Fewer Parameters:** With advancements in model design, YOLO11m achieves a higher mean Average Precision (mAP) on the COCO dataset while using 22% fewer parameters than YOLOv8m, making it computationally efficient without compromising accuracy.
- **Adaptability Across Environments:** YOLO11 can be seamlessly deployed across various environments, including edge devices, cloud platforms, and systems supporting NVIDIA GPUs, ensuring maximum flexibility.
- **Broad Range of Supported Tasks:** Whether it's object detection, instance segmentation, image classification, pose estimation, or oriented object detection (OBB), YOLO11 is designed to cater to a diverse set of computer vision challenges.
## Supported Tasks and Modes
YOLO11 builds upon the versatile model range introduced in YOLOv8, offering enhanced support across various computer vision tasks:
| Model | Filenames | Task | Inference | Validation | Training | Export |
| ----------- | ----------------------------------------------------------------------------------------- | -------------------------------------------- | --------- | ---------- | -------- | ------ |
| YOLO11 | `yolo11n.pt` `yolo11s.pt` `yolo11m.pt` `yolo11l.pt` `yolo11x.pt` | [Detection](../tasks/detect.md) | ✅ | ✅ | ✅ | ✅ |
| YOLO11-seg | `yolo11n-seg.pt` `yolo11s-seg.pt` `yolo11m-seg.pt` `yolo11l-seg.pt` `yolo11x-seg.pt` | [Instance Segmentation](../tasks/segment.md) | ✅ | ✅ | ✅ | ✅ |
| YOLO11-pose | `yolo11n-pose.pt` `yolo11s-pose.pt` `yolo11m-pose.pt` `yolo11l-pose.pt` `yolo11x-pose.pt` | [Pose/Keypoints](../tasks/pose.md) | ✅ | ✅ | ✅ | ✅ |
| YOLO11-obb | `yolo11n-obb.pt` `yolo11s-obb.pt` `yolo11m-obb.pt` `yolo11l-obb.pt` `yolo11x-obb.pt` | [Oriented Detection](../tasks/obb.md) | ✅ | ✅ | ✅ | ✅ |
| YOLO11-cls | `yolo11n-cls.pt` `yolo11s-cls.pt` `yolo11m-cls.pt` `yolo11l-cls.pt` `yolo11x-cls.pt` | [Classification](../tasks/classify.md) | ✅ | ✅ | ✅ | ✅ |
This table provides an overview of the YOLO11 model variants, showcasing their applicability in specific tasks and compatibility with operational modes such as Inference, Validation, Training, and Export. This flexibility makes YOLO11 suitable for a wide range of applications in computer vision, from real-time detection to complex segmentation tasks.
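As a quick sketch, any variant listed above can be loaded by filename, and the task is inferred from the checkpoint (the `bus.jpg` URL below is just a sample asset):
```python
from ultralytics import YOLO

# Sketch: the task (detect/segment/pose/obb/classify) is inferred from each checkpoint
for weights in ("yolo11n.pt", "yolo11n-seg.pt", "yolo11n-pose.pt", "yolo11n-obb.pt", "yolo11n-cls.pt"):
    model = YOLO(weights)
    results = model("https://ultralytics.com/images/bus.jpg")
    print(weights, "->", model.task)
```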
## Performance Metrics
!!! performance
=== "Detection (COCO)"
See [Detection Docs](../tasks/detect.md) for usage examples with these models trained on [COCO](../datasets/detect/coco.md), which include 80 pre-trained classes.
| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>Tesla T4 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
| ----- | --------------------- | -------------------- | ------------------------------ | --------------------------------------- | ------------------ | ----------------- |
| [YOLO11n](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt) | 640 | 39.5 | 56.12 ± 0.82 | 1.55 ± 0.01 | 2.6 | 6.5 |
| [YOLO11s](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s.pt) | 640 | 47.0 | 90.01 ± 1.17 | 2.46 ± 0.00 | 9.4 | 21.5 |
| [YOLO11m](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m.pt) | 640 | 51.5 | 183.20 ± 2.04 | 4.70 ± 0.06 | 20.1 | 68.0 |
| [YOLO11l](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l.pt) | 640 | 53.4 | 238.64 ± 1.39 | 6.16 ± 0.08 | 25.3 | 86.9 |
| [YOLO11x](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x.pt) | 640 | 54.7 | 462.78 ± 6.66 | 11.31 ± 0.24 | 56.9 | 194.9 |
=== "Segmentation (COCO)"
See [Segmentation Docs](../tasks/segment.md) for usage examples with these models trained on [COCO](../datasets/segment/coco.md), which include 80 pre-trained classes.
| Model | size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>Tesla T4 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
| ----- | --------------------- | -------------------- | --------------------- | ------------------------------ | --------------------------------------- | ------------------ | ----------------- |
| [YOLO11n-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-seg.pt) | 640 | 38.9 | 32.0 | 65.90 ± 1.14 | 1.84 ± 0.00 | 2.9 | 10.4 |
| [YOLO11s-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-seg.pt) | 640 | 46.6 | 37.8 | 117.56 ± 4.89 | 2.94 ± 0.01 | 10.1 | 35.5 |
| [YOLO11m-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-seg.pt) | 640 | 51.5 | 41.5 | 281.63 ± 1.16 | 6.31 ± 0.09 | 22.4 | 123.3 |
| [YOLO11l-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-seg.pt) | 640 | 53.4 | 42.9 | 344.16 ± 3.17 | 7.78 ± 0.16 | 27.6 | 142.2 |
| [YOLO11x-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-seg.pt) | 640 | 54.7 | 43.8 | 664.50 ± 3.24 | 15.75 ± 0.67 | 62.1 | 319.0 |
=== "Classification (ImageNet)"
See [Classification Docs](../tasks/classify.md) for usage examples with these models trained on [ImageNet](../datasets/classify/imagenet.md), which include 1000 pre-trained classes.
| Model | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>Tesla T4 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) at 640 |
| ----- | --------------------- | ---------------- | ---------------- | ------------------------------ | --------------------------------------- | ------------------ | ------------------------ |
| [YOLO11n-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-cls.pt) | 224 | 70.0 | 89.4 | 5.03 ± 0.32 | 1.10 ± 0.01 | 1.6 | 3.3 |
| [YOLO11s-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-cls.pt) | 224 | 75.4 | 92.7 | 7.89 ± 0.18 | 1.34 ± 0.01 | 5.5 | 12.1 |
| [YOLO11m-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-cls.pt) | 224 | 77.3 | 93.9 | 17.17 ± 0.40 | 1.95 ± 0.00 | 10.4 | 39.3 |
| [YOLO11l-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-cls.pt) | 224 | 78.3 | 94.3 | 23.17 ± 0.29 | 2.76 ± 0.00 | 12.9 | 49.4 |
| [YOLO11x-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-cls.pt) | 224 | 79.5 | 94.9 | 41.41 ± 0.94 | 3.82 ± 0.00 | 28.4 | 110.4 |
=== "Pose (COCO)"
See [Pose Estimation Docs](../tasks/pose.md) for usage examples with these models trained on [COCO](../datasets/pose/coco.md), which include 1 pre-trained class, 'person'.
| Model | size<br><sup>(pixels) | mAP<sup>pose<br>50-95 | mAP<sup>pose<br>50 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>Tesla T4 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
| ----- | --------------------- | --------------------- | ------------------ | ------------------------------ | --------------------------------------- | ------------------ | ----------------- |
| [YOLO11n-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-pose.pt) | 640 | 50.0 | 81.0 | 52.40 ± 0.51 | 1.72 ± 0.01 | 2.9 | 7.6 |
| [YOLO11s-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-pose.pt) | 640 | 58.9 | 86.3 | 90.54 ± 0.59 | 2.57 ± 0.00 | 9.9 | 23.2 |
| [YOLO11m-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-pose.pt) | 640 | 64.9 | 89.4 | 187.28 ± 0.77 | 4.94 ± 0.05 | 20.9 | 71.7 |
| [YOLO11l-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-pose.pt) | 640 | 66.1 | 89.9 | 247.69 ± 1.10 | 6.42 ± 0.13 | 26.2 | 90.7 |
| [YOLO11x-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-pose.pt) | 640 | 69.5 | 91.1 | 487.97 ± 13.91 | 12.06 ± 0.20 | 58.8 | 203.3 |
=== "OBB (DOTAv1)"
See [Oriented Detection Docs](../tasks/obb.md) for usage examples with these models trained on [DOTAv1](../datasets/obb/dota-v2.md#dota-v10), which include 15 pre-trained classes.
| Model | size<br><sup>(pixels) | mAP<sup>test<br>50 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>Tesla T4 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
| ----- | --------------------- | ------------------ | ------------------------------ | --------------------------------------- | ------------------ | ----------------- |
| [YOLO11n-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-obb.pt) | 1024 | 78.4 | 117.56 ± 0.80 | 4.43 ± 0.01 | 2.7 | 17.2 |
| [YOLO11s-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-obb.pt) | 1024 | 79.5 | 219.41 ± 4.00 | 5.13 ± 0.02 | 9.7 | 57.5 |
| [YOLO11m-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-obb.pt) | 1024 | 80.9 | 562.81 ± 2.87 | 10.07 ± 0.38 | 20.9 | 183.5 |
| [YOLO11l-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-obb.pt) | 1024 | 81.0 | 712.49 ± 4.98 | 13.46 ± 0.55 | 26.2 | 232.0 |
| [YOLO11x-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-obb.pt) | 1024 | 81.3 | 1408.63 ± 7.67 | 28.59 ± 0.96 | 58.8 | 520.2 |
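Speed figures depend heavily on hardware and export backend; as a rough sketch, comparable numbers can be collected locally with the benchmark utility (the arguments shown are illustrative):
```python
from ultralytics.utils.benchmarks import benchmark

# Sketch: measure YOLO11n mAP and latency across export formats on GPU 0
benchmark(model="yolo11n.pt", data="coco8.yaml", imgsz=640, half=False, device=0)
```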
## Usage Examples
This section provides simple YOLO11 training and inference examples. For full documentation on these and other [modes](../modes/index.md), see the [Predict](../modes/predict.md), [Train](../modes/train.md), [Val](../modes/val.md), and [Export](../modes/export.md) docs pages.
Note that the example below is for YOLO11 [Detect](../tasks/detect.md) models for object detection. For additional supported tasks, see the [Segment](../tasks/segment.md), [Classify](../tasks/classify.md), [OBB](../tasks/obb.md), and [Pose](../tasks/pose.md) docs.
!!! example
=== "Python"
[PyTorch](https://www.ultralytics.com/glossary/pytorch) pretrained `*.pt` models as well as configuration `*.yaml` files can be passed to the `YOLO()` class to create a model instance in Python:
```python
from ultralytics import YOLO
# Load a COCO-pretrained YOLO11n model
model = YOLO("yolo11n.pt")
# Train the model on the COCO8 example dataset for 100 epochs
results = model.train(data="coco8.yaml", epochs=100, imgsz=640)
# Run inference with the YOLO11n model on the 'bus.jpg' image
results = model("path/to/bus.jpg")
```
=== "CLI"
CLI commands are available to directly run the models:
```bash
# Load a COCO-pretrained YOLO11n model and train it on the COCO8 example dataset for 100 epochs
yolo train model=yolo11n.pt data=coco8.yaml epochs=100 imgsz=640
# Load a COCO-pretrained YOLO11n model and run inference on the 'bus.jpg' image
yolo predict model=yolo11n.pt source=path/to/bus.jpg
```
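Validation follows the same pattern; a minimal sketch using the COCO8 example dataset:
```python
from ultralytics import YOLO

# Sketch: validate the COCO-pretrained checkpoint and read back mAP50-95
model = YOLO("yolo11n.pt")
metrics = model.val(data="coco8.yaml", imgsz=640)
print(metrics.box.map)  # mAP50-95
```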
## Citations and Acknowledgements
If you use YOLO11 or any other software from this repository in your work, please cite it using the following format:
!!! quote ""
=== "BibTeX"
```bibtex
@software{yolo11_ultralytics,
author = {Glenn Jocher and Jing Qiu},
title = {Ultralytics YOLO11},
version = {11.0.0},
year = {2024},
url = {https://github.com/ultralytics/ultralytics},
orcid = {0000-0001-5950-6979, 0000-0002-7603-6750, 0000-0003-3783-7069},
license = {AGPL-3.0}
}
```
Please note that the DOI is pending and will be added to the citation once it is available. YOLO11 models are provided under [AGPL-3.0](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) and [Enterprise](https://www.ultralytics.com/license) licenses.
## FAQ
### What are the key improvements in Ultralytics YOLO11 compared to previous versions?
Ultralytics YOLO11 introduces several significant advancements over its predecessors. Key improvements include:
- **Enhanced Feature Extraction:** YOLO11 employs an improved backbone and neck architecture, enhancing [feature extraction](https://www.ultralytics.com/glossary/feature-extraction) capabilities for more precise object detection.
- **Optimized Efficiency and Speed:** Refined architectural designs and optimized training pipelines deliver faster processing speeds while maintaining a balance between accuracy and performance.
- **Greater Accuracy with Fewer Parameters:** YOLO11m achieves higher mean Average Precision (mAP) on the COCO dataset with 22% fewer parameters than YOLOv8m, making it computationally efficient without compromising accuracy.
- **Adaptability Across Environments:** YOLO11 can be deployed across various environments, including edge devices, cloud platforms, and systems supporting NVIDIA GPUs.
- **Broad Range of Supported Tasks:** YOLO11 supports diverse computer vision tasks such as object detection, instance segmentation, image classification, pose estimation, and oriented object detection (OBB).
### How do I train a YOLO11 model for object detection?
Training a YOLO11 model for object detection can be done using Python or CLI commands. Below are examples for both methods:
!!! example
=== "Python"
```python
from ultralytics import YOLO
# Load a COCO-pretrained YOLO11n model
model = YOLO("yolo11n.pt")
# Train the model on the COCO8 example dataset for 100 epochs
results = model.train(data="coco8.yaml", epochs=100, imgsz=640)
```
=== "CLI"
```bash
# Load a COCO-pretrained YOLO11n model and train it on the COCO8 example dataset for 100 epochs
yolo train model=yolo11n.pt data=coco8.yaml epochs=100 imgsz=640
```
For more detailed instructions, refer to the [Train](../modes/train.md) documentation.
### What tasks can YOLO11 models perform?
YOLO11 models are versatile and support a wide range of computer vision tasks, including:
- **Object Detection:** Identifying and locating objects within an image.
- **Instance Segmentation:** Detecting objects and delineating their boundaries.
- **Image Classification:** Categorizing images into predefined classes.
- **Pose Estimation:** Detecting and tracking keypoints on human bodies.
- **Oriented Object Detection (OBB):** Detecting objects with rotation for higher precision.
For more information on each task, see the [Detection](../tasks/detect.md), [Instance Segmentation](../tasks/segment.md), [Classification](../tasks/classify.md), [Pose Estimation](../tasks/pose.md), and [Oriented Detection](../tasks/obb.md) documentation.
### How does YOLO11 achieve greater accuracy with fewer parameters?
YOLO11 achieves greater accuracy with fewer parameters through advancements in model design and optimization techniques. The improved architecture allows for efficient feature extraction and processing, resulting in higher mean Average Precision (mAP) on datasets like COCO while using 22% fewer parameters than YOLOv8m. This makes YOLO11 computationally efficient without compromising accuracy, and well suited for deployment on resource-constrained devices.
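The parameter claim is easy to check; a sketch comparing model summaries (exact counts may vary slightly across releases):
```python
from ultralytics import YOLO

# Sketch: compare layer/parameter counts of YOLO11m and YOLOv8m
for weights in ("yolo11m.pt", "yolov8m.pt"):
    layers, params, gradients, flops = YOLO(weights).info()
    print(weights, f"{params / 1e6:.1f}M params, {flops:.1f} GFLOPs")
```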
### Can YOLO11 be deployed on edge devices?
Yes, YOLO11 is designed for adaptability across various environments, including edge devices. Its optimized architecture and efficient processing capabilities make it suitable for deployment on edge devices, cloud platforms, and systems supporting NVIDIA GPUs. This flexibility ensures that YOLO11 can be used in diverse applications, from real-time detection on mobile devices to complex segmentation tasks in cloud environments. For more details on deployment options, refer to the [Export](../modes/export.md) documentation.
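As a sketch, edge deployment typically starts with an export; format availability depends on the backends installed:
```python
from ultralytics import YOLO

# Sketch: export YOLO11n to ONNX for portable edge runtimes
model = YOLO("yolo11n.pt")
onnx_path = model.export(format="onnx")
# model.export(format="engine", half=True)  # TensorRT engine; requires CUDA + TensorRT
```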

@ -4,7 +4,7 @@ description: Discover YOLOv8, the latest advancement in real-time object detecti
keywords: YOLOv8, real-time object detection, YOLO series, Ultralytics, computer vision, advanced object detection, AI, machine learning, deep learning keywords: YOLOv8, real-time object detection, YOLO series, Ultralytics, computer vision, advanced object detection, AI, machine learning, deep learning
--- ---
# YOLOv8 # Ultralytics YOLOv8
## Overview ## Overview

@ -143,6 +143,18 @@ keywords: Ultralytics, YOLO, neural networks, block modules, DFL, Proto, HGStem,
<br><br><hr><br> <br><br><hr><br>
## ::: ultralytics.nn.modules.block.C3f
<br><br><hr><br>
## ::: ultralytics.nn.modules.block.C3k2
<br><br><hr><br>
## ::: ultralytics.nn.modules.block.C3k
<br><br><hr><br>
## ::: ultralytics.nn.modules.block.RepVGGDW ## ::: ultralytics.nn.modules.block.RepVGGDW
<br><br><hr><br> <br><br><hr><br>
@ -159,10 +171,22 @@ keywords: Ultralytics, YOLO, neural networks, block modules, DFL, Proto, HGStem,
<br><br><hr><br> <br><br><hr><br>
## ::: ultralytics.nn.modules.block.PSABlock
<br><br><hr><br>
## ::: ultralytics.nn.modules.block.PSA ## ::: ultralytics.nn.modules.block.PSA
<br><br><hr><br> <br><br><hr><br>
## ::: ultralytics.nn.modules.block.C2PSA
<br><br><hr><br>
## ::: ultralytics.nn.modules.block.C2fPSA
<br><br><hr><br>
## ::: ultralytics.nn.modules.block.SCDown ## ::: ultralytics.nn.modules.block.SCDown
<br><br> <br><br>

@ -1,6 +1,9 @@
116908874+jk4e@users.noreply.github.com: 116908874+jk4e@users.noreply.github.com:
avatar: https://avatars.githubusercontent.com/u/116908874?v=4 avatar: https://avatars.githubusercontent.com/u/116908874?v=4
username: jk4e username: jk4e
1185102784@qq.com:
avatar: null
username: null
130829914+IvorZhu331@users.noreply.github.com: 130829914+IvorZhu331@users.noreply.github.com:
avatar: https://avatars.githubusercontent.com/u/130829914?v=4 avatar: https://avatars.githubusercontent.com/u/130829914?v=4
username: IvorZhu331 username: IvorZhu331

@ -238,9 +238,9 @@ char* YOLO_V8::TensorProcess(clock_t& starttime_1, cv::Mat& iImg, N& blob, std::
rawData = cv::Mat(signalResultNum, strideNum, CV_16F, output); rawData = cv::Mat(signalResultNum, strideNum, CV_16F, output);
rawData.convertTo(rawData, CV_32F); rawData.convertTo(rawData, CV_32F);
} }
//Note: // Note:
//ultralytics add transpose operator to the output of yolov8 model.which make yolov8/v5/v7 has same shape // Ultralytics adds a transpose operator to the output of the yolov8 model, which gives yolov8/v5/v7 the same output shape
//https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt // https://github.com/ultralytics/assets/releases/download/v8.3.0/yolov8n.pt
rawData = rawData.t(); rawData = rawData.t();
float* data = (float*)rawData.data; float* data = (float*)rawData.data;

@ -114,7 +114,7 @@
"output_type": "stream", "output_type": "stream",
"name": "stdout", "name": "stdout",
"text": [ "text": [
"Downloading https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt to 'yolov8n.pt'...\n", "Downloading https://github.com/ultralytics/assets/releases/download/v8.3.0/yolov8n.pt to 'yolov8n.pt'...\n",
"100% 6.23M/6.23M [00:00<00:00, 83.2MB/s]\n", "100% 6.23M/6.23M [00:00<00:00, 83.2MB/s]\n",
"Ultralytics YOLOv8.2.3 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n", "Ultralytics YOLOv8.2.3 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n",
"YOLOv8n summary (fused): 168 layers, 3151904 parameters, 0 gradients, 8.7 GFLOPs\n", "YOLOv8n summary (fused): 168 layers, 3151904 parameters, 0 gradients, 8.7 GFLOPs\n",

@ -251,6 +251,7 @@ nav:
- YOLOv8: models/yolov8.md - YOLOv8: models/yolov8.md
- YOLOv9: models/yolov9.md - YOLOv9: models/yolov9.md
- YOLOv10: models/yolov10.md - YOLOv10: models/yolov10.md
- YOLO11: models/yolo11.md
- SAM (Segment Anything Model): models/sam.md - SAM (Segment Anything Model): models/sam.md
- SAM 2 (Segment Anything Model 2): models/sam-2.md - SAM 2 (Segment Anything Model 2): models/sam-2.md
- MobileSAM (Mobile Segment Anything Model): models/mobile-sam.md - MobileSAM (Mobile Segment Anything Model): models/mobile-sam.md

@ -3,8 +3,8 @@
from ultralytics.utils import ASSETS, ROOT, WEIGHTS_DIR, checks from ultralytics.utils import ASSETS, ROOT, WEIGHTS_DIR, checks
# Constants used in tests # Constants used in tests
MODEL = WEIGHTS_DIR / "path with spaces" / "yolov8n.pt" # test spaces in path MODEL = WEIGHTS_DIR / "path with spaces" / "yolo11n.pt" # test spaces in path
CFG = "yolov8n.yaml" CFG = "yolo11n.yaml"
SOURCE = ASSETS / "bus.jpg" SOURCE = ASSETS / "bus.jpg"
SOURCES_LIST = [ASSETS / "bus.jpg", ASSETS, ASSETS / "*", ASSETS / "**/*.jpg"] SOURCES_LIST = [ASSETS / "bus.jpg", ASSETS, ASSETS / "*", ASSETS / "**/*.jpg"]
TMP = (ROOT / "../tests/tmp").resolve() # temp directory for test files TMP = (ROOT / "../tests/tmp").resolve() # temp directory for test files

@ -74,7 +74,7 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config):
# Remove files # Remove files
models = [path for x in ["*.onnx", "*.torchscript"] for path in WEIGHTS_DIR.rglob(x)] models = [path for x in ["*.onnx", "*.torchscript"] for path in WEIGHTS_DIR.rglob(x)]
for file in ["bus.jpg", "yolov8n.onnx", "yolov8n.torchscript"] + models: for file in ["bus.jpg", "yolo11n.onnx", "yolo11n.torchscript"] + models:
Path(file).unlink(missing_ok=True) Path(file).unlink(missing_ok=True)
# Remove directories # Remove directories

@ -60,7 +60,7 @@ def test_train():
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available") @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_predict_multiple_devices(): def test_predict_multiple_devices():
"""Validate model prediction consistency across CPU and CUDA devices.""" """Validate model prediction consistency across CPU and CUDA devices."""
model = YOLO("yolov8n.pt") model = YOLO("yolo11n.pt")
model = model.cpu() model = model.cpu()
assert str(model.device) == "cpu" assert str(model.device) == "cpu"
_ = model(SOURCE) # CPU inference _ = model(SOURCE) # CPU inference

@ -21,13 +21,13 @@ def test_export():
exporter = Exporter() exporter = Exporter()
exporter.add_callback("on_export_start", test_func) exporter.add_callback("on_export_start", test_func)
assert test_func in exporter.callbacks["on_export_start"], "callback test failed" assert test_func in exporter.callbacks["on_export_start"], "callback test failed"
f = exporter(model=YOLO("yolov8n.yaml").model) f = exporter(model=YOLO("yolo11n.yaml").model)
YOLO(f)(ASSETS) # exported model inference YOLO(f)(ASSETS) # exported model inference
def test_detect(): def test_detect():
"""Test YOLO object detection training, validation, and prediction functionality.""" """Test YOLO object detection training, validation, and prediction functionality."""
overrides = {"data": "coco8.yaml", "model": "yolov8n.yaml", "imgsz": 32, "epochs": 1, "save": False} overrides = {"data": "coco8.yaml", "model": "yolo11n.yaml", "imgsz": 32, "epochs": 1, "save": False}
cfg = get_cfg(DEFAULT_CFG) cfg = get_cfg(DEFAULT_CFG)
cfg.data = "coco8.yaml" cfg.data = "coco8.yaml"
cfg.imgsz = 32 cfg.imgsz = 32
@ -66,7 +66,7 @@ def test_detect():
def test_segment(): def test_segment():
"""Tests image segmentation training, validation, and prediction pipelines using YOLO models.""" """Tests image segmentation training, validation, and prediction pipelines using YOLO models."""
overrides = {"data": "coco8-seg.yaml", "model": "yolov8n-seg.yaml", "imgsz": 32, "epochs": 1, "save": False} overrides = {"data": "coco8-seg.yaml", "model": "yolo11n-seg.yaml", "imgsz": 32, "epochs": 1, "save": False}
cfg = get_cfg(DEFAULT_CFG) cfg = get_cfg(DEFAULT_CFG)
cfg.data = "coco8-seg.yaml" cfg.data = "coco8-seg.yaml"
cfg.imgsz = 32 cfg.imgsz = 32
@ -88,7 +88,7 @@ def test_segment():
pred = segment.SegmentationPredictor(overrides={"imgsz": [64, 64]}) pred = segment.SegmentationPredictor(overrides={"imgsz": [64, 64]})
pred.add_callback("on_predict_start", test_func) pred.add_callback("on_predict_start", test_func)
assert test_func in pred.callbacks["on_predict_start"], "callback test failed" assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
result = pred(source=ASSETS, model=WEIGHTS_DIR / "yolov8n-seg.pt") result = pred(source=ASSETS, model=WEIGHTS_DIR / "yolo11n-seg.pt")
assert len(result), "predictor test failed" assert len(result), "predictor test failed"
# Test resume # Test resume
@ -105,7 +105,7 @@ def test_segment():
def test_classify(): def test_classify():
"""Test image classification including training, validation, and prediction phases.""" """Test image classification including training, validation, and prediction phases."""
overrides = {"data": "imagenet10", "model": "yolov8n-cls.yaml", "imgsz": 32, "epochs": 1, "save": False} overrides = {"data": "imagenet10", "model": "yolo11n-cls.yaml", "imgsz": 32, "epochs": 1, "save": False}
cfg = get_cfg(DEFAULT_CFG) cfg = get_cfg(DEFAULT_CFG)
cfg.data = "imagenet10" cfg.data = "imagenet10"
cfg.imgsz = 32 cfg.imgsz = 32

@ -30,7 +30,7 @@ def test_similarity():
@pytest.mark.skipif(not TORCH_1_13, reason="Explorer requires torch>=1.13") @pytest.mark.skipif(not TORCH_1_13, reason="Explorer requires torch>=1.13")
def test_det(): def test_det():
"""Test detection functionalities and verify embedding table includes bounding boxes.""" """Test detection functionalities and verify embedding table includes bounding boxes."""
exp = Explorer(data="coco8.yaml", model="yolov8n.pt") exp = Explorer(data="coco8.yaml", model="yolo11n.pt")
exp.create_embeddings_table(force=True) exp.create_embeddings_table(force=True)
assert len(exp.table.head()["bboxes"]) > 0 assert len(exp.table.head()["bboxes"]) > 0
similar = exp.get_similar(idx=[1, 2], limit=10) similar = exp.get_similar(idx=[1, 2], limit=10)
@ -44,7 +44,7 @@ def test_det():
@pytest.mark.skipif(not TORCH_1_13, reason="Explorer requires torch>=1.13") @pytest.mark.skipif(not TORCH_1_13, reason="Explorer requires torch>=1.13")
def test_seg(): def test_seg():
"""Test segmentation functionalities and ensure the embedding table includes segmentation masks.""" """Test segmentation functionalities and ensure the embedding table includes segmentation masks."""
exp = Explorer(data="coco8-seg.yaml", model="yolov8n-seg.pt") exp = Explorer(data="coco8-seg.yaml", model="yolo11n-seg.pt")
exp.create_embeddings_table(force=True) exp.create_embeddings_table(force=True)
assert len(exp.table.head()["masks"]) > 0 assert len(exp.table.head()["masks"]) > 0
similar = exp.get_similar(idx=[1, 2], limit=10) similar = exp.get_similar(idx=[1, 2], limit=10)
@ -57,7 +57,7 @@ def test_seg():
@pytest.mark.skipif(not TORCH_1_13, reason="Explorer requires torch>=1.13") @pytest.mark.skipif(not TORCH_1_13, reason="Explorer requires torch>=1.13")
def test_pose(): def test_pose():
"""Test pose estimation functionality and verify the embedding table includes keypoints.""" """Test pose estimation functionality and verify the embedding table includes keypoints."""
exp = Explorer(data="coco8-pose.yaml", model="yolov8n-pose.pt") exp = Explorer(data="coco8-pose.yaml", model="yolo11n-pose.pt")
exp.create_embeddings_table(force=True) exp.create_embeddings_table(force=True)
assert len(exp.table.head()["keypoints"]) > 0 assert len(exp.table.head()["keypoints"]) > 0
similar = exp.get_similar(idx=[1, 2], limit=10) similar = exp.get_similar(idx=[1, 2], limit=10)

@ -17,7 +17,7 @@ from ultralytics.utils.checks import check_requirements
@pytest.mark.skipif(not check_requirements("ray", install=False), reason="ray[tune] not installed") @pytest.mark.skipif(not check_requirements("ray", install=False), reason="ray[tune] not installed")
def test_model_ray_tune(): def test_model_ray_tune():
"""Tune YOLO model using Ray for hyperparameter optimization.""" """Tune YOLO model using Ray for hyperparameter optimization."""
YOLO("yolov8n-cls.yaml").tune( YOLO("yolo11n-cls.yaml").tune(
use_ray=True, data="imagenet10", grace_period=1, iterations=1, imgsz=32, epochs=1, plots=False, device="cpu" use_ray=True, data="imagenet10", grace_period=1, iterations=1, imgsz=32, epochs=1, plots=False, device="cpu"
) )
@ -26,7 +26,7 @@ def test_model_ray_tune():
def test_mlflow(): def test_mlflow():
"""Test training with MLflow tracking enabled (see https://mlflow.org/ for details).""" """Test training with MLflow tracking enabled (see https://mlflow.org/ for details)."""
SETTINGS["mlflow"] = True SETTINGS["mlflow"] = True
YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu") YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu")
SETTINGS["mlflow"] = False SETTINGS["mlflow"] = False
@ -42,7 +42,7 @@ def test_mlflow_keep_run_active():
# Test with MLFLOW_KEEP_RUN_ACTIVE=True # Test with MLFLOW_KEEP_RUN_ACTIVE=True
os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "True" os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "True"
YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu") YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
status = mlflow.active_run().info.status status = mlflow.active_run().info.status
assert status == "RUNNING", "MLflow run should be active when MLFLOW_KEEP_RUN_ACTIVE=True" assert status == "RUNNING", "MLflow run should be active when MLFLOW_KEEP_RUN_ACTIVE=True"
@ -50,13 +50,13 @@ def test_mlflow_keep_run_active():
# Test with MLFLOW_KEEP_RUN_ACTIVE=False # Test with MLFLOW_KEEP_RUN_ACTIVE=False
os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "False" os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "False"
YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu") YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
status = mlflow.get_run(run_id=run_id).info.status status = mlflow.get_run(run_id=run_id).info.status
assert status == "FINISHED", "MLflow run should be ended when MLFLOW_KEEP_RUN_ACTIVE=False" assert status == "FINISHED", "MLflow run should be ended when MLFLOW_KEEP_RUN_ACTIVE=False"
# Test with MLFLOW_KEEP_RUN_ACTIVE not set # Test with MLFLOW_KEEP_RUN_ACTIVE not set
os.environ.pop("MLFLOW_KEEP_RUN_ACTIVE", None) os.environ.pop("MLFLOW_KEEP_RUN_ACTIVE", None)
YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu") YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
status = mlflow.get_run(run_id=run_id).info.status status = mlflow.get_run(run_id=run_id).info.status
assert status == "FINISHED", "MLflow run should be ended by default when MLFLOW_KEEP_RUN_ACTIVE is not set" assert status == "FINISHED", "MLflow run should be ended by default when MLFLOW_KEEP_RUN_ACTIVE is not set"
SETTINGS["mlflow"] = False SETTINGS["mlflow"] = False
@ -126,23 +126,23 @@ def test_pycocotools():
from ultralytics.models.yolo.segment import SegmentationValidator from ultralytics.models.yolo.segment import SegmentationValidator
# Download annotations after each dataset downloads first # Download annotations after each dataset downloads first
url = "https://github.com/ultralytics/assets/releases/download/v8.2.0/" url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
args = {"model": "yolov8n.pt", "data": "coco8.yaml", "save_json": True, "imgsz": 64} args = {"model": "yolo11n.pt", "data": "coco8.yaml", "save_json": True, "imgsz": 64}
validator = DetectionValidator(args=args) validator = DetectionValidator(args=args)
validator() validator()
validator.is_coco = True validator.is_coco = True
download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8/annotations") download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8/annotations")
_ = validator.eval_json(validator.stats) _ = validator.eval_json(validator.stats)
args = {"model": "yolov8n-seg.pt", "data": "coco8-seg.yaml", "save_json": True, "imgsz": 64} args = {"model": "yolo11n-seg.pt", "data": "coco8-seg.yaml", "save_json": True, "imgsz": 64}
validator = SegmentationValidator(args=args) validator = SegmentationValidator(args=args)
validator() validator()
validator.is_coco = True validator.is_coco = True
download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8-seg/annotations") download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8-seg/annotations")
_ = validator.eval_json(validator.stats) _ = validator.eval_json(validator.stats)
args = {"model": "yolov8n-pose.pt", "data": "coco8-pose.yaml", "save_json": True, "imgsz": 64} args = {"model": "yolo11n-pose.pt", "data": "coco8-pose.yaml", "save_json": True, "imgsz": 64}
validator = PoseValidator(args=args) validator = PoseValidator(args=args)
validator() validator()
validator.is_coco = True validator.is_coco = True

@ -211,7 +211,7 @@ def test_train_scratch():
def test_train_pretrained(): def test_train_pretrained():
"""Test training of the YOLO model starting from a pre-trained checkpoint.""" """Test training of the YOLO model starting from a pre-trained checkpoint."""
model = YOLO(WEIGHTS_DIR / "yolov8n-seg.pt") model = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
model.train(data="coco8-seg.yaml", epochs=1, imgsz=32, cache="ram", copy_paste=0.5, mixup=0.5, name=0) model.train(data="coco8-seg.yaml", epochs=1, imgsz=32, cache="ram", copy_paste=0.5, mixup=0.5, name=0)
model(SOURCE) model(SOURCE)
@ -281,13 +281,13 @@ def test_results(model):
def test_labels_and_crops(): def test_labels_and_crops():
"""Test output from prediction args for saving YOLO detection labels and crops; ensures accurate saving.""" """Test output from prediction args for saving YOLO detection labels and crops; ensures accurate saving."""
imgs = [SOURCE, ASSETS / "zidane.jpg"] imgs = [SOURCE, ASSETS / "zidane.jpg"]
results = YOLO(WEIGHTS_DIR / "yolov8n.pt")(imgs, imgsz=160, save_txt=True, save_crop=True) results = YOLO(WEIGHTS_DIR / "yolo11n.pt")(imgs, imgsz=160, save_txt=True, save_crop=True)
save_path = Path(results[0].save_dir) save_path = Path(results[0].save_dir)
for r in results: for r in results:
im_name = Path(r.path).stem im_name = Path(r.path).stem
cls_idxs = r.boxes.cls.int().tolist() cls_idxs = r.boxes.cls.int().tolist()
# Check correct detections # Check correct detections
assert cls_idxs == ([0, 0, 5, 0, 7] if r.path.endswith("bus.jpg") else [0, 0]) # bus.jpg and zidane.jpg classes assert cls_idxs == ([0, 7, 0, 0] if r.path.endswith("bus.jpg") else [0, 0, 0]) # bus.jpg and zidane.jpg classes
# Check label path # Check label path
labels = save_path / f"labels/{im_name}.txt" labels = save_path / f"labels/{im_name}.txt"
assert labels.exists() assert labels.exists()
@ -339,7 +339,7 @@ def test_data_annotator():
auto_annotate( auto_annotate(
ASSETS, ASSETS,
det_model=WEIGHTS_DIR / "yolov8n.pt", det_model=WEIGHTS_DIR / "yolo11n.pt",
sam_model=WEIGHTS_DIR / "mobile_sam.pt", sam_model=WEIGHTS_DIR / "mobile_sam.pt",
output_dir=TMP / "auto_annotate_labels", output_dir=TMP / "auto_annotate_labels",
) )
@ -393,7 +393,7 @@ def test_utils_benchmarks():
"""Benchmark model performance using 'ProfileModels' from 'ultralytics.utils.benchmarks'.""" """Benchmark model performance using 'ProfileModels' from 'ultralytics.utils.benchmarks'."""
from ultralytics.utils.benchmarks import ProfileModels from ultralytics.utils.benchmarks import ProfileModels
ProfileModels(["yolov8n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile() ProfileModels(["yolo11n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
def test_utils_torchutils(): def test_utils_torchutils():
@ -568,14 +568,14 @@ def test_classify_transforms_train(image, auto_augment, erasing, force_color_jit
@pytest.mark.skipif(not ONLINE, reason="environment is offline") @pytest.mark.skipif(not ONLINE, reason="environment is offline")
def test_model_tune(): def test_model_tune():
"""Tune YOLO model for performance improvement.""" """Tune YOLO model for performance improvement."""
YOLO("yolov8n-pose.pt").tune(data="coco8-pose.yaml", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu") YOLO("yolo11n-pose.pt").tune(data="coco8-pose.yaml", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
YOLO("yolov8n-cls.pt").tune(data="imagenet10", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu") YOLO("yolo11n-cls.pt").tune(data="imagenet10", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
def test_model_embeddings(): def test_model_embeddings():
"""Test YOLO model embeddings.""" """Test YOLO model embeddings."""
model_detect = YOLO(MODEL) model_detect = YOLO(MODEL)
model_segment = YOLO(WEIGHTS_DIR / "yolov8n-seg.pt") model_segment = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
for batch in [SOURCE], [SOURCE, SOURCE]: # test batch size 1 and 2 for batch in [SOURCE], [SOURCE, SOURCE]: # test batch size 1 and 2
assert len(model_detect.embed(source=batch, imgsz=32)) == len(batch) assert len(model_detect.embed(source=batch, imgsz=32)) == len(batch)
@ -585,11 +585,11 @@ def test_model_embeddings():
@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="YOLOWorld with CLIP is not supported in Python 3.12") @pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="YOLOWorld with CLIP is not supported in Python 3.12")
def test_yolo_world(): def test_yolo_world():
"""Tests YOLO world models with CLIP support, including detection and training scenarios.""" """Tests YOLO world models with CLIP support, including detection and training scenarios."""
model = YOLO("yolov8s-world.pt") # no YOLOv8n-world model yet model = YOLO("yolov8s-world.pt") # no YOLO11n-world model yet
model.set_classes(["tree", "window"]) model.set_classes(["tree", "window"])
model(SOURCE, conf=0.01) model(SOURCE, conf=0.01)
model = YOLO("yolov8s-worldv2.pt") # no YOLOv8n-world model yet model = YOLO("yolov8s-worldv2.pt") # no YOLO11n-world model yet
# Training from a pretrained model. Eval is included at the final stage of training. # Training from a pretrained model. Eval is included at the final stage of training.
# Use dota8.yaml which has fewer categories to reduce the inference time of CLIP model # Use dota8.yaml which has fewer categories to reduce the inference time of CLIP model
model.train( model.train(
@ -603,7 +603,7 @@ def test_yolo_world():
# test WorldTrainerFromScratch # test WorldTrainerFromScratch
from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch
model = YOLO("yolov8s-worldv2.yaml") # no YOLOv8n-world model yet model = YOLO("yolov8s-worldv2.yaml") # no YOLO11n-world model yet
model.train( model.train(
data={"train": {"yolo_data": ["dota8.yaml"]}, "val": {"yolo_data": ["dota8.yaml"]}}, data={"train": {"yolo_data": ["dota8.yaml"]}, "val": {"yolo_data": ["dota8.yaml"]}},
epochs=1, epochs=1,

@ -14,7 +14,7 @@ WORKOUTS_SOLUTION_DEMO = "https://github.com/ultralytics/assets/releases/downloa
def test_major_solutions(): def test_major_solutions():
"""Test the object counting, heatmap, speed estimation and queue management solution.""" """Test the object counting, heatmap, speed estimation and queue management solution."""
safe_download(url=MAJOR_SOLUTIONS_DEMO) safe_download(url=MAJOR_SOLUTIONS_DEMO)
model = YOLO("yolov8n.pt") model = YOLO("yolo11n.pt")
names = model.names names = model.names
cap = cv2.VideoCapture("solutions_ci_demo.mp4") cap = cv2.VideoCapture("solutions_ci_demo.mp4")
assert cap.isOpened(), "Error reading video file" assert cap.isOpened(), "Error reading video file"
@ -41,7 +41,7 @@ def test_major_solutions():
def test_aigym(): def test_aigym():
"""Test the workouts monitoring solution.""" """Test the workouts monitoring solution."""
safe_download(url=WORKOUTS_SOLUTION_DEMO) safe_download(url=WORKOUTS_SOLUTION_DEMO)
model = YOLO("yolov8n-pose.pt") model = YOLO("yolo11n-pose.pt")
cap = cv2.VideoCapture("solution_ci_pose_demo.mp4") cap = cv2.VideoCapture("solution_ci_pose_demo.mp4")
assert cap.isOpened(), "Error reading video file" assert cap.isOpened(), "Error reading video file"
gym_object = solutions.AIGym(line_thickness=2, pose_type="squat", kpts_to_check=[5, 11, 13]) gym_object = solutions.AIGym(line_thickness=2, pose_type="squat", kpts_to_check=[5, 11, 13])
@ -60,7 +60,7 @@ def test_instance_segmentation():
"""Test the instance segmentation solution.""" """Test the instance segmentation solution."""
from ultralytics.utils.plotting import Annotator, colors from ultralytics.utils.plotting import Annotator, colors
model = YOLO("yolov8n-seg.pt") model = YOLO("yolo11n-seg.pt")
names = model.names names = model.names
cap = cv2.VideoCapture("solutions_ci_demo.mp4") cap = cv2.VideoCapture("solutions_ci_demo.mp4")
assert cap.isOpened(), "Error reading video file" assert cap.isOpened(), "Error reading video file"

@ -1,7 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license # Ultralytics YOLO 🚀, AGPL-3.0 license
__version__ = "8.2.103" __version__ = "8.3.0"
import os import os

@ -115,6 +115,7 @@ bgr: 0.0 # (float) image channel BGR (probability)
mosaic: 1.0 # (float) image mosaic (probability) mosaic: 1.0 # (float) image mosaic (probability)
mixup: 0.0 # (float) image mixup (probability) mixup: 0.0 # (float) image mixup (probability)
copy_paste: 0.0 # (float) segment copy-paste (probability) copy_paste: 0.0 # (float) segment copy-paste (probability)
copy_paste_mode: "flip" # (str) copy-paste augmentation method (flip, mixup)
auto_augment: randaugment # (str) auto augmentation policy for classification (randaugment, autoaugment, augmix) auto_augment: randaugment # (str) auto augmentation policy for classification (randaugment, autoaugment, augmix)
erasing: 0.4 # (float) probability of random erasing during classification training (0-0.9), 0 means no erasing, must be less than 1.0. erasing: 0.4 # (float) probability of random erasing during classification training (0-0.9), 0 means no erasing, must be less than 1.0.
crop_fraction: 1.0 # (float) image crop fraction for classification (0.1-1), 1.0 means no crop, must be greater than 0. crop_fraction: 1.0 # (float) image crop fraction for classification (0.1-1), 1.0 means no crop, must be greater than 0.

@ -0,0 +1,30 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# YOLO11-cls image classification model. For Usage examples see https://docs.ultralytics.com/tasks/classify
# Parameters
nc: 80 # number of classes
scales: # model compound scaling constants, i.e. 'model=yolo11n-cls.yaml' will call yolo11-cls.yaml with scale 'n'
# [depth, width, max_channels]
n: [0.50, 0.25, 1024] # summary: 151 layers, 1633584 parameters, 1633584 gradients, 3.3 GFLOPs
s: [0.50, 0.50, 1024] # summary: 151 layers, 5545488 parameters, 5545488 gradients, 12.2 GFLOPs
m: [0.50, 1.00, 512] # summary: 187 layers, 10455696 parameters, 10455696 gradients, 39.7 GFLOPs
l: [1.00, 1.00, 512] # summary: 309 layers, 12937104 parameters, 12937104 gradients, 49.9 GFLOPs
x: [1.00, 1.50, 512] # summary: 309 layers, 28458544 parameters, 28458544 gradients, 111.1 GFLOPs
# YOLO11n backbone
backbone:
# [from, repeats, module, args]
- [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
- [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
- [-1, 2, C3k2, [256, False, 0.25]]
- [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
- [-1, 2, C3k2, [512, False, 0.25]]
- [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
- [-1, 2, C3k2, [512, True]]
- [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
- [-1, 2, C3k2, [1024, True]]
- [-1, 2, C2PSA, [1024]] # 9
# YOLO11n head
head:
- [-1, 1, Classify, [nc]] # Classify

@ -0,0 +1,47 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# YOLO11 Oriented Bounding Boxes (OBB) model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/obb
# Parameters
nc: 80 # number of classes
scales: # model compound scaling constants, i.e. 'model=yolo11n-obb.yaml' will call yolo11-obb.yaml with scale 'n'
# [depth, width, max_channels]
n: [0.50, 0.25, 1024] # summary: 344 layers, 2695747 parameters, 2695731 gradients, 6.9 GFLOPs
s: [0.50, 0.50, 1024] # summary: 344 layers, 9744931 parameters, 9744915 gradients, 22.7 GFLOPs
m: [0.50, 1.00, 512] # summary: 434 layers, 20963523 parameters, 20963507 gradients, 72.2 GFLOPs
l: [1.00, 1.00, 512] # summary: 656 layers, 26220995 parameters, 26220979 gradients, 91.3 GFLOPs
x: [1.00, 1.50, 512] # summary: 656 layers, 58875331 parameters, 58875315 gradients, 204.3 GFLOPs
# YOLO11n backbone
backbone:
# [from, repeats, module, args]
- [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
- [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
- [-1, 2, C3k2, [256, False, 0.25]]
- [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
- [-1, 2, C3k2, [512, False, 0.25]]
- [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
- [-1, 2, C3k2, [512, True]]
- [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
- [-1, 2, C3k2, [1024, True]]
- [-1, 1, SPPF, [1024, 5]] # 9
- [-1, 2, C2PSA, [1024]] # 10
# YOLO11n head
head:
- [-1, 1, nn.Upsample, [None, 2, "nearest"]]
- [[-1, 6], 1, Concat, [1]] # cat backbone P4
- [-1, 2, C3k2, [512, False]] # 13
- [-1, 1, nn.Upsample, [None, 2, "nearest"]]
- [[-1, 4], 1, Concat, [1]] # cat backbone P3
- [-1, 2, C3k2, [256, False]] # 16 (P3/8-small)
- [-1, 1, Conv, [256, 3, 2]]
- [[-1, 13], 1, Concat, [1]] # cat head P4
- [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium)
- [-1, 1, Conv, [512, 3, 2]]
- [[-1, 10], 1, Concat, [1]] # cat head P5
- [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large)
- [[16, 19, 22], 1, OBB, [nc, 1]] # Detect(P3, P4, P5)

@ -0,0 +1,48 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# YOLO11-pose keypoints/pose estimation model. For Usage examples see https://docs.ultralytics.com/tasks/pose
# Parameters
nc: 80 # number of classes
kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
scales: # model compound scaling constants, i.e. 'model=yolo11n-pose.yaml' will call yolo11-pose.yaml with scale 'n'
# [depth, width, max_channels]
n: [0.50, 0.25, 1024] # summary: 344 layers, 2908507 parameters, 2908491 gradients, 7.7 GFLOPs
s: [0.50, 0.50, 1024] # summary: 344 layers, 9948811 parameters, 9948795 gradients, 23.5 GFLOPs
m: [0.50, 1.00, 512] # summary: 434 layers, 20973273 parameters, 20973257 gradients, 72.3 GFLOPs
l: [1.00, 1.00, 512] # summary: 656 layers, 26230745 parameters, 26230729 gradients, 91.4 GFLOPs
x: [1.00, 1.50, 512] # summary: 656 layers, 58889881 parameters, 58889865 gradients, 204.3 GFLOPs
# YOLO11n backbone
backbone:
# [from, repeats, module, args]
- [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
- [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
- [-1, 2, C3k2, [256, False, 0.25]]
- [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
- [-1, 2, C3k2, [512, False, 0.25]]
- [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
- [-1, 2, C3k2, [512, True]]
- [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
- [-1, 2, C3k2, [1024, True]]
- [-1, 1, SPPF, [1024, 5]] # 9
- [-1, 2, C2PSA, [1024]] # 10
# YOLO11n head
head:
- [-1, 1, nn.Upsample, [None, 2, "nearest"]]
- [[-1, 6], 1, Concat, [1]] # cat backbone P4
- [-1, 2, C3k2, [512, False]] # 13
- [-1, 1, nn.Upsample, [None, 2, "nearest"]]
- [[-1, 4], 1, Concat, [1]] # cat backbone P3
- [-1, 2, C3k2, [256, False]] # 16 (P3/8-small)
- [-1, 1, Conv, [256, 3, 2]]
- [[-1, 13], 1, Concat, [1]] # cat head P4
- [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium)
- [-1, 1, Conv, [512, 3, 2]]
- [[-1, 10], 1, Concat, [1]] # cat head P5
- [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large)
- [[16, 19, 22], 1, Pose, [nc, kpt_shape]] # Detect(P3, P4, P5)

@ -0,0 +1,47 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# YOLO11-seg instance segmentation model. For Usage examples see https://docs.ultralytics.com/tasks/segment
# Parameters
nc: 80 # number of classes
scales: # model compound scaling constants, i.e. 'model=yolo11n-seg.yaml' will call yolo11-seg.yaml with scale 'n'
# [depth, width, max_channels]
n: [0.50, 0.25, 1024] # summary: 355 layers, 2876848 parameters, 2876832 gradients, 10.5 GFLOPs
s: [0.50, 0.50, 1024] # summary: 355 layers, 10113248 parameters, 10113232 gradients, 35.8 GFLOPs
m: [0.50, 1.00, 512] # summary: 445 layers, 22420896 parameters, 22420880 gradients, 123.9 GFLOPs
l: [1.00, 1.00, 512] # summary: 667 layers, 27678368 parameters, 27678352 gradients, 143.0 GFLOPs
x: [1.00, 1.50, 512] # summary: 667 layers, 62142656 parameters, 62142640 gradients, 320.2 GFLOPs
# YOLO11n backbone
backbone:
# [from, repeats, module, args]
- [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
- [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
- [-1, 2, C3k2, [256, False, 0.25]]
- [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
- [-1, 2, C3k2, [512, False, 0.25]]
- [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
- [-1, 2, C3k2, [512, True]]
- [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
- [-1, 2, C3k2, [1024, True]]
- [-1, 1, SPPF, [1024, 5]] # 9
- [-1, 2, C2PSA, [1024]] # 10
# YOLO11n head
head:
- [-1, 1, nn.Upsample, [None, 2, "nearest"]]
- [[-1, 6], 1, Concat, [1]] # cat backbone P4
- [-1, 2, C3k2, [512, False]] # 13
- [-1, 1, nn.Upsample, [None, 2, "nearest"]]
- [[-1, 4], 1, Concat, [1]] # cat backbone P3
- [-1, 2, C3k2, [256, False]] # 16 (P3/8-small)
- [-1, 1, Conv, [256, 3, 2]]
- [[-1, 13], 1, Concat, [1]] # cat head P4
- [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium)
- [-1, 1, Conv, [512, 3, 2]]
- [[-1, 10], 1, Concat, [1]] # cat head P5
- [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large)
- [[16, 19, 22], 1, Segment, [nc, 32, 256]] # Detect(P3, P4, P5)

@ -0,0 +1,47 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# YOLO11 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect
# Parameters
nc: 80 # number of classes
scales: # model compound scaling constants, i.e. 'model=yolo11n.yaml' will call yolo11.yaml with scale 'n'
# [depth, width, max_channels]
n: [0.50, 0.25, 1024] # summary: 319 layers, 2624080 parameters, 2624064 gradients, 6.6 GFLOPs
s: [0.50, 0.50, 1024] # summary: 319 layers, 9458752 parameters, 9458736 gradients, 21.7 GFLOPs
m: [0.50, 1.00, 512] # summary: 409 layers, 20114688 parameters, 20114672 gradients, 68.5 GFLOPs
l: [1.00, 1.00, 512] # summary: 631 layers, 25372160 parameters, 25372144 gradients, 87.6 GFLOPs
x: [1.00, 1.50, 512] # summary: 631 layers, 56966176 parameters, 56966160 gradients, 196.0 GFLOPs
# YOLO11n backbone
backbone:
# [from, repeats, module, args]
- [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
- [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
- [-1, 2, C3k2, [256, False, 0.25]]
- [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
- [-1, 2, C3k2, [512, False, 0.25]]
- [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
- [-1, 2, C3k2, [512, True]]
- [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
- [-1, 2, C3k2, [1024, True]]
- [-1, 1, SPPF, [1024, 5]] # 9
- [-1, 2, C2PSA, [1024]] # 10
# YOLO11n head
head:
- [-1, 1, nn.Upsample, [None, 2, "nearest"]]
- [[-1, 6], 1, Concat, [1]] # cat backbone P4
- [-1, 2, C3k2, [512, False]] # 13
- [-1, 1, nn.Upsample, [None, 2, "nearest"]]
- [[-1, 4], 1, Concat, [1]] # cat backbone P3
- [-1, 2, C3k2, [256, False]] # 16 (P3/8-small)
- [-1, 1, Conv, [256, 3, 2]]
- [[-1, 13], 1, Concat, [1]] # cat head P4
- [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium)
- [-1, 1, Conv, [512, 3, 2]]
- [[-1, 10], 1, Concat, [1]] # cat head P5
- [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large)
- [[16, 19, 22], 1, Detect, [nc]] # Detect(P3, P4, P5)
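A sketch of how this config is consumed: a scale-suffixed filename builds the matching architecture from this YAML with random weights:
```python
from ultralytics import YOLO

# Sketch: "yolo11n.yaml" selects the n-scale [depth, width, max_channels] constants above
model = YOLO("yolo11n.yaml")  # built from YAML, randomly initialized
model.info()  # prints a layer/parameter summary comparable to the comments above
```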

@@ -1628,92 +1628,105 @@ class LetterBox:
         return labels


-class CopyPaste:
+class CopyPaste(BaseMixTransform):
     """
-    Implements Copy-Paste augmentation as described in https://arxiv.org/abs/2012.07177.
+    CopyPaste class for applying Copy-Paste augmentation to image datasets.

-    This class applies Copy-Paste augmentation on images and their corresponding instances.
+    This class implements the Copy-Paste augmentation technique as described in the paper "Simple Copy-Paste is a Strong
+    Data Augmentation Method for Instance Segmentation" (https://arxiv.org/abs/2012.07177). It combines objects from
+    different images to create new training samples.

     Attributes:
-        p (float): Probability of applying the Copy-Paste augmentation. Must be between 0 and 1.
+        dataset (Any): The dataset to which Copy-Paste augmentation will be applied.
+        pre_transform (Callable | None): Optional transform to apply before Copy-Paste.
+        p (float): Probability of applying Copy-Paste augmentation.

     Methods:
-        __call__: Applies Copy-Paste augmentation to given image and instances.
+        get_indexes: Returns a random index from the dataset.
+        _mix_transform: Applies Copy-Paste augmentation to the input labels.
+        __call__: Applies the Copy-Paste transformation to images and annotations.

     Examples:
-        >>> copypaste = CopyPaste(p=0.5)
-        >>> augmented_labels = copypaste(labels)
-        >>> augmented_image = augmented_labels["img"]
+        >>> from ultralytics.data.augment import CopyPaste
+        >>> dataset = YourDataset(...)  # Your image dataset
+        >>> copypaste = CopyPaste(dataset, p=0.5)
+        >>> augmented_labels = copypaste(original_labels)
     """

-    def __init__(self, p=0.5) -> None:
-        """
-        Initializes the CopyPaste augmentation object.
-
-        This class implements the Copy-Paste augmentation as described in the paper "Simple Copy-Paste is a Strong Data
-        Augmentation Method for Instance Segmentation" (https://arxiv.org/abs/2012.07177). It applies the Copy-Paste
-        augmentation on images and their corresponding instances with a given probability.
-
-        Args:
-            p (float): The probability of applying the Copy-Paste augmentation. Must be between 0 and 1.
-
-        Attributes:
-            p (float): Stores the probability of applying the augmentation.
-
-        Examples:
-            >>> augment = CopyPaste(p=0.7)
-            >>> augmented_data = augment(original_data)
-        """
-        self.p = p
-
-    def __call__(self, labels):
-        """
-        Applies Copy-Paste augmentation to an image and its instances.
-
-        Args:
-            labels (Dict): A dictionary containing:
-                - 'img' (np.ndarray): The image to augment.
-                - 'cls' (np.ndarray): Class labels for the instances.
-                - 'instances' (ultralytics.engine.results.Instances): Object containing bounding boxes, segments, etc.
-
-        Returns:
-            (Dict): Dictionary with augmented image and updated instances under 'img', 'cls', and 'instances' keys.
-
-        Examples:
-            >>> labels = {"img": np.random.rand(640, 640, 3), "cls": np.array([0, 1, 2]), "instances": Instances(...)}
-            >>> augmenter = CopyPaste(p=0.5)
-            >>> augmented_labels = augmenter(labels)
-        """
-        im = labels["img"]
-        cls = labels["cls"]
+    def __init__(self, dataset=None, pre_transform=None, p=0.5, mode="flip") -> None:
+        """Initializes CopyPaste object with dataset, pre_transform, and probability of applying MixUp."""
+        super().__init__(dataset=dataset, pre_transform=pre_transform, p=p)
+        assert mode in {"flip", "mixup"}, f"Expected `mode` to be `flip` or `mixup`, but got {mode}."
+        self.mode = mode
+
+    def get_indexes(self):
+        """Returns a list of random indexes from the dataset for CopyPaste augmentation."""
+        return random.randint(0, len(self.dataset) - 1)
+
+    def _mix_transform(self, labels):
+        """Applies Copy-Paste augmentation to combine objects from another image into the current image."""
+        labels2 = labels["mix_labels"][0]
+        return self._transform(labels, labels2)
+
+    def __call__(self, labels):
+        """Applies Copy-Paste augmentation to an image and its labels."""
+        if len(labels["instances"].segments) == 0 or self.p == 0:
+            return labels
+        if self.mode == "flip":
+            return self._transform(labels)
+
+        # Get index of one or three other images
+        indexes = self.get_indexes()
+        if isinstance(indexes, int):
+            indexes = [indexes]
+
+        # Get images information will be used for Mosaic or MixUp
+        mix_labels = [self.dataset.get_image_and_label(i) for i in indexes]
+
+        if self.pre_transform is not None:
+            for i, data in enumerate(mix_labels):
+                mix_labels[i] = self.pre_transform(data)
+        labels["mix_labels"] = mix_labels
+
+        # Update cls and texts
+        labels = self._update_label_text(labels)
+        # Mosaic or MixUp
+        labels = self._mix_transform(labels)
+        labels.pop("mix_labels", None)
+        return labels
+
+    def _transform(self, labels1, labels2={}):
+        """Applies Copy-Paste augmentation to combine objects from another image into the current image."""
+        im = labels1["img"]
+        cls = labels1["cls"]
         h, w = im.shape[:2]
-        instances = labels.pop("instances")
+        instances = labels1.pop("instances")
         instances.convert_bbox(format="xyxy")
         instances.denormalize(w, h)
-        if self.p and len(instances.segments):
-            _, w, _ = im.shape  # height, width, channels
-            im_new = np.zeros(im.shape, np.uint8)
-
-            # Calculate ioa first then select indexes randomly
-            ins_flip = deepcopy(instances)
-            ins_flip.fliplr(w)
-
-            ioa = bbox_ioa(ins_flip.bboxes, instances.bboxes)  # intersection over area, (N, M)
-            indexes = np.nonzero((ioa < 0.30).all(1))[0]  # (N, )
-            n = len(indexes)
-            for j in random.sample(list(indexes), k=round(self.p * n)):
-                cls = np.concatenate((cls, cls[[j]]), axis=0)
-                instances = Instances.concatenate((instances, ins_flip[[j]]), axis=0)
-                cv2.drawContours(im_new, instances.segments[[j]].astype(np.int32), -1, (1, 1, 1), cv2.FILLED)
-
-            result = cv2.flip(im, 1)  # augment segments (flip left-right)
-            i = cv2.flip(im_new, 1).astype(bool)
-            im[i] = result[i]
-
-        labels["img"] = im
-        labels["cls"] = cls
-        labels["instances"] = instances
-        return labels
+
+        im_new = np.zeros(im.shape, np.uint8)
+        instances2 = labels2.pop("instances", None)
+        if instances2 is None:
+            instances2 = deepcopy(instances)
+            instances2.fliplr(w)
+        ioa = bbox_ioa(instances2.bboxes, instances.bboxes)  # intersection over area, (N, M)
+        indexes = np.nonzero((ioa < 0.30).all(1))[0]  # (N, )
+        n = len(indexes)
+        sorted_idx = np.argsort(ioa.max(1)[indexes])
+        indexes = indexes[sorted_idx]
+        for j in indexes[: round(self.p * n)]:
+            cls = np.concatenate((cls, labels2.get("cls", cls)[[j]]), axis=0)
+            instances = Instances.concatenate((instances, instances2[[j]]), axis=0)
+            cv2.drawContours(im_new, instances2.segments[[j]].astype(np.int32), -1, (1, 1, 1), cv2.FILLED)

+        result = labels2.get("img", cv2.flip(im, 1))  # augment segments
+        i = im_new.astype(bool)
+        im[i] = result[i]
+
+        labels1["img"] = im
+        labels1["cls"] = cls
+        labels1["instances"] = instances
+        return labels1


 class Albumentations:
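The rewritten CopyPaste above keeps the original flip behavior but adds a dataset-backed "mixup" mode. A minimal sketch of the default flip mode, which needs no dataset (the toy box and square segment are illustrative values, not from the commit):

>>> import numpy as np
>>> from ultralytics.data.augment import CopyPaste
>>> from ultralytics.utils.instance import Instances
>>> segments = np.array([[[10, 10], [110, 10], [110, 110], [10, 110]]], dtype=np.float32)  # one square polygon
>>> instances = Instances(np.array([[10, 10, 110, 110]], dtype=np.float32), segments, bbox_format="xyxy", normalized=False)
>>> labels = {"img": np.zeros((640, 640, 3), dtype=np.uint8), "cls": np.array([[0]]), "instances": instances}
>>> out = CopyPaste(p=1.0, mode="flip")(labels)  # pastes mirrored copies of non-overlapping segments

In "mixup" mode the class instead draws a second sample via get_indexes() and pastes its segments, which is how v8_transforms wires it below.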
@@ -2259,9 +2272,9 @@ class RandomLoadText:

 def v8_transforms(dataset, imgsz, hyp, stretch=False):
     """
-    Applies a series of image transformations for YOLOv8 training.
+    Applies a series of image transformations for training.

-    This function creates a composition of image augmentation techniques to prepare images for YOLOv8 training.
+    This function creates a composition of image augmentation techniques to prepare images for YOLO training.
     It includes operations such as mosaic, copy-paste, random perspective, mixup, and various color adjustments.

     Args:
@@ -2280,20 +2293,28 @@ def v8_transforms(dataset, imgsz, hyp, stretch=False):
         >>> transforms = v8_transforms(dataset, imgsz=640, hyp=hyp)
         >>> augmented_data = transforms(dataset[0])
     """
-    pre_transform = Compose(
-        [
-            Mosaic(dataset, imgsz=imgsz, p=hyp.mosaic),
-            CopyPaste(p=hyp.copy_paste),
-            RandomPerspective(
-                degrees=hyp.degrees,
-                translate=hyp.translate,
-                scale=hyp.scale,
-                shear=hyp.shear,
-                perspective=hyp.perspective,
-                pre_transform=None if stretch else LetterBox(new_shape=(imgsz, imgsz)),
-            ),
-        ]
-    )
+    mosaic = Mosaic(dataset, imgsz=imgsz, p=hyp.mosaic)
+    affine = RandomPerspective(
+        degrees=hyp.degrees,
+        translate=hyp.translate,
+        scale=hyp.scale,
+        shear=hyp.shear,
+        perspective=hyp.perspective,
+        pre_transform=None if stretch else LetterBox(new_shape=(imgsz, imgsz)),
+    )
+
+    pre_transform = Compose([mosaic, affine])
+    if hyp.copy_paste_mode == "flip":
+        pre_transform.insert(1, CopyPaste(p=hyp.copy_paste, mode=hyp.copy_paste_mode))
+    else:
+        pre_transform.append(
+            CopyPaste(
+                dataset,
+                pre_transform=Compose([Mosaic(dataset, imgsz=imgsz, p=hyp.mosaic), affine]),
+                p=hyp.copy_paste,
+                mode=hyp.copy_paste_mode,
+            )
+        )
     flip_idx = dataset.data.get("flip_idx", [])  # for keypoints augmentation
     if dataset.use_keypoints:
         kpt_shape = dataset.data.get("kpt_shape", None)
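The new copy_paste_mode hyperparameter selects between the two branches above. A sketch of driving it from the training API (assumes a segmentation dataset, since copy-paste requires segment labels; coco8-seg is the small sample dataset used throughout the docs):

>>> from ultralytics import YOLO
>>> model = YOLO("yolo11n-seg.pt")
>>> model.train(data="coco8-seg.yaml", epochs=3, copy_paste=0.5, copy_paste_mode="mixup")  # or the default "flip"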

@@ -538,6 +538,8 @@ class BaseTrainer:
             self.best.write_bytes(serialized_ckpt)  # save best.pt
         if (self.save_period > 0) and (self.epoch % self.save_period == 0):
             (self.wdir / f"epoch{self.epoch}.pt").write_bytes(serialized_ckpt)  # save epoch, i.e. 'epoch3.pt'
+        # if self.args.close_mosaic and self.epoch == (self.epochs - self.args.close_mosaic - 1):
+        #     (self.wdir / "last_mosaic.pt").write_bytes(serialized_ckpt)  # save mosaic checkpoint

     def get_dataset(self):
         """
@@ -698,7 +700,12 @@ class BaseTrainer:
                 resume = True
                 self.args = get_cfg(ckpt_args)
                 self.args.model = self.args.resume = str(last)  # reinstate model
-                for k in "imgsz", "batch", "device":  # allow arg updates to reduce memory or update device on resume
+                for k in (
+                    "imgsz",
+                    "batch",
+                    "device",
+                    "close_mosaic",
+                ):  # allow arg updates to reduce memory or update device on resume
                     if k in overrides:
                         setattr(self.args, k, overrides[k])
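With close_mosaic added to the resume whitelist, the mosaic-shutdown point can now be changed when restarting a run. A sketch (the checkpoint path is hypothetical):

>>> from ultralytics import YOLO
>>> model = YOLO("runs/detect/train/weights/last.pt")  # hypothetical last.pt from an interrupted run
>>> model.train(resume=True, close_mosaic=0)  # imgsz, batch and device may be overridden the same way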

@@ -20,6 +20,7 @@ Example:
 from .block import (
     C1,
     C2,
+    C2PSA,
     C3,
     C3TR,
     CIB,
@@ -38,7 +39,9 @@ from .block import (
     C2f,
     C2fAttn,
     C2fCIB,
+    C2fPSA,
     C3Ghost,
+    C3k2,
     C3x,
     CBFuse,
     CBLinear,
@@ -110,6 +113,10 @@ __all__ = (
     "C2",
     "C3",
     "C2f",
+    "C3k2",
+    "SCDown",
+    "C2fPSA",
+    "C2PSA",
     "C2fAttn",
     "C3x",
     "C3TR",
@@ -149,5 +156,4 @@ __all__ = (
     "C2fCIB",
     "Attention",
     "PSA",
-    "SCDown",
 )

@@ -40,6 +40,9 @@ __all__ = (
     "SPPELAN",
     "CBFuse",
     "CBLinear",
+    "C3k2",
+    "C2fPSA",
+    "C2PSA",
     "RepVGGDW",
     "CIB",
     "C2fCIB",
@@ -696,6 +699,49 @@ class CBFuse(nn.Module):
         return torch.sum(torch.stack(res + xs[-1:]), dim=0)
class C3f(nn.Module):
"""Faster Implementation of CSP Bottleneck with 2 convolutions."""
def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
"""Initialize CSP bottleneck layer with two convolutions with arguments ch_in, ch_out, number, shortcut, groups,
expansion.
"""
super().__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c1, c_, 1, 1)
self.cv3 = Conv((2 + n) * c_, c2, 1) # optional act=FReLU(c2)
self.m = nn.ModuleList(Bottleneck(c_, c_, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n))
def forward(self, x):
"""Forward pass through C2f layer."""
y = [self.cv2(x), self.cv1(x)]
y.extend(m(y[-1]) for m in self.m)
return self.cv3(torch.cat(y, 1))
class C3k2(C2f):
"""Faster Implementation of CSP Bottleneck with 2 convolutions."""
def __init__(self, c1, c2, n=1, c3k=False, e=0.5, g=1, shortcut=True):
"""Initializes the C3k2 module, a faster CSP Bottleneck with 2 convolutions and optional C3k blocks."""
super().__init__(c1, c2, n, shortcut, g, e)
self.m = nn.ModuleList(
C3k(self.c, self.c, 2, shortcut, g) if c3k else Bottleneck(self.c, self.c, shortcut, g) for _ in range(n)
)
class C3k(C3):
"""C3k is a CSP bottleneck module with customizable kernel sizes for feature extraction in neural networks."""
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, k=3):
"""Initializes the C3k module with specified channels, number of layers, and configurations."""
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e) # hidden channels
# self.m = nn.Sequential(*(RepBottleneck(c_, c_, shortcut, g, k=(k, k), e=1.0) for _ in range(n)))
self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, k=(k, k), e=1.0) for _ in range(n)))
 class RepVGGDW(torch.nn.Module):
     """RepVGGDW is a class that represents a depth wise separable convolutional block in RepVGG architecture."""
@@ -873,25 +919,69 @@ class Attention(nn.Module):
         return x
class PSABlock(nn.Module):
"""
PSABlock class implementing a Position-Sensitive Attention block for neural networks.
This class encapsulates the functionality for applying multi-head attention and feed-forward neural network layers
with optional shortcut connections.
Attributes:
attn (Attention): Multi-head attention module.
ffn (nn.Sequential): Feed-forward neural network module.
add (bool): Flag indicating whether to add shortcut connections.
Methods:
forward: Performs a forward pass through the PSABlock, applying attention and feed-forward layers.
Examples:
Create a PSABlock and perform a forward pass
>>> psablock = PSABlock(c=128, attn_ratio=0.5, num_heads=4, shortcut=True)
>>> input_tensor = torch.randn(1, 128, 32, 32)
>>> output_tensor = psablock(input_tensor)
"""
def __init__(self, c, attn_ratio=0.5, num_heads=4, shortcut=True) -> None:
"""Initializes the PSABlock with attention and feed-forward layers for enhanced feature extraction."""
super().__init__()
self.attn = Attention(c, attn_ratio=attn_ratio, num_heads=num_heads)
self.ffn = nn.Sequential(Conv(c, c * 2, 1), Conv(c * 2, c, 1, act=False))
self.add = shortcut
def forward(self, x):
"""Executes a forward pass through PSABlock, applying attention and feed-forward layers to the input tensor."""
x = x + self.attn(x) if self.add else self.attn(x)
x = x + self.ffn(x) if self.add else self.ffn(x)
return x
 class PSA(nn.Module):
     """
-    Position-wise Spatial Attention module.
+    PSA class for implementing Position-Sensitive Attention in neural networks.

-    Args:
-        c1 (int): Number of input channels.
-        c2 (int): Number of output channels.
-        e (float): Expansion factor for the intermediate channels. Default is 0.5.
+    This class encapsulates the functionality for applying position-sensitive attention and feed-forward networks to
+    input tensors, enhancing feature extraction and processing capabilities.

     Attributes:
-        c (int): Number of intermediate channels.
+        c (int): Number of hidden channels after applying the initial convolution.
         cv1 (Conv): 1x1 convolution layer to reduce the number of input channels to 2*c.
         cv2 (Conv): 1x1 convolution layer to reduce the number of output channels to c.
-        attn (Attention): Attention module for spatial attention.
-        ffn (nn.Sequential): Feed-forward network module.
+        attn (Attention): Attention module for position-sensitive attention.
+        ffn (nn.Sequential): Feed-forward network for further processing.
+
+    Methods:
+        forward: Applies position-sensitive attention and feed-forward network to the input tensor.
+
+    Examples:
+        Create a PSA module and apply it to an input tensor
+        >>> psa = PSA(c1=128, c2=128, e=0.5)
+        >>> input_tensor = torch.randn(1, 128, 64, 64)
+        >>> output_tensor = psa.forward(input_tensor)
     """

     def __init__(self, c1, c2, e=0.5):
-        """Initializes convolution layers, attention module, and feed-forward network with channel reduction."""
+        """Initializes the PSA module with input/output channels and attention mechanism for feature extraction."""
         super().__init__()
         assert c1 == c2
         self.c = int(c1 * e)
@@ -902,46 +992,117 @@ class PSA(nn.Module):
         self.ffn = nn.Sequential(Conv(self.c, self.c * 2, 1), Conv(self.c * 2, self.c, 1, act=False))

     def forward(self, x):
-        """
-        Forward pass of the PSA module.
-
-        Args:
-            x (torch.Tensor): Input tensor.
-
-        Returns:
-            (torch.Tensor): Output tensor.
-        """
+        """Executes forward pass in PSA module, applying attention and feed-forward layers to the input tensor."""
         a, b = self.cv1(x).split((self.c, self.c), dim=1)
         b = b + self.attn(b)
         b = b + self.ffn(b)
         return self.cv2(torch.cat((a, b), 1))
class C2PSA(nn.Module):
"""
C2PSA module with attention mechanism for enhanced feature extraction and processing.
This module implements a convolutional block with attention mechanisms to enhance feature extraction and processing
capabilities. It includes a series of PSABlock modules for self-attention and feed-forward operations.
Attributes:
c (int): Number of hidden channels.
cv1 (Conv): 1x1 convolution layer to reduce the number of input channels to 2*c.
cv2 (Conv): 1x1 convolution layer to reduce the number of output channels to c.
m (nn.Sequential): Sequential container of PSABlock modules for attention and feed-forward operations.
Methods:
forward: Performs a forward pass through the C2PSA module, applying attention and feed-forward operations.
Notes:
This module essentially is the same as PSA module, but refactored to allow stacking more PSABlock modules.
Examples:
>>> c2psa = C2PSA(c1=256, c2=256, n=3, e=0.5)
>>> input_tensor = torch.randn(1, 256, 64, 64)
>>> output_tensor = c2psa(input_tensor)
"""
def __init__(self, c1, c2, n=1, e=0.5):
"""Initializes the C2PSA module with specified input/output channels, number of layers, and expansion ratio."""
super().__init__()
assert c1 == c2
self.c = int(c1 * e)
self.cv1 = Conv(c1, 2 * self.c, 1, 1)
self.cv2 = Conv(2 * self.c, c1, 1)
self.m = nn.Sequential(*(PSABlock(self.c, attn_ratio=0.5, num_heads=self.c // 64) for _ in range(n)))
def forward(self, x):
"""Processes the input tensor 'x' through a series of PSA blocks and returns the transformed tensor."""
a, b = self.cv1(x).split((self.c, self.c), dim=1)
b = self.m(b)
return self.cv2(torch.cat((a, b), 1))
class C2fPSA(C2f):
"""
C2fPSA module with enhanced feature extraction using PSA blocks.
This class extends the C2f module by incorporating PSA blocks for improved attention mechanisms and feature extraction.
Attributes:
c (int): Number of hidden channels.
cv1 (Conv): 1x1 convolution layer to reduce the number of input channels to 2*c.
cv2 (Conv): 1x1 convolution layer to reduce the number of output channels to c.
m (nn.ModuleList): List of PSA blocks for feature extraction.
Methods:
forward: Performs a forward pass through the C2fPSA module.
forward_split: Performs a forward pass using split() instead of chunk().
Examples:
>>> import torch
>>> from ultralytics.nn.modules import C2fPSA
>>> model = C2fPSA(c1=64, c2=64, n=3, e=0.5)
>>> x = torch.randn(1, 64, 128, 128)
>>> output = model(x)
>>> print(output.shape)
"""
def __init__(self, c1, c2, n=1, e=0.5):
"""Initializes the C2fPSA module, a variant of C2f with PSA blocks for enhanced feature extraction."""
assert c1 == c2
super().__init__(c1, c2, n=n, e=e)
self.m = nn.ModuleList(PSABlock(self.c, attn_ratio=0.5, num_heads=self.c // 64) for _ in range(n))
 class SCDown(nn.Module):
-    """Spatial Channel Downsample (SCDown) module for reducing spatial and channel dimensions."""
+    """
+    SCDown module for downsampling with separable convolutions.
+
+    This module performs downsampling using a combination of pointwise and depthwise convolutions, which helps in
+    efficiently reducing the spatial dimensions of the input tensor while maintaining the channel information.
+
+    Attributes:
+        cv1 (Conv): Pointwise convolution layer that reduces the number of channels.
+        cv2 (Conv): Depthwise convolution layer that performs spatial downsampling.
+
+    Methods:
+        forward: Applies the SCDown module to the input tensor.
+
+    Examples:
+        >>> import torch
+        >>> from ultralytics.nn.modules import SCDown
+        >>> model = SCDown(c1=64, c2=128, k=3, s=2)
+        >>> x = torch.randn(1, 64, 128, 128)
+        >>> y = model(x)
+        >>> print(y.shape)
+        torch.Size([1, 128, 64, 64])
+    """

     def __init__(self, c1, c2, k, s):
-        """
-        Spatial Channel Downsample (SCDown) module.
-
-        Args:
-            c1 (int): Number of input channels.
-            c2 (int): Number of output channels.
-            k (int): Kernel size for the convolutional layer.
-            s (int): Stride for the convolutional layer.
-        """
+        """Initializes the SCDown module with specified input/output channels, kernel size, and stride."""
         super().__init__()
         self.cv1 = Conv(c1, c2, 1, 1)
         self.cv2 = Conv(c2, c2, k=k, s=s, g=c2, act=False)

     def forward(self, x):
-        """
-        Forward pass of the SCDown module.
-
-        Args:
-            x (torch.Tensor): Input tensor.
-
-        Returns:
-            (torch.Tensor): Output tensor after applying the SCDown module.
-        """
+        """Applies convolution and downsampling to the input tensor in the SCDown module."""
         return self.cv2(self.cv1(x))

@@ -209,7 +209,8 @@ class RepConv(nn.Module):
         kernelid, biasid = self._fuse_bn_tensor(self.bn)
         return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid

-    def _pad_1x1_to_3x3_tensor(self, kernel1x1):
+    @staticmethod
+    def _pad_1x1_to_3x3_tensor(kernel1x1):
         """Pads a 1x1 tensor to a 3x3 tensor."""
         if kernel1x1 is None:
             return 0

@@ -11,7 +11,7 @@ from torch.nn.init import constant_, xavier_uniform_
 from ultralytics.utils.tal import TORCH_1_10, dist2bbox, dist2rbox, make_anchors

 from .block import DFL, BNContrastiveHead, ContrastiveHead, Proto
-from .conv import Conv
+from .conv import Conv, DWConv
 from .transformer import MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer
 from .utils import bias_init_with_prob, linear_init
@@ -41,7 +41,14 @@ class Detect(nn.Module):
         self.cv2 = nn.ModuleList(
             nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3), nn.Conv2d(c2, 4 * self.reg_max, 1)) for x in ch
         )
-        self.cv3 = nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in ch)
+        self.cv3 = nn.ModuleList(
+            nn.Sequential(
+                nn.Sequential(DWConv(x, x, 3), Conv(x, c3, 1)),
+                nn.Sequential(DWConv(c3, c3, 3), Conv(c3, c3, 1)),
+                nn.Conv2d(c3, self.nc, 1),
+            )
+            for x in ch
+        )
         self.dfl = DFL(self.reg_max) if self.reg_max > 1 else nn.Identity()

         if self.end2end:
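The classification branch cv3 now uses depthwise 3x3 convolutions followed by pointwise 1x1 convolutions instead of full 3x3 convolutions, trimming parameters in the head. A sketch for inspecting the effect (the channel widths are illustrative):

>>> from ultralytics.nn.modules import Detect
>>> head = Detect(nc=80, ch=(64, 128, 256))  # one detection module per P3/P4/P5 map
>>> sum(p.numel() for p in head.cv3.parameters())  # parameter count of the DWConv-based cls branch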

@@ -13,6 +13,7 @@ from ultralytics.nn.modules import (
     AIFI,
     C1,
     C2,
+    C2PSA,
     C3,
     C3TR,
     ELAN1,
@@ -28,7 +29,9 @@ from ultralytics.nn.modules import (
     C2f,
     C2fAttn,
     C2fCIB,
+    C2fPSA,
     C3Ghost,
+    C3k2,
     C3x,
     CBFuse,
     CBLinear,
@@ -968,12 +971,15 @@ def parse_model(d, ch, verbose=True):  # model_dict, input_channels(3)
         GhostBottleneck,
         SPP,
         SPPF,
+        C2fPSA,
+        C2PSA,
         DWConv,
         Focus,
         BottleneckCSP,
         C1,
         C2,
         C2f,
+        C3k2,
         RepNCSPELAN4,
         ELAN1,
         ADown,
@@ -1001,9 +1007,26 @@ def parse_model(d, ch, verbose=True):  # model_dict, input_channels(3)
                 )  # num heads

             args = [c1, c2, *args[1:]]
-            if m in {BottleneckCSP, C1, C2, C2f, C2fAttn, C3, C3TR, C3Ghost, C3x, RepC3, C2fCIB}:
+            if m in {
+                BottleneckCSP,
+                C1,
+                C2,
+                C2f,
+                C3k2,
+                C2fAttn,
+                C3,
+                C3TR,
+                C3Ghost,
+                C3x,
+                RepC3,
+                C2fPSA,
+                C2fCIB,
+                C2PSA,
+            }:
                 args.insert(2, n)  # number of repeats
                 n = 1
+            if m is C3k2 and scale in "mlx":  # for M/L/X sizes
+                args[3] = True
         elif m is AIFI:
             args = [ch[f], *args]
         elif m in {HGStem, HGBlock}:
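The `scale in "mlx"` check flips the fourth C3k2 argument (c3k) to True, so only the medium and larger YOLO11 models instantiate full C3k blocks. A sketch verifying this on the bundled configs (the model[2] index assumes the first C3k2 sits at layer 2, as in the yolo11.yaml above):

>>> from ultralytics.nn.tasks import DetectionModel
>>> from ultralytics.nn.modules.block import C3k
>>> nano = DetectionModel("yolo11n.yaml", verbose=False)
>>> medium = DetectionModel("yolo11m.yaml", verbose=False)
>>> isinstance(medium.model[2].m[0], C3k), isinstance(nano.model[2].m[0], C3k)
(True, False)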
@@ -1080,7 +1103,7 @@ def guess_model_scale(model_path):
     with contextlib.suppress(AttributeError):
         import re

-        return re.search(r"yolov\d+([nslmx])", Path(model_path).stem).group(1)  # n, s, m, l, or x
+        return re.search(r"yolo[v]?\d+([nslmx])", Path(model_path).stem).group(1)  # n, s, m, l, or x
     return ""

@@ -18,6 +18,7 @@ from ultralytics.utils import LOGGER, TQDM, checks, clean_url, emojis, is_online
 GITHUB_ASSETS_REPO = "ultralytics/assets"
 GITHUB_ASSETS_NAMES = (
     [f"yolov8{k}{suffix}.pt" for k in "nsmlx" for suffix in ("", "-cls", "-seg", "-pose", "-obb", "-oiv7")]
+    + [f"yolo11{k}{suffix}.pt" for k in "nsmlx" for suffix in ("", "-cls", "-seg", "-pose", "-obb")]
     + [f"yolov5{k}{resolution}u.pt" for k in "nsmlx" for resolution in ("", "6")]
     + [f"yolov3{k}u.pt" for k in ("", "-spp", "-tiny")]
     + [f"yolov8{k}-world.pt" for k in "smlx"]
@@ -408,7 +409,7 @@ def get_github_assets(repo="ultralytics/assets", version="latest", retry=False):
     return data["tag_name"], [x["name"] for x in data["assets"]]  # tag, assets i.e. ['yolov8n.pt', 'yolov8s.pt', ...]

-def attempt_download_asset(file, repo="ultralytics/assets", release="v8.2.0", **kwargs):
+def attempt_download_asset(file, repo="ultralytics/assets", release="v8.3.0", **kwargs):
     """
     Attempt to download a file from GitHub release assets if it is not found locally. The function checks for the file
     locally first, then tries to download it from the specified GitHub repository release.
@@ -416,7 +417,7 @@ def attempt_download_asset(file, repo="ultralytics/assets", release="v8.2.0", **
     Args:
         file (str | Path): The filename or file path to be downloaded.
         repo (str, optional): The GitHub repository in the format 'owner/repo'. Defaults to 'ultralytics/assets'.
-        release (str, optional): The specific release version to be downloaded. Defaults to 'v8.2.0'.
+        release (str, optional): The specific release version to be downloaded. Defaults to 'v8.3.0'.
         **kwargs (any): Additional keyword arguments for the download process.

     Returns:
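With the asset list and default release updated, YOLO11 weights resolve through the same helper as earlier models. A sketch (fetches yolo11n.pt from the v8.3.0 assets release if it is not already present locally):

>>> from ultralytics.utils.downloads import attempt_download_asset
>>> attempt_download_asset("yolo11n.pt")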

@@ -228,8 +228,11 @@ class v8DetectionLoss:

         # Pboxes
         pred_bboxes = self.bbox_decode(anchor_points, pred_distri)  # xyxy, (b, h*w, 4)
+        # dfl_conf = pred_distri.view(batch_size, -1, 4, self.reg_max).detach().softmax(-1)
+        # dfl_conf = (dfl_conf.amax(-1).mean(-1) + dfl_conf.amax(-1).amin(-1)) / 2

         _, target_bboxes, target_scores, fg_mask, _ = self.assigner(
+            # pred_scores.detach().sigmoid() * 0.8 + dfl_conf.unsqueeze(-1) * 0.2,
             pred_scores.detach().sigmoid(),
             (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype),
             anchor_points * stride_tensor,

@@ -159,7 +159,7 @@ def select_device(device="", batch=0, newline=False, verbose=True):
     if isinstance(device, torch.device):
         return device

-    s = f"Ultralytics YOLOv{__version__} 🚀 Python-{PYTHON_VERSION} torch-{torch.__version__} "
+    s = f"Ultralytics {__version__} 🚀 Python-{PYTHON_VERSION} torch-{torch.__version__} "
     device = str(device).lower()
     for remove in "cuda:", "none", "(", ")", "[", "]", "'", " ":
         device = device.replace(remove, "")  # to string, 'cuda:0' -> '0' and '(0, 1)' -> '0,1'
