diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 430b05957a..0631213715 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -56,7 +56,7 @@ body: placeholder: | Paste output of `yolo checks` or `ultralytics.checks()` command, i.e.: ``` - Ultralytics YOLOv8.0.181 🚀 Python-3.11.2 torch-2.0.1 CPU (Apple M2) + Ultralytics 8.3.2 🚀 Python-3.11.2 torch-2.4.1 CPU (Apple M3) Setup complete ✅ (8 CPUs, 16.0 GB RAM, 266.5/460.4 GB disk) OS macOS-13.5.2 @@ -64,7 +64,7 @@ body: Python 3.11.2 Install git RAM 16.00 GB - CPU Apple M2 + CPU Apple M3 CUDA None ``` validations: diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 73745a3a56..99cadfd893 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -4,7 +4,7 @@ blank_issues_enabled: true contact_links: - name: 📄 Docs url: https://docs.ultralytics.com/ - about: Full Ultralytics YOLOv8 Documentation + about: Full Ultralytics YOLO Documentation - name: 💬 Forum url: https://community.ultralytics.com/ about: Ask on Ultralytics Community Forum diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml index c065446c1f..04cefb18cf 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.yml +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -1,14 +1,14 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license name: 🚀 Feature Request -description: Suggest a YOLOv8 idea +description: Suggest an Ultralytics YOLO idea # title: " " labels: [enhancement] body: - type: markdown attributes: value: | - Thank you for submitting a YOLOv8 🚀 Feature Request! + Thank you for submitting an Ultralytics 🚀 Feature Request! - type: checkboxes attributes: @@ -17,7 +17,7 @@ body: Please search the Ultralytics [Docs](https://docs.ultralytics.com) and [issues](https://github.com/ultralytics/ultralytics/issues) to see if a similar feature request already exists. options: - label: > - I have searched the YOLOv8 [issues](https://github.com/ultralytics/ultralytics/issues) and found no similar feature requests. + I have searched the Ultralytics [issues](https://github.com/ultralytics/ultralytics/issues) and found no similar feature requests. required: true - type: textarea @@ -25,7 +25,7 @@ body: label: Description description: A short description of your feature. placeholder: | - What new feature would you like to see in YOLOv8? + What new feature would you like to see in YOLO? validations: required: true @@ -46,7 +46,7 @@ body: attributes: label: Are you willing to submit a PR? description: > - (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/ultralytics/pulls) (PR) to help improve YOLOv8 for everyone, especially if you have a good understanding of how to implement a fix or feature. - See the YOLOv8 [Contributing Guide](https://docs.ultralytics.com/help/contributing) to get started. + (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/ultralytics/pulls) (PR) to help improve YOLO for everyone, especially if you have a good understanding of how to implement a fix or feature. + See the Ultralytics [Contributing Guide](https://docs.ultralytics.com/help/contributing) to get started. options: - label: Yes I'd like to help by submitting a PR! 
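The bug template above asks reporters to paste the output of `yolo checks` or `ultralytics.checks()`. For anyone working in a notebook or script rather than a terminal, a minimal sketch of the Python form (assuming only that the `ultralytics` package is installed):

```python
# Minimal sketch: produce the environment report the bug template requests.
import ultralytics

ultralytics.checks()  # prints Ultralytics version, Python, torch, device, RAM and disk info
```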
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 5501283260..c54ee4494c 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -9,7 +9,7 @@ on: pull_request: branches: [main] schedule: - - cron: "0 0 * * *" # runs at 00:00 UTC every day + - cron: "0 8 * * *" # runs at 08:00 UTC every day workflow_dispatch: inputs: hub: @@ -98,9 +98,9 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-14] + os: [ubuntu-latest, windows-latest, macos-14] python-version: ["3.11"] - model: [yolov8n] + model: [yolo11n] steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 @@ -116,24 +116,27 @@ jobs: run: | yolo checks pip list + - name: Benchmark DetectionModel + shell: bash + run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}.pt' imgsz=160 verbose=0.309 - name: Benchmark ClassificationModel shell: bash - run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-cls.pt' imgsz=160 verbose=0.166 + run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-cls.pt' imgsz=160 verbose=0.249 - name: Benchmark YOLOWorld DetectionModel shell: bash - run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/yolov8s-worldv2.pt' imgsz=160 verbose=0.318 + run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/yolov8s-worldv2.pt' imgsz=160 verbose=0.337 - name: Benchmark SegmentationModel shell: bash - run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-seg.pt' imgsz=160 verbose=0.279 + run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-seg.pt' imgsz=160 verbose=0.195 - name: Benchmark PoseModel shell: bash - run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-pose.pt' imgsz=160 verbose=0.183 + run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-pose.pt' imgsz=160 verbose=0.197 - name: Benchmark OBBModel shell: bash - run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-obb.pt' imgsz=160 verbose=0.472 + run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/${{ matrix.model }}-obb.pt' imgsz=160 verbose=0.597 - name: Benchmark YOLOv10Model shell: bash - run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/yolov10n.pt' imgsz=160 verbose=0.178 + run: coverage run -a --source=ultralytics -m ultralytics.cfg.__init__ benchmark model='path with spaces/yolov10n.pt' imgsz=160 verbose=0.205 - name: Merge Coverage Reports run: | coverage xml -o coverage-benchmarks.xml @@ -251,17 +254,17 @@ jobs: - name: Pytest tests run: pytest --slow tests/ - name: Benchmark ClassificationModel - run: python -m ultralytics.cfg.__init__ benchmark model='yolov8n-cls.pt' imgsz=160 verbose=0.166 + run: python -m ultralytics.cfg.__init__ benchmark model='yolo11n-cls.pt' imgsz=160 verbose=0.249 - name: Benchmark YOLOWorld DetectionModel - run: python -m ultralytics.cfg.__init__ benchmark model='yolov8s-worldv2.pt' imgsz=160 verbose=0.318 
+ run: python -m ultralytics.cfg.__init__ benchmark model='yolov8s-worldv2.pt' imgsz=160 verbose=0.337 - name: Benchmark SegmentationModel - run: python -m ultralytics.cfg.__init__ benchmark model='yolov8n-seg.pt' imgsz=160 verbose=0.267 + run: python -m ultralytics.cfg.__init__ benchmark model='yolo11n-seg.pt' imgsz=160 verbose=0.195 - name: Benchmark PoseModel - run: python -m ultralytics.cfg.__init__ benchmark model='yolov8n-pose.pt' imgsz=160 verbose=0.179 + run: python -m ultralytics.cfg.__init__ benchmark model='yolo11n-pose.pt' imgsz=160 verbose=0.197 - name: Benchmark OBBModel - run: python -m ultralytics.cfg.__init__ benchmark model='yolov8n-obb.pt' imgsz=160 verbose=0.472 + run: python -m ultralytics.cfg.__init__ benchmark model='yolo11n-obb.pt' imgsz=160 verbose=0.597 - name: Benchmark YOLOv10Model - run: python -m ultralytics.cfg.__init__ benchmark model='yolov10n.pt' imgsz=160 verbose=0.178 + run: python -m ultralytics.cfg.__init__ benchmark model='yolov10n.pt' imgsz=160 verbose=0.205 - name: Benchmark Summary run: | cat benchmarks.log @@ -317,16 +320,16 @@ jobs: conda list - name: Test CLI run: | - yolo predict model=yolov8n.pt imgsz=320 - yolo train model=yolov8n.pt data=coco8.yaml epochs=1 imgsz=32 - yolo val model=yolov8n.pt data=coco8.yaml imgsz=32 - yolo export model=yolov8n.pt format=torchscript imgsz=160 + yolo predict model=yolo11n.pt imgsz=320 + yolo train model=yolo11n.pt data=coco8.yaml epochs=1 imgsz=32 + yolo val model=yolo11n.pt data=coco8.yaml imgsz=32 + yolo export model=yolo11n.pt format=torchscript imgsz=160 - name: Test Python # Note this step must use the updated default bash environment, not a python environment run: | python -c " from ultralytics import YOLO - model = YOLO('yolov8n.pt') + model = YOLO('yolo11n.pt') results = model.train(data='coco8.yaml', epochs=3, imgsz=160) results = model.val(imgsz=160) results = model.predict(imgsz=160) diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index 1edf59bb44..5ca3abefba 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -26,7 +26,7 @@ jobs: steps: - name: CLA Assistant if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I sign the CLA') || github.event_name == 'pull_request_target' - uses: contributor-assistant/github-action@v2.5.2 + uses: contributor-assistant/github-action@v2.6.1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Must be repository secret PAT diff --git a/.github/workflows/docker.yaml b/.github/workflows/docker.yaml index 5242d9efa6..8a3d41a91a 100644 --- a/.github/workflows/docker.yaml +++ b/.github/workflows/docker.yaml @@ -140,7 +140,7 @@ jobs: with: timeout_minutes: 120 retry_wait_seconds: 60 - max_attempts: 2 # retry once + max_attempts: 3 # retry twice command: | docker build \ --platform ${{ matrix.platforms }} \ @@ -156,7 +156,7 @@ jobs: - name: Run Benchmarks # WARNING: Dockerfile (GPU) error on TF.js export 'module 'numpy' has no attribute 'object'. 
if: (github.event_name == 'push' || github.event.inputs[matrix.dockerfile] == 'true') && matrix.platforms == 'linux/amd64' && matrix.dockerfile != 'Dockerfile' && matrix.dockerfile != 'Dockerfile-conda' # arm64 images not supported on GitHub CI runners - run: docker run ultralytics/ultralytics:${{ matrix.tags }} yolo benchmark model=yolov8n.pt imgsz=160 verbose=0.318 + run: docker run ultralytics/ultralytics:${{ matrix.tags }} yolo benchmark model=yolo11n.pt imgsz=160 verbose=0.309 - name: Push Docker Image with Ultralytics version tag if: (github.event_name == 'push' || (github.event.inputs[matrix.dockerfile] == 'true' && github.event.inputs.push == 'true')) && steps.check_tag.outputs.exists == 'false' && matrix.dockerfile != 'Dockerfile-conda' diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 9befe5c968..516450f876 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -48,7 +48,7 @@ jobs: ## Environments - YOLOv8 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): + YOLO may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/) @@ -59,4 +59,4 @@ jobs: Ultralytics CI - If this badge is green, all [Ultralytics CI](https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml?query=event%3Aschedule) tests are currently passing. CI tests verify correct operation of all YOLOv8 [Modes](https://docs.ultralytics.com/modes/) and [Tasks](https://docs.ultralytics.com/tasks/) on macOS, Windows, and Ubuntu every 24 hours and on every commit. + If this badge is green, all [Ultralytics CI](https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml?query=event%3Aschedule) tests are currently passing. CI tests verify correct operation of all YOLO [Modes](https://docs.ultralytics.com/modes/) and [Tasks](https://docs.ultralytics.com/tasks/) on macOS, Windows, and Ubuntu every 24 hours and on every commit. 
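Both the CI and Docker jobs above drive the same `yolo benchmark` entry point and pass a float to `verbose` (e.g. `verbose=0.309`). A rough Python equivalent of one such step, assuming the `ultralytics.utils.benchmarks.benchmark` helper and that a float `verbose` acts as a minimum-metric floor the run must clear rather than a logging switch:

```python
# Hedged sketch of one CI benchmark step; mirrors:
#   yolo benchmark model=yolo11n.pt imgsz=160 verbose=0.309
from ultralytics.utils.benchmarks import benchmark

# Exports the model to the supported formats, runs inference and validation on each,
# and (under the assumed floor semantics) flags any format whose mAP falls below 0.309.
benchmark(model="yolo11n.pt", imgsz=160, verbose=0.309)
```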
diff --git a/.github/workflows/merge-main-into-prs.yml b/.github/workflows/merge-main-into-prs.yml index cb4cfbb64b..347ec1b99c 100644 --- a/.github/workflows/merge-main-into-prs.yml +++ b/.github/workflows/merge-main-into-prs.yml @@ -85,4 +85,3 @@ jobs: print(f"Branches updated: {updated_branches}") print(f"Branches already up-to-date: {up_to_date_branches}") print(f"Total errors: {errors}") - diff --git a/CITATION.cff b/CITATION.cff index bee0abe21a..6c294c0d9b 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -11,14 +11,14 @@ authors: family-names: Jocher affiliation: Ultralytics orcid: 'https://orcid.org/0000-0001-5950-6979' - - given-names: Ayush - family-names: Chaurasia - affiliation: Ultralytics - orcid: 'https://orcid.org/0000-0002-7603-6750' - family-names: Qiu given-names: Jing affiliation: Ultralytics orcid: 'https://orcid.org/0000-0003-3783-7069' + - given-names: Ayush + family-names: Chaurasia + affiliation: Ultralytics + orcid: 'https://orcid.org/0000-0002-7603-6750' repository-code: 'https://github.com/ultralytics/ultralytics' url: 'https://ultralytics.com' license: AGPL-3.0 diff --git a/README.md b/README.md index 704c04b794..b39b089a2e 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@
Ultralytics CI - Ultralytics YOLOv8 Citation + Ultralytics YOLO Citation Ultralytics Docker Pulls Ultralytics Discord Ultralytics Forums @@ -20,13 +20,13 @@

-[Ultralytics](https://www.ultralytics.com/) [YOLOv8](https://github.com/ultralytics/ultralytics) is a cutting-edge, state-of-the-art (SOTA) model that builds upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and flexibility. YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection and tracking, instance segmentation, image classification and pose estimation tasks. +[Ultralytics](https://www.ultralytics.com/) [YOLO11](https://github.com/ultralytics/ultralytics) is a cutting-edge, state-of-the-art (SOTA) model that builds upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and flexibility. YOLO11 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection and tracking, instance segmentation, image classification and pose estimation tasks. -We hope that the resources here will help you get the most out of YOLOv8. Please browse the YOLOv8 Docs for details, raise an issue on GitHub for support, questions, or discussions, become a member of the Ultralytics Discord, Reddit and Forums! +We hope that the resources here will help you get the most out of YOLO. Please browse the Ultralytics Docs for details, raise an issue on GitHub for support, questions, or discussions, become a member of the Ultralytics Discord, Reddit and Forums! To request an Enterprise License please complete the form at [Ultralytics Licensing](https://www.ultralytics.com/license). -YOLOv8 performance plots +YOLO11 performance plots
Ultralytics GitHub @@ -47,7 +47,7 @@ To request an Enterprise License please complete the form at [Ultralytics Licens ##
Documentation
-See below for a quickstart installation and usage example, and see the [YOLOv8 Docs](https://docs.ultralytics.com/) for full documentation on training, validation, prediction and deployment. +See below for a quickstart install and usage examples, and see our [Docs](https://docs.ultralytics.com/) for full documentation on training, validation, prediction and deployment.
Install @@ -71,23 +71,23 @@ For alternative installation methods including [Conda](https://anaconda.org/cond ### CLI -YOLOv8 may be used directly in the Command Line Interface (CLI) with a `yolo` command: +YOLO may be used directly in the Command Line Interface (CLI) with a `yolo` command: ```bash -yolo predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg' +yolo predict model=yolo11n.pt source='https://ultralytics.com/images/bus.jpg' ``` -`yolo` can be used for a variety of tasks and modes and accepts additional arguments, i.e. `imgsz=640`. See the YOLOv8 [CLI Docs](https://docs.ultralytics.com/usage/cli/) for examples. +`yolo` can be used for a variety of tasks and modes and accepts additional arguments, i.e. `imgsz=640`. See the YOLO [CLI Docs](https://docs.ultralytics.com/usage/cli/) for examples. ### Python -YOLOv8 may also be used directly in a Python environment, and accepts the same [arguments](https://docs.ultralytics.com/usage/cfg/) as in the CLI example above: +YOLO may also be used directly in a Python environment, and accepts the same [arguments](https://docs.ultralytics.com/usage/cfg/) as in the CLI example above: ```python from ultralytics import YOLO # Load a model -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") # Train the model train_results = model.train( @@ -108,26 +108,13 @@ results[0].show() path = model.export(format="onnx") # return path to exported model ``` -See YOLOv8 [Python Docs](https://docs.ultralytics.com/usage/python/) for more examples. +See YOLO [Python Docs](https://docs.ultralytics.com/usage/python/) for more examples.
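The Models section below notes that [Track](https://docs.ultralytics.com/modes/track/) mode is available for all Detect, Segment and Pose models. A minimal tracking sketch in the same style as the quickstart above (the video path is a placeholder):

```python
# Minimal multi-object tracking sketch; "path/to/video.mp4" is hypothetical.
from ultralytics import YOLO

model = YOLO("yolo11n.pt")

# persist=True keeps track IDs consistent across frames; each result's
# boxes carry an id field alongside the usual xyxy/conf/cls values.
results = model.track(source="path/to/video.mp4", persist=True)
```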
-### Notebooks - -Ultralytics provides interactive notebooks for YOLOv8, covering training, validation, tracking, and more. Each notebook is paired with a [YouTube](https://www.youtube.com/ultralytics?sub_confirmation=1) tutorial, making it easy to learn and implement advanced YOLOv8 features. - -| Docs | Notebook | YouTube | -| ---------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| YOLOv8 Train, Val, Predict and Export Modes | Open In Colab |
Ultralytics Youtube Video
| -| Ultralytics HUB QuickStart | Open In Colab |
Ultralytics Youtube Video
| -| YOLOv8 Multi-Object Tracking in Videos | Open In Colab |
Ultralytics Youtube Video
| -| YOLOv8 Object Counting in Videos | Open In Colab |
Ultralytics Youtube Video
| -| YOLOv8 Heatmaps in Videos | Open In Colab |
Ultralytics Youtube Video
| -| Ultralytics Datasets Explorer with SQL and OpenAI Integration 🚀 New | Open In Colab |
Ultralytics Youtube Video
| - ##
Models
-YOLOv8 [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/) and [Pose](https://docs.ultralytics.com/tasks/pose/) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset are available here, as well as YOLOv8 [Classify](https://docs.ultralytics.com/tasks/classify/) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) dataset. [Track](https://docs.ultralytics.com/modes/track/) mode is available for all Detect, Segment and Pose models. +YOLO11 [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/) and [Pose](https://docs.ultralytics.com/tasks/pose/) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset are available here, as well as YOLO11 [Classify](https://docs.ultralytics.com/tasks/classify/) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) dataset. [Track](https://docs.ultralytics.com/modes/track/) mode is available for all Detect, Segment and Pose models. Ultralytics YOLO supported tasks @@ -137,13 +124,13 @@ All [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cf See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for usage examples with these models trained on [COCO](https://docs.ultralytics.com/datasets/detect/coco/), which include 80 pre-trained classes. -| Model | size
(pixels) | mAPval
50-95 | Speed
CPU ONNX
(ms) | Speed
A100 TensorRT
(ms) | params
(M) | FLOPs
(B) | +| Model | size
(pixels) | mAPval
50-95 | Speed
CPU ONNX
(ms) | Speed
T4 TensorRT10
(ms) | params
(M) | FLOPs
(B) | | ------------------------------------------------------------------------------------ | --------------------- | -------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | -| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt) | 640 | 37.3 | 80.4 | 0.99 | 3.2 | 8.7 | -| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s.pt) | 640 | 44.9 | 128.4 | 1.20 | 11.2 | 28.6 | -| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m.pt) | 640 | 50.2 | 234.7 | 1.83 | 25.9 | 78.9 | -| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l.pt) | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 | -| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x.pt) | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 | +| [YOLO11n](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt) | 640 | 39.5 | 56.1 ± 0.8 | 1.5 ± 0.0 | 2.6 | 6.5 | +| [YOLO11s](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s.pt) | 640 | 47.0 | 90.0 ± 1.2 | 2.5 ± 0.0 | 9.4 | 21.5 | +| [YOLO11m](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m.pt) | 640 | 51.5 | 183.2 ± 2.0 | 4.7 ± 0.1 | 20.1 | 68.0 | +| [YOLO11l](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l.pt) | 640 | 53.4 | 238.6 ± 1.4 | 6.2 ± 0.1 | 25.3 | 86.9 | +| [YOLO11x](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x.pt) | 640 | 54.7 | 462.8 ± 6.7 | 11.3 ± 0.2 | 56.9 | 194.9 | - **mAPval** values are for single-model single-scale on [COCO val2017](https://cocodataset.org/) dataset.
Reproduce by `yolo val detect data=coco.yaml device=0` - **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.
Reproduce by `yolo val detect data=coco.yaml batch=1 device=0|cpu` @@ -154,31 +141,47 @@ See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for usage examp See [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for usage examples with these models trained on [COCO-Seg](https://docs.ultralytics.com/datasets/segment/coco/), which include 80 pre-trained classes. -| Model | size
(pixels) | mAPbox
50-95 | mAPmask
50-95 | Speed
CPU ONNX
(ms) | Speed
A100 TensorRT
(ms) | params
(M) | FLOPs
(B) | +| Model | size
(pixels) | mAPbox
50-95 | mAPmask
50-95 | Speed
CPU ONNX
(ms) | Speed
T4 TensorRT10
(ms) | params
(M) | FLOPs
(B) | | -------------------------------------------------------------------------------------------- | --------------------- | -------------------- | --------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | -| [YOLOv8n-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-seg.pt) | 640 | 36.7 | 30.5 | 96.1 | 1.21 | 3.4 | 12.6 | -| [YOLOv8s-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-seg.pt) | 640 | 44.6 | 36.8 | 155.7 | 1.47 | 11.8 | 42.6 | -| [YOLOv8m-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-seg.pt) | 640 | 49.9 | 40.8 | 317.0 | 2.18 | 27.3 | 110.2 | -| [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 | -| [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 | +| [YOLO11n-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-seg.pt) | 640 | 38.9 | 32.0 | 65.9 ± 1.1 | 1.8 ± 0.0 | 2.9 | 10.4 | +| [YOLO11s-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-seg.pt) | 640 | 46.6 | 37.8 | 117.6 ± 4.9 | 2.9 ± 0.0 | 10.1 | 35.5 | +| [YOLO11m-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-seg.pt) | 640 | 51.5 | 41.5 | 281.6 ± 1.2 | 6.3 ± 0.1 | 22.4 | 123.3 | +| [YOLO11l-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-seg.pt) | 640 | 53.4 | 42.9 | 344.2 ± 3.2 | 7.8 ± 0.2 | 27.6 | 142.2 | +| [YOLO11x-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-seg.pt) | 640 | 54.7 | 43.8 | 664.5 ± 3.2 | 15.8 ± 0.7 | 62.1 | 319.0 | - **mAPval** values are for single-model single-scale on [COCO val2017](https://cocodataset.org/) dataset.
Reproduce by `yolo val segment data=coco-seg.yaml device=0` - **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.
Reproduce by `yolo val segment data=coco-seg.yaml batch=1 device=0|cpu` +
Classification (ImageNet) + +See [Classification Docs](https://docs.ultralytics.com/tasks/classify/) for usage examples with these models trained on [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/), which include 1000 pretrained classes. + +| Model | size
(pixels) | acc
top1 | acc
top5 | Speed
CPU ONNX
(ms) | Speed
T4 TensorRT10
(ms) | params
(M) | FLOPs
(B) at 640 | +| -------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | ------------------------------ | ----------------------------------- | ------------------ | ------------------------ | +| [YOLO11n-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-cls.pt) | 224 | 70.0 | 89.4 | 5.0 ± 0.3 | 1.1 ± 0.0 | 1.6 | 3.3 | +| [YOLO11s-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-cls.pt) | 224 | 75.4 | 92.7 | 7.9 ± 0.2 | 1.3 ± 0.0 | 5.5 | 12.1 | +| [YOLO11m-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-cls.pt) | 224 | 77.3 | 93.9 | 17.2 ± 0.4 | 2.0 ± 0.0 | 10.4 | 39.3 | +| [YOLO11l-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-cls.pt) | 224 | 78.3 | 94.3 | 23.2 ± 0.3 | 2.8 ± 0.0 | 12.9 | 49.4 | +| [YOLO11x-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-cls.pt) | 224 | 79.5 | 94.9 | 41.4 ± 0.9 | 3.8 ± 0.0 | 28.4 | 110.4 | + +- **acc** values are model accuracies on the [ImageNet](https://www.image-net.org/) dataset validation set.
Reproduce by `yolo val classify data=path/to/ImageNet device=0` +- **Speed** averaged over ImageNet val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.
Reproduce by `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu` + +
+
Pose (COCO) See [Pose Docs](https://docs.ultralytics.com/tasks/pose/) for usage examples with these models trained on [COCO-Pose](https://docs.ultralytics.com/datasets/pose/coco/), which include 1 pre-trained class, person. -| Model | size
(pixels) | mAPpose
50-95 | mAPpose
50 | Speed
CPU ONNX
(ms) | Speed
A100 TensorRT
(ms) | params
(M) | FLOPs
(B) | -| ---------------------------------------------------------------------------------------------------- | --------------------- | --------------------- | ------------------ | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | -| [YOLOv8n-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-pose.pt) | 640 | 50.4 | 80.1 | 131.8 | 1.18 | 3.3 | 9.2 | -| [YOLOv8s-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-pose.pt) | 640 | 60.0 | 86.2 | 233.2 | 1.42 | 11.6 | 30.2 | -| [YOLOv8m-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-pose.pt) | 640 | 65.0 | 88.8 | 456.3 | 2.00 | 26.4 | 81.0 | -| [YOLOv8l-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-pose.pt) | 640 | 67.6 | 90.0 | 784.5 | 2.59 | 44.4 | 168.6 | -| [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-pose.pt) | 640 | 69.2 | 90.2 | 1607.1 | 3.73 | 69.4 | 263.2 | -| [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-pose-p6.pt) | 1280 | 71.6 | 91.2 | 4088.7 | 10.04 | 99.1 | 1066.4 | +| Model | size
(pixels) | mAPpose
50-95 | mAPpose
50 | Speed
CPU ONNX
(ms) | Speed
T4 TensorRT10
(ms) | params
(M) | FLOPs
(B) | +| ---------------------------------------------------------------------------------------------- | --------------------- | --------------------- | ------------------ | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | +| [YOLO11n-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-pose.pt) | 640 | 50.0 | 81.0 | 52.4 ± 0.5 | 1.7 ± 0.0 | 2.9 | 7.6 | +| [YOLO11s-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-pose.pt) | 640 | 58.9 | 86.3 | 90.5 ± 0.6 | 2.6 ± 0.0 | 9.9 | 23.2 | +| [YOLO11m-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-pose.pt) | 640 | 64.9 | 89.4 | 187.3 ± 0.8 | 4.9 ± 0.1 | 20.9 | 71.7 | +| [YOLO11l-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-pose.pt) | 640 | 66.1 | 89.9 | 247.7 ± 1.1 | 6.4 ± 0.1 | 26.2 | 90.7 | +| [YOLO11x-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-pose.pt) | 640 | 69.5 | 91.1 | 488.0 ± 13.9 | 12.1 ± 0.2 | 58.8 | 203.3 | - **mAPval** values are for single-model single-scale on [COCO Keypoints val2017](https://cocodataset.org/) dataset.
Reproduce by `yolo val pose data=coco-pose.yaml device=0` - **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.
Reproduce by `yolo val pose data=coco-pose.yaml batch=1 device=0|cpu` @@ -189,36 +192,19 @@ See [Pose Docs](https://docs.ultralytics.com/tasks/pose/) for usage examples wit See [OBB Docs](https://docs.ultralytics.com/tasks/obb/) for usage examples with these models trained on [DOTAv1](https://docs.ultralytics.com/datasets/obb/dota-v2/#dota-v10/), which include 15 pre-trained classes. -| Model | size
(pixels) | mAPtest
50 | Speed
CPU ONNX
(ms) | Speed
A100 TensorRT
(ms) | params
(M) | FLOPs
(B) | +| Model | size
(pixels) | mAPtest
50 | Speed
CPU ONNX
(ms) | Speed
T4 TensorRT10
(ms) | params
(M) | FLOPs
(B) | | -------------------------------------------------------------------------------------------- | --------------------- | ------------------ | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | -| [YOLOv8n-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-obb.pt) | 1024 | 78.0 | 204.77 | 3.57 | 3.1 | 23.3 | -| [YOLOv8s-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-obb.pt) | 1024 | 79.5 | 424.88 | 4.07 | 11.4 | 76.3 | -| [YOLOv8m-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-obb.pt) | 1024 | 80.5 | 763.48 | 7.61 | 26.4 | 208.6 | -| [YOLOv8l-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-obb.pt) | 1024 | 80.7 | 1278.42 | 11.83 | 44.5 | 433.8 | -| [YOLOv8x-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-obb.pt) | 1024 | 81.36 | 1759.10 | 13.23 | 69.5 | 676.7 | +| [YOLO11n-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-obb.pt) | 1024 | 78.4 | 117.6 ± 0.8 | 4.4 ± 0.0 | 2.7 | 17.2 | +| [YOLO11s-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-obb.pt) | 1024 | 79.5 | 219.4 ± 4.0 | 5.1 ± 0.0 | 9.7 | 57.5 | +| [YOLO11m-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-obb.pt) | 1024 | 80.9 | 562.8 ± 2.9 | 10.1 ± 0.4 | 20.9 | 183.5 | +| [YOLO11l-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-obb.pt) | 1024 | 81.0 | 712.5 ± 5.0 | 13.5 ± 0.6 | 26.2 | 232.0 | +| [YOLO11x-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-obb.pt) | 1024 | 81.3 | 1408.6 ± 7.7 | 28.6 ± 1.0 | 58.8 | 520.2 | - **mAPtest** values are for single-model multiscale on [DOTAv1](https://captain-whu.github.io/DOTA/index.html) dataset.
Reproduce by `yolo val obb data=DOTAv1.yaml device=0 split=test` and submit merged results to [DOTA evaluation](https://captain-whu.github.io/DOTA/evaluation.html). - **Speed** averaged over DOTAv1 val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.
Reproduce by `yolo val obb data=DOTAv1.yaml batch=1 device=0|cpu`
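The "Reproduce by" notes in the tables above give the CLI form; the same numbers can be pulled from Python, where `val()` returns a metrics object. A hedged sketch for a detection row, assuming the `metrics.box.map`/`metrics.box.map50` attributes on the returned results:

```python
# Sketch: reproduce a detection-table entry from Python instead of the CLI.
# Mirrors `yolo val detect data=coco.yaml device=0`.
from ultralytics import YOLO

model = YOLO("yolo11n.pt")
metrics = model.val(data="coco.yaml", device=0)
print(f"mAP50-95: {metrics.box.map:.3f}  mAP50: {metrics.box.map50:.3f}")
```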
-
Classification (ImageNet) - -See [Classification Docs](https://docs.ultralytics.com/tasks/classify/) for usage examples with these models trained on [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/), which include 1000 pretrained classes. - -| Model | size
(pixels) | acc
top1 | acc
top5 | Speed
CPU ONNX
(ms) | Speed
A100 TensorRT
(ms) | params
(M) | FLOPs
(B) at 640 | -| -------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | ------------------------------ | ----------------------------------- | ------------------ | ------------------------ | -| [YOLOv8n-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-cls.pt) | 224 | 69.0 | 88.3 | 12.9 | 0.31 | 2.7 | 4.3 | -| [YOLOv8s-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-cls.pt) | 224 | 73.8 | 91.7 | 23.4 | 0.35 | 6.4 | 13.5 | -| [YOLOv8m-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-cls.pt) | 224 | 76.8 | 93.5 | 85.4 | 0.62 | 17.0 | 42.7 | -| [YOLOv8l-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-cls.pt) | 224 | 78.3 | 94.2 | 163.0 | 0.87 | 37.5 | 99.7 | -| [YOLOv8x-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-cls.pt) | 224 | 79.0 | 94.6 | 232.0 | 1.01 | 57.4 | 154.8 | - -- **acc** values are model accuracies on the [ImageNet](https://www.image-net.org/) dataset validation set.
Reproduce by `yolo val classify data=path/to/ImageNet device=0` -- **Speed** averaged over ImageNet val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.
Reproduce by `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu` - -
- ##
Integrations
Our key integrations with leading AI platforms extend the functionality of Ultralytics' offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics, in collaboration with [Roboflow](https://roboflow.com/?ref=ultralytics), ClearML, [Comet](https://bit.ly/yolov8-readme-comet), Neural Magic and [OpenVINO](https://docs.ultralytics.com/integrations/openvino/), can optimize your AI workflow. @@ -245,18 +231,18 @@ Our key integrations with leading AI platforms extend the functionality of Ultra | Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW | | :--------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | -| Label and export your custom datasets directly to YOLOv8 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv8 using [ClearML](https://clear.ml/) (open-source!) | Free forever, [Comet](https://bit.ly/yolov8-readme-comet) lets you save YOLOv8 models, resume training, and interactively visualize and debug predictions | Run YOLOv8 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) | +| Label and export your custom datasets directly to YOLO11 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLO11 using [ClearML](https://clear.ml/) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLO11 models, resume training, and interactively visualize and debug predictions | Run YOLO11 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) | ##
Ultralytics HUB
-Experience seamless AI with [Ultralytics HUB](https://www.ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://www.ultralytics.com/app-install). Start your journey for **Free** now! +Experience seamless AI with [Ultralytics HUB](https://www.ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLO11 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://www.ultralytics.com/app-install). Start your journey for **Free** now! Ultralytics HUB preview image ##
Contribute
-We love your input! YOLOv5 and YOLOv8 would not be possible without help from our community. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you 🙏 to all our contributors! +We love your input! Ultralytics YOLO would not be possible without help from our community. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you 🙏 to all our contributors! diff --git a/README.zh-CN.md b/README.zh-CN.md index 1e7b972762..e43aba2399 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -8,25 +8,25 @@
Ultralytics CI - YOLOv8 Citation - Docker Pulls - Discord + Ultralytics YOLO Citation + Ultralytics Docker Pulls + Ultralytics Discord Ultralytics Forums Ultralytics Reddit
- Run on Gradient - Open In Colab - Open In Kaggle + Run Ultralytics on Gradient + Open Ultralytics In Colab + Open Ultralytics In Kaggle

-[Ultralytics](https://www.ultralytics.com/) [YOLOv8](https://github.com/ultralytics/ultralytics) 是一款前沿、最先进(SOTA)的模型,基于先前 YOLO 版本的成功,引入了新功能和改进,进一步提升性能和灵活性。YOLOv8 设计快速、准确且易于使用,使其成为各种物体检测与跟踪、实例分割、图像分类和姿态估计任务的绝佳选择。 +[Ultralytics](https://www.ultralytics.com/) [YOLO11](https://github.com/ultralytics/ultralytics) 是一个尖端的、最先进(SOTA)的模型,基于之前 YOLO 版本的成功,并引入了新功能和改进以进一步提升性能和灵活性。YOLO11 被设计得快速、准确且易于使用,是进行广泛对象检测和跟踪、实例分割、图像分类和姿态估计任务的理想选择。 -我们希望这里的资源能帮助您充分利用 YOLOv8。请浏览 YOLOv8 的文档了解详情,如需支持、提问或讨论,请在 GitHub 上提出问题,成为 Ultralytics DiscordReddit论坛 的一员! +我们希望这里的资源能帮助你充分利用 YOLO。请浏览 Ultralytics 文档 以获取详细信息,在 GitHub 上提出问题或讨论,成为 Ultralytics DiscordReddit论坛 的成员! -如需申请企业许可,请在 [Ultralytics Licensing](https://www.ultralytics.com/license) 处填写表格 +想申请企业许可证,请完成 [Ultralytics Licensing](https://www.ultralytics.com/license) 上的表单。 -YOLOv8 performance plots +YOLO11 performance plots
Ultralytics GitHub @@ -45,16 +45,14 @@
-以下是提供的内容的中文翻译: - ##
文档
-请参阅下面的快速安装和使用示例,以及 [YOLOv8 文档](https://docs.ultralytics.com/) 上有关训练、验证、预测和部署的完整文档。 +请参阅下方的快速开始安装和使用示例,并查看我们的 [文档](https://docs.ultralytics.com/) 以获取有关训练、验证、预测和部署的完整文档。
安装 -使用Pip在一个[**Python>=3.8**](https://www.python.org/)环境中安装`ultralytics`包,此环境还需包含[**PyTorch>=1.8**](https://pytorch.org/get-started/locally/)。这也会安装所有必要的[依赖项](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml)。 +在 [**Python>=3.8**](https://www.python.org/) 环境中使用 [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/) 通过 pip 安装包含所有[依赖项](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) 的 ultralytics 包。 [![PyPI - Version](https://img.shields.io/pypi/v/ultralytics?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics/) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics/) @@ -62,168 +60,154 @@ pip install ultralytics ``` -如需使用包括[Conda](https://anaconda.org/conda-forge/ultralytics),[Docker](https://hub.docker.com/r/ultralytics/ultralytics)和Git在内的其他安装方法,请参考[快速入门指南](https://docs.ultralytics.com/quickstart/)。 +有关其他安装方法,包括 [Conda](https://anaconda.org/conda-forge/ultralytics)、[Docker](https://hub.docker.com/r/ultralytics/ultralytics) 和 Git,请参阅 [快速开始指南](https://docs.ultralytics.com/quickstart/)。 [![Conda Version](https://img.shields.io/conda/vn/conda-forge/ultralytics?logo=condaforge)](https://anaconda.org/conda-forge/ultralytics) [![Docker Image Version](https://img.shields.io/docker/v/ultralytics/ultralytics?sort=semver&logo=docker)](https://hub.docker.com/r/ultralytics/ultralytics)
-Usage +使用 ### CLI -YOLOv8 可以在命令行界面(CLI)中直接使用,只需输入 `yolo` 命令: +YOLO 可以直接在命令行接口(CLI)中使用 `yolo` 命令: ```bash -yolo predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg' +yolo predict model=yolo11n.pt source='https://ultralytics.com/images/bus.jpg' ``` -`yolo` 可用于各种任务和模式,并接受其他参数,例如 `imgsz=640`。查看 YOLOv8 [CLI 文档](https://docs.ultralytics.com/usage/cli/)以获取示例。 +`yolo` 可以用于各种任务和模式,并接受额外参数,例如 `imgsz=640`。请参阅 YOLO [CLI 文档](https://docs.ultralytics.com/usage/cli/) 以获取示例。 ### Python -YOLOv8 也可以在 Python 环境中直接使用,并接受与上述 CLI 示例中相同的[参数](https://docs.ultralytics.com/usage/cfg/): +YOLO 也可以直接在 Python 环境中使用,并接受与上述 CLI 示例中相同的[参数](https://docs.ultralytics.com/usage/cfg/): ```python from ultralytics import YOLO # 加载模型 -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") # 训练模型 train_results = model.train( - data="coco8.yaml", # 数据配置文件的路径 - epochs=100, # 训练的轮数 - imgsz=640, # 训练图像大小 - device="cpu", # 运行的设备,例如 device=0 或 device=0,1,2,3 或 device=cpu + data="coco8.yaml", # 数据集 YAML 路径 + epochs=100, # 训练轮次 + imgsz=640, # 训练图像尺寸 + device="cpu", # 运行设备,例如 device=0 或 device=0,1,2,3 或 device=cpu ) -# 在验证集上评估模型性能 +# 评估模型在验证集上的性能 metrics = model.val() -# 对图像进行目标检测 +# 在图像上执行对象检测 results = model("path/to/image.jpg") results[0].show() # 将模型导出为 ONNX 格式 -path = model.export(format="onnx") # 返回导出的模型路径 +path = model.export(format="onnx") # 返回导出模型的路径 ``` -查看 YOLOv8 [Python 文档](https://docs.ultralytics.com/usage/python/)以获取更多示例。 +请参阅 YOLO [Python 文档](https://docs.ultralytics.com/usage/python/) 以获取更多示例。
-### 笔记本 - -Ultralytics 提供了 YOLOv8 的交互式笔记本,涵盖训练、验证、跟踪等内容。每个笔记本都配有 [YouTube](https://www.youtube.com/ultralytics?sub_confirmation=1) 教程,使学习和实现高级 YOLOv8 功能变得简单。 - -| 文档 | 笔记本 | YouTube | -| ----------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| YOLOv8 训练、验证、预测和导出模式 | 在 Colab 中打开 |
Ultralytics Youtube 视频
| -| Ultralytics HUB 快速开始 | 在 Colab 中打开 |
Ultralytics Youtube 视频
| -| YOLOv8 视频中的多对象跟踪 | 在 Colab 中打开 |
Ultralytics Youtube 视频
| -| YOLOv8 视频中的对象计数 | 在 Colab 中打开 |
Ultralytics Youtube 视频
| -| YOLOv8 视频中的热图 | 在 Colab 中打开 |
Ultralytics Youtube 视频
| -| Ultralytics 数据集浏览器,集成 SQL 和 OpenAI 🚀 New | 在 Colab 中打开 |
Ultralytics Youtube Video
| - ##
模型
-在[COCO](https://docs.ultralytics.com/datasets/detect/coco/)数据集上预训练的YOLOv8 [检测](https://docs.ultralytics.com/tasks/detect/),[分割](https://docs.ultralytics.com/tasks/segment/)和[姿态](https://docs.ultralytics.com/tasks/pose/)模型可以在这里找到,以及在[ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/)数据集上预训练的YOLOv8 [分类](https://docs.ultralytics.com/tasks/classify/)模型。所有的检测,分割和姿态模型都支持[追踪](https://docs.ultralytics.com/modes/track/)模式。 +YOLO11 [检测](https://docs.ultralytics.com/tasks/detect/)、[分割](https://docs.ultralytics.com/tasks/segment/) 和 [姿态](https://docs.ultralytics.com/tasks/pose/) 模型在 [COCO](https://docs.ultralytics.com/datasets/detect/coco/) 数据集上进行预训练,这些模型可在此处获得,此外还有在 [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) 数据集上预训练的 YOLO11 [分类](https://docs.ultralytics.com/tasks/classify/) 模型。所有检测、分割和姿态模型均支持 [跟踪](https://docs.ultralytics.com/modes/track/) 模式。 Ultralytics YOLO supported tasks -所有[模型](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models)在首次使用时会自动从最新的Ultralytics [发布版本](https://github.com/ultralytics/assets/releases)下载。 +所有[模型](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models)在首次使用时自动从最新的 Ultralytics [发布](https://github.com/ultralytics/assets/releases)下载。
检测 (COCO) -查看[检测文档](https://docs.ultralytics.com/tasks/detect/)以获取这些在[COCO](https://docs.ultralytics.com/datasets/detect/coco/)上训练的模型的使用示例,其中包括80个预训练类别。 +请参阅 [检测文档](https://docs.ultralytics.com/tasks/detect/) 以获取使用这些在 [COCO](https://docs.ultralytics.com/datasets/detect/coco/) 数据集上训练的模型的示例,其中包含 80 个预训练类别。 -| 模型 | 尺寸
(像素) | mAPval
50-95 | 速度
CPU ONNX
(ms) | 速度
A100 TensorRT
(ms) | 参数
(M) | FLOPs
(B) | +| 模型 | 尺寸
(像素) | mAPval
50-95 | 速度
CPU ONNX
(ms) | 速度
T4 TensorRT10
(ms) | 参数
(M) | FLOPs
(B) | | ------------------------------------------------------------------------------------ | ------------------- | -------------------- | ----------------------------- | ---------------------------------- | ---------------- | ----------------- | -| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt) | 640 | 37.3 | 80.4 | 0.99 | 3.2 | 8.7 | -| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s.pt) | 640 | 44.9 | 128.4 | 1.20 | 11.2 | 28.6 | -| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m.pt) | 640 | 50.2 | 234.7 | 1.83 | 25.9 | 78.9 | -| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l.pt) | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 | -| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x.pt) | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 | +| [YOLO11n](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt) | 640 | 39.5 | 56.1 ± 0.8 | 1.5 ± 0.0 | 2.6 | 6.5 | +| [YOLO11s](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s.pt) | 640 | 47.0 | 90.0 ± 1.2 | 2.5 ± 0.0 | 9.4 | 21.5 | +| [YOLO11m](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m.pt) | 640 | 51.5 | 183.2 ± 2.0 | 4.7 ± 0.1 | 20.1 | 68.0 | +| [YOLO11l](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l.pt) | 640 | 53.4 | 238.6 ± 1.4 | 6.2 ± 0.1 | 25.3 | 86.9 | +| [YOLO11x](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x.pt) | 640 | 54.7 | 462.8 ± 6.7 | 11.3 ± 0.2 | 56.9 | 194.9 | -- **mAPval** 值是基于单模型单尺度在 [COCO val2017](https://cocodataset.org/) 数据集上的结果。
通过 `yolo val detect data=coco.yaml device=0` 复现 -- **速度** 是使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例对 COCO val 图像进行平均计算的。
通过 `yolo val detect data=coco.yaml batch=1 device=0|cpu` 复现 +- **mAPval** 值针对单模型单尺度在 [COCO val2017](https://cocodataset.org/) 数据集上进行。
复现命令 `yolo val detect data=coco.yaml device=0` +- **速度**在使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例的 COCO 验证图像上平均。
复现命令 `yolo val detect data=coco.yaml batch=1 device=0|cpu`
分割 (COCO) -查看[分割文档](https://docs.ultralytics.com/tasks/segment/)以获取这些在[COCO-Seg](https://docs.ultralytics.com/datasets/segment/coco/)上训练的模型的使用示例,其中包括80个预训练类别。 +请参阅 [分割文档](https://docs.ultralytics.com/tasks/segment/) 以获取使用这些在 [COCO-Seg](https://docs.ultralytics.com/datasets/segment/coco/) 数据集上训练的模型的示例,其中包含 80 个预训练类别。 -| 模型 | 尺寸
(像素) | mAPbox
50-95 | mAPmask
50-95 | 速度
CPU ONNX
(ms) | 速度
A100 TensorRT
(ms) | 参数
(M) | FLOPs
(B) | +| 模型 | 尺寸
(像素) | mAPbox
50-95 | mAPmask
50-95 | 速度
CPU ONNX
(ms) | 速度
T4 TensorRT10
(ms) | 参数
(M) | FLOPs
(B) | | -------------------------------------------------------------------------------------------- | ------------------- | -------------------- | --------------------- | ----------------------------- | ---------------------------------- | ---------------- | ----------------- | -| [YOLOv8n-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-seg.pt) | 640 | 36.7 | 30.5 | 96.1 | 1.21 | 3.4 | 12.6 | -| [YOLOv8s-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-seg.pt) | 640 | 44.6 | 36.8 | 155.7 | 1.47 | 11.8 | 42.6 | -| [YOLOv8m-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-seg.pt) | 640 | 49.9 | 40.8 | 317.0 | 2.18 | 27.3 | 110.2 | -| [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 | -| [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 | +| [YOLO11n-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-seg.pt) | 640 | 38.9 | 32.0 | 65.9 ± 1.1 | 1.8 ± 0.0 | 2.9 | 10.4 | +| [YOLO11s-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-seg.pt) | 640 | 46.6 | 37.8 | 117.6 ± 4.9 | 2.9 ± 0.0 | 10.1 | 35.5 | +| [YOLO11m-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-seg.pt) | 640 | 51.5 | 41.5 | 281.6 ± 1.2 | 6.3 ± 0.1 | 22.4 | 123.3 | +| [YOLO11l-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-seg.pt) | 640 | 53.4 | 42.9 | 344.2 ± 3.2 | 7.8 ± 0.2 | 27.6 | 142.2 | +| [YOLO11x-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-seg.pt) | 640 | 54.7 | 43.8 | 664.5 ± 3.2 | 15.8 ± 0.7 | 62.1 | 319.0 | -- **mAPval** 值是基于单模型单尺度在 [COCO val2017](https://cocodataset.org/) 数据集上的结果。
通过 `yolo val segment data=coco-seg.yaml device=0` 复现 -- **速度** 是使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例对 COCO val 图像进行平均计算的。
通过 `yolo val segment data=coco-seg.yaml batch=1 device=0|cpu` 复现 +- **mAPval** 值针对单模型单尺度在 [COCO val2017](https://cocodataset.org/) 数据集上进行。
复现命令 `yolo val segment data=coco-seg.yaml device=0` +- **速度**在使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例的 COCO 验证图像上平均。
复现命令 `yolo val segment data=coco-seg.yaml batch=1 device=0|cpu`
-
姿态 (COCO) +
分类 (ImageNet) -查看[姿态文档](https://docs.ultralytics.com/tasks/pose/)以获取这些在[COCO-Pose](https://docs.ultralytics.com/datasets/pose/coco/)上训练的模型的使用示例,其中包括1个预训练类别,即人。 +请参阅 [分类文档](https://docs.ultralytics.com/tasks/classify/) 以获取使用这些在 [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) 数据集上训练的模型的示例,其中包含 1000 个预训练类别。 -| 模型 | 尺寸
(像素) | mAPpose
50-95 | mAPpose
50 | 速度
CPU ONNX
(ms) | 速度
A100 TensorRT
(ms) | 参数
(M) | FLOPs
(B) | -| ---------------------------------------------------------------------------------------------------- | ------------------- | --------------------- | ------------------ | ----------------------------- | ---------------------------------- | ---------------- | ----------------- | -| [YOLOv8n-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-pose.pt) | 640 | 50.4 | 80.1 | 131.8 | 1.18 | 3.3 | 9.2 | -| [YOLOv8s-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-pose.pt) | 640 | 60.0 | 86.2 | 233.2 | 1.42 | 11.6 | 30.2 | -| [YOLOv8m-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-pose.pt) | 640 | 65.0 | 88.8 | 456.3 | 2.00 | 26.4 | 81.0 | -| [YOLOv8l-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-pose.pt) | 640 | 67.6 | 90.0 | 784.5 | 2.59 | 44.4 | 168.6 | -| [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-pose.pt) | 640 | 69.2 | 90.2 | 1607.1 | 3.73 | 69.4 | 263.2 | -| [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-pose-p6.pt) | 1280 | 71.6 | 91.2 | 4088.7 | 10.04 | 99.1 | 1066.4 | +| 模型 | 尺寸
(像素) | acc
top1 | acc
top5 | 速度
CPU ONNX
(ms) | 速度
T4 TensorRT10
(ms) | 参数
(M) | FLOPs
(B) at 640 | +| -------------------------------------------------------------------------------------------- | ------------------- | ---------------- | ---------------- | ----------------------------- | ---------------------------------- | ---------------- | ------------------------ | +| [YOLO11n-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-cls.pt) | 224 | 70.0 | 89.4 | 5.0 ± 0.3 | 1.1 ± 0.0 | 1.6 | 3.3 | +| [YOLO11s-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-cls.pt) | 224 | 75.4 | 92.7 | 7.9 ± 0.2 | 1.3 ± 0.0 | 5.5 | 12.1 | +| [YOLO11m-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-cls.pt) | 224 | 77.3 | 93.9 | 17.2 ± 0.4 | 2.0 ± 0.0 | 10.4 | 39.3 | +| [YOLO11l-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-cls.pt) | 224 | 78.3 | 94.3 | 23.2 ± 0.3 | 2.8 ± 0.0 | 12.9 | 49.4 | +| [YOLO11x-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-cls.pt) | 224 | 79.5 | 94.9 | 41.4 ± 0.9 | 3.8 ± 0.0 | 28.4 | 110.4 | -- **mAPval** 值是基于单模型单尺度在 [COCO Keypoints val2017](https://cocodataset.org/) 数据集上的结果。
通过 `yolo val pose data=coco-pose.yaml device=0` 复现 -- **速度** 是使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例对 COCO val 图像进行平均计算的。
通过 `yolo val pose data=coco-pose.yaml batch=1 device=0|cpu` 复现 +- **acc** 值为在 [ImageNet](https://www.image-net.org/) 数据集验证集上的模型准确率。
复现命令 `yolo val classify data=path/to/ImageNet device=0` +- **速度**在使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例的 ImageNet 验证图像上平均。
复现命令 `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu`
-Rotated Detection (DOTAv1)
+Pose (COCO)

-See the [Rotated Detection Docs](https://docs.ultralytics.com/tasks/obb/) for usage examples with these models trained on [DOTAv1](https://docs.ultralytics.com/datasets/obb/dota-v2/#dota-v10/), which include 15 pre-trained classes.
+See the [Pose Docs](https://docs.ultralytics.com/tasks/pose/) for usage examples with these models trained on [COCO-Pose](https://docs.ultralytics.com/datasets/pose/coco/), which include 1 pre-trained class, person.

-| Model | size<br>(pixels) | mAP<sup>test</sup><br>50 | Speed<br>CPU ONNX<br>(ms) | Speed<br>A100 TensorRT<br>(ms) | params<br>(M) | FLOPs<br>(B) |
-| -------------------------------------------------------------------------------------------- | ------------------- | ------------------ | ----------------------------- | ---------------------------------- | ---------------- | ----------------- |
-| [YOLOv8n-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-obb.pt) | 1024 | 78.0 | 204.77 | 3.57 | 3.1 | 23.3 |
-| [YOLOv8s-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-obb.pt) | 1024 | 79.5 | 424.88 | 4.07 | 11.4 | 76.3 |
-| [YOLOv8m-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-obb.pt) | 1024 | 80.5 | 763.48 | 7.61 | 26.4 | 208.6 |
-| [YOLOv8l-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-obb.pt) | 1024 | 80.7 | 1278.42 | 11.83 | 44.5 | 433.8 |
-| [YOLOv8x-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-obb.pt) | 1024 | 81.36 | 1759.10 | 13.23 | 69.5 | 676.7 |
+| Model | size<br>(pixels) | mAP<sup>pose</sup><br>50-95 | mAP<sup>pose</sup><br>50 | Speed<br>CPU ONNX<br>(ms) | Speed<br>T4 TensorRT10<br>(ms) | params<br>(M) | FLOPs<br>(B) |
+| -------------------------------------------------------------------------------------------- | ------------------- | --------------------- | ------------------ | ----------------------------- | ---------------------------------- | ---------------- | ----------------- |
+| [YOLO11n-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-pose.pt) | 640 | 50.0 | 81.0 | 52.4 ± 0.5 | 1.7 ± 0.0 | 2.9 | 7.6 |
+| [YOLO11s-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-pose.pt) | 640 | 58.9 | 86.3 | 90.5 ± 0.6 | 2.6 ± 0.0 | 9.9 | 23.2 |
+| [YOLO11m-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-pose.pt) | 640 | 64.9 | 89.4 | 187.3 ± 0.8 | 4.9 ± 0.1 | 20.9 | 71.7 |
+| [YOLO11l-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-pose.pt) | 640 | 66.1 | 89.9 | 247.7 ± 1.1 | 6.4 ± 0.1 | 26.2 | 90.7 |
+| [YOLO11x-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-pose.pt) | 640 | 69.5 | 91.1 | 488.0 ± 13.9 | 12.1 ± 0.2 | 58.8 | 203.3 |

-- **mAPval** values are for single-model multiscale on the [DOTAv1](https://captain-whu.github.io/DOTA/index.html) dataset. <br>Reproduce with `yolo val obb data=DOTAv1.yaml device=0 split=test`
-- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce with `yolo val obb data=DOTAv1.yaml batch=1 device=0|cpu`
+- **mAPval** values are for single-model single-scale on the [COCO Keypoints val2017](https://cocodataset.org/) dataset. <br>Reproduce with `yolo val pose data=coco-pose.yaml device=0`
+- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce with `yolo val pose data=coco-pose.yaml batch=1 device=0|cpu`
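A quick way to sanity-check a pose checkpoint before running the full COCO-Pose validation above is single-image inference; a minimal sketch using the standard Ultralytics sample image:

```python
from ultralytics import YOLO

# Run pose inference on the Ultralytics sample image
model = YOLO("yolo11n-pose.pt")
results = model("https://ultralytics.com/images/bus.jpg")

# COCO-Pose predicts 17 keypoints per detected person
print(results[0].keypoints.xy.shape)  # (num_persons, 17, 2)
```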
-Classification (ImageNet)
+OBB (DOTAv1)

-See the [Classification Docs](https://docs.ultralytics.com/tasks/classify/) for usage examples with these models trained on [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/), which include 1000 pre-trained classes.
+See the [OBB Docs](https://docs.ultralytics.com/tasks/obb/) for usage examples with these models trained on [DOTAv1](https://docs.ultralytics.com/datasets/obb/dota-v2/#dota-v10/), which include 15 pre-trained classes.

-| Model | size<br>(pixels) | acc<br>top1 | acc<br>top5 | Speed<br>CPU ONNX<br>(ms) | Speed<br>A100 TensorRT<br>(ms) | params<br>(M) | FLOPs<br>(B) at 640 |
-| -------------------------------------------------------------------------------------------- | ------------------- | ---------------- | ---------------- | ----------------------------- | ---------------------------------- | ---------------- | ------------------------ |
-| [YOLOv8n-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-cls.pt) | 224 | 69.0 | 88.3 | 12.9 | 0.31 | 2.7 | 4.3 |
-| [YOLOv8s-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-cls.pt) | 224 | 73.8 | 91.7 | 23.4 | 0.35 | 6.4 | 13.5 |
-| [YOLOv8m-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-cls.pt) | 224 | 76.8 | 93.5 | 85.4 | 0.62 | 17.0 | 42.7 |
-| [YOLOv8l-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-cls.pt) | 224 | 78.3 | 94.2 | 163.0 | 0.87 | 37.5 | 99.7 |
-| [YOLOv8x-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-cls.pt) | 224 | 79.0 | 94.6 | 232.0 | 1.01 | 57.4 | 154.8 |
+| Model | size<br>(pixels) | mAP<sup>test</sup><br>50 | Speed<br>CPU ONNX<br>(ms) | Speed<br>T4 TensorRT10<br>(ms) | params<br>(M) | FLOPs<br>(B) |
+| -------------------------------------------------------------------------------------------- | ------------------- | ------------------ | ----------------------------- | ---------------------------------- | ---------------- | ----------------- |
+| [YOLO11n-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-obb.pt) | 1024 | 78.4 | 117.56 ± 0.80 | 4.43 ± 0.01 | 2.7 | 17.2 |
+| [YOLO11s-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-obb.pt) | 1024 | 79.5 | 219.41 ± 4.00 | 5.13 ± 0.02 | 9.7 | 57.5 |
+| [YOLO11m-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-obb.pt) | 1024 | 80.9 | 562.81 ± 2.87 | 10.07 ± 0.38 | 20.9 | 183.5 |
+| [YOLO11l-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-obb.pt) | 1024 | 81.0 | 712.49 ± 4.98 | 13.46 ± 0.55 | 26.2 | 232.0 |
+| [YOLO11x-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-obb.pt) | 1024 | 81.3 | 1408.63 ± 7.67 | 28.59 ± 0.96 | 58.8 | 520.2 |

-- **acc** values are model accuracies on the [ImageNet](https://www.image-net.org/) dataset validation set. <br>Reproduce with `yolo val classify data=path/to/ImageNet device=0`
-- **Speed** averaged over ImageNet val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce with `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu`
+- **mAPtest** values are for single-model multiscale on the [DOTAv1](https://captain-whu.github.io/DOTA/index.html) dataset. <br>Reproduce with `yolo val obb data=DOTAv1.yaml device=0 split=test` and submit the merged results to [DOTA evaluation](https://captain-whu.github.io/DOTA/evaluation.html).
+- **Speed** averaged over DOTAv1 val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce with `yolo val obb data=DOTAv1.yaml batch=1 device=0|cpu`
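Since DOTAv1 test-split labels are withheld, the mAPtest numbers above only come back from the official server; a minimal sketch of the prediction step whose merged outputs are submitted for scoring (assuming a GPU at device 0):

```python
from ultralytics import YOLO

# Generate predictions on the DOTAv1 test split; the merged results are then
# uploaded to the DOTA evaluation server for official mAP scoring
model = YOLO("yolo11n-obb.pt")
model.val(data="DOTAv1.yaml", split="test", device=0)
```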
## Integrations

-Our key integrations with leading AI platforms extend the functionality of Ultralytics offerings, enhancing tasks such as dataset labeling, training, visualization, and model management. Explore how Ultralytics works with [Roboflow](https://roboflow.com/?ref=ultralytics), ClearML, [Comet](https://bit.ly/yolov8-readme-comet), Neural Magic, and [OpenVINO](https://docs.ultralytics.com/integrations/openvino/) to optimize your AI workflow.
+Our key integrations with leading AI platforms extend the functionality of Ultralytics products, enhancing capabilities for tasks such as dataset labeling, training, visualization, and model management. Learn how Ultralytics collaborates with [Roboflow](https://roboflow.com/?ref=ultralytics), ClearML, [Comet](https://bit.ly/yolov8-readme-comet), Neural Magic, and [OpenVINO](https://docs.ultralytics.com/integrations/openvino/) to optimize your AI workflow.

@@ -245,36 +229,36 @@ Ultralytics provides interactive notebooks for YOLOv8, covering training, validation, tracking

NeuralMagic logo

-| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW |
-| :-------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------: |
-| Label and export your custom datasets directly to YOLOv8 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv8 using [ClearML](https://clear.ml/) (open-source!) | Free forever, [Comet](https://bit.ly/yolov8-readme-comet) lets you save YOLOv8 models, resume training, and interactively visualize and debug predictions | Run YOLOv8 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |
+| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW |
+| :--------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: |
+| Label and export your custom datasets directly to YOLO11 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLO11 using [ClearML](https://clear.ml/) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLO11 models, resume training, and interactively visualize and debug predictions | Run YOLO11 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |

## Ultralytics HUB

-Experience seamless AI with [Ultralytics HUB](https://www.ultralytics.com/hub) ⭐, the all-in-one solution for data visualization and for training and deploying YOLOv5 and the upcoming YOLOv8 🚀 models, all without any coding. Effortlessly turn images into actionable insights and bring your AI visions to life with our cutting-edge platform and the user-friendly [Ultralytics App](https://www.ultralytics.com/app-install). Start your **free** journey now!
+Experience seamless AI with [Ultralytics HUB](https://www.ultralytics.com/hub) ⭐, the all-in-one solution for data visualization and YOLO11 🚀 model training and deployment, without writing any code. Turn images into actionable insights and bring your AI visions to life with our state-of-the-art platform and user-friendly [Ultralytics App](https://www.ultralytics.com/app-install). Start your journey for **free** now!

Ultralytics HUB preview image

## Contributing

-We love your input! YOLOv5 and YOLOv8 would not be possible without help from our community. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you to all our contributors! 🙏
+We love your input! Ultralytics YOLO would not be possible without help from our community. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you 🙏 to all our contributors!

Ultralytics open-source contributors

## License

-Ultralytics offers two licensing options to accommodate a variety of use cases:
+Ultralytics offers two licensing options to accommodate diverse use cases:

-- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/license) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for more details.
-- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial product, please contact us through [Ultralytics Licensing](https://www.ultralytics.com/license).
+- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/license) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for details.
+- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, without the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, contact us through [Ultralytics Licensing](https://www.ultralytics.com/license).

## Contact

-For Ultralytics bug reports and feature requests, please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues). Become a member of the Ultralytics [Discord](https://discord.com/invite/ultralytics), [Reddit](https://www.reddit.com/r/ultralytics/), or [Forums](https://community.ultralytics.com/) to ask questions, share projects, engage in learning discussions, or get help with all things Ultralytics!
+For Ultralytics bug reports and feature requests, please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues). Become a member of the Ultralytics [Discord](https://discord.com/invite/ultralytics), [Reddit](https://www.reddit.com/r/ultralytics/), or [Forums](https://community.ultralytics.com/) to ask questions, share projects, join learning discussions, or get help with all things Ultralytics!
diff --git a/docker/Dockerfile b/docker/Dockerfile index 44c84b0021..3283c65076 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license # Builds ultralytics/ultralytics:latest image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics -# Image is CUDA-optimized for YOLOv8 single/multi-GPU training and inference +# Image is CUDA-optimized for YOLO11 single/multi-GPU training and inference # Start FROM PyTorch image https://hub.docker.com/r/pytorch/pytorch or nvcr.io/nvidia/pytorch:23.03-py3 FROM pytorch/pytorch:2.3.1-cuda12.1-cudnn8-runtime @@ -21,8 +21,10 @@ ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \ # Install linux packages # g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package # libsm6 required by libqxcb to create QT-based windows for visualization; set 'QT_DEBUG_PLUGINS=1' to test in docker -RUN apt update \ - && apt install --no-install-recommends -y gcc git zip unzip wget curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 libsm6 +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + gcc git zip unzip wget curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 libsm6 \ + && rm -rf /var/lib/apt/lists/* # Security updates # https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 @@ -34,7 +36,7 @@ WORKDIR /ultralytics # Copy contents and configure git COPY . . RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config -ADD https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt . +ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt . # Install pip packages RUN python3 -m pip install --upgrade pip wheel @@ -43,14 +45,15 @@ RUN pip install -e ".[export]" "tensorrt-cu12==10.1.0" "albumentations>=1.4.6" c # Run exports to AutoInstall packages # Edge TPU export fails the first time so is run twice here -RUN yolo export model=tmp/yolov8n.pt format=edgetpu imgsz=32 || yolo export model=tmp/yolov8n.pt format=edgetpu imgsz=32 -RUN yolo export model=tmp/yolov8n.pt format=ncnn imgsz=32 +RUN yolo export model=tmp/yolo11n.pt format=edgetpu imgsz=32 || yolo export model=tmp/yolo11n.pt format=edgetpu imgsz=32 +RUN yolo export model=tmp/yolo11n.pt format=ncnn imgsz=32 # Requires <= Python 3.10, bug with paddlepaddle==2.5.0 https://github.com/PaddlePaddle/X2Paddle/issues/991 RUN pip install "paddlepaddle>=2.6.0" x2paddle # Fix error: `np.bool` was a deprecated alias for the builtin `bool` segmentation error in Tests RUN pip install numpy==1.23.5 -# Remove exported models -RUN rm -rf tmp + +# Remove extra build files +RUN rm -rf tmp /root/.config/Ultralytics/persistent_cache.json # Usage Examples ------------------------------------------------------------------------------------------------------- diff --git a/docker/Dockerfile-arm64 b/docker/Dockerfile-arm64 index 7eeade1024..b5bdbb0fb5 100644 --- a/docker/Dockerfile-arm64 +++ b/docker/Dockerfile-arm64 @@ -20,8 +20,10 @@ ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \ # Install linux packages # g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package # pkg-config and libhdf5-dev (not included) are needed to build 'h5py==3.11.0' aarch64 wheel required by 'tensorflow' -RUN apt update \ - && apt install --no-install-recommends -y python3-pip git zip unzip wget curl htop gcc libgl1 libglib2.0-0 libpython3-dev gnupg 
g++ libusb-1.0-0 +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + python3-pip git zip unzip wget curl htop gcc libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 \ + && rm -rf /var/lib/apt/lists/* # Create working directory WORKDIR /ultralytics @@ -29,7 +31,7 @@ WORKDIR /ultralytics # Copy contents and configure git COPY . . RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config -ADD https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt . +ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt . # Install pip packages RUN python3 -m pip install --upgrade pip wheel @@ -38,6 +40,8 @@ RUN pip install -e ".[export]" # Creates a symbolic link to make 'python' point to 'python3' RUN ln -sf /usr/bin/python3 /usr/bin/python +# Remove extra build files +RUN rm -rf /root/.config/Ultralytics/persistent_cache.json # Usage Examples ------------------------------------------------------------------------------------------------------- diff --git a/docker/Dockerfile-conda b/docker/Dockerfile-conda index c6a31f1cc0..aa1dff53bf 100644 --- a/docker/Dockerfile-conda +++ b/docker/Dockerfile-conda @@ -17,11 +17,13 @@ ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \ /root/.config/Ultralytics/ # Install linux packages -RUN apt update \ - && apt install --no-install-recommends -y libgl1 +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + libgl1 \ + && rm -rf /var/lib/apt/lists/* # Copy contents -ADD https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt . +ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt . # Install conda packages # mkl required to fix 'OSError: libmkl_intel_lp64.so.2: cannot open shared object file: No such file or directory' @@ -30,6 +32,8 @@ RUN conda config --set solver libmamba && \ conda install -c conda-forge ultralytics mkl # conda install -c pytorch -c nvidia -c conda-forge pytorch torchvision pytorch-cuda=12.1 ultralytics mkl +# Remove extra build files +RUN rm -rf /root/.config/Ultralytics/persistent_cache.json # Usage Examples ------------------------------------------------------------------------------------------------------- diff --git a/docker/Dockerfile-cpu b/docker/Dockerfile-cpu index fcd5336ece..fe8d88521f 100644 --- a/docker/Dockerfile-cpu +++ b/docker/Dockerfile-cpu @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license # Builds ultralytics/ultralytics:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics -# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv8 deployments +# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLO11 deployments # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu FROM ubuntu:23.10 @@ -18,8 +18,10 @@ ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \ # Install linux packages # g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package -RUN apt update \ - && apt install --no-install-recommends -y python3-pip git zip unzip wget curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + python3-pip git zip unzip wget curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 \ + && rm -rf /var/lib/apt/lists/* # Create working directory WORKDIR /ultralytics @@ -27,23 +29,23 @@ WORKDIR /ultralytics # Copy contents and configure git COPY . . 
RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config -ADD https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt . +ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt . # Install pip packages RUN python3 -m pip install --upgrade pip wheel RUN pip install -e ".[export]" --extra-index-url https://download.pytorch.org/whl/cpu # Run exports to AutoInstall packages -RUN yolo export model=tmp/yolov8n.pt format=edgetpu imgsz=32 -RUN yolo export model=tmp/yolov8n.pt format=ncnn imgsz=32 +RUN yolo export model=tmp/yolo11n.pt format=edgetpu imgsz=32 +RUN yolo export model=tmp/yolo11n.pt format=ncnn imgsz=32 # Requires Python<=3.10, bug with paddlepaddle==2.5.0 https://github.com/PaddlePaddle/X2Paddle/issues/991 # RUN pip install "paddlepaddle>=2.6.0" x2paddle -# Remove exported models -RUN rm -rf tmp # Creates a symbolic link to make 'python' point to 'python3' RUN ln -sf /usr/bin/python3 /usr/bin/python +# Remove extra build files +RUN rm -rf tmp /root/.config/Ultralytics/persistent_cache.json # Usage Examples ------------------------------------------------------------------------------------------------------- diff --git a/docker/Dockerfile-jetson-jetpack4 b/docker/Dockerfile-jetson-jetpack4 index ca238b9676..c140974807 100644 --- a/docker/Dockerfile-jetson-jetpack4 +++ b/docker/Dockerfile-jetson-jetpack4 @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license # Builds ultralytics/ultralytics:jetson-jetpack4 image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics -# Supports JetPack4.x for YOLOv8 on Jetson Nano, TX2, Xavier NX, AGX Xavier +# Supports JetPack4.x for YOLO11 on Jetson Nano, TX2, Xavier NX, AGX Xavier # Start FROM https://catalog.ngc.nvidia.com/orgs/nvidia/containers/l4t-cuda FROM nvcr.io/nvidia/l4t-cuda:10.2.460-runtime @@ -20,8 +20,10 @@ RUN wget -q -O - https://repo.download.nvidia.com/jetson/jetson-ota-public.asc | echo "deb https://repo.download.nvidia.com/jetson/t194 r32.7 main" >> /etc/apt/sources.list.d/nvidia-l4t-apt-source.list # Install dependencies -RUN apt update && \ - apt install --no-install-recommends -y git python3.8 python3.8-dev python3-pip python3-libnvinfer libopenmpi-dev libopenblas-base libomp-dev gcc +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + git python3.8 python3.8-dev python3-pip python3-libnvinfer libopenmpi-dev libopenblas-base libomp-dev gcc \ + && rm -rf /var/lib/apt/lists/* # Create symbolic links for python3.8 and pip3 RUN ln -sf /usr/bin/python3.8 /usr/bin/python3 @@ -33,7 +35,7 @@ WORKDIR /ultralytics # Copy contents and configure git COPY . . RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config -ADD https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt . +ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt . 
# Download onnxruntime-gpu 1.8.0 and tensorrt 8.2.0.6 # Other versions can be seen in https://elinux.org/Jetson_Zoo and https://forums.developer.nvidia.com/t/pytorch-for-jetson/72048 @@ -48,7 +50,9 @@ RUN pip install \ https://github.com/ultralytics/assets/releases/download/v0.0.0/torch-1.11.0a0+gitbc2c6ed-cp38-cp38-linux_aarch64.whl \ https://github.com/ultralytics/assets/releases/download/v0.0.0/torchvision-0.12.0a0+9b5a3fe-cp38-cp38-linux_aarch64.whl RUN pip install -e ".[export]" -RUN rm *.whl + +# Remove extra build files +RUN rm -rf *.whl /root/.config/Ultralytics/persistent_cache.json # Usage Examples ------------------------------------------------------------------------------------------------------- diff --git a/docker/Dockerfile-jetson-jetpack5 b/docker/Dockerfile-jetson-jetpack5 index 9ec2e64a06..9949d26b71 100644 --- a/docker/Dockerfile-jetson-jetpack5 +++ b/docker/Dockerfile-jetson-jetpack5 @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license # Builds ultralytics/ultralytics:jetson-jetson-jetpack5 image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics -# Supports JetPack5.x for YOLOv8 on Jetson Xavier NX, AGX Xavier, AGX Orin, Orin Nano and Orin NX +# Supports JetPack5.x for YOLO11 on Jetson Xavier NX, AGX Xavier, AGX Orin, Orin Nano and Orin NX # Start FROM https://catalog.ngc.nvidia.com/orgs/nvidia/containers/l4t-pytorch FROM nvcr.io/nvidia/l4t-pytorch:r35.2.1-pth2.0-py3 @@ -20,8 +20,10 @@ ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \ # g++ required to build 'tflite_support' and 'lap' packages # libusb-1.0-0 required for 'tflite_support' package when exporting to TFLite # pkg-config and libhdf5-dev (not included) are needed to build 'h5py==3.11.0' aarch64 wheel required by 'tensorflow' -RUN apt update \ - && apt install --no-install-recommends -y gcc git zip unzip wget curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + gcc git zip unzip wget curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 \ + && rm -rf /var/lib/apt/lists/* # Create working directory WORKDIR /ultralytics @@ -29,7 +31,7 @@ WORKDIR /ultralytics # Copy contents and configure git COPY . . RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config -ADD https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt . +ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt . 
# Remove opencv-python from Ultralytics dependencies as it conflicts with opencv-python installed in base image RUN sed -i '/opencv-python/d' pyproject.toml @@ -41,8 +43,9 @@ ADD https://nvidia.box.com/shared/static/mvdcltm9ewdy2d5nurkiqorofz1s53ww.whl on RUN python3 -m pip install --upgrade pip wheel RUN pip install onnxruntime_gpu-1.15.1-cp38-cp38-linux_aarch64.whl RUN pip install -e ".[export]" -RUN rm *.whl +# Remove extra build files +RUN rm -rf *.whl /root/.config/Ultralytics/persistent_cache.json # Usage Examples ------------------------------------------------------------------------------------------------------- diff --git a/docker/Dockerfile-jetson-jetpack6 b/docker/Dockerfile-jetson-jetpack6 index 3b53c31d41..e4da5461db 100644 --- a/docker/Dockerfile-jetson-jetpack6 +++ b/docker/Dockerfile-jetson-jetpack6 @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license # Builds ultralytics/ultralytics:jetson-jetpack6 image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics -# Supports JetPack6.x for YOLOv8 on Jetson AGX Orin, Orin NX and Orin Nano Series +# Supports JetPack6.x for YOLO11 on Jetson AGX Orin, Orin NX and Orin Nano Series # Start FROM https://catalog.ngc.nvidia.com/orgs/nvidia/containers/l4t-jetpack FROM nvcr.io/nvidia/l4t-jetpack:r36.3.0 @@ -17,8 +17,10 @@ ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \ /root/.config/Ultralytics/ # Install dependencies -RUN apt update && \ - apt install --no-install-recommends -y git python3-pip libopenmpi-dev libopenblas-base libomp-dev +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + git python3-pip libopenmpi-dev libopenblas-base libomp-dev \ + && rm -rf /var/lib/apt/lists/* # Create working directory WORKDIR /ultralytics @@ -26,7 +28,7 @@ WORKDIR /ultralytics # Copy contents and configure git COPY . . RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config -ADD https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt . +ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt . 
# Download onnxruntime-gpu 1.18.0 from https://elinux.org/Jetson_Zoo and https://forums.developer.nvidia.com/t/pytorch-for-jetson/72048 ADD https://nvidia.box.com/shared/static/48dtuob7meiw6ebgfsfqakc9vse62sg4.whl onnxruntime_gpu-1.18.0-cp310-cp310-linux_aarch64.whl @@ -38,7 +40,9 @@ RUN pip install \ https://github.com/ultralytics/assets/releases/download/v0.0.0/torch-2.3.0-cp310-cp310-linux_aarch64.whl \ https://github.com/ultralytics/assets/releases/download/v0.0.0/torchvision-0.18.0a0+6043bc2-cp310-cp310-linux_aarch64.whl RUN pip install -e ".[export]" -RUN rm *.whl + +# Remove extra build files +RUN rm -rf *.whl /root/.config/Ultralytics/persistent_cache.json # Usage Examples ------------------------------------------------------------------------------------------------------- diff --git a/docker/Dockerfile-python b/docker/Dockerfile-python index f8a8611c4c..c275dcd9d1 100644 --- a/docker/Dockerfile-python +++ b/docker/Dockerfile-python @@ -1,9 +1,9 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license # Builds ultralytics/ultralytics:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics -# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv8 deployments +# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLO11 deployments -# Use the official Python 3.10 slim-bookworm as base image -FROM python:3.10-slim-bookworm +# Use official Python base image for reproducibility (3.11.10 for export and 3.12.6 for inference) +FROM python:3.11.10-slim-bookworm # Set environment variables ENV PYTHONUNBUFFERED=1 \ @@ -18,8 +18,10 @@ ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \ # Install linux packages # g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package -RUN apt update \ - && apt install --no-install-recommends -y python3-pip git zip unzip wget curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + python3-pip git zip unzip wget curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 \ + && rm -rf /var/lib/apt/lists/* # Create working directory WORKDIR /ultralytics @@ -27,20 +29,20 @@ WORKDIR /ultralytics # Copy contents and configure git COPY . . RUN sed -i '/^\[http "https:\/\/github\.com\/"\]/,+1d' .git/config -ADD https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt . +ADD https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt . 
# Install pip packages RUN python3 -m pip install --upgrade pip wheel RUN pip install -e ".[export]" --extra-index-url https://download.pytorch.org/whl/cpu # Run exports to AutoInstall packages -RUN yolo export model=tmp/yolov8n.pt format=edgetpu imgsz=32 -RUN yolo export model=tmp/yolov8n.pt format=ncnn imgsz=32 +RUN yolo export model=tmp/yolo11n.pt format=edgetpu imgsz=32 +RUN yolo export model=tmp/yolo11n.pt format=ncnn imgsz=32 # Requires Python<=3.10, bug with paddlepaddle==2.5.0 https://github.com/PaddlePaddle/X2Paddle/issues/991 RUN pip install "paddlepaddle>=2.6.0" x2paddle -# Remove exported models -RUN rm -rf tmp +# Remove extra build files +RUN rm -rf tmp /root/.config/Ultralytics/persistent_cache.json # Usage Examples ------------------------------------------------------------------------------------------------------- diff --git a/docker/Dockerfile-runner b/docker/Dockerfile-runner index 8f01477252..642f1a1bae 100644 --- a/docker/Dockerfile-runner +++ b/docker/Dockerfile-runner @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license # Builds GitHub actions CI runner image for deployment to DockerHub https://hub.docker.com/r/ultralytics/ultralytics -# Image is CUDA-optimized for YOLOv8 single/multi-GPU training and inference tests +# Image is CUDA-optimized for YOLO11 single/multi-GPU training and inference tests # Start FROM Ultralytics GPU image FROM ultralytics/ultralytics:latest diff --git a/docs/en/datasets/classify/caltech101.md b/docs/en/datasets/classify/caltech101.md index 51faa4957c..98ecd8b8c9 100644 --- a/docs/en/datasets/classify/caltech101.md +++ b/docs/en/datasets/classify/caltech101.md @@ -36,7 +36,7 @@ To train a YOLO model on the Caltech-101 dataset for 100 epochs, you can use the from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="caltech101", epochs=100, imgsz=416) @@ -46,7 +46,7 @@ To train a YOLO model on the Caltech-101 dataset for 100 epochs, you can use the ```bash # Start training from a pretrained *.pt model - yolo classify train data=caltech101 model=yolov8n-cls.pt epochs=100 imgsz=416 + yolo classify train data=caltech101 model=yolo11n-cls.pt epochs=100 imgsz=416 ``` ## Sample Images and Annotations @@ -98,7 +98,7 @@ To train an Ultralytics YOLO model on the Caltech-101 dataset, you can use the p from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="caltech101", epochs=100, imgsz=416) @@ -108,7 +108,7 @@ To train an Ultralytics YOLO model on the Caltech-101 dataset, you can use the p ```bash # Start training from a pretrained *.pt model - yolo classify train data=caltech101 model=yolov8n-cls.pt epochs=100 imgsz=416 + yolo classify train data=caltech101 model=yolo11n-cls.pt epochs=100 imgsz=416 ``` For more detailed arguments and options, refer to the model [Training](../../modes/train.md) page. diff --git a/docs/en/datasets/classify/caltech256.md b/docs/en/datasets/classify/caltech256.md index de7c2ea461..f0c534f20d 100644 --- a/docs/en/datasets/classify/caltech256.md +++ b/docs/en/datasets/classify/caltech256.md @@ -16,7 +16,7 @@ The [Caltech-256](https://data.caltech.edu/records/nyy15-4j048) dataset is an ex allowfullscreen>
- Watch: How to Train [Image Classification](https://www.ultralytics.com/glossary/image-classification) Model using Caltech-256 Dataset with Ultralytics HUB + Watch: How to Train Image Classification Model using Caltech-256 Dataset with Ultralytics HUB

## Key Features @@ -47,7 +47,7 @@ To train a YOLO model on the Caltech-256 dataset for 100 epochs, you can use the from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="caltech256", epochs=100, imgsz=416) @@ -57,7 +57,7 @@ To train a YOLO model on the Caltech-256 dataset for 100 epochs, you can use the ```bash # Start training from a pretrained *.pt model - yolo classify train data=caltech256 model=yolov8n-cls.pt epochs=100 imgsz=416 + yolo classify train data=caltech256 model=yolo11n-cls.pt epochs=100 imgsz=416 ``` ## Sample Images and Annotations @@ -106,7 +106,7 @@ To train a YOLO model on the Caltech-256 dataset for 100 [epochs](https://www.ul from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model + model = YOLO("yolo11n-cls.pt") # load a pretrained model # Train the model results = model.train(data="caltech256", epochs=100, imgsz=416) @@ -116,7 +116,7 @@ To train a YOLO model on the Caltech-256 dataset for 100 [epochs](https://www.ul ```bash # Start training from a pretrained *.pt model - yolo classify train data=caltech256 model=yolov8n-cls.pt epochs=100 imgsz=416 + yolo classify train data=caltech256 model=yolo11n-cls.pt epochs=100 imgsz=416 ``` ### What are the most common use cases for the Caltech-256 dataset? @@ -141,6 +141,6 @@ Ultralytics YOLO models offer several advantages for training on the Caltech-256 - **High Accuracy**: YOLO models are known for their state-of-the-art performance in object detection tasks. - **Speed**: They provide real-time inference capabilities, making them suitable for applications requiring quick predictions. - **Ease of Use**: With Ultralytics HUB, users can train, validate, and deploy models without extensive coding. -- **Pretrained Models**: Starting from pretrained models, like `yolov8n-cls.pt`, can significantly reduce training time and improve model [accuracy](https://www.ultralytics.com/glossary/accuracy). +- **Pretrained Models**: Starting from pretrained models, like `yolo11n-cls.pt`, can significantly reduce training time and improve model [accuracy](https://www.ultralytics.com/glossary/accuracy). For more details, explore our [comprehensive training guide](../../modes/train.md). diff --git a/docs/en/datasets/classify/cifar10.md b/docs/en/datasets/classify/cifar10.md index 7bae78b38a..e081bc1681 100644 --- a/docs/en/datasets/classify/cifar10.md +++ b/docs/en/datasets/classify/cifar10.md @@ -16,7 +16,7 @@ The [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) (Canadian Institute allowfullscreen>
- Watch: How to Train an [Image Classification](https://www.ultralytics.com/glossary/image-classification) Model with CIFAR-10 Dataset using Ultralytics YOLOv8 + Watch: How to Train an Image Classification Model with CIFAR-10 Dataset using Ultralytics YOLO11

## Key Features @@ -50,7 +50,7 @@ To train a YOLO model on the CIFAR-10 dataset for 100 epochs with an image size from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="cifar10", epochs=100, imgsz=32) @@ -60,7 +60,7 @@ To train a YOLO model on the CIFAR-10 dataset for 100 epochs with an image size ```bash # Start training from a pretrained *.pt model - yolo classify train data=cifar10 model=yolov8n-cls.pt epochs=100 imgsz=32 + yolo classify train data=cifar10 model=yolo11n-cls.pt epochs=100 imgsz=32 ``` ## Sample Images and Annotations @@ -104,7 +104,7 @@ To train a YOLO model on the CIFAR-10 dataset using Ultralytics, you can follow from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="cifar10", epochs=100, imgsz=32) @@ -114,7 +114,7 @@ To train a YOLO model on the CIFAR-10 dataset using Ultralytics, you can follow ```bash # Start training from a pretrained *.pt model - yolo classify train data=cifar10 model=yolov8n-cls.pt epochs=100 imgsz=32 + yolo classify train data=cifar10 model=yolo11n-cls.pt epochs=100 imgsz=32 ``` For more details, refer to the model [Training](../../modes/train.md) page. diff --git a/docs/en/datasets/classify/cifar100.md b/docs/en/datasets/classify/cifar100.md index a6735bbcc4..ddf48b3758 100644 --- a/docs/en/datasets/classify/cifar100.md +++ b/docs/en/datasets/classify/cifar100.md @@ -39,7 +39,7 @@ To train a YOLO model on the CIFAR-100 dataset for 100 [epochs](https://www.ultr from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="cifar100", epochs=100, imgsz=32) @@ -49,7 +49,7 @@ To train a YOLO model on the CIFAR-100 dataset for 100 [epochs](https://www.ultr ```bash # Start training from a pretrained *.pt model - yolo classify train data=cifar100 model=yolov8n-cls.pt epochs=100 imgsz=32 + yolo classify train data=cifar100 model=yolo11n-cls.pt epochs=100 imgsz=32 ``` ## Sample Images and Annotations @@ -97,7 +97,7 @@ You can train a YOLO model on the CIFAR-100 dataset using either Python or CLI c from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="cifar100", epochs=100, imgsz=32) @@ -107,7 +107,7 @@ You can train a YOLO model on the CIFAR-100 dataset using either Python or CLI c ```bash # Start training from a pretrained *.pt model - yolo classify train data=cifar100 model=yolov8n-cls.pt epochs=100 imgsz=32 + yolo classify train data=cifar100 model=yolo11n-cls.pt epochs=100 imgsz=32 ``` For a comprehensive list of available arguments, please refer to the model [Training](../../modes/train.md) page. 
diff --git a/docs/en/datasets/classify/fashion-mnist.md b/docs/en/datasets/classify/fashion-mnist.md index 531cd2c1bd..f9d61fd0e8 100644 --- a/docs/en/datasets/classify/fashion-mnist.md +++ b/docs/en/datasets/classify/fashion-mnist.md @@ -16,7 +16,7 @@ The [Fashion-MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset is allowfullscreen>
- Watch: How to do [Image Classification](https://www.ultralytics.com/glossary/image-classification) on Fashion MNIST Dataset using Ultralytics YOLOv8 + Watch: How to do Image Classification on Fashion MNIST Dataset using Ultralytics YOLO11

## Key Features @@ -64,7 +64,7 @@ To train a CNN model on the Fashion-MNIST dataset for 100 [epochs](https://www.u from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="fashion-mnist", epochs=100, imgsz=28) @@ -74,7 +74,7 @@ To train a CNN model on the Fashion-MNIST dataset for 100 [epochs](https://www.u ```bash # Start training from a pretrained *.pt model - yolo classify train data=fashion-mnist model=yolov8n-cls.pt epochs=100 imgsz=28 + yolo classify train data=fashion-mnist model=yolo11n-cls.pt epochs=100 imgsz=28 ``` ## Sample Images and Annotations @@ -107,7 +107,7 @@ To train an Ultralytics YOLO model on the Fashion-MNIST dataset, you can use bot from ultralytics import YOLO # Load a pretrained model - model = YOLO("yolov8n-cls.pt") + model = YOLO("yolo11n-cls.pt") # Train the model on Fashion-MNIST results = model.train(data="fashion-mnist", epochs=100, imgsz=28) @@ -117,7 +117,7 @@ To train an Ultralytics YOLO model on the Fashion-MNIST dataset, you can use bot === "CLI" ```bash - yolo classify train data=fashion-mnist model=yolov8n-cls.pt epochs=100 imgsz=28 + yolo classify train data=fashion-mnist model=yolo11n-cls.pt epochs=100 imgsz=28 ``` For more detailed training parameters, refer to the [Training page](../../modes/train.md). @@ -128,7 +128,7 @@ The [Fashion-MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset is ### Can I use Ultralytics YOLO for image classification tasks like Fashion-MNIST? -Yes, Ultralytics YOLO models can be used for image classification tasks, including those involving the Fashion-MNIST dataset. YOLOv8, for example, supports various vision tasks such as detection, segmentation, and classification. To get started with image classification tasks, refer to the [Classification page](https://docs.ultralytics.com/tasks/classify/). +Yes, Ultralytics YOLO models can be used for image classification tasks, including those involving the Fashion-MNIST dataset. YOLO11, for example, supports various vision tasks such as detection, segmentation, and classification. To get started with image classification tasks, refer to the [Classification page](https://docs.ultralytics.com/tasks/classify/). ### What are the key features and structure of the Fashion-MNIST dataset? diff --git a/docs/en/datasets/classify/imagenet.md b/docs/en/datasets/classify/imagenet.md index 76e59b3f18..72c2e2a3b5 100644 --- a/docs/en/datasets/classify/imagenet.md +++ b/docs/en/datasets/classify/imagenet.md @@ -10,13 +10,7 @@ keywords: ImageNet, deep learning, visual recognition, computer vision, pretrain ## ImageNet Pretrained Models -| Model | size
(pixels) | acc<br>top1 | acc<br>top5 | Speed<br>CPU ONNX<br>(ms) | Speed<br>A100 TensorRT<br>(ms) | params<br>(M) | FLOPs
(B) at 640 | -| -------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | ------------------------------ | ----------------------------------- | ------------------ | ------------------------ | -| [YOLOv8n-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-cls.pt) | 224 | 69.0 | 88.3 | 12.9 | 0.31 | 2.7 | 4.3 | -| [YOLOv8s-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-cls.pt) | 224 | 73.8 | 91.7 | 23.4 | 0.35 | 6.4 | 13.5 | -| [YOLOv8m-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-cls.pt) | 224 | 76.8 | 93.5 | 85.4 | 0.62 | 17.0 | 42.7 | -| [YOLOv8l-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-cls.pt) | 224 | 76.8 | 93.5 | 163.0 | 0.87 | 37.5 | 99.7 | -| [YOLOv8x-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-cls.pt) | 224 | 79.0 | 94.6 | 232.0 | 1.01 | 57.4 | 154.8 | +{% include "macros/yolo-cls-perf.md" %} ## Key Features @@ -49,7 +43,7 @@ To train a deep learning model on the ImageNet dataset for 100 [epochs](https:// from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="imagenet", epochs=100, imgsz=224) @@ -59,7 +53,7 @@ To train a deep learning model on the ImageNet dataset for 100 [epochs](https:// ```bash # Start training from a pretrained *.pt model - yolo classify train data=imagenet model=yolov8n-cls.pt epochs=100 imgsz=224 + yolo classify train data=imagenet model=yolo11n-cls.pt epochs=100 imgsz=224 ``` ## Sample Images and Annotations @@ -110,7 +104,7 @@ To use a pretrained Ultralytics YOLO model for image classification on the Image from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="imagenet", epochs=100, imgsz=224) @@ -120,14 +114,14 @@ To use a pretrained Ultralytics YOLO model for image classification on the Image ```bash # Start training from a pretrained *.pt model - yolo classify train data=imagenet model=yolov8n-cls.pt epochs=100 imgsz=224 + yolo classify train data=imagenet model=yolo11n-cls.pt epochs=100 imgsz=224 ``` For more in-depth training instruction, refer to our [Training page](../../modes/train.md). -### Why should I use the Ultralytics YOLOv8 pretrained models for my ImageNet dataset projects? +### Why should I use the Ultralytics YOLO11 pretrained models for my ImageNet dataset projects? -Ultralytics YOLOv8 pretrained models offer state-of-the-art performance in terms of speed and [accuracy](https://www.ultralytics.com/glossary/accuracy) for various computer vision tasks. For example, the YOLOv8n-cls model, with a top-1 accuracy of 69.0% and a top-5 accuracy of 88.3%, is optimized for real-time applications. Pretrained models reduce the computational resources required for training from scratch and accelerate development cycles. Learn more about the performance metrics of YOLOv8 models in the [ImageNet Pretrained Models section](#imagenet-pretrained-models). 
+Ultralytics YOLO11 pretrained models offer state-of-the-art performance in terms of speed and [accuracy](https://www.ultralytics.com/glossary/accuracy) for various computer vision tasks. For example, the YOLO11n-cls model, with a top-1 accuracy of 70.0% and a top-5 accuracy of 89.4%, is optimized for real-time applications. Pretrained models reduce the computational resources required for training from scratch and accelerate development cycles. Learn more about the performance metrics of YOLO11 models in the [ImageNet Pretrained Models section](#imagenet-pretrained-models). ### How is the ImageNet dataset structured, and why is it important? diff --git a/docs/en/datasets/classify/imagenet10.md b/docs/en/datasets/classify/imagenet10.md index 4e40e6655f..217d56c54b 100644 --- a/docs/en/datasets/classify/imagenet10.md +++ b/docs/en/datasets/classify/imagenet10.md @@ -35,7 +35,7 @@ To test a deep learning model on the ImageNet10 dataset with an image size of 22 from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="imagenet10", epochs=5, imgsz=224) @@ -45,7 +45,7 @@ To test a deep learning model on the ImageNet10 dataset with an image size of 22 ```bash # Start training from a pretrained *.pt model - yolo classify train data=imagenet10 model=yolov8n-cls.pt epochs=5 imgsz=224 + yolo classify train data=imagenet10 model=yolo11n-cls.pt epochs=5 imgsz=224 ``` ## Sample Images and Annotations @@ -94,7 +94,7 @@ To test your deep learning model on the ImageNet10 dataset with an image size of from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="imagenet10", epochs=5, imgsz=224) @@ -104,7 +104,7 @@ To test your deep learning model on the ImageNet10 dataset with an image size of ```bash # Start training from a pretrained *.pt model - yolo classify train data=imagenet10 model=yolov8n-cls.pt epochs=5 imgsz=224 + yolo classify train data=imagenet10 model=yolo11n-cls.pt epochs=5 imgsz=224 ``` Refer to the [Training](../../modes/train.md) page for a comprehensive list of available arguments. 
diff --git a/docs/en/datasets/classify/imagenette.md b/docs/en/datasets/classify/imagenette.md index bf371502ad..dd2af1c3d1 100644 --- a/docs/en/datasets/classify/imagenette.md +++ b/docs/en/datasets/classify/imagenette.md @@ -37,7 +37,7 @@ To train a model on the ImageNette dataset for 100 epochs with a standard image from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="imagenette", epochs=100, imgsz=224) @@ -47,7 +47,7 @@ To train a model on the ImageNette dataset for 100 epochs with a standard image ```bash # Start training from a pretrained *.pt model - yolo classify train data=imagenette model=yolov8n-cls.pt epochs=100 imgsz=224 + yolo classify train data=imagenette model=yolo11n-cls.pt epochs=100 imgsz=224 ``` ## Sample Images and Annotations @@ -72,7 +72,7 @@ To use these datasets, simply replace 'imagenette' with 'imagenette160' or 'imag from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model with ImageNette160 results = model.train(data="imagenette160", epochs=100, imgsz=160) @@ -82,7 +82,7 @@ To use these datasets, simply replace 'imagenette' with 'imagenette160' or 'imag ```bash # Start training from a pretrained *.pt model with ImageNette160 - yolo classify train data=imagenette160 model=yolov8n-cls.pt epochs=100 imgsz=160 + yolo classify train data=imagenette160 model=yolo11n-cls.pt epochs=100 imgsz=160 ``` !!! example "Train Example with ImageNette320" @@ -93,7 +93,7 @@ To use these datasets, simply replace 'imagenette' with 'imagenette160' or 'imag from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model with ImageNette320 results = model.train(data="imagenette320", epochs=100, imgsz=320) @@ -103,7 +103,7 @@ To use these datasets, simply replace 'imagenette' with 'imagenette160' or 'imag ```bash # Start training from a pretrained *.pt model with ImageNette320 - yolo classify train data=imagenette320 model=yolov8n-cls.pt epochs=100 imgsz=320 + yolo classify train data=imagenette320 model=yolo11n-cls.pt epochs=100 imgsz=320 ``` These smaller versions of the dataset allow for rapid iterations during the development process while still providing valuable and realistic image classification tasks. @@ -130,7 +130,7 @@ To train a YOLO model on the ImageNette dataset for 100 [epochs](https://www.ult from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="imagenette", epochs=100, imgsz=224) @@ -140,7 +140,7 @@ To train a YOLO model on the ImageNette dataset for 100 [epochs](https://www.ult ```bash # Start training from a pretrained *.pt model - yolo classify train data=imagenette model=yolov8n-cls.pt epochs=100 imgsz=224 + yolo classify train data=imagenette model=yolo11n-cls.pt epochs=100 imgsz=224 ``` For more details, see the [Training](../../modes/train.md) documentation page. 
@@ -167,7 +167,7 @@ Yes, the ImageNette dataset is also available in two resized versions: ImageNett from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") + model = YOLO("yolo11n-cls.pt") # Train the model with ImageNette160 results = model.train(data="imagenette160", epochs=100, imgsz=160) @@ -177,7 +177,7 @@ Yes, the ImageNette dataset is also available in two resized versions: ImageNett ```bash # Start training from a pretrained *.pt model with ImageNette160 - yolo detect train data=imagenette160 model=yolov8n-cls.pt epochs=100 imgsz=160 + yolo detect train data=imagenette160 model=yolo11n-cls.pt epochs=100 imgsz=160 ``` For more information, refer to [Training with ImageNette160 and ImageNette320](#imagenette160-and-imagenette320). diff --git a/docs/en/datasets/classify/imagewoof.md b/docs/en/datasets/classify/imagewoof.md index 2ed0273b60..2e33f44542 100644 --- a/docs/en/datasets/classify/imagewoof.md +++ b/docs/en/datasets/classify/imagewoof.md @@ -34,7 +34,7 @@ To train a CNN model on the ImageWoof dataset for 100 [epochs](https://www.ultra from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="imagewoof", epochs=100, imgsz=224) @@ -44,7 +44,7 @@ To train a CNN model on the ImageWoof dataset for 100 [epochs](https://www.ultra ```bash # Start training from a pretrained *.pt model - yolo classify train data=imagewoof model=yolov8n-cls.pt epochs=100 imgsz=224 + yolo classify train data=imagewoof model=yolo11n-cls.pt epochs=100 imgsz=224 ``` ## Dataset Variants @@ -67,7 +67,7 @@ To use these variants in your training, simply replace 'imagewoof' in the datase from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # For medium-sized dataset model.train(data="imagewoof320", epochs=100, imgsz=224) @@ -80,7 +80,7 @@ To use these variants in your training, simply replace 'imagewoof' in the datase ```bash # Load a pretrained model and train on the small-sized dataset - yolo classify train model=yolov8n-cls.pt data=imagewoof320 epochs=100 imgsz=224 + yolo classify train model=yolo11n-cls.pt data=imagewoof320 epochs=100 imgsz=224 ``` It's important to note that using smaller images will likely yield lower performance in terms of classification accuracy. However, it's an excellent way to iterate quickly in the early stages of model development and prototyping. @@ -116,7 +116,7 @@ To train a [Convolutional Neural Network](https://www.ultralytics.com/glossary/c ```python from ultralytics import YOLO - model = YOLO("yolov8n-cls.pt") # Load a pretrained model + model = YOLO("yolo11n-cls.pt") # Load a pretrained model results = model.train(data="imagewoof", epochs=100, imgsz=224) ``` @@ -124,7 +124,7 @@ To train a [Convolutional Neural Network](https://www.ultralytics.com/glossary/c === "CLI" ```bash - yolo classify train data=imagewoof model=yolov8n-cls.pt epochs=100 imgsz=224 + yolo classify train data=imagewoof model=yolo11n-cls.pt epochs=100 imgsz=224 ``` For more details on available training arguments, refer to the [Training](../../modes/train.md) page. 
diff --git a/docs/en/datasets/classify/index.md b/docs/en/datasets/classify/index.md index 3567d6a295..700f4af35a 100644 --- a/docs/en/datasets/classify/index.md +++ b/docs/en/datasets/classify/index.md @@ -86,7 +86,7 @@ This structured approach ensures that the model can effectively learn from well- from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="path/to/dataset", epochs=100, imgsz=640) @@ -96,7 +96,7 @@ This structured approach ensures that the model can effectively learn from well- ```bash # Start training from a pretrained *.pt model - yolo detect train data=path/to/data model=yolov8n-cls.pt epochs=100 imgsz=640 + yolo detect train data=path/to/data model=yolo11n-cls.pt epochs=100 imgsz=640 ``` ## Supported Datasets @@ -170,7 +170,7 @@ To use your own dataset with Ultralytics YOLO, ensure it follows the specified d from ultralytics import YOLO # Load a model -model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) +model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="path/to/your/dataset", epochs=100, imgsz=640) @@ -182,7 +182,7 @@ More details can be found in the [Adding your own dataset](#adding-your-own-data Ultralytics YOLO offers several benefits for image classification, including: -- **Pretrained Models**: Load pretrained models like `yolov8n-cls.pt` to jump-start your training process. +- **Pretrained Models**: Load pretrained models like `yolo11n-cls.pt` to jump-start your training process. - **Ease of Use**: Simple API and CLI commands for training and evaluation. - **High Performance**: State-of-the-art [accuracy](https://www.ultralytics.com/glossary/accuracy) and speed, ideal for real-time applications. - **Support for Multiple Datasets**: Seamless integration with various popular datasets like CIFAR-10, ImageNet, and more. @@ -202,7 +202,7 @@ Training a model using Ultralytics YOLO can be done easily in both Python and CL from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model + model = YOLO("yolo11n-cls.pt") # load a pretrained model # Train the model results = model.train(data="path/to/dataset", epochs=100, imgsz=640) @@ -213,7 +213,7 @@ Training a model using Ultralytics YOLO can be done easily in both Python and CL ```bash # Start training from a pretrained *.pt model - yolo detect train data=path/to/data model=yolov8n-cls.pt epochs=100 imgsz=640 + yolo detect train data=path/to/data model=yolo11n-cls.pt epochs=100 imgsz=640 ``` These examples demonstrate the straightforward process of training a YOLO model using either approach. For more information, visit the [Usage](#usage) section. 
diff --git a/docs/en/datasets/classify/mnist.md b/docs/en/datasets/classify/mnist.md index 07f0a70a1d..055c8b7788 100644 --- a/docs/en/datasets/classify/mnist.md +++ b/docs/en/datasets/classify/mnist.md @@ -42,7 +42,7 @@ To train a CNN model on the MNIST dataset for 100 [epochs](https://www.ultralyti from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="mnist", epochs=100, imgsz=32) @@ -52,7 +52,7 @@ To train a CNN model on the MNIST dataset for 100 [epochs](https://www.ultralyti ```bash # Start training from a pretrained *.pt model - yolo classify train data=mnist model=yolov8n-cls.pt epochs=100 imgsz=28 + yolo classify train data=mnist model=yolo11n-cls.pt epochs=100 imgsz=28 ``` ## Sample Images and Annotations @@ -103,7 +103,7 @@ To train a model on the MNIST dataset using Ultralytics YOLO, you can follow the from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="mnist", epochs=100, imgsz=32) @@ -113,7 +113,7 @@ To train a model on the MNIST dataset using Ultralytics YOLO, you can follow the ```bash # Start training from a pretrained *.pt model - yolo classify train data=mnist model=yolov8n-cls.pt epochs=100 imgsz=28 + yolo classify train data=mnist model=yolo11n-cls.pt epochs=100 imgsz=28 ``` For a detailed list of available training arguments, refer to the [Training](../../modes/train.md) page. diff --git a/docs/en/datasets/detect/african-wildlife.md b/docs/en/datasets/detect/african-wildlife.md index 0d01372dbd..519b8cfa67 100644 --- a/docs/en/datasets/detect/african-wildlife.md +++ b/docs/en/datasets/detect/african-wildlife.md @@ -1,7 +1,7 @@ --- comments: true description: Explore our African Wildlife Dataset featuring images of buffalo, elephant, rhino, and zebra for training computer vision models. Ideal for research and conservation. -keywords: African Wildlife Dataset, South African animals, object detection, computer vision, YOLOv8, wildlife research, conservation, dataset +keywords: African Wildlife Dataset, South African animals, object detection, computer vision, YOLO11, wildlife research, conservation, dataset --- # African Wildlife Dataset @@ -16,7 +16,7 @@ This dataset showcases four common animal classes typically found in South Afric allowfullscreen>
- Watch: African Wildlife Animals Detection using Ultralytics YOLOv8 + Watch: African Wildlife Animals Detection using Ultralytics YOLO11

## Dataset Structure @@ -43,7 +43,7 @@ A YAML (Yet Another Markup Language) file defines the dataset configuration, inc ## Usage -To train a YOLOv8n model on the African wildlife dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, use the provided code samples. For a comprehensive list of available parameters, refer to the model's [Training](../../modes/train.md) page. +To train a YOLO11n model on the African wildlife dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, use the provided code samples. For a comprehensive list of available parameters, refer to the model's [Training](../../modes/train.md) page. !!! example "Train Example" @@ -53,7 +53,7 @@ To train a YOLOv8n model on the African wildlife dataset for 100 [epochs](https: from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="african-wildlife.yaml", epochs=100, imgsz=640) @@ -63,7 +63,7 @@ To train a YOLOv8n model on the African wildlife dataset for 100 [epochs](https: ```bash # Start training from a pretrained *.pt model - yolo detect train data=african-wildlife.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=african-wildlife.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` !!! example "Inference Example" @@ -107,9 +107,9 @@ The dataset has been released available under the [AGPL-3.0 License](https://git The African Wildlife Dataset includes images of four common animal species found in South African nature reserves: buffalo, elephant, rhino, and zebra. It is a valuable resource for training computer vision algorithms in object detection and animal identification. The dataset supports various tasks like object tracking, research, and conservation efforts. For more information on its structure and applications, refer to the [Dataset Structure](#dataset-structure) section and [Applications](#applications) of the dataset. -### How do I train a YOLOv8 model using the African Wildlife Dataset? +### How do I train a YOLO11 model using the African Wildlife Dataset? -You can train a YOLOv8 model on the African Wildlife Dataset by using the `african-wildlife.yaml` configuration file. Below is an example of how to train the YOLOv8n model for 100 epochs with an image size of 640: +You can train a YOLO11 model on the African Wildlife Dataset by using the `african-wildlife.yaml` configuration file. Below is an example of how to train the YOLO11n model for 100 epochs with an image size of 640: !!! example @@ -119,7 +119,7 @@ You can train a YOLOv8 model on the African Wildlife Dataset by using the `afric from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="african-wildlife.yaml", epochs=100, imgsz=640) @@ -129,7 +129,7 @@ You can train a YOLOv8 model on the African Wildlife Dataset by using the `afric ```bash # Start training from a pretrained *.pt model - yolo detect train data=african-wildlife.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=african-wildlife.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` For additional training parameters and options, refer to the [Training](../../modes/train.md) documentation. 
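The "Inference Example" referenced above follows the standard Ultralytics predict pattern. As a minimal sketch, assuming fine-tuned weights at the default `runs/detect/train/weights/best.pt` output path and a hypothetical sample image:

```python
from ultralytics import YOLO

# Load fine-tuned weights (default output path from the training run above)
model = YOLO("runs/detect/train/weights/best.pt")

# Run inference; each result carries the detected boxes, class ids, and confidences
results = model.predict("path/to/wildlife_image.jpg", save=True)
for r in results:
    print(r.boxes.cls, r.boxes.conf)
```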
diff --git a/docs/en/datasets/detect/argoverse.md b/docs/en/datasets/detect/argoverse.md index a834be90ed..4280b09a25 100644 --- a/docs/en/datasets/detect/argoverse.md +++ b/docs/en/datasets/detect/argoverse.md @@ -43,7 +43,7 @@ A YAML (Yet Another Markup Language) file is used to define the dataset configur ## Usage -To train a YOLOv8n model on the Argoverse dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. +To train a YOLO11n model on the Argoverse dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! example "Train Example" @@ -53,7 +53,7 @@ To train a YOLOv8n model on the Argoverse dataset for 100 [epochs](https://www.u from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="Argoverse.yaml", epochs=100, imgsz=640) @@ -63,7 +63,7 @@ To train a YOLOv8n model on the Argoverse dataset for 100 [epochs](https://www.u ```bash # Start training from a pretrained *.pt model - yolo detect train data=Argoverse.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=Argoverse.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` ## Sample Data and Annotations @@ -104,7 +104,7 @@ The [Argoverse](https://www.argoverse.org/) dataset, developed by Argo AI, suppo ### How can I train an Ultralytics YOLO model using the Argoverse dataset? -To train a YOLOv8 model with the Argoverse dataset, use the provided YAML configuration file and the following code: +To train a YOLO11 model with the Argoverse dataset, use the provided YAML configuration file and the following code: !!! example "Train Example" @@ -114,7 +114,7 @@ To train a YOLOv8 model with the Argoverse dataset, use the provided YAML config from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="Argoverse.yaml", epochs=100, imgsz=640) @@ -125,7 +125,7 @@ To train a YOLOv8 model with the Argoverse dataset, use the provided YAML config ```bash # Start training from a pretrained *.pt model - yolo detect train data=Argoverse.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=Argoverse.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` For a detailed explanation of the arguments, refer to the model [Training](../../modes/train.md) page. diff --git a/docs/en/datasets/detect/brain-tumor.md b/docs/en/datasets/detect/brain-tumor.md index 9f108e7388..b2498d6f00 100644 --- a/docs/en/datasets/detect/brain-tumor.md +++ b/docs/en/datasets/detect/brain-tumor.md @@ -42,7 +42,7 @@ A YAML (Yet Another Markup Language) file is used to define the dataset configur ## Usage -To train a YOLOv8n model on the brain tumor dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, utilize the provided code snippets. For a detailed list of available arguments, consult the model's [Training](../../modes/train.md) page. 
+To train a YOLO11n model on the brain tumor dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, utilize the provided code snippets. For a detailed list of available arguments, consult the model's [Training](../../modes/train.md) page. !!! example "Train Example" @@ -52,7 +52,7 @@ To train a YOLOv8n model on the brain tumor dataset for 100 [epochs](https://www from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="brain-tumor.yaml", epochs=100, imgsz=640) @@ -62,7 +62,7 @@ To train a YOLOv8n model on the brain tumor dataset for 100 [epochs](https://www ```bash # Start training from a pretrained *.pt model - yolo detect train data=brain-tumor.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=brain-tumor.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` !!! example "Inference Example" @@ -106,9 +106,9 @@ The dataset has been released available under the [AGPL-3.0 License](https://git The brain tumor dataset is divided into two subsets: the **training set** consists of 893 images with corresponding annotations, while the **testing set** comprises 223 images with paired annotations. This structured division aids in developing robust and accurate computer vision models for detecting brain tumors. For more information on the dataset structure, visit the [Dataset Structure](#dataset-structure) section. -### How can I train a YOLOv8 model on the brain tumor dataset using Ultralytics? +### How can I train a YOLO11 model on the brain tumor dataset using Ultralytics? -You can train a YOLOv8 model on the brain tumor dataset for 100 epochs with an image size of 640px using both Python and CLI methods. Below are the examples for both: +You can train a YOLO11 model on the brain tumor dataset for 100 epochs with an image size of 640px using both Python and CLI methods. Below are the examples for both: !!! example "Train Example" @@ -118,7 +118,7 @@ You can train a YOLOv8 model on the brain tumor dataset for 100 epochs with an i from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="brain-tumor.yaml", epochs=100, imgsz=640) @@ -129,7 +129,7 @@ You can train a YOLOv8 model on the brain tumor dataset for 100 epochs with an i ```bash # Start training from a pretrained *.pt model - yolo detect train data=brain-tumor.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=brain-tumor.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` For a detailed list of available arguments, refer to the [Training](../../modes/train.md) page. @@ -138,9 +138,9 @@ For a detailed list of available arguments, refer to the [Training](../../modes/ Using the brain tumor dataset in AI projects enables early diagnosis and treatment planning for brain tumors. It helps in automating brain tumor identification through computer vision, facilitating accurate and timely medical interventions, and supporting personalized treatment strategies. This application holds significant potential in improving patient outcomes and medical efficiencies. -### How do I perform inference using a fine-tuned YOLOv8 model on the brain tumor dataset? 
+### How do I perform inference using a fine-tuned YOLO11 model on the brain tumor dataset? -Inference using a fine-tuned YOLOv8 model can be performed with either Python or CLI approaches. Here are the examples: +Inference using a fine-tuned YOLO11 model can be performed with either Python or CLI approaches. Here are the examples: !!! example "Inference Example" diff --git a/docs/en/datasets/detect/coco.md b/docs/en/datasets/detect/coco.md index d090142838..9af5207d61 100644 --- a/docs/en/datasets/detect/coco.md +++ b/docs/en/datasets/detect/coco.md @@ -21,13 +21,7 @@ The [COCO](https://cocodataset.org/#home) (Common Objects in Context) dataset is ## COCO Pretrained Models -| Model | size
<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>
(B) | -| ------------------------------------------------------------------------------------ | --------------------- | -------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | -| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt) | 640 | 37.3 | 80.4 | 0.99 | 3.2 | 8.7 | -| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s.pt) | 640 | 44.9 | 128.4 | 1.20 | 11.2 | 28.6 | -| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m.pt) | 640 | 50.2 | 234.7 | 1.83 | 25.9 | 78.9 | -| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l.pt) | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 | -| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x.pt) | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 | +{% include "macros/yolo-det-perf.md" %} ## Key Features @@ -60,7 +54,7 @@ A YAML (Yet Another Markup Language) file is used to define the dataset configur ## Usage -To train a YOLOv8n model on the COCO dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. +To train a YOLO11n model on the COCO dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! example "Train Example" @@ -70,7 +64,7 @@ To train a YOLOv8n model on the COCO dataset for 100 [epochs](https://www.ultral from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="coco.yaml", epochs=100, imgsz=640) @@ -80,7 +74,7 @@ To train a YOLOv8n model on the COCO dataset for 100 [epochs](https://www.ultral ```bash # Start training from a pretrained *.pt model - yolo detect train data=coco.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=coco.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` ## Sample Images and Annotations @@ -122,7 +116,7 @@ The [COCO dataset](https://cocodataset.org/#home) (Common Objects in Context) is ### How can I train a YOLO model using the COCO dataset? -To train a YOLOv8 model using the COCO dataset, you can use the following code snippets: +To train a YOLO11 model using the COCO dataset, you can use the following code snippets: !!! example "Train Example" @@ -132,7 +126,7 @@ To train a YOLOv8 model using the COCO dataset, you can use the following code s from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="coco.yaml", epochs=100, imgsz=640) @@ -142,7 +136,7 @@ To train a YOLOv8 model using the COCO dataset, you can use the following code s ```bash # Start training from a pretrained *.pt model - yolo detect train data=coco.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=coco.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` Refer to the [Training page](../../modes/train.md) for more details on available arguments. 
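Since the static performance table above is replaced by the shared docs macro, the quoted COCO mAP figures can also be reproduced locally by validating a pretrained checkpoint against `coco.yaml`. A minimal sketch, assuming the checkpoint downloads automatically and that you have disk space for the COCO validation images:

```python
from ultralytics import YOLO

# Load a COCO-pretrained checkpoint (auto-downloads on first use)
model = YOLO("yolo11n.pt")

# Evaluate on the COCO validation split defined in coco.yaml
metrics = model.val(data="coco.yaml")
print(metrics.box.map)  # mAP50-95
print(metrics.box.map50)  # mAP50
```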
@@ -156,13 +150,15 @@ The COCO dataset includes: - Standardized evaluation metrics for object detection (mAP) and segmentation (mean Average Recall, mAR). - **Mosaicing** technique in training batches to enhance model generalization across various object sizes and contexts. -### Where can I find pretrained YOLOv8 models trained on the COCO dataset? +### Where can I find pretrained YOLO11 models trained on the COCO dataset? -Pretrained YOLOv8 models on the COCO dataset can be downloaded from the links provided in the documentation. Examples include: +Pretrained YOLO11 models on the COCO dataset can be downloaded from the links provided in the documentation. Examples include: -- [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt) -- [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s.pt) -- [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m.pt) +- [YOLO11n](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt) +- [YOLO11s](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s.pt) +- [YOLO11m](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m.pt) +- [YOLO11l](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l.pt) +- [YOLO11x](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x.pt) These models vary in size, mAP, and inference speed, providing options for different performance and resource requirements. diff --git a/docs/en/datasets/detect/coco8.md b/docs/en/datasets/detect/coco8.md index 4a8ad5a852..b6b7a5a0f1 100644 --- a/docs/en/datasets/detect/coco8.md +++ b/docs/en/datasets/detect/coco8.md @@ -1,7 +1,7 @@ --- comments: true description: Explore the Ultralytics COCO8 dataset, a versatile and manageable set of 8 images perfect for testing object detection models and training pipelines. -keywords: COCO8, Ultralytics, dataset, object detection, YOLOv8, training, validation, machine learning, computer vision +keywords: COCO8, Ultralytics, dataset, object detection, YOLO11, training, validation, machine learning, computer vision --- # COCO8 Dataset @@ -21,7 +21,7 @@ keywords: COCO8, Ultralytics, dataset, object detection, YOLOv8, training, valid Watch: Ultralytics COCO Dataset Overview

-This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLOv8](https://github.com/ultralytics/ultralytics). +This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLO11](https://github.com/ultralytics/ultralytics). ## Dataset YAML @@ -35,7 +35,7 @@ A YAML (Yet Another Markup Language) file is used to define the dataset configur ## Usage -To train a YOLOv8n model on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. +To train a YOLO11n model on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! example "Train Example" @@ -45,7 +45,7 @@ To train a YOLOv8n model on the COCO8 dataset for 100 [epochs](https://www.ultra from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="coco8.yaml", epochs=100, imgsz=640) @@ -55,7 +55,7 @@ To train a YOLOv8n model on the COCO8 dataset for 100 [epochs](https://www.ultra ```bash # Start training from a pretrained *.pt model - yolo detect train data=coco8.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` ## Sample Images and Annotations @@ -95,9 +95,9 @@ We would like to acknowledge the COCO Consortium for creating and maintaining th The Ultralytics COCO8 dataset is a compact yet versatile object detection dataset consisting of the first 8 images from the COCO train 2017 set, with 4 images for training and 4 for validation. It is designed for testing and debugging object detection models and experimentation with new detection approaches. Despite its small size, COCO8 offers enough diversity to act as a sanity check for your training pipelines before deploying larger datasets. For more details, view the [COCO8 dataset](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8.yaml). -### How do I train a YOLOv8 model using the COCO8 dataset? +### How do I train a YOLO11 model using the COCO8 dataset? -To train a YOLOv8 model using the COCO8 dataset, you can employ either Python or CLI commands. Here's how you can start: +To train a YOLO11 model using the COCO8 dataset, you can employ either Python or CLI commands. Here's how you can start: !!! 
example "Train Example" @@ -107,7 +107,7 @@ To train a YOLOv8 model using the COCO8 dataset, you can employ either Python or from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="coco8.yaml", epochs=100, imgsz=640) @@ -117,19 +117,19 @@ To train a YOLOv8 model using the COCO8 dataset, you can employ either Python or ```bash # Start training from a pretrained *.pt model - yolo detect train data=coco8.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. ### Why should I use Ultralytics HUB for managing my COCO8 training? -Ultralytics HUB is an all-in-one web tool designed to simplify the training and deployment of YOLO models, including the Ultralytics YOLOv8 models on the COCO8 dataset. It offers cloud training, real-time tracking, and seamless dataset management. HUB allows you to start training with a single click and avoids the complexities of manual setups. Discover more about [Ultralytics HUB](https://hub.ultralytics.com/) and its benefits. +Ultralytics HUB is an all-in-one web tool designed to simplify the training and deployment of YOLO models, including the Ultralytics YOLO11 models on the COCO8 dataset. It offers cloud training, real-time tracking, and seamless dataset management. HUB allows you to start training with a single click and avoids the complexities of manual setups. Discover more about [Ultralytics HUB](https://hub.ultralytics.com/) and its benefits. ### What are the benefits of using mosaic augmentation in training with the COCO8 dataset? Mosaic augmentation, demonstrated in the COCO8 dataset, combines multiple images into a single image during training. This technique increases the variety of objects and scenes in each training batch, improving the model's ability to generalize across different object sizes, aspect ratios, and contexts. This results in a more robust object detection model. For more details, refer to the [training guide](#usage). -### How can I validate my YOLOv8 model trained on the COCO8 dataset? +### How can I validate my YOLO11 model trained on the COCO8 dataset? -Validation of your YOLOv8 model trained on the COCO8 dataset can be performed using the model's validation commands. You can invoke the validation mode via CLI or Python script to evaluate the model's performance using precise metrics. For detailed instructions, visit the [Validation](../../modes/val.md) page. +Validation of your YOLO11 model trained on the COCO8 dataset can be performed using the model's validation commands. You can invoke the validation mode via CLI or Python script to evaluate the model's performance using precise metrics. For detailed instructions, visit the [Validation](../../modes/val.md) page. 
diff --git a/docs/en/datasets/detect/globalwheat2020.md b/docs/en/datasets/detect/globalwheat2020.md index ef7ff7ac31..e744b8d666 100644 --- a/docs/en/datasets/detect/globalwheat2020.md +++ b/docs/en/datasets/detect/globalwheat2020.md @@ -38,7 +38,7 @@ A YAML (Yet Another Markup Language) file is used to define the dataset configur ## Usage -To train a YOLOv8n model on the Global Wheat Head Dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. +To train a YOLO11n model on the Global Wheat Head Dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! example "Train Example" @@ -48,7 +48,7 @@ To train a YOLOv8n model on the Global Wheat Head Dataset for 100 [epochs](https from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="GlobalWheat2020.yaml", epochs=100, imgsz=640) @@ -58,7 +58,7 @@ To train a YOLOv8n model on the Global Wheat Head Dataset for 100 [epochs](https ```bash # Start training from a pretrained *.pt model - yolo detect train data=GlobalWheat2020.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=GlobalWheat2020.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` ## Sample Data and Annotations @@ -96,9 +96,9 @@ We would like to acknowledge the researchers and institutions that contributed t The Global Wheat Head Dataset is primarily used for developing and training deep learning models aimed at wheat head detection. This is crucial for applications in wheat phenotyping and crop management, allowing for more accurate estimations of wheat head density, size, and overall crop yield potential. Accurate detection methods help in assessing crop health and maturity, essential for efficient crop management. -### How do I train a YOLOv8n model on the Global Wheat Head Dataset? +### How do I train a YOLO11n model on the Global Wheat Head Dataset? -To train a YOLOv8n model on the Global Wheat Head Dataset, you can use the following code snippets. Make sure you have the `GlobalWheat2020.yaml` configuration file specifying dataset paths and classes: +To train a YOLO11n model on the Global Wheat Head Dataset, you can use the following code snippets. Make sure you have the `GlobalWheat2020.yaml` configuration file specifying dataset paths and classes: !!! 
example "Train Example" @@ -108,7 +108,7 @@ To train a YOLOv8n model on the Global Wheat Head Dataset, you can use the follo from ultralytics import YOLO # Load a pre-trained model (recommended for training) - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Train the model results = model.train(data="GlobalWheat2020.yaml", epochs=100, imgsz=640) @@ -118,7 +118,7 @@ To train a YOLOv8n model on the Global Wheat Head Dataset, you can use the follo ```bash # Start training from a pretrained *.pt model - yolo detect train data=GlobalWheat2020.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=GlobalWheat2020.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. diff --git a/docs/en/datasets/detect/index.md b/docs/en/datasets/detect/index.md index 61640480f6..f5586a2868 100644 --- a/docs/en/datasets/detect/index.md +++ b/docs/en/datasets/detect/index.md @@ -56,7 +56,7 @@ Here's how you can use these formats to train your model: from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="coco8.yaml", epochs=100, imgsz=640) @@ -66,7 +66,7 @@ Here's how you can use these formats to train your model: ```bash # Start training from a pretrained *.pt model - yolo detect train data=coco8.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` ## Supported Datasets @@ -158,11 +158,11 @@ Ultralytics YOLO supports a wide range of datasets, including: - [Objects365](objects365.md) - [OpenImagesV7](open-images-v7.md) -Each dataset page provides detailed information on the structure and usage tailored for efficient YOLOv8 training. Explore the full list in the [Supported Datasets](#supported-datasets) section. +Each dataset page provides detailed information on the structure and usage tailored for efficient YOLO11 training. Explore the full list in the [Supported Datasets](#supported-datasets) section. -### How do I start training a YOLOv8 model using my dataset? +### How do I start training a YOLO11 model using my dataset? -To start training a YOLOv8 model, ensure your dataset is formatted correctly and the paths are defined in a YAML file. Use the following script to begin training: +To start training a YOLO11 model, ensure your dataset is formatted correctly and the paths are defined in a YAML file. Use the following script to begin training: !!! example @@ -171,18 +171,18 @@ To start training a YOLOv8 model, ensure your dataset is formatted correctly and ```python from ultralytics import YOLO - model = YOLO("yolov8n.pt") # Load a pretrained model + model = YOLO("yolo11n.pt") # Load a pretrained model results = model.train(data="path/to/your_dataset.yaml", epochs=100, imgsz=640) ``` === "CLI" ```bash - yolo detect train data=path/to/your_dataset.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=path/to/your_dataset.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` Refer to the [Usage](#usage) section for more details on utilizing different modes, including CLI commands. ### Where can I find practical examples of using Ultralytics YOLO for object detection? -Ultralytics provides numerous examples and practical guides for using YOLOv8 in diverse applications. 
For a comprehensive overview, visit the [Ultralytics Blog](https://www.ultralytics.com/blog) where you can find case studies, detailed tutorials, and community stories showcasing object detection, segmentation, and more with YOLOv8. For specific examples, check the [Usage](../../modes/predict.md) section in the documentation. +Ultralytics provides numerous examples and practical guides for using YOLO11 in diverse applications. For a comprehensive overview, visit the [Ultralytics Blog](https://www.ultralytics.com/blog) where you can find case studies, detailed tutorials, and community stories showcasing object detection, segmentation, and more with YOLO11. For specific examples, check the [Usage](../../modes/predict.md) section in the documentation. diff --git a/docs/en/datasets/detect/lvis.md b/docs/en/datasets/detect/lvis.md index c4a4ff76ed..7bcfd088eb 100644 --- a/docs/en/datasets/detect/lvis.md +++ b/docs/en/datasets/detect/lvis.md @@ -56,7 +56,7 @@ A YAML (Yet Another Markup Language) file is used to define the dataset configur ## Usage -To train a YOLOv8n model on the LVIS dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. +To train a YOLO11n model on the LVIS dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! example "Train Example" @@ -66,7 +66,7 @@ To train a YOLOv8n model on the LVIS dataset for 100 [epochs](https://www.ultral from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="lvis.yaml", epochs=100, imgsz=640) @@ -76,7 +76,7 @@ To train a YOLOv8n model on the LVIS dataset for 100 [epochs](https://www.ultral ```bash # Start training from a pretrained *.pt model - yolo detect train data=lvis.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=lvis.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` ## Sample Images and Annotations @@ -114,9 +114,9 @@ We would like to acknowledge the LVIS Consortium for creating and maintaining th The [LVIS dataset](https://www.lvisdataset.org/) is a large-scale dataset with fine-grained vocabulary-level annotations developed by Facebook AI Research (FAIR). It is primarily used for object detection and instance segmentation, featuring over 1203 object categories and 2 million instance annotations. Researchers and practitioners use it to train and benchmark models like Ultralytics YOLO for advanced computer vision tasks. The dataset's extensive size and diversity make it an essential resource for pushing the boundaries of model performance in detection and segmentation. -### How can I train a YOLOv8n model using the LVIS dataset? +### How can I train a YOLO11n model using the LVIS dataset? -To train a YOLOv8n model on the LVIS dataset for 100 epochs with an image size of 640, follow the example below. This process utilizes Ultralytics' framework, which offers comprehensive training features. +To train a YOLO11n model on the LVIS dataset for 100 epochs with an image size of 640, follow the example below. 
This process utilizes Ultralytics' framework, which offers comprehensive training features. !!! example "Train Example" @@ -126,7 +126,7 @@ To train a YOLOv8n model on the LVIS dataset for 100 epochs with an image size o from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="lvis.yaml", epochs=100, imgsz=640) @@ -137,7 +137,7 @@ To train a YOLOv8n model on the LVIS dataset for 100 epochs with an image size o ```bash # Start training from a pretrained *.pt model - yolo detect train data=lvis.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=lvis.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` For detailed training configurations, refer to the [Training](../../modes/train.md) documentation. @@ -148,7 +148,7 @@ The images in the LVIS dataset are the same as those in the [COCO dataset](./coc ### Why should I use Ultralytics YOLO for training on the LVIS dataset? -Ultralytics YOLO models, including the latest YOLOv8, are optimized for real-time object detection with state-of-the-art [accuracy](https://www.ultralytics.com/glossary/accuracy) and speed. They support a wide range of annotations, such as the fine-grained ones provided by the LVIS dataset, making them ideal for advanced computer vision applications. Moreover, Ultralytics offers seamless integration with various [training](../../modes/train.md), [validation](../../modes/val.md), and [prediction](../../modes/predict.md) modes, ensuring efficient model development and deployment. +Ultralytics YOLO models, including the latest YOLO11, are optimized for real-time object detection with state-of-the-art [accuracy](https://www.ultralytics.com/glossary/accuracy) and speed. They support a wide range of annotations, such as the fine-grained ones provided by the LVIS dataset, making them ideal for advanced computer vision applications. Moreover, Ultralytics offers seamless integration with various [training](../../modes/train.md), [validation](../../modes/val.md), and [prediction](../../modes/predict.md) modes, ensuring efficient model development and deployment. ### Can I see some sample annotations from the LVIS dataset? diff --git a/docs/en/datasets/detect/objects365.md b/docs/en/datasets/detect/objects365.md index 49947617af..96e6f3140c 100644 --- a/docs/en/datasets/detect/objects365.md +++ b/docs/en/datasets/detect/objects365.md @@ -1,7 +1,7 @@ --- comments: true description: Explore the Objects365 Dataset with 2M images and 30M bounding boxes across 365 categories. Enhance your object detection models with diverse, high-quality data. -keywords: Objects365 dataset, object detection, machine learning, deep learning, computer vision, annotated images, bounding boxes, YOLOv8, high-resolution images, dataset configuration +keywords: Objects365 dataset, object detection, machine learning, deep learning, computer vision, annotated images, bounding boxes, YOLO11, high-resolution images, dataset configuration --- # Objects365 Dataset @@ -38,7 +38,7 @@ A YAML (Yet Another Markup Language) file is used to define the dataset configur ## Usage -To train a YOLOv8n model on the Objects365 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. 
+To train a YOLO11n model on the Objects365 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! example "Train Example" @@ -48,7 +48,7 @@ To train a YOLOv8n model on the Objects365 dataset for 100 [epochs](https://www. from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="Objects365.yaml", epochs=100, imgsz=640) @@ -58,7 +58,7 @@ To train a YOLOv8n model on the Objects365 dataset for 100 [epochs](https://www. ```bash # Start training from a pretrained *.pt model - yolo detect train data=Objects365.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=Objects365.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` ## Sample Data and Annotations @@ -97,9 +97,9 @@ We would like to acknowledge the team of researchers who created and maintain th The [Objects365 dataset](https://www.objects365.org/) is designed for object detection tasks in [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) and computer vision. It provides a large-scale, high-quality dataset with 2 million annotated images and 30 million bounding boxes across 365 categories. Leveraging such a diverse dataset helps improve the performance and generalization of object detection models, making it invaluable for research and development in the field. -### How can I train a YOLOv8 model on the Objects365 dataset? +### How can I train a YOLO11 model on the Objects365 dataset? -To train a YOLOv8n model using the Objects365 dataset for 100 epochs with an image size of 640, follow these instructions: +To train a YOLO11n model using the Objects365 dataset for 100 epochs with an image size of 640, follow these instructions: !!! example "Train Example" @@ -109,7 +109,7 @@ To train a YOLOv8n model using the Objects365 dataset for 100 epochs with an ima from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="Objects365.yaml", epochs=100, imgsz=640) @@ -119,7 +119,7 @@ To train a YOLOv8n model using the Objects365 dataset for 100 epochs with an ima ```bash # Start training from a pretrained *.pt model - yolo detect train data=Objects365.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=Objects365.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` Refer to the [Training](../../modes/train.md) page for a comprehensive list of available arguments. diff --git a/docs/en/datasets/detect/open-images-v7.md b/docs/en/datasets/detect/open-images-v7.md index 1e6f1f7e4f..7083a6354c 100644 --- a/docs/en/datasets/detect/open-images-v7.md +++ b/docs/en/datasets/detect/open-images-v7.md @@ -1,7 +1,7 @@ --- comments: true -description: Explore the comprehensive Open Images V7 dataset by Google. Learn about its annotations, applications, and use YOLOv8 pretrained models for computer vision tasks. -keywords: Open Images V7, Google dataset, computer vision, YOLOv8 models, object detection, image segmentation, visual relationships, AI research, Ultralytics +description: Explore the comprehensive Open Images V7 dataset by Google. 
Learn about its annotations, applications, and use YOLO11 pretrained models for computer vision tasks. +keywords: Open Images V7, Google dataset, computer vision, YOLO11 models, object detection, image segmentation, visual relationships, AI research, Ultralytics --- # Open Images V7 Dataset @@ -16,7 +16,7 @@ keywords: Open Images V7, Google dataset, computer vision, YOLOv8 models, object allowfullscreen>
- Watch: [Object Detection](https://www.ultralytics.com/glossary/object-detection) using OpenImagesV7 Pretrained Model + Watch: Object Detection using OpenImagesV7 Pretrained Model

## Open Images V7 Pretrained Models @@ -69,7 +69,7 @@ Typically, datasets come with a YAML (Yet Another Markup Language) file that del ## Usage -To train a YOLOv8n model on the Open Images V7 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. +To train a YOLO11n model on the Open Images V7 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! warning @@ -87,8 +87,8 @@ To train a YOLOv8n model on the Open Images V7 dataset for 100 [epochs](https:// ```python from ultralytics import YOLO - # Load a COCO-pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a COCO-pretrained YOLO11n model + model = YOLO("yolo11n.pt") # Train the model on the Open Images V7 dataset results = model.train(data="open-images-v7.yaml", epochs=100, imgsz=640) @@ -97,8 +97,8 @@ To train a YOLOv8n model on the Open Images V7 dataset for 100 [epochs](https:// === "CLI" ```bash - # Train a COCO-pretrained YOLOv8n model on the Open Images V7 dataset - yolo detect train data=open-images-v7.yaml model=yolov8n.pt epochs=100 imgsz=640 + # Train a COCO-pretrained YOLO11n model on the Open Images V7 dataset + yolo detect train data=open-images-v7.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` ## Sample Data and Annotations @@ -136,9 +136,9 @@ A heartfelt acknowledgment goes out to the Google AI team for creating and maint Open Images V7 is an extensive and versatile dataset created by Google, designed to advance research in computer vision. It includes image-level labels, object bounding boxes, object segmentation masks, visual relationships, and localized narratives, making it ideal for various computer vision tasks such as object detection, segmentation, and relationship detection. -### How do I train a YOLOv8 model on the Open Images V7 dataset? +### How do I train a YOLO11 model on the Open Images V7 dataset? -To train a YOLOv8 model on the Open Images V7 dataset, you can use both Python and CLI commands. Here's an example of training the YOLOv8n model for 100 epochs with an image size of 640: +To train a YOLO11 model on the Open Images V7 dataset, you can use both Python and CLI commands. Here's an example of training the YOLO11n model for 100 epochs with an image size of 640: !!! example "Train Example" @@ -147,8 +147,8 @@ To train a YOLOv8 model on the Open Images V7 dataset, you can use both Python a ```python from ultralytics import YOLO - # Load a COCO-pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a COCO-pretrained YOLO11n model + model = YOLO("yolo11n.pt") # Train the model on the Open Images V7 dataset results = model.train(data="open-images-v7.yaml", epochs=100, imgsz=640) @@ -158,8 +158,8 @@ To train a YOLOv8 model on the Open Images V7 dataset, you can use both Python a === "CLI" ```bash - # Train a COCO-pretrained YOLOv8n model on the Open Images V7 dataset - yolo detect train data=open-images-v7.yaml model=yolov8n.pt epochs=100 imgsz=640 + # Train a COCO-pretrained YOLO11n model on the Open Images V7 dataset + yolo detect train data=open-images-v7.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` For more details on arguments and settings, refer to the [Training](../../modes/train.md) page. 
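Beyond training, the Open Images V7 pretrained checkpoints listed earlier can be used for inference directly. A minimal sketch, assuming the `yolov8n-oiv7.pt` checkpoint name from the pretrained-models table above:

```python
from ultralytics import YOLO

# Load an Open Images V7 pretrained checkpoint (auto-downloads if available)
model = YOLO("yolov8n-oiv7.pt")

# Predict on a sample image across the 600 Open Images V7 classes
results = model.predict("https://ultralytics.com/images/bus.jpg")
results[0].show()
```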
diff --git a/docs/en/datasets/detect/roboflow-100.md b/docs/en/datasets/detect/roboflow-100.md index 6b3c540e03..ba228242a6 100644 --- a/docs/en/datasets/detect/roboflow-100.md +++ b/docs/en/datasets/detect/roboflow-100.md @@ -67,7 +67,7 @@ Dataset benchmarking evaluates machine learning model performance on specific da if path.exists(): # Fix YAML file and run training benchmark.fix_yaml(str(path)) - os.system(f"yolo detect train data={path} model=yolov8s.pt epochs=1 batch=16") + os.system(f"yolo detect train data={path} model=yolo11s.pt epochs=1 batch=16") # Run validation and evaluate os.system(f"yolo detect val data={path} model=runs/detect/train/weights/best.pt > {val_log_file} 2>&1") @@ -165,7 +165,7 @@ To use the Roboflow 100 dataset for benchmarking, you can implement the RF100Ben if path.exists(): # Fix YAML file and run training benchmark.fix_yaml(str(path)) - os.system(f"yolo detect train data={path} model=yolov8s.pt epochs=1 batch=16") + os.system(f"yolo detect train data={path} model=yolo11s.pt epochs=1 batch=16") # Run validation and evaluate os.system(f"yolo detect val data={path} model=runs/detect/train/weights/best.pt > {val_log_file} 2>&1") diff --git a/docs/en/datasets/detect/signature.md b/docs/en/datasets/detect/signature.md index 5746d57e02..834e711a45 100644 --- a/docs/en/datasets/detect/signature.md +++ b/docs/en/datasets/detect/signature.md @@ -1,7 +1,7 @@ --- comments: true description: Discover the Signature Detection Dataset for training models to identify and verify human signatures in various documents. Perfect for document verification and fraud prevention. -keywords: Signature Detection Dataset, document verification, fraud detection, computer vision, YOLOv8, Ultralytics, annotated signatures, training dataset +keywords: Signature Detection Dataset, document verification, fraud detection, computer vision, YOLO11, Ultralytics, annotated signatures, training dataset --- # Signature Detection Dataset @@ -31,7 +31,7 @@ A YAML (Yet Another Markup Language) file defines the dataset configuration, inc ## Usage -To train a YOLOv8n model on the signature detection dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, use the provided code samples. For a comprehensive list of available parameters, refer to the model's [Training](../../modes/train.md) page. +To train a YOLO11n model on the signature detection dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, use the provided code samples. For a comprehensive list of available parameters, refer to the model's [Training](../../modes/train.md) page. !!! example "Train Example" @@ -41,7 +41,7 @@ To train a YOLOv8n model on the signature detection dataset for 100 [epochs](htt from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="signature.yaml", epochs=100, imgsz=640) @@ -51,7 +51,7 @@ To train a YOLOv8n model on the signature detection dataset for 100 [epochs](htt ```bash # Start training from a pretrained *.pt model - yolo detect train data=signature.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=signature.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` !!!
example "Inference Example" @@ -95,9 +95,9 @@ The dataset has been released available under the [AGPL-3.0 License](https://git The Signature Detection Dataset is a collection of annotated images aimed at detecting human signatures within various document types. It can be applied in computer vision tasks such as [object detection](https://www.ultralytics.com/glossary/object-detection) and tracking, primarily for document verification, fraud detection, and archival research. This dataset helps train models to recognize signatures in different contexts, making it valuable for both research and practical applications. -### How do I train a YOLOv8n model on the Signature Detection Dataset? +### How do I train a YOLO11n model on the Signature Detection Dataset? -To train a YOLOv8n model on the Signature Detection Dataset, follow these steps: +To train a YOLO11n model on the Signature Detection Dataset, follow these steps: 1. Download the `signature.yaml` dataset configuration file from [signature.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/signature.yaml). 2. Use the following Python script or CLI command to start training: @@ -110,7 +110,7 @@ To train a YOLOv8n model on the Signature Detection Dataset, follow these steps: from ultralytics import YOLO # Load a pretrained model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Train the model results = model.train(data="signature.yaml", epochs=100, imgsz=640) @@ -119,7 +119,7 @@ To train a YOLOv8n model on the Signature Detection Dataset, follow these steps: === "CLI" ```bash - yolo detect train data=signature.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=signature.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` For more details, refer to the [Training](../../modes/train.md) page. diff --git a/docs/en/datasets/detect/sku-110k.md b/docs/en/datasets/detect/sku-110k.md index c6cddc483f..1862dbdc7e 100644 --- a/docs/en/datasets/detect/sku-110k.md +++ b/docs/en/datasets/detect/sku-110k.md @@ -51,7 +51,7 @@ A YAML (Yet Another Markup Language) file is used to define the dataset configur ## Usage -To train a YOLOv8n model on the SKU-110K dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. +To train a YOLO11n model on the SKU-110K dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! example "Train Example" @@ -61,7 +61,7 @@ To train a YOLOv8n model on the SKU-110K dataset for 100 [epochs](https://www.ul from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="SKU-110K.yaml", epochs=100, imgsz=640) @@ -71,7 +71,7 @@ To train a YOLOv8n model on the SKU-110K dataset for 100 [epochs](https://www.ul ```bash # Start training from a pretrained *.pt model - yolo detect train data=SKU-110K.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=SKU-110K.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` ## Sample Data and Annotations @@ -109,9 +109,9 @@ We would like to acknowledge Eran Goldman et al. 
for creating and maintaining th The SKU-110k dataset consists of densely packed retail shelf images designed to aid research in object detection tasks. Developed by Eran Goldman et al., it includes over 110,000 unique SKU categories. Its importance lies in its ability to challenge state-of-the-art object detectors with diverse object appearances and close proximity, making it an invaluable resource for researchers and practitioners in computer vision. Learn more about the dataset's structure and applications in our [SKU-110k Dataset](#sku-110k-dataset) section. -### How do I train a YOLOv8 model using the SKU-110k dataset? +### How do I train a YOLO11 model using the SKU-110k dataset? -Training a YOLOv8 model on the SKU-110k dataset is straightforward. Here's an example to train a YOLOv8n model for 100 epochs with an image size of 640: +Training a YOLO11 model on the SKU-110k dataset is straightforward. Here's an example to train a YOLO11n model for 100 epochs with an image size of 640: !!! example "Train Example" @@ -121,7 +121,7 @@ Training a YOLOv8 model on the SKU-110k dataset is straightforward. Here's an ex from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="SKU-110K.yaml", epochs=100, imgsz=640) @@ -132,7 +132,7 @@ Training a YOLOv8 model on the SKU-110k dataset is straightforward. Here's an ex ```bash # Start training from a pretrained *.pt model - yolo detect train data=SKU-110K.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=SKU-110K.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. diff --git a/docs/en/datasets/detect/visdrone.md b/docs/en/datasets/detect/visdrone.md index 99b182cb4e..cbc89d835c 100644 --- a/docs/en/datasets/detect/visdrone.md +++ b/docs/en/datasets/detect/visdrone.md @@ -47,7 +47,7 @@ A YAML (Yet Another Markup Language) file is used to define the dataset configur ## Usage -To train a YOLOv8n model on the VisDrone dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. +To train a YOLO11n model on the VisDrone dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! 
example "Train Example" @@ -57,7 +57,7 @@ To train a YOLOv8n model on the VisDrone dataset for 100 [epochs](https://www.ul from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="VisDrone.yaml", epochs=100, imgsz=640) @@ -67,7 +67,7 @@ To train a YOLOv8n model on the VisDrone dataset for 100 [epochs](https://www.ul ```bash # Start training from a pretrained *.pt model - yolo detect train data=VisDrone.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=VisDrone.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` ## Sample Data and Annotations @@ -113,9 +113,9 @@ The [VisDrone Dataset](https://github.com/VisDrone/VisDrone-Dataset) is a large- - **Diversity**: Collected across 14 cities, in urban and rural settings, under different weather and lighting conditions. - **Tasks**: Split into five main tasks—object detection in images and videos, single-object and multi-object tracking, and crowd counting. -### How can I use the VisDrone Dataset to train a YOLOv8 model with Ultralytics? +### How can I use the VisDrone Dataset to train a YOLO11 model with Ultralytics? -To train a YOLOv8 model on the VisDrone dataset for 100 epochs with an image size of 640, you can follow these steps: +To train a YOLO11 model on the VisDrone dataset for 100 epochs with an image size of 640, you can follow these steps: !!! example "Train Example" @@ -125,7 +125,7 @@ To train a YOLOv8 model on the VisDrone dataset for 100 epochs with an image siz from ultralytics import YOLO # Load a pretrained model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Train the model results = model.train(data="VisDrone.yaml", epochs=100, imgsz=640) @@ -135,7 +135,7 @@ To train a YOLOv8 model on the VisDrone dataset for 100 epochs with an image siz ```bash # Start training from a pretrained *.pt model - yolo detect train data=VisDrone.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=VisDrone.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` For additional configuration options, please refer to the model [Training](../../modes/train.md) page. diff --git a/docs/en/datasets/detect/voc.md b/docs/en/datasets/detect/voc.md index 7dc67fb5a4..449810e698 100644 --- a/docs/en/datasets/detect/voc.md +++ b/docs/en/datasets/detect/voc.md @@ -39,7 +39,7 @@ A YAML (Yet Another Markup Language) file is used to define the dataset configur ## Usage -To train a YOLOv8n model on the VOC dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. +To train a YOLO11n model on the VOC dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! 
example "Train Example" @@ -49,7 +49,7 @@ To train a YOLOv8n model on the VOC dataset for 100 [epochs](https://www.ultraly from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="VOC.yaml", epochs=100, imgsz=640) @@ -59,7 +59,7 @@ To train a YOLOv8n model on the VOC dataset for 100 [epochs](https://www.ultraly ```bash # Start training from a pretrained *.pt model - yolo detect train data=VOC.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=VOC.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` ## Sample Images and Annotations @@ -99,9 +99,9 @@ We would like to acknowledge the PASCAL VOC Consortium for creating and maintain The [PASCAL VOC](http://host.robots.ox.ac.uk/pascal/VOC/) (Visual Object Classes) dataset is a renowned benchmark for [object detection](https://www.ultralytics.com/glossary/object-detection), segmentation, and classification in computer vision. It includes comprehensive annotations like bounding boxes, class labels, and segmentation masks across 20 different object categories. Researchers use it widely to evaluate the performance of models like Faster R-CNN, YOLO, and Mask R-CNN due to its standardized evaluation metrics such as mean Average Precision (mAP). -### How do I train a YOLOv8 model using the VOC dataset? +### How do I train a YOLO11 model using the VOC dataset? -To train a YOLOv8 model with the VOC dataset, you need the dataset configuration in a YAML file. Here's an example to start training a YOLOv8n model for 100 epochs with an image size of 640: +To train a YOLO11 model with the VOC dataset, you need the dataset configuration in a YAML file. Here's an example to start training a YOLO11n model for 100 epochs with an image size of 640: !!! example "Train Example" @@ -111,7 +111,7 @@ To train a YOLOv8 model with the VOC dataset, you need the dataset configuration from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="VOC.yaml", epochs=100, imgsz=640) @@ -121,7 +121,7 @@ To train a YOLOv8 model with the VOC dataset, you need the dataset configuration ```bash # Start training from a pretrained *.pt model - yolo detect train data=VOC.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=VOC.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` ### What are the primary challenges included in the VOC dataset? 
diff --git a/docs/en/datasets/detect/xview.md b/docs/en/datasets/detect/xview.md index df8e493357..41b6c20ad8 100644 --- a/docs/en/datasets/detect/xview.md +++ b/docs/en/datasets/detect/xview.md @@ -52,7 +52,7 @@ To train a model on the xView dataset for 100 [epochs](https://www.ultralytics.c from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="xView.yaml", epochs=100, imgsz=640) @@ -62,7 +62,7 @@ To train a model on the xView dataset for 100 [epochs](https://www.ultralytics.c ```bash # Start training from a pretrained *.pt model - yolo detect train data=xView.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=xView.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` ## Sample Data and Annotations @@ -114,7 +114,7 @@ To train a model on the xView dataset using Ultralytics YOLO, follow these steps from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="xView.yaml", epochs=100, imgsz=640) @@ -125,7 +125,7 @@ To train a model on the xView dataset using Ultralytics YOLO, follow these steps ```bash # Start training from a pretrained *.pt model - yolo detect train data=xView.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=xView.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` For detailed arguments and settings, refer to the model [Training](../../modes/train.md) page. diff --git a/docs/en/datasets/explorer/api.md b/docs/en/datasets/explorer/api.md index 6c716c14c5..a3e525daa1 100644 --- a/docs/en/datasets/explorer/api.md +++ b/docs/en/datasets/explorer/api.md @@ -36,7 +36,7 @@ pip install ultralytics[explorer] from ultralytics import Explorer # Create an Explorer object -explorer = Explorer(data="coco128.yaml", model="yolov8n.pt") +explorer = Explorer(data="coco128.yaml", model="yolo11n.pt") # Create embeddings for your dataset explorer.create_embeddings_table() @@ -75,7 +75,7 @@ You get a pandas dataframe with the `limit` number of most similar data points t from ultralytics import Explorer # create an Explorer object - exp = Explorer(data="coco128.yaml", model="yolov8n.pt") + exp = Explorer(data="coco128.yaml", model="yolo11n.pt") exp.create_embeddings_table() similar = exp.get_similar(img="https://ultralytics.com/images/bus.jpg", limit=10) @@ -95,7 +95,7 @@ You get a pandas dataframe with the `limit` number of most similar data points t from ultralytics import Explorer # create an Explorer object - exp = Explorer(data="coco128.yaml", model="yolov8n.pt") + exp = Explorer(data="coco128.yaml", model="yolo11n.pt") exp.create_embeddings_table() similar = exp.get_similar(idx=1, limit=10) @@ -118,7 +118,7 @@ You can also plot the similar images using the `plot_similar` method. This metho from ultralytics import Explorer # create an Explorer object - exp = Explorer(data="coco128.yaml", model="yolov8n.pt") + exp = Explorer(data="coco128.yaml", model="yolo11n.pt") exp.create_embeddings_table() plt = exp.plot_similar(img="https://ultralytics.com/images/bus.jpg", limit=10) @@ -131,7 +131,7 @@ You can also plot the similar images using the `plot_similar` method. 
This metho from ultralytics import Explorer # create an Explorer object - exp = Explorer(data="coco128.yaml", model="yolov8n.pt") + exp = Explorer(data="coco128.yaml", model="yolo11n.pt") exp.create_embeddings_table() plt = exp.plot_similar(idx=1, limit=10) @@ -150,7 +150,7 @@ Note: This works using LLMs under the hood so the results are probabilistic and from ultralytics.data.explorer import plot_query_result # create an Explorer object - exp = Explorer(data="coco128.yaml", model="yolov8n.pt") + exp = Explorer(data="coco128.yaml", model="yolo11n.pt") exp.create_embeddings_table() df = exp.ask_ai("show me 100 images with exactly one person and 2 dogs. There can be other objects too") @@ -171,7 +171,7 @@ You can run SQL queries on your dataset using the `sql_query` method. This metho from ultralytics import Explorer # create an Explorer object - exp = Explorer(data="coco128.yaml", model="yolov8n.pt") + exp = Explorer(data="coco128.yaml", model="yolo11n.pt") exp.create_embeddings_table() df = exp.sql_query("WHERE labels LIKE '%person%' AND labels LIKE '%dog%'") @@ -188,7 +188,7 @@ You can also plot the results of a SQL query using the `plot_sql_query` method. from ultralytics import Explorer # create an Explorer object - exp = Explorer(data="coco128.yaml", model="yolov8n.pt") + exp = Explorer(data="coco128.yaml", model="yolo11n.pt") exp.create_embeddings_table() # plot the SQL Query @@ -235,7 +235,7 @@ Here are some examples of what you can do with the table: ```python from ultralytics import Explorer - exp = Explorer(model="yolov8n.pt") + exp = Explorer(model="yolo11n.pt") exp.create_embeddings_table() table = exp.table @@ -359,7 +359,7 @@ You can use the Ultralytics Explorer API to perform similarity searches by creat from ultralytics import Explorer # Create an Explorer object -explorer = Explorer(data="coco128.yaml", model="yolov8n.pt") +explorer = Explorer(data="coco128.yaml", model="yolo11n.pt") explorer.create_embeddings_table() # Search for similar images to a given image @@ -381,7 +381,7 @@ The Ask AI feature allows users to filter datasets using natural language querie from ultralytics import Explorer # Create an Explorer object -explorer = Explorer(data="coco128.yaml", model="yolov8n.pt") +explorer = Explorer(data="coco128.yaml", model="yolo11n.pt") explorer.create_embeddings_table() # Query with natural language diff --git a/docs/en/datasets/explorer/explorer.ipynb b/docs/en/datasets/explorer/explorer.ipynb index fbca01737c..c0fac941a8 100644 --- a/docs/en/datasets/explorer/explorer.ipynb +++ b/docs/en/datasets/explorer/explorer.ipynb @@ -88,7 +88,7 @@ }, "outputs": [], "source": [ - "exp = Explorer(\"VOC.yaml\", model=\"yolov8n.pt\")\n", + "exp = Explorer(\"VOC.yaml\", model=\"yolo11n.pt\")\n", "exp.create_embeddings_table()" ] }, diff --git a/docs/en/datasets/index.md b/docs/en/datasets/index.md index 19e395307b..a53d2040d1 100644 --- a/docs/en/datasets/index.md +++ b/docs/en/datasets/index.md @@ -69,6 +69,7 @@ Pose estimation is a technique used to determine the pose of the object relative - [COCO](pose/coco.md): A large-scale dataset with human pose annotations designed for pose estimation tasks. - [COCO8-pose](pose/coco8-pose.md): A smaller dataset for pose estimation tasks, containing a subset of 8 COCO images with human pose annotations. - [Tiger-pose](pose/tiger-pose.md): A compact dataset consisting of 263 images focused on tigers, annotated with 12 keypoints per tiger for pose estimation tasks. 
+- [Hand-Keypoints](pose/hand-keypoints.md): A concise dataset featuring over 26,000 images centered on human hands, annotated with 21 keypoints per hand, designed for pose estimation tasks. ## [Classification](classify/index.md) diff --git a/docs/en/datasets/obb/dota-v2.md b/docs/en/datasets/obb/dota-v2.md index 76024cac10..c2f64e1a1e 100644 --- a/docs/en/datasets/obb/dota-v2.md +++ b/docs/en/datasets/obb/dota-v2.md @@ -108,8 +108,8 @@ To train a model on the DOTA v1 dataset, you can utilize the following code snip ```python from ultralytics import YOLO - # Create a new YOLOv8n-OBB model from scratch - model = YOLO("yolov8n-obb.yaml") + # Create a new YOLO11n-OBB model from scratch + model = YOLO("yolo11n-obb.yaml") # Train the model on the DOTAv1 dataset results = model.train(data="DOTAv1.yaml", epochs=100, imgsz=1024) @@ -118,8 +118,8 @@ To train a model on the DOTA v1 dataset, you can utilize the following code snip === "CLI" ```bash - # Train a new YOLOv8n-OBB model on the DOTAv1 dataset - yolo obb train data=DOTAv1.yaml model=yolov8n-obb.pt epochs=100 imgsz=1024 + # Train a new YOLO11n-OBB model on the DOTAv1 dataset + yolo obb train data=DOTAv1.yaml model=yolo11n-obb.pt epochs=100 imgsz=1024 ``` ## Sample Data and Annotations @@ -176,8 +176,8 @@ To train a model on the DOTA dataset, you can use the following example with Ult ```python from ultralytics import YOLO - # Create a new YOLOv8n-OBB model from scratch - model = YOLO("yolov8n-obb.yaml") + # Create a new YOLO11n-OBB model from scratch + model = YOLO("yolo11n-obb.yaml") # Train the model on the DOTAv1 dataset results = model.train(data="DOTAv1.yaml", epochs=100, imgsz=1024) @@ -186,8 +186,8 @@ To train a model on the DOTA dataset, you can use the following example with Ult === "CLI" ```bash - # Train a new YOLOv8n-OBB model on the DOTAv1 dataset - yolo obb train data=DOTAv1.yaml model=yolov8n-obb.pt epochs=100 imgsz=1024 + # Train a new YOLO11n-OBB model on the DOTAv1 dataset + yolo obb train data=DOTAv1.yaml model=yolo11n-obb.pt epochs=100 imgsz=1024 ``` For more details on how to split and preprocess the DOTA images, refer to the [split DOTA images section](#split-dota-images). diff --git a/docs/en/datasets/obb/dota8.md b/docs/en/datasets/obb/dota8.md index f24ea5bce2..199c91bf06 100644 --- a/docs/en/datasets/obb/dota8.md +++ b/docs/en/datasets/obb/dota8.md @@ -1,7 +1,7 @@ --- comments: true -description: Explore the DOTA8 dataset - a small, versatile oriented object detection dataset ideal for testing and debugging object detection models using Ultralytics YOLOv8. -keywords: DOTA8 dataset, Ultralytics, YOLOv8, object detection, debugging, training models, oriented object detection, dataset YAML +description: Explore the DOTA8 dataset - a small, versatile oriented object detection dataset ideal for testing and debugging object detection models using Ultralytics YOLO11. +keywords: DOTA8 dataset, Ultralytics, YOLO11, object detection, debugging, training models, oriented object detection, dataset YAML --- # DOTA8 Dataset @@ -10,7 +10,7 @@ keywords: DOTA8 dataset, Ultralytics, YOLOv8, object detection, debugging, train [Ultralytics](https://www.ultralytics.com/) DOTA8 is a small, but versatile oriented [object detection](https://www.ultralytics.com/glossary/object-detection) dataset composed of the first 8 images of the split DOTAv1 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging object detection models, or for experimenting with new detection approaches.
With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets. -This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLOv8](https://github.com/ultralytics/ultralytics). +This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLO11](https://github.com/ultralytics/ultralytics). ## Dataset YAML @@ -24,7 +24,7 @@ A YAML (Yet Another Markup Language) file is used to define the dataset configur ## Usage -To train a YOLOv8n-obb model on the DOTA8 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. +To train a YOLO11n-obb model on the DOTA8 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! example "Train Example" @@ -34,7 +34,7 @@ To train a YOLOv8n-obb model on the DOTA8 dataset for 100 [epochs](https://www.u from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-obb.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-obb.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="dota8.yaml", epochs=100, imgsz=640) @@ -44,7 +44,7 @@ To train a YOLOv8n-obb model on the DOTA8 dataset for 100 [epochs](https://www.u ```bash # Start training from a pretrained *.pt model - yolo obb train data=dota8.yaml model=yolov8n-obb.pt epochs=100 imgsz=640 + yolo obb train data=dota8.yaml model=yolo11n-obb.pt epochs=100 imgsz=640 ``` ## Sample Images and Annotations @@ -84,11 +84,11 @@ A special note of gratitude to the team behind the DOTA datasets for their comme ### What is the DOTA8 dataset and how can it be used? -The DOTA8 dataset is a small, versatile oriented object detection dataset made up of the first 8 images from the DOTAv1 split set, with 4 images designated for training and 4 for validation. It's ideal for testing and debugging object detection models like Ultralytics YOLOv8. Due to its manageable size and diversity, it helps in identifying pipeline errors and running sanity checks before deploying larger datasets. Learn more about object detection with [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics). +The DOTA8 dataset is a small, versatile oriented object detection dataset made up of the first 8 images from the DOTAv1 split set, with 4 images designated for training and 4 for validation. It's ideal for testing and debugging object detection models like Ultralytics YOLO11. Due to its manageable size and diversity, it helps in identifying pipeline errors and running sanity checks before deploying larger datasets. Learn more about object detection with [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics). -### How do I train a YOLOv8 model using the DOTA8 dataset? +### How do I train a YOLO11 model using the DOTA8 dataset? -To train a YOLOv8n-obb model on the DOTA8 dataset for 100 epochs with an image size of 640, you can use the following code snippets. For comprehensive argument options, refer to the model [Training](../../modes/train.md) page. 
+To train a YOLO11n-obb model on the DOTA8 dataset for 100 epochs with an image size of 640, you can use the following code snippets. For comprehensive argument options, refer to the model [Training](../../modes/train.md) page. !!! example "Train Example" @@ -98,7 +98,7 @@ To train a YOLOv8n-obb model on the DOTA8 dataset for 100 epochs with an image s from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-obb.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-obb.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="dota8.yaml", epochs=100, imgsz=640) @@ -108,7 +108,7 @@ To train a YOLOv8n-obb model on the DOTA8 dataset for 100 epochs with an image s ```bash # Start training from a pretrained *.pt model - yolo obb train data=dota8.yaml model=yolov8n-obb.pt epochs=100 imgsz=640 + yolo obb train data=dota8.yaml model=yolo11n-obb.pt epochs=100 imgsz=640 ``` ### What are the key features of the DOTA dataset and where can I access the YAML file? @@ -119,6 +119,6 @@ The DOTA dataset is known for its large-scale benchmark and the challenges it pr Mosaicing combines multiple images into one during training, increasing the variety of objects and contexts within each batch. This improves a model's ability to generalize to different object sizes, aspect ratios, and scenes. This technique can be visually demonstrated through a training batch composed of mosaiced DOTA8 dataset images, helping in robust model development. Explore more about mosaicing and training techniques on our [Training](../../modes/train.md) page. -### Why should I use Ultralytics YOLOv8 for object detection tasks? +### Why should I use Ultralytics YOLO11 for object detection tasks? -Ultralytics YOLOv8 provides state-of-the-art real-time object detection capabilities, including features like oriented bounding boxes (OBB), [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation), and a highly versatile training pipeline. It's suitable for various applications and offers pretrained models for efficient fine-tuning. Explore further about the advantages and usage in the [Ultralytics YOLOv8 documentation](https://github.com/ultralytics/ultralytics). +Ultralytics YOLO11 provides state-of-the-art real-time object detection capabilities, including features like oriented bounding boxes (OBB), [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation), and a highly versatile training pipeline. It's suitable for various applications and offers pretrained models for efficient fine-tuning. Explore further about the advantages and usage in the [Ultralytics YOLO11 documentation](https://github.com/ultralytics/ultralytics). 
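The DOTA8 examples above and the OBB format reference that follows rely on the same label convention: each object is one line of text holding a class index and four corner points normalized by image width and height. As a quick illustration, here is a minimal standalone sketch of that normalization; the helper name and the pixel coordinates are invented for demonstration, and `convert_dota_to_yolo_obb` remains the supported way to convert real DOTA annotations.

```python
def corners_to_yolo_obb(class_index, corners, img_w, img_h):
    """Format four (x, y) pixel corners as a YOLO OBB label line: cls x1 y1 ... x4 y4."""
    coords = []
    for x, y in corners:
        coords += [x / img_w, y / img_h]  # normalize each coordinate into the 0-1 range
    return " ".join([str(class_index)] + [f"{c:.6f}" for c in coords])


# Hypothetical rotated box inside a 1024x1024 DOTA-style image
print(corners_to_yolo_obb(0, [(100, 50), (200, 60), (190, 120), (90, 110)], 1024, 1024))
```

A consistent corner ordering across all labels is assumed; mixing orderings between annotations would make otherwise identical boxes look different to the training pipeline.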
diff --git a/docs/en/datasets/obb/index.md b/docs/en/datasets/obb/index.md index edeffb83af..7ecaf3450e 100644 --- a/docs/en/datasets/obb/index.md +++ b/docs/en/datasets/obb/index.md @@ -39,8 +39,8 @@ To train a model using these OBB formats: ```python from ultralytics import YOLO - # Create a new YOLOv8n-OBB model from scratch - model = YOLO("yolov8n-obb.yaml") + # Create a new YOLO11n-OBB model from scratch + model = YOLO("yolo11n-obb.yaml") # Train the model on the DOTAv1 dataset results = model.train(data="DOTAv1.yaml", epochs=100, imgsz=1024) @@ -49,8 +49,8 @@ To train a model using these OBB formats: === "CLI" ```bash - # Train a new YOLOv8n-OBB model on the DOTAv1 dataset - yolo obb train data=DOTAv1.yaml model=yolov8n-obb.pt epochs=100 imgsz=1024 + # Train a new YOLO11n-OBB model on the DOTAv1 dataset + yolo obb train data=DOTAv1.yaml model=yolo11n-obb.pt epochs=100 imgsz=1024 ``` ## Supported Datasets @@ -92,7 +92,7 @@ It's imperative to validate the compatibility of the dataset with your model and Oriented Bounding Boxes (OBB) are a type of bounding box annotation where the box can be rotated to align more closely with the object being detected, rather than just being axis-aligned. This is particularly useful in aerial or satellite imagery where objects might not be aligned with the image axes. In Ultralytics YOLO models, OBBs are represented by their four corner points in the YOLO OBB format. This allows for more accurate object detection since the bounding boxes can rotate to fit the objects better. -### How do I convert my existing DOTA dataset labels to YOLO OBB format for use with Ultralytics YOLOv8? +### How do I convert my existing DOTA dataset labels to YOLO OBB format for use with Ultralytics YOLO11? You can convert DOTA dataset labels to YOLO OBB format using the `convert_dota_to_yolo_obb` function from Ultralytics. This conversion ensures compatibility with the Ultralytics YOLO models, enabling you to leverage the OBB capabilities for enhanced object detection. Here's a quick example: @@ -104,9 +104,9 @@ convert_dota_to_yolo_obb("path/to/DOTA") This script will reformat your DOTA annotations into a YOLO-compatible format. -### How do I train a YOLOv8 model with oriented bounding boxes (OBB) on my dataset? +### How do I train a YOLO11 model with oriented bounding boxes (OBB) on my dataset? -Training a YOLOv8 model with OBBs involves ensuring your dataset is in the YOLO OBB format and then using the Ultralytics API to train the model. Here's an example in both Python and CLI: +Training a YOLO11 model with OBBs involves ensuring your dataset is in the YOLO OBB format and then using the Ultralytics API to train the model. Here's an example in both Python and CLI: !!! 
example @@ -115,8 +115,8 @@ Training a YOLOv8 model with OBBs involves ensuring your dataset is in the YOLO ```python from ultralytics import YOLO - # Create a new YOLOv8n-OBB model from scratch - model = YOLO("yolov8n-obb.yaml") + # Create a new YOLO11n-OBB model from scratch + model = YOLO("yolo11n-obb.yaml") # Train the model on the custom dataset results = model.train(data="your_dataset.yaml", epochs=100, imgsz=640) @@ -125,8 +125,8 @@ Training a YOLOv8 model with OBBs involves ensuring your dataset is in the YOLO === "CLI" ```bash - # Train a new YOLOv8n-OBB model on the custom dataset - yolo obb train data=your_dataset.yaml model=yolov8n-obb.yaml epochs=100 imgsz=640 + # Train a new YOLO11n-OBB model on the custom dataset + yolo obb train data=your_dataset.yaml model=yolo11n-obb.yaml epochs=100 imgsz=640 ``` This ensures your model leverages the detailed OBB annotations for improved detection [accuracy](https://www.ultralytics.com/glossary/accuracy). @@ -142,6 +142,6 @@ Currently, Ultralytics supports the following datasets for OBB training: These datasets are tailored for scenarios where OBBs offer a significant advantage, such as aerial and satellite image analysis. -### Can I use my own dataset with oriented bounding boxes for YOLOv8 training, and if so, how? +### Can I use my own dataset with oriented bounding boxes for YOLO11 training, and if so, how? -Yes, you can use your own dataset with oriented bounding boxes for YOLOv8 training. Ensure your dataset annotations are converted to the YOLO OBB format, which involves defining bounding boxes by their four corner points. You can then create a YAML configuration file specifying the dataset paths, classes, and other necessary details. For more information on creating and configuring your datasets, refer to the [Supported Datasets](#supported-datasets) section. +Yes, you can use your own dataset with oriented bounding boxes for YOLO11 training. Ensure your dataset annotations are converted to the YOLO OBB format, which involves defining bounding boxes by their four corner points. You can then create a YAML configuration file specifying the dataset paths, classes, and other necessary details. For more information on creating and configuring your datasets, refer to the [Supported Datasets](#supported-datasets) section. diff --git a/docs/en/datasets/pose/coco.md b/docs/en/datasets/pose/coco.md index 20042b40e2..22adca3b9f 100644 --- a/docs/en/datasets/pose/coco.md +++ b/docs/en/datasets/pose/coco.md @@ -12,14 +12,7 @@ The [COCO-Pose](https://cocodataset.org/#keypoints-2017) dataset is a specialize ## COCO-Pose Pretrained Models -| Model | size
<br><sup>(pixels) | mAP<sup>pose<br>50-95 | mAP<sup>pose<br>50 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>
(B) | -| ---------------------------------------------------------------------------------------------------- | --------------------- | --------------------- | ------------------ | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | -| [YOLOv8n-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-pose.pt) | 640 | 50.4 | 80.1 | 131.8 | 1.18 | 3.3 | 9.2 | -| [YOLOv8s-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-pose.pt) | 640 | 60.0 | 86.2 | 233.2 | 1.42 | 11.6 | 30.2 | -| [YOLOv8m-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-pose.pt) | 640 | 65.0 | 88.8 | 456.3 | 2.00 | 26.4 | 81.0 | -| [YOLOv8l-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-pose.pt) | 640 | 67.6 | 90.0 | 784.5 | 2.59 | 44.4 | 168.6 | -| [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-pose.pt) | 640 | 69.2 | 90.2 | 1607.1 | 3.73 | 69.4 | 263.2 | -| [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-pose-p6.pt) | 1280 | 71.6 | 91.2 | 4088.7 | 10.04 | 99.1 | 1066.4 | +{% include "macros/yolo-pose-perf.md" %} ## Key Features @@ -51,7 +44,7 @@ A YAML (Yet Another Markup Language) file is used to define the dataset configur ## Usage -To train a YOLOv8n-pose model on the COCO-Pose dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. +To train a YOLO11n-pose model on the COCO-Pose dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! example "Train Example" @@ -61,7 +54,7 @@ To train a YOLOv8n-pose model on the COCO-Pose dataset for 100 [epochs](https:// from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-pose.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="coco-pose.yaml", epochs=100, imgsz=640) @@ -71,7 +64,7 @@ To train a YOLOv8n-pose model on the COCO-Pose dataset for 100 [epochs](https:// ```bash # Start training from a pretrained *.pt model - yolo pose train data=coco-pose.yaml model=yolov8n-pose.pt epochs=100 imgsz=640 + yolo pose train data=coco-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640 ``` ## Sample Images and Annotations @@ -109,11 +102,11 @@ We would like to acknowledge the COCO Consortium for creating and maintaining th ### What is the COCO-Pose dataset and how is it used with Ultralytics YOLO for pose estimation? -The [COCO-Pose](https://cocodataset.org/#keypoints-2017) dataset is a specialized version of the COCO (Common Objects in Context) dataset designed for pose estimation tasks. It builds upon the COCO Keypoints 2017 images and annotations, allowing for the training of models like Ultralytics YOLO for detailed pose estimation. For instance, you can use the COCO-Pose dataset to train a YOLOv8n-pose model by loading a pretrained model and training it with a YAML configuration. For training examples, refer to the [Training](../../modes/train.md) documentation. 
+The [COCO-Pose](https://cocodataset.org/#keypoints-2017) dataset is a specialized version of the COCO (Common Objects in Context) dataset designed for pose estimation tasks. It builds upon the COCO Keypoints 2017 images and annotations, allowing for the training of models like Ultralytics YOLO for detailed pose estimation. For instance, you can use the COCO-Pose dataset to train a YOLO11n-pose model by loading a pretrained model and training it with a YAML configuration. For training examples, refer to the [Training](../../modes/train.md) documentation. -### How can I train a YOLOv8 model on the COCO-Pose dataset? +### How can I train a YOLO11 model on the COCO-Pose dataset? -Training a YOLOv8 model on the COCO-Pose dataset can be accomplished using either Python or CLI commands. For example, to train a YOLOv8n-pose model for 100 epochs with an image size of 640, you can follow the steps below: +Training a YOLO11 model on the COCO-Pose dataset can be accomplished using either Python or CLI commands. For example, to train a YOLO11n-pose model for 100 epochs with an image size of 640, you can follow the steps below: !!! example "Train Example" @@ -123,7 +116,7 @@ Training a YOLOv8 model on the COCO-Pose dataset can be accomplished using eithe from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-pose.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="coco-pose.yaml", epochs=100, imgsz=640) @@ -133,14 +126,14 @@ Training a YOLOv8 model on the COCO-Pose dataset can be accomplished using eithe ```bash # Start training from a pretrained *.pt model - yolo pose train data=coco-pose.yaml model=yolov8n-pose.pt epochs=100 imgsz=640 + yolo pose train data=coco-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640 ``` For more details on the training process and available arguments, check the [training page](../../modes/train.md). ### What are the different metrics provided by the COCO-Pose dataset for evaluating model performance? -The COCO-Pose dataset provides several standardized evaluation metrics for pose estimation tasks, similar to the original COCO dataset. Key metrics include the Object Keypoint Similarity (OKS), which evaluates the [accuracy](https://www.ultralytics.com/glossary/accuracy) of predicted keypoints against ground truth annotations. These metrics allow for thorough performance comparisons between different models. For instance, the COCO-Pose pretrained models such as YOLOv8n-pose, YOLOv8s-pose, and others have specific performance metrics listed in the documentation, like mAPpose50-95 and mAPpose50. +The COCO-Pose dataset provides several standardized evaluation metrics for pose estimation tasks, similar to the original COCO dataset. Key metrics include the Object Keypoint Similarity (OKS), which evaluates the [accuracy](https://www.ultralytics.com/glossary/accuracy) of predicted keypoints against ground truth annotations. These metrics allow for thorough performance comparisons between different models. For instance, the COCO-Pose pretrained models such as YOLO11n-pose, YOLO11s-pose, and others have specific performance metrics listed in the documentation, like mAPpose50-95 and mAPpose50. ### How is the dataset structured and split for the COCO-Pose dataset? 
@@ -154,6 +147,6 @@ These subsets help organize the training, validation, and testing phases effecti ### What are the key features and applications of the COCO-Pose dataset? -The COCO-Pose dataset extends the COCO Keypoints 2017 annotations to include 17 keypoints for human figures, enabling detailed pose estimation. Standardized evaluation metrics (e.g., OKS) facilitate comparisons across different models. Applications of the COCO-Pose dataset span various domains, such as sports analytics, healthcare, and human-computer interaction, wherever detailed pose estimation of human figures is required. For practical use, leveraging pretrained models like those provided in the documentation (e.g., YOLOv8n-pose) can significantly streamline the process ([Key Features](#key-features)). +The COCO-Pose dataset extends the COCO Keypoints 2017 annotations to include 17 keypoints for human figures, enabling detailed pose estimation. Standardized evaluation metrics (e.g., OKS) facilitate comparisons across different models. Applications of the COCO-Pose dataset span various domains, such as sports analytics, healthcare, and human-computer interaction, wherever detailed pose estimation of human figures is required. For practical use, leveraging pretrained models like those provided in the documentation (e.g., YOLO11n-pose) can significantly streamline the process ([Key Features](#key-features)). If you use the COCO-Pose dataset in your research or development work, please cite the paper with the following [BibTeX entry](#citations-and-acknowledgments). diff --git a/docs/en/datasets/pose/coco8-pose.md b/docs/en/datasets/pose/coco8-pose.md index 95157b794e..ab24f86207 100644 --- a/docs/en/datasets/pose/coco8-pose.md +++ b/docs/en/datasets/pose/coco8-pose.md @@ -1,7 +1,7 @@ --- comments: true -description: Explore the compact, versatile COCO8-Pose dataset for testing and debugging object detection models. Ideal for quick experiments with YOLOv8. -keywords: COCO8-Pose, Ultralytics, pose detection dataset, object detection, YOLOv8, machine learning, computer vision, training data +description: Explore the compact, versatile COCO8-Pose dataset for testing and debugging object detection models. Ideal for quick experiments with YOLO11. +keywords: COCO8-Pose, Ultralytics, pose detection dataset, object detection, YOLO11, machine learning, computer vision, training data --- # COCO8-Pose Dataset @@ -10,7 +10,7 @@ keywords: COCO8-Pose, Ultralytics, pose detection dataset, object detection, YOL [Ultralytics](https://www.ultralytics.com/) COCO8-Pose is a small, but versatile pose detection dataset composed of the first 8 images of the COCO train 2017 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging [object detection](https://www.ultralytics.com/glossary/object-detection) models, or for experimenting with new detection approaches. With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets. -This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLOv8](https://github.com/ultralytics/ultralytics). +This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLO11](https://github.com/ultralytics/ultralytics). 
## Dataset YAML @@ -24,7 +24,7 @@ A YAML (Yet Another Markup Language) file is used to define the dataset configur ## Usage -To train a YOLOv8n-pose model on the COCO8-Pose dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. +To train a YOLO11n-pose model on the COCO8-Pose dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! example "Train Example" @@ -34,7 +34,7 @@ To train a YOLOv8n-pose model on the COCO8-Pose dataset for 100 [epochs](https:/ from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-pose.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="coco8-pose.yaml", epochs=100, imgsz=640) @@ -44,7 +44,7 @@ To train a YOLOv8n-pose model on the COCO8-Pose dataset for 100 [epochs](https:/ ```bash # Start training from a pretrained *.pt model - yolo pose train data=coco8-pose.yaml model=yolov8n-pose.pt epochs=100 imgsz=640 + yolo pose train data=coco8-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640 ``` ## Sample Images and Annotations @@ -80,13 +80,13 @@ We would like to acknowledge the COCO Consortium for creating and maintaining th ## FAQ -### What is the COCO8-Pose dataset, and how is it used with Ultralytics YOLOv8? +### What is the COCO8-Pose dataset, and how is it used with Ultralytics YOLO11? -The COCO8-Pose dataset is a small, versatile pose detection dataset that includes the first 8 images from the COCO train 2017 set, with 4 images for training and 4 for validation. It's designed for testing and debugging object detection models and experimenting with new detection approaches. This dataset is ideal for quick experiments with [Ultralytics YOLOv8](https://docs.ultralytics.com/models/yolov8/). For more details on dataset configuration, check out the dataset YAML file [here](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8-pose.yaml). +The COCO8-Pose dataset is a small, versatile pose detection dataset that includes the first 8 images from the COCO train 2017 set, with 4 images for training and 4 for validation. It's designed for testing and debugging object detection models and experimenting with new detection approaches. This dataset is ideal for quick experiments with [Ultralytics YOLO11](https://docs.ultralytics.com/models/yolo11/). For more details on dataset configuration, check out the dataset YAML file [here](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8-pose.yaml). -### How do I train a YOLOv8 model using the COCO8-Pose dataset in Ultralytics? +### How do I train a YOLO11 model using the COCO8-Pose dataset in Ultralytics? -To train a YOLOv8n-pose model on the COCO8-Pose dataset for 100 epochs with an image size of 640, follow these examples: +To train a YOLO11n-pose model on the COCO8-Pose dataset for 100 epochs with an image size of 640, follow these examples: !!! 
example "Train Example" @@ -96,7 +96,7 @@ To train a YOLOv8n-pose model on the COCO8-Pose dataset for 100 epochs with an i from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-pose.pt") + model = YOLO("yolo11n-pose.pt") # Train the model results = model.train(data="coco8-pose.yaml", epochs=100, imgsz=640) @@ -105,7 +105,7 @@ To train a YOLOv8n-pose model on the COCO8-Pose dataset for 100 epochs with an i === "CLI" ```bash - yolo pose train data=coco8-pose.yaml model=yolov8n-pose.pt epochs=100 imgsz=640 + yolo pose train data=coco8-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640 ``` For a comprehensive list of training arguments, refer to the model [Training](../../modes/train.md) page. @@ -120,12 +120,12 @@ The COCO8-Pose dataset offers several benefits: For more about its features and usage, see the [Dataset Introduction](#introduction) section. -### How does mosaicing benefit the YOLOv8 training process using the COCO8-Pose dataset? +### How does mosaicing benefit the YOLO11 training process using the COCO8-Pose dataset? Mosaicing, demonstrated in the sample images of the COCO8-Pose dataset, combines multiple images into one, increasing the variety of objects and scenes within each training batch. This technique helps improve the model's ability to generalize across various object sizes, aspect ratios, and contexts, ultimately enhancing model performance. See the [Sample Images and Annotations](#sample-images-and-annotations) section for example images. ### Where can I find the COCO8-Pose dataset YAML file and how do I use it? -The COCO8-Pose dataset YAML file can be found [here](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8-pose.yaml). This file defines the dataset configuration, including paths, classes, and other relevant information. Use this file with the YOLOv8 training scripts as mentioned in the [Train Example](#how-do-i-train-a-yolov8-model-using-the-coco8-pose-dataset-in-ultralytics) section. +The COCO8-Pose dataset YAML file can be found [here](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8-pose.yaml). This file defines the dataset configuration, including paths, classes, and other relevant information. Use this file with the YOLO11 training scripts as mentioned in the [Train Example](#how-do-i-train-a-yolo11-model-using-the-coco8-pose-dataset-in-ultralytics) section. For more FAQs and detailed documentation, visit the [Ultralytics Documentation](https://docs.ultralytics.com/). diff --git a/docs/en/datasets/pose/hand-keypoints.md b/docs/en/datasets/pose/hand-keypoints.md new file mode 100644 index 0000000000..86548a0233 --- /dev/null +++ b/docs/en/datasets/pose/hand-keypoints.md @@ -0,0 +1,175 @@ +--- +comments: true +description: Explore the hand keypoints estimation dataset for advanced pose estimation. Learn about datasets, pretrained models, metrics, and applications for training with YOLO. +keywords: Hand KeyPoints, pose estimation, dataset, keypoints, MediaPipe, YOLO, deep learning, computer vision +--- + +# Hand Keypoints Dataset + +## Introduction + +The hand-keypoints dataset contains 26,768 images of hands annotated with keypoints, making it suitable for training models like Ultralytics YOLO for pose estimation tasks. The annotations were generated using the Google MediaPipe library, ensuring high accuracy and consistency, and the dataset is compatible [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics) formats. 
+ +## Hand Landmarks + +![Hand Landmarks](https://github.com/ultralytics/docs/releases/download/0/hand_landmarks.jpg) + +## KeyPoints + +The dataset includes keypoints for hand detection. The keypoints are annotated as follows: + +1. Wrist +2. Thumb (4 points) +3. Index finger (4 points) +4. Middle finger (4 points) +5. Ring finger (4 points) +6. Little finger (4 points) + +Each hand has a total of 21 keypoints. + +## Key Features + +- **Large Dataset**: 26,768 images with hand keypoint annotations. +- **YOLO11 Compatibility**: Ready for use with YOLO11 models. +- **21 Keypoints**: Detailed hand pose representation. + +## Dataset Structure + +The hand keypoint dataset is split into two subsets: + +1. **Train**: This subset contains 18,776 images from the hand keypoints dataset, annotated for training pose estimation models. +2. **Val**: This subset contains 7,992 images that can be used for validation purposes during model training. + +## Applications + +Hand keypoints can be used for gesture recognition, AR/VR controls, robotic manipulation, and hand movement analysis in healthcare. They can also be applied in animation for motion capture and biometric authentication systems for security. + +## Dataset YAML + +A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the Hand Keypoints dataset, the `hand-keypoints.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/hand-keypoints.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/hand-keypoints.yaml). + +!!! example "ultralytics/cfg/datasets/hand-keypoints.yaml" + + ```yaml + --8<-- "ultralytics/cfg/datasets/hand-keypoints.yaml" + ``` + +## Usage + +To train a YOLO11n-pose model on the Hand Keypoints dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. + +!!! example "Train Example" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training) + + # Train the model + results = model.train(data="hand-keypoints.yaml", epochs=100, imgsz=640) + ``` + + === "CLI" + + ```bash + # Start training from a pretrained *.pt model + yolo pose train data=hand-keypoints.yaml model=yolo11n-pose.pt epochs=100 imgsz=640 + ``` + +## Sample Images and Annotations + +The Hand keypoints dataset contains a diverse set of images with human hands annotated with keypoints. Here are some examples of images from the dataset, along with their corresponding annotations: + +![Dataset sample image](https://github.com/ultralytics/docs/releases/download/0/human-hand-pose.jpg) + +- **Mosaiced Image**: This image demonstrates a training batch composed of mosaiced dataset images. Mosaicing is a technique used during training that combines multiple images into a single image to increase the variety of objects and scenes within each training batch. This helps improve the model's ability to generalize to different object sizes, aspect ratios, and contexts. + +The example showcases the variety and complexity of the images in the Hand Keypoints dataset and the benefits of using mosaicing during the training process.
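Once training with the snippet above has produced a checkpoint, predictions can be inspected programmatically. The sketch below is illustrative only: the weights path and test image are hypothetical placeholders, and it assumes the standard Ultralytics `Results.keypoints` container for pose output.

```python
from ultralytics import YOLO

# Hypothetical paths: substitute the weights and image from your own run
model = YOLO("runs/pose/train/weights/best.pt")
results = model("path/to/hand.jpg")

for r in results:
    if r.keypoints is None:
        continue
    for hand in r.keypoints.xy:  # one (21, 2) tensor of pixel coordinates per hand
        wrist_x, wrist_y = hand[0].tolist()  # index 0 is the wrist in this dataset
        print(f"wrist at ({wrist_x:.1f}, {wrist_y:.1f})")
```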
+ +## Citations and Acknowledgments + +If you use the hand-keypoints dataset in your research or development work, please acknowledge the following sources: + +!!! quote "" + + === "Credits" + + We would like to thank the following sources for providing the images used in this dataset: + + - [11k Hands](https://sites.google.com/view/11khands) + - [2000 Hand Gestures](https://www.kaggle.com/datasets/ritikagiridhar/2000-hand-gestures) + - [Gesture Recognition](https://www.kaggle.com/datasets/imsparsh/gesture-recognition) + + The images were collected and used under the respective licenses provided by each platform and are distributed under the [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-nc-sa/4.0/). + +We would also like to acknowledge the creator of this dataset, [Rion Dsilva](https://www.linkedin.com/in/rion-dsilva-043464229/), for his great contribution to Vision AI research. + +## FAQ + +### How do I train a YOLO11 model on the Hand Keypoints dataset? + +To train a YOLO11 model on the Hand Keypoints dataset, you can use either Python or the command line interface (CLI). Here's an example for training a YOLO11n-pose model for 100 epochs with an image size of 640: + +!!! Example + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training) + + # Train the model + results = model.train(data="hand-keypoints.yaml", epochs=100, imgsz=640) + ``` + + === "CLI" + + ```bash + # Start training from a pretrained *.pt model + yolo pose train data=hand-keypoints.yaml model=yolo11n-pose.pt epochs=100 imgsz=640 + ``` + +For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. + +### What are the key features of the Hand Keypoints dataset? + +The Hand Keypoints dataset is designed for advanced pose estimation tasks and includes several key features: + +- **Large Dataset**: Contains 26,768 images with hand keypoint annotations. +- **YOLO11 Compatibility**: Ready for use with YOLO11 models. +- **21 Keypoints**: Detailed hand pose representation, including wrist and finger joints. + +For more details, you can explore the [Hand Keypoints Dataset](#introduction) section. + +### What applications can benefit from using the Hand Keypoints dataset? + +The Hand Keypoints dataset can be applied in various fields, including: + +- **Gesture Recognition**: Enhancing human-computer interaction. +- **AR/VR Controls**: Improving user experience in augmented and virtual reality. +- **Robotic Manipulation**: Enabling precise control of robotic hands. +- **Healthcare**: Analyzing hand movements for medical diagnostics. +- **Animation**: Capturing motion for realistic animations. +- **Biometric Authentication**: Enhancing security systems. + +For more information, refer to the [Applications](#applications) section. + +### How is the Hand Keypoints dataset structured? + +The Hand Keypoints dataset is divided into two subsets: + +1. **Train**: Contains 18,776 images for training pose estimation models. +2. **Val**: Contains 7,992 images for validation purposes during model training. + +This structure ensures a comprehensive training and validation process. For more details, see the [Dataset Structure](#dataset-structure) section. + +### How do I use the dataset YAML file for training? 
+ +The dataset configuration is defined in a YAML file, which includes paths, classes, and other relevant information. The `hand-keypoints.yaml` file can be found at [hand-keypoints.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/hand-keypoints.yaml). + +To use this YAML file for training, specify it in your training script or CLI command as shown in the training example above. For more details, refer to the [Dataset YAML](#dataset-yaml) section. diff --git a/docs/en/datasets/pose/index.md b/docs/en/datasets/pose/index.md index 9a6b6d930f..296b74f831 100644 --- a/docs/en/datasets/pose/index.md +++ b/docs/en/datasets/pose/index.md @@ -72,7 +72,7 @@ The `train` and `val` fields specify the paths to the directories containing the from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-pose.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="coco8-pose.yaml", epochs=100, imgsz=640) @@ -82,7 +82,7 @@ The `train` and `val` fields specify the paths to the directories containing the ```bash # Start training from a pretrained *.pt model - yolo pose train data=coco8-pose.yaml model=yolov8n-pose.pt epochs=100 imgsz=640 + yolo pose train data=coco8-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640 ``` ## Supported Datasets @@ -118,6 +118,15 @@ This section outlines the datasets that are compatible with Ultralytics YOLO for - **Usage**: Great for animal pose or any other pose that is not human-based. - [Read more about Tiger-Pose](tiger-pose.md) +### Hand Keypoints + +- **Description**: Hand keypoints pose dataset comprises nearly 26K images, with 18776 images allocated for training and 7992 for validation. +- **Label Format**: Same as Ultralytics YOLO format as described above, but with 21 keypoints for human hand and visible dimension. +- **Number of Classes**: 1 (Hand). +- **Keypoints**: 21 keypoints. +- **Usage**: Great for human hand pose estimation. +- [Read more about Hand Keypoints](hand-keypoints.md) + ### Adding your own dataset If you have your own dataset and would like to use it for training pose estimation models with Ultralytics YOLO format, ensure that it follows the format specified above under "Ultralytics YOLO format". Convert your annotations to the required format and specify the paths, number of classes, and class names in the YAML configuration file. @@ -162,7 +171,7 @@ To use the COCO-Pose dataset with Ultralytics YOLO: ```python from ultralytics import YOLO - model = YOLO("yolov8n-pose.pt") # load pretrained model + model = YOLO("yolo11n-pose.pt") # load pretrained model results = model.train(data="coco-pose.yaml", epochs=100, imgsz=640) ``` @@ -179,7 +188,7 @@ To add your dataset: ```python from ultralytics import YOLO - model = YOLO("yolov8n-pose.pt") + model = YOLO("yolo11n-pose.pt") results = model.train(data="your-dataset.yaml", epochs=100, imgsz=640) ``` diff --git a/docs/en/datasets/pose/tiger-pose.md b/docs/en/datasets/pose/tiger-pose.md index 06333b345b..d97d999664 100644 --- a/docs/en/datasets/pose/tiger-pose.md +++ b/docs/en/datasets/pose/tiger-pose.md @@ -1,7 +1,7 @@ --- comments: true description: Explore Ultralytics Tiger-Pose dataset with 263 diverse images. Ideal for testing, training, and refining pose estimation algorithms. 
-keywords: Ultralytics, Tiger-Pose, dataset, pose estimation, YOLOv8, training data, machine learning, neural networks +keywords: Ultralytics, Tiger-Pose, dataset, pose estimation, YOLO11, training data, machine learning, neural networks --- # Tiger-Pose Dataset @@ -12,7 +12,7 @@ keywords: Ultralytics, Tiger-Pose, dataset, pose estimation, YOLOv8, training da Despite its manageable size of 210 images, the tiger-pose dataset offers diversity, making it suitable for assessing training pipelines, identifying potential errors, and serving as a valuable preliminary step before working with larger datasets for pose estimation. -This dataset is intended for use with [Ultralytics HUB](https://hub.ultralytics.com/) and [YOLOv8](https://github.com/ultralytics/ultralytics). +This dataset is intended for use with [Ultralytics HUB](https://hub.ultralytics.com/) and [YOLO11](https://github.com/ultralytics/ultralytics).<p


@@ -22,7 +22,7 @@ This dataset is intended for use with [Ultralytics HUB](https://hub.ultralytics. allowfullscreen>
- Watch: Train YOLOv8 Pose Model on Tiger-Pose Dataset Using Ultralytics HUB + Watch: Train YOLO11 Pose Model on Tiger-Pose Dataset Using Ultralytics HUB

## Dataset YAML @@ -37,7 +37,7 @@ A YAML (Yet Another Markup Language) file serves as the means to specify the con ## Usage -To train a YOLOv8n-pose model on the Tiger-Pose dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. +To train a YOLO11n-pose model on the Tiger-Pose dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! example "Train Example" @@ -47,7 +47,7 @@ To train a YOLOv8n-pose model on the Tiger-Pose dataset for 100 [epochs](https:/ from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-pose.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="tiger-pose.yaml", epochs=100, imgsz=640) @@ -57,7 +57,7 @@ To train a YOLOv8n-pose model on the Tiger-Pose dataset for 100 [epochs](https:/ ```bash # Start training from a pretrained *.pt model - yolo task=pose mode=train data=tiger-pose.yaml model=yolov8n-pose.pt epochs=100 imgsz=640 + yolo task=pose mode=train data=tiger-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640 ``` ## Sample Images and Annotations @@ -101,11 +101,11 @@ The dataset has been released available under the [AGPL-3.0 License](https://git ### What is the Ultralytics Tiger-Pose dataset used for? -The Ultralytics Tiger-Pose dataset is designed for pose estimation tasks, consisting of 263 images sourced from a [YouTube video](https://www.youtube.com/watch?v=MIBAT6BGE6U&pp=ygUbVGlnZXIgd2Fsa2luZyByZWZlcmVuY2UubXA0). The dataset is divided into 210 training images and 53 validation images. It is particularly useful for testing, training, and refining pose estimation algorithms using [Ultralytics HUB](https://hub.ultralytics.com/) and [YOLOv8](https://github.com/ultralytics/ultralytics). +The Ultralytics Tiger-Pose dataset is designed for pose estimation tasks, consisting of 263 images sourced from a [YouTube video](https://www.youtube.com/watch?v=MIBAT6BGE6U&pp=ygUbVGlnZXIgd2Fsa2luZyByZWZlcmVuY2UubXA0). The dataset is divided into 210 training images and 53 validation images. It is particularly useful for testing, training, and refining pose estimation algorithms using [Ultralytics HUB](https://hub.ultralytics.com/) and [YOLO11](https://github.com/ultralytics/ultralytics). -### How do I train a YOLOv8 model on the Tiger-Pose dataset? +### How do I train a YOLO11 model on the Tiger-Pose dataset? -To train a YOLOv8n-pose model on the Tiger-Pose dataset for 100 epochs with an image size of 640, use the following code snippets. For more details, visit the [Training](../../modes/train.md) page: +To train a YOLO11n-pose model on the Tiger-Pose dataset for 100 epochs with an image size of 640, use the following code snippets. For more details, visit the [Training](../../modes/train.md) page: !!! 
example "Train Example" @@ -115,7 +115,7 @@ To train a YOLOv8n-pose model on the Tiger-Pose dataset for 100 epochs with an i from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-pose.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="tiger-pose.yaml", epochs=100, imgsz=640) @@ -126,16 +126,16 @@ To train a YOLOv8n-pose model on the Tiger-Pose dataset for 100 epochs with an i ```bash # Start training from a pretrained *.pt model - yolo task=pose mode=train data=tiger-pose.yaml model=yolov8n-pose.pt epochs=100 imgsz=640 + yolo task=pose mode=train data=tiger-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640 ``` ### What configurations does the `tiger-pose.yaml` file include? The `tiger-pose.yaml` file is used to specify the configuration details of the Tiger-Pose dataset. It includes crucial data such as file paths and class definitions. To see the exact configuration, you can check out the [Ultralytics Tiger-Pose Dataset Configuration File](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/tiger-pose.yaml). -### How can I run inference using a YOLOv8 model trained on the Tiger-Pose dataset? +### How can I run inference using a YOLO11 model trained on the Tiger-Pose dataset? -To perform inference using a YOLOv8 model trained on the Tiger-Pose dataset, you can use the following code snippets. For a detailed guide, visit the [Prediction](../../modes/predict.md) page: +To perform inference using a YOLO11 model trained on the Tiger-Pose dataset, you can use the following code snippets. For a detailed guide, visit the [Prediction](../../modes/predict.md) page: !!! example "Inference Example" @@ -161,4 +161,4 @@ To perform inference using a YOLOv8 model trained on the Tiger-Pose dataset, you ### What are the benefits of using the Tiger-Pose dataset for pose estimation? -The Tiger-Pose dataset, despite its manageable size of 210 images for training, provides a diverse collection of images that are ideal for testing pose estimation pipelines. The dataset helps identify potential errors and acts as a preliminary step before working with larger datasets. Additionally, the dataset supports the training and refinement of pose estimation algorithms using advanced tools like [Ultralytics HUB](https://hub.ultralytics.com/) and [YOLOv8](https://github.com/ultralytics/ultralytics), enhancing model performance and [accuracy](https://www.ultralytics.com/glossary/accuracy). +The Tiger-Pose dataset, despite its manageable size of 210 images for training, provides a diverse collection of images that are ideal for testing pose estimation pipelines. The dataset helps identify potential errors and acts as a preliminary step before working with larger datasets. Additionally, the dataset supports the training and refinement of pose estimation algorithms using advanced tools like [Ultralytics HUB](https://hub.ultralytics.com/) and [YOLO11](https://github.com/ultralytics/ultralytics), enhancing model performance and [accuracy](https://www.ultralytics.com/glossary/accuracy). diff --git a/docs/en/datasets/segment/carparts-seg.md b/docs/en/datasets/segment/carparts-seg.md index b798cacad1..96615752bf 100644 --- a/docs/en/datasets/segment/carparts-seg.md +++ b/docs/en/datasets/segment/carparts-seg.md @@ -18,7 +18,7 @@ Whether you're working on automotive research, developing AI solutions for vehic allowfullscreen>
- Watch: Carparts [Instance Segmentation](https://www.ultralytics.com/glossary/instance-segmentation) Using Ultralytics HUB + Watch: Carparts Instance Segmentation Using Ultralytics HUB

## Dataset Structure @@ -45,7 +45,7 @@ A YAML (Yet Another Markup Language) file is used to define the dataset configur ## Usage -To train Ultralytics YOLOv8n model on the Carparts Segmentation dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. +To train Ultralytics YOLO11n model on the Carparts Segmentation dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! example "Train Example" @@ -55,7 +55,7 @@ To train Ultralytics YOLOv8n model on the Carparts Segmentation dataset for 100 from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="carparts-seg.yaml", epochs=100, imgsz=640) @@ -65,7 +65,7 @@ To train Ultralytics YOLOv8n model on the Carparts Segmentation dataset for 100 ```bash # Start training from a pretrained *.pt model - yolo segment train data=carparts-seg.yaml model=yolov8n-seg.pt epochs=100 imgsz=640 + yolo segment train data=carparts-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640 ``` ## Sample Data and Annotations @@ -108,9 +108,9 @@ We extend our thanks to the Roboflow team for their dedication in developing and The [Roboflow Carparts Segmentation Dataset](https://universe.roboflow.com/gianmarco-russo-vt9xr/car-seg-un1pm?ref=ultralytics) is a curated collection of images and videos specifically designed for car part segmentation tasks in computer vision. This dataset includes a diverse range of visuals captured from multiple perspectives, making it an invaluable resource for training and testing segmentation models for automotive applications. -### How can I use the Carparts Segmentation Dataset with Ultralytics YOLOv8? +### How can I use the Carparts Segmentation Dataset with Ultralytics YOLO11? -To train a YOLOv8 model on the Carparts Segmentation dataset, you can follow these steps: +To train a YOLO11 model on the Carparts Segmentation dataset, you can follow these steps: !!! example "Train Example" @@ -120,7 +120,7 @@ To train a YOLOv8 model on the Carparts Segmentation dataset, you can follow the from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="carparts-seg.yaml", epochs=100, imgsz=640) @@ -130,7 +130,7 @@ To train a YOLOv8 model on the Carparts Segmentation dataset, you can follow the ```bash # Start training from a pretrained *.pt model - yolo segment train data=carparts-seg.yaml model=yolov8n-seg.pt epochs=100 imgsz=640 + yolo segment train data=carparts-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640 ``` For more details, refer to the [Training](../../modes/train.md) documentation. 
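For orientation, weights produced by the training commands above drop straight back into the same API for inference; a minimal sketch, assuming a hypothetical local weights path and test image:

```python
from ultralytics import YOLO

# Load custom-trained carparts segmentation weights (path is illustrative)
model = YOLO("runs/segment/train/weights/best.pt")

# Run inference on an image and display the predicted masks
results = model.predict("path/to/car.jpg", imgsz=640)
results[0].show()
```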
diff --git a/docs/en/datasets/segment/coco.md b/docs/en/datasets/segment/coco.md index 0f403c69af..5ff52f46a2 100644 --- a/docs/en/datasets/segment/coco.md +++ b/docs/en/datasets/segment/coco.md @@ -1,7 +1,7 @@ --- comments: true description: Explore the COCO-Seg dataset, an extension of COCO, with detailed segmentation annotations. Learn how to train YOLO models with COCO-Seg. -keywords: COCO-Seg, dataset, YOLO models, instance segmentation, object detection, COCO dataset, YOLOv8, computer vision, Ultralytics, machine learning +keywords: COCO-Seg, dataset, YOLO models, instance segmentation, object detection, COCO dataset, YOLO11, computer vision, Ultralytics, machine learning --- # COCO-Seg Dataset @@ -10,13 +10,7 @@ The [COCO-Seg](https://cocodataset.org/#home) dataset, an extension of the COCO ## COCO-Seg Pretrained Models -| Model | size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) | -| -------------------------------------------------------------------------------------------- | --------------------- | -------------------- | --------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | -| [YOLOv8n-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-seg.pt) | 640 | 36.7 | 30.5 | 96.1 | 1.21 | 3.4 | 12.6 | -| [YOLOv8s-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-seg.pt) | 640 | 44.6 | 36.8 | 155.7 | 1.47 | 11.8 | 42.6 | -| [YOLOv8m-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-seg.pt) | 640 | 49.9 | 40.8 | 317.0 | 2.18 | 27.3 | 110.2 | -| [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 | -| [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 | +{% include "macros/yolo-seg-perf.md" %} ## Key Features @@ -49,7 +43,7 @@ A YAML (Yet Another Markup Language) file is used to define the dataset configur ## Usage -To train a YOLOv8n-seg model on the COCO-Seg dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. +To train a YOLO11n-seg model on the COCO-Seg dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! example "Train Example" @@ -59,7 +53,7 @@ To train a YOLOv8n-seg model on the COCO-Seg dataset for 100 [epochs](https://ww from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="coco-seg.yaml", epochs=100, imgsz=640) @@ -69,7 +63,7 @@ To train a YOLOv8n-seg model on the COCO-Seg dataset for 100 [epochs](https://ww ```bash # Start training from a pretrained *.pt model - yolo segment train data=coco-seg.yaml model=yolov8n-seg.pt epochs=100 imgsz=640 + yolo segment train data=coco-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640 ``` ## Sample Images and Annotations @@ -109,9 +103,9 @@ We extend our thanks to the COCO Consortium for creating and maintaining this in The [COCO-Seg](https://cocodataset.org/#home) dataset is an extension of the original COCO (Common Objects in Context) dataset, specifically designed for instance segmentation tasks. While it uses the same images as the COCO dataset, COCO-Seg includes more detailed segmentation annotations, making it a powerful resource for researchers and developers focusing on object instance segmentation. -### How can I train a YOLOv8 model using the COCO-Seg dataset? +### How can I train a YOLO11 model using the COCO-Seg dataset? -To train a YOLOv8n-seg model on the COCO-Seg dataset for 100 epochs with an image size of 640, you can use the following code snippets. For a detailed list of available arguments, refer to the model [Training](../../modes/train.md) page. +To train a YOLO11n-seg model on the COCO-Seg dataset for 100 epochs with an image size of 640, you can use the following code snippets. 
For a detailed list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! example "Train Example" @@ -121,7 +115,7 @@ To train a YOLOv8n-seg model on the COCO-Seg dataset for 100 epochs with an imag from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="coco-seg.yaml", epochs=100, imgsz=640) @@ -131,7 +125,7 @@ To train a YOLOv8n-seg model on the COCO-Seg dataset for 100 epochs with an imag ```bash # Start training from a pretrained *.pt model - yolo segment train data=coco-seg.yaml model=yolov8n-seg.pt epochs=100 imgsz=640 + yolo segment train data=coco-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640 ``` ### What are the key features of the COCO-Seg dataset? @@ -145,15 +139,9 @@ The COCO-Seg dataset includes several key features: ### What pretrained models are available for COCO-Seg, and what are their performance metrics? -The COCO-Seg dataset supports multiple pretrained YOLOv8 segmentation models with varying performance metrics. Here's a summary of the available models and their key metrics: +The COCO-Seg dataset supports multiple pretrained YOLO11 segmentation models with varying performance metrics. Here's a summary of the available models and their key metrics: -| Model | size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) | -| -------------------------------------------------------------------------------------------- | --------------------- | -------------------- | --------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | -| [YOLOv8n-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-seg.pt) | 640 | 36.7 | 30.5 | 96.1 | 1.21 | 3.4 | 12.6 | -| [YOLOv8s-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-seg.pt) | 640 | 44.6 | 36.8 | 155.7 | 1.47 | 11.8 | 42.6 | -| [YOLOv8m-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-seg.pt) | 640 | 49.9 | 40.8 | 317.0 | 2.18 | 27.3 | 110.2 | -| [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 | -| [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 | +{% include "macros/yolo-seg-perf.md" %} ### How is the COCO-Seg dataset structured and what subsets does it contain? diff --git a/docs/en/datasets/segment/coco8-seg.md b/docs/en/datasets/segment/coco8-seg.md index 21abf3d802..3fc6974513 100644 --- a/docs/en/datasets/segment/coco8-seg.md +++ b/docs/en/datasets/segment/coco8-seg.md @@ -1,7 +1,7 @@ --- comments: true description: Discover the versatile and manageable COCO8-Seg dataset by Ultralytics, ideal for testing and debugging segmentation models or new detection approaches. -keywords: COCO8-Seg, Ultralytics, segmentation dataset, YOLOv8, COCO 2017, model training, computer vision, dataset configuration +keywords: COCO8-Seg, Ultralytics, segmentation dataset, YOLO11, COCO 2017, model training, computer vision, dataset configuration --- # COCO8-Seg Dataset @@ -10,7 +10,7 @@ keywords: COCO8-Seg, Ultralytics, segmentation dataset, YOLOv8, COCO 2017, model [Ultralytics](https://www.ultralytics.com/) COCO8-Seg is a small, but versatile [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation) dataset composed of the first 8 images of the COCO train 2017 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging segmentation models, or for experimenting with new detection approaches. With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets. -This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLOv8](https://github.com/ultralytics/ultralytics). +This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLO11](https://github.com/ultralytics/ultralytics). ## Dataset YAML @@ -24,7 +24,7 @@ A YAML (Yet Another Markup Language) file is used to define the dataset configur ## Usage -To train a YOLOv8n-seg model on the COCO8-Seg dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. +To train a YOLO11n-seg model on the COCO8-Seg dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! 
example "Train Example" @@ -34,7 +34,7 @@ To train a YOLOv8n-seg model on the COCO8-Seg dataset for 100 [epochs](https://w from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="coco8-seg.yaml", epochs=100, imgsz=640) @@ -44,7 +44,7 @@ To train a YOLOv8n-seg model on the COCO8-Seg dataset for 100 [epochs](https://w ```bash # Start training from a pretrained *.pt model - yolo segment train data=coco8-seg.yaml model=yolov8n-seg.pt epochs=100 imgsz=640 + yolo segment train data=coco8-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640 ``` ## Sample Images and Annotations @@ -80,13 +80,13 @@ We would like to acknowledge the COCO Consortium for creating and maintaining th ## FAQ -### What is the COCO8-Seg dataset, and how is it used in Ultralytics YOLOv8? +### What is the COCO8-Seg dataset, and how is it used in Ultralytics YOLO11? -The **COCO8-Seg dataset** is a compact instance segmentation dataset by Ultralytics, consisting of the first 8 images from the COCO train 2017 set—4 images for training and 4 for validation. This dataset is tailored for testing and debugging segmentation models or experimenting with new detection methods. It is particularly useful with Ultralytics [YOLOv8](https://github.com/ultralytics/ultralytics) and [HUB](https://hub.ultralytics.com/) for rapid iteration and pipeline error-checking before scaling to larger datasets. For detailed usage, refer to the model [Training](../../modes/train.md) page. +The **COCO8-Seg dataset** is a compact instance segmentation dataset by Ultralytics, consisting of the first 8 images from the COCO train 2017 set—4 images for training and 4 for validation. This dataset is tailored for testing and debugging segmentation models or experimenting with new detection methods. It is particularly useful with Ultralytics [YOLO11](https://github.com/ultralytics/ultralytics) and [HUB](https://hub.ultralytics.com/) for rapid iteration and pipeline error-checking before scaling to larger datasets. For detailed usage, refer to the model [Training](../../modes/train.md) page. -### How can I train a YOLOv8n-seg model using the COCO8-Seg dataset? +### How can I train a YOLO11n-seg model using the COCO8-Seg dataset? -To train a **YOLOv8n-seg** model on the COCO8-Seg dataset for 100 epochs with an image size of 640, you can use Python or CLI commands. Here's a quick example: +To train a **YOLO11n-seg** model on the COCO8-Seg dataset for 100 epochs with an image size of 640, you can use Python or CLI commands. Here's a quick example: !!! 
example "Train Example" @@ -96,7 +96,7 @@ To train a **YOLOv8n-seg** model on the COCO8-Seg dataset for 100 epochs with an from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.pt") # Load a pretrained model (recommended for training) + model = YOLO("yolo11n-seg.pt") # Load a pretrained model (recommended for training) # Train the model results = model.train(data="coco8-seg.yaml", epochs=100, imgsz=640) @@ -106,7 +106,7 @@ To train a **YOLOv8n-seg** model on the COCO8-Seg dataset for 100 epochs with an ```bash # Start training from a pretrained *.pt model - yolo segment train data=coco8-seg.yaml model=yolov8n-seg.pt epochs=100 imgsz=640 + yolo segment train data=coco8-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640 ``` For a thorough explanation of available arguments and configuration options, you can check the [Training](../../modes/train.md) documentation. diff --git a/docs/en/datasets/segment/crack-seg.md b/docs/en/datasets/segment/crack-seg.md index f5ffbe92e0..1526fa5e90 100644 --- a/docs/en/datasets/segment/crack-seg.md +++ b/docs/en/datasets/segment/crack-seg.md @@ -34,7 +34,7 @@ A YAML (Yet Another Markup Language) file is employed to outline the configurati ## Usage -To train Ultralytics YOLOv8n model on the Crack Segmentation dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. +To train Ultralytics YOLO11n model on the Crack Segmentation dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! example "Train Example" @@ -44,7 +44,7 @@ To train Ultralytics YOLOv8n model on the Crack Segmentation dataset for 100 [ep from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="crack-seg.yaml", epochs=100, imgsz=640) @@ -54,7 +54,7 @@ To train Ultralytics YOLOv8n model on the Crack Segmentation dataset for 100 [ep ```bash # Start training from a pretrained *.pt model - yolo segment train data=crack-seg.yaml model=yolov8n-seg.pt epochs=100 imgsz=640 + yolo segment train data=crack-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640 ``` ## Sample Data and Annotations @@ -98,9 +98,9 @@ We would like to acknowledge the Roboflow team for creating and maintaining the The [Roboflow Crack Segmentation Dataset](https://universe.roboflow.com/university-bswxt/crack-bphdr?ref=ultralytics) is a comprehensive collection of 4029 static images designed specifically for transportation and public safety studies. It is ideal for tasks such as self-driving car model development and infrastructure maintenance. The dataset includes training, testing, and validation sets, aiding in accurate crack detection and segmentation. -### How do I train a model using the Crack Segmentation Dataset with Ultralytics YOLOv8? +### How do I train a model using the Crack Segmentation Dataset with Ultralytics YOLO11? -To train an Ultralytics YOLOv8 model on the Crack Segmentation dataset, use the following code snippets. Detailed instructions and further parameters can be found on the model [Training](../../modes/train.md) page. 
+To train an Ultralytics YOLO11 model on the Crack Segmentation dataset, use the following code snippets. Detailed instructions and further parameters can be found on the model [Training](../../modes/train.md) page. !!! example "Train Example" @@ -110,7 +110,7 @@ To train an Ultralytics YOLOv8 model on the Crack Segmentation dataset, use the from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="crack-seg.yaml", epochs=100, imgsz=640) @@ -120,7 +120,7 @@ To train an Ultralytics YOLOv8 model on the Crack Segmentation dataset, use the ```bash # Start training from a pretrained *.pt model - yolo segment train data=crack-seg.yaml model=yolov8n-seg.pt epochs=100 imgsz=640 + yolo segment train data=crack-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640 ``` ### Why should I use the Crack Segmentation Dataset for my self-driving car project? diff --git a/docs/en/datasets/segment/index.md b/docs/en/datasets/segment/index.md index 52b1978164..9a83ed1645 100644 --- a/docs/en/datasets/segment/index.md +++ b/docs/en/datasets/segment/index.md @@ -74,7 +74,7 @@ The `train` and `val` fields specify the paths to the directories containing the from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="coco8-seg.yaml", epochs=100, imgsz=640) @@ -84,7 +84,7 @@ The `train` and `val` fields specify the paths to the directories containing the ```bash # Start training from a pretrained *.pt model - yolo segment train data=coco8-seg.yaml model=yolov8n-seg.pt epochs=100 imgsz=640 + yolo segment train data=coco8-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640 ``` ## Supported Datasets @@ -137,13 +137,13 @@ To auto-annotate your dataset using the Ultralytics framework, you can use the ` ```python from ultralytics.data.annotator import auto_annotate - auto_annotate(data="path/to/images", det_model="yolov8x.pt", sam_model="sam_b.pt") + auto_annotate(data="path/to/images", det_model="yolo11x.pt", sam_model="sam_b.pt") ``` | Argument | Type | Description | Default | | ------------ | ----------------------- | ----------------------------------------------------------------------------------------------------------- | -------------- | | `data` | `str` | Path to a folder containing images to be annotated. | `None` | -| `det_model` | `str, optional` | Pre-trained YOLO detection model. Defaults to `'yolov8x.pt'`. | `'yolov8x.pt'` | +| `det_model` | `str, optional` | Pre-trained YOLO detection model. Defaults to `'yolo11x.pt'`. | `'yolo11x.pt'` | | `sam_model` | `str, optional` | Pre-trained SAM segmentation model. Defaults to `'sam_b.pt'`. | `'sam_b.pt'` | | `device` | `str, optional` | Device to run the models on. Defaults to an empty string (CPU or GPU, if available). | `''` | | `output_dir` | `str or None, optional` | Directory to save the annotated results. Defaults to a `'labels'` folder in the same directory as `'data'`. 
| `None` | @@ -195,7 +195,7 @@ Auto-annotation in Ultralytics YOLO allows you to generate segmentation annotati ```python from ultralytics.data.annotator import auto_annotate -auto_annotate(data="path/to/images", det_model="yolov8x.pt", sam_model="sam_b.pt") +auto_annotate(data="path/to/images", det_model="yolo11x.pt", sam_model="sam_b.pt") ``` This function automates the annotation process, making it faster and more efficient. For more details, explore the [Auto-Annotation](#auto-annotation) section. diff --git a/docs/en/datasets/segment/package-seg.md b/docs/en/datasets/segment/package-seg.md index 477072fb57..8f377e0ae6 100644 --- a/docs/en/datasets/segment/package-seg.md +++ b/docs/en/datasets/segment/package-seg.md @@ -34,7 +34,7 @@ A YAML (Yet Another Markup Language) file is used to define the dataset configur ## Usage -To train Ultralytics YOLOv8n model on the Package Segmentation dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. +To train Ultralytics YOLO11n model on the Package Segmentation dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. !!! example "Train Example" @@ -44,7 +44,7 @@ To train Ultralytics YOLOv8n model on the Package Segmentation dataset for 100 [ from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="package-seg.yaml", epochs=100, imgsz=640) @@ -54,7 +54,7 @@ To train Ultralytics YOLOv8n model on the Package Segmentation dataset for 100 [ ```bash # Start training from a pretrained *.pt model - yolo segment train data=package-seg.yaml model=yolov8n-seg.pt epochs=100 imgsz=640 + yolo segment train data=package-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640 ``` ## Sample Data and Annotations @@ -97,9 +97,9 @@ We express our gratitude to the Roboflow team for their efforts in creating and The [Roboflow Package Segmentation Dataset](https://universe.roboflow.com/factorypackage/factory_package?ref=ultralytics) is a curated collection of images tailored for tasks involving package segmentation. It includes diverse images of packages in various contexts, making it invaluable for training and evaluating segmentation models. This dataset is particularly useful for applications in logistics, warehouse automation, and any project requiring precise package analysis. It helps optimize logistics and enhance vision models for accurate package identification and sorting. -### How do I train an Ultralytics YOLOv8 model on the Package Segmentation Dataset? +### How do I train an Ultralytics YOLO11 model on the Package Segmentation Dataset? -You can train an Ultralytics YOLOv8n model using both Python and CLI methods. Use the snippets below: +You can train an Ultralytics YOLO11n model using both Python and CLI methods. Use the snippets below: !!! example "Train Example" @@ -109,7 +109,7 @@ You can train an Ultralytics YOLOv8n model using both Python and CLI methods. 
Us from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.pt") # load a pretrained model + model = YOLO("yolo11n-seg.pt") # load a pretrained model # Train the model results = model.train(data="package-seg.yaml", epochs=100, imgsz=640) @@ -119,7 +119,7 @@ You can train an Ultralytics YOLOv8n model using both Python and CLI methods. Us ```bash # Start training from a pretrained *.pt model - yolo segment train data=package-seg.yaml model=yolov8n-seg.pt epochs=100 imgsz=640 + yolo segment train data=package-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640 ``` Refer to the model [Training](../../modes/train.md) page for more details. @@ -134,9 +134,9 @@ The dataset is structured into three main components: This structure ensures a balanced dataset for thorough model training, validation, and testing, enhancing the performance of segmentation algorithms. -### Why should I use Ultralytics YOLOv8 with the Package Segmentation Dataset? +### Why should I use Ultralytics YOLO11 with the Package Segmentation Dataset? -Ultralytics YOLOv8 provides state-of-the-art [accuracy](https://www.ultralytics.com/glossary/accuracy) and speed for real-time object detection and segmentation tasks. Using it with the Package Segmentation Dataset allows you to leverage YOLOv8's capabilities for precise package segmentation. This combination is especially beneficial for industries like logistics and warehouse automation, where accurate package identification is critical. For more information, check out our [page on YOLOv8 segmentation](https://docs.ultralytics.com/models/yolov8/). +Ultralytics YOLO11 provides state-of-the-art [accuracy](https://www.ultralytics.com/glossary/accuracy) and speed for real-time object detection and segmentation tasks. Using it with the Package Segmentation Dataset allows you to leverage YOLO11's capabilities for precise package segmentation. This combination is especially beneficial for industries like logistics and warehouse automation, where accurate package identification is critical. For more information, check out our [page on YOLO11 segmentation](https://docs.ultralytics.com/models/yolo11/). ### How can I access and use the package-seg.yaml file for the Package Segmentation Dataset? 
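To close the loop the FAQ sketches out, the same `package-seg.yaml` can be handed to validation once a model has been trained; a minimal sketch, assuming a hypothetical local weights path:

```python
from ultralytics import YOLO

# Validate a trained model against the splits defined in package-seg.yaml
model = YOLO("runs/segment/train/weights/best.pt")  # hypothetical path
metrics = model.val(data="package-seg.yaml", imgsz=640)
print(metrics.seg.map)  # mask mAP50-95
```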
diff --git a/docs/en/datasets/track/index.md b/docs/en/datasets/track/index.md index f9a8b4f81b..0aa3d8c50b 100644 --- a/docs/en/datasets/track/index.md +++ b/docs/en/datasets/track/index.md @@ -19,14 +19,14 @@ Multi-Object Detector doesn't need standalone training and directly supports pre ```python from ultralytics import YOLO - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") results = model.track(source="https://youtu.be/LNwODJXcvt4", conf=0.3, iou=0.5, show=True) ``` === "CLI" ```bash - yolo track model=yolov8n.pt source="https://youtu.be/LNwODJXcvt4" conf=0.3, iou=0.5 show + yolo track model=yolo11n.pt source="https://youtu.be/LNwODJXcvt4" conf=0.3, iou=0.5 show ``` ## FAQ @@ -42,17 +42,17 @@ To use Multi-Object Tracking with Ultralytics YOLO, you can start by using the P ```python from ultralytics import YOLO - model = YOLO("yolov8n.pt") # Load the YOLOv8 model + model = YOLO("yolo11n.pt") # Load the YOLO11 model results = model.track(source="https://youtu.be/LNwODJXcvt4", conf=0.3, iou=0.5, show=True) ``` === "CLI" ```bash - yolo track model=yolov8n.pt source="https://youtu.be/LNwODJXcvt4" conf=0.3 iou=0.5 show + yolo track model=yolo11n.pt source="https://youtu.be/LNwODJXcvt4" conf=0.3 iou=0.5 show ``` -These commands load the YOLOv8 model and use it for tracking objects in the given video source with specific confidence (`conf`) and [Intersection over Union](https://www.ultralytics.com/glossary/intersection-over-union-iou) (`iou`) thresholds. For more details, refer to the [track mode documentation](../../modes/track.md). +These commands load the YOLO11 model and use it for tracking objects in the given video source with specific confidence (`conf`) and [Intersection over Union](https://www.ultralytics.com/glossary/intersection-over-union-iou) (`iou`) thresholds. For more details, refer to the [track mode documentation](../../modes/track.md). ### What are the upcoming features for training trackers in Ultralytics? diff --git a/docs/en/guides/analytics.md b/docs/en/guides/analytics.md index 7519f0324f..1b7049c601 100644 --- a/docs/en/guides/analytics.md +++ b/docs/en/guides/analytics.md @@ -1,15 +1,26 @@ --- comments: true description: Learn to create line graphs, bar plots, and pie charts using Python with guided instructions and code snippets. Maximize your data visualization skills!. -keywords: Ultralytics, YOLOv8, data visualization, line graphs, bar plots, pie charts, Python, analytics, tutorial, guide +keywords: Ultralytics, YOLO11, data visualization, line graphs, bar plots, pie charts, Python, analytics, tutorial, guide --- -# Analytics using Ultralytics YOLOv8 +# Analytics using Ultralytics YOLO11 ## Introduction This guide provides a comprehensive overview of three fundamental types of [data visualizations](https://www.ultralytics.com/glossary/data-visualization): line graphs, bar plots, and pie charts. Each section includes step-by-step instructions and code snippets on how to create these visualizations using Python. +

+
+ +
+ Watch: How to generate Analytical Graphs using Ultralytics | Line Graphs, Bar Plots, Area and Pie Charts +

+ ### Visual Samples | Line Graph | Bar Plot | Pie Chart | @@ -31,7 +42,7 @@ This guide provides a comprehensive overview of three fundamental types of [data from ultralytics import YOLO, solutions - model = YOLO("yolov8s.pt") + model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("Path/to/video/file.mp4") assert cap.isOpened(), "Error reading video file" @@ -80,7 +91,7 @@ This guide provides a comprehensive overview of three fundamental types of [data from ultralytics import YOLO, solutions - model = YOLO("yolov8s.pt") + model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("Path/to/video/file.mp4") assert cap.isOpened(), "Error reading video file" @@ -141,7 +152,7 @@ This guide provides a comprehensive overview of three fundamental types of [data from ultralytics import YOLO, solutions - model = YOLO("yolov8s.pt") + model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("Path/to/video/file.mp4") assert cap.isOpened(), "Error reading video file" @@ -191,7 +202,7 @@ This guide provides a comprehensive overview of three fundamental types of [data from ultralytics import YOLO, solutions - model = YOLO("yolov8s.pt") + model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("Path/to/video/file.mp4") assert cap.isOpened(), "Error reading video file" @@ -241,7 +252,7 @@ This guide provides a comprehensive overview of three fundamental types of [data from ultralytics import YOLO, solutions - model = YOLO("yolov8s.pt") + model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("path/to/video/file.mp4") assert cap.isOpened(), "Error reading video file" @@ -319,11 +330,11 @@ Understanding when and how to use different types of visualizations is crucial f ## FAQ -### How do I create a line graph using Ultralytics YOLOv8 Analytics? +### How do I create a line graph using Ultralytics YOLO11 Analytics? -To create a line graph using Ultralytics YOLOv8 Analytics, follow these steps: +To create a line graph using Ultralytics YOLO11 Analytics, follow these steps: -1. Load a YOLOv8 model and open your video file. +1. Load a YOLO11 model and open your video file. 2. Initialize the `Analytics` class with the type set to "line." 3. Iterate through video frames, updating the line graph with relevant data, such as object counts per frame. 4. Save the output video displaying the line graph. @@ -335,7 +346,7 @@ import cv2 from ultralytics import YOLO, solutions -model = YOLO("yolov8s.pt") +model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("Path/to/video/file.mp4") out = cv2.VideoWriter("line_plot.avi", cv2.VideoWriter_fourcc(*"MJPG"), fps, (w, h)) @@ -355,11 +366,11 @@ out.release() cv2.destroyAllWindows() ``` -For further details on configuring the `Analytics` class, visit the [Analytics using Ultralytics YOLOv8 📊](#analytics-using-ultralytics-yolov8) section. +For further details on configuring the `Analytics` class, visit the [Analytics using Ultralytics YOLO11 📊](#analytics-using-ultralytics-yolo11) section. -### What are the benefits of using Ultralytics YOLOv8 for creating bar plots? +### What are the benefits of using Ultralytics YOLO11 for creating bar plots? -Using Ultralytics YOLOv8 for creating bar plots offers several benefits: +Using Ultralytics YOLO11 for creating bar plots offers several benefits: 1. **Real-time Data Visualization**: Seamlessly integrate [object detection](https://www.ultralytics.com/glossary/object-detection) results into bar plots for dynamic updates. 2. **Ease of Use**: Simple API and functions make it straightforward to implement and visualize data. 
@@ -373,7 +384,7 @@ import cv2 from ultralytics import YOLO, solutions -model = YOLO("yolov8s.pt") +model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("Path/to/video/file.mp4") out = cv2.VideoWriter("bar_plot.avi", cv2.VideoWriter_fourcc(*"MJPG"), fps, (w, h)) @@ -398,9 +409,9 @@ cv2.destroyAllWindows() To learn more, visit the [Bar Plot](#visual-samples) section in the guide. -### Why should I use Ultralytics YOLOv8 for creating pie charts in my data visualization projects? +### Why should I use Ultralytics YOLO11 for creating pie charts in my data visualization projects? -Ultralytics YOLOv8 is an excellent choice for creating pie charts because: +Ultralytics YOLO11 is an excellent choice for creating pie charts because: 1. **Integration with Object Detection**: Directly integrate object detection results into pie charts for immediate insights. 2. **User-Friendly API**: Simple to set up and use with minimal code. @@ -414,7 +425,7 @@ import cv2 from ultralytics import YOLO, solutions -model = YOLO("yolov8s.pt") +model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("Path/to/video/file.mp4") out = cv2.VideoWriter("pie_chart.avi", cv2.VideoWriter_fourcc(*"MJPG"), fps, (w, h)) @@ -439,9 +450,9 @@ cv2.destroyAllWindows() For more information, refer to the [Pie Chart](#visual-samples) section in the guide. -### Can Ultralytics YOLOv8 be used to track objects and dynamically update visualizations? +### Can Ultralytics YOLO11 be used to track objects and dynamically update visualizations? -Yes, Ultralytics YOLOv8 can be used to track objects and dynamically update visualizations. It supports tracking multiple objects in real-time and can update various visualizations like line graphs, bar plots, and pie charts based on the tracked objects' data. +Yes, Ultralytics YOLO11 can be used to track objects and dynamically update visualizations. It supports tracking multiple objects in real-time and can update various visualizations like line graphs, bar plots, and pie charts based on the tracked objects' data. Example for tracking and updating a line graph: @@ -450,7 +461,7 @@ import cv2 from ultralytics import YOLO, solutions -model = YOLO("yolov8s.pt") +model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("Path/to/video/file.mp4") out = cv2.VideoWriter("line_plot.avi", cv2.VideoWriter_fourcc(*"MJPG"), fps, (w, h)) @@ -472,11 +483,11 @@ cv2.destroyAllWindows() To learn about the complete functionality, see the [Tracking](../modes/track.md) section. -### What makes Ultralytics YOLOv8 different from other object detection solutions like [OpenCV](https://www.ultralytics.com/glossary/opencv) and [TensorFlow](https://www.ultralytics.com/glossary/tensorflow)? +### What makes Ultralytics YOLO11 different from other object detection solutions like [OpenCV](https://www.ultralytics.com/glossary/opencv) and [TensorFlow](https://www.ultralytics.com/glossary/tensorflow)? -Ultralytics YOLOv8 stands out from other object detection solutions like OpenCV and TensorFlow for multiple reasons: +Ultralytics YOLO11 stands out from other object detection solutions like OpenCV and TensorFlow for multiple reasons: -1. **State-of-the-art [Accuracy](https://www.ultralytics.com/glossary/accuracy)**: YOLOv8 provides superior accuracy in object detection, segmentation, and classification tasks. +1. **State-of-the-art [Accuracy](https://www.ultralytics.com/glossary/accuracy)**: YOLO11 provides superior accuracy in object detection, segmentation, and classification tasks. 2. 
**Ease of Use**: User-friendly API allows for quick implementation and integration without extensive coding. 3. **Real-time Performance**: Optimized for high-speed inference, suitable for real-time applications. 4. **Diverse Applications**: Supports various tasks including multi-object tracking, custom model training, and exporting to different formats like ONNX, TensorRT, and CoreML. diff --git a/docs/en/guides/azureml-quickstart.md b/docs/en/guides/azureml-quickstart.md index a769eee10d..99bb62a2c1 100644 --- a/docs/en/guides/azureml-quickstart.md +++ b/docs/en/guides/azureml-quickstart.md @@ -1,10 +1,10 @@ --- comments: true -description: Learn how to run YOLOv8 on AzureML. Quickstart instructions for terminal and notebooks to harness Azure's cloud computing for efficient model training. -keywords: YOLOv8, AzureML, machine learning, cloud computing, quickstart, terminal, notebooks, model training, Python SDK, AI, Ultralytics +description: Learn how to run YOLO11 on AzureML. Quickstart instructions for terminal and notebooks to harness Azure's cloud computing for efficient model training. +keywords: YOLO11, AzureML, machine learning, cloud computing, quickstart, terminal, notebooks, model training, Python SDK, AI, Ultralytics --- -# YOLOv8 🚀 on AzureML +# YOLO11 🚀 on AzureML ## What is Azure? @@ -22,7 +22,7 @@ For users of YOLO (You Only Look Once), AzureML provides a robust, scalable, and - Utilize built-in tools for data preprocessing, feature selection, and model training. - Collaborate more efficiently with capabilities for MLOps (Machine Learning Operations), including but not limited to monitoring, auditing, and versioning of models and data. -In the subsequent sections, you will find a quickstart guide detailing how to run YOLOv8 object detection models using AzureML, either from a compute terminal or a notebook. +In the subsequent sections, you will find a quickstart guide detailing how to run YOLO11 object detection models using AzureML, either from a compute terminal or a notebook. ## Prerequisites @@ -49,8 +49,8 @@ Start your compute and open a Terminal: Create your conda virtualenv and install pip in it: ```bash -conda create --name yolov8env -y -conda activate yolov8env +conda create --name yolo11env -y +conda activate yolo11env conda install pip -y ``` @@ -63,18 +63,18 @@ pip install ultralytics pip install onnx>=1.12.0 ``` -### Perform YOLOv8 tasks +### Perform YOLO11 tasks Predict: ```bash -yolo predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg' +yolo predict model=yolo11n.pt source='https://ultralytics.com/images/bus.jpg' ``` Train a detection model for 10 [epochs](https://www.ultralytics.com/glossary/epoch) with an initial learning_rate of 0.01: ```bash -yolo train data=coco8.yaml model=yolov8n.pt epochs=10 lr0=0.01 +yolo train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01 ``` You can find more [instructions to use the Ultralytics CLI here](../quickstart.md#use-ultralytics-with-cli). @@ -92,11 +92,11 @@ Open the compute Terminal. From your compute terminal, you need to create a new ipykernel that will be used by your notebook to manage your dependencies: ```bash -conda create --name yolov8env -y -conda activate yolov8env +conda create --name yolo11env -y +conda activate yolo11env conda install pip -y conda install ipykernel -y -python -m ipykernel install --user --name yolov8env --display-name "yolov8env" +python -m ipykernel install --user --name yolo11env --display-name "yolo11env" ``` Close your terminal and create a new notebook. 
From your Notebook, you can select the new kernel. @@ -105,21 +105,21 @@ Then you can open a Notebook cell and install the required dependencies: ```bash %%bash -source activate yolov8env +source activate yolo11env cd ultralytics pip install -r requirements.txt pip install ultralytics pip install onnx>=1.12.0 ``` -Note that we need to use the `source activate yolov8env` for all the %%bash cells, to make sure that the %%bash cell uses environment we want. +Note that we need to use the `source activate yolo11env` for all the %%bash cells, to make sure that the %%bash cell uses environment we want. Run some predictions using the [Ultralytics CLI](../quickstart.md#use-ultralytics-with-cli): ```bash %%bash -source activate yolov8env -yolo predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg' +source activate yolo11env +yolo predict model=yolo11n.pt source='https://ultralytics.com/images/bus.jpg' ``` Or with the [Ultralytics Python interface](../quickstart.md#use-ultralytics-with-python), for example to train the model: @@ -128,7 +128,7 @@ Or with the [Ultralytics Python interface](../quickstart.md#use-ultralytics-with from ultralytics import YOLO # Load a model -model = YOLO("yolov8n.pt") # load an official YOLOv8n model +model = YOLO("yolo11n.pt") # load an official YOLO11n model # Use the model model.train(data="coco8.yaml", epochs=3) # train the model @@ -137,47 +137,47 @@ results = model("https://ultralytics.com/images/bus.jpg") # predict on an image path = model.export(format="onnx") # export the model to ONNX format ``` -You can use either the Ultralytics CLI or Python interface for running YOLOv8 tasks, as described in the terminal section above. +You can use either the Ultralytics CLI or Python interface for running YOLO11 tasks, as described in the terminal section above. -By following these steps, you should be able to get YOLOv8 running quickly on AzureML for quick trials. For more advanced uses, you may refer to the full AzureML documentation linked at the beginning of this guide. +By following these steps, you should be able to get YOLO11 running quickly on AzureML for quick trials. For more advanced uses, you may refer to the full AzureML documentation linked at the beginning of this guide. ## Explore More with AzureML -This guide serves as an introduction to get you up and running with YOLOv8 on AzureML. However, it only scratches the surface of what AzureML can offer. To delve deeper and unlock the full potential of AzureML for your machine learning projects, consider exploring the following resources: +This guide serves as an introduction to get you up and running with YOLO11 on AzureML. However, it only scratches the surface of what AzureML can offer. To delve deeper and unlock the full potential of AzureML for your machine learning projects, consider exploring the following resources: - [Create a Data Asset](https://learn.microsoft.com/azure/machine-learning/how-to-create-data-assets): Learn how to set up and manage your data assets effectively within the AzureML environment. - [Initiate an AzureML Job](https://learn.microsoft.com/azure/machine-learning/how-to-train-model): Get a comprehensive understanding of how to kickstart your machine learning training jobs on AzureML. - [Register a Model](https://learn.microsoft.com/azure/machine-learning/how-to-manage-models): Familiarize yourself with model management practices including registration, versioning, and deployment. 
-- [Train YOLOv8 with AzureML Python SDK](https://medium.com/@ouphi/how-to-train-the-yolov8-model-with-azure-machine-learning-python-sdk-8268696be8ba): Explore a step-by-step guide on using the AzureML Python SDK to train your YOLOv8 models. -- [Train YOLOv8 with AzureML CLI](https://medium.com/@ouphi/how-to-train-the-yolov8-model-with-azureml-and-the-az-cli-73d3c870ba8e): Discover how to utilize the command-line interface for streamlined training and management of YOLOv8 models on AzureML. +- [Train YOLO11 with AzureML Python SDK](https://medium.com/@ouphi/how-to-train-the-yolov8-model-with-azure-machine-learning-python-sdk-8268696be8ba): Explore a step-by-step guide on using the AzureML Python SDK to train your YOLO11 models. +- [Train YOLO11 with AzureML CLI](https://medium.com/@ouphi/how-to-train-the-yolov8-model-with-azureml-and-the-az-cli-73d3c870ba8e): Discover how to utilize the command-line interface for streamlined training and management of YOLO11 models on AzureML. ## FAQ -### How do I run YOLOv8 on AzureML for model training? +### How do I run YOLO11 on AzureML for model training? -Running YOLOv8 on AzureML for model training involves several steps: +Running YOLO11 on AzureML for model training involves several steps: 1. **Create a Compute Instance**: From your AzureML workspace, navigate to Compute > Compute instances > New, and select the required instance. 2. **Setup Environment**: Start your compute instance, open a terminal, and create a conda environment: ```bash - conda create --name yolov8env -y - conda activate yolov8env + conda create --name yolo11env -y + conda activate yolo11env conda install pip -y pip install ultralytics onnx>=1.12.0 ``` -3. **Run YOLOv8 Tasks**: Use the Ultralytics CLI to train your model: +3. **Run YOLO11 Tasks**: Use the Ultralytics CLI to train your model: ```bash - yolo train data=coco8.yaml model=yolov8n.pt epochs=10 lr0=0.01 + yolo train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01 ``` For more details, you can refer to the [instructions to use the Ultralytics CLI](../quickstart.md#use-ultralytics-with-cli). -### What are the benefits of using AzureML for YOLOv8 training? +### What are the benefits of using AzureML for YOLO11 training? -AzureML provides a robust and efficient ecosystem for training YOLOv8 models: +AzureML provides a robust and efficient ecosystem for training YOLO11 models: - **Scalability**: Easily scale your compute resources as your data and model complexity grows. - **MLOps Integration**: Utilize features like versioning, monitoring, and auditing to streamline ML operations. @@ -185,9 +185,9 @@ AzureML provides a robust and efficient ecosystem for training YOLOv8 models: These advantages make AzureML an ideal platform for projects ranging from quick prototypes to large-scale deployments. For more tips, check out [AzureML Jobs](https://learn.microsoft.com/azure/machine-learning/how-to-train-model). -### How do I troubleshoot common issues when running YOLOv8 on AzureML? +### How do I troubleshoot common issues when running YOLO11 on AzureML? -Troubleshooting common issues with YOLOv8 on AzureML can involve the following steps: +Troubleshooting common issues with YOLO11 on AzureML can involve the following steps: - **Dependency Issues**: Ensure all required packages are installed. Refer to the `requirements.txt` file for dependencies. - **Environment Setup**: Verify that your conda environment is correctly activated before running commands. 
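When chasing dependency or environment problems like these, Ultralytics' built-in diagnostics (the Python counterpart of the `yolo checks` CLI command) give a quick snapshot of the setup:

```python
import ultralytics

# Prints versions, install type, RAM, CPU and CUDA availability
ultralytics.checks()
```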
@@ -202,7 +202,7 @@ Yes, AzureML allows you to use both the Ultralytics CLI and the Python interface - **CLI**: Ideal for quick tasks and running standard scripts directly from the terminal. ```bash - yolo predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg' + yolo predict model=yolo11n.pt source='https://ultralytics.com/images/bus.jpg' ``` - **Python Interface**: Useful for more complex tasks requiring custom coding and integration within notebooks. @@ -210,18 +210,18 @@ Yes, AzureML allows you to use both the Ultralytics CLI and the Python interface ```python from ultralytics import YOLO - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") model.train(data="coco8.yaml", epochs=3) ``` Refer to the quickstart guides for more detailed instructions [here](../quickstart.md#use-ultralytics-with-cli) and [here](../quickstart.md#use-ultralytics-with-python). -### What is the advantage of using Ultralytics YOLOv8 over other [object detection](https://www.ultralytics.com/glossary/object-detection) models? +### What is the advantage of using Ultralytics YOLO11 over other [object detection](https://www.ultralytics.com/glossary/object-detection) models? -Ultralytics YOLOv8 offers several unique advantages over competing object detection models: +Ultralytics YOLO11 offers several unique advantages over competing object detection models: - **Speed**: Faster inference and training times compared to models like Faster R-CNN and SSD. - **[Accuracy](https://www.ultralytics.com/glossary/accuracy)**: High accuracy in detection tasks with features like anchor-free design and enhanced augmentation strategies. - **Ease of Use**: Intuitive API and CLI for quick setup, making it accessible both to beginners and experts. -To explore more about YOLOv8's features, visit the [Ultralytics YOLO](https://www.ultralytics.com/yolo) page for detailed insights. +To explore more about YOLO11's features, visit the [Ultralytics YOLO](https://www.ultralytics.com/yolo) page for detailed insights. diff --git a/docs/en/guides/conda-quickstart.md b/docs/en/guides/conda-quickstart.md index 6b52339260..e37e89911f 100644 --- a/docs/en/guides/conda-quickstart.md +++ b/docs/en/guides/conda-quickstart.md @@ -73,7 +73,7 @@ With Ultralytics installed, you can now start using its robust features for [obj ```python from ultralytics import YOLO -model = YOLO("yolov8n.pt") # initialize model +model = YOLO("yolo11n.pt") # initialize model results = model("path/to/image.jpg") # perform inference results[0].show() # display results for the first image ``` diff --git a/docs/en/guides/coral-edge-tpu-on-raspberry-pi.md b/docs/en/guides/coral-edge-tpu-on-raspberry-pi.md index db61c08196..5f6fceb781 100644 --- a/docs/en/guides/coral-edge-tpu-on-raspberry-pi.md +++ b/docs/en/guides/coral-edge-tpu-on-raspberry-pi.md @@ -1,10 +1,10 @@ --- comments: true -description: Learn how to boost your Raspberry Pi's ML performance using Coral Edge TPU with Ultralytics YOLOv8. Follow our detailed setup and installation guide. -keywords: Coral Edge TPU, Raspberry Pi, YOLOv8, Ultralytics, TensorFlow Lite, ML inference, machine learning, AI, installation guide, setup tutorial +description: Learn how to boost your Raspberry Pi's ML performance using Coral Edge TPU with Ultralytics YOLO11. Follow our detailed setup and installation guide. 
+keywords: Coral Edge TPU, Raspberry Pi, YOLO11, Ultralytics, TensorFlow Lite, ML inference, machine learning, AI, installation guide, setup tutorial --- -# Coral Edge TPU on a Raspberry Pi with Ultralytics YOLOv8 🚀 +# Coral Edge TPU on a Raspberry Pi with Ultralytics YOLO11 🚀

Raspberry Pi single board computer with USB Edge TPU accelerator @@ -152,9 +152,9 @@ Find comprehensive information on the [Predict](../modes/predict.md) page for fu ## FAQ -### What is a Coral Edge TPU and how does it enhance Raspberry Pi's performance with Ultralytics YOLOv8? +### What is a Coral Edge TPU and how does it enhance Raspberry Pi's performance with Ultralytics YOLO11? -The Coral Edge TPU is a compact device designed to add an Edge TPU coprocessor to your system. This coprocessor enables low-power, high-performance [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) inference, particularly optimized for TensorFlow Lite models. When using a Raspberry Pi, the Edge TPU accelerates ML model inference, significantly boosting performance, especially for Ultralytics YOLOv8 models. You can read more about the Coral Edge TPU on their [home page](https://coral.ai/products/accelerator). +The Coral Edge TPU is a compact device designed to add an Edge TPU coprocessor to your system. This coprocessor enables low-power, high-performance [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) inference, particularly optimized for TensorFlow Lite models. When using a Raspberry Pi, the Edge TPU accelerates ML model inference, significantly boosting performance, especially for Ultralytics YOLO11 models. You can read more about the Coral Edge TPU on their [home page](https://coral.ai/products/accelerator). ### How do I install the Coral Edge TPU runtime on a Raspberry Pi? @@ -166,9 +166,9 @@ sudo dpkg -i path/to/package.deb Make sure to uninstall any previous Coral Edge TPU runtime versions by following the steps outlined in the [Installation Walkthrough](#installation-walkthrough) section. -### Can I export my Ultralytics YOLOv8 model to be compatible with Coral Edge TPU? +### Can I export my Ultralytics YOLO11 model to be compatible with Coral Edge TPU? -Yes, you can export your Ultralytics YOLOv8 model to be compatible with the Coral Edge TPU. It is recommended to perform the export on Google Colab, an x86_64 Linux machine, or using the [Ultralytics Docker container](docker-quickstart.md). You can also use Ultralytics HUB for exporting. Here is how you can export your model using Python and CLI: +Yes, you can export your Ultralytics YOLO11 model to be compatible with the Coral Edge TPU. It is recommended to perform the export on Google Colab, an x86_64 Linux machine, or using the [Ultralytics Docker container](docker-quickstart.md). You can also use Ultralytics HUB for exporting. Here is how you can export your model using Python and CLI: !!! note "Exporting the model" @@ -208,9 +208,9 @@ pip install -U tflite-runtime For a specific wheel, such as TensorFlow 2.15.0 `tflite-runtime`, you can download it from [this link](https://github.com/feranick/TFlite-builds/releases) and install it using `pip`. Detailed instructions are available in the section on running the model [Running the Model](#running-the-model). -### How do I run inference with an exported YOLOv8 model on a Raspberry Pi using the Coral Edge TPU? +### How do I run inference with an exported YOLO11 model on a Raspberry Pi using the Coral Edge TPU? -After exporting your YOLOv8 model to an Edge TPU-compatible format, you can run inference using the following code snippets: +After exporting your YOLO11 model to an Edge TPU-compatible format, you can run inference using the following code snippets: !!! 
note "Running the model" diff --git a/docs/en/guides/data-collection-and-annotation.md b/docs/en/guides/data-collection-and-annotation.md index 6ca205260e..058323ee29 100644 --- a/docs/en/guides/data-collection-and-annotation.md +++ b/docs/en/guides/data-collection-and-annotation.md @@ -88,7 +88,7 @@ Let's say you are ready to annotate now. There are several open-source tools ava - **[Label Studio](https://github.com/HumanSignal/label-studio)**: A flexible tool that supports a wide range of annotation tasks and includes features for managing projects and quality control. - **[CVAT](https://github.com/cvat-ai/cvat)**: A powerful tool that supports various annotation formats and customizable workflows, making it suitable for complex projects. -- **[Labelme](https://github.com/labelmeai/labelme)**: A simple and easy-to-use tool that allows for quick annotation of images with polygons, making it ideal for straightforward tasks. +- **[Labelme](https://github.com/wkentaro/labelme)**: A simple and easy-to-use tool that allows for quick annotation of images with polygons, making it ideal for straightforward tasks.

LabelMe Overview @@ -136,12 +136,12 @@ Bouncing your ideas and queries off other [computer vision](https://www.ultralyt ### Where to Find Help and Support -- **GitHub Issues:** Visit the YOLOv8 GitHub repository and use the [Issues tab](https://github.com/ultralytics/ultralytics/issues) to raise questions, report bugs, and suggest features. The community and maintainers are there to help with any issues you face. +- **GitHub Issues:** Visit the YOLO11 GitHub repository and use the [Issues tab](https://github.com/ultralytics/ultralytics/issues) to raise questions, report bugs, and suggest features. The community and maintainers are there to help with any issues you face. - **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://discord.com/invite/ultralytics) to connect with other users and developers, get support, share knowledge, and brainstorm ideas. ### Official Documentation -- **Ultralytics YOLOv8 Documentation:** Refer to the [official YOLOv8 documentation](./index.md) for thorough guides and valuable insights on numerous computer vision tasks and projects. +- **Ultralytics YOLO11 Documentation:** Refer to the [official YOLO11 documentation](./index.md) for thorough guides and valuable insights on numerous computer vision tasks and projects. ## Conclusion @@ -159,7 +159,7 @@ Ensuring high consistency and accuracy in data annotation involves establishing ### How many images do I need for training Ultralytics YOLO models? -For effective [transfer learning](https://www.ultralytics.com/glossary/transfer-learning) and object detection with Ultralytics YOLO models, start with a minimum of a few hundred annotated objects per class. If training for just one class, begin with at least 100 annotated images and train for approximately 100 [epochs](https://www.ultralytics.com/glossary/epoch). More complex tasks might require thousands of images per class to achieve high reliability and performance. Quality annotations are crucial, so ensure your data collection and annotation processes are rigorous and aligned with your project's specific goals. Explore detailed training strategies in the [YOLOv8 training guide](../modes/train.md). +For effective [transfer learning](https://www.ultralytics.com/glossary/transfer-learning) and object detection with Ultralytics YOLO models, start with a minimum of a few hundred annotated objects per class. If training for just one class, begin with at least 100 annotated images and train for approximately 100 [epochs](https://www.ultralytics.com/glossary/epoch). More complex tasks might require thousands of images per class to achieve high reliability and performance. Quality annotations are crucial, so ensure your data collection and annotation processes are rigorous and aligned with your project's specific goals. Explore detailed training strategies in the [YOLO11 training guide](../modes/train.md). ### What are some popular tools for data annotation? @@ -167,7 +167,7 @@ Several popular open-source tools can streamline the data annotation process: - **[Label Studio](https://github.com/HumanSignal/label-studio)**: A flexible tool supporting various annotation tasks, project management, and quality control features. - **[CVAT](https://www.cvat.ai/)**: Offers multiple annotation formats and customizable workflows, making it suitable for complex projects. -- **[Labelme](https://github.com/labelmeai/labelme)**: Ideal for quick and straightforward image annotation with polygons. 
+- **[Labelme](https://github.com/wkentaro/labelme)**: Ideal for quick and straightforward image annotation with polygons. These tools can help enhance the efficiency and accuracy of your annotation workflows. For extensive feature lists and guides, refer to our [data annotation tools documentation](../datasets/index.md). diff --git a/docs/en/guides/deepstream-nvidia-jetson.md b/docs/en/guides/deepstream-nvidia-jetson.md index ab15009b99..90c361cb92 100644 --- a/docs/en/guides/deepstream-nvidia-jetson.md +++ b/docs/en/guides/deepstream-nvidia-jetson.md @@ -1,10 +1,10 @@ --- comments: true -description: Learn how to deploy Ultralytics YOLOv8 on NVIDIA Jetson devices using TensorRT and DeepStream SDK. Explore performance benchmarks and maximize AI capabilities. -keywords: Ultralytics, YOLOv8, NVIDIA Jetson, JetPack, AI deployment, embedded systems, deep learning, TensorRT, DeepStream SDK, computer vision +description: Learn how to deploy Ultralytics YOLO11 on NVIDIA Jetson devices using TensorRT and DeepStream SDK. Explore performance benchmarks and maximize AI capabilities. +keywords: Ultralytics, YOLO11, NVIDIA Jetson, JetPack, AI deployment, embedded systems, deep learning, TensorRT, DeepStream SDK, computer vision --- -# Ultralytics YOLOv8 on NVIDIA Jetson using DeepStream SDK and TensorRT +# Ultralytics YOLO11 on NVIDIA Jetson using DeepStream SDK and TensorRT


@@ -14,10 +14,10 @@ keywords: Ultralytics, YOLOv8, NVIDIA Jetson, JetPack, AI deployment, embedded s allowfullscreen>
- Watch: How to Run Multiple Streams with DeepStream SDK on Jetson Nano using Ultralytics YOLOv8 + Watch: How to Run Multiple Streams with DeepStream SDK on Jetson Nano using Ultralytics YOLO11

-This comprehensive guide provides a detailed walkthrough for deploying Ultralytics YOLOv8 on [NVIDIA Jetson](https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/) devices using DeepStream SDK and TensorRT. Here we use TensorRT to maximize the inference performance on the Jetson platform. +This comprehensive guide provides a detailed walkthrough for deploying Ultralytics YOLO11 on [NVIDIA Jetson](https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/) devices using DeepStream SDK and TensorRT. Here we use TensorRT to maximize the inference performance on the Jetson platform. DeepStream on NVIDIA Jetson @@ -33,7 +33,7 @@ This comprehensive guide provides a detailed walkthrough for deploying Ultralyti Before you start to follow this guide: -- Visit our documentation, [Quick Start Guide: NVIDIA Jetson with Ultralytics YOLOv8](nvidia-jetson.md) to set up your NVIDIA Jetson device with Ultralytics YOLOv8 +- Visit our documentation, [Quick Start Guide: NVIDIA Jetson with Ultralytics YOLO11](nvidia-jetson.md) to set up your NVIDIA Jetson device with Ultralytics YOLO11 - Install [DeepStream SDK](https://developer.nvidia.com/deepstream-getting-started) according to the JetPack version - For JetPack 4.6.4, install [DeepStream 6.0.1](https://docs.nvidia.com/metropolis/deepstream/6.0.1/dev-guide/text/DS_Quickstart.html) @@ -43,7 +43,7 @@ Before you start to follow this guide: In this guide we have used the Debian package method of installing DeepStream SDK to the Jetson device. You can also visit the [DeepStream SDK on Jetson (Archived)](https://developer.nvidia.com/embedded/deepstream-on-jetson-downloads-archived) to access legacy versions of DeepStream. -## DeepStream Configuration for YOLOv8 +## DeepStream Configuration for YOLO11 Here we are using [marcoslucianops/DeepStream-Yolo](https://github.com/marcoslucianops/DeepStream-Yolo) GitHub repository which includes NVIDIA DeepStream SDK support for YOLO models. We appreciate the efforts of marcoslucianops for his contributions! @@ -61,7 +61,7 @@ Here we are using [marcoslucianops/DeepStream-Yolo](https://github.com/marcosluc cd DeepStream-Yolo ``` -3. Download Ultralytics YOLOv8 detection model (.pt) of your choice from [YOLOv8 releases](https://github.com/ultralytics/assets/releases). Here we use [yolov8s.pt](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s.pt). +3. Download Ultralytics YOLO11 detection model (.pt) of your choice from [YOLO11 releases](https://github.com/ultralytics/assets/releases). Here we use [yolov8s.pt](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s.pt). ```bash wget https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s.pt @@ -69,7 +69,7 @@ Here we are using [marcoslucianops/DeepStream-Yolo](https://github.com/marcosluc !!! note - You can also use a [custom trained YOLOv8 model](https://docs.ultralytics.com/modes/train/). + You can also use a [custom trained YOLO11 model](https://docs.ultralytics.com/modes/train/). 4. Convert model to ONNX @@ -179,7 +179,7 @@ deepstream-app -c deepstream_app_config.txt It will take a long time to generate the TensorRT engine file before starting the inference. So please be patient. -
YOLOv8 with deepstream
+YOLO11 with deepstream
!!! tip @@ -317,21 +317,21 @@ This guide was initially created by our friends at Seeed Studio, Lakshantha and ## FAQ -### How do I set up Ultralytics YOLOv8 on an NVIDIA Jetson device? +### How do I set up Ultralytics YOLO11 on an NVIDIA Jetson device? -To set up Ultralytics YOLOv8 on an [NVIDIA Jetson](https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/) device, you first need to install the [DeepStream SDK](https://developer.nvidia.com/deepstream-getting-started) compatible with your JetPack version. Follow the step-by-step guide in our [Quick Start Guide](nvidia-jetson.md) to configure your NVIDIA Jetson for YOLOv8 deployment. +To set up Ultralytics YOLO11 on an [NVIDIA Jetson](https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/) device, you first need to install the [DeepStream SDK](https://developer.nvidia.com/deepstream-getting-started) compatible with your JetPack version. Follow the step-by-step guide in our [Quick Start Guide](nvidia-jetson.md) to configure your NVIDIA Jetson for YOLO11 deployment. -### What is the benefit of using TensorRT with YOLOv8 on NVIDIA Jetson? +### What is the benefit of using TensorRT with YOLO11 on NVIDIA Jetson? -Using TensorRT with YOLOv8 optimizes the model for inference, significantly reducing latency and improving throughput on NVIDIA Jetson devices. TensorRT provides high-performance, low-latency [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) inference through layer fusion, precision calibration, and kernel auto-tuning. This leads to faster and more efficient execution, particularly useful for real-time applications like video analytics and autonomous machines. +Using TensorRT with YOLO11 optimizes the model for inference, significantly reducing latency and improving throughput on NVIDIA Jetson devices. TensorRT provides high-performance, low-latency [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) inference through layer fusion, precision calibration, and kernel auto-tuning. This leads to faster and more efficient execution, particularly useful for real-time applications like video analytics and autonomous machines. -### Can I run Ultralytics YOLOv8 with DeepStream SDK across different NVIDIA Jetson hardware? +### Can I run Ultralytics YOLO11 with DeepStream SDK across different NVIDIA Jetson hardware? -Yes, the guide for deploying Ultralytics YOLOv8 with the DeepStream SDK and TensorRT is compatible across the entire NVIDIA Jetson lineup. This includes devices like the Jetson Orin NX 16GB with [JetPack 5.1.3](https://developer.nvidia.com/embedded/jetpack-sdk-513) and the Jetson Nano 4GB with [JetPack 4.6.4](https://developer.nvidia.com/jetpack-sdk-464). Refer to the section [DeepStream Configuration for YOLOv8](#deepstream-configuration-for-yolov8) for detailed steps. +Yes, the guide for deploying Ultralytics YOLO11 with the DeepStream SDK and TensorRT is compatible across the entire NVIDIA Jetson lineup. This includes devices like the Jetson Orin NX 16GB with [JetPack 5.1.3](https://developer.nvidia.com/embedded/jetpack-sdk-513) and the Jetson Nano 4GB with [JetPack 4.6.4](https://developer.nvidia.com/jetpack-sdk-464). Refer to the section [DeepStream Configuration for YOLO11](#deepstream-configuration-for-yolo11) for detailed steps. -### How can I convert a YOLOv8 model to ONNX for DeepStream? +### How can I convert a YOLO11 model to ONNX for DeepStream? 
-To convert a YOLOv8 model to ONNX format for deployment with DeepStream, use the `utils/export_yoloV8.py` script from the [DeepStream-Yolo](https://github.com/marcoslucianops/DeepStream-Yolo) repository.
+To convert a YOLO11 model to ONNX format for deployment with DeepStream, use the `utils/export_yoloV8.py` script from the [DeepStream-Yolo](https://github.com/marcoslucianops/DeepStream-Yolo) repository.

Here's an example command:

@@ -341,12 +341,12 @@ python3 utils/export_yoloV8.py -w yolov8s.pt --opset 12 --simplify

For more details on model conversion, check out our [model export section](../modes/export.md).

-### What are the performance benchmarks for YOLOv8 on NVIDIA Jetson Orin NX?
+### What are the performance benchmarks for YOLO11 on NVIDIA Jetson Orin NX?

-The performance of YOLOv8 models on NVIDIA Jetson Orin NX 16GB varies based on TensorRT precision levels. For example, YOLOv8s models achieve:
+The performance of YOLO11 models on NVIDIA Jetson Orin NX 16GB varies based on TensorRT precision levels. For example, YOLOv8s models achieve:

- **FP32 Precision**: 15.63 ms/im, 64 FPS
- **FP16 Precision**: 7.94 ms/im, 126 FPS
- **INT8 Precision**: 5.53 ms/im, 181 FPS

-These benchmarks underscore the efficiency and capability of using TensorRT-optimized YOLOv8 models on NVIDIA Jetson hardware. For further details, see our [Benchmark Results](#benchmark-results) section.
+These benchmarks underscore the efficiency and capability of using TensorRT-optimized YOLO11 models on NVIDIA Jetson hardware. For further details, see our [Benchmark Results](#benchmark-results) section.

diff --git a/docs/en/guides/defining-project-goals.md b/docs/en/guides/defining-project-goals.md
index c5e3c58cf3..2a5dc1b124 100644
--- a/docs/en/guides/defining-project-goals.md
+++ b/docs/en/guides/defining-project-goals.md
@@ -1,7 +1,7 @@
---
comments: true
description: Learn how to define clear goals and objectives for your computer vision project with our practical guide. Includes tips on problem statements, measurable objectives, and key decisions.
-keywords: computer vision, project planning, problem statement, measurable objectives, dataset preparation, model selection, YOLOv8, Ultralytics
+keywords: computer vision, project planning, problem statement, measurable objectives, dataset preparation, model selection, YOLO11, Ultralytics
---

# A Practical Guide for Defining Your [Computer Vision](https://www.ultralytics.com/glossary/computer-vision-cv) Project

@@ -30,7 +30,7 @@ Let's walk through an example.

Consider a computer vision project where you want to [estimate the speed of vehicles](./speed-estimation.md) on a highway. The core issue is that current speed monitoring methods are inefficient and error-prone due to outdated radar systems and manual processes. The project aims to develop a real-time computer vision system that can replace legacy [speed estimation](https://www.ultralytics.com/blog/ultralytics-yolov8-for-speed-estimation-in-computer-vision-projects) systems.
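For intuition, the arithmetic at the heart of such a speed-estimation system is straightforward once vehicles are tracked frame to frame. A back-of-the-envelope sketch in Python, where every number is an illustrative assumption rather than an output of the Ultralytics speed-estimation solution:

```python
# Toy arithmetic: convert a tracked centroid displacement into a speed estimate.
pixels_moved = 45  # centroid displacement across the measurement window, in pixels
n_frames = 10  # number of frames in the measurement window
pixels_per_meter = 8.0  # assumed camera calibration factor
fps = 30  # video frame rate

speed_ms = (pixels_moved / pixels_per_meter) / (n_frames / fps)  # meters per second
print(f"{speed_ms * 3.6:.1f} km/h")  # ~60.8 km/h
```

Real deployments must calibrate `pixels_per_meter` per camera view; any error in that factor scales the speed estimate linearly.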

- Speed Estimation Using YOLOv8 + Speed Estimation Using YOLO11

Primary users include traffic management authorities and law enforcement, while secondary stakeholders are highway planners and the public benefiting from safer roads. Key requirements involve evaluating budget, time, and personnel, as well as addressing technical needs like high-resolution cameras and real-time data processing. Additionally, regulatory constraints on privacy and [data security](https://www.ultralytics.com/glossary/data-security) must be considered. @@ -85,7 +85,7 @@ The most popular computer vision tasks include [image classification](https://ww Overview of Computer Vision Tasks
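To make the task distinction concrete, each task maps to a differently suffixed checkpoint in the Ultralytics package. A quick sketch (the checkpoint names follow the naming convention used elsewhere in this diff):

```python
from ultralytics import YOLO

# One model family, different task heads: the filename suffix selects the task
detector = YOLO("yolo11n.pt")  # object detection
segmenter = YOLO("yolo11n-seg.pt")  # instance segmentation
classifier = YOLO("yolo11n-cls.pt")  # image classification
pose_estimator = YOLO("yolo11n-pose.pt")  # pose estimation

results = detector("https://ultralytics.com/images/bus.jpg")  # sample prediction
```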

-For a detailed explanation of various tasks, please take a look at the Ultralytics Docs page on [YOLOv8 Tasks](../tasks/index.md). +For a detailed explanation of various tasks, please take a look at the Ultralytics Docs page on [YOLO11 Tasks](../tasks/index.md). ### Can a Pre-trained Model Remember Classes It Knew Before Custom Training? @@ -114,12 +114,12 @@ Connecting with other computer vision enthusiasts can be incredibly helpful for ### Community Support Channels -- **GitHub Issues:** Head over to the YOLOv8 GitHub repository. You can use the [Issues tab](https://github.com/ultralytics/ultralytics/issues) to raise questions, report bugs, and suggest features. The community and maintainers can assist with specific problems you encounter. +- **GitHub Issues:** Head over to the YOLO11 GitHub repository. You can use the [Issues tab](https://github.com/ultralytics/ultralytics/issues) to raise questions, report bugs, and suggest features. The community and maintainers can assist with specific problems you encounter. - **Ultralytics Discord Server:** Become part of the [Ultralytics Discord server](https://discord.com/invite/ultralytics). Connect with fellow users and developers, seek support, exchange knowledge, and discuss ideas. ### Comprehensive Guides and Documentation -- **Ultralytics YOLOv8 Documentation:** Explore the [official YOLOv8 documentation](./index.md) for in-depth guides and valuable tips on various computer vision tasks and projects. +- **Ultralytics YOLO11 Documentation:** Explore the [official YOLO11 documentation](./index.md) for in-depth guides and valuable tips on various computer vision tasks and projects. ## Conclusion @@ -138,11 +138,11 @@ To define a clear problem statement for your Ultralytics computer vision project Providing a well-defined problem statement ensures that the project remains focused and aligned with your objectives. For a detailed guide, refer to our [practical guide](#defining-a-clear-problem-statement). -### Why should I use Ultralytics YOLOv8 for speed estimation in my computer vision project? +### Why should I use Ultralytics YOLO11 for speed estimation in my computer vision project? -Ultralytics YOLOv8 is ideal for speed estimation because of its real-time object tracking capabilities, high accuracy, and robust performance in detecting and monitoring vehicle speeds. It overcomes inefficiencies and inaccuracies of traditional radar systems by leveraging cutting-edge computer vision technology. Check out our blog on [speed estimation using YOLOv8](https://www.ultralytics.com/blog/ultralytics-yolov8-for-speed-estimation-in-computer-vision-projects) for more insights and practical examples. +Ultralytics YOLO11 is ideal for speed estimation because of its real-time object tracking capabilities, high accuracy, and robust performance in detecting and monitoring vehicle speeds. It overcomes inefficiencies and inaccuracies of traditional radar systems by leveraging cutting-edge computer vision technology. Check out our blog on [speed estimation using YOLO11](https://www.ultralytics.com/blog/ultralytics-yolov8-for-speed-estimation-in-computer-vision-projects) for more insights and practical examples. -### How do I set effective measurable objectives for my computer vision project with Ultralytics YOLOv8? +### How do I set effective measurable objectives for my computer vision project with Ultralytics YOLO11? 
Set effective and measurable objectives using the SMART criteria: diff --git a/docs/en/guides/distance-calculation.md b/docs/en/guides/distance-calculation.md index 443b208b70..b0b12f919b 100644 --- a/docs/en/guides/distance-calculation.md +++ b/docs/en/guides/distance-calculation.md @@ -1,14 +1,14 @@ --- comments: true -description: Learn how to calculate distances between objects using Ultralytics YOLOv8 for accurate spatial positioning and scene understanding. -keywords: Ultralytics, YOLOv8, distance calculation, computer vision, object tracking, spatial positioning +description: Learn how to calculate distances between objects using Ultralytics YOLO11 for accurate spatial positioning and scene understanding. +keywords: Ultralytics, YOLO11, distance calculation, computer vision, object tracking, spatial positioning --- -# Distance Calculation using Ultralytics YOLOv8 +# Distance Calculation using Ultralytics YOLO11 ## What is Distance Calculation? -Measuring the gap between two objects is known as distance calculation within a specified space. In the case of [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics), the [bounding box](https://www.ultralytics.com/glossary/bounding-box) centroid is employed to calculate the distance for bounding boxes highlighted by the user. +Measuring the gap between two objects is known as distance calculation within a specified space. In the case of [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics), the [bounding box](https://www.ultralytics.com/glossary/bounding-box) centroid is employed to calculate the distance for bounding boxes highlighted by the user.
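To ground the idea, the measurement reduces to the Euclidean distance between the two selected box centroids, optionally scaled from pixels to real-world units. A minimal standalone sketch (the `pixels_per_meter` factor is an illustrative assumption, not a documented argument of the solution):

```python
import math


def centroid(box):
    """Return the (x, y) center of an (x1, y1, x2, y2) bounding box."""
    x1, y1, x2, y2 = box
    return (x1 + x2) / 2, (y1 + y2) / 2


def centroid_distance(box1, box2, pixels_per_meter=10.0):
    """Euclidean distance between two box centroids, converted from pixels to meters."""
    (ax, ay), (bx, by) = centroid(box1), centroid(box2)
    return math.hypot(bx - ax, by - ay) / pixels_per_meter


# Two user-selected detections in xyxy pixel coordinates
print(f"{centroid_distance((100, 100, 200, 300), (400, 150, 520, 380)):.2f} m")  # 31.67 m
```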


@@ -18,14 +18,14 @@ Measuring the gap between two objects is known as distance calculation within a allowfullscreen>
- Watch: Distance Calculation using Ultralytics YOLOv8 + Watch: Distance Calculation using Ultralytics YOLO11

## Visuals -| Distance Calculation using Ultralytics YOLOv8 | +| Distance Calculation using Ultralytics YOLO11 | | :---------------------------------------------------------------------------------------------------------------------------: | -| ![Ultralytics YOLOv8 Distance Calculation](https://github.com/ultralytics/docs/releases/download/0/distance-calculation.avif) | +| ![Ultralytics YOLO11 Distance Calculation](https://github.com/ultralytics/docs/releases/download/0/distance-calculation.avif) | ## Advantages of Distance Calculation? @@ -36,7 +36,7 @@ Measuring the gap between two objects is known as distance calculation within a - Click on any two bounding boxes with Left Mouse click for distance calculation -!!! example "Distance Calculation using YOLOv8 Example" +!!! example "Distance Calculation using YOLO11 Example" === "Video Stream" @@ -45,7 +45,7 @@ Measuring the gap between two objects is known as distance calculation within a from ultralytics import YOLO, solutions - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") names = model.model.names cap = cv2.VideoCapture("path/to/video/file.mp4") @@ -98,29 +98,29 @@ Measuring the gap between two objects is known as distance calculation within a ## FAQ -### How do I calculate distances between objects using Ultralytics YOLOv8? +### How do I calculate distances between objects using Ultralytics YOLO11? -To calculate distances between objects using [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics), you need to identify the bounding box centroids of the detected objects. This process involves initializing the `DistanceCalculation` class from Ultralytics' `solutions` module and using the model's tracking outputs to calculate the distances. You can refer to the implementation in the [distance calculation example](#distance-calculation-using-ultralytics-yolov8). +To calculate distances between objects using [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics), you need to identify the bounding box centroids of the detected objects. This process involves initializing the `DistanceCalculation` class from Ultralytics' `solutions` module and using the model's tracking outputs to calculate the distances. You can refer to the implementation in the [distance calculation example](#distance-calculation-using-ultralytics-yolo11). -### What are the advantages of using distance calculation with Ultralytics YOLOv8? +### What are the advantages of using distance calculation with Ultralytics YOLO11? -Using distance calculation with Ultralytics YOLOv8 offers several advantages: +Using distance calculation with Ultralytics YOLO11 offers several advantages: - **Localization Precision:** Provides accurate spatial positioning for objects. - **Size Estimation:** Helps estimate physical sizes, contributing to better contextual understanding. - **Scene Understanding:** Enhances 3D scene comprehension, aiding improved decision-making in applications like autonomous driving and surveillance. -### Can I perform distance calculation in real-time video streams with Ultralytics YOLOv8? +### Can I perform distance calculation in real-time video streams with Ultralytics YOLO11? -Yes, you can perform distance calculation in real-time video streams with Ultralytics YOLOv8. 
The process involves capturing video frames using [OpenCV](https://www.ultralytics.com/glossary/opencv), running YOLOv8 [object detection](https://www.ultralytics.com/glossary/object-detection), and using the `DistanceCalculation` class to calculate distances between objects in successive frames. For a detailed implementation, see the [video stream example](#distance-calculation-using-ultralytics-yolov8). +Yes, you can perform distance calculation in real-time video streams with Ultralytics YOLO11. The process involves capturing video frames using [OpenCV](https://www.ultralytics.com/glossary/opencv), running YOLO11 [object detection](https://www.ultralytics.com/glossary/object-detection), and using the `DistanceCalculation` class to calculate distances between objects in successive frames. For a detailed implementation, see the [video stream example](#distance-calculation-using-ultralytics-yolo11). -### How do I delete points drawn during distance calculation using Ultralytics YOLOv8? +### How do I delete points drawn during distance calculation using Ultralytics YOLO11? -To delete points drawn during distance calculation with Ultralytics YOLOv8, you can use a right mouse click. This action will clear all the points you have drawn. For more details, refer to the note section under the [distance calculation example](#distance-calculation-using-ultralytics-yolov8). +To delete points drawn during distance calculation with Ultralytics YOLO11, you can use a right mouse click. This action will clear all the points you have drawn. For more details, refer to the note section under the [distance calculation example](#distance-calculation-using-ultralytics-yolo11). -### What are the key arguments for initializing the DistanceCalculation class in Ultralytics YOLOv8? +### What are the key arguments for initializing the DistanceCalculation class in Ultralytics YOLO11? -The key arguments for initializing the `DistanceCalculation` class in Ultralytics YOLOv8 include: +The key arguments for initializing the `DistanceCalculation` class in Ultralytics YOLO11 include: - `names`: Dictionary mapping class indices to class names. - `view_img`: Flag to indicate if the video stream should be displayed. diff --git a/docs/en/guides/docker-quickstart.md b/docs/en/guides/docker-quickstart.md index 3ee48946c9..8881ffb35b 100644 --- a/docs/en/guides/docker-quickstart.md +++ b/docs/en/guides/docker-quickstart.md @@ -197,10 +197,10 @@ Setup and configuration of an X11 or Wayland display server is outside the scope ### Using Docker with a GUI -Now you can display graphical applications inside your Docker container. For example, you can run the following [CLI command](../usage/cli.md) to visualize the [predictions](../modes/predict.md) from a [YOLOv8 model](../models/yolov8.md): +Now you can display graphical applications inside your Docker container. For example, you can run the following [CLI command](../usage/cli.md) to visualize the [predictions](../modes/predict.md) from a [YOLO11 model](../models/yolo11.md): ```bash -yolo predict model=yolov8n.pt show=True +yolo predict model=yolo11n.pt show=True ``` ??? info "Testing" diff --git a/docs/en/guides/heatmaps.md b/docs/en/guides/heatmaps.md index d2ebd4b14b..7d5aad6c2b 100644 --- a/docs/en/guides/heatmaps.md +++ b/docs/en/guides/heatmaps.md @@ -1,14 +1,14 @@ --- comments: true -description: Transform complex data into insightful heatmaps using Ultralytics YOLOv8. Discover patterns, trends, and anomalies with vibrant visualizations. 
-keywords: Ultralytics, YOLOv8, heatmaps, data visualization, data analysis, complex data, patterns, trends, anomalies +description: Transform complex data into insightful heatmaps using Ultralytics YOLO11. Discover patterns, trends, and anomalies with vibrant visualizations. +keywords: Ultralytics, YOLO11, heatmaps, data visualization, data analysis, complex data, patterns, trends, anomalies --- -# Advanced [Data Visualization](https://www.ultralytics.com/glossary/data-visualization): Heatmaps using Ultralytics YOLOv8 🚀 +# Advanced [Data Visualization](https://www.ultralytics.com/glossary/data-visualization): Heatmaps using Ultralytics YOLO11 🚀 ## Introduction to Heatmaps -A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) transforms complex data into a vibrant, color-coded matrix. This visual tool employs a spectrum of colors to represent varying data values, where warmer hues indicate higher intensities and cooler tones signify lower values. Heatmaps excel in visualizing intricate data patterns, correlations, and anomalies, offering an accessible and engaging approach to data interpretation across diverse domains. +A heatmap generated with [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics/) transforms complex data into a vibrant, color-coded matrix. This visual tool employs a spectrum of colors to represent varying data values, where warmer hues indicate higher intensities and cooler tones signify lower values. Heatmaps excel in visualizing intricate data patterns, correlations, and anomalies, offering an accessible and engaging approach to data interpretation across diverse domains.
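As a concrete illustration of that color coding, the sketch below turns an accumulated detection-count grid into a colored image with OpenCV. The accumulator values are invented for the example; this is not the `solutions.Heatmap` implementation:

```python
import cv2
import numpy as np

# Hypothetical accumulator: per-pixel detection counts on a 480x640 frame grid
accumulator = np.zeros((480, 640), dtype=np.float32)
accumulator[200:280, 300:420] += 5.0  # simulate repeated detections in one region

# Normalize counts to 0-255, then map values to colors: warm hues mark hot cells
norm = cv2.normalize(accumulator, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
heatmap = cv2.applyColorMap(norm, cv2.COLORMAP_JET)
cv2.imwrite("heatmap.png", heatmap)
```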


@@ -18,7 +18,7 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult allowfullscreen>
- Watch: Heatmaps using Ultralytics YOLOv8 + Watch: Heatmaps using Ultralytics YOLO11

## Why Choose Heatmaps for Data Analysis? @@ -31,15 +31,15 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult | Transportation | Retail | | :--------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------: | -| ![Ultralytics YOLOv8 Transportation Heatmap](https://github.com/ultralytics/docs/releases/download/0/ultralytics-yolov8-transportation-heatmap.avif) | ![Ultralytics YOLOv8 Retail Heatmap](https://github.com/ultralytics/docs/releases/download/0/ultralytics-yolov8-retail-heatmap.avif) | -| Ultralytics YOLOv8 Transportation Heatmap | Ultralytics YOLOv8 Retail Heatmap | +| ![Ultralytics YOLO11 Transportation Heatmap](https://github.com/ultralytics/docs/releases/download/0/ultralytics-yolov8-transportation-heatmap.avif) | ![Ultralytics YOLO11 Retail Heatmap](https://github.com/ultralytics/docs/releases/download/0/ultralytics-yolov8-retail-heatmap.avif) | +| Ultralytics YOLO11 Transportation Heatmap | Ultralytics YOLO11 Retail Heatmap | !!! tip "Heatmap Configuration" - `heatmap_alpha`: Ensure this value is within the range (0.0 - 1.0). - `decay_factor`: Used for removing heatmap after an object is no longer in the frame, its value should also be in the range (0.0 - 1.0). -!!! example "Heatmaps using Ultralytics YOLOv8 Example" +!!! example "Heatmaps using Ultralytics YOLO11 Example" === "Heatmap" @@ -48,7 +48,7 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult from ultralytics import YOLO, solutions - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("path/to/video/file.mp4") assert cap.isOpened(), "Error reading video file" w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) @@ -86,7 +86,7 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult from ultralytics import YOLO, solutions - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("path/to/video/file.mp4") assert cap.isOpened(), "Error reading video file" w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) @@ -127,7 +127,7 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult from ultralytics import YOLO, solutions - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("path/to/video/file.mp4") assert cap.isOpened(), "Error reading video file" w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) @@ -169,7 +169,7 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult from ultralytics import YOLO, solutions - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("path/to/video/file.mp4") assert cap.isOpened(), "Error reading video file" w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) @@ -211,7 +211,7 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult from ultralytics import YOLO, solutions - model = YOLO("yolov8s.pt") # YOLOv8 custom/pretrained model + model = YOLO("yolo11n.pt") # YOLO11 custom/pretrained model im0 = cv2.imread("path/to/image.png") # path to image file h, w = im0.shape[:2] # 
image height and width @@ -236,7 +236,7 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult from ultralytics import YOLO, solutions - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("path/to/video/file.mp4") assert cap.isOpened(), "Error reading video file" w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) @@ -326,20 +326,20 @@ These colormaps are commonly used for visualizing data with different color repr ## FAQ -### How does Ultralytics YOLOv8 generate heatmaps and what are their benefits? +### How does Ultralytics YOLO11 generate heatmaps and what are their benefits? -Ultralytics YOLOv8 generates heatmaps by transforming complex data into a color-coded matrix where different hues represent data intensities. Heatmaps make it easier to visualize patterns, correlations, and anomalies in the data. Warmer hues indicate higher values, while cooler tones represent lower values. The primary benefits include intuitive visualization of data distribution, efficient pattern detection, and enhanced spatial analysis for decision-making. For more details and configuration options, refer to the [Heatmap Configuration](#arguments-heatmap) section. +Ultralytics YOLO11 generates heatmaps by transforming complex data into a color-coded matrix where different hues represent data intensities. Heatmaps make it easier to visualize patterns, correlations, and anomalies in the data. Warmer hues indicate higher values, while cooler tones represent lower values. The primary benefits include intuitive visualization of data distribution, efficient pattern detection, and enhanced spatial analysis for decision-making. For more details and configuration options, refer to the [Heatmap Configuration](#arguments-heatmap) section. -### Can I use Ultralytics YOLOv8 to perform object tracking and generate a heatmap simultaneously? +### Can I use Ultralytics YOLO11 to perform object tracking and generate a heatmap simultaneously? -Yes, Ultralytics YOLOv8 supports object tracking and heatmap generation concurrently. This can be achieved through its `Heatmap` solution integrated with object tracking models. To do so, you need to initialize the heatmap object and use YOLOv8's tracking capabilities. Here's a simple example: +Yes, Ultralytics YOLO11 supports object tracking and heatmap generation concurrently. This can be achieved through its `Heatmap` solution integrated with object tracking models. To do so, you need to initialize the heatmap object and use YOLO11's tracking capabilities. Here's a simple example: ```python import cv2 from ultralytics import YOLO, solutions -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("path/to/video/file.mp4") heatmap_obj = solutions.Heatmap(colormap=cv2.COLORMAP_PARULA, view_img=True, shape="circle", names=model.names) @@ -359,11 +359,11 @@ cv2.destroyAllWindows() For further guidance, check the [Tracking Mode](../modes/track.md) page. -### What makes Ultralytics YOLOv8 heatmaps different from other data visualization tools like those from [OpenCV](https://www.ultralytics.com/glossary/opencv) or Matplotlib? +### What makes Ultralytics YOLO11 heatmaps different from other data visualization tools like those from [OpenCV](https://www.ultralytics.com/glossary/opencv) or Matplotlib? 
-Ultralytics YOLOv8 heatmaps are specifically designed for integration with its [object detection](https://www.ultralytics.com/glossary/object-detection) and tracking models, providing an end-to-end solution for real-time data analysis. Unlike generic visualization tools like OpenCV or Matplotlib, YOLOv8 heatmaps are optimized for performance and automated processing, supporting features like persistent tracking, decay factor adjustment, and real-time video overlay. For more information on YOLOv8's unique features, visit the [Ultralytics YOLOv8 Introduction](https://www.ultralytics.com/blog/introducing-ultralytics-yolov8). +Ultralytics YOLO11 heatmaps are specifically designed for integration with its [object detection](https://www.ultralytics.com/glossary/object-detection) and tracking models, providing an end-to-end solution for real-time data analysis. Unlike generic visualization tools like OpenCV or Matplotlib, YOLO11 heatmaps are optimized for performance and automated processing, supporting features like persistent tracking, decay factor adjustment, and real-time video overlay. For more information on YOLO11's unique features, visit the [Ultralytics YOLO11 Introduction](https://www.ultralytics.com/blog/introducing-ultralytics-yolov8). -### How can I visualize only specific object classes in heatmaps using Ultralytics YOLOv8? +### How can I visualize only specific object classes in heatmaps using Ultralytics YOLO11? You can visualize specific object classes by specifying the desired classes in the `track()` method of the YOLO model. For instance, if you only want to visualize cars and persons (assuming their class indices are 0 and 2), you can set the `classes` parameter accordingly. @@ -372,7 +372,7 @@ import cv2 from ultralytics import YOLO, solutions -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("path/to/video/file.mp4") heatmap_obj = solutions.Heatmap(colormap=cv2.COLORMAP_PARULA, view_img=True, shape="circle", names=model.names) @@ -391,6 +391,6 @@ cap.release() cv2.destroyAllWindows() ``` -### Why should businesses choose Ultralytics YOLOv8 for heatmap generation in data analysis? +### Why should businesses choose Ultralytics YOLO11 for heatmap generation in data analysis? -Ultralytics YOLOv8 offers seamless integration of advanced object detection and real-time heatmap generation, making it an ideal choice for businesses looking to visualize data more effectively. The key advantages include intuitive data distribution visualization, efficient pattern detection, and enhanced spatial analysis for better decision-making. Additionally, YOLOv8's cutting-edge features such as persistent tracking, customizable colormaps, and support for various export formats make it superior to other tools like [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) and OpenCV for comprehensive data analysis. Learn more about business applications at [Ultralytics Plans](https://www.ultralytics.com/plans). +Ultralytics YOLO11 offers seamless integration of advanced object detection and real-time heatmap generation, making it an ideal choice for businesses looking to visualize data more effectively. The key advantages include intuitive data distribution visualization, efficient pattern detection, and enhanced spatial analysis for better decision-making. 
Additionally, YOLO11's cutting-edge features such as persistent tracking, customizable colormaps, and support for various export formats make it superior to other tools like [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) and OpenCV for comprehensive data analysis. Learn more about business applications at [Ultralytics Plans](https://www.ultralytics.com/plans). diff --git a/docs/en/guides/hyperparameter-tuning.md b/docs/en/guides/hyperparameter-tuning.md index d715820f24..d20b610e99 100644 --- a/docs/en/guides/hyperparameter-tuning.md +++ b/docs/en/guides/hyperparameter-tuning.md @@ -23,7 +23,7 @@ Hyperparameters are high-level, structural settings for the algorithm. They are Hyperparameter Tuning Visual
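In the Ultralytics API, such settings are passed once when training starts and stay fixed for the run. A brief sketch (the specific values are arbitrary examples, not recommendations):

```python
from ultralytics import YOLO

# Hyperparameters are chosen before training and remain constant during it
model = YOLO("yolo11n.pt")
model.train(
    data="coco8.yaml",
    epochs=30,
    lr0=0.01,  # initial learning rate
    momentum=0.937,  # SGD momentum
    hsv_h=0.015,  # hue augmentation strength
)
```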

-For a full list of augmentation hyperparameters used in YOLOv8 please refer to the [configurations page](../usage/cfg.md#augmentation-settings). +For a full list of augmentation hyperparameters used in YOLO11 please refer to the [configurations page](../usage/cfg.md#augmentation-settings). ### Genetic Evolution and Mutation @@ -67,7 +67,7 @@ The process is repeated until either the set number of iterations is reached or ## Usage Example -Here's how to use the `model.tune()` method to utilize the `Tuner` class for hyperparameter tuning of YOLOv8n on COCO8 for 30 epochs with an AdamW optimizer and skipping plotting, checkpointing and validation other than on final epoch for faster Tuning. +Here's how to use the `model.tune()` method to utilize the `Tuner` class for hyperparameter tuning of YOLO11n on COCO8 for 30 epochs with an AdamW optimizer and skipping plotting, checkpointing and validation other than on final epoch for faster Tuning. !!! example @@ -77,7 +77,7 @@ Here's how to use the `model.tune()` method to utilize the `Tuner` class for hyp from ultralytics import YOLO # Initialize the YOLO model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Tune hyperparameters on COCO8 for 30 epochs model.tune(data="coco8.yaml", epochs=30, iterations=300, optimizer="AdamW", plots=False, save=False, val=False) @@ -202,7 +202,7 @@ The hyperparameter tuning process in Ultralytics YOLO is simplified yet powerful 1. [Hyperparameter Optimization in Wikipedia](https://en.wikipedia.org/wiki/Hyperparameter_optimization) 2. [YOLOv5 Hyperparameter Evolution Guide](../yolov5/tutorials/hyperparameter_evolution.md) -3. [Efficient Hyperparameter Tuning with Ray Tune and YOLOv8](../integrations/ray-tune.md) +3. [Efficient Hyperparameter Tuning with Ray Tune and YOLO11](../integrations/ray-tune.md) For deeper insights, you can explore the `Tuner` class source code and accompanying documentation. Should you have any questions, feature requests, or need further assistance, feel free to reach out to us on [GitHub](https://github.com/ultralytics/ultralytics/issues/new/choose) or [Discord](https://discord.com/invite/ultralytics). @@ -220,7 +220,7 @@ To optimize the learning rate for Ultralytics YOLO, start by setting an initial from ultralytics import YOLO # Initialize the YOLO model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Tune hyperparameters on COCO8 for 30 epochs model.tune(data="coco8.yaml", epochs=30, iterations=300, optimizer="AdamW", plots=False, save=False, val=False) @@ -228,9 +228,9 @@ To optimize the learning rate for Ultralytics YOLO, start by setting an initial For more details, check the [Ultralytics YOLO configuration page](../usage/cfg.md#augmentation-settings). -### What are the benefits of using genetic algorithms for hyperparameter tuning in YOLOv8? +### What are the benefits of using genetic algorithms for hyperparameter tuning in YOLO11? -Genetic algorithms in Ultralytics YOLOv8 provide a robust method for exploring the hyperparameter space, leading to highly optimized model performance. Key benefits include: +Genetic algorithms in Ultralytics YOLO11 provide a robust method for exploring the hyperparameter space, leading to highly optimized model performance. Key benefits include: - **Efficient Search**: Genetic algorithms like mutation can quickly explore a large set of hyperparameters. - **Avoiding Local Minima**: By introducing randomness, they help in avoiding local minima, ensuring better global optimization. 
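The mutation step behind these benefits can be pictured with a toy example: perturb a parent's hyperparameters with clipped multiplicative noise. The names, bounds, and rates below are illustrative and are not the `Tuner` internals:

```python
import random


def mutate(hyp, bounds, mutation_rate=0.8, sigma=0.2):
    """Return a child hyperparameter set; each value may receive Gaussian noise."""
    child = {}
    for k, v in hyp.items():
        if random.random() < mutation_rate:
            v *= 1 + random.gauss(0, sigma)  # multiplicative perturbation
        lo, hi = bounds[k]
        child[k] = min(max(v, lo), hi)  # clip to the allowed search range
    return child


parent = {"lr0": 0.01, "momentum": 0.937, "weight_decay": 0.0005}
bounds = {"lr0": (1e-5, 0.1), "momentum": (0.6, 0.98), "weight_decay": (0.0, 0.001)}
print(mutate(parent, bounds))  # candidate for the next tuning iteration
```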
@@ -240,7 +240,7 @@ To see how genetic algorithms can optimize hyperparameters, check out the [hyper ### How long does the hyperparameter tuning process take for Ultralytics YOLO? -The time required for hyperparameter tuning with Ultralytics YOLO largely depends on several factors such as the size of the dataset, the complexity of the model architecture, the number of iterations, and the computational resources available. For instance, tuning YOLOv8n on a dataset like COCO8 for 30 epochs might take several hours to days, depending on the hardware. +The time required for hyperparameter tuning with Ultralytics YOLO largely depends on several factors such as the size of the dataset, the complexity of the model architecture, the number of iterations, and the computational resources available. For instance, tuning YOLO11n on a dataset like COCO8 for 30 epochs might take several hours to days, depending on the hardware. To effectively manage tuning time, define a clear tuning budget beforehand ([internal section link](#preparing-for-hyperparameter-tuning)). This helps in balancing resource allocation and optimization goals. diff --git a/docs/en/guides/index.md b/docs/en/guides/index.md index 1ad70434ab..4255d346b3 100644 --- a/docs/en/guides/index.md +++ b/docs/en/guides/index.md @@ -18,7 +18,7 @@ Whether you're a beginner or an expert in [deep learning](https://www.ultralytic allowfullscreen>
- Watch: Ultralytics YOLOv8 Guides Overview + Watch: Ultralytics YOLO11 Guides Overview

## Guides @@ -30,14 +30,14 @@ Here's a compilation of in-depth guides to help you master different aspects of - [Model Deployment Options](model-deployment-options.md): Overview of YOLO [model deployment](https://www.ultralytics.com/glossary/model-deployment) formats like ONNX, OpenVINO, and TensorRT, with pros and cons for each to inform your deployment strategy. - [K-Fold Cross Validation](kfold-cross-validation.md) 🚀 NEW: Learn how to improve model generalization using K-Fold cross-validation technique. - [Hyperparameter Tuning](hyperparameter-tuning.md) 🚀 NEW: Discover how to optimize your YOLO models by fine-tuning hyperparameters using the Tuner class and genetic evolution algorithms. -- [SAHI Tiled Inference](sahi-tiled-inference.md) 🚀 NEW: Comprehensive guide on leveraging SAHI's sliced inference capabilities with YOLOv8 for object detection in high-resolution images. +- [SAHI Tiled Inference](sahi-tiled-inference.md) 🚀 NEW: Comprehensive guide on leveraging SAHI's sliced inference capabilities with YOLO11 for object detection in high-resolution images. - [AzureML Quickstart](azureml-quickstart.md) 🚀 NEW: Get up and running with Ultralytics YOLO models on Microsoft's Azure [Machine Learning](https://www.ultralytics.com/glossary/machine-learning-ml) platform. Learn how to train, deploy, and scale your object detection projects in the cloud. - [Conda Quickstart](conda-quickstart.md) 🚀 NEW: Step-by-step guide to setting up a [Conda](https://anaconda.org/conda-forge/ultralytics) environment for Ultralytics. Learn how to install and start using the Ultralytics package efficiently with Conda. - [Docker Quickstart](docker-quickstart.md) 🚀 NEW: Complete guide to setting up and using Ultralytics YOLO models with [Docker](https://hub.docker.com/r/ultralytics/ultralytics). Learn how to install Docker, manage GPU support, and run YOLO models in isolated containers for consistent development and deployment. - [Raspberry Pi](raspberry-pi.md) 🚀 NEW: Quickstart tutorial to run YOLO models to the latest Raspberry Pi hardware. - [NVIDIA Jetson](nvidia-jetson.md) 🚀 NEW: Quickstart guide for deploying YOLO models on NVIDIA Jetson devices. - [DeepStream on NVIDIA Jetson](deepstream-nvidia-jetson.md) 🚀 NEW: Quickstart guide for deploying YOLO models on NVIDIA Jetson devices using DeepStream and TensorRT. -- [Triton Inference Server Integration](triton-inference-server.md) 🚀 NEW: Dive into the integration of Ultralytics YOLOv8 with NVIDIA's Triton Inference Server for scalable and efficient deep learning inference deployments. +- [Triton Inference Server Integration](triton-inference-server.md) 🚀 NEW: Dive into the integration of Ultralytics YOLO11 with NVIDIA's Triton Inference Server for scalable and efficient deep learning inference deployments. - [YOLO Thread-Safe Inference](yolo-thread-safe-inference.md) 🚀 NEW: Guidelines for performing inference with YOLO models in a thread-safe manner. Learn the importance of thread safety and best practices to prevent race conditions and ensure consistent predictions. - [Isolating Segmentation Objects](isolating-segmentation-objects.md) 🚀 NEW: Step-by-step recipe and explanation on how to extract and/or isolate objects from images using Ultralytics Segmentation. - [Edge TPU on Raspberry Pi](coral-edge-tpu-on-raspberry-pi.md): [Google Edge TPU](https://coral.ai/products/accelerator) accelerates YOLO inference on [Raspberry Pi](https://www.raspberrypi.com/). 
@@ -46,7 +46,7 @@ Here's a compilation of in-depth guides to help you master different aspects of - [Steps of a Computer Vision Project ](steps-of-a-cv-project.md) 🚀 NEW: Learn about the key steps involved in a computer vision project, including defining goals, selecting models, preparing data, and evaluating results. - [Defining A Computer Vision Project's Goals](defining-project-goals.md) 🚀 NEW: Walk through how to effectively define clear and measurable goals for your computer vision project. Learn the importance of a well-defined problem statement and how it creates a roadmap for your project. - [Data Collection and Annotation](data-collection-and-annotation.md) 🚀 NEW: Explore the tools, techniques, and best practices for collecting and annotating data to create high-quality inputs for your computer vision models. -- [Preprocessing Annotated Data](preprocessing_annotated_data.md) 🚀 NEW: Learn about preprocessing and augmenting image data in computer vision projects using YOLOv8, including normalization, dataset augmentation, splitting, and exploratory data analysis (EDA). +- [Preprocessing Annotated Data](preprocessing_annotated_data.md) 🚀 NEW: Learn about preprocessing and augmenting image data in computer vision projects using YOLO11, including normalization, dataset augmentation, splitting, and exploratory data analysis (EDA). - [Tips for Model Training](model-training-tips.md) 🚀 NEW: Explore tips on optimizing [batch sizes](https://www.ultralytics.com/glossary/batch-size), using [mixed precision](https://www.ultralytics.com/glossary/mixed-precision), applying pre-trained weights, and more to make training your computer vision model a breeze. - [Insights on Model Evaluation and Fine-Tuning](model-evaluation-insights.md) 🚀 NEW: Gain insights into the strategies and best practices for evaluating and fine-tuning your computer vision models. Learn about the iterative process of refining models to achieve optimal results. - [A Guide on Model Testing](model-testing.md) 🚀 NEW: A thorough guide on testing your computer vision models in realistic settings. Learn how to verify accuracy, reliability, and performance in line with project goals. @@ -75,14 +75,14 @@ Training a custom object detection model with Ultralytics YOLO is straightforwar ```python from ultralytics import YOLO - model = YOLO("yolov8s.pt") # Load a pre-trained YOLO model + model = YOLO("yolo11n.pt") # Load a pre-trained YOLO model model.train(data="path/to/dataset.yaml", epochs=50) # Train on custom dataset ``` === "CLI" ```bash - yolo task=detect mode=train model=yolov8s.pt data=path/to/dataset.yaml epochs=50 + yolo task=detect mode=train model=yolo11n.pt data=path/to/dataset.yaml epochs=50 ``` For detailed dataset formatting and additional options, refer to our [Tips for Model Training](model-training-tips.md) guide. diff --git a/docs/en/guides/instance-segmentation-and-tracking.md b/docs/en/guides/instance-segmentation-and-tracking.md index 95e91a8caf..a910a21d8f 100644 --- a/docs/en/guides/instance-segmentation-and-tracking.md +++ b/docs/en/guides/instance-segmentation-and-tracking.md @@ -1,14 +1,14 @@ --- comments: true -description: Master instance segmentation and tracking with Ultralytics YOLOv8. Learn techniques for precise object identification and tracking. -keywords: instance segmentation, tracking, YOLOv8, Ultralytics, object detection, machine learning, computer vision, python +description: Master instance segmentation and tracking with Ultralytics YOLO11. 
Learn techniques for precise object identification and tracking. +keywords: instance segmentation, tracking, YOLO11, Ultralytics, object detection, machine learning, computer vision, python --- -# Instance Segmentation and Tracking using Ultralytics YOLOv8 🚀 +# Instance Segmentation and Tracking using Ultralytics YOLO11 🚀 ## What is [Instance Segmentation](https://www.ultralytics.com/glossary/instance-segmentation)? -[Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) instance segmentation involves identifying and outlining individual objects in an image, providing a detailed understanding of spatial distribution. Unlike [semantic segmentation](https://www.ultralytics.com/glossary/semantic-segmentation), it uniquely labels and precisely delineates each object, crucial for tasks like [object detection](https://www.ultralytics.com/glossary/object-detection) and medical imaging. +[Ultralytics YOLO11](https://github.com/ultralytics/ultralytics/) instance segmentation involves identifying and outlining individual objects in an image, providing a detailed understanding of spatial distribution. Unlike [semantic segmentation](https://www.ultralytics.com/glossary/semantic-segmentation), it uniquely labels and precisely delineates each object, crucial for tasks like [object detection](https://www.ultralytics.com/glossary/object-detection) and medical imaging. There are two types of instance segmentation tracking available in the Ultralytics package: @@ -24,7 +24,7 @@ There are two types of instance segmentation tracking available in the Ultralyti allowfullscreen>
- Watch: Instance Segmentation with Object Tracking using Ultralytics YOLOv8 + Watch: Instance Segmentation with Object Tracking using Ultralytics YOLO11
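Before the frame-by-frame loops in the samples below, note that the same behavior can be sketched with the high-level tracking API. A minimal sketch, with the video path as a placeholder:

```python
from ultralytics import YOLO

# Segmentation plus tracking in one call; object IDs persist across frames
model = YOLO("yolo11n-seg.pt")
results = model.track(source="path/to/video/file.mp4", show=True)
```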

## Samples @@ -44,7 +44,7 @@ There are two types of instance segmentation tracking available in the Ultralyti from ultralytics import YOLO from ultralytics.utils.plotting import Annotator, colors - model = YOLO("yolov8n-seg.pt") # segmentation model + model = YOLO("yolo11n-seg.pt") # segmentation model names = model.model.names cap = cv2.VideoCapture("path/to/video/file.mp4") w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) @@ -91,7 +91,7 @@ There are two types of instance segmentation tracking available in the Ultralyti track_history = defaultdict(lambda: []) - model = YOLO("yolov8n-seg.pt") # segmentation model + model = YOLO("yolo11n-seg.pt") # segmentation model cap = cv2.VideoCapture("path/to/video/file.mp4") w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) @@ -142,9 +142,9 @@ For any inquiries, feel free to post your questions in the [Ultralytics Issue Se ## FAQ -### How do I perform instance segmentation using Ultralytics YOLOv8? +### How do I perform instance segmentation using Ultralytics YOLO11? -To perform instance segmentation using Ultralytics YOLOv8, initialize the YOLO model with a segmentation version of YOLOv8 and process video frames through it. Here's a simplified code example: +To perform instance segmentation using Ultralytics YOLO11, initialize the YOLO model with a segmentation version of YOLO11 and process video frames through it. Here's a simplified code example: !!! example @@ -156,7 +156,7 @@ To perform instance segmentation using Ultralytics YOLOv8, initialize the YOLO m from ultralytics import YOLO from ultralytics.utils.plotting import Annotator, colors - model = YOLO("yolov8n-seg.pt") # segmentation model + model = YOLO("yolo11n-seg.pt") # segmentation model cap = cv2.VideoCapture("path/to/video/file.mp4") w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) @@ -186,17 +186,17 @@ To perform instance segmentation using Ultralytics YOLOv8, initialize the YOLO m cv2.destroyAllWindows() ``` -Learn more about instance segmentation in the [Ultralytics YOLOv8 guide](#what-is-instance-segmentation). +Learn more about instance segmentation in the [Ultralytics YOLO11 guide](#what-is-instance-segmentation). -### What is the difference between instance segmentation and object tracking in Ultralytics YOLOv8? +### What is the difference between instance segmentation and object tracking in Ultralytics YOLO11? -Instance segmentation identifies and outlines individual objects within an image, giving each object a unique label and mask. Object tracking extends this by assigning consistent labels to objects across video frames, facilitating continuous tracking of the same objects over time. Learn more about the distinctions in the [Ultralytics YOLOv8 documentation](#samples). +Instance segmentation identifies and outlines individual objects within an image, giving each object a unique label and mask. Object tracking extends this by assigning consistent labels to objects across video frames, facilitating continuous tracking of the same objects over time. Learn more about the distinctions in the [Ultralytics YOLO11 documentation](#samples). -### Why should I use Ultralytics YOLOv8 for instance segmentation and tracking over other models like Mask R-CNN or Faster R-CNN? +### Why should I use Ultralytics YOLO11 for instance segmentation and tracking over other models like Mask R-CNN or Faster R-CNN? 
-Ultralytics YOLOv8 offers real-time performance, superior [accuracy](https://www.ultralytics.com/glossary/accuracy), and ease of use compared to other models like Mask R-CNN or Faster R-CNN. YOLOv8 provides a seamless integration with Ultralytics HUB, allowing users to manage models, datasets, and training pipelines efficiently. Discover more about the benefits of YOLOv8 in the [Ultralytics blog](https://www.ultralytics.com/blog/introducing-ultralytics-yolov8). +Ultralytics YOLO11 offers real-time performance, superior [accuracy](https://www.ultralytics.com/glossary/accuracy), and ease of use compared to other models like Mask R-CNN or Faster R-CNN. YOLO11 provides seamless integration with Ultralytics HUB, allowing users to manage models, datasets, and training pipelines efficiently. Discover more about the benefits of YOLO11 in the [Ultralytics blog](https://www.ultralytics.com/blog/introducing-ultralytics-yolov8). -### How can I implement object tracking using Ultralytics YOLOv8? +### How can I implement object tracking using Ultralytics YOLO11? To implement object tracking, use the `model.track` method and ensure that each object's ID is consistently assigned across frames. Below is a simple example: @@ -214,7 +214,7 @@ To implement object tracking, use the `model.track` method and ensure that each track_history = defaultdict(lambda: []) - model = YOLO("yolov8n-seg.pt") # segmentation model + model = YOLO("yolo11n-seg.pt") # segmentation model cap = cv2.VideoCapture("path/to/video/file.mp4") w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) @@ -247,6 +247,6 @@ To implement object tracking, use the `model.track` method and ensure that each Find more in the [Instance Segmentation and Tracking section](#samples). -### Are there any datasets provided by Ultralytics suitable for training YOLOv8 models for instance segmentation and tracking? +### Are there any datasets provided by Ultralytics suitable for training YOLO11 models for instance segmentation and tracking? -Yes, Ultralytics offers several datasets suitable for training YOLOv8 models, including segmentation and tracking datasets. Dataset examples, structures, and instructions for use can be found in the [Ultralytics Datasets documentation](https://docs.ultralytics.com/datasets/). +Yes, Ultralytics offers several datasets suitable for training YOLO11 models, including segmentation and tracking datasets. Dataset examples, structures, and instructions for use can be found in the [Ultralytics Datasets documentation](https://docs.ultralytics.com/datasets/). diff --git a/docs/en/guides/isolating-segmentation-objects.md b/docs/en/guides/isolating-segmentation-objects.md index 737510e984..3caad3d736 100644 --- a/docs/en/guides/isolating-segmentation-objects.md +++ b/docs/en/guides/isolating-segmentation-objects.md @@ -1,7 +1,7 @@ --- comments: true description: Learn to extract isolated objects from inference results using Ultralytics Predict Mode. Step-by-step guide for segmentation object isolation.
-keywords: Ultralytics, segmentation, object isolation, Predict Mode, YOLOv8, machine learning, object detection, binary mask, image processing +keywords: Ultralytics, segmentation, object isolation, Predict Mode, YOLO11, machine learning, object detection, binary mask, image processing --- # Isolating Segmentation Objects @@ -24,7 +24,7 @@ After performing the [Segment Task](../tasks/segment.md), it's sometimes desirab from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.pt") + model = YOLO("yolo11n-seg.pt") # Run inference results = model.predict() @@ -141,7 +141,7 @@ After performing the [Segment Task](../tasks/segment.md), it's sometimes desirab === "Black Background Pixels" - ```py + ```python # Create 3-channel mask mask3ch = cv2.cvtColor(b_mask, cv2.COLOR_GRAY2BGR) @@ -192,7 +192,7 @@ After performing the [Segment Task](../tasks/segment.md), it's sometimes desirab === "Transparent Background Pixels" - ```py + ```python # Isolate object with transparent background (when saved as PNG) isolated = np.dstack([img, b_mask]) ``` @@ -248,7 +248,7 @@ After performing the [Segment Task](../tasks/segment.md), it's sometimes desirab ??? example "Example Final Step" - ```py + ```python # Save isolated object to file _ = cv2.imwrite(f"{img_name}_{label}-{ci}.png", iso_crop) ``` @@ -267,7 +267,7 @@ import numpy as np from ultralytics import YOLO -m = YOLO("yolov8n-seg.pt") # (4)! +m = YOLO("yolo11n-seg.pt") # (4)! res = m.predict() # (3)! # Iterate detection results (5) @@ -310,16 +310,16 @@ for r in res: ## FAQ -### How do I isolate objects using Ultralytics YOLOv8 for segmentation tasks? +### How do I isolate objects using Ultralytics YOLO11 for segmentation tasks? -To isolate objects using Ultralytics YOLOv8, follow these steps: +To isolate objects using Ultralytics YOLO11, follow these steps: 1. **Load the model and run inference:** ```python from ultralytics import YOLO - model = YOLO("yolov8n-seg.pt") + model = YOLO("yolo11n-seg.pt") results = model.predict(source="path/to/your/image.jpg") ``` @@ -345,7 +345,7 @@ Refer to the guide on [Predict Mode](../modes/predict.md) and the [Segment Task] ### What options are available for saving the isolated objects after segmentation? -Ultralytics YOLOv8 offers two main options for saving isolated objects: +Ultralytics YOLO11 offers two main options for saving isolated objects: 1. **With a Black Background:** @@ -361,7 +361,7 @@ Ultralytics YOLOv8 offers two main options for saving isolated objects: For further details, visit the [Predict Mode](../modes/predict.md) section. -### How can I crop isolated objects to their bounding boxes using Ultralytics YOLOv8? +### How can I crop isolated objects to their bounding boxes using Ultralytics YOLO11? To crop isolated objects to their bounding boxes: @@ -378,9 +378,9 @@ To crop isolated objects to their bounding boxes: Learn more about bounding box results in the [Predict Mode](../modes/predict.md#boxes) documentation. -### Why should I use Ultralytics YOLOv8 for object isolation in segmentation tasks? +### Why should I use Ultralytics YOLO11 for object isolation in segmentation tasks? -Ultralytics YOLOv8 provides: +Ultralytics YOLO11 provides: - **High-speed** real-time object detection and segmentation. - **Accurate bounding box and mask generation** for precise object isolation. @@ -388,9 +388,9 @@ Ultralytics YOLOv8 provides: Explore the benefits of using YOLO in the [Segment Task documentation](../tasks/segment.md). 
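To make those capabilities concrete, here is a condensed sketch of the black-background isolation flow described in this guide; the checkpoint name and image path are placeholders:

```python
import cv2
import numpy as np

from ultralytics import YOLO

model = YOLO("yolo11n-seg.pt")  # segmentation checkpoint (illustrative)
results = model.predict("path/to/image.jpg")  # placeholder image path

for r in results:
    img = np.copy(r.orig_img)
    for ci, c in enumerate(r):
        # Rasterize the predicted mask contour into a filled binary mask
        b_mask = np.zeros(img.shape[:2], np.uint8)
        contour = c.masks.xy.pop().astype(np.int32).reshape(-1, 1, 2)
        cv2.drawContours(b_mask, [contour], -1, (255, 255, 255), cv2.FILLED)

        # Keep only the masked pixels; the rest of the image becomes black
        isolated = cv2.bitwise_and(cv2.cvtColor(b_mask, cv2.COLOR_GRAY2BGR), img)
        cv2.imwrite(f"object_{ci}.png", isolated)
```

Swapping the `bitwise_and` step for `np.dstack([img, b_mask])` yields the transparent-background variant described above instead.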
-### Can I save isolated objects including the background using Ultralytics YOLOv8? +### Can I save isolated objects including the background using Ultralytics YOLO11? -Yes, this is a built-in feature in Ultralytics YOLOv8. Use the `save_crop` argument in the `predict()` method. For example: +Yes, this is a built-in feature in Ultralytics YOLO11. Use the `save_crop` argument in the `predict()` method. For example: ```python results = model.predict(source="path/to/your/image.jpg", save_crop=True) diff --git a/docs/en/guides/model-deployment-options.md b/docs/en/guides/model-deployment-options.md index c2ecf8b649..a9efee17c9 100644 --- a/docs/en/guides/model-deployment-options.md +++ b/docs/en/guides/model-deployment-options.md @@ -1,26 +1,26 @@ --- comments: true -description: Learn about YOLOv8's diverse deployment options to maximize your model's performance. Explore PyTorch, TensorRT, OpenVINO, TF Lite, and more!. -keywords: YOLOv8, deployment options, export formats, PyTorch, TensorRT, OpenVINO, TF Lite, machine learning, model deployment +description: Learn about YOLO11's diverse deployment options to maximize your model's performance. Explore PyTorch, TensorRT, OpenVINO, TF Lite, and more! +keywords: YOLO11, deployment options, export formats, PyTorch, TensorRT, OpenVINO, TF Lite, machine learning, model deployment --- -# Understanding YOLOv8's Deployment Options +# Understanding YOLO11's Deployment Options ## Introduction -You've come a long way on your journey with YOLOv8. You've diligently collected data, meticulously annotated it, and put in the hours to train and rigorously evaluate your custom YOLOv8 model. Now, it's time to put your model to work for your specific application, use case, or project. But there's a critical decision that stands before you: how to export and deploy your model effectively. +You've come a long way on your journey with YOLO11. You've diligently collected data, meticulously annotated it, and put in the hours to train and rigorously evaluate your custom YOLO11 model. Now, it's time to put your model to work for your specific application, use case, or project. But there's a critical decision that stands before you: how to export and deploy your model effectively. -This guide walks you through YOLOv8's deployment options and the essential factors to consider to choose the right option for your project. +This guide walks you through YOLO11's deployment options and the essential factors to consider to choose the right option for your project. -## How to Select the Right Deployment Option for Your YOLOv8 Model +## How to Select the Right Deployment Option for Your YOLO11 Model -When it's time to deploy your YOLOv8 model, selecting a suitable export format is very important. As outlined in the [Ultralytics YOLOv8 Modes documentation](../modes/export.md#usage-examples), the model.export() function allows for converting your trained model into a variety of formats tailored to diverse environments and performance requirements. +When it's time to deploy your YOLO11 model, selecting a suitable export format is very important. As outlined in the [Ultralytics YOLO11 Modes documentation](../modes/export.md#usage-examples), the `model.export()` function allows for converting your trained model into a variety of formats tailored to diverse environments and performance requirements. The ideal format depends on your model's intended operational context, balancing speed, hardware constraints, and ease of integration.
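As a minimal reference for what that call looks like in practice (the checkpoint and target format here are illustrative):

```python
from ultralytics import YOLO

# Load a trained model; a pretrained checkpoint stands in for your custom weights
model = YOLO("yolo11n.pt")

# Convert it to a deployment format, e.g. ONNX
model.export(format="onnx")  # other values include "engine", "openvino", "tflite"
```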
In the following section, we'll take a closer look at each export option, understanding when to choose each one. -### YOLOv8's Deployment Options +### YOLO11's Deployment Options -Let's walk through the different YOLOv8 deployment options. For a detailed walkthrough of the export process, visit the [Ultralytics documentation page on exporting](../modes/export.md). +Let's walk through the different YOLO11 deployment options. For a detailed walkthrough of the export process, visit the [Ultralytics documentation page on exporting](../modes/export.md). #### PyTorch @@ -258,9 +258,9 @@ NCNN is a high-performance neural network inference framework optimized for the - **Hardware Acceleration**: Tailored for ARM CPUs and GPUs, with specific optimizations for these architectures. -## Comparative Analysis of YOLOv8 Deployment Options +## Comparative Analysis of YOLO11 Deployment Options -The following table provides a snapshot of the various deployment options available for YOLOv8 models, helping you to assess which may best fit your project needs based on several critical criteria. For an in-depth look at each deployment option's format, please see the [Ultralytics documentation page on export formats](../modes/export.md#export-formats). +The following table provides a snapshot of the various deployment options available for YOLO11 models, helping you to assess which may best fit your project needs based on several critical criteria. For an in-depth look at each deployment option's format, please see the [Ultralytics documentation page on export formats](../modes/export.md#export-formats). | Deployment Option | Performance Benchmarks | Compatibility and Integration | Community Support and Ecosystem | Case Studies | Maintenance and Updates | Security Considerations | Hardware Acceleration | | ----------------- | ----------------------------------------------- | ---------------------------------------------- | --------------------------------------------- | ------------------------------------------ | ------------------------------------------- | ------------------------------------------------- | ---------------------------------- | @@ -282,33 +282,33 @@ This comparative analysis gives you a high-level overview. For deployment, it's ## Community and Support -When you're getting started with YOLOv8, having a helpful community and support can make a significant impact. Here's how to connect with others who share your interests and get the assistance you need. +When you're getting started with YOLO11, having a helpful community and support can make a significant impact. Here's how to connect with others who share your interests and get the assistance you need. ### Engage with the Broader Community -- **GitHub Discussions:** The YOLOv8 repository on GitHub has a "Discussions" section where you can ask questions, report issues, and suggest improvements. +- **GitHub Discussions:** The YOLO11 repository on GitHub has a "Discussions" section where you can ask questions, report issues, and suggest improvements. - **Ultralytics Discord Server:** Ultralytics has a [Discord server](https://discord.com/invite/ultralytics) where you can interact with other users and developers. ### Official Documentation and Resources -- **Ultralytics YOLOv8 Docs:** The [official documentation](../index.md) provides a comprehensive overview of YOLOv8, along with guides on installation, usage, and troubleshooting. 
+- **Ultralytics YOLO11 Docs:** The [official documentation](../index.md) provides a comprehensive overview of YOLO11, along with guides on installation, usage, and troubleshooting. -These resources will help you tackle challenges and stay updated on the latest trends and best practices in the YOLOv8 community. +These resources will help you tackle challenges and stay updated on the latest trends and best practices in the YOLO11 community. ## Conclusion -In this guide, we've explored the different deployment options for YOLOv8. We've also discussed the important factors to consider when making your choice. These options allow you to customize your model for various environments and performance requirements, making it suitable for real-world applications. +In this guide, we've explored the different deployment options for YOLO11. We've also discussed the important factors to consider when making your choice. These options allow you to customize your model for various environments and performance requirements, making it suitable for real-world applications. -Don't forget that the YOLOv8 and Ultralytics community is a valuable source of help. Connect with other developers and experts to learn unique tips and solutions you might not find in regular documentation. Keep seeking knowledge, exploring new ideas, and sharing your experiences. +Don't forget that the YOLO11 and Ultralytics community is a valuable source of help. Connect with other developers and experts to learn unique tips and solutions you might not find in regular documentation. Keep seeking knowledge, exploring new ideas, and sharing your experiences. Happy deploying! ## FAQ -### What are the deployment options available for YOLOv8 on different hardware platforms? +### What are the deployment options available for YOLO11 on different hardware platforms? -Ultralytics YOLOv8 supports various deployment formats, each designed for specific environments and hardware platforms. Key formats include: +Ultralytics YOLO11 supports various deployment formats, each designed for specific environments and hardware platforms. Key formats include: - **PyTorch** for research and prototyping, with excellent Python integration. - **TorchScript** for production environments where Python is unavailable. @@ -318,18 +318,18 @@ Ultralytics YOLOv8 supports various deployment formats, each designed for specif Each format has unique advantages. For a detailed walkthrough, see our [export process documentation](../modes/export.md#usage-examples). -### How do I improve the inference speed of my YOLOv8 model on an Intel CPU? +### How do I improve the inference speed of my YOLO11 model on an Intel CPU? -To enhance inference speed on Intel CPUs, you can deploy your YOLOv8 model using Intel's OpenVINO toolkit. OpenVINO offers significant performance boosts by optimizing models to leverage Intel hardware efficiently. +To enhance inference speed on Intel CPUs, you can deploy your YOLO11 model using Intel's OpenVINO toolkit. OpenVINO offers significant performance boosts by optimizing models to leverage Intel hardware efficiently. -1. Convert your YOLOv8 model to the OpenVINO format using the `model.export()` function. +1. Convert your YOLO11 model to the OpenVINO format using the `model.export()` function. 2. Follow the detailed setup guide in the [Intel OpenVINO Export documentation](../integrations/openvino.md). For more insights, check out our [blog post](https://www.ultralytics.com/blog/achieve-faster-inference-speeds-ultralytics-yolov8-openvino). 
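A short sketch of those two steps, assuming default export paths (the image path is a placeholder):

```python
from ultralytics import YOLO

# 1. Export the PyTorch checkpoint to OpenVINO format
model = YOLO("yolo11n.pt")
model.export(format="openvino")  # creates a yolo11n_openvino_model/ directory

# 2. Load the exported model and run inference on Intel hardware
ov_model = YOLO("yolo11n_openvino_model/")
results = ov_model("path/to/image.jpg")  # placeholder image path
```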
-### Can I deploy YOLOv8 models on mobile devices? +### Can I deploy YOLO11 models on mobile devices? -Yes, YOLOv8 models can be deployed on mobile devices using [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) Lite (TF Lite) for both Android and iOS platforms. TF Lite is designed for mobile and embedded devices, providing efficient on-device inference. +Yes, YOLO11 models can be deployed on mobile devices using [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) Lite (TF Lite) for both Android and iOS platforms. TF Lite is designed for mobile and embedded devices, providing efficient on-device inference. !!! example @@ -349,9 +349,9 @@ Yes, YOLOv8 models can be deployed on mobile devices using [TensorFlow](https:// For more details on deploying models to mobile, refer to our [TF Lite integration guide](../integrations/tflite.md). -### What factors should I consider when choosing a deployment format for my YOLOv8 model? +### What factors should I consider when choosing a deployment format for my YOLO11 model? -When choosing a deployment format for YOLOv8, consider the following factors: +When choosing a deployment format for YOLO11, consider the following factors: - **Performance**: Some formats like TensorRT provide exceptional speeds on NVIDIA GPUs, while OpenVINO is optimized for Intel hardware. - **Compatibility**: ONNX offers broad compatibility across different platforms. @@ -360,11 +360,11 @@ When choosing a deployment format for YOLOv8, consider the following factors: For a comparative analysis, refer to our [export formats documentation](../modes/export.md#export-formats). -### How can I deploy YOLOv8 models in a web application? +### How can I deploy YOLO11 models in a web application? -To deploy YOLOv8 models in a web application, you can use TensorFlow.js (TF.js), which allows for running [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) models directly in the browser. This approach eliminates the need for backend infrastructure and provides real-time performance. +To deploy YOLO11 models in a web application, you can use TensorFlow.js (TF.js), which allows for running [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) models directly in the browser. This approach eliminates the need for backend infrastructure and provides real-time performance. -1. Export the YOLOv8 model to the TF.js format. +1. Export the YOLO11 model to the TF.js format. 2. Integrate the exported model into your web application. For step-by-step instructions, refer to our guide on [TensorFlow.js integration](../integrations/tfjs.md). diff --git a/docs/en/guides/model-deployment-practices.md b/docs/en/guides/model-deployment-practices.md index f259779ceb..603371c1fb 100644 --- a/docs/en/guides/model-deployment-practices.md +++ b/docs/en/guides/model-deployment-practices.md @@ -27,7 +27,7 @@ It's also important to follow best practices when deploying a model because depl Often times, once a model is [trained](./model-training-tips.md), [evaluated](./model-evaluation-insights.md), and [tested](./model-testing.md), it needs to be converted into specific formats to be deployed effectively in various environments, such as cloud, edge, or local devices. -With respect to YOLOv8, you can [export your model](../modes/export.md) to different formats. For example, when you need to transfer your model between different frameworks, ONNX is an excellent tool and [exporting to YOLOv8 to ONNX](../integrations/onnx.md) is easy. 
You can check out more options about integrating your model into different environments smoothly and effectively [here](../integrations/index.md). +With respect to YOLO11, you can [export your model](../modes/export.md) to different formats. For example, when you need to transfer your model between different frameworks, ONNX is an excellent tool and [exporting YOLO11 to ONNX](../integrations/onnx.md) is easy. You can check out more options about integrating your model into different environments smoothly and effectively [here](../integrations/index.md). ### Choosing a Deployment Environment @@ -94,7 +94,7 @@ Experiencing a drop in your model's accuracy after deployment can be frustrating - **Review Model Export and Conversion:** Re-export the model and make sure that the conversion process maintains the integrity of the model weights and architecture. - **Test with a Controlled Dataset:** Deploy the model in a test environment with a dataset you control and compare the results with the training phase. You can identify if the issue is with the deployment environment or the data. -When deploying YOLOv8, several factors can affect model accuracy. Converting models to formats like [TensorRT](../integrations/tensorrt.md) involves optimizations such as weight quantization and layer fusion, which can cause minor precision losses. Using FP16 (half-precision) instead of FP32 (full-precision) can speed up inference but may introduce numerical precision errors. Also, hardware constraints, like those on the [Jetson Nano](./nvidia-jetson.md), with lower CUDA core counts and reduced memory bandwidth, can impact performance. +When deploying YOLO11, several factors can affect model accuracy. Converting models to formats like [TensorRT](../integrations/tensorrt.md) involves optimizations such as weight quantization and layer fusion, which can cause minor precision losses. Using FP16 (half-precision) instead of FP32 (full-precision) can speed up inference but may introduce numerical precision errors. Also, hardware constraints, like those on the [Jetson Nano](./nvidia-jetson.md), with lower CUDA core counts and reduced memory bandwidth, can impact performance. ### Inferences Are Taking Longer Than You Expected @@ -106,7 +106,7 @@ When deploying [machine learning](https://www.ultralytics.com/glossary/machine-l - **Profile the Inference Pipeline:** Identifying bottlenecks in the inference pipeline can help pinpoint the source of delays. Use profiling tools to analyze each step of the inference process, identifying and addressing any stages that cause significant delays, such as inefficient layers or data transfer issues. - **Use Appropriate Precision:** Using higher precision than necessary can slow down inference times. Experiment with using lower precision, such as FP16 (half-precision), instead of FP32 (full-precision). While FP16 can reduce inference time, also keep in mind that it can impact model accuracy. -If you are facing this issue while deploying YOLOv8, consider that YOLOv8 offers [various model sizes](../models/yolov8.md), such as YOLOv8n (nano) for devices with lower memory capacity and YOLOv8x (extra-large) for more powerful GPUs. +If you are facing this issue while deploying YOLO11, consider that YOLO11 offers [various model sizes](../models/yolo11.md), such as YOLO11n (nano) for devices with lower memory capacity and YOLO11x (extra-large) for more powerful GPUs.
Choosing the right model variant for your hardware can help balance memory usage and processing time. Also keep in mind that the size of the input images directly impacts memory usage and processing time. Lower resolutions reduce memory usage and speed up inference, while higher resolutions improve accuracy but require more memory and processing power. @@ -132,12 +132,12 @@ Being part of a community of computer vision enthusiasts can help you solve prob ### Community Resources -- **GitHub Issues:** Explore the [YOLOv8 GitHub repository](https://github.com/ultralytics/ultralytics/issues) and use the Issues tab to ask questions, report bugs, and suggest new features. The community and maintainers are very active and ready to help. +- **GitHub Issues:** Explore the [YOLO11 GitHub repository](https://github.com/ultralytics/ultralytics/issues) and use the Issues tab to ask questions, report bugs, and suggest new features. The community and maintainers are very active and ready to help. - **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://discord.com/invite/ultralytics) to chat with other users and developers, get support, and share your experiences. ### Official Documentation -- **Ultralytics YOLOv8 Documentation:** Visit the [official YOLOv8 documentation](./index.md) for detailed guides and helpful tips on various computer vision projects. +- **Ultralytics YOLO11 Documentation:** Visit the [official YOLO11 documentation](./index.md) for detailed guides and helpful tips on various computer vision projects. Using these resources will help you solve challenges and stay up-to-date with the latest trends and practices in the computer vision community. @@ -149,22 +149,22 @@ After deploying your model, the next step would be monitoring, maintaining, and ## FAQ -### What are the best practices for deploying a machine learning model using Ultralytics YOLOv8? +### What are the best practices for deploying a machine learning model using Ultralytics YOLO11? -Deploying a machine learning model, particularly with Ultralytics YOLOv8, involves several best practices to ensure efficiency and reliability. First, choose the deployment environment that suits your needs—cloud, edge, or local. Optimize your model through techniques like [pruning, quantization, and knowledge distillation](#model-optimization-techniques) for efficient deployment in resource-constrained environments. Lastly, ensure data consistency and preprocessing steps align with the training phase to maintain performance. You can also refer to [model deployment options](./model-deployment-options.md) for more detailed guidelines. +Deploying a machine learning model, particularly with Ultralytics YOLO11, involves several best practices to ensure efficiency and reliability. First, choose the deployment environment that suits your needs—cloud, edge, or local. Optimize your model through techniques like [pruning, quantization, and knowledge distillation](#model-optimization-techniques) for efficient deployment in resource-constrained environments. Lastly, ensure data consistency and preprocessing steps align with the training phase to maintain performance. You can also refer to [model deployment options](./model-deployment-options.md) for more detailed guidelines. -### How can I troubleshoot common deployment issues with Ultralytics YOLOv8 models? +### How can I troubleshoot common deployment issues with Ultralytics YOLO11 models? Troubleshooting deployment issues can be broken down into a few key steps. 
If your model's accuracy drops after deployment, check for data consistency, validate preprocessing steps, and ensure the hardware/software environment matches what you used during training. For slow inference times, perform warm-up runs, optimize your inference engine, use asynchronous processing, and profile your inference pipeline. Refer to [troubleshooting deployment issues](#troubleshooting-deployment-issues) for a detailed guide on these best practices. -### How does Ultralytics YOLOv8 optimization enhance model performance on edge devices? +### How does Ultralytics YOLO11 optimization enhance model performance on edge devices? -Optimizing Ultralytics YOLOv8 models for edge devices involves using techniques like pruning to reduce the model size, quantization to convert weights to lower precision, and knowledge distillation to train smaller models that mimic larger ones. These techniques ensure the model runs efficiently on devices with limited computational power. Tools like [TensorFlow Lite](../integrations/tflite.md) and [NVIDIA Jetson](./nvidia-jetson.md) are particularly useful for these optimizations. Learn more about these techniques in our section on [model optimization](#model-optimization-techniques). +Optimizing Ultralytics YOLO11 models for edge devices involves using techniques like pruning to reduce the model size, quantization to convert weights to lower precision, and knowledge distillation to train smaller models that mimic larger ones. These techniques ensure the model runs efficiently on devices with limited computational power. Tools like [TensorFlow Lite](../integrations/tflite.md) and [NVIDIA Jetson](./nvidia-jetson.md) are particularly useful for these optimizations. Learn more about these techniques in our section on [model optimization](#model-optimization-techniques). -### What are the security considerations for deploying machine learning models with Ultralytics YOLOv8? +### What are the security considerations for deploying machine learning models with Ultralytics YOLO11? Security is paramount when deploying machine learning models. Ensure secure data transmission using encryption protocols like TLS. Implement robust access controls, including strong authentication and role-based access control (RBAC). Model obfuscation techniques, such as encrypting model parameters and serving models in a secure environment like a trusted execution environment (TEE), offer additional protection. For detailed practices, refer to [security considerations](#security-considerations-in-model-deployment). -### How do I choose the right deployment environment for my Ultralytics YOLOv8 model? +### How do I choose the right deployment environment for my Ultralytics YOLO11 model? -Selecting the optimal deployment environment for your Ultralytics YOLOv8 model depends on your application's specific needs. Cloud deployment offers scalability and ease of access, making it ideal for applications with high data volumes. Edge deployment is best for low-latency applications requiring real-time responses, using tools like [TensorFlow Lite](../integrations/tflite.md). Local deployment suits scenarios needing stringent data privacy and control. For a comprehensive overview of each environment, check out our section on [choosing a deployment environment](#choosing-a-deployment-environment). +Selecting the optimal deployment environment for your Ultralytics YOLO11 model depends on your application's specific needs. 
Cloud deployment offers scalability and ease of access, making it ideal for applications with high data volumes. Edge deployment is best for low-latency applications requiring real-time responses, using tools like [TensorFlow Lite](../integrations/tflite.md). Local deployment suits scenarios needing stringent data privacy and control. For a comprehensive overview of each environment, check out our section on [choosing a deployment environment](#choosing-a-deployment-environment). diff --git a/docs/en/guides/model-evaluation-insights.md b/docs/en/guides/model-evaluation-insights.md index ef9389c266..24514f4a4f 100644 --- a/docs/en/guides/model-evaluation-insights.md +++ b/docs/en/guides/model-evaluation-insights.md @@ -1,6 +1,6 @@ --- comments: true -description: Explore the most effective ways to assess and refine YOLOv8 models for better performance. Learn about evaluation metrics, fine-tuning processes, and how to customize your model for specific needs. +description: Explore the most effective ways to assess and refine YOLO11 models for better performance. Learn about evaluation metrics, fine-tuning processes, and how to customize your model for specific needs. keywords: Model Evaluation, Machine Learning Model Evaluation, Fine Tuning Machine Learning, Fine Tune Model, Evaluating Models, Model Fine Tuning, How to Fine Tune a Model --- @@ -45,23 +45,23 @@ Other mAP metrics include mAP@0.75, which uses a stricter IoU threshold of 0.75, Mean Average Precision Overview

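For reference, mAP at a fixed IoU threshold is simply the mean of the per-class average precision values, the standard definition rather than anything Ultralytics-specific:

$$
\text{mAP} = \frac{1}{N} \sum_{i=1}^{N} \text{AP}_i
$$

where $N$ is the number of classes and $\text{AP}_i$ is the average precision for class $i$.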
-## Evaluating YOLOv8 Model Performance +## Evaluating YOLO11 Model Performance -With respect to YOLOv8, you can use the [validation mode](../modes/val.md) to evaluate the model. Also, be sure to take a look at our guide that goes in-depth into [YOLOv8 performance metrics](./yolo-performance-metrics.md) and how they can be interpreted. +With respect to YOLO11, you can use the [validation mode](../modes/val.md) to evaluate the model. Also, be sure to take a look at our guide that goes in-depth into [YOLO11 performance metrics](./yolo-performance-metrics.md) and how they can be interpreted. ### Common Community Questions -When evaluating your YOLOv8 model, you might run into a few hiccups. Based on common community questions, here are some tips to help you get the most out of your YOLOv8 model: +When evaluating your YOLO11 model, you might run into a few hiccups. Based on common community questions, here are some tips to help you get the most out of your YOLO11 model: #### Handling Variable Image Sizes -Evaluating your YOLOv8 model with images of different sizes can help you understand its performance on diverse datasets. Using the `rect=true` validation parameter, YOLOv8 adjusts the network's stride for each batch based on the image sizes, allowing the model to handle rectangular images without forcing them to a single size. +Evaluating your YOLO11 model with images of different sizes can help you understand its performance on diverse datasets. Using the `rect=true` validation parameter, YOLO11 adjusts the network's stride for each batch based on the image sizes, allowing the model to handle rectangular images without forcing them to a single size. The `imgsz` validation parameter sets the maximum dimension for image resizing, which is 640 by default. You can adjust this based on your dataset's maximum dimensions and the GPU memory available. Even with `imgsz` set, `rect=true` lets the model manage varying image sizes effectively by dynamically adjusting the stride. -#### Accessing YOLOv8 Metrics +#### Accessing YOLO11 Metrics -If you want to get a deeper understanding of your YOLOv8 model's performance, you can easily access specific evaluation metrics with a few lines of Python code. The code snippet below will let you load your model, run an evaluation, and print out various metrics that show how well your model is doing. +If you want to get a deeper understanding of your YOLO11 model's performance, you can easily access specific evaluation metrics with a few lines of Python code. The code snippet below will let you load your model, run an evaluation, and print out various metrics that show how well your model is doing. !!! example "Usage" @@ -71,7 +71,7 @@ If you want to get a deeper understanding of your YOLOv8 model's performance, yo from ultralytics import YOLO # Load the model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Run the evaluation results = model.val(data="coco8.yaml") @@ -101,7 +101,7 @@ If you want to get a deeper understanding of your YOLOv8 model's performance, yo print("Recall curve:", results.box.r_curve) ``` -The results object also includes speed metrics like preprocess time, inference time, loss, and postprocess time. By analyzing these metrics, you can fine-tune and optimize your YOLOv8 model for better performance, making it more effective for your specific use case. +The results object also includes speed metrics like preprocess time, inference time, loss, and postprocess time. 
By analyzing these metrics, you can fine-tune and optimize your YOLO11 model for better performance, making it more effective for your specific use case. ## How Does Fine-Tuning Work? @@ -115,11 +115,11 @@ Fine-tuning a model means paying close attention to several vital parameters and Usually, during the initial training [epochs](https://www.ultralytics.com/glossary/epoch), the learning rate starts low and gradually increases to stabilize the training process. However, since your model has already learned some features from the previous dataset, starting with a higher learning rate right away can be more beneficial. -When evaluating your YOLOv8 model, you can set the `warmup_epochs` validation parameter to `warmup_epochs=0` to prevent the learning rate from starting too high. By following this process, the training will continue from the provided weights, adjusting to the nuances of your new data. +When evaluating your YOLO11 model, you can set the `warmup_epochs` validation parameter to `warmup_epochs=0` to prevent the learning rate from starting too high. By following this process, the training will continue from the provided weights, adjusting to the nuances of your new data. ### Image Tiling for Small Objects -Image tiling can improve detection accuracy for small objects. By dividing larger images into smaller segments, such as splitting 1280x1280 images into multiple 640x640 segments, you maintain the original resolution, and the model can learn from high-resolution fragments. When using YOLOv8, make sure to adjust your labels for these new segments correctly. +Image tiling can improve detection accuracy for small objects. By dividing larger images into smaller segments, such as splitting 1280x1280 images into multiple 640x640 segments, you maintain the original resolution, and the model can learn from high-resolution fragments. When using YOLO11, make sure to adjust your labels for these new segments correctly. ## Engage with the Community @@ -127,12 +127,12 @@ Sharing your ideas and questions with other [computer vision](https://www.ultral ### Finding Help and Support -- **GitHub Issues:** Explore the YOLOv8 GitHub repository and use the [Issues tab](https://github.com/ultralytics/ultralytics/issues) to ask questions, report bugs, and suggest features. The community and maintainers are available to assist with any issues you encounter. +- **GitHub Issues:** Explore the YOLO11 GitHub repository and use the [Issues tab](https://github.com/ultralytics/ultralytics/issues) to ask questions, report bugs, and suggest features. The community and maintainers are available to assist with any issues you encounter. - **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://discord.com/invite/ultralytics) to connect with other users and developers, get support, share knowledge, and brainstorm ideas. ### Official Documentation -- **Ultralytics YOLOv8 Documentation:** Check out the [official YOLOv8 documentation](./index.md) for comprehensive guides and valuable insights on various computer vision tasks and projects. +- **Ultralytics YOLO11 Documentation:** Check out the [official YOLO11 documentation](./index.md) for comprehensive guides and valuable insights on various computer vision tasks and projects. ## Final Thoughts @@ -140,30 +140,30 @@ Evaluating and fine-tuning your computer vision model are important steps for su ## FAQ -### What are the key metrics for evaluating YOLOv8 model performance? +### What are the key metrics for evaluating YOLO11 model performance? 
-To evaluate YOLOv8 model performance, important metrics include Confidence Score, Intersection over Union (IoU), and Mean Average Precision (mAP). Confidence Score measures the model's certainty for each detected object class. IoU evaluates how well the predicted bounding box overlaps with the ground truth. Mean Average Precision (mAP) aggregates precision scores across classes, with mAP@.5 and mAP@.5:.95 being two common types for varying IoU thresholds. Learn more about these metrics in our [YOLOv8 performance metrics guide](./yolo-performance-metrics.md). +To evaluate YOLO11 model performance, important metrics include Confidence Score, Intersection over Union (IoU), and Mean Average Precision (mAP). Confidence Score measures the model's certainty for each detected object class. IoU evaluates how well the predicted bounding box overlaps with the ground truth. Mean Average Precision (mAP) aggregates precision scores across classes, with mAP@.5 and mAP@.5:.95 being two common types for varying IoU thresholds. Learn more about these metrics in our [YOLO11 performance metrics guide](./yolo-performance-metrics.md). -### How can I fine-tune a pre-trained YOLOv8 model for my specific dataset? +### How can I fine-tune a pre-trained YOLO11 model for my specific dataset? -Fine-tuning a pre-trained YOLOv8 model involves adjusting its parameters to improve performance on a specific task or dataset. Start by evaluating your model using metrics, then set a higher initial learning rate by adjusting the `warmup_epochs` parameter to 0 for immediate stability. Use parameters like `rect=true` for handling varied image sizes effectively. For more detailed guidance, refer to our section on [fine-tuning YOLOv8 models](#how-does-fine-tuning-work). +Fine-tuning a pre-trained YOLO11 model involves adjusting its parameters to improve performance on a specific task or dataset. Start by evaluating your model using metrics, then set a higher initial learning rate by adjusting the `warmup_epochs` parameter to 0 for immediate stability. Use parameters like `rect=true` for handling varied image sizes effectively. For more detailed guidance, refer to our section on [fine-tuning YOLO11 models](#how-does-fine-tuning-work). -### How can I handle variable image sizes when evaluating my YOLOv8 model? +### How can I handle variable image sizes when evaluating my YOLO11 model? -To handle variable image sizes during evaluation, use the `rect=true` parameter in YOLOv8, which adjusts the network's stride for each batch based on image sizes. The `imgsz` parameter sets the maximum dimension for image resizing, defaulting to 640. Adjust `imgsz` to suit your dataset and GPU memory. For more details, visit our [section on handling variable image sizes](#handling-variable-image-sizes). +To handle variable image sizes during evaluation, use the `rect=true` parameter in YOLO11, which adjusts the network's stride for each batch based on image sizes. The `imgsz` parameter sets the maximum dimension for image resizing, defaulting to 640. Adjust `imgsz` to suit your dataset and GPU memory. For more details, visit our [section on handling variable image sizes](#handling-variable-image-sizes). -### What practical steps can I take to improve mean average precision for my YOLOv8 model? +### What practical steps can I take to improve mean average precision for my YOLO11 model? -Improving mean average precision (mAP) for a YOLOv8 model involves several steps: +Improving mean average precision (mAP) for a YOLO11 model involves several steps: 1. 
**Tuning Hyperparameters**: Experiment with different learning rates, [batch sizes](https://www.ultralytics.com/glossary/batch-size), and image augmentations. 2. **[Data Augmentation](https://www.ultralytics.com/glossary/data-augmentation)**: Use techniques like Mosaic and MixUp to create diverse training samples. 3. **Image Tiling**: Split larger images into smaller tiles to improve detection accuracy for small objects. Refer to our detailed guide on [model fine-tuning](#tips-for-fine-tuning-your-model) for specific strategies. -### How do I access YOLOv8 model evaluation metrics in Python? +### How do I access YOLO11 model evaluation metrics in Python? -You can access YOLOv8 model evaluation metrics using Python with the following steps: +You can access YOLO11 model evaluation metrics using Python with the following steps: !!! example "Usage" @@ -173,7 +173,7 @@ You can access YOLOv8 model evaluation metrics using Python with the following s from ultralytics import YOLO # Load the model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Run the evaluation results = model.val(data="coco8.yaml") @@ -185,4 +185,4 @@ You can access YOLOv8 model evaluation metrics using Python with the following s print("Mean recall:", results.box.mr) ``` -Analyzing these metrics helps fine-tune and optimize your YOLOv8 model. For a deeper dive, check out our guide on [YOLOv8 metrics](../modes/val.md). +Analyzing these metrics helps fine-tune and optimize your YOLO11 model. For a deeper dive, check out our guide on [YOLO11 metrics](../modes/val.md). diff --git a/docs/en/guides/model-monitoring-and-maintenance.md b/docs/en/guides/model-monitoring-and-maintenance.md index 2aedc8e3a3..1f4f8ed855 100644 --- a/docs/en/guides/model-monitoring-and-maintenance.md +++ b/docs/en/guides/model-monitoring-and-maintenance.md @@ -123,12 +123,12 @@ Joining a community of computer vision enthusiasts can help you solve problems a ### Community Resources -- **GitHub Issues:** Check out the [YOLOv8 GitHub repository](https://github.com/ultralytics/ultralytics/issues) and use the Issues tab to ask questions, report bugs, and suggest new features. The community and maintainers are highly active and supportive. +- **GitHub Issues:** Check out the [YOLO11 GitHub repository](https://github.com/ultralytics/ultralytics/issues) and use the Issues tab to ask questions, report bugs, and suggest new features. The community and maintainers are highly active and supportive. - **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://discord.com/invite/ultralytics) to chat with other users and developers, get support, and share your experiences. ### Official Documentation -- **Ultralytics YOLOv8 Documentation:** Visit the [official YOLOv8 documentation](./index.md) for detailed guides and helpful tips on various computer vision projects. +- **Ultralytics YOLO11 Documentation:** Visit the [official YOLO11 documentation](./index.md) for detailed guides and helpful tips on various computer vision projects. Using these resources will help you solve challenges and stay up-to-date with the latest trends and practices in the computer vision community. diff --git a/docs/en/guides/model-testing.md b/docs/en/guides/model-testing.md index 8d32467955..b8bcb91353 100644 --- a/docs/en/guides/model-testing.md +++ b/docs/en/guides/model-testing.md @@ -44,22 +44,22 @@ Next, the testing results can be analyzed: - **Error Analysis:** Perform a thorough error analysis to understand the types of errors (e.g., false positives vs. 
false negatives) and their potential causes. - **Bias and Fairness:** Check for any biases in the model's predictions. Ensure that the model performs equally well across different subsets of the data, especially if it includes sensitive attributes like race, gender, or age. -## Testing Your YOLOv8 Model +## Testing Your YOLO11 Model -To test your YOLOv8 model, you can use the validation mode. It's a straightforward way to understand the model's strengths and areas that need improvement. Also, you'll need to format your test dataset correctly for YOLOv8. For more details on how to use the validation mode, check out the [Model Validation](../modes/val.md) docs page. +To test your YOLO11 model, you can use the validation mode. It's a straightforward way to understand the model's strengths and areas that need improvement. Also, you'll need to format your test dataset correctly for YOLO11. For more details on how to use the validation mode, check out the [Model Validation](../modes/val.md) docs page. -## Using YOLOv8 to Predict on Multiple Test Images +## Using YOLO11 to Predict on Multiple Test Images -If you want to test your trained YOLOv8 model on multiple images stored in a folder, you can easily do so in one go. Instead of using the validation mode, which is typically used to evaluate model performance on a validation set and provide detailed metrics, you might just want to see predictions on all images in your test set. For this, you can use the [prediction mode](../modes/predict.md). +If you want to test your trained YOLO11 model on multiple images stored in a folder, you can easily do so in one go. Instead of using the validation mode, which is typically used to evaluate model performance on a validation set and provide detailed metrics, you might just want to see predictions on all images in your test set. For this, you can use the [prediction mode](../modes/predict.md). ### Difference Between Validation and Prediction Modes - **[Validation Mode](../modes/val.md):** Used to evaluate the model's performance by comparing predictions against known labels (ground truth). It provides detailed metrics such as accuracy, precision, recall, and F1 score. - **[Prediction Mode](../modes/predict.md):** Used to run the model on new, unseen data to generate predictions. It does not provide detailed performance metrics but allows you to see how the model performs on real-world images. -## Running YOLOv8 Predictions Without Custom Training +## Running YOLO11 Predictions Without Custom Training -If you are interested in testing the basic YOLOv8 model to understand whether it can be used for your application without custom training, you can use the prediction mode. While the model is pre-trained on datasets like COCO, running predictions on your own dataset can give you a quick sense of how well it might perform in your specific context. +If you are interested in testing the basic YOLO11 model to understand whether it can be used for your application without custom training, you can use the prediction mode. While the model is pre-trained on datasets like COCO, running predictions on your own dataset can give you a quick sense of how well it might perform in your specific context. 
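A minimal sketch of that workflow (the folder path is a placeholder; `save=True` writes annotated copies of the input images):

```python
from ultralytics import YOLO

# COCO-pretrained checkpoint, no custom training required
model = YOLO("yolo11n.pt")

# Prediction mode over a whole folder of test images
results = model.predict(source="path/to/test_images/", save=True)
```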
## Overfitting and [Underfitting](https://www.ultralytics.com/glossary/underfitting) in [Machine Learning](https://www.ultralytics.com/glossary/machine-learning-ml) @@ -128,12 +128,12 @@ Becoming part of a community of computer vision enthusiasts can aid in solving p ### Community Resources -- **GitHub Issues:** Explore the [YOLOv8 GitHub repository](https://github.com/ultralytics/ultralytics/issues) and use the Issues tab to ask questions, report bugs, and suggest new features. The community and maintainers are very active and ready to help. +- **GitHub Issues:** Explore the [YOLO11 GitHub repository](https://github.com/ultralytics/ultralytics/issues) and use the Issues tab to ask questions, report bugs, and suggest new features. The community and maintainers are very active and ready to help. - **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://discord.com/invite/ultralytics) to chat with other users and developers, get support, and share your experiences. ### Official Documentation -- **Ultralytics YOLOv8 Documentation:** Check out the [official YOLOv8 documentation](./index.md) for detailed guides and helpful tips on various computer vision projects. +- **Ultralytics YOLO11 Documentation:** Check out the [official YOLO11 documentation](./index.md) for detailed guides and helpful tips on various computer vision projects. These resources will help you navigate challenges and remain updated on the latest trends and practices within the computer vision community. @@ -147,9 +147,9 @@ Building trustworthy computer vision models relies on rigorous model testing. By Model evaluation and model testing are distinct steps in a computer vision project. Model evaluation involves using a labeled dataset to compute metrics such as [accuracy](https://www.ultralytics.com/glossary/accuracy), precision, recall, and [F1 score](https://www.ultralytics.com/glossary/f1-score), providing insights into the model's performance with a controlled dataset. Model testing, on the other hand, assesses the model's performance in real-world scenarios by applying it to new, unseen data, ensuring the model's learned behavior aligns with expectations outside the evaluation environment. For a detailed guide, refer to the [steps in a computer vision project](./steps-of-a-cv-project.md). -### How can I test my Ultralytics YOLOv8 model on multiple images? +### How can I test my Ultralytics YOLO11 model on multiple images? -To test your Ultralytics YOLOv8 model on multiple images, you can use the [prediction mode](../modes/predict.md). This mode allows you to run the model on new, unseen data to generate predictions without providing detailed metrics. This is ideal for real-world performance testing on larger image sets stored in a folder. For evaluating performance metrics, use the [validation mode](../modes/val.md) instead. +To test your Ultralytics YOLO11 model on multiple images, you can use the [prediction mode](../modes/predict.md). This mode allows you to run the model on new, unseen data to generate predictions without providing detailed metrics. This is ideal for real-world performance testing on larger image sets stored in a folder. For evaluating performance metrics, use the [validation mode](../modes/val.md) instead. ### What should I do if my computer vision model shows signs of overfitting or underfitting? @@ -195,6 +195,6 @@ Post-testing, if the model performance meets the project goals, proceed with dep Gain insights from the [Model Testing Vs. 
Model Evaluation](#model-testing-vs-model-evaluation) section to refine and enhance model effectiveness in real-world applications. -### How do I run YOLOv8 predictions without custom training? +### How do I run YOLO11 predictions without custom training? -You can run predictions using the pre-trained YOLOv8 model on your dataset to see if it suits your application needs. Utilize the [prediction mode](../modes/predict.md) to get a quick sense of performance results without diving into custom training. +You can run predictions using the pre-trained YOLO11 model on your dataset to see if it suits your application needs. Utilize the [prediction mode](../modes/predict.md) to get a quick sense of performance results without diving into custom training. diff --git a/docs/en/guides/model-training-tips.md b/docs/en/guides/model-training-tips.md index 725081a244..e7e3904811 100644 --- a/docs/en/guides/model-training-tips.md +++ b/docs/en/guides/model-training-tips.md @@ -46,25 +46,25 @@ There are a few different aspects to think about when you are planning on using When training models on large datasets, efficiently utilizing your GPU is key. Batch size is an important factor. It is the number of data samples that a machine learning model processes in a single training iteration. Using the maximum batch size supported by your GPU, you can fully take advantage of its capabilities and reduce the time model training takes. However, you want to avoid running out of GPU memory. If you encounter memory errors, reduce the batch size incrementally until the model trains smoothly. -With respect to YOLOv8, you can set the `batch_size` parameter in the [training configuration](../modes/train.md) to match your GPU capacity. Also, setting `batch=-1` in your training script will automatically determine the [batch size](https://www.ultralytics.com/glossary/batch-size) that can be efficiently processed based on your device's capabilities. By fine-tuning the batch size, you can make the most of your GPU resources and improve the overall training process. +With respect to YOLO11, you can set the `batch_size` parameter in the [training configuration](../modes/train.md) to match your GPU capacity. Also, setting `batch=-1` in your training script will automatically determine the [batch size](https://www.ultralytics.com/glossary/batch-size) that can be efficiently processed based on your device's capabilities. By fine-tuning the batch size, you can make the most of your GPU resources and improve the overall training process. ### Subset Training Subset training is a smart strategy that involves training your model on a smaller set of data that represents the larger dataset. It can save time and resources, especially during initial model development and testing. If you are running short on time or experimenting with different model configurations, subset training is a good option. -When it comes to YOLOv8, you can easily implement subset training by using the `fraction` parameter. This parameter lets you specify what fraction of your dataset to use for training. For example, setting `fraction=0.1` will train your model on 10% of the data. You can use this technique for quick iterations and tuning your model before committing to training a model using a full dataset. Subset training helps you make rapid progress and identify potential issues early on. +When it comes to YOLO11, you can easily implement subset training by using the `fraction` parameter. 
This parameter lets you specify what fraction of your dataset to use for training. For example, setting `fraction=0.1` will train your model on 10% of the data. You can use this technique for quick iterations and tuning your model before committing to training a model using a full dataset. Subset training helps you make rapid progress and identify potential issues early on. ### Multi-scale Training Multiscale training is a technique that improves your model's ability to generalize by training it on images of varying sizes. Your model can learn to detect objects at different scales and distances and become more robust. -For example, when you train YOLOv8, you can enable multiscale training by setting the `scale` parameter. This parameter adjusts the size of training images by a specified factor, simulating objects at different distances. For example, setting `scale=0.5` will reduce the image size by half, while `scale=2.0` will double it. Configuring this parameter allows your model to experience a variety of image scales and improve its detection capabilities across different object sizes and scenarios. +For example, when you train YOLO11, you can enable multiscale training by setting the `scale` parameter. This parameter adjusts the size of training images by a specified factor, simulating objects at different distances. For example, setting `scale=0.5` will reduce the image size by half, while `scale=2.0` will double it. Configuring this parameter allows your model to experience a variety of image scales and improve its detection capabilities across different object sizes and scenarios. ### Caching Caching is an important technique to improve the efficiency of training machine learning models. By storing preprocessed images in memory, caching reduces the time the GPU spends waiting for data to be loaded from the disk. The model can continuously receive data without delays caused by disk I/O operations. -Caching can be controlled when training YOLOv8 using the `cache` parameter: +Caching can be controlled when training YOLO11 using the `cache` parameter: - _`cache=True`_: Stores dataset images in RAM, providing the fastest access speed but at the cost of increased memory usage. - _`cache='disk'`_: Stores the images on disk, slower than RAM but faster than loading fresh data each time. @@ -80,19 +80,19 @@ Mixed precision training uses both 16-bit (FP16) and 32-bit (FP32) floating-poin To implement mixed precision training, you'll need to modify your training scripts and ensure your hardware (like GPUs) supports it. Many modern [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) frameworks, such as [Tensorflow](https://www.ultralytics.com/glossary/tensorflow), offer built-in support for mixed precision. -Mixed precision training is straightforward when working with YOLOv8. You can use the `amp` flag in your training configuration. Setting `amp=True` enables Automatic Mixed Precision (AMP) training. Mixed precision training is a simple yet effective way to optimize your model training process. +Mixed precision training is straightforward when working with YOLO11. You can use the `amp` flag in your training configuration. Setting `amp=True` enables Automatic Mixed Precision (AMP) training. Mixed precision training is a simple yet effective way to optimize your model training process. ### Pre-trained Weights Using pretrained weights is a smart way to speed up your model's training process. 
Pretrained weights come from models already trained on large datasets, giving your model a head start. [Transfer learning](https://www.ultralytics.com/glossary/transfer-learning) adapts pretrained models to new, related tasks. Fine-tuning a pre-trained model involves starting with these weights and then continuing training on your specific dataset. This method of training results in faster training times and often better performance because the model starts with a solid understanding of basic features. -The `pretrained` parameter makes transfer learning easy with YOLOv8. Setting `pretrained=True` will use default pre-trained weights, or you can specify a path to a custom pre-trained model. Using pre-trained weights and transfer learning effectively boosts your model's capabilities and reduces training costs. +The `pretrained` parameter makes transfer learning easy with YOLO11. Setting `pretrained=True` will use default pre-trained weights, or you can specify a path to a custom pre-trained model. Using pre-trained weights and transfer learning effectively boosts your model's capabilities and reduces training costs. ### Other Techniques to Consider When Handling a Large Dataset There are a couple of other techniques to consider when handling a large dataset: -- **[Learning Rate](https://www.ultralytics.com/glossary/learning-rate) Schedulers**: Implementing learning rate schedulers dynamically adjusts the learning rate during training. A well-tuned learning rate can prevent the model from overshooting minima and improve stability. When training YOLOv8, the `lrf` parameter helps manage learning rate scheduling by setting the final learning rate as a fraction of the initial rate. +- **[Learning Rate](https://www.ultralytics.com/glossary/learning-rate) Schedulers**: Implementing learning rate schedulers dynamically adjusts the learning rate during training. A well-tuned learning rate can prevent the model from overshooting minima and improve stability. When training YOLO11, the `lrf` parameter helps manage learning rate scheduling by setting the final learning rate as a fraction of the initial rate. - **Distributed Training**: For handling large datasets, distributed training can be a game-changer. You can reduce the training time by spreading the training workload across multiple GPUs or machines. ## The Number of Epochs To Train For @@ -101,7 +101,7 @@ When training a model, an epoch refers to one complete pass through the entire t A common question that comes up is how to determine the number of epochs to train the model for. A good starting point is 300 epochs. If the model overfits early, you can reduce the number of epochs. If [overfitting](https://www.ultralytics.com/glossary/overfitting) does not occur after 300 epochs, you can extend the training to 600, 1200, or more epochs. -However, the ideal number of epochs can vary based on your dataset's size and project goals. Larger datasets might require more epochs for the model to learn effectively, while smaller datasets might need fewer epochs to avoid overfitting. With respect to YOLOv8, you can set the `epochs` parameter in your training script. +However, the ideal number of epochs can vary based on your dataset's size and project goals. Larger datasets might require more epochs for the model to learn effectively, while smaller datasets might need fewer epochs to avoid overfitting. With respect to YOLO11, you can set the `epochs` parameter in your training script. 
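The large-dataset techniques above all map onto arguments of a single `train()` call. Below is a minimal sketch that combines them, assuming the small `coco8.yaml` demo dataset; the specific values are illustrative starting points, not recommendations.

```python
from ultralytics import YOLO

# Minimal sketch combining the large-dataset training options discussed above.
# coco8.yaml is a small demo dataset; swap in your own dataset YAML.
model = YOLO("yolo11n.pt")  # start from pretrained weights
model.train(
    data="coco8.yaml",
    epochs=300,  # common starting point; reduce if overfitting appears early
    batch=-1,  # automatically pick a batch size that fits your GPU memory
    fraction=0.1,  # subset training: use 10% of the data for quick iteration
    scale=0.5,  # multiscale augmentation factor
    cache=True,  # cache images in RAM to cut disk I/O waits (or cache="disk")
    amp=True,  # Automatic Mixed Precision (FP16/FP32)
    lrf=0.01,  # final learning rate as a fraction of the initial rate
)
```

Any of these arguments can be omitted to fall back to the defaults described in the [training configuration](../modes/train.md).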
## Early Stopping @@ -113,7 +113,7 @@ The process involves setting a patience parameter that determines how many [epoc Early Stopping Overview

-For YOLOv8, you can enable early stopping by setting the patience parameter in your training configuration. For example, `patience=5` means training will stop if there's no improvement in validation metrics for 5 consecutive epochs. Using this method ensures the training process remains efficient and achieves optimal performance without excessive computation. +For YOLO11, you can enable early stopping by setting the patience parameter in your training configuration. For example, `patience=5` means training will stop if there's no improvement in validation metrics for 5 consecutive epochs. Using this method ensures the training process remains efficient and achieves optimal performance without excessive computation. ## Choosing Between Cloud and Local Training @@ -143,13 +143,13 @@ Different optimizers have various strengths and weaknesses. Let's take a glimpse - Combines the benefits of both SGD with momentum and RMSProp. - Adjusts the learning rate for each parameter based on estimates of the first and second moments of the gradients. - Well-suited for noisy data and sparse gradients. - - Efficient and generally requires less tuning, making it a recommended optimizer for YOLOv8. + - Efficient and generally requires less tuning, making it a recommended optimizer for YOLO11. - **RMSProp (Root Mean Square Propagation)**: - Adjusts the learning rate for each parameter by dividing the gradient by a running average of the magnitudes of recent gradients. - Helps in handling the vanishing gradient problem and is effective for [recurrent neural networks](https://www.ultralytics.com/glossary/recurrent-neural-network-rnn). -For YOLOv8, the `optimizer` parameter lets you choose from various optimizers, including SGD, Adam, AdamW, NAdam, RAdam, and RMSProp, or you can set it to `auto` for automatic selection based on model configuration. +For YOLO11, the `optimizer` parameter lets you choose from various optimizers, including SGD, Adam, AdamW, NAdam, RAdam, and RMSProp, or you can set it to `auto` for automatic selection based on model configuration. ## Connecting with the Community @@ -157,12 +157,12 @@ Being part of a community of computer vision enthusiasts can help you solve prob ### Community Resources -- **GitHub Issues:** Visit the [YOLOv8 GitHub repository](https://github.com/ultralytics/ultralytics/issues) and use the Issues tab to ask questions, report bugs, and suggest new features. The community and maintainers are very active and ready to help. +- **GitHub Issues:** Visit the [YOLO11 GitHub repository](https://github.com/ultralytics/ultralytics/issues) and use the Issues tab to ask questions, report bugs, and suggest new features. The community and maintainers are very active and ready to help. - **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://discord.com/invite/ultralytics) to chat with other users and developers, get support, and share your experiences. ### Official Documentation -- **Ultralytics YOLOv8 Documentation:** Check out the [official YOLOv8 documentation](./index.md) for detailed guides and helpful tips on various computer vision projects. +- **Ultralytics YOLO11 Documentation:** Check out the [official YOLO11 documentation](./index.md) for detailed guides and helpful tips on various computer vision projects. Using these resources will help you solve challenges and stay up-to-date with the latest trends and practices in the computer vision community. 
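Before moving on to the FAQ, here is a short hedged sketch tying together the early-stopping and optimizer settings covered above; `coco8.yaml` and the exact values are illustrative assumptions.

```python
from ultralytics import YOLO

# Sketch: early stopping plus optimizer selection; values are illustrative.
model = YOLO("yolo11n.pt")
model.train(
    data="coco8.yaml",
    epochs=600,  # upper bound; early stopping usually ends training sooner
    patience=5,  # stop after 5 epochs without validation-metric improvement
    optimizer="auto",  # or "SGD", "Adam", "AdamW", "NAdam", "RAdam", "RMSProp"
)
```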
@@ -174,20 +174,20 @@ Training computer vision models involves following good practices, optimizing yo ### How can I improve GPU utilization when training a large dataset with Ultralytics YOLO? -To improve GPU utilization, set the `batch_size` parameter in your training configuration to the maximum size supported by your GPU. This ensures that you make full use of the GPU's capabilities, reducing training time. If you encounter memory errors, incrementally reduce the batch size until training runs smoothly. For YOLOv8, setting `batch=-1` in your training script will automatically determine the optimal batch size for efficient processing. For further information, refer to the [training configuration](../modes/train.md). +To improve GPU utilization, set the `batch` parameter in your training configuration to the maximum size supported by your GPU. This ensures that you make full use of the GPU's capabilities, reducing training time. If you encounter memory errors, incrementally reduce the batch size until training runs smoothly. For YOLO11, setting `batch=-1` in your training script will automatically determine the optimal batch size for efficient processing. For further information, refer to the [training configuration](../modes/train.md). -### What is mixed precision training, and how do I enable it in YOLOv8? +### What is mixed precision training, and how do I enable it in YOLO11? -Mixed precision training utilizes both 16-bit (FP16) and 32-bit (FP32) floating-point types to balance computational speed and precision. This approach speeds up training and reduces memory usage without sacrificing model [accuracy](https://www.ultralytics.com/glossary/accuracy). To enable mixed precision training in YOLOv8, set the `amp` parameter to `True` in your training configuration. This activates Automatic Mixed Precision (AMP) training. For more details on this optimization technique, see the [training configuration](../modes/train.md). +Mixed precision training utilizes both 16-bit (FP16) and 32-bit (FP32) floating-point types to balance computational speed and precision. This approach speeds up training and reduces memory usage without sacrificing model [accuracy](https://www.ultralytics.com/glossary/accuracy). To enable mixed precision training in YOLO11, set the `amp` parameter to `True` in your training configuration. This activates Automatic Mixed Precision (AMP) training. For more details on this optimization technique, see the [training configuration](../modes/train.md). -### How does multiscale training enhance YOLOv8 model performance? +### How does multiscale training enhance YOLO11 model performance? -Multiscale training enhances model performance by training on images of varying sizes, allowing the model to better generalize across different scales and distances. In YOLOv8, you can enable multiscale training by setting the `scale` parameter in the training configuration. For example, `scale=0.5` reduces the image size by half, while `scale=2.0` doubles it. This technique simulates objects at different distances, making the model more robust across various scenarios. For settings and more details, check out the [training configuration](../modes/train.md). +Multiscale training enhances model performance by training on images of varying sizes, allowing the model to better generalize across different scales and distances. In YOLO11, you can enable multiscale training by setting the `scale` parameter in the training configuration.
For example, `scale=0.5` reduces the image size by half, while `scale=2.0` doubles it. This technique simulates objects at different distances, making the model more robust across various scenarios. For settings and more details, check out the [training configuration](../modes/train.md). -### How can I use pre-trained weights to speed up training in YOLOv8? +### How can I use pre-trained weights to speed up training in YOLO11? -Using pre-trained weights can significantly reduce training times and improve model performance by starting from a model that already understands basic features. In YOLOv8, you can set the `pretrained` parameter to `True` or specify a path to custom pre-trained weights in your training configuration. This approach, known as transfer learning, leverages knowledge from large datasets to adapt to your specific task. Learn more about pre-trained weights and their advantages [here](../modes/train.md). +Using pre-trained weights can significantly reduce training times and improve model performance by starting from a model that already understands basic features. In YOLO11, you can set the `pretrained` parameter to `True` or specify a path to custom pre-trained weights in your training configuration. This approach, known as transfer learning, leverages knowledge from large datasets to adapt to your specific task. Learn more about pre-trained weights and their advantages [here](../modes/train.md). -### What is the recommended number of epochs for training a model, and how do I set this in YOLOv8? +### What is the recommended number of epochs for training a model, and how do I set this in YOLO11? -The number of epochs refers to the complete passes through the training dataset during model training. A typical starting point is 300 epochs. If your model overfits early, you can reduce the number. Alternatively, if overfitting isn't observed, you might extend training to 600, 1200, or more epochs. To set this in YOLOv8, use the `epochs` parameter in your training script. For additional advice on determining the ideal number of epochs, refer to this section on [number of epochs](#the-number-of-epochs-to-train-for). +The number of epochs refers to the complete passes through the training dataset during model training. A typical starting point is 300 epochs. If your model overfits early, you can reduce the number. Alternatively, if overfitting isn't observed, you might extend training to 600, 1200, or more epochs. To set this in YOLO11, use the `epochs` parameter in your training script. For additional advice on determining the ideal number of epochs, refer to this section on [number of epochs](#the-number-of-epochs-to-train-for). diff --git a/docs/en/guides/object-blurring.md b/docs/en/guides/object-blurring.md index 315bcd76ea..2c6a3bdfc9 100644 --- a/docs/en/guides/object-blurring.md +++ b/docs/en/guides/object-blurring.md @@ -1,14 +1,14 @@ --- comments: true -description: Learn how to use Ultralytics YOLOv8 for real-time object blurring to enhance privacy and focus in your images and videos. -keywords: YOLOv8, object blurring, real-time processing, privacy protection, image manipulation, video editing, Ultralytics +description: Learn how to use Ultralytics YOLO11 for real-time object blurring to enhance privacy and focus in your images and videos. 
+keywords: YOLO11, object blurring, real-time processing, privacy protection, image manipulation, video editing, Ultralytics --- -# Object Blurring using Ultralytics YOLOv8 🚀 +# Object Blurring using Ultralytics YOLO11 🚀 ## What is Object Blurring? -Object blurring with [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) involves applying a blurring effect to specific detected objects in an image or video. This can be achieved using the YOLOv8 model capabilities to identify and manipulate objects within a given scene. +Object blurring with [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics/) involves applying a blurring effect to specific detected objects in an image or video. This can be achieved using the YOLO11 model capabilities to identify and manipulate objects within a given scene.


@@ -18,16 +18,16 @@ Object blurring with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly allowfullscreen>
- Watch: Object Blurring using Ultralytics YOLOv8 + Watch: Object Blurring using Ultralytics YOLO11

## Advantages of Object Blurring? - **Privacy Protection**: Object blurring is an effective tool for safeguarding privacy by concealing sensitive or personally identifiable information in images or videos. -- **Selective Focus**: YOLOv8 allows for selective blurring, enabling users to target specific objects, ensuring a balance between privacy and retaining relevant visual information. -- **Real-time Processing**: YOLOv8's efficiency enables object blurring in real-time, making it suitable for applications requiring on-the-fly privacy enhancements in dynamic environments. +- **Selective Focus**: YOLO11 allows for selective blurring, enabling users to target specific objects, ensuring a balance between privacy and retaining relevant visual information. +- **Real-time Processing**: YOLO11's efficiency enables object blurring in real-time, making it suitable for applications requiring on-the-fly privacy enhancements in dynamic environments. -!!! example "Object Blurring using YOLOv8 Example" +!!! example "Object Blurring using YOLO11 Example" === "Object Blurring" @@ -37,7 +37,7 @@ Object blurring with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly from ultralytics import YOLO from ultralytics.utils.plotting import Annotator, colors - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") names = model.names cap = cv2.VideoCapture("path/to/video/file.mp4") @@ -86,20 +86,20 @@ Object blurring with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly ## FAQ -### What is object blurring with Ultralytics YOLOv8? +### What is object blurring with Ultralytics YOLO11? -Object blurring with [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) involves automatically detecting and applying a blurring effect to specific objects in images or videos. This technique enhances privacy by concealing sensitive information while retaining relevant visual data. YOLOv8's real-time processing capabilities make it suitable for applications requiring immediate privacy protection and selective focus adjustments. +Object blurring with [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics/) involves automatically detecting and applying a blurring effect to specific objects in images or videos. This technique enhances privacy by concealing sensitive information while retaining relevant visual data. YOLO11's real-time processing capabilities make it suitable for applications requiring immediate privacy protection and selective focus adjustments. -### How can I implement real-time object blurring using YOLOv8? +### How can I implement real-time object blurring using YOLO11? -To implement real-time object blurring with YOLOv8, follow the provided Python example. This involves using YOLOv8 for [object detection](https://www.ultralytics.com/glossary/object-detection) and OpenCV for applying the blur effect. Here's a simplified version: +To implement real-time object blurring with YOLO11, follow the provided Python example. This involves using YOLO11 for [object detection](https://www.ultralytics.com/glossary/object-detection) and OpenCV for applying the blur effect. 
Here's a simplified version: ```python import cv2 from ultralytics import YOLO -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("path/to/video/file.mp4") while cap.isOpened(): @@ -112,7 +112,7 @@ while cap.isOpened(): obj = im0[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])] im0[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])] = cv2.blur(obj, (50, 50)) - cv2.imshow("YOLOv8 Blurring", im0) + cv2.imshow("YOLO11 Blurring", im0) if cv2.waitKey(1) & 0xFF == ord("q"): break @@ -120,9 +120,9 @@ cap.release() cv2.destroyAllWindows() ``` -### What are the benefits of using Ultralytics YOLOv8 for object blurring? +### What are the benefits of using Ultralytics YOLO11 for object blurring? -Ultralytics YOLOv8 offers several advantages for object blurring: +Ultralytics YOLO11 offers several advantages for object blurring: - **Privacy Protection**: Effectively obscure sensitive or identifiable information. - **Selective Focus**: Target specific objects for blurring, maintaining essential visual content. @@ -130,10 +130,10 @@ Ultralytics YOLOv8 offers several advantages for object blurring: For more detailed applications, check the [advantages of object blurring section](#advantages-of-object-blurring). -### Can I use Ultralytics YOLOv8 to blur faces in a video for privacy reasons? +### Can I use Ultralytics YOLO11 to blur faces in a video for privacy reasons? -Yes, Ultralytics YOLOv8 can be configured to detect and blur faces in videos to protect privacy. By training or using a pre-trained model to specifically recognize faces, the detection results can be processed with [OpenCV](https://www.ultralytics.com/glossary/opencv) to apply a blur effect. Refer to our guide on [object detection with YOLOv8](https://docs.ultralytics.com/models/yolov8/) and modify the code to target face detection. +Yes, Ultralytics YOLO11 can be configured to detect and blur faces in videos to protect privacy. By training or using a pre-trained model to specifically recognize faces, the detection results can be processed with [OpenCV](https://www.ultralytics.com/glossary/opencv) to apply a blur effect. Refer to our guide on [object detection with YOLO11](https://docs.ultralytics.com/models/yolo11/) and modify the code to target face detection. -### How does YOLOv8 compare to other object detection models like Faster R-CNN for object blurring? +### How does YOLO11 compare to other object detection models like Faster R-CNN for object blurring? -Ultralytics YOLOv8 typically outperforms models like Faster R-CNN in terms of speed, making it more suitable for real-time applications. While both models offer accurate detection, YOLOv8's architecture is optimized for rapid inference, which is critical for tasks like real-time object blurring. Learn more about the technical differences and performance metrics in our [YOLOv8 documentation](https://docs.ultralytics.com/models/yolov8/). +Ultralytics YOLO11 typically outperforms models like Faster R-CNN in terms of speed, making it more suitable for real-time applications. While both models offer accurate detection, YOLO11's architecture is optimized for rapid inference, which is critical for tasks like real-time object blurring. Learn more about the technical differences and performance metrics in our [YOLO11 documentation](https://docs.ultralytics.com/models/yolo11/).
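As a complement to the face-blurring question above, the sketch below shows the class-filtering pattern such a setup would use. Since the stock detector has no face class, it filters to the COCO `person` class (index 0) as an illustrative stand-in; a purpose-trained face model would be used the same way.

```python
import cv2

from ultralytics import YOLO

# Illustrative sketch: blur only one class (COCO "person", index 0).
# A dedicated face-detection model would follow the same pattern.
model = YOLO("yolo11n.pt")
im0 = cv2.imread("path/to/image.jpg")  # illustrative path

results = model.predict(im0, classes=[0])  # keep detections of class 0 only
for box in results[0].boxes.xyxy:
    x1, y1, x2, y2 = map(int, box)
    im0[y1:y2, x1:x2] = cv2.blur(im0[y1:y2, x1:x2], (50, 50))

cv2.imwrite("blurred.jpg", im0)
```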
diff --git a/docs/en/guides/object-counting.md b/docs/en/guides/object-counting.md index bdec66e42d..8467271b38 100644 --- a/docs/en/guides/object-counting.md +++ b/docs/en/guides/object-counting.md @@ -1,14 +1,14 @@ --- comments: true -description: Learn to accurately identify and count objects in real-time using Ultralytics YOLOv8 for applications like crowd analysis and surveillance. -keywords: object counting, YOLOv8, Ultralytics, real-time object detection, AI, deep learning, object tracking, crowd analysis, surveillance, resource optimization +description: Learn to accurately identify and count objects in real-time using Ultralytics YOLO11 for applications like crowd analysis and surveillance. +keywords: object counting, YOLO11, Ultralytics, real-time object detection, AI, deep learning, object tracking, crowd analysis, surveillance, resource optimization --- -# Object Counting using Ultralytics YOLOv8 +# Object Counting using Ultralytics YOLO11 ## What is Object Counting? -Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) involves accurate identification and counting of specific objects in videos and camera streams. YOLOv8 excels in real-time applications, providing efficient and precise object counting for various scenarios like crowd analysis and surveillance, thanks to its state-of-the-art algorithms and [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) capabilities. +Object counting with [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics/) involves accurate identification and counting of specific objects in videos and camera streams. YOLO11 excels in real-time applications, providing efficient and precise object counting for various scenarios like crowd analysis and surveillance, thanks to its state-of-the-art algorithms and [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) capabilities. @@ -19,7 +19,7 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly allowfullscreen>
- Watch: Object Counting using Ultralytics YOLOv8 + Watch: Object Counting using Ultralytics YOLO11

- Watch: Class-wise Object Counting using Ultralytics YOLOv8 + Watch: Class-wise Object Counting using Ultralytics YOLO11
@@ -43,10 +43,10 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly | Logistics | Aquaculture | | :-----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------: | -| ![Conveyor Belt Packets Counting Using Ultralytics YOLOv8](https://github.com/ultralytics/docs/releases/download/0/conveyor-belt-packets-counting.avif) | ![Fish Counting in Sea using Ultralytics YOLOv8](https://github.com/ultralytics/docs/releases/download/0/fish-counting-in-sea-using-ultralytics-yolov8.avif) | -| Conveyor Belt Packets Counting Using Ultralytics YOLOv8 | Fish Counting in Sea using Ultralytics YOLOv8 | +| ![Conveyor Belt Packets Counting Using Ultralytics YOLO11](https://github.com/ultralytics/docs/releases/download/0/conveyor-belt-packets-counting.avif) | ![Fish Counting in Sea using Ultralytics YOLO11](https://github.com/ultralytics/docs/releases/download/0/fish-counting-in-sea-using-ultralytics-yolov8.avif) | +| Conveyor Belt Packets Counting Using Ultralytics YOLO11 | Fish Counting in Sea using Ultralytics YOLO11 | -!!! example "Object Counting using YOLOv8 Example" +!!! example "Object Counting using YOLO11 Example" === "Count in Region" @@ -55,7 +55,7 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly from ultralytics import YOLO, solutions - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("path/to/video/file.mp4") assert cap.isOpened(), "Error reading video file" w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) @@ -90,6 +90,46 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly cv2.destroyAllWindows() ``` + === "OBB Object Counting" + + ```python + import cv2 + + from ultralytics import YOLO, solutions + + model = YOLO("yolo11n-obb.pt") + cap = cv2.VideoCapture("path/to/video/file.mp4") + assert cap.isOpened(), "Error reading video file" + w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) + + # Define region points + region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360)] + + # Video writer + video_writer = cv2.VideoWriter("object_counting_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h)) + + # Init Object Counter + counter = solutions.ObjectCounter( + view_img=True, + reg_pts=region_points, + names=model.names, + line_thickness=2, + ) + + while cap.isOpened(): + success, im0 = cap.read() + if not success: + print("Video frame is empty or video processing has been successfully completed.") + break + tracks = model.track(im0, persist=True, show=False) + im0 = counter.start_counting(im0, tracks) + video_writer.write(im0) + + cap.release() + video_writer.release() + cv2.destroyAllWindows() + ``` + === "Count in Polygon" ```python @@ -97,7 +137,7 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly from ultralytics import YOLO, solutions - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("path/to/video/file.mp4") assert cap.isOpened(), "Error reading video file" w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) @@ -123,7 +163,6 @@ Object counting with [Ultralytics 
YOLOv8](https://github.com/ultralytics/ultraly print("Video frame is empty or video processing has been successfully completed.") break tracks = model.track(im0, persist=True, show=False) - im0 = counter.start_counting(im0, tracks) video_writer.write(im0) @@ -139,7 +178,7 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly from ultralytics import YOLO, solutions - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("path/to/video/file.mp4") assert cap.isOpened(), "Error reading video file" w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) @@ -165,7 +204,6 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly print("Video frame is empty or video processing has been successfully completed.") break tracks = model.track(im0, persist=True, show=False) - im0 = counter.start_counting(im0, tracks) video_writer.write(im0) @@ -181,7 +219,7 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly from ultralytics import YOLO, solutions - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("path/to/video/file.mp4") assert cap.isOpened(), "Error reading video file" w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) @@ -207,7 +245,6 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly print("Video frame is empty or video processing has been successfully completed.") break tracks = model.track(im0, persist=True, show=False, classes=classes_to_count) - im0 = counter.start_counting(im0, tracks) video_writer.write(im0) @@ -240,12 +277,12 @@ Here's a table with the `ObjectCounter` arguments: ## FAQ -### How do I count objects in a video using Ultralytics YOLOv8? +### How do I count objects in a video using Ultralytics YOLO11? -To count objects in a video using Ultralytics YOLOv8, you can follow these steps: +To count objects in a video using Ultralytics YOLO11, you can follow these steps: 1. Import the necessary libraries (`cv2`, `ultralytics`). -2. Load a pretrained YOLOv8 model. +2. Load a pretrained YOLO11 model. 3. Define the counting region (e.g., a polygon, line, etc.). 4. Set up the video capture and initialize the object counter. 5. Process each frame to track objects and count them within the defined region. @@ -284,14 +321,14 @@ def count_objects_in_region(video_path, output_video_path, model_path): cv2.destroyAllWindows() -count_objects_in_region("path/to/video.mp4", "output_video.avi", "yolov8n.pt") +count_objects_in_region("path/to/video.mp4", "output_video.avi", "yolo11n.pt") ``` -Explore more configurations and options in the [Object Counting](#object-counting-using-ultralytics-yolov8) section. +Explore more configurations and options in the [Object Counting](#object-counting-using-ultralytics-yolo11) section. -### What are the advantages of using Ultralytics YOLOv8 for object counting? +### What are the advantages of using Ultralytics YOLO11 for object counting? -Using Ultralytics YOLOv8 for object counting offers several advantages: +Using Ultralytics YOLO11 for object counting offers several advantages: 1. **Resource Optimization:** It facilitates efficient resource management by providing accurate counts, helping optimize resource allocation in industries like inventory management. 2. 
**Enhanced Security:** It enhances security and surveillance by accurately tracking and counting entities, aiding in proactive threat detection. @@ -299,9 +336,9 @@ Using Ultralytics YOLOv8 for object counting offers several advantages: For real-world applications and code examples, visit the [Advantages of Object Counting](#advantages-of-object-counting) section. -### How can I count specific classes of objects using Ultralytics YOLOv8? +### How can I count specific classes of objects using Ultralytics YOLO11? -To count specific classes of objects using Ultralytics YOLOv8, you need to specify the classes you are interested in during the tracking phase. Below is a Python example: +To count specific classes of objects using Ultralytics YOLO11, you need to specify the classes you are interested in during the tracking phase. Below is a Python example: ```python import cv2 @@ -335,27 +372,27 @@ def count_specific_classes(video_path, output_video_path, model_path, classes_to cv2.destroyAllWindows() -count_specific_classes("path/to/video.mp4", "output_specific_classes.avi", "yolov8n.pt", [0, 2]) +count_specific_classes("path/to/video.mp4", "output_specific_classes.avi", "yolo11n.pt", [0, 2]) ``` In this example, `classes_to_count=[0, 2]`, which means it counts objects of class `0` and `2` (e.g., person and car). -### Why should I use YOLOv8 over other [object detection](https://www.ultralytics.com/glossary/object-detection) models for real-time applications? +### Why should I use YOLO11 over other [object detection](https://www.ultralytics.com/glossary/object-detection) models for real-time applications? -Ultralytics YOLOv8 provides several advantages over other object detection models like Faster R-CNN, SSD, and previous YOLO versions: +Ultralytics YOLO11 provides several advantages over other object detection models like Faster R-CNN, SSD, and previous YOLO versions: -1. **Speed and Efficiency:** YOLOv8 offers real-time processing capabilities, making it ideal for applications requiring high-speed inference, such as surveillance and autonomous driving. +1. **Speed and Efficiency:** YOLO11 offers real-time processing capabilities, making it ideal for applications requiring high-speed inference, such as surveillance and autonomous driving. 2. **[Accuracy](https://www.ultralytics.com/glossary/accuracy):** It provides state-of-the-art accuracy for object detection and tracking tasks, reducing the number of false positives and improving overall system reliability. -3. **Ease of Integration:** YOLOv8 offers seamless integration with various platforms and devices, including mobile and edge devices, which is crucial for modern AI applications. +3. **Ease of Integration:** YOLO11 offers seamless integration with various platforms and devices, including mobile and edge devices, which is crucial for modern AI applications. 4. **Flexibility:** Supports various tasks like object detection, segmentation, and tracking with configurable models to meet specific use-case requirements. -Check out Ultralytics [YOLOv8 Documentation](https://docs.ultralytics.com/models/yolov8/) for a deeper dive into its features and performance comparisons. +Check out Ultralytics [YOLO11 Documentation](https://docs.ultralytics.com/models/yolo11/) for a deeper dive into its features and performance comparisons. -### Can I use YOLOv8 for advanced applications like crowd analysis and traffic management? +### Can I use YOLO11 for advanced applications like crowd analysis and traffic management? 
-Yes, Ultralytics YOLOv8 is perfectly suited for advanced applications like crowd analysis and traffic management due to its real-time detection capabilities, scalability, and integration flexibility. Its advanced features allow for high-accuracy object tracking, counting, and classification in dynamic environments. Example use cases include: +Yes, Ultralytics YOLO11 is perfectly suited for advanced applications like crowd analysis and traffic management due to its real-time detection capabilities, scalability, and integration flexibility. Its advanced features allow for high-accuracy object tracking, counting, and classification in dynamic environments. Example use cases include: - **Crowd Analysis:** Monitor and manage large gatherings, ensuring safety and optimizing crowd flow. - **Traffic Management:** Track and count vehicles, analyze traffic patterns, and manage congestion in real-time. -For more information and implementation details, refer to the guide on [Real World Applications](#real-world-applications) of object counting with YOLOv8. +For more information and implementation details, refer to the guide on [Real World Applications](#real-world-applications) of object counting with YOLO11. diff --git a/docs/en/guides/object-cropping.md b/docs/en/guides/object-cropping.md index f4b50ed027..8bfcac5fe1 100644 --- a/docs/en/guides/object-cropping.md +++ b/docs/en/guides/object-cropping.md @@ -1,14 +1,14 @@ --- comments: true -description: Learn how to crop and extract objects using Ultralytics YOLOv8 for focused analysis, reduced data volume, and enhanced precision. -keywords: Ultralytics, YOLOv8, object cropping, object detection, image processing, video analysis, AI, machine learning +description: Learn how to crop and extract objects using Ultralytics YOLO11 for focused analysis, reduced data volume, and enhanced precision. +keywords: Ultralytics, YOLO11, object cropping, object detection, image processing, video analysis, AI, machine learning --- -# Object Cropping using Ultralytics YOLOv8 +# Object Cropping using Ultralytics YOLO11 ## What is Object Cropping? -Object cropping with [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) involves isolating and extracting specific detected objects from an image or video. The YOLOv8 model capabilities are utilized to accurately identify and delineate objects, enabling precise cropping for further analysis or manipulation. +Object cropping with [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics/) involves isolating and extracting specific detected objects from an image or video. The YOLO11 model capabilities are utilized to accurately identify and delineate objects, enabling precise cropping for further analysis or manipulation.
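Before the full video-based example further down this page, a minimal single-image sketch may help show the idea: run detection, then slice each bounding box out of the frame. The file paths are illustrative assumptions.

```python
import cv2

from ultralytics import YOLO

# Minimal cropping sketch: detect objects, then save each box as its own image.
model = YOLO("yolo11n.pt")
im0 = cv2.imread("path/to/image.jpg")  # illustrative path

results = model.predict(im0)
for idx, box in enumerate(results[0].boxes.xyxy):
    x1, y1, x2, y2 = map(int, box)
    cv2.imwrite(f"crop_{idx}.jpg", im0[y1:y2, x1:x2])  # save the cropped region
```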


@@ -18,23 +18,23 @@ Object cropping with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly allowfullscreen>
- Watch: Object Cropping using Ultralytics YOLOv8 + Watch: Object Cropping using Ultralytics YOLO

## Advantages of Object Cropping? -- **Focused Analysis**: YOLOv8 facilitates targeted object cropping, allowing for in-depth examination or processing of individual items within a scene. +- **Focused Analysis**: YOLO11 facilitates targeted object cropping, allowing for in-depth examination or processing of individual items within a scene. - **Reduced Data Volume**: By extracting only relevant objects, object cropping helps in minimizing data size, making it efficient for storage, transmission, or subsequent computational tasks. -- **Enhanced Precision**: YOLOv8's [object detection](https://www.ultralytics.com/glossary/object-detection) [accuracy](https://www.ultralytics.com/glossary/accuracy) ensures that the cropped objects maintain their spatial relationships, preserving the integrity of the visual information for detailed analysis. +- **Enhanced Precision**: YOLO11's [object detection](https://www.ultralytics.com/glossary/object-detection) [accuracy](https://www.ultralytics.com/glossary/accuracy) ensures that the cropped objects maintain their spatial relationships, preserving the integrity of the visual information for detailed analysis. ## Visuals | Airport Luggage | | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| ![Conveyor Belt at Airport Suitcases Cropping using Ultralytics YOLOv8](https://github.com/ultralytics/docs/releases/download/0/suitcases-cropping-airport-conveyor-belt.avif) | -| Suitcases Cropping at airport conveyor belt using Ultralytics YOLOv8 | +| ![Conveyor Belt at Airport Suitcases Cropping using Ultralytics YOLO11](https://github.com/ultralytics/docs/releases/download/0/suitcases-cropping-airport-conveyor-belt.avif) | +| Suitcases Cropping at airport conveyor belt using Ultralytics YOLO11 | -!!! example "Object Cropping using YOLOv8 Example" +!!! example "Object Cropping using YOLO11 Example" === "Object Cropping" @@ -46,7 +46,7 @@ Object cropping with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly from ultralytics import YOLO from ultralytics.utils.plotting import Annotator, colors - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") names = model.names cap = cv2.VideoCapture("path/to/video/file.mp4") @@ -98,22 +98,22 @@ Object cropping with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly ## FAQ -### What is object cropping in Ultralytics YOLOv8 and how does it work? +### What is object cropping in Ultralytics YOLO11 and how does it work? -Object cropping using [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) involves isolating and extracting specific objects from an image or video based on YOLOv8's detection capabilities. This process allows for focused analysis, reduced data volume, and enhanced [precision](https://www.ultralytics.com/glossary/precision) by leveraging YOLOv8 to identify objects with high accuracy and crop them accordingly. For an in-depth tutorial, refer to the [object cropping example](#object-cropping-using-ultralytics-yolov8). +Object cropping using [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics) involves isolating and extracting specific objects from an image or video based on YOLO11's detection capabilities. This process allows for focused analysis, reduced data volume, and enhanced [precision](https://www.ultralytics.com/glossary/precision) by leveraging YOLO11 to identify objects with high accuracy and crop them accordingly. 
For an in-depth tutorial, refer to the [object cropping example](#object-cropping-using-ultralytics-yolo11). -### Why should I use Ultralytics YOLOv8 for object cropping over other solutions? +### Why should I use Ultralytics YOLO11 for object cropping over other solutions? -Ultralytics YOLOv8 stands out due to its precision, speed, and ease of use. It allows detailed and accurate object detection and cropping, essential for [focused analysis](#advantages-of-object-cropping) and applications needing high data integrity. Moreover, YOLOv8 integrates seamlessly with tools like OpenVINO and TensorRT for deployments requiring real-time capabilities and optimization on diverse hardware. Explore the benefits in the [guide on model export](../modes/export.md). +Ultralytics YOLO11 stands out due to its precision, speed, and ease of use. It allows detailed and accurate object detection and cropping, essential for [focused analysis](#advantages-of-object-cropping) and applications needing high data integrity. Moreover, YOLO11 integrates seamlessly with tools like OpenVINO and TensorRT for deployments requiring real-time capabilities and optimization on diverse hardware. Explore the benefits in the [guide on model export](../modes/export.md). ### How can I reduce the data volume of my dataset using object cropping? -By using Ultralytics YOLOv8 to crop only relevant objects from your images or videos, you can significantly reduce the data size, making it more efficient for storage and processing. This process involves training the model to detect specific objects and then using the results to crop and save these portions only. For more information on exploiting Ultralytics YOLOv8's capabilities, visit our [quickstart guide](../quickstart.md). +By using Ultralytics YOLO11 to crop only relevant objects from your images or videos, you can significantly reduce the data size, making it more efficient for storage and processing. This process involves training the model to detect specific objects and then using the results to crop and save these portions only. For more information on exploiting Ultralytics YOLO11's capabilities, visit our [quickstart guide](../quickstart.md). -### Can I use Ultralytics YOLOv8 for real-time video analysis and object cropping? +### Can I use Ultralytics YOLO11 for real-time video analysis and object cropping? -Yes, Ultralytics YOLOv8 can process real-time video feeds to detect and crop objects dynamically. The model's high-speed inference capabilities make it ideal for real-time applications such as surveillance, sports analysis, and automated inspection systems. Check out the [tracking and prediction modes](../modes/predict.md) to understand how to implement real-time processing. +Yes, Ultralytics YOLO11 can process real-time video feeds to detect and crop objects dynamically. The model's high-speed inference capabilities make it ideal for real-time applications such as surveillance, sports analysis, and automated inspection systems. Check out the [tracking and prediction modes](../modes/predict.md) to understand how to implement real-time processing. -### What are the hardware requirements for efficiently running YOLOv8 for object cropping? +### What are the hardware requirements for efficiently running YOLO11 for object cropping? -Ultralytics YOLOv8 is optimized for both CPU and GPU environments, but to achieve optimal performance, especially for real-time or high-volume inference, a dedicated GPU (e.g., NVIDIA Tesla, RTX series) is recommended. 
For deployment on lightweight devices, consider using CoreML for iOS or TFLite for Android. More details on supported devices and formats can be found in our [model deployment options](../guides/model-deployment-options.md). +Ultralytics YOLO11 is optimized for both CPU and GPU environments, but to achieve optimal performance, especially for real-time or high-volume inference, a dedicated GPU (e.g., NVIDIA Tesla, RTX series) is recommended. For deployment on lightweight devices, consider using CoreML for iOS or TFLite for Android. More details on supported devices and formats can be found in our [model deployment options](../guides/model-deployment-options.md). diff --git a/docs/en/guides/parking-management.md b/docs/en/guides/parking-management.md index 78686bd061..6cf07e4847 100644 --- a/docs/en/guides/parking-management.md +++ b/docs/en/guides/parking-management.md @@ -1,14 +1,14 @@ --- comments: true -description: Optimize parking spaces and enhance safety with Ultralytics YOLOv8. Explore real-time vehicle detection and smart parking solutions. -keywords: parking management, YOLOv8, Ultralytics, vehicle detection, real-time tracking, parking lot optimization, smart parking +description: Optimize parking spaces and enhance safety with Ultralytics YOLO11. Explore real-time vehicle detection and smart parking solutions. +keywords: parking management, YOLO11, Ultralytics, vehicle detection, real-time tracking, parking lot optimization, smart parking --- -# Parking Management using Ultralytics YOLOv8 🚀 +# Parking Management using Ultralytics YOLO11 🚀 ## What is Parking Management System? -Parking management with [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) ensures efficient and safe parking by organizing spaces and monitoring availability. YOLOv8 can improve parking lot management through real-time vehicle detection, and insights into parking occupancy. +Parking management with [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics/) ensures efficient and safe parking by organizing spaces and monitoring availability. YOLO11 can improve parking lot management through real-time vehicle detection and insights into parking occupancy.


@@ -18,21 +18,21 @@ Parking management with [Ultralytics YOLOv8](https://github.com/ultralytics/ultr allowfullscreen>
- Watch: How to Implement Parking Management Using Ultralytics YOLOv8 🚀 + Watch: How to Implement Parking Management Using Ultralytics YOLO 🚀

## Advantages of Parking Management System? - **Efficiency**: Parking lot management optimizes the use of parking spaces and reduces congestion. -- **Safety and Security**: Parking management using YOLOv8 improves the safety of both people and vehicles through surveillance and security measures. -- **Reduced Emissions**: Parking management using YOLOv8 manages traffic flow to minimize idle time and emissions in parking lots. +- **Safety and Security**: Parking management using YOLO11 improves the safety of both people and vehicles through surveillance and security measures. +- **Reduced Emissions**: Parking management using YOLO11 manages traffic flow to minimize idle time and emissions in parking lots. ## Real World Applications | Parking Management System | Parking Management System | | :----------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| ![Parking lots Analytics Using Ultralytics YOLOv8](https://github.com/ultralytics/docs/releases/download/0/parking-management-aerial-view-ultralytics-yolov8.avif) | ![Parking management top view using Ultralytics YOLOv8](https://github.com/ultralytics/docs/releases/download/0/parking-management-top-view-ultralytics-yolov8.avif) | -| Parking management Aerial View using Ultralytics YOLOv8 | Parking management Top View using Ultralytics YOLOv8 | +| ![Parking lots Analytics Using Ultralytics YOLO11](https://github.com/ultralytics/docs/releases/download/0/parking-management-aerial-view-ultralytics-yolov8.avif) | ![Parking management top view using Ultralytics YOLO11](https://github.com/ultralytics/docs/releases/download/0/parking-management-top-view-ultralytics-yolov8.avif) | +| Parking management Aerial View using Ultralytics YOLO11 | Parking management Top View using Ultralytics YOLO11 | ## Parking Management System Code Workflow @@ -49,7 +49,7 @@ Parking management with [Ultralytics YOLOv8](https://github.com/ultralytics/ultr Max Image Size of 1920 * 1080 supported -!!! example "Parking slots Annotator Ultralytics YOLOv8" +!!! example "Parking slots Annotator Ultralytics YOLO11" === "Parking Annotator" @@ -61,11 +61,11 @@ Parking management with [Ultralytics YOLOv8](https://github.com/ultralytics/ultr - After defining the parking areas with polygons, click `save` to store a JSON file with the data in your working directory. -![Ultralytics YOLOv8 Points Selection Demo](https://github.com/ultralytics/docs/releases/download/0/ultralytics-yolov8-points-selection-demo.avif) +![Ultralytics YOLO11 Points Selection Demo](https://github.com/ultralytics/docs/releases/download/0/ultralytics-yolov8-points-selection-demo.avif) ### Python Code for Parking Management -!!! example "Parking management using YOLOv8 Example" +!!! 
example "Parking management using YOLO11 Example" === "Parking Management" @@ -84,7 +84,7 @@ Parking management with [Ultralytics YOLOv8](https://github.com/ultralytics/ultr # Initialize parking management object parking_manager = solutions.ParkingManagement( - model="yolov8n.pt", # path to model file + model="yolo11n.pt", # path to model file json_file="bounding_boxes.json", # path to parking annotations file ) @@ -104,7 +104,7 @@ Parking management with [Ultralytics YOLOv8](https://github.com/ultralytics/ultr | Name | Type | Default | Description | | ------------------------ | ------- | ------------- | -------------------------------------------------------------- | -| `model` | `str` | `None` | Path to the YOLOv8 model. | +| `model` | `str` | `None` | Path to the YOLO11 model. | | `json_file` | `str` | `None` | Path to the JSON file, that have all parking coordinates data. | | `occupied_region_color` | `tuple` | `(0, 0, 255)` | RGB color for occupied regions. | | `available_region_color` | `tuple` | `(0, 255, 0)` | RGB color for available regions. | @@ -115,33 +115,33 @@ Parking management with [Ultralytics YOLOv8](https://github.com/ultralytics/ultr ## FAQ -### How does Ultralytics YOLOv8 enhance parking management systems? +### How does Ultralytics YOLO11 enhance parking management systems? -Ultralytics YOLOv8 greatly enhances parking management systems by providing **real-time vehicle detection** and monitoring. This results in optimized usage of parking spaces, reduced congestion, and improved safety through continuous surveillance. The [Parking Management System](https://github.com/ultralytics/ultralytics) enables efficient traffic flow, minimizing idle times and emissions in parking lots, thereby contributing to environmental sustainability. For further details, refer to the [parking management code workflow](#python-code-for-parking-management). +Ultralytics YOLO11 greatly enhances parking management systems by providing **real-time vehicle detection** and monitoring. This results in optimized usage of parking spaces, reduced congestion, and improved safety through continuous surveillance. The [Parking Management System](https://github.com/ultralytics/ultralytics) enables efficient traffic flow, minimizing idle times and emissions in parking lots, thereby contributing to environmental sustainability. For further details, refer to the [parking management code workflow](#python-code-for-parking-management). -### What are the benefits of using Ultralytics YOLOv8 for smart parking? +### What are the benefits of using Ultralytics YOLO11 for smart parking? -Using Ultralytics YOLOv8 for smart parking yields numerous benefits: +Using Ultralytics YOLO11 for smart parking yields numerous benefits: - **Efficiency**: Optimizes the use of parking spaces and decreases congestion. - **Safety and Security**: Enhances surveillance and ensures the safety of vehicles and pedestrians. - **Environmental Impact**: Helps in reducing emissions by minimizing vehicle idle times. More details on the advantages can be seen [here](#advantages-of-parking-management-system). -### How can I define parking spaces using Ultralytics YOLOv8? +### How can I define parking spaces using Ultralytics YOLO11? -Defining parking spaces is straightforward with Ultralytics YOLOv8: +Defining parking spaces is straightforward with Ultralytics YOLO11: 1. Capture a frame from a video or camera stream. 2. Use the provided code to launch a GUI for selecting an image and drawing polygons to define parking spaces. 3. 
Save the labeled data in JSON format for further processing. For comprehensive instructions, check the [selection of points](#selection-of-points) section. -### Can I customize the YOLOv8 model for specific parking management needs? +### Can I customize the YOLO11 model for specific parking management needs? -Yes, Ultralytics YOLOv8 allows customization for specific parking management needs. You can adjust parameters such as the **occupied and available region colors**, margins for text display, and much more. Utilizing the `ParkingManagement` class's [optional arguments](#optional-arguments-parkingmanagement), you can tailor the model to suit your particular requirements, ensuring maximum efficiency and effectiveness. +Yes, Ultralytics YOLO11 allows customization for specific parking management needs. You can adjust parameters such as the **occupied and available region colors**, margins for text display, and much more. Utilizing the `ParkingManagement` class's [optional arguments](#optional-arguments-parkingmanagement), you can tailor the model to suit your particular requirements, ensuring maximum efficiency and effectiveness. -### What are some real-world applications of Ultralytics YOLOv8 in parking lot management? +### What are some real-world applications of Ultralytics YOLO11 in parking lot management? -Ultralytics YOLOv8 is utilized in various real-world applications for parking lot management, including: +Ultralytics YOLO11 is utilized in various real-world applications for parking lot management, including: - **Parking Space Detection**: Accurately identifying available and occupied spaces. - **Surveillance**: Enhancing security through real-time monitoring. diff --git a/docs/en/guides/preprocessing_annotated_data.md b/docs/en/guides/preprocessing_annotated_data.md index fcd329c743..bca2268145 100644 --- a/docs/en/guides/preprocessing_annotated_data.md +++ b/docs/en/guides/preprocessing_annotated_data.md @@ -1,7 +1,7 @@ --- comments: true description: Learn essential data preprocessing techniques for annotated computer vision data, including resizing, normalizing, augmenting, and splitting datasets for optimal model training. -keywords: data preprocessing, computer vision, image resizing, normalization, data augmentation, training dataset, validation dataset, test dataset, YOLOv8 +keywords: data preprocessing, computer vision, image resizing, normalization, data augmentation, training dataset, validation dataset, test dataset, YOLO11 --- # Data Preprocessing Techniques for Annotated [Computer Vision](https://www.ultralytics.com/glossary/computer-vision-cv) Data @@ -36,7 +36,7 @@ To make resizing a simpler task, you can use the following tools: - **[OpenCV](https://www.ultralytics.com/glossary/opencv)**: A popular computer vision library with extensive functions for image processing. - **PIL (Pillow)**: A Python Imaging Library for opening, manipulating, and saving image files. -With respect to YOLOv8, the 'imgsz' parameter during [model training](../modes/train.md) allows for flexible input sizes. When set to a specific size, such as 640, the model will resize input images so their largest dimension is 640 pixels while maintaining the original aspect ratio. +With respect to YOLO11, the 'imgsz' parameter during [model training](../modes/train.md) allows for flexible input sizes. When set to a specific size, such as 640, the model will resize input images so their largest dimension is 640 pixels while maintaining the original aspect ratio. 
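For intuition, here is a short OpenCV sketch of the same aspect-ratio-preserving resize that `imgsz` performs automatically; the 640-pixel target and the image path are illustrative assumptions.

```python
import cv2

# Sketch: scale so the largest dimension becomes 640 px, keeping aspect ratio.
im = cv2.imread("path/to/image.jpg")  # illustrative path
h, w = im.shape[:2]
r = 640 / max(h, w)  # scale factor for the longest side
resized = cv2.resize(im, (round(w * r), round(h * r)))
print(im.shape, "->", resized.shape)
```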
By evaluating your model's and dataset's specific needs, you can determine whether resizing is a necessary preprocessing step or if your model can efficiently handle images of varying sizes. @@ -47,7 +47,7 @@ Another preprocessing technique is normalization. Normalization scales the pixel - **Min-Max Scaling**: Scales pixel values to a range of 0 to 1. - **Z-Score Normalization**: Scales pixel values based on their mean and standard deviation. -With respect to YOLOv8, normalization is seamlessly handled as part of its preprocessing pipeline during model training. YOLOv8 automatically performs several preprocessing steps, including conversion to RGB, scaling pixel values to the range [0, 1], and normalization using predefined mean and standard deviation values. +With respect to YOLO11, normalization is seamlessly handled as part of its preprocessing pipeline during model training. YOLO11 automatically performs several preprocessing steps, including conversion to RGB, scaling pixel values to the range [0, 1], and normalization using predefined mean and standard deviation values. ### Splitting the Dataset @@ -76,9 +76,9 @@ Common augmentation techniques include flipping, rotation, scaling, and color ad Overview of Data Augmentations

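The splitting step described earlier is often done with scikit-learn; a minimal sketch of the common 70/20/10 train/val/test convention used in the case study below (the glob pattern and random seed are illustrative):

```python
import glob

from sklearn.model_selection import train_test_split

image_paths = sorted(glob.glob("dataset/images/*.jpg"))  # illustrative layout

# 70% train, then split the remaining 30% into 20% val / 10% test
train_files, rest = train_test_split(image_paths, train_size=0.7, random_state=42)
val_files, test_files = train_test_split(rest, train_size=2 / 3, random_state=42)

print(len(train_files), len(val_files), len(test_files))
```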
-With respect to YOLOv8, you can [augment your custom dataset](../modes/train.md) by modifying the dataset configuration file, a .yaml file. In this file, you can add an augmentation section with parameters that specify how you want to augment your data. +With respect to YOLO11, you can [augment your custom dataset](../modes/train.md) by modifying the dataset configuration file, a .yaml file. In this file, you can add an augmentation section with parameters that specify how you want to augment your data. -The [Ultralytics YOLOv8 repository](https://github.com/ultralytics/ultralytics/tree/main) supports a wide range of data augmentations. You can apply various transformations such as: +The [Ultralytics YOLO11 repository](https://github.com/ultralytics/ultralytics/tree/main) supports a wide range of data augmentations. You can apply various transformations such as: - Random Crops - Flipping: Images can be flipped horizontally or vertically. @@ -89,12 +89,12 @@ Also, you can adjust the intensity of these augmentation techniques through spec ## A Case Study of Preprocessing -Consider a project aimed at developing a model to detect and classify different types of vehicles in traffic images using YOLOv8. We've collected traffic images and annotated them with bounding boxes and labels. +Consider a project aimed at developing a model to detect and classify different types of vehicles in traffic images using YOLO11. We've collected traffic images and annotated them with bounding boxes and labels. Here's what each step of preprocessing would look like for this project: -- Resizing Images: Since YOLOv8 handles flexible input sizes and performs resizing automatically, manual resizing is not required. The model will adjust the image size according to the specified 'imgsz' parameter during training. -- Normalizing Pixel Values: YOLOv8 automatically normalizes pixel values to a range of 0 to 1 during preprocessing, so it's not required. +- Resizing Images: Since YOLO11 handles flexible input sizes and performs resizing automatically, manual resizing is not required. The model will adjust the image size according to the specified 'imgsz' parameter during training. +- Normalizing Pixel Values: YOLO11 automatically normalizes pixel values to a range of 0 to 1 during preprocessing, so it's not required. - Splitting the Dataset: Divide the dataset into training (70%), validation (20%), and test (10%) sets using tools like scikit-learn. - [Data Augmentation](https://www.ultralytics.com/glossary/data-augmentation): Modify the dataset configuration file (.yaml) to include data augmentation techniques such as random crops, horizontal flips, and brightness adjustments. @@ -132,12 +132,12 @@ Having discussions about your project with other computer vision enthusiasts can ### Channels to Connect with the Community -- **GitHub Issues:** Visit the YOLOv8 GitHub repository and use the [Issues tab](https://github.com/ultralytics/ultralytics/issues) to raise questions, report bugs, and suggest features. The community and maintainers are there to help with any issues you face. +- **GitHub Issues:** Visit the YOLO11 GitHub repository and use the [Issues tab](https://github.com/ultralytics/ultralytics/issues) to raise questions, report bugs, and suggest features. The community and maintainers are there to help with any issues you face. 
- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://discord.com/invite/ultralytics) to connect with other users and developers, get support, share knowledge, and brainstorm ideas. ### Official Documentation -- **Ultralytics YOLOv8 Documentation:** Refer to the [official YOLOv8 documentation](./index.md) for thorough guides and valuable insights on numerous computer vision tasks and projects. +- **Ultralytics YOLO11 Documentation:** Refer to the [official YOLO11 documentation](./index.md) for thorough guides and valuable insights on numerous computer vision tasks and projects. ## Your Dataset Is Ready! @@ -151,7 +151,7 @@ Data preprocessing is essential in computer vision projects because it ensures t ### How can I use Ultralytics YOLO for data augmentation? -For data augmentation with Ultralytics YOLOv8, you need to modify the dataset configuration file (.yaml). In this file, you can specify various augmentation techniques such as random crops, horizontal flips, and brightness adjustments. This can be effectively done using the training configurations [explained here](../modes/train.md). Data augmentation helps create a more robust dataset, reduce [overfitting](https://www.ultralytics.com/glossary/overfitting), and improve model generalization. +For data augmentation with Ultralytics YOLO11, you need to modify the dataset configuration file (.yaml). In this file, you can specify various augmentation techniques such as random crops, horizontal flips, and brightness adjustments. This can be effectively done using the training configurations [explained here](../modes/train.md). Data augmentation helps create a more robust dataset, reduce [overfitting](https://www.ultralytics.com/glossary/overfitting), and improve model generalization. ### What are the best data normalization techniques for computer vision data? @@ -160,12 +160,12 @@ Normalization scales pixel values to a standard range for faster convergence and - **Min-Max Scaling**: Scales pixel values to a range of 0 to 1. - **Z-Score Normalization**: Scales pixel values based on their mean and standard deviation. -For YOLOv8, normalization is handled automatically, including conversion to RGB and pixel value scaling. Learn more about it in the [model training section](../modes/train.md). +For YOLO11, normalization is handled automatically, including conversion to RGB and pixel value scaling. Learn more about it in the [model training section](../modes/train.md). ### How should I split my annotated dataset for training? To split your dataset, a common practice is to divide it into 70% for training, 20% for validation, and 10% for testing. It is important to maintain the data distribution of classes across these splits and avoid data leakage by performing augmentation only on the training set. Use tools like scikit-learn or [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) for efficient dataset splitting. See the detailed guide on [dataset preparation](../guides/data-collection-and-annotation.md). -### Can I handle varying image sizes in YOLOv8 without manual resizing? +### Can I handle varying image sizes in YOLO11 without manual resizing? -Yes, Ultralytics YOLOv8 can handle varying image sizes through the 'imgsz' parameter during model training. This parameter ensures that images are resized so their largest dimension matches the specified size (e.g., 640 pixels), while maintaining the aspect ratio. 
For more flexible input handling and automatic adjustments, check the [model training section](../modes/train.md). +Yes, Ultralytics YOLO11 can handle varying image sizes through the 'imgsz' parameter during model training. This parameter ensures that images are resized so their largest dimension matches the specified size (e.g., 640 pixels), while maintaining the aspect ratio. For more flexible input handling and automatic adjustments, check the [model training section](../modes/train.md). diff --git a/docs/en/guides/queue-management.md b/docs/en/guides/queue-management.md index 9fb4897edf..8f6610bc9e 100644 --- a/docs/en/guides/queue-management.md +++ b/docs/en/guides/queue-management.md @@ -1,14 +1,14 @@ --- comments: true -description: Learn how to manage and optimize queues using Ultralytics YOLOv8 to reduce wait times and increase efficiency in various real-world applications. -keywords: queue management, YOLOv8, Ultralytics, reduce wait times, efficiency, customer satisfaction, retail, airports, healthcare, banks +description: Learn how to manage and optimize queues using Ultralytics YOLO11 to reduce wait times and increase efficiency in various real-world applications. +keywords: queue management, YOLO11, Ultralytics, reduce wait times, efficiency, customer satisfaction, retail, airports, healthcare, banks --- -# Queue Management using Ultralytics YOLOv8 🚀 +# Queue Management using Ultralytics YOLO11 🚀 ## What is Queue Management? -Queue management using [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) involves organizing and controlling lines of people or vehicles to reduce wait times and enhance efficiency. It's about optimizing queues to improve customer satisfaction and system performance in various settings like retail, banks, airports, and healthcare facilities. +Queue management using [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics/) involves organizing and controlling lines of people or vehicles to reduce wait times and enhance efficiency. It's about optimizing queues to improve customer satisfaction and system performance in various settings like retail, banks, airports, and healthcare facilities.


@@ -18,7 +18,7 @@ Queue management using [Ultralytics YOLOv8](https://github.com/ultralytics/ultra allowfullscreen>
- Watch: How to Implement Queue Management with Ultralytics YOLOv8 | Airport and Metro Station + Watch: How to Implement Queue Management with Ultralytics YOLO11 | Airport and Metro Station

## Advantages of Queue Management? @@ -30,10 +30,10 @@ Queue management using [Ultralytics YOLOv8](https://github.com/ultralytics/ultra | Logistics | Retail | | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------: | -| ![Queue management at airport ticket counter using Ultralytics YOLOv8](https://github.com/ultralytics/docs/releases/download/0/queue-management-airport-ticket-counter-ultralytics-yolov8.avif) | ![Queue monitoring in crowd using Ultralytics YOLOv8](https://github.com/ultralytics/docs/releases/download/0/queue-monitoring-crowd-ultralytics-yolov8.avif) | -| Queue management at airport ticket counter Using Ultralytics YOLOv8 | Queue monitoring in crowd Ultralytics YOLOv8 | +| ![Queue management at airport ticket counter using Ultralytics YOLO11](https://github.com/ultralytics/docs/releases/download/0/queue-management-airport-ticket-counter-ultralytics-yolov8.avif) | ![Queue monitoring in crowd using Ultralytics YOLO11](https://github.com/ultralytics/docs/releases/download/0/queue-monitoring-crowd-ultralytics-yolov8.avif) | +| Queue management at airport ticket counter Using Ultralytics YOLO11 | Queue monitoring in crowd Ultralytics YOLO11 | -!!! example "Queue Management using YOLOv8 Example" +!!! example "Queue Management using YOLO11 Example" === "Queue Manager" @@ -42,7 +42,7 @@ Queue management using [Ultralytics YOLOv8](https://github.com/ultralytics/ultra from ultralytics import YOLO, solutions - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("path/to/video/file.mp4") assert cap.isOpened(), "Error reading video file" @@ -84,7 +84,7 @@ Queue management using [Ultralytics YOLOv8](https://github.com/ultralytics/ultra from ultralytics import YOLO, solutions - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("path/to/video/file.mp4") assert cap.isOpened(), "Error reading video file" @@ -135,11 +135,11 @@ Queue management using [Ultralytics YOLOv8](https://github.com/ultralytics/ultra ## FAQ -### How can I use Ultralytics YOLOv8 for real-time queue management? +### How can I use Ultralytics YOLO11 for real-time queue management? -To use Ultralytics YOLOv8 for real-time queue management, you can follow these steps: +To use Ultralytics YOLO11 for real-time queue management, you can follow these steps: -1. Load the YOLOv8 model with `YOLO("yolov8n.pt")`. +1. Load the YOLO11 model with `YOLO("yolo11n.pt")`. 2. Capture the video feed using `cv2.VideoCapture`. 3. Define the region of interest (ROI) for queue management. 4. Process frames to detect objects and manage queues. @@ -151,7 +151,7 @@ import cv2 from ultralytics import YOLO, solutions -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") cap = cv2.VideoCapture("path/to/video.mp4") queue_region = [(20, 400), (1080, 404), (1080, 360), (20, 360)] @@ -176,9 +176,9 @@ cv2.destroyAllWindows() Leveraging Ultralytics [HUB](https://docs.ultralytics.com/hub/) can streamline this process by providing a user-friendly platform for deploying and managing your queue management solution. -### What are the key advantages of using Ultralytics YOLOv8 for queue management? +### What are the key advantages of using Ultralytics YOLO11 for queue management? 
-Using Ultralytics YOLOv8 for queue management offers several benefits: +Using Ultralytics YOLO11 for queue management offers several benefits: - **Plummeting Waiting Times:** Efficiently organizes queues, reducing customer wait times and boosting satisfaction. - **Enhancing Efficiency:** Analyzes queue data to optimize staff deployment and operations, thereby reducing costs. @@ -187,20 +187,20 @@ Using Ultralytics YOLOv8 for queue management offers several benefits: For more details, explore our [Queue Management](https://docs.ultralytics.com/reference/solutions/queue_management/) solutions. -### Why should I choose Ultralytics YOLOv8 over competitors like [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) or Detectron2 for queue management? +### Why should I choose Ultralytics YOLO11 over competitors like [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) or Detectron2 for queue management? -Ultralytics YOLOv8 has several advantages over TensorFlow and Detectron2 for queue management: +Ultralytics YOLO11 has several advantages over TensorFlow and Detectron2 for queue management: -- **Real-time Performance:** YOLOv8 is known for its real-time detection capabilities, offering faster processing speeds. +- **Real-time Performance:** YOLO11 is known for its real-time detection capabilities, offering faster processing speeds. - **Ease of Use:** Ultralytics provides a user-friendly experience, from training to deployment, via [Ultralytics HUB](https://docs.ultralytics.com/hub/). - **Pretrained Models:** Access to a range of pretrained models, minimizing the time needed for setup. - **Community Support:** Extensive documentation and active community support make problem-solving easier. Learn how to get started with [Ultralytics YOLO](https://docs.ultralytics.com/quickstart/). -### Can Ultralytics YOLOv8 handle multiple types of queues, such as in airports and retail? +### Can Ultralytics YOLO11 handle multiple types of queues, such as in airports and retail? -Yes, Ultralytics YOLOv8 can manage various types of queues, including those in airports and retail environments. By configuring the QueueManager with specific regions and settings, YOLOv8 can adapt to different queue layouts and densities. +Yes, Ultralytics YOLO11 can manage various types of queues, including those in airports and retail environments. By configuring the QueueManager with specific regions and settings, YOLO11 can adapt to different queue layouts and densities. Example for airports: @@ -215,9 +215,9 @@ queue_airport = solutions.QueueManager( For more information on diverse applications, check out our [Real World Applications](#real-world-applications) section. -### What are some real-world applications of Ultralytics YOLOv8 in queue management? +### What are some real-world applications of Ultralytics YOLO11 in queue management? -Ultralytics YOLOv8 is used in various real-world applications for queue management: +Ultralytics YOLO11 is used in various real-world applications for queue management: - **Retail:** Monitors checkout lines to reduce wait times and improve customer satisfaction. - **Airports:** Manages queues at ticket counters and security checkpoints for a smoother passenger experience. 
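All of the queue examples above share the same detection-and-tracking core; stripped of the queue-region logic, that loop looks roughly like this (the video path is a placeholder):

```python
import cv2

from ultralytics import YOLO

model = YOLO("yolo11n.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")
assert cap.isOpened(), "Error reading video file"

while cap.isOpened():
    success, frame = cap.read()
    if not success:
        break
    # persist=True keeps track IDs stable between frames, which is what
    # lets a queue region count the same person only once
    results = model.track(frame, persist=True, verbose=False)
    annotated = results[0].plot()  # draw boxes and track IDs
    cv2.imshow("Queue tracking", annotated)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
```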
diff --git a/docs/en/guides/sahi-tiled-inference.md b/docs/en/guides/sahi-tiled-inference.md index 1137d6b84f..ec48d95b7a 100644 --- a/docs/en/guides/sahi-tiled-inference.md +++ b/docs/en/guides/sahi-tiled-inference.md @@ -1,12 +1,12 @@ --- comments: true -description: Learn how to implement YOLOv8 with SAHI for sliced inference. Optimize memory usage and enhance detection accuracy for large-scale applications. -keywords: YOLOv8, SAHI, Sliced Inference, Object Detection, Ultralytics, High-resolution Images, Computational Efficiency, Integration Guide +description: Learn how to implement YOLO11 with SAHI for sliced inference. Optimize memory usage and enhance detection accuracy for large-scale applications. +keywords: YOLO11, SAHI, Sliced Inference, Object Detection, Ultralytics, High-resolution Images, Computational Efficiency, Integration Guide --- -# Ultralytics Docs: Using YOLOv8 with SAHI for Sliced Inference +# Ultralytics Docs: Using YOLO11 with SAHI for Sliced Inference -Welcome to the Ultralytics documentation on how to use YOLOv8 with [SAHI](https://github.com/obss/sahi) (Slicing Aided Hyper Inference). This comprehensive guide aims to furnish you with all the essential knowledge you'll need to implement SAHI alongside YOLOv8. We'll deep-dive into what SAHI is, why sliced inference is critical for large-scale applications, and how to integrate these functionalities with YOLOv8 for enhanced [object detection](https://www.ultralytics.com/glossary/object-detection) performance. +Welcome to the Ultralytics documentation on how to use YOLO11 with [SAHI](https://github.com/obss/sahi) (Slicing Aided Hyper Inference). This comprehensive guide aims to furnish you with all the essential knowledge you'll need to implement SAHI alongside YOLO11. We'll deep-dive into what SAHI is, why sliced inference is critical for large-scale applications, and how to integrate these functionalities with YOLO11 for enhanced [object detection](https://www.ultralytics.com/glossary/object-detection) performance.

SAHI Sliced Inference Overview @@ -24,7 +24,7 @@ SAHI (Slicing Aided Hyper Inference) is an innovative library designed to optimi allowfullscreen>
- Watch: Inference with SAHI (Slicing Aided Hyper Inference) using Ultralytics YOLOv8 + Watch: Inference with SAHI (Slicing Aided Hyper Inference) using Ultralytics YOLO11

### Key Features of SAHI @@ -47,12 +47,12 @@ Sliced Inference refers to the practice of subdividing a large or high-resolutio
[comparison table: header cells and image alt text updated from "YOLOv8 without SAHI" / "YOLOv8 with SAHI" to "YOLO11 without SAHI" / "YOLO11 with SAHI"]
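Concretely, slicing is driven by the slice dimensions and overlap ratios passed to SAHI's `get_sliced_prediction`. A minimal sketch of the workflow (it assumes the `pip install -U ultralytics sahi` setup shown just below; the image path, 256-pixel slices, and 0.2 overlaps are placeholders/illustrative):

```python
from sahi import AutoDetectionModel
from sahi.predict import get_sliced_prediction

# Wrap a YOLO checkpoint in SAHI's detection interface; the "yolov8"
# model type is SAHI's ultralytics wrapper and also loads YOLO11 weights
detection_model = AutoDetectionModel.from_pretrained(
    model_type="yolov8",
    model_path="yolo11n.pt",
    confidence_threshold=0.3,
    device="cpu",  # or 'cuda:0'
)

# Each 256x256 slice is inferred separately; overlapping detections are
# merged back into full-image coordinates
result = get_sliced_prediction(
    "path/to/large-image.jpg",
    detection_model,
    slice_height=256,
    slice_width=256,
    overlap_height_ratio=0.2,
    overlap_width_ratio=0.2,
)
result.export_visuals(export_dir="output/")
```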
@@ -68,15 +68,15 @@ pip install -U ultralytics sahi ### Import Modules and Download Resources -Here's how to import the necessary modules and download a YOLOv8 model and some test images: +Here's how to import the necessary modules and download a YOLO11 model and some test images: ```python from sahi.utils.file import download_from_url from sahi.utils.yolov8 import download_yolov8s_model -# Download YOLOv8 model -yolov8_model_path = "models/yolov8s.pt" -download_yolov8s_model(yolov8_model_path) +# Download YOLO11 model +model_path = "models/yolo11s.pt" +download_yolov8s_model(model_path) # Download test images download_from_url( @@ -89,11 +89,11 @@ download_from_url( ) ``` -## Standard Inference with YOLOv8 +## Standard Inference with YOLO11 ### Instantiate the Model -You can instantiate a YOLOv8 model for object detection like this: +You can instantiate a YOLO11 model for object detection like this: ```python from sahi import AutoDetectionModel @@ -129,7 +129,7 @@ result.export_visuals(export_dir="demo_data/") Image("demo_data/prediction_visual.png") ``` -## Sliced Inference with YOLOv8 +## Sliced Inference with YOLO11 Perform sliced inference by specifying the slice dimensions and overlap ratios: @@ -170,7 +170,7 @@ from sahi.predict import predict predict( model_type="yolov8", - model_path="path/to/yolov8n.pt", + model_path="path/to/yolo11n.pt", model_device="cpu", # or 'cuda:0' model_confidence_threshold=0.4, source="path/to/dir", @@ -181,7 +181,7 @@ predict( ) ``` -That's it! Now you're equipped to use YOLOv8 with SAHI for both standard and sliced inference. +That's it! Now you're equipped to use YOLO11 with SAHI for both standard and sliced inference. ## Citations and Acknowledgments @@ -206,23 +206,23 @@ We extend our thanks to the SAHI research group for creating and maintaining thi ## FAQ -### How can I integrate YOLOv8 with SAHI for sliced inference in object detection? +### How can I integrate YOLO11 with SAHI for sliced inference in object detection? -Integrating Ultralytics YOLOv8 with SAHI (Slicing Aided Hyper Inference) for sliced inference optimizes your object detection tasks on high-resolution images by partitioning them into manageable slices. This approach improves memory usage and ensures high detection accuracy. To get started, you need to install the ultralytics and sahi libraries: +Integrating Ultralytics YOLO11 with SAHI (Slicing Aided Hyper Inference) for sliced inference optimizes your object detection tasks on high-resolution images by partitioning them into manageable slices. This approach improves memory usage and ensures high detection accuracy. To get started, you need to install the ultralytics and sahi libraries: ```bash pip install -U ultralytics sahi ``` -Then, download a YOLOv8 model and test images: +Then, download a YOLO11 model and test images: ```python from sahi.utils.file import download_from_url from sahi.utils.yolov8 import download_yolov8s_model -# Download YOLOv8 model -yolov8_model_path = "models/yolov8s.pt" -download_yolov8s_model(yolov8_model_path) +# Download YOLO11 model +model_path = "models/yolo11s.pt" +download_yolov8s_model(model_path) # Download test images download_from_url( @@ -231,11 +231,11 @@ download_from_url( ) ``` -For more detailed instructions, refer to our [Sliced Inference guide](#sliced-inference-with-yolov8). +For more detailed instructions, refer to our [Sliced Inference guide](#sliced-inference-with-yolo11). -### Why should I use SAHI with YOLOv8 for object detection on large images? 
+### Why should I use SAHI with YOLO11 for object detection on large images? -Using SAHI with Ultralytics YOLOv8 for object detection on large images offers several benefits: +Using SAHI with Ultralytics YOLO11 for object detection on large images offers several benefits: - **Reduced Computational Burden**: Smaller slices are faster to process and consume less memory, making it feasible to run high-quality detections on hardware with limited resources. - **Maintained Detection Accuracy**: SAHI uses intelligent algorithms to merge overlapping boxes, preserving the detection quality. @@ -243,9 +243,9 @@ Using SAHI with Ultralytics YOLOv8 for object detection on large images offers s Learn more about the [benefits of sliced inference](#benefits-of-sliced-inference) in our documentation. -### Can I visualize prediction results when using YOLOv8 with SAHI? +### Can I visualize prediction results when using YOLO11 with SAHI? -Yes, you can visualize prediction results when using YOLOv8 with SAHI. Here's how you can export and visualize the results: +Yes, you can visualize prediction results when using YOLO11 with SAHI. Here's how you can export and visualize the results: ```python from IPython.display import Image @@ -256,9 +256,9 @@ Image("demo_data/prediction_visual.png") This command will save the visualized predictions to the specified directory and you can then load the image to view it in your notebook or application. For a detailed guide, check out the [Standard Inference section](#visualize-results). -### What features does SAHI offer for improving YOLOv8 object detection? +### What features does SAHI offer for improving YOLO11 object detection? -SAHI (Slicing Aided Hyper Inference) offers several features that complement Ultralytics YOLOv8 for object detection: +SAHI (Slicing Aided Hyper Inference) offers several features that complement Ultralytics YOLO11 for object detection: - **Seamless Integration**: SAHI easily integrates with YOLO models, requiring minimal code adjustments. - **Resource Efficiency**: It partitions large images into smaller slices, which optimizes memory usage and speed. @@ -266,9 +266,9 @@ SAHI (Slicing Aided Hyper Inference) offers several features that complement Ult For a deeper understanding, read about SAHI's [key features](#key-features-of-sahi). -### How do I handle large-scale inference projects using YOLOv8 and SAHI? +### How do I handle large-scale inference projects using YOLO11 and SAHI? -To handle large-scale inference projects using YOLOv8 and SAHI, follow these best practices: +To handle large-scale inference projects using YOLO11 and SAHI, follow these best practices: 1. **Install Required Libraries**: Ensure that you have the latest versions of ultralytics and sahi. 2. **Configure Sliced Inference**: Determine the optimal slice dimensions and overlap ratios for your specific project. @@ -281,7 +281,7 @@ from sahi.predict import predict predict( model_type="yolov8", - model_path="path/to/yolov8n.pt", + model_path="path/to/yolo11n.pt", model_device="cpu", # or 'cuda:0' model_confidence_threshold=0.4, source="path/to/dir", diff --git a/docs/en/guides/security-alarm-system.md b/docs/en/guides/security-alarm-system.md index d90e44a583..a9523dd61c 100644 --- a/docs/en/guides/security-alarm-system.md +++ b/docs/en/guides/security-alarm-system.md @@ -1,17 +1,17 @@ --- comments: true -description: Enhance your security with real-time object detection using Ultralytics YOLOv8. Reduce false positives and integrate seamlessly with existing systems. 
-keywords: YOLOv8, Security Alarm System, real-time object detection, Ultralytics, computer vision, integration, false positives +description: Enhance your security with real-time object detection using Ultralytics YOLO11. Reduce false positives and integrate seamlessly with existing systems. +keywords: YOLO11, Security Alarm System, real-time object detection, Ultralytics, computer vision, integration, false positives --- -# Security Alarm System Project Using Ultralytics YOLOv8 +# Security Alarm System Project Using Ultralytics YOLO11 Security Alarm System -The Security Alarm System Project utilizing Ultralytics YOLOv8 integrates advanced [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) capabilities to enhance security measures. YOLOv8, developed by Ultralytics, provides real-time object detection, allowing the system to identify and respond to potential security threats promptly. This project offers several advantages: +The Security Alarm System Project utilizing Ultralytics YOLO11 integrates advanced [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) capabilities to enhance security measures. YOLO11, developed by Ultralytics, provides real-time object detection, allowing the system to identify and respond to potential security threats promptly. This project offers several advantages: -- **Real-time Detection:** YOLOv8's efficiency enables the Security Alarm System to detect and respond to security incidents in real-time, minimizing response time. -- **[Accuracy](https://www.ultralytics.com/glossary/accuracy):** YOLOv8 is known for its accuracy in object detection, reducing false positives and enhancing the reliability of the security alarm system. +- **Real-time Detection:** YOLO11's efficiency enables the Security Alarm System to detect and respond to security incidents in real-time, minimizing response time. +- **[Accuracy](https://www.ultralytics.com/glossary/accuracy):** YOLO11 is known for its accuracy in object detection, reducing false positives and enhancing the reliability of the security alarm system. - **Integration Capabilities:** The project can be seamlessly integrated with existing security infrastructure, providing an upgraded layer of intelligent surveillance.

@@ -22,7 +22,7 @@ The Security Alarm System Project utilizing Ultralytics YOLOv8 integrates advanc allowfullscreen>
- Watch: Security Alarm System Project with Ultralytics YOLOv8 [Object Detection](https://www.ultralytics.com/glossary/object-detection) + Watch: Security Alarm System Project with Ultralytics YOLO11 Object Detection

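Before the full class below, the notification step is worth seeing in isolation. A minimal sketch using Python's standard `smtplib` (the server, credentials, and addresses are placeholders; the guide's `ObjectDetection` class wires an equivalent call into its detection loop):

```python
import smtplib
from email.mime.text import MIMEText


def send_alert(to_addr: str, object_count: int) -> None:
    """Email a one-line security alert for a detection event."""
    msg = MIMEText(f"Security Alert: {object_count} object(s) detected.")
    msg["Subject"] = "Security Alert"
    msg["From"] = "alerts@example.com"  # placeholder sender
    msg["To"] = to_addr

    with smtplib.SMTP("smtp.gmail.com", 587) as server:  # placeholder server
        server.starttls()
        server.login("alerts@example.com", "app-password")  # placeholder credentials
        server.send_message(msg)
```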
### Code @@ -90,7 +90,7 @@ class ObjectDetection: self.email_sent = False # model information - self.model = YOLO("yolov8n.pt") + self.model = YOLO("yolo11n.pt") # visual information self.annotator = None @@ -155,7 +155,7 @@ class ObjectDetection: self.email_sent = False self.display_fps(im0) - cv2.imshow("YOLOv8 Detection", im0) + cv2.imshow("YOLO11 Detection", im0) frame_count += 1 if cv2.waitKey(5) & 0xFF == 27: break @@ -179,22 +179,22 @@ That's it! When you execute the code, you'll receive a single notification on yo ## FAQ -### How does Ultralytics YOLOv8 improve the accuracy of a security alarm system? +### How does Ultralytics YOLO11 improve the accuracy of a security alarm system? -Ultralytics YOLOv8 enhances security alarm systems by delivering high-accuracy, real-time object detection. Its advanced algorithms significantly reduce false positives, ensuring that the system only responds to genuine threats. This increased reliability can be seamlessly integrated with existing security infrastructure, upgrading the overall surveillance quality. +Ultralytics YOLO11 enhances security alarm systems by delivering high-accuracy, real-time object detection. Its advanced algorithms significantly reduce false positives, ensuring that the system only responds to genuine threats. This increased reliability can be seamlessly integrated with existing security infrastructure, upgrading the overall surveillance quality. -### Can I integrate Ultralytics YOLOv8 with my existing security infrastructure? +### Can I integrate Ultralytics YOLO11 with my existing security infrastructure? -Yes, Ultralytics YOLOv8 can be seamlessly integrated with your existing security infrastructure. The system supports various modes and provides flexibility for customization, allowing you to enhance your existing setup with advanced object detection capabilities. For detailed instructions on integrating YOLOv8 in your projects, visit the [integration section](https://docs.ultralytics.com/integrations/). +Yes, Ultralytics YOLO11 can be seamlessly integrated with your existing security infrastructure. The system supports various modes and provides flexibility for customization, allowing you to enhance your existing setup with advanced object detection capabilities. For detailed instructions on integrating YOLO11 in your projects, visit the [integration section](https://docs.ultralytics.com/integrations/). -### What are the storage requirements for running Ultralytics YOLOv8? +### What are the storage requirements for running Ultralytics YOLO11? -Running Ultralytics YOLOv8 on a standard setup typically requires around 5GB of free disk space. This includes space for storing the YOLOv8 model and any additional dependencies. For cloud-based solutions, Ultralytics HUB offers efficient project management and dataset handling, which can optimize storage needs. Learn more about the [Pro Plan](../hub/pro.md) for enhanced features including extended storage. +Running Ultralytics YOLO11 on a standard setup typically requires around 5GB of free disk space. This includes space for storing the YOLO11 model and any additional dependencies. For cloud-based solutions, Ultralytics HUB offers efficient project management and dataset handling, which can optimize storage needs. Learn more about the [Pro Plan](../hub/pro.md) for enhanced features including extended storage. -### What makes Ultralytics YOLOv8 different from other object detection models like Faster R-CNN or SSD? 
+### What makes Ultralytics YOLO11 different from other object detection models like Faster R-CNN or SSD? -Ultralytics YOLOv8 provides an edge over models like Faster R-CNN or SSD with its real-time detection capabilities and higher accuracy. Its unique architecture allows it to process images much faster without compromising on [precision](https://www.ultralytics.com/glossary/precision), making it ideal for time-sensitive applications like security alarm systems. For a comprehensive comparison of object detection models, you can explore our [guide](https://docs.ultralytics.com/models/). +Ultralytics YOLO11 provides an edge over models like Faster R-CNN or SSD with its real-time detection capabilities and higher accuracy. Its unique architecture allows it to process images much faster without compromising on [precision](https://www.ultralytics.com/glossary/precision), making it ideal for time-sensitive applications like security alarm systems. For a comprehensive comparison of object detection models, you can explore our [guide](https://docs.ultralytics.com/models/). -### How can I reduce the frequency of false positives in my security system using Ultralytics YOLOv8? +### How can I reduce the frequency of false positives in my security system using Ultralytics YOLO11? -To reduce false positives, ensure your Ultralytics YOLOv8 model is adequately trained with a diverse and well-annotated dataset. Fine-tuning hyperparameters and regularly updating the model with new data can significantly improve detection accuracy. Detailed [hyperparameter tuning](https://www.ultralytics.com/glossary/hyperparameter-tuning) techniques can be found in our [hyperparameter tuning guide](../guides/hyperparameter-tuning.md). +To reduce false positives, ensure your Ultralytics YOLO11 model is adequately trained with a diverse and well-annotated dataset. Fine-tuning hyperparameters and regularly updating the model with new data can significantly improve detection accuracy. Detailed [hyperparameter tuning](https://www.ultralytics.com/glossary/hyperparameter-tuning) techniques can be found in our [hyperparameter tuning guide](../guides/hyperparameter-tuning.md). diff --git a/docs/en/guides/speed-estimation.md b/docs/en/guides/speed-estimation.md index 6f3726c921..6a6c192de1 100644 --- a/docs/en/guides/speed-estimation.md +++ b/docs/en/guides/speed-estimation.md @@ -1,14 +1,14 @@ --- comments: true -description: Learn how to estimate object speed using Ultralytics YOLOv8 for applications in traffic control, autonomous navigation, and surveillance. -keywords: Ultralytics YOLOv8, speed estimation, object tracking, computer vision, traffic control, autonomous navigation, surveillance, security +description: Learn how to estimate object speed using Ultralytics YOLO11 for applications in traffic control, autonomous navigation, and surveillance. +keywords: Ultralytics YOLO11, speed estimation, object tracking, computer vision, traffic control, autonomous navigation, surveillance, security --- -# Speed Estimation using Ultralytics YOLOv8 🚀 +# Speed Estimation using Ultralytics YOLO11 🚀 ## What is Speed Estimation? -[Speed estimation](https://www.ultralytics.com/blog/ultralytics-yolov8-for-speed-estimation-in-computer-vision-projects) is the process of calculating the rate of movement of an object within a given context, often employed in [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) applications. 
Using [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) you can now calculate the speed of object using [object tracking](../modes/track.md) alongside distance and time data, crucial for tasks like traffic and surveillance. The accuracy of speed estimation directly influences the efficiency and reliability of various applications, making it a key component in the advancement of intelligent systems and real-time decision-making processes. +[Speed estimation](https://www.ultralytics.com/blog/ultralytics-yolov8-for-speed-estimation-in-computer-vision-projects) is the process of calculating the rate of movement of an object within a given context, often employed in [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) applications. Using [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics/) you can now calculate the speed of an object using [object tracking](../modes/track.md) alongside distance and time data, which is crucial for tasks like traffic monitoring and surveillance. The accuracy of speed estimation directly influences the efficiency and reliability of various applications, making it a key component in the advancement of intelligent systems and real-time decision-making processes.
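The arithmetic underneath is simple: per-frame displacement from the tracker, converted to real-world units and scaled by the frame rate. A toy sketch with assumed calibration values:

```python
# All three inputs below are assumptions for illustration only
pixels_moved = 12.0      # tracked object displacement between two frames
meters_per_pixel = 0.02  # from camera calibration
fps = 30.0               # video frame rate

speed_mps = pixels_moved * meters_per_pixel * fps  # 7.2 m/s
speed_kmh = speed_mps * 3.6
print(f"{speed_kmh:.1f} km/h")  # 25.9 km/h
```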


@@ -18,12 +18,12 @@ keywords: Ultralytics YOLOv8, speed estimation, object tracking, computer vision allowfullscreen>
- Watch: Speed Estimation using Ultralytics YOLOv8 + Watch: Speed Estimation using Ultralytics YOLO11

!!! tip "Check Out Our Blog" - For deeper insights into speed estimation, check out our blog post: [Ultralytics YOLOv8 for Speed Estimation in Computer Vision Projects](https://www.ultralytics.com/blog/ultralytics-yolov8-for-speed-estimation-in-computer-vision-projects) + For deeper insights into speed estimation, check out our blog post: [Ultralytics YOLO11 for Speed Estimation in Computer Vision Projects](https://www.ultralytics.com/blog/ultralytics-yolov8-for-speed-estimation-in-computer-vision-projects) ## Advantages of Speed Estimation? @@ -35,10 +35,10 @@ keywords: Ultralytics YOLOv8, speed estimation, object tracking, computer vision | Transportation | Transportation | | :------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| ![Speed Estimation on Road using Ultralytics YOLOv8](https://github.com/ultralytics/docs/releases/download/0/speed-estimation-on-road-using-ultralytics-yolov8.avif) | ![Speed Estimation on Bridge using Ultralytics YOLOv8](https://github.com/ultralytics/docs/releases/download/0/speed-estimation-on-bridge-using-ultralytics-yolov8.avif) | -| Speed Estimation on Road using Ultralytics YOLOv8 | Speed Estimation on Bridge using Ultralytics YOLOv8 | +| ![Speed Estimation on Road using Ultralytics YOLO11](https://github.com/ultralytics/docs/releases/download/0/speed-estimation-on-road-using-ultralytics-yolov8.avif) | ![Speed Estimation on Bridge using Ultralytics YOLO11](https://github.com/ultralytics/docs/releases/download/0/speed-estimation-on-bridge-using-ultralytics-yolov8.avif) | +| Speed Estimation on Road using Ultralytics YOLO11 | Speed Estimation on Bridge using Ultralytics YOLO11 | -!!! example "Speed Estimation using YOLOv8 Example" +!!! example "Speed Estimation using YOLO11 Example" === "Speed Estimation" @@ -47,7 +47,7 @@ keywords: Ultralytics YOLOv8, speed estimation, object tracking, computer vision from ultralytics import YOLO, solutions - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") names = model.model.names cap = cv2.VideoCapture("path/to/video/file.mp4") @@ -102,9 +102,9 @@ keywords: Ultralytics YOLOv8, speed estimation, object tracking, computer vision ## FAQ -### How do I estimate object speed using Ultralytics YOLOv8? +### How do I estimate object speed using Ultralytics YOLO11? -Estimating object speed with Ultralytics YOLOv8 involves combining [object detection](https://www.ultralytics.com/glossary/object-detection) and tracking techniques. First, you need to detect objects in each frame using the YOLOv8 model. Then, track these objects across frames to calculate their movement over time. Finally, use the distance traveled by the object between frames and the frame rate to estimate its speed. +Estimating object speed with Ultralytics YOLO11 involves combining [object detection](https://www.ultralytics.com/glossary/object-detection) and tracking techniques. First, you need to detect objects in each frame using the YOLO11 model. Then, track these objects across frames to calculate their movement over time. Finally, use the distance traveled by the object between frames and the frame rate to estimate its speed. 
**Example**: @@ -113,7 +113,7 @@ import cv2 from ultralytics import YOLO, solutions -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") names = model.model.names cap = cv2.VideoCapture("path/to/video/file.mp4") @@ -142,43 +142,43 @@ cv2.destroyAllWindows() For more details, refer to our [official blog post](https://www.ultralytics.com/blog/ultralytics-yolov8-for-speed-estimation-in-computer-vision-projects). -### What are the benefits of using Ultralytics YOLOv8 for speed estimation in traffic management? +### What are the benefits of using Ultralytics YOLO11 for speed estimation in traffic management? -Using Ultralytics YOLOv8 for speed estimation offers significant advantages in traffic management: +Using Ultralytics YOLO11 for speed estimation offers significant advantages in traffic management: - **Enhanced Safety**: Accurately estimate vehicle speeds to detect over-speeding and improve road safety. -- **Real-Time Monitoring**: Benefit from YOLOv8's real-time object detection capability to monitor traffic flow and congestion effectively. +- **Real-Time Monitoring**: Benefit from YOLO11's real-time object detection capability to monitor traffic flow and congestion effectively. - **Scalability**: Deploy the model on various hardware setups, from edge devices to servers, ensuring flexible and scalable solutions for large-scale implementations. For more applications, see [advantages of speed estimation](#advantages-of-speed-estimation). -### Can YOLOv8 be integrated with other AI frameworks like [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) or [PyTorch](https://www.ultralytics.com/glossary/pytorch)? +### Can YOLO11 be integrated with other AI frameworks like [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) or [PyTorch](https://www.ultralytics.com/glossary/pytorch)? -Yes, YOLOv8 can be integrated with other AI frameworks like TensorFlow and PyTorch. Ultralytics provides support for exporting YOLOv8 models to various formats like ONNX, TensorRT, and CoreML, ensuring smooth interoperability with other ML frameworks. +Yes, YOLO11 can be integrated with other AI frameworks like TensorFlow and PyTorch. Ultralytics provides support for exporting YOLO11 models to various formats like ONNX, TensorRT, and CoreML, ensuring smooth interoperability with other ML frameworks. -To export a YOLOv8 model to ONNX format: +To export a YOLO11 model to ONNX format: ```bash -yolo export --weights yolov8n.pt --include onnx +yolo export model=yolo11n.pt format=onnx ``` Learn more about exporting models in our [guide on export](../modes/export.md). -### How accurate is the speed estimation using Ultralytics YOLOv8? +### How accurate is the speed estimation using Ultralytics YOLO11? -The [accuracy](https://www.ultralytics.com/glossary/accuracy) of speed estimation using Ultralytics YOLOv8 depends on several factors, including the quality of the object tracking, the resolution and frame rate of the video, and environmental variables. While the speed estimator provides reliable estimates, it may not be 100% accurate due to variances in frame processing speed and object occlusion. +The [accuracy](https://www.ultralytics.com/glossary/accuracy) of speed estimation using Ultralytics YOLO11 depends on several factors, including the quality of the object tracking, the resolution and frame rate of the video, and environmental variables. While the speed estimator provides reliable estimates, it may not be 100% accurate due to variances in frame processing speed and object occlusion.
**Note**: Always consider margin of error and validate the estimates with ground truth data when possible. For further accuracy improvement tips, check the [Arguments `SpeedEstimator` section](#arguments-speedestimator). -### Why choose Ultralytics YOLOv8 over other object detection models like TensorFlow Object Detection API? +### Why choose Ultralytics YOLO11 over other object detection models like TensorFlow Object Detection API? -Ultralytics YOLOv8 offers several advantages over other object detection models, such as the TensorFlow Object Detection API: +Ultralytics YOLO11 offers several advantages over other object detection models, such as the TensorFlow Object Detection API: -- **Real-Time Performance**: YOLOv8 is optimized for real-time detection, providing high speed and accuracy. -- **Ease of Use**: Designed with a user-friendly interface, YOLOv8 simplifies model training and deployment. +- **Real-Time Performance**: YOLO11 is optimized for real-time detection, providing high speed and accuracy. +- **Ease of Use**: Designed with a user-friendly interface, YOLO11 simplifies model training and deployment. - **Versatility**: Supports multiple tasks, including object detection, segmentation, and pose estimation. -- **Community and Support**: YOLOv8 is backed by an active community and extensive documentation, ensuring developers have the resources they need. +- **Community and Support**: YOLO11 is backed by an active community and extensive documentation, ensuring developers have the resources they need. -For more information on the benefits of YOLOv8, explore our detailed [model page](../models/yolov8.md). +For more information on the benefits of YOLO11, explore our detailed [model page](../models/yolov8.md). diff --git a/docs/en/guides/steps-of-a-cv-project.md b/docs/en/guides/steps-of-a-cv-project.md index bbdbef5f35..b0f03c1eac 100644 --- a/docs/en/guides/steps-of-a-cv-project.md +++ b/docs/en/guides/steps-of-a-cv-project.md @@ -100,7 +100,7 @@ However, if you choose to collect images or take your own pictures, you'll need Different Types of Image Annotation

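For detection tasks, the YOLO convention stores annotations as one text file per image, with one row per object: `class x_center y_center width height`, all normalized to the range 0-1. A tiny sketch writing such a file (the values and paths are illustrative):

```python
import os

os.makedirs("labels", exist_ok=True)

# One row per annotated object: class x_center y_center width height
label_rows = [
    "0 0.481 0.634 0.212 0.205",  # class 0, box roughly 21% x 21% of the image
    "2 0.127 0.402 0.096 0.188",
]
with open("labels/image_0001.txt", "w") as f:
    f.write("\n".join(label_rows) + "\n")
```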
-[Data collection and annotation](./data-collection-and-annotation.md) can be a time-consuming manual effort. Annotation tools can help make this process easier. Here are some useful open annotation tools: [LabeI Studio](https://github.com/HumanSignal/label-studio), [CVAT](https://github.com/cvat-ai/cvat), and [Labelme](https://github.com/labelmeai/labelme). +[Data collection and annotation](./data-collection-and-annotation.md) can be a time-consuming manual effort. Annotation tools can help make this process easier. Here are some useful open annotation tools: [Label Studio](https://github.com/HumanSignal/label-studio), [CVAT](https://github.com/cvat-ai/cvat), and [Labelme](https://github.com/wkentaro/labelme). ## Step 3: [Data Augmentation](https://www.ultralytics.com/glossary/data-augmentation) and Splitting Your Dataset @@ -166,7 +166,7 @@ Once your model has been thoroughly tested, it's time to deploy it. Deployment i - Setting Up the Environment: Configure the necessary infrastructure for your chosen deployment option, whether it's cloud-based (AWS, Google Cloud, Azure) or edge-based (local devices, IoT). -- **[Exporting the Model](../modes/export.md):** Export your model to the appropriate format (e.g., ONNX, TensorRT, CoreML for YOLOv8) to ensure compatibility with your deployment platform. +- **[Exporting the Model](../modes/export.md):** Export your model to the appropriate format (e.g., ONNX, TensorRT, CoreML for YOLO11) to ensure compatibility with your deployment platform. - **Deploying the Model:** Deploy the model by setting up APIs or endpoints and integrating it with your application. - **Ensuring Scalability**: Implement load balancers, auto-scaling groups, and monitoring tools to manage resources and handle increasing data and user requests. @@ -188,12 +188,12 @@ Connecting with a community of computer vision enthusiasts can help you tackle a ### Community Resources -- **GitHub Issues:** Check out the [YOLOv8 GitHub repository](https://github.com/ultralytics/ultralytics/issues) and use the Issues tab to ask questions, report bugs, and suggest new features. The active community and maintainers are there to help with specific issues. +- **GitHub Issues:** Check out the [YOLO11 GitHub repository](https://github.com/ultralytics/ultralytics/issues) and use the Issues tab to ask questions, report bugs, and suggest new features. The active community and maintainers are there to help with specific issues. - **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://discord.com/invite/ultralytics) to interact with other users and developers, get support, and share insights. ### Official Documentation -- **Ultralytics YOLOv8 Documentation:** Explore the [official YOLOv8 documentation](./index.md) for detailed guides with helpful tips on different computer vision tasks and projects. +- **Ultralytics YOLO11 Documentation:** Explore the [official YOLO11 documentation](./index.md) for detailed guides with helpful tips on different computer vision tasks and projects. Using these resources will help you overcome challenges and stay updated with the latest trends and best practices in the computer vision community. @@ -215,7 +215,7 @@ Data annotation is vital for teaching your model to recognize patterns. The type - **Object Detection**: Bounding boxes drawn around objects. - **Image Segmentation**: Each pixel labeled according to the object it belongs to.
-Tools like [Label Studio](https://github.com/HumanSignal/label-studio), [CVAT](https://github.com/cvat-ai/cvat), and [Labelme](https://github.com/labelmeai/labelme) can assist in this process. For more details, refer to our [data collection and annotation guide](./data-collection-and-annotation.md). +Tools like [Label Studio](https://github.com/HumanSignal/label-studio), [CVAT](https://github.com/cvat-ai/cvat), and [Labelme](https://github.com/wkentaro/labelme) can assist in this process. For more details, refer to our [data collection and annotation guide](./data-collection-and-annotation.md). ### What steps should I follow to augment and split my dataset effectively? @@ -229,7 +229,7 @@ After splitting, apply data augmentation techniques like rotation, scaling, and ### How can I export my trained computer vision model for deployment? -Exporting your model ensures compatibility with different deployment platforms. Ultralytics provides multiple formats, including ONNX, TensorRT, and CoreML. To export your YOLOv8 model, follow this guide: +Exporting your model ensures compatibility with different deployment platforms. Ultralytics provides multiple formats, including ONNX, TensorRT, and CoreML. To export your YOLO11 model, follow this guide: - Use the `export` function with the desired format parameter. - Ensure the exported model fits the specifications of your deployment environment (e.g., edge devices, cloud). diff --git a/docs/en/guides/streamlit-live-inference.md index e8fb5c9165..b5831fd4f5 100644 --- a/docs/en/guides/streamlit-live-inference.md +++ b/docs/en/guides/streamlit-live-inference.md @@ -1,14 +1,14 @@ --- comments: true -description: Learn how to set up a real-time object detection application using Streamlit and Ultralytics YOLOv8. Follow this step-by-step guide to implement webcam-based object detection. -keywords: Streamlit, YOLOv8, Real-time Object Detection, Streamlit Application, YOLOv8 Streamlit Tutorial, Webcam Object Detection +description: Learn how to set up a real-time object detection application using Streamlit and Ultralytics YOLO11. Follow this step-by-step guide to implement webcam-based object detection. +keywords: Streamlit, YOLO11, Real-time Object Detection, Streamlit Application, YOLO11 Streamlit Tutorial, Webcam Object Detection --- -# Live Inference with Streamlit Application using Ultralytics YOLOv8 +# Live Inference with Streamlit Application using Ultralytics YOLO11 ## Introduction -Streamlit makes it simple to build and deploy interactive web applications. Combining this with Ultralytics YOLOv8 allows for real-time [object detection](https://www.ultralytics.com/glossary/object-detection) and analysis directly in your browser. YOLOv8 high accuracy and speed ensure seamless performance for live video streams, making it ideal for applications in security, retail, and beyond. +Streamlit makes it simple to build and deploy interactive web applications. Combining this with Ultralytics YOLO11 allows for real-time [object detection](https://www.ultralytics.com/glossary/object-detection) and analysis directly in your browser. YOLO11's high accuracy and speed ensure seamless performance for live video streams, making it ideal for applications in security, retail, and beyond.


@@ -18,19 +18,19 @@ Streamlit makes it simple to build and deploy interactive web applications. Comb allowfullscreen>
- Watch: How to Use Streamlit with Ultralytics for Real-Time [Computer Vision](https://www.ultralytics.com/glossary/computer-vision-cv) in Your Browser + Watch: How to Use Streamlit with Ultralytics for Real-Time Computer Vision in Your Browser

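For orientation, a bare-bones version of such an app fits in a few lines. This is a sketch only (it assumes a local webcam at device index 0; the packaged `yolo streamlit-predict` app shown later in this guide is the full-featured version):

```python
# Save as app.py and launch with:  streamlit run app.py
import cv2
import streamlit as st

from ultralytics import YOLO

st.title("YOLO11 live inference")
model = YOLO("yolo11n.pt")

frame_slot = st.empty()
cap = cv2.VideoCapture(0)  # default webcam

run = st.checkbox("Start")
while run:
    ok, frame = cap.read()
    if not ok:
        break
    results = model(frame, verbose=False)
    # plot() returns a BGR array with boxes drawn
    frame_slot.image(results[0].plot(), channels="BGR")

cap.release()
```

Unchecking the box triggers a Streamlit rerun, which stops the loop; the production app handles start/stop and model selection more robustly.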
| Aquaculture | Animal husbandry | | :----------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------: | -| ![Fish Detection using Ultralytics YOLOv8](https://github.com/ultralytics/docs/releases/download/0/fish-detection-ultralytics-yolov8.avif) | ![Animals Detection using Ultralytics YOLOv8](https://github.com/ultralytics/docs/releases/download/0/animals-detection-yolov8.avif) | -| Fish Detection using Ultralytics YOLOv8 | Animals Detection using Ultralytics YOLOv8 | +| ![Fish Detection using Ultralytics YOLO11](https://github.com/ultralytics/docs/releases/download/0/fish-detection-ultralytics-yolov8.avif) | ![Animals Detection using Ultralytics YOLO11](https://github.com/ultralytics/docs/releases/download/0/animals-detection-yolov8.avif) | +| Fish Detection using Ultralytics YOLO11 | Animals Detection using Ultralytics YOLO11 | ## Advantages of Live Inference -- **Seamless Real-Time Object Detection**: Streamlit combined with YOLOv8 enables real-time object detection directly from your webcam feed. This allows for immediate analysis and insights, making it ideal for applications requiring instant feedback. +- **Seamless Real-Time Object Detection**: Streamlit combined with YOLO11 enables real-time object detection directly from your webcam feed. This allows for immediate analysis and insights, making it ideal for applications requiring instant feedback. - **User-Friendly Deployment**: Streamlit's interactive interface makes it easy to deploy and use the application without extensive technical knowledge. Users can start live inference with a simple click, enhancing accessibility and usability. -- **Efficient Resource Utilization**: YOLOv8 optimized algorithm ensure high-speed processing with minimal computational resources. This efficiency allows for smooth and reliable webcam inference even on standard hardware, making advanced computer vision accessible to a wider audience. +- **Efficient Resource Utilization**: YOLO11's optimized algorithms ensure high-speed processing with minimal computational resources. This efficiency allows for smooth and reliable webcam inference even on standard hardware, making advanced computer vision accessible to a wider audience. ## Streamlit Application Code @@ -56,7 +56,7 @@ Streamlit makes it simple to build and deploy interactive web applications. Comb yolo streamlit-predict ``` -This will launch the Streamlit application in your default web browser. You will see the main title, subtitle, and the sidebar with configuration options. Select your desired YOLOv8 model, set the confidence and NMS thresholds, and click the "Start" button to begin the real-time object detection. +This will launch the Streamlit application in your default web browser. You will see the main title, subtitle, and the sidebar with configuration options. Select your desired YOLO11 model, set the confidence and NMS thresholds, and click the "Start" button to begin the real-time object detection. You can optionally supply a specific model in Python: @@ -75,7 +75,7 @@ You can optionally supply a specific model in Python: ## Conclusion -By following this guide, you have successfully created a real-time object detection application using Streamlit and Ultralytics YOLOv8. 
This application allows you to experience the power of YOLOv8 in detecting objects through your webcam, with a user-friendly interface and the ability to stop the video stream at any time. +By following this guide, you have successfully created a real-time object detection application using Streamlit and Ultralytics YOLO11. This application allows you to experience the power of YOLO11 in detecting objects through your webcam, with a user-friendly interface and the ability to stop the video stream at any time. For further enhancements, you can explore adding more features such as recording the video stream, saving the annotated frames, or integrating with other computer vision libraries. @@ -90,13 +90,13 @@ Engage with the community to learn more, troubleshoot issues, and share your pro ### Official Documentation -- **Ultralytics YOLOv8 Documentation:** Refer to the [official YOLOv8 documentation](https://docs.ultralytics.com/) for comprehensive guides and insights on various computer vision tasks and projects. +- **Ultralytics YOLO11 Documentation:** Refer to the [official YOLO11 documentation](https://docs.ultralytics.com/) for comprehensive guides and insights on various computer vision tasks and projects. ## FAQ -### How can I set up a real-time object detection application using Streamlit and Ultralytics YOLOv8? +### How can I set up a real-time object detection application using Streamlit and Ultralytics YOLO11? -Setting up a real-time object detection application with Streamlit and Ultralytics YOLOv8 is straightforward. First, ensure you have the Ultralytics Python package installed using: +Setting up a real-time object detection application with Streamlit and Ultralytics YOLO11 is straightforward. First, ensure you have the Ultralytics Python package installed using: ```bash pip install ultralytics @@ -124,29 +124,29 @@ Then, you can create a basic Streamlit application to run live inference: For more details on the practical setup, refer to the [Streamlit Application Code section](#streamlit-application-code) of the documentation. -### What are the main advantages of using Ultralytics YOLOv8 with Streamlit for real-time object detection? +### What are the main advantages of using Ultralytics YOLO11 with Streamlit for real-time object detection? -Using Ultralytics YOLOv8 with Streamlit for real-time object detection offers several advantages: +Using Ultralytics YOLO11 with Streamlit for real-time object detection offers several advantages: - **Seamless Real-Time Detection**: Achieve high-[accuracy](https://www.ultralytics.com/glossary/accuracy), real-time object detection directly from webcam feeds. - **User-Friendly Interface**: Streamlit's intuitive interface allows easy use and deployment without extensive technical knowledge. -- **Resource Efficiency**: YOLOv8's optimized algorithms ensure high-speed processing with minimal computational resources. +- **Resource Efficiency**: YOLO11's optimized algorithms ensure high-speed processing with minimal computational resources. Discover more about these advantages [here](#advantages-of-live-inference). ### How do I deploy a Streamlit object detection application in my web browser? 
-After coding your Streamlit application integrating Ultralytics YOLOv8, you can deploy it by running:
+After coding your Streamlit application integrating Ultralytics YOLO11, you can deploy it by running:

```bash
streamlit run
```

-This command will launch the application in your default web browser, enabling you to select YOLOv8 models, set confidence, and NMS thresholds, and start real-time object detection with a simple click. For a detailed guide, refer to the [Streamlit Application Code](#streamlit-application-code) section.
+This command will launch the application in your default web browser, enabling you to select YOLO11 models, set confidence and NMS thresholds, and start real-time object detection with a simple click. For a detailed guide, refer to the [Streamlit Application Code](#streamlit-application-code) section.

-### What are some use cases for real-time object detection using Streamlit and Ultralytics YOLOv8?
+### What are some use cases for real-time object detection using Streamlit and Ultralytics YOLO11?

-Real-time object detection using Streamlit and Ultralytics YOLOv8 can be applied in various sectors:
+Real-time object detection using Streamlit and Ultralytics YOLO11 can be applied in various sectors:

- **Security**: Real-time monitoring for unauthorized access.
- **Retail**: Customer counting, shelf management, and more.

@@ -154,12 +154,12 @@ Real-time object detection using Streamlit and Ultralytics YOLOv8 can be applied

For more in-depth use cases and examples, explore [Ultralytics Solutions](https://docs.ultralytics.com/solutions/).

-### How does Ultralytics YOLOv8 compare to other object detection models like YOLOv5 and RCNNs?
+### How does Ultralytics YOLO11 compare to other object detection models like YOLOv5 and RCNNs?

-Ultralytics YOLOv8 provides several enhancements over prior models like YOLOv5 and RCNNs:
+Ultralytics YOLO11 provides several enhancements over prior models like YOLOv5 and RCNNs:

- **Higher Speed and Accuracy**: Improved performance for real-time applications.
- **Ease of Use**: Simplified interfaces and deployment.
- **Resource Efficiency**: Optimized for better speed with minimal computational requirements.

-For a comprehensive comparison, check [Ultralytics YOLOv8 Documentation](https://docs.ultralytics.com/models/yolov8/) and related blog posts discussing model performance.
+For a comprehensive comparison, check [Ultralytics YOLO11 Documentation](https://docs.ultralytics.com/models/yolov8/) and related blog posts discussing model performance.
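For readers reproducing the Streamlit guide outside this diff, here is a minimal sketch of a hand-rolled live-inference app, not the packaged `yolo streamlit-predict` application. The filename `app.py`, the checkbox-driven loop, and webcam index `0` are assumptions for illustration.

```python
# app.py -- minimal Streamlit live-inference sketch; run with: streamlit run app.py
import cv2
import streamlit as st

from ultralytics import YOLO

st.title("Ultralytics YOLO11 Live Inference")
conf = st.sidebar.slider("Confidence threshold", 0.0, 1.0, 0.25)
run = st.checkbox("Start webcam")

model = YOLO("yolo11n.pt")
frame_placeholder = st.empty()
cap = cv2.VideoCapture(0)  # assumed default webcam

while run:
    ok, frame = cap.read()
    if not ok:
        st.warning("Could not read a frame from the webcam.")
        break
    results = model(frame, conf=conf)
    annotated = results[0].plot()  # BGR array with detections drawn
    frame_placeholder.image(annotated, channels="BGR")

cap.release()
```

Unchecking the box stops the loop on the next Streamlit rerun, which mirrors the "stop the video stream at any time" behavior described above.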
diff --git a/docs/en/guides/triton-inference-server.md b/docs/en/guides/triton-inference-server.md
index 1233ba9ae1..7395ccef11 100644
--- a/docs/en/guides/triton-inference-server.md
+++ b/docs/en/guides/triton-inference-server.md
@@ -1,12 +1,12 @@
---
comments: true
-description: Learn how to integrate Ultralytics YOLOv8 with NVIDIA Triton Inference Server for scalable, high-performance AI model deployment.
-keywords: Triton Inference Server, YOLOv8, Ultralytics, NVIDIA, deep learning, AI model deployment, ONNX, scalable inference
+description: Learn how to integrate Ultralytics YOLO11 with NVIDIA Triton Inference Server for scalable, high-performance AI model deployment.
+keywords: Triton Inference Server, YOLO11, Ultralytics, NVIDIA, deep learning, AI model deployment, ONNX, scalable inference
---

-# Triton Inference Server with Ultralytics YOLOv8
+# Triton Inference Server with Ultralytics YOLO11

-The [Triton Inference Server](https://developer.nvidia.com/triton-inference-server) (formerly known as TensorRT Inference Server) is an open-source software solution developed by NVIDIA. It provides a cloud inference solution optimized for NVIDIA GPUs. Triton simplifies the deployment of AI models at scale in production. Integrating Ultralytics YOLOv8 with Triton Inference Server allows you to deploy scalable, high-performance [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) inference workloads. This guide provides steps to set up and test the integration.
+The [Triton Inference Server](https://developer.nvidia.com/triton-inference-server) (formerly known as TensorRT Inference Server) is an open-source software solution developed by NVIDIA. It provides a cloud inference solution optimized for NVIDIA GPUs. Triton simplifies the deployment of AI models at scale in production. Integrating Ultralytics YOLO11 with Triton Inference Server allows you to deploy scalable, high-performance [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) inference workloads. This guide provides steps to set up and test the integration.


@@ -38,7 +38,7 @@ Ensure you have the following prerequisites before proceeding:
    pip install tritonclient[all]
    ```

-## Exporting YOLOv8 to ONNX Format
+## Exporting YOLO11 to ONNX Format

Before deploying the model on Triton, it must be exported to the ONNX format. ONNX (Open Neural Network Exchange) is a format that allows models to be transferred between different deep learning frameworks. Use the `export` function from the `YOLO` class:

@@ -46,7 +46,7 @@ Before deploying the model on Triton, it must be exported to the ONNX format. ON
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n.pt")  # load an official model
+model = YOLO("yolo11n.pt")  # load an official model

# Export the model
onnx_file = model.export(format="onnx", dynamic=True)

@@ -141,21 +141,21 @@ subprocess.call(f"docker kill {container_id}", shell=True)

---

-By following the above steps, you can deploy and run Ultralytics YOLOv8 models efficiently on Triton Inference Server, providing a scalable and high-performance solution for deep learning inference tasks. If you face any issues or have further queries, refer to the [official Triton documentation](https://docs.nvidia.com/deeplearning/triton-inference-server/user-guide/docs/index.html) or reach out to the Ultralytics community for support.
+By following the above steps, you can deploy and run Ultralytics YOLO11 models efficiently on Triton Inference Server, providing a scalable and high-performance solution for deep learning inference tasks. If you face any issues or have further queries, refer to the [official Triton documentation](https://docs.nvidia.com/deeplearning/triton-inference-server/user-guide/docs/index.html) or reach out to the Ultralytics community for support.

## FAQ

-### How do I set up Ultralytics YOLOv8 with NVIDIA Triton Inference Server?
+### How do I set up Ultralytics YOLO11 with NVIDIA Triton Inference Server?

-Setting up [Ultralytics YOLOv8](https://docs.ultralytics.com/models/yolov8/) with [NVIDIA Triton Inference Server](https://developer.nvidia.com/triton-inference-server) involves a few key steps:
+Setting up [Ultralytics YOLO11](https://docs.ultralytics.com/models/yolov8/) with [NVIDIA Triton Inference Server](https://developer.nvidia.com/triton-inference-server) involves a few key steps:

-1. **Export YOLOv8 to ONNX format**:
+1. **Export YOLO11 to ONNX format**:

    ```python
    from ultralytics import YOLO

    # Load a model
-    model = YOLO("yolov8n.pt")  # load an official model
+    model = YOLO("yolo11n.pt")  # load an official model

    # Export the model to ONNX format
    onnx_file = model.export(format="onnx", dynamic=True)

@@ -209,21 +209,21 @@ Setting up [Ultralytics YOLOv8](https://docs.ultralytics.com/models/yolov8/) wit
        time.sleep(1)
    ```

-This setup can help you efficiently deploy YOLOv8 models at scale on Triton Inference Server for high-performance AI model inference.
+This setup can help you efficiently deploy YOLO11 models at scale on Triton Inference Server for high-performance AI model inference.

-### What benefits does using Ultralytics YOLOv8 with NVIDIA Triton Inference Server offer?
+### What benefits does using Ultralytics YOLO11 with NVIDIA Triton Inference Server offer?
-Integrating [Ultralytics YOLOv8](../models/yolov8.md) with [NVIDIA Triton Inference Server](https://developer.nvidia.com/triton-inference-server) provides several advantages:
+Integrating [Ultralytics YOLO11](../models/yolov8.md) with [NVIDIA Triton Inference Server](https://developer.nvidia.com/triton-inference-server) provides several advantages:

- **Scalable AI Inference**: Triton allows serving multiple models from a single server instance, supporting dynamic model loading and unloading, making it highly scalable for diverse AI workloads.
- **High Performance**: Optimized for NVIDIA GPUs, Triton Inference Server ensures high-speed inference operations, perfect for real-time applications such as [object detection](https://www.ultralytics.com/glossary/object-detection).
- **Ensemble and Model Versioning**: Triton's ensemble mode enables combining multiple models to improve results, and its model versioning supports A/B testing and rolling updates.

-For detailed instructions on setting up and running YOLOv8 with Triton, you can refer to the [setup guide](#setting-up-triton-model-repository).
+For detailed instructions on setting up and running YOLO11 with Triton, you can refer to the [setup guide](#setting-up-triton-model-repository).

-### Why should I export my YOLOv8 model to ONNX format before using Triton Inference Server?
+### Why should I export my YOLO11 model to ONNX format before using Triton Inference Server?

-Using ONNX (Open Neural Network Exchange) format for your [Ultralytics YOLOv8](../models/yolov8.md) model before deploying it on [NVIDIA Triton Inference Server](https://developer.nvidia.com/triton-inference-server) offers several key benefits:
+Using ONNX (Open Neural Network Exchange) format for your [Ultralytics YOLO11](../models/yolov8.md) model before deploying it on [NVIDIA Triton Inference Server](https://developer.nvidia.com/triton-inference-server) offers several key benefits:

- **Interoperability**: ONNX format supports transfer between different deep learning frameworks (such as PyTorch, TensorFlow), ensuring broader compatibility.
- **Optimization**: Many deployment environments, including Triton, optimize for ONNX, enabling faster inference and better performance.

@@ -234,15 +234,15 @@ To export your model, use:

```python
from ultralytics import YOLO

-model = YOLO("yolov8n.pt")
+model = YOLO("yolo11n.pt")
onnx_file = model.export(format="onnx", dynamic=True)
```

You can follow the steps in the [exporting guide](../modes/export.md) to complete the process.

-### Can I run inference using the Ultralytics YOLOv8 model on Triton Inference Server?
+### Can I run inference using the Ultralytics YOLO11 model on Triton Inference Server?

-Yes, you can run inference using the [Ultralytics YOLOv8](../models/yolov8.md) model on [NVIDIA Triton Inference Server](https://developer.nvidia.com/triton-inference-server). Once your model is set up in the Triton Model Repository and the server is running, you can load and run inference on your model as follows:
+Yes, you can run inference using the [Ultralytics YOLO11](../models/yolov8.md) model on [NVIDIA Triton Inference Server](https://developer.nvidia.com/triton-inference-server).
Once your model is set up in the Triton Model Repository and the server is running, you can load and run inference on your model as follows:

```python
from ultralytics import YOLO

@@ -254,14 +254,14 @@ model = YOLO("http://localhost:8000/yolo", task="detect")
results = model("path/to/image.jpg")
```

-For an in-depth guide on setting up and running Triton Server with YOLOv8, refer to the [running triton inference server](#running-triton-inference-server) section.
+For an in-depth guide on setting up and running Triton Server with YOLO11, refer to the [running triton inference server](#running-triton-inference-server) section.

-### How does Ultralytics YOLOv8 compare to [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) and PyTorch models for deployment?
+### How does Ultralytics YOLO11 compare to [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) and PyTorch models for deployment?

-[Ultralytics YOLOv8](https://docs.ultralytics.com/models/yolov8/) offers several unique advantages compared to TensorFlow and PyTorch models for deployment:
+[Ultralytics YOLO11](https://docs.ultralytics.com/models/yolov8/) offers several unique advantages compared to TensorFlow and PyTorch models for deployment:

-- **Real-time Performance**: Optimized for real-time object detection tasks, YOLOv8 provides state-of-the-art [accuracy](https://www.ultralytics.com/glossary/accuracy) and speed, making it ideal for applications requiring live video analytics.
-- **Ease of Use**: YOLOv8 integrates seamlessly with Triton Inference Server and supports diverse export formats (ONNX, TensorRT, CoreML), making it flexible for various deployment scenarios.
-- **Advanced Features**: YOLOv8 includes features like dynamic model loading, model versioning, and ensemble inference, which are crucial for scalable and reliable AI deployments.
+- **Real-time Performance**: Optimized for real-time object detection tasks, YOLO11 provides state-of-the-art [accuracy](https://www.ultralytics.com/glossary/accuracy) and speed, making it ideal for applications requiring live video analytics.
+- **Ease of Use**: YOLO11 integrates seamlessly with Triton Inference Server and supports diverse export formats (ONNX, TensorRT, CoreML), making it flexible for various deployment scenarios.
+- **Advanced Features**: YOLO11 includes features like dynamic model loading, model versioning, and ensemble inference, which are crucial for scalable and reliable AI deployments.

For more details, compare the deployment options in the [model deployment guide](../modes/export.md).
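As a supplement for readers following the Triton guide: the repository layout Triton expects is simple enough to sketch in a few lines. This is a minimal illustration consistent with the export step above, not the guide's full script; the repository path `tmp/triton_repo` and the served model name `yolo` are assumptions, and the empty `config.pbtxt` relies on Triton auto-generating a configuration from the ONNX file.

```python
from pathlib import Path

from ultralytics import YOLO

# Export to ONNX, then arrange files as <repo>/<model_name>/1/model.onnx
model = YOLO("yolo11n.pt")
onnx_file = model.export(format="onnx", dynamic=True)

triton_repo = Path("tmp/triton_repo")   # assumed repository location
model_dir = triton_repo / "yolo" / "1"  # "yolo" is the assumed served model name
model_dir.mkdir(parents=True, exist_ok=True)
Path(onnx_file).rename(model_dir / "model.onnx")

# An empty config lets Triton infer the configuration from the ONNX model
(triton_repo / "yolo" / "config.pbtxt").touch()
```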
diff --git a/docs/en/guides/view-results-in-terminal.md b/docs/en/guides/view-results-in-terminal.md
index cb6ce1c087..2d8ff4b647 100644
--- a/docs/en/guides/view-results-in-terminal.md
+++ b/docs/en/guides/view-results-in-terminal.md
@@ -47,7 +47,7 @@ The VSCode compatible protocols for viewing images using the integrated terminal
    from ultralytics import YOLO

    # Load a model
-    model = YOLO("yolov8n.pt")
+    model = YOLO("yolo11n.pt")

    # Run inference on an image
    results = model.predict(source="ultralytics/assets/bus.jpg")

@@ -111,7 +111,7 @@ from sixel import SixelWriter
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n.pt")
+model = YOLO("yolo11n.pt")

# Run inference on an image
results = model.predict(source="ultralytics/assets/bus.jpg")

@@ -164,7 +164,7 @@ To view YOLO inference results in a VSCode terminal on macOS or Linux, follow th
    ```python
    from ultralytics import YOLO

-    model = YOLO("yolov8n.pt")
+    model = YOLO("yolo11n.pt")
    results = model.predict(source="path_to_image")
    plot = results[0].plot()
    ```
diff --git a/docs/en/guides/vision-eye.md b/docs/en/guides/vision-eye.md
index 48cff2746d..f374388c9c 100644
--- a/docs/en/guides/vision-eye.md
+++ b/docs/en/guides/vision-eye.md
@@ -1,23 +1,23 @@
---
comments: true
-description: Discover VisionEye's object mapping and tracking powered by Ultralytics YOLOv8. Simulate human eye precision, track objects, and calculate distances effortlessly.
-keywords: VisionEye, YOLOv8, Ultralytics, object mapping, object tracking, distance calculation, computer vision, AI, machine learning, Python, tutorial
+description: Discover VisionEye's object mapping and tracking powered by Ultralytics YOLO11. Simulate human eye precision, track objects, and calculate distances effortlessly.
+keywords: VisionEye, YOLO11, Ultralytics, object mapping, object tracking, distance calculation, computer vision, AI, machine learning, Python, tutorial
---

-# VisionEye View Object Mapping using Ultralytics YOLOv8 🚀
+# VisionEye View Object Mapping using Ultralytics YOLO11 🚀

## What is VisionEye Object Mapping?

-[Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) VisionEye offers the capability for computers to identify and pinpoint objects, simulating the observational [precision](https://www.ultralytics.com/glossary/precision) of the human eye. This functionality enables computers to discern and focus on specific objects, much like the way the human eye observes details from a particular viewpoint.
+[Ultralytics YOLO11](https://github.com/ultralytics/ultralytics/) VisionEye offers the capability for computers to identify and pinpoint objects, simulating the observational [precision](https://www.ultralytics.com/glossary/precision) of the human eye. This functionality enables computers to discern and focus on specific objects, much like the way the human eye observes details from a particular viewpoint.
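For orientation before the video-based samples below, here is a single-image sketch of the idea. It assumes the `Annotator.visioneye` helper used throughout the samples, a bottom-left "eye" position, and the docs' standard `bus.jpg` asset; treat it as an illustration rather than the guide's canonical example.

```python
import cv2

from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator, colors

model = YOLO("yolo11n.pt")
im = cv2.imread("ultralytics/assets/bus.jpg")  # assumed test image
results = model(im)

annotator = Annotator(im)
vision_point = (0, im.shape[0])  # assumed "eye" position: bottom-left corner
for box, cls in zip(results[0].boxes.xyxy, results[0].boxes.cls):
    annotator.box_label(box, label=model.names[int(cls)], color=colors(int(cls), True))
    annotator.visioneye(box, vision_point)  # draw a line from the eye to each object

cv2.imwrite("visioneye_output.jpg", annotator.result())
```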
## Samples

| VisionEye View | VisionEye View With Object Tracking | VisionEye View With Distance Calculation |
| :----------------------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
-| ![VisionEye View Object Mapping using Ultralytics YOLOv8](https://github.com/ultralytics/docs/releases/download/0/visioneye-view-object-mapping-yolov8.avif) | ![VisionEye View Object Mapping with Object Tracking using Ultralytics YOLOv8](https://github.com/ultralytics/docs/releases/download/0/visioneye-object-mapping-with-tracking.avif) | ![VisionEye View with Distance Calculation using Ultralytics YOLOv8](https://github.com/ultralytics/docs/releases/download/0/visioneye-distance-calculation-yolov8.avif) |
-| VisionEye View Object Mapping using Ultralytics YOLOv8 | VisionEye View Object Mapping with Object Tracking using Ultralytics YOLOv8 | VisionEye View with Distance Calculation using Ultralytics YOLOv8 |
+| ![VisionEye View Object Mapping using Ultralytics YOLO11](https://github.com/ultralytics/docs/releases/download/0/visioneye-view-object-mapping-yolov8.avif) | ![VisionEye View Object Mapping with Object Tracking using Ultralytics YOLO11](https://github.com/ultralytics/docs/releases/download/0/visioneye-object-mapping-with-tracking.avif) | ![VisionEye View with Distance Calculation using Ultralytics YOLO11](https://github.com/ultralytics/docs/releases/download/0/visioneye-distance-calculation-yolov8.avif) |
+| VisionEye View Object Mapping using Ultralytics YOLO11 | VisionEye View Object Mapping with Object Tracking using Ultralytics YOLO11 | VisionEye View with Distance Calculation using Ultralytics YOLO11 |

-!!! example "VisionEye Object Mapping using YOLOv8"
+!!! example "VisionEye Object Mapping using YOLO11"

    === "VisionEye Object Mapping"

@@ -27,7 +27,7 @@ keywords: VisionEye, YOLOv8, Ultralytics, object mapping, object tracking, dista
        from ultralytics import YOLO
        from ultralytics.utils.plotting import Annotator, colors

-        model = YOLO("yolov8n.pt")
+        model = YOLO("yolo11n.pt")
        names = model.model.names
        cap = cv2.VideoCapture("path/to/video/file.mp4")
        w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

@@ -71,7 +71,7 @@ keywords: VisionEye, YOLOv8, Ultralytics, object mapping, object tracking, dista
        from ultralytics import YOLO
        from ultralytics.utils.plotting import Annotator, colors

-        model = YOLO("yolov8n.pt")
+        model = YOLO("yolo11n.pt")
        cap = cv2.VideoCapture("path/to/video/file.mp4")
        w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

@@ -118,7 +118,7 @@ keywords: VisionEye, YOLOv8, Ultralytics, object mapping, object tracking, dista
        from ultralytics import YOLO
        from ultralytics.utils.plotting import Annotator

-        model = YOLO("yolov8s.pt")
+        model = YOLO("yolo11n.pt")
        cap = cv2.VideoCapture("Path/to/video/file.mp4")
        w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

@@ -180,16 +180,16 @@ For any inquiries, feel free to post your questions in the [Ultralytics Issue Se

## FAQ

-### How do I start using VisionEye Object Mapping with Ultralytics YOLOv8?
+### How do I start using VisionEye Object Mapping with Ultralytics YOLO11?

-To start using VisionEye Object Mapping with Ultralytics YOLOv8, first, you'll need to install the Ultralytics YOLO package via pip. Then, you can use the sample code provided in the documentation to set up [object detection](https://www.ultralytics.com/glossary/object-detection) with VisionEye. Here's a simple example to get you started:
+To start using VisionEye Object Mapping with Ultralytics YOLO11, first, you'll need to install the Ultralytics YOLO package via pip. Then, you can use the sample code provided in the documentation to set up [object detection](https://www.ultralytics.com/glossary/object-detection) with VisionEye. Here's a simple example to get you started:

```python
import cv2

from ultralytics import YOLO

-model = YOLO("yolov8n.pt")
+model = YOLO("yolo11n.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")

while True:

@@ -210,12 +210,12 @@ cap.release()
cv2.destroyAllWindows()
```

-### What are the key features of VisionEye's object tracking capability using Ultralytics YOLOv8?
+### What are the key features of VisionEye's object tracking capability using Ultralytics YOLO11?

-VisionEye's object tracking with Ultralytics YOLOv8 allows users to follow the movement of objects within a video frame. Key features include:
+VisionEye's object tracking with Ultralytics YOLO11 allows users to follow the movement of objects within a video frame. Key features include:

1. **Real-Time Object Tracking**: Keeps up with objects as they move.
-2. **Object Identification**: Utilizes YOLOv8's powerful detection algorithms.
+2. **Object Identification**: Utilizes YOLO11's powerful detection algorithms.
3. **Distance Calculation**: Calculates distances between objects and specified points.
4. **Annotation and Visualization**: Provides visual markers for tracked objects.
@@ -226,7 +226,7 @@ import cv2

from ultralytics import YOLO

-model = YOLO("yolov8n.pt")
+model = YOLO("yolo11n.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")

while True:

@@ -249,9 +249,9 @@ cv2.destroyAllWindows()

For a comprehensive guide, visit the [VisionEye Object Mapping with Object Tracking](#samples).

-### How can I calculate distances with VisionEye's YOLOv8 model?
+### How can I calculate distances with VisionEye's YOLO11 model?

-Distance calculation with VisionEye and Ultralytics YOLOv8 involves determining the distance of detected objects from a specified point in the frame. It enhances spatial analysis capabilities, useful in applications such as autonomous driving and surveillance.
+Distance calculation with VisionEye and Ultralytics YOLO11 involves determining the distance of detected objects from a specified point in the frame. It enhances spatial analysis capabilities, useful in applications such as autonomous driving and surveillance.

Here's a simplified example:

@@ -262,7 +262,7 @@ import cv2

from ultralytics import YOLO

-model = YOLO("yolov8s.pt")
+model = YOLO("yolo11n.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")
center_point = (0, 480)  # Example center point
pixel_per_meter = 10

@@ -290,19 +290,19 @@ cv2.destroyAllWindows()

For detailed instructions, refer to the [VisionEye with Distance Calculation](#samples).

-### Why should I use Ultralytics YOLOv8 for object mapping and tracking?
+### Why should I use Ultralytics YOLO11 for object mapping and tracking?

-Ultralytics YOLOv8 is renowned for its speed, [accuracy](https://www.ultralytics.com/glossary/accuracy), and ease of integration, making it a top choice for object mapping and tracking. Key advantages include:
+Ultralytics YOLO11 is renowned for its speed, [accuracy](https://www.ultralytics.com/glossary/accuracy), and ease of integration, making it a top choice for object mapping and tracking. Key advantages include:

1. **State-of-the-art Performance**: Delivers high accuracy in real-time object detection.
2. **Flexibility**: Supports various tasks such as detection, tracking, and distance calculation.
3. **Community and Support**: Extensive documentation and active GitHub community for troubleshooting and enhancements.
4. **Ease of Use**: Intuitive API simplifies complex tasks, allowing for rapid deployment and iteration.

-For more information on applications and benefits, check out the [Ultralytics YOLOv8 documentation](https://docs.ultralytics.com/models/yolov8/).
+For more information on applications and benefits, check out the [Ultralytics YOLO11 documentation](https://docs.ultralytics.com/models/yolov8/).

### How can I integrate VisionEye with other [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) tools like Comet or ClearML?

-Ultralytics YOLOv8 can integrate seamlessly with various machine learning tools like Comet and ClearML, enhancing experiment tracking, collaboration, and reproducibility. Follow the detailed guides on [how to use YOLOv5 with Comet](https://www.ultralytics.com/blog/how-to-use-yolov5-with-comet) and [integrate YOLOv8 with ClearML](https://docs.ultralytics.com/integrations/clearml/) to get started.
+Ultralytics YOLO11 can integrate seamlessly with various machine learning tools like Comet and ClearML, enhancing experiment tracking, collaboration, and reproducibility. Follow the detailed guides on [how to use YOLOv5 with Comet](https://www.ultralytics.com/blog/how-to-use-yolov5-with-comet) and [integrate YOLO11 with ClearML](https://docs.ultralytics.com/integrations/clearml/) to get started.

For further exploration and integration examples, check our [Ultralytics Integrations Guide](https://docs.ultralytics.com/integrations/).
diff --git a/docs/en/guides/workouts-monitoring.md b/docs/en/guides/workouts-monitoring.md
index 4585631683..af996894b3 100644
--- a/docs/en/guides/workouts-monitoring.md
+++ b/docs/en/guides/workouts-monitoring.md
@@ -1,12 +1,12 @@
---
comments: true
-description: Optimize your fitness routine with real-time workouts monitoring using Ultralytics YOLOv8. Track and improve your exercise form and performance.
-keywords: workouts monitoring, Ultralytics YOLOv8, pose estimation, fitness tracking, exercise assessment, real-time feedback, exercise form, performance metrics
+description: Optimize your fitness routine with real-time workouts monitoring using Ultralytics YOLO11. Track and improve your exercise form and performance.
+keywords: workouts monitoring, Ultralytics YOLO11, pose estimation, fitness tracking, exercise assessment, real-time feedback, exercise form, performance metrics
---

-# Workouts Monitoring using Ultralytics YOLOv8
+# Workouts Monitoring using Ultralytics YOLO11

-Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) enhances exercise assessment by accurately tracking key body landmarks and joints in real-time. This technology provides instant feedback on exercise form, tracks workout routines, and measures performance metrics, optimizing training sessions for users and trainers alike.
+Monitoring workouts through pose estimation with [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics/) enhances exercise assessment by accurately tracking key body landmarks and joints in real-time. This technology provides instant feedback on exercise form, tracks workout routines, and measures performance metrics, optimizing training sessions for users and trainers alike.


@@ -16,7 +16,7 @@ Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://gi
        allowfullscreen>
- Watch: Workouts Monitoring using Ultralytics YOLOv8 | Pushups, Pullups, Ab Workouts
+ Watch: Workouts Monitoring using Ultralytics YOLO11 | Pushups, Pullups, Ab Workouts

## Advantages of Workouts Monitoring?

@@ -43,7 +43,7 @@ Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://gi
        from ultralytics import YOLO, solutions

-        model = YOLO("yolov8n-pose.pt")
+        model = YOLO("yolo11n-pose.pt")
        cap = cv2.VideoCapture("path/to/video/file.mp4")
        assert cap.isOpened(), "Error reading video file"
        w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

@@ -74,7 +74,7 @@ Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://gi
        from ultralytics import YOLO, solutions

-        model = YOLO("yolov8n-pose.pt")
+        model = YOLO("yolo11n-pose.pt")
        cap = cv2.VideoCapture("path/to/video/file.mp4")
        assert cap.isOpened(), "Error reading video file"
        w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

@@ -108,7 +108,7 @@ Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://gi

### KeyPoints Map

-![keyPoints Order Ultralytics YOLOv8 Pose](https://github.com/ultralytics/docs/releases/download/0/keypoints-order-ultralytics-yolov8-pose.avif)
+![keyPoints Order Ultralytics YOLO11 Pose](https://github.com/ultralytics/docs/releases/download/0/keypoints-order-ultralytics-yolov8-pose.avif)

### Arguments `AIGym`

@@ -131,16 +131,16 @@ Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://gi

## FAQ

-### How do I monitor my workouts using Ultralytics YOLOv8?
+### How do I monitor my workouts using Ultralytics YOLO11?

-To monitor your workouts using Ultralytics YOLOv8, you can utilize the pose estimation capabilities to track and analyze key body landmarks and joints in real-time. This allows you to receive instant feedback on your exercise form, count repetitions, and measure performance metrics. You can start by using the provided example code for pushups, pullups, or ab workouts as shown:
+To monitor your workouts using Ultralytics YOLO11, you can utilize the pose estimation capabilities to track and analyze key body landmarks and joints in real-time. This allows you to receive instant feedback on your exercise form, count repetitions, and measure performance metrics. You can start by using the provided example code for pushups, pullups, or ab workouts as shown:

```python
import cv2

from ultralytics import YOLO, solutions

-model = YOLO("yolov8n-pose.pt")
+model = YOLO("yolo11n-pose.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")
assert cap.isOpened(), "Error reading video file"
w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

@@ -165,9 +165,9 @@ cv2.destroyAllWindows()

For further customization and settings, you can refer to the [AIGym](#arguments-aigym) section in the documentation.

-### What are the benefits of using Ultralytics YOLOv8 for workout monitoring?
+### What are the benefits of using Ultralytics YOLO11 for workout monitoring?

-Using Ultralytics YOLOv8 for workout monitoring provides several key benefits:
+Using Ultralytics YOLO11 for workout monitoring provides several key benefits:

- **Optimized Performance:** By tailoring workouts based on monitoring data, you can achieve better results.
- **Goal Achievement:** Easily track and adjust fitness goals for measurable progress.

@@ -177,13 +177,13 @@ Using Ultralytics YOLOv8 for workout monitoring provides several key benefits:

You can watch a [YouTube video demonstration](https://www.youtube.com/watch?v=LGGxqLZtvuw) to see these benefits in action.
-### How accurate is Ultralytics YOLOv8 in detecting and tracking exercises?
+### How accurate is Ultralytics YOLO11 in detecting and tracking exercises?

-Ultralytics YOLOv8 is highly accurate in detecting and tracking exercises due to its state-of-the-art pose estimation capabilities. It can accurately track key body landmarks and joints, providing real-time feedback on exercise form and performance metrics. The model's pretrained weights and robust architecture ensure high [precision](https://www.ultralytics.com/glossary/precision) and reliability. For real-world examples, check out the [real-world applications](#real-world-applications) section in the documentation, which showcases pushups and pullups counting.
+Ultralytics YOLO11 is highly accurate in detecting and tracking exercises due to its state-of-the-art pose estimation capabilities. It can accurately track key body landmarks and joints, providing real-time feedback on exercise form and performance metrics. The model's pretrained weights and robust architecture ensure high [precision](https://www.ultralytics.com/glossary/precision) and reliability. For real-world examples, check out the [real-world applications](#real-world-applications) section in the documentation, which showcases pushups and pullups counting.

-### Can I use Ultralytics YOLOv8 for custom workout routines?
+### Can I use Ultralytics YOLO11 for custom workout routines?

-Yes, Ultralytics YOLOv8 can be adapted for custom workout routines. The `AIGym` class supports different pose types such as "pushup", "pullup", and "abworkout." You can specify keypoints and angles to detect specific exercises. Here is an example setup:
+Yes, Ultralytics YOLO11 can be adapted for custom workout routines. The `AIGym` class supports different pose types such as "pushup", "pullup", and "abworkout." You can specify keypoints and angles to detect specific exercises. Here is an example setup:

```python
from ultralytics import solutions

@@ -198,7 +198,7 @@ gym_object = solutions.AIGym(

For more details on setting arguments, refer to the [Arguments `AIGym`](#arguments-aigym) section. This flexibility allows you to monitor various exercises and customize routines based on your needs.

-### How can I save the workout monitoring output using Ultralytics YOLOv8?
+### How can I save the workout monitoring output using Ultralytics YOLO11?

To save the workout monitoring output, you can modify the code to include a video writer that saves the processed frames. Here's an example:

@@ -207,7 +207,7 @@ import cv2

from ultralytics import YOLO, solutions

-model = YOLO("yolov8n-pose.pt")
+model = YOLO("yolo11n-pose.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")
assert cap.isOpened(), "Error reading video file"
w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

@@ -234,4 +234,4 @@ cv2.destroyAllWindows()

video_writer.release()
```

-This setup writes the monitored video to an output file. For more details, refer to the [Workouts Monitoring with Save Output](#workouts-monitoring-using-ultralytics-yolov8) section.
+This setup writes the monitored video to an output file. For more details, refer to the [Workouts Monitoring with Save Output](#workouts-monitoring-using-ultralytics-yolo11) section.
diff --git a/docs/en/guides/yolo-common-issues.md b/docs/en/guides/yolo-common-issues.md
index 6da5d164e9..19a5eb421b 100644
--- a/docs/en/guides/yolo-common-issues.md
+++ b/docs/en/guides/yolo-common-issues.md
@@ -1,7 +1,7 @@
---
comments: true
-description: Comprehensive guide to troubleshoot common YOLOv8 issues, from installation errors to model training challenges. Enhance your Ultralytics projects with our expert tips.
-keywords: YOLO, YOLOv8, troubleshooting, installation errors, model training, GPU issues, Ultralytics, AI, computer vision, deep learning, Python, CUDA, PyTorch, debugging
+description: Comprehensive guide to troubleshoot common YOLO11 issues, from installation errors to model training challenges. Enhance your Ultralytics projects with our expert tips.
+keywords: YOLO, YOLO11, troubleshooting, installation errors, model training, GPU issues, Ultralytics, AI, computer vision, deep learning, Python, CUDA, PyTorch, debugging
---

# Troubleshooting Common YOLO Issues

@@ -12,7 +12,7 @@ keywords: YOLO, YOLOv8, troubleshooting, installation errors, model training, GP

## Introduction

-This guide serves as a comprehensive aid for troubleshooting common issues encountered while working with YOLOv8 on your Ultralytics projects. Navigating through these issues can be a breeze with the right guidance, ensuring your projects remain on track without unnecessary delays.
+This guide serves as a comprehensive aid for troubleshooting common issues encountered while working with YOLO11 on your Ultralytics projects. Navigating through these issues can be a breeze with the right guidance, ensuring your projects remain on track without unnecessary delays.


@@ -22,7 +22,7 @@ This guide serves as a comprehensive aid for troubleshooting common issues encou
        allowfullscreen>
- Watch: Ultralytics YOLOv8 Common Issues | Installation Errors, Model Training Issues
+ Watch: Ultralytics YOLO11 Common Issues | Installation Errors, Model Training Issues

## Common Issues

@@ -41,7 +41,7 @@ Installation errors can arise due to various reasons, such as incompatible versi

Additionally, here are some common installation issues users have encountered, along with their respective solutions:

-- Import Errors or Dependency Issues - If you're getting errors during the import of YOLOv8, or you're having issues related to dependencies, consider the following troubleshooting steps:
+- Import Errors or Dependency Issues - If you're getting errors during the import of YOLO11, or you're having issues related to dependencies, consider the following troubleshooting steps:

    - **Fresh Installation**: Sometimes, starting with a fresh installation can resolve unexpected issues. Especially with libraries like Ultralytics, where updates might introduce changes to the file tree structure or functionalities.

@@ -53,7 +53,7 @@ Additionally, here are some common installation issues users have encountered, a

    - Remember, keeping your libraries and dependencies up-to-date is crucial for a smooth and error-free experience.

-- Running YOLOv8 on GPU - If you're having trouble running YOLOv8 on GPU, consider the following troubleshooting steps:
+- Running YOLO11 on GPU - If you're having trouble running YOLO11 on GPU, consider the following troubleshooting steps:

    - **Verify CUDA Compatibility and Installation**: Ensure your GPU is CUDA compatible and that CUDA is correctly installed. Use the `nvidia-smi` command to check the status of your NVIDIA GPU and CUDA version.

@@ -63,7 +63,7 @@ Additionally, here are some common installation issues users have encountered, a

    - **Update Your Packages**: Outdated packages might not be compatible with your GPU. Keep them updated.

-    - **Program Configuration**: Check if the program or code specifies GPU usage. In YOLOv8, this might be in the settings or configuration.
+    - **Program Configuration**: Check if the program or code specifies GPU usage. In YOLO11, this might be in the settings or configuration.

### Model Training Issues

@@ -119,7 +119,7 @@ You can access these metrics from the training logs or by using tools like Tenso

**Solution**: To track and visualize training progress, you can consider using the following tools:

-- [TensorBoard](https://www.tensorflow.org/tensorboard): TensorBoard is a popular choice for visualizing training metrics, including loss, [accuracy](https://www.ultralytics.com/glossary/accuracy), and more. You can integrate it with your YOLOv8 training process.
+- [TensorBoard](https://www.tensorflow.org/tensorboard): TensorBoard is a popular choice for visualizing training metrics, including loss, [accuracy](https://www.ultralytics.com/glossary/accuracy), and more. You can integrate it with your YOLO11 training process.
- [Comet](https://bit.ly/yolov8-readme-comet): Comet provides an extensive toolkit for experiment tracking and comparison. It allows you to track metrics, hyperparameters, and even model weights. Integration with YOLO models is also straightforward, providing you with a complete overview of your experiment cycle.
- [Ultralytics HUB](https://hub.ultralytics.com/): Ultralytics HUB offers a specialized environment for tracking YOLO models, giving you a one-stop platform to manage metrics, datasets, and even collaborate with your team. Given its tailored focus on YOLO, it offers more customized tracking options.

@@ -177,13 +177,13 @@ Here are some things to keep in mind, if you are facing issues related to model

This section will address common issues faced during model prediction.
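Before the individual prediction issues below, a short sketch of where predictions live on the result object may help. This is an illustration using the docs' standard sample image; note that `boxes.xyxyn` already provides the normalized coordinates that the first answer below derives by hand.

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")
results = model("https://ultralytics.com/images/bus.jpg")

boxes = results[0].boxes
print(boxes.xyxy)       # absolute pixel coordinates (x1, y1, x2, y2)
print(boxes.xyxyn)      # the same boxes normalized to 0-1 by image size
print(results[0].path)  # path/file name of the source image
```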
-#### Getting Bounding Box Predictions With Your YOLOv8 Custom Model
+#### Getting Bounding Box Predictions With Your YOLO11 Custom Model

-**Issue**: When running predictions with a custom YOLOv8 model, there are challenges with the format and visualization of the bounding box coordinates.
+**Issue**: When running predictions with a custom YOLO11 model, there are challenges with the format and visualization of the bounding box coordinates.

**Solution**:

-- Coordinate Format: YOLOv8 provides bounding box coordinates in absolute pixel values. To convert these to relative coordinates (ranging from 0 to 1), you need to divide by the image dimensions. For example, let's say your image size is 640x640. Then you would do the following:
+- Coordinate Format: YOLO11 provides bounding box coordinates in absolute pixel values. To convert these to relative coordinates (ranging from 0 to 1), you need to divide by the image dimensions. For example, let's say your image size is 640x640. Then you would do the following:

```python
# Convert absolute coordinates to relative coordinates
@@ -195,33 +195,33 @@ y2 = y2 / 640

- File Name: To obtain the file name of the image you're predicting on, access the image file path directly from the result object within your prediction loop.

-#### Filtering Objects in YOLOv8 Predictions
+#### Filtering Objects in YOLO11 Predictions

-**Issue**: Facing issues with how to filter and display only specific objects in the prediction results when running YOLOv8 using the Ultralytics library.
+**Issue**: Facing issues with how to filter and display only specific objects in the prediction results when running YOLO11 using the Ultralytics library.

**Solution**: To detect specific classes use the classes argument to specify the classes you want to include in the output. For instance, to detect only cars (assuming 'cars' have class index 2):

```shell
-yolo task=detect mode=segment model=yolov8n-seg.pt source='path/to/car.mp4' show=True classes=2
+yolo task=detect mode=segment model=yolo11n-seg.pt source='path/to/car.mp4' show=True classes=2
```

-#### Understanding Precision Metrics in YOLOv8
+#### Understanding Precision Metrics in YOLO11

-**Issue**: Confusion regarding the difference between box precision, mask precision, and [confusion matrix](https://www.ultralytics.com/glossary/confusion-matrix) precision in YOLOv8.
+**Issue**: Confusion regarding the difference between box precision, mask precision, and [confusion matrix](https://www.ultralytics.com/glossary/confusion-matrix) precision in YOLO11.

**Solution**: Box precision measures the accuracy of predicted bounding boxes compared to the actual ground truth boxes using IoU (Intersection over Union) as the metric. Mask precision assesses the agreement between predicted segmentation masks and ground truth masks in pixel-wise object classification. Confusion matrix precision, on the other hand, focuses on overall classification accuracy across all classes and does not consider the geometric accuracy of predictions. It's important to note that a [bounding box](https://www.ultralytics.com/glossary/bounding-box) can be geometrically accurate (true positive) even if the class prediction is wrong, leading to differences between box precision and confusion matrix precision. These metrics evaluate distinct aspects of a model's performance, reflecting the need for different evaluation metrics in various tasks.
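To make the IoU-based box-precision definition above concrete, here is a toy, framework-free computation; the two boxes are hypothetical values chosen only for illustration.

```python
def box_iou(a, b):
    """IoU of two boxes given as (x1, y1, x2, y2) tuples."""
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)  # overlap area, 0 if disjoint
    union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
    return inter / union if union else 0.0


print(box_iou((0, 0, 100, 100), (50, 50, 150, 150)))  # ~0.143
```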
-#### Extracting Object Dimensions in YOLOv8
+#### Extracting Object Dimensions in YOLO11

-**Issue**: Difficulty in retrieving the length and height of detected objects in YOLOv8, especially when multiple objects are detected in an image.
+**Issue**: Difficulty in retrieving the length and height of detected objects in YOLO11, especially when multiple objects are detected in an image.

-**Solution**: To retrieve the bounding box dimensions, first use the Ultralytics YOLOv8 model to predict objects in an image. Then, extract the width and height information of bounding boxes from the prediction results.
+**Solution**: To retrieve the bounding box dimensions, first use the Ultralytics YOLO11 model to predict objects in an image. Then, extract the width and height information of bounding boxes from the prediction results.

```python
from ultralytics import YOLO

-# Load a pre-trained YOLOv8 model
-model = YOLO("yolov8n.pt")
+# Load a pre-trained YOLO11 model
+model = YOLO("yolo11n.pt")

# Specify the source image
source = "https://ultralytics.com/images/bus.jpg"

@@ -264,23 +264,23 @@ for box in boxes:

## Community and Support

-Engaging with a community of like-minded individuals can significantly enhance your experience and success in working with YOLOv8. Below are some channels and resources you may find helpful.
+Engaging with a community of like-minded individuals can significantly enhance your experience and success in working with YOLO11. Below are some channels and resources you may find helpful.

### Forums and Channels for Getting Help

-**GitHub Issues:** The YOLOv8 repository on GitHub has an [Issues tab](https://github.com/ultralytics/ultralytics/issues) where you can ask questions, report bugs, and suggest new features. The community and maintainers are active here, and it's a great place to get help with specific problems.
+**GitHub Issues:** The YOLO11 repository on GitHub has an [Issues tab](https://github.com/ultralytics/ultralytics/issues) where you can ask questions, report bugs, and suggest new features. The community and maintainers are active here, and it's a great place to get help with specific problems.

**Ultralytics Discord Server:** Ultralytics has a [Discord server](https://discord.com/invite/ultralytics) where you can interact with other users and the developers.

### Official Documentation and Resources

-**Ultralytics YOLOv8 Docs**: The [official documentation](../index.md) provides a comprehensive overview of YOLOv8, along with guides on installation, usage, and troubleshooting.
+**Ultralytics YOLO11 Docs**: The [official documentation](../index.md) provides a comprehensive overview of YOLO11, along with guides on installation, usage, and troubleshooting.

-These resources should provide a solid foundation for troubleshooting and improving your YOLOv8 projects, as well as connecting with others in the YOLOv8 community.
+These resources should provide a solid foundation for troubleshooting and improving your YOLO11 projects, as well as connecting with others in the YOLO11 community.

## Conclusion

-Troubleshooting is an integral part of any development process, and being equipped with the right knowledge can significantly reduce the time and effort spent in resolving issues. This guide aimed to address the most common challenges faced by users of the YOLOv8 model within the Ultralytics ecosystem. By understanding and addressing these common issues, you can ensure smoother project progress and achieve better results with your [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) tasks.
+Troubleshooting is an integral part of any development process, and being equipped with the right knowledge can significantly reduce the time and effort spent in resolving issues. This guide aimed to address the most common challenges faced by users of the YOLO11 model within the Ultralytics ecosystem. By understanding and addressing these common issues, you can ensure smoother project progress and achieve better results with your [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) tasks.

Remember, the Ultralytics community is a valuable resource. Engaging with fellow developers and experts can provide additional insights and solutions that might not be covered in standard documentation. Always keep learning, experimenting, and sharing your experiences to contribute to the collective knowledge of the community.

@@ -288,11 +288,11 @@ Happy troubleshooting!

## FAQ

-### How do I resolve installation errors with YOLOv8?
+### How do I resolve installation errors with YOLO11?

Installation errors can often be due to compatibility issues or missing dependencies. Ensure you use Python 3.8 or later and have PyTorch 1.8 or later installed. It's beneficial to use virtual environments to avoid conflicts. For a step-by-step installation guide, follow our [official installation guide](../quickstart.md). If you encounter import errors, try a fresh installation or update the library to the latest version.

-### Why is my YOLOv8 model training slow on a single GPU?
+### Why is my YOLO11 model training slow on a single GPU?

Training on a single GPU might be slow due to large batch sizes or insufficient memory. To speed up training, use multiple GPUs. Ensure your system has multiple GPUs available and adjust your `.yaml` configuration file to specify the number of GPUs, e.g., `gpus: 4`. Increase the batch size accordingly to fully utilize the GPUs without exceeding memory limits. Example command:

@@ -300,7 +300,7 @@ model.train(data="/path/to/your/data.yaml", batch=32, multi_scale=True)
```

-### How can I ensure my YOLOv8 model is training on the GPU?
+### How can I ensure my YOLO11 model is training on the GPU?

If the 'device' value shows 'null' in the training logs, it generally means the training process is set to automatically use an available GPU. To explicitly assign a specific GPU, set the 'device' value in your `.yaml` configuration file. For instance:

@@ -310,10 +310,10 @@ device: 0

This sets the training process to the first GPU. Consult the `nvidia-smi` command to confirm your CUDA setup.

-### How can I monitor and track my YOLOv8 model training progress?
+### How can I monitor and track my YOLO11 model training progress?

Tracking and visualizing training progress can be efficiently managed through tools like [TensorBoard](https://www.tensorflow.org/tensorboard), [Comet](https://bit.ly/yolov8-readme-comet), and [Ultralytics HUB](https://hub.ultralytics.com/). These tools allow you to log and visualize metrics such as loss, [precision](https://www.ultralytics.com/glossary/precision), [recall](https://www.ultralytics.com/glossary/recall), and mAP. Implementing [early stopping](#continuous-monitoring-parameters) based on these metrics can also help achieve better training outcomes.
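As a hedged sketch tying the GPU answers above together: the `coco8.yaml` dataset and single epoch are illustrative placeholders, and passing `device` to `train()` is the standard Python-API way to pin training to a specific GPU in recent Ultralytics releases.

```python
import torch

from ultralytics import YOLO

print(torch.cuda.is_available())  # confirm PyTorch can see your CUDA device

model = YOLO("yolo11n.pt")
# Pin training explicitly to GPU 0; device=[0, 1] would request multi-GPU
model.train(data="coco8.yaml", epochs=1, device=0)
```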
-### What should I do if YOLOv8 is not recognizing my dataset format?
+### What should I do if YOLO11 is not recognizing my dataset format?

Ensure your dataset and labels conform to the expected format. Verify that annotations are accurate and of high quality. If you face any issues, refer to the [Data Collection and Annotation](https://docs.ultralytics.com/guides/data-collection-and-annotation/) guide for best practices. For more dataset-specific guidance, check the [Datasets](https://docs.ultralytics.com/datasets/) section in the documentation.
diff --git a/docs/en/guides/yolo-performance-metrics.md b/docs/en/guides/yolo-performance-metrics.md
index aeed82355d..27ad142dfa 100644
--- a/docs/en/guides/yolo-performance-metrics.md
+++ b/docs/en/guides/yolo-performance-metrics.md
@@ -1,14 +1,14 @@
---
comments: true
-description: Explore essential YOLOv8 performance metrics like mAP, IoU, F1 Score, Precision, and Recall. Learn how to calculate and interpret them for model evaluation.
-keywords: YOLOv8 performance metrics, mAP, IoU, F1 Score, Precision, Recall, object detection, Ultralytics
+description: Explore essential YOLO11 performance metrics like mAP, IoU, F1 Score, Precision, and Recall. Learn how to calculate and interpret them for model evaluation.
+keywords: YOLO11 performance metrics, mAP, IoU, F1 Score, Precision, Recall, object detection, Ultralytics
---

# Performance Metrics Deep Dive

## Introduction

-Performance metrics are key tools to evaluate the [accuracy](https://www.ultralytics.com/glossary/accuracy) and efficiency of [object detection](https://www.ultralytics.com/glossary/object-detection) models. They shed light on how effectively a model can identify and localize objects within images. Additionally, they help in understanding the model's handling of false positives and false negatives. These insights are crucial for evaluating and enhancing the model's performance. In this guide, we will explore various performance metrics associated with YOLOv8, their significance, and how to interpret them.
+Performance metrics are key tools to evaluate the [accuracy](https://www.ultralytics.com/glossary/accuracy) and efficiency of [object detection](https://www.ultralytics.com/glossary/object-detection) models. They shed light on how effectively a model can identify and localize objects within images. Additionally, they help in understanding the model's handling of false positives and false negatives. These insights are crucial for evaluating and enhancing the model's performance. In this guide, we will explore various performance metrics associated with YOLO11, their significance, and how to interpret them.


@@ -18,12 +18,12 @@ Performance metrics are key tools to evaluate the [accuracy](https://www.ultraly
        allowfullscreen>
- Watch: Ultralytics YOLOv8 Performance Metrics | MAP, F1 Score, [Precision](https://www.ultralytics.com/glossary/precision), IoU & Accuracy
+ Watch: Ultralytics YOLO11 Performance Metrics | MAP, F1 Score, Precision, IoU & Accuracy

## Object Detection Metrics

-Let's start by discussing some metrics that are not only important to YOLOv8 but are broadly applicable across different object detection models.
+Let's start by discussing some metrics that are not only important to YOLO11 but are broadly applicable across different object detection models.

- **[Intersection over Union](https://www.ultralytics.com/glossary/intersection-over-union-iou) (IoU):** IoU is a measure that quantifies the overlap between a predicted [bounding box](https://www.ultralytics.com/glossary/bounding-box) and a ground truth bounding box. It plays a fundamental role in evaluating the accuracy of object localization.

@@ -35,9 +35,9 @@ Let's start by discussing some metrics that are not only important to YOLOv8 but

- **F1 Score:** The F1 Score is the harmonic mean of precision and recall, providing a balanced assessment of a model's performance while considering both false positives and false negatives.

-## How to Calculate Metrics for YOLOv8 Model
+## How to Calculate Metrics for YOLO11 Model

-Now, we can explore [YOLOv8's Validation mode](../modes/val.md) that can be used to compute the above discussed evaluation metrics.
+Now, we can explore [YOLO11's Validation mode](../modes/val.md) that can be used to compute the above discussed evaluation metrics.

Using the validation mode is simple. Once you have a trained model, you can invoke the model.val() function. This function will then process the validation dataset and return a variety of performance metrics. But what do these metrics mean? And how should you interpret them?
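As a minimal sketch of that call (the small `coco8.yaml` sample dataset is an assumption; any `data` argument works):

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")
metrics = model.val(data="coco8.yaml")  # runs validation and returns a metrics object

print(metrics.box.map)    # mAP@0.50:0.95
print(metrics.box.map50)  # mAP@0.50
print(metrics.box.mp)     # mean precision
print(metrics.box.mr)     # mean recall
```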
@@ -91,7 +91,7 @@ The model.val() function, apart from producing numeric metrics, also yields visu

- **Validation Batch Labels (`val_batchX_labels.jpg`)**: These images depict the ground truth labels for distinct batches from the validation dataset. They provide a clear picture of what the objects are and their respective locations as per the dataset.

-- **Validation Batch Predictions (`val_batchX_pred.jpg`)**: Contrasting the label images, these visuals display the predictions made by the YOLOv8 model for the respective batches. By comparing these to the label images, you can easily assess how well the model detects and classifies objects visually.
+- **Validation Batch Predictions (`val_batchX_pred.jpg`)**: Contrasting the label images, these visuals display the predictions made by the YOLO11 model for the respective batches. By comparing these to the label images, you can easily assess how well the model detects and classifies objects visually.

#### Results Storage

@@ -153,56 +153,56 @@ Real-world examples can help clarify how these metrics work in practice.

## Connect and Collaborate

-Tapping into a community of enthusiasts and experts can amplify your journey with YOLOv8. Here are some avenues that can facilitate learning, troubleshooting, and networking.
+Tapping into a community of enthusiasts and experts can amplify your journey with YOLO11. Here are some avenues that can facilitate learning, troubleshooting, and networking.

### Engage with the Broader Community

-- **GitHub Issues:** The YOLOv8 repository on GitHub has an [Issues tab](https://github.com/ultralytics/ultralytics/issues) where you can ask questions, report bugs, and suggest new features. The community and maintainers are active here, and it's a great place to get help with specific problems.
+- **GitHub Issues:** The YOLO11 repository on GitHub has an [Issues tab](https://github.com/ultralytics/ultralytics/issues) where you can ask questions, report bugs, and suggest new features. The community and maintainers are active here, and it's a great place to get help with specific problems.

- **Ultralytics Discord Server:** Ultralytics has a [Discord server](https://discord.com/invite/ultralytics) where you can interact with other users and the developers.

### Official Documentation and Resources:

-- **Ultralytics YOLOv8 Docs:** The [official documentation](../index.md) provides a comprehensive overview of YOLOv8, along with guides on installation, usage, and troubleshooting.
+- **Ultralytics YOLO11 Docs:** The [official documentation](../index.md) provides a comprehensive overview of YOLO11, along with guides on installation, usage, and troubleshooting.

-Using these resources will not only guide you through any challenges but also keep you updated with the latest trends and best practices in the YOLOv8 community.
+Using these resources will not only guide you through any challenges but also keep you updated with the latest trends and best practices in the YOLO11 community.

## Conclusion

-In this guide, we've taken a close look at the essential performance metrics for YOLOv8. These metrics are key to understanding how well a model is performing and are vital for anyone aiming to fine-tune their models. They offer the necessary insights for improvements and to make sure the model works effectively in real-life situations.
+In this guide, we've taken a close look at the essential performance metrics for YOLO11. These metrics are key to understanding how well a model is performing and are vital for anyone aiming to fine-tune their models. They offer the necessary insights for improvements and to make sure the model works effectively in real-life situations.

-Remember, the YOLOv8 and Ultralytics community is an invaluable asset. Engaging with fellow developers and experts can open doors to insights and solutions not found in standard documentation. As you journey through object detection, keep the spirit of learning alive, experiment with new strategies, and share your findings. By doing so, you contribute to the community's collective wisdom and ensure its growth.
+Remember, the YOLO11 and Ultralytics community is an invaluable asset. Engaging with fellow developers and experts can open doors to insights and solutions not found in standard documentation. As you journey through object detection, keep the spirit of learning alive, experiment with new strategies, and share your findings. By doing so, you contribute to the community's collective wisdom and ensure its growth.

Happy object detecting!

## FAQ

-### What is the significance of [Mean Average Precision](https://www.ultralytics.com/glossary/mean-average-precision-map) (mAP) in evaluating YOLOv8 model performance?
+### What is the significance of [Mean Average Precision](https://www.ultralytics.com/glossary/mean-average-precision-map) (mAP) in evaluating YOLO11 model performance?

-Mean Average Precision (mAP) is crucial for evaluating YOLOv8 models as it provides a single metric encapsulating precision and recall across multiple classes. mAP@0.50 measures precision at an IoU threshold of 0.50, focusing on the model's ability to detect objects correctly. mAP@0.50:0.95 averages precision across a range of IoU thresholds, offering a comprehensive assessment of detection performance. High mAP scores indicate that the model effectively balances precision and recall, essential for applications like autonomous driving and surveillance.
+Mean Average Precision (mAP) is crucial for evaluating YOLO11 models as it provides a single metric encapsulating precision and recall across multiple classes. mAP@0.50 measures precision at an IoU threshold of 0.50, focusing on the model's ability to detect objects correctly. mAP@0.50:0.95 averages precision across a range of IoU thresholds, offering a comprehensive assessment of detection performance. High mAP scores indicate that the model effectively balances precision and recall, essential for applications like autonomous driving and surveillance.
-### How do I interpret the Intersection over Union (IoU) value for YOLOv8 object detection?
+### How do I interpret the Intersection over Union (IoU) value for YOLO11 object detection?

Intersection over Union (IoU) measures the overlap between the predicted and ground truth bounding boxes. IoU values range from 0 to 1, where higher values indicate better localization accuracy. An IoU of 1.0 means perfect alignment. Typically, an IoU threshold of 0.50 is used to define true positives in metrics like mAP. Lower IoU values suggest that the model struggles with precise object localization, which can be improved by refining bounding box regression or increasing annotation accuracy.
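To make the IoU definition above concrete, here is a small self-contained sketch; the helper name and box values are illustrative only, with boxes given in `(x1, y1, x2, y2)` corner format:

```python
def box_iou(a, b):
    """Compute IoU of two axis-aligned boxes given as (x1, y1, x2, y2)."""
    # Intersection rectangle corners
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    # Union = sum of areas minus intersection
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0


print(box_iou((0, 0, 10, 10), (5, 5, 15, 15)))  # 25 / 175 ≈ 0.1429
```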
-### Why is the F1 Score important for evaluating YOLOv8 models in object detection?
+### Why is the F1 Score important for evaluating YOLO11 models in object detection?

-The F1 Score is important for evaluating YOLOv8 models because it provides a harmonic mean of precision and recall, balancing both false positives and false negatives. It is particularly valuable when dealing with imbalanced datasets or applications where either precision or recall alone is insufficient. A high F1 Score indicates that the model effectively detects objects while minimizing both missed detections and false alarms, making it suitable for critical applications like security systems and medical imaging.
+The F1 Score is important for evaluating YOLO11 models because it provides a harmonic mean of precision and recall, balancing both false positives and false negatives. It is particularly valuable when dealing with imbalanced datasets or applications where either precision or recall alone is insufficient. A high F1 Score indicates that the model effectively detects objects while minimizing both missed detections and false alarms, making it suitable for critical applications like security systems and medical imaging.

-### What are the key advantages of using Ultralytics YOLOv8 for real-time object detection?
+### What are the key advantages of using Ultralytics YOLO11 for real-time object detection?

-Ultralytics YOLOv8 offers multiple advantages for real-time object detection:
+Ultralytics YOLO11 offers multiple advantages for real-time object detection:

- **Speed and Efficiency**: Optimized for high-speed inference, suitable for applications requiring low latency.
- **High Accuracy**: Advanced algorithm ensures high mAP and IoU scores, balancing precision and recall.
- **Flexibility**: Supports various tasks including object detection, segmentation, and classification.
- **Ease of Use**: User-friendly interfaces, extensive documentation, and seamless integration with platforms like Ultralytics HUB ([HUB Quickstart](../hub/quickstart.md)).

-This makes YOLOv8 ideal for diverse applications from autonomous vehicles to smart city solutions.
+This makes YOLO11 ideal for diverse applications from autonomous vehicles to smart city solutions.

-### How can validation metrics from YOLOv8 help improve model performance?
+### How can validation metrics from YOLO11 help improve model performance?

-Validation metrics from YOLOv8 like precision, recall, mAP, and IoU help diagnose and improve model performance by providing insights into different aspects of detection:
+Validation metrics from YOLO11 like precision, recall, mAP, and IoU help diagnose and improve model performance by providing insights into different aspects of detection:

- **Precision**: Helps identify and minimize false positives.
- **Recall**: Ensures all relevant objects are detected.
diff --git a/docs/en/guides/yolo-thread-safe-inference.md b/docs/en/guides/yolo-thread-safe-inference.md
index c086685152..b66af30ab0 100644
--- a/docs/en/guides/yolo-thread-safe-inference.md
+++ b/docs/en/guides/yolo-thread-safe-inference.md
@@ -33,7 +33,7 @@ from threading import Thread
from ultralytics import YOLO

# Instantiate the model outside the thread
-shared_model = YOLO("yolov8n.pt")
+shared_model = YOLO("yolo11n.pt")


def predict(image_path):
@@ -60,8 +60,8 @@ from threading import Thread
from ultralytics import YOLO

# Instantiate multiple models outside the thread
-shared_model_1 = YOLO("yolov8n_1.pt")
-shared_model_2 = YOLO("yolov8n_2.pt")
+shared_model_1 = YOLO("yolo11n_1.pt")
+shared_model_2 = YOLO("yolo11n_2.pt")


def predict(model, image_path):
@@ -94,7 +94,7 @@ from ultralytics import YOLO

def thread_safe_predict(image_path):
    """Predict on an image using a new YOLO model instance in a thread-safe manner; takes image path as input."""
-    local_model = YOLO("yolov8n.pt")
+    local_model = YOLO("yolo11n.pt")
    results = local_model.predict(image_path)
    # Process results
@@ -128,7 +128,7 @@ from ultralytics import YOLO

def thread_safe_predict(image_path):
    """Predict on an image in a thread-safe manner."""
-    local_model = YOLO("yolov8n.pt")
+    local_model = YOLO("yolo11n.pt")
    results = local_model.predict(image_path)
    # Process results
@@ -157,7 +157,7 @@ from ultralytics import YOLO

def thread_safe_predict(image_path):
    """Runs inference in a thread-safe manner with a new YOLO model instance."""
-    model = YOLO("yolov8n.pt")
+    model = YOLO("yolo11n.pt")
    results = model.predict(image_path)
    # Process results
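Following the instance-per-thread pattern shown in the hunks above, here is a minimal sketch using a thread pool; the model name and image paths are placeholders:

```python
from concurrent.futures import ThreadPoolExecutor

from ultralytics import YOLO


def thread_safe_predict(image_path):
    """Each call creates its own model instance, so no state is shared across threads."""
    local_model = YOLO("yolo11n.pt")
    results = local_model.predict(image_path)
    return len(results)


# Run predictions concurrently, one model instance per task
with ThreadPoolExecutor(max_workers=2) as pool:
    counts = list(pool.map(thread_safe_predict, ["image1.jpg", "image2.jpg"]))
```

Instantiating a model per call trades startup cost for safety; for long-lived workers, creating one instance per thread at startup amortizes that cost.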
diff --git a/docs/en/help/FAQ.md b/docs/en/help/FAQ.md
index 234fb9e82f..bde16d98af 100644
--- a/docs/en/help/FAQ.md
+++ b/docs/en/help/FAQ.md
@@ -14,7 +14,7 @@ This FAQ section addresses common questions and issues users might encounter whi

Ultralytics is a [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) AI company specializing in state-of-the-art object detection and [image segmentation](https://www.ultralytics.com/glossary/image-segmentation) models, with a focus on the YOLO (You Only Look Once) family. Their offerings include:

-- Open-source implementations of [YOLOv5](https://docs.ultralytics.com/models/yolov5/) and [YOLOv8](https://docs.ultralytics.com/models/yolov8/)
+- Open-source implementations of [YOLOv8](https://docs.ultralytics.com/models/yolov8/) and [YOLO11](https://docs.ultralytics.com/models/yolo11/)
- A wide range of [pre-trained models](https://docs.ultralytics.com/models/) for various computer vision tasks
- A comprehensive [Python package](https://docs.ultralytics.com/usage/python/) for seamless integration of YOLO models into projects
- Versatile [tools](https://docs.ultralytics.com/modes/) for training, testing, and deploying models
@@ -54,9 +54,9 @@ Recommended setup:

For troubleshooting common issues, visit the [YOLO Common Issues](https://docs.ultralytics.com/guides/yolo-common-issues/) page.

-### How can I train a custom YOLOv8 model on my own dataset?
+### How can I train a custom YOLO11 model on my own dataset?

-To train a custom YOLOv8 model:
+To train a custom YOLO11 model:

1. Prepare your dataset in YOLO format (images and corresponding label txt files).
2. Create a YAML file describing your dataset structure and classes.
@@ -77,11 +77,11 @@ For a more in-depth guide, including data preparation and advanced training opti

### What pretrained models are available in Ultralytics?

-Ultralytics offers a diverse range of pretrained YOLOv8 models for various tasks:
+Ultralytics offers a diverse range of pretrained YOLO11 models for various tasks:

-- Object Detection: YOLOv8n, YOLOv8s, YOLOv8m, YOLOv8l, YOLOv8x
-- [Instance Segmentation](https://www.ultralytics.com/glossary/instance-segmentation): YOLOv8n-seg, YOLOv8s-seg, YOLOv8m-seg, YOLOv8l-seg, YOLOv8x-seg
-- Classification: YOLOv8n-cls, YOLOv8s-cls, YOLOv8m-cls, YOLOv8l-cls, YOLOv8x-cls
+- Object Detection: YOLO11n, YOLO11s, YOLO11m, YOLO11l, YOLO11x
+- [Instance Segmentation](https://www.ultralytics.com/glossary/instance-segmentation): YOLO11n-seg, YOLO11s-seg, YOLO11m-seg, YOLO11l-seg, YOLO11x-seg
+- Classification: YOLO11n-cls, YOLO11s-cls, YOLO11m-cls, YOLO11l-cls, YOLO11x-cls

These models vary in size and complexity, offering different trade-offs between speed and [accuracy](https://www.ultralytics.com/glossary/accuracy). Explore the full range of [pretrained models](https://docs.ultralytics.com/models/yolov8/) to find the best fit for your project.
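As a quick illustration of how these pretrained variants are selected in code (the nano-sized names below are placeholders; any size suffix works the same way):

```python
from ultralytics import YOLO

# The task is inferred from the checkpoint name suffix
detector = YOLO("yolo11n.pt")  # object detection
segmenter = YOLO("yolo11n-seg.pt")  # instance segmentation
classifier = YOLO("yolo11n-cls.pt")  # classification
```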
@@ -118,17 +118,17 @@ Absolutely! Ultralytics models are designed for versatile deployment across vari

Ultralytics provides export functions to convert models to various formats for deployment. Explore the wide range of [deployment options](https://docs.ultralytics.com/guides/model-deployment-options/) to find the best solution for your use case.

-### What's the difference between YOLOv5 and YOLOv8?
+### What's the difference between YOLOv8 and YOLO11?

Key distinctions include:

-- Architecture: YOLOv8 features an improved backbone and head design for enhanced performance.
-- Performance: YOLOv8 generally offers superior accuracy and speed compared to YOLOv5.
-- Tasks: YOLOv8 natively supports [object detection](https://www.ultralytics.com/glossary/object-detection), instance segmentation, and classification in a unified framework.
-- Codebase: YOLOv8 is implemented with a more modular and extensible architecture, facilitating easier customization and extension.
-- Training: YOLOv8 incorporates advanced training techniques like multi-dataset training and hyperparameter evolution for improved results.
+- Architecture: YOLO11 features an improved backbone and head design for enhanced performance.
+- Performance: YOLO11 generally offers superior accuracy and speed compared to YOLOv8.
+- Tasks: YOLO11 natively supports [object detection](https://www.ultralytics.com/glossary/object-detection), instance segmentation, and classification in a unified framework.
+- Codebase: YOLO11 is implemented with a more modular and extensible architecture, facilitating easier customization and extension.
+- Training: YOLO11 incorporates advanced training techniques like multi-dataset training and hyperparameter evolution for improved results.

-For an in-depth comparison of features and performance metrics, visit the [YOLOv5 vs YOLOv8](https://www.ultralytics.com/yolo) comparison page.
+For an in-depth comparison of features and performance metrics, visit the [YOLO](https://www.ultralytics.com/yolo) comparison page.

### How can I contribute to the Ultralytics open-source project?

@@ -176,7 +176,7 @@ Enhancing your YOLO model's performance can be achieved through several techniqu

1. [Hyperparameter Tuning](https://www.ultralytics.com/glossary/hyperparameter-tuning): Experiment with different hyperparameters using the [Hyperparameter Tuning Guide](https://docs.ultralytics.com/guides/hyperparameter-tuning/) to optimize model performance.
2. [Data Augmentation](https://www.ultralytics.com/glossary/data-augmentation): Implement techniques like flip, scale, rotate, and color adjustments to enhance your training dataset and improve model generalization.
-3. [Transfer Learning](https://www.ultralytics.com/glossary/transfer-learning): Leverage pre-trained models and fine-tune them on your specific dataset using the [Train YOLOv8](https://docs.ultralytics.com/modes/train/) guide.
+3. [Transfer Learning](https://www.ultralytics.com/glossary/transfer-learning): Leverage pre-trained models and fine-tune them on your specific dataset using the [Train YOLO11](https://docs.ultralytics.com/modes/train/) guide.
4. Export to Efficient Formats: Convert your model to optimized formats like TensorRT or ONNX for faster inference using the [Export guide](../modes/export.md).
5. Benchmarking: Utilize the [Benchmark Mode](https://docs.ultralytics.com/modes/benchmark/) to measure and improve inference speed and accuracy systematically.
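For item 4 in the list above, a minimal export sketch (the checkpoint name is a placeholder; the call returns the path of the exported file):

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")

# Export to ONNX for faster, framework-independent inference;
# format="engine" would target TensorRT instead
onnx_path = model.export(format="onnx")
print(onnx_path)
```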
diff --git a/docs/en/help/index.md b/docs/en/help/index.md
index e8f2eecd7a..c1ff0128b2 100644
--- a/docs/en/help/index.md
+++ b/docs/en/help/index.md
@@ -22,9 +22,9 @@ We encourage you to review these resources for a seamless and productive experie

### What is Ultralytics YOLO and how does it benefit my [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) projects?

-Ultralytics YOLO (You Only Look Once) is a state-of-the-art, real-time [object detection](https://www.ultralytics.com/glossary/object-detection) model. Its latest version, YOLOv8, enhances speed, [accuracy](https://www.ultralytics.com/glossary/accuracy), and versatility, making it ideal for a wide range of applications, from real-time video analytics to advanced machine learning research. YOLO's efficiency in detecting objects in images and videos has made it the go-to solution for businesses and researchers looking to integrate robust [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) capabilities into their projects.
+Ultralytics YOLO (You Only Look Once) is a state-of-the-art, real-time [object detection](https://www.ultralytics.com/glossary/object-detection) model. Its latest version, YOLO11, enhances speed, [accuracy](https://www.ultralytics.com/glossary/accuracy), and versatility, making it ideal for a wide range of applications, from real-time video analytics to advanced machine learning research. YOLO's efficiency in detecting objects in images and videos has made it the go-to solution for businesses and researchers looking to integrate robust [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) capabilities into their projects.

-For more details on YOLOv8, visit the [YOLOv8 documentation](../tasks/detect.md).
+For more details on YOLO11, visit the [YOLO11 documentation](../tasks/detect.md).

### How do I contribute to Ultralytics YOLO repositories?

@@ -32,7 +32,7 @@ Contributing to Ultralytics YOLO repositories is straightforward. Start by revie

### Why should I use Ultralytics HUB for my machine learning projects?

-Ultralytics HUB offers a seamless, no-code solution for managing your machine learning projects. It enables you to generate, train, and deploy AI models like YOLOv8 effortlessly. Unique features include cloud training, real-time tracking, and intuitive dataset management. Ultralytics HUB simplifies the entire workflow, from data processing to [model deployment](https://www.ultralytics.com/glossary/model-deployment), making it an indispensable tool for both beginners and advanced users.
+Ultralytics HUB offers a seamless, no-code solution for managing your machine learning projects. It enables you to generate, train, and deploy AI models like YOLO11 effortlessly. Unique features include cloud training, real-time tracking, and intuitive dataset management. Ultralytics HUB simplifies the entire workflow, from data processing to [model deployment](https://www.ultralytics.com/glossary/model-deployment), making it an indispensable tool for both beginners and advanced users.

To get started, visit [Ultralytics HUB Quickstart](../hub/quickstart.md).

diff --git a/docs/en/hub/datasets.md b/docs/en/hub/datasets.md
index 5e6f3c4c87..24689cc5fd 100644
--- a/docs/en/hub/datasets.md
+++ b/docs/en/hub/datasets.md
@@ -48,7 +48,7 @@ The dataset YAML is the same standard YOLOv5 and YOLOv8 YAML format.

After zipping your dataset, you should [validate it](https://docs.ultralytics.com/reference/hub/__init__/#ultralytics.hub.check_dataset) before uploading it to [Ultralytics HUB](https://www.ultralytics.com/hub). [Ultralytics HUB](https://www.ultralytics.com/hub) conducts the dataset validation check post-upload, so by ensuring your dataset is correctly formatted and error-free ahead of time, you can forestall any setbacks due to dataset rejection.
-```py
+```python
from ultralytics.hub import check_dataset

check_dataset("path/to/dataset.zip", task="detect")

diff --git a/docs/en/hub/inference-api.md b/docs/en/hub/inference-api.md
index 0c79759b25..b532e8150c 100644
--- a/docs/en/hub/inference-api.md
+++ b/docs/en/hub/inference-api.md
@@ -66,14 +66,14 @@ To access the [Ultralytics HUB](https://www.ultralytics.com/hub) Inference API u

```python
import requests

-# API URL, use actual MODEL_ID
-url = "https://api.ultralytics.com/v1/predict/MODEL_ID"
+# API URL
+url = "https://predict.ultralytics.com"

# Headers, use actual API_KEY
headers = {"x-api-key": "API_KEY"}

-# Inference arguments (optional)
-data = {"imgsz": 640, "conf": 0.25, "iou": 0.45}
+# Inference arguments (use actual MODEL_ID)
+data = {"model": "https://hub.ultralytics.com/models/MODEL_ID", "imgsz": 640, "conf": 0.25, "iou": 0.45}

# Load image and send request
with open("path/to/image.jpg", "rb") as image_file:
@@ -94,8 +94,9 @@ print(response.json())

To access the [Ultralytics HUB](https://www.ultralytics.com/hub) Inference API using cURL, use the following code:

```bash
-curl -X POST "https://api.ultralytics.com/v1/predict/MODEL_ID" \
-  -H "x-api-key: API_KEY" \
+curl -X POST "https://predict.ultralytics.com" \
+  -H "x-api-key: API_KEY" \
+  -F "model=https://hub.ultralytics.com/models/MODEL_ID" \
  -F "file=@/path/to/image.jpg" \
  -F "imgsz=640" \
  -F "conf=0.25" \
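Complementing the examples above, here is a sketch of the same request with basic error handling added; the API key, model ID, and image path are placeholders, and the response is assumed to be JSON as the docs describe:

```python
import requests

url = "https://predict.ultralytics.com"  # HUB inference endpoint from the docs above
headers = {"x-api-key": "API_KEY"}  # replace with a real API key
data = {"model": "https://hub.ultralytics.com/models/MODEL_ID"}  # replace MODEL_ID

with open("path/to/image.jpg", "rb") as image_file:
    response = requests.post(url, headers=headers, data=data, files={"file": image_file})

# Fail fast on HTTP errors before attempting to parse the body
response.raise_for_status()
print(response.json())
```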
@@ -145,8 +146,9 @@ The [Ultralytics HUB](https://www.ultralytics.com/hub) Inference API returns a J
=== "cURL"

    ```bash
-    curl -X POST "https://api.ultralytics.com/v1/predict/MODEL_ID" \
+    curl -X POST "https://predict.ultralytics.com" \
      -H "x-api-key: API_KEY" \
+      -F "model=https://hub.ultralytics.com/models/MODEL_ID" \
      -F "file=@/path/to/image.jpg" \
      -F "imgsz=640" \
      -F "conf=0.25" \
@@ -158,14 +160,14 @@ The [Ultralytics HUB](https://www.ultralytics.com/hub) Inference API returns a J
=== "Python"

    ```python
    import requests

-    # API URL, use actual MODEL_ID
-    url = "https://api.ultralytics.com/v1/predict/MODEL_ID"
+    # API URL
+    url = "https://predict.ultralytics.com"

    # Headers, use actual API_KEY
    headers = {"x-api-key": "API_KEY"}

-    # Inference arguments (optional)
-    data = {"imgsz": 640, "conf": 0.25, "iou": 0.45}
+    # Inference arguments (use actual MODEL_ID)
+    data = {"model": "https://hub.ultralytics.com/models/MODEL_ID", "imgsz": 640, "conf": 0.25, "iou": 0.45}

    # Load image and send request
    with open("path/to/image.jpg", "rb") as image_file:
@@ -225,8 +227,9 @@ The [Ultralytics HUB](https://www.ultralytics.com/hub) Inference API returns a J
=== "cURL"

    ```bash
-    curl -X POST "https://api.ultralytics.com/v1/predict/MODEL_ID" \
+    curl -X POST "https://predict.ultralytics.com" \
      -H "x-api-key: API_KEY" \
+      -F "model=https://hub.ultralytics.com/models/MODEL_ID" \
      -F "file=@/path/to/image.jpg" \
      -F "imgsz=640" \
      -F "conf=0.25" \
@@ -238,14 +241,14 @@ The [Ultralytics HUB](https://www.ultralytics.com/hub) Inference API returns a J
=== "Python"

    ```python
    import requests

-    # API URL, use actual MODEL_ID
-    url = "https://api.ultralytics.com/v1/predict/MODEL_ID"
+    # API URL
+    url = "https://predict.ultralytics.com"

    # Headers, use actual API_KEY
    headers = {"x-api-key": "API_KEY"}

-    # Inference arguments (optional)
-    data = {"imgsz": 640, "conf": 0.25, "iou": 0.45}
+    # Inference arguments (use actual MODEL_ID)
+    data = {"model": "https://hub.ultralytics.com/models/MODEL_ID", "imgsz": 640, "conf": 0.25, "iou": 0.45}

    # Load image and send request
    with open("path/to/image.jpg", "rb") as image_file:
@@ -311,8 +314,9 @@ The [Ultralytics HUB](https://www.ultralytics.com/hub) Inference API returns a J
=== "cURL"

    ```bash
-    curl -X POST "https://api.ultralytics.com/v1/predict/MODEL_ID" \
+    curl -X POST "https://predict.ultralytics.com" \
      -H "x-api-key: API_KEY" \
+      -F "model=https://hub.ultralytics.com/models/MODEL_ID" \
      -F "file=@/path/to/image.jpg" \
      -F "imgsz=640" \
      -F "conf=0.25" \
@@ -324,14 +328,14 @@ The [Ultralytics HUB](https://www.ultralytics.com/hub) Inference API returns a J
=== "Python"

    ```python
    import requests

-    # API URL, use actual MODEL_ID
-    url = "https://api.ultralytics.com/v1/predict/MODEL_ID"
+    # API URL
+    url = "https://predict.ultralytics.com"

    # Headers, use actual API_KEY
    headers = {"x-api-key": "API_KEY"}

-    # Inference arguments (optional)
-    data = {"imgsz": 640, "conf": 0.25, "iou": 0.45}
+    # Inference arguments (use actual MODEL_ID)
+    data = {"model": "https://hub.ultralytics.com/models/MODEL_ID", "imgsz": 640, "conf": 0.25, "iou": 0.45}

    # Load image and send request
    with open("path/to/image.jpg", "rb") as image_file:
@@ -401,8 +405,9 @@ The [Ultralytics HUB](https://www.ultralytics.com/hub) Inference API returns a J
=== "cURL"

    ```bash
-    curl -X POST "https://api.ultralytics.com/v1/predict/MODEL_ID" \
+    curl -X POST "https://predict.ultralytics.com" \
      -H "x-api-key: API_KEY" \
+      -F "model=https://hub.ultralytics.com/models/MODEL_ID" \
      -F "file=@/path/to/image.jpg" \
      -F "imgsz=640" \
      -F "conf=0.25" \
@@ -414,14 +419,14 @@ The [Ultralytics HUB](https://www.ultralytics.com/hub) Inference API returns a J
=== "Python"

    ```python
    import requests

-    # API URL, use actual MODEL_ID
-    url = "https://api.ultralytics.com/v1/predict/MODEL_ID"
+    # API URL
+    url = "https://predict.ultralytics.com"

    # Headers, use actual API_KEY
    headers = {"x-api-key": "API_KEY"}

-    # Inference arguments (optional)
-    data = {"imgsz": 640, "conf": 0.25, "iou": 0.45}
+    # Inference arguments (use actual MODEL_ID)
+    data = {"model": "https://hub.ultralytics.com/models/MODEL_ID", "imgsz": 640, "conf": 0.25, "iou": 0.45}

    # Load image and send request
    with open("path/to/image.jpg", "rb") as image_file:
@@ -501,8 +506,9 @@ The [Ultralytics HUB](https://www.ultralytics.com/hub) Inference API returns a J
=== "cURL"

    ```bash
-    curl -X POST "https://api.ultralytics.com/v1/predict/MODEL_ID" \
+    curl -X POST "https://predict.ultralytics.com" \
      -H "x-api-key: API_KEY" \
+      -F "model=https://hub.ultralytics.com/models/MODEL_ID" \
      -F "file=@/path/to/image.jpg" \
      -F "imgsz=640" \
      -F "conf=0.25" \
@@ -514,14 +520,14 @@ The [Ultralytics HUB](https://www.ultralytics.com/hub) Inference API returns a J
=== "Python"

    ```python
    import requests

-    # API URL, use actual MODEL_ID
-    url = "https://api.ultralytics.com/v1/predict/MODEL_ID"
+    # API URL
+    url = "https://predict.ultralytics.com"

    # Headers, use actual API_KEY
    headers = {"x-api-key": "API_KEY"}

-    # Inference arguments (optional)
-    data = {"imgsz": 640, "conf": 0.25, "iou": 0.45}
+    # Inference arguments (use actual MODEL_ID)
+    data = {"model": "https://hub.ultralytics.com/models/MODEL_ID", "imgsz": 640, "conf": 0.25, "iou": 0.45}

    # Load image and send request
    with open("path/to/image.jpg", "rb") as image_file:
diff --git a/docs/en/index.md b/docs/en/index.md
index a25805c72a..45c5dab7d8 100644
--- a/docs/en/index.md
+++ b/docs/en/index.md
@@ -1,7 +1,7 @@
---
comments: true
-description: Discover Ultralytics YOLOv8 - the latest in real-time object detection and image segmentation. Learn its features and maximize its potential in your projects.
-keywords: Ultralytics, YOLOv8, object detection, image segmentation, deep learning, computer vision, AI, machine learning, documentation, tutorial
+description: Discover Ultralytics YOLO - the latest in real-time object detection and image segmentation. Learn its features and maximize its potential in your projects.
+keywords: Ultralytics, YOLO, YOLO11, object detection, image segmentation, deep learning, computer vision, AI, machine learning, documentation, tutorial
---
@@ -20,7 +20,7 @@ keywords: Ultralytics, YOLOv8, object detection, image segmentation, deep learni

Ultralytics CI
-YOLOv8 Citation
+YOLO Citation
Docker Pulls
Discord
Ultralytics Forums
@@ -31,9 +31,9 @@ keywords: Ultralytics, YOLOv8, object detection, image segmentation, deep learni
Open In Kaggle
-Introducing [Ultralytics](https://www.ultralytics.com/) [YOLOv8](https://github.com/ultralytics/ultralytics), the latest version of the acclaimed real-time object detection and image segmentation model. YOLOv8 is built on cutting-edge advancements in [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) and [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv), offering unparalleled performance in terms of speed and [accuracy](https://www.ultralytics.com/glossary/accuracy). Its streamlined design makes it suitable for various applications and easily adaptable to different hardware platforms, from edge devices to cloud APIs.
+Introducing [Ultralytics](https://www.ultralytics.com/) [YOLO11](https://github.com/ultralytics/ultralytics), the latest version of the acclaimed real-time object detection and image segmentation model. YOLO11 is built on cutting-edge advancements in [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) and [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv), offering unparalleled performance in terms of speed and [accuracy](https://www.ultralytics.com/glossary/accuracy). Its streamlined design makes it suitable for various applications and easily adaptable to different hardware platforms, from edge devices to cloud APIs.

-Explore the YOLOv8 Docs, a comprehensive resource designed to help you understand and utilize its features and capabilities. Whether you are a seasoned [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) practitioner or new to the field, this hub aims to maximize YOLOv8's potential in your projects
+Explore the Ultralytics Docs, a comprehensive resource designed to help you understand and utilize its features and capabilities. Whether you are a seasoned [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) practitioner or new to the field, this hub aims to maximize YOLO's potential in your projects.

@@ -55,10 +55,10 @@ Explore the YOLOv8 Docs, a comprehensive resource designed to help you understan
## Where to Start

- **Install** `ultralytics` with pip and get up and running in minutes &nbsp; [:material-clock-fast: Get Started](quickstart.md){ .md-button }
-- **Predict** new images and videos with YOLOv8 &nbsp; [:octicons-image-16: Predict on Images](modes/predict.md){ .md-button }
-- **Train** a new YOLOv8 model on your own custom dataset &nbsp; [:fontawesome-solid-brain: Train a Model](modes/train.md){ .md-button }
-- **Tasks** YOLOv8 tasks like segment, classify, pose and track &nbsp; [:material-magnify-expand: Explore Tasks](tasks/index.md){ .md-button }
-- **NEW 🚀 Explore** datasets with advanced semantic and SQL search &nbsp; [:material-magnify-expand: Explore a Dataset](datasets/explorer/index.md){ .md-button }
+- **Predict** new images and videos with YOLO &nbsp; [:octicons-image-16: Predict on Images](modes/predict.md){ .md-button }
+- **Train** a new YOLO model on your own custom dataset &nbsp; [:fontawesome-solid-brain: Train a Model](modes/train.md){ .md-button }
+- **Tasks** YOLO tasks like segment, classify, pose and track &nbsp; [:material-magnify-expand: Explore Tasks](tasks/index.md){ .md-button }
+- **[YOLO11](models/yolo11.md) NEW 🚀**: Ultralytics' latest SOTA models &nbsp; [:material-magnify-expand: Explore new YOLO11 models](models/yolo11.md){ .md-button }


@@ -68,7 +68,7 @@ Explore the YOLOv8 Docs, a comprehensive resource designed to help you understan
  allowfullscreen>
- Watch: How to Train a YOLOv8 model on Your Custom Dataset in Google Colab.
+ Watch: How to Train a YOLO model on Your Custom Dataset in Google Colab.

## YOLO: A Brief History

@@ -81,9 +81,10 @@ Explore the YOLOv8 Docs, a comprehensive resource designed to help you understan

- [YOLOv5](https://github.com/ultralytics/yolov5) further improved the model's performance and added new features such as hyperparameter optimization, integrated experiment tracking and automatic export to popular export formats.
- [YOLOv6](https://github.com/meituan/YOLOv6) was open-sourced by [Meituan](https://about.meituan.com/) in 2022 and is in use in many of the company's autonomous delivery robots.
- [YOLOv7](https://github.com/WongKinYiu/yolov7) added additional tasks such as pose estimation on the COCO keypoints dataset.
-- [YOLOv8](https://github.com/ultralytics/ultralytics) is the latest version of YOLO by Ultralytics. As a cutting-edge, state-of-the-art (SOTA) model, YOLOv8 builds on the success of previous versions, introducing new features and improvements for enhanced performance, flexibility, and efficiency. YOLOv8 supports a full range of vision AI tasks, including [detection](tasks/detect.md), [segmentation](tasks/segment.md), [pose estimation](tasks/pose.md), [tracking](modes/track.md), and [classification](tasks/classify.md). This versatility allows users to leverage YOLOv8's capabilities across diverse applications and domains.
+- [YOLOv8](https://github.com/ultralytics/ultralytics) was released in 2023 by Ultralytics. YOLOv8 introduced new features and improvements for enhanced performance, flexibility, and efficiency, supporting a full range of vision AI tasks.
- [YOLOv9](models/yolov9.md) introduces innovative methods like Programmable Gradient Information (PGI) and the Generalized Efficient Layer Aggregation Network (GELAN).
- [YOLOv10](models/yolov10.md) is created by researchers from [Tsinghua University](https://www.tsinghua.edu.cn/en/) using the [Ultralytics](https://www.ultralytics.com/) [Python package](https://pypi.org/project/ultralytics/). This version provides real-time [object detection](tasks/detect.md) advancements by introducing an End-to-End head that eliminates Non-Maximum Suppression (NMS) requirements.
+- **[YOLO11](models/yolo11.md) NEW 🚀**: Ultralytics' latest YOLO models deliver state-of-the-art (SOTA) performance across multiple tasks, including [detection](tasks/detect.md), [segmentation](tasks/segment.md), [pose estimation](tasks/pose.md), [tracking](modes/track.md), and [classification](tasks/classify.md), letting users apply these capabilities across diverse AI applications and domains.

## YOLO Licenses: How is Ultralytics YOLO licensed?

@@ -98,15 +99,19 @@ Our licensing strategy is designed to ensure that any improvements to our open-s

### What is Ultralytics YOLO and how does it improve object detection?

-Ultralytics YOLO is the latest advancement in the acclaimed YOLO (You Only Look Once) series for real-time object detection and image segmentation. It builds on previous versions by introducing new features and improvements for enhanced performance, flexibility, and efficiency. YOLOv8 supports various [vision AI tasks](tasks/index.md) such as detection, segmentation, pose estimation, tracking, and classification. Its state-of-the-art architecture ensures superior speed and accuracy, making it suitable for diverse applications, including edge devices and cloud APIs.
+Ultralytics YOLO is the latest advancement in the acclaimed YOLO (You Only Look Once) series for real-time object detection and image segmentation. It builds on previous versions by introducing new features and improvements for enhanced performance, flexibility, and efficiency. YOLO supports various [vision AI tasks](tasks/index.md) such as detection, segmentation, pose estimation, tracking, and classification. Its state-of-the-art architecture ensures superior speed and accuracy, making it suitable for diverse applications, including edge devices and cloud APIs.
### How can I get started with YOLO installation and setup?

-Getting started with YOLO is quick and straightforward. You can install the Ultralytics package using pip and get up and running in minutes. Here's a basic installation command:
+Getting started with YOLO is quick and straightforward. You can install the Ultralytics package using [pip](https://pypi.org/project/ultralytics/) and get up and running in minutes. Here's a basic installation command:

-```bash
-pip install ultralytics
-```
+!!! example
+
+    === "CLI"
+
+        ```bash
+        pip install ultralytics
+        ```

For a comprehensive step-by-step guide, visit our [quickstart guide](quickstart.md). This resource will help you with installation instructions, initial setup, and running your first model.

@@ -118,11 +123,28 @@ Training a custom YOLO model on your dataset involves a few detailed steps:

2. Configure the training parameters in a YAML file.
3. Use the `yolo train` command to start training.

-Here's an example command:
+Here's example code:
+
+!!! example
+
+    === "Python"
+
+        ```python
+        from ultralytics import YOLO
+
+        # Load a pre-trained YOLO model (you can choose n, s, m, l, or x versions)
+        model = YOLO("yolo11n.pt")
+
+        # Start training on your custom dataset
+        model.train(data="path/to/dataset.yaml", epochs=100, imgsz=640)
+        ```

-```bash
-yolo train model=yolov8n.pt data=coco128.yaml epochs=100 imgsz=640
-```
+    === "CLI"
+
+        ```bash
+        # Train a YOLO model from the command line
+        yolo train data=path/to/dataset.yaml epochs=100 imgsz=640
+        ```

For a detailed walkthrough, check out our [Train a Model](modes/train.md) guide, which includes examples and tips for optimizing your training process.

@@ -139,8 +161,27 @@ For more details, visit our [Licensing](https://www.ultralytics.com/license) pag

Ultralytics YOLO supports efficient and customizable multi-object tracking. To utilize tracking capabilities, you can use the `yolo track` command as shown below:

-```bash
-yolo track model=yolov8n.pt source=video.mp4
-```
+!!! example
+
+    === "Python"
+
+        ```python
+        from ultralytics import YOLO
+
+        # Load a pre-trained YOLO model
+        model = YOLO("yolo11n.pt")
+
+        # Start tracking objects in a video
+        # You can also use live video streams or webcam input
+        model.track(source="path/to/video.mp4")
+        ```
+
+    === "CLI"
+
+        ```bash
+        # Perform object tracking on a video from the command line
+        # You can specify different sources like webcam (0) or RTSP streams
+        yolo track source=path/to/video.mp4
+        ```

For a detailed guide on setting up and running object tracking, check our [tracking mode](modes/track.md) documentation, which explains the configuration and practical applications in real-time scenarios.
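Extending the tracking example above, here is a small sketch of reading per-frame track IDs from the results; the video path is a placeholder, and `boxes.id` may be `None` before any tracks are established:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")

# stream=True yields one Results object per frame instead of a list
for result in model.track(source="path/to/video.mp4", stream=True):
    if result.boxes.id is not None:
        print(result.boxes.id.tolist())  # persistent track IDs for this frame
```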
diff --git a/docs/en/integrations/amazon-sagemaker.md b/docs/en/integrations/amazon-sagemaker.md
index 9a82037ac2..366a4f1388 100644
--- a/docs/en/integrations/amazon-sagemaker.md
+++ b/docs/en/integrations/amazon-sagemaker.md
@@ -1,14 +1,14 @@
---
comments: true
-description: Learn step-by-step how to deploy Ultralytics' YOLOv8 on Amazon SageMaker Endpoints, from setup to testing, for powerful real-time inference with AWS services.
-keywords: YOLOv8, Amazon SageMaker, AWS, Ultralytics, machine learning, computer vision, model deployment, AWS CloudFormation, AWS CDK, real-time inference
+description: Learn step-by-step how to deploy Ultralytics' YOLO11 on Amazon SageMaker Endpoints, from setup to testing, for powerful real-time inference with AWS services.
+keywords: YOLO11, Amazon SageMaker, AWS, Ultralytics, machine learning, computer vision, model deployment, AWS CloudFormation, AWS CDK, real-time inference
---

-# A Guide to Deploying YOLOv8 on Amazon SageMaker Endpoints
+# A Guide to Deploying YOLO11 on Amazon SageMaker Endpoints

-Deploying advanced [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) models like [Ultralytics' YOLOv8](https://github.com/ultralytics/ultralytics) on Amazon SageMaker Endpoints opens up a wide range of possibilities for various [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) applications. The key to effectively using these models lies in understanding their setup, configuration, and deployment processes. YOLOv8 becomes even more powerful when integrated seamlessly with Amazon SageMaker, a robust and scalable machine learning service by AWS.
+Deploying advanced [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) models like [Ultralytics' YOLO11](https://github.com/ultralytics/ultralytics) on Amazon SageMaker Endpoints opens up a wide range of possibilities for various [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) applications. The key to effectively using these models lies in understanding their setup, configuration, and deployment processes. YOLO11 becomes even more powerful when integrated seamlessly with Amazon SageMaker, a robust and scalable machine learning service by AWS.

-This guide will take you through the process of deploying YOLOv8 [PyTorch](https://www.ultralytics.com/glossary/pytorch) models on Amazon SageMaker Endpoints step by step. You'll learn the essentials of preparing your AWS environment, configuring the model appropriately, and using tools like AWS CloudFormation and the AWS Cloud Development Kit (CDK) for deployment.
+This guide will take you through the process of deploying YOLO11 [PyTorch](https://www.ultralytics.com/glossary/pytorch) models on Amazon SageMaker Endpoints step by step. You'll learn the essentials of preparing your AWS environment, configuring the model appropriately, and using tools like AWS CloudFormation and the AWS Cloud Development Kit (CDK) for deployment.

## Amazon SageMaker

@@ -18,9 +18,9 @@ This guide will take you through the process of deploying YOLOv8 [PyTorch](https

[Amazon SageMaker](https://aws.amazon.com/sagemaker/) is a machine learning service from Amazon Web Services (AWS) that simplifies the process of building, training, and deploying machine learning models. It provides a broad range of tools for handling various aspects of machine learning workflows. This includes automated features for tuning models, options for training models at scale, and straightforward methods for deploying models into production. SageMaker supports popular machine learning frameworks, offering the flexibility needed for diverse projects. Its features also cover data labeling, workflow management, and performance analysis.
-## Deploying YOLOv8 on Amazon SageMaker Endpoints
+## Deploying YOLO11 on Amazon SageMaker Endpoints

-Deploying YOLOv8 on Amazon SageMaker lets you use its managed environment for real-time inference and take advantage of features like autoscaling. Take a look at the AWS architecture below.
+Deploying YOLO11 on Amazon SageMaker lets you use its managed environment for real-time inference and take advantage of features like autoscaling. Take a look at the AWS architecture below.

AWS Architecture

@@ -40,9 +40,9 @@ First, ensure you have the following prerequisites in place:

- Adequate Service Quota: Confirm that you have sufficient quotas for two separate resources in Amazon SageMaker: one for `ml.m5.4xlarge` for endpoint usage and another for `ml.m5.4xlarge` for notebook instance usage. Each of these requires a minimum of one quota value. If your current quotas are below this requirement, it's important to request an increase for each. You can request a quota increase by following the detailed instructions in the [AWS Service Quotas documentation](https://docs.aws.amazon.com/servicequotas/latest/userguide/request-quota-increase.html#quota-console-increase).

-### Step 2: Clone the YOLOv8 SageMaker Repository
+### Step 2: Clone the YOLO11 SageMaker Repository

-The next step is to clone the specific AWS repository that contains the resources for deploying YOLOv8 on SageMaker. This repository, hosted on GitHub, includes the necessary CDK scripts and configuration files.
+The next step is to clone the specific AWS repository that contains the resources for deploying YOLO11 on SageMaker. This repository, hosted on GitHub, includes the necessary CDK scripts and configuration files.

- Clone the GitHub Repository: Execute the following command in your terminal to clone the host-yolov8-on-sagemaker-endpoint repository:

@@ -104,11 +104,11 @@ cdk bootstrap
cdk deploy
```

-### Step 5: Deploy the YOLOv8 Model
+### Step 5: Deploy the YOLO Model

-Before diving into the deployment instructions, be sure to check out the range of [YOLOv8 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements.
+Before diving into the deployment instructions, be sure to check out the range of [YOLO11 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements.

-After creating the AWS CloudFormation Stack, the next step is to deploy YOLOv8.
+After creating the AWS CloudFormation Stack, the next step is to deploy YOLO11.

- Open the Notebook Instance: Go to the AWS Console and navigate to the Amazon SageMaker service. Select "Notebook Instances" from the dashboard, then locate the notebook instance that was created by your CDK deployment script. Open the notebook instance to access the Jupyter environment.

@@ -136,18 +136,18 @@ def output_fn(prediction_output):
    return json.dumps(infer)
```

-- Deploy the Endpoint Using 1_DeployEndpoint.ipynb: In the Jupyter environment, open the 1_DeployEndpoint.ipynb notebook located in the sm-notebook directory. Follow the instructions in the notebook and run the cells to download the YOLOv8 model, package it with the updated inference code, and upload it to an Amazon S3 bucket. The notebook will guide you through creating and deploying a SageMaker endpoint for the YOLOv8 model.
+- Deploy the Endpoint Using 1_DeployEndpoint.ipynb: In the Jupyter environment, open the 1_DeployEndpoint.ipynb notebook located in the sm-notebook directory. Follow the instructions in the notebook and run the cells to download the YOLO11 model, package it with the updated inference code, and upload it to an Amazon S3 bucket. The notebook will guide you through creating and deploying a SageMaker endpoint for the YOLO11 model.

### Step 6: Testing Your Deployment

-Now that your YOLOv8 model is deployed, it's important to test its performance and functionality.
+Now that your YOLO11 model is deployed, it's important to test its performance and functionality.
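The test notebook covers this end to end; as a quick standalone check, the endpoint can also be invoked from any Python environment with AWS credentials. A minimal sketch follows, where the endpoint name is a placeholder and the response format depends on the deployed `inference.py`:

```python
import boto3

runtime = boto3.client("sagemaker-runtime")

# Send a raw JPEG to the deployed endpoint; the endpoint name is a placeholder
with open("path/to/image.jpg", "rb") as f:
    response = runtime.invoke_endpoint(
        EndpointName="yolo11-endpoint",
        ContentType="image/jpeg",
        Body=f.read(),
    )

print(response["Body"].read().decode("utf-8"))
```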
- Open the Test Notebook: In the same Jupyter environment, locate and open the 2_TestEndpoint.ipynb notebook, also in the sm-notebook directory.

- Run the Test Notebook: Follow the instructions within the notebook to test the deployed SageMaker endpoint. This includes sending an image to the endpoint and running inferences. Then, you'll plot the output to visualize the model's performance and [accuracy](https://www.ultralytics.com/glossary/accuracy), as shown below.

- Testing Results YOLOv8
+ Testing Results YOLO11

- Clean-Up Resources: The test notebook will also guide you through the process of cleaning up the endpoint and the hosted model. This is an important step to manage costs and resources effectively, especially if you do not plan to use the deployed model immediately.

@@ -160,24 +160,24 @@ After testing, continuous monitoring and management of your deployed model are e

- Manage the Endpoint: Use the SageMaker console for ongoing management of the endpoint. This includes scaling, updating, or redeploying the model as required.

-By completing these steps, you will have successfully deployed and tested a YOLOv8 model on Amazon SageMaker Endpoints. This process not only equips you with practical experience in using AWS services for machine learning deployment but also lays the foundation for deploying other advanced models in the future.
+By completing these steps, you will have successfully deployed and tested a YOLO11 model on Amazon SageMaker Endpoints. This process not only equips you with practical experience in using AWS services for machine learning deployment but also lays the foundation for deploying other advanced models in the future.

## Summary

-This guide took you step by step through deploying YOLOv8 on Amazon SageMaker Endpoints using AWS CloudFormation and the AWS Cloud Development Kit (CDK). The process includes cloning the necessary GitHub repository, setting up the CDK environment, deploying the model using AWS services, and testing its performance on SageMaker.
+This guide took you step by step through deploying YOLO11 on Amazon SageMaker Endpoints using AWS CloudFormation and the AWS Cloud Development Kit (CDK). The process includes cloning the necessary GitHub repository, setting up the CDK environment, deploying the model using AWS services, and testing its performance on SageMaker.

For more technical details, refer to [this article](https://aws.amazon.com/blogs/machine-learning/hosting-yolov8-pytorch-model-on-amazon-sagemaker-endpoints/) on the AWS Machine Learning Blog. You can also check out the official [Amazon SageMaker Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/realtime-endpoints.html) for more insights into various features and functionalities.

-Are you interested in learning more about different YOLOv8 integrations? Visit the [Ultralytics integrations guide page](../integrations/index.md) to discover additional tools and capabilities that can enhance your machine-learning projects.
+Are you interested in learning more about different YOLO11 integrations? Visit the [Ultralytics integrations guide page](../integrations/index.md) to discover additional tools and capabilities that can enhance your machine-learning projects.

## FAQ

-### How do I deploy the Ultralytics YOLOv8 model on Amazon SageMaker Endpoints?
+### How do I deploy the Ultralytics YOLO11 model on Amazon SageMaker Endpoints?

-To deploy the Ultralytics YOLOv8 model on Amazon SageMaker Endpoints, follow these steps:
+To deploy the Ultralytics YOLO11 model on Amazon SageMaker Endpoints, follow these steps:

1. **Set Up Your AWS Environment**: Ensure you have an AWS Account, IAM roles with necessary permissions, and the AWS CLI configured. Install AWS CDK if not already done (refer to the [AWS CDK instructions](https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html#getting_started_install)).

-2. **Clone the YOLOv8 SageMaker Repository**:
+2. **Clone the YOLO11 SageMaker Repository**:

    ```bash
    git clone https://github.com/aws-samples/host-yolov8-on-sagemaker-endpoint.git
    cd host-yolov8-on-sagemaker-endpoint/yolov8-pytorch-cdk
@@ -196,11 +196,11 @@ To deploy the Ultralytics YOLOv8 model on Amazon SageMaker Endpoints, follow the
    cdk deploy
    ```

-For further details, review the [documentation section](#step-5-deploy-the-yolov8-model).
+For further details, review the [documentation section](#step-5-deploy-the-yolo-model).

-### What are the prerequisites for deploying YOLOv8 on Amazon SageMaker?
+### What are the prerequisites for deploying YOLO11 on Amazon SageMaker?

-To deploy YOLOv8 on Amazon SageMaker, ensure you have the following prerequisites:
+To deploy YOLO11 on Amazon SageMaker, ensure you have the following prerequisites:

1. **AWS Account**: Active AWS account ([sign up here](https://aws.amazon.com/)).
2. **IAM Roles**: Configured IAM roles with permissions for SageMaker, CloudFormation, and Amazon S3.
@@ -210,9 +210,9 @@ To deploy YOLOv8 on Amazon SageMaker, ensure you have the following prerequisite

For detailed setup, refer to [this section](#step-1-setup-your-aws-environment).

-### Why should I use Ultralytics YOLOv8 on Amazon SageMaker?
+### Why should I use Ultralytics YOLO11 on Amazon SageMaker?

-Using Ultralytics YOLOv8 on Amazon SageMaker offers several advantages:
+Using Ultralytics YOLO11 on Amazon SageMaker offers several advantages:

1. **Scalability and Management**: SageMaker provides a managed environment with features like autoscaling, which helps in real-time inference needs.
2. **Integration with AWS Services**: Seamlessly integrate with other AWS services, such as S3 for data storage, CloudFormation for infrastructure as code, and CloudWatch for monitoring.

Explore more about the advantages of using SageMaker in the [introduction section](#amazon-sagemaker).

-### Can I customize the inference logic for YOLOv8 on Amazon SageMaker?
+### Can I customize the inference logic for YOLO11 on Amazon SageMaker?

-Yes, you can customize the inference logic for YOLOv8 on Amazon SageMaker:
+Yes, you can customize the inference logic for YOLO11 on Amazon SageMaker:

1. **Modify `inference.py`**: Locate and customize the `output_fn` function in the `inference.py` file to tailor output formats.

@@ -243,11 +243,11 @@ Yes, you can customize the inference logic for YOLOv8 on Amazon SageMaker:

2. **Deploy Updated Model**: Ensure you redeploy the model using Jupyter notebooks provided (`1_DeployEndpoint.ipynb`) to include these changes.

-Refer to the [detailed steps](#step-5-deploy-the-yolov8-model) for deploying the modified model.
+Refer to the [detailed steps](#step-5-deploy-the-yolo-model) for deploying the modified model.
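To illustrate the kind of customization meant here, a hypothetical `output_fn` that serializes raw detection boxes to JSON is sketched below; it assumes `prediction_output` is a list of Ultralytics `Results` objects, which is an assumption about the sample's `inference.py`:

```python
import json


def output_fn(prediction_output):
    """Hypothetical sketch: convert Ultralytics Results into a JSON string of raw boxes."""
    infer = {}
    for result in prediction_output:
        if result.boxes is not None:
            # Each row: x1, y1, x2, y2, confidence, class index (assumed layout)
            infer["boxes"] = result.boxes.data.tolist()
    return json.dumps(infer)
```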
-### How can I test the deployed YOLOv8 model on Amazon SageMaker?
+### How can I test the deployed YOLO11 model on Amazon SageMaker?

-To test the deployed YOLOv8 model on Amazon SageMaker:
+To test the deployed YOLO11 model on Amazon SageMaker:

1. **Open the Test Notebook**: Locate the `2_TestEndpoint.ipynb` notebook in the SageMaker Jupyter environment.
2. **Run the Notebook**: Follow the notebook's instructions to send an image to the endpoint, perform inference, and display results.
diff --git a/docs/en/integrations/clearml.md b/docs/en/integrations/clearml.md
index 5245d88740..465f2fa8d1 100644
--- a/docs/en/integrations/clearml.md
+++ b/docs/en/integrations/clearml.md
@@ -1,14 +1,14 @@
---
comments: true
-description: Discover how to integrate YOLOv8 with ClearML to streamline your MLOps workflow, automate experiments, and enhance model management effortlessly.
-keywords: YOLOv8, ClearML, MLOps, Ultralytics, machine learning, object detection, model training, automation, experiment management
+description: Discover how to integrate YOLO11 with ClearML to streamline your MLOps workflow, automate experiments, and enhance model management effortlessly.
+keywords: YOLO11, ClearML, MLOps, Ultralytics, machine learning, object detection, model training, automation, experiment management
---

-# Training YOLOv8 with ClearML: Streamlining Your MLOps Workflow
+# Training YOLO11 with ClearML: Streamlining Your MLOps Workflow

MLOps bridges the gap between creating and deploying [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) models in real-world settings. It focuses on efficient deployment, scalability, and ongoing management to ensure models perform well in practical applications.

-[Ultralytics YOLOv8](https://www.ultralytics.com/) effortlessly integrates with ClearML, streamlining and enhancing your [object detection](https://www.ultralytics.com/glossary/object-detection) model's training and management. This guide will walk you through the integration process, detailing how to set up ClearML, manage experiments, automate model management, and collaborate effectively.
+[Ultralytics YOLO11](https://www.ultralytics.com/) effortlessly integrates with ClearML, streamlining and enhancing your [object detection](https://www.ultralytics.com/glossary/object-detection) model's training and management. This guide will walk you through the integration process, detailing how to set up ClearML, manage experiments, automate model management, and collaborate effectively.

## ClearML

@@ -18,9 +18,9 @@ MLOps bridges the gap between creating and deploying [machine learning](https://

[ClearML](https://clear.ml/) is an innovative open-source MLOps platform that is skillfully designed to automate, monitor, and orchestrate machine learning workflows. Its key features include automated logging of all training and inference data for full experiment reproducibility, an intuitive web UI for easy [data visualization](https://www.ultralytics.com/glossary/data-visualization) and analysis, advanced hyperparameter [optimization algorithms](https://www.ultralytics.com/glossary/optimization-algorithm), and robust model management for efficient deployment across various platforms.

-## YOLOv8 Training with ClearML
+## YOLO11 Training with ClearML

-You can bring automation and efficiency to your machine learning workflow by improving your training process by integrating YOLOv8 with ClearML.
+You can bring automation and efficiency to your machine learning workflow by integrating ClearML into your YOLO11 training process.

## Installation

To install the required packages, run:

=== "CLI"

    ```bash
-    # Install the required packages for YOLOv8 and ClearML
+    # Install the required packages for YOLO11 and ClearML
    pip install ultralytics clearml
    ```

-For detailed instructions and best practices related to the installation process, be sure to check our [YOLOv8 Installation guide](../quickstart.md). While installing the required packages for YOLOv8, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips.
+For detailed instructions and best practices related to the installation process, be sure to check our [YOLO11 Installation guide](../quickstart.md). While installing the required packages for YOLO11, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips.
## Configuring ClearML

@@ -56,7 +56,7 @@ After executing this command, visit the [ClearML Settings page](https://app.clea

## Usage

-Before diving into the usage instructions, be sure to check out the range of [YOLOv8 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements.
+Before diving into the usage instructions, be sure to check out the range of [YOLO11 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements.

!!! example "Usage"

@@ -70,11 +70,11 @@ Before diving into the usage instructions, be sure to check out the range of [YO

        # Step 1: Creating a ClearML Task
        task = Task.init(project_name="my_project", task_name="my_yolov8_task")

-        # Step 2: Selecting the YOLOv8 Model
-        model_variant = "yolov8n"
+        # Step 2: Selecting the YOLO11 Model
+        model_variant = "yolo11n"
        task.set_parameter("model_variant", model_variant)

-        # Step 3: Loading the YOLOv8 Model
+        # Step 3: Loading the YOLO11 Model
        model = YOLO(f"{model_variant}.pt")

        # Step 4: Setting Up Training Arguments

@@ -91,11 +91,11 @@ Let's understand the steps showcased in the usage code snippet above.

**Step 1: Creating a ClearML Task**: A new task is initialized in ClearML, specifying your project and task names. This task will track and manage your model's training.

-**Step 2: Selecting the YOLOv8 Model**: The `model_variant` variable is set to 'yolov8n', one of the YOLOv8 models. This variant is then logged in ClearML for tracking.
+**Step 2: Selecting the YOLO11 Model**: The `model_variant` variable is set to 'yolo11n', one of the YOLO11 models. This variant is then logged in ClearML for tracking.

-**Step 3: Loading the YOLOv8 Model**: The selected YOLOv8 model is loaded using Ultralytics' YOLO class, preparing it for training.
+**Step 3: Loading the YOLO11 Model**: The selected YOLO11 model is loaded using Ultralytics' YOLO class, preparing it for training.

-**Step 4: Setting Up Training Arguments**: Key training arguments like the dataset (`coco8.yaml`) and the number of [epochs](https://www.ultralytics.com/glossary/epoch) (`16`) are organized in a dictionary and connected to the ClearML task. This allows for tracking and potential modification via the ClearML UI. For a detailed understanding of the model training process and best practices, refer to our [YOLOv8 Model Training guide](../modes/train.md).
+**Step 4: Setting Up Training Arguments**: Key training arguments like the dataset (`coco8.yaml`) and the number of [epochs](https://www.ultralytics.com/glossary/epoch) (`16`) are organized in a dictionary and connected to the ClearML task. This allows for tracking and potential modification via the ClearML UI. For a detailed understanding of the model training process and best practices, refer to our [YOLO11 Model Training guide](../modes/train.md).

**Step 5: Initiating Model Training**: The model training is started with the specified arguments. The results of the training process are captured in the `results` variable.
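Putting the five steps together, here is a consolidated sketch of the snippet the hunk above modifies; the project, task, and dataset names are placeholders:

```python
from clearml import Task

from ultralytics import YOLO

# Create a ClearML task to track the run (project/task names are placeholders)
task = Task.init(project_name="my_project", task_name="my_yolo11_task")

# Log the chosen model variant, then load it
model_variant = "yolo11n"
task.set_parameter("model_variant", model_variant)
model = YOLO(f"{model_variant}.pt")

# Connect the training arguments to the task, then start training
args = dict(data="coco8.yaml", epochs=16)
task.connect(args)
results = model.train(**args)
```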
The results of the training process are captured in the `results` variable. @@ -106,7 +106,7 @@ Upon running the usage code snippet above, you can expect the following output: - A confirmation message indicating the creation of a new ClearML task, along with its unique ID. - An informational message about the script code being stored, indicating that the code execution is being tracked by ClearML. - A URL link to the ClearML results page where you can monitor the training progress and view detailed logs. -- Download progress for the YOLOv8 model and the specified dataset, followed by a summary of the model architecture and training configuration. +- Download progress for the YOLO11 model and the specified dataset, followed by a summary of the model architecture and training configuration. - Initialization messages for various training components like TensorBoard, Automatic [Mixed Precision](https://www.ultralytics.com/glossary/mixed-precision) (AMP), and dataset preparation. - Finally, the training process starts, with progress updates as the model trains on the specified dataset. For an in-depth understanding of the performance metrics used during training, read [our guide on performance metrics](../guides/yolo-performance-metrics.md). @@ -151,7 +151,7 @@ For a visual walkthrough of what the ClearML Results Page looks like, watch the allowfullscreen>
- Watch: YOLOv8 MLOps Integration using ClearML + Watch: YOLO11 MLOps Integration using ClearML

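Beyond the web UI, ClearML's Python SDK can also pull logged results back into a script for programmatic comparison. The snippet below is a minimal sketch, not part of the official guide, assuming the `my_project` and `my_yolov8_task` names from the usage example above; it relies on ClearML's `Task.get_task` and `get_reported_scalars` SDK calls.

```python
from clearml import Task

# Fetch the finished training task by the project/task names used earlier
task = Task.get_task(project_name="my_project", task_name="my_yolov8_task")

# Logged scalars (losses, mAP curves, etc.) come back as nested dicts keyed by plot title
scalars = task.get_reported_scalars()
print(list(scalars.keys()))
```

This is useful when you want scripted, repeatable comparisons across many runs rather than manual inspection in the dashboard.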
### Advanced Features in ClearML @@ -180,7 +180,7 @@ ClearML's user-friendly interface allows easy cloning, editing, and enqueuing of ## Summary -This guide has led you through the process of integrating ClearML with Ultralytics' YOLOv8. Covering everything from initial setup to advanced model management, you've discovered how to leverage ClearML for efficient training, experiment tracking, and workflow optimization in your machine learning projects. +This guide has led you through the process of integrating ClearML with Ultralytics' YOLO11. Covering everything from initial setup to advanced model management, you've discovered how to leverage ClearML for efficient training, experiment tracking, and workflow optimization in your machine learning projects. For further details on usage, visit [ClearML's official documentation](https://clear.ml/docs/latest/docs/integrations/yolov8/). @@ -188,9 +188,9 @@ Additionally, explore more integrations and capabilities of Ultralytics by visit ## FAQ -### What is the process for integrating Ultralytics YOLOv8 with ClearML? +### What is the process for integrating Ultralytics YOLO11 with ClearML? -Integrating Ultralytics YOLOv8 with ClearML involves a series of steps to streamline your MLOps workflow. First, install the necessary packages: +Integrating Ultralytics YOLO11 with ClearML involves a series of steps to streamline your MLOps workflow. First, install the necessary packages: ```bash pip install ultralytics clearml @@ -202,19 +202,19 @@ Next, initialize the ClearML SDK in your environment using: clearml-init ``` -You then configure ClearML with your credentials from the [ClearML Settings page](https://app.clear.ml/settings/workspace-configuration). Detailed instructions on the entire setup process, including model selection and training configurations, can be found in our [YOLOv8 Model Training guide](../modes/train.md). +You then configure ClearML with your credentials from the [ClearML Settings page](https://app.clear.ml/settings/workspace-configuration). Detailed instructions on the entire setup process, including model selection and training configurations, can be found in our [YOLO11 Model Training guide](../modes/train.md). -### Why should I use ClearML with Ultralytics YOLOv8 for my machine learning projects? +### Why should I use ClearML with Ultralytics YOLO11 for my machine learning projects? -Using ClearML with Ultralytics YOLOv8 enhances your machine learning projects by automating experiment tracking, streamlining workflows, and enabling robust model management. ClearML offers real-time metrics tracking, resource utilization monitoring, and a user-friendly interface for comparing experiments. These features help optimize your model's performance and make the development process more efficient. Learn more about the benefits and procedures in our [MLOps Integration guide](../modes/train.md). +Using ClearML with Ultralytics YOLO11 enhances your machine learning projects by automating experiment tracking, streamlining workflows, and enabling robust model management. ClearML offers real-time metrics tracking, resource utilization monitoring, and a user-friendly interface for comparing experiments. These features help optimize your model's performance and make the development process more efficient. Learn more about the benefits and procedures in our [MLOps Integration guide](../modes/train.md). -### How do I troubleshoot common issues during YOLOv8 and ClearML integration? 
+### How do I troubleshoot common issues during YOLO11 and ClearML integration? -If you encounter issues during the integration of YOLOv8 with ClearML, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. Typical problems might involve package installation errors, credential setup, or configuration issues. This guide provides step-by-step troubleshooting instructions to resolve these common issues efficiently. +If you encounter issues during the integration of YOLO11 with ClearML, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. Typical problems might involve package installation errors, credential setup, or configuration issues. This guide provides step-by-step troubleshooting instructions to resolve these common issues efficiently. -### How do I set up the ClearML task for YOLOv8 model training? +### How do I set up the ClearML task for YOLO11 model training? -Setting up a ClearML task for YOLOv8 training involves initializing a task, selecting the model variant, loading the model, setting up training arguments, and finally, starting the model training. Here's a simplified example: +Setting up a ClearML task for YOLO11 training involves initializing a task, selecting the model variant, loading the model, setting up training arguments, and finally, starting the model training. Here's a simplified example: ```python from clearml import Task @@ -224,11 +224,11 @@ from ultralytics import YOLO # Step 1: Creating a ClearML Task task = Task.init(project_name="my_project", task_name="my_yolov8_task") -# Step 2: Selecting the YOLOv8 Model -model_variant = "yolov8n" +# Step 2: Selecting the YOLO11 Model +model_variant = "yolo11n" task.set_parameter("model_variant", model_variant) -# Step 3: Loading the YOLOv8 Model +# Step 3: Loading the YOLO11 Model model = YOLO(f"{model_variant}.pt") # Step 4: Setting Up Training Arguments @@ -241,6 +241,6 @@ results = model.train(**args) Refer to our [Usage guide](#usage) for a detailed breakdown of these steps. -### Where can I view the results of my YOLOv8 training in ClearML? +### Where can I view the results of my YOLO11 training in ClearML? -After running your YOLOv8 training script with ClearML, you can view the results on the ClearML results page. The output will include a URL link to the ClearML dashboard, where you can track metrics, compare experiments, and monitor resource usage. For more details on how to view and interpret the results, check our section on [Viewing the ClearML Results Page](#viewing-the-clearml-results-page). +After running your YOLO11 training script with ClearML, you can view the results on the ClearML results page. The output will include a URL link to the ClearML dashboard, where you can track metrics, compare experiments, and monitor resource usage. For more details on how to view and interpret the results, check our section on [Viewing the ClearML Results Page](#viewing-the-clearml-results-page). diff --git a/docs/en/integrations/comet.md b/docs/en/integrations/comet.md index 2591973515..2774b2fed9 100644 --- a/docs/en/integrations/comet.md +++ b/docs/en/integrations/comet.md @@ -1,14 +1,14 @@ --- comments: true -description: Learn to simplify the logging of YOLOv8 training with Comet ML. This guide covers installation, setup, real-time insights, and custom logging. 
-keywords: YOLOv8, Comet ML, logging, machine learning, training, model checkpoints, metrics, installation, configuration, real-time insights, custom logging +description: Learn to simplify the logging of YOLO11 training with Comet ML. This guide covers installation, setup, real-time insights, and custom logging. +keywords: YOLO11, Comet ML, logging, machine learning, training, model checkpoints, metrics, installation, configuration, real-time insights, custom logging --- -# Elevating YOLOv8 Training: Simplify Your Logging Process with Comet ML +# Elevating YOLO11 Training: Simplify Your Logging Process with Comet ML Logging key training details such as parameters, metrics, image predictions, and model checkpoints is essential in [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml)—it keeps your project transparent, your progress measurable, and your results repeatable. -[Ultralytics YOLOv8](https://www.ultralytics.com/) seamlessly integrates with Comet ML, efficiently capturing and optimizing every aspect of your YOLOv8 [object detection](https://www.ultralytics.com/glossary/object-detection) model's training process. In this guide, we'll cover the installation process, Comet ML setup, real-time insights, custom logging, and offline usage, ensuring that your YOLOv8 training is thoroughly documented and fine-tuned for outstanding results. +[Ultralytics YOLO11](https://www.ultralytics.com/) seamlessly integrates with Comet ML, efficiently capturing and optimizing every aspect of your YOLO11 [object detection](https://www.ultralytics.com/glossary/object-detection) model's training process. In this guide, we'll cover the installation process, Comet ML setup, real-time insights, custom logging, and offline usage, ensuring that your YOLO11 training is thoroughly documented and fine-tuned for outstanding results. ## Comet ML @@ -18,9 +18,9 @@ Logging key training details such as parameters, metrics, image predictions, and [Comet ML](https://www.comet.com/site/) is a platform for tracking, comparing, explaining, and optimizing machine learning models and experiments. It allows you to log metrics, parameters, media, and more during your model training and monitor your experiments through an aesthetically pleasing web interface. Comet ML helps data scientists iterate more rapidly, enhances transparency and reproducibility, and aids in the development of production models. -## Harnessing the Power of YOLOv8 and Comet ML +## Harnessing the Power of YOLO11 and Comet ML -By combining Ultralytics YOLOv8 with Comet ML, you unlock a range of benefits. These include simplified experiment management, real-time insights for quick adjustments, flexible and tailored logging options, and the ability to log experiments offline when internet access is limited. This integration empowers you to make data-driven decisions, analyze performance metrics, and achieve exceptional results. +By combining Ultralytics YOLO11 with Comet ML, you unlock a range of benefits. These include simplified experiment management, real-time insights for quick adjustments, flexible and tailored logging options, and the ability to log experiments offline when internet access is limited. This integration empowers you to make data-driven decisions, analyze performance metrics, and achieve exceptional results. 
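Before stepping through installation and configuration in detail, here is the whole flow at a glance. This is a minimal sketch assembled from the steps covered below, with `comet-example-yolo11-coco8` as an illustrative project name rather than a required value:

```python
import comet_ml

from ultralytics import YOLO

# Authenticate with Comet; you will be prompted for an API key if none is configured
comet_ml.login(project_name="comet-example-yolo11-coco8")

# Train a YOLO11 model; Comet detects the run and logs metrics automatically
model = YOLO("yolo11n.pt")
results = model.train(data="coco8.yaml", project="comet-example-yolo11-coco8", epochs=3, imgsz=640)
```

Each of these steps is explained in the sections that follow.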
## Installation @@ -31,7 +31,7 @@ To install the required packages, run: === "CLI" ```bash - # Install the required packages for YOLOv8 and Comet ML + # Install the required packages for YOLO11 and Comet ML pip install ultralytics comet_ml torch torchvision ``` @@ -60,7 +60,7 @@ If you are using a Google Colab notebook, the code above will prompt you to ente ## Usage -Before diving into the usage instructions, be sure to check out the range of [YOLOv8 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements. +Before diving into the usage instructions, be sure to check out the range of [YOLO11 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements. !!! example "Usage" @@ -70,7 +70,7 @@ Before diving into the usage instructions, be sure to check out the range of [YO from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Train the model results = model.train( @@ -83,13 +83,13 @@ Before diving into the usage instructions, be sure to check out the range of [YO ) ``` -After running the training code, Comet ML will create an experiment in your Comet workspace to track the run automatically. You will then be provided with a link to view the detailed logging of your [YOLOv8 model's training](../modes/train.md) process. +After running the training code, Comet ML will create an experiment in your Comet workspace to track the run automatically. You will then be provided with a link to view the detailed logging of your [YOLO11 model's training](../modes/train.md) process. Comet automatically logs the following data with no additional configuration: metrics such as mAP and loss, hyperparameters, model checkpoints, interactive confusion matrix, and image [bounding box](https://www.ultralytics.com/glossary/bounding-box) predictions. ## Understanding Your Model's Performance with Comet ML Visualizations -Let's dive into what you'll see on the Comet ML dashboard once your YOLOv8 model begins training. The dashboard is where all the action happens, presenting a range of automatically logged information through visuals and statistics. Here's a quick tour: +Let's dive into what you'll see on the Comet ML dashboard once your YOLO11 model begins training. The dashboard is where all the action happens, presenting a range of automatically logged information through visuals and statistics. Here's a quick tour: **Experiment Panels** @@ -169,19 +169,19 @@ os.environ["COMET_MODE"] = "offline" ## Summary -This guide has walked you through integrating Comet ML with Ultralytics' YOLOv8. From installation to customization, you've learned to streamline experiment management, gain real-time insights, and adapt logging to your project's needs. +This guide has walked you through integrating Comet ML with Ultralytics' YOLO11. From installation to customization, you've learned to streamline experiment management, gain real-time insights, and adapt logging to your project's needs. -Explore [Comet ML's official documentation](https://www.comet.com/docs/v2/integrations/third-party-tools/yolov8/) for more insights on integrating with YOLOv8. +Explore [Comet ML's official documentation](https://www.comet.com/docs/v2/integrations/third-party-tools/yolov8/) for more insights on integrating with YOLO11. 
-Furthermore, if you're looking to dive deeper into the practical applications of YOLOv8, specifically for [image segmentation](https://www.ultralytics.com/glossary/image-segmentation) tasks, this detailed guide on [fine-tuning YOLOv8 with Comet ML](https://www.comet.com/site/blog/fine-tuning-yolov8-for-image-segmentation-with-comet/) offers valuable insights and step-by-step instructions to enhance your model's performance. +Furthermore, if you're looking to dive deeper into the practical applications of YOLO11, specifically for [image segmentation](https://www.ultralytics.com/glossary/image-segmentation) tasks, this detailed guide on [fine-tuning YOLO11 with Comet ML](https://www.comet.com/site/blog/fine-tuning-yolov8-for-image-segmentation-with-comet/) offers valuable insights and step-by-step instructions to enhance your model's performance. Additionally, to explore other exciting integrations with Ultralytics, check out the [integration guide page](../integrations/index.md), which offers a wealth of resources and information. ## FAQ -### How do I integrate Comet ML with Ultralytics YOLOv8 for training? +### How do I integrate Comet ML with Ultralytics YOLO11 for training? -To integrate Comet ML with Ultralytics YOLOv8, follow these steps: +To integrate Comet ML with Ultralytics YOLO11, follow these steps: 1. **Install the required packages**: @@ -203,12 +203,12 @@ To integrate Comet ML with Ultralytics YOLOv8, follow these steps: comet_ml.login(project_name="comet-example-yolov8-coco128") ``` -4. **Train your YOLOv8 model and log metrics**: +4. **Train your YOLO11 model and log metrics**: ```python from ultralytics import YOLO - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") results = model.train( data="coco8.yaml", project="comet-example-yolov8-coco128", @@ -221,9 +221,9 @@ To integrate Comet ML with Ultralytics YOLOv8, follow these steps: For more detailed instructions, refer to the [Comet ML configuration section](#configuring-comet-ml). -### What are the benefits of using Comet ML with YOLOv8? +### What are the benefits of using Comet ML with YOLO11? -By integrating Ultralytics YOLOv8 with Comet ML, you can: +By integrating Ultralytics YOLO11 with Comet ML, you can: - **Monitor real-time insights**: Get instant feedback on your training results, allowing for quick adjustments. - **Log extensive metrics**: Automatically capture essential metrics such as mAP, loss, hyperparameters, and model checkpoints. @@ -232,7 +232,7 @@ By integrating Ultralytics YOLOv8 with Comet ML, you can: By leveraging these features, you can optimize your machine learning workflows for better performance and reproducibility. For more information, visit the [Comet ML integration guide](../integrations/index.md). -### How do I customize the logging behavior of Comet ML during YOLOv8 training? +### How do I customize the logging behavior of Comet ML during YOLO11 training? Comet ML allows for extensive customization of its logging behavior using environment variables: @@ -262,9 +262,9 @@ Comet ML allows for extensive customization of its logging behavior using enviro Refer to the [Customizing Comet ML Logging](#customizing-comet-ml-logging) section for more customization options. -### How do I view detailed metrics and visualizations of my YOLOv8 training on Comet ML? +### How do I view detailed metrics and visualizations of my YOLO11 training on Comet ML? -Once your YOLOv8 model starts training, you can access a wide range of metrics and visualizations on the Comet ML dashboard. 
Key features include: +Once your YOLO11 model starts training, you can access a wide range of metrics and visualizations on the Comet ML dashboard. Key features include: - **Experiment Panels**: View different runs and their metrics, including segment mask loss, class loss, and mean average [precision](https://www.ultralytics.com/glossary/precision). - **Metrics**: Examine metrics in tabular format for detailed analysis. @@ -273,7 +273,7 @@ Once your YOLOv8 model starts training, you can access a wide range of metrics a For a detailed overview of these features, visit the [Understanding Your Model's Performance with Comet ML Visualizations](#understanding-your-models-performance-with-comet-ml-visualizations) section. -### Can I use Comet ML for offline logging when training YOLOv8 models? +### Can I use Comet ML for offline logging when training YOLO11 models? Yes, you can enable offline logging in Comet ML by setting the `COMET_MODE` environment variable to "offline": diff --git a/docs/en/integrations/coreml.md b/docs/en/integrations/coreml.md index 352e175304..41ff20d10f 100644 --- a/docs/en/integrations/coreml.md +++ b/docs/en/integrations/coreml.md @@ -1,14 +1,14 @@ --- comments: true -description: Learn how to export YOLOv8 models to CoreML for optimized, on-device machine learning on iOS and macOS. Follow step-by-step instructions. -keywords: CoreML export, YOLOv8 models, CoreML conversion, Ultralytics, iOS object detection, macOS machine learning, AI deployment, machine learning integration +description: Learn how to export YOLO11 models to CoreML for optimized, on-device machine learning on iOS and macOS. Follow step-by-step instructions. +keywords: CoreML export, YOLO11 models, CoreML conversion, Ultralytics, iOS object detection, macOS machine learning, AI deployment, machine learning integration --- -# CoreML Export for YOLOv8 Models +# CoreML Export for YOLO11 Models Deploying [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) models on Apple devices like iPhones and Macs requires a format that ensures seamless performance. -The CoreML export format allows you to optimize your [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models for efficient [object detection](https://www.ultralytics.com/glossary/object-detection) in iOS and macOS applications. In this guide, we'll walk you through the steps for converting your models to the CoreML format, making it easier for your models to perform well on Apple devices. +The CoreML export format allows you to optimize your [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics) models for efficient [object detection](https://www.ultralytics.com/glossary/object-detection) in iOS and macOS applications. In this guide, we'll walk you through the steps for converting your models to the CoreML format, making it easier for your models to perform well on Apple devices. ## CoreML @@ -40,7 +40,7 @@ Apple's CoreML framework offers robust features for on-device machine learning. ## CoreML Deployment Options -Before we look at the code for exporting YOLOv8 models to the CoreML format, let's understand where CoreML models are usually used. +Before we look at the code for exporting YOLO11 models to the CoreML format, let's understand where CoreML models are usually used. 
CoreML offers various deployment options for machine learning models, including: @@ -52,9 +52,9 @@ CoreML offers various deployment options for machine learning models, including: - **Cloud-Based Deployment**: CoreML models are hosted on servers and accessed by the iOS app through API requests. This scalable and flexible option enables easy model updates without app revisions. It's ideal for complex models or large-scale apps requiring regular updates. However, it does require an internet connection and may pose latency and security issues. -## Exporting YOLOv8 Models to CoreML +## Exporting YOLO11 Models to CoreML -Exporting YOLOv8 to CoreML enables optimized, on-device machine learning performance within Apple's ecosystem, offering benefits in terms of efficiency, security, and seamless integration with iOS, macOS, watchOS, and tvOS platforms. +Exporting YOLO11 to CoreML enables optimized, on-device machine learning performance within Apple's ecosystem, offering benefits in terms of efficiency, security, and seamless integration with iOS, macOS, watchOS, and tvOS platforms. ### Installation @@ -65,15 +65,15 @@ To install the required package, run: === "CLI" ```bash - # Install the required package for YOLOv8 + # Install the required package for YOLO11 pip install ultralytics ``` -For detailed instructions and best practices related to the installation process, check our [YOLOv8 Installation guide](../quickstart.md). While installing the required packages for YOLOv8, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. +For detailed instructions and best practices related to the installation process, check our [YOLO11 Installation guide](../quickstart.md). While installing the required packages for YOLO11, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. ### Usage -Before diving into the usage instructions, be sure to check out the range of [YOLOv8 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements. +Before diving into the usage instructions, be sure to check out the range of [YOLO11 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements. !!! 
example "Usage" @@ -82,14 +82,14 @@ Before diving into the usage instructions, be sure to check out the range of [YO ```python from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Export the model to CoreML format - model.export(format="coreml") # creates 'yolov8n.mlpackage' + model.export(format="coreml") # creates 'yolo11n.mlpackage' # Load the exported CoreML model - coreml_model = YOLO("yolov8n.mlpackage") + coreml_model = YOLO("yolo11n.mlpackage") # Run inference results = coreml_model("https://ultralytics.com/images/bus.jpg") @@ -98,18 +98,18 @@ Before diving into the usage instructions, be sure to check out the range of [YO === "CLI" ```bash - # Export a YOLOv8n PyTorch model to CoreML format - yolo export model=yolov8n.pt format=coreml # creates 'yolov8n.mlpackage'' + # Export a YOLO11n PyTorch model to CoreML format + yolo export model=yolo11n.pt format=coreml # creates 'yolo11n.mlpackage'' # Run inference with the exported model - yolo predict model=yolov8n.mlpackage source='https://ultralytics.com/images/bus.jpg' + yolo predict model=yolo11n.mlpackage source='https://ultralytics.com/images/bus.jpg' ``` For more details about the export process, visit the [Ultralytics documentation page on exporting](../modes/export.md). -## Deploying Exported YOLOv8 CoreML Models +## Deploying Exported YOLO11 CoreML Models -Having successfully exported your Ultralytics YOLOv8 models to CoreML, the next critical phase is deploying these models effectively. For detailed guidance on deploying CoreML models in various environments, check out these resources: +Having successfully exported your Ultralytics YOLO11 models to CoreML, the next critical phase is deploying these models effectively. For detailed guidance on deploying CoreML models in various environments, check out these resources: - **[CoreML Tools](https://apple.github.io/coremltools/docs-guides/)**: This guide includes instructions and examples to convert models from [TensorFlow](https://www.ultralytics.com/glossary/tensorflow), PyTorch, and other libraries to Core ML. @@ -119,17 +119,17 @@ Having successfully exported your Ultralytics YOLOv8 models to CoreML, the next ## Summary -In this guide, we went over how to export Ultralytics YOLOv8 models to CoreML format. By following the steps outlined in this guide, you can ensure maximum compatibility and performance when exporting YOLOv8 models to CoreML. +In this guide, we went over how to export Ultralytics YOLO11 models to CoreML format. By following the steps outlined in this guide, you can ensure maximum compatibility and performance when exporting YOLO11 models to CoreML. For further details on usage, visit the [CoreML official documentation](https://developer.apple.com/documentation/coreml). -Also, if you'd like to know more about other Ultralytics YOLOv8 integrations, visit our [integration guide page](../integrations/index.md). You'll find plenty of valuable resources and insights there. +Also, if you'd like to know more about other Ultralytics YOLO11 integrations, visit our [integration guide page](../integrations/index.md). You'll find plenty of valuable resources and insights there. ## FAQ -### How do I export YOLOv8 models to CoreML format? +### How do I export YOLO11 models to CoreML format? -To export your [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models to CoreML format, you'll first need to ensure you have the `ultralytics` package installed. 
You can install it using: +To export your [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics) models to CoreML format, you'll first need to ensure you have the `ultralytics` package installed. You can install it using: !!! example "Installation" @@ -148,21 +148,21 @@ Next, you can export the model using the following Python or CLI commands: ```python from ultralytics import YOLO - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") model.export(format="coreml") ``` === "CLI" ```bash - yolo export model=yolov8n.pt format=coreml + yolo export model=yolo11n.pt format=coreml ``` -For further details, refer to the [Exporting YOLOv8 Models to CoreML](../modes/export.md) section of our documentation. +For further details, refer to the [Exporting YOLO11 Models to CoreML](../modes/export.md) section of our documentation. -### What are the benefits of using CoreML for deploying YOLOv8 models? +### What are the benefits of using CoreML for deploying YOLO11 models? -CoreML provides numerous advantages for deploying [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models on Apple devices: +CoreML provides numerous advantages for deploying [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics) models on Apple devices: - **On-device Processing**: Enables local model inference on devices, ensuring [data privacy](https://www.ultralytics.com/glossary/data-privacy) and minimizing latency. - **Performance Optimization**: Leverages the full potential of the device's CPU, GPU, and Neural Engine, optimizing both speed and efficiency. @@ -171,9 +171,9 @@ CoreML provides numerous advantages for deploying [Ultralytics YOLOv8](https://g For more details on integrating your CoreML model into an iOS app, check out the guide on [Integrating a Core ML Model into Your App](https://developer.apple.com/documentation/coreml/integrating-a-core-ml-model-into-your-app). -### What are the deployment options for YOLOv8 models exported to CoreML? +### What are the deployment options for YOLO11 models exported to CoreML? -Once you export your YOLOv8 model to CoreML format, you have multiple deployment options: +Once you export your YOLO11 model to CoreML format, you have multiple deployment options: 1. **On-Device Deployment**: Directly integrate CoreML models into your app for enhanced privacy and offline functionality. This can be done as: @@ -184,9 +184,9 @@ Once you export your YOLOv8 model to CoreML format, you have multiple deployment For detailed guidance on deploying CoreML models, refer to [CoreML Deployment Options](#coreml-deployment-options). -### How does CoreML ensure optimized performance for YOLOv8 models? +### How does CoreML ensure optimized performance for YOLO11 models? -CoreML ensures optimized performance for [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models by utilizing various optimization techniques: +CoreML ensures optimized performance for [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics) models by utilizing various optimization techniques: - **Hardware Acceleration**: Uses the device's CPU, GPU, and Neural Engine for efficient computation. - **Model Compression**: Provides tools for compressing models to reduce their footprint without compromising accuracy. @@ -205,14 +205,14 @@ Yes, you can run inference directly using the exported CoreML model. 
Below are t ```python from ultralytics import YOLO - coreml_model = YOLO("yolov8n.mlpackage") + coreml_model = YOLO("yolo11n.mlpackage") results = coreml_model("https://ultralytics.com/images/bus.jpg") ``` === "CLI" ```bash - yolo predict model=yolov8n.mlpackage source='https://ultralytics.com/images/bus.jpg' + yolo predict model=yolo11n.mlpackage source='https://ultralytics.com/images/bus.jpg' ``` For additional information, refer to the [Usage section](#usage) of the CoreML export guide. diff --git a/docs/en/integrations/dvc.md b/docs/en/integrations/dvc.md index 76ba91b432..c90377e06f 100644 --- a/docs/en/integrations/dvc.md +++ b/docs/en/integrations/dvc.md @@ -1,14 +1,14 @@ --- comments: true -description: Unlock seamless YOLOv8 tracking with DVCLive. Discover how to log, visualize, and analyze experiments for optimized ML model performance. -keywords: YOLOv8, DVCLive, experiment tracking, machine learning, model training, data visualization, Git integration +description: Unlock seamless YOLO11 tracking with DVCLive. Discover how to log, visualize, and analyze experiments for optimized ML model performance. +keywords: YOLO11, DVCLive, experiment tracking, machine learning, model training, data visualization, Git integration --- -# Advanced YOLOv8 Experiment Tracking with DVCLive +# Advanced YOLO11 Experiment Tracking with DVCLive Experiment tracking in [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) is critical to model development and evaluation. It involves recording and analyzing various parameters, metrics, and outcomes from numerous training runs. This process is essential for understanding model performance and making data-driven decisions to refine and optimize models. -Integrating DVCLive with [Ultralytics YOLOv8](https://www.ultralytics.com/) transforms the way experiments are tracked and managed. This integration offers a seamless solution for automatically logging key experiment details, comparing results across different runs, and visualizing data for in-depth analysis. In this guide, we'll understand how DVCLive can be used to streamline the process. +Integrating DVCLive with [Ultralytics YOLO11](https://www.ultralytics.com/) transforms the way experiments are tracked and managed. This integration offers a seamless solution for automatically logging key experiment details, comparing results across different runs, and visualizing data for in-depth analysis. In this guide, we'll understand how DVCLive can be used to streamline the process. ## DVCLive @@ -18,9 +18,9 @@ Integrating DVCLive with [Ultralytics YOLOv8](https://www.ultralytics.com/) tran [DVCLive](https://dvc.org/doc/dvclive), developed by DVC, is an innovative open-source tool for experiment tracking in machine learning. Integrating seamlessly with Git and DVC, it automates the logging of crucial experiment data like model parameters and training metrics. Designed for simplicity, DVCLive enables effortless comparison and analysis of multiple runs, enhancing the efficiency of machine learning projects with intuitive [data visualization](https://www.ultralytics.com/glossary/data-visualization) and analysis tools. -## YOLOv8 Training with DVCLive +## YOLO11 Training with DVCLive -YOLOv8 training sessions can be effectively monitored with DVCLive. 
Additionally, DVC provides integral features for visualizing these experiments, including the generation of a report that enables the comparison of metric plots across all tracked experiments, offering a comprehensive view of the training process. +YOLO11 training sessions can be effectively monitored with DVCLive. Additionally, DVC provides integral features for visualizing these experiments, including the generation of a report that enables the comparison of metric plots across all tracked experiments, offering a comprehensive view of the training process. ## Installation @@ -31,11 +31,11 @@ To install the required packages, run: === "CLI" ```bash - # Install the required packages for YOLOv8 and DVCLive + # Install the required packages for YOLO11 and DVCLive pip install ultralytics dvclive ``` -For detailed instructions and best practices related to the installation process, be sure to check our [YOLOv8 Installation guide](../quickstart.md). While installing the required packages for YOLOv8, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. +For detailed instructions and best practices related to the installation process, be sure to check our [YOLO11 Installation guide](../quickstart.md). While installing the required packages for YOLO11, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. ## Configuring DVCLive @@ -66,27 +66,27 @@ In these commands, ensure to replace "you@example.com" with the email address as ## Usage -Before diving into the usage instructions, be sure to check out the range of [YOLOv8 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements. +Before diving into the usage instructions, be sure to check out the range of [YOLO11 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements. -### Training YOLOv8 Models with DVCLive +### Training YOLO11 Models with DVCLive -Start by running your YOLOv8 training sessions. You can use different model configurations and training parameters to suit your project needs. For instance: +Start by running your YOLO11 training sessions. You can use different model configurations and training parameters to suit your project needs. For instance: ```bash -# Example training commands for YOLOv8 with varying configurations -yolo train model=yolov8n.pt data=coco8.yaml epochs=5 imgsz=512 -yolo train model=yolov8n.pt data=coco8.yaml epochs=5 imgsz=640 +# Example training commands for YOLO11 with varying configurations +yolo train model=yolo11n.pt data=coco8.yaml epochs=5 imgsz=512 +yolo train model=yolo11n.pt data=coco8.yaml epochs=5 imgsz=640 ``` -Adjust the model, data, [epochs](https://www.ultralytics.com/glossary/epoch), and imgsz parameters according to your specific requirements. For a detailed understanding of the model training process and best practices, refer to our [YOLOv8 Model Training guide](../modes/train.md). +Adjust the model, data, [epochs](https://www.ultralytics.com/glossary/epoch), and imgsz parameters according to your specific requirements. For a detailed understanding of the model training process and best practices, refer to our [YOLO11 Model Training guide](../modes/train.md). ### Monitoring Experiments with DVCLive -DVCLive enhances the training process by enabling the tracking and visualization of key metrics. 
When installed, Ultralytics YOLOv8 automatically integrates with DVCLive for experiment tracking, which you can later analyze for performance insights. For a comprehensive understanding of the specific performance metrics used during training, be sure to explore [our detailed guide on performance metrics](../guides/yolo-performance-metrics.md). +DVCLive enhances the training process by enabling the tracking and visualization of key metrics. When installed, Ultralytics YOLO11 automatically integrates with DVCLive for experiment tracking, which you can later analyze for performance insights. For a comprehensive understanding of the specific performance metrics used during training, be sure to explore [our detailed guide on performance metrics](../guides/yolo-performance-metrics.md). ### Analyzing Results -After your YOLOv8 training sessions are complete, you can leverage DVCLive's powerful visualization tools for in-depth analysis of the results. DVCLive's integration ensures that all training metrics are systematically logged, facilitating a comprehensive evaluation of your model's performance. +After your YOLO11 training sessions are complete, you can leverage DVCLive's powerful visualization tools for in-depth analysis of the results. DVCLive's integration ensures that all training metrics are systematically logged, facilitating a comprehensive evaluation of your model's performance. To start the analysis, you can extract the experiment data using DVC's API and process it with Pandas for easier handling and visualization: @@ -108,7 +108,7 @@ df.reset_index(drop=True, inplace=True) print(df) ``` -The output of the code snippet above provides a clear tabular view of the different experiments conducted with YOLOv8 models. Each row represents a different training run, detailing the experiment's name, the number of epochs, image size (imgsz), the specific model used, and the mAP50-95(B) metric. This metric is crucial for evaluating the model's [accuracy](https://www.ultralytics.com/glossary/accuracy), with higher values indicating better performance. +The output of the code snippet above provides a clear tabular view of the different experiments conducted with YOLO11 models. Each row represents a different training run, detailing the experiment's name, the number of epochs, image size (imgsz), the specific model used, and the mAP50-95(B) metric. This metric is crucial for evaluating the model's [accuracy](https://www.ultralytics.com/glossary/accuracy), with higher values indicating better performance. #### Visualizing Results with Plotly @@ -164,7 +164,7 @@ Based on your analysis, iterate on your experiments. Adjust model configurations ## Summary -This guide has led you through the process of integrating DVCLive with Ultralytics' YOLOv8. You have learned how to harness the power of DVCLive for detailed experiment monitoring, effective visualization, and insightful analysis in your machine learning endeavors. +This guide has led you through the process of integrating DVCLive with Ultralytics' YOLO11. You have learned how to harness the power of DVCLive for detailed experiment monitoring, effective visualization, and insightful analysis in your machine learning endeavors. For further details on usage, visit [DVCLive's official documentation](https://dvc.org/doc/dvclive/ml-frameworks/yolo). @@ -172,9 +172,9 @@ Additionally, explore more integrations and capabilities of Ultralytics by visit ## FAQ -### How do I integrate DVCLive with Ultralytics YOLOv8 for experiment tracking? 
+### How do I integrate DVCLive with Ultralytics YOLO11 for experiment tracking? -Integrating DVCLive with Ultralytics YOLOv8 is straightforward. Start by installing the necessary packages: +Integrating DVCLive with Ultralytics YOLO11 is straightforward. Start by installing the necessary packages: !!! example "Installation" @@ -198,21 +198,21 @@ Next, initialize a Git repository and configure DVCLive in your project: git commit -m "DVC init" ``` -Follow our [YOLOv8 Installation guide](../quickstart.md) for detailed setup instructions. +Follow our [YOLO11 Installation guide](../quickstart.md) for detailed setup instructions. -### Why should I use DVCLive for tracking YOLOv8 experiments? +### Why should I use DVCLive for tracking YOLO11 experiments? -Using DVCLive with YOLOv8 provides several advantages, such as: +Using DVCLive with YOLO11 provides several advantages, such as: - **Automated Logging**: DVCLive automatically records key experiment details like model parameters and metrics. - **Easy Comparison**: Facilitates comparison of results across different runs. - **Visualization Tools**: Leverages DVCLive's robust data visualization capabilities for in-depth analysis. -For further details, refer to our guide on [YOLOv8 Model Training](../modes/train.md) and [YOLO Performance Metrics](../guides/yolo-performance-metrics.md) to maximize your experiment tracking efficiency. +For further details, refer to our guide on [YOLO11 Model Training](../modes/train.md) and [YOLO Performance Metrics](../guides/yolo-performance-metrics.md) to maximize your experiment tracking efficiency. -### How can DVCLive improve my results analysis for YOLOv8 training sessions? +### How can DVCLive improve my results analysis for YOLO11 training sessions? -After completing your YOLOv8 training sessions, DVCLive helps in visualizing and analyzing the results effectively. Example code for loading and displaying experiment data: +After completing your YOLO11 training sessions, DVCLive helps in visualizing and analyzing the results effectively. Example code for loading and displaying experiment data: ```python import dvc.api @@ -241,11 +241,11 @@ fig = parallel_coordinates(df, columns, color="metrics.mAP50-95(B)") fig.show() ``` -Refer to our guide on [YOLOv8 Training with DVCLive](#yolov8-training-with-dvclive) for more examples and best practices. +Refer to our guide on [YOLO11 Training with DVCLive](#yolo11-training-with-dvclive) for more examples and best practices. -### What are the steps to configure my environment for DVCLive and YOLOv8 integration? +### What are the steps to configure my environment for DVCLive and YOLO11 integration? -To configure your environment for a smooth integration of DVCLive and YOLOv8, follow these steps: +To configure your environment for a smooth integration of DVCLive and YOLO11, follow these steps: 1. **Install Required Packages**: Use `pip install ultralytics dvclive`. 2. **Initialize Git Repository**: Run `git init -q`. @@ -254,9 +254,9 @@ To configure your environment for a smooth integration of DVCLive and YOLOv8, fo These steps ensure proper version control and setup for experiment tracking. For in-depth configuration details, visit our [Configuration guide](../quickstart.md). -### How do I visualize YOLOv8 experiment results using DVCLive? +### How do I visualize YOLO11 experiment results using DVCLive? -DVCLive offers powerful tools to visualize the results of YOLOv8 experiments. 
Here's how you can generate comparative plots: +DVCLive offers powerful tools to visualize the results of YOLO11 experiments. Here's how you can generate comparative plots: !!! example "Generate Comparative Plots" @@ -275,4 +275,4 @@ from IPython.display import HTML HTML(filename="./dvc_plots/index.html") ``` -These visualizations help identify trends and optimize model performance. Check our detailed guides on [YOLOv8 Experiment Analysis](#analyzing-results) for comprehensive steps and examples. +These visualizations help identify trends and optimize model performance. Check our detailed guides on [YOLO11 Experiment Analysis](#analyzing-results) for comprehensive steps and examples. diff --git a/docs/en/integrations/edge-tpu.md b/docs/en/integrations/edge-tpu.md index d72410c5a7..f8821e47bd 100644 --- a/docs/en/integrations/edge-tpu.md +++ b/docs/en/integrations/edge-tpu.md @@ -1,14 +1,14 @@ --- comments: true -description: Learn how to export YOLOv8 models to TFLite Edge TPU format for high-speed, low-power inferencing on mobile and embedded devices. -keywords: YOLOv8, TFLite Edge TPU, TensorFlow Lite, model export, machine learning, edge computing, neural networks, Ultralytics +description: Learn how to export YOLO11 models to TFLite Edge TPU format for high-speed, low-power inferencing on mobile and embedded devices. +keywords: YOLO11, TFLite Edge TPU, TensorFlow Lite, model export, machine learning, edge computing, neural networks, Ultralytics --- -# Learn to Export to TFLite Edge TPU Format From YOLOv8 Model +# Learn to Export to TFLite Edge TPU Format From YOLO11 Model Deploying computer vision models on devices with limited computational power, such as mobile or embedded systems, can be tricky. Using a model format that is optimized for faster performance simplifies the process. The [TensorFlow Lite](https://ai.google.dev/edge/litert) [Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) or TFLite Edge TPU model format is designed to use minimal power while delivering fast performance for neural networks. -The export to TFLite Edge TPU format feature allows you to optimize your [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models for high-speed and low-power inferencing. In this guide, we'll walk you through converting your models to the TFLite Edge TPU format, making it easier for your models to perform well on various mobile and embedded devices. +The export to TFLite Edge TPU format feature allows you to optimize your [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics) models for high-speed and low-power inferencing. In this guide, we'll walk you through converting your models to the TFLite Edge TPU format, making it easier for your models to perform well on various mobile and embedded devices. ## Why Should You Export to TFLite Edge TPU? @@ -32,7 +32,7 @@ Here are the key features that make TFLite Edge TPU a great model format choice ## Deployment Options with TFLite Edge TPU -Before we jump into how to export YOLOv8 models to the TFLite Edge TPU format, let's understand where TFLite Edge TPU models are usually used. +Before we jump into how to export YOLO11 models to the TFLite Edge TPU format, let's understand where TFLite Edge TPU models are usually used. 
TFLite Edge TPU offers various deployment options for machine learning models, including: @@ -42,9 +42,9 @@ TFLite Edge TPU offers various deployment options for machine learning models, i - **Hybrid Deployment**: A hybrid approach combines on-device and cloud deployment and offers a versatile and scalable solution for deploying machine learning models. Advantages include on-device processing for quick responses and [cloud computing](https://www.ultralytics.com/glossary/cloud-computing) for more complex computations. -## Exporting YOLOv8 Models to TFLite Edge TPU +## Exporting YOLO11 Models to TFLite Edge TPU -You can expand model compatibility and deployment flexibility by converting YOLOv8 models to TensorFlow Edge TPU. +You can expand model compatibility and deployment flexibility by converting YOLO11 models to TensorFlow Edge TPU. ### Installation @@ -55,15 +55,15 @@ To install the required package, run: === "CLI" ```bash - # Install the required package for YOLOv8 + # Install the required package for YOLO11 pip install ultralytics ``` -For detailed instructions and best practices related to the installation process, check our [Ultralytics Installation guide](../quickstart.md). While installing the required packages for YOLOv8, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. +For detailed instructions and best practices related to the installation process, check our [Ultralytics Installation guide](../quickstart.md). While installing the required packages for YOLO11, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. ### Usage -Before diving into the usage instructions, it's important to note that while all [Ultralytics YOLOv8 models](../models/index.md) are available for exporting, you can ensure that the model you select supports export functionality [here](../modes/export.md). +Before diving into the usage instructions, it's important to note that while all [Ultralytics YOLO11 models](../models/index.md) are available for exporting, you can ensure that the model you select supports export functionality [here](../modes/export.md). !!! 
example "Usage" @@ -72,14 +72,14 @@ Before diving into the usage instructions, it's important to note that while all ```python from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Export the model to TFLite Edge TPU format - model.export(format="edgetpu") # creates 'yolov8n_full_integer_quant_edgetpu.tflite' + model.export(format="edgetpu") # creates 'yolo11n_full_integer_quant_edgetpu.tflite' # Load the exported TFLite Edge TPU model - edgetpu_model = YOLO("yolov8n_full_integer_quant_edgetpu.tflite") + edgetpu_model = YOLO("yolo11n_full_integer_quant_edgetpu.tflite") # Run inference results = edgetpu_model("https://ultralytics.com/images/bus.jpg") @@ -88,22 +88,22 @@ Before diving into the usage instructions, it's important to note that while all === "CLI" ```bash - # Export a YOLOv8n PyTorch model to TFLite Edge TPU format - yolo export model=yolov8n.pt format=edgetpu # creates 'yolov8n_full_integer_quant_edgetpu.tflite' + # Export a YOLO11n PyTorch model to TFLite Edge TPU format + yolo export model=yolo11n.pt format=edgetpu # creates 'yolo11n_full_integer_quant_edgetpu.tflite' # Run inference with the exported model - yolo predict model=yolov8n_full_integer_quant_edgetpu.tflite source='https://ultralytics.com/images/bus.jpg' + yolo predict model=yolo11n_full_integer_quant_edgetpu.tflite source='https://ultralytics.com/images/bus.jpg' ``` For more details about supported export options, visit the [Ultralytics documentation page on deployment options](../guides/model-deployment-options.md). -## Deploying Exported YOLOv8 TFLite Edge TPU Models +## Deploying Exported YOLO11 TFLite Edge TPU Models -After successfully exporting your Ultralytics YOLOv8 models to TFLite Edge TPU format, you can now deploy them. The primary and recommended first step for running a TFLite Edge TPU model is to use the YOLO("model_edgetpu.tflite") method, as outlined in the previous usage code snippet. +After successfully exporting your Ultralytics YOLO11 models to TFLite Edge TPU format, you can now deploy them. The primary and recommended first step for running a TFLite Edge TPU model is to use the YOLO("model_edgetpu.tflite") method, as outlined in the previous usage code snippet. However, for in-depth instructions on deploying your TFLite Edge TPU models, take a look at the following resources: -- **[Coral Edge TPU on a Raspberry Pi with Ultralytics YOLOv8](../guides/coral-edge-tpu-on-raspberry-pi.md)**: Discover how to integrate Coral Edge TPUs with Raspberry Pi for enhanced machine learning capabilities. +- **[Coral Edge TPU on a Raspberry Pi with Ultralytics YOLO11](../guides/coral-edge-tpu-on-raspberry-pi.md)**: Discover how to integrate Coral Edge TPUs with Raspberry Pi for enhanced machine learning capabilities. - **[Code Examples](https://coral.ai/docs/edgetpu/compiler/)**: Access practical TensorFlow Edge TPU deployment examples to kickstart your projects. @@ -111,17 +111,17 @@ However, for in-depth instructions on deploying your TFLite Edge TPU models, tak ## Summary -In this guide, we've learned how to export Ultralytics YOLOv8 models to TFLite Edge TPU format. By following the steps mentioned above, you can increase the speed and power of your [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) applications. +In this guide, we've learned how to export Ultralytics YOLO11 models to TFLite Edge TPU format. 
By following the steps mentioned above, you can increase the speed and power of your [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) applications. For further details on usage, visit the [Edge TPU official website](https://cloud.google.com/tpu). -Also, for more information on other Ultralytics YOLOv8 integrations, please visit our [integration guide page](index.md). There, you'll discover valuable resources and insights. +Also, for more information on other Ultralytics YOLO11 integrations, please visit our [integration guide page](index.md). There, you'll discover valuable resources and insights. ## FAQ -### How do I export a YOLOv8 model to TFLite Edge TPU format? +### How do I export a YOLO11 model to TFLite Edge TPU format? -To export a YOLOv8 model to TFLite Edge TPU format, you can follow these steps: +To export a YOLO11 model to TFLite Edge TPU format, you can follow these steps: !!! example "Usage" @@ -130,14 +130,14 @@ To export a YOLOv8 model to TFLite Edge TPU format, you can follow these steps: ```python from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Export the model to TFLite Edge TPU format - model.export(format="edgetpu") # creates 'yolov8n_full_integer_quant_edgetpu.tflite' + model.export(format="edgetpu") # creates 'yolo11n_full_integer_quant_edgetpu.tflite' # Load the exported TFLite Edge TPU model - edgetpu_model = YOLO("yolov8n_full_integer_quant_edgetpu.tflite") + edgetpu_model = YOLO("yolo11n_full_integer_quant_edgetpu.tflite") # Run inference results = edgetpu_model("https://ultralytics.com/images/bus.jpg") @@ -146,18 +146,18 @@ To export a YOLOv8 model to TFLite Edge TPU format, you can follow these steps: === "CLI" ```bash - # Export a YOLOv8n PyTorch model to TFLite Edge TPU format - yolo export model=yolov8n.pt format=edgetpu # creates 'yolov8n_full_integer_quant_edgetpu.tflite' + # Export a YOLO11n PyTorch model to TFLite Edge TPU format + yolo export model=yolo11n.pt format=edgetpu # creates 'yolo11n_full_integer_quant_edgetpu.tflite' # Run inference with the exported model - yolo predict model=yolov8n_full_integer_quant_edgetpu.tflite source='https://ultralytics.com/images/bus.jpg' + yolo predict model=yolo11n_full_integer_quant_edgetpu.tflite source='https://ultralytics.com/images/bus.jpg' ``` For complete details on exporting models to other formats, refer to our [export guide](../modes/export.md). -### What are the benefits of exporting YOLOv8 models to TFLite Edge TPU? +### What are the benefits of exporting YOLO11 models to TFLite Edge TPU? -Exporting YOLOv8 models to TFLite Edge TPU offers several benefits: +Exporting YOLO11 models to TFLite Edge TPU offers several benefits: - **Optimized Performance**: Achieve high-speed neural network performance with minimal power consumption. - **Reduced Latency**: Quick local data processing without the need for cloud dependency. diff --git a/docs/en/integrations/google-colab.md b/docs/en/integrations/google-colab.md index 2c45528c19..2c242f6f82 100644 --- a/docs/en/integrations/google-colab.md +++ b/docs/en/integrations/google-colab.md @@ -1,14 +1,14 @@ --- comments: true -description: Learn how to efficiently train Ultralytics YOLOv8 models using Google Colab's powerful cloud-based environment. Start your project with ease. 
-keywords: YOLOv8, Google Colab, machine learning, deep learning, model training, GPU, TPU, cloud computing, Jupyter Notebook, Ultralytics +description: Learn how to efficiently train Ultralytics YOLO11 models using Google Colab's powerful cloud-based environment. Start your project with ease. +keywords: YOLO11, Google Colab, machine learning, deep learning, model training, GPU, TPU, cloud computing, Jupyter Notebook, Ultralytics --- -# Accelerating YOLOv8 Projects with Google Colab +# Accelerating YOLO11 Projects with Google Colab Many developers lack the powerful computing resources needed to build [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models. Acquiring high-end hardware or renting a decent GPU can be expensive. Google Colab is a great solution to this. It's a browser-based platform that allows you to work with large datasets, develop complex models, and share your work with others without a huge cost. -You can use Google Colab to work on projects related to [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models. Google Colab's user-friendly environment is well suited for efficient model development and experimentation. Let's learn more about Google Colab, its key features, and how you can use it to train YOLOv8 models. +You can use Google Colab to work on projects related to [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics) models. Google Colab's user-friendly environment is well suited for efficient model development and experimentation. Let's learn more about Google Colab, its key features, and how you can use it to train YOLO11 models. ## Google Colaboratory @@ -16,15 +16,15 @@ Google Colaboratory, commonly known as Google Colab, was developed by Google Res You can use Google Colab regardless of the specifications and configurations of your local computer. All you need is a Google account and a web browser, and you're good to go. -## Training YOLOv8 Using Google Colaboratory +## Training YOLO11 Using Google Colaboratory -Training YOLOv8 models on Google Colab is pretty straightforward. Thanks to the integration, you can access the [Google Colab YOLOv8 Notebook](https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb) and start training your model immediately. For a detailed understanding of the model training process and best practices, refer to our [YOLOv8 Model Training guide](../modes/train.md). +Training YOLO11 models on Google Colab is pretty straightforward. Thanks to the integration, you can access the [Google Colab YOLO11 Notebook](https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb) and start training your model immediately. For a detailed understanding of the model training process and best practices, refer to our [YOLO11 Model Training guide](../modes/train.md). Sign in to your Google account and run the notebook's cells to train your model. -![Training YOLOv8 Using Google Colab](https://github.com/ultralytics/docs/releases/download/0/training-yolov8-using-google-colab.avif) +![Training YOLO11 Using Google Colab](https://github.com/ultralytics/docs/releases/download/0/training-yolov8-using-google-colab.avif) -Learn how to train a YOLOv8 model with custom data on YouTube with Nicolai. Check out the guide below. +Learn how to train a YOLO11 model with custom data on YouTube with Nicolai. Check out the guide below.
@@ -34,7 +34,7 @@ Learn how to train a YOLOv8 model with custom data on YouTube with Nicolai. Chec allowfullscreen>
- Watch: How to Train Ultralytics YOLOv8 models on Your Custom Dataset in Google Colab | Episode 3 + Watch: How to Train Ultralytics YOLO11 models on Your Custom Dataset in Google Colab | Episode 3

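In practice, the notebook's training cells boil down to a few lines of the Ultralytics Python API. Here's a minimal sketch of such a cell, assuming the small `coco8.yaml` demo dataset that ships with the package stands in for your own data config:

```python
from ultralytics import YOLO

# Load a pretrained YOLO11 model
model = YOLO("yolo11n.pt")

# Train for a few epochs on the bundled COCO8 demo dataset;
# point `data` at your own dataset YAML for real projects
results = model.train(data="coco8.yaml", epochs=3, imgsz=640)
```

When a GPU runtime is selected in Colab, training runs on the accelerator automatically with no extra configuration.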
### Common Questions While Working with Google Colab @@ -75,9 +75,9 @@ Now, let's look at some of the standout features that make Google Colab a go-to - **Educational Resources:** Google Colab offers a range of tutorials and example notebooks to help users learn and explore various functionalities. -## Why Should You Use Google Colab for Your YOLOv8 Projects? +## Why Should You Use Google Colab for Your YOLO11 Projects? -There are many options for training and evaluating YOLOv8 models, so what makes the integration with Google Colab unique? Let's explore the advantages of this integration: +There are many options for training and evaluating YOLO11 models, so what makes the integration with Google Colab unique? Let's explore the advantages of this integration: - **Zero Setup:** Since Colab runs in the cloud, users can start training models immediately without the need for complex environment setups. Just create an account and start coding. @@ -95,7 +95,7 @@ There are many options for training and evaluating YOLOv8 models, so what makes If you'd like to dive deeper into Google Colab, here are a few resources to guide you. -- **[Training Custom Datasets with Ultralytics YOLOv8 in Google Colab](https://www.ultralytics.com/blog/training-custom-datasets-with-ultralytics-yolov8-in-google-colab)**: Learn how to train custom datasets with Ultralytics YOLOv8 on Google Colab. This comprehensive blog post will take you through the entire process, from initial setup to the training and evaluation stages. +- **[Training Custom Datasets with Ultralytics YOLO11 in Google Colab](https://www.ultralytics.com/blog/training-custom-datasets-with-ultralytics-yolov8-in-google-colab)**: Learn how to train custom datasets with Ultralytics YOLO11 on Google Colab. This comprehensive blog post will take you through the entire process, from initial setup to the training and evaluation stages. - **[Curated Notebooks](https://colab.google/notebooks/)**: Here you can explore a series of organized and educational notebooks, each grouped by specific topic areas. @@ -103,21 +103,21 @@ If you'd like to dive deeper into Google Colab, here are a few resources to guid ## Summary -We've discussed how you can easily experiment with Ultralytics YOLOv8 models on Google Colab. You can use Google Colab to train and evaluate your models on GPUs and TPUs with a few clicks. +We've discussed how you can easily experiment with Ultralytics YOLO11 models on Google Colab. You can use Google Colab to train and evaluate your models on GPUs and TPUs with a few clicks. For more details, visit [Google Colab's FAQ page](https://research.google.com/colaboratory/intl/en-GB/faq.html). -Interested in more YOLOv8 integrations? Visit the [Ultralytics integration guide page](index.md) to explore additional tools and capabilities that can improve your machine-learning projects. +Interested in more YOLO11 integrations? Visit the [Ultralytics integration guide page](index.md) to explore additional tools and capabilities that can improve your machine-learning projects. ## FAQ -### How do I start training Ultralytics YOLOv8 models on Google Colab? +### How do I start training Ultralytics YOLO11 models on Google Colab? -To start training Ultralytics YOLOv8 models on Google Colab, sign in to your Google account, then access the [Google Colab YOLOv8 Notebook](https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb). This notebook guides you through the setup and training process. 
After launching the notebook, run the cells step-by-step to train your model. For a full guide, refer to the [YOLOv8 Model Training guide](../modes/train.md). +To start training Ultralytics YOLO11 models on Google Colab, sign in to your Google account, then access the [Google Colab YOLO11 Notebook](https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb). This notebook guides you through the setup and training process. After launching the notebook, run the cells step-by-step to train your model. For a full guide, refer to the [YOLO11 Model Training guide](../modes/train.md). -### What are the advantages of using Google Colab for training YOLOv8 models? +### What are the advantages of using Google Colab for training YOLO11 models? -Google Colab offers several advantages for training YOLOv8 models: +Google Colab offers several advantages for training YOLO11 models: - **Zero Setup:** No initial environment setup is required; just log in and start coding. - **Free GPU Access:** Use powerful GPUs or TPUs without the need for expensive hardware. @@ -126,7 +126,7 @@ Google Colab offers several advantages for training YOLOv8 models: For more information on why you should use Google Colab, explore the [training guide](../modes/train.md) and visit the [Google Colab page](https://colab.google/notebooks/). -### How can I handle Google Colab session timeouts during YOLOv8 training? +### How can I handle Google Colab session timeouts during YOLO11 training? Google Colab sessions timeout due to inactivity, especially for free users. To handle this: @@ -136,9 +136,9 @@ Google Colab sessions timeout due to inactivity, especially for free users. To h For more tips on managing your Colab session, visit the [Google Colab FAQ page](https://research.google.com/colaboratory/intl/en-GB/faq.html). -### Can I use custom datasets for training YOLOv8 models in Google Colab? +### Can I use custom datasets for training YOLO11 models in Google Colab? -Yes, you can use custom datasets to train YOLOv8 models in Google Colab. Upload your dataset to Google Drive and load it directly into your Colab notebook. You can follow Nicolai's YouTube guide, [How to Train YOLOv8 Models on Your Custom Dataset](https://www.youtube.com/watch?v=LNwODJXcvt4), or refer to the [Custom Dataset Training guide](https://www.ultralytics.com/blog/training-custom-datasets-with-ultralytics-yolov8-in-google-colab) for detailed steps. +Yes, you can use custom datasets to train YOLO11 models in Google Colab. Upload your dataset to Google Drive and load it directly into your Colab notebook. You can follow Nicolai's YouTube guide, [How to Train YOLO11 Models on Your Custom Dataset](https://www.youtube.com/watch?v=LNwODJXcvt4), or refer to the [Custom Dataset Training guide](https://www.ultralytics.com/blog/training-custom-datasets-with-ultralytics-yolov8-in-google-colab) for detailed steps. ### What should I do if my Google Colab training session is interrupted? diff --git a/docs/en/integrations/gradio.md b/docs/en/integrations/gradio.md index 1dc8801b4e..3199a519cc 100644 --- a/docs/en/integrations/gradio.md +++ b/docs/en/integrations/gradio.md @@ -1,14 +1,14 @@ --- comments: true -description: Discover an interactive way to perform object detection with Ultralytics YOLOv8 using Gradio. Upload images and adjust settings for real-time results. 
-keywords: Ultralytics, YOLOv8, Gradio, object detection, interactive, real-time, image processing, AI +description: Discover an interactive way to perform object detection with Ultralytics YOLO11 using Gradio. Upload images and adjust settings for real-time results. +keywords: Ultralytics, YOLO11, Gradio, object detection, interactive, real-time, image processing, AI --- -# Interactive [Object Detection](https://www.ultralytics.com/glossary/object-detection): Gradio & Ultralytics YOLOv8 🚀 +# Interactive [Object Detection](https://www.ultralytics.com/glossary/object-detection): Gradio & Ultralytics YOLO11 🚀 ## Introduction to Interactive Object Detection -This Gradio interface provides an easy and interactive way to perform object detection using the [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) model. Users can upload images and adjust parameters like confidence threshold and intersection-over-union (IoU) threshold to get real-time detection results. +This Gradio interface provides an easy and interactive way to perform object detection using the [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics/) model. Users can upload images and adjust parameters like confidence threshold and intersection-over-union (IoU) threshold to get real-time detection results.
@@ -18,7 +18,7 @@ This Gradio interface provides an easy and interactive way to perform object det allowfullscreen>
- Watch: Gradio Integration with Ultralytics YOLOv8 + Watch: Gradio Integration with Ultralytics YOLO11

## Why Use Gradio for Object Detection?
@@ -52,7 +52,7 @@ pip install gradio

## Usage Example

-This section provides the Python code used to create the Gradio interface with the Ultralytics YOLOv8 model. Supports classification tasks, detection tasks, segmentation tasks, and key point tasks.
+This section provides the Python code used to create the Gradio interface with the Ultralytics YOLO11 model. It supports classification, detection, segmentation, and keypoint tasks.

```python
import gradio as gr
@@ -60,11 +60,11 @@ import PIL.Image as Image

from ultralytics import ASSETS, YOLO

-model = YOLO("yolov8n.pt")
+model = YOLO("yolo11n.pt")


def predict_image(img, conf_threshold, iou_threshold):
-    """Predicts objects in an image using a YOLOv8 model with adjustable confidence and IOU thresholds."""
+    """Predicts objects in an image using a YOLO11 model with adjustable confidence and IOU thresholds."""
    results = model.predict(
        source=img,
        conf=conf_threshold,
@@ -90,7 +90,7 @@ iface = gr.Interface(
    ],
    outputs=gr.Image(type="pil", label="Result"),
    title="Ultralytics Gradio",
-    description="Upload images for inference. The Ultralytics YOLOv8n model is used by default.",
+    description="Upload images for inference. The Ultralytics YOLO11n model is used by default.",
    examples=[
        [ASSETS / "bus.jpg", 0.25, 0.45],
        [ASSETS / "zidane.jpg", 0.25, 0.45],
@@ -119,9 +119,9 @@ if __name__ == "__main__":

## FAQ

-### How do I use Gradio with Ultralytics YOLOv8 for object detection?
+### How do I use Gradio with Ultralytics YOLO11 for object detection?

-To use Gradio with Ultralytics YOLOv8 for object detection, you can follow these steps:
+To use Gradio with Ultralytics YOLO11 for object detection, you can follow these steps:

1. **Install Gradio:** Use the command `pip install gradio`.
2. **Create Interface:** Write a Python script to initialize the Gradio interface. You can refer to the provided code example in the [documentation](#usage-example) for details.
@@ -134,7 +134,7 @@ import gradio as gr

from ultralytics import YOLO

-model = YOLO("yolov8n.pt")
+model = YOLO("yolo11n.pt")


def predict_image(img, conf_threshold, iou_threshold):
@@ -156,15 +156,15 @@ iface = gr.Interface(
        gr.Slider(minimum=0, maximum=1, value=0.45, label="IoU threshold"),
    ],
    outputs=gr.Image(type="pil", label="Result"),
-    title="Ultralytics Gradio YOLOv8",
-    description="Upload images for YOLOv8 object detection.",
+    title="Ultralytics Gradio YOLO11",
+    description="Upload images for YOLO11 object detection.",
)

iface.launch()
```

-### What are the benefits of using Gradio for Ultralytics YOLOv8 object detection?
+### What are the benefits of using Gradio for Ultralytics YOLO11 object detection?

-Using Gradio for Ultralytics YOLOv8 object detection offers several benefits:
+Using Gradio for Ultralytics YOLO11 object detection offers several benefits:

- **User-Friendly Interface:** Gradio provides an intuitive interface for users to upload images and visualize detection results without any coding effort.
- **Real-Time Adjustments:** You can dynamically adjust detection parameters such as confidence and IoU thresholds and see the effects immediately.
@@ -172,22 +172,22 @@ Using Gradio for Ultralytics YOLOv8 object detection offers several benefits:

For more details, you can read this [blog post](https://www.ultralytics.com/blog/ai-and-radiology-a-new-era-of-precision-and-efficiency).

-### Can I use Gradio and Ultralytics YOLOv8 together for educational purposes?
+### Can I use Gradio and Ultralytics YOLO11 together for educational purposes? -Yes, Gradio and Ultralytics YOLOv8 can be utilized together for educational purposes effectively. Gradio's intuitive web interface makes it easy for students and educators to interact with state-of-the-art [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models like Ultralytics YOLOv8 without needing advanced programming skills. This setup is ideal for demonstrating key concepts in object detection and [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv), as Gradio provides immediate visual feedback which helps in understanding the impact of different parameters on the detection performance. +Yes, Gradio and Ultralytics YOLO11 can be utilized together for educational purposes effectively. Gradio's intuitive web interface makes it easy for students and educators to interact with state-of-the-art [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models like Ultralytics YOLO11 without needing advanced programming skills. This setup is ideal for demonstrating key concepts in object detection and [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv), as Gradio provides immediate visual feedback which helps in understanding the impact of different parameters on the detection performance. -### How do I adjust the confidence and IoU thresholds in the Gradio interface for YOLOv8? +### How do I adjust the confidence and IoU thresholds in the Gradio interface for YOLO11? -In the Gradio interface for YOLOv8, you can adjust the confidence and IoU thresholds using the sliders provided. These thresholds help control the prediction [accuracy](https://www.ultralytics.com/glossary/accuracy) and object separation: +In the Gradio interface for YOLO11, you can adjust the confidence and IoU thresholds using the sliders provided. These thresholds help control the prediction [accuracy](https://www.ultralytics.com/glossary/accuracy) and object separation: - **Confidence Threshold:** Determines the minimum confidence level for detecting objects. Slide to increase or decrease the confidence required. - **IoU Threshold:** Sets the intersection-over-union threshold for distinguishing between overlapping objects. Adjust this value to refine object separation. For more information on these parameters, visit the [parameters explanation section](#parameters-explanation). -### What are some practical applications of using Ultralytics YOLOv8 with Gradio? +### What are some practical applications of using Ultralytics YOLO11 with Gradio? -Practical applications of combining Ultralytics YOLOv8 with Gradio include: +Practical applications of combining Ultralytics YOLO11 with Gradio include: - **Real-Time Object Detection Demonstrations:** Ideal for showcasing how object detection works in real-time. - **Educational Tools:** Useful in academic settings to teach object detection and computer vision concepts. @@ -196,4 +196,4 @@ Practical applications of combining Ultralytics YOLOv8 with Gradio include: For examples of similar use cases, check out the [Ultralytics blog](https://www.ultralytics.com/blog/monitoring-animal-behavior-using-ultralytics-yolov8). -Providing this information within the documentation will help in enhancing the usability and accessibility of Ultralytics YOLOv8, making it more approachable for users at all levels of expertise. 
+Providing this information within the documentation will help in enhancing the usability and accessibility of Ultralytics YOLO11, making it more approachable for users at all levels of expertise. diff --git a/docs/en/integrations/ibm-watsonx.md b/docs/en/integrations/ibm-watsonx.md index cda19b055c..9b820e7f34 100644 --- a/docs/en/integrations/ibm-watsonx.md +++ b/docs/en/integrations/ibm-watsonx.md @@ -1,14 +1,14 @@ --- comments: true -description: Dive into our detailed integration guide on using IBM Watson to train a YOLOv8 model. Uncover key features and step-by-step instructions on model training. -keywords: IBM Watsonx, IBM Watsonx AI, What is Watson?, IBM Watson Integration, IBM Watson Features, YOLOv8, Ultralytics, Model Training, GPU, TPU, cloud computing +description: Dive into our detailed integration guide on using IBM Watson to train a YOLO11 model. Uncover key features and step-by-step instructions on model training. +keywords: IBM Watsonx, IBM Watsonx AI, What is Watson?, IBM Watson Integration, IBM Watson Features, YOLO11, Ultralytics, Model Training, GPU, TPU, cloud computing --- -# A Step-by-Step Guide to Training YOLOv8 Models with IBM Watsonx +# A Step-by-Step Guide to Training YOLO11 Models with IBM Watsonx Nowadays, scalable [computer vision solutions](../guides/steps-of-a-cv-project.md) are becoming more common and transforming the way we handle visual data. A great example is IBM Watsonx, an advanced AI and data platform that simplifies the development, deployment, and management of AI models. It offers a complete suite for the entire AI lifecycle and seamless integration with IBM Cloud services. -You can train [Ultralytics YOLOv8 models](https://github.com/ultralytics/ultralytics) using IBM Watsonx. It's a good option for enterprises interested in efficient [model training](../modes/train.md), fine-tuning for specific tasks, and improving [model performance](../guides/model-evaluation-insights.md) with robust tools and a user-friendly setup. In this guide, we'll walk you through the process of training YOLOv8 with IBM Watsonx, covering everything from setting up your environment to evaluating your trained models. Let's get started! +You can train [Ultralytics YOLO11 models](https://github.com/ultralytics/ultralytics) using IBM Watsonx. It's a good option for enterprises interested in efficient [model training](../modes/train.md), fine-tuning for specific tasks, and improving [model performance](../guides/model-evaluation-insights.md) with robust tools and a user-friendly setup. In this guide, we'll walk you through the process of training YOLO11 with IBM Watsonx, covering everything from setting up your environment to evaluating your trained models. Let's get started! ## What is IBM Watsonx? @@ -36,9 +36,9 @@ Watsonx.data supports both cloud and on-premises deployments through the IBM Sto Watsonx.governance makes compliance easier by automatically identifying regulatory changes and enforcing policies. It links requirements to internal risk data and provides up-to-date AI factsheets. The platform helps manage risk with alerts and tools to detect issues such as [bias and drift](../guides/model-monitoring-and-maintenance.md). It also automates the monitoring and documentation of the AI lifecycle, organizes AI development with a model inventory, and enhances collaboration with user-friendly dashboards and reporting tools. 
-## How to Train YOLOv8 Using IBM Watsonx
+## How to Train YOLO11 Using IBM Watsonx

-You can use IBM Watsonx to accelerate your YOLOv8 model training workflow.
+You can use IBM Watsonx to accelerate your YOLO11 model training workflow.

### Prerequisites

@@ -67,7 +67,7 @@ Next, you can install and import the necessary Python libraries.

        pip install ultralytics==8.0.196
        ```

-For detailed instructions and best practices related to the installation process, check our [Ultralytics Installation guide](../quickstart.md). While installing the required packages for YOLOv8, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips.
+For detailed instructions and best practices related to the installation process, check our [Ultralytics Installation guide](../quickstart.md). While installing the required packages for YOLO11, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips.

Then, you can import the needed packages.

@@ -86,7 +86,7 @@ Then, you can import the needed packages.

### Step 3: Load the Data

-For this tutorial, we will use a [marine litter dataset](https://www.kaggle.com/datasets/atiqishrak/trash-dataset-icra19) available on Kaggle. With this dataset, we will custom-train a YOLOv8 model to detect and classify litter and biological objects in underwater images.
+For this tutorial, we will use a [marine litter dataset](https://www.kaggle.com/datasets/atiqishrak/trash-dataset-icra19) available on Kaggle. With this dataset, we will custom-train a YOLO11 model to detect and classify litter and biological objects in underwater images.

We can load the dataset directly into the notebook using the Kaggle API. First, create a free Kaggle account. Once you have created an account, you'll need to generate an API key. Directions for generating your key can be found in the [Kaggle API documentation](https://github.com/Kaggle/kaggle-api/blob/main/docs/README.md) under the section "API credentials".

@@ -236,34 +236,34 @@ Run the following script to delete the current contents of config.yaml and repla

        print(f"{file_path} updated successfully.")
        ```

-### Step 5: Train the YOLOv8 model
+### Step 5: Train the YOLO11 model

-Run the following command-line code to fine tune a pretrained default YOLOv8 model.
+Run the following command-line code to fine-tune a pretrained default YOLO11 model.

-!!! example "Train the YOLOv8 model"
+!!! example "Train the YOLO11 model"

    === "CLI"

        ```bash
-        !yolo task=detect mode=train data={work_dir}/trash_ICRA19/config.yaml model=yolov8s.pt epochs=2 batch=32 lr0=.04 plots=True
+        !yolo task=detect mode=train data={work_dir}/trash_ICRA19/config.yaml model=yolo11n.pt epochs=2 batch=32 lr0=.04 plots=True
        ```

Here's a closer look at the parameters in the model training command:

- **task**: It specifies the [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) task for which you are using the specified YOLO model and data set.
- **mode**: Denotes the purpose for which you are loading the specified model and data. Since we are training a model, it is set to "train." Later, when we test our model's performance, we will set it to "predict."
-- **epochs**: This delimits the number of times YOLOv8 will pass through our entire data set.
+- **epochs**: This sets the number of times YOLO11 will pass through our entire data set.
- **batch**: The numerical value stipulates the training [batch sizes](https://www.ultralytics.com/glossary/batch-size). Batches are the number of images a model processes before it updates its parameters. - **lr0**: Specifies the model's initial [learning rate](https://www.ultralytics.com/glossary/learning-rate). - **plots**: Directs YOLO to generate and save plots of our model's training and evaluation metrics. -For a detailed understanding of the model training process and best practices, refer to the [YOLOv8 Model Training guide](../modes/train.md). This guide will help you get the most out of your experiments and ensure you're using YOLOv8 effectively. +For a detailed understanding of the model training process and best practices, refer to the [YOLO11 Model Training guide](../modes/train.md). This guide will help you get the most out of your experiments and ensure you're using YOLO11 effectively. ### Step 6: Test the Model We can now run inference to test the performance of our fine-tuned model: -!!! example "Test the YOLOv8 model" +!!! example "Test the YOLO11 model" === "CLI" @@ -312,11 +312,11 @@ Unlike precision, recall moves in the opposite direction, showing greater recall ### Step 8: Calculating [Intersection Over Union](https://www.ultralytics.com/glossary/intersection-over-union-iou) -You can measure the prediction [accuracy](https://www.ultralytics.com/glossary/accuracy) by calculating the IoU between a predicted bounding box and a ground truth bounding box for the same object. Check out [IBM's tutorial on training YOLOv8](https://developer.ibm.com/tutorials/awb-train-yolo-object-detection-model-in-python/) for more details. +You can measure the prediction [accuracy](https://www.ultralytics.com/glossary/accuracy) by calculating the IoU between a predicted bounding box and a ground truth bounding box for the same object. Check out [IBM's tutorial on training YOLO11](https://developer.ibm.com/tutorials/awb-train-yolo-object-detection-model-in-python/) for more details. ## Summary -We explored IBM Watsonx key features, and how to train a YOLOv8 model using IBM Watsonx. We also saw how IBM Watsonx can enhance your AI workflows with advanced tools for model building, data management, and compliance. +We explored IBM Watsonx key features, and how to train a YOLO11 model using IBM Watsonx. We also saw how IBM Watsonx can enhance your AI workflows with advanced tools for model building, data management, and compliance. For further details on usage, visit [IBM Watsonx official documentation](https://www.ibm.com/watsonx). @@ -324,9 +324,9 @@ Also, be sure to check out the [Ultralytics integration guide page](./index.md), ## FAQ -### How do I train a YOLOv8 model using IBM Watsonx? +### How do I train a YOLO11 model using IBM Watsonx? -To train a YOLOv8 model using IBM Watsonx, follow these steps: +To train a YOLO11 model using IBM Watsonx, follow these steps: 1. **Set Up Your Environment**: Create an IBM Cloud account and set up a Watsonx.ai project. Use a Jupyter Notebook for your coding environment. 2. **Install Libraries**: Install necessary libraries like `torch`, `opencv`, and `ultralytics`. @@ -335,7 +335,7 @@ To train a YOLOv8 model using IBM Watsonx, follow these steps: 5. **Train the Model**: Use the YOLO command-line interface to train your model with specific parameters like `epochs`, `batch size`, and `learning rate`. 6. **Test and Evaluate**: Run inference to test the model and evaluate its performance using metrics like precision and recall. 
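As a rough sketch, steps 2, 5, and 6 above translate to the Python API as follows. The dataset config path comes from Step 4 of this guide, and the test-image path is a placeholder for your own split:

```python
from ultralytics import YOLO

# Step 2: with `ultralytics` installed, load a pretrained YOLO11 model
model = YOLO("yolo11n.pt")

# Step 5: fine-tune with the same hyperparameters as the guide's CLI command
model.train(data="trash_ICRA19/config.yaml", epochs=2, batch=32, lr0=0.04, plots=True)

# Step 6: run inference on held-out images to sanity-check the fine-tuned weights
results = model.predict(source="path/to/test/images", conf=0.5)  # placeholder path
```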
-For detailed instructions, refer to our [YOLOv8 Model Training guide](../modes/train.md). +For detailed instructions, refer to our [YOLO11 Model Training guide](../modes/train.md). ### What are the key features of IBM Watsonx for AI model training? @@ -347,20 +347,20 @@ IBM Watsonx offers several key features for AI model training: For more information, visit the [IBM Watsonx official documentation](https://www.ibm.com/watsonx). -### Why should I use IBM Watsonx for training Ultralytics YOLOv8 models? +### Why should I use IBM Watsonx for training Ultralytics YOLO11 models? -IBM Watsonx is an excellent choice for training Ultralytics YOLOv8 models due to its comprehensive suite of tools that streamline the AI lifecycle. Key benefits include: +IBM Watsonx is an excellent choice for training Ultralytics YOLO11 models due to its comprehensive suite of tools that streamline the AI lifecycle. Key benefits include: - **Scalability**: Easily scale your model training with IBM Cloud services. - **Integration**: Seamlessly integrate with various data sources and APIs. - **User-Friendly Interface**: Simplifies the development process with a collaborative and intuitive interface. - **Advanced Tools**: Access to powerful tools like the Prompt Lab, Tuning Studio, and Flows Engine for enhancing model performance. -Learn more about [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) and how to train models using IBM Watsonx in our [integration guide](./index.md). +Learn more about [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics) and how to train models using IBM Watsonx in our [integration guide](./index.md). -### How can I preprocess my dataset for YOLOv8 training on IBM Watsonx? +### How can I preprocess my dataset for YOLO11 training on IBM Watsonx? -To preprocess your dataset for YOLOv8 training on IBM Watsonx: +To preprocess your dataset for YOLO11 training on IBM Watsonx: 1. **Organize Directories**: Ensure your dataset follows the YOLO directory structure with separate subdirectories for images and labels within the train/val/test split. 2. **Update .yaml File**: Modify the `.yaml` configuration file to reflect the new directory structure and class names. @@ -399,9 +399,9 @@ if __name__ == "__main__": For more details, refer to our [data preprocessing guide](../guides/preprocessing_annotated_data.md). -### What are the prerequisites for training a YOLOv8 model on IBM Watsonx? +### What are the prerequisites for training a YOLO11 model on IBM Watsonx? -Before you start training a YOLOv8 model on IBM Watsonx, ensure you have the following prerequisites: +Before you start training a YOLO11 model on IBM Watsonx, ensure you have the following prerequisites: - **IBM Cloud Account**: Create an account on IBM Cloud to access Watsonx.ai. - **Kaggle Account**: For loading datasets, you'll need a Kaggle account and an API key. diff --git a/docs/en/integrations/index.md b/docs/en/integrations/index.md index 391b1ecb81..bb4de86c81 100644 --- a/docs/en/integrations/index.md +++ b/docs/en/integrations/index.md @@ -18,7 +18,7 @@ Welcome to the Ultralytics Integrations page! This page provides an overview of allowfullscreen>
- Watch: Ultralytics YOLOv8 Deployment and Integrations + Watch: Ultralytics YOLO11 Deployment and Integrations

## Datasets Integrations @@ -47,7 +47,7 @@ Welcome to the Ultralytics Integrations page! This page provides an overview of - [Amazon SageMaker](amazon-sagemaker.md): Leverage Amazon SageMaker to efficiently build, train, and deploy Ultralytics models, providing an all-in-one platform for the ML lifecycle. -- [Paperspace Gradient](paperspace.md): Paperspace Gradient simplifies working on YOLOv8 projects by providing easy-to-use cloud tools for training, testing, and deploying your models quickly. +- [Paperspace Gradient](paperspace.md): Paperspace Gradient simplifies working on YOLO11 projects by providing easy-to-use cloud tools for training, testing, and deploying your models quickly. - [Google Colab](google-colab.md): Use Google Colab to train and evaluate Ultralytics models in a cloud-based environment that supports collaboration and sharing. @@ -111,7 +111,7 @@ Let's collaborate to make the Ultralytics YOLO ecosystem more expansive and feat ### What is Ultralytics HUB, and how does it streamline the ML workflow? -Ultralytics HUB is a cloud-based platform designed to make machine learning (ML) workflows for Ultralytics models seamless and efficient. By using this tool, you can easily upload datasets, train models, perform real-time tracking, and deploy YOLOv8 models without needing extensive coding skills. You can explore the key features on the [Ultralytics HUB](https://hub.ultralytics.com/) page and get started quickly with our [Quickstart](https://docs.ultralytics.com/hub/quickstart/) guide. +Ultralytics HUB is a cloud-based platform designed to make machine learning (ML) workflows for Ultralytics models seamless and efficient. By using this tool, you can easily upload datasets, train models, perform real-time tracking, and deploy YOLO11 models without needing extensive coding skills. You can explore the key features on the [Ultralytics HUB](https://hub.ultralytics.com/) page and get started quickly with our [Quickstart](https://docs.ultralytics.com/hub/quickstart/) guide. ### How do I integrate Ultralytics YOLO models with Roboflow for dataset management? @@ -121,9 +121,9 @@ Integrating Ultralytics YOLO models with Roboflow enhances dataset management by Yes, you can. Integrating MLFlow with Ultralytics models allows you to track experiments, improve reproducibility, and streamline the entire ML lifecycle. Detailed instructions for setting up this integration can be found on the [MLFlow](mlflow.md) integration page. This integration is particularly useful for monitoring model metrics and managing the ML workflow efficiently. -### What are the benefits of using Neural Magic for YOLOv8 model optimization? +### What are the benefits of using Neural Magic for YOLO11 model optimization? -Neural Magic optimizes YOLOv8 models by leveraging techniques like Quantization Aware Training (QAT) and pruning, resulting in highly efficient, smaller models that perform better on resource-limited hardware. Check out the [Neural Magic](neural-magic.md) integration page to learn how to implement these optimizations for superior performance and leaner models. This is especially beneficial for deployment on edge devices. +Neural Magic optimizes YOLO11 models by leveraging techniques like Quantization Aware Training (QAT) and pruning, resulting in highly efficient, smaller models that perform better on resource-limited hardware. Check out the [Neural Magic](neural-magic.md) integration page to learn how to implement these optimizations for superior performance and leaner models. 
This is especially beneficial for deployment on edge devices. ### How do I deploy Ultralytics YOLO models with Gradio for interactive demos? diff --git a/docs/en/integrations/jupyterlab.md b/docs/en/integrations/jupyterlab.md index b3179918b1..668940ffab 100644 --- a/docs/en/integrations/jupyterlab.md +++ b/docs/en/integrations/jupyterlab.md @@ -1,14 +1,14 @@ --- comments: true -description: Explore our integration guide that explains how you can use JupyterLab to train a YOLOv8 model. We'll also cover key features and tips for common issues. -keywords: JupyterLab, What is JupyterLab, How to Use JupyterLab, JupyterLab How to Use, YOLOv8, Ultralytics, Model Training, GPU, TPU, cloud computing +description: Explore our integration guide that explains how you can use JupyterLab to train a YOLO11 model. We'll also cover key features and tips for common issues. +keywords: JupyterLab, What is JupyterLab, How to Use JupyterLab, JupyterLab How to Use, YOLO11, Ultralytics, Model Training, GPU, TPU, cloud computing --- -# A Guide on How to Use JupyterLab to Train Your YOLOv8 Models +# A Guide on How to Use JupyterLab to Train Your YOLO11 Models Building [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models can be tough, especially when you don't have the right tools or environment to work with. If you are facing this issue, JupyterLab might be the right solution for you. JupyterLab is a user-friendly, web-based platform that makes coding more flexible and interactive. You can use it to handle big datasets, create complex models, and even collaborate with others, all in one place. -You can use JupyterLab to [work on projects](../guides/steps-of-a-cv-project.md) related to [Ultralytics YOLOv8 models](https://github.com/ultralytics/ultralytics). JupyterLab is a great option for efficient model development and experimentation. It makes it easy to start experimenting with and [training YOLOv8 models](../modes/train.md) right from your computer. Let's dive deeper into JupyterLab, its key features, and how you can use it to train YOLOv8 models. +You can use JupyterLab to [work on projects](../guides/steps-of-a-cv-project.md) related to [Ultralytics YOLO11 models](https://github.com/ultralytics/ultralytics). JupyterLab is a great option for efficient model development and experimentation. It makes it easy to start experimenting with and [training YOLO11 models](../modes/train.md) right from your computer. Let's dive deeper into JupyterLab, its key features, and how you can use it to train YOLO11 models. ## What is JupyterLab? @@ -26,7 +26,7 @@ Here are some of the key features that make JupyterLab a great option for model - **Markdown Preview**: Working with Markdown files is more efficient in JupyterLab, thanks to its simultaneous preview feature. As you write or edit your Markdown file, you can see the formatted output in real-time. It makes it easier to double-check that your documentation looks perfect, saving you from having to switch back and forth between editing and preview modes. - **Run Code from Text Files**: If you're sharing a text file with code, JupyterLab makes it easy to run it directly within the platform. You can highlight the code and press Shift + Enter to execute it. It is great for verifying code snippets quickly and helps guarantee that the code you share is functional and error-free. -## Why Should You Use JupyterLab for Your YOLOv8 Projects? +## Why Should You Use JupyterLab for Your YOLO11 Projects? 
There are multiple platforms for developing and evaluating machine learning models, so what makes JupyterLab stand out? Let's explore some of the unique aspects that JupyterLab offers for your machine-learning projects: @@ -46,9 +46,9 @@ When working with Kaggle, you might come across some common issues. Here are som - **Installing JupyterLab Extensions**: JupyterLab supports various extensions to enhance functionality. You can install and customize these extensions to suit your needs. For detailed instructions, refer to [JupyterLab Extensions Guide](https://jupyterlab.readthedocs.io/en/latest/user/extensions.html) for more information. - **Using Multiple Versions of Python**: If you need to work with different versions of Python, you can use Jupyter kernels configured with different Python versions. -## How to Use JupyterLab to Try Out YOLOv8 +## How to Use JupyterLab to Try Out YOLO11 -JupyterLab makes it easy to experiment with YOLOv8. To get started, follow these simple steps. +JupyterLab makes it easy to experiment with YOLO11. To get started, follow these simple steps. ### Step 1: Install JupyterLab @@ -63,7 +63,7 @@ First, you need to install JupyterLab. Open your terminal and run the command: pip install jupyterlab ``` -### Step 2: Download the YOLOv8 Tutorial Notebook +### Step 2: Download the YOLO11 Tutorial Notebook Next, download the [tutorial.ipynb](https://github.com/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb) file from the Ultralytics GitHub repository. Save this file to any directory on your local machine. @@ -85,13 +85,13 @@ Once you've run this command, it will open JupyterLab in your default web browse ### Step 4: Start Experimenting -In JupyterLab, open the tutorial.ipynb notebook. You can now start running the cells to explore and experiment with YOLOv8. +In JupyterLab, open the tutorial.ipynb notebook. You can now start running the cells to explore and experiment with YOLO11. -![Image Showing Opened YOLOv8 Notebook in JupyterLab](https://github.com/ultralytics/docs/releases/download/0/opened-yolov8-notebook-jupyterlab.avif) +![Image Showing Opened YOLO11 Notebook in JupyterLab](https://github.com/ultralytics/docs/releases/download/0/opened-yolov8-notebook-jupyterlab.avif) -JupyterLab's interactive environment allows you to modify code, visualize outputs, and document your findings all in one place. You can try out different configurations and understand how YOLOv8 works. +JupyterLab's interactive environment allows you to modify code, visualize outputs, and document your findings all in one place. You can try out different configurations and understand how YOLO11 works. -For a detailed understanding of the model training process and best practices, refer to the [YOLOv8 Model Training guide](../modes/train.md). This guide will help you get the most out of your experiments and ensure you're using YOLOv8 effectively. +For a detailed understanding of the model training process and best practices, refer to the [YOLO11 Model Training guide](../modes/train.md). This guide will help you get the most out of your experiments and ensure you're using YOLO11 effectively. ## Keep Learning about Jupyterlab @@ -103,17 +103,17 @@ If you're excited to learn more about JupyterLab, here are some great resources ## Summary -We've explored how JupyterLab can be a powerful tool for experimenting with Ultralytics YOLOv8 models. Using its flexible and interactive environment, you can easily set up JupyterLab on your local machine and start working with YOLOv8. 
JupyterLab makes it simple to [train](../guides/model-training-tips.md) and [evaluate](../guides/model-testing.md) your models, visualize outputs, and [document your findings](../guides/model-monitoring-and-maintenance.md) all in one place. +We've explored how JupyterLab can be a powerful tool for experimenting with Ultralytics YOLO11 models. Using its flexible and interactive environment, you can easily set up JupyterLab on your local machine and start working with YOLO11. JupyterLab makes it simple to [train](../guides/model-training-tips.md) and [evaluate](../guides/model-testing.md) your models, visualize outputs, and [document your findings](../guides/model-monitoring-and-maintenance.md) all in one place. For more details, visit the [JupyterLab FAQ Page](https://jupyterlab.readthedocs.io/en/stable/getting_started/faq.html). -Interested in more YOLOv8 integrations? Check out the [Ultralytics integration guide](./index.md) to explore additional tools and capabilities for your machine learning projects. +Interested in more YOLO11 integrations? Check out the [Ultralytics integration guide](./index.md) to explore additional tools and capabilities for your machine learning projects. ## FAQ -### How do I use JupyterLab to train a YOLOv8 model? +### How do I use JupyterLab to train a YOLO11 model? -To train a YOLOv8 model using JupyterLab: +To train a YOLO11 model using JupyterLab: 1. Install JupyterLab and the Ultralytics package: @@ -128,7 +128,7 @@ To train a YOLOv8 model using JupyterLab: ```python from ultralytics import YOLO - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") ``` 4. Train the model on your custom dataset: @@ -147,22 +147,22 @@ To train a YOLOv8 model using JupyterLab: JupyterLab's interactive environment allows you to easily modify parameters, visualize results, and iterate on your model training process. -### What are the key features of JupyterLab that make it suitable for YOLOv8 projects? +### What are the key features of JupyterLab that make it suitable for YOLO11 projects? -JupyterLab offers several features that make it ideal for YOLOv8 projects: +JupyterLab offers several features that make it ideal for YOLO11 projects: -1. Interactive code execution: Test and debug YOLOv8 code snippets in real-time. +1. Interactive code execution: Test and debug YOLO11 code snippets in real-time. 2. Integrated file browser: Easily manage datasets, model weights, and configuration files. 3. Flexible layout: Arrange multiple notebooks, terminals, and output windows side-by-side for efficient workflow. -4. Rich output display: Visualize YOLOv8 detection results, training curves, and model performance metrics inline. -5. Markdown support: Document your YOLOv8 experiments and findings with rich text and images. +4. Rich output display: Visualize YOLO11 detection results, training curves, and model performance metrics inline. +5. Markdown support: Document your YOLO11 experiments and findings with rich text and images. 6. Extension ecosystem: Enhance functionality with extensions for version control, [remote computing](google-colab.md), and more. -These features allow for a seamless development experience when working with YOLOv8 models, from data preparation to [model deployment](https://www.ultralytics.com/glossary/model-deployment). +These features allow for a seamless development experience when working with YOLO11 models, from data preparation to [model deployment](https://www.ultralytics.com/glossary/model-deployment). 
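To make the rich inline output concrete, a single JupyterLab cell can run a prediction and render the annotated result as the cell's output; a minimal sketch:

```python
from PIL import Image

from ultralytics import YOLO

# Run a quick prediction in a notebook cell
model = YOLO("yolo11n.pt")
results = model.predict(source="https://ultralytics.com/images/bus.jpg")

# plot() returns the annotated image as a BGR array; reverse the channels
# so JupyterLab displays the PIL image inline as the cell output
Image.fromarray(results[0].plot()[..., ::-1])
```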
-### How can I optimize YOLOv8 model performance using JupyterLab? +### How can I optimize YOLO11 model performance using JupyterLab? -To optimize YOLOv8 model performance in JupyterLab: +To optimize YOLO11 model performance in JupyterLab: 1. Use the autobatch feature to determine the optimal batch size: @@ -190,11 +190,11 @@ To optimize YOLOv8 model performance in JupyterLab: 4. Experiment with different model architectures and [export formats](../modes/export.md) to find the best balance of speed and [accuracy](https://www.ultralytics.com/glossary/accuracy) for your specific use case. -JupyterLab's interactive environment allows for quick iterations and real-time feedback, making it easier to optimize your YOLOv8 models efficiently. +JupyterLab's interactive environment allows for quick iterations and real-time feedback, making it easier to optimize your YOLO11 models efficiently. -### How do I handle common issues when working with JupyterLab and YOLOv8? +### How do I handle common issues when working with JupyterLab and YOLO11? -When working with JupyterLab and YOLOv8, you might encounter some common issues. Here's how to handle them: +When working with JupyterLab and YOLO11, you might encounter some common issues. Here's how to handle them: 1. GPU memory issues: @@ -203,7 +203,7 @@ When working with JupyterLab and YOLOv8, you might encounter some common issues. 2. Package conflicts: - - Create a separate conda environment for your YOLOv8 projects to avoid conflicts. + - Create a separate conda environment for your YOLO11 projects to avoid conflicts. - Use `!pip install package_name` in a notebook cell to install missing packages. 3. Kernel crashes: diff --git a/docs/en/integrations/kaggle.md b/docs/en/integrations/kaggle.md index 66929d109d..efcb78eb81 100644 --- a/docs/en/integrations/kaggle.md +++ b/docs/en/integrations/kaggle.md @@ -1,14 +1,14 @@ --- comments: true -description: Dive into our guide on YOLOv8's integration with Kaggle. Find out what Kaggle is, its key features, and how to train a YOLOv8 model using the integration. -keywords: What is Kaggle, What is Kaggle Used For, YOLOv8, Kaggle Machine Learning, Model Training, GPU, TPU, cloud computing +description: Dive into our guide on YOLO11's integration with Kaggle. Find out what Kaggle is, its key features, and how to train a YOLO11 model using the integration. +keywords: What is Kaggle, What is Kaggle Used For, YOLO11, Kaggle Machine Learning, Model Training, GPU, TPU, cloud computing --- -# A Guide on Using Kaggle to Train Your YOLOv8 Models +# A Guide on Using Kaggle to Train Your YOLO11 Models If you are learning about AI and working on [small projects](../solutions/index.md), you might not have access to powerful computing resources yet, and high-end hardware can be pretty expensive. Fortunately, Kaggle, a platform owned by Google, offers a great solution. Kaggle provides a free, cloud-based environment where you can access GPU resources, handle large datasets, and collaborate with a diverse community of data scientists and [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) enthusiasts. -Kaggle is a great choice for [training](../guides/model-training-tips.md) and experimenting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics?tab=readme-ov-file) models. Kaggle Notebooks make using popular machine-learning libraries and frameworks in your projects easy. Let's explore Kaggle's main features and learn how you can train YOLOv8 models on this platform! 
+Kaggle is a great choice for [training](../guides/model-training-tips.md) and experimenting with [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics?tab=readme-ov-file) models. Kaggle Notebooks make using popular machine-learning libraries and frameworks in your projects easy. Let's explore Kaggle's main features and learn how you can train YOLO11 models on this platform! ## What is Kaggle? @@ -16,21 +16,21 @@ Kaggle is a platform that brings together data scientists from around the world With more than [10 million users](https://www.kaggle.com/discussions/general/332147) as of 2022, Kaggle provides a rich environment for developing and experimenting with machine learning models. You don't need to worry about your local machine's specs or setup; you can dive right in with just a Kaggle account and a web browser. -## Training YOLOv8 Using Kaggle +## Training YOLO11 Using Kaggle -Training YOLOv8 models on Kaggle is simple and efficient, thanks to the platform's access to powerful GPUs. +Training YOLO11 models on Kaggle is simple and efficient, thanks to the platform's access to powerful GPUs. -To get started, access the [Kaggle YOLOv8 Notebook](https://www.kaggle.com/code/ultralytics/yolov8). Kaggle's environment comes with pre-installed libraries like [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) and [PyTorch](https://www.ultralytics.com/glossary/pytorch), making the setup process hassle-free. +To get started, access the [Kaggle YOLO11 Notebook](https://www.kaggle.com/code/ultralytics/yolov8). Kaggle's environment comes with pre-installed libraries like [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) and [PyTorch](https://www.ultralytics.com/glossary/pytorch), making the setup process hassle-free. -![What is the kaggle integration with respect to YOLOv8?](https://github.com/ultralytics/docs/releases/download/0/kaggle-integration-yolov8.avif) +![What is the kaggle integration with respect to YOLO11?](https://github.com/ultralytics/docs/releases/download/0/kaggle-integration-yolov8.avif) -Once you sign in to your Kaggle account, you can click on the option to copy and edit the code, select a GPU under the accelerator settings, and run the notebook's cells to begin training your model. For a detailed understanding of the model training process and best practices, refer to our [YOLOv8 Model Training guide](../modes/train.md). +Once you sign in to your Kaggle account, you can click on the option to copy and edit the code, select a GPU under the accelerator settings, and run the notebook's cells to begin training your model. For a detailed understanding of the model training process and best practices, refer to our [YOLO11 Model Training guide](../modes/train.md). ![Using kaggle for machine learning model training with a GPU](https://github.com/ultralytics/docs/releases/download/0/using-kaggle-for-machine-learning-model-training-with-a-gpu.avif) -On the [official YOLOv8 Kaggle notebook page](https://www.kaggle.com/code/ultralytics/yolov8), if you click on the three dots in the upper right-hand corner, you'll notice more options will pop up. +On the [official YOLO11 Kaggle notebook page](https://www.kaggle.com/code/ultralytics/yolov8), if you click on the three dots in the upper right-hand corner, you'll notice more options will pop up. 
-![Overview of Options From the Official YOLOv8 Kaggle Notebook Page](https://github.com/ultralytics/docs/releases/download/0/overview-options-yolov8-kaggle-notebook.avif) +![Overview of Options From the Official YOLO11 Kaggle Notebook Page](https://github.com/ultralytics/docs/releases/download/0/overview-options-yolov8-kaggle-notebook.avif) These options include: @@ -59,17 +59,17 @@ When working with Kaggle, you might come across some common issues. Here are som Next, let's understand the features Kaggle offers that make it an excellent platform for data science and machine learning enthusiasts. Here are some of the key highlights: -- **Datasets**: Kaggle hosts a massive collection of datasets on various topics. You can easily search and use these datasets in your projects, which is particularly handy for training and testing your YOLOv8 models. +- **Datasets**: Kaggle hosts a massive collection of datasets on various topics. You can easily search and use these datasets in your projects, which is particularly handy for training and testing your YOLO11 models. - **Competitions**: Known for its exciting competitions, Kaggle allows data scientists and machine learning enthusiasts to solve real-world problems. Competing helps you improve your skills, learn new techniques, and gain recognition in the community. -- **Free Access to TPUs**: Kaggle provides free access to powerful TPUs, which are essential for training complex machine learning models. This means you can speed up processing and boost the performance of your YOLOv8 projects without incurring extra costs. +- **Free Access to TPUs**: Kaggle provides free access to powerful TPUs, which are essential for training complex machine learning models. This means you can speed up processing and boost the performance of your YOLO11 projects without incurring extra costs. - **Integration with Github**: Kaggle allows you to easily connect your GitHub repository to upload notebooks and save your work. This integration makes it convenient to manage and access your files. - **Community and Discussions**: Kaggle boasts a strong community of data scientists and machine learning practitioners. The discussion forums and shared notebooks are fantastic resources for learning and troubleshooting. You can easily find help, share your knowledge, and collaborate with others. -## Why Should You Use Kaggle for Your YOLOv8 Projects? +## Why Should You Use Kaggle for Your YOLO11 Projects? There are multiple platforms for training and evaluating machine learning models, so what makes Kaggle stand out? Let's dive into the benefits of using Kaggle for your machine-learning projects: -- **Public Notebooks**: You can make your Kaggle notebooks public, allowing other users to view, vote, fork, and discuss your work. Kaggle promotes collaboration, feedback, and the sharing of ideas, helping you improve your YOLOv8 models. +- **Public Notebooks**: You can make your Kaggle notebooks public, allowing other users to view, vote, fork, and discuss your work. Kaggle promotes collaboration, feedback, and the sharing of ideas, helping you improve your YOLO11 models. - **Comprehensive History of Notebook Commits**: Kaggle creates a detailed history of your notebook commits. This allows you to review and track changes over time, making it easier to understand the evolution of your project and revert to previous versions if needed. - **Console Access**: Kaggle provides a console, giving you more control over your environment. 
This feature allows you to perform various tasks directly from the command line, enhancing your workflow and productivity. - **Resource Availability**: Each notebook editing session on Kaggle is provided with significant resources: 12 hours of execution time for CPU and GPU sessions, 9 hours of execution time for TPU sessions, and 20 gigabytes of auto-saved disk space. @@ -85,21 +85,21 @@ If you want to learn more about Kaggle, here are some helpful resources to guide ## Summary -We've seen how Kaggle can boost your YOLOv8 projects by providing free access to powerful GPUs, making model training and evaluation efficient. Kaggle's platform is user-friendly, with pre-installed libraries for quick setup. +We've seen how Kaggle can boost your YOLO11 projects by providing free access to powerful GPUs, making model training and evaluation efficient. Kaggle's platform is user-friendly, with pre-installed libraries for quick setup. For more details, visit [Kaggle's documentation](https://www.kaggle.com/docs). -Interested in more YOLOv8 integrations? Check out the[ Ultralytics integration guide](https://docs.ultralytics.com/integrations/) to explore additional tools and capabilities for your machine learning projects. +Interested in more YOLO11 integrations? Check out the [Ultralytics integration guide](https://docs.ultralytics.com/integrations/) to explore additional tools and capabilities for your machine learning projects. ## FAQ -### How do I train a YOLOv8 model on Kaggle? +### How do I train a YOLO11 model on Kaggle? -Training a YOLOv8 model on Kaggle is straightforward. First, access the [Kaggle YOLOv8 Notebook](https://www.kaggle.com/ultralytics/yolov8). Sign in to your Kaggle account, copy and edit the notebook, and select a GPU under the accelerator settings. Run the notebook cells to start training. For more detailed steps, refer to our [YOLOv8 Model Training guide](../modes/train.md). +Training a YOLO11 model on Kaggle is straightforward. First, access the [Kaggle YOLO11 Notebook](https://www.kaggle.com/ultralytics/yolov8). Sign in to your Kaggle account, copy and edit the notebook, and select a GPU under the accelerator settings. Run the notebook cells to start training. For more detailed steps, refer to our [YOLO11 Model Training guide](../modes/train.md). -### What are the benefits of using Kaggle for YOLOv8 model training? +### What are the benefits of using Kaggle for YOLO11 model training? -Kaggle offers several advantages for training YOLOv8 models: +Kaggle offers several advantages for training YOLO11 models: - **Free GPU Access**: Utilize powerful GPUs like Nvidia Tesla P100 or T4 x2 for up to 30 hours per week. - **Pre-installed Libraries**: Libraries like TensorFlow and PyTorch are pre-installed, simplifying the setup. @@ -108,7 +108,7 @@ Kaggle offers several advantages for training YOLOv8 models: For more details, visit our [Ultralytics integration guide](https://docs.ultralytics.com/integrations/). -### What common issues might I encounter when using Kaggle for YOLOv8, and how can I resolve them? +### What common issues might I encounter when using Kaggle for YOLO11, and how can I resolve them? Common issues include: @@ -119,7 +119,7 @@ Common issues include: For more troubleshooting tips, see our [Common Issues guide](../guides/yolo-common-issues.md). -### Why should I choose Kaggle over other platforms like Google Colab for training YOLOv8 models? +### Why should I choose Kaggle over other platforms like Google Colab for training YOLO11 models?
Kaggle offers unique features that make it an excellent choice: diff --git a/docs/en/integrations/ncnn.md b/docs/en/integrations/ncnn.md index 42d04198e1..c3f7b99253 100644 --- a/docs/en/integrations/ncnn.md +++ b/docs/en/integrations/ncnn.md @@ -1,14 +1,14 @@ --- comments: true -description: Optimize YOLOv8 models for mobile and embedded devices by exporting to NCNN format. Enhance performance in resource-constrained environments. -keywords: Ultralytics, YOLOv8, NCNN, model export, machine learning, deployment, mobile, embedded systems, deep learning, AI models +description: Optimize YOLO11 models for mobile and embedded devices by exporting to NCNN format. Enhance performance in resource-constrained environments. +keywords: Ultralytics, YOLO11, NCNN, model export, machine learning, deployment, mobile, embedded systems, deep learning, AI models --- -# How to Export to NCNN from YOLOv8 for Smooth Deployment +# How to Export to NCNN from YOLO11 for Smooth Deployment Deploying [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) models on devices with limited computational power, such as mobile or embedded systems, can be tricky. You need to make sure you use a format optimized for optimal performance. This makes sure that even devices with limited processing power can handle advanced computer vision tasks well. -The export to NCNN format feature allows you to optimize your [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models for lightweight device-based applications. In this guide, we'll walk you through how to convert your models to the NCNN format, making it easier for your models to perform well on various mobile and embedded devices. +The export to NCNN format feature allows you to optimize your [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics) models for lightweight device-based applications. In this guide, we'll walk you through how to convert your models to the NCNN format, making it easier for your models to perform well on various mobile and embedded devices. ## Why should you export to NCNN? @@ -34,7 +34,7 @@ NCNN models offer a wide range of key features that enable on-device [machine le ## Deployment Options with NCNN -Before we look at the code for exporting YOLOv8 models to the NCNN format, let's understand how NCNN models are normally used. +Before we look at the code for exporting YOLO11 models to the NCNN format, let's understand how NCNN models are normally used. NCNN models, designed for efficiency and performance, are compatible with a variety of deployment platforms: @@ -44,9 +44,9 @@ NCNN models, designed for efficiency and performance, are compatible with a vari - **Desktop and Server Deployment**: Capable of being deployed in desktop and server environments across Linux, Windows, and macOS, supporting development, training, and evaluation with higher computational capacities. -## Export to NCNN: Converting Your YOLOv8 Model +## Export to NCNN: Converting Your YOLO11 Model -You can expand model compatibility and deployment flexibility by converting YOLOv8 models to NCNN format. +You can expand model compatibility and deployment flexibility by converting YOLO11 models to NCNN format. 
### Installation @@ -57,15 +57,15 @@ To install the required packages, run: === "CLI" ```bash - # Install the required package for YOLOv8 + # Install the required package for YOLO11 pip install ultralytics ``` -For detailed instructions and best practices related to the installation process, check our [Ultralytics Installation guide](../quickstart.md). While installing the required packages for YOLOv8, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. +For detailed instructions and best practices related to the installation process, check our [Ultralytics Installation guide](../quickstart.md). While installing the required packages for YOLO11, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. ### Usage -Before diving into the usage instructions, it's important to note that while all [Ultralytics YOLOv8 models](../models/index.md) are available for exporting, you can ensure that the model you select supports export functionality [here](../modes/export.md). +Before diving into the usage instructions, it's important to note that while all [Ultralytics YOLO11 models](../models/index.md) are available for exporting, you can ensure that the model you select supports export functionality [here](../modes/export.md). !!! example "Usage" === "Python" ```python from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Export the model to NCNN format - model.export(format="ncnn") # creates '/yolov8n_ncnn_model' + model.export(format="ncnn") # creates '/yolo11n_ncnn_model' # Load the exported NCNN model - ncnn_model = YOLO("./yolov8n_ncnn_model") + ncnn_model = YOLO("./yolo11n_ncnn_model") # Run inference results = ncnn_model("https://ultralytics.com/images/bus.jpg") @@ -90,18 +90,18 @@ Before diving into the usage instructions, it's important to note that while all === "CLI" ```bash - # Export a YOLOv8n PyTorch model to NCNN format - yolo export model=yolov8n.pt format=ncnn # creates '/yolov8n_ncnn_model' + # Export a YOLO11n PyTorch model to NCNN format + yolo export model=yolo11n.pt format=ncnn # creates '/yolo11n_ncnn_model' # Run inference with the exported model - yolo predict model='./yolov8n_ncnn_model' source='https://ultralytics.com/images/bus.jpg' + yolo predict model='./yolo11n_ncnn_model' source='https://ultralytics.com/images/bus.jpg' ``` For more details about supported export options, visit the [Ultralytics documentation page on deployment options](../guides/model-deployment-options.md). -## Deploying Exported YOLOv8 NCNN Models +## Deploying Exported YOLO11 NCNN Models -After successfully exporting your Ultralytics YOLOv8 models to NCNN format, you can now deploy them. The primary and recommended first step for running a NCNN model is to utilize the YOLO("./model_ncnn_model") method, as outlined in the previous usage code snippet. However, for in-depth instructions on deploying your NCNN models in various other settings, take a look at the following resources: +After successfully exporting your Ultralytics YOLO11 models to NCNN format, you can now deploy them. The primary and recommended first step for running an NCNN model is to utilize the YOLO("./model_ncnn_model") method, as outlined in the previous usage code snippet.
However, for in-depth instructions on deploying your NCNN models in various other settings, take a look at the following resources: - **[Android](https://github.com/Tencent/ncnn/wiki/how-to-build#build-for-android)**: This blog explains how to use NCNN models for performing tasks like [object detection](https://www.ultralytics.com/glossary/object-detection) through Android applications. @@ -113,40 +113,40 @@ After successfully exporting your Ultralytics YOLOv8 models to NCNN format, you ## Summary -In this guide, we've gone over exporting Ultralytics YOLOv8 models to the NCNN format. This conversion step is crucial for improving the efficiency and speed of YOLOv8 models, making them more effective and suitable for limited-resource computing environments. +In this guide, we've gone over exporting Ultralytics YOLO11 models to the NCNN format. This conversion step is crucial for improving the efficiency and speed of YOLO11 models, making them more effective and suitable for limited-resource computing environments. For detailed instructions on usage, please refer to the [official NCNN documentation](https://ncnn.readthedocs.io/en/latest/index.html). -Also, if you're interested in exploring other integration options for Ultralytics YOLOv8, be sure to visit our [integration guide page](index.md) for further insights and information. +Also, if you're interested in exploring other integration options for Ultralytics YOLO11, be sure to visit our [integration guide page](index.md) for further insights and information. ## FAQ -### How do I export Ultralytics YOLOv8 models to NCNN format? +### How do I export Ultralytics YOLO11 models to NCNN format? -To export your Ultralytics YOLOv8 model to NCNN format, follow these steps: +To export your Ultralytics YOLO11 model to NCNN format, follow these steps: - **Python**: Use the `export` function from the YOLO class. ```python from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Export to NCNN format - model.export(format="ncnn") # creates '/yolov8n_ncnn_model' + model.export(format="ncnn") # creates '/yolo11n_ncnn_model' ``` - **CLI**: Use the `yolo` command with the `export` argument. ```bash - yolo export model=yolov8n.pt format=ncnn # creates '/yolov8n_ncnn_model' + yolo export model=yolo11n.pt format=ncnn # creates '/yolo11n_ncnn_model' ``` For detailed export options, check the [Export](../modes/export.md) page in the documentation. -### What are the advantages of exporting YOLOv8 models to NCNN? +### What are the advantages of exporting YOLO11 models to NCNN? -Exporting your Ultralytics YOLOv8 models to NCNN offers several benefits: +Exporting your Ultralytics YOLO11 models to NCNN offers several benefits: - **Efficiency**: NCNN models are optimized for mobile and embedded devices, ensuring high performance even with limited computational resources. - **Quantization**: NCNN supports techniques like quantization that improve model speed and reduce memory usage. @@ -174,13 +174,13 @@ NCNN is versatile and supports various platforms: If running models on a Raspberry Pi isn't fast enough, converting to the NCNN format could speed things up as detailed in our [Raspberry Pi Guide](../guides/raspberry-pi.md). -### How can I deploy Ultralytics YOLOv8 NCNN models on Android? +### How can I deploy Ultralytics YOLO11 NCNN models on Android? -To deploy your YOLOv8 models on Android: +To deploy your YOLO11 models on Android: 1. 
**Build for Android**: Follow the [NCNN Build for Android](https://github.com/Tencent/ncnn/wiki/how-to-build#build-for-android) guide. 2. **Integrate with Your App**: Use the NCNN Android SDK to integrate the exported model into your application for efficient on-device inference. -For step-by-step instructions, refer to our guide on [Deploying YOLOv8 NCNN Models](#deploying-exported-yolov8-ncnn-models). +For step-by-step instructions, refer to our guide on [Deploying YOLO11 NCNN Models](#deploying-exported-yolo11-ncnn-models). For more advanced guides and use cases, visit the [Ultralytics documentation page](../guides/model-deployment-options.md). diff --git a/docs/en/integrations/neural-magic.md b/docs/en/integrations/neural-magic.md index d05cf98fa7..72837ccdce 100644 --- a/docs/en/integrations/neural-magic.md +++ b/docs/en/integrations/neural-magic.md @@ -1,14 +1,14 @@ --- comments: true -description: Enhance YOLOv8 performance using Neural Magic's DeepSparse Engine. Learn how to deploy and benchmark YOLOv8 models on CPUs for efficient object detection. -keywords: YOLOv8, DeepSparse, Neural Magic, model optimization, object detection, inference speed, CPU performance, sparsity, pruning, quantization +description: Enhance YOLO11 performance using Neural Magic's DeepSparse Engine. Learn how to deploy and benchmark YOLO11 models on CPUs for efficient object detection. +keywords: YOLO11, DeepSparse, Neural Magic, model optimization, object detection, inference speed, CPU performance, sparsity, pruning, quantization --- -# Optimizing YOLOv8 Inferences with Neural Magic's DeepSparse Engine +# Optimizing YOLO11 Inferences with Neural Magic's DeepSparse Engine -When deploying [object detection](https://www.ultralytics.com/glossary/object-detection) models like [Ultralytics YOLOv8](https://www.ultralytics.com/) on various hardware, you can bump into unique issues like optimization. This is where YOLOv8's integration with Neural Magic's DeepSparse Engine steps in. It transforms the way YOLOv8 models are executed and enables GPU-level performance directly on CPUs. +When deploying [object detection](https://www.ultralytics.com/glossary/object-detection) models like [Ultralytics YOLO11](https://www.ultralytics.com/) on various hardware, you can bump into unique issues like optimization. This is where YOLO11's integration with Neural Magic's DeepSparse Engine steps in. It transforms the way YOLO11 models are executed and enables GPU-level performance directly on CPUs. -This guide shows you how to deploy YOLOv8 using Neural Magic's DeepSparse, how to run inferences, and also how to benchmark performance to ensure it is optimized. +This guide shows you how to deploy YOLO11 using Neural Magic's DeepSparse, how to run inferences, and also how to benchmark performance to ensure it is optimized. ## Neural Magic's DeepSparse @@ -18,17 +18,17 @@ This guide shows you how to deploy YOLOv8 using Neural Magic's DeepSparse, how t [Neural Magic's DeepSparse](https://neuralmagic.com/deepsparse/) is an inference run-time designed to optimize the execution of neural networks on CPUs. It applies advanced techniques like sparsity, pruning, and quantization to dramatically reduce computational demands while maintaining accuracy. DeepSparse offers an agile solution for efficient and scalable [neural network](https://www.ultralytics.com/glossary/neural-network-nn) execution across various devices. 
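Before the YOLO-specific walkthrough below, it can help to see what this runtime looks like at its lowest level. The following is a minimal sketch of DeepSparse's Engine interface, assuming an ONNX file named `yolo11n.onnx` already exists on disk (Step 2 of this guide shows how to export one); `compile_model` and `generate_random_inputs` come from DeepSparse's published quickstart, though exact signatures may vary between versions.

```python
from deepsparse import compile_model
from deepsparse.utils import generate_random_inputs

# Assumes a model already exported to ONNX (see Step 2 below)
onnx_filepath = "yolo11n.onnx"
batch_size = 1

# Compile the ONNX graph into a sparsity-aware CPU engine
engine = compile_model(onnx_filepath, batch_size=batch_size)

# Run the engine on random inputs matching the model's expected input shapes
inputs = generate_random_inputs(onnx_filepath, batch_size)
outputs = engine.run(inputs)
print([out.shape for out in outputs])
```

The higher-level `Pipeline` API used in the steps below wraps this engine together with YOLO-specific pre- and post-processing, which is why it is the recommended entry point.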
-## Benefits of Integrating Neural Magic's DeepSparse with YOLOv8 +## Benefits of Integrating Neural Magic's DeepSparse with YOLO11 -Before diving into how to deploy YOLOV8 using DeepSparse, let's understand the benefits of using DeepSparse. Some key advantages include: +Before diving into how to deploy YOLO11 using DeepSparse, let's understand the benefits of using DeepSparse. Some key advantages include: -- **Enhanced Inference Speed**: Achieves up to 525 FPS (on YOLOv8n), significantly speeding up YOLOv8's inference capabilities compared to traditional methods. +- **Enhanced Inference Speed**: Achieves up to 525 FPS (on YOLO11n), significantly speeding up YOLO11's inference capabilities compared to traditional methods.

Enhanced Inference Speed

-- **Optimized Model Efficiency**: Uses pruning and quantization to enhance YOLOv8's efficiency, reducing model size and computational requirements while maintaining [accuracy](https://www.ultralytics.com/glossary/accuracy). +- **Optimized Model Efficiency**: Uses pruning and quantization to enhance YOLO11's efficiency, reducing model size and computational requirements while maintaining [accuracy](https://www.ultralytics.com/glossary/accuracy).

Optimized Model Efficiency @@ -36,9 +36,9 @@ Before diving into how to deploy YOLOV8 using DeepSparse, let's understand the b - **High Performance on Standard CPUs**: Delivers GPU-like performance on CPUs, providing a more accessible and cost-effective option for various applications. -- **Streamlined Integration and Deployment**: Offers user-friendly tools for easy integration of YOLOv8 into applications, including image and video annotation features. +- **Streamlined Integration and Deployment**: Offers user-friendly tools for easy integration of YOLO11 into applications, including image and video annotation features. -- **Support for Various Model Types**: Compatible with both standard and sparsity-optimized YOLOv8 models, adding deployment flexibility. +- **Support for Various Model Types**: Compatible with both standard and sparsity-optimized YOLO11 models, adding deployment flexibility. - **Cost-Effective and Scalable Solution**: Reduces operational expenses and offers scalable deployment of advanced object detection models. @@ -56,15 +56,15 @@ Neural Magic's Deep Sparse technology is inspired by the human brain's efficienc -For more details on how Neural Magic's DeepSparse technology work, check out [their blog post](https://neuralmagic.com/blog/how-neural-magics-deep-sparse-technology-works/). +For more details on how Neural Magic's DeepSparse technology works, check out [their blog post](https://neuralmagic.com/blog/how-neural-magics-deep-sparse-technology-works/). -## Creating A Sparse Version of YOLOv8 Trained on a Custom Dataset +## Creating A Sparse Version of YOLO11 Trained on a Custom Dataset -SparseZoo, an open-source model repository by Neural Magic, offers [a collection of pre-sparsified YOLOv8 model checkpoints](https://sparsezoo.neuralmagic.com/?modelSet=computer_vision&searchModels=yolo). With SparseML, seamlessly integrated with Ultralytics, users can effortlessly fine-tune these sparse checkpoints on their specific datasets using a straightforward command-line interface. +SparseZoo, an open-source model repository by Neural Magic, offers [a collection of pre-sparsified YOLO11 model checkpoints](https://sparsezoo.neuralmagic.com/?modelSet=computer_vision&searchModels=yolo). With SparseML, seamlessly integrated with Ultralytics, users can effortlessly fine-tune these sparse checkpoints on their specific datasets using a straightforward command-line interface. -Checkout [Neural Magic's SparseML YOLOv8 documentation](https://github.com/neuralmagic/sparseml/tree/main/integrations/ultralytics-yolov8) for more details. +Check out [Neural Magic's SparseML YOLO11 documentation](https://github.com/neuralmagic/sparseml/tree/main/integrations/ultralytics-yolov8) for more details. -## Usage: Deploying YOLOV8 using DeepSparse +## Usage: Deploying YOLO11 using DeepSparse -Deploying YOLOv8 with Neural Magic's DeepSparse involves a few straightforward steps. Before diving into the usage instructions, be sure to check out the range of [YOLOv8 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements. Here's how you can get started. +Deploying YOLO11 with Neural Magic's DeepSparse involves a few straightforward steps. Before diving into the usage instructions, be sure to check out the range of [YOLO11 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements. Here's how you can get started. ### Step 1: Installation @@ -79,24 +79,24 @@ To install the required packages, run: pip install deepsparse[yolov8] ``` -### Step 2: Exporting YOLOv8 to ONNX Format +### Step 2: Exporting YOLO11 to ONNX Format -DeepSparse Engine requires YOLOv8 models in ONNX format.
Exporting your model to this format is essential for compatibility with DeepSparse. Use the following command to export YOLOv8 models: +DeepSparse Engine requires YOLO11 models in ONNX format. Exporting your model to this format is essential for compatibility with DeepSparse. Use the following command to export YOLO11 models: !!! tip "Model Export" === "CLI" ```bash - # Export YOLOv8 model to ONNX format - yolo task=detect mode=export model=yolov8n.pt format=onnx opset=13 + # Export YOLO11 model to ONNX format + yolo task=detect mode=export model=yolo11n.pt format=onnx opset=13 ``` -This command will save the `yolov8n.onnx` model to your disk. +This command will save the `yolo11n.onnx` model to your disk. ### Step 3: Deploying and Running Inferences -With your YOLOv8 model in ONNX format, you can deploy and run inferences using DeepSparse. This can be done easily with their intuitive Python API: +With your YOLO11 model in ONNX format, you can deploy and run inferences using DeepSparse. This can be done easily with their intuitive Python API: !!! tip "Deploying and Running Inferences" @@ -105,8 +105,8 @@ With your YOLOv8 model in ONNX format, you can deploy and run inferences using D ```python from deepsparse import Pipeline - # Specify the path to your YOLOv8 ONNX model - model_path = "path/to/yolov8n.onnx" + # Specify the path to your YOLO11 ONNX model + model_path = "path/to/yolo11n.onnx" # Set up the DeepSparse Pipeline yolo_pipeline = Pipeline.create(task="yolov8", model_path=model_path) @@ -118,7 +118,7 @@ With your YOLOv8 model in ONNX format, you can deploy and run inferences using D ### Step 4: Benchmarking Performance -It's important to check that your YOLOv8 model is performing optimally on DeepSparse. You can benchmark your model's performance to analyze throughput and latency: +It's important to check that your YOLO11 model is performing optimally on DeepSparse. You can benchmark your model's performance to analyze throughput and latency: !!! tip "Benchmarking" @@ -126,12 +126,12 @@ It's important to check that your YOLOv8 model is performing optimally on DeepSp ```bash # Benchmark performance - deepsparse.benchmark model_path="path/to/yolov8n.onnx" --scenario=sync --input_shapes="[1,3,640,640]" + deepsparse.benchmark model_path="path/to/yolo11n.onnx" --scenario=sync --input_shapes="[1,3,640,640]" ``` ### Step 5: Additional Features -DeepSparse provides additional features for practical integration of YOLOv8 in applications, such as image annotation and dataset evaluation. +DeepSparse provides additional features for practical integration of YOLO11 in applications, such as image annotation and dataset evaluation. !!! tip "Additional Features" @@ -139,10 +139,10 @@ DeepSparse provides additional features for practical integration of YOLOv8 in a ```bash # For image annotation - deepsparse.yolov8.annotate --source "path/to/image.jpg" --model_filepath "path/to/yolov8n.onnx" + deepsparse.yolov8.annotate --source "path/to/image.jpg" --model_filepath "path/to/yolo11n.onnx" # For evaluating model performance on a dataset - deepsparse.yolov8.eval --model_path "path/to/yolov8n.onnx" + deepsparse.yolov8.eval --model_path "path/to/yolo11n.onnx" ``` Running the annotate command processes your specified image, detecting objects, and saving the annotated image with bounding boxes and classifications. The annotated image will be stored in an annotation-results folder. This helps provide a visual representation of the model's detection capabilities. 
@@ -151,61 +151,61 @@ Running the annotate command processes your specified image, detecting objects, Image Annotation Feature

-After running the eval command, you will receive detailed output metrics such as [precision](https://www.ultralytics.com/glossary/precision), [recall](https://www.ultralytics.com/glossary/recall), and mAP (mean Average Precision). This provides a comprehensive view of your model's performance on the dataset. This functionality is particularly useful for fine-tuning and optimizing your YOLOv8 models for specific use cases, ensuring high accuracy and efficiency. +After running the eval command, you will receive detailed output metrics such as [precision](https://www.ultralytics.com/glossary/precision), [recall](https://www.ultralytics.com/glossary/recall), and mAP (mean Average Precision). This provides a comprehensive view of your model's performance on the dataset. This functionality is particularly useful for fine-tuning and optimizing your YOLO11 models for specific use cases, ensuring high accuracy and efficiency. ## Summary -This guide explored integrating Ultralytics' YOLOv8 with Neural Magic's DeepSparse Engine. It highlighted how this integration enhances YOLOv8's performance on CPU platforms, offering GPU-level efficiency and advanced neural network sparsity techniques. +This guide explored integrating Ultralytics' YOLO11 with Neural Magic's DeepSparse Engine. It highlighted how this integration enhances YOLO11's performance on CPU platforms, offering GPU-level efficiency and advanced neural network sparsity techniques. -For more detailed information and advanced usage, visit [Neural Magic's DeepSparse documentation](https://docs.neuralmagic.com/products/deepsparse/). Also, check out Neural Magic's documentation on the integration with YOLOv8 [here](https://github.com/neuralmagic/deepsparse/tree/main/src/deepsparse/yolov8#yolov8-inference-pipelines) and watch a great session on it [here](https://www.youtube.com/watch?v=qtJ7bdt52x8). +For more detailed information and advanced usage, visit [Neural Magic's DeepSparse documentation](https://docs.neuralmagic.com/products/deepsparse/). Also, check out Neural Magic's documentation on the integration with YOLO11 [here](https://github.com/neuralmagic/deepsparse/tree/main/src/deepsparse/yolov8#yolov8-inference-pipelines) and watch a great session on it [here](https://www.youtube.com/watch?v=qtJ7bdt52x8). -Additionally, for a broader understanding of various YOLOv8 integrations, visit the [Ultralytics integration guide page](../integrations/index.md), where you can discover a range of other exciting integration possibilities. +Additionally, for a broader understanding of various YOLO11 integrations, visit the [Ultralytics integration guide page](../integrations/index.md), where you can discover a range of other exciting integration possibilities. ## FAQ -### What is Neural Magic's DeepSparse Engine and how does it optimize YOLOv8 performance? +### What is Neural Magic's DeepSparse Engine and how does it optimize YOLO11 performance? -Neural Magic's DeepSparse Engine is an inference runtime designed to optimize the execution of neural networks on CPUs through advanced techniques such as sparsity, pruning, and quantization. By integrating DeepSparse with YOLOv8, you can achieve GPU-like performance on standard CPUs, significantly enhancing inference speed, model efficiency, and overall performance while maintaining accuracy. For more details, check out the [Neural Magic's DeepSparse section](#neural-magics-deepsparse). 
+Neural Magic's DeepSparse Engine is an inference runtime designed to optimize the execution of neural networks on CPUs through advanced techniques such as sparsity, pruning, and quantization. By integrating DeepSparse with YOLO11, you can achieve GPU-like performance on standard CPUs, significantly enhancing inference speed, model efficiency, and overall performance while maintaining accuracy. For more details, check out the [Neural Magic's DeepSparse section](#neural-magics-deepsparse). -### How can I install the needed packages to deploy YOLOv8 using Neural Magic's DeepSparse? +### How can I install the needed packages to deploy YOLO11 using Neural Magic's DeepSparse? -Installing the required packages for deploying YOLOv8 with Neural Magic's DeepSparse is straightforward. You can easily install them using the CLI. Here's the command you need to run: +Installing the required packages for deploying YOLO11 with Neural Magic's DeepSparse is straightforward. You can easily install them using the CLI. Here's the command you need to run: ```bash pip install deepsparse[yolov8] ``` -Once installed, follow the steps provided in the [Installation section](#step-1-installation) to set up your environment and start using DeepSparse with YOLOv8. +Once installed, follow the steps provided in the [Installation section](#step-1-installation) to set up your environment and start using DeepSparse with YOLO11. -### How do I convert YOLOv8 models to ONNX format for use with DeepSparse? +### How do I convert YOLO11 models to ONNX format for use with DeepSparse? -To convert YOLOv8 models to the ONNX format, which is required for compatibility with DeepSparse, you can use the following CLI command: +To convert YOLO11 models to the ONNX format, which is required for compatibility with DeepSparse, you can use the following CLI command: ```bash -yolo task=detect mode=export model=yolov8n.pt format=onnx opset=13 +yolo task=detect mode=export model=yolo11n.pt format=onnx opset=13 ``` -This command will export your YOLOv8 model (`yolov8n.pt`) to a format (`yolov8n.onnx`) that can be utilized by the DeepSparse Engine. More information about model export can be found in the [Model Export section](#step-2-exporting-yolov8-to-onnx-format). +This command will export your YOLO11 model (`yolo11n.pt`) to a format (`yolo11n.onnx`) that can be utilized by the DeepSparse Engine. More information about model export can be found in the [Model Export section](#step-2-exporting-yolo11-to-onnx-format). -### How do I benchmark YOLOv8 performance on the DeepSparse Engine? +### How do I benchmark YOLO11 performance on the DeepSparse Engine? -Benchmarking YOLOv8 performance on DeepSparse helps you analyze throughput and latency to ensure your model is optimized. You can use the following CLI command to run a benchmark: +Benchmarking YOLO11 performance on DeepSparse helps you analyze throughput and latency to ensure your model is optimized. You can use the following CLI command to run a benchmark: ```bash -deepsparse.benchmark model_path="path/to/yolov8n.onnx" --scenario=sync --input_shapes="[1,3,640,640]" +deepsparse.benchmark model_path="path/to/yolo11n.onnx" --scenario=sync --input_shapes="[1,3,640,640]" ``` This command will provide you with vital performance metrics. For more details, see the [Benchmarking Performance section](#step-4-benchmarking-performance). -### Why should I use Neural Magic's DeepSparse with YOLOv8 for object detection tasks? 
+### Why should I use Neural Magic's DeepSparse with YOLO11 for object detection tasks? -Integrating Neural Magic's DeepSparse with YOLOv8 offers several benefits: +Integrating Neural Magic's DeepSparse with YOLO11 offers several benefits: -- **Enhanced Inference Speed:** Achieves up to 525 FPS, significantly speeding up YOLOv8's capabilities. +- **Enhanced Inference Speed:** Achieves up to 525 FPS, significantly speeding up YOLO11's capabilities. - **Optimized Model Efficiency:** Uses sparsity, pruning, and quantization techniques to reduce model size and computational needs while maintaining accuracy. - **High Performance on Standard CPUs:** Offers GPU-like performance on cost-effective CPU hardware. - **Streamlined Integration:** User-friendly tools for easy deployment and integration. -- **Flexibility:** Supports both standard and sparsity-optimized YOLOv8 models. +- **Flexibility:** Supports both standard and sparsity-optimized YOLO11 models. - **Cost-Effective:** Reduces operational expenses through efficient resource utilization. -For a deeper dive into these advantages, visit the [Benefits of Integrating Neural Magic's DeepSparse with YOLOv8 section](#benefits-of-integrating-neural-magics-deepsparse-with-yolov8). +For a deeper dive into these advantages, visit the [Benefits of Integrating Neural Magic's DeepSparse with YOLO11 section](#benefits-of-integrating-neural-magics-deepsparse-with-yolo11). diff --git a/docs/en/integrations/onnx.md b/docs/en/integrations/onnx.md index 3bb372ac2a..fbff328d02 100644 --- a/docs/en/integrations/onnx.md +++ b/docs/en/integrations/onnx.md @@ -1,14 +1,14 @@ --- comments: true -description: Learn how to export YOLOv8 models to ONNX format for flexible deployment across various platforms with enhanced performance. -keywords: YOLOv8, ONNX, model export, Ultralytics, ONNX Runtime, machine learning, model deployment, computer vision, deep learning +description: Learn how to export YOLO11 models to ONNX format for flexible deployment across various platforms with enhanced performance. +keywords: YOLO11, ONNX, model export, Ultralytics, ONNX Runtime, machine learning, model deployment, computer vision, deep learning --- -# ONNX Export for YOLOv8 Models +# ONNX Export for YOLO11 Models Often, when deploying [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) models, you'll need a model format that's both flexible and compatible with multiple platforms. -Exporting [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models to ONNX format streamlines deployment and ensures optimal performance across various environments. This guide will show you how to easily convert your YOLOv8 models to ONNX and enhance their scalability and effectiveness in real-world applications. +Exporting [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics) models to ONNX format streamlines deployment and ensures optimal performance across various environments. This guide will show you how to easily convert your YOLO11 models to ONNX and enhance their scalability and effectiveness in real-world applications. ## ONNX and ONNX Runtime @@ -44,7 +44,7 @@ The ability of ONNX to handle various formats can be attributed to the following ## Common Usage of ONNX -Before we jump into how to export YOLOv8 models to the ONNX format, let's take a look at where ONNX models are usually used. +Before we jump into how to export YOLO11 models to the ONNX format, let's take a look at where ONNX models are usually used. 
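As a concrete illustration of the most common case, the sketch below loads an exported model directly with the `onnxruntime` Python package and runs it on a dummy input. The 640×640 input shape and random data are assumptions for illustration only; real inference needs letterbox pre-processing and box decoding, both of which the `YOLO("yolo11n.onnx")` wrapper shown later in this guide handles for you.

```python
import numpy as np
import onnxruntime as ort

# Load the exported model with the default CPU execution provider
session = ort.InferenceSession("yolo11n.onnx", providers=["CPUExecutionProvider"])

# Query the input name and shape rather than hard-coding them
input_meta = session.get_inputs()[0]
print(input_meta.name, input_meta.shape)

# Dummy NCHW float32 batch; real use needs letterboxed, 0-1 scaled images
x = np.random.rand(1, 3, 640, 640).astype(np.float32)

# Raw network outputs; decoding boxes and classes is left to post-processing
outputs = session.run(None, {input_meta.name: x})
print([out.shape for out in outputs])
```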
### CPU Deployment @@ -60,9 +60,9 @@ While ONNX models are commonly used on CPUs, they can also be deployed on the fo - **Web Browsers**: ONNX can run directly in web browsers, powering interactive and dynamic web-based AI applications. -## Exporting YOLOv8 Models to ONNX +## Exporting YOLO11 Models to ONNX -You can expand model compatibility and deployment flexibility by converting YOLOv8 models to ONNX format. +You can expand model compatibility and deployment flexibility by converting YOLO11 models to ONNX format. ### Installation @@ -73,15 +73,15 @@ To install the required package, run: === "CLI" ```bash - # Install the required package for YOLOv8 + # Install the required package for YOLO11 pip install ultralytics ``` -For detailed instructions and best practices related to the installation process, check our [YOLOv8 Installation guide](../quickstart.md). While installing the required packages for YOLOv8, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. +For detailed instructions and best practices related to the installation process, check our [YOLO11 Installation guide](../quickstart.md). While installing the required packages for YOLO11, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. ### Usage -Before diving into the usage instructions, be sure to check out the range of [YOLOv8 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements. +Before diving into the usage instructions, be sure to check out the range of [YOLO11 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements. !!! example "Usage" @@ -90,14 +90,14 @@ Before diving into the usage instructions, be sure to check out the range of [YO ```python from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Export the model to ONNX format - model.export(format="onnx") # creates 'yolov8n.onnx' + model.export(format="onnx") # creates 'yolo11n.onnx' # Load the exported ONNX model - onnx_model = YOLO("yolov8n.onnx") + onnx_model = YOLO("yolo11n.onnx") # Run inference results = onnx_model("https://ultralytics.com/images/bus.jpg") @@ -106,18 +106,18 @@ Before diving into the usage instructions, be sure to check out the range of [YO === "CLI" ```bash - # Export a YOLOv8n PyTorch model to ONNX format - yolo export model=yolov8n.pt format=onnx # creates 'yolov8n.onnx' + # Export a YOLO11n PyTorch model to ONNX format + yolo export model=yolo11n.pt format=onnx # creates 'yolo11n.onnx' # Run inference with the exported model - yolo predict model=yolov8n.onnx source='https://ultralytics.com/images/bus.jpg' + yolo predict model=yolo11n.onnx source='https://ultralytics.com/images/bus.jpg' ``` For more details about the export process, visit the [Ultralytics documentation page on exporting](../modes/export.md). -## Deploying Exported YOLOv8 ONNX Models +## Deploying Exported YOLO11 ONNX Models -Once you've successfully exported your Ultralytics YOLOv8 models to ONNX format, the next step is deploying these models in various environments. 
For detailed instructions on deploying your ONNX models, take a look at the following resources: +Once you've successfully exported your Ultralytics YOLO11 models to ONNX format, the next step is deploying these models in various environments. For detailed instructions on deploying your ONNX models, take a look at the following resources: - **[ONNX Runtime Python API Documentation](https://onnxruntime.ai/docs/api/python/api_summary.html)**: This guide provides essential information for loading and running ONNX models using ONNX Runtime. @@ -127,17 +127,17 @@ Once you've successfully exported your Ultralytics YOLOv8 models to ONNX format, ## Summary -In this guide, you've learned how to export Ultralytics YOLOv8 models to ONNX format to increase their interoperability and performance across various platforms. You were also introduced to the ONNX Runtime and ONNX deployment options. +In this guide, you've learned how to export Ultralytics YOLO11 models to ONNX format to increase their interoperability and performance across various platforms. You were also introduced to the ONNX Runtime and ONNX deployment options. For further details on usage, visit the [ONNX official documentation](https://onnx.ai/onnx/intro/). -Also, if you'd like to know more about other Ultralytics YOLOv8 integrations, visit our [integration guide page](../integrations/index.md). You'll find plenty of useful resources and insights there. +Also, if you'd like to know more about other Ultralytics YOLO11 integrations, visit our [integration guide page](../integrations/index.md). You'll find plenty of useful resources and insights there. ## FAQ -### How do I export YOLOv8 models to ONNX format using Ultralytics? +### How do I export YOLO11 models to ONNX format using Ultralytics? -To export your YOLOv8 models to ONNX format using Ultralytics, follow these steps: +To export your YOLO11 models to ONNX format using Ultralytics, follow these steps: !!! example "Usage" @@ -146,14 +146,14 @@ To export your YOLOv8 models to ONNX format using Ultralytics, follow these step ```python from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Export the model to ONNX format - model.export(format="onnx") # creates 'yolov8n.onnx' + model.export(format="onnx") # creates 'yolo11n.onnx' # Load the exported ONNX model - onnx_model = YOLO("yolov8n.onnx") + onnx_model = YOLO("yolo11n.onnx") # Run inference results = onnx_model("https://ultralytics.com/images/bus.jpg") @@ -162,18 +162,18 @@ To export your YOLOv8 models to ONNX format using Ultralytics, follow these step === "CLI" ```bash - # Export a YOLOv8n PyTorch model to ONNX format - yolo export model=yolov8n.pt format=onnx # creates 'yolov8n.onnx' + # Export a YOLO11n PyTorch model to ONNX format + yolo export model=yolo11n.pt format=onnx # creates 'yolo11n.onnx' # Run inference with the exported model - yolo predict model=yolov8n.onnx source='https://ultralytics.com/images/bus.jpg' + yolo predict model=yolo11n.onnx source='https://ultralytics.com/images/bus.jpg' ``` For more details, visit the [export documentation](../modes/export.md). -### What are the advantages of using ONNX Runtime for deploying YOLOv8 models? +### What are the advantages of using ONNX Runtime for deploying YOLO11 models? 
-Using ONNX Runtime for deploying YOLOv8 models offers several advantages: +Using ONNX Runtime for deploying YOLO11 models offers several advantages: - **Cross-platform compatibility**: ONNX Runtime supports various platforms, such as Windows, macOS, and Linux, ensuring your models run smoothly across different environments. - **Hardware acceleration**: ONNX Runtime can leverage hardware-specific optimizations for CPUs, GPUs, and dedicated accelerators, providing high-performance inference. @@ -181,9 +181,9 @@ Using ONNX Runtime for deploying YOLOv8 models offers several advantages: Learn more by checking the [ONNX Runtime documentation](https://onnxruntime.ai/docs/api/python/api_summary.html). -### What deployment options are available for YOLOv8 models exported to ONNX? +### What deployment options are available for YOLO11 models exported to ONNX? -YOLOv8 models exported to ONNX can be deployed on various platforms including: +YOLO11 models exported to ONNX can be deployed on various platforms including: - **CPUs**: Utilizing ONNX Runtime for optimized CPU inference. - **GPUs**: Leveraging NVIDIA CUDA for high-performance GPU acceleration. @@ -192,19 +192,19 @@ YOLOv8 models exported to ONNX can be deployed on various platforms including: For more information, explore our guide on [model deployment options](../guides/model-deployment-options.md). -### Why should I use ONNX format for Ultralytics YOLOv8 models? +### Why should I use ONNX format for Ultralytics YOLO11 models? -Using ONNX format for Ultralytics YOLOv8 models provides numerous benefits: +Using ONNX format for Ultralytics YOLO11 models provides numerous benefits: - **Interoperability**: ONNX allows models to be transferred between different machine learning frameworks seamlessly. - **Performance Optimization**: ONNX Runtime can enhance model performance by utilizing hardware-specific optimizations. - **Flexibility**: ONNX supports various deployment environments, enabling you to use the same model on different platforms without modification. -Refer to the comprehensive guide on [exporting YOLOv8 models to ONNX](https://www.ultralytics.com/blog/export-and-optimize-a-yolov8-model-for-inference-on-openvino). +Refer to the comprehensive guide on [exporting YOLO11 models to ONNX](https://www.ultralytics.com/blog/export-and-optimize-a-yolov8-model-for-inference-on-openvino). -### How can I troubleshoot issues when exporting YOLOv8 models to ONNX? +### How can I troubleshoot issues when exporting YOLO11 models to ONNX? -When exporting YOLOv8 models to ONNX, you might encounter common issues such as mismatched dependencies or unsupported operations. To troubleshoot these problems: +When exporting YOLO11 models to ONNX, you might encounter common issues such as mismatched dependencies or unsupported operations. To troubleshoot these problems: 1. Verify that you have the correct version of required dependencies installed. 2. Check the official [ONNX documentation](https://onnx.ai/onnx/intro/) for supported operators and features. diff --git a/docs/en/integrations/paddlepaddle.md b/docs/en/integrations/paddlepaddle.md index 62092df9f5..c88bacde89 100644 --- a/docs/en/integrations/paddlepaddle.md +++ b/docs/en/integrations/paddlepaddle.md @@ -1,12 +1,12 @@ --- comments: true -description: Learn how to export YOLOv8 models to PaddlePaddle format for enhanced performance, flexibility, and deployment across various platforms and devices. 
-keywords: YOLOv8, PaddlePaddle, export models, computer vision, deep learning, model deployment, performance optimization +description: Learn how to export YOLO11 models to PaddlePaddle format for enhanced performance, flexibility, and deployment across various platforms and devices. +keywords: YOLO11, PaddlePaddle, export models, computer vision, deep learning, model deployment, performance optimization --- -# How to Export to PaddlePaddle Format from YOLOv8 Models +# How to Export to PaddlePaddle Format from YOLO11 Models -Bridging the gap between developing and deploying [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) models in real-world scenarios with varying conditions can be difficult. PaddlePaddle makes this process easier with its focus on flexibility, performance, and its capability for parallel processing in distributed environments. This means you can use your YOLOv8 computer vision models on a wide variety of devices and platforms, from smartphones to cloud-based servers. +Bridging the gap between developing and deploying [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) models in real-world scenarios with varying conditions can be difficult. PaddlePaddle makes this process easier with its focus on flexibility, performance, and its capability for parallel processing in distributed environments. This means you can use your YOLO11 computer vision models on a wide variety of devices and platforms, from smartphones to cloud-based servers.


@@ -16,10 +16,10 @@ Bridging the gap between developing and deploying [computer vision](https://www. allowfullscreen>
- Watch: How to Export Ultralytics YOLOv8 Models to PaddlePaddle Format | Key Features of PaddlePaddle Format + Watch: How to Export Ultralytics YOLO11 Models to PaddlePaddle Format | Key Features of PaddlePaddle Format

-The ability to export to PaddlePaddle model format allows you to optimize your [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models for use within the PaddlePaddle framework. PaddlePaddle is known for facilitating industrial deployments and is a good choice for deploying computer vision applications in real-world settings across various domains. +The ability to export to PaddlePaddle model format allows you to optimize your [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics) models for use within the PaddlePaddle framework. PaddlePaddle is known for facilitating industrial deployments and is a good choice for deploying computer vision applications in real-world settings across various domains. ## Why should you export to PaddlePaddle? @@ -31,7 +31,7 @@ Developed by Baidu, [PaddlePaddle](https://www.paddlepaddle.org.cn/en) (**PA**ra It offers tools and resources similar to popular frameworks like [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) and [PyTorch](https://www.ultralytics.com/glossary/pytorch), making it accessible for developers of all experience levels. From farming and factories to service businesses, PaddlePaddle's large developer community of over 4.77 million is helping create and deploy AI applications. -By exporting your Ultralytics YOLOv8 models to PaddlePaddle format, you can tap into PaddlePaddle's strengths in performance optimization. PaddlePaddle prioritizes efficient model execution and reduced memory usage. As a result, your YOLOv8 models can potentially achieve even better performance, delivering top-notch results in practical scenarios. +By exporting your Ultralytics YOLO11 models to PaddlePaddle format, you can tap into PaddlePaddle's strengths in performance optimization. PaddlePaddle prioritizes efficient model execution and reduced memory usage. As a result, your YOLO11 models can potentially achieve even better performance, delivering top-notch results in practical scenarios. ## Key Features of PaddlePaddle Models @@ -45,7 +45,7 @@ PaddlePaddle models offer a range of key features that contribute to their flexi ## Deployment Options in PaddlePaddle -Before diving into the code for exporting YOLOv8 models to PaddlePaddle, let's take a look at the different deployment scenarios in which PaddlePaddle models excel. +Before diving into the code for exporting YOLO11 models to PaddlePaddle, let's take a look at the different deployment scenarios in which PaddlePaddle models excel. PaddlePaddle provides a range of options, each offering a distinct balance of ease of use, flexibility, and performance: @@ -57,9 +57,9 @@ PaddlePaddle provides a range of options, each offering a distinct balance of ea - **Paddle.js**: Paddle.js enables you to deploy PaddlePaddle models directly within web browsers. Paddle.js can either load a pre-trained model or transform a model from [paddle-hub](https://github.com/PaddlePaddle/PaddleHub) with model transforming tools provided by Paddle.js. It can run in browsers that support WebGL/WebGPU/WebAssembly. -## Export to PaddlePaddle: Converting Your YOLOv8 Model +## Export to PaddlePaddle: Converting Your YOLO11 Model -Converting YOLOv8 models to the PaddlePaddle format can improve execution flexibility and optimize performance for various deployment scenarios. +Converting YOLO11 models to the PaddlePaddle format can improve execution flexibility and optimize performance for various deployment scenarios. 
### Installation @@ -70,15 +70,15 @@ To install the required package, run: === "CLI" ```bash - # Install the required package for YOLOv8 + # Install the required package for YOLO11 pip install ultralytics ``` -For detailed instructions and best practices related to the installation process, check our [Ultralytics Installation guide](../quickstart.md). While installing the required packages for YOLOv8, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. +For detailed instructions and best practices related to the installation process, check our [Ultralytics Installation guide](../quickstart.md). While installing the required packages for YOLO11, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. ### Usage -Before diving into the usage instructions, it's important to note that while all [Ultralytics YOLOv8 models](../models/index.md) are available for exporting, you can ensure that the model you select supports export functionality [here](../modes/export.md). +Before diving into the usage instructions, it's important to note that while all [Ultralytics YOLO11 models](../models/index.md) are available for exporting, you can ensure that the model you select supports export functionality [here](../modes/export.md). !!! example "Usage" @@ -87,14 +87,14 @@ Before diving into the usage instructions, it's important to note that while all ```python from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Export the model to PaddlePaddle format - model.export(format="paddle") # creates '/yolov8n_paddle_model' + model.export(format="paddle") # creates '/yolo11n_paddle_model' # Load the exported PaddlePaddle model - paddle_model = YOLO("./yolov8n_paddle_model") + paddle_model = YOLO("./yolo11n_paddle_model") # Run inference results = paddle_model("https://ultralytics.com/images/bus.jpg") @@ -103,18 +103,18 @@ Before diving into the usage instructions, it's important to note that while all === "CLI" ```bash - # Export a YOLOv8n PyTorch model to PaddlePaddle format - yolo export model=yolov8n.pt format=paddle # creates '/yolov8n_paddle_model' + # Export a YOLO11n PyTorch model to PaddlePaddle format + yolo export model=yolo11n.pt format=paddle # creates '/yolo11n_paddle_model' # Run inference with the exported model - yolo predict model='./yolov8n_paddle_model' source='https://ultralytics.com/images/bus.jpg' + yolo predict model='./yolo11n_paddle_model' source='https://ultralytics.com/images/bus.jpg' ``` For more details about supported export options, visit the [Ultralytics documentation page on deployment options](../guides/model-deployment-options.md). -## Deploying Exported YOLOv8 PaddlePaddle Models +## Deploying Exported YOLO11 PaddlePaddle Models -After successfully exporting your Ultralytics YOLOv8 models to PaddlePaddle format, you can now deploy them. The primary and recommended first step for running a PaddlePaddle model is to use the YOLO("./model_paddle_model") method, as outlined in the previous usage code snippet. +After successfully exporting your Ultralytics YOLO11 models to PaddlePaddle format, you can now deploy them. The primary and recommended first step for running a PaddlePaddle model is to use the YOLO("./model_paddle_model") method, as outlined in the previous usage code snippet. 
However, for in-depth instructions on deploying your PaddlePaddle models in various other settings, take a look at the following resources: @@ -126,17 +126,17 @@ However, for in-depth instructions on deploying your PaddlePaddle models in vari ## Summary -In this guide, we explored the process of exporting Ultralytics YOLOv8 models to the PaddlePaddle format. By following these steps, you can leverage PaddlePaddle's strengths in diverse deployment scenarios, optimizing your models for different hardware and software environments. +In this guide, we explored the process of exporting Ultralytics YOLO11 models to the PaddlePaddle format. By following these steps, you can leverage PaddlePaddle's strengths in diverse deployment scenarios, optimizing your models for different hardware and software environments. For further details on usage, visit the [PaddlePaddle official documentation](https://www.paddlepaddle.org.cn/documentation/docs/en/guides/index_en.html) -Want to explore more ways to integrate your Ultralytics YOLOv8 models? Our [integration guide page](index.md) explores various options, equipping you with valuable resources and insights. +Want to explore more ways to integrate your Ultralytics YOLO11 models? Our [integration guide page](index.md) explores various options, equipping you with valuable resources and insights. ## FAQ -### How do I export Ultralytics YOLOv8 models to PaddlePaddle format? +### How do I export Ultralytics YOLO11 models to PaddlePaddle format? -Exporting Ultralytics YOLOv8 models to PaddlePaddle format is straightforward. You can use the `export` method of the YOLO class to perform this exportation. Here is an example using Python: +Exporting Ultralytics YOLO11 models to PaddlePaddle format is straightforward. You can use the `export` method of the YOLO class to perform this exportation. Here is an example using Python: !!! example "Usage" @@ -145,14 +145,14 @@ Exporting Ultralytics YOLOv8 models to PaddlePaddle format is straightforward. Y ```python from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Export the model to PaddlePaddle format - model.export(format="paddle") # creates '/yolov8n_paddle_model' + model.export(format="paddle") # creates '/yolo11n_paddle_model' # Load the exported PaddlePaddle model - paddle_model = YOLO("./yolov8n_paddle_model") + paddle_model = YOLO("./yolo11n_paddle_model") # Run inference results = paddle_model("https://ultralytics.com/images/bus.jpg") @@ -161,11 +161,11 @@ Exporting Ultralytics YOLOv8 models to PaddlePaddle format is straightforward. Y === "CLI" ```bash - # Export a YOLOv8n PyTorch model to PaddlePaddle format - yolo export model=yolov8n.pt format=paddle # creates '/yolov8n_paddle_model' + # Export a YOLO11n PyTorch model to PaddlePaddle format + yolo export model=yolo11n.pt format=paddle # creates '/yolo11n_paddle_model' # Run inference with the exported model - yolo predict model='./yolov8n_paddle_model' source='https://ultralytics.com/images/bus.jpg' + yolo predict model='./yolo11n_paddle_model' source='https://ultralytics.com/images/bus.jpg' ``` For more detailed setup and troubleshooting, check the [Ultralytics Installation Guide](../quickstart.md) and [Common Issues Guide](../guides/yolo-common-issues.md). @@ -179,17 +179,17 @@ PaddlePaddle offers several key advantages for model deployment: - **Operator Fusion**: By merging compatible operations, it reduces computational overhead. 
- **Quantization Techniques**: Supports both post-training and quantization-aware training, enabling lower-[precision](https://www.ultralytics.com/glossary/precision) data representations for improved performance. -You can achieve enhanced results by exporting your Ultralytics YOLOv8 models to PaddlePaddle, ensuring flexibility and high performance across various applications and hardware platforms. Learn more about PaddlePaddle's features [here](https://www.paddlepaddle.org.cn/en). +You can achieve enhanced results by exporting your Ultralytics YOLO11 models to PaddlePaddle, ensuring flexibility and high performance across various applications and hardware platforms. Learn more about PaddlePaddle's features [here](https://www.paddlepaddle.org.cn/en). -### Why should I choose PaddlePaddle for deploying my YOLOv8 models? +### Why should I choose PaddlePaddle for deploying my YOLO11 models? -PaddlePaddle, developed by Baidu, is optimized for industrial and commercial AI deployments. Its large developer community and robust framework provide extensive tools similar to TensorFlow and PyTorch. By exporting your YOLOv8 models to PaddlePaddle, you leverage: +PaddlePaddle, developed by Baidu, is optimized for industrial and commercial AI deployments. Its large developer community and robust framework provide extensive tools similar to TensorFlow and PyTorch. By exporting your YOLO11 models to PaddlePaddle, you leverage: - **Enhanced Performance**: Optimal execution speed and reduced memory footprint. - **Flexibility**: Wide compatibility with various devices from smartphones to cloud servers. - **Scalability**: Efficient parallel processing capabilities for distributed environments. -These features make PaddlePaddle a compelling choice for deploying YOLOv8 models in production settings. +These features make PaddlePaddle a compelling choice for deploying YOLO11 models in production settings. ### How does PaddlePaddle improve model performance over other frameworks? @@ -199,9 +199,9 @@ PaddlePaddle employs several advanced techniques to optimize model performance: - **Operator Fusion**: Combines compatible operations to minimize memory transfer and increase inference speed. - **Quantization**: Reduces model size and increases efficiency using lower-precision data while maintaining [accuracy](https://www.ultralytics.com/glossary/accuracy). -These techniques prioritize efficient model execution, making PaddlePaddle an excellent option for deploying high-performance YOLOv8 models. For more on optimization, see the [PaddlePaddle official documentation](https://www.paddlepaddle.org.cn/documentation/docs/en/guides/index_en.html). +These techniques prioritize efficient model execution, making PaddlePaddle an excellent option for deploying high-performance YOLO11 models. For more on optimization, see the [PaddlePaddle official documentation](https://www.paddlepaddle.org.cn/documentation/docs/en/guides/index_en.html). -### What deployment options does PaddlePaddle offer for YOLOv8 models? +### What deployment options does PaddlePaddle offer for YOLO11 models? PaddlePaddle provides flexible deployment options: diff --git a/docs/en/integrations/paperspace.md b/docs/en/integrations/paperspace.md index 7c67d9bbff..f6f9117a6c 100644 --- a/docs/en/integrations/paperspace.md +++ b/docs/en/integrations/paperspace.md @@ -1,14 +1,14 @@ --- comments: true -description: Simplify YOLOv8 training with Paperspace Gradient's all-in-one MLOps platform. Access GPUs, automate workflows, and deploy with ease. 
-keywords: YOLOv8, Paperspace Gradient, MLOps, machine learning, training, GPUs, Jupyter notebooks, model deployment, AI, cloud platform +description: Simplify YOLO11 training with Paperspace Gradient's all-in-one MLOps platform. Access GPUs, automate workflows, and deploy with ease. +keywords: YOLO11, Paperspace Gradient, MLOps, machine learning, training, GPUs, Jupyter notebooks, model deployment, AI, cloud platform --- -# YOLOv8 Model Training Made Simple with Paperspace Gradient +# YOLO11 Model Training Made Simple with Paperspace Gradient -Training computer vision models like [YOLOv8](https://github.com/ultralytics/ultralytics) can be complicated. It involves managing large datasets, using different types of computer hardware like GPUs, TPUs, and CPUs, and making sure data flows smoothly during the training process. Typically, developers end up spending a lot of time managing their computer systems and environments. It can be frustrating when you just want to focus on building the best model. +Training computer vision models like [YOLO11](https://github.com/ultralytics/ultralytics) can be complicated. It involves managing large datasets, using different types of computer hardware like GPUs, TPUs, and CPUs, and making sure data flows smoothly during the training process. Typically, developers end up spending a lot of time managing their computer systems and environments. It can be frustrating when you just want to focus on building the best model. -This is where a platform like Paperspace Gradient can make things simpler. Paperspace Gradient is a MLOps platform that lets you build, train, and deploy [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) models all in one place. With Gradient, developers can focus on training their YOLOv8 models without the hassle of managing infrastructure and environments. +This is where a platform like Paperspace Gradient can make things simpler. Paperspace Gradient is an MLOps platform that lets you build, train, and deploy [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) models all in one place. With Gradient, developers can focus on training their YOLO11 models without the hassle of managing infrastructure and environments. ## Paperspace @@ -28,15 +28,15 @@ Paperspace Gradient is a suite of tools designed to make working with AI and mac Within its toolkit, it includes support for Google's TPUs via a job runner, comprehensive support for Jupyter notebooks and containers, and new programming language integrations. Its focus on language integration particularly stands out, allowing users to easily adapt their existing Python projects to use the most advanced GPU infrastructure available. -## Training YOLOv8 Using Paperspace Gradient -Paperspace Gradient makes training a YOLOv8 model possible with a few clicks. Thanks to the integration, you can access the [Paperspace console](https://console.paperspace.com/github/ultralytics/ultralytics) and start training your model immediately. For a detailed understanding of the model training process and best practices, refer to our [YOLOv8 Model Training guide](../modes/train.md). +## Training YOLO11 Using Paperspace Gradient +Paperspace Gradient makes training a YOLO11 model possible with a few clicks. Thanks to the integration, you can access the [Paperspace console](https://console.paperspace.com/github/ultralytics/ultralytics) and start training your model immediately.
For a detailed understanding of the model training process and best practices, refer to our [YOLO11 Model Training guide](../modes/train.md). Sign in and then click on the “Start Machine” button shown in the image below. In a few seconds, a managed GPU environment will start up, and then you can run the notebook's cells. -![Training YOLOv8 Using Paperspace Gradient](https://github.com/ultralytics/docs/releases/download/0/start-machine-button.avif) +![Training YOLO11 Using Paperspace Gradient](https://github.com/ultralytics/docs/releases/download/0/start-machine-button.avif) -Explore more capabilities of YOLOv8 and Paperspace Gradient in a discussion with Glenn Jocher, Ultralytics founder, and James Skelton from Paperspace. Watch the discussion below. +Explore more capabilities of YOLO11 and Paperspace Gradient in a discussion with Glenn Jocher, Ultralytics founder, and James Skelton from Paperspace. Watch the discussion below.


@@ -46,14 +46,14 @@ Explore more capabilities of YOLOv8 and Paperspace Gradient in a discussion with allowfullscreen>
- Watch: Ultralytics Live Session 7: It's All About the Environment: Optimizing YOLOv8 Training With Gradient + Watch: Ultralytics Live Session 7: It's All About the Environment: Optimizing YOLO11 Training With Gradient

## Key Features of Paperspace Gradient As you explore the Paperspace console, you'll see how each step of the machine-learning workflow is supported and enhanced. Here are some things to look out for: -- **One-Click Notebooks:** Gradient provides pre-configured Jupyter Notebooks specifically tailored for YOLOv8, eliminating the need for environment setup and dependency management. Simply choose the desired notebook and start experimenting immediately. +- **One-Click Notebooks:** Gradient provides pre-configured Jupyter Notebooks specifically tailored for YOLO11, eliminating the need for environment setup and dependency management. Simply choose the desired notebook and start experimenting immediately. - **Hardware Flexibility:** Choose from a range of machine types with varying CPU, GPU, and TPU configurations to suit your training needs and budget. Gradient handles all the backend setup, allowing you to focus on model development. @@ -61,13 +61,13 @@ As you explore the Paperspace console, you'll see how each step of the machine-l - **Dataset Management:** Efficiently manage your datasets directly within Gradient. Upload, version, and pre-process data with ease, streamlining the data preparation phase of your project. -- **Model Serving:** Deploy your trained YOLOv8 models as REST APIs with just a few clicks. Gradient handles the infrastructure, allowing you to easily integrate your [object detection](https://www.ultralytics.com/glossary/object-detection) models into your applications. +- **Model Serving:** Deploy your trained YOLO11 models as REST APIs with just a few clicks. Gradient handles the infrastructure, allowing you to easily integrate your [object detection](https://www.ultralytics.com/glossary/object-detection) models into your applications. - **Real-time Monitoring:** Monitor the performance and health of your deployed models through Gradient's intuitive dashboard. Gain insights into inference speed, resource utilization, and potential errors. -## Why Should You Use Gradient for Your YOLOv8 Projects? +## Why Should You Use Gradient for Your YOLO11 Projects? -While many options are available for training, deploying, and evaluating YOLOv8 models, the integration with Paperspace Gradient offers a unique set of advantages that separates it from other solutions. Let's explore what makes this integration unique: +While many options are available for training, deploying, and evaluating YOLO11 models, the integration with Paperspace Gradient offers a unique set of advantages that separates it from other solutions. Let's explore what makes this integration unique: - **Enhanced Collaboration:** Shared workspaces and version control facilitate seamless teamwork and ensure reproducibility, allowing your team to work together effectively and maintain a clear history of your project. @@ -79,37 +79,37 @@ While many options are available for training, deploying, and evaluating YOLOv8 ## Summary -This guide explored the Paperspace Gradient integration for training YOLOv8 models. Gradient provides the tools and infrastructure to accelerate your AI development journey from effortless model training and evaluation to streamlined deployment options. +This guide explored the Paperspace Gradient integration for training YOLO11 models. Gradient provides the tools and infrastructure to accelerate your AI development journey from effortless model training and evaluation to streamlined deployment options. 
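To make this concrete: the cells that Gradient's pre-configured notebooks run for you come down to ordinary Ultralytics calls. A minimal sketch of such a training cell, with an illustrative dataset YAML and training budget:

```python
from ultralytics import YOLO

# Placeholder values: substitute your own dataset YAML, epochs, and image size
model = YOLO("yolo11n.pt")  # start from pretrained nano weights
results = model.train(data="coco8.yaml", epochs=100, imgsz=640)
```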
For further exploration, visit [Paperspace's official documentation](https://docs.digitalocean.com/products/paperspace/). -Also, visit the [Ultralytics integration guide page](index.md) to learn more about different YOLOv8 integrations. It's full of insights and tips to take your [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) projects to the next level. +Also, visit the [Ultralytics integration guide page](index.md) to learn more about different YOLO11 integrations. It's full of insights and tips to take your [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) projects to the next level. ## FAQ -### How do I train a YOLOv8 model using Paperspace Gradient? +### How do I train a YOLO11 model using Paperspace Gradient? -Training a YOLOv8 model with Paperspace Gradient is straightforward and efficient. First, sign in to the [Paperspace console](https://console.paperspace.com/github/ultralytics/ultralytics). Next, click the “Start Machine” button to initiate a managed GPU environment. Once the environment is ready, you can run the notebook's cells to start training your YOLOv8 model. For detailed instructions, refer to our [YOLOv8 Model Training guide](../modes/train.md). +Training a YOLO11 model with Paperspace Gradient is straightforward and efficient. First, sign in to the [Paperspace console](https://console.paperspace.com/github/ultralytics/ultralytics). Next, click the “Start Machine” button to initiate a managed GPU environment. Once the environment is ready, you can run the notebook's cells to start training your YOLO11 model. For detailed instructions, refer to our [YOLO11 Model Training guide](../modes/train.md). -### What are the advantages of using Paperspace Gradient for YOLOv8 projects? +### What are the advantages of using Paperspace Gradient for YOLO11 projects? -Paperspace Gradient offers several unique advantages for training and deploying YOLOv8 models: +Paperspace Gradient offers several unique advantages for training and deploying YOLO11 models: - **Hardware Flexibility:** Choose from various CPU, GPU, and TPU configurations. -- **One-Click Notebooks:** Use pre-configured Jupyter Notebooks for YOLOv8 without worrying about environment setup. +- **One-Click Notebooks:** Use pre-configured Jupyter Notebooks for YOLO11 without worrying about environment setup. - **Experiment Tracking:** Automatic tracking of hyperparameters, metrics, and code changes. - **Dataset Management:** Efficiently manage your datasets within Gradient. - **Model Serving:** Deploy models as REST APIs easily. - **Real-time Monitoring:** Monitor model performance and resource utilization through a dashboard. -### Why should I choose Ultralytics YOLOv8 over other object detection models? +### Why should I choose Ultralytics YOLO11 over other object detection models? -Ultralytics YOLOv8 stands out for its real-time object detection capabilities and high [accuracy](https://www.ultralytics.com/glossary/accuracy). Its seamless integration with platforms like Paperspace Gradient enhances productivity by simplifying the training and deployment process. YOLOv8 supports various use cases, from security systems to retail inventory management. Explore more about YOLOv8's advantages [here](https://www.ultralytics.com/yolo). +Ultralytics YOLO11 stands out for its real-time object detection capabilities and high [accuracy](https://www.ultralytics.com/glossary/accuracy).
Its seamless integration with platforms like Paperspace Gradient enhances productivity by simplifying the training and deployment process. YOLO11 supports various use cases, from security systems to retail inventory management. Explore more about YOLO11's advantages [here](https://www.ultralytics.com/yolo). -### Can I deploy my YOLOv8 model on edge devices using Paperspace Gradient? +### Can I deploy my YOLO11 model on edge devices using Paperspace Gradient? -Yes, you can deploy YOLOv8 models on edge devices using Paperspace Gradient. The platform supports various deployment formats like TFLite and Edge TPU, which are optimized for edge devices. After training your model on Gradient, refer to our [export guide](../modes/export.md) for instructions on converting your model to the desired format. +Yes, you can deploy YOLO11 models on edge devices using Paperspace Gradient. The platform supports various deployment formats like TFLite and Edge TPU, which are optimized for edge devices. After training your model on Gradient, refer to our [export guide](../modes/export.md) for instructions on converting your model to the desired format. -### How does experiment tracking in Paperspace Gradient help improve YOLOv8 training? +### How does experiment tracking in Paperspace Gradient help improve YOLO11 training? Experiment tracking in Paperspace Gradient streamlines the model development process by automatically logging hyperparameters, metrics, and code changes. This allows you to easily compare different training runs, identify optimal configurations, and reproduce successful experiments. diff --git a/docs/en/integrations/ray-tune.md b/docs/en/integrations/ray-tune.md index 3dec5efeb6..86987f643c 100644 --- a/docs/en/integrations/ray-tune.md +++ b/docs/en/integrations/ray-tune.md @@ -1,16 +1,16 @@ --- comments: true -description: Optimize YOLOv8 model performance with Ray Tune. Learn efficient hyperparameter tuning using advanced search strategies, parallelism, and early stopping. -keywords: YOLOv8, Ray Tune, hyperparameter tuning, model optimization, machine learning, deep learning, AI, Ultralytics, Weights & Biases +description: Optimize YOLO11 model performance with Ray Tune. Learn efficient hyperparameter tuning using advanced search strategies, parallelism, and early stopping. +keywords: YOLO11, Ray Tune, hyperparameter tuning, model optimization, machine learning, deep learning, AI, Ultralytics, Weights & Biases --- -# Efficient [Hyperparameter Tuning](https://www.ultralytics.com/glossary/hyperparameter-tuning) with Ray Tune and YOLOv8 +# Efficient [Hyperparameter Tuning](https://www.ultralytics.com/glossary/hyperparameter-tuning) with Ray Tune and YOLO11 Hyperparameter tuning is vital in achieving peak model performance by discovering the optimal set of hyperparameters. This involves running trials with different hyperparameters and evaluating each trial's performance. -## Accelerate Tuning with Ultralytics YOLOv8 and Ray Tune +## Accelerate Tuning with Ultralytics YOLO11 and Ray Tune -[Ultralytics YOLOv8](https://www.ultralytics.com/) incorporates Ray Tune for hyperparameter tuning, streamlining the optimization of YOLOv8 model hyperparameters. With Ray Tune, you can utilize advanced search strategies, parallelism, and early stopping to expedite the tuning process. +[Ultralytics YOLO11](https://www.ultralytics.com/) incorporates Ray Tune for hyperparameter tuning, streamlining the optimization of YOLO11 model hyperparameters. 
With Ray Tune, you can utilize advanced search strategies, parallelism, and early stopping to expedite the tuning process. ### Ray Tune @@ -18,11 +18,11 @@ Hyperparameter tuning is vital in achieving peak model performance by discoverin Ray Tune Overview

-[Ray Tune](https://docs.ray.io/en/latest/tune/index.html) is a hyperparameter tuning library designed for efficiency and flexibility. It supports various search strategies, parallelism, and early stopping strategies, and seamlessly integrates with popular [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) frameworks, including Ultralytics YOLOv8. +[Ray Tune](https://docs.ray.io/en/latest/tune/index.html) is a hyperparameter tuning library designed for efficiency and flexibility. It supports various search strategies, parallelism, and early stopping strategies, and seamlessly integrates with popular [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) frameworks, including Ultralytics YOLO11. ### Integration with Weights & Biases -YOLOv8 also allows optional integration with [Weights & Biases](https://wandb.ai/site) for monitoring the tuning process. +YOLO11 also allows optional integration with [Weights & Biases](https://wandb.ai/site) for monitoring the tuning process. ## Installation @@ -49,21 +49,21 @@ To install the required packages, run: ```python from ultralytics import YOLO - # Load a YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a YOLO11n model + model = YOLO("yolo11n.pt") - # Start tuning hyperparameters for YOLOv8n training on the COCO8 dataset + # Start tuning hyperparameters for YOLO11n training on the COCO8 dataset result_grid = model.tune(data="coco8.yaml", use_ray=True) ``` ## `tune()` Method Parameters -The `tune()` method in YOLOv8 provides an easy-to-use interface for hyperparameter tuning with Ray Tune. It accepts several arguments that allow you to customize the tuning process. Below is a detailed explanation of each parameter: +The `tune()` method in YOLO11 provides an easy-to-use interface for hyperparameter tuning with Ray Tune. It accepts several arguments that allow you to customize the tuning process. Below is a detailed explanation of each parameter: | Parameter | Type | Description | Default Value | | --------------- | ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | | `data` | `str` | The dataset configuration file (in YAML format) to run the tuner on. This file should specify the training and [validation data](https://www.ultralytics.com/glossary/validation-data) paths, as well as other dataset-specific settings. | | -| `space` | `dict, optional` | A dictionary defining the hyperparameter search space for Ray Tune. Each key corresponds to a hyperparameter name, and the value specifies the range of values to explore during tuning. If not provided, YOLOv8 uses a default search space with various hyperparameters. | | +| `space` | `dict, optional` | A dictionary defining the hyperparameter search space for Ray Tune. Each key corresponds to a hyperparameter name, and the value specifies the range of values to explore during tuning. If not provided, YOLO11 uses a default search space with various hyperparameters. | | | `grace_period` | `int, optional` | The grace period in [epochs](https://www.ultralytics.com/glossary/epoch) for the [ASHA scheduler](https://docs.ray.io/en/latest/tune/api/schedulers.html) in Ray Tune. 
The scheduler will not terminate any trial before this number of epochs, allowing the model to have some minimum training before making a decision on early stopping. | 10 | | `gpu_per_trial` | `int, optional` | The number of GPUs to allocate per trial during tuning. This helps manage GPU usage, particularly in multi-GPU environments. If not provided, the tuner will use all available GPUs. | None | | `iterations` | `int, optional` | The maximum number of trials to run during tuning. This parameter helps control the total number of hyperparameter combinations tested, ensuring the tuning process does not run indefinitely. | 10 | @@ -73,7 +73,7 @@ By customizing these parameters, you can fine-tune the hyperparameter optimizati ## Default Search Space Description -The following table lists the default search space parameters for hyperparameter tuning in YOLOv8 with Ray Tune. Each parameter has a specific value range defined by `tune.uniform()`. +The following table lists the default search space parameters for hyperparameter tuning in YOLO11 with Ray Tune. Each parameter has a specific value range defined by `tune.uniform()`. | Parameter | Value Range | Description | | ----------------- | -------------------------- | --------------------------------------------------------------------------- | @@ -101,7 +101,7 @@ The following table lists the default search space parameters for hyperparameter ## Custom Search Space Example -In this example, we demonstrate how to use a custom search space for hyperparameter tuning with Ray Tune and YOLOv8. By providing a custom search space, you can focus the tuning process on specific hyperparameters of interest. +In this example, we demonstrate how to use a custom search space for hyperparameter tuning with Ray Tune and YOLO11. By providing a custom search space, you can focus the tuning process on specific hyperparameters of interest. !!! example "Usage" @@ -109,7 +109,7 @@ In this example, we demonstrate how to use a custom search space for hyperparame from ultralytics import YOLO # Define a YOLO model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Run Ray Tune on the model result_grid = model.tune( @@ -120,7 +120,7 @@ In this example, we demonstrate how to use a custom search space for hyperparame ) ``` -In the code snippet above, we create a YOLO model with the "yolov8n.pt" pretrained weights. Then, we call the `tune()` method, specifying the dataset configuration with "coco8.yaml". We provide a custom search space for the initial learning rate `lr0` using a dictionary with the key "lr0" and the value `tune.uniform(1e-5, 1e-1)`. Finally, we pass additional training arguments, such as the number of epochs directly to the tune method as `epochs=50`. +In the code snippet above, we create a YOLO model with the "yolo11n.pt" pretrained weights. Then, we call the `tune()` method, specifying the dataset configuration with "coco8.yaml". We provide a custom search space for the initial learning rate `lr0` using a dictionary with the key "lr0" and the value `tune.uniform(1e-5, 1e-1)`. Finally, we pass additional training arguments, such as the number of epochs directly to the tune method as `epochs=50`. ## Processing Ray Tune Results @@ -186,9 +186,9 @@ Explore further by looking into Ray Tune's [Analyze Results](https://docs.ray.io ## FAQ -### How do I tune the hyperparameters of my YOLOv8 model using Ray Tune? +### How do I tune the hyperparameters of my YOLO11 model using Ray Tune? 
-To tune the hyperparameters of your Ultralytics YOLOv8 model using Ray Tune, follow these steps: +To tune the hyperparameters of your Ultralytics YOLO11 model using Ray Tune, follow these steps: 1. **Install the required packages:** @@ -197,13 +197,13 @@ To tune the hyperparameters of your Ultralytics YOLOv8 model using Ray Tune, fol pip install wandb # optional for logging ``` -2. **Load your YOLOv8 model and start tuning:** +2. **Load your YOLO11 model and start tuning:** ```python from ultralytics import YOLO - # Load a YOLOv8 model - model = YOLO("yolov8n.pt") + # Load a YOLO11 model + model = YOLO("yolo11n.pt") # Start tuning with the COCO8 dataset result_grid = model.tune(data="coco8.yaml", use_ray=True) @@ -211,9 +211,9 @@ To tune the hyperparameters of your Ultralytics YOLOv8 model using Ray Tune, fol This utilizes Ray Tune's advanced search strategies and parallelism to efficiently optimize your model's hyperparameters. For more information, check out the [Ray Tune documentation](https://docs.ray.io/en/latest/tune/index.html). -### What are the default hyperparameters for YOLOv8 tuning with Ray Tune? +### What are the default hyperparameters for YOLO11 tuning with Ray Tune? -Ultralytics YOLOv8 uses the following default hyperparameters for tuning with Ray Tune: +Ultralytics YOLO11 uses the following default hyperparameters for tuning with Ray Tune: | Parameter | Value Range | Description | | --------------- | -------------------------- | ------------------------------ | @@ -229,9 +229,9 @@ Ultralytics YOLOv8 uses the following default hyperparameters for tuning with Ra These hyperparameters can be customized to suit your specific needs. For a complete list and more details, refer to the [Hyperparameter Tuning](../guides/hyperparameter-tuning.md) guide. -### How can I integrate Weights & Biases with my YOLOv8 model tuning? +### How can I integrate Weights & Biases with my YOLO11 model tuning? -To integrate Weights & Biases (W&B) with your Ultralytics YOLOv8 tuning process: +To integrate Weights & Biases (W&B) with your Ultralytics YOLO11 tuning process: 1. **Install W&B:** @@ -249,7 +249,7 @@ To integrate Weights & Biases (W&B) with your Ultralytics YOLOv8 tuning process: wandb.init(project="YOLO-Tuning", entity="your-entity") # Load YOLO model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Tune hyperparameters result_grid = model.tune(data="coco8.yaml", use_ray=True) @@ -257,7 +257,7 @@ To integrate Weights & Biases (W&B) with your Ultralytics YOLOv8 tuning process: This setup will allow you to monitor the tuning process, track hyperparameter configurations, and visualize results in W&B. -### Why should I use Ray Tune for hyperparameter optimization with YOLOv8? +### Why should I use Ray Tune for hyperparameter optimization with YOLO11? Ray Tune offers numerous advantages for hyperparameter optimization: @@ -265,18 +265,18 @@ Ray Tune offers numerous advantages for hyperparameter optimization: - **Parallelism:** Supports parallel execution of multiple trials, significantly speeding up the tuning process. - **Early Stopping:** Employs strategies like ASHA to terminate under-performing trials early, saving computational resources. -Ray Tune seamlessly integrates with Ultralytics YOLOv8, providing an easy-to-use interface for tuning hyperparameters effectively. To get started, check out the [Efficient Hyperparameter Tuning with Ray Tune and YOLOv8](../guides/hyperparameter-tuning.md) guide. 
+Ray Tune seamlessly integrates with Ultralytics YOLO11, providing an easy-to-use interface for tuning hyperparameters effectively. To get started, check out the [Efficient Hyperparameter Tuning with Ray Tune and YOLO11](../guides/hyperparameter-tuning.md) guide. -### How can I define a custom search space for YOLOv8 hyperparameter tuning? +### How can I define a custom search space for YOLO11 hyperparameter tuning? -To define a custom search space for your YOLOv8 hyperparameter tuning with Ray Tune: +To define a custom search space for your YOLO11 hyperparameter tuning with Ray Tune: ```python from ray import tune from ultralytics import YOLO -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") search_space = {"lr0": tune.uniform(1e-5, 1e-1), "momentum": tune.uniform(0.6, 0.98)} result_grid = model.tune(data="coco8.yaml", space=search_space, use_ray=True) ``` diff --git a/docs/en/integrations/roboflow.md b/docs/en/integrations/roboflow.md index 321e560164..921538859a 100644 --- a/docs/en/integrations/roboflow.md +++ b/docs/en/integrations/roboflow.md @@ -1,7 +1,7 @@ --- comments: true -description: Learn how to gather, label, and deploy data for custom YOLOv8 models using Roboflow's powerful tools. Optimize your computer vision pipeline effortlessly. -keywords: Roboflow, YOLOv8, data labeling, computer vision, model training, model deployment, dataset management, automated image annotation, AI tools +description: Learn how to gather, label, and deploy data for custom YOLO11 models using Roboflow's powerful tools. Optimize your computer vision pipeline effortlessly. +keywords: Roboflow, YOLO11, data labeling, computer vision, model training, model deployment, dataset management, automated image annotation, AI tools --- # Roboflow @@ -17,17 +17,17 @@ keywords: Roboflow, YOLOv8, data labeling, computer vision, model training, mode For more details see [Ultralytics Licensing](https://www.ultralytics.com/license). -In this guide, we are going to showcase how to find, label, and organize data for use in training a custom Ultralytics YOLOv8 model. Use the table of contents below to jump directly to a specific section: +In this guide, we are going to showcase how to find, label, and organize data for use in training a custom Ultralytics YOLO11 model. Use the table of contents below to jump directly to a specific section: -- Gather data for training a custom YOLOv8 model -- Upload, convert and label data for YOLOv8 format +- Gather data for training a custom YOLO11 model +- Upload, convert and label data for YOLO11 format - Pre-process and augment data for model robustness -- Dataset management for [YOLOv8](../models/yolov8.md) +- Dataset management for [YOLO11](../models/yolov8.md) - Export data in 40+ formats for model training -- Upload custom YOLOv8 model weights for testing and deployment -- Gather Data for Training a Custom YOLOv8 Model +- Upload custom YOLO11 model weights for testing and deployment +- Gather Data for Training a Custom YOLO11 Model -Roboflow provides two services that can help you collect data for YOLOv8 models: [Universe](https://universe.roboflow.com/?ref=ultralytics) and [Collect](https://github.com/roboflow/roboflow-collect?ref=ultralytics). +Roboflow provides two services that can help you collect data for YOLO11 models: [Universe](https://universe.roboflow.com/?ref=ultralytics) and [Collect](https://github.com/roboflow/roboflow-collect?ref=ultralytics). Universe is an online repository with over 250,000 vision datasets totalling over 100 million images. 
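Once you have picked a Universe dataset, the `roboflow` pip package can pull it straight into your training environment. A minimal sketch, assuming a hypothetical workspace, project, and API key; note that `"yolov8"` is the format identifier that has historically covered the YOLO TXT layout YOLO11 trains on, and newer package versions may offer a dedicated YOLO11 option.

```python
from roboflow import Roboflow  # pip install roboflow

rf = Roboflow(api_key="YOUR_API_KEY")

# "workspace-name", "project-name", and the version number are placeholders
project = rf.workspace("workspace-name").project("project-name")
dataset = project.version(1).download("yolov8")  # YOLO TXT labels plus data.yaml

print(dataset.location)  # local folder containing the train/valid/test splits
```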
@@ -41,21 +41,21 @@ With a [free Roboflow account](https://app.roboflow.com/?ref=ultralytics), you c Roboflow Universe dataset export

-For YOLOv8, select "YOLOv8" as the export format: +For YOLO11, select "YOLO11" as the export format:

Roboflow Universe dataset export

-Universe also has a page that aggregates all [public fine-tuned YOLOv8 models uploaded to Roboflow](https://universe.roboflow.com/search?q=model%3Ayolov8&ref=ultralytics). You can use this page to explore pre-trained models you can use for testing or [for automated data labeling](https://docs.roboflow.com/annotate/use-roboflow-annotate/model-assisted-labeling?ref=ultralytics) or to prototype with [Roboflow inference](https://github.com/roboflow/inference?ref=ultralytics). +Universe also has a page that aggregates all [public fine-tuned YOLO11 models uploaded to Roboflow](https://universe.roboflow.com/search?q=model%3Ayolov8&ref=ultralytics). You can use this page to explore pre-trained models you can use for testing or [for automated data labeling](https://docs.roboflow.com/annotate/use-roboflow-annotate/model-assisted-labeling?ref=ultralytics) or to prototype with [Roboflow inference](https://github.com/roboflow/inference?ref=ultralytics). If you want to gather images yourself, try [Collect](https://github.com/roboflow/roboflow-collect), an open source project that allows you to automatically gather images using a webcam on the edge. You can use text or image prompts with Collect to instruct what data should be collected, allowing you to capture only the useful data you need to build your vision model. -## Upload, Convert and Label Data for YOLOv8 Format +## Upload, Convert and Label Data for YOLO11 Format [Roboflow Annotate](https://docs.roboflow.com/annotate/use-roboflow-annotate?ref=ultralytics) is an online annotation tool for use in labeling images for [object detection](https://www.ultralytics.com/glossary/object-detection), classification, and segmentation. -To label data for a YOLOv8 object detection, [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation), or classification model, first create a project in Roboflow. +To label data for a YOLO11 object detection, [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation), or classification model, first create a project in Roboflow.

Create a Roboflow project @@ -95,7 +95,7 @@ You can also add tags to images from the Tags panel in the sidebar. You can appl Adding tags to an image in Roboflow

-Models hosted on Roboflow can be used with Label Assist, an automated annotation tool that uses your YOLOv8 model to recommend annotations. To use Label Assist, first upload a YOLOv8 model to Roboflow (see instructions later in the guide). Then, click the magic wand icon in the left sidebar and select your model for use in Label Assist. +Models hosted on Roboflow can be used with Label Assist, an automated annotation tool that uses your YOLO11 model to recommend annotations. To use Label Assist, first upload a YOLO11 model to Roboflow (see instructions later in the guide). Then, click the magic wand icon in the left sidebar and select your model for use in Label Assist. Choose a model, then click "Continue" to enable Label Assist: @@ -109,7 +109,7 @@ When you open new images for annotation, Label Assist will trigger and recommend Label Assist recommending an annotation

-## Dataset Management for YOLOv8 +## Dataset Management for YOLO11 Roboflow provides a suite of tools for understanding computer vision datasets. @@ -157,13 +157,13 @@ When your dataset version has been generated, you can export your data into a ra Exporting a dataset

-You are now ready to train YOLOv8 on a custom dataset. Follow this [written guide](https://blog.roboflow.com/how-to-train-yolov8-on-a-custom-dataset/?ref=ultralytics) and [YouTube video](https://www.youtube.com/watch?v=wuZtUMEiKWY) for step-by-step instructions or refer to the [Ultralytics documentation](../modes/train.md). +You are now ready to train YOLO11 on a custom dataset. Follow this [written guide](https://blog.roboflow.com/how-to-train-yolov8-on-a-custom-dataset/?ref=ultralytics) and [YouTube video](https://www.youtube.com/watch?v=wuZtUMEiKWY) for step-by-step instructions or refer to the [Ultralytics documentation](../modes/train.md). -## Upload Custom YOLOv8 Model Weights for Testing and Deployment +## Upload Custom YOLO11 Model Weights for Testing and Deployment Roboflow offers an infinitely scalable API for deployed models and SDKs for use with NVIDIA Jetsons, Luxonis OAKs, Raspberry Pis, GPU-based devices, and more. -You can deploy YOLOv8 models by uploading YOLOv8 weights to Roboflow. You can do this in a few lines of Python code. Create a new Python file and add the following code: +You can deploy YOLO11 models by uploading YOLO11 weights to Roboflow. You can do this in a few lines of Python code. Create a new Python file and add the following code: ```python import roboflow # install with 'pip install roboflow' @@ -190,7 +190,7 @@ To test your model and find deployment instructions for supported SDKs, go to th You can also use your uploaded model as a [labeling assistant](https://docs.roboflow.com/annotate/use-roboflow-annotate/model-assisted-labeling?ref=ultralytics). This feature uses your trained model to recommend annotations on images uploaded to Roboflow. -## How to Evaluate YOLOv8 Models +## How to Evaluate YOLO11 Models Roboflow provides a range of features for use in evaluating models. @@ -224,17 +224,17 @@ You can use Vector Analysis to: ## Learning Resources -Want to learn more about using Roboflow for creating YOLOv8 models? The following resources may be helpful in your work. +Want to learn more about using Roboflow for creating YOLO11 models? The following resources may be helpful in your work. -- [Train YOLOv8 on a Custom Dataset](https://github.com/roboflow/notebooks/blob/main/notebooks/train-yolov8-object-detection-on-custom-dataset.ipynb): Follow our interactive notebook that shows you how to train a YOLOv8 model on a custom dataset. -- [Autodistill](https://docs.autodistill.com/): Use large foundation vision models to label data for specific models. You can label images for use in training YOLOv8 classification, detection, and segmentation models with Autodistill. +- [Train YOLO11 on a Custom Dataset](https://github.com/roboflow/notebooks/blob/main/notebooks/train-yolov8-object-detection-on-custom-dataset.ipynb): Follow our interactive notebook that shows you how to train a YOLO11 model on a custom dataset. +- [Autodistill](https://docs.autodistill.com/): Use large foundation vision models to label data for specific models. You can label images for use in training YOLO11 classification, detection, and segmentation models with Autodistill. - [Supervision](https://supervision.roboflow.com/?ref=ultralytics): A Python package with helpful utilities for use in working with computer vision models. You can use supervision to filter detections, compute confusion matrices, and more, all in a few lines of Python code. 
-- [Roboflow Blog](https://blog.roboflow.com/?ref=ultralytics): The Roboflow Blog features over 500 articles on computer vision, covering topics from how to train a YOLOv8 model to annotation best practices. -- [Roboflow YouTube channel](https://www.youtube.com/@Roboflow): Browse dozens of in-depth computer vision guides on our YouTube channel, covering topics from training YOLOv8 models to automated image labeling. +- [Roboflow Blog](https://blog.roboflow.com/?ref=ultralytics): The Roboflow Blog features over 500 articles on computer vision, covering topics from how to train a YOLO11 model to annotation best practices. +- [Roboflow YouTube channel](https://www.youtube.com/@Roboflow): Browse dozens of in-depth computer vision guides on our YouTube channel, covering topics from training YOLO11 models to automated image labeling. ## Project Showcase -Below are a few of the many pieces of feedback we have received for using YOLOv8 and Roboflow together to create computer vision models. +Below are a few of the many pieces of feedback we have received for using YOLO11 and Roboflow together to create computer vision models.

Showcase image @@ -244,26 +244,26 @@ Below are a few of the many pieces of feedback we have received for using YOLOv8 ## FAQ -### How do I label data for YOLOv8 models using Roboflow? +### How do I label data for YOLO11 models using Roboflow? -Labeling data for YOLOv8 models using Roboflow is straightforward with Roboflow Annotate. First, create a project on Roboflow and upload your images. After uploading, select the batch of images and click "Start Annotating." You can use the `B` key for bounding boxes or the `P` key for polygons. For faster annotation, use the SAM-based label assistant by clicking the cursor icon in the sidebar. Detailed steps can be found [here](#upload-convert-and-label-data-for-yolov8-format). +Labeling data for YOLO11 models using Roboflow is straightforward with Roboflow Annotate. First, create a project on Roboflow and upload your images. After uploading, select the batch of images and click "Start Annotating." You can use the `B` key for bounding boxes or the `P` key for polygons. For faster annotation, use the SAM-based label assistant by clicking the cursor icon in the sidebar. Detailed steps can be found [here](#upload-convert-and-label-data-for-yolo11-format). -### What services does Roboflow offer for collecting YOLOv8 [training data](https://www.ultralytics.com/glossary/training-data)? +### What services does Roboflow offer for collecting YOLO11 [training data](https://www.ultralytics.com/glossary/training-data)? -Roboflow provides two key services for collecting YOLOv8 training data: [Universe](https://universe.roboflow.com/?ref=ultralytics) and [Collect](https://github.com/roboflow/roboflow-collect?ref=ultralytics). Universe offers access to over 250,000 vision datasets, while Collect helps you gather images using a webcam and automated prompts. +Roboflow provides two key services for collecting YOLO11 training data: [Universe](https://universe.roboflow.com/?ref=ultralytics) and [Collect](https://github.com/roboflow/roboflow-collect?ref=ultralytics). Universe offers access to over 250,000 vision datasets, while Collect helps you gather images using a webcam and automated prompts. -### How can I manage and analyze my YOLOv8 dataset using Roboflow? +### How can I manage and analyze my YOLO11 dataset using Roboflow? -Roboflow offers robust dataset management tools, including dataset search, tagging, and Health Check. Use the search feature to find images based on text descriptions or tags. Health Check provides insights into dataset quality, showing class balance, image sizes, and annotation heatmaps. This helps optimize dataset performance before training YOLOv8 models. Detailed information can be found [here](#dataset-management-for-yolov8). +Roboflow offers robust dataset management tools, including dataset search, tagging, and Health Check. Use the search feature to find images based on text descriptions or tags. Health Check provides insights into dataset quality, showing class balance, image sizes, and annotation heatmaps. This helps optimize dataset performance before training YOLO11 models. Detailed information can be found [here](#dataset-management-for-yolo11). -### How do I export my YOLOv8 dataset from Roboflow? +### How do I export my YOLO11 dataset from Roboflow? -To export your YOLOv8 dataset from Roboflow, you need to create a dataset version. Click "Versions" in the sidebar, then "Create New Version" and apply any desired augmentations. Once the version is generated, click "Export Dataset" and choose the YOLOv8 format. 
Follow this process [here](#export-data-in-40-formats-for-model-training). +To export your YOLO11 dataset from Roboflow, you need to create a dataset version. Click "Versions" in the sidebar, then "Create New Version" and apply any desired augmentations. Once the version is generated, click "Export Dataset" and choose the YOLO11 format. Follow this process [here](#export-data-in-40-formats-for-model-training). -### How can I integrate and deploy YOLOv8 models with Roboflow? +### How can I integrate and deploy YOLO11 models with Roboflow? -Integrate and deploy YOLOv8 models on Roboflow by uploading your YOLOv8 weights through a few lines of Python code. Use the provided script to authenticate and upload your model, which will create an API for deployment. For details on the script and further instructions, see [this section](#upload-custom-yolov8-model-weights-for-testing-and-deployment). +Integrate and deploy YOLO11 models on Roboflow by uploading your YOLO11 weights through a few lines of Python code. Use the provided script to authenticate and upload your model, which will create an API for deployment. For details on the script and further instructions, see [this section](#upload-custom-yolo11-model-weights-for-testing-and-deployment). -### What tools does Roboflow provide for evaluating YOLOv8 models? +### What tools does Roboflow provide for evaluating YOLO11 models? -Roboflow offers model evaluation tools, including a confusion matrix and vector analysis plots. Access these tools from the "View Detailed Evaluation" button on your model page. These features help identify model performance issues and find areas for improvement. For more information, refer to [this section](#how-to-evaluate-yolov8-models). +Roboflow offers model evaluation tools, including a confusion matrix and vector analysis plots. Access these tools from the "View Detailed Evaluation" button on your model page. These features help identify model performance issues and find areas for improvement. For more information, refer to [this section](#how-to-evaluate-yolo11-models). diff --git a/docs/en/integrations/tensorboard.md b/docs/en/integrations/tensorboard.md index d563aca12b..c6cad0d50b 100644 --- a/docs/en/integrations/tensorboard.md +++ b/docs/en/integrations/tensorboard.md @@ -1,14 +1,14 @@ --- comments: true -description: Learn how to integrate YOLOv8 with TensorBoard for real-time visual insights into your model's training metrics, performance graphs, and debugging workflows. -keywords: YOLOv8, TensorBoard, model training, visualization, machine learning, deep learning, Ultralytics, training metrics, performance analysis +description: Learn how to integrate YOLO11 with TensorBoard for real-time visual insights into your model's training metrics, performance graphs, and debugging workflows. +keywords: YOLO11, TensorBoard, model training, visualization, machine learning, deep learning, Ultralytics, training metrics, performance analysis --- -# Gain Visual Insights with YOLOv8's Integration with TensorBoard +# Gain Visual Insights with YOLO11's Integration with TensorBoard -Understanding and fine-tuning [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) models like [Ultralytics' YOLOv8](https://www.ultralytics.com/) becomes more straightforward when you take a closer look at their training processes. Model training visualization helps with getting insights into the model's learning patterns, performance metrics, and overall behavior. 
YOLOv8's integration with TensorBoard makes this process of visualization and analysis easier and enables more efficient and informed adjustments to the model. +Understanding and fine-tuning [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) models like [Ultralytics' YOLO11](https://www.ultralytics.com/) becomes more straightforward when you take a closer look at their training processes. Model training visualization helps you gain insights into the model's learning patterns, performance metrics, and overall behavior. YOLO11's integration with TensorBoard makes this process of visualization and analysis easier and enables more efficient and informed adjustments to the model. -This guide covers how to use TensorBoard with YOLOv8. You'll learn about various visualizations, from tracking metrics to analyzing model graphs. These tools will help you understand your YOLOv8 model's performance better. +This guide covers how to use TensorBoard with YOLO11. You'll learn about various visualizations, from tracking metrics to analyzing model graphs. These tools will help you understand your YOLO11 model's performance better. ## TensorBoard @@ -18,9 +18,9 @@ This guide covers how to use TensorBoard with YOLOv8. You'll learn about various [TensorBoard](https://www.tensorflow.org/tensorboard), [TensorFlow](https://www.ultralytics.com/glossary/tensorflow)'s visualization toolkit, is essential for [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) experimentation. TensorBoard features a range of visualization tools, crucial for monitoring machine learning models. These tools include tracking key metrics like loss and accuracy, visualizing model graphs, and viewing histograms of weights and biases over time. It also provides capabilities for projecting [embeddings](https://www.ultralytics.com/glossary/embeddings) to lower-dimensional spaces and displaying multimedia data. -## YOLOv8 Training with TensorBoard +## YOLO11 Training with TensorBoard -Using TensorBoard while training YOLOv8 models is straightforward and offers significant benefits. +Using TensorBoard while training YOLO11 models is straightforward and offers significant benefits. ## Installation @@ -31,13 +31,13 @@ To install the required package, run: === "CLI" ```bash - # Install the required package for YOLOv8 and Tensorboard + # Install the required package for YOLO11 and TensorBoard pip install ultralytics ``` -TensorBoard is conveniently pre-installed with YOLOv8, eliminating the need for additional setup for visualization purposes. +TensorBoard is conveniently pre-installed with YOLO11, eliminating the need for additional setup for visualization purposes. -For detailed instructions and best practices related to the installation process, be sure to check our [YOLOv8 Installation guide](../quickstart.md). While installing the required packages for YOLOv8, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. +For detailed instructions and best practices related to the installation process, be sure to check our [YOLO11 Installation guide](../quickstart.md). While installing the required packages for YOLO11, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips.
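Note that whether Ultralytics writes TensorBoard event files at all is controlled by the package-level settings rather than a per-run argument. A short sketch; the `"tensorboard"` settings key reflects recent Ultralytics releases, so confirm with `yolo settings` if your version differs:

```python
from ultralytics import settings

# Enable TensorBoard event logging for subsequent training runs
settings.update({"tensorboard": True})

print(settings["tensorboard"])  # confirm the current value
```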
## Configuring TensorBoard for Google Colab @@ -54,7 +54,7 @@ When using Google Colab, it's important to set up TensorBoard before starting yo ## Usage -Before diving into the usage instructions, be sure to check out the range of [YOLOv8 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements. +Before diving into the usage instructions, be sure to check out the range of [YOLO11 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements. !!! example "Usage" @@ -64,7 +64,7 @@ Before diving into the usage instructions, be sure to check out the range of [YO from ultralytics import YOLO # Load a pre-trained model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Train the model results = model.train(data="coco8.yaml", epochs=100, imgsz=640) @@ -76,17 +76,17 @@ Upon running the usage code snippet above, you can expect the following output: TensorBoard: Start with 'tensorboard --logdir path_to_your_tensorboard_logs', view at http://localhost:6006/ ``` -This output indicates that TensorBoard is now actively monitoring your YOLOv8 training session. You can access the TensorBoard dashboard by visiting the provided URL (http://localhost:6006/) to view real-time training metrics and model performance. For users working in Google Colab, the TensorBoard will be displayed in the same cell where you executed the TensorBoard configuration commands. +This output indicates that TensorBoard is now actively monitoring your YOLO11 training session. You can access the TensorBoard dashboard by visiting the provided URL (http://localhost:6006/) to view real-time training metrics and model performance. For users working in Google Colab, the TensorBoard will be displayed in the same cell where you executed the TensorBoard configuration commands. -For more information related to the model training process, be sure to check our [YOLOv8 Model Training guide](../modes/train.md). If you are interested in learning more about logging, checkpoints, plotting, and file management, read our [usage guide on configuration](../usage/cfg.md). +For more information related to the model training process, be sure to check our [YOLO11 Model Training guide](../modes/train.md). If you are interested in learning more about logging, checkpoints, plotting, and file management, read our [usage guide on configuration](../usage/cfg.md). -## Understanding Your TensorBoard for YOLOv8 Training +## Understanding Your TensorBoard for YOLO11 Training -Now, let's focus on understanding the various features and components of TensorBoard in the context of YOLOv8 training. The three key sections of the TensorBoard are Time Series, Scalars, and Graphs. +Now, let's focus on understanding the various features and components of TensorBoard in the context of YOLO11 training. The three key sections of the TensorBoard are Time Series, Scalars, and Graphs. ### Time Series -The Time Series feature in the TensorBoard offers a dynamic and detailed perspective of various training metrics over time for YOLOv8 models. It focuses on the progression and trends of metrics across training epochs. Here's an example of what you can expect to see. +The Time Series feature in the TensorBoard offers a dynamic and detailed perspective of various training metrics over time for YOLO11 models. It focuses on the progression and trends of metrics across training epochs. Here's an example of what you can expect to see. 
![image](https://github.com/ultralytics/docs/releases/download/0/time-series-tensorboard-yolov8.avif) @@ -100,13 +100,13 @@ The Time Series feature in the TensorBoard offers a dynamic and detailed perspec - **In-Depth Analysis**: Time Series provides an in-depth analysis of each metric. For instance, different learning rate segments are shown, offering insights into how adjustments in learning rate impact the model's learning curve. -#### Importance of Time Series in YOLOv8 Training +#### Importance of Time Series in YOLO11 Training -The Time Series section is essential for a thorough analysis of the YOLOv8 model's training progress. It lets you track the metrics in real time to promptly identify and solve issues. It also offers a detailed view of each metrics progression, which is crucial for fine-tuning the model and enhancing its performance. +The Time Series section is essential for a thorough analysis of the YOLO11 model's training progress. It lets you track the metrics in real time to promptly identify and solve issues. It also offers a detailed view of each metric's progression, which is crucial for fine-tuning the model and enhancing its performance. ### Scalars -Scalars in the TensorBoard are crucial for plotting and analyzing simple metrics like loss and accuracy during the training of YOLOv8 models. They offer a clear and concise view of how these metrics evolve with each training [epoch](https://www.ultralytics.com/glossary/epoch), providing insights into the model's learning effectiveness and stability. Here's an example of what you can expect to see. +Scalars in the TensorBoard are crucial for plotting and analyzing simple metrics like loss and accuracy during the training of YOLO11 models. They offer a clear and concise view of how these metrics evolve with each training [epoch](https://www.ultralytics.com/glossary/epoch), providing insights into the model's learning effectiveness and stability. Here's an example of what you can expect to see. ![image](https://github.com/ultralytics/docs/releases/download/0/scalars-metrics-tensorboard.avif) @@ -130,7 +130,7 @@ Scalars in the TensorBoard are crucial for plotting and analyzing simple metrics #### Importance of Monitoring Scalars -Observing scalar metrics is crucial for fine-tuning the YOLOv8 model. Variations in these metrics, such as spikes or irregular patterns in loss graphs, can highlight potential issues such as [overfitting](https://www.ultralytics.com/glossary/overfitting), [underfitting](https://www.ultralytics.com/glossary/underfitting), or inappropriate learning rate settings. By closely monitoring these scalars, you can make informed decisions to optimize the training process, ensuring that the model learns effectively and achieves the desired performance. +Observing scalar metrics is crucial for fine-tuning the YOLO11 model. Variations in these metrics, such as spikes or irregular patterns in loss graphs, can highlight potential issues such as [overfitting](https://www.ultralytics.com/glossary/overfitting), [underfitting](https://www.ultralytics.com/glossary/underfitting), or inappropriate learning rate settings. By closely monitoring these scalars, you can make informed decisions to optimize the training process, ensuring that the model learns effectively and achieves the desired performance.
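If you want to analyze these scalars outside the dashboard, for example to flag loss spikes automatically, TensorBoard's event files can be read back with `EventAccumulator`. A sketch under the assumption that the run directory and scalar tag names match what your trainer actually logged:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Path is an assumption: point this at the run directory printed at training start
acc = EventAccumulator("runs/detect/train")
acc.Reload()  # parse the event files on disk

tags = acc.Tags()["scalars"]
print(tags)  # exact tag names depend on what was logged

# Walk one scalar series step by step, e.g. to spot sudden spikes in a loss curve
for event in acc.Scalars(tags[0]):
    print(event.step, event.value)
```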
### Difference Between Scalars and Time Series @@ -138,15 +138,15 @@ While both Scalars and Time Series in TensorBoard are used for tracking metrics, ### Graphs -The Graphs section of the TensorBoard visualizes the computational graph of the YOLOv8 model, showing how operations and data flow within the model. It's a powerful tool for understanding the model's structure, ensuring that all layers are connected correctly, and for identifying any potential bottlenecks in data flow. Here's an example of what you can expect to see. +The Graphs section of the TensorBoard visualizes the computational graph of the YOLO11 model, showing how operations and data flow within the model. It's a powerful tool for understanding the model's structure, ensuring that all layers are connected correctly, and for identifying any potential bottlenecks in data flow. Here's an example of what you can expect to see. ![image](https://github.com/ultralytics/docs/releases/download/0/tensorboard-yolov8-computational-graph.avif) -Graphs are particularly useful for debugging the model, especially in complex architectures typical in [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models like YOLOv8. They help in verifying layer connections and the overall design of the model. +Graphs are particularly useful for debugging the model, especially in complex architectures typical in [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models like YOLO11. They help in verifying layer connections and the overall design of the model. ## Summary -This guide aims to help you use TensorBoard with YOLOv8 for visualization and analysis of machine learning model training. It focuses on explaining how key TensorBoard features can provide insights into training metrics and model performance during YOLOv8 training sessions. +This guide aims to help you use TensorBoard with YOLO11 for visualization and analysis of machine learning model training. It focuses on explaining how key TensorBoard features can provide insights into training metrics and model performance during YOLO11 training sessions. For a more detailed exploration of these features and effective utilization strategies, you can refer to TensorFlow's official [TensorBoard documentation](https://www.tensorflow.org/tensorboard/get_started) and their [GitHub repository](https://github.com/tensorflow/tensorboard). @@ -154,29 +154,29 @@ Want to learn more about the various integrations of Ultralytics? Check out the ## FAQ -### What benefits does using TensorBoard with YOLOv8 offer? +### What benefits does using TensorBoard with YOLO11 offer? -Using TensorBoard with YOLOv8 provides several visualization tools essential for efficient model training: +Using TensorBoard with YOLO11 provides several visualization tools essential for efficient model training: - **Real-Time Metrics Tracking:** Track key metrics such as loss, accuracy, precision, and recall live. - **Model Graph Visualization:** Understand and debug the model architecture by visualizing computational graphs. - **Embedding Visualization:** Project embeddings to lower-dimensional spaces for better insight. -These tools enable you to make informed adjustments to enhance your YOLOv8 model's performance. For more details on TensorBoard features, check out the TensorFlow [TensorBoard guide](https://www.tensorflow.org/tensorboard/get_started). +These tools enable you to make informed adjustments to enhance your YOLO11 model's performance. 
For more details on TensorBoard features, check out the TensorFlow [TensorBoard guide](https://www.tensorflow.org/tensorboard/get_started). -### How can I monitor training metrics using TensorBoard when training a YOLOv8 model? +### How can I monitor training metrics using TensorBoard when training a YOLO11 model? -To monitor training metrics while training a YOLOv8 model with TensorBoard, follow these steps: +To monitor training metrics while training a YOLO11 model with TensorBoard, follow these steps: -1. **Install TensorBoard and YOLOv8:** Run `pip install ultralytics` which includes TensorBoard. -2. **Configure TensorBoard Logging:** During the training process, YOLOv8 logs metrics to a specified log directory. +1. **Install TensorBoard and YOLO11:** Run `pip install ultralytics` which includes TensorBoard. +2. **Configure TensorBoard Logging:** During the training process, YOLO11 logs metrics to a specified log directory. 3. **Start TensorBoard:** Launch TensorBoard using the command `tensorboard --logdir path/to/your/tensorboard/logs`. -The TensorBoard dashboard, accessible via [http://localhost:6006/](http://localhost:6006/), provides real-time insights into various training metrics. For a deeper dive into training configurations, visit our [YOLOv8 Configuration guide](../usage/cfg.md). +The TensorBoard dashboard, accessible via [http://localhost:6006/](http://localhost:6006/), provides real-time insights into various training metrics. For a deeper dive into training configurations, visit our [YOLO11 Configuration guide](../usage/cfg.md). -### What kind of metrics can I visualize with TensorBoard when training YOLOv8 models? +### What kind of metrics can I visualize with TensorBoard when training YOLO11 models? -When training YOLOv8 models, TensorBoard allows you to visualize an array of important metrics including: +When training YOLO11 models, TensorBoard allows you to visualize an array of important metrics including: - **Loss (Training and Validation):** Indicates how well the model is performing during training and validation. - **Accuracy/Precision/[Recall](https://www.ultralytics.com/glossary/recall):** Key performance metrics to evaluate detection accuracy. @@ -185,9 +185,9 @@ When training YOLOv8 models, TensorBoard allows you to visualize an array of imp These visualizations are essential for tracking model performance and making necessary optimizations. For more information on these metrics, refer to our [Performance Metrics guide](../guides/yolo-performance-metrics.md). -### Can I use TensorBoard in a Google Colab environment for training YOLOv8? +### Can I use TensorBoard in a Google Colab environment for training YOLO11? -Yes, you can use TensorBoard in a Google Colab environment to train YOLOv8 models. Here's a quick setup: +Yes, you can use TensorBoard in a Google Colab environment to train YOLO11 models. Here's a quick setup: !!! example "Configure TensorBoard for Google Colab" @@ -198,16 +198,16 @@ Yes, you can use TensorBoard in a Google Colab environment to train YOLOv8 model %tensorboard --logdir path/to/runs ``` - Then, run the YOLOv8 training script: + Then, run the YOLO11 training script: ```python from ultralytics import YOLO # Load a pre-trained model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Train the model results = model.train(data="coco8.yaml", epochs=100, imgsz=640) ``` -TensorBoard will visualize the training progress within Colab, providing real-time insights into metrics like loss and accuracy. 
For additional details on configuring YOLOv8 training, see our detailed [YOLOv8 Installation guide](../quickstart.md). +TensorBoard will visualize the training progress within Colab, providing real-time insights into metrics like loss and accuracy. For additional details on configuring YOLO11 training, see our detailed [YOLO11 Installation guide](../quickstart.md). diff --git a/docs/en/integrations/tensorrt.md b/docs/en/integrations/tensorrt.md index 0e40198113..1a8e5a9161 100644 --- a/docs/en/integrations/tensorrt.md +++ b/docs/en/integrations/tensorrt.md @@ -380,7 +380,7 @@ Expand sections below for information on how these models were exported and test See [export mode](../modes/export.md) for details regarding export configuration arguments. - ```py + ```python from ultralytics import YOLO model = YOLO("yolov8n.pt") @@ -401,7 +401,7 @@ Expand sections below for information on how these models were exported and test See [predict mode](../modes/predict.md) for additional information. - ```py + ```python import cv2 from ultralytics import YOLO @@ -421,7 +421,7 @@ Expand sections below for information on how these models were exported and test See [`val` mode](../modes/val.md) to learn more about validation configuration arguments. - ```py + ```python from ultralytics import YOLO model = YOLO("yolov8n.engine") diff --git a/docs/en/integrations/tf-graphdef.md b/docs/en/integrations/tf-graphdef.md index 15cbd48426..fd6d86a32a 100644 --- a/docs/en/integrations/tf-graphdef.md +++ b/docs/en/integrations/tf-graphdef.md @@ -1,14 +1,14 @@ --- comments: true -description: Learn how to export YOLOv8 models to the TF GraphDef format for seamless deployment on various platforms, including mobile and web. -keywords: YOLOv8, export, TensorFlow, GraphDef, model deployment, TensorFlow Serving, TensorFlow Lite, TensorFlow.js, machine learning, AI, computer vision +description: Learn how to export YOLO11 models to the TF GraphDef format for seamless deployment on various platforms, including mobile and web. +keywords: YOLO11, export, TensorFlow, GraphDef, model deployment, TensorFlow Serving, TensorFlow Lite, TensorFlow.js, machine learning, AI, computer vision --- -# How to Export to TF GraphDef from YOLOv8 for Deployment +# How to Export to TF GraphDef from YOLO11 for Deployment -When you are deploying cutting-edge [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) models, like YOLOv8, in different environments, you might run into compatibility issues. Google's [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) GraphDef, or TF GraphDef, offers a solution by providing a serialized, platform-independent representation of your model. Using the TF GraphDef model format, you can deploy your YOLOv8 model in environments where the complete TensorFlow ecosystem may not be available, such as mobile devices or specialized hardware. +When you are deploying cutting-edge [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) models, like YOLO11, in different environments, you might run into compatibility issues. Google's [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) GraphDef, or TF GraphDef, offers a solution by providing a serialized, platform-independent representation of your model. Using the TF GraphDef model format, you can deploy your YOLO11 model in environments where the complete TensorFlow ecosystem may not be available, such as mobile devices or specialized hardware. 
-In this guide, we'll walk you step by step through how to export your [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models to the TF GraphDef model format. By converting your model, you can streamline deployment and use YOLOv8's computer vision capabilities in a broader range of applications and platforms. +In this guide, we'll walk you step by step through how to export your [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics) models to the TF GraphDef model format. By converting your model, you can streamline deployment and use YOLO11's computer vision capabilities in a broader range of applications and platforms.

TensorFlow GraphDef @@ -16,11 +16,11 @@ In this guide, we'll walk you step by step through how to export your [Ultralyti ## Why Should You Export to TF GraphDef? -TF GraphDef is a powerful component of the TensorFlow ecosystem that was developed by Google. It can be used to optimize and deploy models like YOLOv8. Exporting to TF GraphDef lets us move models from research to real-world applications. It allows models to run in environments without the full TensorFlow framework. +TF GraphDef is a powerful component of the TensorFlow ecosystem that was developed by Google. It can be used to optimize and deploy models like YOLO11. Exporting to TF GraphDef lets us move models from research to real-world applications. It allows models to run in environments without the full TensorFlow framework. The GraphDef format represents the model as a serialized computation graph. This enables various optimization techniques like constant folding, quantization, and graph transformations. These optimizations ensure efficient execution, reduced memory usage, and faster inference speeds. -GraphDef models can use hardware accelerators such as GPUs, TPUs, and AI chips, unlocking significant performance gains for the YOLOv8 inference pipeline. The TF GraphDef format creates a self-contained package with the model and its dependencies, simplifying deployment and integration into diverse systems. +GraphDef models can use hardware accelerators such as GPUs, TPUs, and AI chips, unlocking significant performance gains for the YOLO11 inference pipeline. The TF GraphDef format creates a self-contained package with the model and its dependencies, simplifying deployment and integration into diverse systems. ## Key Features of TF GraphDef Models @@ -38,7 +38,7 @@ Here's a look at its key characteristics: ## Deployment Options with TF GraphDef -Before we dive into the process of exporting YOLOv8 models to TF GraphDef, let's take a look at some typical deployment situations where this format is used. +Before we dive into the process of exporting YOLO11 models to TF GraphDef, let's take a look at some typical deployment situations where this format is used. Here's how you can deploy with TF GraphDef efficiently across various platforms. @@ -46,13 +46,13 @@ Here's how you can deploy with TF GraphDef efficiently across various platforms. - **Mobile and Embedded Devices:** With tools like TensorFlow Lite, you can convert TF GraphDef models into formats optimized for smartphones, tablets, and various embedded devices. Your models can then be used for on-device inference, where execution is done locally, often providing performance gains and offline capabilities. -- **Web Browsers:** TensorFlow.js enables the deployment of TF GraphDef models directly within web browsers. It paves the way for real-time object detection applications running on the client side, using the capabilities of YOLOv8 through JavaScript. +- **Web Browsers:** TensorFlow.js enables the deployment of TF GraphDef models directly within web browsers. It paves the way for real-time object detection applications running on the client side, using the capabilities of YOLO11 through JavaScript. - **Specialized Hardware:** TF GraphDef's platform-agnostic nature allows it to target custom hardware, such as accelerators and TPUs (Tensor Processing Units). These devices can provide performance advantages for computationally intensive models. 
-## Exporting YOLOv8 Models to TF GraphDef +## Exporting YOLO11 Models to TF GraphDef -You can convert your YOLOv8 object detection model to the TF GraphDef format, which is compatible with various systems, to improve its performance across platforms. +You can convert your YOLO11 object detection model to the TF GraphDef format, which is compatible with various systems, to improve its performance across platforms. ### Installation @@ -63,15 +63,15 @@ To install the required package, run: === "CLI" ```bash - # Install the required package for YOLOv8 + # Install the required package for YOLO11 pip install ultralytics ``` -For detailed instructions and best practices related to the installation process, check our [Ultralytics Installation guide](../quickstart.md). While installing the required packages for YOLOv8, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. +For detailed instructions and best practices related to the installation process, check our [Ultralytics Installation guide](../quickstart.md). While installing the required packages for YOLO11, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. ### Usage -Before diving into the usage instructions, it's important to note that while all [Ultralytics YOLOv8 models](../models/index.md) are available for exporting, you can ensure that the model you select supports export functionality [here](../modes/export.md). +Before diving into the usage instructions, it's important to note that while all [Ultralytics YOLO11 models](../models/index.md) are available for exporting, you can ensure that the model you select supports export functionality [here](../modes/export.md). !!! example "Usage" @@ -80,14 +80,14 @@ Before diving into the usage instructions, it's important to note that while all ```python from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Export the model to TF GraphDef format - model.export(format="pb") # creates 'yolov8n.pb' + model.export(format="pb") # creates 'yolo11n.pb' # Load the exported TF GraphDef model - tf_graphdef_model = YOLO("yolov8n.pb") + tf_graphdef_model = YOLO("yolo11n.pb") # Run inference results = tf_graphdef_model("https://ultralytics.com/images/bus.jpg") @@ -96,18 +96,18 @@ Before diving into the usage instructions, it's important to note that while all === "CLI" ```bash - # Export a YOLOv8n PyTorch model to TF GraphDef format - yolo export model=yolov8n.pt format=pb # creates 'yolov8n.pb' + # Export a YOLO11n PyTorch model to TF GraphDef format + yolo export model=yolo11n.pt format=pb # creates 'yolo11n.pb' # Run inference with the exported model - yolo predict model='yolov8n.pb' source='https://ultralytics.com/images/bus.jpg' + yolo predict model='yolo11n.pb' source='https://ultralytics.com/images/bus.jpg' ``` For more details about supported export options, visit the [Ultralytics documentation page on deployment options](../guides/model-deployment-options.md). -## Deploying Exported YOLOv8 TF GraphDef Models +## Deploying Exported YOLO11 TF GraphDef Models -Once you've exported your YOLOv8 model to the TF GraphDef format, the next step is deployment. The primary and recommended first step for running a TF GraphDef model is to use the YOLO("model.pb") method, as previously shown in the usage code snippet. 
+Once you've exported your YOLO11 model to the TF GraphDef format, the next step is deployment. The primary and recommended first step for running a TF GraphDef model is to use the YOLO("model.pb") method, as previously shown in the usage code snippet. However, for more information on deploying your TF GraphDef models, take a look at the following resources: @@ -119,17 +119,17 @@ However, for more information on deploying your TF GraphDef models, take a look ## Summary -In this guide, we explored how to export Ultralytics YOLOv8 models to the TF GraphDef format. By doing this, you can flexibly deploy your optimized YOLOv8 models in different environments. +In this guide, we explored how to export Ultralytics YOLO11 models to the TF GraphDef format. By doing this, you can flexibly deploy your optimized YOLO11 models in different environments. For further details on usage, visit the [TF GraphDef official documentation](https://www.tensorflow.org/api_docs/python/tf/Graph). -For more information on integrating Ultralytics YOLOv8 with other platforms and frameworks, don't forget to check out our [integration guide page](index.md). It has great resources and insights to help you make the most of YOLOv8 in your projects. +For more information on integrating Ultralytics YOLO11 with other platforms and frameworks, don't forget to check out our [integration guide page](index.md). It has great resources and insights to help you make the most of YOLO11 in your projects. ## FAQ -### How do I export a YOLOv8 model to TF GraphDef format? +### How do I export a YOLO11 model to TF GraphDef format? -Ultralytics YOLOv8 models can be exported to TensorFlow GraphDef (TF GraphDef) format seamlessly. This format provides a serialized, platform-independent representation of the model, ideal for deploying in varied environments like mobile and web. To export a YOLOv8 model to TF GraphDef, follow these steps: +Ultralytics YOLO11 models can be exported to TensorFlow GraphDef (TF GraphDef) format seamlessly. This format provides a serialized, platform-independent representation of the model, ideal for deploying in varied environments like mobile and web. To export a YOLO11 model to TF GraphDef, follow these steps: !!! example "Usage" @@ -138,14 +138,14 @@ Ultralytics YOLOv8 models can be exported to TensorFlow GraphDef (TF GraphDef) f ```python from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Export the model to TF GraphDef format - model.export(format="pb") # creates 'yolov8n.pb' + model.export(format="pb") # creates 'yolo11n.pb' # Load the exported TF GraphDef model - tf_graphdef_model = YOLO("yolov8n.pb") + tf_graphdef_model = YOLO("yolo11n.pb") # Run inference results = tf_graphdef_model("https://ultralytics.com/images/bus.jpg") @@ -154,18 +154,18 @@ Ultralytics YOLOv8 models can be exported to TensorFlow GraphDef (TF GraphDef) f === "CLI" ```bash - # Export a YOLOv8n PyTorch model to TF GraphDef format - yolo export model="yolov8n.pt" format="pb" # creates 'yolov8n.pb' + # Export a YOLO11n PyTorch model to TF GraphDef format + yolo export model="yolo11n.pt" format="pb" # creates 'yolo11n.pb' # Run inference with the exported model - yolo predict model="yolov8n.pb" source="https://ultralytics.com/images/bus.jpg" + yolo predict model="yolo11n.pb" source="https://ultralytics.com/images/bus.jpg" ``` For more information on different export options, visit the [Ultralytics documentation on model export](../modes/export.md). 
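If you want to sanity-check the exported file with plain TensorFlow before handing it to a serving stack, a frozen GraphDef can be parsed and imported directly. This is a rough sketch rather than part of the Ultralytics API; it assumes the `yolo11n.pb` file produced by the export example above:

```python
import tensorflow as tf

# Parse the serialized GraphDef produced by model.export(format="pb")
graph_def = tf.compat.v1.GraphDef()
with open("yolo11n.pb", "rb") as f:
    graph_def.ParseFromString(f.read())

# Import into a fresh graph to confirm the file is a self-contained model
with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name="")

print(f"{len(graph.get_operations())} ops imported")
for op in graph.get_operations()[:5]:  # peek at the first few nodes
    print(op.name, op.type)
```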
-### What are the benefits of using TF GraphDef for YOLOv8 model deployment? +### What are the benefits of using TF GraphDef for YOLO11 model deployment? -Exporting YOLOv8 models to the TF GraphDef format offers multiple advantages, including: +Exporting YOLO11 models to the TF GraphDef format offers multiple advantages, including: 1. **Platform Independence**: TF GraphDef provides a platform-independent format, allowing models to be deployed across various environments including mobile and web browsers. 2. **Optimizations**: The format enables several optimizations, such as constant folding, quantization, and graph transformations, which enhance execution efficiency and reduce memory usage. @@ -173,19 +173,19 @@ Exporting YOLOv8 models to the TF GraphDef format offers multiple advantages, in Read more about the benefits in the [TF GraphDef section](#why-should-you-export-to-tf-graphdef) of our documentation. -### Why should I use Ultralytics YOLOv8 over other [object detection](https://www.ultralytics.com/glossary/object-detection) models? +### Why should I use Ultralytics YOLO11 over other [object detection](https://www.ultralytics.com/glossary/object-detection) models? -Ultralytics YOLOv8 offers numerous advantages compared to other models like YOLOv5 and YOLOv7. Some key benefits include: +Ultralytics YOLO11 offers numerous advantages compared to other models like YOLOv5 and YOLOv7. Some key benefits include: -1. **State-of-the-Art Performance**: YOLOv8 provides exceptional speed and [accuracy](https://www.ultralytics.com/glossary/accuracy) for real-time object detection, segmentation, and classification. +1. **State-of-the-Art Performance**: YOLO11 provides exceptional speed and [accuracy](https://www.ultralytics.com/glossary/accuracy) for real-time object detection, segmentation, and classification. 2. **Ease of Use**: Features a user-friendly API for model training, validation, prediction, and export, making it accessible for both beginners and experts. 3. **Broad Compatibility**: Supports multiple export formats including ONNX, TensorRT, CoreML, and TensorFlow, for versatile deployment options. -Explore further details in our [introduction to YOLOv8](https://docs.ultralytics.com/models/yolov8/). +Explore further details in our [introduction to YOLO11](https://docs.ultralytics.com/models/yolo11/). -### How can I deploy a YOLOv8 model on specialized hardware using TF GraphDef? +### How can I deploy a YOLO11 model on specialized hardware using TF GraphDef? -Once a YOLOv8 model is exported to TF GraphDef format, you can deploy it across various specialized hardware platforms. Typical deployment scenarios include: +Once a YOLO11 model is exported to TF GraphDef format, you can deploy it across various specialized hardware platforms. Typical deployment scenarios include: - **TensorFlow Serving**: Use TensorFlow Serving for scalable model deployment in production environments. It supports model management and efficient serving. - **Mobile Devices**: Convert TF GraphDef models to TensorFlow Lite, optimized for mobile and embedded devices, enabling on-device inference. @@ -194,11 +194,11 @@ Once a YOLOv8 model is exported to TF GraphDef format, you can deploy it across Check the [deployment options](#deployment-options-with-tf-graphdef) section for detailed information. -### Where can I find solutions for common issues while exporting YOLOv8 models? +### Where can I find solutions for common issues while exporting YOLO11 models? 
-For troubleshooting common issues with exporting YOLOv8 models, Ultralytics provides comprehensive guides and resources. If you encounter problems during installation or model export, refer to: +For troubleshooting common issues with exporting YOLO11 models, Ultralytics provides comprehensive guides and resources. If you encounter problems during installation or model export, refer to: - **[Common Issues Guide](../guides/yolo-common-issues.md)**: Offers solutions to frequently faced problems. - **[Installation Guide](../quickstart.md)**: Step-by-step instructions for setting up the required packages. -These resources should help you resolve most issues related to YOLOv8 model export and deployment. +These resources should help you resolve most issues related to YOLO11 model export and deployment. diff --git a/docs/en/integrations/tf-savedmodel.md b/docs/en/integrations/tf-savedmodel.md index 9f04dc7893..288802b641 100644 --- a/docs/en/integrations/tf-savedmodel.md +++ b/docs/en/integrations/tf-savedmodel.md @@ -1,14 +1,14 @@ --- comments: true -description: Learn how to export Ultralytics YOLOv8 models to TensorFlow SavedModel format for easy deployment across various platforms and environments. -keywords: YOLOv8, TF SavedModel, Ultralytics, TensorFlow, model export, model deployment, machine learning, AI +description: Learn how to export Ultralytics YOLO11 models to TensorFlow SavedModel format for easy deployment across various platforms and environments. +keywords: YOLO11, TF SavedModel, Ultralytics, TensorFlow, model export, model deployment, machine learning, AI --- -# Understand How to Export to TF SavedModel Format From YOLOv8 +# Understand How to Export to TF SavedModel Format From YOLO11 Deploying [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) models can be challenging. However, using an efficient and flexible model format can make your job easier. TF SavedModel is an open-source machine-learning framework used by TensorFlow to load machine-learning models in a consistent way. It is like a suitcase for TensorFlow models, making them easy to carry and use on different devices and systems. -Learning how to export to TF SavedModel from [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models can help you deploy models easily across different platforms and environments. In this guide, we'll walk through how to convert your models to the TF SavedModel format, simplifying the process of running inferences with your models on different devices. +Learning how to export to TF SavedModel from [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics) models can help you deploy models easily across different platforms and environments. In this guide, we'll walk through how to convert your models to the TF SavedModel format, simplifying the process of running inferences with your models on different devices. ## Why Should You Export to TF SavedModel? @@ -32,7 +32,7 @@ Here are the key features that make TF SavedModel a great option for AI develope ## Deployment Options with TF SavedModel -Before we dive into the process of exporting YOLOv8 models to the TF SavedModel format, let's explore some typical deployment scenarios where this format is used. +Before we dive into the process of exporting YOLO11 models to the TF SavedModel format, let's explore some typical deployment scenarios where this format is used. 
TF SavedModel provides a range of options to deploy your machine learning models: @@ -44,9 +44,9 @@ TF SavedModel provides a range of options to deploy your machine learning models - **TensorFlow Runtime:** TensorFlow Runtime (`tfrt`) is a high-performance runtime for executing [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) graphs. It provides lower-level APIs for loading and running TF SavedModels in C++ environments. TensorFlow Runtime offers better performance compared to the standard TensorFlow runtime. It is suitable for deployment scenarios that require low-latency inference and tight integration with existing C++ codebases. -## Exporting YOLOv8 Models to TF SavedModel +## Exporting YOLO11 Models to TF SavedModel -By exporting YOLOv8 models to the TF SavedModel format, you enhance their adaptability and ease of deployment across various platforms. +By exporting YOLO11 models to the TF SavedModel format, you enhance their adaptability and ease of deployment across various platforms. ### Installation @@ -57,15 +57,15 @@ To install the required package, run: === "CLI" ```bash - # Install the required package for YOLOv8 + # Install the required package for YOLO11 pip install ultralytics ``` -For detailed instructions and best practices related to the installation process, check our [Ultralytics Installation guide](../quickstart.md). While installing the required packages for YOLOv8, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. +For detailed instructions and best practices related to the installation process, check our [Ultralytics Installation guide](../quickstart.md). While installing the required packages for YOLO11, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. ### Usage -Before diving into the usage instructions, it's important to note that while all [Ultralytics YOLOv8 models](../models/index.md) are available for exporting, you can ensure that the model you select supports export functionality [here](../modes/export.md). +Before diving into the usage instructions, it's important to note that while all [Ultralytics YOLO11 models](../models/index.md) are available for exporting, you can ensure that the model you select supports export functionality [here](../modes/export.md). !!! 
example "Usage" @@ -74,14 +74,14 @@ Before diving into the usage instructions, it's important to note that while all ```python from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Export the model to TF SavedModel format - model.export(format="saved_model") # creates '/yolov8n_saved_model' + model.export(format="saved_model") # creates '/yolo11n_saved_model' # Load the exported TF SavedModel model - tf_savedmodel_model = YOLO("./yolov8n_saved_model") + tf_savedmodel_model = YOLO("./yolo11n_saved_model") # Run inference results = tf_savedmodel_model("https://ultralytics.com/images/bus.jpg") @@ -90,18 +90,18 @@ Before diving into the usage instructions, it's important to note that while all === "CLI" ```bash - # Export a YOLOv8n PyTorch model to TF SavedModel format - yolo export model=yolov8n.pt format=saved_model # creates '/yolov8n_saved_model' + # Export a YOLO11n PyTorch model to TF SavedModel format + yolo export model=yolo11n.pt format=saved_model # creates '/yolo11n_saved_model' # Run inference with the exported model - yolo predict model='./yolov8n_saved_model' source='https://ultralytics.com/images/bus.jpg' + yolo predict model='./yolo11n_saved_model' source='https://ultralytics.com/images/bus.jpg' ``` For more details about supported export options, visit the [Ultralytics documentation page on deployment options](../guides/model-deployment-options.md). -## Deploying Exported YOLOv8 TF SavedModel Models +## Deploying Exported YOLO11 TF SavedModel Models -Now that you have exported your YOLOv8 model to the TF SavedModel format, the next step is to deploy it. The primary and recommended first step for running a TF GraphDef model is to use the YOLO("./yolov8n_saved_model") method, as previously shown in the usage code snippet. +Now that you have exported your YOLO11 model to the TF SavedModel format, the next step is to deploy it. The primary and recommended first step for running a TF GraphDef model is to use the YOLO("./yolo11n_saved_model") method, as previously shown in the usage code snippet. However, for in-depth instructions on deploying your TF SavedModel models, take a look at the following resources: @@ -113,11 +113,11 @@ However, for in-depth instructions on deploying your TF SavedModel models, take ## Summary -In this guide, we explored how to export Ultralytics YOLOv8 models to the TF SavedModel format. By exporting to TF SavedModel, you gain the flexibility to optimize, deploy, and scale your YOLOv8 models on a wide range of platforms. +In this guide, we explored how to export Ultralytics YOLO11 models to the TF SavedModel format. By exporting to TF SavedModel, you gain the flexibility to optimize, deploy, and scale your YOLO11 models on a wide range of platforms. For further details on usage, visit the [TF SavedModel official documentation](https://www.tensorflow.org/guide/saved_model). -For more information on integrating Ultralytics YOLOv8 with other platforms and frameworks, don't forget to check out our [integration guide page](index.md). It's packed with great resources to help you make the most of YOLOv8 in your projects. +For more information on integrating Ultralytics YOLO11 with other platforms and frameworks, don't forget to check out our [integration guide page](index.md). It's packed with great resources to help you make the most of YOLO11 in your projects. 
## FAQ @@ -125,32 +125,32 @@ For more information on integrating Ultralytics YOLOv8 with other platforms and Exporting an Ultralytics YOLO model to the TensorFlow SavedModel format is straightforward. You can use either Python or CLI to achieve this: -!!! example "Exporting YOLOv8 to TF SavedModel" +!!! example "Exporting YOLO11 to TF SavedModel" === "Python" ```python from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Export the model to TF SavedModel format - model.export(format="saved_model") # creates '/yolov8n_saved_model' + model.export(format="saved_model") # creates '/yolo11n_saved_model' # Load the exported TF SavedModel for inference - tf_savedmodel_model = YOLO("./yolov8n_saved_model") + tf_savedmodel_model = YOLO("./yolo11n_saved_model") results = tf_savedmodel_model("https://ultralytics.com/images/bus.jpg") ``` === "CLI" ```bash - # Export the YOLOv8 model to TF SavedModel format - yolo export model=yolov8n.pt format=saved_model # creates '/yolov8n_saved_model' + # Export the YOLO11 model to TF SavedModel format + yolo export model=yolo11n.pt format=saved_model # creates '/yolo11n_saved_model' # Run inference with the exported model - yolo predict model='./yolov8n_saved_model' source='https://ultralytics.com/images/bus.jpg' + yolo predict model='./yolo11n_saved_model' source='https://ultralytics.com/images/bus.jpg' ``` Refer to the [Ultralytics Export documentation](../modes/export.md) for more details. @@ -176,9 +176,9 @@ TF SavedModel can be deployed in various environments, including: For detailed deployment options, visit the official guides on [deploying TensorFlow models](https://www.tensorflow.org/tfx/guide/serving). -### How can I install the necessary packages to export YOLOv8 models? +### How can I install the necessary packages to export YOLO11 models? -To export YOLOv8 models, you need to install the `ultralytics` package. Run the following command in your terminal: +To export YOLO11 models, you need to install the `ultralytics` package. Run the following command in your terminal: ```bash pip install ultralytics diff --git a/docs/en/integrations/tfjs.md b/docs/en/integrations/tfjs.md index ea2d613c62..a8168215b6 100644 --- a/docs/en/integrations/tfjs.md +++ b/docs/en/integrations/tfjs.md @@ -1,14 +1,14 @@ --- comments: true -description: Convert your Ultralytics YOLOv8 models to TensorFlow.js for high-speed, local object detection. Learn how to optimize ML models for browser and Node.js apps. -keywords: YOLOv8, TensorFlow.js, TF.js, model export, machine learning, object detection, browser ML, Node.js, Ultralytics, YOLO, export models +description: Convert your Ultralytics YOLO11 models to TensorFlow.js for high-speed, local object detection. Learn how to optimize ML models for browser and Node.js apps. +keywords: YOLO11, TensorFlow.js, TF.js, model export, machine learning, object detection, browser ML, Node.js, Ultralytics, YOLO, export models --- -# Export to TF.js Model Format From a YOLOv8 Model Format +# Export to TF.js Model Format From a YOLO11 Model Format Deploying [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) models directly in the browser or on Node.js can be tricky. You'll need to make sure your model format is optimized for faster performance so that the model can be used to run interactive applications locally on the user's device. 
The TensorFlow.js, or TF.js, model format is designed to use minimal power while delivering fast performance. -The 'export to TF.js model format' feature allows you to optimize your [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models for high-speed and locally-run [object detection](https://www.ultralytics.com/glossary/object-detection) inference. In this guide, we'll walk you through converting your models to the TF.js format, making it easier for your models to perform well on various local browsers and Node.js applications. +The 'export to TF.js model format' feature allows you to optimize your [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics) models for high-speed and locally-run [object detection](https://www.ultralytics.com/glossary/object-detection) inference. In this guide, we'll walk you through converting your models to the TF.js format, making it easier for your models to perform well on various local browsers and Node.js applications. ## Why Should You Export to TF.js? @@ -32,7 +32,7 @@ Here are the key features that make TF.js a powerful tool for developers: ## Deployment Options with TensorFlow.js -Before we dive into the process of exporting YOLOv8 models to the TF.js format, let's explore some typical deployment scenarios where this format is used. +Before we dive into the process of exporting YOLO11 models to the TF.js format, let's explore some typical deployment scenarios where this format is used. TF.js provides a range of options to deploy your machine learning models: @@ -42,9 +42,9 @@ TF.js provides a range of options to deploy your machine learning models: - **Chrome Extensions:** An interesting deployment scenario is the creation of Chrome extensions with TensorFlow.js. For instance, you can develop an extension that allows users to right-click on an image within any webpage to classify it using a pre-trained ML model. TensorFlow.js can be integrated into everyday web browsing experiences to provide immediate insights or augmentations based on machine learning. -## Exporting YOLOv8 Models to TensorFlow.js +## Exporting YOLO11 Models to TensorFlow.js -You can expand model compatibility and deployment flexibility by converting YOLOv8 models to TF.js. +You can expand model compatibility and deployment flexibility by converting YOLO11 models to TF.js. ### Installation @@ -55,15 +55,15 @@ To install the required package, run: === "CLI" ```bash - # Install the required package for YOLOv8 + # Install the required package for YOLO11 pip install ultralytics ``` -For detailed instructions and best practices related to the installation process, check our [Ultralytics Installation guide](../quickstart.md). While installing the required packages for YOLOv8, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. +For detailed instructions and best practices related to the installation process, check our [Ultralytics Installation guide](../quickstart.md). While installing the required packages for YOLO11, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. ### Usage -Before diving into the usage instructions, it's important to note that while all [Ultralytics YOLOv8 models](../models/index.md) are available for exporting, you can ensure that the model you select supports export functionality [here](../modes/export.md). 
+Before diving into the usage instructions, it's important to note that while all [Ultralytics YOLO11 models](../models/index.md) are available for exporting, you can ensure that the model you select supports export functionality [here](../modes/export.md). !!! example "Usage" @@ -72,14 +72,14 @@ Before diving into the usage instructions, it's important to note that while all ```python from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Export the model to TF.js format - model.export(format="tfjs") # creates '/yolov8n_web_model' + model.export(format="tfjs") # creates '/yolo11n_web_model' # Load the exported TF.js model - tfjs_model = YOLO("./yolov8n_web_model") + tfjs_model = YOLO("./yolo11n_web_model") # Run inference results = tfjs_model("https://ultralytics.com/images/bus.jpg") @@ -88,18 +88,18 @@ Before diving into the usage instructions, it's important to note that while all === "CLI" ```bash - # Export a YOLOv8n PyTorch model to TF.js format - yolo export model=yolov8n.pt format=tfjs # creates '/yolov8n_web_model' + # Export a YOLO11n PyTorch model to TF.js format + yolo export model=yolo11n.pt format=tfjs # creates '/yolo11n_web_model' # Run inference with the exported model - yolo predict model='./yolov8n_web_model' source='https://ultralytics.com/images/bus.jpg' + yolo predict model='./yolo11n_web_model' source='https://ultralytics.com/images/bus.jpg' ``` For more details about supported export options, visit the [Ultralytics documentation page on deployment options](../guides/model-deployment-options.md). -## Deploying Exported YOLOv8 TensorFlow.js Models +## Deploying Exported YOLO11 TensorFlow.js Models -Now that you have exported your YOLOv8 model to the TF.js format, the next step is to deploy it. The primary and recommended first step for running a TF.js is to use the YOLO("./yolov8n_web_model") method, as previously shown in the usage code snippet. +Now that you have exported your YOLO11 model to the TF.js format, the next step is to deploy it. The primary and recommended first step for running a TF.js model is to use the `YOLO("./yolo11n_web_model")` method, as previously shown in the usage code snippet. However, for in-depth instructions on deploying your TF.js models, take a look at the following resources: @@ -111,17 +111,17 @@ However, for in-depth instructions on deploying your TF.js models, take a look a ## Summary -In this guide, we learned how to export Ultralytics YOLOv8 models to the TensorFlow.js format. By exporting to TF.js, you gain the flexibility to optimize, deploy, and scale your YOLOv8 models on a wide range of platforms. +In this guide, we learned how to export Ultralytics YOLO11 models to the TensorFlow.js format. By exporting to TF.js, you gain the flexibility to optimize, deploy, and scale your YOLO11 models on a wide range of platforms. For further details on usage, visit the [TensorFlow.js official documentation](https://www.tensorflow.org/js/guide). -For more information on integrating Ultralytics YOLOv8 with other platforms and frameworks, don't forget to check out our [integration guide page](index.md). It's packed with great resources to help you make the most of YOLOv8 in your projects. +For more information on integrating Ultralytics YOLO11 with other platforms and frameworks, don't forget to check out our [integration guide page](index.md). It's packed with great resources to help you make the most of YOLO11 in your projects. 
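Before wiring the exported directory into a web or Node.js app, it can help to confirm what the export actually produced. The following sketch simply re-runs the export and lists the artifacts; the expectation of a `model.json` manifest plus binary weight shards is the standard TF.js layout rather than anything Ultralytics-specific:

```python
from pathlib import Path

from ultralytics import YOLO

model = YOLO("yolo11n.pt")
model.export(format="tfjs")  # creates 'yolo11n_web_model'

# List the files your web server or Node.js app will need to serve
for artifact in sorted(Path("yolo11n_web_model").iterdir()):
    print(artifact.name)  # expect model.json and *.bin weight shards
```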
## FAQ -### How do I export Ultralytics YOLOv8 models to TensorFlow.js format? +### How do I export Ultralytics YOLO11 models to TensorFlow.js format? -Exporting Ultralytics YOLOv8 models to TensorFlow.js (TF.js) format is straightforward. You can follow these steps: +Exporting Ultralytics YOLO11 models to TensorFlow.js (TF.js) format is straightforward. You can follow these steps: !!! example "Usage" @@ -130,14 +130,14 @@ Exporting Ultralytics YOLOv8 models to TensorFlow.js (TF.js) format is straightf ```python from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Export the model to TF.js format - model.export(format="tfjs") # creates '/yolov8n_web_model' + model.export(format="tfjs") # creates '/yolo11n_web_model' # Load the exported TF.js model - tfjs_model = YOLO("./yolov8n_web_model") + tfjs_model = YOLO("./yolo11n_web_model") # Run inference results = tfjs_model("https://ultralytics.com/images/bus.jpg") @@ -146,18 +146,18 @@ Exporting Ultralytics YOLOv8 models to TensorFlow.js (TF.js) format is straightf === "CLI" ```bash - # Export a YOLOv8n PyTorch model to TF.js format - yolo export model=yolov8n.pt format=tfjs # creates '/yolov8n_web_model' + # Export a YOLO11n PyTorch model to TF.js format + yolo export model=yolo11n.pt format=tfjs # creates '/yolo11n_web_model' # Run inference with the exported model - yolo predict model='./yolov8n_web_model' source='https://ultralytics.com/images/bus.jpg' + yolo predict model='./yolo11n_web_model' source='https://ultralytics.com/images/bus.jpg' ``` For more details about supported export options, visit the [Ultralytics documentation page on deployment options](../guides/model-deployment-options.md). -### Why should I export my YOLOv8 models to TensorFlow.js? +### Why should I export my YOLO11 models to TensorFlow.js? -Exporting YOLOv8 models to TensorFlow.js offers several advantages, including: +Exporting YOLO11 models to TensorFlow.js offers several advantages, including: 1. **Local Execution:** Models can run directly in the browser or Node.js, reducing latency and enhancing user experience. 2. **Cross-Platform Support:** TF.js supports multiple environments, allowing flexibility in deployment. @@ -177,7 +177,7 @@ TensorFlow.js is specifically designed for efficient execution of ML models in b Interested in learning more about TF.js? Check out the [official TensorFlow.js guide](https://www.tensorflow.org/js/guide). -### What are the key features of TensorFlow.js for deploying YOLOv8 models? +### What are the key features of TensorFlow.js for deploying YOLO11 models? Key features of TensorFlow.js include: @@ -185,10 +185,10 @@ Key features of TensorFlow.js include: - **Multiple Backends:** Supports CPU, WebGL for GPU acceleration, WebAssembly (WASM), and WebGPU for advanced operations. - **Offline Capabilities:** Models can run directly in the browser without internet connectivity, making it ideal for developing responsive web applications. -For deployment scenarios and more in-depth information, see our section on [Deployment Options with TensorFlow.js](#deploying-exported-yolov8-tensorflowjs-models). +For deployment scenarios and more in-depth information, see our section on [Deployment Options with TensorFlow.js](#deploying-exported-yolo11-tensorflowjs-models). -### Can I deploy a YOLOv8 model on server-side Node.js applications using TensorFlow.js? +### Can I deploy a YOLO11 model on server-side Node.js applications using TensorFlow.js? 
-Yes, TensorFlow.js allows the deployment of YOLOv8 models on Node.js environments. This enables server-side machine learning applications that benefit from the processing power of a server and access to server-side data. Typical use cases include real-time data processing and machine learning pipelines on backend servers. +Yes, TensorFlow.js allows the deployment of YOLO11 models in Node.js environments. This enables server-side machine learning applications that benefit from the processing power of a server and access to server-side data. Typical use cases include real-time data processing and machine learning pipelines on backend servers. To get started with Node.js deployment, refer to the [Run TensorFlow.js in Node.js](https://www.tensorflow.org/js/guide/nodejs) guide from TensorFlow. diff --git a/docs/en/integrations/tflite.md b/docs/en/integrations/tflite.md index 028675eabb..9f0ebad156 100644 --- a/docs/en/integrations/tflite.md +++ b/docs/en/integrations/tflite.md @@ -1,10 +1,10 @@ --- comments: true -description: Learn how to convert YOLOv8 models to TFLite for edge device deployment. Optimize performance and ensure seamless execution on various platforms. -keywords: YOLOv8, TFLite, model export, TensorFlow Lite, edge devices, deployment, Ultralytics, machine learning, on-device inference, model optimization +description: Learn how to convert YOLO11 models to TFLite for edge device deployment. Optimize performance and ensure seamless execution on various platforms. +keywords: YOLO11, TFLite, model export, TensorFlow Lite, edge devices, deployment, Ultralytics, machine learning, on-device inference, model optimization --- -# A Guide on YOLOv8 Model Export to TFLite for Deployment +# A Guide on YOLO11 Model Export to TFLite for Deployment

TFLite Logo @@ -12,7 +12,7 @@ keywords: YOLOv8, TFLite, model export, TensorFlow Lite, edge devices, deploymen Deploying [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) models on edge devices or embedded devices requires a format that can ensure seamless performance. -The TensorFlow Lite or TFLite export format allows you to optimize your [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models for tasks like [object detection](https://www.ultralytics.com/glossary/object-detection) and [image classification](https://www.ultralytics.com/glossary/image-classification) in edge device-based applications. In this guide, we'll walk through the steps for converting your models to the TFLite format, making it easier for your models to perform well on various edge devices. +The TensorFlow Lite or TFLite export format allows you to optimize your [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics) models for tasks like [object detection](https://www.ultralytics.com/glossary/object-detection) and [image classification](https://www.ultralytics.com/glossary/image-classification) in edge device-based applications. In this guide, we'll walk through the steps for converting your models to the TFLite format, making it easier for your models to perform well on various edge devices. ## Why should you export to TFLite? @@ -34,7 +34,7 @@ TFLite models offer a wide range of key features that enable on-device machine l ## Deployment Options in TFLite -Before we look at the code for exporting YOLOv8 models to the TFLite format, let's understand how TFLite models are normally used. +Before we look at the code for exporting YOLO11 models to the TFLite format, let's understand how TFLite models are normally used. TFLite offers various on-device deployment options for machine learning models, including: @@ -48,7 +48,7 @@ TFLite offers various on-device deployment options for machine learning models, - **Deploying with Microcontrollers**: TFLite models can also be deployed on microcontrollers and other devices with only a few kilobytes of memory. The core runtime just fits in 16 KB on an Arm Cortex M3 and can run many basic models. It doesn't require operating system support, any standard C or C++ libraries, or dynamic memory allocation. -## Export to TFLite: Converting Your YOLOv8 Model +## Export to TFLite: Converting Your YOLO11 Model You can improve on-device model execution efficiency and optimize performance by converting them to TFLite format. @@ -61,15 +61,15 @@ To install the required packages, run: === "CLI" ```bash - # Install the required package for YOLOv8 + # Install the required package for YOLO11 pip install ultralytics ``` -For detailed instructions and best practices related to the installation process, check our [Ultralytics Installation guide](../quickstart.md). While installing the required packages for YOLOv8, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. +For detailed instructions and best practices related to the installation process, check our [Ultralytics Installation guide](../quickstart.md). While installing the required packages for YOLO11, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. 
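In addition to the default float32 export shown in the Usage section below, the Ultralytics exporter accepts optimization flags that matter for edge targets. Here is a brief sketch of the half-precision and INT8 variants; treat the exact output filenames as following the float32 naming pattern rather than as guaranteed:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")

# Half-precision export roughly halves the file size on disk
model.export(format="tflite", half=True)

# INT8 quantization needs a small calibration dataset; 'coco8.yaml' is
# just the tiny example dataset used elsewhere in these docs
model.export(format="tflite", int8=True, data="coco8.yaml")
```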
### Usage -Before diving into the usage instructions, it's important to note that while all [Ultralytics YOLOv8 models](../models/index.md) are available for exporting, you can ensure that the model you select supports export functionality [here](../modes/export.md). +Before diving into the usage instructions, it's important to note that while all [Ultralytics YOLO11 models](../models/index.md) are available for exporting, you can ensure that the model you select supports export functionality [here](../modes/export.md). !!! example "Usage" @@ -78,14 +78,14 @@ Before diving into the usage instructions, it's important to note that while all ```python from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Export the model to TFLite format - model.export(format="tflite") # creates 'yolov8n_float32.tflite' + model.export(format="tflite") # creates 'yolo11n_float32.tflite' # Load the exported TFLite model - tflite_model = YOLO("yolov8n_float32.tflite") + tflite_model = YOLO("yolo11n_float32.tflite") # Run inference results = tflite_model("https://ultralytics.com/images/bus.jpg") @@ -94,18 +94,18 @@ Before diving into the usage instructions, it's important to note that while all === "CLI" ```bash - # Export a YOLOv8n PyTorch model to TFLite format - yolo export model=yolov8n.pt format=tflite # creates 'yolov8n_float32.tflite' + # Export a YOLO11n PyTorch model to TFLite format + yolo export model=yolo11n.pt format=tflite # creates 'yolo11n_float32.tflite' # Run inference with the exported model - yolo predict model='yolov8n_float32.tflite' source='https://ultralytics.com/images/bus.jpg' + yolo predict model='yolo11n_float32.tflite' source='https://ultralytics.com/images/bus.jpg' ``` For more details about the export process, visit the [Ultralytics documentation page on exporting](../modes/export.md). -## Deploying Exported YOLOv8 TFLite Models +## Deploying Exported YOLO11 TFLite Models -After successfully exporting your Ultralytics YOLOv8 models to TFLite format, you can now deploy them. The primary and recommended first step for running a TFLite model is to utilize the YOLO("model.tflite") method, as outlined in the previous usage code snippet. However, for in-depth instructions on deploying your TFLite models in various other settings, take a look at the following resources: +After successfully exporting your Ultralytics YOLO11 models to TFLite format, you can now deploy them. The primary and recommended first step for running a TFLite model is to utilize the YOLO("model.tflite") method, as outlined in the previous usage code snippet. However, for in-depth instructions on deploying your TFLite models in various other settings, take a look at the following resources: - **[Android](https://ai.google.dev/edge/litert/android)**: A quick start guide for integrating [TensorFlow](https://www.ultralytics.com/glossary/tensorflow) Lite into Android applications, providing easy-to-follow steps for setting up and running [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) models. @@ -115,17 +115,17 @@ After successfully exporting your Ultralytics YOLOv8 models to TFLite format, yo ## Summary -In this guide, we focused on how to export to TFLite format. 
By converting your Ultralytics YOLOv8 models to TFLite model format, you can improve the efficiency and speed of YOLOv8 models, making them more effective and suitable for [edge computing](https://www.ultralytics.com/glossary/edge-computing) environments. +In this guide, we focused on how to export to TFLite format. By converting your Ultralytics YOLO11 models to TFLite model format, you can improve the efficiency and speed of YOLO11 models, making them more effective and suitable for [edge computing](https://www.ultralytics.com/glossary/edge-computing) environments. For further details on usage, visit the [TFLite official documentation](https://ai.google.dev/edge/litert). -Also, if you're curious about other Ultralytics YOLOv8 integrations, make sure to check out our [integration guide page](../integrations/index.md). You'll find tons of helpful info and insights waiting for you there. +Also, if you're curious about other Ultralytics YOLO11 integrations, make sure to check out our [integration guide page](../integrations/index.md). You'll find tons of helpful info and insights waiting for you there. ## FAQ -### How do I export a YOLOv8 model to TFLite format? +### How do I export a YOLO11 model to TFLite format? -To export a YOLOv8 model to TFLite format, you can use the Ultralytics library. First, install the required package using: +To export a YOLO11 model to TFLite format, you can use the Ultralytics library. First, install the required package using: ```bash pip install ultralytics @@ -136,24 +136,24 @@ Then, use the following code snippet to export your model: ```python from ultralytics import YOLO -# Load the YOLOv8 model -model = YOLO("yolov8n.pt") +# Load the YOLO11 model +model = YOLO("yolo11n.pt") # Export the model to TFLite format -model.export(format="tflite") # creates 'yolov8n_float32.tflite' +model.export(format="tflite") # creates 'yolo11n_float32.tflite' ``` For CLI users, you can achieve this with: ```bash -yolo export model=yolov8n.pt format=tflite # creates 'yolov8n_float32.tflite' +yolo export model=yolo11n.pt format=tflite # creates 'yolo11n_float32.tflite' ``` For more details, visit the [Ultralytics export guide](../modes/export.md). -### What are the benefits of using TensorFlow Lite for YOLOv8 [model deployment](https://www.ultralytics.com/glossary/model-deployment)? +### What are the benefits of using TensorFlow Lite for YOLO11 [model deployment](https://www.ultralytics.com/glossary/model-deployment)? -TensorFlow Lite (TFLite) is an open-source [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) framework designed for on-device inference, making it ideal for deploying YOLOv8 models on mobile, embedded, and IoT devices. Key benefits include: +TensorFlow Lite (TFLite) is an open-source [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) framework designed for on-device inference, making it ideal for deploying YOLO11 models on mobile, embedded, and IoT devices. Key benefits include: - **On-device optimization**: Minimize latency and enhance privacy by processing data locally. - **Platform compatibility**: Supports Android, iOS, embedded Linux, and MCU. @@ -161,33 +161,33 @@ TensorFlow Lite (TFLite) is an open-source [deep learning](https://www.ultralyti To learn more, check out the [TFLite guide](https://ai.google.dev/edge/litert). -### Is it possible to run YOLOv8 TFLite models on Raspberry Pi? +### Is it possible to run YOLO11 TFLite models on Raspberry Pi? 
-Yes, you can run YOLOv8 TFLite models on Raspberry Pi to improve inference speeds. First, export your model to TFLite format as explained [here](#how-do-i-export-a-yolov8-model-to-tflite-format). Then, use a tool like TensorFlow Lite Interpreter to execute the model on your Raspberry Pi. +Yes, you can run YOLO11 TFLite models on Raspberry Pi to improve inference speeds. First, export your model to TFLite format as explained [here](#how-do-i-export-a-yolo11-model-to-tflite-format). Then, use a tool like TensorFlow Lite Interpreter to execute the model on your Raspberry Pi. For further optimizations, you might consider using [Coral Edge TPU](https://coral.withgoogle.com/). For detailed steps, refer to our [Raspberry Pi deployment guide](../guides/raspberry-pi.md). -### Can I use TFLite models on microcontrollers for YOLOv8 predictions? +### Can I use TFLite models on microcontrollers for YOLO11 predictions? -Yes, TFLite supports deployment on microcontrollers with limited resources. TFLite's core runtime requires only 16 KB of memory on an Arm Cortex M3 and can run basic YOLOv8 models. This makes it suitable for deployment on devices with minimal computational power and memory. +Yes, TFLite supports deployment on microcontrollers with limited resources. TFLite's core runtime requires only 16 KB of memory on an Arm Cortex M3 and can run basic YOLO11 models. This makes it suitable for deployment on devices with minimal computational power and memory. To get started, visit the [TFLite Micro for Microcontrollers guide](https://ai.google.dev/edge/litert/microcontrollers/overview). -### What platforms are compatible with TFLite exported YOLOv8 models? +### What platforms are compatible with TFLite exported YOLO11 models? -TensorFlow Lite provides extensive platform compatibility, allowing you to deploy YOLOv8 models on a wide range of devices, including: +TensorFlow Lite provides extensive platform compatibility, allowing you to deploy YOLO11 models on a wide range of devices, including: - **Android and iOS**: Native support through TFLite Android and iOS libraries. - **Embedded Linux**: Ideal for single-board computers such as Raspberry Pi. - **Microcontrollers**: Suitable for MCUs with constrained resources. -For more information on deployment options, see our detailed [deployment guide](#deploying-exported-yolov8-tflite-models). +For more information on deployment options, see our detailed [deployment guide](#deploying-exported-yolo11-tflite-models). -### How do I troubleshoot common issues during YOLOv8 model export to TFLite? +### How do I troubleshoot common issues during YOLO11 model export to TFLite? -If you encounter errors while exporting YOLOv8 models to TFLite, common solutions include: +If you encounter errors while exporting YOLO11 models to TFLite, common solutions include: - **Check package compatibility**: Ensure you're using compatible versions of Ultralytics and TensorFlow. Refer to our [installation guide](../quickstart.md). -- **Model support**: Verify that the specific YOLOv8 model supports TFLite export by checking [here](../modes/export.md). +- **Model support**: Verify that the specific YOLO11 model supports TFLite export by checking [here](../modes/export.md). For additional troubleshooting tips, visit our [Common Issues guide](../guides/yolo-common-issues.md). 
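As a minimal sketch of the interpreter route described above: the snippet below assumes the `yolo11n_float32.tflite` file from the export step and the `tflite_runtime` package, feeds a random dummy tensor rather than a real preprocessed image, and leaves box decoding out. Loading the file with `YOLO("yolo11n_float32.tflite")` as shown earlier remains the simpler path.

```python
import numpy as np
from tflite_runtime.interpreter import Interpreter  # lightweight runtime; tf.lite.Interpreter also works with full TensorFlow

# Load the exported model (filename assumed from the export step above)
interpreter = Interpreter(model_path="yolo11n_float32.tflite")
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Build a dummy input matching the model's expected shape, e.g. (1, 640, 640, 3)
input_shape = input_details[0]["shape"]
dummy_input = np.random.rand(*input_shape).astype(np.float32)

# Run one inference pass
interpreter.set_tensor(input_details[0]["index"], dummy_input)
interpreter.invoke()

# Raw predictions; real use requires preprocessing an image and decoding boxes
predictions = interpreter.get_tensor(output_details[0]["index"])
print(predictions.shape)
```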
diff --git a/docs/en/integrations/torchscript.md b/docs/en/integrations/torchscript.md index 839caff921..1be1516c0b 100644 --- a/docs/en/integrations/torchscript.md +++ b/docs/en/integrations/torchscript.md @@ -1,22 +1,22 @@ --- comments: true -description: Learn how to export Ultralytics YOLOv8 models to TorchScript for flexible, cross-platform deployment. Boost performance and utilize in various environments. -keywords: YOLOv8, TorchScript, model export, Ultralytics, PyTorch, deep learning, AI deployment, cross-platform, performance optimization +description: Learn how to export Ultralytics YOLO11 models to TorchScript for flexible, cross-platform deployment. Boost performance and utilize in various environments. +keywords: YOLO11, TorchScript, model export, Ultralytics, PyTorch, deep learning, AI deployment, cross-platform, performance optimization --- -# YOLOv8 Model Export to TorchScript for Quick Deployment +# YOLO11 Model Export to TorchScript for Quick Deployment Deploying [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) models across different environments, including embedded systems, web browsers, or platforms with limited Python support, requires a flexible and portable solution. TorchScript focuses on portability and the ability to run models in environments where the entire Python framework is unavailable. This makes it ideal for scenarios where you need to deploy your computer vision capabilities across various devices or platforms. -Export to Torchscript to serialize your [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models for cross-platform compatibility and streamlined deployment. In this guide, we'll show you how to export your YOLOv8 models to the TorchScript format, making it easier for you to use them across a wider range of applications. +Export to Torchscript to serialize your [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics) models for cross-platform compatibility and streamlined deployment. In this guide, we'll show you how to export your YOLO11 models to the TorchScript format, making it easier for you to use them across a wider range of applications. ## Why should you export to TorchScript? ![Torchscript Overview](https://github.com/ultralytics/docs/releases/download/0/torchscript-overview.avif) -Developed by the creators of PyTorch, TorchScript is a powerful tool for optimizing and deploying PyTorch models across a variety of platforms. Exporting YOLOv8 models to [TorchScript](https://pytorch.org/docs/stable/jit.html) is crucial for moving from research to real-world applications. TorchScript, part of the PyTorch framework, helps make this transition smoother by allowing PyTorch models to be used in environments that don't support Python. +Developed by the creators of PyTorch, TorchScript is a powerful tool for optimizing and deploying PyTorch models across a variety of platforms. Exporting YOLO11 models to [TorchScript](https://pytorch.org/docs/stable/jit.html) is crucial for moving from research to real-world applications. TorchScript, part of the PyTorch framework, helps make this transition smoother by allowing PyTorch models to be used in environments that don't support Python. -The process involves two techniques: tracing and scripting. Tracing records operations during model execution, while scripting allows for the definition of models using a subset of Python. These techniques ensure that models like YOLOv8 can still work their magic even outside their usual Python environment. 
+The process involves two techniques: tracing and scripting. Tracing records operations during model execution, while scripting allows for the definition of models using a subset of Python. These techniques ensure that models like YOLO11 can still work their magic even outside their usual Python environment. ![TorchScript Script and Trace](https://github.com/ultralytics/docs/releases/download/0/torchscript-script-and-trace.avif) @@ -42,7 +42,7 @@ Here are the key features that make TorchScript a valuable tool for developers: ## Deployment Options in TorchScript -Before we look at the code for exporting YOLOv8 models to the TorchScript format, let's understand where TorchScript models are normally used. +Before we look at the code for exporting YOLO11 models to the TorchScript format, let's understand where TorchScript models are normally used. TorchScript offers various deployment options for [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) models, such as: @@ -52,9 +52,9 @@ TorchScript offers various deployment options for [machine learning](https://www - **Cloud Deployment**: TorchScript models can be deployed to cloud-based servers using solutions like TorchServe. It provides features like model versioning, batching, and metrics monitoring for scalable deployment in production environments. Cloud deployment with TorchScript can make your models accessible via APIs or other web services. -## Export to TorchScript: Converting Your YOLOv8 Model +## Export to TorchScript: Converting Your YOLO11 Model -Exporting YOLOv8 models to TorchScript makes it easier to use them in different places and helps them run faster and more efficiently. This is great for anyone looking to use deep learning models more effectively in real-world applications. +Exporting YOLO11 models to TorchScript makes it easier to use them in different places and helps them run faster and more efficiently. This is great for anyone looking to use deep learning models more effectively in real-world applications. ### Installation @@ -65,15 +65,15 @@ To install the required package, run: === "CLI" ```bash - # Install the required package for YOLOv8 + # Install the required package for YOLO11 pip install ultralytics ``` -For detailed instructions and best practices related to the installation process, check our [Ultralytics Installation guide](../quickstart.md). While installing the required packages for YOLOv8, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. +For detailed instructions and best practices related to the installation process, check our [Ultralytics Installation guide](../quickstart.md). While installing the required packages for YOLO11, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. ### Usage -Before diving into the usage instructions, it's important to note that while all [Ultralytics YOLOv8 models](../models/index.md) are available for exporting, you can ensure that the model you select supports export functionality [here](../modes/export.md). +Before diving into the usage instructions, it's important to note that while all [Ultralytics YOLO11 models](../models/index.md) are available for exporting, you can ensure that the model you select supports export functionality [here](../modes/export.md). !!! 
example "Usage" @@ -82,14 +82,14 @@ Before diving into the usage instructions, it's important to note that while all ```python from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Export the model to TorchScript format - model.export(format="torchscript") # creates 'yolov8n.torchscript' + model.export(format="torchscript") # creates 'yolo11n.torchscript' # Load the exported TorchScript model - torchscript_model = YOLO("yolov8n.torchscript") + torchscript_model = YOLO("yolo11n.torchscript") # Run inference results = torchscript_model("https://ultralytics.com/images/bus.jpg") @@ -98,18 +98,18 @@ Before diving into the usage instructions, it's important to note that while all === "CLI" ```bash - # Export a YOLOv8n PyTorch model to TorchScript format - yolo export model=yolov8n.pt format=torchscript # creates 'yolov8n.torchscript' + # Export a YOLO11n PyTorch model to TorchScript format + yolo export model=yolo11n.pt format=torchscript # creates 'yolo11n.torchscript' # Run inference with the exported model - yolo predict model=yolov8n.torchscript source='https://ultralytics.com/images/bus.jpg' + yolo predict model=yolo11n.torchscript source='https://ultralytics.com/images/bus.jpg' ``` For more details about the export process, visit the [Ultralytics documentation page on exporting](../modes/export.md). -## Deploying Exported YOLOv8 TorchScript Models +## Deploying Exported YOLO11 TorchScript Models -After successfully exporting your Ultralytics YOLOv8 models to TorchScript format, you can now deploy them. The primary and recommended first step for running a TorchScript model is to utilize the YOLO("model.torchscript") method, as outlined in the previous usage code snippet. However, for in-depth instructions on deploying your TorchScript models in various other settings, take a look at the following resources: +After successfully exporting your Ultralytics YOLO11 models to TorchScript format, you can now deploy them. The primary and recommended first step for running a TorchScript model is to utilize the YOLO("model.torchscript") method, as outlined in the previous usage code snippet. However, for in-depth instructions on deploying your TorchScript models in various other settings, take a look at the following resources: - **[Explore Mobile Deployment](https://pytorch.org/mobile/home/)**: The [PyTorch](https://www.ultralytics.com/glossary/pytorch) Mobile Documentation provides comprehensive guidelines for deploying models on mobile devices, ensuring your applications are efficient and responsive. @@ -119,21 +119,21 @@ After successfully exporting your Ultralytics YOLOv8 models to TorchScript forma ## Summary -In this guide, we explored the process of exporting Ultralytics YOLOv8 models to the TorchScript format. By following the provided instructions, you can optimize YOLOv8 models for performance and gain the flexibility to deploy them across various platforms and environments. +In this guide, we explored the process of exporting Ultralytics YOLO11 models to the TorchScript format. By following the provided instructions, you can optimize YOLO11 models for performance and gain the flexibility to deploy them across various platforms and environments. For further details on usage, visit [TorchScript's official documentation](https://pytorch.org/docs/stable/jit.html). -Also, if you'd like to know more about other Ultralytics YOLOv8 integrations, visit our [integration guide page](../integrations/index.md). 
You'll find plenty of useful resources and insights there. +Also, if you'd like to know more about other Ultralytics YOLO11 integrations, visit our [integration guide page](../integrations/index.md). You'll find plenty of useful resources and insights there. ## FAQ -### What is Ultralytics YOLOv8 model export to TorchScript? +### What is Ultralytics YOLO11 model export to TorchScript? -Exporting an Ultralytics YOLOv8 model to TorchScript allows for flexible, cross-platform deployment. TorchScript, a part of the PyTorch ecosystem, facilitates the serialization of models, which can then be executed in environments that lack Python support. This makes it ideal for deploying models on embedded systems, C++ environments, mobile applications, and even web browsers. Exporting to TorchScript enables efficient performance and wider applicability of your YOLOv8 models across diverse platforms. +Exporting an Ultralytics YOLO11 model to TorchScript allows for flexible, cross-platform deployment. TorchScript, a part of the PyTorch ecosystem, facilitates the serialization of models, which can then be executed in environments that lack Python support. This makes it ideal for deploying models on embedded systems, C++ environments, mobile applications, and even web browsers. Exporting to TorchScript enables efficient performance and wider applicability of your YOLO11 models across diverse platforms. -### How can I export my YOLOv8 model to TorchScript using Ultralytics? +### How can I export my YOLO11 model to TorchScript using Ultralytics? -To export a YOLOv8 model to TorchScript, you can use the following example code: +To export a YOLO11 model to TorchScript, you can use the following example code: !!! example "Usage" @@ -142,14 +142,14 @@ To export a YOLOv8 model to TorchScript, you can use the following example code: ```python from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Export the model to TorchScript format - model.export(format="torchscript") # creates 'yolov8n.torchscript' + model.export(format="torchscript") # creates 'yolo11n.torchscript' # Load the exported TorchScript model - torchscript_model = YOLO("yolov8n.torchscript") + torchscript_model = YOLO("yolo11n.torchscript") # Run inference results = torchscript_model("https://ultralytics.com/images/bus.jpg") @@ -158,18 +158,18 @@ To export a YOLOv8 model to TorchScript, you can use the following example code: === "CLI" ```bash - # Export a YOLOv8n PyTorch model to TorchScript format - yolo export model=yolov8n.pt format=torchscript # creates 'yolov8n.torchscript' + # Export a YOLO11n PyTorch model to TorchScript format + yolo export model=yolo11n.pt format=torchscript # creates 'yolo11n.torchscript' # Run inference with the exported model - yolo predict model=yolov8n.torchscript source='https://ultralytics.com/images/bus.jpg' + yolo predict model=yolo11n.torchscript source='https://ultralytics.com/images/bus.jpg' ``` For more details about the export process, refer to the [Ultralytics documentation on exporting](../modes/export.md). -### Why should I use TorchScript for deploying YOLOv8 models? +### Why should I use TorchScript for deploying YOLO11 models? -Using TorchScript for deploying YOLOv8 models offers several advantages: +Using TorchScript for deploying YOLO11 models offers several advantages: - **Portability**: Exported models can run in environments without the need for Python, such as C++ applications, embedded systems, or mobile devices. 
- **Optimization**: TorchScript supports static graph execution and Just-In-Time (JIT) compilation, which can optimize model performance. @@ -178,24 +178,24 @@ Using TorchScript for deploying YOLOv8 models offers several advantages: For more insights into deployment, visit the [PyTorch Mobile Documentation](https://pytorch.org/mobile/home/), [TorchServe Documentation](https://pytorch.org/serve/getting_started.html), and [C++ Deployment Guide](https://pytorch.org/tutorials/advanced/cpp_export.html). -### What are the installation steps for exporting YOLOv8 models to TorchScript? +### What are the installation steps for exporting YOLO11 models to TorchScript? -To install the required package for exporting YOLOv8 models, use the following command: +To install the required package for exporting YOLO11 models, use the following command: !!! tip "Installation" === "CLI" ```bash - # Install the required package for YOLOv8 + # Install the required package for YOLO11 pip install ultralytics ``` For detailed instructions, visit the [Ultralytics Installation guide](../quickstart.md). If any issues arise during installation, consult the [Common Issues guide](../guides/yolo-common-issues.md). -### How do I deploy my exported TorchScript YOLOv8 models? +### How do I deploy my exported TorchScript YOLO11 models? -After exporting YOLOv8 models to the TorchScript format, you can deploy them across a variety of platforms: +After exporting YOLO11 models to the TorchScript format, you can deploy them across a variety of platforms: - **C++ API**: Ideal for low-overhead, highly efficient production environments. - **Mobile Deployment**: Use [PyTorch Mobile](https://pytorch.org/mobile/home/) for iOS and Android applications. diff --git a/docs/en/integrations/vscode.md b/docs/en/integrations/vscode.md index b6785d1529..521abde311 100644 --- a/docs/en/integrations/vscode.md +++ b/docs/en/integrations/vscode.md @@ -134,7 +134,7 @@ The `ultra.examples` snippets are to useful for anyone looking to learn how to g ```python from ultralytics import ASSETS, YOLO - model = YOLO("yolov8n.pt", task="detect") + model = YOLO("yolo11n.pt", task="detect") results = model(source=ASSETS / "bus.jpg") for result in results: diff --git a/docs/en/integrations/weights-biases.md b/docs/en/integrations/weights-biases.md index cb9dc95131..9f2cbb2fa0 100644 --- a/docs/en/integrations/weights-biases.md +++ b/docs/en/integrations/weights-biases.md @@ -1,12 +1,12 @@ --- comments: true -description: Learn how to enhance YOLOv8 experiment tracking and visualization with Weights & Biases for better model performance and management. -keywords: YOLOv8, Weights & Biases, model training, experiment tracking, Ultralytics, machine learning, computer vision, model visualization +description: Learn how to enhance YOLO11 experiment tracking and visualization with Weights & Biases for better model performance and management. +keywords: YOLO11, Weights & Biases, model training, experiment tracking, Ultralytics, machine learning, computer vision, model visualization --- -# Enhancing YOLOv8 Experiment Tracking and Visualization with Weights & Biases +# Enhancing YOLO11 Experiment Tracking and Visualization with Weights & Biases -[Object detection](https://www.ultralytics.com/glossary/object-detection) models like [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) have become integral to many [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) applications. 
However, training, evaluating, and deploying these complex models introduces several challenges. Tracking key training metrics, comparing model variants, analyzing model behavior, and detecting issues require substantial instrumentation and experiment management. +[Object detection](https://www.ultralytics.com/glossary/object-detection) models like [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics) have become integral to many [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) applications. However, training, evaluating, and deploying these complex models introduces several challenges. Tracking key training metrics, comparing model variants, analyzing model behavior, and detecting issues require substantial instrumentation and experiment management.
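As a rough sketch of what this instrumentation looks like with plain Weights & Biases logging (the Ultralytics callback shown later automates this), assuming placeholder project and metric names:

```python
import wandb

# Hypothetical run; project name and job type are placeholders
run = wandb.init(project="yolo-experiments", job_type="training")

for epoch in range(3):
    # In a real training loop these values would come from the trainer
    metrics = {"train/box_loss": 1.0 / (epoch + 1), "val/mAP50-95": 0.30 + 0.05 * epoch}
    wandb.log(metrics, step=epoch)

run.finish()
```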


@@ -16,10 +16,10 @@ keywords: YOLOv8, Weights & Biases, model training, experiment tracking, Ultralytics, machine learning, computer vision, model visualization
allowfullscreen>
- Watch: How to use Ultralytics YOLOv8 with Weights and Biases + Watch: How to use Ultralytics YOLO11 with Weights and Biases

-This guide showcases Ultralytics YOLOv8 integration with Weights & Biases' for enhanced experiment tracking, model-checkpointing, and visualization of model performance. It also includes instructions for setting up the integration, training, fine-tuning, and visualizing results using Weights & Biases' interactive features. +This guide showcases Ultralytics YOLO11 integration with Weights & Biases' for enhanced experiment tracking, model-checkpointing, and visualization of model performance. It also includes instructions for setting up the integration, training, fine-tuning, and visualizing results using Weights & Biases' interactive features. ## Weights & Biases @@ -29,9 +29,9 @@ This guide showcases Ultralytics YOLOv8 integration with Weights & Biases' for e [Weights & Biases](https://wandb.ai/site) is a cutting-edge MLOps platform designed for tracking, visualizing, and managing [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) experiments. It features automatic logging of training metrics for full experiment reproducibility, an interactive UI for streamlined data analysis, and efficient model management tools for deploying across various environments. -## YOLOv8 Training with Weights & Biases +## YOLO11 Training with Weights & Biases -You can use Weights & Biases to bring efficiency and automation to your YOLOv8 training process. +You can use Weights & Biases to bring efficiency and automation to your YOLO11 training process. ## Installation @@ -42,11 +42,11 @@ To install the required packages, run: === "CLI" ```bash - # Install the required packages for YOLOv8 and Weights & Biases + # Install the required packages for YOLO11 and Weights & Biases pip install --upgrade ultralytics==8.0.186 wandb ``` -For detailed instructions and best practices related to the installation process, be sure to check our [YOLOv8 Installation guide](../quickstart.md). While installing the required packages for YOLOv8, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. +For detailed instructions and best practices related to the installation process, be sure to check our [YOLO11 Installation guide](../quickstart.md). While installing the required packages for YOLO11, if you encounter any difficulties, consult our [Common Issues guide](../guides/yolo-common-issues.md) for solutions and tips. ## Configuring Weights & Biases @@ -66,11 +66,11 @@ Start by initializing the Weights & Biases environment in your workspace. You ca Navigate to the Weights & Biases authorization page to create and retrieve your API key. Use this key to authenticate your environment with W&B. -## Usage: Training YOLOv8 with Weights & Biases +## Usage: Training YOLO11 with Weights & Biases -Before diving into the usage instructions for YOLOv8 model training with Weights & Biases, be sure to check out the range of [YOLOv8 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements. +Before diving into the usage instructions for YOLO11 model training with Weights & Biases, be sure to check out the range of [YOLO11 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements. -!!! example "Usage: Training YOLOv8 with Weights & Biases" +!!! 
example "Usage: Training YOLO11 with Weights & Biases" === "Python" @@ -84,7 +84,7 @@ Before diving into the usage instructions for YOLOv8 model training with Weights wandb.init(project="ultralytics", job_type="training") # Load a YOLO model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Add W&B Callback for Ultralytics add_wandb_callback(model, enable_model_checkpointing=True) @@ -108,7 +108,7 @@ Let's understand the steps showcased in the usage code snippet above. - **Step 1: Initialize a Weights & Biases Run**: Start by initializing a Weights & Biases run, specifying the project name and the job type. This run will track and manage the training and validation processes of your model. -- **Step 2: Define the YOLOv8 Model and Dataset**: Specify the model variant and the dataset you wish to use. The YOLO model is then initialized with the specified model file. +- **Step 2: Define the YOLO11 Model and Dataset**: Specify the model variant and the dataset you wish to use. The YOLO model is then initialized with the specified model file. - **Step 3: Add Weights & Biases Callback for Ultralytics**: This step is crucial as it enables the automatic logging of training metrics and validation results to Weights & Biases, providing a detailed view of the model's performance. @@ -132,13 +132,13 @@ Upon running the usage code snippet above, you can expect the following key outp ### Viewing the Weights & Biases Dashboard -After running the usage code snippet, you can access the Weights & Biases (W&B) dashboard through the provided link in the output. This dashboard offers a comprehensive view of your model's training process with YOLOv8. +After running the usage code snippet, you can access the Weights & Biases (W&B) dashboard through the provided link in the output. This dashboard offers a comprehensive view of your model's training process with YOLO11. ## Key Features of the Weights & Biases Dashboard - **Real-Time Metrics Tracking**: Observe metrics like loss, accuracy, and validation scores as they evolve during the training, offering immediate insights for model tuning. [See how experiments are tracked using Weights & Biases](https://imgur.com/D6NVnmN). -- **Hyperparameter Optimization**: Weights & Biases aids in fine-tuning critical parameters such as [learning rate](https://www.ultralytics.com/glossary/learning-rate), batch size, and more, enhancing the performance of YOLOv8. +- **Hyperparameter Optimization**: Weights & Biases aids in fine-tuning critical parameters such as [learning rate](https://www.ultralytics.com/glossary/learning-rate), batch size, and more, enhancing the performance of YOLO11. - **Comparative Analysis**: The platform allows side-by-side comparisons of different training runs, essential for assessing the impact of various model configurations. @@ -148,33 +148,33 @@ After running the usage code snippet, you can access the Weights & Biases (W&B) - **Model Artifacts Management**: Access and share model checkpoints, facilitating easy deployment and collaboration. -- **Viewing Inference Results with Image Overlay**: Visualize the prediction results on images using interactive overlays in Weights & Biases, providing a clear and detailed view of model performance on real-world data. For more detailed information on Weights & Biases' image overlay capabilities, check out this [link](https://docs.wandb.ai/guides/track/log/media#image-overlays). [See how Weights & Biases' image overlays helps visualize model inferences](https://imgur.com/a/UTSiufs). 
+- **Viewing Inference Results with Image Overlay**: Visualize the prediction results on images using interactive overlays in Weights & Biases, providing a clear and detailed view of model performance on real-world data. For more detailed information on Weights & Biases' image overlay capabilities, check out this [link](https://docs.wandb.ai/guides/track/log/media/#image-overlays). [See how Weights & Biases' image overlays helps visualize model inferences](https://imgur.com/a/UTSiufs). -By using these features, you can effectively track, analyze, and optimize your YOLOv8 model's training, ensuring the best possible performance and efficiency. +By using these features, you can effectively track, analyze, and optimize your YOLO11 model's training, ensuring the best possible performance and efficiency. ## Summary -This guide helped you explore Ultralytics' YOLOv8 integration with Weights & Biases. It illustrates the ability of this integration to efficiently track and visualize model training and prediction results. +This guide helped you explore Ultralytics' YOLO11 integration with Weights & Biases. It illustrates the ability of this integration to efficiently track and visualize model training and prediction results. -For further details on usage, visit [Weights & Biases' official documentation](https://docs.wandb.ai/guides/integrations/ultralytics). +For further details on usage, visit [Weights & Biases' official documentation](https://docs.wandb.ai/guides/integrations/ultralytics/). Also, be sure to check out the [Ultralytics integration guide page](../integrations/index.md), to learn more about different exciting integrations. ## FAQ -### How do I install the required packages for YOLOv8 and Weights & Biases? +### How do I install the required packages for YOLO11 and Weights & Biases? -To install the required packages for YOLOv8 and Weights & Biases, open your command line interface and run: +To install the required packages for YOLO11 and Weights & Biases, open your command line interface and run: ```bash pip install --upgrade ultralytics==8.0.186 wandb ``` -For further guidance on installation steps, refer to our [YOLOv8 Installation guide](../quickstart.md). If you encounter issues, consult the [Common Issues guide](../guides/yolo-common-issues.md) for troubleshooting tips. +For further guidance on installation steps, refer to our [YOLO11 Installation guide](../quickstart.md). If you encounter issues, consult the [Common Issues guide](../guides/yolo-common-issues.md) for troubleshooting tips. -### What are the benefits of integrating Ultralytics YOLOv8 with Weights & Biases? +### What are the benefits of integrating Ultralytics YOLO11 with Weights & Biases? -Integrating Ultralytics YOLOv8 with Weights & Biases offers several benefits including: +Integrating Ultralytics YOLO11 with Weights & Biases offers several benefits including: - **Real-Time Metrics Tracking:** Observe metric changes during training for immediate insights. - **Hyperparameter Optimization:** Improve model performance by fine-tuning learning rate, [batch size](https://www.ultralytics.com/glossary/batch-size), etc. @@ -184,9 +184,9 @@ Integrating Ultralytics YOLOv8 with Weights & Biases offers several benefits inc Explore these features in detail in the Weights & Biases Dashboard section above. -### How can I configure Weights & Biases for YOLOv8 training? +### How can I configure Weights & Biases for YOLO11 training? 
-To configure Weights & Biases for YOLOv8 training, follow these steps: +To configure Weights & Biases for YOLO11 training, follow these steps: 1. Run the command to initialize Weights & Biases: ```bash @@ -198,9 +198,9 @@ To configure Weights & Biases for YOLOv8 training, follow these steps: Detailed setup instructions can be found in the Configuring Weights & Biases section above. -### How do I train a YOLOv8 model using Weights & Biases? +### How do I train a YOLO11 model using Weights & Biases? -For training a YOLOv8 model using Weights & Biases, use the following steps in a Python script: +For training a YOLO11 model using Weights & Biases, use the following steps in a Python script: ```python import wandb @@ -212,7 +212,7 @@ from ultralytics import YOLO wandb.init(project="ultralytics", job_type="training") # Load a YOLO model -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") # Add W&B Callback for Ultralytics add_wandb_callback(model, enable_model_checkpointing=True) @@ -232,9 +232,9 @@ wandb.finish() This script initializes Weights & Biases, sets up the model, trains it, and logs results. For more details, visit the Usage section above. -### Why should I use Ultralytics YOLOv8 with Weights & Biases over other platforms? +### Why should I use Ultralytics YOLO11 with Weights & Biases over other platforms? -Ultralytics YOLOv8 integrated with Weights & Biases offers several unique advantages: +Ultralytics YOLO11 integrated with Weights & Biases offers several unique advantages: - **High Efficiency:** Real-time tracking of training metrics and performance optimization. - **Scalability:** Easily manage large-scale training jobs with robust resource monitoring and utilization tools. diff --git a/docs/en/macros/augmentation-args.md b/docs/en/macros/augmentation-args.md index d8dbee8583..b4d6c9df6d 100644 --- a/docs/en/macros/augmentation-args.md +++ b/docs/en/macros/augmentation-args.md @@ -1,19 +1,20 @@ -| Argument | Type | Default | Range | Description | -| --------------- | ------- | ------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `hsv_h` | `float` | `0.015` | `0.0 - 1.0` | Adjusts the hue of the image by a fraction of the color wheel, introducing color variability. Helps the model generalize across different lighting conditions. | -| `hsv_s` | `float` | `0.7` | `0.0 - 1.0` | Alters the saturation of the image by a fraction, affecting the intensity of colors. Useful for simulating different environmental conditions. | -| `hsv_v` | `float` | `0.4` | `0.0 - 1.0` | Modifies the value (brightness) of the image by a fraction, helping the model to perform well under various lighting conditions. | -| `degrees` | `float` | `0.0` | `-180 - +180` | Rotates the image randomly within the specified degree range, improving the model's ability to recognize objects at various orientations. | -| `translate` | `float` | `0.1` | `0.0 - 1.0` | Translates the image horizontally and vertically by a fraction of the image size, aiding in learning to detect partially visible objects. | -| `scale` | `float` | `0.5` | `>=0.0` | Scales the image by a gain factor, simulating objects at different distances from the camera. | -| `shear` | `float` | `0.0` | `-180 - +180` | Shears the image by a specified degree, mimicking the effect of objects being viewed from different angles. 
| -| `perspective` | `float` | `0.0` | `0.0 - 0.001` | Applies a random perspective transformation to the image, enhancing the model's ability to understand objects in 3D space. | -| `flipud` | `float` | `0.0` | `0.0 - 1.0` | Flips the image upside down with the specified probability, increasing the data variability without affecting the object's characteristics. | -| `fliplr` | `float` | `0.5` | `0.0 - 1.0` | Flips the image left to right with the specified probability, useful for learning symmetrical objects and increasing dataset diversity. | -| `bgr` | `float` | `0.0` | `0.0 - 1.0` | Flips the image channels from RGB to BGR with the specified probability, useful for increasing robustness to incorrect channel ordering. | -| `mosaic` | `float` | `1.0` | `0.0 - 1.0` | Combines four training images into one, simulating different scene compositions and object interactions. Highly effective for complex scene understanding. | -| `mixup` | `float` | `0.0` | `0.0 - 1.0` | Blends two images and their labels, creating a composite image. Enhances the model's ability to generalize by introducing label noise and visual variability. | -| `copy_paste` | `float` | `0.0` | `0.0 - 1.0` | Copies objects from one image and pastes them onto another, useful for increasing object instances and learning object occlusion. | -| `auto_augment` | `str` | `randaugment` | - | Automatically applies a predefined augmentation policy (`randaugment`, `autoaugment`, `augmix`), optimizing for classification tasks by diversifying the visual features. | -| `erasing` | `float` | `0.4` | `0.0 - 0.9` | Randomly erases a portion of the image during classification training, encouraging the model to focus on less obvious features for recognition. | -| `crop_fraction` | `float` | `1.0` | `0.1 - 1.0` | Crops the classification image to a fraction of its size to emphasize central features and adapt to object scales, reducing background distractions. | +| Argument | Type | Default | Range | Description | +| ----------------- | ------- | ------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `hsv_h` | `float` | `0.015` | `0.0 - 1.0` | Adjusts the hue of the image by a fraction of the color wheel, introducing color variability. Helps the model generalize across different lighting conditions. | +| `hsv_s` | `float` | `0.7` | `0.0 - 1.0` | Alters the saturation of the image by a fraction, affecting the intensity of colors. Useful for simulating different environmental conditions. | +| `hsv_v` | `float` | `0.4` | `0.0 - 1.0` | Modifies the value (brightness) of the image by a fraction, helping the model to perform well under various lighting conditions. | +| `degrees` | `float` | `0.0` | `-180 - +180` | Rotates the image randomly within the specified degree range, improving the model's ability to recognize objects at various orientations. | +| `translate` | `float` | `0.1` | `0.0 - 1.0` | Translates the image horizontally and vertically by a fraction of the image size, aiding in learning to detect partially visible objects. | +| `scale` | `float` | `0.5` | `>=0.0` | Scales the image by a gain factor, simulating objects at different distances from the camera. | +| `shear` | `float` | `0.0` | `-180 - +180` | Shears the image by a specified degree, mimicking the effect of objects being viewed from different angles. 
| +| `perspective` | `float` | `0.0` | `0.0 - 0.001` | Applies a random perspective transformation to the image, enhancing the model's ability to understand objects in 3D space. | +| `flipud` | `float` | `0.0` | `0.0 - 1.0` | Flips the image upside down with the specified probability, increasing the data variability without affecting the object's characteristics. | +| `fliplr` | `float` | `0.5` | `0.0 - 1.0` | Flips the image left to right with the specified probability, useful for learning symmetrical objects and increasing dataset diversity. | +| `bgr` | `float` | `0.0` | `0.0 - 1.0` | Flips the image channels from RGB to BGR with the specified probability, useful for increasing robustness to incorrect channel ordering. | +| `mosaic` | `float` | `1.0` | `0.0 - 1.0` | Combines four training images into one, simulating different scene compositions and object interactions. Highly effective for complex scene understanding. | +| `mixup` | `float` | `0.0` | `0.0 - 1.0` | Blends two images and their labels, creating a composite image. Enhances the model's ability to generalize by introducing label noise and visual variability. | +| `copy_paste` | `float` | `0.0` | `0.0 - 1.0` | Copies objects from one image and pastes them onto another, useful for increasing object instances and learning object occlusion. | +| `copy_paste_mode` | `str` | `flip` | - | Copy-Paste augmentation method selection among the options of (`"flip"`, `"mixup"`). | +| `auto_augment` | `str` | `randaugment` | - | Automatically applies a predefined augmentation policy (`randaugment`, `autoaugment`, `augmix`), optimizing for classification tasks by diversifying the visual features. | +| `erasing` | `float` | `0.4` | `0.0 - 0.9` | Randomly erases a portion of the image during classification training, encouraging the model to focus on less obvious features for recognition. | +| `crop_fraction` | `float` | `1.0` | `0.1 - 1.0` | Crops the classification image to a fraction of its size to emphasize central features and adapt to object scales, reducing background distractions. 
| diff --git a/docs/en/macros/export-table.md b/docs/en/macros/export-table.md index 924a8727c9..7cda31963a 100644 --- a/docs/en/macros/export-table.md +++ b/docs/en/macros/export-table.md @@ -1,15 +1,15 @@ | Format | `format` Argument | Model | Metadata | Arguments | | ------------------------------------------------- | ----------------- | ----------------------------------------------- | -------- | -------------------------------------------------------------------- | -| [PyTorch](https://pytorch.org/) | - | `{{ model_name or "yolov8n" }}.pt` | ✅ | - | -| [TorchScript](../integrations/torchscript.md) | `torchscript` | `{{ model_name or "yolov8n" }}.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | -| [ONNX](../integrations/onnx.md) | `onnx` | `{{ model_name or "yolov8n" }}.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | -| [OpenVINO](../integrations/openvino.md) | `openvino` | `{{ model_name or "yolov8n" }}_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TensorRT](../integrations/tensorrt.md) | `engine` | `{{ model_name or "yolov8n" }}.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | -| [CoreML](../integrations/coreml.md) | `coreml` | `{{ model_name or "yolov8n" }}.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | -| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `{{ model_name or "yolov8n" }}_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | -| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `{{ model_name or "yolov8n" }}.pb` | ❌ | `imgsz`, `batch` | -| [TF Lite](../integrations/tflite.md) | `tflite` | `{{ model_name or "yolov8n" }}.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `{{ model_name or "yolov8n" }}_edgetpu.tflite` | ✅ | `imgsz` | -| [TF.js](../integrations/tfjs.md) | `tfjs` | `{{ model_name or "yolov8n" }}_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `{{ model_name or "yolov8n" }}_paddle_model/` | ✅ | `imgsz`, `batch` | -| [NCNN](../integrations/ncnn.md) | `ncnn` | `{{ model_name or "yolov8n" }}_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | +| [PyTorch](https://pytorch.org/) | - | `{{ model_name or "yolo11n" }}.pt` | ✅ | - | +| [TorchScript](../integrations/torchscript.md) | `torchscript` | `{{ model_name or "yolo11n" }}.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | +| [ONNX](../integrations/onnx.md) | `onnx` | `{{ model_name or "yolo11n" }}.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | +| [OpenVINO](../integrations/openvino.md) | `openvino` | `{{ model_name or "yolo11n" }}_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TensorRT](../integrations/tensorrt.md) | `engine` | `{{ model_name or "yolo11n" }}.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | +| [CoreML](../integrations/coreml.md) | `coreml` | `{{ model_name or "yolo11n" }}.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | +| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `{{ model_name or "yolo11n" }}_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | +| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `{{ model_name or "yolo11n" }}.pb` | ❌ | `imgsz`, `batch` | +| [TF Lite](../integrations/tflite.md) | `tflite` | `{{ model_name or "yolo11n" }}.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `{{ 
model_name or "yolo11n" }}_edgetpu.tflite` | ✅ | `imgsz` | +| [TF.js](../integrations/tfjs.md) | `tfjs` | `{{ model_name or "yolo11n" }}_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `{{ model_name or "yolo11n" }}_paddle_model/` | ✅ | `imgsz`, `batch` | +| [NCNN](../integrations/ncnn.md) | `ncnn` | `{{ model_name or "yolo11n" }}_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | diff --git a/docs/en/macros/predict-args.md b/docs/en/macros/predict-args.md index f977a30a2f..2bb669eb7b 100644 --- a/docs/en/macros/predict-args.md +++ b/docs/en/macros/predict-args.md @@ -1,17 +1,17 @@ -| Argument | Type | Default | Description | -| --------------- | -------------- | ---------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `source` | `str` | `'ultralytics/assets'` | Specifies the data source for inference. Can be an image path, video file, directory, URL, or device ID for live feeds. Supports a wide range of formats and sources, enabling flexible application across different types of input. | -| `conf` | `float` | `0.25` | Sets the minimum confidence threshold for detections. Objects detected with confidence below this threshold will be disregarded. Adjusting this value can help reduce false positives. | -| `iou` | `float` | `0.7` | [Intersection Over Union](https://www.ultralytics.com/glossary/intersection-over-union-iou) (IoU) threshold for Non-Maximum Suppression (NMS). Lower values result in fewer detections by eliminating overlapping boxes, useful for reducing duplicates. | -| `imgsz` | `int or tuple` | `640` | Defines the image size for inference. Can be a single integer `640` for square resizing or a (height, width) tuple. Proper sizing can improve detection [accuracy](https://www.ultralytics.com/glossary/accuracy) and processing speed. | -| `half` | `bool` | `False` | Enables half-[precision](https://www.ultralytics.com/glossary/precision) (FP16) inference, which can speed up model inference on supported GPUs with minimal impact on accuracy. | -| `device` | `str` | `None` | Specifies the device for inference (e.g., `cpu`, `cuda:0` or `0`). Allows users to select between CPU, a specific GPU, or other compute devices for model execution. | -| `max_det` | `int` | `300` | Maximum number of detections allowed per image. Limits the total number of objects the model can detect in a single inference, preventing excessive outputs in dense scenes. | -| `vid_stride` | `int` | `1` | Frame stride for video inputs. Allows skipping frames in videos to speed up processing at the cost of temporal resolution. A value of 1 processes every frame, higher values skip frames. | -| `stream_buffer` | `bool` | `False` | Determines if all frames should be buffered when processing video streams (`True`), or if the model should return the most recent frame (`False`). Useful for real-time applications. | -| `visualize` | `bool` | `False` | Activates visualization of model features during inference, providing insights into what the model is "seeing". Useful for debugging and model interpretation. | -| `augment` | `bool` | `False` | Enables test-time augmentation (TTA) for predictions, potentially improving detection robustness at the cost of inference speed. 
|
-| `agnostic_nms` | `bool` | `False` | Enables class-agnostic Non-Maximum Suppression (NMS), which merges overlapping boxes of different classes. Useful in multi-class detection scenarios where class overlap is common. |
-| `classes` | `list[int]` | `None` | Filters predictions to a set of class IDs. Only detections belonging to the specified classes will be returned. Useful for focusing on relevant objects in multi-class detection tasks. |
-| `retina_masks` | `bool` | `False` | Uses high-resolution segmentation masks if available in the model. This can enhance mask quality for segmentation tasks, providing finer detail. |
-| `embed` | `list[int]` | `None` | Specifies the layers from which to extract feature vectors or [embeddings](https://www.ultralytics.com/glossary/embeddings). Useful for downstream tasks like clustering or similarity search. |
+| Argument | Type | Default | Description |
+| --------------- | -------------- | ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `source` | `str` | `'ultralytics/assets'` | Specifies the data source for inference. Can be an image path, video file, directory, URL, or device ID for live feeds. Supports a wide range of formats and sources, enabling flexible application across [different types of input](/modes/predict.md/#inference-sources). |
+| `conf` | `float` | `0.25` | Sets the minimum confidence threshold for detections. Objects detected with confidence below this threshold will be disregarded. Adjusting this value can help reduce false positives. |
+| `iou` | `float` | `0.7` | [Intersection Over Union](https://www.ultralytics.com/glossary/intersection-over-union-iou) (IoU) threshold for Non-Maximum Suppression (NMS). Lower values result in fewer detections by eliminating overlapping boxes, useful for reducing duplicates. |
+| `imgsz` | `int or tuple` | `640` | Defines the image size for inference. Can be a single integer `640` for square resizing or a (height, width) tuple. Proper sizing can improve detection [accuracy](https://www.ultralytics.com/glossary/accuracy) and processing speed. |
+| `half` | `bool` | `False` | Enables half-[precision](https://www.ultralytics.com/glossary/precision) (FP16) inference, which can speed up model inference on supported GPUs with minimal impact on accuracy. |
+| `device` | `str` | `None` | Specifies the device for inference (e.g., `cpu`, `cuda:0` or `0`). Allows users to select between CPU, a specific GPU, or other compute devices for model execution. |
+| `max_det` | `int` | `300` | Maximum number of detections allowed per image. Limits the total number of objects the model can detect in a single inference, preventing excessive outputs in dense scenes. |
+| `vid_stride` | `int` | `1` | Frame stride for video inputs. Allows skipping frames in videos to speed up processing at the cost of temporal resolution. A value of 1 processes every frame, higher values skip frames. |
+| `stream_buffer` | `bool` | `False` | Determines the frame processing strategy for video streams. If `False`, processes only the most recent frame, minimizing latency (optimized for real-time applications). If `True`, processes all frames in order, ensuring no frames are skipped. |
+| `visualize` | `bool` | `False` | Activates visualization of model features during inference, providing insights into what the model is "seeing". Useful for debugging and model interpretation. |
+| `augment` | `bool` | `False` | Enables test-time augmentation (TTA) for predictions, potentially improving detection robustness at the cost of inference speed. |
+| `agnostic_nms` | `bool` | `False` | Enables class-agnostic Non-Maximum Suppression (NMS), which merges overlapping boxes of different classes. Useful in multi-class detection scenarios where class overlap is common. |
+| `classes` | `list[int]` | `None` | Filters predictions to a set of class IDs. Only detections belonging to the specified classes will be returned. Useful for focusing on relevant objects in multi-class detection tasks. |
+| `retina_masks` | `bool` | `False` | Uses high-resolution segmentation masks if available in the model. This can enhance mask quality for segmentation tasks, providing finer detail. |
+| `embed` | `list[int]` | `None` | Specifies the layers from which to extract feature vectors or [embeddings](https://www.ultralytics.com/glossary/embeddings). Useful for downstream tasks like clustering or similarity search. |
diff --git a/docs/en/macros/yolo-cls-perf.md b/docs/en/macros/yolo-cls-perf.md
new file mode 100644
index 0000000000..855e2c0ffc
--- /dev/null
+++ b/docs/en/macros/yolo-cls-perf.md
@@ -0,0 +1,7 @@
+| Model | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) at 640 |
+| --------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | ------------------------------ | ----------------------------------- | ------------------ | ------------------------ |
+| [YOLO11n-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-cls.pt) | 224 | 70.0 | 89.4 | 5.0 ± 0.3 | 1.1 ± 0.0 | 1.6 | 3.3 |
+| [YOLO11s-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-cls.pt) | 224 | 75.4 | 92.7 | 7.9 ± 0.2 | 1.3 ± 0.0 | 5.5 | 12.1 |
+| [YOLO11m-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-cls.pt) | 224 | 77.3 | 93.9 | 17.2 ± 0.4 | 2.0 ± 0.0 | 10.4 | 39.3 |
+| [YOLO11l-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-cls.pt) | 224 | 78.3 | 94.3 | 23.2 ± 0.3 | 2.8 ± 0.0 | 12.9 | 49.4 |
+| [YOLO11x-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-cls.pt) | 224 | 79.5 | 94.9 | 41.4 ± 0.9 | 3.8 ± 0.0 | 28.4 | 110.4 |
diff --git a/docs/en/macros/yolo-det-perf.md b/docs/en/macros/yolo-det-perf.md
new file mode 100644
index 0000000000..1b146cfc03
--- /dev/null
+++ b/docs/en/macros/yolo-det-perf.md
@@ -0,0 +1,7 @@
+| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
+| ------------------------------------------------------------------------------------- | --------------------- | -------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
+| [YOLO11n](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt) | 640 | 39.5 | 56.1 ± 0.8 | 1.5 ± 0.0 | 2.6 | 6.5 |
+| [YOLO11s](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s.pt) | 640 | 47.0 | 90.0 ± 1.2 | 2.5 ± 0.0 | 9.4 | 21.5 |
+| [YOLO11m](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m.pt) | 640 | 51.5 | 183.2 ± 2.0 | 4.7 ± 0.1 | 20.1 | 68.0 |
+| [YOLO11l](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l.pt) | 640 | 53.4 | 238.6 ± 1.4 | 6.2 ± 0.1 | 25.3 | 86.9 |
+| [YOLO11x](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x.pt) | 640 | 54.7 | 462.8 ± 6.7 | 11.3 ± 0.2 | 56.9 | 194.9 |
diff --git a/docs/en/macros/yolo-obb-perf.md b/docs/en/macros/yolo-obb-perf.md
new file mode 100644
index 0000000000..37a7d7b17b
--- /dev/null
+++ b/docs/en/macros/yolo-obb-perf.md
@@ -0,0 +1,7 @@
+| Model | size<br><sup>(pixels) | mAP<sup>test<br>50 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
+| --------------------------------------------------------------------------------------------- | --------------------- | ------------------ | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
+| [YOLO11n-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-obb.pt) | 1024 | 78.4 | 117.6 ± 0.8 | 4.4 ± 0.0 | 2.7 | 17.2 |
+| [YOLO11s-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-obb.pt) | 1024 | 79.5 | 219.4 ± 4.0 | 5.1 ± 0.0 | 9.7 | 57.5 |
+| [YOLO11m-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-obb.pt) | 1024 | 80.9 | 562.8 ± 2.9 | 10.1 ± 0.4 | 20.9 | 183.5 |
+| [YOLO11l-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-obb.pt) | 1024 | 81.0 | 712.5 ± 5.0 | 13.5 ± 0.6 | 26.2 | 232.0 |
+| [YOLO11x-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-obb.pt) | 1024 | 81.3 | 1408.6 ± 7.7 | 28.6 ± 1.0 | 58.8 | 520.2 |
diff --git a/docs/en/macros/yolo-pose-perf.md b/docs/en/macros/yolo-pose-perf.md
new file mode 100644
index 0000000000..d699017b83
--- /dev/null
+++ b/docs/en/macros/yolo-pose-perf.md
@@ -0,0 +1,7 @@
+| Model | size<br><sup>(pixels) | mAP<sup>pose<br>50-95 | mAP<sup>pose<br>50 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
(pixels) | mAPpose
50-95 | mAPpose
50 | Speed
CPU ONNX
(ms) | Speed
T4 TensorRT10
(ms) | params
(M) | FLOPs
(B) | +| ---------------------------------------------------------------------------------------------- | --------------------- | --------------------- | ------------------ | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | +| [YOLO11n-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-pose.pt) | 640 | 50.0 | 81.0 | 52.4 ± 0.5 | 1.7 ± 0.0 | 2.9 | 7.6 | +| [YOLO11s-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-pose.pt) | 640 | 58.9 | 86.3 | 90.5 ± 0.6 | 2.6 ± 0.0 | 9.9 | 23.2 | +| [YOLO11m-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-pose.pt) | 640 | 64.9 | 89.4 | 187.3 ± 0.8 | 4.9 ± 0.1 | 20.9 | 71.7 | +| [YOLO11l-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-pose.pt) | 640 | 66.1 | 89.9 | 247.7 ± 1.1 | 6.4 ± 0.1 | 26.2 | 90.7 | +| [YOLO11x-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-pose.pt) | 640 | 69.5 | 91.1 | 488.0 ± 13.9 | 12.1 ± 0.2 | 58.8 | 203.3 | diff --git a/docs/en/macros/yolo-seg-perf.md b/docs/en/macros/yolo-seg-perf.md new file mode 100644 index 0000000000..af97e6f455 --- /dev/null +++ b/docs/en/macros/yolo-seg-perf.md @@ -0,0 +1,7 @@ +| Model | size
(pixels) | mAPbox
50-95 | mAPmask
50-95 | Speed
CPU ONNX
(ms) | Speed
T4 TensorRT10
(ms) | params
(M) | FLOPs
(B) | +| -------------------------------------------------------------------------------------------- | --------------------- | -------------------- | --------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | +| [YOLO11n-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-seg.pt) | 640 | 38.9 | 32.0 | 65.9 ± 1.1 | 1.8 ± 0.0 | 2.9 | 10.4 | +| [YOLO11s-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-seg.pt) | 640 | 46.6 | 37.8 | 117.6 ± 4.9 | 2.9 ± 0.0 | 10.1 | 35.5 | +| [YOLO11m-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-seg.pt) | 640 | 51.5 | 41.5 | 281.6 ± 1.2 | 6.3 ± 0.1 | 22.4 | 123.3 | +| [YOLO11l-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-seg.pt) | 640 | 53.4 | 42.9 | 344.2 ± 3.2 | 7.8 ± 0.2 | 27.6 | 142.2 | +| [YOLO11x-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-seg.pt) | 640 | 54.7 | 43.8 | 664.5 ± 3.2 | 15.8 ± 0.7 | 62.1 | 319.0 | diff --git a/docs/en/models/index.md b/docs/en/models/index.md index c43af1f57f..baa5c9b260 100644 --- a/docs/en/models/index.md +++ b/docs/en/models/index.md @@ -17,16 +17,17 @@ Here are some of the key models supported: 3. **[YOLOv5](yolov5.md)**: An improved version of the YOLO architecture by Ultralytics, offering better performance and speed trade-offs compared to previous versions. 4. **[YOLOv6](yolov6.md)**: Released by [Meituan](https://about.meituan.com/) in 2022, and in use in many of the company's autonomous delivery robots. 5. **[YOLOv7](yolov7.md)**: Updated YOLO models released in 2022 by the authors of YOLOv4. -6. **[YOLOv8](yolov8.md) NEW 🚀**: The latest version of the YOLO family, featuring enhanced capabilities such as [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation), pose/keypoints estimation, and classification. +6. **[YOLOv8](yolov8.md)**: The latest version of the YOLO family, featuring enhanced capabilities such as [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation), pose/keypoints estimation, and classification. 7. **[YOLOv9](yolov9.md)**: An experimental model trained on the Ultralytics [YOLOv5](yolov5.md) codebase implementing Programmable Gradient Information (PGI). 8. **[YOLOv10](yolov10.md)**: By Tsinghua University, featuring NMS-free training and efficiency-accuracy driven architecture, delivering state-of-the-art performance and latency. -9. **[Segment Anything Model (SAM)](sam.md)**: Meta's original Segment Anything Model (SAM). -10. **[Segment Anything Model 2 (SAM2)](sam-2.md)**: The next generation of Meta's Segment Anything Model (SAM) for videos and images. -11. **[Mobile Segment Anything Model (MobileSAM)](mobile-sam.md)**: MobileSAM for mobile applications, by Kyung Hee University. -12. **[Fast Segment Anything Model (FastSAM)](fast-sam.md)**: FastSAM by Image & Video Analysis Group, Institute of Automation, Chinese Academy of Sciences. -13. **[YOLO-NAS](yolo-nas.md)**: YOLO Neural Architecture Search (NAS) Models. -14. **[Realtime Detection Transformers (RT-DETR)](rtdetr.md)**: Baidu's PaddlePaddle Realtime Detection [Transformer](https://www.ultralytics.com/glossary/transformer) (RT-DETR) models. -15. **[YOLO-World](yolo-world.md)**: Real-time Open Vocabulary Object Detection models from Tencent AI Lab. +9. 
**[YOLO11](yolo11.md) NEW 🚀**: Ultralytics' latest YOLO models delivering state-of-the-art (SOTA) performance across multiple tasks. +10. **[Segment Anything Model (SAM)](sam.md)**: Meta's original Segment Anything Model (SAM). +11. **[Segment Anything Model 2 (SAM2)](sam-2.md)**: The next generation of Meta's Segment Anything Model (SAM) for videos and images. +12. **[Mobile Segment Anything Model (MobileSAM)](mobile-sam.md)**: MobileSAM for mobile applications, by Kyung Hee University. +13. **[Fast Segment Anything Model (FastSAM)](fast-sam.md)**: FastSAM by Image & Video Analysis Group, Institute of Automation, Chinese Academy of Sciences. +14. **[YOLO-NAS](yolo-nas.md)**: YOLO Neural Architecture Search (NAS) Models. +15. **[Realtime Detection Transformers (RT-DETR)](rtdetr.md)**: Baidu's PaddlePaddle Realtime Detection [Transformer](https://www.ultralytics.com/glossary/transformer) (RT-DETR) models. +16. **[YOLO-World](yolo-world.md)**: Real-time Open Vocabulary Object Detection models from Tencent AI Lab.
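The families in this list all load through a small set of Ultralytics Python classes with a common predict interface. A minimal sketch, assuming the published weight filenames (`yolo11n.pt`, `sam2_b.pt`) and the sample image URL used elsewhere in these docs:

```python
from ultralytics import SAM, YOLO

# YOLO-family weights load through the YOLO class
detector = YOLO("yolo11n.pt")
results = detector("https://ultralytics.com/images/bus.jpg")

# SAM-family weights load through the SAM class with the same call style
segmenter = SAM("sam2_b.pt")  # assumed SAM 2 weights name
masks = segmenter("https://ultralytics.com/images/bus.jpg")
```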


diff --git a/docs/en/models/mobile-sam.md b/docs/en/models/mobile-sam.md index 26c92f6829..0d7df2a2ca 100644 --- a/docs/en/models/mobile-sam.md +++ b/docs/en/models/mobile-sam.md @@ -4,7 +4,7 @@ description: Discover MobileSAM, a lightweight and fast image segmentation model keywords: MobileSAM, image segmentation, lightweight model, fast segmentation, mobile applications, SAM, ViT encoder, Tiny-ViT, Ultralytics --- -![MobileSAM Logo](https://github.com/ChaoningZhang/MobileSAM/blob/master/assets/logo2.png) +![MobileSAM Logo](https://raw.githubusercontent.com/ChaoningZhang/MobileSAM/master/assets/logo2.png) # Mobile Segment Anything (MobileSAM) @@ -12,6 +12,17 @@ The MobileSAM paper is now available on [arXiv](https://arxiv.org/pdf/2306.14289 A demonstration of MobileSAM running on a CPU can be accessed at this [demo link](https://huggingface.co/spaces/dhkim2810/MobileSAM). The performance on a Mac i5 CPU takes approximately 3 seconds. On the Hugging Face demo, the interface and lower-performance CPUs contribute to a slower response, but it continues to function effectively. +

+<p align="center">
+  <br>
+  <strong>Watch:</strong> How to Run Inference with MobileSAM using Ultralytics | Step-by-Step Guide 🎉
+</p>
+
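To reproduce the video's workflow in code, a minimal point-prompt sketch, assuming the published `mobile_sam.pt` weights and the repository's bundled sample image:

```python
from ultralytics import SAM

# Load MobileSAM weights and segment the object at a single point prompt
model = SAM("mobile_sam.pt")
model.predict("ultralytics/assets/zidane.jpg", points=[900, 370], labels=[1])
```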
+ MobileSAM is implemented in various projects including [Grounding-SAM](https://github.com/IDEA-Research/Grounded-Segment-Anything), [AnyLabeling](https://github.com/vietanhdev/anylabeling), and [Segment Anything in 3D](https://github.com/Jumpat/SegmentAnythingin3D). MobileSAM is trained on a single GPU with a 100k dataset (1% of the original images) in less than a day. The code for this training will be made available in the future. diff --git a/docs/en/models/sam-2.md b/docs/en/models/sam-2.md index 025c18d267..562a130029 100644 --- a/docs/en/models/sam-2.md +++ b/docs/en/models/sam-2.md @@ -12,6 +12,17 @@ SAM 2, the successor to Meta's [Segment Anything Model (SAM)](sam.md), is a cutt ## Key Features +

+<p align="center">
+  <br>
+  <strong>Watch:</strong> How to Run Inference with Meta's SAM2 using Ultralytics | Step-by-Step Guide 🎉
+</p>
+
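Ahead of the architecture details below, a comparable prompted-inference sketch for SAM 2, assuming the `sam2_b.pt` weights name; the box prompt selects the region to segment:

```python
from ultralytics import SAM

# Load SAM 2 weights and segment the region inside a bounding-box prompt
model = SAM("sam2_b.pt")
results = model("path/to/image.jpg", bboxes=[100, 100, 400, 400])
```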
+ ### Unified Model Architecture SAM 2 combines the capabilities of image and video segmentation in a single model. This unification simplifies deployment and allows for consistent performance across different media types. It leverages a flexible prompt-based interface, enabling users to specify objects of interest through various prompt types, such as points, bounding boxes, or masks. @@ -54,7 +65,7 @@ SAM 2 sets a new benchmark in the field, outperforming previous models on variou - **Memory Mechanism**: Includes a memory encoder, memory bank, and memory attention module. These components collectively store and utilize information from past frames, enabling the model to maintain consistent object tracking over time. - **Mask Decoder**: Generates the final segmentation masks based on the encoded image features and prompts. In video, it also uses memory context to ensure accurate tracking across frames. -![SAM 2 Architecture Diagram](https://github.com/facebookresearch/segment-anything-2/blob/main/assets/model_diagram.png) +![SAM 2 Architecture Diagram](https://raw.githubusercontent.com/facebookresearch/sam2/refs/heads/main/assets/model_diagram.png) ### Memory Mechanism and Occlusion Handling diff --git a/docs/en/models/yolo11.md b/docs/en/models/yolo11.md new file mode 100644 index 0000000000..dbb8318fe1 --- /dev/null +++ b/docs/en/models/yolo11.md @@ -0,0 +1,208 @@ +--- +comments: true +description: Discover YOLO11, the latest advancement in state-of-the-art object detection, offering unmatched accuracy and efficiency for diverse computer vision tasks. +keywords: YOLO11, state-of-the-art object detection, YOLO series, Ultralytics, computer vision, AI, machine learning, deep learning +--- + +# Ultralytics YOLO11 + +## Overview + +YOLO11 is the latest iteration in the [Ultralytics](https://www.ultralytics.com) YOLO series of real-time object detectors, redefining what's possible with cutting-edge [accuracy](https://www.ultralytics.com/glossary/accuracy), speed, and efficiency. Building upon the impressive advancements of previous YOLO versions, YOLO11 introduces significant improvements in architecture and training methods, making it a versatile choice for a wide range of [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) tasks. + +![Ultralytics YOLO11 Comparison Plots](https://github.com/user-attachments/assets/a311a4ed-bbf2-43b5-8012-5f183a28a845) + +

+<p align="center">
+  <br>
+  <strong>Watch:</strong> Ultralytics YOLO11 Announcement at YOLO Vision 2024
+</p>
+
+ +## Key Features + +- **Enhanced Feature Extraction:** YOLO11 employs an improved backbone and neck architecture, which enhances [feature extraction](https://www.ultralytics.com/glossary/feature-extraction) capabilities for more precise object detection and complex task performance. +- **Optimized for Efficiency and Speed:** YOLO11 introduces refined architectural designs and optimized training pipelines, delivering faster processing speeds and maintaining an optimal balance between accuracy and performance. +- **Greater Accuracy with Fewer Parameters:** With advancements in model design, YOLO11m achieves a higher [mean Average Precision](https://www.ultralytics.com/glossary/mean-average-precision-map) (mAP) on the COCO dataset while using 22% fewer parameters than YOLOv8m, making it computationally efficient without compromising accuracy. +- **Adaptability Across Environments:** YOLO11 can be seamlessly deployed across various environments, including edge devices, cloud platforms, and systems supporting NVIDIA GPUs, ensuring maximum flexibility. +- **Broad Range of Supported Tasks:** Whether it's object detection, instance segmentation, image classification, pose estimation, or oriented object detection (OBB), YOLO11 is designed to cater to a diverse set of computer vision challenges. + +## Supported Tasks and Modes + +YOLO11 builds upon the versatile model range introduced in YOLOv8, offering enhanced support across various computer vision tasks: + +| Model | Filenames | Task | Inference | Validation | Training | Export | +| ----------- | ----------------------------------------------------------------------------------------- | -------------------------------------------- | --------- | ---------- | -------- | ------ | +| YOLO11 | `yolo11n.pt` `yolo11s.pt` `yolo11m.pt` `yolo11l.pt` `yolo11x.pt` | [Detection](../tasks/detect.md) | ✅ | ✅ | ✅ | ✅ | +| YOLO11-seg | `yolo11n-seg.pt` `yolo11s-seg.pt` `yolo11m-seg.pt` `yolo11l-seg.pt` `yolo11x-seg.pt` | [Instance Segmentation](../tasks/segment.md) | ✅ | ✅ | ✅ | ✅ | +| YOLO11-pose | `yolo11n-pose.pt` `yolo11s-pose.pt` `yolo11m-pose.pt` `yolo11l-pose.pt` `yolo11x-pose.pt` | [Pose/Keypoints](../tasks/pose.md) | ✅ | ✅ | ✅ | ✅ | +| YOLO11-obb | `yolo11n-obb.pt` `yolo11s-obb.pt` `yolo11m-obb.pt` `yolo11l-obb.pt` `yolo11x-obb.pt` | [Oriented Detection](../tasks/obb.md) | ✅ | ✅ | ✅ | ✅ | +| YOLO11-cls | `yolo11n-cls.pt` `yolo11s-cls.pt` `yolo11m-cls.pt` `yolo11l-cls.pt` `yolo11x-cls.pt` | [Classification](../tasks/classify.md) | ✅ | ✅ | ✅ | ✅ | + +This table provides an overview of the YOLO11 model variants, showcasing their applicability in specific tasks and compatibility with operational modes such as Inference, Validation, Training, and Export. This flexibility makes YOLO11 suitable for a wide range of applications in computer vision, from real-time detection to complex segmentation tasks. + +## Performance Metrics + +!!! performance + + === "Detection (COCO)" + + See [Detection Docs](../tasks/detect.md) for usage examples with these models trained on [COCO](../datasets/detect/coco.md), which include 80 pre-trained classes. + +{% filter indent(width=8, first=False, blank=True) %} +{% include "macros/yolo-det-perf.md" %} +{% endfilter %} + + === "Segmentation (COCO)" + + See [Segmentation Docs](../tasks/segment.md) for usage examples with these models trained on [COCO](../datasets/segment/coco.md), which include 80 pre-trained classes. 
+ +{% filter indent(width=8, first=False, blank=True) %} +{% include "macros/yolo-seg-perf.md" %} +{% endfilter %} + + === "Classification (ImageNet)" + + See [Classification Docs](../tasks/classify.md) for usage examples with these models trained on [ImageNet](../datasets/classify/imagenet.md), which include 1000 pre-trained classes. + +{% filter indent(width=8, first=False, blank=True) %} +{% include "macros/yolo-cls-perf.md" %} +{% endfilter %} + + === "Pose (COCO)" + + See [Pose Estimation Docs](../tasks/pose.md) for usage examples with these models trained on [COCO](../datasets/pose/coco.md), which include 1 pre-trained class, 'person'. + +{% filter indent(width=8, first=False, blank=True) %} +{% include "macros/yolo-pose-perf.md" %} +{% endfilter %} + + === "OBB (DOTAv1)" + + See [Oriented Detection Docs](../tasks/obb.md) for usage examples with these models trained on [DOTAv1](../datasets/obb/dota-v2.md#dota-v10), which include 15 pre-trained classes. + +{% filter indent(width=8, first=False, blank=True) %} +{% include "macros/yolo-obb-perf.md" %} +{% endfilter %} + +## Usage Examples + +This section provides simple YOLO11 training and inference examples. For full documentation on these and other [modes](../modes/index.md), see the [Predict](../modes/predict.md), [Train](../modes/train.md), [Val](../modes/val.md), and [Export](../modes/export.md) docs pages. + +Note that the example below is for YOLO11 [Detect](../tasks/detect.md) models for [object detection](https://www.ultralytics.com/glossary/object-detection). For additional supported tasks, see the [Segment](../tasks/segment.md), [Classify](../tasks/classify.md), [OBB](../tasks/obb.md), and [Pose](../tasks/pose.md) docs. + +!!! example + + === "Python" + + [PyTorch](https://www.ultralytics.com/glossary/pytorch) pretrained `*.pt` models as well as configuration `*.yaml` files can be passed to the `YOLO()` class to create a model instance in Python: + + ```python + from ultralytics import YOLO + + # Load a COCO-pretrained YOLO11n model + model = YOLO("yolo11n.pt") + + # Train the model on the COCO8 example dataset for 100 epochs + results = model.train(data="coco8.yaml", epochs=100, imgsz=640) + + # Run inference with the YOLO11n model on the 'bus.jpg' image + results = model("path/to/bus.jpg") + ``` + + === "CLI" + + CLI commands are available to directly run the models: + + ```bash + # Load a COCO-pretrained YOLO11n model and train it on the COCO8 example dataset for 100 epochs + yolo train model=yolo11n.pt data=coco8.yaml epochs=100 imgsz=640 + + # Load a COCO-pretrained YOLO11n model and run inference on the 'bus.jpg' image + yolo predict model=yolo11n.pt source=path/to/bus.jpg + ``` + +## Citations and Acknowledgements + +If you use YOLO11 or any other software from this repository in your work, please cite it using the following format: + +!!! quote "" + + === "BibTeX" + + ```bibtex + @software{yolo11_ultralytics, + author = {Glenn Jocher and Jing Qiu}, + title = {Ultralytics YOLO11}, + version = {11.0.0}, + year = {2024}, + url = {https://github.com/ultralytics/ultralytics}, + orcid = {0000-0001-5950-6979, 0000-0002-7603-6750, 0000-0003-3783-7069}, + license = {AGPL-3.0} + } + ``` + +Please note that the DOI is pending and will be added to the citation once it is available. YOLO11 models are provided under [AGPL-3.0](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) and [Enterprise](https://www.ultralytics.com/license) licenses. 
+ +## FAQ + +### What are the key improvements in Ultralytics YOLO11 compared to previous versions? + +Ultralytics YOLO11 introduces several significant advancements over its predecessors. Key improvements include: + +- **Enhanced Feature Extraction:** YOLO11 employs an improved backbone and neck architecture, enhancing [feature extraction](https://www.ultralytics.com/glossary/feature-extraction) capabilities for more precise object detection. +- **Optimized Efficiency and Speed:** Refined architectural designs and optimized training pipelines deliver faster processing speeds while maintaining a balance between accuracy and performance. +- **Greater Accuracy with Fewer Parameters:** YOLO11m achieves higher mean Average [Precision](https://www.ultralytics.com/glossary/precision) (mAP) on the COCO dataset with 22% fewer parameters than YOLOv8m, making it computationally efficient without compromising accuracy. +- **Adaptability Across Environments:** YOLO11 can be deployed across various environments, including edge devices, cloud platforms, and systems supporting NVIDIA GPUs. +- **Broad Range of Supported Tasks:** YOLO11 supports diverse computer vision tasks such as object detection, [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation), image classification, pose estimation, and oriented object detection (OBB). + +### How do I train a YOLO11 model for object detection? + +Training a YOLO11 model for object detection can be done using Python or CLI commands. Below are examples for both methods: + +!!! Example + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a COCO-pretrained YOLO11n model + model = YOLO("yolo11n.pt") + + # Train the model on the COCO8 example dataset for 100 epochs + results = model.train(data="coco8.yaml", epochs=100, imgsz=640) + ``` + + === "CLI" + + ```bash + # Load a COCO-pretrained YOLO11n model and train it on the COCO8 example dataset for 100 epochs + yolo train model=yolo11n.pt data=coco8.yaml epochs=100 imgsz=640 + ``` + +For more detailed instructions, refer to the [Train](../modes/train.md) documentation. + +### What tasks can YOLO11 models perform? + +YOLO11 models are versatile and support a wide range of computer vision tasks, including: + +- **Object Detection:** Identifying and locating objects within an image. +- **Instance Segmentation:** Detecting objects and delineating their boundaries. +- **[Image Classification](https://www.ultralytics.com/glossary/image-classification):** Categorizing images into predefined classes. +- **Pose Estimation:** Detecting and tracking keypoints on human bodies. +- **Oriented Object Detection (OBB):** Detecting objects with rotation for higher precision. + +For more information on each task, see the [Detection](../tasks/detect.md), [Instance Segmentation](../tasks/segment.md), [Classification](../tasks/classify.md), [Pose Estimation](../tasks/pose.md), and [Oriented Detection](../tasks/obb.md) documentation. + +### How does YOLO11 achieve greater accuracy with fewer parameters? + +YOLO11 achieves greater accuracy with fewer parameters through advancements in model design and optimization techniques. The improved architecture allows for efficient feature extraction and processing, resulting in higher mean Average Precision (mAP) on datasets like COCO while using 22% fewer parameters than YOLOv8m. This makes YOLO11 computationally efficient without compromising on accuracy, making it suitable for deployment on resource-constrained devices. 
+ +### Can YOLO11 be deployed on edge devices? + +Yes, YOLO11 is designed for adaptability across various environments, including edge devices. Its optimized architecture and efficient processing capabilities make it suitable for deployment on edge devices, cloud platforms, and systems supporting NVIDIA GPUs. This flexibility ensures that YOLO11 can be used in diverse applications, from real-time detection on mobile devices to complex segmentation tasks in cloud environments. For more details on deployment options, refer to the [Export](../modes/export.md) documentation. diff --git a/docs/en/models/yolov6.md b/docs/en/models/yolov6.md index b670d58d31..c41b40c838 100644 --- a/docs/en/models/yolov6.md +++ b/docs/en/models/yolov6.md @@ -24,7 +24,7 @@ keywords: Meituan YOLOv6, object detection, real-time applications, BiC module, YOLOv6 provides various pre-trained models with different scales: -- YOLOv6-N: 37.5% AP on COCO val2017 at 1187 FPS with NVIDIA Tesla T4 GPU. +- YOLOv6-N: 37.5% AP on COCO val2017 at 1187 FPS with NVIDIA T4 GPU. - YOLOv6-S: 45.0% AP at 484 FPS. - YOLOv6-M: 50.0% AP at 226 FPS. - YOLOv6-L: 52.8% AP at 116 FPS. @@ -151,7 +151,7 @@ YOLOv6 offers multiple versions, each optimized for different performance requir - YOLOv6-L: 52.8% AP at 116 FPS - YOLOv6-L6: State-of-the-art accuracy in real-time scenarios -These models are evaluated on the COCO dataset using an NVIDIA Tesla T4 GPU. For more on performance metrics, see the [Performance Metrics](#performance-metrics) section. +These models are evaluated on the COCO dataset using an NVIDIA T4 GPU. For more on performance metrics, see the [Performance Metrics](#performance-metrics) section. ### How does the Anchor-Aided Training (AAT) strategy benefit YOLOv6? diff --git a/docs/en/models/yolov8.md b/docs/en/models/yolov8.md index f7a32ef009..036cd305a1 100644 --- a/docs/en/models/yolov8.md +++ b/docs/en/models/yolov8.md @@ -4,7 +4,7 @@ description: Discover YOLOv8, the latest advancement in real-time object detecti keywords: YOLOv8, real-time object detection, YOLO series, Ultralytics, computer vision, advanced object detection, AI, machine learning, deep learning --- -# YOLOv8 +# Ultralytics YOLOv8 ## Overview diff --git a/docs/en/modes/benchmark.md b/docs/en/modes/benchmark.md index 209a0e03e0..3086e98ec6 100644 --- a/docs/en/modes/benchmark.md +++ b/docs/en/modes/benchmark.md @@ -1,7 +1,7 @@ --- comments: true -description: Learn how to evaluate your YOLOv8 model's performance in real-world scenarios using benchmark mode. Optimize speed, accuracy, and resource allocation across export formats. -keywords: model benchmarking, YOLOv8, Ultralytics, performance evaluation, export formats, ONNX, TensorRT, OpenVINO, CoreML, TensorFlow, optimization, mAP50-95, inference time +description: Learn how to evaluate your YOLO11 model's performance in real-world scenarios using benchmark mode. Optimize speed, accuracy, and resource allocation across export formats. +keywords: model benchmarking, YOLO11, Ultralytics, performance evaluation, export formats, ONNX, TensorRT, OpenVINO, CoreML, TensorFlow, optimization, mAP50-95, inference time --- # Model Benchmarking with Ultralytics YOLO @@ -10,7 +10,7 @@ keywords: model benchmarking, YOLOv8, Ultralytics, performance evaluation, expor ## Introduction -Once your model is trained and validated, the next logical step is to evaluate its performance in various real-world scenarios. 
Benchmark mode in Ultralytics YOLOv8 serves this purpose by providing a robust framework for assessing the speed and [accuracy](https://www.ultralytics.com/glossary/accuracy) of your model across a range of export formats. +Once your model is trained and validated, the next logical step is to evaluate its performance in various real-world scenarios. Benchmark mode in Ultralytics YOLO11 serves this purpose by providing a robust framework for assessing the speed and [accuracy](https://www.ultralytics.com/glossary/accuracy) of your model across a range of export formats.


@@ -50,7 +50,7 @@ Once your model is trained and validated, the next logical step is to evaluate i ## Usage Examples -Run YOLOv8n benchmarks on all supported export formats including ONNX, TensorRT etc. See Arguments section below for a full list of export arguments. +Run YOLO11n benchmarks on all supported export formats including ONNX, TensorRT etc. See Arguments section below for a full list of export arguments. !!! example @@ -60,13 +60,13 @@ Run YOLOv8n benchmarks on all supported export formats including ONNX, TensorRT from ultralytics.utils.benchmarks import benchmark # Benchmark on GPU - benchmark(model="yolov8n.pt", data="coco8.yaml", imgsz=640, half=False, device=0) + benchmark(model="yolo11n.pt", data="coco8.yaml", imgsz=640, half=False, device=0) ``` === "CLI" ```bash - yolo benchmark model=yolov8n.pt data='coco8.yaml' imgsz=640 half=False device=0 + yolo benchmark model=yolo11n.pt data='coco8.yaml' imgsz=640 half=False device=0 ``` ## Arguments @@ -75,7 +75,7 @@ Arguments such as `model`, `data`, `imgsz`, `half`, `device`, and `verbose` prov | Key | Default Value | Description | | --------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | `None` | Specifies the path to the model file. Accepts both `.pt` and `.yaml` formats, e.g., `"yolov8n.pt"` for pre-trained models or configuration files. | +| `model` | `None` | Specifies the path to the model file. Accepts both `.pt` and `.yaml` formats, e.g., `"yolo11n.pt"` for pre-trained models or configuration files. | | `data` | `None` | Path to a YAML file defining the dataset for benchmarking, typically including paths and settings for [validation data](https://www.ultralytics.com/glossary/validation-data). Example: `"coco8.yaml"`. | | `imgsz` | `640` | The input image size for the model. Can be a single integer for square images or a tuple `(width, height)` for non-square, e.g., `(640, 480)`. | | `half` | `False` | Enables FP16 (half-precision) inference, reducing memory usage and possibly increasing speed on compatible hardware. Use `half=True` to enable. | @@ -93,9 +93,9 @@ See full `export` details in the [Export](../modes/export.md) page. ## FAQ -### How do I benchmark my YOLOv8 model's performance using Ultralytics? +### How do I benchmark my YOLO11 model's performance using Ultralytics? -Ultralytics YOLOv8 offers a Benchmark mode to assess your model's performance across different export formats. This mode provides insights into key metrics such as [mean Average Precision](https://www.ultralytics.com/glossary/mean-average-precision-map) (mAP50-95), accuracy, and inference time in milliseconds. To run benchmarks, you can use either Python or CLI commands. For example, to benchmark on a GPU: +Ultralytics YOLO11 offers a Benchmark mode to assess your model's performance across different export formats. This mode provides insights into key metrics such as [mean Average Precision](https://www.ultralytics.com/glossary/mean-average-precision-map) (mAP50-95), accuracy, and inference time in milliseconds. To run benchmarks, you can use either Python or CLI commands. For example, to benchmark on a GPU: !!! 
example @@ -105,29 +105,29 @@ Ultralytics YOLOv8 offers a Benchmark mode to assess your model's performance ac from ultralytics.utils.benchmarks import benchmark # Benchmark on GPU - benchmark(model="yolov8n.pt", data="coco8.yaml", imgsz=640, half=False, device=0) + benchmark(model="yolo11n.pt", data="coco8.yaml", imgsz=640, half=False, device=0) ``` === "CLI" ```bash - yolo benchmark model=yolov8n.pt data='coco8.yaml' imgsz=640 half=False device=0 + yolo benchmark model=yolo11n.pt data='coco8.yaml' imgsz=640 half=False device=0 ``` For more details on benchmark arguments, visit the [Arguments](#arguments) section. -### What are the benefits of exporting YOLOv8 models to different formats? +### What are the benefits of exporting YOLO11 models to different formats? -Exporting YOLOv8 models to different formats such as ONNX, TensorRT, and OpenVINO allows you to optimize performance based on your deployment environment. For instance: +Exporting YOLO11 models to different formats such as ONNX, TensorRT, and OpenVINO allows you to optimize performance based on your deployment environment. For instance: - **ONNX:** Provides up to 3x CPU speedup. - **TensorRT:** Offers up to 5x GPU speedup. - **OpenVINO:** Specifically optimized for Intel hardware. These formats enhance both the speed and accuracy of your models, making them more efficient for various real-world applications. Visit the [Export](../modes/export.md) page for complete details. -### Why is benchmarking crucial in evaluating YOLOv8 models? +### Why is benchmarking crucial in evaluating YOLO11 models? -Benchmarking your YOLOv8 models is essential for several reasons: +Benchmarking your YOLO11 models is essential for several reasons: - **Informed Decisions:** Understand the trade-offs between speed and accuracy. - **Resource Allocation:** Gauge the performance across different hardware options. @@ -135,9 +135,9 @@ Benchmarking your YOLOv8 models is essential for several reasons: - **Cost Efficiency:** Optimize hardware usage based on benchmark results. Key metrics such as mAP50-95, Top-5 accuracy, and inference time help in making these evaluations. Refer to the [Key Metrics](#key-metrics-in-benchmark-mode) section for more information. -### Which export formats are supported by YOLOv8, and what are their advantages? +### Which export formats are supported by YOLO11, and what are their advantages? -YOLOv8 supports a variety of export formats, each tailored for specific hardware and use cases: +YOLO11 supports a variety of export formats, each tailored for specific hardware and use cases: - **ONNX:** Best for CPU performance. - **TensorRT:** Ideal for GPU efficiency. @@ -145,11 +145,11 @@ YOLOv8 supports a variety of export formats, each tailored for specific hardware - **CoreML & [TensorFlow](https://www.ultralytics.com/glossary/tensorflow):** Useful for iOS and general ML applications. For a complete list of supported formats and their respective advantages, check out the [Supported Export Formats](#supported-export-formats) section. -### What arguments can I use to fine-tune my YOLOv8 benchmarks? +### What arguments can I use to fine-tune my YOLO11 benchmarks? When running benchmarks, several arguments can be customized to suit specific needs: -- **model:** Path to the model file (e.g., "yolov8n.pt"). +- **model:** Path to the model file (e.g., "yolo11n.pt"). - **data:** Path to a YAML file defining the dataset (e.g., "coco8.yaml"). - **imgsz:** The input image size, either as a single integer or a tuple. 
- **half:** Enable FP16 inference for better performance. diff --git a/docs/en/modes/export.md b/docs/en/modes/export.md index 706dd91cdc..4be5bd5b90 100644 --- a/docs/en/modes/export.md +++ b/docs/en/modes/export.md @@ -1,7 +1,7 @@ --- comments: true -description: Learn how to export your YOLOv8 model to various formats like ONNX, TensorRT, and CoreML. Achieve maximum compatibility and performance. -keywords: YOLOv8, Model Export, ONNX, TensorRT, CoreML, Ultralytics, AI, Machine Learning, Inference, Deployment +description: Learn how to export your YOLO11 model to various formats like ONNX, TensorRT, and CoreML. Achieve maximum compatibility and performance. +keywords: YOLO11, Model Export, ONNX, TensorRT, CoreML, Ultralytics, AI, Machine Learning, Inference, Deployment --- # Model Export with Ultralytics YOLO @@ -10,7 +10,7 @@ keywords: YOLOv8, Model Export, ONNX, TensorRT, CoreML, Ultralytics, AI, Machine ## Introduction -The ultimate goal of training a model is to deploy it for real-world applications. Export mode in Ultralytics YOLOv8 offers a versatile range of options for exporting your trained model to different formats, making it deployable across various platforms and devices. This comprehensive guide aims to walk you through the nuances of model exporting, showcasing how to achieve maximum compatibility and performance. +The ultimate goal of training a model is to deploy it for real-world applications. Export mode in Ultralytics YOLO11 offers a versatile range of options for exporting your trained model to different formats, making it deployable across various platforms and devices. This comprehensive guide aims to walk you through the nuances of model exporting, showcasing how to achieve maximum compatibility and performance.
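As the Export introduction above notes, exported artifacts remain usable from the same API. A minimal round-trip sketch, assuming a COCO-pretrained checkpoint and the sample image URL used elsewhere in these docs:

```python
from ultralytics import YOLO

# Export a pretrained model to ONNX; export() returns the path of the artifact
model = YOLO("yolo11n.pt")
onnx_path = model.export(format="onnx")

# Reload the exported file with the same class and predict directly on it
onnx_model = YOLO(onnx_path)
results = onnx_model("https://ultralytics.com/images/bus.jpg")
```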


@@ -20,10 +20,10 @@ The ultimate goal of training a model is to deploy it for real-world application allowfullscreen>
-  <strong>Watch:</strong> How To Export Custom Trained Ultralytics YOLOv8 Model and Run Live Inference on Webcam.
+  <strong>Watch:</strong> How To Export Custom Trained Ultralytics YOLO Model and Run Live Inference on Webcam.

-## Why Choose YOLOv8's Export Mode? +## Why Choose YOLO11's Export Mode? - **Versatility:** Export to multiple formats including ONNX, TensorRT, CoreML, and more. - **Performance:** Gain up to 5x GPU speedup with TensorRT and 3x CPU speedup with ONNX or OpenVINO. @@ -46,7 +46,7 @@ Here are some of the standout functionalities: ## Usage Examples -Export a YOLOv8n model to a different format like ONNX or TensorRT. See the Arguments section below for a full list of export arguments. +Export a YOLO11n model to a different format like ONNX or TensorRT. See the Arguments section below for a full list of export arguments. !!! example @@ -56,7 +56,7 @@ Export a YOLOv8n model to a different format like ONNX or TensorRT. See the Argu from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load an official model + model = YOLO("yolo11n.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom trained model # Export the model @@ -66,7 +66,7 @@ Export a YOLOv8n model to a different format like ONNX or TensorRT. See the Argu === "CLI" ```bash - yolo export model=yolov8n.pt format=onnx # export official model + yolo export model=yolo11n.pt format=onnx # export official model yolo export model=path/to/best.pt format=onnx # export custom trained model ``` @@ -80,15 +80,15 @@ Adjusting these parameters allows for customization of the export process to fit ## Export Formats -Available YOLOv8 export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolov8n.onnx`. Usage examples are shown for your model after export completes. +Available YOLO11 export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolo11n.onnx`. Usage examples are shown for your model after export completes. {% include "macros/export-table.md" %} ## FAQ -### How do I export a YOLOv8 model to ONNX format? +### How do I export a YOLO11 model to ONNX format? -Exporting a YOLOv8 model to ONNX format is straightforward with Ultralytics. It provides both Python and CLI methods for exporting models. +Exporting a YOLO11 model to ONNX format is straightforward with Ultralytics. It provides both Python and CLI methods for exporting models. !!! example @@ -98,7 +98,7 @@ Exporting a YOLOv8 model to ONNX format is straightforward with Ultralytics. It from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load an official model + model = YOLO("yolo11n.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom trained model # Export the model @@ -108,7 +108,7 @@ Exporting a YOLOv8 model to ONNX format is straightforward with Ultralytics. It === "CLI" ```bash - yolo export model=yolov8n.pt format=onnx # export official model + yolo export model=yolo11n.pt format=onnx # export official model yolo export model=path/to/best.pt format=onnx # export custom trained model ``` @@ -116,7 +116,7 @@ For more details on the process, including advanced options like handling differ ### What are the benefits of using TensorRT for model export? -Using TensorRT for model export offers significant performance improvements. YOLOv8 models exported to TensorRT can achieve up to a 5x GPU speedup, making it ideal for real-time inference applications. 
+Using TensorRT for model export offers significant performance improvements. YOLO11 models exported to TensorRT can achieve up to a 5x GPU speedup, making it ideal for real-time inference applications. - **Versatility:** Optimize models for a specific hardware setup. - **Speed:** Achieve faster inference through advanced optimizations. @@ -124,7 +124,7 @@ Using TensorRT for model export offers significant performance improvements. YOL To learn more about integrating TensorRT, see the [TensorRT integration guide](../integrations/tensorrt.md). -### How do I enable INT8 quantization when exporting my YOLOv8 model? +### How do I enable INT8 quantization when exporting my YOLO11 model? INT8 quantization is an excellent way to compress the model and speed up inference, especially on edge devices. Here's how you can enable INT8 quantization: @@ -135,14 +135,14 @@ INT8 quantization is an excellent way to compress the model and speed up inferen ```python from ultralytics import YOLO - model = YOLO("yolov8n.pt") # Load a model + model = YOLO("yolo11n.pt") # Load a model model.export(format="onnx", int8=True) ``` === "CLI" ```bash - yolo export model=yolov8n.pt format=onnx int8=True # export model with INT8 quantization + yolo export model=yolo11n.pt format=onnx int8=True # export model with INT8 quantization ``` INT8 quantization can be applied to various formats, such as TensorRT and CoreML. More details can be found in the [Export section](../modes/export.md). @@ -160,14 +160,14 @@ To enable this feature, use the `dynamic=True` flag during export: ```python from ultralytics import YOLO - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") model.export(format="onnx", dynamic=True) ``` === "CLI" ```bash - yolo export model=yolov8n.pt format=onnx dynamic=True + yolo export model=yolo11n.pt format=onnx dynamic=True ``` For additional context, refer to the [dynamic input size configuration](#arguments). diff --git a/docs/en/modes/index.md b/docs/en/modes/index.md index 2b56680efc..ea303643d6 100644 --- a/docs/en/modes/index.md +++ b/docs/en/modes/index.md @@ -1,16 +1,16 @@ --- comments: true -description: Discover the diverse modes of Ultralytics YOLOv8, including training, validation, prediction, export, tracking, and benchmarking. Maximize model performance and efficiency. -keywords: Ultralytics, YOLOv8, machine learning, model training, validation, prediction, export, tracking, benchmarking, object detection +description: Discover the diverse modes of Ultralytics YOLO11, including training, validation, prediction, export, tracking, and benchmarking. Maximize model performance and efficiency. +keywords: Ultralytics, YOLO11, machine learning, model training, validation, prediction, export, tracking, benchmarking, object detection --- -# Ultralytics YOLOv8 Modes +# Ultralytics YOLO11 Modes Ultralytics YOLO ecosystem and integrations ## Introduction -Ultralytics YOLOv8 is not just another object detection model; it's a versatile framework designed to cover the entire lifecycle of [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) models—from data ingestion and model training to validation, deployment, and real-world tracking. Each mode serves a specific purpose and is engineered to offer you the flexibility and efficiency required for different tasks and use-cases. 
+Ultralytics YOLO11 is not just another object detection model; it's a versatile framework designed to cover the entire lifecycle of [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) models—from data ingestion and model training to validation, deployment, and real-world tracking. Each mode serves a specific purpose and is engineered to offer you the flexibility and efficiency required for different tasks and use-cases.
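The lifecycle described here maps directly onto method calls on a single model object. A compact sketch chaining the modes, assuming the COCO8 sample dataset and pretrained weights:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")  # load pretrained weights

model.train(data="coco8.yaml", epochs=3, imgsz=640)  # Train mode
metrics = model.val()  # Val mode: evaluate on the validation split
results = model("https://ultralytics.com/images/bus.jpg")  # Predict mode
path = model.export(format="onnx")  # Export mode
```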


@@ -25,7 +25,7 @@ Ultralytics YOLOv8 is not just another object detection model; it's a versatile ### Modes at a Glance -Understanding the different **modes** that Ultralytics YOLOv8 supports is critical to getting the most out of your models: +Understanding the different **modes** that Ultralytics YOLO11 supports is critical to getting the most out of your models: - **Train** mode: Fine-tune your model on custom or preloaded datasets. - **Val** mode: A post-training checkpoint to validate model performance. @@ -34,49 +34,49 @@ Understanding the different **modes** that Ultralytics YOLOv8 supports is critic - **Track** mode: Extend your object detection model into real-time tracking applications. - **Benchmark** mode: Analyze the speed and accuracy of your model in diverse deployment environments. -This comprehensive guide aims to give you an overview and practical insights into each mode, helping you harness the full potential of YOLOv8. +This comprehensive guide aims to give you an overview and practical insights into each mode, helping you harness the full potential of YOLO11. ## [Train](train.md) -Train mode is used for training a YOLOv8 model on a custom dataset. In this mode, the model is trained using the specified dataset and hyperparameters. The training process involves optimizing the model's parameters so that it can accurately predict the classes and locations of objects in an image. +Train mode is used for training a YOLO11 model on a custom dataset. In this mode, the model is trained using the specified dataset and hyperparameters. The training process involves optimizing the model's parameters so that it can accurately predict the classes and locations of objects in an image. [Train Examples](train.md){ .md-button } ## [Val](val.md) -Val mode is used for validating a YOLOv8 model after it has been trained. In this mode, the model is evaluated on a validation set to measure its accuracy and generalization performance. This mode can be used to tune the hyperparameters of the model to improve its performance. +Val mode is used for validating a YOLO11 model after it has been trained. In this mode, the model is evaluated on a validation set to measure its accuracy and generalization performance. This mode can be used to tune the hyperparameters of the model to improve its performance. [Val Examples](val.md){ .md-button } ## [Predict](predict.md) -Predict mode is used for making predictions using a trained YOLOv8 model on new images or videos. In this mode, the model is loaded from a checkpoint file, and the user can provide images or videos to perform inference. The model predicts the classes and locations of objects in the input images or videos. +Predict mode is used for making predictions using a trained YOLO11 model on new images or videos. In this mode, the model is loaded from a checkpoint file, and the user can provide images or videos to perform inference. The model predicts the classes and locations of objects in the input images or videos. [Predict Examples](predict.md){ .md-button } ## [Export](export.md) -Export mode is used for exporting a YOLOv8 model to a format that can be used for deployment. In this mode, the model is converted to a format that can be used by other software applications or hardware devices. This mode is useful when deploying the model to production environments. +Export mode is used for exporting a YOLO11 model to a format that can be used for deployment. 
In this mode, the model is converted to a format that can be used by other software applications or hardware devices. This mode is useful when deploying the model to production environments. [Export Examples](export.md){ .md-button } ## [Track](track.md) -Track mode is used for tracking objects in real-time using a YOLOv8 model. In this mode, the model is loaded from a checkpoint file, and the user can provide a live video stream to perform real-time object tracking. This mode is useful for applications such as surveillance systems or self-driving cars. +Track mode is used for tracking objects in real-time using a YOLO11 model. In this mode, the model is loaded from a checkpoint file, and the user can provide a live video stream to perform real-time object tracking. This mode is useful for applications such as surveillance systems or self-driving cars. [Track Examples](track.md){ .md-button } ## [Benchmark](benchmark.md) -Benchmark mode is used to profile the speed and accuracy of various export formats for YOLOv8. The benchmarks provide information on the size of the exported format, its `mAP50-95` metrics (for object detection, segmentation, and pose) or `accuracy_top5` metrics (for classification), and the inference time in milliseconds per image across various formats like ONNX, OpenVINO, TensorRT, and others. This information can help users choose the optimal export format for their specific use case based on their requirements for speed and accuracy. +Benchmark mode is used to profile the speed and accuracy of various export formats for YOLO11. The benchmarks provide information on the size of the exported format, its `mAP50-95` metrics (for object detection, segmentation, and pose) or `accuracy_top5` metrics (for classification), and the inference time in milliseconds per image across various formats like ONNX, OpenVINO, TensorRT, and others. This information can help users choose the optimal export format for their specific use case based on their requirements for speed and accuracy. [Benchmark Examples](benchmark.md){ .md-button } ## FAQ -### How do I train a custom [object detection](https://www.ultralytics.com/glossary/object-detection) model with Ultralytics YOLOv8? +### How do I train a custom [object detection](https://www.ultralytics.com/glossary/object-detection) model with Ultralytics YOLO11? -Training a custom object detection model with Ultralytics YOLOv8 involves using the train mode. You need a dataset formatted in YOLO format, containing images and corresponding annotation files. Use the following command to start the training process: +Training a custom object detection model with Ultralytics YOLO11 involves using the train mode. You need a dataset formatted in YOLO format, containing images and corresponding annotation files. Use the following command to start the training process: !!! example @@ -85,22 +85,25 @@ Training a custom object detection model with Ultralytics YOLOv8 involves using ```python from ultralytics import YOLO - # Train a custom model - model = YOLO("yolov8n.pt") + # Load a pre-trained YOLO model (you can choose n, s, m, l, or x versions) + model = YOLO("yolo11n.pt") + + # Start training on your custom dataset model.train(data="path/to/dataset.yaml", epochs=100, imgsz=640) ``` === "CLI" ```bash + # Train a YOLO model from the command line yolo train data=path/to/dataset.yaml epochs=100 imgsz=640 ``` For more detailed instructions, you can refer to the [Ultralytics Train Guide](../modes/train.md). 
-### What metrics does Ultralytics YOLOv8 use to validate the model's performance? +### What metrics does Ultralytics YOLO11 use to validate the model's performance? -Ultralytics YOLOv8 uses various metrics during the validation process to assess model performance. These include: +Ultralytics YOLO11 uses various metrics during the validation process to assess model performance. These include: - **mAP (mean Average Precision)**: This evaluates the accuracy of object detection. - **IOU (Intersection over Union)**: Measures the overlap between predicted and ground truth bounding boxes. @@ -115,22 +118,25 @@ You can run the following command to start the validation: ```python from ultralytics import YOLO - # Validate the model - model = YOLO("yolov8n.pt") + # Load a pre-trained or custom YOLO model + model = YOLO("yolo11n.pt") + + # Run validation on your dataset model.val(data="path/to/validation.yaml") ``` === "CLI" ```bash + # Validate a YOLO model from the command line yolo val data=path/to/validation.yaml ``` Refer to the [Validation Guide](../modes/val.md) for further details. -### How can I export my YOLOv8 model for deployment? +### How can I export my YOLO11 model for deployment? -Ultralytics YOLOv8 offers export functionality to convert your trained model into various deployment formats such as ONNX, TensorRT, CoreML, and more. Use the following example to export your model: +Ultralytics YOLO11 offers export functionality to convert your trained model into various deployment formats such as ONNX, TensorRT, CoreML, and more. Use the following example to export your model: !!! example @@ -139,22 +145,25 @@ Ultralytics YOLOv8 offers export functionality to convert your trained model int ```python from ultralytics import YOLO - # Export the model - model = YOLO("yolov8n.pt") + # Load your trained YOLO model + model = YOLO("yolo11n.pt") + + # Export the model to ONNX format (you can specify other formats as needed) model.export(format="onnx") ``` === "CLI" ```bash - yolo export model=yolov8n.pt format=onnx + # Export a YOLO model to ONNX format from the command line + yolo export model=yolo11n.pt format=onnx ``` Detailed steps for each export format can be found in the [Export Guide](../modes/export.md). -### What is the purpose of the benchmark mode in Ultralytics YOLOv8? +### What is the purpose of the benchmark mode in Ultralytics YOLO11? -Benchmark mode in Ultralytics YOLOv8 is used to analyze the speed and [accuracy](https://www.ultralytics.com/glossary/accuracy) of various export formats such as ONNX, TensorRT, and OpenVINO. It provides metrics like model size, `mAP50-95` for object detection, and inference time across different hardware setups, helping you choose the most suitable format for your deployment needs. +Benchmark mode in Ultralytics YOLO11 is used to analyze the speed and [accuracy](https://www.ultralytics.com/glossary/accuracy) of various export formats such as ONNX, TensorRT, and OpenVINO. It provides metrics like model size, `mAP50-95` for object detection, and inference time across different hardware setups, helping you choose the most suitable format for your deployment needs. !!! 
example @@ -163,21 +172,24 @@ Benchmark mode in Ultralytics YOLOv8 is used to analyze the speed and [accuracy] ```python from ultralytics.utils.benchmarks import benchmark - # Benchmark on GPU - benchmark(model="yolov8n.pt", data="coco8.yaml", imgsz=640, half=False, device=0) + # Run benchmark on GPU (device 0) + # You can adjust parameters like model, dataset, image size, and precision as needed + benchmark(model="yolo11n.pt", data="coco8.yaml", imgsz=640, half=False, device=0) ``` === "CLI" ```bash - yolo benchmark model=yolov8n.pt data='coco8.yaml' imgsz=640 half=False device=0 + # Benchmark a YOLO model from the command line + # Adjust parameters as needed for your specific use case + yolo benchmark model=yolo11n.pt data='coco8.yaml' imgsz=640 half=False device=0 ``` For more details, refer to the [Benchmark Guide](../modes/benchmark.md). -### How can I perform real-time object tracking using Ultralytics YOLOv8? +### How can I perform real-time object tracking using Ultralytics YOLO11? -Real-time object tracking can be achieved using the track mode in Ultralytics YOLOv8. This mode extends object detection capabilities to track objects across video frames or live feeds. Use the following example to enable tracking: +Real-time object tracking can be achieved using the track mode in Ultralytics YOLO11. This mode extends object detection capabilities to track objects across video frames or live feeds. Use the following example to enable tracking: !!! example @@ -186,14 +198,19 @@ Real-time object tracking can be achieved using the track mode in Ultralytics YO ```python from ultralytics import YOLO - # Track objects in a video - model = YOLO("yolov8n.pt") + # Load a pre-trained YOLO model + model = YOLO("yolo11n.pt") + + # Start tracking objects in a video + # You can also use live video streams or webcam input model.track(source="path/to/video.mp4") ``` === "CLI" ```bash + # Perform object tracking on a video from the command line + # You can specify different sources like webcam (0) or RTSP streams yolo track source=path/to/video.mp4 ``` diff --git a/docs/en/modes/predict.md b/docs/en/modes/predict.md index 196d9e2028..cb8ca25e7e 100644 --- a/docs/en/modes/predict.md +++ b/docs/en/modes/predict.md @@ -1,7 +1,7 @@ --- comments: true -description: Harness the power of Ultralytics YOLOv8 for real-time, high-speed inference on various data sources. Learn about predict mode, key features, and practical applications. -keywords: Ultralytics, YOLOv8, model prediction, inference, predict mode, real-time inference, computer vision, machine learning, streaming, high performance +description: Harness the power of Ultralytics YOLO11 for real-time, high-speed inference on various data sources. Learn about predict mode, key features, and practical applications. +keywords: Ultralytics, YOLO11, model prediction, inference, predict mode, real-time inference, computer vision, machine learning, streaming, high performance --- # Model Prediction with Ultralytics YOLO @@ -10,7 +10,7 @@ keywords: Ultralytics, YOLOv8, model prediction, inference, predict mode, real-t ## Introduction -In the world of [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) and [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv), the process of making sense out of visual data is called 'inference' or 'prediction'. Ultralytics YOLOv8 offers a powerful feature known as **predict mode** that is tailored for high-performance, real-time inference on a wide range of data sources. 
+In the world of [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) and [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv), the process of making sense out of visual data is called 'inference' or 'prediction'. Ultralytics YOLO11 offers a powerful feature known as **predict mode** that is tailored for high-performance, real-time inference on a wide range of data sources.


@@ -20,7 +20,7 @@ In the world of [machine learning](https://www.ultralytics.com/glossary/machine- allowfullscreen>
-  <strong>Watch:</strong> How to Extract the Outputs from Ultralytics YOLOv8 Model for Custom Projects.
+  <strong>Watch:</strong> How to Extract the Outputs from Ultralytics YOLO Model for Custom Projects.

 ## Real-world Applications
@@ -32,7 +32,7 @@ In the world of [machine learning](https://www.ultralytics.com/glossary/machine-

 ## Why Use Ultralytics YOLO for Inference?

-Here's why you should consider YOLOv8's predict mode for your various inference needs:
+Here's why you should consider YOLO11's predict mode for your various inference needs:

 - **Versatility:** Capable of making inferences on images, videos, and even live streams.
 - **Performance:** Engineered for real-time, high-speed processing without sacrificing [accuracy](https://www.ultralytics.com/glossary/accuracy).
@@ -41,7 +41,7 @@ Here's why you should consider YOLOv8's predict mode for your various inference

 ### Key Features of Predict Mode

-YOLOv8's predict mode is designed to be robust and versatile, featuring:
+YOLO11's predict mode is designed to be robust and versatile, featuring:

 - **Multiple Data Source Compatibility:** Whether your data is in the form of individual images, a collection of images, video files, or real-time video streams, predict mode has you covered.
 - **Streaming Mode:** Use the streaming feature to generate a memory-efficient generator of `Results` objects. Enable this by setting `stream=True` in the predictor's call method.
@@ -58,7 +58,7 @@ Ultralytics YOLO models return either a Python list of `Results` objects, or a m
         from ultralytics import YOLO

         # Load a model
-        model = YOLO("yolov8n.pt")  # pretrained YOLOv8n model
+        model = YOLO("yolo11n.pt")  # pretrained YOLO11n model

         # Run batched inference on a list of images
         results = model(["image1.jpg", "image2.jpg"])  # return a list of Results objects
@@ -80,7 +80,7 @@ Ultralytics YOLO models return either a Python list of `Results` objects, or a m
         from ultralytics import YOLO

         # Load a model
-        model = YOLO("yolov8n.pt")  # pretrained YOLOv8n model
+        model = YOLO("yolo11n.pt")  # pretrained YOLO11n model

         # Run batched inference on a list of images
         results = model(["image1.jpg", "image2.jpg"], stream=True)  # return a generator of Results objects
@@ -98,7 +98,7 @@ Ultralytics YOLO models return either a Python list of `Results` objects, or a m

 ## Inference Sources

-YOLOv8 can process different types of input sources for inference, as shown in the table below. The sources include static images, video streams, and various data formats. The table also indicates whether each source can be used in streaming mode with the argument `stream=True` ✅. Streaming mode is beneficial for processing videos or live streams as it creates a generator of results instead of loading all frames into memory.
+YOLO11 can process different types of input sources for inference, as shown in the table below. The sources include static images, video streams, and various data formats. The table also indicates whether each source can be used in streaming mode with the argument `stream=True` ✅. Streaming mode is beneficial for processing videos or live streams as it creates a generator of results instead of loading all frames into memory.

 !!! tip
@@ -131,8 +131,8 @@ Below are code examples for using each source type:
         ```python
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n model
-        model = YOLO("yolov8n.pt")
+        # Load a pretrained YOLO11n model
+        model = YOLO("yolo11n.pt")

         # Define path to the image file
         source = "path/to/image.jpg"
@@ -147,8 +147,8 @@ Below are code examples for using each source type:
         ```python
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n model
-        model = YOLO("yolov8n.pt")
+        # Load a pretrained YOLO11n model
+        model = YOLO("yolo11n.pt")

         # Define current screenshot as source
         source = "screen"
@@ -163,8 +163,8 @@ Below are code examples for using each source type:
         ```python
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n model
-        model = YOLO("yolov8n.pt")
+        # Load a pretrained YOLO11n model
+        model = YOLO("yolo11n.pt")

         # Define remote image or video URL
         source = "https://ultralytics.com/images/bus.jpg"
@@ -181,8 +181,8 @@ Below are code examples for using each source type:
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n model
-        model = YOLO("yolov8n.pt")
+        # Load a pretrained YOLO11n model
+        model = YOLO("yolo11n.pt")

         # Open an image using PIL
         source = Image.open("path/to/image.jpg")
@@ -199,8 +199,8 @@ Below are code examples for using each source type:
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n model
-        model = YOLO("yolov8n.pt")
+        # Load a pretrained YOLO11n model
+        model = YOLO("yolo11n.pt")

         # Read an image using OpenCV
         source = cv2.imread("path/to/image.jpg")
@@ -217,8 +217,8 @@ Below are code examples for using each source type:
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n model
-        model = YOLO("yolov8n.pt")
+        # Load a pretrained YOLO11n model
+        model = YOLO("yolo11n.pt")

         # Create a random numpy array of HWC shape (640, 640, 3) with values in range [0, 255] and type uint8
         source = np.random.randint(low=0, high=255, size=(640, 640, 3), dtype="uint8")
@@ -235,8 +235,8 @@ Below are code examples for using each source type:
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n model
-        model = YOLO("yolov8n.pt")
+        # Load a pretrained YOLO11n model
+        model = YOLO("yolo11n.pt")

         # Create a random torch tensor of BCHW shape (1, 3, 640, 640) with values in range [0, 1] and type float32
         source = torch.rand(1, 3, 640, 640, dtype=torch.float32)
@@ -251,8 +251,8 @@ Below are code examples for using each source type:
         ```python
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n model
-        model = YOLO("yolov8n.pt")
+        # Load a pretrained YOLO11n model
+        model = YOLO("yolo11n.pt")

         # Define a path to a CSV file with images, URLs, videos and directories
         source = "path/to/file.csv"
@@ -267,8 +267,8 @@ Below are code examples for using each source type:
         ```python
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n model
-        model = YOLO("yolov8n.pt")
+        # Load a pretrained YOLO11n model
+        model = YOLO("yolo11n.pt")

         # Define path to video file
         source = "path/to/video.mp4"
@@ -283,8 +283,8 @@ Below are code examples for using each source type:
         ```python
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n model
-        model = YOLO("yolov8n.pt")
+        # Load a pretrained YOLO11n model
+        model = YOLO("yolo11n.pt")

         # Define path to directory containing images and videos for inference
         source = "path/to/dir"
@@ -299,8 +299,8 @@ Below are code examples for using each source type:
         ```python
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n model
-        model = YOLO("yolov8n.pt")
+        # Load a pretrained YOLO11n model
+        model = YOLO("yolo11n.pt")

         # Define a glob search for all JPG files in a directory
         source = "path/to/dir/*.jpg"
@@ -318,8 +318,8 @@ Below are code examples for using each source type:
         ```python
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n model
-        model = YOLO("yolov8n.pt")
+        # Load a pretrained YOLO11n model
+        model = YOLO("yolo11n.pt")

         # Define source as YouTube video URL
         source = "https://youtu.be/LNwODJXcvt4"
@@ -335,8 +335,8 @@ Below are code examples for using each source type:
         ```python
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n model
-        model = YOLO("yolov8n.pt")
+        # Load a pretrained YOLO11n model
+        model = YOLO("yolo11n.pt")

         # Single stream with batch-size 1 inference
         source = "rtsp://example.com/media.mp4"  # RTSP, RTMP, TCP, or IP streaming address
@@ -354,8 +354,8 @@ Below are code examples for using each source type:
         ```python
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n model
-        model = YOLO("yolov8n.pt")
+        # Load a pretrained YOLO11n model
+        model = YOLO("yolo11n.pt")

         # Multiple streams with batched inference (e.g., batch-size 8 for 8 streams)
         source = "path/to/list.streams"  # *.streams text file with one streaming address per line
@@ -385,8 +385,8 @@ Below are code examples for using each source type:
         ```python
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n model
-        model = YOLO("yolov8n.pt")
+        # Load a pretrained YOLO11n model
+        model = YOLO("yolo11n.pt")

         # Run inference on 'bus.jpg' with arguments
         model.predict("bus.jpg", save=True, imgsz=320, conf=0.5)
@@ -402,7 +402,7 @@ Visualization arguments:

 ## Image and Video Formats

-YOLOv8 supports various image and video formats, as specified in [ultralytics/data/utils.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/utils.py). See the tables below for the valid suffixes and example predict commands.
+YOLO11 supports various image and video formats, as specified in [ultralytics/data/utils.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/utils.py). See the tables below for the valid suffixes and example predict commands.
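To make the argument hunks above concrete, here is a hedged sketch combining a few of the inference and visualization arguments this diff names; the specific values (`imgsz=320`, `conf=0.5`, and so on) are illustrative only, and outputs land in `runs/detect/predict` by default.

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")

# Inference arguments tune detection itself; visualization arguments control
# what gets drawn and written to disk
results = model.predict(
    "bus.jpg",
    imgsz=320,  # inference image size
    conf=0.5,  # minimum confidence threshold
    iou=0.6,  # NMS IoU threshold
    save=True,  # save the annotated image
    save_txt=True,  # also save labels as *.txt files
    line_width=2,  # thickness of drawn boxes
)
```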
 ### Images
@@ -449,8 +449,8 @@ All Ultralytics `predict()` calls will return a list of `Results` objects:
         ```python
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n model
-        model = YOLO("yolov8n.pt")
+        # Load a pretrained YOLO11n model
+        model = YOLO("yolo11n.pt")

         # Run inference on an image
         results = model("bus.jpg")  # list of 1 Results object
@@ -501,8 +501,8 @@ For more details see the [`Results` class documentation](../reference/engine/res
         ```python
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n model
-        model = YOLO("yolov8n.pt")
+        # Load a pretrained YOLO11n model
+        model = YOLO("yolo11n.pt")

         # Run inference on an image
         results = model("bus.jpg")  # results list
@@ -539,8 +539,8 @@ For more details see the [`Boxes` class documentation](../reference/engine/resul
         ```python
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n-seg Segment model
-        model = YOLO("yolov8n-seg.pt")
+        # Load a pretrained YOLO11n-seg Segment model
+        model = YOLO("yolo11n-seg.pt")

         # Run inference on an image
         results = model("bus.jpg")  # results list
@@ -572,8 +572,8 @@ For more details see the [`Masks` class documentation](../reference/engine/resul
         ```python
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n-pose Pose model
-        model = YOLO("yolov8n-pose.pt")
+        # Load a pretrained YOLO11n-pose Pose model
+        model = YOLO("yolo11n-pose.pt")

         # Run inference on an image
         results = model("bus.jpg")  # results list
@@ -606,8 +606,8 @@ For more details see the [`Keypoints` class documentation](../reference/engine/r
         ```python
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n-cls Classify model
-        model = YOLO("yolov8n-cls.pt")
+        # Load a pretrained YOLO11n-cls Classify model
+        model = YOLO("yolo11n-cls.pt")

         # Run inference on an image
         results = model("bus.jpg")  # results list
@@ -641,8 +641,8 @@ For more details see the [`Probs` class documentation](../reference/engine/resul
         ```python
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n model
-        model = YOLO("yolov8n-obb.pt")
+        # Load a pretrained YOLO11n model
+        model = YOLO("yolo11n-obb.pt")

         # Run inference on an image
         results = model("bus.jpg")  # results list
@@ -681,8 +681,8 @@ The `plot()` method in `Results` objects facilitates visualization of prediction
         from ultralytics import YOLO

-        # Load a pretrained YOLOv8n model
-        model = YOLO("yolov8n.pt")
+        # Load a pretrained YOLO11n model
+        model = YOLO("yolo11n.pt")

         # Run inference on 'bus.jpg'
         results = model(["bus.jpg", "zidane.jpg"])  # results list
@@ -747,15 +747,15 @@ When using YOLO models in a multi-threaded application, it's important to instan

     # Starting threads that each have their own model instance
-    Thread(target=thread_safe_predict, args=("yolov8n.pt", "image1.jpg")).start()
-    Thread(target=thread_safe_predict, args=("yolov8n.pt", "image2.jpg")).start()
+    Thread(target=thread_safe_predict, args=("yolo11n.pt", "image1.jpg")).start()
+    Thread(target=thread_safe_predict, args=("yolo11n.pt", "image2.jpg")).start()
     ```

 For an in-depth look at thread-safe inference with YOLO models and step-by-step instructions, please refer to our [YOLO Thread-Safe Inference Guide](../guides/yolo-thread-safe-inference.md). This guide will provide you with all the necessary information to avoid common pitfalls and ensure that your multi-threaded inference runs smoothly.

 ## Streaming Source `for`-loop

-Here's a Python script using OpenCV (`cv2`) and YOLOv8 to run inference on video frames. This script assumes you have already installed the necessary packages (`opencv-python` and `ultralytics`).
+Here's a Python script using OpenCV (`cv2`) and YOLO to run inference on video frames. This script assumes you have already installed the necessary packages (`opencv-python` and `ultralytics`).

 !!! example "Streaming for-loop"

@@ -764,8 +764,8 @@ Here's a Python script using OpenCV (`cv2`) and YOLOv8 to run inference on video
         from ultralytics import YOLO

-        # Load the YOLOv8 model
-        model = YOLO("yolov8n.pt")
+        # Load the YOLO model
+        model = YOLO("yolo11n.pt")

         # Open the video file
         video_path = "path/to/your/video/file.mp4"
@@ -777,14 +777,14 @@ Here's a Python script using OpenCV (`cv2`) and YOLOv8 to run inference on video
             success, frame = cap.read()

             if success:
-                # Run YOLOv8 inference on the frame
+                # Run YOLO inference on the frame
                 results = model(frame)

                 # Visualize the results on the frame
                 annotated_frame = results[0].plot()

                 # Display the annotated frame
-                cv2.imshow("YOLOv8 Inference", annotated_frame)
+                cv2.imshow("YOLO Inference", annotated_frame)

                 # Break the loop if 'q' is pressed
                 if cv2.waitKey(1) & 0xFF == ord("q"):
@@ -806,22 +806,22 @@ This script will run predictions on each frame of the video, visualize the resul

 ## FAQ

-### What is Ultralytics YOLOv8 and its predict mode for real-time inference?
+### What is Ultralytics YOLO and its predict mode for real-time inference?

-Ultralytics YOLOv8 is a state-of-the-art model for real-time [object detection](https://www.ultralytics.com/glossary/object-detection), segmentation, and classification. Its **predict mode** allows users to perform high-speed inference on various data sources such as images, videos, and live streams. Designed for performance and versatility, it also offers batch processing and streaming modes. For more details on its features, check out the [Ultralytics YOLOv8 predict mode](#key-features-of-predict-mode).
+Ultralytics YOLO is a state-of-the-art model for real-time [object detection](https://www.ultralytics.com/glossary/object-detection), segmentation, and classification. Its **predict mode** allows users to perform high-speed inference on various data sources such as images, videos, and live streams. Designed for performance and versatility, it also offers batch processing and streaming modes. For more details on its features, check out the [Ultralytics YOLO predict mode](#key-features-of-predict-mode).

-### How can I run inference using Ultralytics YOLOv8 on different data sources?
+### How can I run inference using Ultralytics YOLO on different data sources?

-Ultralytics YOLOv8 can process a wide range of data sources, including individual images, videos, directories, URLs, and streams. You can specify the data source in the `model.predict()` call. For example, use `'image.jpg'` for a local image or `'https://ultralytics.com/images/bus.jpg'` for a URL. Check out the detailed examples for various [inference sources](#inference-sources) in the documentation.
+Ultralytics YOLO can process a wide range of data sources, including individual images, videos, directories, URLs, and streams. You can specify the data source in the `model.predict()` call. For example, use `'image.jpg'` for a local image or `'https://ultralytics.com/images/bus.jpg'` for a URL. Check out the detailed examples for various [inference sources](#inference-sources) in the documentation.

-### How do I optimize YOLOv8 inference speed and memory usage?
+### How do I optimize YOLO inference speed and memory usage?

 To optimize inference speed and manage memory efficiently, you can use the streaming mode by setting `stream=True` in the predictor's call method. The streaming mode generates a memory-efficient generator of `Results` objects instead of loading all frames into memory. For processing long videos or large datasets, streaming mode is particularly useful. Learn more about [streaming mode](#key-features-of-predict-mode).

-### What inference arguments does Ultralytics YOLOv8 support?
+### What inference arguments does Ultralytics YOLO support?

-The `model.predict()` method in YOLOv8 supports various arguments such as `conf`, `iou`, `imgsz`, `device`, and more. These arguments allow you to customize the inference process, setting parameters like confidence thresholds, image size, and the device used for computation. Detailed descriptions of these arguments can be found in the [inference arguments](#inference-arguments) section.
+The `model.predict()` method in YOLO supports various arguments such as `conf`, `iou`, `imgsz`, `device`, and more. These arguments allow you to customize the inference process, setting parameters like confidence thresholds, image size, and the device used for computation. Detailed descriptions of these arguments can be found in the [inference arguments](#inference-arguments) section.

-### How can I visualize and save the results of YOLOv8 predictions?
+### How can I visualize and save the results of YOLO predictions?

-After running inference with YOLOv8, the `Results` objects contain methods for displaying and saving annotated images. You can use methods like `result.show()` and `result.save(filename="result.jpg")` to visualize and save the results. For a comprehensive list of these methods, refer to the [working with results](#working-with-results) section.
+After running inference with YOLO, the `Results` objects contain methods for displaying and saving annotated images. You can use methods like `result.show()` and `result.save(filename="result.jpg")` to visualize and save the results. For a comprehensive list of these methods, refer to the [working with results](#working-with-results) section.
diff --git a/docs/en/modes/track.md b/docs/en/modes/track.md
index 46c43b0b1a..efd39e9f47 100644
--- a/docs/en/modes/track.md
+++ b/docs/en/modes/track.md
@@ -27,7 +27,7 @@ The output from Ultralytics trackers is consistent with standard [object detecti
     allowfullscreen>
-  Watch: Object Detection and Tracking with Ultralytics YOLOv8.
+  Watch: Object Detection and Tracking with Ultralytics YOLO.

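Before the usage examples below, a minimal hedged sketch of the `model.track()` call that this page's changes center on; the YouTube URL is the same placeholder used throughout these docs, and `result.boxes.id` can be `None` before any track is established.

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")

# track() behaves like predict() but assigns persistent IDs across frames
results = model.track(source="https://youtu.be/LNwODJXcvt4", show=True)

for result in results:
    if result.boxes.id is not None:  # IDs are None until tracks exist
        print(result.boxes.id.int().tolist())  # per-object track IDs
```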
 ## Real-world Applications
@@ -60,7 +60,7 @@ The default tracker is BoT-SORT.

 If the object confidence score is low, i.e. lower than [`track_high_thresh`](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/trackers/bytetrack.yaml#L5), then no tracks will be successfully returned and updated.

-To run the tracker on video streams, use a trained Detect, Segment or Pose model such as YOLOv8n, YOLOv8n-seg and YOLOv8n-pose.
+To run the tracker on video streams, use a trained Detect, Segment or Pose model such as YOLO11n, YOLO11n-seg and YOLO11n-pose.

 !!! example

@@ -70,9 +70,9 @@ To run the tracker on video streams, use a trained Detect, Segment or Pose model
         from ultralytics import YOLO

         # Load an official or custom model
-        model = YOLO("yolov8n.pt")  # Load an official Detect model
-        model = YOLO("yolov8n-seg.pt")  # Load an official Segment model
-        model = YOLO("yolov8n-pose.pt")  # Load an official Pose model
+        model = YOLO("yolo11n.pt")  # Load an official Detect model
+        model = YOLO("yolo11n-seg.pt")  # Load an official Segment model
+        model = YOLO("yolo11n-pose.pt")  # Load an official Pose model
         model = YOLO("path/to/best.pt")  # Load a custom trained model

         # Perform tracking with the model
@@ -84,9 +84,9 @@ To run the tracker on video streams, use a trained Detect, Segment or Pose model
         ```bash
         # Perform tracking with various models using the command line interface
-        yolo track model=yolov8n.pt source="https://youtu.be/LNwODJXcvt4"  # Official Detect model
-        yolo track model=yolov8n-seg.pt source="https://youtu.be/LNwODJXcvt4"  # Official Segment model
-        yolo track model=yolov8n-pose.pt source="https://youtu.be/LNwODJXcvt4"  # Official Pose model
+        yolo track model=yolo11n.pt source="https://youtu.be/LNwODJXcvt4"  # Official Detect model
+        yolo track model=yolo11n-seg.pt source="https://youtu.be/LNwODJXcvt4"  # Official Segment model
+        yolo track model=yolo11n-pose.pt source="https://youtu.be/LNwODJXcvt4"  # Official Pose model
         yolo track model=path/to/best.pt source="https://youtu.be/LNwODJXcvt4"  # Custom trained model

         # Track using ByteTrack tracker
@@ -113,7 +113,7 @@ Tracking configuration shares properties with Predict mode, such as `conf`, `iou
         from ultralytics import YOLO

         # Configure the tracking parameters and run the tracker
-        model = YOLO("yolov8n.pt")
+        model = YOLO("yolo11n.pt")
         results = model.track(source="https://youtu.be/LNwODJXcvt4", conf=0.3, iou=0.5, show=True)
         ```

@@ -121,7 +121,7 @@ Tracking configuration shares properties with Predict mode, such as `conf`, `iou
         ```bash
         # Configure tracking parameters and run the tracker using the command line interface
-        yolo track model=yolov8n.pt source="https://youtu.be/LNwODJXcvt4" conf=0.3, iou=0.5 show
+        yolo track model=yolo11n.pt source="https://youtu.be/LNwODJXcvt4" conf=0.3 iou=0.5 show
         ```

 ### Tracker Selection

@@ -136,7 +136,7 @@ Ultralytics also allows you to use a modified tracker configuration file. To do
         from ultralytics import YOLO

         # Load the model and run the tracker with a custom configuration file
-        model = YOLO("yolov8n.pt")
+        model = YOLO("yolo11n.pt")
         results = model.track(source="https://youtu.be/LNwODJXcvt4", tracker="custom_tracker.yaml")
         ```

@@ -144,7 +144,7 @@ Ultralytics also allows you to use a modified tracker configuration file. To do
         ```bash
         # Load the model and run the tracker with a custom configuration file using the command line interface
-        yolo track model=yolov8n.pt source="https://youtu.be/LNwODJXcvt4" tracker='custom_tracker.yaml'
+        yolo track model=yolo11n.pt source="https://youtu.be/LNwODJXcvt4" tracker='custom_tracker.yaml'
         ```

 For a comprehensive list of tracking arguments, refer to the [ultralytics/cfg/trackers](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/trackers) page.

@@ -153,7 +153,7 @@ For a comprehensive list of tracking arguments, refer to the [ultralytics/cfg/tr

 ### Persisting Tracks Loop

-Here is a Python script using [OpenCV](https://www.ultralytics.com/glossary/opencv) (`cv2`) and YOLOv8 to run object tracking on video frames. This script still assumes you have already installed the necessary packages (`opencv-python` and `ultralytics`). The `persist=True` argument tells the tracker that the current image or frame is the next in a sequence and to expect tracks from the previous image in the current image.
+Here is a Python script using [OpenCV](https://www.ultralytics.com/glossary/opencv) (`cv2`) and YOLO11 to run object tracking on video frames. This script still assumes you have already installed the necessary packages (`opencv-python` and `ultralytics`). The `persist=True` argument tells the tracker that the current image or frame is the next in a sequence and to expect tracks from the previous image in the current image.

 !!! example "Streaming for-loop with tracking"

@@ -162,8 +162,8 @@ Here is a Python script using [OpenCV](https://www.ultralytics.com/glossary/open
         from ultralytics import YOLO

-        # Load the YOLOv8 model
-        model = YOLO("yolov8n.pt")
+        # Load the YOLO11 model
+        model = YOLO("yolo11n.pt")

         # Open the video file
         video_path = "path/to/video.mp4"
@@ -175,14 +175,14 @@ Here is a Python script using [OpenCV](https://www.ultralytics.com/glossary/open
             success, frame = cap.read()

             if success:
-                # Run YOLOv8 tracking on the frame, persisting tracks between frames
+                # Run YOLO11 tracking on the frame, persisting tracks between frames
                 results = model.track(frame, persist=True)

                 # Visualize the results on the frame
                 annotated_frame = results[0].plot()

                 # Display the annotated frame
-                cv2.imshow("YOLOv8 Tracking", annotated_frame)
+                cv2.imshow("YOLO11 Tracking", annotated_frame)

                 # Break the loop if 'q' is pressed
                 if cv2.waitKey(1) & 0xFF == ord("q"):
@@ -200,9 +200,9 @@ Please note the change from `model(frame)` to `model.track(frame)`, which enable

 ### Plotting Tracks Over Time

-Visualizing object tracks over consecutive frames can provide valuable insights into the movement patterns and behavior of detected objects within a video. With Ultralytics YOLOv8, plotting these tracks is a seamless and efficient process.
+Visualizing object tracks over consecutive frames can provide valuable insights into the movement patterns and behavior of detected objects within a video. With Ultralytics YOLO11, plotting these tracks is a seamless and efficient process.

-In the following example, we demonstrate how to utilize YOLOv8's tracking capabilities to plot the movement of detected objects across multiple video frames. This script involves opening a video file, reading it frame by frame, and utilizing the YOLO model to identify and track various objects. By retaining the center points of the detected bounding boxes and connecting them, we can draw lines that represent the paths followed by the tracked objects.
+In the following example, we demonstrate how to utilize YOLO11's tracking capabilities to plot the movement of detected objects across multiple video frames. This script involves opening a video file, reading it frame by frame, and utilizing the YOLO model to identify and track various objects. By retaining the center points of the detected bounding boxes and connecting them, we can draw lines that represent the paths followed by the tracked objects.

 !!! example "Plotting tracks over multiple video frames"

@@ -214,8 +214,8 @@ In the following example, we demonstrate how to utilize YOLOv8's tracking capabi
         from ultralytics import YOLO

-        # Load the YOLOv8 model
-        model = YOLO("yolov8n.pt")
+        # Load the YOLO11 model
+        model = YOLO("yolo11n.pt")

         # Open the video file
         video_path = "path/to/video.mp4"
@@ -230,7 +230,7 @@ In the following example, we demonstrate how to utilize YOLOv8's tracking capabi
             success, frame = cap.read()

             if success:
-                # Run YOLOv8 tracking on the frame, persisting tracks between frames
+                # Run YOLO11 tracking on the frame, persisting tracks between frames
                 results = model.track(frame, persist=True)

                 # Get the boxes and track IDs
@@ -253,7 +253,7 @@ In the following example, we demonstrate how to utilize YOLOv8's tracking capabi
                 cv2.polylines(annotated_frame, [points], isClosed=False, color=(230, 230, 230), thickness=10)

                 # Display the annotated frame
-                cv2.imshow("YOLOv8 Tracking", annotated_frame)
+                cv2.imshow("YOLO11 Tracking", annotated_frame)

                 # Break the loop if 'q' is pressed
                 if cv2.waitKey(1) & 0xFF == ord("q"):
@@ -275,7 +275,7 @@ In the provided Python script, we make use of Python's `threading` module to run

 To ensure that each thread receives the correct parameters (the video file, the model to use and the file index), we define a function `run_tracker_in_thread` that accepts these parameters and contains the main tracking loop. This function reads the video frame by frame, runs the tracker, and displays the results.

-Two different models are used in this example: `yolov8n.pt` and `yolov8n-seg.pt`, each tracking objects in a different video file. The video files are specified in `video_file1` and `video_file2`.
+Two different models are used in this example: `yolo11n.pt` and `yolo11n-seg.pt`, each tracking objects in a different video file. The video files are specified in `video_file1` and `video_file2`.

 The `daemon=True` parameter in `threading.Thread` means that these threads will be closed as soon as the main program finishes. We then start the threads with `start()` and use `join()` to make the main thread wait until both tracker threads have finished.

@@ -291,7 +291,7 @@ Finally, after all threads have completed their task, the windows displaying the
         from ultralytics import YOLO

         # Define model names and video sources
-        MODEL_NAMES = ["yolov8n.pt", "yolov8n-seg.pt"]
+        MODEL_NAMES = ["yolo11n.pt", "yolo11n-seg.pt"]
         SOURCES = ["path/to/video.mp4", "0"]  # local video, 0 for webcam

@@ -300,7 +300,7 @@ Finally, after all threads have completed their task, the windows displaying the
             Run YOLO tracker in its own thread for concurrent processing.

             Args:
-                model_name (str): The YOLOv8 model object.
+                model_name (str): The name of the YOLO11 model file.
                 filename (str): The path to the video file or the identifier for the webcam/external camera source.
             """
             model = YOLO(model_name)
@@ -357,14 +357,14 @@ You can configure a custom tracker by copying an existing tracker configuration
         ```python
         from ultralytics import YOLO

-        model = YOLO("yolov8n.pt")
+        model = YOLO("yolo11n.pt")
         results = model.track(source="https://youtu.be/LNwODJXcvt4", tracker="custom_tracker.yaml")
         ```

     === "CLI"

         ```bash
-        yolo track model=yolov8n.pt source="https://youtu.be/LNwODJXcvt4" tracker='custom_tracker.yaml'
+        yolo track model=yolo11n.pt source="https://youtu.be/LNwODJXcvt4" tracker='custom_tracker.yaml'
         ```

 ### How can I run object tracking on multiple video streams simultaneously?

@@ -381,7 +381,7 @@ To run object tracking on multiple video streams simultaneously, you can use Pyt
         from ultralytics import YOLO

         # Define model names and video sources
-        MODEL_NAMES = ["yolov8n.pt", "yolov8n-seg.pt"]
+        MODEL_NAMES = ["yolo11n.pt", "yolo11n-seg.pt"]
         SOURCES = ["path/to/video.mp4", "0"]  # local video, 0 for webcam

@@ -390,7 +390,7 @@ To run object tracking on multiple video streams simultaneously, you can use Pyt
             Run YOLO tracker in its own thread for concurrent processing.

             Args:
-                model_name (str): The YOLOv8 model object.
+                model_name (str): The name of the YOLO11 model file.
                 filename (str): The path to the video file or the identifier for the webcam/external camera source.
             """
             model = YOLO(model_name)
@@ -438,7 +438,7 @@ To visualize object tracks over multiple video frames, you can use the YOLO mode
         from ultralytics import YOLO

-        model = YOLO("yolov8n.pt")
+        model = YOLO("yolo11n.pt")
         video_path = "path/to/video.mp4"
         cap = cv2.VideoCapture(video_path)
         track_history = defaultdict(lambda: [])
@@ -458,7 +458,7 @@ To visualize object tracks over multiple video frames, you can use the YOLO mode
                     track.pop(0)
                 points = np.hstack(track).astype(np.int32).reshape((-1, 1, 2))
                 cv2.polylines(annotated_frame, [points], isClosed=False, color=(230, 230, 230), thickness=10)
-                cv2.imshow("YOLOv8 Tracking", annotated_frame)
+                cv2.imshow("YOLO11 Tracking", annotated_frame)
                 if cv2.waitKey(1) & 0xFF == ord("q"):
                     break
             else:
diff --git a/docs/en/modes/train.md b/docs/en/modes/train.md
index f5722b7280..9cbe791991 100644
--- a/docs/en/modes/train.md
+++ b/docs/en/modes/train.md
@@ -1,7 +1,7 @@
 ---
 comments: true
-description: Learn how to efficiently train object detection models using YOLOv8 with comprehensive instructions on settings, augmentation, and hardware utilization.
-keywords: Ultralytics, YOLOv8, model training, deep learning, object detection, GPU training, dataset augmentation, hyperparameter tuning, model performance, M1 M2 training
+description: Learn how to efficiently train object detection models using YOLO11 with comprehensive instructions on settings, augmentation, and hardware utilization.
+keywords: Ultralytics, YOLO11, model training, deep learning, object detection, GPU training, dataset augmentation, hyperparameter tuning, model performance, M1 M2 training
 ---

 # Model Training with Ultralytics YOLO
@@ -10,7 +10,7 @@ keywords: Ultralytics, YOLOv8, model training, deep learning, object detection,

 ## Introduction

-Training a [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) model involves feeding it data and adjusting its parameters so that it can make accurate predictions. Train mode in Ultralytics YOLOv8 is engineered for effective and efficient training of object detection models, fully utilizing modern hardware capabilities. This guide aims to cover all the details you need to get started with training your own models using YOLOv8's robust set of features.
+Training a [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) model involves feeding it data and adjusting its parameters so that it can make accurate predictions. Train mode in Ultralytics YOLO11 is engineered for effective and efficient training of object detection models, fully utilizing modern hardware capabilities. This guide aims to cover all the details you need to get started with training your own models using YOLO11's robust set of features.
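As a compact sketch of the train-then-reuse workflow this introduction describes: `runs/detect/train/weights/best.pt` is the usual default checkpoint location, but it changes if you set `project=` or `name=`, so treat the path as illustrative.

```python
from ultralytics import YOLO

# Fine-tune a pretrained YOLO11n model on the small COCO8 sample dataset
model = YOLO("yolo11n.pt")
results = model.train(data="coco8.yaml", epochs=100, imgsz=640)

# Reload the best checkpoint afterwards for validation or inference
# (default save location; adjust if you changed project/name settings)
best = YOLO("runs/detect/train/weights/best.pt")
metrics = best.val()
```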


@@ -20,12 +20,12 @@ Training a [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl
     allowfullscreen>
-  Watch: How to Train a YOLOv8 model on Your Custom Dataset in Google Colab.
+  Watch: How to Train a YOLO model on Your Custom Dataset in Google Colab.

 ## Why Choose Ultralytics YOLO for Training?

-Here are some compelling reasons to opt for YOLOv8's Train mode:
+Here are some compelling reasons to opt for YOLO11's Train mode:

 - **Efficiency:** Make the most out of your hardware, whether you're on a single-GPU setup or scaling across multiple GPUs.
 - **Versatility:** Train on custom datasets in addition to readily available ones like COCO, VOC, and ImageNet.
@@ -34,7 +34,7 @@ Here are some compelling reasons to opt for YOLOv8's Train mode:

 ### Key Features of Train Mode

-The following are some notable features of YOLOv8's Train mode:
+The following are some notable features of YOLO11's Train mode:

 - **Automatic Dataset Download:** Standard datasets like COCO, VOC, and ImageNet are downloaded automatically on first use.
 - **Multi-GPU Support:** Scale your training efforts seamlessly across multiple GPUs to expedite the process.
@@ -43,11 +43,11 @@ The following are some notable features of YOLOv8's Train mode:

 !!! tip

-    * YOLOv8 datasets like COCO, VOC, ImageNet and many others automatically download on first use, i.e. `yolo train data=coco.yaml`
+    * YOLO11 datasets like COCO, VOC, ImageNet and many others automatically download on first use, i.e. `yolo train data=coco.yaml`

 ## Usage Examples

-Train YOLOv8n on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 640. The training device can be specified using the `device` argument. If no argument is passed GPU `device=0` will be used if available, otherwise `device='cpu'` will be used. See Arguments section below for a full list of training arguments.
+Train YOLO11n on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 640. The training device can be specified using the `device` argument. If no argument is passed, GPU `device=0` will be used if available; otherwise `device='cpu'` will be used. See the Arguments section below for a full list of training arguments.

 !!! example "Single-GPU and CPU Training Example"

@@ -59,9 +59,9 @@ Train YOLOv8n on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/
         from ultralytics import YOLO

         # Load a model
-        model = YOLO("yolov8n.yaml")  # build a new model from YAML
-        model = YOLO("yolov8n.pt")  # load a pretrained model (recommended for training)
-        model = YOLO("yolov8n.yaml").load("yolov8n.pt")  # build from YAML and transfer weights
+        model = YOLO("yolo11n.yaml")  # build a new model from YAML
+        model = YOLO("yolo11n.pt")  # load a pretrained model (recommended for training)
+        model = YOLO("yolo11n.yaml").load("yolo11n.pt")  # build from YAML and transfer weights

         # Train the model
         results = model.train(data="coco8.yaml", epochs=100, imgsz=640)
@@ -71,13 +71,13 @@ Train YOLOv8n on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/
         ```bash
         # Build a new model from YAML and start training from scratch
-        yolo detect train data=coco8.yaml model=yolov8n.yaml epochs=100 imgsz=640
+        yolo detect train data=coco8.yaml model=yolo11n.yaml epochs=100 imgsz=640

         # Start training from a pretrained *.pt model
-        yolo detect train data=coco8.yaml model=yolov8n.pt epochs=100 imgsz=640
+        yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640

         # Build a new model from YAML, transfer pretrained weights to it and start training
-        yolo detect train data=coco8.yaml model=yolov8n.yaml pretrained=yolov8n.pt epochs=100 imgsz=640
+        yolo detect train data=coco8.yaml model=yolo11n.yaml pretrained=yolo11n.pt epochs=100 imgsz=640
         ```

 ### Multi-GPU Training

@@ -94,7 +94,7 @@ Multi-GPU training allows for more efficient utilization of available hardware r
         from ultralytics import YOLO

         # Load a model
-        model = YOLO("yolov8n.pt")  # load a pretrained model (recommended for training)
+        model = YOLO("yolo11n.pt")  # load a pretrained model (recommended for training)

         # Train the model with 2 GPUs
         results = model.train(data="coco8.yaml", epochs=100, imgsz=640, device=[0, 1])
@@ -104,7 +104,7 @@ Multi-GPU training allows for more efficient utilization of available hardware r
         ```bash
         # Start training from a pretrained *.pt model using GPUs 0 and 1
-        yolo detect train data=coco8.yaml model=yolov8n.pt epochs=100 imgsz=640 device=0,1
+        yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640 device=0,1
         ```

 ### Apple M1 and M2 MPS Training

@@ -121,7 +121,7 @@ To enable training on Apple M1 and M2 chips, you should specify 'mps' as your de
         from ultralytics import YOLO

         # Load a model
-        model = YOLO("yolov8n.pt")  # load a pretrained model (recommended for training)
+        model = YOLO("yolo11n.pt")  # load a pretrained model (recommended for training)

         # Train the model with MPS
         results = model.train(data="coco8.yaml", epochs=100, imgsz=640, device="mps")
@@ -131,7 +131,7 @@ To enable training on Apple M1 and M2 chips, you should specify 'mps' as your de
         ```bash
         # Start training from a pretrained *.pt model using MPS
-        yolo detect train data=coco8.yaml model=yolov8n.pt epochs=100 imgsz=640 device=mps
+        yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640 device=mps
         ```

 While leveraging the computational power of the M1/M2 chips, this enables more efficient processing of the training tasks. For more detailed guidance and advanced configuration options, please refer to the [PyTorch MPS documentation](https://pytorch.org/docs/stable/notes/mps.html).
@@ -199,7 +199,7 @@ These settings can be adjusted to meet the specific requirements of the dataset

 ## Logging

-In training a YOLOv8 model, you might find it valuable to keep track of the model's performance over time. This is where logging comes into play. Ultralytics' YOLO provides support for three types of loggers - Comet, ClearML, and TensorBoard.
+In training a YOLO11 model, you might find it valuable to keep track of the model's performance over time. This is where logging comes into play. Ultralytics YOLO provides support for three types of loggers: Comet, ClearML, and TensorBoard.

 To use a logger, select it from the dropdown menu in the code snippet above and run it. The chosen logger will be installed and initialized.

@@ -272,9 +272,9 @@ After setting up your logger, you can then proceed with your model training. All

 ## FAQ

-### How do I train an [object detection](https://www.ultralytics.com/glossary/object-detection) model using Ultralytics YOLOv8?
+### How do I train an [object detection](https://www.ultralytics.com/glossary/object-detection) model using Ultralytics YOLO11?

-To train an object detection model using Ultralytics YOLOv8, you can either use the Python API or the CLI. Below is an example for both:
+To train an object detection model using Ultralytics YOLO11, you can either use the Python API or the CLI. Below is an example for both:

 !!! example "Single-GPU and CPU Training Example"

@@ -284,7 +284,7 @@ To train an object detection model using Ultralytics YOLOv8, you can either use
         from ultralytics import YOLO

         # Load a model
-        model = YOLO("yolov8n.pt")  # load a pretrained model (recommended for training)
+        model = YOLO("yolo11n.pt")  # load a pretrained model (recommended for training)

         # Train the model
         results = model.train(data="coco8.yaml", epochs=100, imgsz=640)
@@ -293,14 +293,14 @@ To train an object detection model using Ultralytics YOLOv8, you can either use
     === "CLI"

         ```bash
-        yolo detect train data=coco8.yaml model=yolov8n.pt epochs=100 imgsz=640
+        yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640
         ```

 For more details, refer to the [Train Settings](#train-settings) section.

-### What are the key features of Ultralytics YOLOv8's Train mode?
+### What are the key features of Ultralytics YOLO11's Train mode?

-The key features of Ultralytics YOLOv8's Train mode include:
+The key features of Ultralytics YOLO11's Train mode include:

 - **Automatic Dataset Download:** Automatically downloads standard datasets like COCO, VOC, and ImageNet.
 - **Multi-GPU Support:** Scale training across multiple GPUs for faster processing.
@@ -309,7 +309,7 @@ The key features of Ultralytics YOLOv8's Train mode include:

 These features make training efficient and customizable to your needs. For more details, see the [Key Features of Train Mode](#key-features-of-train-mode) section.

-### How do I resume training from an interrupted session in Ultralytics YOLOv8?
+### How do I resume training from an interrupted session in Ultralytics YOLO11?

 To resume training from an interrupted session, set the `resume` argument to `True` and specify the path to the last saved checkpoint.

@@ -335,9 +335,9 @@ To resume training from an interrupted session, set the `resume` argument to `Tr

 Check the section on [Resuming Interrupted Trainings](#resuming-interrupted-trainings) for more information.

-### Can I train YOLOv8 models on Apple M1 and M2 chips?
+### Can I train YOLO11 models on Apple M1 and M2 chips?
-Yes, Ultralytics YOLOv8 supports training on Apple M1 and M2 chips utilizing the Metal Performance Shaders (MPS) framework. Specify 'mps' as your training device.
+Yes, Ultralytics YOLO11 supports training on Apple M1 and M2 chips utilizing the Metal Performance Shaders (MPS) framework. Specify 'mps' as your training device.

 !!! example "MPS Training Example"

@@ -347,7 +347,7 @@ Yes, Ultralytics YOLOv8 supports training on Apple M1 and M2 chips utilizing the
         from ultralytics import YOLO

         # Load a pretrained model
-        model = YOLO("yolov8n.pt")
+        model = YOLO("yolo11n.pt")

         # Train the model on M1/M2 chip
         results = model.train(data="coco8.yaml", epochs=100, imgsz=640, device="mps")
@@ -356,14 +356,14 @@ Yes, Ultralytics YOLOv8 supports training on Apple M1 and M2 chips utilizing the
     === "CLI"

         ```bash
-        yolo detect train data=coco8.yaml model=yolov8n.pt epochs=100 imgsz=640 device=mps
+        yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640 device=mps
         ```

 For more details, refer to the [Apple M1 and M2 MPS Training](#apple-m1-and-m2-mps-training) section.

 ### What are the common training settings, and how do I configure them?

-Ultralytics YOLOv8 allows you to configure a variety of training settings such as batch size, learning rate, epochs, and more through arguments. Here's a brief overview:
+Ultralytics YOLO11 allows you to configure a variety of training settings such as batch size, learning rate, epochs, and more through arguments. Here's a brief overview:

 | Argument | Default | Description |
 | -------- | ------- | ---------------------------------------------------------------------- |
diff --git a/docs/en/modes/val.md b/docs/en/modes/val.md
index 91eb4c2a87..da275d6316 100644
--- a/docs/en/modes/val.md
+++ b/docs/en/modes/val.md
@@ -1,7 +1,7 @@
 ---
 comments: true
-description: Learn how to validate your YOLOv8 model with precise metrics, easy-to-use tools, and custom settings for optimal performance.
-keywords: Ultralytics, YOLOv8, model validation, machine learning, object detection, mAP metrics, Python API, CLI
+description: Learn how to validate your YOLO11 model with precise metrics, easy-to-use tools, and custom settings for optimal performance.
+keywords: Ultralytics, YOLO11, model validation, machine learning, object detection, mAP metrics, Python API, CLI
 ---

 # Model Validation with Ultralytics YOLO
@@ -10,7 +10,7 @@ keywords: Ultralytics, YOLOv8, model validation, machine learning, object detect

 ## Introduction

-Validation is a critical step in the [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) pipeline, allowing you to assess the quality of your trained models. Val mode in Ultralytics YOLOv8 provides a robust suite of tools and metrics for evaluating the performance of your [object detection](https://www.ultralytics.com/glossary/object-detection) models. This guide serves as a complete resource for understanding how to effectively use the Val mode to ensure that your models are both accurate and reliable.
+Validation is a critical step in the [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) pipeline, allowing you to assess the quality of your trained models. Val mode in Ultralytics YOLO11 provides a robust suite of tools and metrics for evaluating the performance of your [object detection](https://www.ultralytics.com/glossary/object-detection) models. This guide serves as a complete resource for understanding how to effectively use the Val mode to ensure that your models are both accurate and reliable.


@@ -25,7 +25,7 @@ Validation is a critical step in the [machine learning](https://www.ultralytics.

 ## Why Validate with Ultralytics YOLO?

-Here's why using YOLOv8's Val mode is advantageous:
+Here's why using YOLO11's Val mode is advantageous:

 - **Precision:** Get accurate metrics like mAP50, mAP75, and mAP50-95 to comprehensively evaluate your model.
 - **Convenience:** Utilize built-in features that remember training settings, simplifying the validation process.
@@ -34,7 +34,7 @@ Here's why using YOLOv8's Val mode is advantageous:

 ### Key Features of Val Mode

-These are the notable functionalities offered by YOLOv8's Val mode:
+These are the notable functionalities offered by YOLO11's Val mode:

 - **Automated Settings:** Models remember their training configurations for straightforward validation.
 - **Multi-Metric Support:** Evaluate your model based on a range of accuracy metrics.
@@ -43,11 +43,11 @@ These are the notable functionalities offered by YOLOv8's Val mode:

 !!! tip

-    * YOLOv8 models automatically remember their training settings, so you can validate a model at the same image size and on the original dataset easily with just `yolo val model=yolov8n.pt` or `model('yolov8n.pt').val()`
+    * YOLO11 models automatically remember their training settings, so you can validate a model at the same image size and on the original dataset easily with just `yolo val model=yolo11n.pt` or `model('yolo11n.pt').val()`

 ## Usage Examples

-Validate trained YOLOv8n model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the COCO8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. See Arguments section below for a full list of export arguments.
+Validate trained YOLO11n model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the COCO8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. See the Arguments section below for a full list of validation arguments.

 !!! example

@@ -57,7 +57,7 @@ Validate trained YOLOv8n model [accuracy](https://www.ultralytics.com/glossary/a
         from ultralytics import YOLO

         # Load a model
-        model = YOLO("yolov8n.pt")  # load an official model
+        model = YOLO("yolo11n.pt")  # load an official model
         model = YOLO("path/to/best.pt")  # load a custom model

         # Validate the model
@@ -71,7 +71,7 @@ Validate trained YOLOv8n model [accuracy](https://www.ultralytics.com/glossary/a
     === "CLI"

         ```bash
-        yolo detect val model=yolov8n.pt  # val official model
+        yolo detect val model=yolo11n.pt  # val official model
         yolo detect val model=path/to/best.pt  # val custom model
         ```

@@ -95,7 +95,7 @@ The below examples showcase YOLO model validation with custom arguments in Pytho
         from ultralytics import YOLO

         # Load a model
-        model = YOLO("yolov8n.pt")
+        model = YOLO("yolo11n.pt")

         # Customize validation settings
         validation_results = model.val(data="coco8.yaml", imgsz=640, batch=16, conf=0.25, iou=0.6, device="0")
@@ -104,20 +104,20 @@ The below examples showcase YOLO model validation with custom arguments in Pytho
     === "CLI"

         ```bash
-        yolo val model=yolov8n.pt data=coco8.yaml imgsz=640 batch=16 conf=0.25 iou=0.6 device=0
+        yolo val model=yolo11n.pt data=coco8.yaml imgsz=640 batch=16 conf=0.25 iou=0.6 device=0
         ```

 ## FAQ

-### How do I validate my YOLOv8 model with Ultralytics?
+### How do I validate my YOLO11 model with Ultralytics?

-To validate your YOLOv8 model, you can use the Val mode provided by Ultralytics. For example, using the Python API, you can load a model and run validation with:
+To validate your YOLO11 model, you can use the Val mode provided by Ultralytics. For example, using the Python API, you can load a model and run validation with:

 ```python
 from ultralytics import YOLO

 # Load a model
-model = YOLO("yolov8n.pt")
+model = YOLO("yolo11n.pt")

 # Validate the model
 metrics = model.val()
@@ -127,14 +127,14 @@ print(metrics.box.map)  # map50-95

 Alternatively, you can use the command-line interface (CLI):

 ```bash
-yolo val model=yolov8n.pt
+yolo val model=yolo11n.pt
 ```

 For further customization, you can adjust various arguments like `imgsz`, `batch`, and `conf` in both Python and CLI modes. Check the [Arguments for YOLO Model Validation](#arguments-for-yolo-model-validation) section for the full list of parameters.

-### What metrics can I get from YOLOv8 model validation?
+### What metrics can I get from YOLO11 model validation?

-YOLOv8 model validation provides several key metrics to assess model performance. These include:
+YOLO11 model validation provides several key metrics to assess model performance. These include:

 - mAP50 (mean Average Precision at IoU threshold 0.5)
 - mAP75 (mean Average Precision at IoU threshold 0.75)
@@ -156,16 +156,16 @@ For a complete performance evaluation, it's crucial to review all these metrics.

 Using Ultralytics YOLO for validation provides several advantages:

-- **[Precision](https://www.ultralytics.com/glossary/precision):** YOLOv8 offers accurate performance metrics including mAP50, mAP75, and mAP50-95.
+- **[Precision](https://www.ultralytics.com/glossary/precision):** YOLO11 offers accurate performance metrics including mAP50, mAP75, and mAP50-95.
 - **Convenience:** The models remember their training settings, making validation straightforward.
 - **Flexibility:** You can validate against the same or different datasets and image sizes.
 - **Hyperparameter Tuning:** Validation metrics help in fine-tuning models for better performance.

 These benefits ensure that your models are evaluated thoroughly and can be optimized for superior results. Learn more about these advantages in the [Why Validate with Ultralytics YOLO](#why-validate-with-ultralytics-yolo) section.

-### Can I validate my YOLOv8 model using a custom dataset?
+### Can I validate my YOLO11 model using a custom dataset?

-Yes, you can validate your YOLOv8 model using a [custom dataset](https://docs.ultralytics.com/datasets/). Specify the `data` argument with the path to your dataset configuration file. This file should include paths to the [validation data](https://www.ultralytics.com/glossary/validation-data), class names, and other relevant details.
+Yes, you can validate your YOLO11 model using a [custom dataset](https://docs.ultralytics.com/datasets/). Specify the `data` argument with the path to your dataset configuration file. This file should include paths to the [validation data](https://www.ultralytics.com/glossary/validation-data), class names, and other relevant details.
 Example in Python:

@@ -173,7 +173,7 @@
 from ultralytics import YOLO

 # Load a model
-model = YOLO("yolov8n.pt")
+model = YOLO("yolo11n.pt")

 # Validate with a custom dataset
 metrics = model.val(data="path/to/your/custom_dataset.yaml")
@@ -183,12 +183,12 @@ print(metrics.box.map)  # map50-95

 Example using CLI:

 ```bash
-yolo val model=yolov8n.pt data=path/to/your/custom_dataset.yaml
+yolo val model=yolo11n.pt data=path/to/your/custom_dataset.yaml
 ```

 For more customizable options during validation, see the [Example Validation with Arguments](#example-validation-with-arguments) section.

-### How do I save validation results to a JSON file in YOLOv8?
+### How do I save validation results to a JSON file in YOLO11?

 To save the validation results to a JSON file, you can set the `save_json` argument to `True` when running validation. This can be done in both the Python API and CLI.

@@ -198,7 +198,7 @@ Example in Python:
 from ultralytics import YOLO

 # Load a model
-model = YOLO("yolov8n.pt")
+model = YOLO("yolo11n.pt")

 # Save validation results to JSON
 metrics = model.val(save_json=True)
@@ -207,7 +207,7 @@ metrics = model.val(save_json=True)

 Example using CLI:

 ```bash
-yolo val model=yolov8n.pt save_json=True
+yolo val model=yolo11n.pt save_json=True
 ```

 This functionality is particularly useful for further analysis or integration with other tools. Check the [Arguments for YOLO Model Validation](#arguments-for-yolo-model-validation) for more details.
diff --git a/docs/en/quickstart.md b/docs/en/quickstart.md
index 7b5a3a27c0..204623cca4 100644
--- a/docs/en/quickstart.md
+++ b/docs/en/quickstart.md
@@ -1,12 +1,12 @@
 ---
 comments: true
-description: Learn how to install Ultralytics using pip, conda, or Docker. Follow our step-by-step guide for a seamless setup of YOLOv8 with thorough instructions.
-keywords: Ultralytics, YOLOv8, Install Ultralytics, pip, conda, Docker, GitHub, machine learning, object detection
+description: Learn how to install Ultralytics using pip, conda, or Docker. Follow our step-by-step guide for a seamless setup of YOLO with thorough instructions.
+keywords: Ultralytics, YOLO11, Install Ultralytics, pip, conda, Docker, GitHub, machine learning, object detection
 ---

 ## Install Ultralytics

-Ultralytics provides various installation methods including pip, conda, and Docker. Install YOLOv8 via the `ultralytics` pip package for the latest stable release or by cloning the [Ultralytics GitHub repository](https://github.com/ultralytics/ultralytics) for the most up-to-date version. Docker can be used to execute the package in an isolated container, avoiding local installation.
+Ultralytics provides various installation methods including pip, conda, and Docker. Install YOLO via the `ultralytics` pip package for the latest stable release or by cloning the [Ultralytics GitHub repository](https://github.com/ultralytics/ultralytics) for the most up-to-date version. Docker can be used to execute the package in an isolated container, avoiding local installation.
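Whichever install route you choose, you can sanity-check the result with the package's built-in checks helper; the CLI equivalent is `yolo checks`.

```python
import ultralytics

# Prints package version, Python/torch versions, and detected hardware
ultralytics.checks()
```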


@@ -151,7 +151,7 @@ See the `ultralytics` [pyproject.toml](https://github.com/ultralytics/ultralytic

 ## Use Ultralytics with CLI

-The Ultralytics command line interface (CLI) allows for simple single-line commands without the need for a Python environment. CLI requires no customization or Python code. You can simply run all tasks from the terminal with the `yolo` command. Check out the [CLI Guide](usage/cli.md) to learn more about using YOLOv8 from the command line.
+The Ultralytics command line interface (CLI) allows for simple single-line commands without the need for a Python environment. CLI requires no customization or Python code. You can simply run all tasks from the terminal with the `yolo` command. Check out the [CLI Guide](usage/cli.md) to learn more about using YOLO from the command line.

 !!! example

@@ -172,28 +172,28 @@ The Ultralytics command line interface (CLI) allows for simple single-line comma
         Train a detection model for 10 [epochs](https://www.ultralytics.com/glossary/epoch) with an initial learning_rate of 0.01

         ```bash
-        yolo train data=coco8.yaml model=yolov8n.pt epochs=10 lr0=0.01
+        yolo train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01
         ```

     === "Predict"

         Predict a YouTube video using a pretrained segmentation model at image size 320:

         ```bash
-        yolo predict model=yolov8n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320
+        yolo predict model=yolo11n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320
         ```

     === "Val"

         Val a pretrained detection model at batch-size 1 and image size 640:

         ```bash
-        yolo val model=yolov8n.pt data=coco8.yaml batch=1 imgsz=640
+        yolo val model=yolo11n.pt data=coco8.yaml batch=1 imgsz=640
         ```

     === "Export"

-        Export a YOLOv8n classification model to ONNX format at image size 224 by 128 (no TASK required)
+        Export a YOLO11n classification model to ONNX format at image size 224 by 128 (no TASK required)

         ```bash
-        yolo export model=yolov8n-cls.pt format=onnx imgsz=224,128
+        yolo export model=yolo11n-cls.pt format=onnx imgsz=224,128
         ```

     === "Special"

@@ -212,18 +212,18 @@ The Ultralytics command line interface (CLI) allows for simple single-line comma

     Arguments must be passed as `arg=val` pairs, split by an equals `=` sign and delimited by spaces between pairs. Do not use `--` argument prefixes or commas `,` between arguments.

-    - `yolo predict model=yolov8n.pt imgsz=640 conf=0.25` ✅
-    - `yolo predict model yolov8n.pt imgsz 640 conf 0.25` ❌ (missing `=`)
-    - `yolo predict model=yolov8n.pt, imgsz=640, conf=0.25` ❌ (do not use `,`)
-    - `yolo predict --model yolov8n.pt --imgsz 640 --conf 0.25` ❌ (do not use `--`)
+    - `yolo predict model=yolo11n.pt imgsz=640 conf=0.25` ✅
+    - `yolo predict model yolo11n.pt imgsz 640 conf 0.25` ❌ (missing `=`)
+    - `yolo predict model=yolo11n.pt, imgsz=640, conf=0.25` ❌ (do not use `,`)
+    - `yolo predict --model yolo11n.pt --imgsz 640 --conf 0.25` ❌ (do not use `--`)

 [CLI Guide](usage/cli.md){ .md-button }

 ## Use Ultralytics with Python

-YOLOv8's Python interface allows for seamless integration into your Python projects, making it easy to load, run, and process the model's output. Designed with simplicity and ease of use in mind, the Python interface enables users to quickly implement [object detection](https://www.ultralytics.com/glossary/object-detection), segmentation, and classification in their projects. This makes YOLOv8's Python interface an invaluable tool for anyone looking to incorporate these functionalities into their Python projects.
+YOLO's Python interface allows for seamless integration into your Python projects, making it easy to load, run, and process the model's output. Designed with simplicity and ease of use in mind, the Python interface enables users to quickly implement [object detection](https://www.ultralytics.com/glossary/object-detection), segmentation, and classification in their projects. This makes YOLO's Python interface an invaluable tool for anyone looking to incorporate these functionalities into their Python projects.

-For example, users can load a model, train it, evaluate its performance on a validation set, and even export it to ONNX format with just a few lines of code. Check out the [Python Guide](usage/python.md) to learn more about using YOLOv8 within your Python projects.
+For example, users can load a model, train it, evaluate its performance on a validation set, and even export it to ONNX format with just a few lines of code. Check out the [Python Guide](usage/python.md) to learn more about using YOLO within your Python projects.

 !!! example

     ```python
     from ultralytics import YOLO

     # Create a new YOLO model from scratch
-    model = YOLO("yolov8n.yaml")
+    model = YOLO("yolo11n.yaml")

     # Load a pretrained YOLO model (recommended for training)
-    model = YOLO("yolov8n.pt")
+    model = YOLO("yolo11n.pt")

     # Train the model using the 'coco8.yaml' dataset for 3 epochs
     results = model.train(data="coco8.yaml", epochs=3)
@@ -345,9 +345,9 @@ As you navigate through your projects or experiments, be sure to revisit these s

 ## FAQ

-### How do I install Ultralytics YOLOv8 using pip?
+### How do I install Ultralytics using pip?

-To install Ultralytics YOLOv8 with pip, execute the following command:
+To install Ultralytics with pip, execute the following command:

 ```bash
 pip install ultralytics
@@ -363,9 +363,9 @@ pip install git+https://github.com/ultralytics/ultralytics.git

 Make sure to have the Git command-line tool installed on your system.

-### Can I install Ultralytics YOLOv8 using conda?
+### Can I install Ultralytics YOLO using conda?

-Yes, you can install Ultralytics YOLOv8 using conda by running:
+Yes, you can install Ultralytics YOLO using conda by running:

 ```bash
 conda install -c conda-forge ultralytics
@@ -379,9 +379,9 @@ conda install -c pytorch -c nvidia -c conda-forge pytorch torchvision pytorch-cu

 For more instructions, visit the [Conda quickstart guide](guides/conda-quickstart.md).

-### What are the advantages of using Docker to run Ultralytics YOLOv8?
+### What are the advantages of using Docker to run Ultralytics YOLO?

-Using Docker to run Ultralytics YOLOv8 provides an isolated and consistent environment, ensuring smooth performance across different systems. It also eliminates the complexity of local installation. Official Docker images from Ultralytics are available on [Docker Hub](https://hub.docker.com/r/ultralytics/ultralytics), with different variants tailored for GPU, CPU, ARM64, NVIDIA Jetson, and Conda environments. Below are the commands to pull and run the latest image:
+Using Docker to run Ultralytics YOLO provides an isolated and consistent environment, ensuring smooth performance across different systems. It also eliminates the complexity of local installation. Official Docker images from Ultralytics are available on [Docker Hub](https://hub.docker.com/r/ultralytics/ultralytics), with different variants tailored for GPU, CPU, ARM64, NVIDIA Jetson, and Conda environments. Below are the commands to pull and run the latest image:

 ```bash
 # Pull the latest ultralytics image from Docker Hub
@@ -410,9 +410,9 @@ pip install -e .

 This approach allows you to contribute to the project or experiment with the latest source code. For more details, visit the [Ultralytics GitHub repository](https://github.com/ultralytics/ultralytics).

-### Why should I use Ultralytics YOLOv8 CLI?
+### Why should I use Ultralytics YOLO CLI?

-The Ultralytics YOLOv8 command line interface (CLI) simplifies running object detection tasks without requiring Python code. You can execute single-line commands for tasks like training, validation, and prediction straight from your terminal. The basic syntax for `yolo` commands is:
+The Ultralytics YOLO command line interface (CLI) simplifies running object detection tasks without requiring Python code. You can execute single-line commands for tasks like training, validation, and prediction straight from your terminal. The basic syntax for `yolo` commands is:

 ```bash
 yolo TASK MODE ARGS
@@ -421,7 +421,7 @@ yolo TASK MODE ARGS

 For example, to train a detection model with specified parameters:

 ```bash
-yolo train data=coco8.yaml model=yolov8n.pt epochs=10 lr0=0.01
+yolo train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01
 ```

 Check out the full [CLI Guide](usage/cli.md) to explore more commands and usage examples.
diff --git a/docs/en/reference/nn/modules/block.md b/docs/en/reference/nn/modules/block.md
index ed7a94ac75..da0ca655f3 100644
--- a/docs/en/reference/nn/modules/block.md
+++ b/docs/en/reference/nn/modules/block.md
@@ -143,6 +143,18 @@ keywords: Ultralytics, YOLO, neural networks, block modules, DFL, Proto, HGStem,



+## ::: ultralytics.nn.modules.block.C3f
+
+<br><br>
+
+## ::: ultralytics.nn.modules.block.C3k2
+
+<br><br>
+
+## ::: ultralytics.nn.modules.block.C3k
+
+<br><br>
+
 ## ::: ultralytics.nn.modules.block.RepVGGDW
 
 <br><br>
 
@@ -159,10 +171,22 @@ keywords: Ultralytics, YOLO, neural networks, block modules, DFL, Proto, HGStem,
 
 <br><br>
 
+## ::: ultralytics.nn.modules.block.PSABlock
+
+<br><br>
+
 ## ::: ultralytics.nn.modules.block.PSA
 
 <br><br>
 
+## ::: ultralytics.nn.modules.block.C2PSA
+
+<br><br>
+
+## ::: ultralytics.nn.modules.block.C2fPSA
+
+<br><br>
+
 ## ::: ultralytics.nn.modules.block.SCDown

diff --git a/docs/en/solutions/index.md b/docs/en/solutions/index.md index 52423c14f5..e5187ed8d4 100644 --- a/docs/en/solutions/index.md +++ b/docs/en/solutions/index.md @@ -1,12 +1,12 @@ --- comments: true -description: Explore Ultralytics Solutions using YOLOv8 for object counting, blurring, security, and more. Enhance efficiency and solve real-world problems with cutting-edge AI. -keywords: Ultralytics, YOLOv8, object counting, object blurring, security systems, AI solutions, real-time analysis, computer vision applications +description: Explore Ultralytics Solutions using YOLO11 for object counting, blurring, security, and more. Enhance efficiency and solve real-world problems with cutting-edge AI. +keywords: Ultralytics, YOLO11, object counting, object blurring, security systems, AI solutions, real-time analysis, computer vision applications --- -# Ultralytics Solutions: Harness YOLOv8 to Solve Real-World Problems +# Ultralytics Solutions: Harness YOLO11 to Solve Real-World Problems -Ultralytics Solutions provide cutting-edge applications of YOLO models, offering real-world solutions like object counting, blurring, and security systems, enhancing efficiency and [accuracy](https://www.ultralytics.com/glossary/accuracy) in diverse industries. Discover the power of YOLOv8 for practical, impactful implementations. +Ultralytics Solutions provide cutting-edge applications of YOLO models, offering real-world solutions like object counting, blurring, and security systems, enhancing efficiency and [accuracy](https://www.ultralytics.com/glossary/accuracy) in diverse industries. Discover the power of YOLO11 for practical, impactful implementations. ![Ultralytics Solutions Thumbnail](https://github.com/ultralytics/docs/releases/download/0/ultralytics-solutions-thumbnail.avif) @@ -14,21 +14,21 @@ Ultralytics Solutions provide cutting-edge applications of YOLO models, offering Here's our curated list of Ultralytics solutions that can be used to create awesome [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) projects. -- [Object Counting](../guides/object-counting.md) 🚀 NEW: Learn to perform real-time object counting with YOLOv8. Gain the expertise to accurately count objects in live video streams. -- [Object Cropping](../guides/object-cropping.md) 🚀 NEW: Master object cropping with YOLOv8 for precise extraction of objects from images and videos. -- [Object Blurring](../guides/object-blurring.md) 🚀 NEW: Apply object blurring using YOLOv8 to protect privacy in image and video processing. -- [Workouts Monitoring](../guides/workouts-monitoring.md) 🚀 NEW: Discover how to monitor workouts using YOLOv8. Learn to track and analyze various fitness routines in real time. -- [Objects Counting in Regions](../guides/region-counting.md) 🚀 NEW: Count objects in specific regions using YOLOv8 for accurate detection in varied areas. -- [Security Alarm System](../guides/security-alarm-system.md) 🚀 NEW: Create a security alarm system with YOLOv8 that triggers alerts upon detecting new objects. Customize the system to fit your specific needs. +- [Object Counting](../guides/object-counting.md) 🚀 NEW: Learn to perform real-time object counting with YOLO11. Gain the expertise to accurately count objects in live video streams. +- [Object Cropping](../guides/object-cropping.md) 🚀 NEW: Master object cropping with YOLO11 for precise extraction of objects from images and videos. 
+- [Object Blurring](../guides/object-blurring.md) 🚀 NEW: Apply object blurring using YOLO11 to protect privacy in image and video processing. +- [Workouts Monitoring](../guides/workouts-monitoring.md) 🚀 NEW: Discover how to monitor workouts using YOLO11. Learn to track and analyze various fitness routines in real time. +- [Objects Counting in Regions](../guides/region-counting.md) 🚀 NEW: Count objects in specific regions using YOLO11 for accurate detection in varied areas. +- [Security Alarm System](../guides/security-alarm-system.md) 🚀 NEW: Create a security alarm system with YOLO11 that triggers alerts upon detecting new objects. Customize the system to fit your specific needs. - [Heatmaps](../guides/heatmaps.md) 🚀 NEW: Utilize detection heatmaps to visualize data intensity across a matrix, providing clear insights in computer vision tasks. -- [Instance Segmentation with Object Tracking](../guides/instance-segmentation-and-tracking.md) 🚀 NEW: Implement [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation) and object tracking with YOLOv8 to achieve precise object boundaries and continuous monitoring. +- [Instance Segmentation with Object Tracking](../guides/instance-segmentation-and-tracking.md) 🚀 NEW: Implement [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation) and object tracking with YOLO11 to achieve precise object boundaries and continuous monitoring. - [VisionEye View Objects Mapping](../guides/vision-eye.md) 🚀 NEW: Develop systems that mimic human eye focus on specific objects, enhancing the computer's ability to discern and prioritize details. -- [Speed Estimation](../guides/speed-estimation.md) 🚀 NEW: Estimate object speed using YOLOv8 and object tracking techniques, crucial for applications like autonomous vehicles and traffic monitoring. -- [Distance Calculation](../guides/distance-calculation.md) 🚀 NEW: Calculate distances between objects using [bounding box](https://www.ultralytics.com/glossary/bounding-box) centroids in YOLOv8, essential for spatial analysis. -- [Queue Management](../guides/queue-management.md) 🚀 NEW: Implement efficient queue management systems to minimize wait times and improve productivity using YOLOv8. -- [Parking Management](../guides/parking-management.md) 🚀 NEW: Organize and direct vehicle flow in parking areas with YOLOv8, optimizing space utilization and user experience. -- [Analytics](../guides/analytics.md) 📊 NEW: Conduct comprehensive data analysis to discover patterns and make informed decisions, leveraging YOLOv8 for descriptive, predictive, and prescriptive analytics. -- [Live Inference with Streamlit](../guides/streamlit-live-inference.md) 🚀 NEW: Leverage the power of YOLOv8 for real-time [object detection](https://www.ultralytics.com/glossary/object-detection) directly through your web browser with a user-friendly Streamlit interface. +- [Speed Estimation](../guides/speed-estimation.md) 🚀 NEW: Estimate object speed using YOLO11 and object tracking techniques, crucial for applications like autonomous vehicles and traffic monitoring. +- [Distance Calculation](../guides/distance-calculation.md) 🚀 NEW: Calculate distances between objects using [bounding box](https://www.ultralytics.com/glossary/bounding-box) centroids in YOLO11, essential for spatial analysis. +- [Queue Management](../guides/queue-management.md) 🚀 NEW: Implement efficient queue management systems to minimize wait times and improve productivity using YOLO11. 
+- [Parking Management](../guides/parking-management.md) 🚀 NEW: Organize and direct vehicle flow in parking areas with YOLO11, optimizing space utilization and user experience. +- [Analytics](../guides/analytics.md) 📊 NEW: Conduct comprehensive data analysis to discover patterns and make informed decisions, leveraging YOLO11 for descriptive, predictive, and prescriptive analytics. +- [Live Inference with Streamlit](../guides/streamlit-live-inference.md) 🚀 NEW: Leverage the power of YOLO11 for real-time [object detection](https://www.ultralytics.com/glossary/object-detection) directly through your web browser with a user-friendly Streamlit interface. ## Contribute to Our Solutions @@ -42,20 +42,20 @@ Let's work together to make the Ultralytics YOLO ecosystem more robust and versa ### How can I use Ultralytics YOLO for real-time object counting? -Ultralytics YOLOv8 can be used for real-time object counting by leveraging its advanced object detection capabilities. You can follow our detailed guide on [Object Counting](../guides/object-counting.md) to set up YOLOv8 for live video stream analysis. Simply install YOLOv8, load your model, and process video frames to count objects dynamically. +Ultralytics YOLO11 can be used for real-time object counting by leveraging its advanced object detection capabilities. You can follow our detailed guide on [Object Counting](../guides/object-counting.md) to set up YOLO11 for live video stream analysis. Simply install YOLO11, load your model, and process video frames to count objects dynamically. ### What are the benefits of using Ultralytics YOLO for security systems? -Ultralytics YOLOv8 enhances security systems by offering real-time object detection and alert mechanisms. By employing YOLOv8, you can create a security alarm system that triggers alerts when new objects are detected in the surveillance area. Learn how to set up a [Security Alarm System](../guides/security-alarm-system.md) with YOLOv8 for robust security monitoring. +Ultralytics YOLO11 enhances security systems by offering real-time object detection and alert mechanisms. By employing YOLO11, you can create a security alarm system that triggers alerts when new objects are detected in the surveillance area. Learn how to set up a [Security Alarm System](../guides/security-alarm-system.md) with YOLO11 for robust security monitoring. ### How can Ultralytics YOLO improve queue management systems? -Ultralytics YOLOv8 can significantly improve queue management systems by accurately counting and tracking people in queues, thus helping to reduce wait times and optimize service efficiency. Follow our detailed guide on [Queue Management](../guides/queue-management.md) to learn how to implement YOLOv8 for effective queue monitoring and analysis. +Ultralytics YOLO11 can significantly improve queue management systems by accurately counting and tracking people in queues, thus helping to reduce wait times and optimize service efficiency. Follow our detailed guide on [Queue Management](../guides/queue-management.md) to learn how to implement YOLO11 for effective queue monitoring and analysis. ### Can Ultralytics YOLO be used for workout monitoring? -Yes, Ultralytics YOLOv8 can be effectively used for monitoring workouts by tracking and analyzing fitness routines in real-time. This allows for precise evaluation of exercise form and performance. Explore our guide on [Workouts Monitoring](../guides/workouts-monitoring.md) to learn how to set up an AI-powered workout monitoring system using YOLOv8. 
+Yes, Ultralytics YOLO11 can be effectively used for monitoring workouts by tracking and analyzing fitness routines in real-time. This allows for precise evaluation of exercise form and performance. Explore our guide on [Workouts Monitoring](../guides/workouts-monitoring.md) to learn how to set up an AI-powered workout monitoring system using YOLO11. ### How does Ultralytics YOLO help in creating heatmaps for [data visualization](https://www.ultralytics.com/glossary/data-visualization)? -Ultralytics YOLOv8 can generate heatmaps to visualize data intensity across a given area, highlighting regions of high activity or interest. This feature is particularly useful in understanding patterns and trends in various computer vision tasks. Learn more about creating and using [Heatmaps](../guides/heatmaps.md) with YOLOv8 for comprehensive data analysis and visualization. +Ultralytics YOLO11 can generate heatmaps to visualize data intensity across a given area, highlighting regions of high activity or interest. This feature is particularly useful in understanding patterns and trends in various computer vision tasks. Learn more about creating and using [Heatmaps](../guides/heatmaps.md) with YOLO11 for comprehensive data analysis and visualization. diff --git a/docs/en/tasks/classify.md b/docs/en/tasks/classify.md index 62623a403f..7da8a3e7e7 100644 --- a/docs/en/tasks/classify.md +++ b/docs/en/tasks/classify.md @@ -1,8 +1,8 @@ --- comments: true -description: Master image classification using YOLOv8. Learn to train, validate, predict, and export models efficiently. -keywords: YOLOv8, image classification, AI, machine learning, pretrained models, ImageNet, model export, predict, train, validate -model_name: yolov8n-cls +description: Master image classification using YOLO11. Learn to train, validate, predict, and export models efficiently. +keywords: YOLO11, image classification, AI, machine learning, pretrained models, ImageNet, model export, predict, train, validate +model_name: yolo11n-cls --- # Image Classification @@ -26,28 +26,22 @@ The output of an image classifier is a single class label and a confidence score !!! tip - YOLOv8 Classify models use the `-cls` suffix, i.e. `yolov8n-cls.pt` and are pretrained on [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml). + YOLO11 Classify models use the `-cls` suffix, i.e. `yolo11n-cls.pt` and are pretrained on [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml). -## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/v8) +## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/11) -YOLOv8 pretrained Classify models are shown here. Detect, Segment and Pose models are pretrained on the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml) dataset, while Classify models are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset. +YOLO11 pretrained Classify models are shown here. Detect, Segment and Pose models are pretrained on the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml) dataset, while Classify models are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset. 
[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.

-| Model                                                                                        | size<br>(pixels) | acc<br>top1 | acc<br>top5 | Speed<br>CPU ONNX<br>(ms) | Speed<br>A100 TensorRT<br>(ms) | params<br>(M) | FLOPs<br>(B) at 640 |
-| -------------------------------------------------------------------------------------------- | ---------------- | ----------- | ----------- | ------------------------- | ------------------------------ | ------------- | ------------------- |
-| [YOLOv8n-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-cls.pt) | 224              | 69.0        | 88.3        | 12.9                      | 0.31                           | 2.7           | 4.3                 |
-| [YOLOv8s-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-cls.pt) | 224              | 73.8        | 91.7        | 23.4                      | 0.35                           | 6.4           | 13.5                |
-| [YOLOv8m-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-cls.pt) | 224              | 76.8        | 93.5        | 85.4                      | 0.62                           | 17.0          | 42.7                |
-| [YOLOv8l-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-cls.pt) | 224              | 76.8        | 93.5        | 163.0                     | 0.87                           | 37.5          | 99.7                |
-| [YOLOv8x-cls](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-cls.pt) | 224              | 79.0        | 94.6        | 232.0                     | 1.01                           | 57.4          | 154.8               |
+{% include "macros/yolo-cls-perf.md" %}

- **acc** values are model accuracies on the [ImageNet](https://www.image-net.org/) dataset validation set.<br>
Reproduce by `yolo val classify data=path/to/ImageNet device=0`
- **Speed** averaged over ImageNet val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.<br>
Reproduce by `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu` ## Train -Train YOLOv8n-cls on the MNIST160 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 64. For a full list of available arguments see the [Configuration](../usage/cfg.md) page. +Train YOLO11n-cls on the MNIST160 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 64. For a full list of available arguments see the [Configuration](../usage/cfg.md) page. !!! example @@ -57,9 +51,9 @@ Train YOLOv8n-cls on the MNIST160 dataset for 100 [epochs](https://www.ultralyti from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.yaml") # build a new model from YAML - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) - model = YOLO("yolov8n-cls.yaml").load("yolov8n-cls.pt") # build from YAML and transfer weights + model = YOLO("yolo11n-cls.yaml") # build a new model from YAML + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.yaml").load("yolo11n-cls.pt") # build from YAML and transfer weights # Train the model results = model.train(data="mnist160", epochs=100, imgsz=64) @@ -69,13 +63,13 @@ Train YOLOv8n-cls on the MNIST160 dataset for 100 [epochs](https://www.ultralyti ```bash # Build a new model from YAML and start training from scratch - yolo classify train data=mnist160 model=yolov8n-cls.yaml epochs=100 imgsz=64 + yolo classify train data=mnist160 model=yolo11n-cls.yaml epochs=100 imgsz=64 # Start training from a pretrained *.pt model - yolo classify train data=mnist160 model=yolov8n-cls.pt epochs=100 imgsz=64 + yolo classify train data=mnist160 model=yolo11n-cls.pt epochs=100 imgsz=64 # Build a new model from YAML, transfer pretrained weights to it and start training - yolo classify train data=mnist160 model=yolov8n-cls.yaml pretrained=yolov8n-cls.pt epochs=100 imgsz=64 + yolo classify train data=mnist160 model=yolo11n-cls.yaml pretrained=yolo11n-cls.pt epochs=100 imgsz=64 ``` ### Dataset format @@ -84,7 +78,7 @@ YOLO classification dataset format can be found in detail in the [Dataset Guide] ## Val -Validate trained YOLOv8n-cls model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the MNIST160 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. +Validate trained YOLO11n-cls model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the MNIST160 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. !!! example @@ -94,7 +88,7 @@ Validate trained YOLOv8n-cls model [accuracy](https://www.ultralytics.com/glossa from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load an official model + model = YOLO("yolo11n-cls.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom model # Validate the model @@ -106,13 +100,13 @@ Validate trained YOLOv8n-cls model [accuracy](https://www.ultralytics.com/glossa === "CLI" ```bash - yolo classify val model=yolov8n-cls.pt # val official model + yolo classify val model=yolo11n-cls.pt # val official model yolo classify val model=path/to/best.pt # val custom model ``` ## Predict -Use a trained YOLOv8n-cls model to run predictions on images. +Use a trained YOLO11n-cls model to run predictions on images. !!! example @@ -122,7 +116,7 @@ Use a trained YOLOv8n-cls model to run predictions on images. 
from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load an official model + model = YOLO("yolo11n-cls.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom model # Predict with the model @@ -132,7 +126,7 @@ Use a trained YOLOv8n-cls model to run predictions on images. === "CLI" ```bash - yolo classify predict model=yolov8n-cls.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model + yolo classify predict model=yolo11n-cls.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model yolo classify predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model ``` @@ -140,7 +134,7 @@ See full `predict` mode details in the [Predict](../modes/predict.md) page. ## Export -Export a YOLOv8n-cls model to a different format like ONNX, CoreML, etc. +Export a YOLO11n-cls model to a different format like ONNX, CoreML, etc. !!! example @@ -150,7 +144,7 @@ Export a YOLOv8n-cls model to a different format like ONNX, CoreML, etc. from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load an official model + model = YOLO("yolo11n-cls.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom trained model # Export the model @@ -160,11 +154,11 @@ Export a YOLOv8n-cls model to a different format like ONNX, CoreML, etc. === "CLI" ```bash - yolo export model=yolov8n-cls.pt format=onnx # export official model + yolo export model=yolo11n-cls.pt format=onnx # export official model yolo export model=path/to/best.pt format=onnx # export custom trained model ``` -Available YOLOv8-cls export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolov8n-cls.onnx`. Usage examples are shown for your model after export completes. +Available YOLO11-cls export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolo11n-cls.onnx`. Usage examples are shown for your model after export completes. {% include "macros/export-table.md" %} @@ -172,13 +166,13 @@ See full `export` details in the [Export](../modes/export.md) page. ## FAQ -### What is the purpose of YOLOv8 in image classification? +### What is the purpose of YOLO11 in image classification? -YOLOv8 models, such as `yolov8n-cls.pt`, are designed for efficient image classification. They assign a single class label to an entire image along with a confidence score. This is particularly useful for applications where knowing the specific class of an image is sufficient, rather than identifying the location or shape of objects within the image. +YOLO11 models, such as `yolo11n-cls.pt`, are designed for efficient image classification. They assign a single class label to an entire image along with a confidence score. This is particularly useful for applications where knowing the specific class of an image is sufficient, rather than identifying the location or shape of objects within the image. -### How do I train a YOLOv8 model for image classification? +### How do I train a YOLO11 model for image classification? -To train a YOLOv8 model, you can use either Python or CLI commands. 
For example, to train a `yolov8n-cls` model on the MNIST160 dataset for 100 epochs at an image size of 64: +To train a YOLO11 model, you can use either Python or CLI commands. For example, to train a `yolo11n-cls` model on the MNIST160 dataset for 100 epochs at an image size of 64: !!! example @@ -188,7 +182,7 @@ To train a YOLOv8 model, you can use either Python or CLI commands. For example, from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="mnist160", epochs=100, imgsz=64) @@ -197,18 +191,18 @@ To train a YOLOv8 model, you can use either Python or CLI commands. For example, === "CLI" ```bash - yolo classify train data=mnist160 model=yolov8n-cls.pt epochs=100 imgsz=64 + yolo classify train data=mnist160 model=yolo11n-cls.pt epochs=100 imgsz=64 ``` For more configuration options, visit the [Configuration](../usage/cfg.md) page. -### Where can I find pretrained YOLOv8 classification models? +### Where can I find pretrained YOLO11 classification models? -Pretrained YOLOv8 classification models can be found in the [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/v8) section. Models like `yolov8n-cls.pt`, `yolov8s-cls.pt`, `yolov8m-cls.pt`, etc., are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset and can be easily downloaded and used for various image classification tasks. +Pretrained YOLO11 classification models can be found in the [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/11) section. Models like `yolo11n-cls.pt`, `yolo11s-cls.pt`, `yolo11m-cls.pt`, etc., are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset and can be easily downloaded and used for various image classification tasks. -### How can I export a trained YOLOv8 model to different formats? +### How can I export a trained YOLO11 model to different formats? -You can export a trained YOLOv8 model to various formats using Python or CLI commands. For instance, to export a model to ONNX format: +You can export a trained YOLO11 model to various formats using Python or CLI commands. For instance, to export a model to ONNX format: !!! example @@ -218,7 +212,7 @@ You can export a trained YOLOv8 model to various formats using Python or CLI com from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load the trained model + model = YOLO("yolo11n-cls.pt") # load the trained model # Export the model to ONNX model.export(format="onnx") @@ -227,12 +221,12 @@ You can export a trained YOLOv8 model to various formats using Python or CLI com === "CLI" ```bash - yolo export model=yolov8n-cls.pt format=onnx # export the trained model to ONNX format + yolo export model=yolo11n-cls.pt format=onnx # export the trained model to ONNX format ``` For detailed export options, refer to the [Export](../modes/export.md) page. -### How do I validate a trained YOLOv8 classification model? +### How do I validate a trained YOLO11 classification model? 
To validate a trained model's accuracy on a dataset like MNIST160, you can use the following Python or CLI commands: @@ -244,7 +238,7 @@ To validate a trained model's accuracy on a dataset like MNIST160, you can use t from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load the trained model + model = YOLO("yolo11n-cls.pt") # load the trained model # Validate the model metrics = model.val() # no arguments needed, uses the dataset and settings from training @@ -255,7 +249,7 @@ To validate a trained model's accuracy on a dataset like MNIST160, you can use t === "CLI" ```bash - yolo classify val model=yolov8n-cls.pt # validate the trained model + yolo classify val model=yolo11n-cls.pt # validate the trained model ``` For more information, visit the [Validate](#val) section. diff --git a/docs/en/tasks/detect.md b/docs/en/tasks/detect.md index a9c54889bc..68c53f075b 100644 --- a/docs/en/tasks/detect.md +++ b/docs/en/tasks/detect.md @@ -1,7 +1,7 @@ --- comments: true -description: Learn about object detection with YOLOv8. Explore pretrained models, training, validation, prediction, and export details for efficient object recognition. -keywords: object detection, YOLOv8, pretrained models, training, validation, prediction, export, machine learning, computer vision +description: Learn about object detection with YOLO11. Explore pretrained models, training, validation, prediction, and export details for efficient object recognition. +keywords: object detection, YOLO11, pretrained models, training, validation, prediction, export, machine learning, computer vision --- # Object Detection @@ -20,33 +20,27 @@ The output of an object detector is a set of bounding boxes that enclose the obj
- Watch: Object Detection with Pre-trained Ultralytics YOLOv8 Model. + Watch: Object Detection with Pre-trained Ultralytics YOLO Model.

!!! tip
-    YOLOv8 Detect models are the default YOLOv8 models, i.e. `yolov8n.pt` and are pretrained on [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml).
+    YOLO11 Detect models are the default YOLO11 models, i.e. `yolo11n.pt` and are pretrained on [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml).

-## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/v8)
+## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/11)

-YOLOv8 pretrained Detect models are shown here. Detect, Segment and Pose models are pretrained on the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml) dataset, while Classify models are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset.
+YOLO11 pretrained Detect models are shown here. Detect, Segment and Pose models are pretrained on the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml) dataset, while Classify models are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset.

[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.

-| Model                                                                                | size<br>(pixels) | mAP<sup>val</sup><br>50-95 | Speed<br>CPU ONNX<br>(ms) | Speed<br>A100 TensorRT<br>(ms) | params<br>(M) | FLOPs<br>(B) |
-| ------------------------------------------------------------------------------------ | ---------------- | -------------------------- | ------------------------- | ------------------------------ | ------------- | ------------ |
-| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt) | 640              | 37.3                       | 80.4                      | 0.99                           | 3.2           | 8.7          |
-| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s.pt) | 640              | 44.9                       | 128.4                     | 1.20                           | 11.2          | 28.6         |
-| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m.pt) | 640              | 50.2                       | 234.7                     | 1.83                           | 25.9          | 78.9         |
-| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l.pt) | 640              | 52.9                       | 375.2                     | 2.39                           | 43.7          | 165.2        |
-| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x.pt) | 640              | 53.9                       | 479.1                     | 3.53                           | 68.2          | 257.8        |
+{% include "macros/yolo-det-perf.md" %}

- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](https://cocodataset.org/) dataset.<br>
Reproduce by `yolo val detect data=coco.yaml device=0`
-- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.<br>Reproduce by `yolo val detect data=coco8.yaml batch=1 device=0|cpu`
+- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.<br>
Reproduce by `yolo val detect data=coco.yaml batch=1 device=0|cpu` ## Train -Train YOLOv8n on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 640. For a full list of available arguments see the [Configuration](../usage/cfg.md) page. +Train YOLO11n on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 640. For a full list of available arguments see the [Configuration](../usage/cfg.md) page. !!! example @@ -56,9 +50,9 @@ Train YOLOv8n on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/ from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.yaml") # build a new model from YAML - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) - model = YOLO("yolov8n.yaml").load("yolov8n.pt") # build from YAML and transfer weights + model = YOLO("yolo11n.yaml") # build a new model from YAML + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.yaml").load("yolo11n.pt") # build from YAML and transfer weights # Train the model results = model.train(data="coco8.yaml", epochs=100, imgsz=640) @@ -68,13 +62,13 @@ Train YOLOv8n on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/ ```bash # Build a new model from YAML and start training from scratch - yolo detect train data=coco8.yaml model=yolov8n.yaml epochs=100 imgsz=640 + yolo detect train data=coco8.yaml model=yolo11n.yaml epochs=100 imgsz=640 # Start training from a pretrained *.pt model - yolo detect train data=coco8.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640 # Build a new model from YAML, transfer pretrained weights to it and start training - yolo detect train data=coco8.yaml model=yolov8n.yaml pretrained=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=coco8.yaml model=yolo11n.yaml pretrained=yolo11n.pt epochs=100 imgsz=640 ``` ### Dataset format @@ -83,7 +77,7 @@ YOLO detection dataset format can be found in detail in the [Dataset Guide](../d ## Val -Validate trained YOLOv8n model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the COCO8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. +Validate trained YOLO11n model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the COCO8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. !!! example @@ -93,7 +87,7 @@ Validate trained YOLOv8n model [accuracy](https://www.ultralytics.com/glossary/a from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load an official model + model = YOLO("yolo11n.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom model # Validate the model @@ -107,13 +101,13 @@ Validate trained YOLOv8n model [accuracy](https://www.ultralytics.com/glossary/a === "CLI" ```bash - yolo detect val model=yolov8n.pt # val official model + yolo detect val model=yolo11n.pt # val official model yolo detect val model=path/to/best.pt # val custom model ``` ## Predict -Use a trained YOLOv8n model to run predictions on images. +Use a trained YOLO11n model to run predictions on images. !!! example @@ -123,7 +117,7 @@ Use a trained YOLOv8n model to run predictions on images. 
from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load an official model + model = YOLO("yolo11n.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom model # Predict with the model @@ -133,7 +127,7 @@ Use a trained YOLOv8n model to run predictions on images. === "CLI" ```bash - yolo detect predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model + yolo detect predict model=yolo11n.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model yolo detect predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model ``` @@ -141,7 +135,7 @@ See full `predict` mode details in the [Predict](../modes/predict.md) page. ## Export -Export a YOLOv8n model to a different format like ONNX, CoreML, etc. +Export a YOLO11n model to a different format like ONNX, CoreML, etc. !!! example @@ -151,7 +145,7 @@ Export a YOLOv8n model to a different format like ONNX, CoreML, etc. from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load an official model + model = YOLO("yolo11n.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom trained model # Export the model @@ -161,11 +155,11 @@ Export a YOLOv8n model to a different format like ONNX, CoreML, etc. === "CLI" ```bash - yolo export model=yolov8n.pt format=onnx # export official model + yolo export model=yolo11n.pt format=onnx # export official model yolo export model=path/to/best.pt format=onnx # export custom trained model ``` -Available YOLOv8 export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolov8n.onnx`. Usage examples are shown for your model after export completes. +Available YOLO11 export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolo11n.onnx`. Usage examples are shown for your model after export completes. {% include "macros/export-table.md" %} @@ -173,9 +167,9 @@ See full `export` details in the [Export](../modes/export.md) page. ## FAQ -### How do I train a YOLOv8 model on my custom dataset? +### How do I train a YOLO11 model on my custom dataset? -Training a YOLOv8 model on a custom dataset involves a few steps: +Training a YOLO11 model on a custom dataset involves a few steps: 1. **Prepare the Dataset**: Ensure your dataset is in the YOLO format. For guidance, refer to our [Dataset Guide](../datasets/detect/index.md). 2. **Load the Model**: Use the Ultralytics YOLO library to load a pre-trained model or create a new model from a YAML file. @@ -189,7 +183,7 @@ Training a YOLOv8 model on a custom dataset involves a few steps: from ultralytics import YOLO # Load a pretrained model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Train the model on your custom dataset model.train(data="my_custom_dataset.yaml", epochs=100, imgsz=640) @@ -198,26 +192,26 @@ Training a YOLOv8 model on a custom dataset involves a few steps: === "CLI" ```bash - yolo detect train data=my_custom_dataset.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=my_custom_dataset.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` For detailed configuration options, visit the [Configuration](../usage/cfg.md) page. 
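If it helps to see the whole loop in one place, here is a minimal sketch of the train-then-validate flow described above. `my_custom_dataset.yaml` is the same placeholder dataset file used in the steps above, and the `metrics.box` attributes follow the convention shown in the validation examples:

```python
from ultralytics import YOLO

# Start from pretrained detection weights
model = YOLO("yolo11n.pt")

# Train on the custom dataset described by the YAML placeholder above
model.train(data="my_custom_dataset.yaml", epochs=100, imgsz=640)

# Validate on the dataset's val split; data and settings are remembered from training
metrics = model.val()
print(metrics.box.map)  # mAP50-95
print(metrics.box.map50)  # mAP50
```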
-### What pretrained models are available in YOLOv8? +### What pretrained models are available in YOLO11? -Ultralytics YOLOv8 offers various pretrained models for object detection, segmentation, and pose estimation. These models are pretrained on the COCO dataset or ImageNet for classification tasks. Here are some of the available models: +Ultralytics YOLO11 offers various pretrained models for object detection, segmentation, and pose estimation. These models are pretrained on the COCO dataset or ImageNet for classification tasks. Here are some of the available models: -- [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt) -- [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s.pt) -- [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m.pt) -- [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l.pt) -- [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x.pt) +- [YOLO11n](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt) +- [YOLO11s](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s.pt) +- [YOLO11m](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m.pt) +- [YOLO11l](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l.pt) +- [YOLO11x](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x.pt) -For a detailed list and performance metrics, refer to the [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/v8) section. +For a detailed list and performance metrics, refer to the [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/11) section. -### How can I validate the accuracy of my trained YOLOv8 model? +### How can I validate the accuracy of my trained YOLO model? -To validate the accuracy of your trained YOLOv8 model, you can use the `.val()` method in Python or the `yolo detect val` command in CLI. This will provide metrics like mAP50-95, mAP50, and more. +To validate the accuracy of your trained YOLO11 model, you can use the `.val()` method in Python or the `yolo detect val` command in CLI. This will provide metrics like mAP50-95, mAP50, and more. !!! example @@ -242,9 +236,9 @@ To validate the accuracy of your trained YOLOv8 model, you can use the `.val()` For more validation details, visit the [Val](../modes/val.md) page. -### What formats can I export a YOLOv8 model to? +### What formats can I export a YOLO11 model to? -Ultralytics YOLOv8 allows exporting models to various formats such as ONNX, TensorRT, CoreML, and more to ensure compatibility across different platforms and devices. +Ultralytics YOLO11 allows exporting models to various formats such as ONNX, TensorRT, CoreML, and more to ensure compatibility across different platforms and devices. !!! example @@ -254,7 +248,7 @@ Ultralytics YOLOv8 allows exporting models to various formats such as ONNX, Tens from ultralytics import YOLO # Load the model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Export the model to ONNX format model.export(format="onnx") @@ -263,18 +257,18 @@ Ultralytics YOLOv8 allows exporting models to various formats such as ONNX, Tens === "CLI" ```bash - yolo export model=yolov8n.pt format=onnx + yolo export model=yolo11n.pt format=onnx ``` Check the full list of supported formats and instructions on the [Export](../modes/export.md) page. 
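Since exported models can be used directly for prediction or validation, a short sketch of the full round trip may help; the sample image URL is the same one used elsewhere in these docs, and the returned export path is assumed to point at the new ONNX file:

```python
from ultralytics import YOLO

# Export pretrained weights to ONNX; export() returns the path of the exported file
model = YOLO("yolo11n.pt")
onnx_path = model.export(format="onnx")

# Load the exported model and run inference with it directly
onnx_model = YOLO(onnx_path)
results = onnx_model("https://ultralytics.com/images/bus.jpg")
results[0].show()
```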
-### Why should I use Ultralytics YOLOv8 for object detection? +### Why should I use Ultralytics YOLO11 for object detection? -Ultralytics YOLOv8 is designed to offer state-of-the-art performance for object detection, segmentation, and pose estimation. Here are some key advantages: +Ultralytics YOLO11 is designed to offer state-of-the-art performance for object detection, segmentation, and pose estimation. Here are some key advantages: 1. **Pretrained Models**: Utilize models pretrained on popular datasets like COCO and ImageNet for faster development. 2. **High Accuracy**: Achieves impressive mAP scores, ensuring reliable object detection. 3. **Speed**: Optimized for real-time inference, making it ideal for applications requiring swift processing. 4. **Flexibility**: Export models to various formats like ONNX and TensorRT for deployment across multiple platforms. -Explore our [Blog](https://www.ultralytics.com/blog) for use cases and success stories showcasing YOLOv8 in action. +Explore our [Blog](https://www.ultralytics.com/blog) for use cases and success stories showcasing YOLO11 in action. diff --git a/docs/en/tasks/index.md b/docs/en/tasks/index.md index 016a1ca9b9..b31b96c447 100644 --- a/docs/en/tasks/index.md +++ b/docs/en/tasks/index.md @@ -1,15 +1,15 @@ --- comments: true -description: Explore Ultralytics YOLOv8 for detection, segmentation, classification, OBB, and pose estimation with high accuracy and speed. Learn how to apply each task. -keywords: Ultralytics YOLOv8, detection, segmentation, classification, oriented object detection, pose estimation, computer vision, AI framework +description: Explore Ultralytics YOLO11 for detection, segmentation, classification, OBB, and pose estimation with high accuracy and speed. Learn how to apply each task. +keywords: Ultralytics YOLO11, detection, segmentation, classification, oriented object detection, pose estimation, computer vision, AI framework --- -# Ultralytics YOLOv8 Tasks +# Ultralytics YOLO11 Tasks
Ultralytics YOLO supported tasks -YOLOv8 is an AI framework that supports multiple [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) **tasks**. The framework can be used to perform [detection](detect.md), [segmentation](segment.md), [obb](obb.md), [classification](classify.md), and [pose](pose.md) estimation. Each of these tasks has a different objective and use case. +YOLO11 is an AI framework that supports multiple [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) **tasks**. The framework can be used to perform [detection](detect.md), [segmentation](segment.md), [obb](obb.md), [classification](classify.md), and [pose](pose.md) estimation. Each of these tasks has a different objective and use case.
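As a quick illustration of how one interface spans all of these tasks, the sketch below loads one checkpoint per task and prints the task each resolves to. The `-seg`/`-cls`/`-pose`/`-obb` weight names follow the suffix convention described in the task pages, and `model.task` is assumed to expose the resolved task string:

```python
from ultralytics import YOLO

# One API, five task heads: the checkpoint suffix selects the task
weights = ["yolo11n.pt", "yolo11n-seg.pt", "yolo11n-cls.pt", "yolo11n-pose.pt", "yolo11n-obb.pt"]
for w in weights:
    model = YOLO(w)
    print(w, "->", model.task)  # detect, segment, classify, pose, obb
```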


@@ -19,48 +19,48 @@ YOLOv8 is an AI framework that supports multiple [computer vision](https://www.u
- Watch: Explore Ultralytics YOLO Tasks: [Object Detection](https://www.ultralytics.com/glossary/object-detection), Segmentation, OBB, Tracking, and Pose Estimation. + Watch: Explore Ultralytics YOLO Tasks: Object Detection, Segmentation, OBB, Tracking, and Pose Estimation.

## [Detection](detect.md) -Detection is the primary task supported by YOLOv8. It involves detecting objects in an image or video frame and drawing bounding boxes around them. The detected objects are classified into different categories based on their features. YOLOv8 can detect multiple objects in a single image or video frame with high [accuracy](https://www.ultralytics.com/glossary/accuracy) and speed. +Detection is the primary task supported by YOLO11. It involves detecting objects in an image or video frame and drawing bounding boxes around them. The detected objects are classified into different categories based on their features. YOLO11 can detect multiple objects in a single image or video frame with high [accuracy](https://www.ultralytics.com/glossary/accuracy) and speed. [Detection Examples](detect.md){ .md-button } ## [Segmentation](segment.md) -Segmentation is a task that involves segmenting an image into different regions based on the content of the image. Each region is assigned a label based on its content. This task is useful in applications such as [image segmentation](https://www.ultralytics.com/glossary/image-segmentation) and medical imaging. YOLOv8 uses a variant of the U-Net architecture to perform segmentation. +Segmentation is a task that involves segmenting an image into different regions based on the content of the image. Each region is assigned a label based on its content. This task is useful in applications such as [image segmentation](https://www.ultralytics.com/glossary/image-segmentation) and medical imaging. YOLO11 uses a variant of the U-Net architecture to perform segmentation. [Segmentation Examples](segment.md){ .md-button } ## [Classification](classify.md) -Classification is a task that involves classifying an image into different categories. YOLOv8 can be used to classify images based on their content. It uses a variant of the EfficientNet architecture to perform classification. +Classification is a task that involves classifying an image into different categories. YOLO11 can be used to classify images based on their content. It uses a variant of the EfficientNet architecture to perform classification. [Classification Examples](classify.md){ .md-button } ## [Pose](pose.md) -Pose/keypoint detection is a task that involves detecting specific points in an image or video frame. These points are referred to as keypoints and are used to track movement or pose estimation. YOLOv8 can detect keypoints in an image or video frame with high accuracy and speed. +Pose/keypoint detection is a task that involves detecting specific points in an image or video frame. These points are referred to as keypoints and are used to track movement or pose estimation. YOLO11 can detect keypoints in an image or video frame with high accuracy and speed. [Pose Examples](pose.md){ .md-button } ## [OBB](obb.md) -Oriented object detection goes a step further than regular object detection with introducing an extra angle to locate objects more accurate in an image. YOLOv8 can detect rotated objects in an image or video frame with high accuracy and speed. +Oriented object detection goes a step further than regular object detection with introducing an extra angle to locate objects more accurate in an image. YOLO11 can detect rotated objects in an image or video frame with high accuracy and speed. [Oriented Detection](obb.md){ .md-button } ## Conclusion -YOLOv8 supports multiple tasks, including detection, segmentation, classification, oriented object detection and keypoints detection. 
Each of these tasks has different objectives and use cases. By understanding the differences between these tasks, you can choose the appropriate task for your computer vision application. +YOLO11 supports multiple tasks, including detection, segmentation, classification, oriented object detection and keypoints detection. Each of these tasks has different objectives and use cases. By understanding the differences between these tasks, you can choose the appropriate task for your computer vision application. ## FAQ -### What tasks can Ultralytics YOLOv8 perform? +### What tasks can Ultralytics YOLO11 perform? -Ultralytics YOLOv8 is a versatile AI framework capable of performing various computer vision tasks with high accuracy and speed. These tasks include: +Ultralytics YOLO11 is a versatile AI framework capable of performing various computer vision tasks with high accuracy and speed. These tasks include: - **[Detection](detect.md):** Identifying and localizing objects in images or video frames by drawing bounding boxes around them. - **[Segmentation](segment.md):** Segmenting images into different regions based on their content, useful for applications like medical imaging. @@ -68,12 +68,12 @@ Ultralytics YOLOv8 is a versatile AI framework capable of performing various com - **[Pose estimation](pose.md):** Detecting specific keypoints in an image or video frame to track movements or poses. - **[Oriented Object Detection (OBB)](obb.md):** Detecting rotated objects with an added orientation angle for enhanced accuracy. -### How do I use Ultralytics YOLOv8 for object detection? +### How do I use Ultralytics YOLO11 for object detection? -To use Ultralytics YOLOv8 for object detection, follow these steps: +To use Ultralytics YOLO11 for object detection, follow these steps: 1. Prepare your dataset in the appropriate format. -2. Train the YOLOv8 model using the detection task. +2. Train the YOLO11 model using the detection task. 3. Use the model to make predictions by feeding in new images or video frames. !!! example @@ -83,38 +83,44 @@ To use Ultralytics YOLOv8 for object detection, follow these steps: ```python from ultralytics import YOLO - model = YOLO("yolov8n.pt") # Load pre-trained model - results = model.predict(source="image.jpg") # Perform object detection - results[0].show() + # Load a pre-trained YOLO model (adjust model type as needed) + model = YOLO("yolo11n.pt") # n, s, m, l, x versions available + + # Perform object detection on an image + results = model.predict(source="image.jpg") # Can also use video, directory, URL, etc. + + # Display the results + results[0].show() # Show the first image results ``` === "CLI" ```bash - yolo detect model=yolov8n.pt source='image.jpg' + # Run YOLO detection from the command line + yolo detect model=yolo11n.pt source="image.jpg" # Adjust model and source as needed ``` For more detailed instructions, check out our [detection examples](detect.md). -### What are the benefits of using YOLOv8 for segmentation tasks? +### What are the benefits of using YOLO11 for segmentation tasks? -Using YOLOv8 for segmentation tasks provides several advantages: +Using YOLO11 for segmentation tasks provides several advantages: 1. **High Accuracy:** The segmentation task leverages a variant of the U-Net architecture to achieve precise segmentation. -2. **Speed:** YOLOv8 is optimized for real-time applications, offering quick processing even for high-resolution images. +2. 
**Speed:** YOLO11 is optimized for real-time applications, offering quick processing even for high-resolution images. 3. **Multiple Applications:** It is ideal for medical imaging, autonomous driving, and other applications requiring detailed image segmentation. -Learn more about the benefits and use cases of YOLOv8 for segmentation in the [segmentation section](segment.md). +Learn more about the benefits and use cases of YOLO11 for segmentation in the [segmentation section](segment.md). -### Can Ultralytics YOLOv8 handle pose estimation and keypoint detection? +### Can Ultralytics YOLO11 handle pose estimation and keypoint detection? -Yes, Ultralytics YOLOv8 can effectively perform pose estimation and keypoint detection with high accuracy and speed. This feature is particularly useful for tracking movements in sports analytics, healthcare, and human-computer interaction applications. YOLOv8 detects keypoints in an image or video frame, allowing for precise pose estimation. +Yes, Ultralytics YOLO11 can effectively perform pose estimation and keypoint detection with high accuracy and speed. This feature is particularly useful for tracking movements in sports analytics, healthcare, and human-computer interaction applications. YOLO11 detects keypoints in an image or video frame, allowing for precise pose estimation. For more details and implementation tips, visit our [pose estimation examples](pose.md). -### Why should I choose Ultralytics YOLOv8 for oriented object detection (OBB)? +### Why should I choose Ultralytics YOLO11 for oriented object detection (OBB)? -Oriented Object Detection (OBB) with YOLOv8 provides enhanced [precision](https://www.ultralytics.com/glossary/precision) by detecting objects with an additional angle parameter. This feature is beneficial for applications requiring accurate localization of rotated objects, such as aerial imagery analysis and warehouse automation. +Oriented Object Detection (OBB) with YOLO11 provides enhanced [precision](https://www.ultralytics.com/glossary/precision) by detecting objects with an additional angle parameter. This feature is beneficial for applications requiring accurate localization of rotated objects, such as aerial imagery analysis and warehouse automation. - **Increased Precision:** The angle component reduces false positives for rotated objects. - **Versatile Applications:** Useful for tasks in geospatial analysis, robotics, etc. diff --git a/docs/en/tasks/obb.md b/docs/en/tasks/obb.md index 9175d82786..7554dc2b5e 100644 --- a/docs/en/tasks/obb.md +++ b/docs/en/tasks/obb.md @@ -1,8 +1,8 @@ --- comments: true -description: Discover how to detect objects with rotation for higher precision using YOLOv8 OBB models. Learn, train, validate, and export OBB models effortlessly. -keywords: Oriented Bounding Boxes, OBB, Object Detection, YOLOv8, Ultralytics, DOTAv1, Model Training, Model Export, AI, Machine Learning -model_name: yolov8n-obb +description: Discover how to detect objects with rotation for higher precision using YOLO11 OBB models. Learn, train, validate, and export OBB models effortlessly. +keywords: Oriented Bounding Boxes, OBB, Object Detection, YOLO11, Ultralytics, DOTAv1, Model Training, Model Export, AI, Machine Learning +model_name: yolo11n-obb --- # Oriented Bounding Boxes [Object Detection](https://www.ultralytics.com/glossary/object-detection) @@ -17,7 +17,7 @@ The output of an oriented object detector is a set of rotated bounding boxes tha !!! tip - YOLOv8 OBB models use the `-obb` suffix, i.e. 
`yolov8n-obb.pt` and are pretrained on [DOTAv1](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/DOTAv1.yaml). + YOLO11 OBB models use the `-obb` suffix, i.e. `yolo11n-obb.pt` and are pretrained on [DOTAv1](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/DOTAv1.yaml).
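For instance, here is a minimal sketch of loading one of these pretrained OBB checkpoints and reading the rotated boxes it returns; it assumes the standard `results[0].obb` accessor and the sample `boats.jpg` image URL:

```python
from ultralytics import YOLO

# Load a pretrained OBB model (assumed local or auto-downloaded on first use)
model = YOLO("yolo11n-obb.pt")

# Run inference; OBB results expose rotated boxes via the `obb` attribute
results = model("https://ultralytics.com/images/boats.jpg")

for box in results[0].obb:
    # xywhr = center-x, center-y, width, height, rotation angle (radians)
    print(box.xywhr.tolist(), int(box.cls.item()), float(box.conf.item()))
```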


@@ -27,7 +27,7 @@ The output of an oriented object detector is a set of rotated bounding boxes tha allowfullscreen>
-  Watch: Object Detection using Ultralytics YOLOv8 Oriented Bounding Boxes (YOLOv8-OBB)
+  Watch: Object Detection using Ultralytics YOLO Oriented Bounding Boxes (YOLO-OBB)

## Visual Samples

@@ -36,26 +36,20 @@ The output of an oriented object detector is a set of rotated bounding boxes tha

| Ships Detection using OBB | Vehicle Detection using OBB |
| :-------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------: |
| ![Ships Detection using OBB](https://github.com/ultralytics/docs/releases/download/0/ships-detection-using-obb.avif) | ![Vehicle Detection using OBB](https://github.com/ultralytics/docs/releases/download/0/vehicle-detection-using-obb.avif) |

-## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/v8)
+## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/11)

-YOLOv8 pretrained OBB models are shown here, which are pretrained on the [DOTAv1](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/DOTAv1.yaml) dataset.
+YOLO11 pretrained OBB models are shown here, which are pretrained on the [DOTAv1](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/DOTAv1.yaml) dataset.

[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.

-| Model                                                                                        | size<br><sup>(pixels)</sup> | mAP<sup>test<br>50</sup> | Speed<br><sup>CPU ONNX<br>(ms)</sup> | Speed<br><sup>A100 TensorRT<br>(ms)</sup> | params<br><sup>(M)</sup> | FLOPs<br><sup>(B)</sup> |
-| -------------------------------------------------------------------------------------------- | --------------------------- | ------------------------ | ------------------------------------ | ----------------------------------------- | ------------------------ | ----------------------- |
-| [YOLOv8n-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-obb.pt) | 1024                        | 78.0                     | 204.77                               | 3.57                                      | 3.1                      | 23.3                    |
-| [YOLOv8s-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-obb.pt) | 1024                        | 79.5                     | 424.88                               | 4.07                                      | 11.4                     | 76.3                    |
-| [YOLOv8m-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-obb.pt) | 1024                        | 80.5                     | 763.48                               | 7.61                                      | 26.4                     | 208.6                   |
-| [YOLOv8l-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-obb.pt) | 1024                        | 80.7                     | 1278.42                              | 11.83                                     | 44.5                     | 433.8                   |
-| [YOLOv8x-obb](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-obb.pt) | 1024                        | 81.36                    | 1759.10                              | 13.23                                     | 69.5                     | 676.7                   |
+{% include "macros/yolo-obb-perf.md" %}

-- **mAP<sup>test</sup>** values are for single-model multiscale on [DOTAv1 test](https://captain-whu.github.io/DOTA/index.html) dataset. <br>Reproduce by `yolo val obb data=DOTAv1.yaml device=0 split=test` and submit merged results to [DOTA evaluation](https://captain-whu.github.io/DOTA/evaluation.html).
+- **mAP<sup>test</sup>** values are for single-model multiscale on [DOTAv1](https://captain-whu.github.io/DOTA/index.html) dataset. <br>Reproduce by `yolo val obb data=DOTAv1.yaml device=0 split=test` and submit merged results to [DOTA evaluation](https://captain-whu.github.io/DOTA/evaluation.html).
- **Speed** averaged over DOTAv1 val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>
Reproduce by `yolo val obb data=DOTAv1.yaml batch=1 device=0|cpu` ## Train -Train YOLOv8n-obb on the `dota8.yaml` dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 640. For a full list of available arguments see the [Configuration](../usage/cfg.md) page. +Train YOLO11n-obb on the `dota8.yaml` dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 640. For a full list of available arguments see the [Configuration](../usage/cfg.md) page. !!! example @@ -65,9 +59,9 @@ Train YOLOv8n-obb on the `dota8.yaml` dataset for 100 [epochs](https://www.ultra from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-obb.yaml") # build a new model from YAML - model = YOLO("yolov8n-obb.pt") # load a pretrained model (recommended for training) - model = YOLO("yolov8n-obb.yaml").load("yolov8n.pt") # build from YAML and transfer weights + model = YOLO("yolo11n-obb.yaml") # build a new model from YAML + model = YOLO("yolo11n-obb.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-obb.yaml").load("yolo11n.pt") # build from YAML and transfer weights # Train the model results = model.train(data="dota8.yaml", epochs=100, imgsz=640) @@ -77,13 +71,13 @@ Train YOLOv8n-obb on the `dota8.yaml` dataset for 100 [epochs](https://www.ultra ```bash # Build a new model from YAML and start training from scratch - yolo obb train data=dota8.yaml model=yolov8n-obb.yaml epochs=100 imgsz=640 + yolo obb train data=dota8.yaml model=yolo11n-obb.yaml epochs=100 imgsz=640 # Start training from a pretrained *.pt model - yolo obb train data=dota8.yaml model=yolov8n-obb.pt epochs=100 imgsz=640 + yolo obb train data=dota8.yaml model=yolo11n-obb.pt epochs=100 imgsz=640 # Build a new model from YAML, transfer pretrained weights to it and start training - yolo obb train data=dota8.yaml model=yolov8n-obb.yaml pretrained=yolov8n-obb.pt epochs=100 imgsz=640 + yolo obb train data=dota8.yaml model=yolo11n-obb.yaml pretrained=yolo11n-obb.pt epochs=100 imgsz=640 ```

@@ -94,7 +88,7 @@ Train YOLOv8n-obb on the `dota8.yaml` dataset for 100 [epochs](https://www.ultra allowfullscreen>
-  Watch: How to Train Ultralytics YOLOv8-OBB (Oriented Bounding Boxes) Models on DOTA Dataset using Ultralytics HUB
+  Watch: How to Train Ultralytics YOLO-OBB (Oriented Bounding Boxes) Models on DOTA Dataset using Ultralytics HUB

### Dataset format @@ -103,7 +97,7 @@ OBB dataset format can be found in detail in the [Dataset Guide](../datasets/obb ## Val -Validate trained YOLOv8n-obb model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the DOTA8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. +Validate trained YOLO11n-obb model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the DOTA8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. !!! example @@ -113,7 +107,7 @@ Validate trained YOLOv8n-obb model [accuracy](https://www.ultralytics.com/glossa from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-obb.pt") # load an official model + model = YOLO("yolo11n-obb.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom model # Validate the model @@ -127,13 +121,13 @@ Validate trained YOLOv8n-obb model [accuracy](https://www.ultralytics.com/glossa === "CLI" ```bash - yolo obb val model=yolov8n-obb.pt data=dota8.yaml # val official model + yolo obb val model=yolo11n-obb.pt data=dota8.yaml # val official model yolo obb val model=path/to/best.pt data=path/to/data.yaml # val custom model ``` ## Predict -Use a trained YOLOv8n-obb model to run predictions on images. +Use a trained YOLO11n-obb model to run predictions on images. !!! example @@ -143,7 +137,7 @@ Use a trained YOLOv8n-obb model to run predictions on images. from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-obb.pt") # load an official model + model = YOLO("yolo11n-obb.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom model # Predict with the model @@ -153,7 +147,7 @@ Use a trained YOLOv8n-obb model to run predictions on images. === "CLI" ```bash - yolo obb predict model=yolov8n-obb.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model + yolo obb predict model=yolo11n-obb.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model yolo obb predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model ``` @@ -165,14 +159,14 @@ Use a trained YOLOv8n-obb model to run predictions on images. allowfullscreen>
-  Watch: How to Detect and Track Storage Tanks using Ultralytics YOLOv8-OBB | Oriented Bounding Boxes | DOTA
+  Watch: How to Detect and Track Storage Tanks using Ultralytics YOLO-OBB | Oriented Bounding Boxes | DOTA

See full `predict` mode details in the [Predict](../modes/predict.md) page. ## Export -Export a YOLOv8n-obb model to a different format like ONNX, CoreML, etc. +Export a YOLO11n-obb model to a different format like ONNX, CoreML, etc. !!! example @@ -182,7 +176,7 @@ Export a YOLOv8n-obb model to a different format like ONNX, CoreML, etc. from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-obb.pt") # load an official model + model = YOLO("yolo11n-obb.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom trained model # Export the model @@ -192,11 +186,11 @@ Export a YOLOv8n-obb model to a different format like ONNX, CoreML, etc. === "CLI" ```bash - yolo export model=yolov8n-obb.pt format=onnx # export official model + yolo export model=yolo11n-obb.pt format=onnx # export official model yolo export model=path/to/best.pt format=onnx # export custom trained model ``` -Available YOLOv8-obb export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolov8n-obb.onnx`. Usage examples are shown for your model after export completes. +Available YOLO11-obb export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolo11n-obb.onnx`. Usage examples are shown for your model after export completes. {% include "macros/export-table.md" %} @@ -208,9 +202,9 @@ See full `export` details in the [Export](../modes/export.md) page. Oriented Bounding Boxes (OBB) include an additional angle to enhance object localization accuracy in images. Unlike regular bounding boxes, which are axis-aligned rectangles, OBBs can rotate to fit the orientation of the object better. This is particularly useful for applications requiring precise object placement, such as aerial or satellite imagery ([Dataset Guide](../datasets/obb/index.md)). -### How do I train a YOLOv8n-obb model using a custom dataset? +### How do I train a YOLO11n-obb model using a custom dataset? -To train a YOLOv8n-obb model with a custom dataset, follow the example below using Python or CLI: +To train a YOLO11n-obb model with a custom dataset, follow the example below using Python or CLI: !!! example @@ -220,7 +214,7 @@ To train a YOLOv8n-obb model with a custom dataset, follow the example below usi from ultralytics import YOLO # Load a pretrained model - model = YOLO("yolov8n-obb.pt") + model = YOLO("yolo11n-obb.pt") # Train the model results = model.train(data="path/to/custom_dataset.yaml", epochs=100, imgsz=640) @@ -229,18 +223,18 @@ To train a YOLOv8n-obb model with a custom dataset, follow the example below usi === "CLI" ```bash - yolo obb train data=path/to/custom_dataset.yaml model=yolov8n-obb.pt epochs=100 imgsz=640 + yolo obb train data=path/to/custom_dataset.yaml model=yolo11n-obb.pt epochs=100 imgsz=640 ``` For more training arguments, check the [Configuration](../usage/cfg.md) section. -### What datasets can I use for training YOLOv8-OBB models? +### What datasets can I use for training YOLO11-OBB models? -YOLOv8-OBB models are pretrained on datasets like [DOTAv1](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/DOTAv1.yaml) but you can use any dataset formatted for OBB. 
Detailed information on OBB dataset formats can be found in the [Dataset Guide](../datasets/obb/index.md). +YOLO11-OBB models are pretrained on datasets like [DOTAv1](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/DOTAv1.yaml) but you can use any dataset formatted for OBB. Detailed information on OBB dataset formats can be found in the [Dataset Guide](../datasets/obb/index.md). -### How can I export a YOLOv8-OBB model to ONNX format? +### How can I export a YOLO11-OBB model to ONNX format? -Exporting a YOLOv8-OBB model to ONNX format is straightforward using either Python or CLI: +Exporting a YOLO11-OBB model to ONNX format is straightforward using either Python or CLI: !!! example @@ -250,7 +244,7 @@ Exporting a YOLOv8-OBB model to ONNX format is straightforward using either Pyth from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-obb.pt") + model = YOLO("yolo11n-obb.pt") # Export the model model.export(format="onnx") @@ -259,14 +253,14 @@ Exporting a YOLOv8-OBB model to ONNX format is straightforward using either Pyth === "CLI" ```bash - yolo export model=yolov8n-obb.pt format=onnx + yolo export model=yolo11n-obb.pt format=onnx ``` For more export formats and details, refer to the [Export](../modes/export.md) page. -### How do I validate the accuracy of a YOLOv8n-obb model? +### How do I validate the accuracy of a YOLO11n-obb model? -To validate a YOLOv8n-obb model, you can use Python or CLI commands as shown below: +To validate a YOLO11n-obb model, you can use Python or CLI commands as shown below: !!! example @@ -276,7 +270,7 @@ To validate a YOLOv8n-obb model, you can use Python or CLI commands as shown bel from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-obb.pt") + model = YOLO("yolo11n-obb.pt") # Validate the model metrics = model.val(data="dota8.yaml") @@ -285,7 +279,7 @@ To validate a YOLOv8n-obb model, you can use Python or CLI commands as shown bel === "CLI" ```bash - yolo obb val model=yolov8n-obb.pt data=dota8.yaml + yolo obb val model=yolo11n-obb.pt data=dota8.yaml ``` See full validation details in the [Val](../modes/val.md) section. diff --git a/docs/en/tasks/pose.md b/docs/en/tasks/pose.md index ca0d5feca6..5fa566029c 100644 --- a/docs/en/tasks/pose.md +++ b/docs/en/tasks/pose.md @@ -1,8 +1,8 @@ --- comments: true -description: Discover how to use YOLOv8 for pose estimation tasks. Learn about model training, validation, prediction, and exporting in various formats. -keywords: pose estimation, YOLOv8, Ultralytics, keypoints, model training, image recognition, deep learning -model_name: yolov8n-pose +description: Discover how to use YOLO11 for pose estimation tasks. Learn about model training, validation, prediction, and exporting in various formats. +keywords: pose estimation, YOLO11, Ultralytics, keypoints, model training, image recognition, deep learning +model_name: yolo11n-pose --- # Pose Estimation @@ -22,7 +22,7 @@ The output of a pose estimation model is a set of points that represent the keyp allowfullscreen>
-  Watch: Pose Estimation with Ultralytics YOLOv8.
+  Watch: Pose Estimation with Ultralytics YOLO.
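Since the page above describes pose output as a set of keypoints, a minimal sketch of reading them may help; it assumes a pretrained `yolo11n-pose.pt` checkpoint and the standard `results[0].keypoints` accessor:

```python
from ultralytics import YOLO

# Assumed pretrained pose checkpoint; downloads on first use
model = YOLO("yolo11n-pose.pt")
results = model("https://ultralytics.com/images/bus.jpg")

kpts = results[0].keypoints  # Keypoints object for pose models
print(kpts.xy.shape)  # (num_persons, num_keypoints, 2) pixel coordinates
```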
-  Watch: Run Segmentation with Pre-Trained Ultralytics YOLOv8 Model in Python.
+  Watch: Run Segmentation with Pre-Trained Ultralytics YOLO Model in Python.

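In the same spirit as the video above, here is a minimal sketch of running a pretrained segmentation model and reading its per-instance masks; it assumes `yolo11n-seg.pt` and the standard `results[0].masks` accessor:

```python
from ultralytics import YOLO

# Load a pretrained segmentation model (downloads on first use)
model = YOLO("yolo11n-seg.pt")
results = model("https://ultralytics.com/images/bus.jpg")

masks = results[0].masks  # Masks object, or None if nothing was detected
if masks is not None:
    print(masks.data.shape)  # (num_instances, H, W) tensor of binary masks
    print(masks.xy[0][:5])  # first five polygon points of the first instance
```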
!!! tip

-    YOLOv8 Segment models use the `-seg` suffix, i.e. `yolov8n-seg.pt` and are pretrained on [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml).
+    YOLO11 Segment models use the `-seg` suffix, i.e. `yolo11n-seg.pt` and are pretrained on [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml).

-## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/v8)
+## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/11)

-YOLOv8 pretrained Segment models are shown here. Detect, Segment and Pose models are pretrained on the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml) dataset, while Classify models are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset.
+YOLO11 pretrained Segment models are shown here. Detect, Segment and Pose models are pretrained on the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml) dataset, while Classify models are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset.

[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.

-| Model                                                                                        | size<br><sup>(pixels)</sup> | mAP<sup>box<br>50-95</sup> | mAP<sup>mask<br>50-95</sup> | Speed<br><sup>CPU ONNX<br>(ms)</sup> | Speed<br><sup>A100 TensorRT<br>(ms)</sup> | params<br><sup>(M)</sup> | FLOPs<br><sup>(B)</sup> |
-| -------------------------------------------------------------------------------------------- | --------------------------- | -------------------------- | --------------------------- | ------------------------------------ | ----------------------------------------- | ------------------------ | ----------------------- |
-| [YOLOv8n-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-seg.pt) | 640                         | 36.7                       | 30.5                        | 96.1                                 | 1.21                                      | 3.4                      | 12.6                    |
-| [YOLOv8s-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-seg.pt) | 640                         | 44.6                       | 36.8                        | 155.7                                | 1.47                                      | 11.8                     | 42.6                    |
-| [YOLOv8m-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-seg.pt) | 640                         | 49.9                       | 40.8                        | 317.0                                | 2.18                                      | 27.3                     | 110.2                   |
-| [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-seg.pt) | 640                         | 52.3                       | 42.6                        | 572.4                                | 2.79                                      | 46.0                     | 220.5                   |
-| [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-seg.pt) | 640                         | 53.4                       | 43.4                        | 712.1                                | 4.02                                      | 71.8                     | 344.1                   |
+{% include "macros/yolo-seg-perf.md" %}

-- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](https://cocodataset.org/) dataset. <br>Reproduce by `yolo val segment data=coco.yaml device=0`
-- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val segment data=coco8-seg.yaml batch=1 device=0|cpu`
+- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](https://cocodataset.org/) dataset. <br>Reproduce by `yolo val segment data=coco-seg.yaml device=0`
+- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>
Reproduce by `yolo val segment data=coco-seg.yaml batch=1 device=0|cpu` ## Train -Train YOLOv8n-seg on the COCO128-seg dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 640. For a full list of available arguments see the [Configuration](../usage/cfg.md) page. +Train YOLO11n-seg on the COCO128-seg dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 640. For a full list of available arguments see the [Configuration](../usage/cfg.md) page. !!! example @@ -57,9 +51,9 @@ Train YOLOv8n-seg on the COCO128-seg dataset for 100 [epochs](https://www.ultral from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.yaml") # build a new model from YAML - model = YOLO("yolov8n-seg.pt") # load a pretrained model (recommended for training) - model = YOLO("yolov8n-seg.yaml").load("yolov8n.pt") # build from YAML and transfer weights + model = YOLO("yolo11n-seg.yaml") # build a new model from YAML + model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-seg.yaml").load("yolo11n.pt") # build from YAML and transfer weights # Train the model results = model.train(data="coco8-seg.yaml", epochs=100, imgsz=640) @@ -69,13 +63,13 @@ Train YOLOv8n-seg on the COCO128-seg dataset for 100 [epochs](https://www.ultral ```bash # Build a new model from YAML and start training from scratch - yolo segment train data=coco8-seg.yaml model=yolov8n-seg.yaml epochs=100 imgsz=640 + yolo segment train data=coco8-seg.yaml model=yolo11n-seg.yaml epochs=100 imgsz=640 # Start training from a pretrained *.pt model - yolo segment train data=coco8-seg.yaml model=yolov8n-seg.pt epochs=100 imgsz=640 + yolo segment train data=coco8-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640 # Build a new model from YAML, transfer pretrained weights to it and start training - yolo segment train data=coco8-seg.yaml model=yolov8n-seg.yaml pretrained=yolov8n-seg.pt epochs=100 imgsz=640 + yolo segment train data=coco8-seg.yaml model=yolo11n-seg.yaml pretrained=yolo11n-seg.pt epochs=100 imgsz=640 ``` ### Dataset format @@ -84,7 +78,7 @@ YOLO segmentation dataset format can be found in detail in the [Dataset Guide](. ## Val -Validate trained YOLOv8n-seg model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the COCO128-seg dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. +Validate trained YOLO11n-seg model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the COCO128-seg dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. !!! example @@ -94,7 +88,7 @@ Validate trained YOLOv8n-seg model [accuracy](https://www.ultralytics.com/glossa from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.pt") # load an official model + model = YOLO("yolo11n-seg.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom model # Validate the model @@ -112,13 +106,13 @@ Validate trained YOLOv8n-seg model [accuracy](https://www.ultralytics.com/glossa === "CLI" ```bash - yolo segment val model=yolov8n-seg.pt # val official model + yolo segment val model=yolo11n-seg.pt # val official model yolo segment val model=path/to/best.pt # val custom model ``` ## Predict -Use a trained YOLOv8n-seg model to run predictions on images. +Use a trained YOLO11n-seg model to run predictions on images. !!! example @@ -128,7 +122,7 @@ Use a trained YOLOv8n-seg model to run predictions on images. 
from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.pt") # load an official model + model = YOLO("yolo11n-seg.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom model # Predict with the model @@ -138,7 +132,7 @@ Use a trained YOLOv8n-seg model to run predictions on images. === "CLI" ```bash - yolo segment predict model=yolov8n-seg.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model + yolo segment predict model=yolo11n-seg.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model yolo segment predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model ``` @@ -146,7 +140,7 @@ See full `predict` mode details in the [Predict](../modes/predict.md) page. ## Export -Export a YOLOv8n-seg model to a different format like ONNX, CoreML, etc. +Export a YOLO11n-seg model to a different format like ONNX, CoreML, etc. !!! example @@ -156,7 +150,7 @@ Export a YOLOv8n-seg model to a different format like ONNX, CoreML, etc. from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.pt") # load an official model + model = YOLO("yolo11n-seg.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom trained model # Export the model @@ -166,11 +160,11 @@ Export a YOLOv8n-seg model to a different format like ONNX, CoreML, etc. === "CLI" ```bash - yolo export model=yolov8n-seg.pt format=onnx # export official model + yolo export model=yolo11n-seg.pt format=onnx # export official model yolo export model=path/to/best.pt format=onnx # export custom trained model ``` -Available YOLOv8-seg export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolov8n-seg.onnx`. Usage examples are shown for your model after export completes. +Available YOLO11-seg export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolo11n-seg.onnx`. Usage examples are shown for your model after export completes. {% include "macros/export-table.md" %} @@ -178,9 +172,9 @@ See full `export` details in the [Export](../modes/export.md) page. ## FAQ -### How do I train a YOLOv8 segmentation model on a custom dataset? +### How do I train a YOLO11 segmentation model on a custom dataset? -To train a YOLOv8 segmentation model on a custom dataset, you first need to prepare your dataset in the YOLO segmentation format. You can use tools like [JSON2YOLO](https://github.com/ultralytics/JSON2YOLO) to convert datasets from other formats. Once your dataset is ready, you can train the model using Python or CLI commands: +To train a YOLO11 segmentation model on a custom dataset, you first need to prepare your dataset in the YOLO segmentation format. You can use tools like [JSON2YOLO](https://github.com/ultralytics/JSON2YOLO) to convert datasets from other formats. Once your dataset is ready, you can train the model using Python or CLI commands: !!! 
example @@ -189,8 +183,8 @@ To train a YOLOv8 segmentation model on a custom dataset, you first need to prep ```python from ultralytics import YOLO - # Load a pretrained YOLOv8 segment model - model = YOLO("yolov8n-seg.pt") + # Load a pretrained YOLO11 segment model + model = YOLO("yolo11n-seg.pt") # Train the model results = model.train(data="path/to/your_dataset.yaml", epochs=100, imgsz=640) @@ -199,22 +193,22 @@ To train a YOLOv8 segmentation model on a custom dataset, you first need to prep === "CLI" ```bash - yolo segment train data=path/to/your_dataset.yaml model=yolov8n-seg.pt epochs=100 imgsz=640 + yolo segment train data=path/to/your_dataset.yaml model=yolo11n-seg.pt epochs=100 imgsz=640 ``` Check the [Configuration](../usage/cfg.md) page for more available arguments. -### What is the difference between [object detection](https://www.ultralytics.com/glossary/object-detection) and instance segmentation in YOLOv8? +### What is the difference between [object detection](https://www.ultralytics.com/glossary/object-detection) and instance segmentation in YOLO11? -Object detection identifies and localizes objects within an image by drawing bounding boxes around them, whereas instance segmentation not only identifies the bounding boxes but also delineates the exact shape of each object. YOLOv8 instance segmentation models provide masks or contours that outline each detected object, which is particularly useful for tasks where knowing the precise shape of objects is important, such as medical imaging or autonomous driving. +Object detection identifies and localizes objects within an image by drawing bounding boxes around them, whereas instance segmentation not only identifies the bounding boxes but also delineates the exact shape of each object. YOLO11 instance segmentation models provide masks or contours that outline each detected object, which is particularly useful for tasks where knowing the precise shape of objects is important, such as medical imaging or autonomous driving. -### Why use YOLOv8 for instance segmentation? +### Why use YOLO11 for instance segmentation? -Ultralytics YOLOv8 is a state-of-the-art model recognized for its high accuracy and real-time performance, making it ideal for instance segmentation tasks. YOLOv8 Segment models come pretrained on the [COCO dataset](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml), ensuring robust performance across a variety of objects. Additionally, YOLOv8 supports training, validation, prediction, and export functionalities with seamless integration, making it highly versatile for both research and industry applications. +Ultralytics YOLO11 is a state-of-the-art model recognized for its high accuracy and real-time performance, making it ideal for instance segmentation tasks. YOLO11 Segment models come pretrained on the [COCO dataset](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml), ensuring robust performance across a variety of objects. Additionally, YOLO supports training, validation, prediction, and export functionalities with seamless integration, making it highly versatile for both research and industry applications. -### How do I load and validate a pretrained YOLOv8 segmentation model? +### How do I load and validate a pretrained YOLO segmentation model? -Loading and validating a pretrained YOLOv8 segmentation model is straightforward. 
Here's how you can do it using both Python and CLI: +Loading and validating a pretrained YOLO segmentation model is straightforward. Here's how you can do it using both Python and CLI: !!! example @@ -224,7 +218,7 @@ Loading and validating a pretrained YOLOv8 segmentation model is straightforward from ultralytics import YOLO # Load a pretrained model - model = YOLO("yolov8n-seg.pt") + model = YOLO("yolo11n-seg.pt") # Validate the model metrics = model.val() @@ -235,14 +229,14 @@ Loading and validating a pretrained YOLOv8 segmentation model is straightforward === "CLI" ```bash - yolo segment val model=yolov8n-seg.pt + yolo segment val model=yolo11n-seg.pt ``` These steps will provide you with validation metrics like [Mean Average Precision](https://www.ultralytics.com/glossary/mean-average-precision-map) (mAP), crucial for assessing model performance. -### How can I export a YOLOv8 segmentation model to ONNX format? +### How can I export a YOLO segmentation model to ONNX format? -Exporting a YOLOv8 segmentation model to ONNX format is simple and can be done using Python or CLI commands: +Exporting a YOLO segmentation model to ONNX format is simple and can be done using Python or CLI commands: !!! example @@ -252,7 +246,7 @@ Exporting a YOLOv8 segmentation model to ONNX format is simple and can be done u from ultralytics import YOLO # Load a pretrained model - model = YOLO("yolov8n-seg.pt") + model = YOLO("yolo11n-seg.pt") # Export the model to ONNX format model.export(format="onnx") @@ -261,7 +255,7 @@ Exporting a YOLOv8 segmentation model to ONNX format is simple and can be done u === "CLI" ```bash - yolo export model=yolov8n-seg.pt format=onnx + yolo export model=yolo11n-seg.pt format=onnx ``` For more details on exporting to various formats, refer to the [Export](../modes/export.md) page. diff --git a/docs/en/usage/callbacks.md b/docs/en/usage/callbacks.md index 2886f8f512..16c4718786 100644 --- a/docs/en/usage/callbacks.md +++ b/docs/en/usage/callbacks.md @@ -1,7 +1,7 @@ --- comments: true description: Explore Ultralytics callbacks for training, validation, exporting, and prediction. Learn how to use and customize them for your ML models. -keywords: Ultralytics, callbacks, training, validation, export, prediction, ML models, YOLOv8, Python, machine learning +keywords: Ultralytics, callbacks, training, validation, export, prediction, ML models, YOLO11, Python, machine learning --- ## Callbacks @@ -16,7 +16,7 @@ Ultralytics framework supports callbacks as entry points in strategic stages of allowfullscreen>
-  Watch: Mastering Ultralytics YOLOv8: Callbacks
+  Watch: Mastering Ultralytics YOLO: Callbacks

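As a concrete illustration of the callback entry points described above, the following minimal sketch attaches a custom `on_predict_batch_end` callback; the `yolo11n.pt` weights and `image.jpg` source are illustrative assumptions:

```python
from ultralytics import YOLO


def on_predict_batch_end(predictor):
    """Log how many results the predictor produced for the current batch."""
    print(f"Batch done: {len(predictor.results)} result(s)")


model = YOLO("yolo11n.pt")  # assumed weights; auto-downloaded on first use
model.add_callback("on_predict_batch_end", on_predict_batch_end)
results = model.predict(source="image.jpg")  # assumed local test image
```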
## Examples @@ -41,7 +41,7 @@ def on_predict_batch_end(predictor): # Create a YOLO model instance -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") # Add the custom callback to the model model.add_callback("on_predict_batch_end", on_predict_batch_end) @@ -119,7 +119,7 @@ def on_predict_batch_end(predictor): predictor.results = zip(predictor.results, image) -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") model.add_callback("on_predict_batch_end", on_predict_batch_end) for result, frame in model.predict(): pass @@ -141,7 +141,7 @@ def on_train_epoch_end(trainer): trainer.log({"additional_metric": additional_metric}) -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") model.add_callback("on_train_epoch_end", on_train_epoch_end) model.train(data="coco.yaml", epochs=10) ``` @@ -164,7 +164,7 @@ def on_val_end(validator): validator.log({"custom_metric": custom_metric}) -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") model.add_callback("on_val_end", on_val_end) model.val(data="coco.yaml") ``` @@ -187,7 +187,7 @@ def on_predict_end(predictor): log_prediction(result) -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") model.add_callback("on_predict_end", on_predict_end) results = model.predict(source="image.jpg") ``` @@ -215,7 +215,7 @@ def on_predict_batch_end(predictor): predictor.results = zip(predictor.results, image) -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") model.add_callback("on_predict_batch_end", on_predict_batch_end) for result, frame in model.predict(): pass diff --git a/docs/en/usage/cfg.md b/docs/en/usage/cfg.md index aecc5fc646..8f8ac6025f 100644 --- a/docs/en/usage/cfg.md +++ b/docs/en/usage/cfg.md @@ -14,7 +14,7 @@ YOLO settings and hyperparameters play a critical role in the model's performanc allowfullscreen>
-  Watch: Mastering Ultralytics YOLOv8: Configuration
+  Watch: Mastering Ultralytics YOLO: Configuration

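As a concrete instance of the settings-and-overrides pattern this page documents, here is a short Python sketch mirroring the `yolo TASK MODE ARGS` syntax shown below, with illustrative argument values:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")

# MODE is `predict`; `conf` and `imgsz` are ARGS overriding the defaults
results = model.predict(source="https://ultralytics.com/images/bus.jpg", conf=0.5, imgsz=320)
```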
Ultralytics commands use the following syntax: @@ -32,8 +32,8 @@ Ultralytics commands use the following syntax: ```python from ultralytics import YOLO - # Load a YOLOv8 model from a pre-trained weights file - model = YOLO("yolov8n.pt") + # Load a YOLO11 model from a pre-trained weights file + model = YOLO("yolo11n.pt") # Run MODE mode using the custom arguments ARGS (guess TASK) model.MODE(ARGS) @@ -67,12 +67,12 @@ YOLO models can be used for a variety of tasks, including detection, segmentatio YOLO models can be used in different modes depending on the specific problem you are trying to solve. These modes include: -- **Train**: For training a YOLOv8 model on a custom dataset. -- **Val**: For validating a YOLOv8 model after it has been trained. -- **Predict**: For making predictions using a trained YOLOv8 model on new images or videos. -- **Export**: For exporting a YOLOv8 model to a format that can be used for deployment. -- **Track**: For tracking objects in real-time using a YOLOv8 model. -- **Benchmark**: For benchmarking YOLOv8 exports (ONNX, TensorRT, etc.) speed and accuracy. +- **Train**: For training a YOLO11 model on a custom dataset. +- **Val**: For validating a YOLO11 model after it has been trained. +- **Predict**: For making predictions using a trained YOLO11 model on new images or videos. +- **Export**: For exporting a YOLO11 model to a format that can be used for deployment. +- **Track**: For tracking objects in real-time using a YOLO11 model. +- **Benchmark**: For benchmarking YOLO11 exports (ONNX, TensorRT, etc.) speed and accuracy. | Argument | Default | Description | | -------- | --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | diff --git a/docs/en/usage/cli.md b/docs/en/usage/cli.md index d1d7c8de48..a2276f2e00 100644 --- a/docs/en/usage/cli.md +++ b/docs/en/usage/cli.md @@ -1,7 +1,7 @@ --- comments: true -description: Explore the YOLOv8 command line interface (CLI) for easy execution of detection tasks without needing a Python environment. -keywords: YOLOv8 CLI, command line interface, YOLOv8 commands, detection tasks, Ultralytics, model training, model prediction +description: Explore the YOLO11 command line interface (CLI) for easy execution of detection tasks without needing a Python environment. +keywords: YOLO11 CLI, command line interface, YOLO11 commands, detection tasks, Ultralytics, model training, model prediction --- # Command Line Interface Usage @@ -16,7 +16,7 @@ The YOLO command line interface (CLI) allows for simple single-line commands wit allowfullscreen>
-  Watch: Mastering Ultralytics YOLOv8: CLI
+  Watch: Mastering Ultralytics YOLO: CLI

!!! example @@ -37,28 +37,28 @@ The YOLO command line interface (CLI) allows for simple single-line commands wit Train a detection model for 10 [epochs](https://www.ultralytics.com/glossary/epoch) with an initial learning_rate of 0.01 ```bash - yolo train data=coco8.yaml model=yolov8n.pt epochs=10 lr0=0.01 + yolo train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01 ``` === "Predict" Predict a YouTube video using a pretrained segmentation model at image size 320: ```bash - yolo predict model=yolov8n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320 + yolo predict model=yolo11n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320 ``` === "Val" Val a pretrained detection model at batch-size 1 and image size 640: ```bash - yolo val model=yolov8n.pt data=coco8.yaml batch=1 imgsz=640 + yolo val model=yolo11n.pt data=coco8.yaml batch=1 imgsz=640 ``` === "Export" - Export a YOLOv8n classification model to ONNX format at image size 224 by 128 (no TASK required) + Export a YOLO11n classification model to ONNX format at image size 224 by 128 (no TASK required) ```bash - yolo export model=yolov8n-cls.pt format=onnx imgsz=224,128 + yolo export model=yolo11n-cls.pt format=onnx imgsz=224,128 ``` === "Special" @@ -75,7 +75,7 @@ The YOLO command line interface (CLI) allows for simple single-line commands wit Where: -- `TASK` (optional) is one of `[detect, segment, classify, pose, obb]`. If it is not passed explicitly YOLOv8 will try to guess the `TASK` from the model type. +- `TASK` (optional) is one of `[detect, segment, classify, pose, obb]`. If it is not passed explicitly YOLO11 will try to guess the `TASK` from the model type. - `MODE` (required) is one of `[train, val, predict, export, track, benchmark]` - `ARGS` (optional) are any number of custom `arg=value` pairs like `imgsz=320` that override defaults. For a full list of available `ARGS` see the [Configuration](cfg.md) page and `defaults.yaml` @@ -83,21 +83,21 @@ Where: Arguments must be passed as `arg=val` pairs, split by an equals `=` sign and delimited by spaces ` ` between pairs. Do not use `--` argument prefixes or commas `,` between arguments. - - `yolo predict model=yolov8n.pt imgsz=640 conf=0.25`   ✅ - - `yolo predict model yolov8n.pt imgsz 640 conf 0.25`   ❌ - - `yolo predict --model yolov8n.pt --imgsz 640 --conf 0.25`   ❌ + - `yolo predict model=yolo11n.pt imgsz=640 conf=0.25`   ✅ + - `yolo predict model yolo11n.pt imgsz 640 conf 0.25`   ❌ + - `yolo predict --model yolo11n.pt --imgsz 640 --conf 0.25`   ❌ ## Train -Train YOLOv8n on the COCO8 dataset for 100 epochs at image size 640. For a full list of available arguments see the [Configuration](cfg.md) page. +Train YOLO11n on the COCO8 dataset for 100 epochs at image size 640. For a full list of available arguments see the [Configuration](cfg.md) page. !!! example === "Train" - Start training YOLOv8n on COCO8 for 100 epochs at image-size 640. + Start training YOLO11n on COCO8 for 100 epochs at image-size 640. ```bash - yolo detect train data=coco8.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` === "Resume" @@ -109,15 +109,15 @@ Train YOLOv8n on the COCO8 dataset for 100 epochs at image size 640. For a full ## Val -Validate trained YOLOv8n model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the COCO8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. 
+Validate trained YOLO11n model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the COCO8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. !!! example === "Official" - Validate an official YOLOv8n model. + Validate an official YOLO11n model. ```bash - yolo detect val model=yolov8n.pt + yolo detect val model=yolo11n.pt ``` === "Custom" @@ -129,15 +129,15 @@ Validate trained YOLOv8n model [accuracy](https://www.ultralytics.com/glossary/a ## Predict -Use a trained YOLOv8n model to run predictions on images. +Use a trained YOLO11n model to run predictions on images. !!! example === "Official" - Predict with an official YOLOv8n model. + Predict with an official YOLO11n model. ```bash - yolo detect predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg' + yolo detect predict model=yolo11n.pt source='https://ultralytics.com/images/bus.jpg' ``` === "Custom" @@ -149,15 +149,15 @@ Use a trained YOLOv8n model to run predictions on images. ## Export -Export a YOLOv8n model to a different format like ONNX, CoreML, etc. +Export a YOLO11n model to a different format like ONNX, CoreML, etc. !!! example === "Official" - Export an official YOLOv8n model to ONNX format. + Export an official YOLO11n model to ONNX format. ```bash - yolo export model=yolov8n.pt format=onnx + yolo export model=yolo11n.pt format=onnx ``` === "Custom" @@ -167,7 +167,7 @@ Export a YOLOv8n model to a different format like ONNX, CoreML, etc. yolo export model=path/to/best.pt format=onnx ``` -Available YOLOv8 export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. +Available YOLO11 export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. {% include "macros/export-table.md" %} @@ -183,21 +183,21 @@ Default arguments can be overridden by simply passing them as arguments in the C Train a detection model for `10 epochs` with `learning_rate` of `0.01` ```bash - yolo detect train data=coco8.yaml model=yolov8n.pt epochs=10 lr0=0.01 + yolo detect train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01 ``` === "Predict" Predict a YouTube video using a pretrained segmentation model at image size 320: ```bash - yolo segment predict model=yolov8n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320 + yolo segment predict model=yolo11n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320 ``` === "Val" Validate a pretrained detection model at batch-size 1 and image size 640: ```bash - yolo detect val model=yolov8n.pt data=coco8.yaml batch=1 imgsz=640 + yolo detect val model=yolo11n.pt data=coco8.yaml batch=1 imgsz=640 ``` ## Overriding default config file @@ -219,19 +219,19 @@ This will create `default_copy.yaml`, which you can then pass as `cfg=default_co ## FAQ -### How do I use the Ultralytics YOLOv8 command line interface (CLI) for model training? +### How do I use the Ultralytics YOLO11 command line interface (CLI) for model training? -To train a YOLOv8 model using the CLI, you can execute a simple one-line command in the terminal. For example, to train a detection model for 10 epochs with a [learning rate](https://www.ultralytics.com/glossary/learning-rate) of 0.01, you would run: +To train a YOLO11 model using the CLI, you can execute a simple one-line command in the terminal. 
For example, to train a detection model for 10 epochs with a [learning rate](https://www.ultralytics.com/glossary/learning-rate) of 0.01, you would run:

```bash
-yolo train data=coco8.yaml model=yolov8n.pt epochs=10 lr0=0.01
+yolo train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01
```

This command uses the `train` mode with specific arguments. Refer to the full list of available arguments in the [Configuration Guide](cfg.md).

-### What tasks can I perform with the Ultralytics YOLOv8 CLI?
+### What tasks can I perform with the Ultralytics YOLO11 CLI?

-The Ultralytics YOLOv8 CLI supports a variety of tasks including detection, segmentation, classification, validation, prediction, export, and tracking. For instance:
+The Ultralytics YOLO11 CLI supports a variety of tasks including detection, segmentation, classification, validation, prediction, export, and tracking. For instance:

- **Train a Model**: Run `yolo train data=<data.yaml> model=<model.pt> epochs=<num>`.
- **Run Predictions**: Use `yolo predict model=<model.pt> source=<image_or_video> imgsz=<size>`.

@@ -239,32 +239,32 @@ The Ultralytics YOLOv8 CLI supports a variety of tasks including detection, segm

Each task can be customized with various arguments. For detailed syntax and examples, see the respective sections like [Train](#train), [Predict](#predict), and [Export](#export).

-### How can I validate the accuracy of a trained YOLOv8 model using the CLI?
+### How can I validate the accuracy of a trained YOLO11 model using the CLI?

-To validate a YOLOv8 model's accuracy, use the `val` mode. For example, to validate a pretrained detection model with a [batch size](https://www.ultralytics.com/glossary/batch-size) of 1 and image size of 640, run:
+To validate a YOLO11 model's accuracy, use the `val` mode. For example, to validate a pretrained detection model with a [batch size](https://www.ultralytics.com/glossary/batch-size) of 1 and image size of 640, run:

```bash
-yolo val model=yolov8n.pt data=coco8.yaml batch=1 imgsz=640
+yolo val model=yolo11n.pt data=coco8.yaml batch=1 imgsz=640
```

This command evaluates the model on the specified dataset and provides performance metrics. For more details, refer to the [Val](#val) section.

-### What formats can I export my YOLOv8 models to using the CLI?
+### What formats can I export my YOLO11 models to using the CLI?

-YOLOv8 models can be exported to various formats such as ONNX, CoreML, TensorRT, and more. For instance, to export a model to ONNX format, run:
+YOLO11 models can be exported to various formats such as ONNX, CoreML, TensorRT, and more. For instance, to export a model to ONNX format, run:

```bash
-yolo export model=yolov8n.pt format=onnx
+yolo export model=yolo11n.pt format=onnx
```

For complete details, visit the [Export](../modes/export.md) page.

-### How do I customize YOLOv8 CLI commands to override default arguments?
+### How do I customize YOLO11 CLI commands to override default arguments?

-To override default arguments in YOLOv8 CLI commands, pass them as `arg=value` pairs. For example, to train a model with custom arguments, use:
+To override default arguments in YOLO11 CLI commands, pass them as `arg=value` pairs. For example, to train a model with custom arguments, use:

```bash
-yolo train data=coco8.yaml model=yolov8n.pt epochs=10 lr0=0.01
+yolo train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01
```

For a full list of available arguments and their descriptions, refer to the [Configuration Guide](cfg.md).
Ensure arguments are formatted correctly, as shown in the [Overriding default arguments](#overriding-default-arguments) section. diff --git a/docs/en/usage/engine.md b/docs/en/usage/engine.md index dc44047ff7..d5d807c995 100644 --- a/docs/en/usage/engine.md +++ b/docs/en/usage/engine.md @@ -1,7 +1,7 @@ --- comments: true -description: Learn to customize the YOLOv8 Trainer for specific tasks. Step-by-step instructions with Python examples for maximum model performance. -keywords: Ultralytics, YOLOv8, Trainer Customization, Python, Machine Learning, AI, Model Training, DetectionTrainer, Custom Models +description: Learn to customize the YOLO11 Trainer for specific tasks. Step-by-step instructions with Python examples for maximum model performance. +keywords: Ultralytics, YOLO11, Trainer Customization, Python, Machine Learning, AI, Model Training, DetectionTrainer, Custom Models --- Both the Ultralytics YOLO command-line and Python interfaces are simply a high-level abstraction on the base engine executors. Let's take a look at the Trainer engine. @@ -14,7 +14,7 @@ Both the Ultralytics YOLO command-line and Python interfaces are simply a high-l allowfullscreen>
-  Watch: Mastering Ultralytics YOLOv8: Advanced Customization
+  Watch: Mastering Ultralytics YOLO: Advanced Customization

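Before the `BaseTrainer` and `DetectionTrainer` sections below, here is a minimal sketch of the subclass-and-override pattern they describe; the `overrides` values are illustrative assumptions:

```python
from ultralytics.models.yolo.detect import DetectionTrainer


class CustomTrainer(DetectionTrainer):
    def get_model(self, cfg=None, weights=None, verbose=True):
        """Hook for returning a customized model; defers to the parent here."""
        return super().get_model(cfg=cfg, weights=weights, verbose=verbose)


# Illustrative overrides; any argument from the Configuration page is accepted
trainer = CustomTrainer(overrides={"data": "coco8.yaml", "model": "yolo11n.pt", "epochs": 3})
trainer.train()
```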
## BaseTrainer @@ -26,7 +26,7 @@ BaseTrainer contains the generic boilerplate training routine. It can be customi ## DetectionTrainer -Here's how you can use the YOLOv8 `DetectionTrainer` and customize it. +Here's how you can use the YOLO11 `DetectionTrainer` and customize it. ```python from ultralytics.models.yolo.detect import DetectionTrainer @@ -96,9 +96,9 @@ There are other components that can be customized similarly like `Validators` an ## FAQ -### How do I customize the Ultralytics YOLOv8 DetectionTrainer for specific tasks? +### How do I customize the Ultralytics YOLO11 DetectionTrainer for specific tasks? -To customize the Ultralytics YOLOv8 `DetectionTrainer` for a specific task, you can override its methods to adapt to your custom model and dataloader. Start by inheriting from `DetectionTrainer` and then redefine methods like `get_model` to implement your custom functionalities. Here's an example: +To customize the Ultralytics YOLO11 `DetectionTrainer` for a specific task, you can override its methods to adapt to your custom model and dataloader. Start by inheriting from `DetectionTrainer` and then redefine methods like `get_model` to implement your custom functionalities. Here's an example: ```python from ultralytics.models.yolo.detect import DetectionTrainer @@ -117,18 +117,18 @@ trained_model = trainer.best # get best model For further customization like changing the `loss function` or adding a `callback`, you can reference our [Callbacks Guide](../usage/callbacks.md). -### What are the key components of the BaseTrainer in Ultralytics YOLOv8? +### What are the key components of the BaseTrainer in Ultralytics YOLO11? -The `BaseTrainer` in Ultralytics YOLOv8 serves as the foundation for training routines and can be customized for various tasks by overriding its generic methods. Key components include: +The `BaseTrainer` in Ultralytics YOLO11 serves as the foundation for training routines and can be customized for various tasks by overriding its generic methods. Key components include: - `get_model(cfg, weights)` to build the model to be trained. - `get_dataloader()` to build the dataloader. For more details on the customization and source code, see the [`BaseTrainer` Reference](../reference/engine/trainer.md). -### How can I add a callback to the Ultralytics YOLOv8 DetectionTrainer? +### How can I add a callback to the Ultralytics YOLO11 DetectionTrainer? -You can add callbacks to monitor and modify the training process in Ultralytics YOLOv8 `DetectionTrainer`. For instance, here's how you can add a callback to log model weights after every training [epoch](https://www.ultralytics.com/glossary/epoch): +You can add callbacks to monitor and modify the training process in Ultralytics YOLO11 `DetectionTrainer`. For instance, here's how you can add a callback to log model weights after every training [epoch](https://www.ultralytics.com/glossary/epoch): ```python from ultralytics.models.yolo.detect import DetectionTrainer @@ -148,19 +148,19 @@ trainer.train() For further details on callback events and entry points, refer to our [Callbacks Guide](../usage/callbacks.md). -### Why should I use Ultralytics YOLOv8 for model training? +### Why should I use Ultralytics YOLO11 for model training? -Ultralytics YOLOv8 offers a high-level abstraction on powerful engine executors, making it ideal for rapid development and customization. Key benefits include: +Ultralytics YOLO11 offers a high-level abstraction on powerful engine executors, making it ideal for rapid development and customization. 
Key benefits include: - **Ease of Use**: Both command-line and Python interfaces simplify complex tasks. - **Performance**: Optimized for real-time [object detection](https://www.ultralytics.com/glossary/object-detection) and various vision AI applications. - **Customization**: Easily extendable for custom models, [loss functions](https://www.ultralytics.com/glossary/loss-function), and dataloaders. -Learn more about YOLOv8's capabilities by visiting [Ultralytics YOLO](https://www.ultralytics.com/yolo). +Learn more about YOLO11's capabilities by visiting [Ultralytics YOLO](https://www.ultralytics.com/yolo). -### Can I use the Ultralytics YOLOv8 DetectionTrainer for non-standard models? +### Can I use the Ultralytics YOLO11 DetectionTrainer for non-standard models? -Yes, Ultralytics YOLOv8 `DetectionTrainer` is highly flexible and can be customized for non-standard models. By inheriting from `DetectionTrainer`, you can overload different methods to support your specific model's needs. Here's a simple example: +Yes, Ultralytics YOLO11 `DetectionTrainer` is highly flexible and can be customized for non-standard models. By inheriting from `DetectionTrainer`, you can overload different methods to support your specific model's needs. Here's a simple example: ```python from ultralytics.models.yolo.detect import DetectionTrainer diff --git a/docs/en/usage/python.md b/docs/en/usage/python.md index 5236af0a1d..af0546f434 100644 --- a/docs/en/usage/python.md +++ b/docs/en/usage/python.md @@ -1,12 +1,12 @@ --- comments: true -description: Learn to integrate YOLOv8 in Python for object detection, segmentation, and classification. Load, train models, and make predictions easily with our comprehensive guide. -keywords: YOLOv8, Python, object detection, segmentation, classification, machine learning, AI, pretrained models, train models, make predictions +description: Learn to integrate YOLO11 in Python for object detection, segmentation, and classification. Load, train models, and make predictions easily with our comprehensive guide. +keywords: YOLO11, Python, object detection, segmentation, classification, machine learning, AI, pretrained models, train models, make predictions --- # Python Usage -Welcome to the YOLOv8 Python Usage documentation! This guide is designed to help you seamlessly integrate YOLOv8 into your Python projects for [object detection](https://www.ultralytics.com/glossary/object-detection), segmentation, and classification. Here, you'll learn how to load and use pretrained models, train new models, and perform predictions on images. The easy-to-use Python interface is a valuable resource for anyone looking to incorporate YOLOv8 into their Python projects, allowing you to quickly implement advanced object detection capabilities. Let's get started! +Welcome to the YOLO11 Python Usage documentation! This guide is designed to help you seamlessly integrate YOLO11 into your Python projects for [object detection](https://www.ultralytics.com/glossary/object-detection), segmentation, and classification. Here, you'll learn how to load and use pretrained models, train new models, and perform predictions on images. The easy-to-use Python interface is a valuable resource for anyone looking to incorporate YOLO11 into their Python projects, allowing you to quickly implement advanced object detection capabilities. Let's get started!


@@ -16,7 +16,7 @@ Welcome to the YOLOv8 Python Usage documentation! This guide is designed to help allowfullscreen>
-  Watch: Mastering Ultralytics YOLOv8: Python
+  Watch: Mastering Ultralytics YOLO11: Python

For example, users can load a model, train it, evaluate its performance on a validation set, and even export it to ONNX format with just a few lines of code. @@ -27,10 +27,10 @@ For example, users can load a model, train it, evaluate its performance on a val from ultralytics import YOLO # Create a new YOLO model from scratch - model = YOLO("yolov8n.yaml") + model = YOLO("yolo11n.yaml") # Load a pretrained YOLO model (recommended for training) - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Train the model using the 'coco8.yaml' dataset for 3 epochs results = model.train(data="coco8.yaml", epochs=3) @@ -47,7 +47,7 @@ For example, users can load a model, train it, evaluate its performance on a val ## [Train](../modes/train.md) -Train mode is used for training a YOLOv8 model on a custom dataset. In this mode, the model is trained using the specified dataset and hyperparameters. The training process involves optimizing the model's parameters so that it can accurately predict the classes and locations of objects in an image. +Train mode is used for training a YOLO11 model on a custom dataset. In this mode, the model is trained using the specified dataset and hyperparameters. The training process involves optimizing the model's parameters so that it can accurately predict the classes and locations of objects in an image. !!! example "Train" @@ -56,7 +56,7 @@ Train mode is used for training a YOLOv8 model on a custom dataset. In this mode ```python from ultralytics import YOLO - model = YOLO("yolov8n.pt") # pass any model type + model = YOLO("yolo11n.pt") # pass any model type results = model.train(epochs=5) ``` @@ -65,7 +65,7 @@ Train mode is used for training a YOLOv8 model on a custom dataset. In this mode ```python from ultralytics import YOLO - model = YOLO("yolov8n.yaml") + model = YOLO("yolo11n.yaml") results = model.train(data="coco8.yaml", epochs=5) ``` @@ -80,7 +80,7 @@ Train mode is used for training a YOLOv8 model on a custom dataset. In this mode ## [Val](../modes/val.md) -Val mode is used for validating a YOLOv8 model after it has been trained. In this mode, the model is evaluated on a validation set to measure its [accuracy](https://www.ultralytics.com/glossary/accuracy) and generalization performance. This mode can be used to tune the hyperparameters of the model to improve its performance. +Val mode is used for validating a YOLO11 model after it has been trained. In this mode, the model is evaluated on a validation set to measure its [accuracy](https://www.ultralytics.com/glossary/accuracy) and generalization performance. This mode can be used to tune the hyperparameters of the model to improve its performance. !!! example "Val" @@ -89,8 +89,8 @@ Val mode is used for validating a YOLOv8 model after it has been trained. In thi ```python from ultralytics import YOLO - # Load a YOLOv8 model - model = YOLO("yolov8n.yaml") + # Load a YOLO11 model + model = YOLO("yolo11n.yaml") # Train the model model.train(data="coco8.yaml", epochs=5) @@ -104,8 +104,8 @@ Val mode is used for validating a YOLOv8 model after it has been trained. In thi ```python from ultralytics import YOLO - # Load a YOLOv8 model - model = YOLO("yolov8n.yaml") + # Load a YOLO11 model + model = YOLO("yolo11n.yaml") # Train the model model.train(data="coco8.yaml", epochs=5) @@ -118,7 +118,7 @@ Val mode is used for validating a YOLOv8 model after it has been trained. In thi ## [Predict](../modes/predict.md) -Predict mode is used for making predictions using a trained YOLOv8 model on new images or videos. 
In this mode, the model is loaded from a checkpoint file, and the user can provide images or videos to perform inference. The model predicts the classes and locations of objects in the input images or videos. +Predict mode is used for making predictions using a trained YOLO11 model on new images or videos. In this mode, the model is loaded from a checkpoint file, and the user can provide images or videos to perform inference. The model predicts the classes and locations of objects in the input images or videos. !!! example "Predict" @@ -189,27 +189,27 @@ Predict mode is used for making predictions using a trained YOLOv8 model on new ## [Export](../modes/export.md) -Export mode is used for exporting a YOLOv8 model to a format that can be used for deployment. In this mode, the model is converted to a format that can be used by other software applications or hardware devices. This mode is useful when deploying the model to production environments. +Export mode is used for exporting a YOLO11 model to a format that can be used for deployment. In this mode, the model is converted to a format that can be used by other software applications or hardware devices. This mode is useful when deploying the model to production environments. !!! example "Export" === "Export to ONNX" - Export an official YOLOv8n model to ONNX with dynamic batch-size and image-size. + Export an official YOLO11n model to ONNX with dynamic batch-size and image-size. ```python from ultralytics import YOLO - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") model.export(format="onnx", dynamic=True) ``` === "Export to TensorRT" - Export an official YOLOv8n model to TensorRT on `device=0` for acceleration on CUDA devices. + Export an official YOLO11n model to TensorRT on `device=0` for acceleration on CUDA devices. ```python from ultralytics import YOLO - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") model.export(format="engine", device=0) ``` @@ -217,7 +217,7 @@ Export mode is used for exporting a YOLOv8 model to a format that can be used fo ## [Track](../modes/track.md) -Track mode is used for tracking objects in real-time using a YOLOv8 model. In this mode, the model is loaded from a checkpoint file, and the user can provide a live video stream to perform real-time object tracking. This mode is useful for applications such as surveillance systems or self-driving cars. +Track mode is used for tracking objects in real-time using a YOLO11 model. In this mode, the model is loaded from a checkpoint file, and the user can provide a live video stream to perform real-time object tracking. This mode is useful for applications such as surveillance systems or self-driving cars. !!! example "Track" === "Python" ```python from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load an official detection model - model = YOLO("yolov8n-seg.pt") # load an official segmentation model + model = YOLO("yolo11n.pt") # load an official detection model + model = YOLO("yolo11n-seg.pt") # load an official segmentation model model = YOLO("path/to/best.pt") # load a custom model # Track with the model
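# A hedged sketch of the tracking call itself: the `source` path below is illustrative
# (any local video, image folder, or stream URL is accepted), and `tracker` selects the
# tracker config, with "botsort.yaml" the default and "bytetrack.yaml" also bundled.
results = model.track(source="path/to/video.mp4", show=True)
results = model.track(source="path/to/video.mp4", show=True, tracker="bytetrack.yaml")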
@@ -240,18 +240,18 @@ Track mode is used for tracking objects in real-time using a YOLOv8 model. In th ## [Benchmark](../modes/benchmark.md) -Benchmark mode is used to profile the speed and accuracy of various export formats for YOLOv8. The benchmarks provide information on the size of the exported format, its `mAP50-95` metrics (for object detection and segmentation) or `accuracy_top5` metrics (for classification), and the inference time in milliseconds per image across various export formats like ONNX, OpenVINO, TensorRT and others. This information can help users choose the optimal export format for their specific use case based on their requirements for speed and accuracy. +Benchmark mode is used to profile the speed and accuracy of various export formats for YOLO11. The benchmarks provide information on the size of the exported format, its `mAP50-95` metrics (for object detection and segmentation) or `accuracy_top5` metrics (for classification), and the inference time in milliseconds per image across various export formats like ONNX, OpenVINO, TensorRT and others. This information can help users choose the optimal export format for their specific use case based on their requirements for speed and accuracy. !!! example "Benchmark" === "Python" - Benchmark an official YOLOv8n model across all export formats. + Benchmark an official YOLO11n model across all export formats. ```python from ultralytics.utils.benchmarks import benchmark # Benchmark - benchmark(model="yolov8n.pt", data="coco8.yaml", imgsz=640, half=False, device=0) + benchmark(model="yolo11n.pt", data="coco8.yaml", imgsz=640, half=False, device=0) ``` [Benchmark Examples](../modes/benchmark.md){ .md-button } @@ -268,7 +268,7 @@ Explorer API can be used to explore datasets with advanced semantic, vector-simi from ultralytics import Explorer # create an Explorer object - exp = Explorer(data="coco8.yaml", model="yolov8n.pt") + exp = Explorer(data="coco8.yaml", model="yolo11n.pt") exp.create_embeddings_table() similar = exp.get_similar(img="https://ultralytics.com/images/bus.jpg", limit=10) @@ -287,7 +287,7 @@ Explorer API can be used to explore datasets with advanced semantic, vector-simi from ultralytics import Explorer # create an Explorer object - exp = Explorer(data="coco8.yaml", model="yolov8n.pt") + exp = Explorer(data="coco8.yaml", model="yolo11n.pt") exp.create_embeddings_table() similar = exp.get_similar(idx=1, limit=10) @@ -306,26 +306,26 @@ Explorer API can be used to explore datasets with advanced semantic, vector-simi !!! tip "Detection Trainer Example" - ```python - from ultralytics.models.yolo import DetectionPredictor, DetectionTrainer, DetectionValidator + ```python + from ultralytics.models.yolo import DetectionPredictor, DetectionTrainer, DetectionValidator - # trainer - trainer = DetectionTrainer(overrides={}) - trainer.train() - trained_model = trainer.best + # trainer + trainer = DetectionTrainer(overrides={}) + trainer.train() + trained_model = trainer.best - # Validator - val = DetectionValidator(args=...) - val(model=trained_model) + # Validator + val = DetectionValidator(args=...) + val(model=trained_model) - # predictor - pred = DetectionPredictor(overrides={}) - pred(source=SOURCE, model=trained_model) + # predictor + pred = DetectionPredictor(overrides={}) + pred(source=SOURCE, model=trained_model) - # resume from last weight - overrides["resume"] = trainer.last - trainer = detect.DetectionTrainer(overrides=overrides) - ``` + # resume from last weight + overrides = {"resume": trainer.last} + trainer = DetectionTrainer(overrides=overrides) + ``` You can easily customize Trainers to support custom tasks or explore R&D ideas.
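For instance, a minimal customization sketch is shown below (assumptions: `CustomTrainer` is a hypothetical name, and the `get_model` override here simply defers to the stock `DetectionTrainer` behavior, marking where a custom model would be returned):

```python
from ultralytics.models.yolo.detect import DetectionTrainer


class CustomTrainer(DetectionTrainer):
    """Hypothetical trainer subclass that hooks model construction for custom tasks."""

    def get_model(self, cfg=None, weights=None, verbose=True):
        # Swap in any custom detection model here; this sketch keeps the default behavior
        return super().get_model(cfg=cfg, weights=weights, verbose=verbose)


trainer = CustomTrainer(overrides={"model": "yolo11n.pt", "data": "coco8.yaml", "epochs": 1})
trainer.train()
```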
Learn more about Customizing `Trainers`, `Validators` and `Predictors` to suit your project needs in the Customization Section. @@ -333,15 +333,15 @@ You can easily customize Trainers to support custom tasks or explore R&D ideas. ## FAQ -### How can I integrate YOLOv8 into my Python project for object detection? +### How can I integrate YOLO11 into my Python project for object detection? -Integrating Ultralytics YOLOv8 into your Python projects is simple. You can load a pre-trained model or train a new model from scratch. Here's how to get started: +Integrating Ultralytics YOLO11 into your Python projects is simple. You can load a pre-trained model or train a new model from scratch. Here's how to get started: ```python from ultralytics import YOLO # Load a pretrained YOLO model -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") # Perform object detection on an image results = model("https://ultralytics.com/images/bus.jpg") @@ -353,9 +353,9 @@ for result in results: See more detailed examples in our [Predict Mode](../modes/predict.md) section. -### What are the different modes available in YOLOv8? +### What are the different modes available in YOLO11? -Ultralytics YOLOv8 provides various modes to cater to different [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) workflows. These include: +Ultralytics YOLO11 provides various modes to cater to different [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) workflows. These include: - **[Train](../modes/train.md)**: Train a model using custom datasets. - **[Val](../modes/val.md)**: Validate model performance on a validation set. @@ -366,15 +366,15 @@ Ultralytics YOLOv8 provides various modes to cater to different [machine learnin Each mode is designed to provide comprehensive functionalities for different stages of model development and deployment. -### How do I train a custom YOLOv8 model using my dataset? +### How do I train a custom YOLO11 model using my dataset? -To train a custom YOLOv8 model, you need to specify your dataset and other hyperparameters. Here's a quick example: +To train a custom YOLO11 model, you need to specify your dataset and other hyperparameters. Here's a quick example: ```python from ultralytics import YOLO # Load the YOLO model -model = YOLO("yolov8n.yaml") +model = YOLO("yolo11n.yaml") # Train the model with custom dataset model.train(data="path/to/your/dataset.yaml", epochs=10) @@ -382,15 +382,15 @@ model.train(data="path/to/your/dataset.yaml", epochs=10) For more details on training and hyperlinks to example usage, visit our [Train Mode](../modes/train.md) page. -### How do I export YOLOv8 models for deployment? +### How do I export YOLO11 models for deployment? -Exporting YOLOv8 models in a format suitable for deployment is straightforward with the `export` function. For example, you can export a model to ONNX format: +Exporting YOLO11 models in a format suitable for deployment is straightforward with the `export` function. For example, you can export a model to ONNX format: ```python from ultralytics import YOLO # Load the YOLO model -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") # Export the model to ONNX format model.export(format="onnx") @@ -398,15 +398,15 @@ model.export(format="onnx") For various export options, refer to the [Export Mode](../modes/export.md) documentation. -### Can I validate my YOLOv8 model on different datasets? +### Can I validate my YOLO11 model on different datasets? 
+### Can I validate my YOLO11 model on different datasets? -Yes, validating YOLOv8 models on different datasets is possible. After training, you can use the validation mode to evaluate the performance: +Yes, validating YOLO11 models on different datasets is possible. After training, you can use the validation mode to evaluate the performance: ```python from ultralytics import YOLO -# Load a YOLOv8 model -model = YOLO("yolov8n.yaml") +# Load a YOLO11 model +model = YOLO("yolo11n.yaml") # Train the model model.train(data="coco8.yaml", epochs=5) diff --git a/docs/en/usage/simple-utilities.md b/docs/en/usage/simple-utilities.md index e40a2478ee..0a947adaf1 100644 --- a/docs/en/usage/simple-utilities.md +++ b/docs/en/usage/simple-utilities.md @@ -38,7 +38,7 @@ from ultralytics.data.annotator import auto_annotate auto_annotate( # (1)! data="path/to/new/data", - det_model="yolov8n.pt", + det_model="yolo11n.pt", sam_model="mobile_sam.pt", device="cuda", output_dir="path/to/save_labels", @@ -93,7 +93,7 @@ from ultralytics.utils.plotting import Annotator from ultralytics import YOLO import cv2 -model = YOLO('yolov8n.pt') # Load pretrain or fine-tune model +model = YOLO('yolo11n.pt') # Load a pretrained or fine-tuned model # Process the image source = cv2.imread('path/to/image.jpg') @@ -468,7 +468,7 @@ import cv2 from ultralytics import YOLO from ultralytics.utils.plotting import Annotator -model = YOLO("yolov8s.pt") +model = YOLO("yolo11s.pt") names = model.names cap = cv2.VideoCapture("path/to/video/file.mp4") @@ -507,7 +507,7 @@ import cv2 from ultralytics import YOLO from ultralytics.utils.plotting import Annotator -model = YOLO("yolov8s.pt") +model = YOLO("yolo11s.pt") names = model.names cap = cv2.VideoCapture("path/to/video/file.mp4") @@ -598,7 +598,7 @@ from ultralytics.data.annotator import auto_annotate auto_annotate( data="path/to/new/data", - det_model="yolov8n.pt", + det_model="yolo11n.pt", sam_model="mobile_sam.pt", device="cuda", output_dir="path/to/save_labels", diff --git a/docs/en/yolov5/environments/google_cloud_quickstart_tutorial.md b/docs/en/yolov5/environments/google_cloud_quickstart_tutorial.md index 65ad9c3d62..69e2fa80a8 100644 --- a/docs/en/yolov5/environments/google_cloud_quickstart_tutorial.md +++ b/docs/en/yolov5/environments/google_cloud_quickstart_tutorial.md @@ -18,7 +18,7 @@ Let's begin by creating a virtual machine that's tuned for deep learning: 1. Head over to the [GCP marketplace](https://console.cloud.google.com/marketplace/details/click-to-deploy-images/deeplearning) and select the **Deep Learning VM**. 2. Opt for an **n1-standard-8** instance; it offers a balance of 8 vCPUs and 30 GB of memory, ideally suited for our needs. -3. Next, select a GPU. This depends on your workload; even a basic one like the Tesla T4 will markedly accelerate your model training. +3. Next, select a GPU. This depends on your workload; even a basic one like the T4 will markedly accelerate your model training. 4. Tick the box for 'Install NVIDIA GPU driver automatically on first startup?' for hassle-free setup. 5. Allocate a 300 GB SSD Persistent Disk to ensure you don't bottleneck on I/O operations. 6. Hit 'Deploy' and let GCP do its magic in provisioning your custom Deep Learning VM.
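Once the VM is deployed, a quick check confirms that the driver install from step 4 took effect. A minimal sketch (assuming you SSH into the instance and use the PyTorch environment preinstalled on the Deep Learning VM):

```python
import torch

print(torch.__version__)                  # PyTorch build bundled with the Deep Learning VM
print(torch.cuda.is_available())          # True once the NVIDIA driver installed correctly
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))  # e.g. the T4 selected in step 3
```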
diff --git a/docs/mkdocs_github_authors.yaml b/docs/mkdocs_github_authors.yaml index 31526516ed..4f7f3b3a38 100644 --- a/docs/mkdocs_github_authors.yaml +++ b/docs/mkdocs_github_authors.yaml @@ -1,6 +1,9 @@ 116908874+jk4e@users.noreply.github.com: avatar: https://avatars.githubusercontent.com/u/116908874?v=4 username: jk4e +1185102784@qq.com: + avatar: null + username: null 130829914+IvorZhu331@users.noreply.github.com: avatar: https://avatars.githubusercontent.com/u/130829914?v=4 username: IvorZhu331 @@ -133,6 +136,9 @@ sometimesocrazy@gmail.com: stormsson@users.noreply.github.com: avatar: https://avatars.githubusercontent.com/u/1133032?v=4 username: stormsson +waxmann.sergiu@me.com: + avatar: https://avatars.githubusercontent.com/u/47978446?v=4 + username: sergiuwaxmann web@ultralytics.com: avatar: https://avatars.githubusercontent.com/u/135830346?v=4 username: UltralyticsAssistant diff --git a/examples/README.md b/examples/README.md index 931bdc634c..22da53f294 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,6 +1,6 @@ -## Ultralytics YOLOv8 Example Applications +## Ultralytics Examples -This repository features a collection of real-world applications and walkthroughs, provided as either Python files or notebooks. Explore the examples below to see how YOLOv8 can be integrated into various applications. +This directory features a collection of real-world applications and walkthroughs, provided as either Python files or notebooks. Explore the examples below to see how YOLO can be integrated into various applications. ### Ultralytics YOLO Example Applications @@ -20,6 +20,7 @@ This repository features a collection of real-world applications and walkthrough | [YOLOv8 LibTorch CPP](./YOLOv8-LibTorch-CPP-Inference) | C++/LibTorch | [Myyura](https://github.com/Myyura) | | [YOLOv8 OpenCV INT8 TFLite Python](./YOLOv8-OpenCV-int8-tflite-Python) | Python | [Wamiq Raza](https://github.com/wamiqraza) | | [YOLOv8 All Tasks ONNXRuntime Rust](./YOLOv8-ONNXRuntime-Rust) | Rust/ONNXRuntime | [jamjamjon](https://github.com/jamjamjon) | +| [YOLOv8 OpenVINO CPP](./YOLOv8-OpenVINO-CPP-Inference) | C++/OpenVINO | [Erlangga Yudi Pradana](https://github.com/rlggyp) | ### How to Contribute diff --git a/examples/YOLOv8-ONNXRuntime-CPP/inference.cpp b/examples/YOLOv8-ONNXRuntime-CPP/inference.cpp index 2ee993eed7..a65391f5d7 100644 --- a/examples/YOLOv8-ONNXRuntime-CPP/inference.cpp +++ b/examples/YOLOv8-ONNXRuntime-CPP/inference.cpp @@ -238,9 +238,9 @@ char* YOLO_V8::TensorProcess(clock_t& starttime_1, cv::Mat& iImg, N& blob, std:: rawData = cv::Mat(signalResultNum, strideNum, CV_16F, output); rawData.convertTo(rawData, CV_32F); } - //Note: - //ultralytics add transpose operator to the output of yolov8 model.which make yolov8/v5/v7 has same shape - //https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt + // Note: + // Ultralytics adds a transpose operator to the output of the yolov8 model, which gives yolov8/v5/v7 the same output shape + // https://github.com/ultralytics/assets/releases/download/v8.3.0/yolov8n.pt rawData = rawData.t(); float* data = (float*)rawData.data; diff --git a/examples/heatmaps.ipynb b/examples/heatmaps.ipynb index 1f590b8cb0..6ebf179b21 100644 --- a/examples/heatmaps.ipynb +++ b/examples/heatmaps.ipynb @@ -19,11 +19,11 @@ " \"Open\n", " \"Discord\"\n", "\n", - "Welcome to the Ultralytics YOLOv8 🚀 notebook! YOLOv8 is the latest version of the YOLO (You Only Look Once) AI models developed by Ultralytics.
This notebook serves as the starting point for exploring the various resources available to help you get started with YOLOv8 and understand its features and capabilities.\n", + "Welcome to the Ultralytics YOLO11 🚀 notebook! YOLO11 is the latest version of the YOLO (You Only Look Once) AI models developed by Ultralytics. This notebook serves as the starting point for exploring the various resources available to help you get started with YOLO11 and understand its features and capabilities.\n", "\n", - "YOLOv8 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n", + "YOLO11 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n", "\n", - "We hope that the resources in this notebook will help you get the most out of YOLOv8. Please browse the YOLOv8 Heatmap Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", + "We hope that the resources in this notebook will help you get the most out of YOLO11. Please browse the YOLO11 Heatmap Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", "\n", "
" ] @@ -56,7 +56,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Ultralytics YOLOv8.2.17 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n", + "Ultralytics 8.2.17 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (T4, 15102MiB)\n", "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 29.8/78.2 GB disk)\n" ] } @@ -76,14 +76,14 @@ "source": [ "# Introduction to Heatmaps\n", "\n", - "A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) transforms complex data into a vibrant, color-coded matrix. This visual tool employs a spectrum of colors to represent varying data values, where warmer hues indicate higher intensities and cooler tones signify lower values. Heatmaps excel in visualizing intricate data patterns, correlations, and anomalies, offering an accessible and engaging approach to data interpretation across diverse domains.\n", + "A heatmap generated with [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics/) transforms complex data into a vibrant, color-coded matrix. This visual tool employs a spectrum of colors to represent varying data values, where warmer hues indicate higher intensities and cooler tones signify lower values. Heatmaps excel in visualizing intricate data patterns, correlations, and anomalies, offering an accessible and engaging approach to data interpretation across diverse domains.\n", "\n", "## Real World Applications\n", "\n", "| Transportation | Retail |\n", "|:-----------------------------------------------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------------:|\n", - "| ![Ultralytics YOLOv8 Transportation Heatmap](https://github.com/RizwanMunawar/ultralytics/assets/62513924/288d7053-622b-4452-b4e4-1f41aeb764aa) | ![Ultralytics YOLOv8 Retail Heatmap](https://github.com/RizwanMunawar/ultralytics/assets/62513924/edef75ad-50a7-4c0a-be4a-a66cdfc12802) |\n", - "| Ultralytics YOLOv8 Transportation Heatmap | Ultralytics YOLOv8 Retail Heatmap |\n" + "| ![Ultralytics YOLO11 Transportation Heatmap](https://github.com/RizwanMunawar/ultralytics/assets/62513924/288d7053-622b-4452-b4e4-1f41aeb764aa) | ![Ultralytics YOLO11 Retail Heatmap](https://github.com/RizwanMunawar/ultralytics/assets/62513924/edef75ad-50a7-4c0a-be4a-a66cdfc12802) |\n", + "| Ultralytics YOLO11 Transportation Heatmap | Ultralytics YOLO11 Retail Heatmap |\n" ] }, { @@ -99,7 +99,7 @@ "from ultralytics import YOLO, solutions\n", "\n", "# Load YOLO model\n", - "model = YOLO(\"yolov8n.pt\")\n", + "model = YOLO(\"yolo11n.pt\")\n", "\n", "# Open video file\n", "cap = cv2.VideoCapture(\"path/to/video/file.mp4\")\n", @@ -161,15 +161,15 @@ "- [About Us](https://ultralytics.com/about): Discover our mission, vision, and the story behind Ultralytics.\n", "- [Join Our Team](https://ultralytics.com/work): Explore career opportunities and join our team of talented professionals.\n", "\n", - "## YOLOv8 🚀 Resources\n", + "## YOLO11 🚀 Resources\n", "\n", - "YOLOv8 is the latest evolution in the YOLO series, offering state-of-the-art performance in object detection and image segmentation. Here are some essential resources to help you get started with YOLOv8:\n", + "YOLO11 is the latest evolution in the YOLO series, offering state-of-the-art performance in object detection and image segmentation. 
Here are some essential resources to help you get started with YOLO11:\n", "\n", - "- [GitHub](https://github.com/ultralytics/ultralytics): Access the YOLOv8 repository on GitHub, where you can find the source code, contribute to the project, and report issues.\n", - "- [Docs](https://docs.ultralytics.com/): Explore the official documentation for YOLOv8, including installation guides, tutorials, and detailed API references.\n", + "- [GitHub](https://github.com/ultralytics/ultralytics): Access the YOLO11 repository on GitHub, where you can find the source code, contribute to the project, and report issues.\n", + "- [Docs](https://docs.ultralytics.com/): Explore the official documentation for YOLO11, including installation guides, tutorials, and detailed API references.\n", "- [Discord](https://ultralytics.com/discord): Join our Discord community to connect with other users, share your projects, and get help from the Ultralytics team.\n", "\n", - "These resources are designed to help you leverage the full potential of Ultralytics' offerings and YOLOv8. Whether you're a beginner or an experienced developer, you'll find the information and support you need to succeed." + "These resources are designed to help you leverage the full potential of Ultralytics' offerings and YOLO11. Whether you're a beginner or an experienced developer, you'll find the information and support you need to succeed." ] } ], diff --git a/examples/hub.ipynb b/examples/hub.ipynb index ee6f3cae60..03382596ce 100644 --- a/examples/hub.ipynb +++ b/examples/hub.ipynb @@ -54,7 +54,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Ultralytics YOLOv8.2.3 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n", + "Ultralytics 8.2.3 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (T4, 15102MiB)\n", "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 28.8/78.2 GB disk)\n" ] } diff --git a/examples/object_counting.ipynb b/examples/object_counting.ipynb index 8c3d0ba6e8..8356d592d2 100644 --- a/examples/object_counting.ipynb +++ b/examples/object_counting.ipynb @@ -19,11 +19,11 @@ " \"Open\n", " \"Discord\"\n", "\n", - "Welcome to the Ultralytics YOLOv8 🚀 notebook! YOLOv8 is the latest version of the YOLO (You Only Look Once) AI models developed by Ultralytics. This notebook serves as the starting point for exploring the various resources available to help you get started with YOLOv8 and understand its features and capabilities.\n", + "Welcome to the Ultralytics YOLO11 🚀 notebook! YOLO11 is the latest version of the YOLO (You Only Look Once) AI models developed by Ultralytics. This notebook serves as the starting point for exploring the various resources available to help you get started with YOLO11 and understand its features and capabilities.\n", "\n", - "YOLOv8 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n", + "YOLO11 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n", "\n", - "We hope that the resources in this notebook will help you get the most out of YOLOv8. 
Please browse the YOLOv8 Object Counting Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", + "We hope that the resources in this notebook will help you get the most out of YOLO11. Please browse the YOLO11 Object Counting Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", "\n", "
" ] @@ -56,7 +56,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Ultralytics YOLOv8.2.18 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n", + "Ultralytics 8.2.18 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (T4, 15102MiB)\n", "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 29.8/78.2 GB disk)\n" ] } @@ -74,11 +74,11 @@ "id": "m7VkxQ2aeg7k" }, "source": [ - "# Object Counting using Ultralytics YOLOv8 🚀\n", + "# Object Counting using Ultralytics YOLO11 🚀\n", "\n", "## What is Object Counting?\n", "\n", - "Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) involves accurate identification and counting of specific objects in videos and camera streams. YOLOv8 excels in real-time applications, providing efficient and precise object counting for various scenarios like crowd analysis and surveillance, thanks to its state-of-the-art algorithms and deep learning capabilities.\n", + "Object counting with [Ultralytics YOLO11](https://github.com/ultralytics/ultralytics/) involves accurate identification and counting of specific objects in videos and camera streams. YOLO11 excels in real-time applications, providing efficient and precise object counting for various scenarios like crowd analysis and surveillance, thanks to its state-of-the-art algorithms and deep learning capabilities.\n", "\n", "## Advantages of Object Counting?\n", "\n", @@ -90,8 +90,8 @@ "\n", "| Logistics | Aquaculture |\n", "|:-------------------------------------------------------------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------:|\n", - "| ![Conveyor Belt Packets Counting Using Ultralytics YOLOv8](https://github.com/RizwanMunawar/ultralytics/assets/62513924/70e2d106-510c-4c6c-a57a-d34a765aa757) | ![Fish Counting in Sea using Ultralytics YOLOv8](https://github.com/RizwanMunawar/ultralytics/assets/62513924/c60d047b-3837-435f-8d29-bb9fc95d2191) |\n", - "| Conveyor Belt Packets Counting Using Ultralytics YOLOv8 | Fish Counting in Sea using Ultralytics YOLOv8 |\n" + "| ![Conveyor Belt Packets Counting Using Ultralytics YOLO11](https://github.com/RizwanMunawar/ultralytics/assets/62513924/70e2d106-510c-4c6c-a57a-d34a765aa757) | ![Fish Counting in Sea using Ultralytics YOLO11](https://github.com/RizwanMunawar/ultralytics/assets/62513924/c60d047b-3837-435f-8d29-bb9fc95d2191) |\n", + "| Conveyor Belt Packets Counting Using Ultralytics YOLO11 | Fish Counting in Sea using Ultralytics YOLO11 |\n" ] }, { @@ -106,8 +106,8 @@ "\n", "from ultralytics import YOLO, solutions\n", "\n", - "# Load the pre-trained YOLOv8 model\n", - "model = YOLO(\"yolov8n.pt\")\n", + "# Load the pre-trained YOLO11 model\n", + "model = YOLO(\"yolo11n.pt\")\n", "\n", "# Open the video file\n", "cap = cv2.VideoCapture(\"path/to/video/file.mp4\")\n", @@ -179,15 +179,15 @@ "- [About Us](https://ultralytics.com/about): Discover our mission, vision, and the story behind Ultralytics.\n", "- [Join Our Team](https://ultralytics.com/work): Explore career opportunities and join our team of talented professionals.\n", "\n", - "## YOLOv8 🚀 Resources\n", + "## YOLO11 🚀 Resources\n", "\n", - "YOLOv8 is the latest evolution in the YOLO series, offering state-of-the-art performance in object detection and image segmentation. 
Here are some essential resources to help you get started with YOLOv8:\n", + "YOLO11 is the latest evolution in the YOLO series, offering state-of-the-art performance in object detection and image segmentation. Here are some essential resources to help you get started with YOLO11:\n", "\n", - "- [GitHub](https://github.com/ultralytics/ultralytics): Access the YOLOv8 repository on GitHub, where you can find the source code, contribute to the project, and report issues.\n", - "- [Docs](https://docs.ultralytics.com/): Explore the official documentation for YOLOv8, including installation guides, tutorials, and detailed API references.\n", + "- [GitHub](https://github.com/ultralytics/ultralytics): Access the YOLO11 repository on GitHub, where you can find the source code, contribute to the project, and report issues.\n", + "- [Docs](https://docs.ultralytics.com/): Explore the official documentation for YOLO11, including installation guides, tutorials, and detailed API references.\n", "- [Discord](https://ultralytics.com/discord): Join our Discord community to connect with other users, share your projects, and get help from the Ultralytics team.\n", "\n", - "These resources are designed to help you leverage the full potential of Ultralytics' offerings and YOLOv8. Whether you're a beginner or an experienced developer, you'll find the information and support you need to succeed." + "These resources are designed to help you leverage the full potential of Ultralytics' offerings and YOLO11. Whether you're a beginner or an experienced developer, you'll find the information and support you need to succeed." ] } ], diff --git a/examples/object_tracking.ipynb b/examples/object_tracking.ipynb index 17c27c0ec0..53ca6b253c 100644 --- a/examples/object_tracking.ipynb +++ b/examples/object_tracking.ipynb @@ -19,11 +19,11 @@ " \"Open\n", " \"Discord\"\n", "\n", - "Welcome to the Ultralytics YOLOv8 🚀 notebook! YOLOv8 is the latest version of the YOLO (You Only Look Once) AI models developed by Ultralytics. This notebook serves as the starting point for exploring the various resources available to help you get started with YOLOv8 and understand its features and capabilities.\n", + "Welcome to the Ultralytics YOLO11 🚀 notebook! YOLO11 is the latest version of the YOLO (You Only Look Once) AI models developed by Ultralytics. This notebook serves as the starting point for exploring the various resources available to help you get started with YOLO11 and understand its features and capabilities.\n", "\n", - "YOLOv8 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n", + "YOLO11 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n", "\n", - "We hope that the resources in this notebook will help you get the most out of YOLOv8. Please browse the YOLOv8 Tracking Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", + "We hope that the resources in this notebook will help you get the most out of YOLO11. 
Please browse the YOLO11 Tracking Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", "\n", "" ] @@ -56,7 +56,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Ultralytics YOLOv8.2.17 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n", + "Ultralytics 8.2.17 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (T4, 15102MiB)\n", "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 29.8/78.2 GB disk)\n" ] } @@ -76,7 +76,7 @@ "source": [ "# Ultralytics Object Tracking\n", "\n", - "[Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) instance segmentation involves identifying and outlining individual objects in an image, providing a detailed understanding of spatial distribution. Unlike semantic segmentation, it uniquely labels and precisely delineates each object, crucial for tasks like object detection and medical imaging.\n", + "[Ultralytics YOLO11](https://github.com/ultralytics/ultralytics/) instance segmentation involves identifying and outlining individual objects in an image, providing a detailed understanding of spatial distribution. Unlike semantic segmentation, it uniquely labels and precisely delineates each object, crucial for tasks like object detection and medical imaging.\n", "\n", "There are two types of instance segmentation tracking available in the Ultralytics package:\n", "\n", @@ -144,7 +144,7 @@ "track_history = defaultdict(lambda: [])\n", "\n", "# Load the YOLO model with segmentation capabilities\n", - "model = YOLO(\"yolov8n-seg.pt\")\n", + "model = YOLO(\"yolo11n-seg.pt\")\n", "\n", "# Open the video file\n", "cap = cv2.VideoCapture(\"path/to/video/file.mp4\")\n", @@ -214,15 +214,15 @@ "- [About Us](https://ultralytics.com/about): Discover our mission, vision, and the story behind Ultralytics.\n", "- [Join Our Team](https://ultralytics.com/work): Explore career opportunities and join our team of talented professionals.\n", "\n", - "## YOLOv8 🚀 Resources\n", + "## YOLO11 🚀 Resources\n", "\n", - "YOLOv8 is the latest evolution in the YOLO series, offering state-of-the-art performance in object detection and image segmentation. Here are some essential resources to help you get started with YOLOv8:\n", + "YOLO11 is the latest evolution in the YOLO series, offering state-of-the-art performance in object detection and image segmentation. Here are some essential resources to help you get started with YOLO11:\n", "\n", - "- [GitHub](https://github.com/ultralytics/ultralytics): Access the YOLOv8 repository on GitHub, where you can find the source code, contribute to the project, and report issues.\n", - "- [Docs](https://docs.ultralytics.com/): Explore the official documentation for YOLOv8, including installation guides, tutorials, and detailed API references.\n", + "- [GitHub](https://github.com/ultralytics/ultralytics): Access the YOLO11 repository on GitHub, where you can find the source code, contribute to the project, and report issues.\n", + "- [Docs](https://docs.ultralytics.com/): Explore the official documentation for YOLO11, including installation guides, tutorials, and detailed API references.\n", "- [Discord](https://ultralytics.com/discord): Join our Discord community to connect with other users, share your projects, and get help from the Ultralytics team.\n", "\n", - "These resources are designed to help you leverage the full potential of Ultralytics' offerings and YOLOv8. 
Whether you're a beginner or an experienced developer, you'll find the information and support you need to succeed." + "These resources are designed to help you leverage the full potential of Ultralytics' offerings and YOLO11. Whether you're a beginner or an experienced developer, you'll find the information and support you need to succeed." ] } ], diff --git a/examples/tutorial.ipynb b/examples/tutorial.ipynb index 8fbe02d018..0992abf460 100644 --- a/examples/tutorial.ipynb +++ b/examples/tutorial.ipynb @@ -3,7 +3,7 @@ "nbformat_minor": 0, "metadata": { "colab": { - "name": "YOLOv8 Tutorial", + "name": "YOLO11 Tutorial", "provenance": [], "toc_visible": true }, @@ -36,11 +36,11 @@ " \"Ultralytics\n", " \"Ultralytics\n", "\n", - "Welcome to the Ultralytics YOLOv8 🚀 notebook! YOLOv8 is the latest version of the YOLO (You Only Look Once) AI models developed by Ultralytics. This notebook serves as the starting point for exploring the various resources available to help you get started with YOLOv8 and understand its features and capabilities.\n", + "Welcome to the Ultralytics YOLO11 🚀 notebook! YOLO11 is the latest version of the YOLO (You Only Look Once) AI models developed by Ultralytics. This notebook serves as the starting point for exploring the various resources available to help you get started with YOLO11 and understand its features and capabilities.\n", "\n", - "YOLOv8 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n", + "YOLO11 models are fast, accurate, and easy to use, making them ideal for various object detection and image segmentation tasks. They can be trained on large datasets and run on diverse hardware platforms, from CPUs to GPUs.\n", "\n", - "We hope that the resources in this notebook will help you get the most out of YOLOv8. Please browse the YOLOv8 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", + "We hope that the resources in this notebook will help you get the most out of YOLO11. Please browse the YOLO11 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", "\n", "" ] @@ -65,21 +65,21 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "96335d4c-20a9-4864-f7a4-bb2eb0077a9d" + "outputId": "2e992f9f-90bb-4668-de12-fed629975285" }, "source": [ "%pip install ultralytics\n", "import ultralytics\n", "ultralytics.checks()" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "Ultralytics YOLOv8.2.3 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n", - "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 28.8/78.2 GB disk)\n" + "Ultralytics 8.3.2 🚀 Python-3.10.12 torch-2.4.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n", + "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 41.1/112.6 GB disk)\n" ] } ] @@ -92,7 +92,7 @@ "source": [ "# 1. Predict\n", "\n", - "YOLOv8 may be used directly in the Command Line Interface (CLI) with a `yolo` command for a variety of tasks and modes and accepts additional arguments, i.e. `imgsz=640`. 
See a full list of available `yolo` [arguments](https://docs.ultralytics.com/usage/cfg/) and other details in the [YOLOv8 Predict Docs](https://docs.ultralytics.com/modes/train/).\n" + "YOLO11 may be used directly in the Command Line Interface (CLI) with a `yolo` command for a variety of tasks and modes and accepts additional arguments, i.e. `imgsz=640`. See a full list of available `yolo` [arguments](https://docs.ultralytics.com/usage/cfg/) and other details in the [YOLO11 Predict Docs](https://docs.ultralytics.com/modes/predict/).\n" ] }, { @@ -102,27 +102,27 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "84f32db2-80b0-4f35-9a2a-a56d11f7863f" + "outputId": "e3ebec6f-658a-4803-d80c-e07d12908767" }, "source": [ - "# Run inference on an image with YOLOv8n\n", - "!yolo predict model=yolov8n.pt source='https://ultralytics.com/images/zidane.jpg'" + "# Run inference on an image with YOLO11n\n", + "!yolo predict model=yolo11n.pt source='https://ultralytics.com/images/zidane.jpg'" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "Downloading https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt to 'yolov8n.pt'...\n", - "100% 6.23M/6.23M [00:00<00:00, 83.2MB/s]\n", - "Ultralytics YOLOv8.2.3 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n", - "YOLOv8n summary (fused): 168 layers, 3151904 parameters, 0 gradients, 8.7 GFLOPs\n", + "Downloading https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt to 'yolo11n.pt'...\n", + "100% 5.35M/5.35M [00:00<00:00, 72.7MB/s]\n", + "Ultralytics 8.3.2 🚀 Python-3.10.12 torch-2.4.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n", + "YOLO11n summary (fused): 238 layers, 2,616,248 parameters, 0 gradients, 6.5 GFLOPs\n", "\n", "Downloading https://ultralytics.com/images/zidane.jpg to 'zidane.jpg'...\n", - "100% 165k/165k [00:00<00:00, 11.1MB/s]\n", - "image 1/1 /content/zidane.jpg: 384x640 2 persons, 1 tie, 21.4ms\n", - "Speed: 1.9ms preprocess, 21.4ms inference, 6.2ms postprocess per image at shape (1, 3, 384, 640)\n", + "100% 49.2k/49.2k [00:00<00:00, 5.37MB/s]\n", + "image 1/1 /content/zidane.jpg: 384x640 2 persons, 1 tie, 63.4ms\n", + "Speed: 14.5ms preprocess, 63.4ms inference, 820.9ms postprocess per image at shape (1, 3, 384, 640)\n", "Results saved to \u001b[1mruns/detect/predict\u001b[0m\n", "💡 Learn more at https://docs.ultralytics.com/modes/predict\n" ] } ] }, @@ -146,7 +146,7 @@ }, "source": [ "# 2. Val\n", - "Validate a model's accuracy on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset's `val` or `test` splits. The latest YOLOv8 [models](https://github.com/ultralytics/ultralytics#models) are downloaded automatically the first time they are used. See [YOLOv8 Val Docs](https://docs.ultralytics.com/modes/val/) for more information." + "Validate a model's accuracy on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset's `val` or `test` splits. The latest YOLO11 [models](https://github.com/ultralytics/ultralytics#models) are downloaded automatically the first time they are used. See [YOLO11 Val Docs](https://docs.ultralytics.com/modes/val/) for more information."
] }, { @@ -167,43 +167,43 @@ "cell_type": "code", "metadata": { "id": "X58w8JLpMnjH", - "outputId": "bed10d45-ceb6-4b6f-86b7-9428208b142a", + "outputId": "af2a5deb-029b-466d-96a4-bd3e406987fa", "colab": { "base_uri": "https://localhost:8080/" } }, "source": [ - "# Validate YOLOv8n on COCO8 val\n", - "!yolo val model=yolov8n.pt data=coco8.yaml" + "# Validate YOLO11n on COCO8 val\n", + "!yolo val model=yolo11n.pt data=coco8.yaml" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "Ultralytics YOLOv8.2.3 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n", - "YOLOv8n summary (fused): 168 layers, 3151904 parameters, 0 gradients, 8.7 GFLOPs\n", + "Ultralytics 8.3.2 🚀 Python-3.10.12 torch-2.4.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n", + "YOLO11n summary (fused): 238 layers, 2,616,248 parameters, 0 gradients, 6.5 GFLOPs\n", "\n", "Dataset 'coco8.yaml' images not found ⚠️, missing path '/content/datasets/coco8/images/val'\n", "Downloading https://ultralytics.com/assets/coco8.zip to '/content/datasets/coco8.zip'...\n", - "100% 433k/433k [00:00<00:00, 14.2MB/s]\n", - "Unzipping /content/datasets/coco8.zip to /content/datasets/coco8...: 100% 25/25 [00:00<00:00, 1093.93file/s]\n", - "Dataset download success ✅ (1.3s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 433k/433k [00:00<00:00, 15.8MB/s]\n", + "Unzipping /content/datasets/coco8.zip to /content/datasets/coco8...: 100% 25/25 [00:00<00:00, 1188.35file/s]\n", + "Dataset download success ✅ (1.4s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", "Downloading https://ultralytics.com/assets/Arial.ttf to '/root/.config/Ultralytics/Arial.ttf'...\n", - "100% 755k/755k [00:00<00:00, 17.4MB/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco8/labels/val... 4 images, 0 backgrounds, 0 corrupt: 100% 4/4 [00:00<00:00, 157.00it/s]\n", + "100% 755k/755k [00:00<00:00, 17.7MB/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco8/labels/val... 4 images, 0 backgrounds, 0 corrupt: 100% 4/4 [00:00<00:00, 142.04it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco8/labels/val.cache\n", - " Class Images Instances Box(P R mAP50 mAP50-95): 100% 1/1 [00:06<00:00, 6.89s/it]\n", - " all 4 17 0.621 0.833 0.888 0.63\n", - " person 4 10 0.721 0.5 0.519 0.269\n", - " dog 4 1 0.37 1 0.995 0.597\n", - " horse 4 2 0.751 1 0.995 0.631\n", - " elephant 4 2 0.505 0.5 0.828 0.394\n", - " umbrella 4 1 0.564 1 0.995 0.995\n", - " potted plant 4 1 0.814 1 0.995 0.895\n", - "Speed: 0.3ms preprocess, 4.9ms inference, 0.0ms loss, 1.3ms postprocess per image\n", + " Class Images Instances Box(P R mAP50 mAP50-95): 100% 1/1 [00:04<00:00, 4.75s/it]\n", + " all 4 17 0.57 0.85 0.847 0.632\n", + " person 3 10 0.557 0.6 0.585 0.272\n", + " dog 1 1 0.548 1 0.995 0.697\n", + " horse 1 2 0.531 1 0.995 0.674\n", + " elephant 1 2 0.371 0.5 0.516 0.256\n", + " umbrella 1 1 0.569 1 0.995 0.995\n", + " potted plant 1 1 0.847 1 0.995 0.895\n", + "Speed: 1.0ms preprocess, 73.8ms inference, 0.0ms loss, 561.4ms postprocess per image\n", "Results saved to \u001b[1mruns/detect/val\u001b[0m\n", "💡 Learn more at https://docs.ultralytics.com/modes/val\n" ] @@ -220,13 +220,13 @@ "\n", "

\n", "\n", - "Train YOLOv8 on [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/), [Classify](https://docs.ultralytics.com/tasks/classify/) and [Pose](https://docs.ultralytics.com/tasks/pose/) datasets. See [YOLOv8 Train Docs](https://docs.ultralytics.com/modes/train/) for more information." + "Train YOLO11 on [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/), [Classify](https://docs.ultralytics.com/tasks/classify/) and [Pose](https://docs.ultralytics.com/tasks/pose/) datasets. See [YOLO11 Train Docs](https://docs.ultralytics.com/modes/train/) for more information." ] }, { "cell_type": "code", "source": [ - "#@title Select YOLOv8 🚀 logger {run: 'auto'}\n", + "#@title Select YOLO11 🚀 logger {run: 'auto'}\n", "logger = 'Comet' #@param ['Comet', 'TensorBoard']\n", "\n", "if logger == 'Comet':\n", @@ -246,64 +246,62 @@ "cell_type": "code", "metadata": { "id": "1NcFxRcFdJ_O", - "outputId": "9f60c6cb-fa9c-4785-cb7a-71d40abeaf38", + "outputId": "952f35f7-666f-4121-fbdf-2b3a33b28081", "colab": { "base_uri": "https://localhost:8080/" } }, "source": [ - "# Train YOLOv8n on COCO8 for 3 epochs\n", - "!yolo train model=yolov8n.pt data=coco8.yaml epochs=3 imgsz=640" + "# Train YOLO11n on COCO8 for 3 epochs\n", + "!yolo train model=yolo11n.pt data=coco8.yaml epochs=3 imgsz=640" ], - "execution_count": null, + "execution_count": 7, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "Ultralytics YOLOv8.2.3 🚀 Python-3.10.12 torch-2.2.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n", - "\u001b[34m\u001b[1mengine/trainer: \u001b[0mtask=detect, mode=train, model=yolov8n.pt, data=coco8.yaml, epochs=3, time=None, patience=100, batch=16, imgsz=640, save=True, save_period=-1, cache=False, device=None, workers=8, project=None, name=train, exist_ok=False, pretrained=True, optimizer=auto, verbose=True, seed=0, deterministic=True, single_cls=False, rect=False, cos_lr=False, close_mosaic=10, resume=False, amp=True, fraction=1.0, profile=False, freeze=None, multi_scale=False, overlap_mask=True, mask_ratio=4, dropout=0.0, val=True, split=val, save_json=False, save_hybrid=False, conf=None, iou=0.7, max_det=300, half=False, dnn=False, plots=True, source=None, vid_stride=1, stream_buffer=False, visualize=False, augment=False, agnostic_nms=False, classes=None, retina_masks=False, embed=None, show=False, save_frames=False, save_txt=False, save_conf=False, save_crop=False, show_labels=True, show_conf=True, show_boxes=True, line_width=None, format=torchscript, keras=False, optimize=False, int8=False, dynamic=False, simplify=False, opset=None, workspace=4, nms=False, lr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, dfl=1.5, pose=12.0, kobj=1.0, label_smoothing=0.0, nbs=64, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, bgr=0.0, mosaic=1.0, mixup=0.0, copy_paste=0.0, auto_augment=randaugment, erasing=0.4, crop_fraction=1.0, cfg=None, tracker=botsort.yaml, save_dir=runs/detect/train\n", + "Ultralytics 8.3.2 🚀 Python-3.10.12 torch-2.4.1+cu121 CUDA:0 (Tesla T4, 15102MiB)\n", + "\u001b[34m\u001b[1mengine/trainer: \u001b[0mtask=detect, mode=train, model=yolo11n.pt, data=coco8.yaml, epochs=3, time=None, patience=100, batch=16, imgsz=640, save=True, save_period=-1, cache=False, device=None, workers=8, project=None, name=train3, exist_ok=False, 
pretrained=True, optimizer=auto, verbose=True, seed=0, deterministic=True, single_cls=False, rect=False, cos_lr=False, close_mosaic=10, resume=False, amp=True, fraction=1.0, profile=False, freeze=None, multi_scale=False, overlap_mask=True, mask_ratio=4, dropout=0.0, val=True, split=val, save_json=False, save_hybrid=False, conf=None, iou=0.7, max_det=300, half=False, dnn=False, plots=True, source=None, vid_stride=1, stream_buffer=False, visualize=False, augment=False, agnostic_nms=False, classes=None, retina_masks=False, embed=None, show=False, save_frames=False, save_txt=False, save_conf=False, save_crop=False, show_labels=True, show_conf=True, show_boxes=True, line_width=None, format=torchscript, keras=False, optimize=False, int8=False, dynamic=False, simplify=True, opset=None, workspace=4, nms=False, lr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, dfl=1.5, pose=12.0, kobj=1.0, label_smoothing=0.0, nbs=64, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, bgr=0.0, mosaic=1.0, mixup=0.0, copy_paste=0.0, copy_paste_mode=flip, auto_augment=randaugment, erasing=0.4, crop_fraction=1.0, cfg=None, tracker=botsort.yaml, save_dir=runs/detect/train3\n", "\n", " from n params module arguments \n", " 0 -1 1 464 ultralytics.nn.modules.conv.Conv [3, 16, 3, 2] \n", " 1 -1 1 4672 ultralytics.nn.modules.conv.Conv [16, 32, 3, 2] \n", - " 2 -1 1 7360 ultralytics.nn.modules.block.C2f [32, 32, 1, True] \n", - " 3 -1 1 18560 ultralytics.nn.modules.conv.Conv [32, 64, 3, 2] \n", - " 4 -1 2 49664 ultralytics.nn.modules.block.C2f [64, 64, 2, True] \n", - " 5 -1 1 73984 ultralytics.nn.modules.conv.Conv [64, 128, 3, 2] \n", - " 6 -1 2 197632 ultralytics.nn.modules.block.C2f [128, 128, 2, True] \n", + " 2 -1 1 6640 ultralytics.nn.modules.block.C3k2 [32, 64, 1, False, 0.25] \n", + " 3 -1 1 36992 ultralytics.nn.modules.conv.Conv [64, 64, 3, 2] \n", + " 4 -1 1 26080 ultralytics.nn.modules.block.C3k2 [64, 128, 1, False, 0.25] \n", + " 5 -1 1 147712 ultralytics.nn.modules.conv.Conv [128, 128, 3, 2] \n", + " 6 -1 1 87040 ultralytics.nn.modules.block.C3k2 [128, 128, 1, True] \n", " 7 -1 1 295424 ultralytics.nn.modules.conv.Conv [128, 256, 3, 2] \n", - " 8 -1 1 460288 ultralytics.nn.modules.block.C2f [256, 256, 1, True] \n", + " 8 -1 1 346112 ultralytics.nn.modules.block.C3k2 [256, 256, 1, True] \n", " 9 -1 1 164608 ultralytics.nn.modules.block.SPPF [256, 256, 5] \n", - " 10 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 11 [-1, 6] 1 0 ultralytics.nn.modules.conv.Concat [1] \n", - " 12 -1 1 148224 ultralytics.nn.modules.block.C2f [384, 128, 1] \n", - " 13 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 14 [-1, 4] 1 0 ultralytics.nn.modules.conv.Concat [1] \n", - " 15 -1 1 37248 ultralytics.nn.modules.block.C2f [192, 64, 1] \n", - " 16 -1 1 36992 ultralytics.nn.modules.conv.Conv [64, 64, 3, 2] \n", - " 17 [-1, 12] 1 0 ultralytics.nn.modules.conv.Concat [1] \n", - " 18 -1 1 123648 ultralytics.nn.modules.block.C2f [192, 128, 1] \n", - " 19 -1 1 147712 ultralytics.nn.modules.conv.Conv [128, 128, 3, 2] \n", - " 20 [-1, 9] 1 0 ultralytics.nn.modules.conv.Concat [1] \n", - " 21 -1 1 493056 ultralytics.nn.modules.block.C2f [384, 256, 1] \n", - " 22 [15, 18, 21] 1 897664 ultralytics.nn.modules.head.Detect [80, [64, 128, 256]] \n", - "Model summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs\n", + " 10 
-1 1 249728 ultralytics.nn.modules.block.C2PSA [256, 256, 1] \n", + " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 12 [-1, 6] 1 0 ultralytics.nn.modules.conv.Concat [1] \n", + " 13 -1 1 111296 ultralytics.nn.modules.block.C3k2 [384, 128, 1, False] \n", + " 14 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 15 [-1, 4] 1 0 ultralytics.nn.modules.conv.Concat [1] \n", + " 16 -1 1 32096 ultralytics.nn.modules.block.C3k2 [256, 64, 1, False] \n", + " 17 -1 1 36992 ultralytics.nn.modules.conv.Conv [64, 64, 3, 2] \n", + " 18 [-1, 13] 1 0 ultralytics.nn.modules.conv.Concat [1] \n", + " 19 -1 1 86720 ultralytics.nn.modules.block.C3k2 [192, 128, 1, False] \n", + " 20 -1 1 147712 ultralytics.nn.modules.conv.Conv [128, 128, 3, 2] \n", + " 21 [-1, 10] 1 0 ultralytics.nn.modules.conv.Concat [1] \n", + " 22 -1 1 378880 ultralytics.nn.modules.block.C3k2 [384, 256, 1, True] \n", + " 23 [16, 19, 22] 1 464912 ultralytics.nn.modules.head.Detect [80, [64, 128, 256]] \n", + "YOLO11n summary: 319 layers, 2,624,080 parameters, 2,624,064 gradients, 6.6 GFLOPs\n", "\n", - "Transferred 355/355 items from pretrained weights\n", + "Transferred 499/499 items from pretrained weights\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/detect/train', view at http://localhost:6006/\n", - "Freezing layer 'model.22.dfl.conv.weight'\n", - "\u001b[34m\u001b[1mAMP: \u001b[0mrunning Automatic Mixed Precision (AMP) checks with YOLOv8n...\n", + "Freezing layer 'model.23.dfl.conv.weight'\n", + "\u001b[34m\u001b[1mAMP: \u001b[0mrunning Automatic Mixed Precision (AMP) checks with YOLO11n...\n", "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco8/labels/train... 4 images, 0 backgrounds, 0 corrupt: 100% 4/4 [00:00<00:00, 837.19it/s]\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco8/labels/train.cache\n", - "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "/usr/lib/python3.10/multiprocessing/popen_fork.py:66: RuntimeWarning: os.fork() was called. os.fork() is incompatible with multithreaded code, and JAX is multithreaded, so this will likely lead to a deadlock.\n", - " self.pid = os.fork()\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco8/labels/train.cache... 4 images, 0 backgrounds, 0 corrupt: 100% 4/4 [00:00\n" ], @@ -463,7 +461,7 @@ "source": [ "## 1. Detection\n", "\n", - "YOLOv8 _detection_ models have no suffix and are the default YOLOv8 models, i.e. `yolov8n.pt` and are pretrained on COCO. See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for full details.\n" + "YOLO11 _detection_ models have no suffix and are the default YOLO11 models, i.e. `yolo11n.pt` and are pretrained on COCO. 
See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for full details.\n" ], "metadata": { "id": "yq26lwpYK1lq" } }, { "cell_type": "code", "source": [ - "# Load YOLOv8n, train it on COCO128 for 3 epochs and predict an image with it\n", + "# Load YOLO11n, train it on COCO8 for 3 epochs and predict an image with it\n", "from ultralytics import YOLO\n", "\n", - "model = YOLO('yolov8n.pt') # load a pretrained YOLOv8n detection model\n", + "model = YOLO('yolo11n.pt') # load a pretrained YOLO detection model\n", "model.train(data='coco8.yaml', epochs=3) # train the model\n", "model('https://ultralytics.com/images/bus.jpg') # predict on an image" ], @@ -490,7 +488,7 @@ "source": [ "## 2. Segmentation\n", "\n", - "YOLOv8 _segmentation_ models use the `-seg` suffix, i.e. `yolov8n-seg.pt` and are pretrained on COCO. See [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for full details.\n" + "YOLO11 _segmentation_ models use the `-seg` suffix, i.e. `yolo11n-seg.pt` and are pretrained on COCO. See [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for full details.\n" ], "metadata": { "id": "7ZW58jUzK66B" } }, { "cell_type": "code", "source": [ - "# Load YOLOv8n-seg, train it on COCO128-seg for 3 epochs and predict an image with it\n", + "# Load YOLO11n-seg, train it on COCO8-seg for 3 epochs and predict an image with it\n", "from ultralytics import YOLO\n", "\n", - "model = YOLO('yolov8n-seg.pt') # load a pretrained YOLOv8n segmentation model\n", + "model = YOLO('yolo11n-seg.pt') # load a pretrained YOLO segmentation model\n", "model.train(data='coco8-seg.yaml', epochs=3) # train the model\n", "model('https://ultralytics.com/images/bus.jpg') # predict on an image" ], @@ -517,7 +515,7 @@ "source": [ "## 3. Classification\n", "\n", - "YOLOv8 _classification_ models use the `-cls` suffix, i.e. `yolov8n-cls.pt` and are pretrained on ImageNet. See [Classification Docs](https://docs.ultralytics.com/tasks/classify/) for full details.\n" + "YOLO11 _classification_ models use the `-cls` suffix, i.e. `yolo11n-cls.pt` and are pretrained on ImageNet. See [Classification Docs](https://docs.ultralytics.com/tasks/classify/) for full details.\n" ], "metadata": { "id": "ax3p94VNK9zR" } }, { "cell_type": "code", "source": [ - "# Load YOLOv8n-cls, train it on mnist160 for 3 epochs and predict an image with it\n", + "# Load YOLO11n-cls, train it on mnist160 for 3 epochs and predict an image with it\n", "from ultralytics import YOLO\n", "\n", - "model = YOLO('yolov8n-cls.pt') # load a pretrained YOLOv8n classification model\n", + "model = YOLO('yolo11n-cls.pt') # load a pretrained YOLO classification model\n", "model.train(data='mnist160', epochs=3) # train the model\n", "model('https://ultralytics.com/images/bus.jpg') # predict on an image" ], @@ -544,7 +542,7 @@ "source": [ "## 4. Pose\n", "\n", - "YOLOv8 _pose_ models use the `-pose` suffix, i.e. `yolov8n-pose.pt` and are pretrained on COCO Keypoints. See [Pose Docs](https://docs.ultralytics.com/tasks/pose/) for full details." + "YOLO11 _pose_ models use the `-pose` suffix, i.e. `yolo11n-pose.pt` and are pretrained on COCO Keypoints. See [Pose Docs](https://docs.ultralytics.com/tasks/pose/) for full details."
], "metadata": { "id": "SpIaFLiO11TG" @@ -553,10 +551,10 @@ { "cell_type": "code", "source": [ - "# Load YOLOv8n-pose, train it on COCO8-pose for 3 epochs and predict an image with it\n", + "# Load YOLO11n-pose, train it on COCO8-pose for 3 epochs and predict an image with it\n", "from ultralytics import YOLO\n", "\n", - "model = YOLO('yolov8n-pose.pt') # load a pretrained YOLOv8n pose model\n", + "model = YOLO('yolo11n-pose.pt') # load a pretrained YOLO pose model\n", "model.train(data='coco8-pose.yaml', epochs=3) # train the model\n", "model('https://ultralytics.com/images/bus.jpg') # predict on an image" ], @@ -571,7 +569,7 @@ "source": [ "## 4. Oriented Bounding Boxes (OBB)\n", "\n", - "YOLOv8 _OBB_ models use the `-obb` suffix, i.e. `yolov8n-obb.pt` and are pretrained on the DOTA dataset. See [OBB Docs](https://docs.ultralytics.com/tasks/obb/) for full details." + "YOLO11 _OBB_ models use the `-obb` suffix, i.e. `yolo11n-obb.pt` and are pretrained on the DOTA dataset. See [OBB Docs](https://docs.ultralytics.com/tasks/obb/) for full details." ], "metadata": { "id": "cf5j_T9-B5F0" @@ -580,10 +578,10 @@ { "cell_type": "code", "source": [ - "# Load YOLOv8n-obb, train it on DOTA8 for 3 epochs and predict an image with it\n", + "# Load YOLO11n-obb, train it on DOTA8 for 3 epochs and predict an image with it\n", "from ultralytics import YOLO\n", "\n", - "model = YOLO('yolov8n-obb.pt') # load a pretrained YOLOv8n OBB model\n", + "model = YOLO('yolo11n-obb.pt') # load a pretrained YOLO OBB model\n", "model.train(data='coco8-dota.yaml', epochs=3) # train the model\n", "model('https://ultralytics.com/images/bus.jpg') # predict on an image" ], @@ -646,7 +644,7 @@ "source": [ "# Validate multiple models\n", "for x in 'nsmlx':\n", - " !yolo val model=yolov8{x}.pt data=coco.yaml" + " !yolo val model=yolo11{x}.pt data=coco.yaml" ], "metadata": { "id": "Wdc6t_bfzDDk" diff --git a/mkdocs.yml b/mkdocs.yml index 1ec7c9c22e..f3e7d9d106 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -251,6 +251,7 @@ nav: - YOLOv8: models/yolov8.md - YOLOv9: models/yolov9.md - YOLOv10: models/yolov10.md + - NEW 🚀 YOLO11: models/yolo11.md - SAM (Segment Anything Model): models/sam.md - SAM 2 (Segment Anything Model 2): models/sam-2.md - MobileSAM (Mobile Segment Anything Model): models/mobile-sam.md @@ -294,6 +295,7 @@ nav: - COCO: datasets/pose/coco.md - COCO8-pose: datasets/pose/coco8-pose.md - Tiger-pose: datasets/pose/tiger-pose.md + - Hand-keypoints: datasets/pose/hand-keypoints.md - Classification: - datasets/classify/index.md - Caltech 101: datasets/classify/caltech101.md diff --git a/pyproject.toml b/pyproject.toml index 51593636dc..0a88360baa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,14 +30,14 @@ description = "Ultralytics YOLO for SOTA object detection, multi-object tracking readme = "README.md" requires-python = ">=3.8" license = { "text" = "AGPL-3.0" } -keywords = ["machine-learning", "deep-learning", "computer-vision", "ML", "DL", "AI", "YOLO", "YOLOv3", "YOLOv5", "YOLOv8", "YOLOv9", "YOLOv10", "HUB", "Ultralytics"] +keywords = ["machine-learning", "deep-learning", "computer-vision", "ML", "DL", "AI", "YOLO", "YOLOv3", "YOLOv5", "YOLOv8", "YOLOv9", "YOLOv10", "YOLO11", "HUB", "Ultralytics"] authors = [ - { name = "Glenn Jocher", email = "glenn.jocher@ultralytics.com"}, - { name = "Jing Qiu", email = "jing.qiu@ultralytics.com"}, - { name = "Ayush Chaurasia" } + { name = "Glenn Jocher", email = "glenn.jocher@ultralytics.com" }, + { name = "Jing Qiu", email = "jing.qiu@ultralytics.com" }, + { name = 
"Ayush Chaurasia" }, ] maintainers = [ - { name = "Ultralytics", email = "hello@ultralytics.com" } + { name = "Ultralytics", email = "hello@ultralytics.com" }, ] classifiers = [ "Development Status :: 4 - Beta", @@ -62,7 +62,7 @@ classifiers = [ # Required dependencies ------------------------------------------------------------------------------------------------ dependencies = [ - "numpy>=1.23.0,<2.0.0", # temporary patch for compat errors https://github.com/ultralytics/yolov5/actions/runs/9538130424/job/26286956354 + "numpy>=1.23.0", # temporary patch for compat errors https://github.com/ultralytics/yolov5/actions/runs/9538130424/job/26286956354 "matplotlib>=3.3.0", "opencv-python>=4.6.0", "pillow>=7.1.2", @@ -70,7 +70,7 @@ dependencies = [ "requests>=2.23.0", "scipy>=1.4.1", "torch>=1.8.0", - "torch>=1.8.0,!=2.4.0; sys_platform == 'win32'", # Windows CPU errors w/ 2.4.0 https://github.com/ultralytics/ultralytics/issues/15049 + "torch>=1.8.0,!=2.4.0; sys_platform == 'win32'", # Windows CPU errors w/ 2.4.0 https://github.com/ultralytics/ultralytics/issues/15049 "torchvision>=0.9.0", "tqdm>=4.64.0", # progress bars "psutil", # system utilization @@ -98,11 +98,12 @@ dev = [ export = [ "onnx>=1.12.0", # ONNX export "coremltools>=7.0; platform_system != 'Windows' and python_version <= '3.11'", # CoreML supported on macOS and Linux + "scikit-learn>=1.3.2; platform_system != 'Windows' and python_version <= '3.11'", # CoreML k-means quantization "openvino>=2024.0.0", # OpenVINO export "tensorflow>=2.0.0", # TF bug https://github.com/ultralytics/ultralytics/issues/5161 "tensorflowjs>=3.9.0", # TF.js export, automatically installs tensorflow - "tensorstore>=0.1.63; platform_machine == 'aarch64' and python_version >= '3.9'", # for TF Raspberry Pi exports - "keras", # not installed automatically by tensorflow>=2.16 + "tensorstore>=0.1.63; platform_machine == 'aarch64' and python_version >= '3.9'", # for TF Raspberry Pi exports + "keras", # not installed automatically by tensorflow>=2.16 "flatbuffers>=23.5.26,<100; platform_machine == 'aarch64'", # update old 'flatbuffers' included inside tensorflow package "numpy==1.23.5; platform_machine == 'aarch64'", # fix error: `np.bool` was a deprecated alias for the builtin `bool` when using TensorRT models on NVIDIA Jetson "h5py!=3.11.0; platform_machine == 'aarch64'", # fix h5py build issues due to missing aarch64 wheels in 3.11 release @@ -118,7 +119,7 @@ logging = [ "dvclive>=2.12.0", ] extra = [ - "hub-sdk>=0.0.8", # Ultralytics HUB + "hub-sdk>=0.0.12", # Ultralytics HUB "ipython", # interactive notebook "albumentations>=1.4.6", # training augmentations "pycocotools>=2.0.7", # COCO mAP @@ -129,7 +130,7 @@ extra = [ "Source" = "https://github.com/ultralytics/ultralytics" "Documentation" = "https://docs.ultralytics.com" "Bug Reports" = "https://github.com/ultralytics/ultralytics/issues" -"Changelog" = "https://github.com/ultralytics/ultralytics/releases" +"Changelog" = "https://github.com/ultralytics/ultralytics/releases" [project.scripts] yolo = "ultralytics.cfg:entrypoint" diff --git a/tests/__init__.py b/tests/__init__.py index ea6b398292..ea8afff5a8 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -3,8 +3,8 @@ from ultralytics.utils import ASSETS, ROOT, WEIGHTS_DIR, checks # Constants used in tests -MODEL = WEIGHTS_DIR / "path with spaces" / "yolov8n.pt" # test spaces in path -CFG = "yolov8n.yaml" +MODEL = WEIGHTS_DIR / "path with spaces" / "yolo11n.pt" # test spaces in path +CFG = "yolo11n.yaml" SOURCE = ASSETS / "bus.jpg" SOURCES_LIST = 
[ASSETS / "bus.jpg", ASSETS, ASSETS / "*", ASSETS / "**/*.jpg"] TMP = (ROOT / "../tests/tmp").resolve() # temp directory for test files diff --git a/tests/conftest.py b/tests/conftest.py index faba91b2be..7b0539b467 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -74,7 +74,7 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config): # Remove files models = [path for x in ["*.onnx", "*.torchscript"] for path in WEIGHTS_DIR.rglob(x)] - for file in ["bus.jpg", "yolov8n.onnx", "yolov8n.torchscript"] + models: + for file in ["bus.jpg", "yolo11n.onnx", "yolo11n.torchscript"] + models: Path(file).unlink(missing_ok=True) # Remove directories diff --git a/tests/test_cuda.py b/tests/test_cuda.py index 3c3ba174d1..3b08edc699 100644 --- a/tests/test_cuda.py +++ b/tests/test_cuda.py @@ -10,6 +10,7 @@ from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE, MODEL, SOURCE from ultralytics import YOLO from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS from ultralytics.utils import ASSETS, WEIGHTS_DIR +from ultralytics.utils.checks import check_amp def test_checks(): @@ -18,6 +19,13 @@ def test_checks(): assert torch.cuda.device_count() == CUDA_DEVICE_COUNT +@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available") +def test_amp(): + """Test AMP training checks.""" + model = YOLO("yolo11n.pt").model.cuda() + assert check_amp(model) + + @pytest.mark.slow @pytest.mark.skipif(True, reason="CUDA export tests disabled pending additional Ultralytics GPU server availability") @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available") @@ -60,7 +68,7 @@ def test_train(): @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available") def test_predict_multiple_devices(): """Validate model prediction consistency across CPU and CUDA devices.""" - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") model = model.cpu() assert str(model.device) == "cpu" _ = model(SOURCE) # CPU inference diff --git a/tests/test_engine.py b/tests/test_engine.py index 92373044ef..aa4b671eaa 100644 --- a/tests/test_engine.py +++ b/tests/test_engine.py @@ -21,13 +21,13 @@ def test_export(): exporter = Exporter() exporter.add_callback("on_export_start", test_func) assert test_func in exporter.callbacks["on_export_start"], "callback test failed" - f = exporter(model=YOLO("yolov8n.yaml").model) + f = exporter(model=YOLO("yolo11n.yaml").model) YOLO(f)(ASSETS) # exported model inference def test_detect(): """Test YOLO object detection training, validation, and prediction functionality.""" - overrides = {"data": "coco8.yaml", "model": "yolov8n.yaml", "imgsz": 32, "epochs": 1, "save": False} + overrides = {"data": "coco8.yaml", "model": "yolo11n.yaml", "imgsz": 32, "epochs": 1, "save": False} cfg = get_cfg(DEFAULT_CFG) cfg.data = "coco8.yaml" cfg.imgsz = 32 @@ -66,7 +66,7 @@ def test_detect(): def test_segment(): """Tests image segmentation training, validation, and prediction pipelines using YOLO models.""" - overrides = {"data": "coco8-seg.yaml", "model": "yolov8n-seg.yaml", "imgsz": 32, "epochs": 1, "save": False} + overrides = {"data": "coco8-seg.yaml", "model": "yolo11n-seg.yaml", "imgsz": 32, "epochs": 1, "save": False} cfg = get_cfg(DEFAULT_CFG) cfg.data = "coco8-seg.yaml" cfg.imgsz = 32 @@ -88,7 +88,7 @@ def test_segment(): pred = segment.SegmentationPredictor(overrides={"imgsz": [64, 64]}) pred.add_callback("on_predict_start", test_func) assert test_func in pred.callbacks["on_predict_start"], "callback test failed" - result = pred(source=ASSETS, model=WEIGHTS_DIR / 
"yolov8n-seg.pt") + result = pred(source=ASSETS, model=WEIGHTS_DIR / "yolo11n-seg.pt") assert len(result), "predictor test failed" # Test resume @@ -105,7 +105,7 @@ def test_segment(): def test_classify(): """Test image classification including training, validation, and prediction phases.""" - overrides = {"data": "imagenet10", "model": "yolov8n-cls.yaml", "imgsz": 32, "epochs": 1, "save": False} + overrides = {"data": "imagenet10", "model": "yolo11n-cls.yaml", "imgsz": 32, "epochs": 1, "save": False} cfg = get_cfg(DEFAULT_CFG) cfg.data = "imagenet10" cfg.imgsz = 32 diff --git a/tests/test_explorer.py b/tests/test_explorer.py index b13bb86828..45b0a31e36 100644 --- a/tests/test_explorer.py +++ b/tests/test_explorer.py @@ -30,7 +30,7 @@ def test_similarity(): @pytest.mark.skipif(not TORCH_1_13, reason="Explorer requires torch>=1.13") def test_det(): """Test detection functionalities and verify embedding table includes bounding boxes.""" - exp = Explorer(data="coco8.yaml", model="yolov8n.pt") + exp = Explorer(data="coco8.yaml", model="yolo11n.pt") exp.create_embeddings_table(force=True) assert len(exp.table.head()["bboxes"]) > 0 similar = exp.get_similar(idx=[1, 2], limit=10) @@ -44,7 +44,7 @@ def test_det(): @pytest.mark.skipif(not TORCH_1_13, reason="Explorer requires torch>=1.13") def test_seg(): """Test segmentation functionalities and ensure the embedding table includes segmentation masks.""" - exp = Explorer(data="coco8-seg.yaml", model="yolov8n-seg.pt") + exp = Explorer(data="coco8-seg.yaml", model="yolo11n-seg.pt") exp.create_embeddings_table(force=True) assert len(exp.table.head()["masks"]) > 0 similar = exp.get_similar(idx=[1, 2], limit=10) @@ -57,7 +57,7 @@ def test_seg(): @pytest.mark.skipif(not TORCH_1_13, reason="Explorer requires torch>=1.13") def test_pose(): """Test pose estimation functionality and verify the embedding table includes keypoints.""" - exp = Explorer(data="coco8-pose.yaml", model="yolov8n-pose.pt") + exp = Explorer(data="coco8-pose.yaml", model="yolo11n-pose.pt") exp.create_embeddings_table(force=True) assert len(exp.table.head()["keypoints"]) > 0 similar = exp.get_similar(idx=[1, 2], limit=10) diff --git a/tests/test_exports.py b/tests/test_exports.py index 98e4049d79..e6e2ec1598 100644 --- a/tests/test_exports.py +++ b/tests/test_exports.py @@ -40,7 +40,6 @@ def test_export_openvino(): @pytest.mark.slow -@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="OpenVINO not supported in Python 3.12") @pytest.mark.skipif(not TORCH_1_13, reason="OpenVINO requires torch>=1.13") @pytest.mark.parametrize( "task, dynamic, int8, half, batch", @@ -187,7 +186,7 @@ def test_export_pb(): YOLO(file)(SOURCE, imgsz=32) -@pytest.mark.skipif(True, reason="Test disabled as Paddle protobuf and ONNX protobuf requirementsk conflict.") +@pytest.mark.skipif(True, reason="Test disabled as Paddle protobuf and ONNX protobuf requirements conflict.") def test_export_paddle(): """Test YOLO exports to Paddle format, noting protobuf conflicts with ONNX.""" YOLO(MODEL).export(format="paddle", imgsz=32) diff --git a/tests/test_integrations.py b/tests/test_integrations.py index 3a0d1b48a7..4c8e066978 100644 --- a/tests/test_integrations.py +++ b/tests/test_integrations.py @@ -17,7 +17,7 @@ from ultralytics.utils.checks import check_requirements @pytest.mark.skipif(not check_requirements("ray", install=False), reason="ray[tune] not installed") def test_model_ray_tune(): """Tune YOLO model using Ray for hyperparameter optimization.""" - YOLO("yolov8n-cls.yaml").tune( + YOLO("yolo11n-cls.yaml").tune( 
use_ray=True, data="imagenet10", grace_period=1, iterations=1, imgsz=32, epochs=1, plots=False, device="cpu" ) @@ -26,7 +26,7 @@ def test_model_ray_tune(): def test_mlflow(): """Test training with MLflow tracking enabled (see https://mlflow.org/ for details).""" SETTINGS["mlflow"] = True - YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu") + YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu") SETTINGS["mlflow"] = False @@ -42,7 +42,7 @@ def test_mlflow_keep_run_active(): # Test with MLFLOW_KEEP_RUN_ACTIVE=True os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "True" - YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu") + YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu") status = mlflow.active_run().info.status assert status == "RUNNING", "MLflow run should be active when MLFLOW_KEEP_RUN_ACTIVE=True" @@ -50,13 +50,13 @@ def test_mlflow_keep_run_active(): # Test with MLFLOW_KEEP_RUN_ACTIVE=False os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "False" - YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu") + YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu") status = mlflow.get_run(run_id=run_id).info.status assert status == "FINISHED", "MLflow run should be ended when MLFLOW_KEEP_RUN_ACTIVE=False" # Test with MLFLOW_KEEP_RUN_ACTIVE not set os.environ.pop("MLFLOW_KEEP_RUN_ACTIVE", None) - YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu") + YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu") status = mlflow.get_run(run_id=run_id).info.status assert status == "FINISHED", "MLflow run should be ended by default when MLFLOW_KEEP_RUN_ACTIVE is not set" SETTINGS["mlflow"] = False @@ -126,23 +126,23 @@ def test_pycocotools(): from ultralytics.models.yolo.segment import SegmentationValidator # Download annotations after each dataset downloads first - url = "https://github.com/ultralytics/assets/releases/download/v8.2.0/" + url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/" - args = {"model": "yolov8n.pt", "data": "coco8.yaml", "save_json": True, "imgsz": 64} + args = {"model": "yolo11n.pt", "data": "coco8.yaml", "save_json": True, "imgsz": 64} validator = DetectionValidator(args=args) validator() validator.is_coco = True download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8/annotations") _ = validator.eval_json(validator.stats) - args = {"model": "yolov8n-seg.pt", "data": "coco8-seg.yaml", "save_json": True, "imgsz": 64} + args = {"model": "yolo11n-seg.pt", "data": "coco8-seg.yaml", "save_json": True, "imgsz": 64} validator = SegmentationValidator(args=args) validator() validator.is_coco = True download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8-seg/annotations") _ = validator.eval_json(validator.stats) - args = {"model": "yolov8n-pose.pt", "data": "coco8-pose.yaml", "save_json": True, "imgsz": 64} + args = {"model": "yolo11n-pose.pt", "data": "coco8-pose.yaml", "save_json": True, "imgsz": 64} validator = PoseValidator(args=args) validator() validator.is_coco = True diff --git a/tests/test_python.py b/tests/test_python.py index 55f087f008..117e6f802e 100644 --- a/tests/test_python.py +++ b/tests/test_python.py @@ -211,7 +211,7 @@ def test_train_scratch(): def test_train_pretrained(): """Test training of the YOLO 
model starting from a pre-trained checkpoint.""" - model = YOLO(WEIGHTS_DIR / "yolov8n-seg.pt") + model = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt") model.train(data="coco8-seg.yaml", epochs=1, imgsz=32, cache="ram", copy_paste=0.5, mixup=0.5, name=0) model(SOURCE) @@ -281,13 +281,13 @@ def test_results(model): def test_labels_and_crops(): """Test output from prediction args for saving YOLO detection labels and crops; ensures accurate saving.""" imgs = [SOURCE, ASSETS / "zidane.jpg"] - results = YOLO(WEIGHTS_DIR / "yolov8n.pt")(imgs, imgsz=160, save_txt=True, save_crop=True) + results = YOLO(WEIGHTS_DIR / "yolo11n.pt")(imgs, imgsz=160, save_txt=True, save_crop=True) save_path = Path(results[0].save_dir) for r in results: im_name = Path(r.path).stem cls_idxs = r.boxes.cls.int().tolist() # Check correct detections - assert cls_idxs == ([0, 0, 5, 0, 7] if r.path.endswith("bus.jpg") else [0, 0]) # bus.jpg and zidane.jpg classes + assert cls_idxs == ([0, 7, 0, 0] if r.path.endswith("bus.jpg") else [0, 0, 0]) # bus.jpg and zidane.jpg classes # Check label path labels = save_path / f"labels/{im_name}.txt" assert labels.exists() @@ -339,7 +339,7 @@ def test_data_annotator(): auto_annotate( ASSETS, - det_model=WEIGHTS_DIR / "yolov8n.pt", + det_model=WEIGHTS_DIR / "yolo11n.pt", sam_model=WEIGHTS_DIR / "mobile_sam.pt", output_dir=TMP / "auto_annotate_labels", ) @@ -393,7 +393,7 @@ def test_utils_benchmarks(): """Benchmark model performance using 'ProfileModels' from 'ultralytics.utils.benchmarks'.""" from ultralytics.utils.benchmarks import ProfileModels - ProfileModels(["yolov8n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile() + ProfileModels(["yolo11n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile() def test_utils_torchutils(): @@ -568,14 +568,14 @@ def test_classify_transforms_train(image, auto_augment, erasing, force_color_jit @pytest.mark.skipif(not ONLINE, reason="environment is offline") def test_model_tune(): """Tune YOLO model for performance improvement.""" - YOLO("yolov8n-pose.pt").tune(data="coco8-pose.yaml", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu") - YOLO("yolov8n-cls.pt").tune(data="imagenet10", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu") + YOLO("yolo11n-pose.pt").tune(data="coco8-pose.yaml", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu") + YOLO("yolo11n-cls.pt").tune(data="imagenet10", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu") def test_model_embeddings(): """Test YOLO model embeddings.""" model_detect = YOLO(MODEL) - model_segment = YOLO(WEIGHTS_DIR / "yolov8n-seg.pt") + model_segment = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt") for batch in [SOURCE], [SOURCE, SOURCE]: # test batch size 1 and 2 assert len(model_detect.embed(source=batch, imgsz=32)) == len(batch) @@ -585,11 +585,11 @@ def test_model_embeddings(): @pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="YOLOWorld with CLIP is not supported in Python 3.12") def test_yolo_world(): """Tests YOLO world models with CLIP support, including detection and training scenarios.""" - model = YOLO("yolov8s-world.pt") # no YOLOv8n-world model yet + model = YOLO("yolov8s-world.pt") # no YOLO11n-world model yet model.set_classes(["tree", "window"]) model(SOURCE, conf=0.01) - model = YOLO("yolov8s-worldv2.pt") # no YOLOv8n-world model yet + model = YOLO("yolov8s-worldv2.pt") # no YOLO11n-world model yet # Training from a pretrained model. Eval is included at the final stage of training. 
# Use dota8.yaml which has fewer categories to reduce the inference time of CLIP model model.train( @@ -603,7 +603,7 @@ # test WorldTrainerFromScratch from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch - model = YOLO("yolov8s-worldv2.yaml") # no YOLOv8n-world model yet + model = YOLO("yolov8s-worldv2.yaml") # no YOLO11n-world model yet model.train( data={"train": {"yolo_data": ["dota8.yaml"]}, "val": {"yolo_data": ["dota8.yaml"]}}, epochs=1, diff --git a/tests/test_solutions.py b/tests/test_solutions.py index fec1d74d0f..fabec621d3 100644 --- a/tests/test_solutions.py +++ b/tests/test_solutions.py @@ -14,7 +14,7 @@ WORKOUTS_SOLUTION_DEMO = "https://github.com/ultralytics/assets/releases/downloa def test_major_solutions(): """Test the object counting, heatmap, speed estimation and queue management solution.""" safe_download(url=MAJOR_SOLUTIONS_DEMO) - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") names = model.names cap = cv2.VideoCapture("solutions_ci_demo.mp4") assert cap.isOpened(), "Error reading video file" @@ -41,7 +41,7 @@ def test_aigym(): """Test the workouts monitoring solution.""" safe_download(url=WORKOUTS_SOLUTION_DEMO) - model = YOLO("yolov8n-pose.pt") + model = YOLO("yolo11n-pose.pt") cap = cv2.VideoCapture("solution_ci_pose_demo.mp4") assert cap.isOpened(), "Error reading video file" gym_object = solutions.AIGym(line_thickness=2, pose_type="squat", kpts_to_check=[5, 11, 13]) @@ -60,7 +60,7 @@ def test_instance_segmentation(): """Test the instance segmentation solution.""" from ultralytics.utils.plotting import Annotator, colors - model = YOLO("yolov8n-seg.pt") + model = YOLO("yolo11n-seg.pt") names = model.names cap = cv2.VideoCapture("solutions_ci_demo.mp4") assert cap.isOpened(), "Error reading video file" diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py index 85944a543c..80b45c154b 100644 --- a/ultralytics/__init__.py +++ b/ultralytics/__init__.py @@ -1,7 +1,6 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license -__version__ = "8.2.100" - +__version__ = "8.3.4" import os diff --git a/ultralytics/cfg/__init__.py b/ultralytics/cfg/__init__.py index 06356e7589..7058c3d4a5 100644 --- a/ultralytics/cfg/__init__.py +++ b/ultralytics/cfg/__init__.py @@ -42,11 +42,11 @@ TASK2DATA = { "obb": "dota8.yaml", } TASK2MODEL = { - "detect": "yolov8n.pt", - "segment": "yolov8n-seg.pt", - "classify": "yolov8n-cls.pt", - "pose": "yolov8n-pose.pt", - "obb": "yolov8n-obb.pt", + "detect": "yolo11n.pt", + "segment": "yolo11n-seg.pt", + "classify": "yolo11n-cls.pt", + "pose": "yolo11n-pose.pt", + "obb": "yolo11n-obb.pt", } TASK2METRIC = { "detect": "metrics/mAP50-95(B)", @@ -69,19 +69,19 @@ CLI_HELP_MSG = f""" See all ARGS at https://docs.ultralytics.com/usage/cfg or with 'yolo cfg' 1. Train a detection model for 10 epochs with an initial learning_rate of 0.01 - yolo train data=coco8.yaml model=yolov8n.pt epochs=10 lr0=0.01 + yolo train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01 2. Predict a YouTube video using a pretrained segmentation model at image size 320: - yolo predict model=yolov8n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320 + yolo predict model=yolo11n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320 3. Val a pretrained detection model at batch-size 1 and image size 640: - yolo val model=yolov8n.pt data=coco8.yaml batch=1 imgsz=640 + yolo val model=yolo11n.pt data=coco8.yaml batch=1 imgsz=640 - 4.
Export a YOLOv8n classification model to ONNX format at image size 224 by 128 (no TASK required) - yolo export model=yolov8n-cls.pt format=onnx imgsz=224,128 + 4. Export a YOLO11n classification model to ONNX format at image size 224 by 128 (no TASK required) + yolo export model=yolo11n-cls.pt format=onnx imgsz=224,128 5. Explore your datasets using semantic search and SQL with a simple GUI powered by Ultralytics Explorer API - yolo explorer data=data.yaml model=yolov8n.pt + yolo explorer data=data.yaml model=yolo11n.pt 6. Streamlit real-time webcam inference GUI yolo streamlit-predict @@ -517,7 +517,7 @@ def handle_yolo_settings(args: List[str]) -> None: Examples: >>> handle_yolo_settings(["reset"]) # Reset YOLO settings - >>> handle_yolo_settings(["default_cfg_path=yolov8n.yaml"]) # Update a specific setting + >>> handle_yolo_settings(["default_cfg_path=yolo11n.yaml"]) # Update a specific setting Notes: - If no arguments are provided, the function will display the current settings. @@ -557,7 +557,7 @@ def handle_explorer(args: List[str]): Examples: ```bash - yolo explorer data=data.yaml model=yolov8n.pt + yolo explorer data=data.yaml model=yolo11n.pt ``` Notes: @@ -611,9 +611,9 @@ def parse_key_value_pair(pair: str = "key=value"): AssertionError: If the value is missing or empty. Examples: - >>> key, value = parse_key_value_pair("model=yolov8n.pt") + >>> key, value = parse_key_value_pair("model=yolo11n.pt") >>> print(f"Key: {key}, Value: {value}") - Key: model, Value: yolov8n.pt + Key: model, Value: yolo11n.pt >>> key, value = parse_key_value_pair("epochs=100") >>> print(f"Key: {key}, Value: {value}") @@ -686,13 +686,13 @@ def entrypoint(debug=""): Examples: Train a detection model for 10 epochs with an initial learning_rate of 0.01: - >>> entrypoint("train data=coco8.yaml model=yolov8n.pt epochs=10 lr0=0.01") + >>> entrypoint("train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01") Predict a YouTube video using a pretrained segmentation model at image size 320: - >>> entrypoint("predict model=yolov8n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320") + >>> entrypoint("predict model=yolo11n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320") Validate a pretrained detection model at batch-size 1 and image size 640: - >>> entrypoint("val model=yolov8n.pt data=coco8.yaml batch=1 imgsz=640") + >>> entrypoint("val model=yolo11n.pt data=coco8.yaml batch=1 imgsz=640") Notes: - If no arguments are passed, the function will display the usage help message. @@ -782,7 +782,7 @@ def entrypoint(debug=""): # Model model = overrides.pop("model", DEFAULT_CFG.model) if model is None: - model = "yolov8n.pt" + model = "yolo11n.pt" LOGGER.warning(f"WARNING ⚠️ 'model' argument is missing. 
Using default 'model={model}'.") overrides["model"] = model stem = Path(model).stem.lower() @@ -869,5 +869,5 @@ def copy_default_cfg(): if __name__ == "__main__": - # Example: entrypoint(debug='yolo predict model=yolov8n.pt') + # Example: entrypoint(debug='yolo predict model=yolo11n.pt') entrypoint(debug="") diff --git a/ultralytics/cfg/datasets/hand-keypoints.yaml b/ultralytics/cfg/datasets/hand-keypoints.yaml new file mode 100644 index 0000000000..475a7c0137 --- /dev/null +++ b/ultralytics/cfg/datasets/hand-keypoints.yaml @@ -0,0 +1,25 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# Hand Keypoints dataset by Ultralytics +# Documentation: https://docs.ultralytics.com/datasets/pose/hand-keypoints/ +# Example usage: yolo train data=hand-keypoints.yaml +# parent +# ├── ultralytics +# └── datasets +# └── hand-keypoints ← downloads here (369 MB) + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/hand-keypoints # dataset root dir +train: train # train images (relative to 'path') 18776 images +val: val # val images (relative to 'path') 7992 images + +# Keypoints +kpt_shape: [21, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) +flip_idx: + [0, 1, 2, 4, 3, 10, 11, 12, 13, 14, 5, 6, 7, 8, 9, 15, 16, 17, 18, 19, 20] + +# Classes +names: + 0: hand + +# Download script/URL (optional) +download: https://github.com/ultralytics/assets/releases/download/v0.0.0/hand-keypoints.zip diff --git a/ultralytics/cfg/default.yaml b/ultralytics/cfg/default.yaml index a44f609411..da616d651e 100644 --- a/ultralytics/cfg/default.yaml +++ b/ultralytics/cfg/default.yaml @@ -115,6 +115,7 @@ bgr: 0.0 # (float) image channel BGR (probability) mosaic: 1.0 # (float) image mosaic (probability) mixup: 0.0 # (float) image mixup (probability) copy_paste: 0.0 # (float) segment copy-paste (probability) +copy_paste_mode: "flip" # (str) the method to do copy_paste augmentation (flip, mixup) auto_augment: randaugment # (str) auto augmentation policy for classification (randaugment, autoaugment, augmix) erasing: 0.4 # (float) probability of random erasing during classification training (0-0.9), 0 means no erasing, must be less than 1.0. crop_fraction: 1.0 # (float) image crop fraction for classification (0.1-1), 1.0 means no crop, must be greater than 0. diff --git a/ultralytics/cfg/models/11/yolo11-cls.yaml b/ultralytics/cfg/models/11/yolo11-cls.yaml new file mode 100644 index 0000000000..ea21e7922f --- /dev/null +++ b/ultralytics/cfg/models/11/yolo11-cls.yaml @@ -0,0 +1,30 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLO11-cls image classification model. For Usage examples see https://docs.ultralytics.com/tasks/classify + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 
'model=yolo11n-cls.yaml' will call yolo11-cls.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.50, 0.25, 1024] # summary: 151 layers, 1633584 parameters, 1633584 gradients, 3.3 GFLOPs + s: [0.50, 0.50, 1024] # summary: 151 layers, 5545488 parameters, 5545488 gradients, 12.2 GFLOPs + m: [0.50, 1.00, 512] # summary: 187 layers, 10455696 parameters, 10455696 gradients, 39.7 GFLOPs + l: [1.00, 1.00, 512] # summary: 309 layers, 12937104 parameters, 12937104 gradients, 49.9 GFLOPs + x: [1.00, 1.50, 512] # summary: 309 layers, 28458544 parameters, 28458544 gradients, 111.1 GFLOPs + +# YOLO11n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 2, C3k2, [256, False, 0.25]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 2, C3k2, [512, False, 0.25]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 2, C3k2, [512, True]] + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 2, C3k2, [1024, True]] + - [-1, 2, C2PSA, [1024]] # 9 + +# YOLO11n head +head: + - [-1, 1, Classify, [nc]] # Classify diff --git a/ultralytics/cfg/models/11/yolo11-obb.yaml b/ultralytics/cfg/models/11/yolo11-obb.yaml new file mode 100644 index 0000000000..5540ed753d --- /dev/null +++ b/ultralytics/cfg/models/11/yolo11-obb.yaml @@ -0,0 +1,47 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLO11 Oriented Bounding Boxes (OBB) model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/obb + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 'model=yolo11n-obb.yaml' will call yolo11-obb.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.50, 0.25, 1024] # summary: 344 layers, 2695747 parameters, 2695731 gradients, 6.9 GFLOPs + s: [0.50, 0.50, 1024] # summary: 344 layers, 9744931 parameters, 9744915 gradients, 22.7 GFLOPs + m: [0.50, 1.00, 512] # summary: 434 layers, 20963523 parameters, 20963507 gradients, 72.2 GFLOPs + l: [1.00, 1.00, 512] # summary: 656 layers, 26220995 parameters, 26220979 gradients, 91.3 GFLOPs + x: [1.00, 1.50, 512] # summary: 656 layers, 58875331 parameters, 58875315 gradients, 204.3 GFLOPs + +# YOLO11n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 2, C3k2, [256, False, 0.25]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 2, C3k2, [512, False, 0.25]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 2, C3k2, [512, True]] + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 2, C3k2, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 2, C2PSA, [1024]] # 10 + +# YOLO11n head +head: + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 2, C3k2, [512, False]] # 13 + + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 2, C3k2, [256, False]] # 16 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 13], 1, Concat, [1]] # cat head P4 + - [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 10], 1, Concat, [1]] # cat head P5 + - [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large) + + - [[16, 19, 22], 1, OBB, [nc, 1]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/11/yolo11-pose.yaml b/ultralytics/cfg/models/11/yolo11-pose.yaml new file mode 100644 index 0000000000..a744a33b6b --- /dev/null +++ b/ultralytics/cfg/models/11/yolo11-pose.yaml @@ -0,0 +1,48 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# 
YOLO11-pose keypoints/pose estimation model. For Usage examples see https://docs.ultralytics.com/tasks/pose + +# Parameters +nc: 80 # number of classes +kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) +scales: # model compound scaling constants, i.e. 'model=yolo11n-pose.yaml' will call yolo11-pose.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.50, 0.25, 1024] # summary: 344 layers, 2908507 parameters, 2908491 gradients, 7.7 GFLOPs + s: [0.50, 0.50, 1024] # summary: 344 layers, 9948811 parameters, 9948795 gradients, 23.5 GFLOPs + m: [0.50, 1.00, 512] # summary: 434 layers, 20973273 parameters, 20973257 gradients, 72.3 GFLOPs + l: [1.00, 1.00, 512] # summary: 656 layers, 26230745 parameters, 26230729 gradients, 91.4 GFLOPs + x: [1.00, 1.50, 512] # summary: 656 layers, 58889881 parameters, 58889865 gradients, 204.3 GFLOPs + +# YOLO11n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 2, C3k2, [256, False, 0.25]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 2, C3k2, [512, False, 0.25]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 2, C3k2, [512, True]] + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 2, C3k2, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 2, C2PSA, [1024]] # 10 + +# YOLO11n head +head: + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 2, C3k2, [512, False]] # 13 + + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 2, C3k2, [256, False]] # 16 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 13], 1, Concat, [1]] # cat head P4 + - [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 10], 1, Concat, [1]] # cat head P5 + - [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large) + + - [[16, 19, 22], 1, Pose, [nc, kpt_shape]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/11/yolo11-seg.yaml b/ultralytics/cfg/models/11/yolo11-seg.yaml new file mode 100644 index 0000000000..0f02d96c06 --- /dev/null +++ b/ultralytics/cfg/models/11/yolo11-seg.yaml @@ -0,0 +1,47 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLO11-seg instance segmentation model. For Usage examples see https://docs.ultralytics.com/tasks/segment + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e.
'model=yolo11n-seg.yaml' will call yolo11-seg.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.50, 0.25, 1024] # summary: 355 layers, 2876848 parameters, 2876832 gradients, 10.5 GFLOPs + s: [0.50, 0.50, 1024] # summary: 355 layers, 10113248 parameters, 10113232 gradients, 35.8 GFLOPs + m: [0.50, 1.00, 512] # summary: 445 layers, 22420896 parameters, 22420880 gradients, 123.9 GFLOPs + l: [1.00, 1.00, 512] # summary: 667 layers, 27678368 parameters, 27678352 gradients, 143.0 GFLOPs + x: [1.00, 1.50, 512] # summary: 667 layers, 62142656 parameters, 62142640 gradients, 320.2 GFLOPs + +# YOLO11n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 2, C3k2, [256, False, 0.25]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 2, C3k2, [512, False, 0.25]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 2, C3k2, [512, True]] + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 2, C3k2, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 2, C2PSA, [1024]] # 10 + +# YOLO11n head +head: + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 2, C3k2, [512, False]] # 13 + + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 2, C3k2, [256, False]] # 16 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 13], 1, Concat, [1]] # cat head P4 + - [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 10], 1, Concat, [1]] # cat head P5 + - [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large) + + - [[16, 19, 22], 1, Segment, [nc, 32, 256]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/11/yolo11.yaml b/ultralytics/cfg/models/11/yolo11.yaml new file mode 100644 index 0000000000..8d06a12991 --- /dev/null +++ b/ultralytics/cfg/models/11/yolo11.yaml @@ -0,0 +1,47 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLO11 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 
'model=yolo11n.yaml' will call yolo11.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.50, 0.25, 1024] # summary: 319 layers, 2624080 parameters, 2624064 gradients, 6.6 GFLOPs + s: [0.50, 0.50, 1024] # summary: 319 layers, 9458752 parameters, 9458736 gradients, 21.7 GFLOPs + m: [0.50, 1.00, 512] # summary: 409 layers, 20114688 parameters, 20114672 gradients, 68.5 GFLOPs + l: [1.00, 1.00, 512] # summary: 631 layers, 25372160 parameters, 25372144 gradients, 87.6 GFLOPs + x: [1.00, 1.50, 512] # summary: 631 layers, 56966176 parameters, 56966160 gradients, 196.0 GFLOPs + +# YOLO11n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 2, C3k2, [256, False, 0.25]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 2, C3k2, [512, False, 0.25]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 2, C3k2, [512, True]] + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 2, C3k2, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 2, C2PSA, [1024]] # 10 + +# YOLO11n head +head: + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 2, C3k2, [512, False]] # 13 + + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 2, C3k2, [256, False]] # 16 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 13], 1, Concat, [1]] # cat head P4 + - [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 10], 1, Concat, [1]] # cat head P5 + - [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large) + + - [[16, 19, 22], 1, Detect, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/README.md b/ultralytics/cfg/models/README.md index bcaf8deda4..68a9238384 100644 --- a/ultralytics/cfg/models/README.md +++ b/ultralytics/cfg/models/README.md @@ -11,8 +11,8 @@ To get started, simply browse through the models in this directory and find one Model `*.yaml` files may be used directly in the [Command Line Interface (CLI)](https://docs.ultralytics.com/usage/cli/) with a `yolo` command: ```bash -# Train a YOLOv8n model using the coco8 dataset for 100 epochs -yolo task=detect mode=train model=yolov8n.yaml data=coco8.yaml epochs=100 +# Train a YOLO11n model using the coco8 dataset for 100 epochs +yolo task=detect mode=train model=yolo11n.yaml data=coco8.yaml epochs=100 ``` They may also be used directly in a Python environment, and accept the same [arguments](https://docs.ultralytics.com/usage/cfg/) as in the CLI example above: @@ -20,7 +20,7 @@ They may also be used directly in a Python environment, and accept the same [arg ```python from ultralytics import YOLO -# Initialize a YOLOv8n model from a YAML configuration file +# Initialize a YOLO11n model from a YAML configuration file model = YOLO("model.yaml") # If a pre-trained model is available, use it instead diff --git a/ultralytics/data/augment.py b/ultralytics/data/augment.py index a20f85a08a..49bdc92235 100644 --- a/ultralytics/data/augment.py +++ b/ultralytics/data/augment.py @@ -1628,92 +1628,105 @@ class LetterBox: return labels -class CopyPaste: +class CopyPaste(BaseMixTransform): """ - Implements Copy-Paste augmentation as described in https://arxiv.org/abs/2012.07177. + CopyPaste class for applying Copy-Paste augmentation to image datasets. - This class applies Copy-Paste augmentation on images and their corresponding instances. 
+ This class implements the Copy-Paste augmentation technique as described in the paper "Simple Copy-Paste is a Strong + Data Augmentation Method for Instance Segmentation" (https://arxiv.org/abs/2012.07177). It combines objects from + different images to create new training samples. Attributes: - p (float): Probability of applying the Copy-Paste augmentation. Must be between 0 and 1. + dataset (Any): The dataset to which Copy-Paste augmentation will be applied. + pre_transform (Callable | None): Optional transform to apply before Copy-Paste. + p (float): Probability of applying Copy-Paste augmentation. Methods: - __call__: Applies Copy-Paste augmentation to given image and instances. + get_indexes: Returns a random index from the dataset. + _mix_transform: Applies Copy-Paste augmentation to the input labels. + __call__: Applies the Copy-Paste transformation to images and annotations. Examples: - >>> copypaste = CopyPaste(p=0.5) - >>> augmented_labels = copypaste(labels) - >>> augmented_image = augmented_labels["img"] + >>> from ultralytics.data.augment import CopyPaste + >>> dataset = YourDataset(...) # Your image dataset + >>> copypaste = CopyPaste(dataset, p=0.5) + >>> augmented_labels = copypaste(original_labels) """ - def __init__(self, p=0.5) -> None: - """ - Initializes the CopyPaste augmentation object. + def __init__(self, dataset=None, pre_transform=None, p=0.5, mode="flip") -> None: + """Initializes CopyPaste object with dataset, pre_transform, and probability of applying Copy-Paste.""" + super().__init__(dataset=dataset, pre_transform=pre_transform, p=p) + assert mode in {"flip", "mixup"}, f"Expected `mode` to be `flip` or `mixup`, but got {mode}." + self.mode = mode - This class implements the Copy-Paste augmentation as described in the paper "Simple Copy-Paste is a Strong Data - Augmentation Method for Instance Segmentation" (https://arxiv.org/abs/2012.07177). It applies the Copy-Paste - augmentation on images and their corresponding instances with a given probability. + def get_indexes(self): + """Returns a random index from the dataset for CopyPaste augmentation.""" + return random.randint(0, len(self.dataset) - 1) - Args: - p (float): The probability of applying the Copy-Paste augmentation. Must be between 0 and 1. + def _mix_transform(self, labels): + """Applies Copy-Paste augmentation to combine objects from another image into the current image.""" + labels2 = labels["mix_labels"][0] + return self._transform(labels, labels2) - Attributes: - p (float): Stores the probability of applying the augmentation. + def __call__(self, labels): + """Applies Copy-Paste augmentation to an image and its labels.""" + if len(labels["instances"].segments) == 0 or self.p == 0: + return labels + if self.mode == "flip": + return self._transform(labels) - Examples: - >>> augment = CopyPaste(p=0.7) - >>> augmented_data = augment(original_data) - """ - self.p = p + # Get the index of another image to mix objects from + indexes = self.get_indexes() + if isinstance(indexes, int): + indexes = [indexes] - def __call__(self, labels): - """ - Applies Copy-Paste augmentation to an image and its instances. + # Get the image and label information that will be mixed in + mix_labels = [self.dataset.get_image_and_label(i) for i in indexes] - Args: - labels (Dict): A dictionary containing: - - 'img' (np.ndarray): The image to augment. - - 'cls' (np.ndarray): Class labels for the instances. - - 'instances' (ultralytics.engine.results.Instances): Object containing bounding boxes, segments, etc.
+ if self.pre_transform is not None: + for i, data in enumerate(mix_labels): + mix_labels[i] = self.pre_transform(data) + labels["mix_labels"] = mix_labels - Returns: - (Dict): Dictionary with augmented image and updated instances under 'img', 'cls', and 'instances' keys. + # Update cls and texts + labels = self._update_label_text(labels) + # Apply the Copy-Paste mix transform + labels = self._mix_transform(labels) + labels.pop("mix_labels", None) + return labels - Examples: - >>> labels = {"img": np.random.rand(640, 640, 3), "cls": np.array([0, 1, 2]), "instances": Instances(...)} - >>> augmenter = CopyPaste(p=0.5) - >>> augmented_labels = augmenter(labels) - """ - im = labels["img"] - cls = labels["cls"] + def _transform(self, labels1, labels2=None): + """Applies Copy-Paste augmentation to combine objects from another image into the current image.""" + labels2 = labels2 or {} # use a fresh dict rather than a mutable default argument + im = labels1["img"] + cls = labels1["cls"] h, w = im.shape[:2] - instances = labels.pop("instances") + instances = labels1.pop("instances") instances.convert_bbox(format="xyxy") instances.denormalize(w, h) - if self.p and len(instances.segments): - _, w, _ = im.shape # height, width, channels - im_new = np.zeros(im.shape, np.uint8) - - # Calculate ioa first then select indexes randomly - ins_flip = deepcopy(instances) - ins_flip.fliplr(w) - - ioa = bbox_ioa(ins_flip.bboxes, instances.bboxes) # intersection over area, (N, M) - indexes = np.nonzero((ioa < 0.30).all(1))[0] # (N, ) - n = len(indexes) - for j in random.sample(list(indexes), k=round(self.p * n)): - cls = np.concatenate((cls, cls[[j]]), axis=0) - instances = Instances.concatenate((instances, ins_flip[[j]]), axis=0) - cv2.drawContours(im_new, instances.segments[[j]].astype(np.int32), -1, (1, 1, 1), cv2.FILLED) - - result = cv2.flip(im, 1) # augment segments (flip left-right) - i = cv2.flip(im_new, 1).astype(bool) - im[i] = result[i] - - labels["img"] = im - labels["cls"] = cls - labels["instances"] = instances - return labels + + im_new = np.zeros(im.shape, np.uint8) + instances2 = labels2.pop("instances", None) + if instances2 is None: + instances2 = deepcopy(instances) + instances2.fliplr(w) + ioa = bbox_ioa(instances2.bboxes, instances.bboxes) # intersection over area, (N, M) + indexes = np.nonzero((ioa < 0.30).all(1))[0] # (N, ) + n = len(indexes) + sorted_idx = np.argsort(ioa.max(1)[indexes]) + indexes = indexes[sorted_idx] + for j in indexes[: round(self.p * n)]: + cls = np.concatenate((cls, labels2.get("cls", cls)[[j]]), axis=0) + instances = Instances.concatenate((instances, instances2[[j]]), axis=0) + cv2.drawContours(im_new, instances2.segments[[j]].astype(np.int32), -1, (1, 1, 1), cv2.FILLED) + + result = labels2.get("img", cv2.flip(im, 1)) # augment segments + i = im_new.astype(bool) + im[i] = result[i] + + labels1["img"] = im + labels1["cls"] = cls + labels1["instances"] = instances + return labels1 class Albumentations: @@ -2259,9 +2272,9 @@ class RandomLoadText: def v8_transforms(dataset, imgsz, hyp, stretch=False): """ - Applies a series of image transformations for YOLOv8 training. + Applies a series of image transformations for training. - This function creates a composition of image augmentation techniques to prepare images for YOLOv8 training. + This function creates a composition of image augmentation techniques to prepare images for YOLO training. It includes operations such as mosaic, copy-paste, random perspective, mixup, and various color adjustments.
Args: @@ -2280,20 +2293,28 @@ def v8_transforms(dataset, imgsz, hyp, stretch=False): >>> transforms = v8_transforms(dataset, imgsz=640, hyp=hyp) >>> augmented_data = transforms(dataset[0]) """ - pre_transform = Compose( - [ - Mosaic(dataset, imgsz=imgsz, p=hyp.mosaic), - CopyPaste(p=hyp.copy_paste), - RandomPerspective( - degrees=hyp.degrees, - translate=hyp.translate, - scale=hyp.scale, - shear=hyp.shear, - perspective=hyp.perspective, - pre_transform=None if stretch else LetterBox(new_shape=(imgsz, imgsz)), - ), - ] + mosaic = Mosaic(dataset, imgsz=imgsz, p=hyp.mosaic) + affine = RandomPerspective( + degrees=hyp.degrees, + translate=hyp.translate, + scale=hyp.scale, + shear=hyp.shear, + perspective=hyp.perspective, + pre_transform=None if stretch else LetterBox(new_shape=(imgsz, imgsz)), ) + + pre_transform = Compose([mosaic, affine]) + if hyp.copy_paste_mode == "flip": + pre_transform.insert(1, CopyPaste(p=hyp.copy_paste, mode=hyp.copy_paste_mode)) + else: + pre_transform.append( + CopyPaste( + dataset, + pre_transform=Compose([Mosaic(dataset, imgsz=imgsz, p=hyp.mosaic), affine]), + p=hyp.copy_paste, + mode=hyp.copy_paste_mode, + ) + ) flip_idx = dataset.data.get("flip_idx", []) # for keypoints augmentation if dataset.use_keypoints: kpt_shape = dataset.data.get("kpt_shape", None) diff --git a/ultralytics/engine/exporter.py b/ultralytics/engine/exporter.py index d4987f90be..73ee545f33 100644 --- a/ultralytics/engine/exporter.py +++ b/ultralytics/engine/exporter.py @@ -178,6 +178,16 @@ class Exporter: if fmt in {"mlmodel", "mlpackage", "mlprogram", "apple", "ios", "coreml"}: # 'coreml' aliases fmt = "coreml" fmts = tuple(export_formats()["Argument"][1:]) # available export formats + if fmt not in fmts: + import difflib + + # Get the closest match if format is invalid + matches = difflib.get_close_matches(fmt, fmts, n=1, cutoff=0.6) # 60% similarity required to match + if matches: + LOGGER.warning(f"WARNING ⚠️ Invalid export format='{fmt}', updating to format='{matches[0]}'") + fmt = matches[0] + else: + raise ValueError(f"Invalid export format='{fmt}'. Valid formats are {fmts}") flags = [x == fmt for x in fmts] if sum(flags) != 1: raise ValueError(f"Invalid export format='{fmt}'. Valid formats are {fmts}") diff --git a/ultralytics/engine/model.py b/ultralytics/engine/model.py index 6775188915..c4db53426a 100644 --- a/ultralytics/engine/model.py +++ b/ultralytics/engine/model.py @@ -127,7 +127,7 @@ class Model(nn.Module): # Check if Ultralytics HUB model from https://hub.ultralytics.com if self.is_hub_model(model): # Fetch model from HUB - checks.check_requirements("hub-sdk>=0.0.8") + checks.check_requirements("hub-sdk>=0.0.12") session = HUBTrainingSession.create_session(model) model = session.model_file if session.train_args: # training sent from HUB @@ -377,7 +377,7 @@ class Model(nn.Module): self.model.load(weights) return self - def save(self, filename: Union[str, Path] = "saved_model.pt", use_dill=True) -> None: + def save(self, filename: Union[str, Path] = "saved_model.pt") -> None: """ Saves the current model state to a file. @@ -386,7 +386,6 @@ class Model(nn.Module): Args: filename (Union[str, Path]): The name of the file to save the model to. - use_dill (bool): Whether to try using dill for serialization if available. Raises: AssertionError: If the model is not a PyTorch model. 
@@ -408,7 +407,7 @@ class Model(nn.Module): "license": "AGPL-3.0 License (https://ultralytics.com/license)", "docs": "https://docs.ultralytics.com", } - torch.save({**self.ckpt, **updates}, filename, use_dill=use_dill) + torch.save({**self.ckpt, **updates}, filename) def info(self, detailed: bool = False, verbose: bool = True): """ diff --git a/ultralytics/engine/trainer.py b/ultralytics/engine/trainer.py index ae98540b03..9fcc697040 100644 --- a/ultralytics/engine/trainer.py +++ b/ultralytics/engine/trainer.py @@ -12,7 +12,7 @@ import os import subprocess import time import warnings -from copy import deepcopy +from copy import copy, deepcopy from datetime import datetime, timedelta from pathlib import Path @@ -538,6 +538,8 @@ class BaseTrainer: self.best.write_bytes(serialized_ckpt) # save best.pt if (self.save_period > 0) and (self.epoch % self.save_period == 0): (self.wdir / f"epoch{self.epoch}.pt").write_bytes(serialized_ckpt) # save epoch, i.e. 'epoch3.pt' + # if self.args.close_mosaic and self.epoch == (self.epochs - self.args.close_mosaic - 1): + # (self.wdir / "last_mosaic.pt").write_bytes(serialized_ckpt) # save mosaic checkpoint def get_dataset(self): """ @@ -698,7 +700,12 @@ class BaseTrainer: resume = True self.args = get_cfg(ckpt_args) self.args.model = self.args.resume = str(last) # reinstate model - for k in "imgsz", "batch", "device": # allow arg updates to reduce memory or update device on resume + for k in ( + "imgsz", + "batch", + "device", + "close_mosaic", + ): # allow arg updates to reduce memory or update device on resume if k in overrides: setattr(self.args, k, overrides[k]) @@ -742,7 +749,7 @@ class BaseTrainer: self.train_loader.dataset.mosaic = False if hasattr(self.train_loader.dataset, "close_mosaic"): LOGGER.info("Closing dataloader mosaic") - self.train_loader.dataset.close_mosaic(hyp=self.args) + self.train_loader.dataset.close_mosaic(hyp=copy(self.args)) def build_optimizer(self, model, name="auto", lr=0.001, momentum=0.9, decay=1e-5, iterations=1e5): """ diff --git a/ultralytics/hub/__init__.py b/ultralytics/hub/__init__.py index 33b0c3748d..9c9c9dfa16 100644 --- a/ultralytics/hub/__init__.py +++ b/ultralytics/hub/__init__.py @@ -38,7 +38,7 @@ def login(api_key: str = None, save=True) -> bool: Returns: (bool): True if authentication is successful, False otherwise. """ - checks.check_requirements("hub-sdk>=0.0.8") + checks.check_requirements("hub-sdk>=0.0.12") from hub_sdk import HUBClient api_key_url = f"{HUB_WEB_ROOT}/settings?tab=api+keys" # set the redirect URL diff --git a/ultralytics/hub/session.py b/ultralytics/hub/session.py index 170bb82b67..89b5ddfc1e 100644 --- a/ultralytics/hub/session.py +++ b/ultralytics/hub/session.py @@ -63,22 +63,24 @@ class HUBTrainingSession: # Initialize client self.client = HUBClient(credentials) - # Load models if authenticated - if self.client.authenticated: + # Load models + try: if model_id: self.load_model(model_id) # load existing model else: self.model = self.client.model() # load empty model + except Exception: + if identifier.startswith(f"{HUB_WEB_ROOT}/models/") and not self.client.authenticated: + LOGGER.warning( + f"{PREFIX}WARNING ⚠️ Please log in using 'yolo login API_KEY'. " + "You can find your API Key at: https://hub.ultralytics.com/settings?tab=api+keys." 
+ ) @classmethod def create_session(cls, identifier, args=None): """Class method to create an authenticated HUBTrainingSession or return None.""" try: session = cls(identifier) - if not session.client.authenticated: - if identifier.startswith(f"{HUB_WEB_ROOT}/models/"): - LOGGER.warning(f"{PREFIX}WARNING ⚠️ Login to Ultralytics HUB with 'yolo hub login API_KEY'.") - return None if args and not identifier.startswith(f"{HUB_WEB_ROOT}/models/"): # not a HUB model URL session.create_model(args) assert session.model.id, "HUB model not loaded correctly" diff --git a/ultralytics/hub/utils.py b/ultralytics/hub/utils.py index 2fc956fb34..469a1c37e7 100644 --- a/ultralytics/hub/utils.py +++ b/ultralytics/hub/utils.py @@ -170,7 +170,7 @@ def smart_request(method, url, retry=3, timeout=30, thread=True, code=-1, verbos class Events: """ A class for collecting anonymous event analytics. Event analytics are enabled when sync=True in settings and - disabled when sync=False. Run 'yolo settings' to see and update settings YAML file. + disabled when sync=False. Run 'yolo settings' to see and update settings. Attributes: url (str): The URL to send anonymous events. diff --git a/ultralytics/models/sam/build.py b/ultralytics/models/sam/build.py index 0e7ddedcf0..e110531244 100644 --- a/ultralytics/models/sam/build.py +++ b/ultralytics/models/sam/build.py @@ -210,8 +210,6 @@ def _build_sam( state_dict = torch.load(f) sam.load_state_dict(state_dict) sam.eval() - # sam.load_state_dict(torch.load(checkpoint), strict=True) - # sam.eval() return sam diff --git a/ultralytics/models/sam/modules/sam.py b/ultralytics/models/sam/modules/sam.py index c902153f17..2728b0b481 100644 --- a/ultralytics/models/sam/modules/sam.py +++ b/ultralytics/models/sam/modules/sam.py @@ -645,9 +645,7 @@ class SAM2Model(torch.nn.Module): # The case of `self.num_maskmem == 0` below is primarily used for reproducing SAM on images. # In this case, we skip the fusion with any memory. 
if self.num_maskmem == 0: # Disable memory and skip fusion - pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W) - return pix_feat - + return current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W) num_obj_ptr_tokens = 0 # Step 1: condition the visual features of the current frame on previous memories if not is_init_cond_frame: diff --git a/ultralytics/models/sam/predict.py b/ultralytics/models/sam/predict.py index 686ef70c63..768d63d8f1 100644 --- a/ultralytics/models/sam/predict.py +++ b/ultralytics/models/sam/predict.py @@ -196,6 +196,7 @@ class Predictor(BasePredictor): bboxes = self.prompts.pop("bboxes", bboxes) points = self.prompts.pop("points", points) masks = self.prompts.pop("masks", masks) + labels = self.prompts.pop("labels", labels) if all(i is None for i in [bboxes, points, masks]): return self.generate(im, *args, **kwargs) diff --git a/ultralytics/nn/modules/__init__.py b/ultralytics/nn/modules/__init__.py index 2071a2895a..a840c5a71a 100644 --- a/ultralytics/nn/modules/__init__.py +++ b/ultralytics/nn/modules/__init__.py @@ -20,6 +20,7 @@ Example: from .block import ( C1, C2, + C2PSA, C3, C3TR, CIB, @@ -38,7 +39,9 @@ from .block import ( C2f, C2fAttn, C2fCIB, + C2fPSA, C3Ghost, + C3k2, C3x, CBFuse, CBLinear, @@ -110,6 +113,10 @@ __all__ = ( "C2", "C3", "C2f", + "C3k2", + "SCDown", + "C2fPSA", + "C2PSA", "C2fAttn", "C3x", "C3TR", @@ -149,5 +156,4 @@ __all__ = ( "C2fCIB", "Attention", "PSA", - "SCDown", ) diff --git a/ultralytics/nn/modules/block.py b/ultralytics/nn/modules/block.py index 07be2b8845..7208ea639b 100644 --- a/ultralytics/nn/modules/block.py +++ b/ultralytics/nn/modules/block.py @@ -40,6 +40,9 @@ __all__ = ( "SPPELAN", "CBFuse", "CBLinear", + "C3k2", + "C2fPSA", + "C2PSA", "RepVGGDW", "CIB", "C2fCIB", @@ -696,6 +699,49 @@ class CBFuse(nn.Module): return torch.sum(torch.stack(res + xs[-1:]), dim=0) +class C3f(nn.Module): + """Faster Implementation of CSP Bottleneck with 2 convolutions.""" + + def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): + """Initialize CSP bottleneck layer with two convolutions with arguments ch_in, ch_out, number, shortcut, groups, + expansion. 
+ """ + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv((2 + n) * c_, c2, 1) # optional act=FReLU(c2) + self.m = nn.ModuleList(Bottleneck(c_, c_, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n)) + + def forward(self, x): + """Forward pass through C2f layer.""" + y = [self.cv2(x), self.cv1(x)] + y.extend(m(y[-1]) for m in self.m) + return self.cv3(torch.cat(y, 1)) + + +class C3k2(C2f): + """Faster Implementation of CSP Bottleneck with 2 convolutions.""" + + def __init__(self, c1, c2, n=1, c3k=False, e=0.5, g=1, shortcut=True): + """Initializes the C3k2 module, a faster CSP Bottleneck with 2 convolutions and optional C3k blocks.""" + super().__init__(c1, c2, n, shortcut, g, e) + self.m = nn.ModuleList( + C3k(self.c, self.c, 2, shortcut, g) if c3k else Bottleneck(self.c, self.c, shortcut, g) for _ in range(n) + ) + + +class C3k(C3): + """C3k is a CSP bottleneck module with customizable kernel sizes for feature extraction in neural networks.""" + + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, k=3): + """Initializes the C3k module with specified channels, number of layers, and configurations.""" + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + # self.m = nn.Sequential(*(RepBottleneck(c_, c_, shortcut, g, k=(k, k), e=1.0) for _ in range(n))) + self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, k=(k, k), e=1.0) for _ in range(n))) + + class RepVGGDW(torch.nn.Module): """RepVGGDW is a class that represents a depth wise separable convolutional block in RepVGG architecture.""" @@ -873,25 +919,69 @@ class Attention(nn.Module): return x +class PSABlock(nn.Module): + """ + PSABlock class implementing a Position-Sensitive Attention block for neural networks. + + This class encapsulates the functionality for applying multi-head attention and feed-forward neural network layers + with optional shortcut connections. + + Attributes: + attn (Attention): Multi-head attention module. + ffn (nn.Sequential): Feed-forward neural network module. + add (bool): Flag indicating whether to add shortcut connections. + + Methods: + forward: Performs a forward pass through the PSABlock, applying attention and feed-forward layers. + + Examples: + Create a PSABlock and perform a forward pass + >>> psablock = PSABlock(c=128, attn_ratio=0.5, num_heads=4, shortcut=True) + >>> input_tensor = torch.randn(1, 128, 32, 32) + >>> output_tensor = psablock(input_tensor) + """ + + def __init__(self, c, attn_ratio=0.5, num_heads=4, shortcut=True) -> None: + """Initializes the PSABlock with attention and feed-forward layers for enhanced feature extraction.""" + super().__init__() + + self.attn = Attention(c, attn_ratio=attn_ratio, num_heads=num_heads) + self.ffn = nn.Sequential(Conv(c, c * 2, 1), Conv(c * 2, c, 1, act=False)) + self.add = shortcut + + def forward(self, x): + """Executes a forward pass through PSABlock, applying attention and feed-forward layers to the input tensor.""" + x = x + self.attn(x) if self.add else self.attn(x) + x = x + self.ffn(x) if self.add else self.ffn(x) + return x + + class PSA(nn.Module): """ - Position-wise Spatial Attention module. + PSA class for implementing Position-Sensitive Attention in neural networks. - Args: - c1 (int): Number of input channels. - c2 (int): Number of output channels. - e (float): Expansion factor for the intermediate channels. Default is 0.5. 
+ This class encapsulates the functionality for applying position-sensitive attention and feed-forward networks to + input tensors, enhancing feature extraction and processing capabilities. Attributes: - c (int): Number of intermediate channels. + c (int): Number of hidden channels after applying the initial convolution. cv1 (Conv): 1x1 convolution layer to reduce the number of input channels to 2*c. cv2 (Conv): 1x1 convolution layer to reduce the number of output channels to c. - attn (Attention): Attention module for spatial attention. - ffn (nn.Sequential): Feed-forward network module. + attn (Attention): Attention module for position-sensitive attention. + ffn (nn.Sequential): Feed-forward network for further processing. + + Methods: + forward: Applies position-sensitive attention and feed-forward network to the input tensor. + + Examples: + Create a PSA module and apply it to an input tensor + >>> psa = PSA(c1=128, c2=128, e=0.5) + >>> input_tensor = torch.randn(1, 128, 64, 64) + >>> output_tensor = psa.forward(input_tensor) """ def __init__(self, c1, c2, e=0.5): - """Initializes convolution layers, attention module, and feed-forward network with channel reduction.""" + """Initializes the PSA module with input/output channels and attention mechanism for feature extraction.""" super().__init__() assert c1 == c2 self.c = int(c1 * e) @@ -902,46 +992,117 @@ class PSA(nn.Module): self.ffn = nn.Sequential(Conv(self.c, self.c * 2, 1), Conv(self.c * 2, self.c, 1, act=False)) def forward(self, x): - """ - Forward pass of the PSA module. - - Args: - x (torch.Tensor): Input tensor. - - Returns: - (torch.Tensor): Output tensor. - """ + """Executes forward pass in PSA module, applying attention and feed-forward layers to the input tensor.""" a, b = self.cv1(x).split((self.c, self.c), dim=1) b = b + self.attn(b) b = b + self.ffn(b) return self.cv2(torch.cat((a, b), 1)) +class C2PSA(nn.Module): + """ + C2PSA module with attention mechanism for enhanced feature extraction and processing. + + This module implements a convolutional block with attention mechanisms to enhance feature extraction and processing + capabilities. It includes a series of PSABlock modules for self-attention and feed-forward operations. + + Attributes: + c (int): Number of hidden channels. + cv1 (Conv): 1x1 convolution layer to reduce the number of input channels to 2*c. + cv2 (Conv): 1x1 convolution layer to reduce the number of output channels to c. + m (nn.Sequential): Sequential container of PSABlock modules for attention and feed-forward operations. + + Methods: + forward: Performs a forward pass through the C2PSA module, applying attention and feed-forward operations. + + Notes: + This module essentially is the same as PSA module, but refactored to allow stacking more PSABlock modules. 
+ + Examples: + >>> c2psa = C2PSA(c1=256, c2=256, n=3, e=0.5) + >>> input_tensor = torch.randn(1, 256, 64, 64) + >>> output_tensor = c2psa(input_tensor) + """ + + def __init__(self, c1, c2, n=1, e=0.5): + """Initializes the C2PSA module with specified input/output channels, number of layers, and expansion ratio.""" + super().__init__() + assert c1 == c2 + self.c = int(c1 * e) + self.cv1 = Conv(c1, 2 * self.c, 1, 1) + self.cv2 = Conv(2 * self.c, c1, 1) + + self.m = nn.Sequential(*(PSABlock(self.c, attn_ratio=0.5, num_heads=self.c // 64) for _ in range(n))) + + def forward(self, x): + """Processes the input tensor 'x' through a series of PSA blocks and returns the transformed tensor.""" + a, b = self.cv1(x).split((self.c, self.c), dim=1) + b = self.m(b) + return self.cv2(torch.cat((a, b), 1)) + + +class C2fPSA(C2f): + """ + C2fPSA module with enhanced feature extraction using PSA blocks. + + This class extends the C2f module by incorporating PSA blocks for improved attention mechanisms and feature extraction. + + Attributes: + c (int): Number of hidden channels. + cv1 (Conv): 1x1 convolution layer to reduce the number of input channels to 2*c. + cv2 (Conv): 1x1 convolution layer to reduce the number of output channels to c. + m (nn.ModuleList): List of PSA blocks for feature extraction. + + Methods: + forward: Performs a forward pass through the C2fPSA module. + forward_split: Performs a forward pass using split() instead of chunk(). + + Examples: + >>> import torch + >>> from ultralytics.models.common import C2fPSA + >>> model = C2fPSA(c1=64, c2=64, n=3, e=0.5) + >>> x = torch.randn(1, 64, 128, 128) + >>> output = model(x) + >>> print(output.shape) + """ + + def __init__(self, c1, c2, n=1, e=0.5): + """Initializes the C2fPSA module, a variant of C2f with PSA blocks for enhanced feature extraction.""" + assert c1 == c2 + super().__init__(c1, c2, n=n, e=e) + self.m = nn.ModuleList(PSABlock(self.c, attn_ratio=0.5, num_heads=self.c // 64) for _ in range(n)) + + class SCDown(nn.Module): - """Spatial Channel Downsample (SCDown) module for reducing spatial and channel dimensions.""" + """ + SCDown module for downsampling with separable convolutions. - def __init__(self, c1, c2, k, s): - """ - Spatial Channel Downsample (SCDown) module. + This module performs downsampling using a combination of pointwise and depthwise convolutions, which helps in + efficiently reducing the spatial dimensions of the input tensor while maintaining the channel information. - Args: - c1 (int): Number of input channels. - c2 (int): Number of output channels. - k (int): Kernel size for the convolutional layer. - s (int): Stride for the convolutional layer. - """ + Attributes: + cv1 (Conv): Pointwise convolution layer that reduces the number of channels. + cv2 (Conv): Depthwise convolution layer that performs spatial downsampling. + + Methods: + forward: Applies the SCDown module to the input tensor. + + Examples: + >>> import torch + >>> from ultralytics import SCDown + >>> model = SCDown(c1=64, c2=128, k=3, s=2) + >>> x = torch.randn(1, 64, 128, 128) + >>> y = model(x) + >>> print(y.shape) + torch.Size([1, 128, 64, 64]) + """ + + def __init__(self, c1, c2, k, s): + """Initializes the SCDown module with specified input/output channels, kernel size, and stride.""" super().__init__() self.cv1 = Conv(c1, c2, 1, 1) self.cv2 = Conv(c2, c2, k=k, s=s, g=c2, act=False) def forward(self, x): - """ - Forward pass of the SCDown module. - - Args: - x (torch.Tensor): Input tensor. 
- - Returns: - (torch.Tensor): Output tensor after applying the SCDown module. - """ + """Applies convolution and downsampling to the input tensor in the SCDown module.""" return self.cv2(self.cv1(x)) diff --git a/ultralytics/nn/modules/conv.py b/ultralytics/nn/modules/conv.py index 2d9c7c0679..aaa70f5745 100644 --- a/ultralytics/nn/modules/conv.py +++ b/ultralytics/nn/modules/conv.py @@ -209,7 +209,8 @@ class RepConv(nn.Module): kernelid, biasid = self._fuse_bn_tensor(self.bn) return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid - def _pad_1x1_to_3x3_tensor(self, kernel1x1): + @staticmethod + def _pad_1x1_to_3x3_tensor(kernel1x1): """Pads a 1x1 tensor to a 3x3 tensor.""" if kernel1x1 is None: return 0 diff --git a/ultralytics/nn/modules/head.py b/ultralytics/nn/modules/head.py index 1a02e2b258..5f2931777e 100644 --- a/ultralytics/nn/modules/head.py +++ b/ultralytics/nn/modules/head.py @@ -11,7 +11,7 @@ from torch.nn.init import constant_, xavier_uniform_ from ultralytics.utils.tal import TORCH_1_10, dist2bbox, dist2rbox, make_anchors from .block import DFL, BNContrastiveHead, ContrastiveHead, Proto -from .conv import Conv +from .conv import Conv, DWConv from .transformer import MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer from .utils import bias_init_with_prob, linear_init @@ -41,7 +41,14 @@ class Detect(nn.Module): self.cv2 = nn.ModuleList( nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3), nn.Conv2d(c2, 4 * self.reg_max, 1)) for x in ch ) - self.cv3 = nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in ch) + self.cv3 = nn.ModuleList( + nn.Sequential( + nn.Sequential(DWConv(x, x, 3), Conv(x, c3, 1)), + nn.Sequential(DWConv(c3, c3, 3), Conv(c3, c3, 1)), + nn.Conv2d(c3, self.nc, 1), + ) + for x in ch + ) self.dfl = DFL(self.reg_max) if self.reg_max > 1 else nn.Identity() if self.end2end: diff --git a/ultralytics/nn/tasks.py b/ultralytics/nn/tasks.py index ad860e93e4..4ae6dba72e 100644 --- a/ultralytics/nn/tasks.py +++ b/ultralytics/nn/tasks.py @@ -13,6 +13,7 @@ from ultralytics.nn.modules import ( AIFI, C1, C2, + C2PSA, C3, C3TR, ELAN1, @@ -28,7 +29,9 @@ from ultralytics.nn.modules import ( C2f, C2fAttn, C2fCIB, + C2fPSA, C3Ghost, + C3k2, C3x, CBFuse, CBLinear, @@ -968,12 +971,15 @@ def parse_model(d, ch, verbose=True): # model_dict, input_channels(3) GhostBottleneck, SPP, SPPF, + C2fPSA, + C2PSA, DWConv, Focus, BottleneckCSP, C1, C2, C2f, + C3k2, RepNCSPELAN4, ELAN1, ADown, @@ -1001,9 +1007,26 @@ def parse_model(d, ch, verbose=True): # model_dict, input_channels(3) ) # num heads args = [c1, c2, *args[1:]] - if m in {BottleneckCSP, C1, C2, C2f, C2fAttn, C3, C3TR, C3Ghost, C3x, RepC3, C2fCIB}: + if m in { + BottleneckCSP, + C1, + C2, + C2f, + C3k2, + C2fAttn, + C3, + C3TR, + C3Ghost, + C3x, + RepC3, + C2fPSA, + C2fCIB, + C2PSA, + }: args.insert(2, n) # number of repeats n = 1 + if m is C3k2 and scale in "mlx": # for M/L/X sizes + args[3] = True elif m is AIFI: args = [ch[f], *args] elif m in {HGStem, HGBlock}: @@ -1080,7 +1103,7 @@ def guess_model_scale(model_path): with contextlib.suppress(AttributeError): import re - return re.search(r"yolov\d+([nslmx])", Path(model_path).stem).group(1) # n, s, m, l, or x + return re.search(r"yolo[v]?\d+([nslmx])", Path(model_path).stem).group(1) # n, s, m, l, or x return "" diff --git a/ultralytics/solutions/object_counter.py b/ultralytics/solutions/object_counter.py index 398629a8bc..cc7fe45946 100644 --- a/ultralytics/solutions/object_counter.py 
+++ b/ultralytics/solutions/object_counter.py @@ -176,22 +176,24 @@ class ObjectCounter: # Count objects using line elif len(self.reg_pts) == 2: - if prev_position is not None and track_id not in self.count_ids: - # Check if the object's movement segment intersects the counting line - if LineString([(prev_position[0], prev_position[1]), (box[0], box[1])]).intersects( + if ( + prev_position is not None + and track_id not in self.count_ids + and LineString([(prev_position[0], prev_position[1]), (box[0], box[1])]).intersects( self.counting_line_segment - ): - self.count_ids.append(track_id) - - # Determine the direction of movement (IN or OUT) - dx = (box[0] - prev_position[0]) * (self.counting_region.centroid.x - prev_position[0]) - dy = (box[1] - prev_position[1]) * (self.counting_region.centroid.y - prev_position[1]) - if dx > 0 and dy > 0: - self.in_counts += 1 - self.class_wise_count[self.names[cls]]["IN"] += 1 - else: - self.out_counts += 1 - self.class_wise_count[self.names[cls]]["OUT"] += 1 + ) + ): + self.count_ids.append(track_id) + + # Determine the direction of movement (IN or OUT) + dx = (box[0] - prev_position[0]) * (self.counting_region.centroid.x - prev_position[0]) + dy = (box[1] - prev_position[1]) * (self.counting_region.centroid.y - prev_position[1]) + if dx > 0 and dy > 0: + self.in_counts += 1 + self.class_wise_count[self.names[cls]]["IN"] += 1 + else: + self.out_counts += 1 + self.class_wise_count[self.names[cls]]["OUT"] += 1 labels_dict = {} diff --git a/ultralytics/solutions/parking_management.py b/ultralytics/solutions/parking_management.py index 6128493377..ef58ad6274 100644 --- a/ultralytics/solutions/parking_management.py +++ b/ultralytics/solutions/parking_management.py @@ -128,14 +128,13 @@ class ParkingPtsSelection: rg_data = [] # regions data for box in self.rg_data: - rs_box = [] # rescaled box list - for x, y in box: - rs_box.append( - ( - int(x * self.imgw / self.canvas.winfo_width()), # width scaling - int(y * self.imgh / self.canvas.winfo_height()), - ) - ) # height scaling + rs_box = [ + ( + int(x * self.imgw / self.canvas.winfo_width()), # width scaling + int(y * self.imgh / self.canvas.winfo_height()), # height scaling + ) + for x, y in box + ] rg_data.append({"points": rs_box}) with open("bounding_boxes.json", "w") as f: json.dump(rg_data, f, indent=4) diff --git a/ultralytics/solutions/streamlit_inference.py b/ultralytics/solutions/streamlit_inference.py index 85394350da..ea85cffba3 100644 --- a/ultralytics/solutions/streamlit_inference.py +++ b/ultralytics/solutions/streamlit_inference.py @@ -23,13 +23,13 @@ def inference(model=None): # Main title of streamlit application main_title_cfg = """

<div><h1 style="color:#FF64DA; text-align:center; font-size:40px; font-family: 'Archivo', sans-serif; margin-top:-50px;margin-bottom:20px;">
-                    Ultralytics YOLOv8 Streamlit Application
+                    Ultralytics YOLO Streamlit Application
                     </h1></div>"""

     # Subtitle of streamlit application
     sub_title_cfg = """<div><h4 style="color:#042AFF; text-align:center; font-family: 'Archivo', sans-serif; margin-top:-15px; margin-bottom:50px;">
-                    Experience real-time object detection on your webcam with the power of Ultralytics YOLOv8! 🚀
+                    Experience real-time object detection on your webcam with the power of Ultralytics YOLO! 🚀
                     </h4></div>
""" # Set html page configuration @@ -67,7 +67,7 @@ def inference(model=None): vid_file_name = 0 # Add dropdown menu for model selection - available_models = [x.replace("yolo", "YOLO") for x in GITHUB_ASSETS_STEMS if x.startswith("yolov8")] + available_models = [x.replace("yolo", "YOLO") for x in GITHUB_ASSETS_STEMS if x.startswith("yolo11")] if model: available_models.insert(0, model.split(".pt")[0]) # insert model without suffix as *.pt is added later diff --git a/ultralytics/trackers/README.md b/ultralytics/trackers/README.md index d7bc855814..3743d5374c 100644 --- a/ultralytics/trackers/README.md +++ b/ultralytics/trackers/README.md @@ -13,7 +13,7 @@ The output from Ultralytics trackers is consistent with standard object detectio - **Ease of Use:** Simple Python API and CLI options for quick integration and deployment. - **Customizability:** Easy to use with custom trained YOLO models, allowing integration into domain-specific applications. -**Video Tutorial:** [Object Detection and Tracking with Ultralytics YOLOv8](https://www.youtube.com/embed/hHyHmOtmEgs?si=VNZtXmm45Nb9s-N-). +**Video Tutorial:** [Object Detection and Tracking with Ultralytics YOLO](https://www.youtube.com/embed/hHyHmOtmEgs?si=VNZtXmm45Nb9s-N-). ## Features at a Glance @@ -34,7 +34,7 @@ The default tracker is BoT-SORT. ## Tracking -To run the tracker on video streams, use a trained Detect, Segment or Pose model such as YOLOv8n, YOLOv8n-seg and YOLOv8n-pose. +To run the tracker on video streams, use a trained Detect, Segment or Pose model such as YOLO11n, YOLO11n-seg and YOLO11n-pose. #### Python @@ -42,9 +42,9 @@ To run the tracker on video streams, use a trained Detect, Segment or Pose model from ultralytics import YOLO # Load an official or custom model -model = YOLO("yolov8n.pt") # Load an official Detect model -model = YOLO("yolov8n-seg.pt") # Load an official Segment model -model = YOLO("yolov8n-pose.pt") # Load an official Pose model +model = YOLO("yolo11n.pt") # Load an official Detect model +model = YOLO("yolo11n-seg.pt") # Load an official Segment model +model = YOLO("yolo11n-pose.pt") # Load an official Pose model model = YOLO("path/to/best.pt") # Load a custom trained model # Perform tracking with the model @@ -58,9 +58,9 @@ results = model.track( ```bash # Perform tracking with various models using the command line interface -yolo track model=yolov8n.pt source="https://youtu.be/LNwODJXcvt4" # Official Detect model -yolo track model=yolov8n-seg.pt source="https://youtu.be/LNwODJXcvt4" # Official Segment model -yolo track model=yolov8n-pose.pt source="https://youtu.be/LNwODJXcvt4" # Official Pose model +yolo track model=yolo11n.pt source="https://youtu.be/LNwODJXcvt4" # Official Detect model +yolo track model=yolo11n-seg.pt source="https://youtu.be/LNwODJXcvt4" # Official Segment model +yolo track model=yolo11n-pose.pt source="https://youtu.be/LNwODJXcvt4" # Official Pose model yolo track model=path/to/best.pt source="https://youtu.be/LNwODJXcvt4" # Custom trained model # Track using ByteTrack tracker @@ -81,7 +81,7 @@ Tracking configuration shares properties with Predict mode, such as `conf`, `iou from ultralytics import YOLO # Configure the tracking parameters and run the tracker -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") results = model.track(source="https://youtu.be/LNwODJXcvt4", conf=0.3, iou=0.5, show=True) ``` @@ -89,7 +89,7 @@ results = model.track(source="https://youtu.be/LNwODJXcvt4", conf=0.3, iou=0.5, ```bash # Configure tracking parameters and run the tracker using the command 
line interface
-yolo track model=yolov8n.pt source="https://youtu.be/LNwODJXcvt4" conf=0.3, iou=0.5 show
+yolo track model=yolo11n.pt source="https://youtu.be/LNwODJXcvt4" conf=0.3 iou=0.5 show
```

### Tracker Selection

@@ -102,7 +102,7 @@ Ultralytics also allows you to use a modified tracker configuration file. To do
 from ultralytics import YOLO

 # Load the model and run the tracker with a custom configuration file
-model = YOLO("yolov8n.pt")
+model = YOLO("yolo11n.pt")
 results = model.track(source="https://youtu.be/LNwODJXcvt4", tracker="custom_tracker.yaml")
 ```

@@ -110,7 +110,7 @@ results = model.track(source="https://youtu.be/LNwODJXcvt4", tracker="custom_tra
 ```bash
 # Load the model and run the tracker with a custom configuration file using the command line interface
-yolo track model=yolov8n.pt source="https://youtu.be/LNwODJXcvt4" tracker='custom_tracker.yaml'
+yolo track model=yolo11n.pt source="https://youtu.be/LNwODJXcvt4" tracker='custom_tracker.yaml'
 ```

 For a comprehensive list of tracking arguments, refer to the [ultralytics/cfg/trackers](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/trackers) page.

@@ -119,7 +119,7 @@ For a comprehensive list of tracking arguments, refer to the [ultralytics/cfg/tr

 ### Persisting Tracks Loop

-Here is a Python script using OpenCV (`cv2`) and YOLOv8 to run object tracking on video frames. This script still assumes you have already installed the necessary packages (`opencv-python` and `ultralytics`). The `persist=True` argument tells the tracker than the current image or frame is the next in a sequence and to expect tracks from the previous image in the current image.
+Here is a Python script using OpenCV (`cv2`) and YOLO11 to run object tracking on video frames. This script still assumes you have already installed the necessary packages (`opencv-python` and `ultralytics`). The `persist=True` argument tells the tracker that the current image or frame is the next in a sequence and to expect tracks from the previous image in the current image.

 #### Python

@@ -128,8 +128,8 @@
 import cv2

 from ultralytics import YOLO

-# Load the YOLOv8 model
-model = YOLO("yolov8n.pt")
+# Load the YOLO11 model
+model = YOLO("yolo11n.pt")

 # Open the video file
 video_path = "path/to/video.mp4"
@@ -141,14 +141,14 @@ while cap.isOpened():
     success, frame = cap.read()

     if success:
-        # Run YOLOv8 tracking on the frame, persisting tracks between frames
+        # Run YOLO11 tracking on the frame, persisting tracks between frames
         results = model.track(frame, persist=True)

         # Visualize the results on the frame
         annotated_frame = results[0].plot()

         # Display the annotated frame
-        cv2.imshow("YOLOv8 Tracking", annotated_frame)
+        cv2.imshow("YOLO11 Tracking", annotated_frame)

         # Break the loop if 'q' is pressed
         if cv2.waitKey(1) & 0xFF == ord("q"):
@@ -166,9 +166,9 @@ Please note the change from `model(frame)` to `model.track(frame)`, which enable

 ### Plotting Tracks Over Time

-Visualizing object tracks over consecutive frames can provide valuable insights into the movement patterns and behavior of detected objects within a video. With Ultralytics YOLOv8, plotting these tracks is a seamless and efficient process.
+Visualizing object tracks over consecutive frames can provide valuable insights into the movement patterns and behavior of detected objects within a video. With Ultralytics YOLO11, plotting these tracks is a seamless and efficient process.
-In the following example, we demonstrate how to utilize YOLOv8's tracking capabilities to plot the movement of detected objects across multiple video frames. This script involves opening a video file, reading it frame by frame, and utilizing the YOLO model to identify and track various objects. By retaining the center points of the detected bounding boxes and connecting them, we can draw lines that represent the paths followed by the tracked objects. +In the following example, we demonstrate how to utilize YOLO11's tracking capabilities to plot the movement of detected objects across multiple video frames. This script involves opening a video file, reading it frame by frame, and utilizing the YOLO model to identify and track various objects. By retaining the center points of the detected bounding boxes and connecting them, we can draw lines that represent the paths followed by the tracked objects. #### Python @@ -180,8 +180,8 @@ import numpy as np from ultralytics import YOLO -# Load the YOLOv8 model -model = YOLO("yolov8n.pt") +# Load the YOLO11 model +model = YOLO("yolo11n.pt") # Open the video file video_path = "path/to/video.mp4" @@ -196,7 +196,7 @@ while cap.isOpened(): success, frame = cap.read() if success: - # Run YOLOv8 tracking on the frame, persisting tracks between frames + # Run YOLO11 tracking on the frame, persisting tracks between frames results = model.track(frame, persist=True) # Get the boxes and track IDs @@ -225,7 +225,7 @@ while cap.isOpened(): ) # Display the annotated frame - cv2.imshow("YOLOv8 Tracking", annotated_frame) + cv2.imshow("YOLO11 Tracking", annotated_frame) # Break the loop if 'q' is pressed if cv2.waitKey(1) & 0xFF == ord("q"): @@ -247,7 +247,7 @@ In the provided Python script, we make use of Python's `threading` module to run To ensure that each thread receives the correct parameters (the video file and the model to use), we define a function `run_tracker_in_thread` that accepts these parameters and contains the main tracking loop. This function reads the video frame by frame, runs the tracker, and displays the results. -Two different models are used in this example: `yolov8n.pt` and `yolov8n-seg.pt`, each tracking objects in a different video file. The video files are specified in `video_file1` and `video_file2`. +Two different models are used in this example: `yolo11n.pt` and `yolo11n-seg.pt`, each tracking objects in a different video file. The video files are specified in `video_file1` and `video_file2`. The `daemon=True` parameter in `threading.Thread` means that these threads will be closed as soon as the main program finishes. We then start the threads with `start()` and use `join()` to make the main thread wait until both tracker threads have finished. 
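> Editor's note: for readers skimming this diff, below is a minimal, self-contained sketch of the multithreaded pattern the README section above describes. The `run_tracker_in_thread` name, the two-model setup, and `daemon=True` come from the README itself; the function body and video paths are illustrative assumptions, not lines from this change.

```python
import threading

import cv2

from ultralytics import YOLO


def run_tracker_in_thread(filename, model):
    """Run YOLO tracking on one video in its own thread, keeping track IDs with persist=True."""
    cap = cv2.VideoCapture(filename)
    while cap.isOpened():
        success, frame = cap.read()
        if not success:
            break
        results = model.track(frame, persist=True)  # persist=True treats frames as a sequence
        cv2.imshow(f"Tracking {filename}", results[0].plot())
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cap.release()


# One model per thread, each tracking a different video file
model1 = YOLO("yolo11n.pt")  # Detect model
model2 = YOLO("yolo11n-seg.pt")  # Segment model

tracker_thread1 = threading.Thread(target=run_tracker_in_thread, args=("path/to/video1.mp4", model1), daemon=True)
tracker_thread2 = threading.Thread(target=run_tracker_in_thread, args=("path/to/video2.mp4", model2), daemon=True)

tracker_thread1.start()
tracker_thread2.start()
tracker_thread1.join()
tracker_thread2.join()

cv2.destroyAllWindows()
```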
@@ -278,8 +278,8 @@ def run_tracker_in_thread(filename, model): # Load the models -model1 = YOLO("yolov8n.pt") -model2 = YOLO("yolov8n-seg.pt") +model1 = YOLO("yolo11n.pt") +model2 = YOLO("yolo11n-seg.pt") # Define the video files for the trackers video_file1 = "path/to/video1.mp4" diff --git a/ultralytics/utils/__init__.py b/ultralytics/utils/__init__.py index b4cc312d85..a7ab5d905c 100644 --- a/ultralytics/utils/__init__.py +++ b/ultralytics/utils/__init__.py @@ -111,7 +111,7 @@ torch.set_printoptions(linewidth=320, precision=4, profile="default") np.set_printoptions(linewidth=320, formatter={"float_kind": "{:11.5g}".format}) # format short g, %precision=5 cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) os.environ["NUMEXPR_MAX_THREADS"] = str(NUM_THREADS) # NumExpr max threads -os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8" # for deterministic training +os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8" # for deterministic training to avoid CUDA warning os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" # suppress verbose TF compiler warnings in Colab os.environ["TORCH_CPP_LOG_LEVEL"] = "ERROR" # suppress "NNPACK.cpp could not initialize NNPACK" warnings os.environ["KINETO_LOG_LEVEL"] = "5" # suppress verbose PyTorch profiler output when computing FLOPs @@ -971,7 +971,7 @@ def threaded(func): def set_sentry(): """ Initialize the Sentry SDK for error tracking and reporting. Only used if sentry_sdk package is installed and - sync=True in settings. Run 'yolo settings' to see and update settings YAML file. + sync=True in settings. Run 'yolo settings' to see and update settings. Conditions required to send errors (ALL conditions must be met or no errors will be reported): - sentry_sdk package is installed @@ -983,36 +983,11 @@ def set_sentry(): - online environment - CLI used to run package (checked with 'yolo' as the name of the main CLI command) - The function also configures Sentry SDK to ignore KeyboardInterrupt and FileNotFoundError - exceptions and to exclude events with 'out of memory' in their exception message. + The function also configures Sentry SDK to ignore KeyboardInterrupt and FileNotFoundError exceptions and to exclude + events with 'out of memory' in their exception message. Additionally, the function sets custom tags and user information for Sentry events. """ - - def before_send(event, hint): - """ - Modify the event before sending it to Sentry based on specific exception types and messages. - - Args: - event (dict): The event dictionary containing information about the error. - hint (dict): A dictionary containing additional information about the error. - - Returns: - dict: The modified event or None if the event should not be sent to Sentry. - """ - if "exc_info" in hint: - exc_type, exc_value, tb = hint["exc_info"] - if exc_type in {KeyboardInterrupt, FileNotFoundError} or "out of memory" in str(exc_value): - return None # do not send event - - event["tags"] = { - "sys_argv": ARGV[0], - "sys_argv_name": Path(ARGV[0]).name, - "install": "git" if IS_GIT_DIR else "pip" if IS_PIP_PACKAGE else "other", - "os": ENVIRONMENT, - } - return event - if ( SETTINGS["sync"] and RANK in {-1, 0} @@ -1028,9 +1003,34 @@ def set_sentry(): except ImportError: return + def before_send(event, hint): + """ + Modify the event before sending it to Sentry based on specific exception types and messages. + + Args: + event (dict): The event dictionary containing information about the error. 
+ hint (dict): A dictionary containing additional information about the error. + + Returns: + dict: The modified event or None if the event should not be sent to Sentry. + """ + if "exc_info" in hint: + exc_type, exc_value, _ = hint["exc_info"] + if exc_type in {KeyboardInterrupt, FileNotFoundError} or "out of memory" in str(exc_value): + return None # do not send event + + event["tags"] = { + "sys_argv": ARGV[0], + "sys_argv_name": Path(ARGV[0]).name, + "install": "git" if IS_GIT_DIR else "pip" if IS_PIP_PACKAGE else "other", + "os": ENVIRONMENT, + } + return event + sentry_sdk.init( - dsn="https://5ff1556b71594bfea135ff0203a0d290@o4504521589325824.ingest.sentry.io/4504521592406016", + dsn="https://888e5a0778212e1d0314c37d4b9aae5d@o4504521589325824.ingest.us.sentry.io/4504521592406016", debug=False, + auto_enabling_integrations=False, traces_sample_rate=1.0, release=__version__, environment="production", # 'dev' or 'production' @@ -1092,10 +1092,17 @@ class JSONDict(dict): try: self.file_path.parent.mkdir(parents=True, exist_ok=True) with open(self.file_path, "w") as f: - json.dump(dict(self), f, indent=2) + json.dump(dict(self), f, indent=2, default=self._json_default) except Exception as e: print(f"Error writing to {self.file_path}: {e}") + @staticmethod + def _json_default(obj): + """Handle JSON serialization of Path objects.""" + if isinstance(obj, Path): + return str(obj) + raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable") + def __setitem__(self, key, value): """Store a key-value pair and persist to disk.""" with self.lock: @@ -1110,7 +1117,7 @@ class JSONDict(dict): def __str__(self): """Return a pretty-printed JSON string representation of the dictionary.""" - return f'JSONDict("{self.file_path}"):\n{json.dumps(dict(self), indent=2, ensure_ascii=False)}' + return f'JSONDict("{self.file_path}"):\n{json.dumps(dict(self), indent=2, ensure_ascii=False, default=self._json_default)}' def update(self, *args, **kwargs): """Update the dictionary and persist changes.""" @@ -1163,25 +1170,26 @@ class SettingsManager(JSONDict): self.file = Path(file) self.version = version self.defaults = { - "settings_version": version, - "datasets_dir": str(datasets_root / "datasets"), - "weights_dir": str(root / "weights"), - "runs_dir": str(root / "runs"), - "uuid": hashlib.sha256(str(uuid.getnode()).encode()).hexdigest(), - "sync": True, - "api_key": "", - "openai_api_key": "", - "clearml": True, # integrations - "comet": True, - "dvc": True, - "hub": True, - "mlflow": True, - "neptune": True, - "raytune": True, - "tensorboard": True, - "wandb": True, - "vscode_msg": True, + "settings_version": version, # Settings schema version + "datasets_dir": str(datasets_root / "datasets"), # Datasets directory + "weights_dir": str(root / "weights"), # Model weights directory + "runs_dir": str(root / "runs"), # Experiment runs directory + "uuid": hashlib.sha256(str(uuid.getnode()).encode()).hexdigest(), # SHA-256 anonymized UUID hash + "sync": True, # Enable synchronization + "api_key": "", # Ultralytics API Key + "openai_api_key": "", # OpenAI API Key + "clearml": True, # ClearML integration + "comet": True, # Comet integration + "dvc": True, # DVC integration + "hub": True, # Ultralytics HUB integration + "mlflow": True, # MLflow integration + "neptune": True, # Neptune integration + "raytune": True, # Ray Tune integration + "tensorboard": True, # TensorBoard logging + "wandb": True, # Weights & Biases logging + "vscode_msg": True, # VSCode messaging } + self.help_msg = ( f"\nView 
Ultralytics Settings with 'yolo settings' or at '{self.file}'" "\nUpdate Settings with 'yolo settings key=value', i.e. 'yolo settings runs_dir=path/to/dir'. " diff --git a/ultralytics/utils/benchmarks.py b/ultralytics/utils/benchmarks.py index 53ad62c5fe..fe6e2a6513 100644 --- a/ultralytics/utils/benchmarks.py +++ b/ultralytics/utils/benchmarks.py @@ -43,36 +43,40 @@ from ultralytics.utils import ARM64, ASSETS, IS_JETSON, IS_RASPBERRYPI, LINUX, L from ultralytics.utils.checks import IS_PYTHON_3_12, check_requirements, check_yolo from ultralytics.utils.downloads import safe_download from ultralytics.utils.files import file_size -from ultralytics.utils.torch_utils import select_device +from ultralytics.utils.torch_utils import get_cpu_info, select_device def benchmark( - model=WEIGHTS_DIR / "yolov8n.pt", data=None, imgsz=160, half=False, int8=False, device="cpu", verbose=False + model=WEIGHTS_DIR / "yolov8n.pt", + data=None, + imgsz=160, + half=False, + int8=False, + device="cpu", + verbose=False, + eps=1e-3, ): """ Benchmark a YOLO model across different formats for speed and accuracy. Args: - model (str | Path | optional): Path to the model file or directory. Default is - Path(SETTINGS['weights_dir']) / 'yolov8n.pt'. - data (str, optional): Dataset to evaluate on, inherited from TASK2DATA if not passed. Default is None. - imgsz (int, optional): Image size for the benchmark. Default is 160. - half (bool, optional): Use half-precision for the model if True. Default is False. - int8 (bool, optional): Use int8-precision for the model if True. Default is False. - device (str, optional): Device to run the benchmark on, either 'cpu' or 'cuda'. Default is 'cpu'. - verbose (bool | float | optional): If True or a float, assert benchmarks pass with given metric. - Default is False. + model (str | Path): Path to the model file or directory. + data (str | None): Dataset to evaluate on, inherited from TASK2DATA if not passed. + imgsz (int): Image size for the benchmark. + half (bool): Use half-precision for the model if True. + int8 (bool): Use int8-precision for the model if True. + device (str): Device to run the benchmark on, either 'cpu' or 'cuda'. + verbose (bool | float): If True or a float, assert benchmarks pass with given metric. + eps (float): Epsilon value for divide by zero prevention. Returns: - df (pandas.DataFrame): A pandas DataFrame with benchmark results for each format, including file size, - metric, and inference time. + (pandas.DataFrame): A pandas DataFrame with benchmark results for each format, including file size, metric, + and inference time. 
- Example: - ```python - from ultralytics.utils.benchmarks import benchmark - - benchmark(model="yolov8n.pt", imgsz=640) - ``` + Examples: + Benchmark a YOLO model with default settings: + >>> from ultralytics.utils.benchmarks import benchmark + >>> benchmark(model="yolov8n.pt", imgsz=640) """ import pandas as pd # scope for faster 'import ultralytics' @@ -106,6 +110,7 @@ def benchmark( if i in {11}: # Paddle assert not isinstance(model, YOLOWorld), "YOLOWorldv2 Paddle exports not supported yet" assert not is_end2end, "End-to-end models not supported by PaddlePaddle yet" + assert LINUX or MACOS, "Windows Paddle exports not supported yet" if i in {12}: # NCNN assert not isinstance(model, YOLOWorld), "YOLOWorldv2 NCNN exports not supported yet" if "cpu" in device.type: @@ -138,7 +143,7 @@ def benchmark( data=data, batch=1, imgsz=imgsz, plots=False, device=device, half=half, int8=int8, verbose=False ) metric, speed = results.results_dict[key], results.speed["inference"] - fps = round((1000 / speed), 2) # frames per second + fps = round(1000 / (speed + eps), 2) # frames per second y.append([name, "✅", round(file_size(filename), 1), round(metric, 4), round(speed, 2), fps]) except Exception as e: if verbose: @@ -165,10 +170,10 @@ def benchmark( class RF100Benchmark: - """Benchmark YOLO model performance across formats for speed and accuracy.""" + """Benchmark YOLO model performance across various formats for speed and accuracy.""" def __init__(self): - """Function for initialization of RF100Benchmark.""" + """Initialize the RF100Benchmark class for benchmarking YOLO model performance across various formats.""" self.ds_names = [] self.ds_cfg_list = [] self.rf = None @@ -180,6 +185,11 @@ class RF100Benchmark: Args: api_key (str): The API key. + + Examples: + Set the Roboflow API key for accessing datasets: + >>> benchmark = RF100Benchmark() + >>> benchmark.set_key("your_roboflow_api_key") """ check_requirements("roboflow") from roboflow import Roboflow @@ -188,10 +198,15 @@ class RF100Benchmark: def parse_dataset(self, ds_link_txt="datasets_links.txt"): """ - Parse dataset links and downloads datasets. + Parse dataset links and download datasets. Args: - ds_link_txt (str): Path to dataset_links file. + ds_link_txt (str): Path to the file containing dataset links. + + Examples: + >>> benchmark = RF100Benchmark() + >>> benchmark.set_key("api_key") + >>> benchmark.parse_dataset("datasets_links.txt") """ (shutil.rmtree("rf-100"), os.mkdir("rf-100")) if os.path.exists("rf-100") else os.mkdir("rf-100") os.chdir("rf-100") @@ -217,10 +232,13 @@ class RF100Benchmark: @staticmethod def fix_yaml(path): """ - Function to fix YAML train and val path. + Fixes the train and validation paths in a given YAML file. Args: - path (str): YAML file path. + path (str): Path to the YAML file to be fixed. + + Examples: + >>> RF100Benchmark.fix_yaml("path/to/data.yaml") """ with open(path) as file: yaml_data = yaml.safe_load(file) @@ -231,13 +249,21 @@ class RF100Benchmark: def evaluate(self, yaml_path, val_log_file, eval_log_file, list_ind): """ - Model evaluation on validation results. + Evaluate model performance on validation results. Args: - yaml_path (str): YAML file path. - val_log_file (str): val_log_file path. - eval_log_file (str): eval_log_file path. - list_ind (int): Index for current dataset. + yaml_path (str): Path to the YAML configuration file. + val_log_file (str): Path to the validation log file. + eval_log_file (str): Path to the evaluation log file. 
+ list_ind (int): Index of the current dataset in the list. + + Returns: + (float): The mean average precision (mAP) value for the evaluated model. + + Examples: + Evaluate a model on a specific dataset + >>> benchmark = RF100Benchmark() + >>> benchmark.evaluate("path/to/data.yaml", "path/to/val_log.txt", "path/to/eval_log.txt", 0) """ skip_symbols = ["🚀", "⚠️", "💡", "❌"] with open(yaml_path) as stream: @@ -285,21 +311,23 @@ class ProfileModels: This class profiles the performance of different models, returning results such as model speed and FLOPs. Attributes: - paths (list): Paths of the models to profile. - num_timed_runs (int): Number of timed runs for the profiling. Default is 100. - num_warmup_runs (int): Number of warmup runs before profiling. Default is 10. - min_time (float): Minimum number of seconds to profile for. Default is 60. - imgsz (int): Image size used in the models. Default is 640. + paths (List[str]): Paths of the models to profile. + num_timed_runs (int): Number of timed runs for the profiling. + num_warmup_runs (int): Number of warmup runs before profiling. + min_time (float): Minimum number of seconds to profile for. + imgsz (int): Image size used in the models. + half (bool): Flag to indicate whether to use FP16 half-precision for TensorRT profiling. + trt (bool): Flag to indicate whether to profile using TensorRT. + device (torch.device): Device used for profiling. Methods: - profile(): Profiles the models and prints the result. + profile: Profiles the models and prints the result. - Example: - ```python - from ultralytics.utils.benchmarks import ProfileModels - - ProfileModels(["yolov8n.yaml", "yolov8s.yaml"], imgsz=640).profile() - ``` + Examples: + Profile models and print results + >>> from ultralytics.utils.benchmarks import ProfileModels + >>> profiler = ProfileModels(["yolov8n.yaml", "yolov8s.yaml"], imgsz=640) + >>> profiler.profile() """ def __init__( @@ -317,17 +345,23 @@ class ProfileModels: Initialize the ProfileModels class for profiling models. Args: - paths (list): List of paths of the models to be profiled. - num_timed_runs (int, optional): Number of timed runs for the profiling. Default is 100. - num_warmup_runs (int, optional): Number of warmup runs before the actual profiling starts. Default is 10. - min_time (float, optional): Minimum time in seconds for profiling a model. Default is 60. - imgsz (int, optional): Size of the image used during profiling. Default is 640. - half (bool, optional): Flag to indicate whether to use FP16 half-precision for TensorRT profiling. - trt (bool, optional): Flag to indicate whether to profile using TensorRT. Default is True. - device (torch.device, optional): Device used for profiling. If None, it is determined automatically. + paths (List[str]): List of paths of the models to be profiled. + num_timed_runs (int): Number of timed runs for the profiling. + num_warmup_runs (int): Number of warmup runs before the actual profiling starts. + min_time (float): Minimum time in seconds for profiling a model. + imgsz (int): Size of the image used during profiling. + half (bool): Flag to indicate whether to use FP16 half-precision for TensorRT profiling. + trt (bool): Flag to indicate whether to profile using TensorRT. + device (torch.device | None): Device used for profiling. If None, it is determined automatically. Notes: - FP16 'half' argument option removed for ONNX as slower on CPU than FP32 + FP16 'half' argument option removed for ONNX as slower on CPU than FP32. 
+ + Examples: + Initialize and profile models + >>> from ultralytics.utils.benchmarks import ProfileModels + >>> profiler = ProfileModels(["yolov8n.yaml", "yolov8s.yaml"], imgsz=640) + >>> profiler.profile() """ self.paths = paths self.num_timed_runs = num_timed_runs @@ -339,7 +373,7 @@ class ProfileModels: self.device = device or torch.device(0 if torch.cuda.is_available() else "cpu") def profile(self): - """Logs the benchmarking results of a model, checks metrics against floor and returns the results.""" + """Profiles YOLO models for speed and accuracy across various formats including ONNX and TensorRT.""" files = self.get_files() if not files: @@ -404,7 +438,7 @@ class ProfileModels: @staticmethod def iterative_sigma_clipping(data, sigma=2, max_iters=3): - """Applies an iterative sigma clipping algorithm to the given data times number of iterations.""" + """Applies iterative sigma clipping to data to remove outliers based on specified sigma and iteration count.""" data = np.array(data) for _ in range(max_iters): mean, std = np.mean(data), np.std(data) @@ -415,7 +449,7 @@ class ProfileModels: return data def profile_tensorrt_model(self, engine_file: str, eps: float = 1e-3): - """Profiles the TensorRT model, measuring average run time and standard deviation among runs.""" + """Profiles YOLO model performance with TensorRT, measuring average run time and standard deviation.""" if not self.trt or not Path(engine_file).is_file(): return 0.0, 0.0 @@ -499,16 +533,16 @@ class ProfileModels: return np.mean(run_times), np.std(run_times) def generate_table_row(self, model_name, t_onnx, t_engine, model_info): - """Generates a formatted string for a table row that includes model performance and metric details.""" + """Generates a table row string with model performance metrics including inference times and model details.""" layers, params, gradients, flops = model_info return ( - f"| {model_name:18s} | {self.imgsz} | - | {t_onnx[0]:.2f} ± {t_onnx[1]:.2f} ms | {t_engine[0]:.2f} ± " - f"{t_engine[1]:.2f} ms | {params / 1e6:.1f} | {flops:.1f} |" + f"| {model_name:18s} | {self.imgsz} | - | {t_onnx[0]:.1f}±{t_onnx[1]:.1f} ms | {t_engine[0]:.1f}±" + f"{t_engine[1]:.1f} ms | {params / 1e6:.1f} | {flops:.1f} |" ) @staticmethod def generate_results_dict(model_name, t_onnx, t_engine, model_info): - """Generates a dictionary of model details including name, parameters, GFLOPS and speed metrics.""" + """Generates a dictionary of profiling results including model name, parameters, GFLOPs, and speed metrics.""" layers, params, gradients, flops = model_info return { "model/name": model_name, @@ -520,16 +554,19 @@ class ProfileModels: @staticmethod def print_table(table_rows): - """Formats and prints a comparison table for different models with given statistics and performance data.""" + """Prints a formatted table of model profiling results, including speed and accuracy metrics.""" gpu = torch.cuda.get_device_name(0) if torch.cuda.is_available() else "GPU" - header = ( - f"| Model | size
<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | "
-            f"Speed<br><sup>{gpu} TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |"
-        )
-        separator = (
-            "|-------------|---------------------|--------------------|------------------------------|"
-            "-----------------------------------|------------------|-----------------|"
-        )
+        headers = [
+            "Model",
+            "size<br><sup>(pixels)",
+            "mAP<sup>val<br>50-95",
+            f"Speed<br><sup>CPU ({get_cpu_info()}) ONNX<br>(ms)",
+            f"Speed<br><sup>{gpu} TensorRT<br>(ms)",
+            "params<br><sup>(M)",
+            "FLOPs<br><sup>
(B)", + ] + header = "|" + "|".join(f" {h} " for h in headers) + "|" + separator = "|" + "|".join("-" * (len(h) + 2) for h in headers) + "|" print(f"\n\n{header}") print(separator) diff --git a/ultralytics/utils/checks.py b/ultralytics/utils/checks.py index 70d3d088b4..85eccf67e3 100644 --- a/ultralytics/utils/checks.py +++ b/ultralytics/utils/checks.py @@ -629,24 +629,24 @@ def collect_system_info(): def check_amp(model): """ - Checks the PyTorch Automatic Mixed Precision (AMP) functionality of a YOLOv8 model. If the checks fail, it means + Checks the PyTorch Automatic Mixed Precision (AMP) functionality of a YOLO11 model. If the checks fail, it means there are anomalies with AMP on the system that may cause NaN losses or zero-mAP results, so AMP will be disabled during training. Args: - model (nn.Module): A YOLOv8 model instance. + model (nn.Module): A YOLO11 model instance. Example: ```python from ultralytics import YOLO from ultralytics.utils.checks import check_amp - model = YOLO("yolov8n.pt").model.cuda() + model = YOLO("yolo11n.pt").model.cuda() check_amp(model) ``` Returns: - (bool): Returns True if the AMP functionality works correctly with YOLOv8 model, else False. + (bool): Returns True if the AMP functionality works correctly with YOLO11 model, else False. """ from ultralytics.utils.torch_utils import autocast @@ -657,27 +657,28 @@ def check_amp(model): def amp_allclose(m, im): """All close FP32 vs AMP results.""" batch = [im] * 8 - a = m(batch, imgsz=128, device=device, verbose=False)[0].boxes.data # FP32 inference + imgsz = max(256, int(model.stride.max() * 4)) # max stride P5-32 and P6-64 + a = m(batch, imgsz=imgsz, device=device, verbose=False)[0].boxes.data # FP32 inference with autocast(enabled=True): - b = m(batch, imgsz=128, device=device, verbose=False)[0].boxes.data # AMP inference + b = m(batch, imgsz=imgsz, device=device, verbose=False)[0].boxes.data # AMP inference del m return a.shape == b.shape and torch.allclose(a, b.float(), atol=0.5) # close to 0.5 absolute tolerance im = ASSETS / "bus.jpg" # image to check prefix = colorstr("AMP: ") - LOGGER.info(f"{prefix}running Automatic Mixed Precision (AMP) checks with YOLOv8n...") + LOGGER.info(f"{prefix}running Automatic Mixed Precision (AMP) checks with YOLO11n...") warning_msg = "Setting 'amp=True'. If you experience zero-mAP or NaN losses you can disable AMP with amp=False." try: from ultralytics import YOLO - assert amp_allclose(YOLO("yolov8n.pt"), im) + assert amp_allclose(YOLO("yolo11n.pt"), im) LOGGER.info(f"{prefix}checks passed ✅") except ConnectionError: - LOGGER.warning(f"{prefix}checks skipped ⚠️, offline and unable to download YOLOv8n. {warning_msg}") + LOGGER.warning(f"{prefix}checks skipped ⚠️, offline and unable to download YOLO11n. {warning_msg}") except (AttributeError, ModuleNotFoundError): LOGGER.warning( f"{prefix}checks skipped ⚠️. " - f"Unable to load YOLOv8n due to possible Ultralytics package modifications. {warning_msg}" + f"Unable to load YOLO11n due to possible Ultralytics package modifications. 
{warning_msg}" ) except AssertionError: LOGGER.warning( diff --git a/ultralytics/utils/downloads.py b/ultralytics/utils/downloads.py index 79c3f6b09a..5cbc868ab6 100644 --- a/ultralytics/utils/downloads.py +++ b/ultralytics/utils/downloads.py @@ -18,6 +18,7 @@ from ultralytics.utils import LOGGER, TQDM, checks, clean_url, emojis, is_online GITHUB_ASSETS_REPO = "ultralytics/assets" GITHUB_ASSETS_NAMES = ( [f"yolov8{k}{suffix}.pt" for k in "nsmlx" for suffix in ("", "-cls", "-seg", "-pose", "-obb", "-oiv7")] + + [f"yolo11{k}{suffix}.pt" for k in "nsmlx" for suffix in ("", "-cls", "-seg", "-pose", "-obb")] + [f"yolov5{k}{resolution}u.pt" for k in "nsmlx" for resolution in ("", "6")] + [f"yolov3{k}u.pt" for k in ("", "-spp", "-tiny")] + [f"yolov8{k}-world.pt" for k in "smlx"] @@ -408,7 +409,7 @@ def get_github_assets(repo="ultralytics/assets", version="latest", retry=False): return data["tag_name"], [x["name"] for x in data["assets"]] # tag, assets i.e. ['yolov8n.pt', 'yolov8s.pt', ...] -def attempt_download_asset(file, repo="ultralytics/assets", release="v8.2.0", **kwargs): +def attempt_download_asset(file, repo="ultralytics/assets", release="v8.3.0", **kwargs): """ Attempt to download a file from GitHub release assets if it is not found locally. The function checks for the file locally first, then tries to download it from the specified GitHub repository release. @@ -416,7 +417,7 @@ def attempt_download_asset(file, repo="ultralytics/assets", release="v8.2.0", ** Args: file (str | Path): The filename or file path to be downloaded. repo (str, optional): The GitHub repository in the format 'owner/repo'. Defaults to 'ultralytics/assets'. - release (str, optional): The specific release version to be downloaded. Defaults to 'v8.2.0'. + release (str, optional): The specific release version to be downloaded. Defaults to 'v8.3.0'. **kwargs (any): Additional keyword arguments for the download process. 
Returns: diff --git a/ultralytics/utils/files.py b/ultralytics/utils/files.py index d0953c748e..29c68d48de 100644 --- a/ultralytics/utils/files.py +++ b/ultralytics/utils/files.py @@ -219,4 +219,4 @@ def update_models(model_names=("yolov8n.pt",), source_dir=Path("."), update_name # Save model using model.save() print(f"Re-saving {model_name} model to {save_path}") - model.save(save_path, use_dill=False) + model.save(save_path) diff --git a/ultralytics/utils/loss.py b/ultralytics/utils/loss.py index 15bf92f9da..94038aefec 100644 --- a/ultralytics/utils/loss.py +++ b/ultralytics/utils/loss.py @@ -228,8 +228,11 @@ class v8DetectionLoss: # Pboxes pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4) + # dfl_conf = pred_distri.view(batch_size, -1, 4, self.reg_max).detach().softmax(-1) + # dfl_conf = (dfl_conf.amax(-1).mean(-1) + dfl_conf.amax(-1).amin(-1)) / 2 _, target_bboxes, target_scores, fg_mask, _ = self.assigner( + # pred_scores.detach().sigmoid() * 0.8 + dfl_conf.unsqueeze(-1) * 0.2, pred_scores.detach().sigmoid(), (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype), anchor_points * stride_tensor, diff --git a/ultralytics/utils/patches.py b/ultralytics/utils/patches.py index d918e0efea..e9ba5dfb30 100644 --- a/ultralytics/utils/patches.py +++ b/ultralytics/utils/patches.py @@ -86,25 +86,15 @@ def torch_load(*args, **kwargs): return _torch_load(*args, **kwargs) -def torch_save(*args, use_dill=True, **kwargs): +def torch_save(*args, **kwargs): """ Optionally use dill to serialize lambda functions where pickle does not, adding robustness with 3 retries and exponential standoff in case of save failure. Args: *args (tuple): Positional arguments to pass to torch.save. - use_dill (bool): Whether to try using dill for serialization if available. Defaults to True. **kwargs (Any): Keyword arguments to pass to torch.save. 
""" - try: - assert use_dill - import dill as pickle - except (AssertionError, ImportError): - import pickle - - if "pickle_module" not in kwargs: - kwargs["pickle_module"] = pickle - for i in range(4): # 3 retries try: return _torch_save(*args, **kwargs) diff --git a/ultralytics/utils/plotting.py b/ultralytics/utils/plotting.py index f3edf78168..9d3051239c 100644 --- a/ultralytics/utils/plotting.py +++ b/ultralytics/utils/plotting.py @@ -13,8 +13,8 @@ import torch from PIL import Image, ImageDraw, ImageFont from PIL import __version__ as pil_version -from ultralytics.utils import LOGGER, TryExcept, ops, plt_settings, threaded -from ultralytics.utils.checks import check_font, check_version, is_ascii +from ultralytics.utils import IS_JUPYTER, LOGGER, TryExcept, ops, plt_settings, threaded +from ultralytics.utils.checks import check_font, check_requirements, check_version, is_ascii from ultralytics.utils.files import increment_path @@ -524,7 +524,18 @@ class Annotator: def show(self, title=None): """Show the annotated image.""" - Image.fromarray(np.asarray(self.im)[..., ::-1]).show(title) + im = Image.fromarray(np.asarray(self.im)[..., ::-1]) # Convert numpy array to PIL Image with RGB to BGR + if IS_JUPYTER: + check_requirements("ipython") + try: + from IPython.display import display + + display(im) + except ImportError as e: + LOGGER.warning(f"Unable to display image in Jupyter notebooks: {e}") + else: + # Convert numpy array to PIL Image and show + im.show(title=title) def save(self, filename="image.jpg"): """Save the annotated image to 'filename'.""" @@ -580,8 +591,8 @@ class Annotator: Args: label (str): queue counts label points (tuple): region points for center point calculation to display text - region_color (RGB): queue region color - txt_color (RGB): text display color + region_color (tuple): RGB queue region color. + txt_color (tuple): RGB text display color. """ x_values = [point[0] for point in points] y_values = [point[1] for point in points] @@ -620,8 +631,8 @@ class Annotator: Args: im0 (ndarray): inference image text (str): object/class name - txt_color (bgr color): display color for text foreground - bg_color (bgr color): display color for text background + txt_color (tuple): display color for text foreground + bg_color (tuple): display color for text background x_center (float): x position center point for bounding box y_center (float): y position center point for bounding box margin (int): gap between text and rectangle for better display @@ -644,8 +655,8 @@ class Annotator: Args: im0 (ndarray): inference image text (dict): labels dictionary - txt_color (bgr color): display color for text foreground - bg_color (bgr color): display color for text background + txt_color (tuple): display color for text foreground + bg_color (tuple): display color for text background margin (int): gap between text and rectangle for better display """ horizontal_gap = int(im0.shape[1] * 0.02) @@ -794,11 +805,14 @@ class Annotator: Function for drawing segmented object in bounding box shape. Args: - mask (list): masks data list for instance segmentation area plotting - mask_color (RGB): mask foreground color - label (str): Detection label text - txt_color (RGB): text color + mask (np.ndarray): A 2D array of shape (N, 2) containing the contour points of the segmented object. + mask_color (tuple): RGB color for the contour and label background. + label (str, optional): Text label for the object. If None, no label is drawn. + txt_color (tuple): RGB color for the label text. 
""" + if mask.size == 0: # no masks to plot + return + cv2.polylines(self.im, [np.int32([mask])], isClosed=True, color=mask_color, thickness=2) text_size, _ = cv2.getTextSize(label, 0, self.sf, self.tf) @@ -822,8 +836,8 @@ class Annotator: Args: pixels_distance (float): Pixels distance between two bbox centroids. centroids (list): Bounding box centroids data. - line_color (RGB): Distance line color. - centroid_color (RGB): Bounding box centroid color. + line_color (tuple): RGB distance line color. + centroid_color (tuple): RGB bounding box centroid color. """ # Get the text size (text_width_m, text_height_m), _ = cv2.getTextSize( diff --git a/ultralytics/utils/torch_utils.py b/ultralytics/utils/torch_utils.py index a39969299c..00176d3033 100644 --- a/ultralytics/utils/torch_utils.py +++ b/ultralytics/utils/torch_utils.py @@ -159,7 +159,7 @@ def select_device(device="", batch=0, newline=False, verbose=True): if isinstance(device, torch.device): return device - s = f"Ultralytics YOLOv{__version__} 🚀 Python-{PYTHON_VERSION} torch-{torch.__version__} " + s = f"Ultralytics {__version__} 🚀 Python-{PYTHON_VERSION} torch-{torch.__version__} " device = str(device).lower() for remove in "cuda:", "none", "(", ")", "[", "]", "'", " ": device = device.replace(remove, "") # to string, 'cuda:0' -> '0' and '(0, 1)' -> '0,1' @@ -170,6 +170,8 @@ def select_device(device="", batch=0, newline=False, verbose=True): elif device: # non-cpu device requested if device == "cuda": device = "0" + if "," in device: + device = ",".join([x for x in device.split(",") if x]) # remove sequential commas, i.e. "0,,1" -> "0,1" visible = os.environ.get("CUDA_VISIBLE_DEVICES", None) os.environ["CUDA_VISIBLE_DEVICES"] = device # set environment variable - must be before assert is_available() if not (torch.cuda.is_available() and torch.cuda.device_count() >= len(device.split(","))): @@ -191,7 +193,7 @@ def select_device(device="", batch=0, newline=False, verbose=True): ) if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available - devices = device.split(",") if device else "0" # range(torch.cuda.device_count()) # i.e. 0,1,6,7 + devices = device.split(",") if device else "0" # i.e. "0,1" -> ["0", "1"] n = len(devices) # device count if n > 1: # multi-GPU if batch < 1: @@ -595,7 +597,7 @@ def strip_optimizer(f: Union[str, Path] = "best.pt", s: str = "", updates: dict # Save combined = {**metadata, **x, **(updates or {})} - torch.save(combined, s or f, use_dill=False) # combine dicts (prefer to the right) + torch.save(combined, s or f) # combine dicts (prefer to the right) mb = os.path.getsize(s or f) / 1e6 # file size LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB") return combined