From 51e93d611110c88e230c901ce4b796da53d28827 Mon Sep 17 00:00:00 2001 From: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com> Date: Tue, 1 Oct 2024 15:41:15 +0200 Subject: [PATCH] YOLO11 Tasks, Modes, Usage, Macros and Solutions Updates (#16593) Signed-off-by: UltralyticsAssistant --- docs/en/datasets/classify/caltech256.md | 2 +- docs/en/datasets/classify/cifar10.md | 2 +- docs/en/datasets/classify/fashion-mnist.md | 2 +- docs/en/datasets/detect/open-images-v7.md | 2 +- docs/en/datasets/segment/carparts-seg.md | 2 +- docs/en/guides/security-alarm-system.md | 2 +- docs/en/guides/streamlit-live-inference.md | 2 +- docs/en/guides/yolo-performance-metrics.md | 2 +- docs/en/help/FAQ.md | 30 +++--- docs/en/help/index.md | 6 +- docs/en/index.md | 4 +- docs/en/macros/export-table.md | 26 +++--- docs/en/modes/benchmark.md | 38 ++++---- docs/en/modes/export.md | 36 ++++---- docs/en/modes/predict.md | 102 ++++++++++----------- docs/en/modes/track.md | 62 ++++++------- docs/en/modes/train.md | 62 ++++++------- docs/en/modes/val.md | 50 +++++----- docs/en/solutions/index.md | 44 ++++----- docs/en/tasks/classify.md | 72 +++++++-------- docs/en/tasks/detect.md | 82 ++++++++--------- docs/en/tasks/index.md | 2 +- docs/en/tasks/obb.md | 70 +++++++------- docs/en/tasks/pose.md | 68 +++++++------- docs/en/tasks/segment.md | 68 +++++++------- docs/en/usage/callbacks.md | 16 ++-- docs/en/usage/cfg.md | 18 ++-- docs/en/usage/cli.md | 84 ++++++++--------- docs/en/usage/engine.md | 30 +++--- docs/en/usage/python.md | 86 ++++++++--------- docs/en/usage/simple-utilities.md | 10 +- 31 files changed, 541 insertions(+), 541 deletions(-) diff --git a/docs/en/datasets/classify/caltech256.md b/docs/en/datasets/classify/caltech256.md index de7c2ea461..e23f0ccd46 100644 --- a/docs/en/datasets/classify/caltech256.md +++ b/docs/en/datasets/classify/caltech256.md @@ -16,7 +16,7 @@ The [Caltech-256](https://data.caltech.edu/records/nyy15-4j048) dataset is an ex allowfullscreen>
- Watch: How to Train [Image Classification](https://www.ultralytics.com/glossary/image-classification) Model using Caltech-256 Dataset with Ultralytics HUB + Watch: How to Train Image Classification Model using Caltech-256 Dataset with Ultralytics HUB

## Key Features diff --git a/docs/en/datasets/classify/cifar10.md b/docs/en/datasets/classify/cifar10.md index 7bae78b38a..070a5453ca 100644 --- a/docs/en/datasets/classify/cifar10.md +++ b/docs/en/datasets/classify/cifar10.md @@ -16,7 +16,7 @@ The [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) (Canadian Institute allowfullscreen>
- Watch: How to Train an [Image Classification](https://www.ultralytics.com/glossary/image-classification) Model with CIFAR-10 Dataset using Ultralytics YOLOv8 + Watch: How to Train an Image Classification Model with CIFAR-10 Dataset using Ultralytics YOLOv8

## Key Features diff --git a/docs/en/datasets/classify/fashion-mnist.md b/docs/en/datasets/classify/fashion-mnist.md index 531cd2c1bd..a4c675c3b0 100644 --- a/docs/en/datasets/classify/fashion-mnist.md +++ b/docs/en/datasets/classify/fashion-mnist.md @@ -16,7 +16,7 @@ The [Fashion-MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset is allowfullscreen>
- Watch: How to do [Image Classification](https://www.ultralytics.com/glossary/image-classification) on Fashion MNIST Dataset using Ultralytics YOLOv8 + Watch: How to do Image Classification on Fashion MNIST Dataset using Ultralytics YOLOv8

## Key Features diff --git a/docs/en/datasets/detect/open-images-v7.md b/docs/en/datasets/detect/open-images-v7.md index 1e6f1f7e4f..6ca2fab501 100644 --- a/docs/en/datasets/detect/open-images-v7.md +++ b/docs/en/datasets/detect/open-images-v7.md @@ -16,7 +16,7 @@ keywords: Open Images V7, Google dataset, computer vision, YOLOv8 models, object allowfullscreen>
- Watch: [Object Detection](https://www.ultralytics.com/glossary/object-detection) using OpenImagesV7 Pretrained Model + Watch: Object Detection using OpenImagesV7 Pretrained Model

## Open Images V7 Pretrained Models diff --git a/docs/en/datasets/segment/carparts-seg.md b/docs/en/datasets/segment/carparts-seg.md index b798cacad1..dd56817492 100644 --- a/docs/en/datasets/segment/carparts-seg.md +++ b/docs/en/datasets/segment/carparts-seg.md @@ -18,7 +18,7 @@ Whether you're working on automotive research, developing AI solutions for vehic allowfullscreen>
- Watch: Carparts [Instance Segmentation](https://www.ultralytics.com/glossary/instance-segmentation) Using Ultralytics HUB + Watch: Carparts Instance Segmentation Using Ultralytics HUB

## Dataset Structure diff --git a/docs/en/guides/security-alarm-system.md b/docs/en/guides/security-alarm-system.md index d90e44a583..a75ba7fbb5 100644 --- a/docs/en/guides/security-alarm-system.md +++ b/docs/en/guides/security-alarm-system.md @@ -22,7 +22,7 @@ The Security Alarm System Project utilizing Ultralytics YOLOv8 integrates advanc allowfullscreen>
- Watch: Security Alarm System Project with Ultralytics YOLOv8 [Object Detection](https://www.ultralytics.com/glossary/object-detection) + Watch: Security Alarm System Project with Ultralytics YOLOv8 Object Detection

### Code diff --git a/docs/en/guides/streamlit-live-inference.md b/docs/en/guides/streamlit-live-inference.md index e8fb5c9165..27d3b71409 100644 --- a/docs/en/guides/streamlit-live-inference.md +++ b/docs/en/guides/streamlit-live-inference.md @@ -18,7 +18,7 @@ Streamlit makes it simple to build and deploy interactive web applications. Comb allowfullscreen>
- Watch: How to Use Streamlit with Ultralytics for Real-Time [Computer Vision](https://www.ultralytics.com/glossary/computer-vision-cv) in Your Browser + Watch: How to Use Streamlit with Ultralytics for Real-Time Computer Vision in Your Browser

 | Aquaculture | Animal husbandry |
diff --git a/docs/en/guides/yolo-performance-metrics.md b/docs/en/guides/yolo-performance-metrics.md
index aeed82355d..9a05ffb737 100644
--- a/docs/en/guides/yolo-performance-metrics.md
+++ b/docs/en/guides/yolo-performance-metrics.md
@@ -18,7 +18,7 @@ Performance metrics are key tools to evaluate the [accuracy](https://www.ultraly
 allowfullscreen>
 <br>
- Watch: Ultralytics YOLOv8 Performance Metrics | MAP, F1 Score, [Precision](https://www.ultralytics.com/glossary/precision), IoU & Accuracy
+ Watch: Ultralytics YOLOv8 Performance Metrics | mAP, F1 Score, Precision, IoU & Accuracy

## Object Detection Metrics
diff --git a/docs/en/help/FAQ.md b/docs/en/help/FAQ.md
index 234fb9e82f..bde16d98af 100644
--- a/docs/en/help/FAQ.md
+++ b/docs/en/help/FAQ.md
@@ -14,7 +14,7 @@ This FAQ section addresses common questions and issues users might encounter whi
 Ultralytics is a [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) AI company specializing in state-of-the-art object detection and [image segmentation](https://www.ultralytics.com/glossary/image-segmentation) models, with a focus on the YOLO (You Only Look Once) family. Their offerings include:
 
-- Open-source implementations of [YOLOv5](https://docs.ultralytics.com/models/yolov5/) and [YOLOv8](https://docs.ultralytics.com/models/yolov8/)
+- Open-source implementations of [YOLOv8](https://docs.ultralytics.com/models/yolov8/) and [YOLO11](https://docs.ultralytics.com/models/yolo11/)
 - A wide range of [pre-trained models](https://docs.ultralytics.com/models/) for various computer vision tasks
 - A comprehensive [Python package](https://docs.ultralytics.com/usage/python/) for seamless integration of YOLO models into projects
 - Versatile [tools](https://docs.ultralytics.com/modes/) for training, testing, and deploying models
@@ -54,9 +54,9 @@ Recommended setup:
 
 For troubleshooting common issues, visit the [YOLO Common Issues](https://docs.ultralytics.com/guides/yolo-common-issues/) page.
 
-### How can I train a custom YOLOv8 model on my own dataset?
+### How can I train a custom YOLO11 model on my own dataset?
 
-To train a custom YOLOv8 model:
+To train a custom YOLO11 model:
 
 1. Prepare your dataset in YOLO format (images and corresponding label txt files).
 2. Create a YAML file describing your dataset structure and classes.
@@ -77,11 +77,11 @@ For a more in-depth guide, including data preparation and advanced training opti
 
 ### What pretrained models are available in Ultralytics?
 
-Ultralytics offers a diverse range of pretrained YOLOv8 models for various tasks:
+Ultralytics offers a diverse range of pretrained YOLO11 models for various tasks:
 
-- Object Detection: YOLOv8n, YOLOv8s, YOLOv8m, YOLOv8l, YOLOv8x
-- [Instance Segmentation](https://www.ultralytics.com/glossary/instance-segmentation): YOLOv8n-seg, YOLOv8s-seg, YOLOv8m-seg, YOLOv8l-seg, YOLOv8x-seg
-- Classification: YOLOv8n-cls, YOLOv8s-cls, YOLOv8m-cls, YOLOv8l-cls, YOLOv8x-cls
+- Object Detection: YOLO11n, YOLO11s, YOLO11m, YOLO11l, YOLO11x
+- [Instance Segmentation](https://www.ultralytics.com/glossary/instance-segmentation): YOLO11n-seg, YOLO11s-seg, YOLO11m-seg, YOLO11l-seg, YOLO11x-seg
+- Classification: YOLO11n-cls, YOLO11s-cls, YOLO11m-cls, YOLO11l-cls, YOLO11x-cls
 
 These models vary in size and complexity, offering different trade-offs between speed and [accuracy](https://www.ultralytics.com/glossary/accuracy). Explore the full range of [pretrained models](https://docs.ultralytics.com/models/yolov8/) to find the best fit for your project.
 
@@ -118,17 +118,17 @@ Absolutely! Ultralytics models are designed for versatile deployment across vari
 
 Ultralytics provides export functions to convert models to various formats for deployment. Explore the wide range of [deployment options](https://docs.ultralytics.com/guides/model-deployment-options/) to find the best solution for your use case.
 
-### What's the difference between YOLOv5 and YOLOv8?
+### What's the difference between YOLOv8 and YOLO11?
 
 Key distinctions include:
 
-- Architecture: YOLOv8 features an improved backbone and head design for enhanced performance.
-- Performance: YOLOv8 generally offers superior accuracy and speed compared to YOLOv5. -- Tasks: YOLOv8 natively supports [object detection](https://www.ultralytics.com/glossary/object-detection), instance segmentation, and classification in a unified framework. -- Codebase: YOLOv8 is implemented with a more modular and extensible architecture, facilitating easier customization and extension. -- Training: YOLOv8 incorporates advanced training techniques like multi-dataset training and hyperparameter evolution for improved results. +- Architecture: YOLO11 features an improved backbone and head design for enhanced performance. +- Performance: YOLO11 generally offers superior accuracy and speed compared to YOLOv8. +- Tasks: YOLO11 natively supports [object detection](https://www.ultralytics.com/glossary/object-detection), instance segmentation, and classification in a unified framework. +- Codebase: YOLO11 is implemented with a more modular and extensible architecture, facilitating easier customization and extension. +- Training: YOLO11 incorporates advanced training techniques like multi-dataset training and hyperparameter evolution for improved results. -For an in-depth comparison of features and performance metrics, visit the [YOLOv5 vs YOLOv8](https://www.ultralytics.com/yolo) comparison page. +For an in-depth comparison of features and performance metrics, visit the [YOLO](https://www.ultralytics.com/yolo) comparison page. ### How can I contribute to the Ultralytics open-source project? @@ -176,7 +176,7 @@ Enhancing your YOLO model's performance can be achieved through several techniqu 1. [Hyperparameter Tuning](https://www.ultralytics.com/glossary/hyperparameter-tuning): Experiment with different hyperparameters using the [Hyperparameter Tuning Guide](https://docs.ultralytics.com/guides/hyperparameter-tuning/) to optimize model performance. 2. [Data Augmentation](https://www.ultralytics.com/glossary/data-augmentation): Implement techniques like flip, scale, rotate, and color adjustments to enhance your training dataset and improve model generalization. -3. [Transfer Learning](https://www.ultralytics.com/glossary/transfer-learning): Leverage pre-trained models and fine-tune them on your specific dataset using the [Train YOLOv8](https://docs.ultralytics.com/modes/train/) guide. +3. [Transfer Learning](https://www.ultralytics.com/glossary/transfer-learning): Leverage pre-trained models and fine-tune them on your specific dataset using the [Train YOLO11](https://docs.ultralytics.com/modes/train/) guide. 4. Export to Efficient Formats: Convert your model to optimized formats like TensorRT or ONNX for faster inference using the [Export guide](../modes/export.md). 5. Benchmarking: Utilize the [Benchmark Mode](https://docs.ultralytics.com/modes/benchmark/) to measure and improve inference speed and accuracy systematically. diff --git a/docs/en/help/index.md b/docs/en/help/index.md index e8f2eecd7a..c1ff0128b2 100644 --- a/docs/en/help/index.md +++ b/docs/en/help/index.md @@ -22,9 +22,9 @@ We encourage you to review these resources for a seamless and productive experie ### What is Ultralytics YOLO and how does it benefit my [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) projects? -Ultralytics YOLO (You Only Look Once) is a state-of-the-art, real-time [object detection](https://www.ultralytics.com/glossary/object-detection) model. 
Its latest version, YOLOv8, enhances speed, [accuracy](https://www.ultralytics.com/glossary/accuracy), and versatility, making it ideal for a wide range of applications, from real-time video analytics to advanced machine learning research. YOLO's efficiency in detecting objects in images and videos has made it the go-to solution for businesses and researchers looking to integrate robust [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) capabilities into their projects. +Ultralytics YOLO (You Only Look Once) is a state-of-the-art, real-time [object detection](https://www.ultralytics.com/glossary/object-detection) model. Its latest version, YOLO11, enhances speed, [accuracy](https://www.ultralytics.com/glossary/accuracy), and versatility, making it ideal for a wide range of applications, from real-time video analytics to advanced machine learning research. YOLO's efficiency in detecting objects in images and videos has made it the go-to solution for businesses and researchers looking to integrate robust [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) capabilities into their projects. -For more details on YOLOv8, visit the [YOLOv8 documentation](../tasks/detect.md). +For more details on YOLO11, visit the [YOLO11 documentation](../tasks/detect.md). ### How do I contribute to Ultralytics YOLO repositories? @@ -32,7 +32,7 @@ Contributing to Ultralytics YOLO repositories is straightforward. Start by revie ### Why should I use Ultralytics HUB for my machine learning projects? -Ultralytics HUB offers a seamless, no-code solution for managing your machine learning projects. It enables you to generate, train, and deploy AI models like YOLOv8 effortlessly. Unique features include cloud training, real-time tracking, and intuitive dataset management. Ultralytics HUB simplifies the entire workflow, from data processing to [model deployment](https://www.ultralytics.com/glossary/model-deployment), making it an indispensable tool for both beginners and advanced users. +Ultralytics HUB offers a seamless, no-code solution for managing your machine learning projects. It enables you to generate, train, and deploy AI models like YOLO11 effortlessly. Unique features include cloud training, real-time tracking, and intuitive dataset management. Ultralytics HUB simplifies the entire workflow, from data processing to [model deployment](https://www.ultralytics.com/glossary/model-deployment), making it an indispensable tool for both beginners and advanced users. To get started, visit [Ultralytics HUB Quickstart](../hub/quickstart.md). diff --git a/docs/en/index.md b/docs/en/index.md index c4c8caccc0..45c5dab7d8 100644 --- a/docs/en/index.md +++ b/docs/en/index.md @@ -81,10 +81,10 @@ Explore the Ultralytics Docs, a comprehensive resource designed to help you unde - [YOLOv5](https://github.com/ultralytics/yolov5) further improved the model's performance and added new features such as hyperparameter optimization, integrated experiment tracking and automatic export to popular export formats. - [YOLOv6](https://github.com/meituan/YOLOv6) was open-sourced by [Meituan](https://about.meituan.com/) in 2022 and is in use in many of the company's autonomous delivery robots. - [YOLOv7](https://github.com/WongKinYiu/yolov7) added additional tasks such as pose estimation on the COCO keypoints dataset. -- [YOLOv8](https://github.com/ultralytics/ultralytics) is the latest version of YOLO by Ultralytics. 
As a cutting-edge, state-of-the-art (SOTA) model, YOLOv8 builds on the success of previous versions, introducing new features and improvements for enhanced performance, flexibility, and efficiency. YOLOv8 supports a full range of vision AI tasks, including [detection](tasks/detect.md), [segmentation](tasks/segment.md), [pose estimation](tasks/pose.md), [tracking](modes/track.md), and [classification](tasks/classify.md). This versatility allows users to leverage YOLOv8's capabilities across diverse applications and domains.
+- [YOLOv8](https://github.com/ultralytics/ultralytics) was released in 2023 by Ultralytics, introducing new features and improvements for enhanced performance, flexibility, and efficiency, and supporting a full range of vision AI tasks.
 - [YOLOv9](models/yolov9.md) introduces innovative methods like Programmable Gradient Information (PGI) and the Generalized Efficient Layer Aggregation Network (GELAN).
 - [YOLOv10](models/yolov10.md) is created by researchers from [Tsinghua University](https://www.tsinghua.edu.cn/en/) using the [Ultralytics](https://www.ultralytics.com/) [Python package](https://pypi.org/project/ultralytics/). This version provides real-time [object detection](tasks/detect.md) advancements by introducing an End-to-End head that eliminates Non-Maximum Suppression (NMS) requirements.
-- **[YOLO11](models/yolo11.md) NEW 🚀**: Ultralytics' latest YOLO models delivering state-of-the-art (SOTA) performance across multiple tasks.
+- **[YOLO11](models/yolo11.md) NEW 🚀**: Ultralytics' latest YOLO models delivering state-of-the-art (SOTA) performance across multiple tasks, including [detection](tasks/detect.md), [segmentation](tasks/segment.md), [pose estimation](tasks/pose.md), [tracking](modes/track.md), and [classification](tasks/classify.md), enabling users to leverage these capabilities across diverse AI applications and domains.
 
 ## YOLO Licenses: How is Ultralytics YOLO licensed?
diff --git a/docs/en/macros/export-table.md b/docs/en/macros/export-table.md index 924a8727c9..7cda31963a 100644 --- a/docs/en/macros/export-table.md +++ b/docs/en/macros/export-table.md @@ -1,15 +1,15 @@ | Format | `format` Argument | Model | Metadata | Arguments | | ------------------------------------------------- | ----------------- | ----------------------------------------------- | -------- | -------------------------------------------------------------------- | -| [PyTorch](https://pytorch.org/) | - | `{{ model_name or "yolov8n" }}.pt` | ✅ | - | -| [TorchScript](../integrations/torchscript.md) | `torchscript` | `{{ model_name or "yolov8n" }}.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | -| [ONNX](../integrations/onnx.md) | `onnx` | `{{ model_name or "yolov8n" }}.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | -| [OpenVINO](../integrations/openvino.md) | `openvino` | `{{ model_name or "yolov8n" }}_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TensorRT](../integrations/tensorrt.md) | `engine` | `{{ model_name or "yolov8n" }}.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | -| [CoreML](../integrations/coreml.md) | `coreml` | `{{ model_name or "yolov8n" }}.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | -| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `{{ model_name or "yolov8n" }}_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | -| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `{{ model_name or "yolov8n" }}.pb` | ❌ | `imgsz`, `batch` | -| [TF Lite](../integrations/tflite.md) | `tflite` | `{{ model_name or "yolov8n" }}.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `{{ model_name or "yolov8n" }}_edgetpu.tflite` | ✅ | `imgsz` | -| [TF.js](../integrations/tfjs.md) | `tfjs` | `{{ model_name or "yolov8n" }}_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `{{ model_name or "yolov8n" }}_paddle_model/` | ✅ | `imgsz`, `batch` | -| [NCNN](../integrations/ncnn.md) | `ncnn` | `{{ model_name or "yolov8n" }}_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | +| [PyTorch](https://pytorch.org/) | - | `{{ model_name or "yolo11n" }}.pt` | ✅ | - | +| [TorchScript](../integrations/torchscript.md) | `torchscript` | `{{ model_name or "yolo11n" }}.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | +| [ONNX](../integrations/onnx.md) | `onnx` | `{{ model_name or "yolo11n" }}.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | +| [OpenVINO](../integrations/openvino.md) | `openvino` | `{{ model_name or "yolo11n" }}_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TensorRT](../integrations/tensorrt.md) | `engine` | `{{ model_name or "yolo11n" }}.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | +| [CoreML](../integrations/coreml.md) | `coreml` | `{{ model_name or "yolo11n" }}.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | +| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `{{ model_name or "yolo11n" }}_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | +| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `{{ model_name or "yolo11n" }}.pb` | ❌ | `imgsz`, `batch` | +| [TF Lite](../integrations/tflite.md) | `tflite` | `{{ model_name or "yolo11n" }}.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `{{ 
model_name or "yolo11n" }}_edgetpu.tflite` | ✅ | `imgsz` | +| [TF.js](../integrations/tfjs.md) | `tfjs` | `{{ model_name or "yolo11n" }}_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `{{ model_name or "yolo11n" }}_paddle_model/` | ✅ | `imgsz`, `batch` | +| [NCNN](../integrations/ncnn.md) | `ncnn` | `{{ model_name or "yolo11n" }}_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | diff --git a/docs/en/modes/benchmark.md b/docs/en/modes/benchmark.md index 209a0e03e0..3086e98ec6 100644 --- a/docs/en/modes/benchmark.md +++ b/docs/en/modes/benchmark.md @@ -1,7 +1,7 @@ --- comments: true -description: Learn how to evaluate your YOLOv8 model's performance in real-world scenarios using benchmark mode. Optimize speed, accuracy, and resource allocation across export formats. -keywords: model benchmarking, YOLOv8, Ultralytics, performance evaluation, export formats, ONNX, TensorRT, OpenVINO, CoreML, TensorFlow, optimization, mAP50-95, inference time +description: Learn how to evaluate your YOLO11 model's performance in real-world scenarios using benchmark mode. Optimize speed, accuracy, and resource allocation across export formats. +keywords: model benchmarking, YOLO11, Ultralytics, performance evaluation, export formats, ONNX, TensorRT, OpenVINO, CoreML, TensorFlow, optimization, mAP50-95, inference time --- # Model Benchmarking with Ultralytics YOLO @@ -10,7 +10,7 @@ keywords: model benchmarking, YOLOv8, Ultralytics, performance evaluation, expor ## Introduction -Once your model is trained and validated, the next logical step is to evaluate its performance in various real-world scenarios. Benchmark mode in Ultralytics YOLOv8 serves this purpose by providing a robust framework for assessing the speed and [accuracy](https://www.ultralytics.com/glossary/accuracy) of your model across a range of export formats. +Once your model is trained and validated, the next logical step is to evaluate its performance in various real-world scenarios. Benchmark mode in Ultralytics YOLO11 serves this purpose by providing a robust framework for assessing the speed and [accuracy](https://www.ultralytics.com/glossary/accuracy) of your model across a range of export formats.


@@ -50,7 +50,7 @@ Once your model is trained and validated, the next logical step is to evaluate i ## Usage Examples -Run YOLOv8n benchmarks on all supported export formats including ONNX, TensorRT etc. See Arguments section below for a full list of export arguments. +Run YOLO11n benchmarks on all supported export formats including ONNX, TensorRT etc. See Arguments section below for a full list of export arguments. !!! example @@ -60,13 +60,13 @@ Run YOLOv8n benchmarks on all supported export formats including ONNX, TensorRT from ultralytics.utils.benchmarks import benchmark # Benchmark on GPU - benchmark(model="yolov8n.pt", data="coco8.yaml", imgsz=640, half=False, device=0) + benchmark(model="yolo11n.pt", data="coco8.yaml", imgsz=640, half=False, device=0) ``` === "CLI" ```bash - yolo benchmark model=yolov8n.pt data='coco8.yaml' imgsz=640 half=False device=0 + yolo benchmark model=yolo11n.pt data='coco8.yaml' imgsz=640 half=False device=0 ``` ## Arguments @@ -75,7 +75,7 @@ Arguments such as `model`, `data`, `imgsz`, `half`, `device`, and `verbose` prov | Key | Default Value | Description | | --------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | `None` | Specifies the path to the model file. Accepts both `.pt` and `.yaml` formats, e.g., `"yolov8n.pt"` for pre-trained models or configuration files. | +| `model` | `None` | Specifies the path to the model file. Accepts both `.pt` and `.yaml` formats, e.g., `"yolo11n.pt"` for pre-trained models or configuration files. | | `data` | `None` | Path to a YAML file defining the dataset for benchmarking, typically including paths and settings for [validation data](https://www.ultralytics.com/glossary/validation-data). Example: `"coco8.yaml"`. | | `imgsz` | `640` | The input image size for the model. Can be a single integer for square images or a tuple `(width, height)` for non-square, e.g., `(640, 480)`. | | `half` | `False` | Enables FP16 (half-precision) inference, reducing memory usage and possibly increasing speed on compatible hardware. Use `half=True` to enable. | @@ -93,9 +93,9 @@ See full `export` details in the [Export](../modes/export.md) page. ## FAQ -### How do I benchmark my YOLOv8 model's performance using Ultralytics? +### How do I benchmark my YOLO11 model's performance using Ultralytics? -Ultralytics YOLOv8 offers a Benchmark mode to assess your model's performance across different export formats. This mode provides insights into key metrics such as [mean Average Precision](https://www.ultralytics.com/glossary/mean-average-precision-map) (mAP50-95), accuracy, and inference time in milliseconds. To run benchmarks, you can use either Python or CLI commands. For example, to benchmark on a GPU: +Ultralytics YOLO11 offers a Benchmark mode to assess your model's performance across different export formats. This mode provides insights into key metrics such as [mean Average Precision](https://www.ultralytics.com/glossary/mean-average-precision-map) (mAP50-95), accuracy, and inference time in milliseconds. To run benchmarks, you can use either Python or CLI commands. For example, to benchmark on a GPU: !!! 
example @@ -105,29 +105,29 @@ Ultralytics YOLOv8 offers a Benchmark mode to assess your model's performance ac from ultralytics.utils.benchmarks import benchmark # Benchmark on GPU - benchmark(model="yolov8n.pt", data="coco8.yaml", imgsz=640, half=False, device=0) + benchmark(model="yolo11n.pt", data="coco8.yaml", imgsz=640, half=False, device=0) ``` === "CLI" ```bash - yolo benchmark model=yolov8n.pt data='coco8.yaml' imgsz=640 half=False device=0 + yolo benchmark model=yolo11n.pt data='coco8.yaml' imgsz=640 half=False device=0 ``` For more details on benchmark arguments, visit the [Arguments](#arguments) section. -### What are the benefits of exporting YOLOv8 models to different formats? +### What are the benefits of exporting YOLO11 models to different formats? -Exporting YOLOv8 models to different formats such as ONNX, TensorRT, and OpenVINO allows you to optimize performance based on your deployment environment. For instance: +Exporting YOLO11 models to different formats such as ONNX, TensorRT, and OpenVINO allows you to optimize performance based on your deployment environment. For instance: - **ONNX:** Provides up to 3x CPU speedup. - **TensorRT:** Offers up to 5x GPU speedup. - **OpenVINO:** Specifically optimized for Intel hardware. These formats enhance both the speed and accuracy of your models, making them more efficient for various real-world applications. Visit the [Export](../modes/export.md) page for complete details. -### Why is benchmarking crucial in evaluating YOLOv8 models? +### Why is benchmarking crucial in evaluating YOLO11 models? -Benchmarking your YOLOv8 models is essential for several reasons: +Benchmarking your YOLO11 models is essential for several reasons: - **Informed Decisions:** Understand the trade-offs between speed and accuracy. - **Resource Allocation:** Gauge the performance across different hardware options. @@ -135,9 +135,9 @@ Benchmarking your YOLOv8 models is essential for several reasons: - **Cost Efficiency:** Optimize hardware usage based on benchmark results. Key metrics such as mAP50-95, Top-5 accuracy, and inference time help in making these evaluations. Refer to the [Key Metrics](#key-metrics-in-benchmark-mode) section for more information. -### Which export formats are supported by YOLOv8, and what are their advantages? +### Which export formats are supported by YOLO11, and what are their advantages? -YOLOv8 supports a variety of export formats, each tailored for specific hardware and use cases: +YOLO11 supports a variety of export formats, each tailored for specific hardware and use cases: - **ONNX:** Best for CPU performance. - **TensorRT:** Ideal for GPU efficiency. @@ -145,11 +145,11 @@ YOLOv8 supports a variety of export formats, each tailored for specific hardware - **CoreML & [TensorFlow](https://www.ultralytics.com/glossary/tensorflow):** Useful for iOS and general ML applications. For a complete list of supported formats and their respective advantages, check out the [Supported Export Formats](#supported-export-formats) section. -### What arguments can I use to fine-tune my YOLOv8 benchmarks? +### What arguments can I use to fine-tune my YOLO11 benchmarks? When running benchmarks, several arguments can be customized to suit specific needs: -- **model:** Path to the model file (e.g., "yolov8n.pt"). +- **model:** Path to the model file (e.g., "yolo11n.pt"). - **data:** Path to a YAML file defining the dataset (e.g., "coco8.yaml"). - **imgsz:** The input image size, either as a single integer or a tuple. 
- **half:** Enable FP16 inference for better performance. diff --git a/docs/en/modes/export.md b/docs/en/modes/export.md index 706dd91cdc..048a2cdf3c 100644 --- a/docs/en/modes/export.md +++ b/docs/en/modes/export.md @@ -1,7 +1,7 @@ --- comments: true -description: Learn how to export your YOLOv8 model to various formats like ONNX, TensorRT, and CoreML. Achieve maximum compatibility and performance. -keywords: YOLOv8, Model Export, ONNX, TensorRT, CoreML, Ultralytics, AI, Machine Learning, Inference, Deployment +description: Learn how to export your YOLO11 model to various formats like ONNX, TensorRT, and CoreML. Achieve maximum compatibility and performance. +keywords: YOLO11, Model Export, ONNX, TensorRT, CoreML, Ultralytics, AI, Machine Learning, Inference, Deployment --- # Model Export with Ultralytics YOLO @@ -10,7 +10,7 @@ keywords: YOLOv8, Model Export, ONNX, TensorRT, CoreML, Ultralytics, AI, Machine ## Introduction -The ultimate goal of training a model is to deploy it for real-world applications. Export mode in Ultralytics YOLOv8 offers a versatile range of options for exporting your trained model to different formats, making it deployable across various platforms and devices. This comprehensive guide aims to walk you through the nuances of model exporting, showcasing how to achieve maximum compatibility and performance. +The ultimate goal of training a model is to deploy it for real-world applications. Export mode in Ultralytics YOLO11 offers a versatile range of options for exporting your trained model to different formats, making it deployable across various platforms and devices. This comprehensive guide aims to walk you through the nuances of model exporting, showcasing how to achieve maximum compatibility and performance.
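As context for the export examples renamed below, the export round trip is worth showing in one place. A minimal sketch, assuming the `yolo11n.pt` weights are available and using ONNX and the Ultralytics sample image as illustrative choices:

```python
from ultralytics import YOLO

# Load a pretrained model and export it to ONNX
model = YOLO("yolo11n.pt")
onnx_path = model.export(format="onnx")  # export() returns the exported file path

# The exported model can be loaded back with the same YOLO class
# and used for prediction or validation directly
onnx_model = YOLO(onnx_path)
results = onnx_model("https://ultralytics.com/images/bus.jpg")
print(results[0].boxes)
```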


@@ -23,7 +23,7 @@ The ultimate goal of training a model is to deploy it for real-world application Watch: How To Export Custom Trained Ultralytics YOLOv8 Model and Run Live Inference on Webcam.

-## Why Choose YOLOv8's Export Mode? +## Why Choose YOLO11's Export Mode? - **Versatility:** Export to multiple formats including ONNX, TensorRT, CoreML, and more. - **Performance:** Gain up to 5x GPU speedup with TensorRT and 3x CPU speedup with ONNX or OpenVINO. @@ -46,7 +46,7 @@ Here are some of the standout functionalities: ## Usage Examples -Export a YOLOv8n model to a different format like ONNX or TensorRT. See the Arguments section below for a full list of export arguments. +Export a YOLO11n model to a different format like ONNX or TensorRT. See the Arguments section below for a full list of export arguments. !!! example @@ -56,7 +56,7 @@ Export a YOLOv8n model to a different format like ONNX or TensorRT. See the Argu from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load an official model + model = YOLO("yolo11n.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom trained model # Export the model @@ -66,7 +66,7 @@ Export a YOLOv8n model to a different format like ONNX or TensorRT. See the Argu === "CLI" ```bash - yolo export model=yolov8n.pt format=onnx # export official model + yolo export model=yolo11n.pt format=onnx # export official model yolo export model=path/to/best.pt format=onnx # export custom trained model ``` @@ -80,15 +80,15 @@ Adjusting these parameters allows for customization of the export process to fit ## Export Formats -Available YOLOv8 export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolov8n.onnx`. Usage examples are shown for your model after export completes. +Available YOLO11 export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolo11n.onnx`. Usage examples are shown for your model after export completes. {% include "macros/export-table.md" %} ## FAQ -### How do I export a YOLOv8 model to ONNX format? +### How do I export a YOLO11 model to ONNX format? -Exporting a YOLOv8 model to ONNX format is straightforward with Ultralytics. It provides both Python and CLI methods for exporting models. +Exporting a YOLO11 model to ONNX format is straightforward with Ultralytics. It provides both Python and CLI methods for exporting models. !!! example @@ -98,7 +98,7 @@ Exporting a YOLOv8 model to ONNX format is straightforward with Ultralytics. It from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load an official model + model = YOLO("yolo11n.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom trained model # Export the model @@ -108,7 +108,7 @@ Exporting a YOLOv8 model to ONNX format is straightforward with Ultralytics. It === "CLI" ```bash - yolo export model=yolov8n.pt format=onnx # export official model + yolo export model=yolo11n.pt format=onnx # export official model yolo export model=path/to/best.pt format=onnx # export custom trained model ``` @@ -116,7 +116,7 @@ For more details on the process, including advanced options like handling differ ### What are the benefits of using TensorRT for model export? -Using TensorRT for model export offers significant performance improvements. YOLOv8 models exported to TensorRT can achieve up to a 5x GPU speedup, making it ideal for real-time inference applications. 
+Using TensorRT for model export offers significant performance improvements. YOLO11 models exported to TensorRT can achieve up to a 5x GPU speedup, making it ideal for real-time inference applications. - **Versatility:** Optimize models for a specific hardware setup. - **Speed:** Achieve faster inference through advanced optimizations. @@ -124,7 +124,7 @@ Using TensorRT for model export offers significant performance improvements. YOL To learn more about integrating TensorRT, see the [TensorRT integration guide](../integrations/tensorrt.md). -### How do I enable INT8 quantization when exporting my YOLOv8 model? +### How do I enable INT8 quantization when exporting my YOLO11 model? INT8 quantization is an excellent way to compress the model and speed up inference, especially on edge devices. Here's how you can enable INT8 quantization: @@ -135,14 +135,14 @@ INT8 quantization is an excellent way to compress the model and speed up inferen ```python from ultralytics import YOLO - model = YOLO("yolov8n.pt") # Load a model + model = YOLO("yolo11n.pt") # Load a model model.export(format="onnx", int8=True) ``` === "CLI" ```bash - yolo export model=yolov8n.pt format=onnx int8=True # export model with INT8 quantization + yolo export model=yolo11n.pt format=onnx int8=True # export model with INT8 quantization ``` INT8 quantization can be applied to various formats, such as TensorRT and CoreML. More details can be found in the [Export section](../modes/export.md). @@ -160,14 +160,14 @@ To enable this feature, use the `dynamic=True` flag during export: ```python from ultralytics import YOLO - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") model.export(format="onnx", dynamic=True) ``` === "CLI" ```bash - yolo export model=yolov8n.pt format=onnx dynamic=True + yolo export model=yolo11n.pt format=onnx dynamic=True ``` For additional context, refer to the [dynamic input size configuration](#arguments). diff --git a/docs/en/modes/predict.md b/docs/en/modes/predict.md index 196d9e2028..527c12f4a2 100644 --- a/docs/en/modes/predict.md +++ b/docs/en/modes/predict.md @@ -1,7 +1,7 @@ --- comments: true -description: Harness the power of Ultralytics YOLOv8 for real-time, high-speed inference on various data sources. Learn about predict mode, key features, and practical applications. -keywords: Ultralytics, YOLOv8, model prediction, inference, predict mode, real-time inference, computer vision, machine learning, streaming, high performance +description: Harness the power of Ultralytics YOLO11 for real-time, high-speed inference on various data sources. Learn about predict mode, key features, and practical applications. +keywords: Ultralytics, YOLO11, model prediction, inference, predict mode, real-time inference, computer vision, machine learning, streaming, high performance --- # Model Prediction with Ultralytics YOLO @@ -10,7 +10,7 @@ keywords: Ultralytics, YOLOv8, model prediction, inference, predict mode, real-t ## Introduction -In the world of [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) and [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv), the process of making sense out of visual data is called 'inference' or 'prediction'. Ultralytics YOLOv8 offers a powerful feature known as **predict mode** that is tailored for high-performance, real-time inference on a wide range of data sources. 
+In the world of [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) and [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv), the process of making sense out of visual data is called 'inference' or 'prediction'. Ultralytics YOLO11 offers a powerful feature known as **predict mode** that is tailored for high-performance, real-time inference on a wide range of data sources.
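As a quick orientation before the detailed per-source examples below, here is a minimal sketch of the memory-efficient streaming pattern this page describes; the video path is a placeholder:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")

# stream=True returns a generator, yielding one Results object per frame
# instead of holding every frame's results in memory at once
for result in model("path/to/video.mp4", stream=True):
    boxes = result.boxes  # Boxes object with xyxy, conf and cls tensors
    for xyxy, conf, cls in zip(boxes.xyxy, boxes.conf, boxes.cls):
        print(f"class={int(cls)} conf={float(conf):.2f} box={xyxy.tolist()}")
```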


@@ -32,7 +32,7 @@ In the world of [machine learning](https://www.ultralytics.com/glossary/machine- ## Why Use Ultralytics YOLO for Inference? -Here's why you should consider YOLOv8's predict mode for your various inference needs: +Here's why you should consider YOLO11's predict mode for your various inference needs: - **Versatility:** Capable of making inferences on images, videos, and even live streams. - **Performance:** Engineered for real-time, high-speed processing without sacrificing [accuracy](https://www.ultralytics.com/glossary/accuracy). @@ -41,7 +41,7 @@ Here's why you should consider YOLOv8's predict mode for your various inference ### Key Features of Predict Mode -YOLOv8's predict mode is designed to be robust and versatile, featuring: +YOLO11's predict mode is designed to be robust and versatile, featuring: - **Multiple Data Source Compatibility:** Whether your data is in the form of individual images, a collection of images, video files, or real-time video streams, predict mode has you covered. - **Streaming Mode:** Use the streaming feature to generate a memory-efficient generator of `Results` objects. Enable this by setting `stream=True` in the predictor's call method. @@ -58,7 +58,7 @@ Ultralytics YOLO models return either a Python list of `Results` objects, or a m from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # pretrained YOLOv8n model + model = YOLO("yolo11n.pt") # pretrained YOLO11n model # Run batched inference on a list of images results = model(["image1.jpg", "image2.jpg"]) # return a list of Results objects @@ -80,7 +80,7 @@ Ultralytics YOLO models return either a Python list of `Results` objects, or a m from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # pretrained YOLOv8n model + model = YOLO("yolo11n.pt") # pretrained YOLO11n model # Run batched inference on a list of images results = model(["image1.jpg", "image2.jpg"], stream=True) # return a generator of Results objects @@ -98,7 +98,7 @@ Ultralytics YOLO models return either a Python list of `Results` objects, or a m ## Inference Sources -YOLOv8 can process different types of input sources for inference, as shown in the table below. The sources include static images, video streams, and various data formats. The table also indicates whether each source can be used in streaming mode with the argument `stream=True` ✅. Streaming mode is beneficial for processing videos or live streams as it creates a generator of results instead of loading all frames into memory. +YOLO11 can process different types of input sources for inference, as shown in the table below. The sources include static images, video streams, and various data formats. The table also indicates whether each source can be used in streaming mode with the argument `stream=True` ✅. Streaming mode is beneficial for processing videos or live streams as it creates a generator of results instead of loading all frames into memory. !!! 
tip @@ -131,8 +131,8 @@ Below are code examples for using each source type: ```python from ultralytics import YOLO - # Load a pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a pretrained YOLO11n model + model = YOLO("yolo11n.pt") # Define path to the image file source = "path/to/image.jpg" @@ -147,8 +147,8 @@ Below are code examples for using each source type: ```python from ultralytics import YOLO - # Load a pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a pretrained YOLO11n model + model = YOLO("yolo11n.pt") # Define current screenshot as source source = "screen" @@ -163,8 +163,8 @@ Below are code examples for using each source type: ```python from ultralytics import YOLO - # Load a pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a pretrained YOLO11n model + model = YOLO("yolo11n.pt") # Define remote image or video URL source = "https://ultralytics.com/images/bus.jpg" @@ -181,8 +181,8 @@ Below are code examples for using each source type: from ultralytics import YOLO - # Load a pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a pretrained YOLO11n model + model = YOLO("yolo11n.pt") # Open an image using PIL source = Image.open("path/to/image.jpg") @@ -199,8 +199,8 @@ Below are code examples for using each source type: from ultralytics import YOLO - # Load a pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a pretrained YOLO11n model + model = YOLO("yolo11n.pt") # Read an image using OpenCV source = cv2.imread("path/to/image.jpg") @@ -217,8 +217,8 @@ Below are code examples for using each source type: from ultralytics import YOLO - # Load a pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a pretrained YOLO11n model + model = YOLO("yolo11n.pt") # Create a random numpy array of HWC shape (640, 640, 3) with values in range [0, 255] and type uint8 source = np.random.randint(low=0, high=255, size=(640, 640, 3), dtype="uint8") @@ -235,8 +235,8 @@ Below are code examples for using each source type: from ultralytics import YOLO - # Load a pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a pretrained YOLO11n model + model = YOLO("yolo11n.pt") # Create a random torch tensor of BCHW shape (1, 3, 640, 640) with values in range [0, 1] and type float32 source = torch.rand(1, 3, 640, 640, dtype=torch.float32) @@ -251,8 +251,8 @@ Below are code examples for using each source type: ```python from ultralytics import YOLO - # Load a pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a pretrained YOLO11n model + model = YOLO("yolo11n.pt") # Define a path to a CSV file with images, URLs, videos and directories source = "path/to/file.csv" @@ -267,8 +267,8 @@ Below are code examples for using each source type: ```python from ultralytics import YOLO - # Load a pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a pretrained YOLO11n model + model = YOLO("yolo11n.pt") # Define path to video file source = "path/to/video.mp4" @@ -283,8 +283,8 @@ Below are code examples for using each source type: ```python from ultralytics import YOLO - # Load a pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a pretrained YOLO11n model + model = YOLO("yolo11n.pt") # Define path to directory containing images and videos for inference source = "path/to/dir" @@ -299,8 +299,8 @@ Below are code examples for using each source type: ```python from ultralytics import YOLO - # Load a pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a pretrained YOLO11n model + model = YOLO("yolo11n.pt") # 
Define a glob search for all JPG files in a directory source = "path/to/dir/*.jpg" @@ -318,8 +318,8 @@ Below are code examples for using each source type: ```python from ultralytics import YOLO - # Load a pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a pretrained YOLO11n model + model = YOLO("yolo11n.pt") # Define source as YouTube video URL source = "https://youtu.be/LNwODJXcvt4" @@ -335,8 +335,8 @@ Below are code examples for using each source type: ```python from ultralytics import YOLO - # Load a pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a pretrained YOLO11n model + model = YOLO("yolo11n.pt") # Single stream with batch-size 1 inference source = "rtsp://example.com/media.mp4" # RTSP, RTMP, TCP, or IP streaming address @@ -354,8 +354,8 @@ Below are code examples for using each source type: ```python from ultralytics import YOLO - # Load a pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a pretrained YOLO11n model + model = YOLO("yolo11n.pt") # Multiple streams with batched inference (e.g., batch-size 8 for 8 streams) source = "path/to/list.streams" # *.streams text file with one streaming address per line @@ -385,8 +385,8 @@ Below are code examples for using each source type: ```python from ultralytics import YOLO - # Load a pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a pretrained YOLO11n model + model = YOLO("yolo11n.pt") # Run inference on 'bus.jpg' with arguments model.predict("bus.jpg", save=True, imgsz=320, conf=0.5) @@ -402,7 +402,7 @@ Visualization arguments: ## Image and Video Formats -YOLOv8 supports various image and video formats, as specified in [ultralytics/data/utils.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/utils.py). See the tables below for the valid suffixes and example predict commands. +YOLO11 supports various image and video formats, as specified in [ultralytics/data/utils.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/utils.py). See the tables below for the valid suffixes and example predict commands. 
### Images @@ -449,8 +449,8 @@ All Ultralytics `predict()` calls will return a list of `Results` objects: ```python from ultralytics import YOLO - # Load a pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a pretrained YOLO11n model + model = YOLO("yolo11n.pt") # Run inference on an image results = model("bus.jpg") # list of 1 Results object @@ -501,8 +501,8 @@ For more details see the [`Results` class documentation](../reference/engine/res ```python from ultralytics import YOLO - # Load a pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + # Load a pretrained YOLO11n model + model = YOLO("yolo11n.pt") # Run inference on an image results = model("bus.jpg") # results list @@ -540,7 +540,7 @@ For more details see the [`Boxes` class documentation](../reference/engine/resul from ultralytics import YOLO # Load a pretrained YOLOv8n-seg Segment model - model = YOLO("yolov8n-seg.pt") + model = YOLO("yolo11n-seg.pt") # Run inference on an image results = model("bus.jpg") # results list @@ -573,7 +573,7 @@ For more details see the [`Masks` class documentation](../reference/engine/resul from ultralytics import YOLO # Load a pretrained YOLOv8n-pose Pose model - model = YOLO("yolov8n-pose.pt") + model = YOLO("yolo11n-pose.pt") # Run inference on an image results = model("bus.jpg") # results list @@ -607,7 +607,7 @@ For more details see the [`Keypoints` class documentation](../reference/engine/r from ultralytics import YOLO # Load a pretrained YOLOv8n-cls Classify model - model = YOLO("yolov8n-cls.pt") + model = YOLO("yolo11n-cls.pt") # Run inference on an image results = model("bus.jpg") # results list @@ -642,7 +642,7 @@ For more details see the [`Probs` class documentation](../reference/engine/resul from ultralytics import YOLO # Load a pretrained YOLOv8n model - model = YOLO("yolov8n-obb.pt") + model = YOLO("yolo11n-obb.pt") # Run inference on an image results = model("bus.jpg") # results list @@ -682,7 +682,7 @@ The `plot()` method in `Results` objects facilitates visualization of prediction from ultralytics import YOLO # Load a pretrained YOLOv8n model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Run inference on 'bus.jpg' results = model(["bus.jpg", "zidane.jpg"]) # results list @@ -747,8 +747,8 @@ When using YOLO models in a multi-threaded application, it's important to instan # Starting threads that each have their own model instance - Thread(target=thread_safe_predict, args=("yolov8n.pt", "image1.jpg")).start() - Thread(target=thread_safe_predict, args=("yolov8n.pt", "image2.jpg")).start() + Thread(target=thread_safe_predict, args=("yolo11n.pt", "image1.jpg")).start() + Thread(target=thread_safe_predict, args=("yolo11n.pt", "image2.jpg")).start() ``` For an in-depth look at thread-safe inference with YOLO models and step-by-step instructions, please refer to our [YOLO Thread-Safe Inference Guide](../guides/yolo-thread-safe-inference.md). This guide will provide you with all the necessary information to avoid common pitfalls and ensure that your multi-threaded inference runs smoothly. @@ -765,7 +765,7 @@ Here's a Python script using OpenCV (`cv2`) and YOLOv8 to run inference on video from ultralytics import YOLO # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Open the video file video_path = "path/to/your/video/file.mp4" diff --git a/docs/en/modes/track.md b/docs/en/modes/track.md index 46c43b0b1a..90a856a049 100644 --- a/docs/en/modes/track.md +++ b/docs/en/modes/track.md @@ -60,7 +60,7 @@ The default tracker is BoT-SORT. 
If the object confidence score is low, i.e. lower than [`track_high_thresh`](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/trackers/bytetrack.yaml#L5), then no tracks will be successfully returned and updated.
 
-To run the tracker on video streams, use a trained Detect, Segment or Pose model such as YOLOv8n, YOLOv8n-seg and YOLOv8n-pose.
+To run the tracker on video streams, use a trained Detect, Segment or Pose model such as YOLO11n, YOLO11n-seg and YOLO11n-pose.
 
 !!! example
 
     === "Python"
 
         ```python
         from ultralytics import YOLO
 
         # Load an official or custom model
-        model = YOLO("yolov8n.pt")  # Load an official Detect model
-        model = YOLO("yolov8n-seg.pt")  # Load an official Segment model
-        model = YOLO("yolov8n-pose.pt")  # Load an official Pose model
+        model = YOLO("yolo11n.pt")  # Load an official Detect model
+        model = YOLO("yolo11n-seg.pt")  # Load an official Segment model
+        model = YOLO("yolo11n-pose.pt")  # Load an official Pose model
         model = YOLO("path/to/best.pt")  # Load a custom trained model
 
         # Perform tracking with the model
@@ -84,9 +84,9 @@ To run the tracker on video streams, use a trained Detect, Segment or Pose model
         ```bash
         # Perform tracking with various models using the command line interface
-        yolo track model=yolov8n.pt source="https://youtu.be/LNwODJXcvt4" # Official Detect model
-        yolo track model=yolov8n-seg.pt source="https://youtu.be/LNwODJXcvt4" # Official Segment model
-        yolo track model=yolov8n-pose.pt source="https://youtu.be/LNwODJXcvt4" # Official Pose model
+        yolo track model=yolo11n.pt source="https://youtu.be/LNwODJXcvt4" # Official Detect model
+        yolo track model=yolo11n-seg.pt source="https://youtu.be/LNwODJXcvt4" # Official Segment model
+        yolo track model=yolo11n-pose.pt source="https://youtu.be/LNwODJXcvt4" # Official Pose model
         yolo track model=path/to/best.pt source="https://youtu.be/LNwODJXcvt4" # Custom trained model
 
         # Track using ByteTrack tracker
@@ -113,7 +113,7 @@ Tracking configuration shares properties with Predict mode, such as `conf`, `iou
         from ultralytics import YOLO
 
         # Configure the tracking parameters and run the tracker
-        model = YOLO("yolov8n.pt")
+        model = YOLO("yolo11n.pt")
         results = model.track(source="https://youtu.be/LNwODJXcvt4", conf=0.3, iou=0.5, show=True)
         ```
 
     === "CLI"
 
         ```bash
         # Configure tracking parameters and run the tracker using the command line interface
-        yolo track model=yolov8n.pt source="https://youtu.be/LNwODJXcvt4" conf=0.3, iou=0.5 show
+        yolo track model=yolo11n.pt source="https://youtu.be/LNwODJXcvt4" conf=0.3 iou=0.5 show
         ```
 
 ### Tracker Selection
 
 Ultralytics also allows you to use a modified tracker configuration file.
To do ```bash # Load the model and run the tracker with a custom configuration file using the command line interface - yolo track model=yolov8n.pt source="https://youtu.be/LNwODJXcvt4" tracker='custom_tracker.yaml' + yolo track model=yolo11n.pt source="https://youtu.be/LNwODJXcvt4" tracker='custom_tracker.yaml' ``` For a comprehensive list of tracking arguments, refer to the [ultralytics/cfg/trackers](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/trackers) page. @@ -153,7 +153,7 @@ For a comprehensive list of tracking arguments, refer to the [ultralytics/cfg/tr ### Persisting Tracks Loop -Here is a Python script using [OpenCV](https://www.ultralytics.com/glossary/opencv) (`cv2`) and YOLOv8 to run object tracking on video frames. This script still assumes you have already installed the necessary packages (`opencv-python` and `ultralytics`). The `persist=True` argument tells the tracker that the current image or frame is the next in a sequence and to expect tracks from the previous image in the current image. +Here is a Python script using [OpenCV](https://www.ultralytics.com/glossary/opencv) (`cv2`) and YOLO11 to run object tracking on video frames. This script still assumes you have already installed the necessary packages (`opencv-python` and `ultralytics`). The `persist=True` argument tells the tracker that the current image or frame is the next in a sequence and to expect tracks from the previous image in the current image. !!! example "Streaming for-loop with tracking" @@ -162,8 +162,8 @@ Here is a Python script using [OpenCV](https://www.ultralytics.com/glossary/open from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Open the video file video_path = "path/to/video.mp4" @@ -175,14 +175,14 @@ Here is a Python script using [OpenCV](https://www.ultralytics.com/glossary/open success, frame = cap.read() if success: - # Run YOLOv8 tracking on the frame, persisting tracks between frames + # Run YOLO11 tracking on the frame, persisting tracks between frames results = model.track(frame, persist=True) # Visualize the results on the frame annotated_frame = results[0].plot() # Display the annotated frame - cv2.imshow("YOLOv8 Tracking", annotated_frame) + cv2.imshow("YOLO11 Tracking", annotated_frame) # Break the loop if 'q' is pressed if cv2.waitKey(1) & 0xFF == ord("q"): @@ -200,9 +200,9 @@ Please note the change from `model(frame)` to `model.track(frame)`, which enable ### Plotting Tracks Over Time -Visualizing object tracks over consecutive frames can provide valuable insights into the movement patterns and behavior of detected objects within a video. With Ultralytics YOLOv8, plotting these tracks is a seamless and efficient process. +Visualizing object tracks over consecutive frames can provide valuable insights into the movement patterns and behavior of detected objects within a video. With Ultralytics YOLO11, plotting these tracks is a seamless and efficient process. -In the following example, we demonstrate how to utilize YOLOv8's tracking capabilities to plot the movement of detected objects across multiple video frames. This script involves opening a video file, reading it frame by frame, and utilizing the YOLO model to identify and track various objects. By retaining the center points of the detected bounding boxes and connecting them, we can draw lines that represent the paths followed by the tracked objects. 
+In the following example, we demonstrate how to utilize YOLO11's tracking capabilities to plot the movement of detected objects across multiple video frames. This script involves opening a video file, reading it frame by frame, and utilizing the YOLO model to identify and track various objects. By retaining the center points of the detected bounding boxes and connecting them, we can draw lines that represent the paths followed by the tracked objects. !!! example "Plotting tracks over multiple video frames" @@ -214,8 +214,8 @@ In the following example, we demonstrate how to utilize YOLOv8's tracking capabi from ultralytics import YOLO - # Load the YOLOv8 model - model = YOLO("yolov8n.pt") + # Load the YOLO11 model + model = YOLO("yolo11n.pt") # Open the video file video_path = "path/to/video.mp4" @@ -230,7 +230,7 @@ In the following example, we demonstrate how to utilize YOLOv8's tracking capabi success, frame = cap.read() if success: - # Run YOLOv8 tracking on the frame, persisting tracks between frames + # Run YOLO11 tracking on the frame, persisting tracks between frames results = model.track(frame, persist=True) # Get the boxes and track IDs @@ -253,7 +253,7 @@ In the following example, we demonstrate how to utilize YOLOv8's tracking capabi cv2.polylines(annotated_frame, [points], isClosed=False, color=(230, 230, 230), thickness=10) # Display the annotated frame - cv2.imshow("YOLOv8 Tracking", annotated_frame) + cv2.imshow("YOLO11 Tracking", annotated_frame) # Break the loop if 'q' is pressed if cv2.waitKey(1) & 0xFF == ord("q"): @@ -275,7 +275,7 @@ In the provided Python script, we make use of Python's `threading` module to run To ensure that each thread receives the correct parameters (the video file, the model to use and the file index), we define a function `run_tracker_in_thread` that accepts these parameters and contains the main tracking loop. This function reads the video frame by frame, runs the tracker, and displays the results. -Two different models are used in this example: `yolov8n.pt` and `yolov8n-seg.pt`, each tracking objects in a different video file. The video files are specified in `video_file1` and `video_file2`. +Two different models are used in this example: `yolo11n.pt` and `yolo11n-seg.pt`, each tracking objects in a different video file. The video files are specified in `video_file1` and `video_file2`. The `daemon=True` parameter in `threading.Thread` means that these threads will be closed as soon as the main program finishes. We then start the threads with `start()` and use `join()` to make the main thread wait until both tracker threads have finished. @@ -291,7 +291,7 @@ Finally, after all threads have completed their task, the windows displaying the from ultralytics import YOLO # Define model names and video sources - MODEL_NAMES = ["yolov8n.pt", "yolov8n-seg.pt"] + MODEL_NAMES = ["yolo11n.pt", "yolo11n-seg.pt"] SOURCES = ["path/to/video.mp4", "0"] # local video, 0 for webcam @@ -300,7 +300,7 @@ Finally, after all threads have completed their task, the windows displaying the Run YOLO tracker in its own thread for concurrent processing. Args: - model_name (str): The YOLOv8 model object. + model_name (str): The YOLO11 model object. filename (str): The path to the video file or the identifier for the webcam/external camera source. 
""" model = YOLO(model_name) @@ -357,14 +357,14 @@ You can configure a custom tracker by copying an existing tracker configuration ```python from ultralytics import YOLO - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") results = model.track(source="https://youtu.be/LNwODJXcvt4", tracker="custom_tracker.yaml") ``` === "CLI" ```bash - yolo track model=yolov8n.pt source="https://youtu.be/LNwODJXcvt4" tracker='custom_tracker.yaml' + yolo track model=yolo11n.pt source="https://youtu.be/LNwODJXcvt4" tracker='custom_tracker.yaml' ``` ### How can I run object tracking on multiple video streams simultaneously? @@ -381,7 +381,7 @@ To run object tracking on multiple video streams simultaneously, you can use Pyt from ultralytics import YOLO # Define model names and video sources - MODEL_NAMES = ["yolov8n.pt", "yolov8n-seg.pt"] + MODEL_NAMES = ["yolo11n.pt", "yolo11n-seg.pt"] SOURCES = ["path/to/video.mp4", "0"] # local video, 0 for webcam @@ -390,7 +390,7 @@ To run object tracking on multiple video streams simultaneously, you can use Pyt Run YOLO tracker in its own thread for concurrent processing. Args: - model_name (str): The YOLOv8 model object. + model_name (str): The YOLO11 model object. filename (str): The path to the video file or the identifier for the webcam/external camera source. """ model = YOLO(model_name) @@ -438,7 +438,7 @@ To visualize object tracks over multiple video frames, you can use the YOLO mode from ultralytics import YOLO - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") video_path = "path/to/video.mp4" cap = cv2.VideoCapture(video_path) track_history = defaultdict(lambda: []) @@ -458,7 +458,7 @@ To visualize object tracks over multiple video frames, you can use the YOLO mode track.pop(0) points = np.hstack(track).astype(np.int32).reshape((-1, 1, 2)) cv2.polylines(annotated_frame, [points], isClosed=False, color=(230, 230, 230), thickness=10) - cv2.imshow("YOLOv8 Tracking", annotated_frame) + cv2.imshow("YOLO11 Tracking", annotated_frame) if cv2.waitKey(1) & 0xFF == ord("q"): break else: diff --git a/docs/en/modes/train.md b/docs/en/modes/train.md index f5722b7280..9cbe791991 100644 --- a/docs/en/modes/train.md +++ b/docs/en/modes/train.md @@ -1,7 +1,7 @@ --- comments: true -description: Learn how to efficiently train object detection models using YOLOv8 with comprehensive instructions on settings, augmentation, and hardware utilization. -keywords: Ultralytics, YOLOv8, model training, deep learning, object detection, GPU training, dataset augmentation, hyperparameter tuning, model performance, M1 M2 training +description: Learn how to efficiently train object detection models using YOLO11 with comprehensive instructions on settings, augmentation, and hardware utilization. +keywords: Ultralytics, YOLO11, model training, deep learning, object detection, GPU training, dataset augmentation, hyperparameter tuning, model performance, M1 M2 training --- # Model Training with Ultralytics YOLO @@ -10,7 +10,7 @@ keywords: Ultralytics, YOLOv8, model training, deep learning, object detection, ## Introduction -Training a [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) model involves feeding it data and adjusting its parameters so that it can make accurate predictions. Train mode in Ultralytics YOLOv8 is engineered for effective and efficient training of object detection models, fully utilizing modern hardware capabilities. 
This guide aims to cover all the details you need to get started with training your own models using YOLOv8's robust set of features. +Training a [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) model involves feeding it data and adjusting its parameters so that it can make accurate predictions. Train mode in Ultralytics YOLO11 is engineered for effective and efficient training of object detection models, fully utilizing modern hardware capabilities. This guide aims to cover all the details you need to get started with training your own models using YOLO11's robust set of features.


@@ -20,12 +20,12 @@ Training a [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl allowfullscreen>
- Watch: How to Train a YOLOv8 model on Your Custom Dataset in Google Colab. + Watch: How to Train a YOLO model on Your Custom Dataset in Google Colab.

## Why Choose Ultralytics YOLO for Training? -Here are some compelling reasons to opt for YOLOv8's Train mode: +Here are some compelling reasons to opt for YOLO11's Train mode: - **Efficiency:** Make the most out of your hardware, whether you're on a single-GPU setup or scaling across multiple GPUs. - **Versatility:** Train on custom datasets in addition to readily available ones like COCO, VOC, and ImageNet. @@ -34,7 +34,7 @@ Here are some compelling reasons to opt for YOLOv8's Train mode: ### Key Features of Train Mode -The following are some notable features of YOLOv8's Train mode: +The following are some notable features of YOLO11's Train mode: - **Automatic Dataset Download:** Standard datasets like COCO, VOC, and ImageNet are downloaded automatically on first use. - **Multi-GPU Support:** Scale your training efforts seamlessly across multiple GPUs to expedite the process. @@ -43,11 +43,11 @@ The following are some notable features of YOLOv8's Train mode: !!! tip - * YOLOv8 datasets like COCO, VOC, ImageNet and many others automatically download on first use, i.e. `yolo train data=coco.yaml` + * YOLO11 datasets like COCO, VOC, ImageNet and many others automatically download on first use, i.e. `yolo train data=coco.yaml` ## Usage Examples -Train YOLOv8n on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 640. The training device can be specified using the `device` argument. If no argument is passed GPU `device=0` will be used if available, otherwise `device='cpu'` will be used. See Arguments section below for a full list of training arguments. +Train YOLO11n on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 640. The training device can be specified using the `device` argument. If no argument is passed GPU `device=0` will be used if available, otherwise `device='cpu'` will be used. See Arguments section below for a full list of training arguments. !!! 
example "Single-GPU and CPU Training Example" @@ -59,9 +59,9 @@ Train YOLOv8n on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/ from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.yaml") # build a new model from YAML - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) - model = YOLO("yolov8n.yaml").load("yolov8n.pt") # build from YAML and transfer weights + model = YOLO("yolo11n.yaml") # build a new model from YAML + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.yaml").load("yolo11n.pt") # build from YAML and transfer weights # Train the model results = model.train(data="coco8.yaml", epochs=100, imgsz=640) @@ -71,13 +71,13 @@ Train YOLOv8n on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/ ```bash # Build a new model from YAML and start training from scratch - yolo detect train data=coco8.yaml model=yolov8n.yaml epochs=100 imgsz=640 + yolo detect train data=coco8.yaml model=yolo11n.yaml epochs=100 imgsz=640 # Start training from a pretrained *.pt model - yolo detect train data=coco8.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640 # Build a new model from YAML, transfer pretrained weights to it and start training - yolo detect train data=coco8.yaml model=yolov8n.yaml pretrained=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=coco8.yaml model=yolo11n.yaml pretrained=yolo11n.pt epochs=100 imgsz=640 ``` ### Multi-GPU Training @@ -94,7 +94,7 @@ Multi-GPU training allows for more efficient utilization of available hardware r from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model with 2 GPUs results = model.train(data="coco8.yaml", epochs=100, imgsz=640, device=[0, 1]) @@ -104,7 +104,7 @@ Multi-GPU training allows for more efficient utilization of available hardware r ```bash # Start training from a pretrained *.pt model using GPUs 0 and 1 - yolo detect train data=coco8.yaml model=yolov8n.pt epochs=100 imgsz=640 device=0,1 + yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640 device=0,1 ``` ### Apple M1 and M2 MPS Training @@ -121,7 +121,7 @@ To enable training on Apple M1 and M2 chips, you should specify 'mps' as your de from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model with MPS results = model.train(data="coco8.yaml", epochs=100, imgsz=640, device="mps") @@ -131,7 +131,7 @@ To enable training on Apple M1 and M2 chips, you should specify 'mps' as your de ```bash # Start training from a pretrained *.pt model using MPS - yolo detect train data=coco8.yaml model=yolov8n.pt epochs=100 imgsz=640 device=mps + yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640 device=mps ``` While leveraging the computational power of the M1/M2 chips, this enables more efficient processing of the training tasks. For more detailed guidance and advanced configuration options, please refer to the [PyTorch MPS documentation](https://pytorch.org/docs/stable/notes/mps.html). 
@@ -199,7 +199,7 @@ These settings can be adjusted to meet the specific requirements of the dataset ## Logging -In training a YOLOv8 model, you might find it valuable to keep track of the model's performance over time. This is where logging comes into play. Ultralytics' YOLO provides support for three types of loggers - Comet, ClearML, and TensorBoard. +In training a YOLO11 model, you might find it valuable to keep track of the model's performance over time. This is where logging comes into play. Ultralytics' YOLO provides support for three types of loggers - Comet, ClearML, and TensorBoard. To use a logger, select it from the dropdown menu in the code snippet above and run it. The chosen logger will be installed and initialized. @@ -272,9 +272,9 @@ After setting up your logger, you can then proceed with your model training. All ## FAQ -### How do I train an [object detection](https://www.ultralytics.com/glossary/object-detection) model using Ultralytics YOLOv8? +### How do I train an [object detection](https://www.ultralytics.com/glossary/object-detection) model using Ultralytics YOLO11? -To train an object detection model using Ultralytics YOLOv8, you can either use the Python API or the CLI. Below is an example for both: +To train an object detection model using Ultralytics YOLO11, you can either use the Python API or the CLI. Below is an example for both: !!! example "Single-GPU and CPU Training Example" @@ -284,7 +284,7 @@ To train an object detection model using Ultralytics YOLOv8, you can either use from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="coco8.yaml", epochs=100, imgsz=640) @@ -293,14 +293,14 @@ To train an object detection model using Ultralytics YOLOv8, you can either use === "CLI" ```bash - yolo detect train data=coco8.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` For more details, refer to the [Train Settings](#train-settings) section. -### What are the key features of Ultralytics YOLOv8's Train mode? +### What are the key features of Ultralytics YOLO11's Train mode? -The key features of Ultralytics YOLOv8's Train mode include: +The key features of Ultralytics YOLO11's Train mode include: - **Automatic Dataset Download:** Automatically downloads standard datasets like COCO, VOC, and ImageNet. - **Multi-GPU Support:** Scale training across multiple GPUs for faster processing. @@ -309,7 +309,7 @@ The key features of Ultralytics YOLOv8's Train mode include: These features make training efficient and customizable to your needs. For more details, see the [Key Features of Train Mode](#key-features-of-train-mode) section. -### How do I resume training from an interrupted session in Ultralytics YOLOv8? +### How do I resume training from an interrupted session in Ultralytics YOLO11? To resume training from an interrupted session, set the `resume` argument to `True` and specify the path to the last saved checkpoint. @@ -335,9 +335,9 @@ To resume training from an interrupted session, set the `resume` argument to `Tr Check the section on [Resuming Interrupted Trainings](#resuming-interrupted-trainings) for more information. -### Can I train YOLOv8 models on Apple M1 and M2 chips? +### Can I train YOLO11 models on Apple M1 and M2 chips? 
-Yes, Ultralytics YOLOv8 supports training on Apple M1 and M2 chips utilizing the Metal Performance Shaders (MPS) framework. Specify 'mps' as your training device. +Yes, Ultralytics YOLO11 supports training on Apple M1 and M2 chips utilizing the Metal Performance Shaders (MPS) framework. Specify 'mps' as your training device. !!! example "MPS Training Example" @@ -347,7 +347,7 @@ Yes, Ultralytics YOLOv8 supports training on Apple M1 and M2 chips utilizing the from ultralytics import YOLO # Load a pretrained model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Train the model on M1/M2 chip results = model.train(data="coco8.yaml", epochs=100, imgsz=640, device="mps") @@ -356,14 +356,14 @@ Yes, Ultralytics YOLOv8 supports training on Apple M1 and M2 chips utilizing the === "CLI" ```bash - yolo detect train data=coco8.yaml model=yolov8n.pt epochs=100 imgsz=640 device=mps + yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640 device=mps ``` For more details, refer to the [Apple M1 and M2 MPS Training](#apple-m1-and-m2-mps-training) section. ### What are the common training settings, and how do I configure them? -Ultralytics YOLOv8 allows you to configure a variety of training settings such as batch size, learning rate, epochs, and more through arguments. Here's a brief overview: +Ultralytics YOLO11 allows you to configure a variety of training settings such as batch size, learning rate, epochs, and more through arguments. Here's a brief overview: | Argument | Default | Description | | -------- | ------- | ---------------------------------------------------------------------- | diff --git a/docs/en/modes/val.md b/docs/en/modes/val.md index 91eb4c2a87..da275d6316 100644 --- a/docs/en/modes/val.md +++ b/docs/en/modes/val.md @@ -1,7 +1,7 @@ --- comments: true -description: Learn how to validate your YOLOv8 model with precise metrics, easy-to-use tools, and custom settings for optimal performance. -keywords: Ultralytics, YOLOv8, model validation, machine learning, object detection, mAP metrics, Python API, CLI +description: Learn how to validate your YOLO11 model with precise metrics, easy-to-use tools, and custom settings for optimal performance. +keywords: Ultralytics, YOLO11, model validation, machine learning, object detection, mAP metrics, Python API, CLI --- # Model Validation with Ultralytics YOLO @@ -10,7 +10,7 @@ keywords: Ultralytics, YOLOv8, model validation, machine learning, object detect ## Introduction -Validation is a critical step in the [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) pipeline, allowing you to assess the quality of your trained models. Val mode in Ultralytics YOLOv8 provides a robust suite of tools and metrics for evaluating the performance of your [object detection](https://www.ultralytics.com/glossary/object-detection) models. This guide serves as a complete resource for understanding how to effectively use the Val mode to ensure that your models are both accurate and reliable. +Validation is a critical step in the [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) pipeline, allowing you to assess the quality of your trained models. Val mode in Ultralytics YOLO11 provides a robust suite of tools and metrics for evaluating the performance of your [object detection](https://www.ultralytics.com/glossary/object-detection) models. This guide serves as a complete resource for understanding how to effectively use the Val mode to ensure that your models are both accurate and reliable.
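As a quick illustrative sketch (assuming the standard `ultralytics` API, and that the per-class array in `metrics.box.maps` is ordered by class index), the returned metrics object can be inspected programmatically:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")
metrics = model.val(data="coco8.yaml")

# Overall mAP50-95, plus per-class values paired with class names
print(metrics.box.map)
for idx, class_map in enumerate(metrics.box.maps):
    print(f"{model.names[idx]}: {class_map:.3f}")
```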


@@ -25,7 +25,7 @@ Validation is a critical step in the [machine learning](https://www.ultralytics. ## Why Validate with Ultralytics YOLO? -Here's why using YOLOv8's Val mode is advantageous: +Here's why using YOLO11's Val mode is advantageous: - **Precision:** Get accurate metrics like mAP50, mAP75, and mAP50-95 to comprehensively evaluate your model. - **Convenience:** Utilize built-in features that remember training settings, simplifying the validation process. @@ -34,7 +34,7 @@ Here's why using YOLOv8's Val mode is advantageous: ### Key Features of Val Mode -These are the notable functionalities offered by YOLOv8's Val mode: +These are the notable functionalities offered by YOLO11's Val mode: - **Automated Settings:** Models remember their training configurations for straightforward validation. - **Multi-Metric Support:** Evaluate your model based on a range of accuracy metrics. @@ -43,11 +43,11 @@ These are the notable functionalities offered by YOLOv8's Val mode: !!! tip - * YOLOv8 models automatically remember their training settings, so you can validate a model at the same image size and on the original dataset easily with just `yolo val model=yolov8n.pt` or `model('yolov8n.pt').val()` + * YOLO11 models automatically remember their training settings, so you can validate a model at the same image size and on the original dataset easily with just `yolo val model=yolo11n.pt` or `model('yolo11n.pt').val()` ## Usage Examples -Validate trained YOLOv8n model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the COCO8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. See Arguments section below for a full list of export arguments. +Validate trained YOLO11n model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the COCO8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. See Arguments section below for a full list of export arguments. !!! example @@ -57,7 +57,7 @@ Validate trained YOLOv8n model [accuracy](https://www.ultralytics.com/glossary/a from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load an official model + model = YOLO("yolo11n.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom model # Validate the model @@ -71,7 +71,7 @@ Validate trained YOLOv8n model [accuracy](https://www.ultralytics.com/glossary/a === "CLI" ```bash - yolo detect val model=yolov8n.pt # val official model + yolo detect val model=yolo11n.pt # val official model yolo detect val model=path/to/best.pt # val custom model ``` @@ -95,7 +95,7 @@ The below examples showcase YOLO model validation with custom arguments in Pytho from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Customize validation settings validation_results = model.val(data="coco8.yaml", imgsz=640, batch=16, conf=0.25, iou=0.6, device="0") @@ -104,20 +104,20 @@ The below examples showcase YOLO model validation with custom arguments in Pytho === "CLI" ```bash - yolo val model=yolov8n.pt data=coco8.yaml imgsz=640 batch=16 conf=0.25 iou=0.6 device=0 + yolo val model=yolo11n.pt data=coco8.yaml imgsz=640 batch=16 conf=0.25 iou=0.6 device=0 ``` ## FAQ -### How do I validate my YOLOv8 model with Ultralytics? +### How do I validate my YOLO11 model with Ultralytics? -To validate your YOLOv8 model, you can use the Val mode provided by Ultralytics. 
For example, using the Python API, you can load a model and run validation with: +To validate your YOLO11 model, you can use the Val mode provided by Ultralytics. For example, using the Python API, you can load a model and run validation with: ```python from ultralytics import YOLO # Load a model -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") # Validate the model metrics = model.val() @@ -127,14 +127,14 @@ print(metrics.box.map) # map50-95 Alternatively, you can use the command-line interface (CLI): ```bash -yolo val model=yolov8n.pt +yolo val model=yolo11n.pt ``` For further customization, you can adjust various arguments like `imgsz`, `batch`, and `conf` in both Python and CLI modes. Check the [Arguments for YOLO Model Validation](#arguments-for-yolo-model-validation) section for the full list of parameters. -### What metrics can I get from YOLOv8 model validation? +### What metrics can I get from YOLO11 model validation? -YOLOv8 model validation provides several key metrics to assess model performance. These include: +YOLO11 model validation provides several key metrics to assess model performance. These include: - mAP50 (mean Average Precision at IoU threshold 0.5) - mAP75 (mean Average Precision at IoU threshold 0.75) @@ -156,16 +156,16 @@ For a complete performance evaluation, it's crucial to review all these metrics. Using Ultralytics YOLO for validation provides several advantages: -- **[Precision](https://www.ultralytics.com/glossary/precision):** YOLOv8 offers accurate performance metrics including mAP50, mAP75, and mAP50-95. +- **[Precision](https://www.ultralytics.com/glossary/precision):** YOLO11 offers accurate performance metrics including mAP50, mAP75, and mAP50-95. - **Convenience:** The models remember their training settings, making validation straightforward. - **Flexibility:** You can validate against the same or different datasets and image sizes. - **Hyperparameter Tuning:** Validation metrics help in fine-tuning models for better performance. These benefits ensure that your models are evaluated thoroughly and can be optimized for superior results. Learn more about these advantages in the [Why Validate with Ultralytics YOLO](#why-validate-with-ultralytics-yolo) section. -### Can I validate my YOLOv8 model using a custom dataset? +### Can I validate my YOLO11 model using a custom dataset? -Yes, you can validate your YOLOv8 model using a [custom dataset](https://docs.ultralytics.com/datasets/). Specify the `data` argument with the path to your dataset configuration file. This file should include paths to the [validation data](https://www.ultralytics.com/glossary/validation-data), class names, and other relevant details. +Yes, you can validate your YOLO11 model using a [custom dataset](https://docs.ultralytics.com/datasets/). Specify the `data` argument with the path to your dataset configuration file. This file should include paths to the [validation data](https://www.ultralytics.com/glossary/validation-data), class names, and other relevant details. 
Example in Python: @@ -173,7 +173,7 @@ Example in Python: from ultralytics import YOLO # Load a model -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") # Validate with a custom dataset metrics = model.val(data="path/to/your/custom_dataset.yaml") @@ -183,12 +183,12 @@ print(metrics.box.map) # map50-95 Example using CLI: ```bash -yolo val model=yolov8n.pt data=path/to/your/custom_dataset.yaml +yolo val model=yolo11n.pt data=path/to/your/custom_dataset.yaml ``` For more customizable options during validation, see the [Example Validation with Arguments](#example-validation-with-arguments) section. -### How do I save validation results to a JSON file in YOLOv8? +### How do I save validation results to a JSON file in YOLO11? To save the validation results to a JSON file, you can set the `save_json` argument to `True` when running validation. This can be done in both the Python API and CLI. @@ -198,7 +198,7 @@ Example in Python: from ultralytics import YOLO # Load a model -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") # Save validation results to JSON metrics = model.val(save_json=True) @@ -207,7 +207,7 @@ metrics = model.val(save_json=True) Example using CLI: ```bash -yolo val model=yolov8n.pt save_json=True +yolo val model=yolo11n.pt save_json=True ``` This functionality is particularly useful for further analysis or integration with other tools. Check the [Arguments for YOLO Model Validation](#arguments-for-yolo-model-validation) for more details. diff --git a/docs/en/solutions/index.md b/docs/en/solutions/index.md index 52423c14f5..e5187ed8d4 100644 --- a/docs/en/solutions/index.md +++ b/docs/en/solutions/index.md @@ -1,12 +1,12 @@ --- comments: true -description: Explore Ultralytics Solutions using YOLOv8 for object counting, blurring, security, and more. Enhance efficiency and solve real-world problems with cutting-edge AI. -keywords: Ultralytics, YOLOv8, object counting, object blurring, security systems, AI solutions, real-time analysis, computer vision applications +description: Explore Ultralytics Solutions using YOLO11 for object counting, blurring, security, and more. Enhance efficiency and solve real-world problems with cutting-edge AI. +keywords: Ultralytics, YOLO11, object counting, object blurring, security systems, AI solutions, real-time analysis, computer vision applications --- -# Ultralytics Solutions: Harness YOLOv8 to Solve Real-World Problems +# Ultralytics Solutions: Harness YOLO11 to Solve Real-World Problems -Ultralytics Solutions provide cutting-edge applications of YOLO models, offering real-world solutions like object counting, blurring, and security systems, enhancing efficiency and [accuracy](https://www.ultralytics.com/glossary/accuracy) in diverse industries. Discover the power of YOLOv8 for practical, impactful implementations. +Ultralytics Solutions provide cutting-edge applications of YOLO models, offering real-world solutions like object counting, blurring, and security systems, enhancing efficiency and [accuracy](https://www.ultralytics.com/glossary/accuracy) in diverse industries. Discover the power of YOLO11 for practical, impactful implementations. ![Ultralytics Solutions Thumbnail](https://github.com/ultralytics/docs/releases/download/0/ultralytics-solutions-thumbnail.avif) @@ -14,21 +14,21 @@ Ultralytics Solutions provide cutting-edge applications of YOLO models, offering Here's our curated list of Ultralytics solutions that can be used to create awesome [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) projects. 
-- [Object Counting](../guides/object-counting.md) 🚀 NEW: Learn to perform real-time object counting with YOLOv8. Gain the expertise to accurately count objects in live video streams. -- [Object Cropping](../guides/object-cropping.md) 🚀 NEW: Master object cropping with YOLOv8 for precise extraction of objects from images and videos. -- [Object Blurring](../guides/object-blurring.md) 🚀 NEW: Apply object blurring using YOLOv8 to protect privacy in image and video processing. -- [Workouts Monitoring](../guides/workouts-monitoring.md) 🚀 NEW: Discover how to monitor workouts using YOLOv8. Learn to track and analyze various fitness routines in real time. -- [Objects Counting in Regions](../guides/region-counting.md) 🚀 NEW: Count objects in specific regions using YOLOv8 for accurate detection in varied areas. -- [Security Alarm System](../guides/security-alarm-system.md) 🚀 NEW: Create a security alarm system with YOLOv8 that triggers alerts upon detecting new objects. Customize the system to fit your specific needs. +- [Object Counting](../guides/object-counting.md) 🚀 NEW: Learn to perform real-time object counting with YOLO11. Gain the expertise to accurately count objects in live video streams. +- [Object Cropping](../guides/object-cropping.md) 🚀 NEW: Master object cropping with YOLO11 for precise extraction of objects from images and videos. +- [Object Blurring](../guides/object-blurring.md) 🚀 NEW: Apply object blurring using YOLO11 to protect privacy in image and video processing. +- [Workouts Monitoring](../guides/workouts-monitoring.md) 🚀 NEW: Discover how to monitor workouts using YOLO11. Learn to track and analyze various fitness routines in real time. +- [Objects Counting in Regions](../guides/region-counting.md) 🚀 NEW: Count objects in specific regions using YOLO11 for accurate detection in varied areas. +- [Security Alarm System](../guides/security-alarm-system.md) 🚀 NEW: Create a security alarm system with YOLO11 that triggers alerts upon detecting new objects. Customize the system to fit your specific needs. - [Heatmaps](../guides/heatmaps.md) 🚀 NEW: Utilize detection heatmaps to visualize data intensity across a matrix, providing clear insights in computer vision tasks. -- [Instance Segmentation with Object Tracking](../guides/instance-segmentation-and-tracking.md) 🚀 NEW: Implement [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation) and object tracking with YOLOv8 to achieve precise object boundaries and continuous monitoring. +- [Instance Segmentation with Object Tracking](../guides/instance-segmentation-and-tracking.md) 🚀 NEW: Implement [instance segmentation](https://www.ultralytics.com/glossary/instance-segmentation) and object tracking with YOLO11 to achieve precise object boundaries and continuous monitoring. - [VisionEye View Objects Mapping](../guides/vision-eye.md) 🚀 NEW: Develop systems that mimic human eye focus on specific objects, enhancing the computer's ability to discern and prioritize details. -- [Speed Estimation](../guides/speed-estimation.md) 🚀 NEW: Estimate object speed using YOLOv8 and object tracking techniques, crucial for applications like autonomous vehicles and traffic monitoring. -- [Distance Calculation](../guides/distance-calculation.md) 🚀 NEW: Calculate distances between objects using [bounding box](https://www.ultralytics.com/glossary/bounding-box) centroids in YOLOv8, essential for spatial analysis. 
-- [Queue Management](../guides/queue-management.md) 🚀 NEW: Implement efficient queue management systems to minimize wait times and improve productivity using YOLOv8. -- [Parking Management](../guides/parking-management.md) 🚀 NEW: Organize and direct vehicle flow in parking areas with YOLOv8, optimizing space utilization and user experience. -- [Analytics](../guides/analytics.md) 📊 NEW: Conduct comprehensive data analysis to discover patterns and make informed decisions, leveraging YOLOv8 for descriptive, predictive, and prescriptive analytics. -- [Live Inference with Streamlit](../guides/streamlit-live-inference.md) 🚀 NEW: Leverage the power of YOLOv8 for real-time [object detection](https://www.ultralytics.com/glossary/object-detection) directly through your web browser with a user-friendly Streamlit interface. +- [Speed Estimation](../guides/speed-estimation.md) 🚀 NEW: Estimate object speed using YOLO11 and object tracking techniques, crucial for applications like autonomous vehicles and traffic monitoring. +- [Distance Calculation](../guides/distance-calculation.md) 🚀 NEW: Calculate distances between objects using [bounding box](https://www.ultralytics.com/glossary/bounding-box) centroids in YOLO11, essential for spatial analysis. +- [Queue Management](../guides/queue-management.md) 🚀 NEW: Implement efficient queue management systems to minimize wait times and improve productivity using YOLO11. +- [Parking Management](../guides/parking-management.md) 🚀 NEW: Organize and direct vehicle flow in parking areas with YOLO11, optimizing space utilization and user experience. +- [Analytics](../guides/analytics.md) 📊 NEW: Conduct comprehensive data analysis to discover patterns and make informed decisions, leveraging YOLO11 for descriptive, predictive, and prescriptive analytics. +- [Live Inference with Streamlit](../guides/streamlit-live-inference.md) 🚀 NEW: Leverage the power of YOLO11 for real-time [object detection](https://www.ultralytics.com/glossary/object-detection) directly through your web browser with a user-friendly Streamlit interface. ## Contribute to Our Solutions @@ -42,20 +42,20 @@ Let's work together to make the Ultralytics YOLO ecosystem more robust and versa ### How can I use Ultralytics YOLO for real-time object counting? -Ultralytics YOLOv8 can be used for real-time object counting by leveraging its advanced object detection capabilities. You can follow our detailed guide on [Object Counting](../guides/object-counting.md) to set up YOLOv8 for live video stream analysis. Simply install YOLOv8, load your model, and process video frames to count objects dynamically. +Ultralytics YOLO11 can be used for real-time object counting by leveraging its advanced object detection capabilities. You can follow our detailed guide on [Object Counting](../guides/object-counting.md) to set up YOLO11 for live video stream analysis. Simply install YOLO11, load your model, and process video frames to count objects dynamically. ### What are the benefits of using Ultralytics YOLO for security systems? -Ultralytics YOLOv8 enhances security systems by offering real-time object detection and alert mechanisms. By employing YOLOv8, you can create a security alarm system that triggers alerts when new objects are detected in the surveillance area. Learn how to set up a [Security Alarm System](../guides/security-alarm-system.md) with YOLOv8 for robust security monitoring. +Ultralytics YOLO11 enhances security systems by offering real-time object detection and alert mechanisms. 
By employing YOLO11, you can create a security alarm system that triggers alerts when new objects are detected in the surveillance area. Learn how to set up a [Security Alarm System](../guides/security-alarm-system.md) with YOLO11 for robust security monitoring. ### How can Ultralytics YOLO improve queue management systems? -Ultralytics YOLOv8 can significantly improve queue management systems by accurately counting and tracking people in queues, thus helping to reduce wait times and optimize service efficiency. Follow our detailed guide on [Queue Management](../guides/queue-management.md) to learn how to implement YOLOv8 for effective queue monitoring and analysis. +Ultralytics YOLO11 can significantly improve queue management systems by accurately counting and tracking people in queues, thus helping to reduce wait times and optimize service efficiency. Follow our detailed guide on [Queue Management](../guides/queue-management.md) to learn how to implement YOLO11 for effective queue monitoring and analysis. ### Can Ultralytics YOLO be used for workout monitoring? -Yes, Ultralytics YOLOv8 can be effectively used for monitoring workouts by tracking and analyzing fitness routines in real-time. This allows for precise evaluation of exercise form and performance. Explore our guide on [Workouts Monitoring](../guides/workouts-monitoring.md) to learn how to set up an AI-powered workout monitoring system using YOLOv8. +Yes, Ultralytics YOLO11 can be effectively used for monitoring workouts by tracking and analyzing fitness routines in real-time. This allows for precise evaluation of exercise form and performance. Explore our guide on [Workouts Monitoring](../guides/workouts-monitoring.md) to learn how to set up an AI-powered workout monitoring system using YOLO11. ### How does Ultralytics YOLO help in creating heatmaps for [data visualization](https://www.ultralytics.com/glossary/data-visualization)? -Ultralytics YOLOv8 can generate heatmaps to visualize data intensity across a given area, highlighting regions of high activity or interest. This feature is particularly useful in understanding patterns and trends in various computer vision tasks. Learn more about creating and using [Heatmaps](../guides/heatmaps.md) with YOLOv8 for comprehensive data analysis and visualization. +Ultralytics YOLO11 can generate heatmaps to visualize data intensity across a given area, highlighting regions of high activity or interest. This feature is particularly useful in understanding patterns and trends in various computer vision tasks. Learn more about creating and using [Heatmaps](../guides/heatmaps.md) with YOLO11 for comprehensive data analysis and visualization. diff --git a/docs/en/tasks/classify.md b/docs/en/tasks/classify.md index 7674c825ff..1b1af70d90 100644 --- a/docs/en/tasks/classify.md +++ b/docs/en/tasks/classify.md @@ -26,16 +26,16 @@ The output of an image classifier is a single class label and a confidence score !!! tip - YOLOv8 Classify models use the `-cls` suffix, i.e. `yolov8n-cls.pt` and are pretrained on [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml). + YOLO11 Classify models use the `-cls` suffix, i.e. `yolo11n-cls.pt` and are pretrained on [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml). 
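As a short sketch of that single-label-plus-confidence output (assuming the pretrained `yolo11n-cls.pt` checkpoint downloads automatically on first use), the prediction can be read from the `probs` attribute:

```python
from ultralytics import YOLO

model = YOLO("yolo11n-cls.pt")  # classification checkpoint, note the -cls suffix
results = model("https://ultralytics.com/images/bus.jpg")

probs = results[0].probs  # classification probabilities
print(model.names[probs.top1], float(probs.top1conf))  # top-1 class name and confidence
```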
-## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/v8) +## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/11) -YOLOv8 pretrained Classify models are shown here. Detect, Segment and Pose models are pretrained on the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml) dataset, while Classify models are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset. +YOLO11 pretrained Classify models are shown here. Detect, Segment and Pose models are pretrained on the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml) dataset, while Classify models are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset. [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use. | Model | size
<br>(pixels) | acc<br>top1 | acc<br>top5 | Speed<br>CPU ONNX<br>(ms) | Speed<br>T4 TensorRT10<br>(ms) | params<br>(M) | FLOPs<br>
(B) at 640 | -|----------------------------------------------------------------------------------------------|-----------------------|------------------|------------------|--------------------------------|-------------------------------------|--------------------|--------------------------| +| -------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | ------------------------------ | ----------------------------------- | ------------------ | ------------------------ | | [YOLO11n-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-cls.pt) | 224 | 70.0 | 89.4 | 5.0 ± 0.3 | 1.1 ± 0.0 | 1.6 | 3.3 | | [YOLO11s-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-cls.pt) | 224 | 75.4 | 92.7 | 7.9 ± 0.2 | 1.3 ± 0.0 | 5.5 | 12.1 | | [YOLO11m-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-cls.pt) | 224 | 77.3 | 93.9 | 17.2 ± 0.4 | 2.0 ± 0.0 | 10.4 | 39.3 | @@ -47,7 +47,7 @@ YOLOv8 pretrained Classify models are shown here. Detect, Segment and Pose model ## Train -Train YOLOv8n-cls on the MNIST160 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 64. For a full list of available arguments see the [Configuration](../usage/cfg.md) page. +Train YOLO11n-cls on the MNIST160 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 64. For a full list of available arguments see the [Configuration](../usage/cfg.md) page. !!! example @@ -57,9 +57,9 @@ Train YOLOv8n-cls on the MNIST160 dataset for 100 [epochs](https://www.ultralyti from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.yaml") # build a new model from YAML - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) - model = YOLO("yolov8n-cls.yaml").load("yolov8n-cls.pt") # build from YAML and transfer weights + model = YOLO("yolo11n-cls.yaml") # build a new model from YAML + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.yaml").load("yolo11n-cls.pt") # build from YAML and transfer weights # Train the model results = model.train(data="mnist160", epochs=100, imgsz=64) @@ -69,13 +69,13 @@ Train YOLOv8n-cls on the MNIST160 dataset for 100 [epochs](https://www.ultralyti ```bash # Build a new model from YAML and start training from scratch - yolo classify train data=mnist160 model=yolov8n-cls.yaml epochs=100 imgsz=64 + yolo classify train data=mnist160 model=yolo11n-cls.yaml epochs=100 imgsz=64 # Start training from a pretrained *.pt model - yolo classify train data=mnist160 model=yolov8n-cls.pt epochs=100 imgsz=64 + yolo classify train data=mnist160 model=yolo11n-cls.pt epochs=100 imgsz=64 # Build a new model from YAML, transfer pretrained weights to it and start training - yolo classify train data=mnist160 model=yolov8n-cls.yaml pretrained=yolov8n-cls.pt epochs=100 imgsz=64 + yolo classify train data=mnist160 model=yolo11n-cls.yaml pretrained=yolo11n-cls.pt epochs=100 imgsz=64 ``` ### Dataset format @@ -84,7 +84,7 @@ YOLO classification dataset format can be found in detail in the [Dataset Guide] ## Val -Validate trained YOLOv8n-cls model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the MNIST160 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. 
+Validate trained YOLO11n-cls model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the MNIST160 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. !!! example @@ -94,7 +94,7 @@ Validate trained YOLOv8n-cls model [accuracy](https://www.ultralytics.com/glossa from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load an official model + model = YOLO("yolo11n-cls.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom model # Validate the model @@ -106,13 +106,13 @@ Validate trained YOLOv8n-cls model [accuracy](https://www.ultralytics.com/glossa === "CLI" ```bash - yolo classify val model=yolov8n-cls.pt # val official model + yolo classify val model=yolo11n-cls.pt # val official model yolo classify val model=path/to/best.pt # val custom model ``` ## Predict -Use a trained YOLOv8n-cls model to run predictions on images. +Use a trained YOLO11n-cls model to run predictions on images. !!! example @@ -122,7 +122,7 @@ Use a trained YOLOv8n-cls model to run predictions on images. from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load an official model + model = YOLO("yolo11n-cls.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom model # Predict with the model @@ -132,7 +132,7 @@ Use a trained YOLOv8n-cls model to run predictions on images. === "CLI" ```bash - yolo classify predict model=yolov8n-cls.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model + yolo classify predict model=yolo11n-cls.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model yolo classify predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model ``` @@ -140,7 +140,7 @@ See full `predict` mode details in the [Predict](../modes/predict.md) page. ## Export -Export a YOLOv8n-cls model to a different format like ONNX, CoreML, etc. +Export a YOLO11n-cls model to a different format like ONNX, CoreML, etc. !!! example @@ -150,7 +150,7 @@ Export a YOLOv8n-cls model to a different format like ONNX, CoreML, etc. from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load an official model + model = YOLO("yolo11n-cls.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom trained model # Export the model @@ -160,11 +160,11 @@ Export a YOLOv8n-cls model to a different format like ONNX, CoreML, etc. === "CLI" ```bash - yolo export model=yolov8n-cls.pt format=onnx # export official model + yolo export model=yolo11n-cls.pt format=onnx # export official model yolo export model=path/to/best.pt format=onnx # export custom trained model ``` -Available YOLOv8-cls export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolov8n-cls.onnx`. Usage examples are shown for your model after export completes. +Available YOLO11-cls export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolo11n-cls.onnx`. Usage examples are shown for your model after export completes. {% include "macros/export-table.md" %} @@ -172,13 +172,13 @@ See full `export` details in the [Export](../modes/export.md) page. 
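Tying the export and prediction steps together, here is a brief end-to-end sketch (assuming `model.export()` returns the path of the exported file and that ONNX dependencies install automatically on first export):

```python
from ultralytics import YOLO

# Export a pretrained classifier to ONNX
model = YOLO("yolo11n-cls.pt")
onnx_path = model.export(format="onnx")

# Predict directly on the exported ONNX model, as noted above
onnx_model = YOLO(onnx_path)
results = onnx_model("https://ultralytics.com/images/bus.jpg")
```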
## FAQ -### What is the purpose of YOLOv8 in image classification? +### What is the purpose of YOLO11 in image classification? -YOLOv8 models, such as `yolov8n-cls.pt`, are designed for efficient image classification. They assign a single class label to an entire image along with a confidence score. This is particularly useful for applications where knowing the specific class of an image is sufficient, rather than identifying the location or shape of objects within the image. +YOLO11 models, such as `yolo11n-cls.pt`, are designed for efficient image classification. They assign a single class label to an entire image along with a confidence score. This is particularly useful for applications where knowing the specific class of an image is sufficient, rather than identifying the location or shape of objects within the image. -### How do I train a YOLOv8 model for image classification? +### How do I train a YOLO11 model for image classification? -To train a YOLOv8 model, you can use either Python or CLI commands. For example, to train a `yolov8n-cls` model on the MNIST160 dataset for 100 epochs at an image size of 64: +To train a YOLO11 model, you can use either Python or CLI commands. For example, to train a `yolo11n-cls` model on the MNIST160 dataset for 100 epochs at an image size of 64: !!! example @@ -188,7 +188,7 @@ To train a YOLOv8 model, you can use either Python or CLI commands. For example, from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="mnist160", epochs=100, imgsz=64) @@ -197,18 +197,18 @@ To train a YOLOv8 model, you can use either Python or CLI commands. For example, === "CLI" ```bash - yolo classify train data=mnist160 model=yolov8n-cls.pt epochs=100 imgsz=64 + yolo classify train data=mnist160 model=yolo11n-cls.pt epochs=100 imgsz=64 ``` For more configuration options, visit the [Configuration](../usage/cfg.md) page. -### Where can I find pretrained YOLOv8 classification models? +### Where can I find pretrained YOLO11 classification models? -Pretrained YOLOv8 classification models can be found in the [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/v8) section. Models like `yolov8n-cls.pt`, `yolov8s-cls.pt`, `yolov8m-cls.pt`, etc., are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset and can be easily downloaded and used for various image classification tasks. +Pretrained YOLO11 classification models can be found in the [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/11) section. Models like `yolo11n-cls.pt`, `yolo11s-cls.pt`, `yolo11m-cls.pt`, etc., are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset and can be easily downloaded and used for various image classification tasks. -### How can I export a trained YOLOv8 model to different formats? +### How can I export a trained YOLO11 model to different formats? -You can export a trained YOLOv8 model to various formats using Python or CLI commands. For instance, to export a model to ONNX format: +You can export a trained YOLO11 model to various formats using Python or CLI commands. For instance, to export a model to ONNX format: !!! 
example @@ -218,7 +218,7 @@ You can export a trained YOLOv8 model to various formats using Python or CLI com from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load the trained model + model = YOLO("yolo11n-cls.pt") # load the trained model # Export the model to ONNX model.export(format="onnx") @@ -227,12 +227,12 @@ You can export a trained YOLOv8 model to various formats using Python or CLI com === "CLI" ```bash - yolo export model=yolov8n-cls.pt format=onnx # export the trained model to ONNX format + yolo export model=yolo11n-cls.pt format=onnx # export the trained model to ONNX format ``` For detailed export options, refer to the [Export](../modes/export.md) page. -### How do I validate a trained YOLOv8 classification model? +### How do I validate a trained YOLO11 classification model? To validate a trained model's accuracy on a dataset like MNIST160, you can use the following Python or CLI commands: @@ -244,7 +244,7 @@ To validate a trained model's accuracy on a dataset like MNIST160, you can use t from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-cls.pt") # load the trained model + model = YOLO("yolo11n-cls.pt") # load the trained model # Validate the model metrics = model.val() # no arguments needed, uses the dataset and settings from training @@ -255,7 +255,7 @@ To validate a trained model's accuracy on a dataset like MNIST160, you can use t === "CLI" ```bash - yolo classify val model=yolov8n-cls.pt # validate the trained model + yolo classify val model=yolo11n-cls.pt # validate the trained model ``` For more information, visit the [Validate](#val) section. diff --git a/docs/en/tasks/detect.md b/docs/en/tasks/detect.md index 079dc7ecb7..58d58759b1 100644 --- a/docs/en/tasks/detect.md +++ b/docs/en/tasks/detect.md @@ -25,16 +25,16 @@ The output of an object detector is a set of bounding boxes that enclose the obj !!! tip - YOLOv8 Detect models are the default YOLOv8 models, i.e. `yolov8n.pt` and are pretrained on [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml). + YOLO11 Detect models are the default YOLO11 models, i.e. `yolo11n.pt` and are pretrained on [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml). -## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/v8) +## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/11) -YOLOv8 pretrained Detect models are shown here. Detect, Segment and Pose models are pretrained on the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml) dataset, while Classify models are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset. +YOLO11 pretrained Detect models are shown here. Detect, Segment and Pose models are pretrained on the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml) dataset, while Classify models are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset. [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use. | Model | size
<br>(pixels) | mAPval<br>50-95 | Speed<br>CPU ONNX<br>(ms) | Speed<br>T4 TensorRT10<br>(ms) | params<br>(M) | FLOPs<br>
(B) | -|--------------------------------------------------------------------------------------|-----------------------|----------------------|--------------------------------|-------------------------------------|--------------------|-------------------| +| ------------------------------------------------------------------------------------ | --------------------- | -------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | | [YOLO11n](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt) | 640 | 39.5 | 56.1 ± 0.8 | 1.5 ± 0.0 | 2.6 | 6.5 | | [YOLO11s](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s.pt) | 640 | 47.0 | 90.0 ± 1.2 | 2.5 ± 0.0 | 9.4 | 21.5 | | [YOLO11m](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m.pt) | 640 | 51.5 | 183.2 ± 2.0 | 4.7 ± 0.1 | 20.1 | 68.0 | @@ -46,7 +46,7 @@ YOLOv8 pretrained Detect models are shown here. Detect, Segment and Pose models ## Train -Train YOLOv8n on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 640. For a full list of available arguments see the [Configuration](../usage/cfg.md) page. +Train YOLO11n on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 640. For a full list of available arguments see the [Configuration](../usage/cfg.md) page. !!! example @@ -56,9 +56,9 @@ Train YOLOv8n on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/ from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.yaml") # build a new model from YAML - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) - model = YOLO("yolov8n.yaml").load("yolov8n.pt") # build from YAML and transfer weights + model = YOLO("yolo11n.yaml") # build a new model from YAML + model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n.yaml").load("yolo11n.pt") # build from YAML and transfer weights # Train the model results = model.train(data="coco8.yaml", epochs=100, imgsz=640) @@ -68,13 +68,13 @@ Train YOLOv8n on the COCO8 dataset for 100 [epochs](https://www.ultralytics.com/ ```bash # Build a new model from YAML and start training from scratch - yolo detect train data=coco8.yaml model=yolov8n.yaml epochs=100 imgsz=640 + yolo detect train data=coco8.yaml model=yolo11n.yaml epochs=100 imgsz=640 # Start training from a pretrained *.pt model - yolo detect train data=coco8.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640 # Build a new model from YAML, transfer pretrained weights to it and start training - yolo detect train data=coco8.yaml model=yolov8n.yaml pretrained=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=coco8.yaml model=yolo11n.yaml pretrained=yolo11n.pt epochs=100 imgsz=640 ``` ### Dataset format @@ -83,7 +83,7 @@ YOLO detection dataset format can be found in detail in the [Dataset Guide](../d ## Val -Validate trained YOLOv8n model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the COCO8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. +Validate trained YOLO11n model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the COCO8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. !!! 
example @@ -93,7 +93,7 @@ Validate trained YOLOv8n model [accuracy](https://www.ultralytics.com/glossary/a from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load an official model + model = YOLO("yolo11n.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom model # Validate the model @@ -107,13 +107,13 @@ Validate trained YOLOv8n model [accuracy](https://www.ultralytics.com/glossary/a === "CLI" ```bash - yolo detect val model=yolov8n.pt # val official model + yolo detect val model=yolo11n.pt # val official model yolo detect val model=path/to/best.pt # val custom model ``` ## Predict -Use a trained YOLOv8n model to run predictions on images. +Use a trained YOLO11n model to run predictions on images. !!! example @@ -123,7 +123,7 @@ Use a trained YOLOv8n model to run predictions on images. from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load an official model + model = YOLO("yolo11n.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom model # Predict with the model @@ -133,7 +133,7 @@ Use a trained YOLOv8n model to run predictions on images. === "CLI" ```bash - yolo detect predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model + yolo detect predict model=yolo11n.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model yolo detect predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model ``` @@ -141,7 +141,7 @@ See full `predict` mode details in the [Predict](../modes/predict.md) page. ## Export -Export a YOLOv8n model to a different format like ONNX, CoreML, etc. +Export a YOLO11n model to a different format like ONNX, CoreML, etc. !!! example @@ -151,7 +151,7 @@ Export a YOLOv8n model to a different format like ONNX, CoreML, etc. from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load an official model + model = YOLO("yolo11n.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom trained model # Export the model @@ -161,11 +161,11 @@ Export a YOLOv8n model to a different format like ONNX, CoreML, etc. === "CLI" ```bash - yolo export model=yolov8n.pt format=onnx # export official model + yolo export model=yolo11n.pt format=onnx # export official model yolo export model=path/to/best.pt format=onnx # export custom trained model ``` -Available YOLOv8 export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolov8n.onnx`. Usage examples are shown for your model after export completes. +Available YOLO11 export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolo11n.onnx`. Usage examples are shown for your model after export completes. {% include "macros/export-table.md" %} @@ -173,9 +173,9 @@ See full `export` details in the [Export](../modes/export.md) page. ## FAQ -### How do I train a YOLOv8 model on my custom dataset? +### How do I train a YOLO11 model on my custom dataset? -Training a YOLOv8 model on a custom dataset involves a few steps: +Training a YOLO11 model on a custom dataset involves a few steps: 1. **Prepare the Dataset**: Ensure your dataset is in the YOLO format. 
For guidance, refer to our [Dataset Guide](../datasets/detect/index.md). 2. **Load the Model**: Use the Ultralytics YOLO library to load a pre-trained model or create a new model from a YAML file. @@ -189,7 +189,7 @@ Training a YOLOv8 model on a custom dataset involves a few steps: from ultralytics import YOLO # Load a pretrained model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Train the model on your custom dataset model.train(data="my_custom_dataset.yaml", epochs=100, imgsz=640) @@ -198,26 +198,26 @@ Training a YOLOv8 model on a custom dataset involves a few steps: === "CLI" ```bash - yolo detect train data=my_custom_dataset.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=my_custom_dataset.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` For detailed configuration options, visit the [Configuration](../usage/cfg.md) page. -### What pretrained models are available in YOLOv8? +### What pretrained models are available in YOLO11? -Ultralytics YOLOv8 offers various pretrained models for object detection, segmentation, and pose estimation. These models are pretrained on the COCO dataset or ImageNet for classification tasks. Here are some of the available models: +Ultralytics YOLO11 offers various pretrained models for object detection, segmentation, and pose estimation. These models are pretrained on the COCO dataset or ImageNet for classification tasks. Here are some of the available models: -- [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt) -- [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s.pt) -- [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m.pt) -- [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l.pt) -- [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x.pt) +- [YOLO11n](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt) +- [YOLO11s](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s.pt) +- [YOLO11m](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m.pt) +- [YOLO11l](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l.pt) +- [YOLO11x](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x.pt) -For a detailed list and performance metrics, refer to the [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/v8) section. +For a detailed list and performance metrics, refer to the [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/11) section. ### How can I validate the accuracy of my trained YOLOv8 model? -To validate the accuracy of your trained YOLOv8 model, you can use the `.val()` method in Python or the `yolo detect val` command in CLI. This will provide metrics like mAP50-95, mAP50, and more. +To validate the accuracy of your trained YOLO11 model, you can use the `.val()` method in Python or the `yolo detect val` command in CLI. This will provide metrics like mAP50-95, mAP50, and more. !!! example @@ -242,9 +242,9 @@ To validate the accuracy of your trained YOLOv8 model, you can use the `.val()` For more validation details, visit the [Val](../modes/val.md) page. -### What formats can I export a YOLOv8 model to? +### What formats can I export a YOLO11 model to? -Ultralytics YOLOv8 allows exporting models to various formats such as ONNX, TensorRT, CoreML, and more to ensure compatibility across different platforms and devices. 
+Ultralytics YOLO11 allows exporting models to various formats such as ONNX, TensorRT, CoreML, and more to ensure compatibility across different platforms and devices. !!! example @@ -254,7 +254,7 @@ Ultralytics YOLOv8 allows exporting models to various formats such as ONNX, Tens from ultralytics import YOLO # Load the model - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Export the model to ONNX format model.export(format="onnx") @@ -263,18 +263,18 @@ Ultralytics YOLOv8 allows exporting models to various formats such as ONNX, Tens === "CLI" ```bash - yolo export model=yolov8n.pt format=onnx + yolo export model=yolo11n.pt format=onnx ``` Check the full list of supported formats and instructions on the [Export](../modes/export.md) page. -### Why should I use Ultralytics YOLOv8 for object detection? +### Why should I use Ultralytics YOLO11 for object detection? -Ultralytics YOLOv8 is designed to offer state-of-the-art performance for object detection, segmentation, and pose estimation. Here are some key advantages: +Ultralytics YOLO11 is designed to offer state-of-the-art performance for object detection, segmentation, and pose estimation. Here are some key advantages: 1. **Pretrained Models**: Utilize models pretrained on popular datasets like COCO and ImageNet for faster development. 2. **High Accuracy**: Achieves impressive mAP scores, ensuring reliable object detection. 3. **Speed**: Optimized for real-time inference, making it ideal for applications requiring swift processing. 4. **Flexibility**: Export models to various formats like ONNX and TensorRT for deployment across multiple platforms. -Explore our [Blog](https://www.ultralytics.com/blog) for use cases and success stories showcasing YOLOv8 in action. +Explore our [Blog](https://www.ultralytics.com/blog) for use cases and success stories showcasing YOLO11 in action. diff --git a/docs/en/tasks/index.md b/docs/en/tasks/index.md index 3ad5f2a0ef..d474800706 100644 --- a/docs/en/tasks/index.md +++ b/docs/en/tasks/index.md @@ -19,7 +19,7 @@ YOLO11 is an AI framework that supports multiple [computer vision](https://www.u allowfullscreen>
- Watch: Explore Ultralytics YOLO Tasks: [Object Detection](https://www.ultralytics.com/glossary/object-detection), Segmentation, OBB, Tracking, and Pose Estimation. + Watch: Explore Ultralytics YOLO Tasks: Object Detection, Segmentation, OBB, Tracking, and Pose Estimation.

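All of the tasks listed on this page share one Python API; the weights file suffix selects the task. The sketch below is a minimal illustration, assuming the pretrained checkpoints named on the individual task pages download automatically on first use (the `task` attribute printed at the end is an assumption based on the Ultralytics model interface):

```python
from ultralytics import YOLO

# One API for every task; the checkpoint suffix determines the task head
detect = YOLO("yolo11n.pt")  # object detection
segment = YOLO("yolo11n-seg.pt")  # instance segmentation
classify = YOLO("yolo11n-cls.pt")  # image classification
pose = YOLO("yolo11n-pose.pt")  # pose estimation
obb = YOLO("yolo11n-obb.pt")  # oriented bounding boxes

print(detect.task, segment.task, classify.task, pose.task, obb.task)
```

Each model then exposes the same `train`, `val`, `predict`, and `export` modes described on the task pages below.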
## [Detection](detect.md) diff --git a/docs/en/tasks/obb.md b/docs/en/tasks/obb.md index fba69b22fa..8bb67e572a 100644 --- a/docs/en/tasks/obb.md +++ b/docs/en/tasks/obb.md @@ -17,7 +17,7 @@ The output of an oriented object detector is a set of rotated bounding boxes tha !!! tip - YOLOv8 OBB models use the `-obb` suffix, i.e. `yolov8n-obb.pt` and are pretrained on [DOTAv1](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/DOTAv1.yaml). + YOLO11 OBB models use the `-obb` suffix, i.e. `yolo11n-obb.pt` and are pretrained on [DOTAv1](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/DOTAv1.yaml).


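As a quick illustration of the tip above, the following sketch loads the pretrained `yolo11n-obb.pt` weights and reads the rotated boxes from the prediction results; the `obb` results attribute and its `xywhr` tensor are assumptions based on the Ultralytics results API, and the sample image is the same one used elsewhere on this page:

```python
from ultralytics import YOLO

# Load a pretrained OBB model (note the -obb suffix from the tip above)
model = YOLO("yolo11n-obb.pt")

# Run inference on a sample image
results = model("https://ultralytics.com/images/bus.jpg")

# Rotated boxes are assumed to be exposed via the `obb` attribute
for result in results:
    print(result.obb.xywhr)  # one [x_center, y_center, width, height, rotation] row per box
```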
@@ -36,14 +36,14 @@ The output of an oriented object detector is a set of rotated bounding boxes tha | :------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------: | | ![Ships Detection using OBB](https://github.com/ultralytics/docs/releases/download/0/ships-detection-using-obb.avif) | ![Vehicle Detection using OBB](https://github.com/ultralytics/docs/releases/download/0/vehicle-detection-using-obb.avif) | -## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/v8) +## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/11) -YOLOv8 pretrained OBB models are shown here, which are pretrained on the [DOTAv1](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/DOTAv1.yaml) dataset. +YOLO11 pretrained OBB models are shown here, which are pretrained on the [DOTAv1](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/DOTAv1.yaml) dataset. [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use. | Model | size
(pixels) | mAP<sup>test<br>50 | Speed<br><sup>CPU ONNX<br><sup>(ms) | Speed<br><sup>T4 TensorRT10<br><sup>(ms) | params<br><sup>(M) | FLOPs<br><sup>
(B) | -|----------------------------------------------------------------------------------------------|-----------------------|--------------------|--------------------------------|-------------------------------------|--------------------|-------------------| +| -------------------------------------------------------------------------------------------- | --------------------- | ------------------ | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | | [YOLO11n-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-obb.pt) | 1024 | 78.4 | 117.6 ± 0.8 | 4.4 ± 0.0 | 2.7 | 17.2 | | [YOLO11s-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-obb.pt) | 1024 | 79.5 | 219.4 ± 4.0 | 5.1 ± 0.0 | 9.7 | 57.5 | | [YOLO11m-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-obb.pt) | 1024 | 80.9 | 562.8 ± 2.9 | 10.1 ± 0.4 | 20.9 | 183.5 | @@ -55,7 +55,7 @@ YOLOv8 pretrained OBB models are shown here, which are pretrained on the [DOTAv1 ## Train -Train YOLOv8n-obb on the `dota8.yaml` dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 640. For a full list of available arguments see the [Configuration](../usage/cfg.md) page. +Train YOLO11n-obb on the `dota8.yaml` dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 640. For a full list of available arguments see the [Configuration](../usage/cfg.md) page. !!! example @@ -65,9 +65,9 @@ Train YOLOv8n-obb on the `dota8.yaml` dataset for 100 [epochs](https://www.ultra from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-obb.yaml") # build a new model from YAML - model = YOLO("yolov8n-obb.pt") # load a pretrained model (recommended for training) - model = YOLO("yolov8n-obb.yaml").load("yolov8n.pt") # build from YAML and transfer weights + model = YOLO("yolo11n-obb.yaml") # build a new model from YAML + model = YOLO("yolo11n-obb.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-obb.yaml").load("yolo11n.pt") # build from YAML and transfer weights # Train the model results = model.train(data="dota8.yaml", epochs=100, imgsz=640) @@ -77,13 +77,13 @@ Train YOLOv8n-obb on the `dota8.yaml` dataset for 100 [epochs](https://www.ultra ```bash # Build a new model from YAML and start training from scratch - yolo obb train data=dota8.yaml model=yolov8n-obb.yaml epochs=100 imgsz=640 + yolo obb train data=dota8.yaml model=yolo11n-obb.yaml epochs=100 imgsz=640 # Start training from a pretrained *.pt model - yolo obb train data=dota8.yaml model=yolov8n-obb.pt epochs=100 imgsz=640 + yolo obb train data=dota8.yaml model=yolo11n-obb.pt epochs=100 imgsz=640 # Build a new model from YAML, transfer pretrained weights to it and start training - yolo obb train data=dota8.yaml model=yolov8n-obb.yaml pretrained=yolov8n-obb.pt epochs=100 imgsz=640 + yolo obb train data=dota8.yaml model=yolo11n-obb.yaml pretrained=yolo11n-obb.pt epochs=100 imgsz=640 ```

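Because the Train examples above keep to the defaults, a short sketch of a customized run may be useful. It is a sketch only: `batch`, `device`, and `patience` are standard training arguments from the [Configuration](../usage/cfg.md) page, and the values shown are illustrative:

```python
from ultralytics import YOLO

# Fine-tune a pretrained OBB model with a few common argument overrides
model = YOLO("yolo11n-obb.pt")

results = model.train(
    data="dota8.yaml",  # dataset config
    epochs=100,  # training epochs
    imgsz=640,  # training image size
    batch=16,  # batch size (illustrative value)
    device=0,  # first CUDA GPU; use "cpu" to train without a GPU
    patience=50,  # early-stopping patience in epochs
)
```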
@@ -103,7 +103,7 @@ OBB dataset format can be found in detail in the [Dataset Guide](../datasets/obb ## Val -Validate trained YOLOv8n-obb model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the DOTA8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. +Validate trained YOLO11n-obb model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the DOTA8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. !!! example @@ -113,7 +113,7 @@ Validate trained YOLOv8n-obb model [accuracy](https://www.ultralytics.com/glossa from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-obb.pt") # load an official model + model = YOLO("yolo11n-obb.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom model # Validate the model @@ -127,13 +127,13 @@ Validate trained YOLOv8n-obb model [accuracy](https://www.ultralytics.com/glossa === "CLI" ```bash - yolo obb val model=yolov8n-obb.pt data=dota8.yaml # val official model + yolo obb val model=yolo11n-obb.pt data=dota8.yaml # val official model yolo obb val model=path/to/best.pt data=path/to/data.yaml # val custom model ``` ## Predict -Use a trained YOLOv8n-obb model to run predictions on images. +Use a trained YOLO11n-obb model to run predictions on images. !!! example @@ -143,7 +143,7 @@ Use a trained YOLOv8n-obb model to run predictions on images. from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-obb.pt") # load an official model + model = YOLO("yolo11n-obb.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom model # Predict with the model @@ -153,7 +153,7 @@ Use a trained YOLOv8n-obb model to run predictions on images. === "CLI" ```bash - yolo obb predict model=yolov8n-obb.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model + yolo obb predict model=yolo11n-obb.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model yolo obb predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model ``` @@ -172,7 +172,7 @@ See full `predict` mode details in the [Predict](../modes/predict.md) page. ## Export -Export a YOLOv8n-obb model to a different format like ONNX, CoreML, etc. +Export a YOLO11n-obb model to a different format like ONNX, CoreML, etc. !!! example @@ -182,7 +182,7 @@ Export a YOLOv8n-obb model to a different format like ONNX, CoreML, etc. from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-obb.pt") # load an official model + model = YOLO("yolo11n-obb.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom trained model # Export the model @@ -192,11 +192,11 @@ Export a YOLOv8n-obb model to a different format like ONNX, CoreML, etc. === "CLI" ```bash - yolo export model=yolov8n-obb.pt format=onnx # export official model + yolo export model=yolo11n-obb.pt format=onnx # export official model yolo export model=path/to/best.pt format=onnx # export custom trained model ``` -Available YOLOv8-obb export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolov8n-obb.onnx`. Usage examples are shown for your model after export completes. +Available YOLO11-obb export formats are in the table below. You can export to any format using the `format` argument, i.e. 
`format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolo11n-obb.onnx`. Usage examples are shown for your model after export completes. {% include "macros/export-table.md" %} @@ -208,9 +208,9 @@ See full `export` details in the [Export](../modes/export.md) page. Oriented Bounding Boxes (OBB) include an additional angle to enhance object localization accuracy in images. Unlike regular bounding boxes, which are axis-aligned rectangles, OBBs can rotate to fit the orientation of the object better. This is particularly useful for applications requiring precise object placement, such as aerial or satellite imagery ([Dataset Guide](../datasets/obb/index.md)). -### How do I train a YOLOv8n-obb model using a custom dataset? +### How do I train a YOLO11n-obb model using a custom dataset? -To train a YOLOv8n-obb model with a custom dataset, follow the example below using Python or CLI: +To train a YOLO11n-obb model with a custom dataset, follow the example below using Python or CLI: !!! example @@ -220,7 +220,7 @@ To train a YOLOv8n-obb model with a custom dataset, follow the example below usi from ultralytics import YOLO # Load a pretrained model - model = YOLO("yolov8n-obb.pt") + model = YOLO("yolo11n-obb.pt") # Train the model results = model.train(data="path/to/custom_dataset.yaml", epochs=100, imgsz=640) @@ -229,18 +229,18 @@ To train a YOLOv8n-obb model with a custom dataset, follow the example below usi === "CLI" ```bash - yolo obb train data=path/to/custom_dataset.yaml model=yolov8n-obb.pt epochs=100 imgsz=640 + yolo obb train data=path/to/custom_dataset.yaml model=yolo11n-obb.pt epochs=100 imgsz=640 ``` For more training arguments, check the [Configuration](../usage/cfg.md) section. -### What datasets can I use for training YOLOv8-OBB models? +### What datasets can I use for training YOLO11-OBB models? -YOLOv8-OBB models are pretrained on datasets like [DOTAv1](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/DOTAv1.yaml) but you can use any dataset formatted for OBB. Detailed information on OBB dataset formats can be found in the [Dataset Guide](../datasets/obb/index.md). +YOLO11-OBB models are pretrained on datasets like [DOTAv1](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/DOTAv1.yaml) but you can use any dataset formatted for OBB. Detailed information on OBB dataset formats can be found in the [Dataset Guide](../datasets/obb/index.md). -### How can I export a YOLOv8-OBB model to ONNX format? +### How can I export a YOLO11-OBB model to ONNX format? -Exporting a YOLOv8-OBB model to ONNX format is straightforward using either Python or CLI: +Exporting a YOLO11-OBB model to ONNX format is straightforward using either Python or CLI: !!! example @@ -250,7 +250,7 @@ Exporting a YOLOv8-OBB model to ONNX format is straightforward using either Pyth from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-obb.pt") + model = YOLO("yolo11n-obb.pt") # Export the model model.export(format="onnx") @@ -259,14 +259,14 @@ Exporting a YOLOv8-OBB model to ONNX format is straightforward using either Pyth === "CLI" ```bash - yolo export model=yolov8n-obb.pt format=onnx + yolo export model=yolo11n-obb.pt format=onnx ``` For more export formats and details, refer to the [Export](../modes/export.md) page. -### How do I validate the accuracy of a YOLOv8n-obb model? +### How do I validate the accuracy of a YOLO11n-obb model? 
-To validate a YOLOv8n-obb model, you can use Python or CLI commands as shown below: +To validate a YOLO11n-obb model, you can use Python or CLI commands as shown below: !!! example @@ -276,7 +276,7 @@ To validate a YOLOv8n-obb model, you can use Python or CLI commands as shown bel from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-obb.pt") + model = YOLO("yolo11n-obb.pt") # Validate the model metrics = model.val(data="dota8.yaml") @@ -285,7 +285,7 @@ To validate a YOLOv8n-obb model, you can use Python or CLI commands as shown bel === "CLI" ```bash - yolo obb val model=yolov8n-obb.pt data=dota8.yaml + yolo obb val model=yolo11n-obb.pt data=dota8.yaml ``` See full validation details in the [Val](../modes/val.md) section. diff --git a/docs/en/tasks/pose.md b/docs/en/tasks/pose.md index 8f5f0b8968..8b3059bc72 100644 --- a/docs/en/tasks/pose.md +++ b/docs/en/tasks/pose.md @@ -38,9 +38,9 @@ The output of a pose estimation model is a set of points that represent the keyp !!! tip - YOLOv8 _pose_ models use the `-pose` suffix, i.e. `yolov8n-pose.pt`. These models are trained on the [COCO keypoints](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco-pose.yaml) dataset and are suitable for a variety of pose estimation tasks. + YOLO11 _pose_ models use the `-pose` suffix, i.e. `yolo11n-pose.pt`. These models are trained on the [COCO keypoints](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco-pose.yaml) dataset and are suitable for a variety of pose estimation tasks. - In the default YOLOv8 pose model, there are 17 keypoints, each representing a different part of the human body. Here is the mapping of each index to its respective body joint: + In the default YOLO11 pose model, there are 17 keypoints, each representing a different part of the human body. Here is the mapping of each index to its respective body joint: 0: Nose 1: Left Eye @@ -60,14 +60,14 @@ The output of a pose estimation model is a set of points that represent the keyp 15: Left Ankle 16: Right Ankle -## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/v8) +## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/11) -YOLOv8 pretrained Pose models are shown here. Detect, Segment and Pose models are pretrained on the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml) dataset, while Classify models are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset. +YOLO11 pretrained Pose models are shown here. Detect, Segment and Pose models are pretrained on the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml) dataset, while Classify models are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset. [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use. | Model | size
(pixels) | mAP<sup>pose<br>50-95 | mAP<sup>pose<br>50 | Speed<br><sup>CPU ONNX<br><sup>(ms) | Speed<br><sup>T4 TensorRT10<br><sup>(ms) | params<br><sup>(M) | FLOPs<br><sup>
(B) | -|------------------------------------------------------------------------------------------------|-----------------------|-----------------------|--------------------|--------------------------------|-------------------------------------|--------------------|-------------------| +| ---------------------------------------------------------------------------------------------- | --------------------- | --------------------- | ------------------ | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | | [YOLO11n-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-pose.pt) | 640 | 50.0 | 81.0 | 52.4 ± 0.5 | 1.7 ± 0.0 | 2.9 | 7.6 | | [YOLO11s-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-pose.pt) | 640 | 58.9 | 86.3 | 90.5 ± 0.6 | 2.6 ± 0.0 | 9.9 | 23.2 | | [YOLO11m-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-pose.pt) | 640 | 64.9 | 89.4 | 187.3 ± 0.8 | 4.9 ± 0.1 | 20.9 | 71.7 | @@ -79,7 +79,7 @@ YOLOv8 pretrained Pose models are shown here. Detect, Segment and Pose models ar ## Train -Train a YOLOv8-pose model on the COCO128-pose dataset. +Train a YOLO11-pose model on the COCO128-pose dataset. !!! example @@ -89,9 +89,9 @@ Train a YOLOv8-pose model on the COCO128-pose dataset. from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-pose.yaml") # build a new model from YAML - model = YOLO("yolov8n-pose.pt") # load a pretrained model (recommended for training) - model = YOLO("yolov8n-pose.yaml").load("yolov8n-pose.pt") # build from YAML and transfer weights + model = YOLO("yolo11n-pose.yaml") # build a new model from YAML + model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-pose.yaml").load("yolo11n-pose.pt") # build from YAML and transfer weights # Train the model results = model.train(data="coco8-pose.yaml", epochs=100, imgsz=640) @@ -101,13 +101,13 @@ Train a YOLOv8-pose model on the COCO128-pose dataset. ```bash # Build a new model from YAML and start training from scratch - yolo pose train data=coco8-pose.yaml model=yolov8n-pose.yaml epochs=100 imgsz=640 + yolo pose train data=coco8-pose.yaml model=yolo11n-pose.yaml epochs=100 imgsz=640 # Start training from a pretrained *.pt model - yolo pose train data=coco8-pose.yaml model=yolov8n-pose.pt epochs=100 imgsz=640 + yolo pose train data=coco8-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640 # Build a new model from YAML, transfer pretrained weights to it and start training - yolo pose train data=coco8-pose.yaml model=yolov8n-pose.yaml pretrained=yolov8n-pose.pt epochs=100 imgsz=640 + yolo pose train data=coco8-pose.yaml model=yolo11n-pose.yaml pretrained=yolo11n-pose.pt epochs=100 imgsz=640 ``` ### Dataset format @@ -116,7 +116,7 @@ YOLO pose dataset format can be found in detail in the [Dataset Guide](../datase ## Val -Validate trained YOLOv8n-pose model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the COCO128-pose dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. +Validate trained YOLO11n-pose model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the COCO128-pose dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. !!! 
example @@ -126,7 +126,7 @@ Validate trained YOLOv8n-pose model [accuracy](https://www.ultralytics.com/gloss from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-pose.pt") # load an official model + model = YOLO("yolo11n-pose.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom model # Validate the model @@ -140,13 +140,13 @@ Validate trained YOLOv8n-pose model [accuracy](https://www.ultralytics.com/gloss === "CLI" ```bash - yolo pose val model=yolov8n-pose.pt # val official model + yolo pose val model=yolo11n-pose.pt # val official model yolo pose val model=path/to/best.pt # val custom model ``` ## Predict -Use a trained YOLOv8n-pose model to run predictions on images. +Use a trained YOLO11n-pose model to run predictions on images. !!! example @@ -156,7 +156,7 @@ Use a trained YOLOv8n-pose model to run predictions on images. from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-pose.pt") # load an official model + model = YOLO("yolo11n-pose.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom model # Predict with the model @@ -166,7 +166,7 @@ Use a trained YOLOv8n-pose model to run predictions on images. === "CLI" ```bash - yolo pose predict model=yolov8n-pose.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model + yolo pose predict model=yolo11n-pose.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model yolo pose predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model ``` @@ -174,7 +174,7 @@ See full `predict` mode details in the [Predict](../modes/predict.md) page. ## Export -Export a YOLOv8n Pose model to a different format like ONNX, CoreML, etc. +Export a YOLO11n Pose model to a different format like ONNX, CoreML, etc. !!! example @@ -184,7 +184,7 @@ Export a YOLOv8n Pose model to a different format like ONNX, CoreML, etc. from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-pose.pt") # load an official model + model = YOLO("yolo11n-pose.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom trained model # Export the model @@ -194,11 +194,11 @@ Export a YOLOv8n Pose model to a different format like ONNX, CoreML, etc. === "CLI" ```bash - yolo export model=yolov8n-pose.pt format=onnx # export official model + yolo export model=yolo11n-pose.pt format=onnx # export official model yolo export model=path/to/best.pt format=onnx # export custom trained model ``` -Available YOLOv8-pose export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolov8n-pose.onnx`. Usage examples are shown for your model after export completes. +Available YOLO11-pose export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolo11n-pose.onnx`. Usage examples are shown for your model after export completes. {% include "macros/export-table.md" %} @@ -206,20 +206,20 @@ See full `export` details in the [Export](../modes/export.md) page. ## FAQ -### What is Pose Estimation with Ultralytics YOLOv8 and how does it work? +### What is Pose Estimation with Ultralytics YOLO11 and how does it work? 
-Pose estimation with Ultralytics YOLOv8 involves identifying specific points, known as keypoints, in an image. These keypoints typically represent joints or other important features of the object. The output includes the `[x, y]` coordinates and confidence scores for each point. YOLOv8-pose models are specifically designed for this task and use the `-pose` suffix, such as `yolov8n-pose.pt`. These models are pre-trained on datasets like [COCO keypoints](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco-pose.yaml) and can be used for various pose estimation tasks. For more information, visit the [Pose Estimation Page](#pose-estimation). +Pose estimation with Ultralytics YOLO11 involves identifying specific points, known as keypoints, in an image. These keypoints typically represent joints or other important features of the object. The output includes the `[x, y]` coordinates and confidence scores for each point. YOLO11-pose models are specifically designed for this task and use the `-pose` suffix, such as `yolo11n-pose.pt`. These models are pre-trained on datasets like [COCO keypoints](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco-pose.yaml) and can be used for various pose estimation tasks. For more information, visit the [Pose Estimation Page](#pose-estimation). -### How can I train a YOLOv8-pose model on a custom dataset? +### How can I train a YOLO11-pose model on a custom dataset? -Training a YOLOv8-pose model on a custom dataset involves loading a model, either a new model defined by a YAML file or a pre-trained model. You can then start the training process using your specified dataset and parameters. +Training a YOLO11-pose model on a custom dataset involves loading a model, either a new model defined by a YAML file or a pre-trained model. You can then start the training process using your specified dataset and parameters. ```python from ultralytics import YOLO # Load a model -model = YOLO("yolov8n-pose.yaml") # build a new model from YAML -model = YOLO("yolov8n-pose.pt") # load a pretrained model (recommended for training) +model = YOLO("yolo11n-pose.yaml") # build a new model from YAML +model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training) # Train the model results = model.train(data="your-dataset.yaml", epochs=100, imgsz=640) @@ -227,9 +227,9 @@ results = model.train(data="your-dataset.yaml", epochs=100, imgsz=640) For comprehensive details on training, refer to the [Train Section](#train). -### How do I validate a trained YOLOv8-pose model? +### How do I validate a trained YOLO11-pose model? -Validation of a YOLOv8-pose model involves assessing its accuracy using the same dataset parameters retained during training. Here's an example: +Validation of a YOLO11-pose model involves assessing its accuracy using the same dataset parameters retained during training. Here's an example: ```python from ultralytics import YOLO @@ -244,9 +244,9 @@ metrics = model.val() # no arguments needed, dataset and settings remembered For more information, visit the [Val Section](#val). -### Can I export a YOLOv8-pose model to other formats, and how? +### Can I export a YOLO11-pose model to other formats, and how? -Yes, you can export a YOLOv8-pose model to various formats like ONNX, CoreML, TensorRT, and more. This can be done using either Python or the Command Line Interface (CLI). +Yes, you can export a YOLO11-pose model to various formats like ONNX, CoreML, TensorRT, and more. 
This can be done using either Python or the Command Line Interface (CLI).

```python
from ultralytics import YOLO

# Load a model
model = YOLO("yolo11n-pose.pt")

# Export the model
model.export(format="onnx")
```

Refer to the [Export Section](#export) for more details.

-### What are the available Ultralytics YOLOv8-pose models and their performance metrics?
+### What are the available Ultralytics YOLO11-pose models and their performance metrics?

-Ultralytics YOLOv8 offers various pretrained pose models such as YOLOv8n-pose, YOLOv8s-pose, YOLOv8m-pose, among others. These models differ in size, accuracy (mAP), and speed. For instance, the YOLOv8n-pose model achieves a mAPpose50-95 of 50.4 and an mAPpose50 of 80.1. For a complete list and performance details, visit the [Models Section](#models).
+Ultralytics YOLO11 offers various pretrained pose models such as YOLO11n-pose, YOLO11s-pose, YOLO11m-pose, among others. These models differ in size, accuracy (mAP), and speed. For instance, the YOLO11n-pose model achieves a mAPpose50-95 of 50.0 and an mAPpose50 of 81.0. For a complete list and performance details, visit the [Models Section](#models).
diff --git a/docs/en/tasks/segment.md b/docs/en/tasks/segment.md
index f7054fb36a..f205bb15fb 100644
--- a/docs/en/tasks/segment.md
+++ b/docs/en/tasks/segment.md
@@ -26,16 +26,16 @@ The output of an instance segmentation model is a set of masks or contours that
 !!! tip

-    YOLOv8 Segment models use the `-seg` suffix, i.e. `yolov8n-seg.pt` and are pretrained on [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml).
+    YOLO11 Segment models use the `-seg` suffix, i.e. `yolo11n-seg.pt` and are pretrained on [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml).

-## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/v8)
+## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/11)

-YOLOv8 pretrained Segment models are shown here. Detect, Segment and Pose models are pretrained on the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml) dataset, while Classify models are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset.
+YOLO11 pretrained Segment models are shown here. Detect, Segment and Pose models are pretrained on the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml) dataset, while Classify models are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset.

[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.

| Model | size<br>
(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Speed<br><sup>CPU ONNX<br><sup>(ms) | Speed<br><sup>T4 TensorRT10<br><sup>(ms) | params<br><sup>(M) | FLOPs<br><sup>
(B) | -|----------------------------------------------------------------------------------------------|-----------------------|----------------------|-----------------------|--------------------------------|-------------------------------------|--------------------|-------------------| +| -------------------------------------------------------------------------------------------- | --------------------- | -------------------- | --------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | | [YOLO11n-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-seg.pt) | 640 | 38.9 | 32.0 | 65.9 ± 1.1 | 1.8 ± 0.0 | 2.9 | 10.4 | | [YOLO11s-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-seg.pt) | 640 | 46.6 | 37.8 | 117.6 ± 4.9 | 2.9 ± 0.0 | 10.1 | 35.5 | | [YOLO11m-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-seg.pt) | 640 | 51.5 | 41.5 | 281.6 ± 1.2 | 6.3 ± 0.1 | 22.4 | 123.3 | @@ -47,7 +47,7 @@ YOLOv8 pretrained Segment models are shown here. Detect, Segment and Pose models ## Train -Train YOLOv8n-seg on the COCO128-seg dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 640. For a full list of available arguments see the [Configuration](../usage/cfg.md) page. +Train YOLO11n-seg on the COCO128-seg dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) at image size 640. For a full list of available arguments see the [Configuration](../usage/cfg.md) page. !!! example @@ -57,9 +57,9 @@ Train YOLOv8n-seg on the COCO128-seg dataset for 100 [epochs](https://www.ultral from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.yaml") # build a new model from YAML - model = YOLO("yolov8n-seg.pt") # load a pretrained model (recommended for training) - model = YOLO("yolov8n-seg.yaml").load("yolov8n.pt") # build from YAML and transfer weights + model = YOLO("yolo11n-seg.yaml") # build a new model from YAML + model = YOLO("yolo11n-seg.pt") # load a pretrained model (recommended for training) + model = YOLO("yolo11n-seg.yaml").load("yolo11n.pt") # build from YAML and transfer weights # Train the model results = model.train(data="coco8-seg.yaml", epochs=100, imgsz=640) @@ -69,13 +69,13 @@ Train YOLOv8n-seg on the COCO128-seg dataset for 100 [epochs](https://www.ultral ```bash # Build a new model from YAML and start training from scratch - yolo segment train data=coco8-seg.yaml model=yolov8n-seg.yaml epochs=100 imgsz=640 + yolo segment train data=coco8-seg.yaml model=yolo11n-seg.yaml epochs=100 imgsz=640 # Start training from a pretrained *.pt model - yolo segment train data=coco8-seg.yaml model=yolov8n-seg.pt epochs=100 imgsz=640 + yolo segment train data=coco8-seg.yaml model=yolo11n-seg.pt epochs=100 imgsz=640 # Build a new model from YAML, transfer pretrained weights to it and start training - yolo segment train data=coco8-seg.yaml model=yolov8n-seg.yaml pretrained=yolov8n-seg.pt epochs=100 imgsz=640 + yolo segment train data=coco8-seg.yaml model=yolo11n-seg.yaml pretrained=yolo11n-seg.pt epochs=100 imgsz=640 ``` ### Dataset format @@ -84,7 +84,7 @@ YOLO segmentation dataset format can be found in detail in the [Dataset Guide](. ## Val -Validate trained YOLOv8n-seg model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the COCO128-seg dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. 
+Validate trained YOLO11n-seg model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the COCO128-seg dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. !!! example @@ -94,7 +94,7 @@ Validate trained YOLOv8n-seg model [accuracy](https://www.ultralytics.com/glossa from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.pt") # load an official model + model = YOLO("yolo11n-seg.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom model # Validate the model @@ -112,13 +112,13 @@ Validate trained YOLOv8n-seg model [accuracy](https://www.ultralytics.com/glossa === "CLI" ```bash - yolo segment val model=yolov8n-seg.pt # val official model + yolo segment val model=yolo11n-seg.pt # val official model yolo segment val model=path/to/best.pt # val custom model ``` ## Predict -Use a trained YOLOv8n-seg model to run predictions on images. +Use a trained YOLO11n-seg model to run predictions on images. !!! example @@ -128,7 +128,7 @@ Use a trained YOLOv8n-seg model to run predictions on images. from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.pt") # load an official model + model = YOLO("yolo11n-seg.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom model # Predict with the model @@ -138,7 +138,7 @@ Use a trained YOLOv8n-seg model to run predictions on images. === "CLI" ```bash - yolo segment predict model=yolov8n-seg.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model + yolo segment predict model=yolo11n-seg.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model yolo segment predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model ``` @@ -146,7 +146,7 @@ See full `predict` mode details in the [Predict](../modes/predict.md) page. ## Export -Export a YOLOv8n-seg model to a different format like ONNX, CoreML, etc. +Export a YOLO11n-seg model to a different format like ONNX, CoreML, etc. !!! example @@ -156,7 +156,7 @@ Export a YOLOv8n-seg model to a different format like ONNX, CoreML, etc. from ultralytics import YOLO # Load a model - model = YOLO("yolov8n-seg.pt") # load an official model + model = YOLO("yolo11n-seg.pt") # load an official model model = YOLO("path/to/best.pt") # load a custom trained model # Export the model @@ -166,11 +166,11 @@ Export a YOLOv8n-seg model to a different format like ONNX, CoreML, etc. === "CLI" ```bash - yolo export model=yolov8n-seg.pt format=onnx # export official model + yolo export model=yolo11n-seg.pt format=onnx # export official model yolo export model=path/to/best.pt format=onnx # export custom trained model ``` -Available YOLOv8-seg export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolov8n-seg.onnx`. Usage examples are shown for your model after export completes. +Available YOLO11-seg export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. You can predict or validate directly on exported models, i.e. `yolo predict model=yolo11n-seg.onnx`. Usage examples are shown for your model after export completes. {% include "macros/export-table.md" %} @@ -178,9 +178,9 @@ See full `export` details in the [Export](../modes/export.md) page. 
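To make the round trip concrete, the sketch below exports the segmentation model to ONNX and then predicts directly on the exported file, as noted above. It assumes `export()` returns the path of the exported model and that the ONNX dependencies are installed:

```python
from ultralytics import YOLO

# Export a pretrained segmentation model to ONNX
model = YOLO("yolo11n-seg.pt")
onnx_path = model.export(format="onnx")  # assumed to return the exported file path

# Predict directly on the exported model
onnx_model = YOLO(onnx_path)  # e.g. "yolo11n-seg.onnx"
results = onnx_model("https://ultralytics.com/images/bus.jpg")
```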
## FAQ -### How do I train a YOLOv8 segmentation model on a custom dataset? +### How do I train a YOLO11 segmentation model on a custom dataset? -To train a YOLOv8 segmentation model on a custom dataset, you first need to prepare your dataset in the YOLO segmentation format. You can use tools like [JSON2YOLO](https://github.com/ultralytics/JSON2YOLO) to convert datasets from other formats. Once your dataset is ready, you can train the model using Python or CLI commands: +To train a YOLO11 segmentation model on a custom dataset, you first need to prepare your dataset in the YOLO segmentation format. You can use tools like [JSON2YOLO](https://github.com/ultralytics/JSON2YOLO) to convert datasets from other formats. Once your dataset is ready, you can train the model using Python or CLI commands: !!! example @@ -189,8 +189,8 @@ To train a YOLOv8 segmentation model on a custom dataset, you first need to prep ```python from ultralytics import YOLO - # Load a pretrained YOLOv8 segment model - model = YOLO("yolov8n-seg.pt") + # Load a pretrained YOLO11 segment model + model = YOLO("yolo11n-seg.pt") # Train the model results = model.train(data="path/to/your_dataset.yaml", epochs=100, imgsz=640) @@ -199,18 +199,18 @@ To train a YOLOv8 segmentation model on a custom dataset, you first need to prep === "CLI" ```bash - yolo segment train data=path/to/your_dataset.yaml model=yolov8n-seg.pt epochs=100 imgsz=640 + yolo segment train data=path/to/your_dataset.yaml model=yolo11n-seg.pt epochs=100 imgsz=640 ``` Check the [Configuration](../usage/cfg.md) page for more available arguments. -### What is the difference between [object detection](https://www.ultralytics.com/glossary/object-detection) and instance segmentation in YOLOv8? +### What is the difference between [object detection](https://www.ultralytics.com/glossary/object-detection) and instance segmentation in YOLO11? -Object detection identifies and localizes objects within an image by drawing bounding boxes around them, whereas instance segmentation not only identifies the bounding boxes but also delineates the exact shape of each object. YOLOv8 instance segmentation models provide masks or contours that outline each detected object, which is particularly useful for tasks where knowing the precise shape of objects is important, such as medical imaging or autonomous driving. +Object detection identifies and localizes objects within an image by drawing bounding boxes around them, whereas instance segmentation not only identifies the bounding boxes but also delineates the exact shape of each object. YOLO11 instance segmentation models provide masks or contours that outline each detected object, which is particularly useful for tasks where knowing the precise shape of objects is important, such as medical imaging or autonomous driving. -### Why use YOLOv8 for instance segmentation? +### Why use YOLO11 for instance segmentation? -Ultralytics YOLOv8 is a state-of-the-art model recognized for its high accuracy and real-time performance, making it ideal for instance segmentation tasks. YOLOv8 Segment models come pretrained on the [COCO dataset](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml), ensuring robust performance across a variety of objects. Additionally, YOLOv8 supports training, validation, prediction, and export functionalities with seamless integration, making it highly versatile for both research and industry applications. 
+Ultralytics YOLO11 is a state-of-the-art model recognized for its high accuracy and real-time performance, making it ideal for instance segmentation tasks. YOLO11 Segment models come pretrained on the [COCO dataset](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml), ensuring robust performance across a variety of objects. Additionally, YOLO11 supports training, validation, prediction, and export functionalities with seamless integration, making it highly versatile for both research and industry applications.

### How do I load and validate a pretrained YOLOv8 segmentation model?

@@ -224,7 +224,7 @@ Loading and validating a pretrained YOLOv8 segmentation model is straightforward
        from ultralytics import YOLO

        # Load a pretrained model
-        model = YOLO("yolov8n-seg.pt")
+        model = YOLO("yolo11n-seg.pt")

        # Validate the model
        metrics = model.val()
@@ -235,7 +235,7 @@ Loading and validating a pretrained YOLOv8 segmentation model is straightforward
    === "CLI"

        ```bash
-        yolo segment val model=yolov8n-seg.pt
+        yolo segment val model=yolo11n-seg.pt
        ```

These steps will provide you with validation metrics like [Mean Average Precision](https://www.ultralytics.com/glossary/mean-average-precision-map) (mAP), crucial for assessing model performance.

@@ -252,7 +252,7 @@ Exporting a YOLOv8 segmentation model to ONNX format is simple and can be done u
        from ultralytics import YOLO

        # Load a pretrained model
-        model = YOLO("yolov8n-seg.pt")
+        model = YOLO("yolo11n-seg.pt")

        # Export the model to ONNX format
        model.export(format="onnx")
@@ -261,7 +261,7 @@ Exporting a YOLOv8 segmentation model to ONNX format is simple and can be done u
    === "CLI"

        ```bash
-        yolo export model=yolov8n-seg.pt format=onnx
+        yolo export model=yolo11n-seg.pt format=onnx
        ```

For more details on exporting to various formats, refer to the [Export](../modes/export.md) page.
diff --git a/docs/en/usage/callbacks.md b/docs/en/usage/callbacks.md
index 2886f8f512..16c4718786 100644
--- a/docs/en/usage/callbacks.md
+++ b/docs/en/usage/callbacks.md
@@ -1,7 +1,7 @@
 ---
 comments: true
 description: Explore Ultralytics callbacks for training, validation, exporting, and prediction. Learn how to use and customize them for your ML models.
-keywords: Ultralytics, callbacks, training, validation, export, prediction, ML models, YOLOv8, Python, machine learning
+keywords: Ultralytics, callbacks, training, validation, export, prediction, ML models, YOLO11, Python, machine learning
 ---

 ## Callbacks

@@ -16,7 +16,7 @@ Ultralytics framework supports callbacks as entry points in strategic stages of
      allowfullscreen>
- Watch: Mastering Ultralytics YOLOv8: Callbacks + Watch: Mastering Ultralytics YOLO: Callbacks

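Before the fuller examples below, a minimal sketch of the mechanics may help: define a function named for a callback event and attach it with `add_callback`. The `on_train_epoch_end` event is used again later on this page; the `trainer.epoch` counter is an assumption about the trainer object passed to the callback:

```python
from ultralytics import YOLO


def log_epoch(trainer):
    """Print each completed epoch; `trainer.epoch` is assumed to be 0-indexed."""
    print(f"Finished epoch {trainer.epoch + 1}")


model = YOLO("yolo11n.pt")
model.add_callback("on_train_epoch_end", log_epoch)
model.train(data="coco8.yaml", epochs=3)
```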
## Examples @@ -41,7 +41,7 @@ def on_predict_batch_end(predictor): # Create a YOLO model instance -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") # Add the custom callback to the model model.add_callback("on_predict_batch_end", on_predict_batch_end) @@ -119,7 +119,7 @@ def on_predict_batch_end(predictor): predictor.results = zip(predictor.results, image) -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") model.add_callback("on_predict_batch_end", on_predict_batch_end) for result, frame in model.predict(): pass @@ -141,7 +141,7 @@ def on_train_epoch_end(trainer): trainer.log({"additional_metric": additional_metric}) -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") model.add_callback("on_train_epoch_end", on_train_epoch_end) model.train(data="coco.yaml", epochs=10) ``` @@ -164,7 +164,7 @@ def on_val_end(validator): validator.log({"custom_metric": custom_metric}) -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") model.add_callback("on_val_end", on_val_end) model.val(data="coco.yaml") ``` @@ -187,7 +187,7 @@ def on_predict_end(predictor): log_prediction(result) -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") model.add_callback("on_predict_end", on_predict_end) results = model.predict(source="image.jpg") ``` @@ -215,7 +215,7 @@ def on_predict_batch_end(predictor): predictor.results = zip(predictor.results, image) -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") model.add_callback("on_predict_batch_end", on_predict_batch_end) for result, frame in model.predict(): pass diff --git a/docs/en/usage/cfg.md b/docs/en/usage/cfg.md index aecc5fc646..8f8ac6025f 100644 --- a/docs/en/usage/cfg.md +++ b/docs/en/usage/cfg.md @@ -14,7 +14,7 @@ YOLO settings and hyperparameters play a critical role in the model's performanc allowfullscreen>
- Watch: Mastering Ultralytics YOLOv8: Configuration + Watch: Mastering Ultralytics YOLO: Configuration

Ultralytics commands use the following syntax: @@ -32,8 +32,8 @@ Ultralytics commands use the following syntax: ```python from ultralytics import YOLO - # Load a YOLOv8 model from a pre-trained weights file - model = YOLO("yolov8n.pt") + # Load a YOLO11 model from a pre-trained weights file + model = YOLO("yolo11n.pt") # Run MODE mode using the custom arguments ARGS (guess TASK) model.MODE(ARGS) @@ -67,12 +67,12 @@ YOLO models can be used for a variety of tasks, including detection, segmentatio YOLO models can be used in different modes depending on the specific problem you are trying to solve. These modes include: -- **Train**: For training a YOLOv8 model on a custom dataset. -- **Val**: For validating a YOLOv8 model after it has been trained. -- **Predict**: For making predictions using a trained YOLOv8 model on new images or videos. -- **Export**: For exporting a YOLOv8 model to a format that can be used for deployment. -- **Track**: For tracking objects in real-time using a YOLOv8 model. -- **Benchmark**: For benchmarking YOLOv8 exports (ONNX, TensorRT, etc.) speed and accuracy. +- **Train**: For training a YOLO11 model on a custom dataset. +- **Val**: For validating a YOLO11 model after it has been trained. +- **Predict**: For making predictions using a trained YOLO11 model on new images or videos. +- **Export**: For exporting a YOLO11 model to a format that can be used for deployment. +- **Track**: For tracking objects in real-time using a YOLO11 model. +- **Benchmark**: For benchmarking YOLO11 exports (ONNX, TensorRT, etc.) speed and accuracy. | Argument | Default | Description | | -------- | --------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | diff --git a/docs/en/usage/cli.md b/docs/en/usage/cli.md index d1d7c8de48..a2276f2e00 100644 --- a/docs/en/usage/cli.md +++ b/docs/en/usage/cli.md @@ -1,7 +1,7 @@ --- comments: true -description: Explore the YOLOv8 command line interface (CLI) for easy execution of detection tasks without needing a Python environment. -keywords: YOLOv8 CLI, command line interface, YOLOv8 commands, detection tasks, Ultralytics, model training, model prediction +description: Explore the YOLO11 command line interface (CLI) for easy execution of detection tasks without needing a Python environment. +keywords: YOLO11 CLI, command line interface, YOLO11 commands, detection tasks, Ultralytics, model training, model prediction --- # Command Line Interface Usage @@ -16,7 +16,7 @@ The YOLO command line interface (CLI) allows for simple single-line commands wit allowfullscreen>
- Watch: Mastering Ultralytics YOLOv8: CLI + Watch: Mastering Ultralytics YOLO: CLI

!!! example @@ -37,28 +37,28 @@ The YOLO command line interface (CLI) allows for simple single-line commands wit Train a detection model for 10 [epochs](https://www.ultralytics.com/glossary/epoch) with an initial learning_rate of 0.01 ```bash - yolo train data=coco8.yaml model=yolov8n.pt epochs=10 lr0=0.01 + yolo train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01 ``` === "Predict" Predict a YouTube video using a pretrained segmentation model at image size 320: ```bash - yolo predict model=yolov8n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320 + yolo predict model=yolo11n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320 ``` === "Val" Val a pretrained detection model at batch-size 1 and image size 640: ```bash - yolo val model=yolov8n.pt data=coco8.yaml batch=1 imgsz=640 + yolo val model=yolo11n.pt data=coco8.yaml batch=1 imgsz=640 ``` === "Export" - Export a YOLOv8n classification model to ONNX format at image size 224 by 128 (no TASK required) + Export a YOLO11n classification model to ONNX format at image size 224 by 128 (no TASK required) ```bash - yolo export model=yolov8n-cls.pt format=onnx imgsz=224,128 + yolo export model=yolo11n-cls.pt format=onnx imgsz=224,128 ``` === "Special" @@ -75,7 +75,7 @@ The YOLO command line interface (CLI) allows for simple single-line commands wit Where: -- `TASK` (optional) is one of `[detect, segment, classify, pose, obb]`. If it is not passed explicitly YOLOv8 will try to guess the `TASK` from the model type. +- `TASK` (optional) is one of `[detect, segment, classify, pose, obb]`. If it is not passed explicitly YOLO11 will try to guess the `TASK` from the model type. - `MODE` (required) is one of `[train, val, predict, export, track, benchmark]` - `ARGS` (optional) are any number of custom `arg=value` pairs like `imgsz=320` that override defaults. For a full list of available `ARGS` see the [Configuration](cfg.md) page and `defaults.yaml` @@ -83,21 +83,21 @@ Where: Arguments must be passed as `arg=val` pairs, split by an equals `=` sign and delimited by spaces ` ` between pairs. Do not use `--` argument prefixes or commas `,` between arguments. - - `yolo predict model=yolov8n.pt imgsz=640 conf=0.25`   ✅ - - `yolo predict model yolov8n.pt imgsz 640 conf 0.25`   ❌ - - `yolo predict --model yolov8n.pt --imgsz 640 --conf 0.25`   ❌ + - `yolo predict model=yolo11n.pt imgsz=640 conf=0.25`   ✅ + - `yolo predict model yolo11n.pt imgsz 640 conf 0.25`   ❌ + - `yolo predict --model yolo11n.pt --imgsz 640 --conf 0.25`   ❌ ## Train -Train YOLOv8n on the COCO8 dataset for 100 epochs at image size 640. For a full list of available arguments see the [Configuration](cfg.md) page. +Train YOLO11n on the COCO8 dataset for 100 epochs at image size 640. For a full list of available arguments see the [Configuration](cfg.md) page. !!! example === "Train" - Start training YOLOv8n on COCO8 for 100 epochs at image-size 640. + Start training YOLO11n on COCO8 for 100 epochs at image-size 640. ```bash - yolo detect train data=coco8.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train data=coco8.yaml model=yolo11n.pt epochs=100 imgsz=640 ``` === "Resume" @@ -109,15 +109,15 @@ Train YOLOv8n on the COCO8 dataset for 100 epochs at image size 640. For a full ## Val -Validate trained YOLOv8n model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the COCO8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. 
@@ -109,15 +109,15 @@ Train YOLOv8n on the COCO8 dataset for 100 epochs at image size 640. For a full ## Val -Validate trained YOLOv8n model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the COCO8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. +Validate trained YOLO11n model [accuracy](https://www.ultralytics.com/glossary/accuracy) on the COCO8 dataset. No arguments are needed as the `model` retains its training `data` and arguments as model attributes. !!! example === "Official" - Validate an official YOLOv8n model. + Validate an official YOLO11n model. ```bash - yolo detect val model=yolov8n.pt + yolo detect val model=yolo11n.pt ``` === "Custom" @@ -129,15 +129,15 @@ Validate trained YOLOv8n model [accuracy](https://www.ultralytics.com/glossary/a ## Predict -Use a trained YOLOv8n model to run predictions on images. +Use a trained YOLO11n model to run predictions on images. !!! example === "Official" - Predict with an official YOLOv8n model. + Predict with an official YOLO11n model. ```bash - yolo detect predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg' + yolo detect predict model=yolo11n.pt source='https://ultralytics.com/images/bus.jpg' ``` === "Custom" @@ -149,15 +149,15 @@ Use a trained YOLOv8n model to run predictions on images. ## Export -Export a YOLOv8n model to a different format like ONNX, CoreML, etc. +Export a YOLO11n model to a different format like ONNX, CoreML, etc. !!! example === "Official" - Export an official YOLOv8n model to ONNX format. + Export an official YOLO11n model to ONNX format. ```bash - yolo export model=yolov8n.pt format=onnx + yolo export model=yolo11n.pt format=onnx ``` === "Custom" @@ -167,7 +167,7 @@ Export a YOLOv8n model to a different format like ONNX, CoreML, etc. yolo export model=path/to/best.pt format=onnx ``` -Available YOLOv8 export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. +Available YOLO11 export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`. {% include "macros/export-table.md" %} @@ -183,21 +183,21 @@ Default arguments can be overridden by simply passing them as arguments in the C Train a detection model for `10 epochs` with `learning_rate` of `0.01` ```bash - yolo detect train data=coco8.yaml model=yolov8n.pt epochs=10 lr0=0.01 + yolo detect train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01 ``` === "Predict" Run prediction on a YouTube video using a pretrained segmentation model at image size 320: ```bash - yolo segment predict model=yolov8n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320 + yolo segment predict model=yolo11n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320 ``` === "Val" Validate a pretrained detection model at batch-size 1 and image size 640: ```bash - yolo detect val model=yolov8n.pt data=coco8.yaml batch=1 imgsz=640 + yolo detect val model=yolo11n.pt data=coco8.yaml batch=1 imgsz=640 ``` ## Overriding default config file
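The body of this section falls between hunks; for reference, a minimal sketch of the documented copy-and-override flow (the `default_copy.yaml` name matches the file described in the hunk header below):

```bash
# Copy the packaged default config into default_copy.yaml in the working directory
yolo copy-cfg

# Train with the copied config, overriding imgsz at the command line
yolo cfg=default_copy.yaml imgsz=320
```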
@@ -219,19 +219,19 @@ This will create `default_copy.yaml`, which you can then pass as `cfg=default_co ## FAQ -### How do I use the Ultralytics YOLOv8 command line interface (CLI) for model training? +### How do I use the Ultralytics YOLO11 command line interface (CLI) for model training? -To train a YOLOv8 model using the CLI, you can execute a simple one-line command in the terminal. For example, to train a detection model for 10 epochs with a [learning rate](https://www.ultralytics.com/glossary/learning-rate) of 0.01, you would run: +To train a YOLO11 model using the CLI, you can execute a simple one-line command in the terminal. For example, to train a detection model for 10 epochs with a [learning rate](https://www.ultralytics.com/glossary/learning-rate) of 0.01, you would run: ```bash -yolo train data=coco8.yaml model=yolov8n.pt epochs=10 lr0=0.01 +yolo train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01 ``` This command uses the `train` mode with specific arguments. Refer to the full list of available arguments in the [Configuration Guide](cfg.md). -### What tasks can I perform with the Ultralytics YOLOv8 CLI? +### What tasks can I perform with the Ultralytics YOLO11 CLI? -The Ultralytics YOLOv8 CLI supports a variety of tasks including detection, segmentation, classification, validation, prediction, export, and tracking. For instance: +The Ultralytics YOLO11 CLI supports a variety of tasks, including detection, segmentation, and classification, along with modes for validation, prediction, export, and tracking. For instance: - **Train a Model**: Run `yolo train data=<data.yaml> model=<model.pt> epochs=<num_epochs>`. - **Run Predictions**: Use `yolo predict model=<model.pt> source=<data_source> imgsz=<image_size>`. @@ -239,32 +239,32 @@ The Ultralytics YOLOv8 CLI supports a variety of tasks including detection, segm Each task can be customized with various arguments. For detailed syntax and examples, see the respective sections like [Train](#train), [Predict](#predict), and [Export](#export). -### How can I validate the accuracy of a trained YOLOv8 model using the CLI? +### How can I validate the accuracy of a trained YOLO11 model using the CLI? -To validate a YOLOv8 model's accuracy, use the `val` mode. For example, to validate a pretrained detection model with a [batch size](https://www.ultralytics.com/glossary/batch-size) of 1 and image size of 640, run: +To validate a YOLO11 model's accuracy, use the `val` mode. For example, to validate a pretrained detection model with a [batch size](https://www.ultralytics.com/glossary/batch-size) of 1 and image size of 640, run: ```bash -yolo val model=yolov8n.pt data=coco8.yaml batch=1 imgsz=640 +yolo val model=yolo11n.pt data=coco8.yaml batch=1 imgsz=640 ``` This command evaluates the model on the specified dataset and provides performance metrics. For more details, refer to the [Val](#val) section. -### What formats can I export my YOLOv8 models to using the CLI? +### What formats can I export my YOLO11 models to using the CLI? -YOLOv8 models can be exported to various formats such as ONNX, CoreML, TensorRT, and more. For instance, to export a model to ONNX format, run: +YOLO11 models can be exported to various formats such as ONNX, CoreML, TensorRT, and more. For instance, to export a model to ONNX format, run: ```bash -yolo export model=yolov8n.pt format=onnx +yolo export model=yolo11n.pt format=onnx ``` For complete details, visit the [Export](../modes/export.md) page. -### How do I customize YOLOv8 CLI commands to override default arguments? +### How do I customize YOLO11 CLI commands to override default arguments? -To override default arguments in YOLOv8 CLI commands, pass them as `arg=value` pairs. For example, to train a model with custom arguments, use: +To override default arguments in YOLO11 CLI commands, pass them as `arg=value` pairs. For example, to train a model with custom arguments, use: ```bash -yolo train data=coco8.yaml model=yolov8n.pt epochs=10 lr0=0.01 +yolo train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01 ``` For a full list of available arguments and their descriptions, refer to the [Configuration Guide](cfg.md). 
Ensure arguments are formatted correctly, as shown in the [Overriding default arguments](#overriding-default-arguments) section. diff --git a/docs/en/usage/engine.md index dc44047ff7..d5d807c995 100644 --- a/docs/en/usage/engine.md +++ b/docs/en/usage/engine.md @@ -1,7 +1,7 @@ --- comments: true -description: Learn to customize the YOLOv8 Trainer for specific tasks. Step-by-step instructions with Python examples for maximum model performance. -keywords: Ultralytics, YOLOv8, Trainer Customization, Python, Machine Learning, AI, Model Training, DetectionTrainer, Custom Models +description: Learn to customize the YOLO11 Trainer for specific tasks. Step-by-step instructions with Python examples for maximum model performance. +keywords: Ultralytics, YOLO11, Trainer Customization, Python, Machine Learning, AI, Model Training, DetectionTrainer, Custom Models --- Both the Ultralytics YOLO command-line and Python interfaces are simply high-level abstractions over the base engine executors. Let's take a look at the Trainer engine. @@ -14,7 +14,7 @@ Both the Ultralytics YOLO command-line and Python interfaces are simply a high-l allowfullscreen> 
- Watch: Mastering Ultralytics YOLOv8: Advanced Customization + Watch: Mastering Ultralytics YOLO: Advanced Customization

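To make the abstraction concrete before digging into the trainer internals below, here is a minimal sketch, assuming `yolo11n.pt` weights and the `coco8.yaml` dataset, of the same short run driven through the high-level model API and directly at the engine level:

```python
from ultralytics import YOLO
from ultralytics.models.yolo.detect import DetectionTrainer

# High-level route: the YOLO wrapper builds and runs a trainer internally
YOLO("yolo11n.pt").train(data="coco8.yaml", epochs=1)

# Engine route: drive the equivalent training routine directly
trainer = DetectionTrainer(overrides={"model": "yolo11n.pt", "data": "coco8.yaml", "epochs": 1})
trainer.train()
```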
## BaseTrainer @@ -26,7 +26,7 @@ BaseTrainer contains the generic boilerplate training routine. It can be customi ## DetectionTrainer -Here's how you can use the YOLOv8 `DetectionTrainer` and customize it. +Here's how you can use the YOLO11 `DetectionTrainer` and customize it. ```python from ultralytics.models.yolo.detect import DetectionTrainer @@ -96,9 +96,9 @@ There are other components that can be customized similarly like `Validators` an ## FAQ -### How do I customize the Ultralytics YOLOv8 DetectionTrainer for specific tasks? +### How do I customize the Ultralytics YOLO11 DetectionTrainer for specific tasks? -To customize the Ultralytics YOLOv8 `DetectionTrainer` for a specific task, you can override its methods to adapt to your custom model and dataloader. Start by inheriting from `DetectionTrainer` and then redefine methods like `get_model` to implement your custom functionalities. Here's an example: +To customize the Ultralytics YOLO11 `DetectionTrainer` for a specific task, you can override its methods to adapt to your custom model and dataloader. Start by inheriting from `DetectionTrainer` and then redefine methods like `get_model` to implement your custom functionalities. Here's an example: ```python from ultralytics.models.yolo.detect import DetectionTrainer @@ -117,18 +117,18 @@ trained_model = trainer.best # get best model For further customization like changing the `loss function` or adding a `callback`, you can reference our [Callbacks Guide](../usage/callbacks.md). -### What are the key components of the BaseTrainer in Ultralytics YOLOv8? +### What are the key components of the BaseTrainer in Ultralytics YOLO11? -The `BaseTrainer` in Ultralytics YOLOv8 serves as the foundation for training routines and can be customized for various tasks by overriding its generic methods. Key components include: +The `BaseTrainer` in Ultralytics YOLO11 serves as the foundation for training routines and can be customized for various tasks by overriding its generic methods. Key components include: - `get_model(cfg, weights)` to build the model to be trained. - `get_dataloader()` to build the dataloader. For more details on the customization and source code, see the [`BaseTrainer` Reference](../reference/engine/trainer.md). -### How can I add a callback to the Ultralytics YOLOv8 DetectionTrainer? +### How can I add a callback to the Ultralytics YOLO11 DetectionTrainer? -You can add callbacks to monitor and modify the training process in Ultralytics YOLOv8 `DetectionTrainer`. For instance, here's how you can add a callback to log model weights after every training [epoch](https://www.ultralytics.com/glossary/epoch): +You can add callbacks to monitor and modify the training process in Ultralytics YOLO11 `DetectionTrainer`. For instance, here's how you can add a callback to log model weights after every training [epoch](https://www.ultralytics.com/glossary/epoch): ```python from ultralytics.models.yolo.detect import DetectionTrainer @@ -148,19 +148,19 @@ trainer.train() For further details on callback events and entry points, refer to our [Callbacks Guide](../usage/callbacks.md). -### Why should I use Ultralytics YOLOv8 for model training? +### Why should I use Ultralytics YOLO11 for model training? -Ultralytics YOLOv8 offers a high-level abstraction on powerful engine executors, making it ideal for rapid development and customization. Key benefits include: +Ultralytics YOLO11 offers a high-level abstraction on powerful engine executors, making it ideal for rapid development and customization. 
Key benefits include: - **Ease of Use**: Both command-line and Python interfaces simplify complex tasks. - **Performance**: Optimized for real-time [object detection](https://www.ultralytics.com/glossary/object-detection) and various vision AI applications. - **Customization**: Easily extendable for custom models, [loss functions](https://www.ultralytics.com/glossary/loss-function), and dataloaders. -Learn more about YOLOv8's capabilities by visiting [Ultralytics YOLO](https://www.ultralytics.com/yolo). +Learn more about YOLO11's capabilities by visiting [Ultralytics YOLO](https://www.ultralytics.com/yolo). -### Can I use the Ultralytics YOLOv8 DetectionTrainer for non-standard models? +### Can I use the Ultralytics YOLO11 DetectionTrainer for non-standard models? -Yes, Ultralytics YOLOv8 `DetectionTrainer` is highly flexible and can be customized for non-standard models. By inheriting from `DetectionTrainer`, you can overload different methods to support your specific model's needs. Here's a simple example: +Yes, Ultralytics YOLO11 `DetectionTrainer` is highly flexible and can be customized for non-standard models. By inheriting from `DetectionTrainer`, you can overload different methods to support your specific model's needs. Here's a simple example: ```python from ultralytics.models.yolo.detect import DetectionTrainer diff --git a/docs/en/usage/python.md b/docs/en/usage/python.md index 5236af0a1d..c3afa2a1c6 100644 --- a/docs/en/usage/python.md +++ b/docs/en/usage/python.md @@ -1,12 +1,12 @@ --- comments: true -description: Learn to integrate YOLOv8 in Python for object detection, segmentation, and classification. Load, train models, and make predictions easily with our comprehensive guide. -keywords: YOLOv8, Python, object detection, segmentation, classification, machine learning, AI, pretrained models, train models, make predictions +description: Learn to integrate YOLO11 in Python for object detection, segmentation, and classification. Load, train models, and make predictions easily with our comprehensive guide. +keywords: YOLO11, Python, object detection, segmentation, classification, machine learning, AI, pretrained models, train models, make predictions --- # Python Usage -Welcome to the YOLOv8 Python Usage documentation! This guide is designed to help you seamlessly integrate YOLOv8 into your Python projects for [object detection](https://www.ultralytics.com/glossary/object-detection), segmentation, and classification. Here, you'll learn how to load and use pretrained models, train new models, and perform predictions on images. The easy-to-use Python interface is a valuable resource for anyone looking to incorporate YOLOv8 into their Python projects, allowing you to quickly implement advanced object detection capabilities. Let's get started! +Welcome to the YOLO11 Python Usage documentation! This guide is designed to help you seamlessly integrate YOLO11 into your Python projects for [object detection](https://www.ultralytics.com/glossary/object-detection), segmentation, and classification. Here, you'll learn how to load and use pretrained models, train new models, and perform predictions on images. The easy-to-use Python interface is a valuable resource for anyone looking to incorporate YOLO11 into their Python projects, allowing you to quickly implement advanced object detection capabilities. Let's get started!
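As a quick sanity check before exploring the modes below, a minimal sketch that loads the pretrained weights (fetched automatically on first use) and inspects the model; the `task` and `names` attributes shown are standard parts of the Python API:

```python
from ultralytics import YOLO

# Load a pretrained detection model
model = YOLO("yolo11n.pt")

print(model.task)  # "detect"
print(len(model.names))  # 80 COCO class names
```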


@@ -16,7 +16,7 @@ Welcome to the YOLOv8 Python Usage documentation! This guide is designed to help allowfullscreen>
- Watch: Mastering Ultralytics YOLOv8: Python + Watch: Mastering Ultralytics YOLO: Python

For example, users can load a model, train it, evaluate its performance on a validation set, and even export it to ONNX format with just a few lines of code. @@ -27,10 +27,10 @@ For example, users can load a model, train it, evaluate its performance on a val from ultralytics import YOLO # Create a new YOLO model from scratch - model = YOLO("yolov8n.yaml") + model = YOLO("yolo11n.yaml") # Load a pretrained YOLO model (recommended for training) - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") # Train the model using the 'coco8.yaml' dataset for 3 epochs results = model.train(data="coco8.yaml", epochs=3) @@ -47,7 +47,7 @@ For example, users can load a model, train it, evaluate its performance on a val ## [Train](../modes/train.md) -Train mode is used for training a YOLOv8 model on a custom dataset. In this mode, the model is trained using the specified dataset and hyperparameters. The training process involves optimizing the model's parameters so that it can accurately predict the classes and locations of objects in an image. +Train mode is used for training a YOLO11 model on a custom dataset. In this mode, the model is trained using the specified dataset and hyperparameters. The training process involves optimizing the model's parameters so that it can accurately predict the classes and locations of objects in an image. !!! example "Train" @@ -56,7 +56,7 @@ Train mode is used for training a YOLOv8 model on a custom dataset. In this mode ```python from ultralytics import YOLO - model = YOLO("yolov8n.pt") # pass any model type + model = YOLO("yolo11n.pt") # pass any model type results = model.train(epochs=5) ``` @@ -65,7 +65,7 @@ Train mode is used for training a YOLOv8 model on a custom dataset. In this mode ```python from ultralytics import YOLO - model = YOLO("yolov8n.yaml") + model = YOLO("yolo11n.yaml") results = model.train(data="coco8.yaml", epochs=5) ``` @@ -80,7 +80,7 @@ Train mode is used for training a YOLOv8 model on a custom dataset. In this mode ## [Val](../modes/val.md) -Val mode is used for validating a YOLOv8 model after it has been trained. In this mode, the model is evaluated on a validation set to measure its [accuracy](https://www.ultralytics.com/glossary/accuracy) and generalization performance. This mode can be used to tune the hyperparameters of the model to improve its performance. +Val mode is used for validating a YOLO11 model after it has been trained. In this mode, the model is evaluated on a validation set to measure its [accuracy](https://www.ultralytics.com/glossary/accuracy) and generalization performance. This mode can be used to tune the hyperparameters of the model to improve its performance. !!! example "Val" @@ -89,8 +89,8 @@ Val mode is used for validating a YOLOv8 model after it has been trained. In thi ```python from ultralytics import YOLO - # Load a YOLOv8 model - model = YOLO("yolov8n.yaml") + # Load a YOLO11 model + model = YOLO("yolo11n.yaml") # Train the model model.train(data="coco8.yaml", epochs=5) @@ -104,8 +104,8 @@ Val mode is used for validating a YOLOv8 model after it has been trained. In thi ```python from ultralytics import YOLO - # Load a YOLOv8 model - model = YOLO("yolov8n.yaml") + # Load a YOLO11 model + model = YOLO("yolo11n.yaml") # Train the model model.train(data="coco8.yaml", epochs=5) @@ -118,7 +118,7 @@ Val mode is used for validating a YOLOv8 model after it has been trained. In thi ## [Predict](../modes/predict.md) -Predict mode is used for making predictions using a trained YOLOv8 model on new images or videos. 
In this mode, the model is loaded from a checkpoint file, and the user can provide images or videos to perform inference. The model predicts the classes and locations of objects in the input images or videos. +Predict mode is used for making predictions using a trained YOLO11 model on new images or videos. In this mode, the model is loaded from a checkpoint file, and the user can provide images or videos to perform inference. The model predicts the classes and locations of objects in the input images or videos. !!! example "Predict" @@ -189,27 +189,27 @@ Predict mode is used for making predictions using a trained YOLOv8 model on new ## [Export](../modes/export.md) -Export mode is used for exporting a YOLOv8 model to a format that can be used for deployment. In this mode, the model is converted to a format that can be used by other software applications or hardware devices. This mode is useful when deploying the model to production environments. +Export mode is used for exporting a YOLO11 model to a format that can be used for deployment. In this mode, the model is converted to a format that can be used by other software applications or hardware devices. This mode is useful when deploying the model to production environments. !!! example "Export" === "Export to ONNX" - Export an official YOLOv8n model to ONNX with dynamic batch-size and image-size. + Export an official YOLO11n model to ONNX with dynamic batch-size and image-size. ```python from ultralytics import YOLO - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") model.export(format="onnx", dynamic=True) ``` === "Export to TensorRT" - Export an official YOLOv8n model to TensorRT on `device=0` for acceleration on CUDA devices. + Export an official YOLO11n model to TensorRT on `device=0` for acceleration on CUDA devices. ```python from ultralytics import YOLO - model = YOLO("yolov8n.pt") + model = YOLO("yolo11n.pt") model.export(format="engine", device=0) ``` @@ -217,7 +217,7 @@ Export mode is used for exporting a YOLOv8 model to a format that can be used fo ## [Track](../modes/track.md) -Track mode is used for tracking objects in real-time using a YOLOv8 model. In this mode, the model is loaded from a checkpoint file, and the user can provide a live video stream to perform real-time object tracking. This mode is useful for applications such as surveillance systems or self-driving cars. +Track mode is used for tracking objects in real-time using a YOLO11 model. In this mode, the model is loaded from a checkpoint file, and the user can provide a live video stream to perform real-time object tracking. This mode is useful for applications such as surveillance systems or self-driving cars. !!! example "Track" === "Python" ```python from ultralytics import YOLO # Load a model - model = YOLO("yolov8n.pt") # load an official detection model - model = YOLO("yolov8n-seg.pt") # load an official segmentation model + model = YOLO("yolo11n.pt") # load an official detection model + model = YOLO("yolo11n-seg.pt") # load an official segmentation model model = YOLO("path/to/best.pt") # load a custom model # Track with the model
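    # Reference sketch (the hunk ends above): typical documented calls, assuming the
    # model loaded earlier and the YouTube source used elsewhere on this page
    results = model.track(source="https://youtu.be/LNwODJXcvt4", show=True)  # default tracker
    results = model.track(source="https://youtu.be/LNwODJXcvt4", show=True, tracker="bytetrack.yaml")  # ByteTrack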
@@ -240,18 +240,18 @@ Track mode is used for tracking objects in real-time using a YOLOv8 model. In th ## [Benchmark](../modes/benchmark.md) -Benchmark mode is used to profile the speed and accuracy of various export formats for YOLOv8. The benchmarks provide information on the size of the exported format, its `mAP50-95` metrics (for object detection and segmentation) or `accuracy_top5` metrics (for classification), and the inference time in milliseconds per image across various export formats like ONNX, OpenVINO, TensorRT and others. This information can help users choose the optimal export format for their specific use case based on their requirements for speed and accuracy. +Benchmark mode is used to profile the speed and accuracy of various export formats for YOLO11. The benchmarks provide information on the size of the exported format, its `mAP50-95` metrics (for object detection and segmentation) or `accuracy_top5` metrics (for classification), and the inference time in milliseconds per image across various export formats like ONNX, OpenVINO, TensorRT and others. This information can help users choose the optimal export format for their specific use case based on their requirements for speed and accuracy. !!! example "Benchmark" === "Python" - Benchmark an official YOLOv8n model across all export formats. + Benchmark an official YOLO11n model across all export formats. ```python from ultralytics.utils.benchmarks import benchmark # Benchmark - benchmark(model="yolov8n.pt", data="coco8.yaml", imgsz=640, half=False, device=0) + benchmark(model="yolo11n.pt", data="coco8.yaml", imgsz=640, half=False, device=0) ``` [Benchmark Examples](../modes/benchmark.md){ .md-button } @@ -268,7 +268,7 @@ Explorer API can be used to explore datasets with advanced semantic, vector-simi from ultralytics import Explorer # create an Explorer object - exp = Explorer(data="coco8.yaml", model="yolov8n.pt") + exp = Explorer(data="coco8.yaml", model="yolo11n.pt") exp.create_embeddings_table() similar = exp.get_similar(img="https://ultralytics.com/images/bus.jpg", limit=10) @@ -287,7 +287,7 @@ Explorer API can be used to explore datasets with advanced semantic, vector-simi from ultralytics import Explorer # create an Explorer object - exp = Explorer(data="coco8.yaml", model="yolov8n.pt") + exp = Explorer(data="coco8.yaml", model="yolo11n.pt") exp.create_embeddings_table() similar = exp.get_similar(idx=1, limit=10) @@ -333,15 +333,15 @@ You can easily customize Trainers to support custom tasks or explore R&D ideas. ## FAQ -### How can I integrate YOLOv8 into my Python project for object detection? +### How can I integrate YOLO11 into my Python project for object detection? -Integrating Ultralytics YOLOv8 into your Python projects is simple. You can load a pre-trained model or train a new model from scratch. Here's how to get started: +Integrating Ultralytics YOLO11 into your Python projects is simple. You can load a pretrained model or train a new model from scratch. Here's how to get started: ```python from ultralytics import YOLO # Load a pretrained YOLO model -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") # Perform object detection on an image results = model("https://ultralytics.com/images/bus.jpg") @@ -353,9 +353,9 @@ for result in results: See more detailed examples in our [Predict Mode](../modes/predict.md) section. -### What are the different modes available in YOLOv8? +### What are the different modes available in YOLO11? -Ultralytics YOLOv8 provides various modes to cater to different [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) workflows. 
These include: +Ultralytics YOLO11 provides various modes to cater to different [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) workflows. These include: - **[Train](../modes/train.md)**: Train a model using custom datasets. - **[Val](../modes/val.md)**: Validate model performance on a validation set. @@ -366,15 +366,15 @@ Ultralytics YOLOv8 provides various modes to cater to different [machine learnin Each mode is designed to provide comprehensive functionalities for different stages of model development and deployment. -### How do I train a custom YOLOv8 model using my dataset? +### How do I train a custom YOLO11 model using my dataset? -To train a custom YOLOv8 model, you need to specify your dataset and other hyperparameters. Here's a quick example: +To train a custom YOLO11 model, you need to specify your dataset and other hyperparameters. Here's a quick example: ```python from ultralytics import YOLO # Load the YOLO model -model = YOLO("yolov8n.yaml") +model = YOLO("yolo11n.yaml") # Train the model with custom dataset model.train(data="path/to/your/dataset.yaml", epochs=10) ``` For more details on training and links to example usage, visit our [Train Mode](../modes/train.md) page. -### How do I export YOLOv8 models for deployment? +### How do I export YOLO11 models for deployment? -Exporting YOLOv8 models in a format suitable for deployment is straightforward with the `export` function. For example, you can export a model to ONNX format: +Exporting YOLO11 models in a format suitable for deployment is straightforward with the `export` function. For example, you can export a model to ONNX format: ```python from ultralytics import YOLO # Load the YOLO model -model = YOLO("yolov8n.pt") +model = YOLO("yolo11n.pt") # Export the model to ONNX format model.export(format="onnx") ``` For various export options, refer to the [Export Mode](../modes/export.md) documentation. -### Can I validate my YOLOv8 model on different datasets? +### Can I validate my YOLO11 model on different datasets? -Yes, validating YOLOv8 models on different datasets is possible. After training, you can use the validation mode to evaluate the performance: +Yes, validating YOLO11 models on different datasets is possible. After training, you can use the validation mode to evaluate the performance: ```python from ultralytics import YOLO -# Load a YOLOv8 model -model = YOLO("yolov8n.yaml") +# Load a YOLO11 model +model = YOLO("yolo11n.yaml") # Train the model model.train(data="coco8.yaml", epochs=5) diff --git a/docs/en/usage/simple-utilities.md index e40a2478ee..0a947adaf1 100644 --- a/docs/en/usage/simple-utilities.md +++ b/docs/en/usage/simple-utilities.md @@ -38,7 +38,7 @@ from ultralytics.data.annotator import auto_annotate auto_annotate( # (1)! 
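    # Sketch of the documented behavior: det_model proposes boxes for images under 'data',
    # sam_model converts them to masks, and YOLO-format label files are written to output_dir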
data="path/to/new/data", - det_model="yolov8n.pt", + det_model="yolo11n.pt", sam_model="mobile_sam.pt", device="cuda", output_dir="path/to/save_labels", @@ -93,7 +93,7 @@ from ultralytics.utils.plotting import Annotator from ultralytics import YOLO import cv2 -model = YOLO('yolov8n.pt') # Load pretrain or fine-tune model +model = YOLO('yolo11n.pt') # Load pretrain or fine-tune model # Process the image source = cv2.imread('path/to/image.jpg') @@ -468,7 +468,7 @@ import cv2 from ultralytics import YOLO from ultralytics.utils.plotting import Annotator -model = YOLO("yolov8s.pt") +model = YOLO("yolo11s.pt") names = model.names cap = cv2.VideoCapture("path/to/video/file.mp4") @@ -507,7 +507,7 @@ import cv2 from ultralytics import YOLO from ultralytics.utils.plotting import Annotator -model = YOLO("yolov8s.pt") +model = YOLO("yolo11s.pt") names = model.names cap = cv2.VideoCapture("path/to/video/file.mp4") @@ -598,7 +598,7 @@ from ultralytics.data.annotator import auto_annotate auto_annotate( data="path/to/new/data", - det_model="yolov8n.pt", + det_model="yolo11n.pt", sam_model="mobile_sam.pt", device="cuda", output_dir="path/to/save_labels",