Merge branch 'main' into base-resize

Ultralytics Assistant committed 1 month ago via GitHub
commit 1d085db5d9
41 changed files:

1. `.github/workflows/cla.yml` (2 changes)
2. `.github/workflows/docker.yaml` (3 changes)
3. `.github/workflows/docs.yml` (4 changes)
4. `.github/workflows/format.yml` (5 changes)
5. `.github/workflows/merge-main-into-prs.yml` (2 changes)
6. `.github/workflows/publish.yml` (11 changes)
7. `docker/Dockerfile-runner` (4 changes)
8. `docs/en/guides/conda-quickstart.md` (4 changes)
9. `docs/en/guides/distance-calculation.md` (33 changes)
10. `docs/en/guides/heatmaps.md` (1 change)
11. `docs/en/guides/model-deployment-practices.md` (2 changes)
12. `docs/en/guides/raspberry-pi.md` (192 changes)
13. `docs/en/guides/workouts-monitoring.md` (1 change)
14. `docs/en/help/CLA.md` (120 changes)
15. `docs/en/index.md` (80 changes)
16. `docs/en/integrations/comet.md` (20 changes)
17. `docs/en/integrations/weights-biases.md` (2 changes)
18. `docs/en/models/sam-2.md` (15 changes)
19. `docs/en/models/sam.md` (8 changes)
20. `examples/YOLOv8-OpenCV-int8-tflite-Python/main.py` (44 changes)
21. `examples/tutorial.ipynb` (2 changes)
22. `mkdocs.yml` (2 changes)
23. `ultralytics/__init__.py` (2 changes)
24. `ultralytics/cfg/__init__.py` (2 changes)
25. `ultralytics/data/utils.py` (2 changes)
26. `ultralytics/engine/exporter.py` (2 changes)
27. `ultralytics/models/sam/predict.py` (114 changes)
28. `ultralytics/models/yolo/classify/train.py` (3 changes)
29. `ultralytics/nn/autobackend.py` (2 changes)
30. `ultralytics/nn/tasks.py` (9 changes)
31. `ultralytics/solutions/analytics.py` (4 changes)
32. `ultralytics/solutions/distance_calculation.py` (87 changes)
33. `ultralytics/solutions/heatmap.py` (3 changes)
34. `ultralytics/solutions/object_counter.py` (4 changes)
35. `ultralytics/utils/__init__.py` (6 changes)
36. `ultralytics/utils/callbacks/tensorboard.py` (2 changes)
37. `ultralytics/utils/checks.py` (6 changes)
38. `ultralytics/utils/downloads.py` (2 changes)
39. `ultralytics/utils/metrics.py` (2 changes)
40. `ultralytics/utils/plotting.py` (31 changes)
41. `ultralytics/utils/torch_utils.py` (2 changes)

@@ -30,7 +30,7 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# Must be repository secret PAT
PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
PERSONAL_ACCESS_TOKEN: ${{ secrets._GITHUB_TOKEN }}
with:
path-to-signatures: "signatures/version1/cla.json"
path-to-document: "https://docs.ultralytics.com/help/CLA" # CLA document

@@ -182,8 +182,9 @@ jobs:
steps:
- name: Trigger Additional GitHub Actions
env:
GH_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
GH_TOKEN: ${{ secrets._GITHUB_TOKEN }}
run: |
sleep 60
gh workflow run deploy_cloud_run.yml \
--repo ultralytics/assistant \
--ref main

@@ -34,7 +34,7 @@ jobs:
uses: actions/checkout@v4
with:
repository: ${{ github.event.pull_request.head.repo.full_name || github.repository }}
token: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}
token: ${{ secrets.GITHUB_TOKEN }}
ref: ${{ github.head_ref || github.ref }}
fetch-depth: 0
- name: Set up Python
@@ -94,5 +94,5 @@ jobs:
else
LATEST_HASH=$(git rev-parse --short=7 HEAD)
git commit -m "Update Docs for 'ultralytics ${{ steps.check_pypi.outputs.version }} - $LATEST_HASH'"
git push https://${{ secrets.PERSONAL_ACCESS_TOKEN }}@github.com/ultralytics/docs.git gh-pages
git push https://${{ secrets._GITHUB_TOKEN }}@github.com/ultralytics/docs.git gh-pages
fi

@@ -20,15 +20,14 @@ jobs:
- name: Run Ultralytics Formatting
uses: ultralytics/actions@main
with:
token: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} # note GITHUB_TOKEN automatically generated
token: ${{ secrets._GITHUB_TOKEN }} # note GITHUB_TOKEN automatically generated
labels: true # autolabel issues and PRs
python: true # format Python code and docstrings
prettier: true # format YAML, JSON, Markdown and CSS
spelling: true # check spelling
links: false # check broken links
summary: true # print PR summary with GPT4o (requires 'openai_api_key')
openai_azure_api_key: ${{ secrets.OPENAI_AZURE_API_KEY }}
openai_azure_endpoint: ${{ secrets.OPENAI_AZURE_ENDPOINT }}
openai_api_key: ${{ secrets.OPENAI_API_KEY }}
first_issue_response: |
👋 Hello @${{ github.actor }}, thank you for your interest in Ultralytics 🚀! We recommend a visit to the [Docs](https://docs.ultralytics.com) for new users where you can find many [Python](https://docs.ultralytics.com/usage/python/) and [CLI](https://docs.ultralytics.com/usage/cli/) usage examples and where many of the most common questions may already be answered.

@@ -33,7 +33,7 @@ jobs:
import os
import time
g = Github("${{ secrets.PERSONAL_ACCESS_TOKEN }}")
g = Github("${{ secrets._GITHUB_TOKEN }}")
repo = g.get_repo("${{ github.repository }}")
# Fetch the default branch name

@@ -23,7 +23,7 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
with:
token: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} # use your PAT here
token: ${{ secrets._GITHUB_TOKEN }} # use your PAT here
- name: Git config
run: |
git config --global user.name "UltralyticsAssistant"
@@ -36,7 +36,7 @@ jobs:
- name: Install dependencies
run: |
python -m pip install --upgrade pip wheel
pip install requests build twine toml
pip install ultralytics-actions build twine toml
- name: Check PyPI version
shell: python
run: |
@@ -103,15 +103,14 @@ jobs:
if: (github.event_name == 'push' || github.event.inputs.pypi == 'true') && steps.check_pypi.outputs.increment == 'True'
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}
GITHUB_TOKEN: ${{ secrets._GITHUB_TOKEN }}
CURRENT_TAG: ${{ steps.check_pypi.outputs.current_tag }}
PREVIOUS_TAG: ${{ steps.check_pypi.outputs.previous_tag }}
run: |
curl -s "https://raw.githubusercontent.com/ultralytics/actions/main/utils/summarize_release.py" | python -
run: ultralytics-actions-summarize-release
shell: bash
- name: Extract PR Details
env:
GH_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}
GH_TOKEN: ${{ secrets._GITHUB_TOKEN }}
run: |
# Check if the event is a pull request or pull_request_target
if [ "${{ github.event_name }}" = "pull_request" ] || [ "${{ github.event_name }}" = "pull_request_target" ]; then

@@ -17,8 +17,8 @@ ENV PYTHONUNBUFFERED=1 \
WORKDIR /actions-runner
# Download and unpack the latest runner from https://github.com/actions/runner
RUN FILENAME=actions-runner-linux-x64-2.317.0.tar.gz && \
curl -o $FILENAME -L https://github.com/actions/runner/releases/download/v2.317.0/$FILENAME && \
RUN FILENAME=actions-runner-linux-x64-2.320.0.tar.gz && \
curl -o $FILENAME -L https://github.com/actions/runner/releases/download/v2.320.0/$FILENAME && \
tar xzf $FILENAME && \
rm $FILENAME

@@ -37,7 +37,7 @@ This guide provides a comprehensive introduction to setting up a Conda environme
First, let's create a new Conda environment. Open your terminal and run the following command:
```bash
conda create --name ultralytics-env python=3.8 -y
conda create --name ultralytics-env python=3.11 -y
```
Activate the new environment:
@@ -135,7 +135,7 @@ Congratulations! You have successfully set up a Conda environment, installed the
Setting up a Conda environment for Ultralytics projects is straightforward and ensures smooth package management. First, create a new Conda environment using the following command:
```bash
conda create --name ultralytics-env python=3.8 -y
conda create --name ultralytics-env python=3.11 -y
```
Then, activate the new environment with:

@@ -43,12 +43,9 @@ Measuring the gap between two objects is known as distance calculation within a
```python
import cv2
from ultralytics import YOLO, solutions
from ultralytics import solutions
model = YOLO("yolo11n.pt")
names = model.model.names
cap = cv2.VideoCapture("path/to/video/file.mp4")
cap = cv2.VideoCapture("Path/to/video/file.mp4")
assert cap.isOpened(), "Error reading video file"
w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
@@ -56,16 +53,14 @@ Measuring the gap between two objects is known as distance calculation within a
video_writer = cv2.VideoWriter("distance_calculation.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
# Init distance-calculation obj
dist_obj = solutions.DistanceCalculation(names=names, view_img=True)
distance = solutions.DistanceCalculation(model="yolo11n.pt", show=True)
while cap.isOpened():
success, im0 = cap.read()
if not success:
print("Video frame is empty or video processing has been successfully completed.")
break
tracks = model.track(im0, persist=True, show=False)
im0 = dist_obj.start_process(im0, tracks)
im0 = distance.calculate(im0)
video_writer.write(im0)
cap.release()
@@ -84,13 +79,11 @@ Measuring the gap between two objects is known as distance calculation within a
### Arguments `DistanceCalculation()`
| `Name` | `Type` | `Default` | Description |
| ---------------- | ------- | --------------- | --------------------------------------------------------- |
| `names` | `dict` | `None` | Dictionary of classes names. |
| `view_img` | `bool` | `False` | Flag to indicate if the video stream should be displayed. |
| `line_thickness` | `int` | `2` | Thickness of the lines drawn on the image. |
| `line_color` | `tuple` | `(255, 255, 0)` | Color of the lines drawn on the image (BGR format). |
| `centroid_color` | `tuple` | `(255, 0, 255)` | Color of the centroids drawn (BGR format). |
| `Name` | `Type` | `Default` | Description |
| ------------ | ------ | --------- | ---------------------------------------------------- |
| `model` | `str` | `None` | Path to Ultralytics YOLO Model File |
| `line_width` | `int` | `2` | Line thickness for bounding boxes. |
| `show` | `bool` | `False` | Flag to control whether to display the video stream. |
### Arguments `model.track`
@@ -122,10 +115,8 @@ To delete points drawn during distance calculation with Ultralytics YOLO11, you
The key arguments for initializing the `DistanceCalculation` class in Ultralytics YOLO11 include:
- `names`: Dictionary mapping class indices to class names.
- `view_img`: Flag to indicate if the video stream should be displayed.
- `line_thickness`: Thickness of the lines drawn on the image.
- `line_color`: Color of the lines drawn on the image (BGR format).
- `centroid_color`: Color of the centroids (BGR format).
- `model`: Model file path.
- `show`: Flag to indicate if the video stream should be displayed.
- `line_width`: Thickness of bounding box and the lines drawn on the image.
For an exhaustive list and default values, see the [arguments of DistanceCalculation](#arguments-distancecalculation).
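For quick reference, the snippet below is a minimal initialization sketch based on the arguments listed above; the model path is a placeholder and the per-frame call mirrors the example earlier in this hunk, so treat it as illustrative rather than the full guide example.
```python
from ultralytics import solutions

# Minimal sketch: initialize the distance-calculation solution with the new-style arguments
distance = solutions.DistanceCalculation(
    model="yolo11n.pt",  # path to an Ultralytics YOLO model file
    line_width=2,        # thickness of bounding boxes and drawn lines
    show=True,           # display the annotated video stream
)

# Per-frame usage, as in the guide's example: annotated = distance.calculate(frame)
```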

@@ -222,6 +222,7 @@ A heatmap generated with [Ultralytics YOLO11](https://github.com/ultralytics/ult
| Name | Type | Default | Description |
| ------------ | ------ | ------------------ | ----------------------------------------------------------------- |
| `model` | `str` | `None` | Path to Ultralytics YOLO Model File |
| `colormap` | `int` | `cv2.COLORMAP_JET` | Colormap to use for the heatmap. |
| `show` | `bool` | `False` | Whether to display the image with the heatmap overlay. |
| `show_in` | `bool` | `True` | Whether to display the count of objects entering the region. |
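As an illustrative sketch of these arguments in use (assuming the `solutions.Heatmap` class this guide documents; the model path is a placeholder):
```python
import cv2

from ultralytics import solutions

# Minimal sketch: initialize the heatmap solution with the arguments from the table above
heatmap = solutions.Heatmap(
    model="yolo11n.pt",         # path to an Ultralytics YOLO model file
    colormap=cv2.COLORMAP_JET,  # OpenCV colormap used for the heatmap overlay
    show=True,                  # display the image with the heatmap overlay
    show_in=True,               # show the count of objects entering the region
)
```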

@@ -106,7 +106,7 @@ When deploying [machine learning](https://www.ultralytics.com/glossary/machine-l
- **Profile the Inference Pipeline:** Identifying bottlenecks in the inference pipeline can help pinpoint the source of delays. Use profiling tools to analyze each step of the inference process, identifying and addressing any stages that cause significant delays, such as inefficient layers or data transfer issues.
- **Use Appropriate Precision:** Using higher precision than necessary can slow down inference times. Experiment with using lower precision, such as FP16 (half-precision), instead of FP32 (full-precision). While FP16 can reduce inference time, also keep in mind that it can impact model accuracy.
If you are facing this issue while deploying YOLO11, consider that YOLO11 offers [various model sizes](../models/yolov8.md), such as YOLO11n (nano) for devices with lower memory capacity and YOLOv8x (extra-large) for more powerful GPUs. Choosing the right model variant for your hardware can help balance memory usage and processing time.
If you are facing this issue while deploying YOLO11, consider that YOLO11 offers [various model sizes](../models/yolo11.md), such as YOLO11n (nano) for devices with lower memory capacity and YOLO11x (extra-large) for more powerful GPUs. Choosing the right model variant for your hardware can help balance memory usage and processing time.
Also keep in mind that the size of the input images directly impacts memory usage and processing time. Lower resolutions reduce memory usage and speed up inference, while higher resolutions improve accuracy but require more memory and processing power.
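As a small illustration of the precision advice above, lower precision can be requested at export time; a minimal sketch assuming the standard Ultralytics export API, with the format choice and model path as placeholders:
```python
from ultralytics import YOLO

# Sketch: request FP16 (half-precision) weights at export time to trade a little accuracy for speed
model = YOLO("yolo11n.pt")
model.export(format="onnx", half=True)  # half=True applies only where the target format supports FP16
```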

@@ -1,12 +1,12 @@
---
comments: true
description: Learn how to deploy Ultralytics YOLOv8 on Raspberry Pi with our comprehensive guide. Get performance benchmarks, setup instructions, and best practices.
keywords: Ultralytics, YOLOv8, Raspberry Pi, setup, guide, benchmarks, computer vision, object detection, NCNN, Docker, camera modules
description: Learn how to deploy Ultralytics YOLO11 on Raspberry Pi with our comprehensive guide. Get performance benchmarks, setup instructions, and best practices.
keywords: Ultralytics, YOLO11, Raspberry Pi, setup, guide, benchmarks, computer vision, object detection, NCNN, Docker, camera modules
---
# Quick Start Guide: Raspberry Pi with Ultralytics YOLOv8
# Quick Start Guide: Raspberry Pi with Ultralytics YOLO11
This comprehensive guide provides a detailed walkthrough for deploying Ultralytics YOLOv8 on [Raspberry Pi](https://www.raspberrypi.com/) devices. Additionally, it showcases performance benchmarks to demonstrate the capabilities of YOLOv8 on these small and powerful devices.
This comprehensive guide provides a detailed walkthrough for deploying Ultralytics YOLO11 on [Raspberry Pi](https://www.raspberrypi.com/) devices. Additionally, it showcases performance benchmarks to demonstrate the capabilities of YOLO11 on these small and powerful devices.
<p align="center">
<br>
@@ -56,7 +56,7 @@ There are two ways of setting up Ultralytics package on Raspberry Pi to build yo
### Start with Docker
The fastest way to get started with Ultralytics YOLOv8 on Raspberry Pi is to run with pre-built docker image for Raspberry Pi.
The fastest way to get started with Ultralytics YOLO11 on Raspberry Pi is to run with a pre-built Docker image for Raspberry Pi.
Execute the command below to pull the Docker container and run it on Raspberry Pi. This is based on the [arm64v8/debian](https://hub.docker.com/r/arm64v8/debian) docker image, which contains Debian 12 (Bookworm) in a Python3 environment.
@@ -98,7 +98,7 @@ Out of all the model export formats supported by Ultralytics, [NCNN](https://doc
## Convert Model to NCNN and Run Inference
The YOLOv8n model in PyTorch format is converted to NCNN to run inference with the exported model.
The YOLO11n model in PyTorch format is converted to NCNN to run inference with the exported model.
!!! example
@@ -107,14 +107,14 @@ The YOLOv8n model in PyTorch format is converted to NCNN to run inference with t
```python
from ultralytics import YOLO
# Load a YOLOv8n PyTorch model
model = YOLO("yolov8n.pt")
# Load a YOLO11n PyTorch model
model = YOLO("yolo11n.pt")
# Export the model to NCNN format
model.export(format="ncnn") # creates 'yolov8n_ncnn_model'
model.export(format="ncnn") # creates 'yolo11n_ncnn_model'
# Load the exported NCNN model
ncnn_model = YOLO("yolov8n_ncnn_model")
ncnn_model = YOLO("yolo11n_ncnn_model")
# Run inference
results = ncnn_model("https://ultralytics.com/images/bus.jpg")
@@ -123,102 +123,62 @@ The YOLOv8n model in PyTorch format is converted to NCNN to run inference with t
=== "CLI"
```bash
# Export a YOLOv8n PyTorch model to NCNN format
yolo export model=yolov8n.pt format=ncnn # creates 'yolov8n_ncnn_model'
# Export a YOLO11n PyTorch model to NCNN format
yolo export model=yolo11n.pt format=ncnn # creates 'yolo11n_ncnn_model'
# Run inference with the exported model
yolo predict model='yolov8n_ncnn_model' source='https://ultralytics.com/images/bus.jpg'
yolo predict model='yolo11n_ncnn_model' source='https://ultralytics.com/images/bus.jpg'
```
!!! tip
For more details about supported export options, visit the [Ultralytics documentation page on deployment options](https://docs.ultralytics.com/guides/model-deployment-options/).
## Raspberry Pi 5 vs Raspberry Pi 4 YOLOv8 Benchmarks
## Raspberry Pi 5 YOLO11 Benchmarks
YOLOv8 benchmarks were run by the Ultralytics team on nine different model formats measuring speed and [accuracy](https://www.ultralytics.com/glossary/accuracy): PyTorch, TorchScript, ONNX, OpenVINO, TF SavedModel, TF GraphDef, TF Lite, PaddlePaddle, NCNN. Benchmarks were run on both Raspberry Pi 5 and Raspberry Pi 4 at FP32 [precision](https://www.ultralytics.com/glossary/precision) with default input image size of 640.
!!! note
We have only included benchmarks for YOLOv8n and YOLOv8s models because other models sizes are too big to run on the Raspberry Pis and does not offer decent performance.
YOLO11 benchmarks were run by the Ultralytics team on nine different model formats measuring speed and [accuracy](https://www.ultralytics.com/glossary/accuracy): PyTorch, TorchScript, ONNX, OpenVINO, TF SavedModel, TF GraphDef, TF Lite, PaddlePaddle, NCNN. Benchmarks were run on a Raspberry Pi 5 at FP32 [precision](https://www.ultralytics.com/glossary/precision) with default input image size of 640.
### Comparison Chart
!!! tip "Performance"
=== "YOLOv8n"
<div style="text-align: center;">
<img width="800" src="https://github.com/ultralytics/docs/releases/download/0/yolov8n-benchmark-comparison.avif" alt="NVIDIA Jetson Ecosystem">
</div>
=== "YOLOv8s"
We have only included benchmarks for YOLO11n and YOLO11s models because other model sizes are too big to run on the Raspberry Pi and do not offer decent performance.
<div style="text-align: center;">
<img width="800" src="https://github.com/ultralytics/docs/releases/download/0/yolov8s-performance-comparison.avif" alt="NVIDIA Jetson Ecosystem">
</div>
<div style="text-align: center;">
<img width="800" src="https://github.com/ultralytics/docs/releases/download/0/rpi-yolo11-benchmarks.avif" alt="YOLO11 benchmarks on RPi 5">
</div>
### Detailed Comparison Table
The below table represents the benchmark results for two different models (YOLOv8n, YOLOv8s) across nine different formats (PyTorch, TorchScript, ONNX, OpenVINO, TF SavedModel, TF GraphDef, TF Lite, PaddlePaddle, NCNN), running on both Raspberry Pi 4 and Raspberry Pi 5, giving us the status, size, mAP50-95(B) metric, and inference time for each combination.
The below table represents the benchmark results for two different models (YOLO11n, YOLO11s) across nine different formats (PyTorch, TorchScript, ONNX, OpenVINO, TF SavedModel, TF GraphDef, TF Lite, PaddlePaddle, NCNN), running on a Raspberry Pi 5, giving us the status, size, mAP50-95(B) metric, and inference time for each combination.
!!! tip "Performance"
=== "YOLOv8n on RPi5"
| Format | Status | Size on disk (MB) | mAP50-95(B) | Inference time (ms/im) |
|---------------|--------|-------------------|-------------|------------------------|
| PyTorch | ✅ | 6.2 | 0.6381 | 508.61 |
| TorchScript | ✅ | 12.4 | 0.6092 | 558.38 |
| ONNX | ✅ | 12.2 | 0.6092 | 198.69 |
| OpenVINO | ✅ | 12.3 | 0.6092 | 704.70 |
| TF SavedModel | ✅ | 30.6 | 0.6092 | 367.64 |
| TF GraphDef | ✅ | 12.3 | 0.6092 | 473.22 |
| TF Lite | ✅ | 12.3 | 0.6092 | 380.67 |
| PaddlePaddle | ✅ | 24.4 | 0.6092 | 703.51 |
| NCNN | ✅ | 12.2 | 0.6034 | 94.28 |
=== "YOLOv8s on RPi5"
| Format | Status | Size on disk (MB) | mAP50-95(B) | Inference time (ms/im) |
|---------------|--------|-------------------|-------------|------------------------|
| PyTorch | ✅ | 21.5 | 0.6967 | 969.49 |
| TorchScript | ✅ | 43.0 | 0.7136 | 1110.04 |
| ONNX | ✅ | 42.8 | 0.7136 | 451.37 |
| OpenVINO | ✅ | 42.9 | 0.7136 | 873.51 |
| TF SavedModel | ✅ | 107.0 | 0.7136 | 658.15 |
| TF GraphDef | ✅ | 42.8 | 0.7136 | 946.01 |
| TF Lite | ✅ | 42.8 | 0.7136 | 1013.27 |
| PaddlePaddle | ✅ | 85.5 | 0.7136 | 1560.23 |
| NCNN | ✅ | 42.7 | 0.7204 | 211.26 |
=== "YOLOv8n on RPi4"
=== "YOLO11n"
| Format | Status | Size on disk (MB) | mAP50-95(B) | Inference time (ms/im) |
|---------------|--------|-------------------|-------------|------------------------|
| PyTorch | ✅ | 6.2 | 0.6381 | 1068.42 |
| TorchScript | ✅ | 12.4 | 0.6092 | 1248.01 |
| ONNX | ✅ | 12.2 | 0.6092 | 560.04 |
| OpenVINO | ✅ | 12.3 | 0.6092 | 534.93 |
| TF SavedModel | ✅ | 30.6 | 0.6092 | 816.50 |
| TF GraphDef | ✅ | 12.3 | 0.6092 | 1007.57 |
| TF Lite | ✅ | 12.3 | 0.6092 | 950.29 |
| PaddlePaddle | ✅ | 24.4 | 0.6092 | 1507.75 |
| NCNN | ✅ | 12.2 | 0.6092 | 414.73 |
=== "YOLOv8s on RPi4"
| PyTorch | ✅ | 5.4 | 0.61 | 524.828 |
| TorchScript | ✅ | 10.5 | 0.6082 | 666.874 |
| ONNX | ✅ | 10.2 | 0.6082 | 181.818 |
| OpenVINO | ✅ | 10.4 | 0.6082 | 530.224 |
| TF SavedModel | ✅ | 25.8 | 0.6082 | 405.964 |
| TF GraphDef | ✅ | 10.3 | 0.6082 | 473.558 |
| TF Lite | ✅ | 10.3 | 0.6082 | 324.158 |
| PaddlePaddle | ✅ | 20.4 | 0.6082 | 644.312 |
| NCNN | ✅ | 10.2 | 0.6106 | 93.938 |
=== "YOLO11s"
| Format | Status | Size on disk (MB) | mAP50-95(B) | Inference time (ms/im) |
|---------------|--------|-------------------|-------------|------------------------|
| PyTorch | ✅ | 21.5 | 0.6967 | 2589.58 |
| TorchScript | ✅ | 43.0 | 0.7136 | 2901.33 |
| ONNX | ✅ | 42.8 | 0.7136 | 1436.33 |
| OpenVINO | ✅ | 42.9 | 0.7136 | 1225.19 |
| TF SavedModel | ✅ | 107.0 | 0.7136 | 1770.95 |
| TF GraphDef | ✅ | 42.8 | 0.7136 | 2146.66 |
| TF Lite | ✅ | 42.8 | 0.7136 | 2945.03 |
| PaddlePaddle | ✅ | 85.5 | 0.7136 | 3962.62 |
| NCNN | ✅ | 42.7 | 0.7136 | 1042.39 |
| PyTorch | ✅ | 18.4 | 0.7526 | 1226.426 |
| TorchScript | ✅ | 36.5 | 0.7416 | 1507.95 |
| ONNX | ✅ | 36.3 | 0.7416 | 415.24 |
| OpenVINO | ✅ | 36.4 | 0.7416 | 1167.102 |
| TF SavedModel | ✅ | 91.1 | 0.7416 | 776.14 |
| TF GraphDef | ✅ | 36.4 | 0.7416 | 1014.396 |
| TF Lite | ✅ | 36.4 | 0.7416 | 845.934 |
| PaddlePaddle | ✅ | 72.5 | 0.7416 | 1567.824 |
| NCNN | ✅ | 36.2 | 0.7419 | 197.358 |
## Reproduce Our Results
@@ -231,25 +191,25 @@ To reproduce the above Ultralytics benchmarks on all [export formats](../modes/e
```python
from ultralytics import YOLO
# Load a YOLOv8n PyTorch model
model = YOLO("yolov8n.pt")
# Load a YOLO11n PyTorch model
model = YOLO("yolo11n.pt")
# Benchmark YOLOv8n speed and accuracy on the COCO8 dataset for all all export formats
# Benchmark YOLO11n speed and accuracy on the COCO8 dataset for all export formats
results = model.benchmarks(data="coco8.yaml", imgsz=640)
```
=== "CLI"
```bash
# Benchmark YOLOv8n speed and accuracy on the COCO8 dataset for all all export formats
yolo benchmark model=yolov8n.pt data=coco8.yaml imgsz=640
# Benchmark YOLO11n speed and accuracy on the COCO8 dataset for all export formats
yolo benchmark model=yolo11n.pt data=coco8.yaml imgsz=640
```
Note that benchmarking results might vary based on the exact hardware and software configuration of a system, as well as the current workload of the system at the time the benchmarks are run. For the most reliable results use a dataset with a large number of images, i.e. `data='coco8.yaml'` (4 val images), or `data='coco.yaml'` (5000 val images).
## Use Raspberry Pi Camera
When using Raspberry Pi for Computer Vision projects, it can be essentially to grab real-time video feeds to perform inference. The onboard MIPI CSI connector on the Raspberry Pi allows you to connect official Raspberry PI camera modules. In this guide, we have used a [Raspberry Pi Camera Module 3](https://www.raspberrypi.com/products/camera-module-3/) to grab the video feeds and perform inference using YOLOv8 models.
When using Raspberry Pi for Computer Vision projects, it can be essential to grab real-time video feeds to perform inference. The onboard MIPI CSI connector on the Raspberry Pi allows you to connect official Raspberry Pi camera modules. In this guide, we have used a [Raspberry Pi Camera Module 3](https://www.raspberrypi.com/products/camera-module-3/) to grab the video feeds and perform inference using YOLO11 models.
!!! tip
@@ -273,13 +233,13 @@ rpicam-hello
### Inference with Camera
There are 2 methods of using the Raspberry Pi Camera to inference YOLOv8 models.
There are 2 methods of using the Raspberry Pi Camera to run inference with YOLO11 models.
!!! usage
=== "Method 1"
We can use `picamera2`which comes pre-installed with Raspberry Pi OS to access the camera and inference YOLOv8 models.
We can use `picamera2`, which comes pre-installed with Raspberry Pi OS, to access the camera and run inference with YOLO11 models.
!!! example
@@ -299,14 +259,14 @@ There are 2 methods of using the Raspberry Pi Camera to inference YOLOv8 models.
picam2.configure("preview")
picam2.start()
# Load the YOLOv8 model
model = YOLO("yolov8n.pt")
# Load the YOLO11 model
model = YOLO("yolo11n.pt")
while True:
# Capture frame-by-frame
frame = picam2.capture_array()
# Run YOLOv8 inference on the frame
# Run YOLO11 inference on the frame
results = model(frame)
# Visualize the results on the frame
@@ -340,8 +300,8 @@ There are 2 methods of using the Raspberry Pi Camera to inference YOLOv8 models.
```python
from ultralytics import YOLO
# Load a YOLOv8n PyTorch model
model = YOLO("yolov8n.pt")
# Load a YOLO11n PyTorch model
model = YOLO("yolo11n.pt")
# Run inference
results = model("tcp://127.0.0.1:8888")
@@ -350,7 +310,7 @@ There are 2 methods of using the Raspberry Pi Camera to inference YOLOv8 models.
=== "CLI"
```bash
yolo predict model=yolov8n.pt source="tcp://127.0.0.1:8888"
yolo predict model=yolo11n.pt source="tcp://127.0.0.1:8888"
```
!!! tip
@@ -359,7 +319,7 @@ There are 2 methods of using the Raspberry Pi Camera to inference YOLOv8 models.
## Best Practices when using Raspberry Pi
There are a couple of best practices to follow in order to enable maximum performance on Raspberry Pis running YOLOv8.
There are a couple of best practices to follow in order to enable maximum performance on Raspberry Pis running YOLO11.
1. Use an SSD
@@ -371,7 +331,7 @@ There are a couple of best practices to follow in order to enable maximum perfor
## Next Steps
Congratulations on successfully setting up YOLO on your Raspberry Pi! For further learning and support, visit [Ultralytics YOLOv8 Docs](../index.md) and [Kashmir World Foundation](https://www.kashmirworldfoundation.org/).
Congratulations on successfully setting up YOLO on your Raspberry Pi! For further learning and support, visit [Ultralytics YOLO11 Docs](../index.md) and [Kashmir World Foundation](https://www.kashmirworldfoundation.org/).
## Acknowledgements and Citations
@@ -381,9 +341,9 @@ For more information about Kashmir World Foundation's activities, you can visit
## FAQ
### How do I set up Ultralytics YOLOv8 on a Raspberry Pi without using Docker?
### How do I set up Ultralytics YOLO11 on a Raspberry Pi without using Docker?
To set up Ultralytics YOLOv8 on a Raspberry Pi without Docker, follow these steps:
To set up Ultralytics YOLO11 on a Raspberry Pi without Docker, follow these steps:
1. Update the package list and install `pip`:
```bash
@@ -402,13 +362,13 @@ To set up Ultralytics YOLOv8 on a Raspberry Pi without Docker, follow these step
For detailed instructions, refer to the [Start without Docker](#start-without-docker) section.
### Why should I use Ultralytics YOLOv8's NCNN format on Raspberry Pi for AI tasks?
### Why should I use Ultralytics YOLO11's NCNN format on Raspberry Pi for AI tasks?
Ultralytics YOLOv8's NCNN format is highly optimized for mobile and embedded platforms, making it ideal for running AI tasks on Raspberry Pi devices. NCNN maximizes inference performance by leveraging ARM architecture, providing faster and more efficient processing compared to other formats. For more details on supported export options, visit the [Ultralytics documentation page on deployment options](../modes/export.md).
Ultralytics YOLO11's NCNN format is highly optimized for mobile and embedded platforms, making it ideal for running AI tasks on Raspberry Pi devices. NCNN maximizes inference performance by leveraging ARM architecture, providing faster and more efficient processing compared to other formats. For more details on supported export options, visit the [Ultralytics documentation page on deployment options](../modes/export.md).
### How can I convert a YOLOv8 model to NCNN format for use on Raspberry Pi?
### How can I convert a YOLO11 model to NCNN format for use on Raspberry Pi?
You can convert a PyTorch YOLOv8 model to NCNN format using either Python or CLI commands:
You can convert a PyTorch YOLO11 model to NCNN format using either Python or CLI commands:
!!! example
@@ -417,14 +377,14 @@ You can convert a PyTorch YOLOv8 model to NCNN format using either Python or CLI
```python
from ultralytics import YOLO
# Load a YOLOv8n PyTorch model
model = YOLO("yolov8n.pt")
# Load a YOLO11n PyTorch model
model = YOLO("yolo11n.pt")
# Export the model to NCNN format
model.export(format="ncnn") # creates 'yolov8n_ncnn_model'
model.export(format="ncnn") # creates 'yolo11n_ncnn_model'
# Load the exported NCNN model
ncnn_model = YOLO("yolov8n_ncnn_model")
ncnn_model = YOLO("yolo11n_ncnn_model")
# Run inference
results = ncnn_model("https://ultralytics.com/images/bus.jpg")
@@ -433,16 +393,16 @@ You can convert a PyTorch YOLOv8 model to NCNN format using either Python or CLI
=== "CLI"
```bash
# Export a YOLOv8n PyTorch model to NCNN format
yolo export model=yolov8n.pt format=ncnn # creates 'yolov8n_ncnn_model'
# Export a YOLO11n PyTorch model to NCNN format
yolo export model=yolo11n.pt format=ncnn # creates 'yolo11n_ncnn_model'
# Run inference with the exported model
yolo predict model='yolov8n_ncnn_model' source='https://ultralytics.com/images/bus.jpg'
yolo predict model='yolo11n_ncnn_model' source='https://ultralytics.com/images/bus.jpg'
```
For more details, see the [Use NCNN on Raspberry Pi](#use-ncnn-on-raspberry-pi) section.
### What are the hardware differences between Raspberry Pi 4 and Raspberry Pi 5 relevant to running YOLOv8?
### What are the hardware differences between Raspberry Pi 4 and Raspberry Pi 5 relevant to running YOLO11?
Key differences include:
@@ -450,11 +410,11 @@ Key differences include:
- **Max CPU Frequency**: Raspberry Pi 4 has a max frequency of 1.8GHz, whereas Raspberry Pi 5 reaches 2.4GHz.
- **Memory**: Raspberry Pi 4 offers up to 8GB of LPDDR4-3200 SDRAM, while Raspberry Pi 5 features LPDDR4X-4267 SDRAM, available in 4GB and 8GB variants.
These enhancements contribute to better performance benchmarks for YOLOv8 models on Raspberry Pi 5 compared to Raspberry Pi 4. Refer to the [Raspberry Pi Series Comparison](#raspberry-pi-series-comparison) table for more details.
These enhancements contribute to better performance benchmarks for YOLO11 models on Raspberry Pi 5 compared to Raspberry Pi 4. Refer to the [Raspberry Pi Series Comparison](#raspberry-pi-series-comparison) table for more details.
### How can I set up a Raspberry Pi Camera Module to work with Ultralytics YOLOv8?
### How can I set up a Raspberry Pi Camera Module to work with Ultralytics YOLO11?
There are two methods to set up a Raspberry Pi Camera for YOLOv8 inference:
There are two methods to set up a Raspberry Pi Camera for YOLO11 inference:
1. **Using `picamera2`**:
@@ -471,7 +431,7 @@ There are two methods to set up a Raspberry Pi Camera for YOLOv8 inference:
picam2.configure("preview")
picam2.start()
model = YOLO("yolov8n.pt")
model = YOLO("yolo11n.pt")
while True:
frame = picam2.capture_array()
@@ -494,7 +454,7 @@ There are two methods to set up a Raspberry Pi Camera for YOLOv8 inference:
```python
from ultralytics import YOLO
model = YOLO("yolov8n.pt")
model = YOLO("yolo11n.pt")
results = model("tcp://127.0.0.1:8888")
```

@@ -106,6 +106,7 @@ Monitoring workouts through pose estimation with [Ultralytics YOLO11](https://gi
| `show` | `bool` | `False` | Flag to display the image. |
| `up_angle` | `float` | `145.0` | Angle threshold for the 'up' pose. |
| `down_angle` | `float` | `90.0` | Angle threshold for the 'down' pose. |
| `model` | `str` | `None` | Path to Ultralytics YOLO Pose Model File |
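For context, a minimal initialization sketch using these arguments; it assumes the `solutions.AIGym` class this guide covers, and the pose model path is a placeholder.
```python
from ultralytics import solutions

# Minimal sketch: initialize the workouts-monitoring solution with the arguments listed above
gym = solutions.AIGym(
    model="yolo11n-pose.pt",  # path to an Ultralytics YOLO pose model file
    show=True,                # display the annotated frames
    up_angle=145.0,           # angle threshold for the 'up' pose
    down_angle=90.0,          # angle threshold for the 'down' pose
)
```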
### Arguments `model.predict`

@@ -5,46 +5,126 @@ keywords: Ultralytics, Contributor License Agreement, open source, contributions
# Ultralytics Individual Contributor License Agreement
Thank you for your interest in contributing to open source software projects (“Projects”) made available by Ultralytics Inc. (“Ultralytics”). This Individual Contributor License Agreement (“Agreement”) sets out the terms governing any source code, object code, bug fixes, configuration changes, tools, specifications, documentation, data, materials, feedback, information or other works of authorship that you submit or have submitted, in any form and in any manner, to Ultralytics in respect of any Projects (collectively “Contributions”). If you have any questions respecting this Agreement, please contact hello@ultralytics.com.
Thank you for your interest in contributing to software projects managed by Ultralytics Inc. ("**Ultralytics**", "**We**" or "**Us**"). This Contributor License Agreement ("**Agreement**") sets out the rights granted by contributors ("**You**" or "**Your**") to Us and the terms governing any contributions as defined in Section 1. This license is for your protection as a Contributor as well as the protection of Ultralytics; it does not change your rights to use your own Contributions for any other purpose.
You agree that the following terms apply to all of your past, present and future Contributions. Except for the licenses granted in this Agreement, you retain all of your right, title and interest in and to your Contributions.
By accepting and agreeing to these terms and conditions You accept and agree to the following terms and conditions for Your past, present and future Contributions submitted to Ultralytics. Except for the license granted herein to Ultralytics and recipients of software distributed by Ultralytics, You reserve all right, title, and interest in and to Your Contributions.
**Copyright License.** You hereby grant, and agree to grant, to Ultralytics a non-exclusive, perpetual, irrevocable, worldwide, fully-paid, royalty-free, transferable copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, and distribute your Contributions and such derivative works, with the right to sublicense the foregoing rights through multiple tiers of sublicensees.
If you have any questions respecting this Agreement, please contact hello@ultralytics.com.
**Patent License.** You hereby grant, and agree to grant, to Ultralytics a non-exclusive, perpetual, irrevocable, worldwide, fully-paid, royalty-free, transferable patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer your Contributions, where such license applies only to those patent claims licensable by you that are necessarily infringed by your Contributions alone or by combination of your Contributions with the Project to which such Contributions were submitted, with the right to sublicense the foregoing rights through multiple tiers of sublicensees.
## 1. Definitions
**Moral Rights.** To the fullest extent permitted under applicable law, you hereby waive, and agree not to assert, all of your “moral rights” in or relating to your Contributions for the benefit of Ultralytics, its assigns, and their respective direct and indirect sublicensees.
### 1.1 "You" or "Your"
**Third Party Content/Rights.** If your Contribution includes or is based on any source code, object code, bug fixes, configuration changes, tools, specifications, documentation, data, materials, feedback, information or other works of authorship that were not authored by you (“Third Party Content”) or if you are aware of any third party intellectual property or proprietary rights associated with your Contribution (“Third Party Rights”), then you agree to include with the submission of your Contribution full details respecting such Third Party Content and Third Party Rights, including, without limitation, identification of which aspects of your Contribution contain Third Party Content or are associated with Third Party Rights, the owner/author of the Third Party Content and Third Party Rights, where you obtained the Third Party Content, and any applicable third party license terms or restrictions respecting the Third Party Content and Third Party Rights. For greater certainty, the foregoing obligations respecting the identification of Third Party Content and Third Party Rights do not apply to any portion of a Project that is incorporated into your Contribution to that same Project.
Shall mean the individual who submits a Contribution to Ultralytics or the legal entity authorized by the copyright owner that is making this Agreement with Ultralytics. For legal entities, the entity making a Contribution and all other entities that control, are controlled by, or are under common control with that entity are considered to be a single Contributor. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
**Representations.** You represent that, other than the Third Party Content and Third Party Rights identified by you in accordance with this Agreement, you are the sole author of your Contributions and are legally entitled to grant the foregoing licenses and waivers in respect of your Contributions. If your Contributions were created in the course of your employment with your past or present employer(s), you represent that such employer(s) has authorized you to make your Contributions on behalf of such employer(s) or such employer(s) has waived all of their right, title or interest in or to your Contributions.
### 1.2 "Contribution"
**Disclaimer.** To the fullest extent permitted under applicable law, your Contributions are provided on an "asis" basis, without any warranties or conditions, express or implied, including, without limitation, any implied warranties or conditions of non-infringement, merchantability or fitness for a particular purpose. You are not required to provide support for your Contributions, except to the extent you desire to provide support.
Shall mean any original work of authorship, including but not limited to source code, object code, bug fixes, configuration changes, tools, specifications, documentation, data, materials, feedback, information, or any other works of authorship, that is intentionally submitted by You to Ultralytics, in any form and in any manner, for inclusion in, or documentation of, any of the projects managed by Ultralytics (the "**Work**"). This includes any modifications or additions to existing works that are submitted for the purpose of contributing to a Project and improving the Work.
**No Obligation.** You acknowledge that Ultralytics is under no obligation to use or incorporate your Contributions into any of the Projects. The decision to use or incorporate your Contributions into any of the Projects will be made at the sole discretion of Ultralytics or its authorized delegates.
### 1.3 "Copyright"
**Disputes.** This Agreement shall be governed by and construed in accordance with the laws of the State of New York, United States of America, without giving effect to its principles or rules regarding conflicts of laws, other than such principles directing application of New York law. The parties hereby submit to venue in, and jurisdiction of the courts located in New York, New York for purposes relating to this Agreement. In the event that any of the provisions of this Agreement shall be held by a court or other tribunal of competent jurisdiction to be unenforceable, the remaining portions hereof shall remain in full force and effect.
Means all rights protecting works of authorship owned or controlled by You, including copyright, moral and neighboring rights, as appropriate, for the full term of their existence including any extensions by You.
**Assignment.** You agree that Ultralytics may assign this Agreement, and all of its rights, obligations and licenses hereunder.
### 1.4 "Submit" or "Submission" or "Submitted"
Or any derivatives shall mean any form of electronic, verbal, or written communication sent to Ultralytics or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, Ultralytics for the purpose of discussing and improving the Project, but excluding communication that is conspicuously marked or otherwise designated in writing by You as "Not a Contribution."
### 1.5 "Project"
Shall mean any of the software projects owned, managed, or maintained by Ultralytics, including but not limited to open-source projects made available by Ultralytics to which Contributions may be submitted.
## 2. Grant of Rights
### 2.1 Copyright License
To the maximum extent permitted by the relevant law, and subject to the terms and conditions of this Agreement, You hereby grant to Ultralytics and to recipients of software distributed by Ultralytics a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, sublicense, and distribute Your Contributions and such derivative works.
### 2.2 Patent License
To the maximum extent permitted by the relevant law, and subject to the terms and conditions of this Agreement, You hereby grant to Ultralytics and to recipients of software distributed by Ultralytics a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by You that are necessarily infringed by Your Contribution(s) alone or by combination of Your Contribution(s) with the Work to which such Contribution(s) was submitted. If any entity institutes patent litigation against You or any other entity (including a cross-claim or counterclaim in a lawsuit) alleging that your Contribution, or the Work to which you have contributed, constitutes direct or contributory patent infringement, then any patent licenses granted to that entity under this Agreement for that Contribution or Work shall terminate as of the date such litigation is filed.
### 2.3 Outbound License
Based on the grant of rights in Sections 2.1 and 2.2, if We include Your Contribution in a Material, We may license the Contribution under any license, including copyleft, permissive, commercial, or proprietary licenses.
### 2.4 Moral Rights
To the fullest extent permitted by law, You hereby waive, and agree not to assert, all of Your "moral rights" in or relating to Your Contributions for the benefit of Ultralytics, its assigns, and their respective direct and indirect sublicensees.
## 3. Representations and Warranties
You represent that:
(a) You have the legal authority to enter into this Agreement.
(b) You own the Copyright and patent claims covering the Contribution which are required to grant the rights under Section 2.
(c) The grant of rights under Section 2 does not violate any grant of rights which You have made to third parties, including Your employer. If Your Contributions were created in the course of Your employment with Your past or present employer(s), You represent that such employer(s) has authorized You to make Contributions on behalf of such employer(s) or such employer(s) has waived all of their right, title, or interest in or to Your Contributions.
(d) You have followed the instructions provided by Ultralytics if You do not own the Copyright in the entire work of authorship submitted.
(e) Should You wish to submit work that is not Your original creation, You may submit it to Ultralytics separately from any Contribution, identifying the complete details of its source and of any license or other restriction (including, but not limited to, related patents, trademarks, and license agreements) of which You are personally aware, and conspicuously marking the work as "Submitted on behalf of a third-party: [named here]."
(f) You agree to notify Ultralytics of any facts or circumstances of which You become aware that would make these representations inaccurate in any respect.
## 4. Disclaimer of Warranties
EXCEPT FOR THE EXPRESS WARRANTIES IN SECTION 3, THE CONTRIBUTION IS PROVIDED "AS IS". MORE PARTICULARLY, ALL EXPRESS OR IMPLIED WARRANTIES INCLUDING, WITHOUT LIMITATION, ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT ARE EXPRESSLY DISCLAIMED BY YOU TO US. TO THE EXTENT THAT ANY SUCH WARRANTIES CANNOT BE DISCLAIMED, SUCH WARRANTY IS LIMITED IN DURATION TO THE MINIMUM PERIOD PERMITTED BY LAW.
## 5. Miscellaneous
### 5.1 Governing Law and Jurisdiction
This Agreement will be governed by and construed in accordance with the laws of the State of New York, United States of America, excluding its conflicts of law provisions. The parties submit to venue in, and jurisdiction of, the courts located in New York, New York, for purposes relating to this Agreement. You waive all defenses of lack of personal jurisdiction and forum non-conveniens.
### 5.2 Entire Agreement
This Agreement sets out the entire agreement between You and Ultralytics for Your Contributions and overrides all other agreements or understandings.
### 5.3 Assignment
Ultralytics may assign this Agreement, and all of its rights, obligations, and licenses hereunder, without Your prior consent.
### 5.4 Waiver of Performance
The failure of either party to require performance by the other party of any provision of this Agreement in one situation shall not affect the right of a party to require such performance at any time in the future. A waiver of performance under a provision in one situation shall not be considered a waiver of the performance of the provision in the future or a waiver of the provision in its entirety.
### 5.5 Severability
If any provision of this Agreement is found void and unenforceable, such provision will be replaced to the extent possible with a provision that comes closest to the meaning of the original provision and which is enforceable. The terms and conditions set forth in this Agreement shall apply notwithstanding any failure of essential purpose of this Agreement or any limited remedy to the maximum extent possible under law.
### 5.6 No Obligation
You acknowledge that Ultralytics is under no obligation to use or incorporate your Contributions into any of the Work. The decision to use or incorporate your Contributions into any of the Work will be made at the sole discretion of Ultralytics or its authorized delegates.
### 5.7 Effective Date
The Effective Date of this Agreement shall be the date You execute this Agreement or the date You first Submit a Contribution to Ultralytics, whichever is earlier.
## FAQ
### What is the purpose of the Ultralytics Individual Contributor License Agreement?
### What is the purpose of the Ultralytics Contributor License Agreement (CLA)?
The Ultralytics Individual Contributor License Agreement (ICLA) governs the terms under which you contribute to Ultralytics' open-source projects. It sets out the rights and obligations related to your contributions, including granting copyright and patent licenses, waiving moral rights, and disclosing any third-party content.
The Ultralytics CLA defines the terms under which you contribute to Ultralytics' software projects. It outlines the rights and obligations related to your contributions, including granting copyright and patent licenses, and addressing the handling of third-party content.
### Why do I need to agree to the Copyright License in the ICLA?
### Why do I need to agree to the Copyright License in the CLA?
Agreeing to the Copyright License allows Ultralytics to use and distribute your contributions, including making derivative works. This ensures that your contributions can be integrated into Ultralytics projects and shared with the community, fostering collaboration and software development.
Agreeing to the Copyright License allows Ultralytics and its users to use, modify, distribute, and create derivative works from your contributions. This ensures that your contributions can be integrated into Ultralytics projects and shared with the community, fostering collaboration and software development.
### How does the Patent License benefit both contributors and Ultralytics?
The Patent License grants Ultralytics the rights to use, make, and sell contributions covered by your patents, which is crucial for product development and commercialization. In return, it allows your patented innovations to be more widely used and recognized, promoting innovation within the community.
The Patent License grants Ultralytics the rights to use, make, and sell contributions covered by your patents. This is essential for product development and commercialization. In return, your patented innovations gain wider use and recognition, promoting innovation within the community.
### What should I do if my contribution includes third-party content?
If your contribution includes third-party content, you must clearly mark it and provide comprehensive details about its source and any applicable licenses or restrictions. This ensures proper attribution and legal compliance within Ultralytics projects, maintaining transparency and respecting the rights of original content creators.
### What should I do if my contribution contains third-party content?
### What happens if Ultralytics decides not to use my contribution?
If your contribution includes third-party content or you are aware of any third-party intellectual property rights, you must provide full details of such content and rights when submitting your contribution. This includes identifying the third-party content, its author, and the applicable license terms. For more information on third-party content, refer to the Third Party Content/Rights section of the Agreement.
Ultralytics is not obligated to use or incorporate your contributions into any projects. The decision to use your contributions is entirely at Ultralytics' discretion, meaning that while your contributions are valuable, they may not always align with the project's current needs or directions.
---
### What happens if Ultralytics does not use my contributions?
**Need More Help?**
Ultralytics is not obligated to use or incorporate your contributions into any projects. The decision to use or integrate contributions is at Ultralytics' sole discretion. This means that while your contributions are valuable, they may not always align with the project's current needs or directions. For further details, see the No Obligation section.
If you have any further questions or need clarification regarding the Contributor License Agreement, please contact us at hello@ultralytics.com.

@@ -54,11 +54,69 @@ Explore the Ultralytics Docs, a comprehensive resource designed to help you unde
## Where to Start
- **Install** `ultralytics` with pip and get up and running in minutes &nbsp; [:material-clock-fast: Get Started](quickstart.md){ .md-button }
- **Predict** new images and videos with YOLO &nbsp; [:octicons-image-16: Predict on Images](modes/predict.md){ .md-button }
- **Train** a new YOLO model on your own custom dataset &nbsp; [:fontawesome-solid-brain: Train a Model](modes/train.md){ .md-button }
- **Tasks** YOLO tasks like segment, classify, pose and track &nbsp; [:material-magnify-expand: Explore Tasks](tasks/index.md){ .md-button }
- **[YOLO11](models/yolo11.md) 🚀 NEW**: Ultralytics' latest SOTA models &nbsp; [:material-magnify-expand: Explore new YOLO11 models](models/yolo11.md){ .md-button }
<div class="grid cards" markdown>
- :material-clock-fast:{ .lg .middle } &nbsp; **Getting Started**
***
Install `ultralytics` with pip and get up and running in minutes to train a YOLO model
***
[:octicons-arrow-right-24: Quickstart](quickstart.md)
- :material-image:{ .lg .middle } &nbsp; **Predict**
***
Predict on new images, videos and streams with YOLO <br /> &nbsp;
***
[:octicons-arrow-right-24: Learn more](modes/predict.md)
- :fontawesome-solid-brain:{ .lg .middle } &nbsp; **Train a Model**
***
Train a new YOLO model on your own custom dataset from scratch or load and train on a pretrained model
***
[:octicons-arrow-right-24: Learn more](modes/train.md)
- :material-magnify-expand:{ .lg .middle } &nbsp; **Explore Tasks**
***
Discover YOLO tasks like detect, segment, classify, pose, OBB and track <br /> &nbsp;
***
[:octicons-arrow-right-24: Explore Tasks](tasks/index.md)
- :rocket:{ .lg .middle } &nbsp; **Explore YOLO11 NEW**
***
Discover Ultralytics' latest state-of-the-art YOLO11 models and their capabilities <br /> &nbsp;
***
[:octicons-arrow-right-24: YOLO11 Models 🚀 NEW](models/yolo11.md)
- :material-scale-balance:{ .lg .middle } &nbsp; **Open Source, AGPL-3.0**
***
Ultralytics offers two licensing options for YOLO: AGPL-3.0 License and Enterprise License. Ultralytics is available on [GitHub](https://github.com/ultralytics/ultralytics)
***
[:octicons-arrow-right-24: License](https://www.ultralytics.com/license)
</div>
<p align="center">
<br>
@@ -105,7 +163,7 @@ Ultralytics YOLO is the latest advancement in the acclaimed YOLO (You Only Look
Getting started with YOLO is quick and straightforward. You can install the Ultralytics package using [pip](https://pypi.org/project/ultralytics/) and get up and running in minutes. Here's a basic installation command:
!!! example
!!! example "Installation using pip"
=== "CLI"
@@ -121,11 +179,11 @@ Training a custom YOLO model on your dataset involves a few detailed steps:
1. Prepare your annotated dataset.
2. Configure the training parameters in a YAML file.
3. Use the `yolo train` command to start training.
3. Use the `yolo TASK train` command to start training (each `TASK` has its own arguments).
Here's example code:
Here's example code for the Object Detection Task:
!!! example
!!! example "Train Example for Object Detection Task"
=== "Python"
@@ -143,7 +201,7 @@ Here's example code:
```bash
# Train a YOLO model from the command line
yolo train data=path/to/dataset.yaml epochs=100 imgsz=640
yolo detect train data=path/to/dataset.yaml epochs=100 imgsz=640
```
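The equivalent Python call, mirroring the CLI arguments above (a sketch; the model file and dataset path are placeholders):
```python
from ultralytics import YOLO

# Sketch: train a detection model on a custom dataset, mirroring the CLI command above
model = YOLO("yolo11n.pt")
results = model.train(data="path/to/dataset.yaml", epochs=100, imgsz=640)
```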
For a detailed walkthrough, check out our [Train a Model](modes/train.md) guide, which includes examples and tips for optimizing your training process.
@@ -161,7 +219,7 @@ For more details, visit our [Licensing](https://www.ultralytics.com/license) pag
Ultralytics YOLO supports efficient and customizable multi-object tracking. To utilize tracking capabilities, you can use the `yolo track` command as shown below:
!!! example
!!! example "Example for Object Tracking on a Video"
=== "Python"

@@ -50,17 +50,21 @@ After installing the required packages, you'll need to sign up, get a [Comet API
Then, you can initialize your Comet project. Comet will automatically detect the API key and proceed with the setup.
```python
import comet_ml
!!! example "Initialize Comet project"
comet_ml.login(project_name="comet-example-yolov8-coco128")
```
=== "Python"
```python
import comet_ml
comet_ml.login(project_name="comet-example-yolo11-coco128")
```
If you are using a Google Colab notebook, the code above will prompt you to enter your API key for initialization.
## Usage
Before diving into the usage instructions, be sure to check out the range of [YOLO11 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements.
Before diving into the usage instructions, be sure to check out the range of [YOLO11 models offered by Ultralytics](../models/yolo11.md). This will help you choose the most appropriate model for your project requirements.
!!! example "Usage"
@@ -75,7 +79,7 @@ Before diving into the usage instructions, be sure to check out the range of [YO
# Train the model
results = model.train(
data="coco8.yaml",
project="comet-example-yolov8-coco128",
project="comet-example-yolo11-coco128",
batch=32,
save_period=1,
save_json=True,
@@ -200,7 +204,7 @@ To integrate Comet ML with Ultralytics YOLO11, follow these steps:
```python
import comet_ml
comet_ml.login(project_name="comet-example-yolov8-coco128")
comet_ml.login(project_name="comet-example-yolo11-coco128")
```
4. **Train your YOLO11 model and log metrics**:
@@ -211,7 +215,7 @@ To integrate Comet ML with Ultralytics YOLO11, follow these steps:
model = YOLO("yolo11n.pt")
results = model.train(
data="coco8.yaml",
project="comet-example-yolov8-coco128",
project="comet-example-yolo11-coco128",
batch=32,
save_period=1,
save_json=True,

@ -210,7 +210,7 @@ These features help in tracking experiments, optimizing models, and collaboratin
After running your training script with W&B integration:
1. A link to your W&B dashboard will be provided in the console output.
2. Click on the link or go to [wandb.ai](https://wandb.ai) and log in to your account.
2. Click on the link or go to [wandb.ai](https://wandb.ai/) and log in to your account.
3. Navigate to your project to view detailed metrics, visualizations, and model performance data.
The dashboard offers insights into your model's training process, allowing you to analyze and improve your YOLO11 models effectively.
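
As a rough sketch, the integration can also be attached from Python before training; `add_wandb_callback` below comes from the `wandb` package's Ultralytics integration and is an assumption here, not part of this change:

```python
from ultralytics import YOLO
from wandb.integration.ultralytics import add_wandb_callback  # assumed helper from the wandb package

# Attach the W&B callback so metrics and predictions are logged during training
model = YOLO("yolo11n.pt")
add_wandb_callback(model)
model.train(data="coco8.yaml", epochs=5, imgsz=640)
```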

@ -142,11 +142,20 @@ SAM 2 can be utilized across a broad spectrum of tasks, including real-time vide
# Display model information (optional)
model.info()
# Segment with bounding box prompt
# Run inference with bboxes prompt
results = model("path/to/image.jpg", bboxes=[100, 100, 200, 200])
# Segment with point prompt
results = model("path/to/image.jpg", points=[150, 150], labels=[1])
# Run inference with single point
results = model(points=[900, 370], labels=[1])
# Run inference with multiple points
results = model(points=[[400, 370], [900, 370]], labels=[1, 1])
# Run inference with multiple points prompt per object
results = model(points=[[[400, 370], [900, 370]]], labels=[[1, 1]])
# Run inference with negative points prompt
results = model(points=[[[400, 370], [900, 370]]], labels=[[1, 0]])
```
#### Segment Everything

@ -59,16 +59,16 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t
results = model("ultralytics/assets/zidane.jpg", bboxes=[439, 437, 524, 709])
# Run inference with single point
results = predictor(points=[900, 370], labels=[1])
results = model(points=[900, 370], labels=[1])
# Run inference with multiple points
results = predictor(points=[[400, 370], [900, 370]], labels=[1, 1])
results = model(points=[[400, 370], [900, 370]], labels=[1, 1])
# Run inference with multiple points prompt per object
results = predictor(points=[[[400, 370], [900, 370]]], labels=[[1, 1]])
results = model(points=[[[400, 370], [900, 370]]], labels=[[1, 1]])
# Run inference with negative points prompt
results = predictor(points=[[[400, 370], [900, 370]]], labels=[[1, 0]])
results = model(points=[[[400, 370], [900, 370]]], labels=[[1, 0]])
```
!!! example "Segment everything"

@ -188,38 +188,48 @@ class Yolov8TFLite:
Returns:
numpy.ndarray: The input image with detections drawn on it.
"""
# Transpose predictions outside the loop
output = [np.transpose(pred) for pred in output]
boxes = []
scores = []
class_ids = []
# Vectorize extraction of bounding boxes, scores, and class IDs
for pred in output:
pred = np.transpose(pred)
for box in pred:
x, y, w, h = box[:4]
x1 = x - w / 2
y1 = y - h / 2
boxes.append([x1, y1, w, h])
idx = np.argmax(box[4:])
scores.append(box[idx + 4])
class_ids.append(idx)
x, y, w, h = pred[:, 0], pred[:, 1], pred[:, 2], pred[:, 3]
x1 = x - w / 2
y1 = y - h / 2
boxes.extend(np.column_stack([x1, y1, w, h]))
# Argmax and score extraction for all predictions at once
idx = np.argmax(pred[:, 4:], axis=1)
scores.extend(pred[np.arange(pred.shape[0]), idx + 4])
class_ids.extend(idx)
# Precompute gain and pad once
img_height, img_width = input_image.shape[:2]
gain = min(img_width / self.img_width, img_height / self.img_height)
pad = (
round((img_width - self.img_width * gain) / 2 - 0.1),
round((img_height - self.img_height * gain) / 2 - 0.1),
)
# Non-Maximum Suppression (NMS) in one go
indices = cv2.dnn.NMSBoxes(boxes, scores, self.confidence_thres, self.iou_thres)
for i in indices:
# Get the box, score, and class ID corresponding to the index
# Process selected indices
for i in indices.flatten():
box = boxes[i]
gain = min(img_width / self.img_width, img_height / self.img_height)
pad = (
round((img_width - self.img_width * gain) / 2 - 0.1),
round((img_height - self.img_height * gain) / 2 - 0.1),
)
box[0] = (box[0] - pad[0]) / gain
box[1] = (box[1] - pad[1]) / gain
box[2] = box[2] / gain
box[3] = box[3] / gain
score = scores[i]
class_id = class_ids[i]
if score > 0.25:
print(box, score, class_id)
# Draw the detection on the input image
self.draw_detections(input_image, box, score, class_id)
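
The vectorized path above replaces the per-box Python loop with array operations; a self-contained sketch of the same decode-and-NMS idea (shapes and thresholds are illustrative assumptions, not values from this repository):

```python
import cv2
import numpy as np


def decode_and_nms(pred, conf_thres=0.25, iou_thres=0.45):
    """Decode one (num_boxes, 4 + num_classes) prediction array and apply NMS."""
    # Split box geometry (center-based xywh) and per-class scores
    xywh, cls_scores = pred[:, :4], pred[:, 4:]

    # Convert center-xywh to top-left-xywh as expected by cv2.dnn.NMSBoxes
    boxes = np.column_stack([
        xywh[:, 0] - xywh[:, 2] / 2,  # x1
        xywh[:, 1] - xywh[:, 3] / 2,  # y1
        xywh[:, 2],                   # w
        xywh[:, 3],                   # h
    ])

    # Best class and its score for every box, computed in one pass
    class_ids = np.argmax(cls_scores, axis=1)
    scores = cls_scores[np.arange(len(cls_scores)), class_ids]

    # Single NMS call over all candidate boxes
    indices = cv2.dnn.NMSBoxes(boxes.tolist(), scores.tolist(), conf_thres, iou_thres)
    indices = np.array(indices).flatten() if len(indices) else np.array([], dtype=int)
    return boxes[indices], scores[indices], class_ids[indices]


# Tiny synthetic example: 3 candidate boxes, 2 classes
pred = np.array(
    [
        [50, 50, 20, 20, 0.9, 0.1],
        [52, 51, 22, 19, 0.8, 0.2],  # overlaps the first box with a lower score
        [200, 200, 30, 30, 0.05, 0.7],
    ],
    dtype=np.float32,
)
print(decode_and_nms(pred))
```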

@ -582,7 +582,7 @@
"from ultralytics import YOLO\n",
"\n",
"model = YOLO('yolo11n-obb.pt') # load a pretrained YOLO OBB model\n",
"model.train(data='coco8-dota.yaml', epochs=3) # train the model\n",
"model.train(data='dota8.yaml', epochs=3) # train the model\n",
"model('https://ultralytics.com/images/bus.jpg') # predict on an image"
],
"metadata": {

@ -162,7 +162,7 @@ nav:
- solutions/index.md
- Guides:
- guides/index.md
- Live Inference 🚀 NEW: guides/streamlit-live-inference.md # for promotion of new pages
- YOLO11 🚀 NEW: models/yolo11.md # for promotion of new pages
- Languages:
- 🇬🇧&nbsp English: https://ultralytics.com/docs/
- 🇨🇳&nbsp 简体中文: https://docs.ultralytics.com/zh/

@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
__version__ = "8.3.12"
__version__ = "8.3.13"
import os

@ -639,7 +639,7 @@ def smart_value(v):
else:
try:
return eval(v)
except: # noqa E722
except Exception:
return v

@ -65,7 +65,7 @@ def exif_size(img: Image.Image):
rotation = exif.get(274, None) # the EXIF key for the orientation tag is 274
if rotation in {6, 8}: # rotation 270 or 90
s = s[1], s[0]
except: # noqa E722
except Exception:
pass
return s

@ -965,7 +965,7 @@ class Exporter:
f'--out_dir "{Path(f).parent}" '
"--show_operations "
"--search_delegate "
"--delegate_search_step 3 "
"--delegate_search_step 30 "
"--timeout_sec 180 "
f'"{tflite_model}"'
)

@ -235,7 +235,42 @@ class Predictor(BasePredictor):
"""
features = self.get_im_features(im) if self.features is None else self.features
src_shape, dst_shape = self.batch[1][0].shape[:2], im.shape[2:]
bboxes, points, labels, masks = self._prepare_prompts(im.shape[2:], bboxes, points, labels, masks)
points = (points, labels) if points is not None else None
# Embed prompts
sparse_embeddings, dense_embeddings = self.model.prompt_encoder(points=points, boxes=bboxes, masks=masks)
# Predict masks
pred_masks, pred_scores = self.model.mask_decoder(
image_embeddings=features,
image_pe=self.model.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
)
# (N, d, H, W) --> (N*d, H, W), (N, d) --> (N*d, )
# `d` could be 1 or 3 depending on `multimask_output`.
return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)
def _prepare_prompts(self, dst_shape, bboxes=None, points=None, labels=None, masks=None):
"""
Prepares and transforms the input prompts for processing based on the destination shape.
Args:
dst_shape (tuple): The target shape (height, width) for the prompts.
bboxes (np.ndarray | List | None): Bounding boxes in XYXY format with shape (N, 4).
points (np.ndarray | List | None): Points indicating object locations with shape (N, 2) or (N, num_points, 2), in pixels.
labels (np.ndarray | List | None): Point prompt labels with shape (N,) or (N, num_points). 1 for foreground, 0 for background.
masks (List | np.ndarray, Optional): Masks for the objects, where each mask is a 2D array.
Raises:
AssertionError: If the number of points does not match the number of labels when labels are provided.
Returns:
(tuple): A tuple containing transformed bounding boxes, points, labels, and masks.
"""
src_shape = self.batch[1][0].shape[:2]
r = 1.0 if self.segment_all else min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1])
# Transform input prompts
if points is not None:
@ -258,23 +293,7 @@ class Predictor(BasePredictor):
bboxes *= r
if masks is not None:
masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device).unsqueeze(1)
points = (points, labels) if points is not None else None
# Embed prompts
sparse_embeddings, dense_embeddings = self.model.prompt_encoder(points=points, boxes=bboxes, masks=masks)
# Predict masks
pred_masks, pred_scores = self.model.mask_decoder(
image_embeddings=features,
image_pe=self.model.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
)
# (N, d, H, W) --> (N*d, H, W), (N, d) --> (N*d, )
# `d` could be 1 or 3 depending on `multimask_output`.
return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)
return bboxes, points, labels, masks
def generate(
self,
@ -693,34 +712,7 @@ class SAM2Predictor(Predictor):
"""
features = self.get_im_features(im) if self.features is None else self.features
src_shape, dst_shape = self.batch[1][0].shape[:2], im.shape[2:]
r = 1.0 if self.segment_all else min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1])
# Transform input prompts
if points is not None:
points = torch.as_tensor(points, dtype=torch.float32, device=self.device)
points = points[None] if points.ndim == 1 else points
# Assuming labels are all positive if users don't pass labels.
if labels is None:
labels = torch.ones(points.shape[0])
labels = torch.as_tensor(labels, dtype=torch.int32, device=self.device)
points *= r
# (N, 2) --> (N, 1, 2), (N, ) --> (N, 1)
points, labels = points[:, None], labels[:, None]
if bboxes is not None:
bboxes = torch.as_tensor(bboxes, dtype=torch.float32, device=self.device)
bboxes = bboxes[None] if bboxes.ndim == 1 else bboxes
bboxes = bboxes.view(-1, 2, 2) * r
bbox_labels = torch.tensor([[2, 3]], dtype=torch.int32, device=bboxes.device).expand(len(bboxes), -1)
# NOTE: merge "boxes" and "points" into a single "points" input
# (where boxes are added at the beginning) to model.sam_prompt_encoder
if points is not None:
points = torch.cat([bboxes, points], dim=1)
labels = torch.cat([bbox_labels, labels], dim=1)
else:
points, labels = bboxes, bbox_labels
if masks is not None:
masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device).unsqueeze(1)
bboxes, points, labels, masks = self._prepare_prompts(im.shape[2:], bboxes, points, labels, masks)
points = (points, labels) if points is not None else None
sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder(
@ -744,6 +736,36 @@ class SAM2Predictor(Predictor):
# `d` could be 1 or 3 depending on `multimask_output`.
return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)
def _prepare_prompts(self, dst_shape, bboxes=None, points=None, labels=None, masks=None):
"""
Prepares and transforms the input prompts for processing based on the destination shape.
Args:
dst_shape (tuple): The target shape (height, width) for the prompts.
bboxes (np.ndarray | List | None): Bounding boxes in XYXY format with shape (N, 4).
points (np.ndarray | List | None): Points indicating object locations with shape (N, 2) or (N, num_points, 2), in pixels.
labels (np.ndarray | List | None): Point prompt labels with shape (N,) or (N, num_points). 1 for foreground, 0 for background.
masks (List | np.ndarray, Optional): Masks for the objects, where each mask is a 2D array.
Raises:
AssertionError: If the number of points does not match the number of labels when labels are provided.
Returns:
(tuple): A tuple containing transformed bounding boxes, points, labels, and masks.
"""
bboxes, points, labels, masks = super()._prepare_prompts(dst_shape, bboxes, points, labels, masks)
if bboxes is not None:
bboxes = bboxes.view(-1, 2, 2)
bbox_labels = torch.tensor([[2, 3]], dtype=torch.int32, device=bboxes.device).expand(len(bboxes), -1)
# NOTE: merge "boxes" and "points" into a single "points" input
# (where boxes are added at the beginning) to model.sam_prompt_encoder
if points is not None:
points = torch.cat([bboxes, points], dim=1)
labels = torch.cat([bbox_labels, labels], dim=1)
else:
points, labels = bboxes, bbox_labels
return bboxes, points, labels, masks
def set_image(self, image):
"""
Preprocesses and sets a single image for inference using the SAM2 model.

@ -8,7 +8,7 @@ from ultralytics.data import ClassificationDataset, build_dataloader
from ultralytics.engine.trainer import BaseTrainer
from ultralytics.models import yolo
from ultralytics.nn.tasks import ClassificationModel
from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK, colorstr
from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK
from ultralytics.utils.plotting import plot_images, plot_results
from ultralytics.utils.torch_utils import is_parallel, strip_optimizer, torch_distributed_zero_first
@ -141,7 +141,6 @@ class ClassificationTrainer(BaseTrainer):
self.metrics = self.validator(model=f)
self.metrics.pop("fitness", None)
self.run_callbacks("on_fit_epoch_end")
LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}")
def plot_training_samples(self, batch, ni):
"""Plots training samples with their annotations."""

@ -46,7 +46,7 @@ def default_class_names(data=None):
if data:
try:
return yaml_load(check_yaml(data))["names"]
except: # noqa E722
except Exception:
pass
return {i: f"class{i}" for i in range(999)} # return default if above errors

@ -963,7 +963,6 @@ def parse_model(d, ch, verbose=True): # model_dict, input_channels(3)
args[j] = locals()[a] if a in locals() else ast.literal_eval(a)
except ValueError:
pass
n = n_ = max(round(n * depth), 1) if n > 1 else n # depth gain
if m in {
Classify,
@ -1102,7 +1101,7 @@ def guess_model_scale(model_path):
(str): The size character of the model's scale, which can be n, s, m, l, or x.
"""
try:
return re.search(r"yolo[v]?\d+([nslmx])", Path(model_path).stem).group(1) # n, s, m, l, or x
return re.search(r"yolo[v]?\d+([nslmx])", Path(model_path).stem).group(1) # noqa, returns n, s, m, l, or x
except AttributeError:
return ""
@ -1139,7 +1138,7 @@ def guess_model_task(model):
if isinstance(model, dict):
try:
return cfg2task(model)
except: # noqa E722
except Exception:
pass
# Guess from PyTorch model
@ -1147,12 +1146,12 @@ def guess_model_task(model):
for x in "model.args", "model.model.args", "model.model.model.args":
try:
return eval(x)["task"]
except: # noqa E722
except Exception:
pass
for x in "model.yaml", "model.model.yaml", "model.model.model.yaml":
try:
return cfg2task(eval(x))
except: # noqa E722
except Exception:
pass
for m in model.modules():

@ -61,11 +61,11 @@ class Analytics(BaseSolution):
self.extract_tracks(im0) # Extract tracks
if self.type == "line":
for box in self.boxes:
for _ in self.boxes:
self.total_counts += 1
im0 = self.update_graph(frame_number=frame_number)
self.total_counts = 0
elif self.type == "pie" or self.type == "bar" or self.type == "area":
elif self.type in {"pie", "bar", "area"}:
self.clswise_count = {}
for box, cls in zip(self.boxes, self.clss):
if self.names[int(cls)] in self.clswise_count:

@ -4,55 +4,21 @@ import math
import cv2
from ultralytics.utils.checks import check_imshow
from ultralytics.solutions.solutions import BaseSolution # Import a parent class
from ultralytics.utils.plotting import Annotator, colors
class DistanceCalculation:
class DistanceCalculation(BaseSolution):
"""A class to calculate distance between two objects in a real-time video stream based on their tracks."""
def __init__(
self,
names,
view_img=False,
line_thickness=2,
line_color=(255, 0, 255),
centroid_color=(104, 31, 17),
):
"""
Initializes the DistanceCalculation class with the given parameters.
Args:
names (dict): Dictionary of classes names.
view_img (bool, optional): Flag to indicate if the video stream should be displayed. Defaults to False.
line_thickness (int, optional): Thickness of the lines drawn on the image. Defaults to 2.
line_color (tuple, optional): Color of the lines drawn on the image (BGR format). Defaults to (255, 255, 0).
centroid_color (tuple, optional): Color of the centroids drawn (BGR format). Defaults to (255, 0, 255).
"""
# Visual & image information
self.im0 = None
self.annotator = None
self.view_img = view_img
self.line_color = line_color
self.centroid_color = centroid_color
# Prediction & tracking information
self.names = names
self.boxes = None
self.line_thickness = line_thickness
self.trk_ids = None
# Distance calculation information
self.centroids = []
def __init__(self, **kwargs):
"""Initializes the DistanceCalculation class with the given parameters."""
super().__init__(**kwargs)
# Mouse event information
self.left_mouse_count = 0
self.selected_boxes = {}
# Check if environment supports imshow
self.env_check = check_imshow(warn=True)
self.window_name = "Ultralytics Solutions"
def mouse_event_for_distance(self, event, x, y, flags, param):
"""
Handles mouse events to select regions in a real-time video stream.
@ -67,7 +33,7 @@ class DistanceCalculation:
if event == cv2.EVENT_LBUTTONDOWN:
self.left_mouse_count += 1
if self.left_mouse_count <= 2:
for box, track_id in zip(self.boxes, self.trk_ids):
for box, track_id in zip(self.boxes, self.track_ids):
if box[0] < x < box[2] and box[1] < y < box[3] and track_id not in self.selected_boxes:
self.selected_boxes[track_id] = box
@ -75,30 +41,21 @@ class DistanceCalculation:
self.selected_boxes = {}
self.left_mouse_count = 0
def start_process(self, im0, tracks):
def calculate(self, im0):
"""
Processes the video frame and calculates the distance between two bounding boxes.
Args:
im0 (ndarray): The image frame.
tracks (list): List of tracks obtained from the object tracking process.
Returns:
(ndarray): The processed image frame.
"""
self.im0 = im0
if tracks[0].boxes.id is None:
if self.view_img:
self.display_frames()
return im0
self.annotator = Annotator(im0, line_width=self.line_width) # Initialize annotator
self.extract_tracks(im0) # Extract tracks
self.boxes = tracks[0].boxes.xyxy.cpu()
clss = tracks[0].boxes.cls.cpu().tolist()
self.trk_ids = tracks[0].boxes.id.int().cpu().tolist()
self.annotator = Annotator(self.im0, line_width=self.line_thickness)
for box, cls, track_id in zip(self.boxes, clss, self.trk_ids):
# Iterate over bounding boxes, track IDs, and class indices
for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
self.annotator.box_label(box, color=colors(int(cls), True), label=self.names[int(cls)])
if len(self.selected_boxes) == 2:
@ -115,25 +72,11 @@ class DistanceCalculation:
pixels_distance = math.sqrt(
(self.centroids[0][0] - self.centroids[1][0]) ** 2 + (self.centroids[0][1] - self.centroids[1][1]) ** 2
)
self.annotator.plot_distance_and_line(pixels_distance, self.centroids, self.line_color, self.centroid_color)
self.annotator.plot_distance_and_line(pixels_distance, self.centroids)
self.centroids = []
if self.view_img and self.env_check:
self.display_frames()
return im0
def display_frames(self):
"""Displays the current frame with annotations."""
cv2.namedWindow(self.window_name)
cv2.setMouseCallback(self.window_name, self.mouse_event_for_distance)
cv2.imshow(self.window_name, self.im0)
if cv2.waitKey(1) & 0xFF == ord("q"):
return
self.display_output(im0) # display output with base class function
cv2.setMouseCallback("Ultralytics Solutions", self.mouse_event_for_distance)
if __name__ == "__main__":
names = {0: "person", 1: "car"} # example class names
distance_calculation = DistanceCalculation(names)
return im0  # return output image for further usage
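
A hedged usage sketch of the refactored solution; the constructor keywords (`model`, `show`) are assumed from the shared `BaseSolution` interface and may differ:

```python
import cv2

from ultralytics.solutions.distance_calculation import DistanceCalculation

# Assumed BaseSolution kwargs: model weights path and display flag
distance = DistanceCalculation(model="yolo11n.pt", show=True)

cap = cv2.VideoCapture("path/to/video.mp4")
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    frame = distance.calculate(frame)  # tracks objects, handles mouse selection, draws the distance
cap.release()
cv2.destroyAllWindows()
```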

@ -52,7 +52,8 @@ class Heatmap(ObjectCounter):
Returns:
im0 (ndarray): Processed image for further usage
"""
self.heatmap = np.zeros_like(im0, dtype=np.float32) * 0.99 if not self.initialized else self.heatmap
if not self.initialized:
self.heatmap = np.zeros_like(im0, dtype=np.float32) * 0.99
self.initialized = True # Initialize heatmap only once
self.annotator = Annotator(im0, line_width=self.line_width) # Initialize annotator

@ -112,13 +112,13 @@ class ObjectCounter(BaseSolution):
# Iterate over bounding boxes, track IDs, and class indices
for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
# Draw bounding box and counting region
self.annotator.box_label(box, label=self.names[cls], color=colors(track_id, True))
self.annotator.box_label(box, label=self.names[cls], color=colors(cls, True))
self.store_tracking_history(track_id, box) # Store track history
self.store_classwise_counts(cls) # store classwise counts in dict
# Draw tracks of objects
self.annotator.draw_centroid_and_tracks(
self.track_line, color=colors(int(track_id), True), track_thickness=self.line_width
self.track_line, color=colors(int(cls), True), track_thickness=self.line_width
)
# store previous position of track for object counting

@ -526,7 +526,7 @@ def read_device_model() -> str:
try:
with open("/proc/device-tree/model") as f:
return f.read()
except: # noqa E722
except Exception:
return ""
@ -584,7 +584,7 @@ def is_docker() -> bool:
try:
with open("/proc/self/cgroup") as f:
return "docker" in f.read()
except: # noqa E722
except Exception:
return False
@ -623,7 +623,7 @@ def is_online() -> bool:
for dns in ("1.1.1.1", "8.8.8.8"): # check Cloudflare and Google DNS
socket.create_connection(address=(dns, 80), timeout=2.0).close()
return True
except: # noqa E722
except Exception:
return False

@ -50,7 +50,7 @@ def _log_tensorboard_graph(trainer):
LOGGER.info(f"{PREFIX}model graph visualization added ✅")
return
except: # noqa E722
except Exception:
# Fallback to TorchScript export steps (RTDETR)
try:
model = deepcopy(de_parallel(trainer.model))

@ -277,7 +277,7 @@ def check_latest_pypi_version(package_name="ultralytics"):
response = requests.get(f"https://pypi.org/pypi/{package_name}/json", timeout=3)
if response.status_code == 200:
return response.json()["info"]["version"]
except: # noqa E722
except Exception:
return None
@ -299,7 +299,7 @@ def check_pip_update_available():
f"Update with 'pip install -U ultralytics'"
)
return True
except: # noqa E722
except Exception:
pass
return False
@ -715,7 +715,7 @@ def git_describe(path=ROOT): # path must be a directory
"""Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe."""
try:
return subprocess.check_output(f"git -C {path} describe --tags --long --always", shell=True).decode()[:-1]
except: # noqa E722
except Exception:
return ""

@ -60,7 +60,7 @@ def is_url(url, check=False):
with request.urlopen(url) as response:
return response.getcode() == 200 # check if exists online
return True
except: # noqa E722
except Exception:
return False

@ -598,7 +598,7 @@ def ap_per_class(
# AP from recall-precision curve
for j in range(tp.shape[1]):
ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
if plot and j == 0:
if j == 0:
prec_values.append(np.interp(x, mrec, mpre)) # precision at mAP@0.5
prec_values = np.array(prec_values) # (nc, 1000)

@ -804,31 +804,30 @@ class Annotator:
self.im, label, (int(mask[0][0]) - text_size[0] // 2, int(mask[0][1])), 0, self.sf, txt_color, self.tf
)
def plot_distance_and_line(self, pixels_distance, centroids, line_color, centroid_color):
def plot_distance_and_line(
self, pixels_distance, centroids, line_color=(104, 31, 17), centroid_color=(255, 0, 255)
):
"""
Plot the distance and line on frame.
Args:
pixels_distance (float): Pixels distance between two bbox centroids.
centroids (list): Bounding box centroids data.
line_color (tuple): RGB distance line color.
centroid_color (tuple): RGB bounding box centroid color.
line_color (tuple, optional): Distance line color.
centroid_color (tuple, optional): Bounding box centroid color.
"""
# Get the text size
(text_width_m, text_height_m), _ = cv2.getTextSize(
f"Pixels Distance: {pixels_distance:.2f}", 0, self.sf, self.tf
)
text = f"Pixels Distance: {pixels_distance:.2f}"
(text_width_m, text_height_m), _ = cv2.getTextSize(text, 0, self.sf, self.tf)
# Define corners with 10-pixel margin and draw rectangle
top_left = (15, 25)
bottom_right = (15 + text_width_m + 20, 25 + text_height_m + 20)
cv2.rectangle(self.im, top_left, bottom_right, centroid_color, -1)
cv2.rectangle(self.im, (15, 25), (15 + text_width_m + 20, 25 + text_height_m + 20), line_color, -1)
# Calculate the position for the text with a 10-pixel margin and draw text
text_position = (top_left[0] + 10, top_left[1] + text_height_m + 10)
text_position = (25, 25 + text_height_m + 10)
cv2.putText(
self.im,
f"Pixels Distance: {pixels_distance:.2f}",
text,
text_position,
0,
self.sf,
@ -1118,7 +1117,7 @@ def plot_images(
im[y : y + h, x : x + w, :][mask] = (
im[y : y + h, x : x + w, :][mask] * 0.4 + np.array(color) * 0.6
)
except: # noqa E722
except Exception:
pass
annotator.fromarray(im)
if not save:
@ -1156,16 +1155,16 @@ def plot_results(file="path/to/results.csv", dir="", segment=False, pose=False,
save_dir = Path(file).parent if file else Path(dir)
if classify:
fig, ax = plt.subplots(2, 2, figsize=(6, 6), tight_layout=True)
index = [1, 4, 2, 3]
index = [2, 5, 3, 4]
elif segment:
fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True)
index = [1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]
index = [2, 3, 4, 5, 6, 7, 10, 11, 14, 15, 16, 17, 8, 9, 12, 13]
elif pose:
fig, ax = plt.subplots(2, 9, figsize=(21, 6), tight_layout=True)
index = [1, 2, 3, 4, 5, 6, 7, 10, 11, 14, 15, 16, 17, 18, 8, 9, 12, 13]
index = [2, 3, 4, 5, 6, 7, 8, 11, 12, 15, 16, 17, 18, 19, 9, 10, 13, 14]
else:
fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
index = [1, 2, 3, 4, 5, 8, 9, 10, 6, 7]
index = [2, 3, 4, 5, 6, 9, 10, 11, 7, 8]
ax = ax.ravel()
files = list(save_dir.glob("results*.csv"))
assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot."

@ -119,7 +119,7 @@ def get_cpu_info():
info = cpuinfo.get_cpu_info() # info dict
string = info.get(k[0] if k[0] in info else k[1] if k[1] in info else k[2], "unknown")
PERSISTENT_CACHE["cpu_info"] = string.replace("(R)", "").replace("CPU ", "").replace("@ ", "")
except: # noqa E722
except Exception:
pass
return PERSISTENT_CACHE.get("cpu_info", "unknown")
