diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index e82aecc5a0..1ec1b9a93c 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -18,7 +18,7 @@ jobs:
name: Publish
runs-on: ubuntu-latest
permissions:
- id-token: write # for PyPI trusted publishing
+ id-token: write # for PyPI trusted publishing
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -41,49 +41,11 @@ jobs:
shell: python
run: |
import os
- import requests
- import toml
-
- # Load version and package name from pyproject.toml
- pyproject = toml.load('pyproject.toml')
- package_name = pyproject['project']['name']
- local_version = pyproject['project'].get('version', 'dynamic')
-
- # If version is dynamic, extract it from the specified file
- if local_version == 'dynamic':
- version_attr = pyproject['tool']['setuptools']['dynamic']['version']['attr']
- module_path, attr_name = version_attr.rsplit('.', 1)
- with open(f"{module_path.replace('.', '/')}/__init__.py") as f:
- local_version = next(line.split('=')[1].strip().strip("'\"") for line in f if line.startswith(attr_name))
-
- print(f"Local Version: {local_version}")
-
- # Get online version from PyPI
- response = requests.get(f"https://pypi.org/pypi/{package_name}/json")
- online_version = response.json()['info']['version'] if response.status_code == 200 else None
- print(f"Online Version: {online_version or 'Not Found'}")
-
- # Determine if a new version should be published
- publish = False
- if online_version:
- local_ver = tuple(map(int, local_version.split('.')))
- online_ver = tuple(map(int, online_version.split('.')))
- major_diff = local_ver[0] - online_ver[0]
- minor_diff = local_ver[1] - online_ver[1]
- patch_diff = local_ver[2] - online_ver[2]
-
- publish = (
- (major_diff == 0 and minor_diff == 0 and 0 < patch_diff <= 2) or
- (major_diff == 0 and minor_diff == 1 and local_ver[2] == 0) or
- (major_diff == 1 and local_ver[1] == 0 and local_ver[2] == 0)
- )
- else:
- publish = True # First release
-
+ from actions.utils import check_pypi_version
+ local_version, online_version, publish = check_pypi_version()
os.system(f'echo "increment={publish}" >> $GITHUB_OUTPUT')
os.system(f'echo "current_tag=v{local_version}" >> $GITHUB_OUTPUT')
os.system(f'echo "previous_tag=v{online_version}" >> $GITHUB_OUTPUT')
-
if publish:
print('Ready to publish new version to PyPI ✅.')
id: check_pypi
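The inline version check deleted above now lives behind `check_pypi_version()`, imported from `actions.utils` (the Ultralytics `actions` helper package). As a reference, here is a minimal sketch of what such a helper could look like, reconstructed from the removed logic; the function name comes from the import above, but the real package's signature and internals may differ.

```python
import requests
import toml


def check_pypi_version(pyproject_toml="pyproject.toml"):
    """Return (local_version, online_version, publish) by comparing pyproject.toml against PyPI."""
    pyproject = toml.load(pyproject_toml)
    package_name = pyproject["project"]["name"]
    local_version = pyproject["project"].get("version", "dynamic")

    # If the version is dynamic, read it from the attribute named in [tool.setuptools.dynamic]
    if local_version == "dynamic":
        attr = pyproject["tool"]["setuptools"]["dynamic"]["version"]["attr"]
        module_path, attr_name = attr.rsplit(".", 1)
        with open(f"{module_path.replace('.', '/')}/__init__.py") as f:
            local_version = next(line.split("=")[1].strip().strip("'\"") for line in f if line.startswith(attr_name))

    # Compare against the latest version published on PyPI
    response = requests.get(f"https://pypi.org/pypi/{package_name}/json")
    online_version = response.json()["info"]["version"] if response.status_code == 200 else None

    publish = True  # publish if the package has never been released
    if online_version:
        local_ver = tuple(map(int, local_version.split(".")))
        online_ver = tuple(map(int, online_version.split(".")))
        major_diff, minor_diff, patch_diff = (a - b for a, b in zip(local_ver, online_ver))
        publish = (
            (major_diff == 0 and minor_diff == 0 and 0 < patch_diff <= 2)  # small patch bump
            or (major_diff == 0 and minor_diff == 1 and local_ver[2] == 0)  # single minor bump
            or (major_diff == 1 and local_ver[1] == 0 and local_ver[2] == 0)  # single major bump
        )
    return local_version, online_version, publish
```

The publish gate itself is unchanged: a release is only triggered automatically for small, sequential bumps (up to two patch versions, a single minor, or a single major).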
diff --git a/README.md b/README.md
index cce98e50b0..291977d609 100644
--- a/README.md
+++ b/README.md
@@ -116,7 +116,7 @@ See YOLO [Python Docs](https://docs.ultralytics.com/usage/python/) for more exam
YOLO11 [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/) and [Pose](https://docs.ultralytics.com/tasks/pose/) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset are available here, as well as YOLO11 [Classify](https://docs.ultralytics.com/tasks/classify/) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) dataset. [Track](https://docs.ultralytics.com/modes/track/) mode is available for all Detect, Segment and Pose models.
-
+
All [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
@@ -207,7 +207,7 @@ See [OBB Docs](https://docs.ultralytics.com/tasks/obb/) for usage examples with
##
Integrations
-Our key integrations with leading AI platforms extend the functionality of Ultralytics' offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics, in collaboration with [Roboflow](https://roboflow.com/?ref=ultralytics), ClearML, [Comet](https://bit.ly/yolov8-readme-comet), Neural Magic and [OpenVINO](https://docs.ultralytics.com/integrations/openvino/), can optimize your AI workflow.
+Our key integrations with leading AI platforms extend the functionality of Ultralytics' offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics, in collaboration with [W&B](https://docs.wandb.ai/guides/integrations/ultralytics/), [Comet](https://bit.ly/yolov8-readme-comet), [Roboflow](https://roboflow.com/?ref=ultralytics) and [OpenVINO](https://docs.ultralytics.com/integrations/openvino/), can optimize your AI workflow.
@@ -216,11 +216,11 @@ Our key integrations with leading AI platforms extend the functionality of Ultra
-| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW |
-| :--------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: |
-| Label and export your custom datasets directly to YOLO11 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLO11 using [ClearML](https://clear.ml/) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLO11 models, resume training, and interactively visualize and debug predictions | Run YOLO11 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |
+| Ultralytics HUB 🚀 | W&B | Comet ⭐ NEW | Neural Magic |
+| :----------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: |
+| Streamline YOLO workflows: Label, train, and deploy effortlessly with [Ultralytics HUB](https://ultralytics.com/hub). Try now! | Track experiments, hyperparameters, and results with [Weights & Biases](https://docs.wandb.ai/guides/integrations/ultralytics/) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLO11 models, resume training, and interactively visualize and debug predictions | Run YOLO11 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |
## Ultralytics HUB
diff --git a/README.zh-CN.md b/README.zh-CN.md
index ca49bb8ad1..ae2ded2ea9 100644
--- a/README.zh-CN.md
+++ b/README.zh-CN.md
@@ -116,7 +116,7 @@ path = model.export(format="onnx") # 返回导出模型的路径
YOLO11 [检测](https://docs.ultralytics.com/tasks/detect/)、[分割](https://docs.ultralytics.com/tasks/segment/) 和 [姿态](https://docs.ultralytics.com/tasks/pose/) 模型在 [COCO](https://docs.ultralytics.com/datasets/detect/coco/) 数据集上进行预训练,这些模型可在此处获得,此外还有在 [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) 数据集上预训练的 YOLO11 [分类](https://docs.ultralytics.com/tasks/classify/) 模型。所有检测、分割和姿态模型均支持 [跟踪](https://docs.ultralytics.com/modes/track/) 模式。
-
+
所有[模型](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models)在首次使用时自动从最新的 Ultralytics [发布](https://github.com/ultralytics/assets/releases)下载。
@@ -207,7 +207,7 @@ YOLO11 [检测](https://docs.ultralytics.com/tasks/detect/)、[分割](https://d
## 集成
-我们与领先的 AI 平台的关键集成扩展了 Ultralytics 产品的功能,增强了数据集标记、训练、可视化和模型管理等任务的能力。了解 Ultralytics 如何与 [Roboflow](https://roboflow.com/?ref=ultralytics)、ClearML、[Comet](https://bit.ly/yolov8-readme-comet)、Neural Magic 和 [OpenVINO](https://docs.ultralytics.com/integrations/openvino/) 合作,优化您的 AI 工作流程。
+我们与领先的 AI 平台的关键集成扩展了 Ultralytics 产品的功能,提升了数据集标注、训练、可视化和模型管理等任务。探索 Ultralytics 如何通过与 [W&B](https://docs.wandb.ai/guides/integrations/ultralytics/)、[Comet](https://bit.ly/yolov8-readme-comet)、[Roboflow](https://roboflow.com/?ref=ultralytics) 和 [OpenVINO](https://docs.ultralytics.com/integrations/openvino/) 的合作,优化您的 AI 工作流程。
@@ -216,11 +216,11 @@ YOLO11 [检测](https://docs.ultralytics.com/tasks/detect/)、[分割](https://d
-| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW |
-| :--------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: |
-| Label and export your custom datasets directly to YOLO11 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLO11 using [ClearML](https://clear.ml/) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLO11 models, resume training, and interactively visualize and debug predictions | Run YOLO11 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |
+| Ultralytics HUB 🚀 | W&B | Comet ⭐ 全新 | Neural Magic |
+| :------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------: |
+| 简化 YOLO 工作流程:通过 [Ultralytics HUB](https://ultralytics.com/hub) 轻松标注、训练和部署。立即试用! | 使用 [Weights & Biases](https://docs.wandb.ai/guides/integrations/ultralytics/) 跟踪实验、超参数和结果 | 永久免费,[Comet](https://bit.ly/yolov5-readme-comet) 允许您保存 YOLO11 模型、恢复训练,并交互式地可视化和调试预测结果 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) 运行 YOLO11 推理,速度提升至 6 倍 |
## Ultralytics HUB
diff --git a/docs/build_docs.py b/docs/build_docs.py
index 483a2dd051..281a85f513 100644
--- a/docs/build_docs.py
+++ b/docs/build_docs.py
@@ -199,7 +199,7 @@ def convert_plaintext_links_to_html(content):
for text_node in paragraph.find_all(string=True, recursive=False):
if text_node.parent.name not in {"a", "code"}: # Ignore links and code blocks
new_text = re.sub(
- r'(https?://[^\s()<>]+(?:\.[^\s()<>]+)+)(?\1',
str(text_node),
)
diff --git a/docs/en/guides/coral-edge-tpu-on-raspberry-pi.md b/docs/en/guides/coral-edge-tpu-on-raspberry-pi.md
index 5f6fceb781..e2d2a03f45 100644
--- a/docs/en/guides/coral-edge-tpu-on-raspberry-pi.md
+++ b/docs/en/guides/coral-edge-tpu-on-raspberry-pi.md
@@ -85,7 +85,7 @@ After installing the runtime, you need to plug in your Coral Edge TPU into a USB
To use the Edge TPU, you need to convert your model into a compatible format. It is recommended that you run export on Google Colab, x86_64 Linux machine, using the official [Ultralytics Docker container](docker-quickstart.md), or using [Ultralytics HUB](../hub/quickstart.md), since the Edge TPU compiler is not available on ARM. See the [Export Mode](../modes/export.md) for the available arguments.
-!!! note "Exporting the model"
+!!! example "Exporting the model"
=== "Python"
@@ -105,13 +105,27 @@ To use the Edge TPU, you need to convert your model into a compatible format. It
yolo export model=path/to/model.pt format=edgetpu # Export an official model or custom model
```
-The exported model will be saved in the `<model_name>_saved_model/` folder with the name `<model_name>_full_integer_quant_edgetpu.tflite`.
+The exported model will be saved in the `<model_name>_saved_model/` folder with the name `<model_name>_full_integer_quant_edgetpu.tflite`. It is important that your model file name ends with the suffix `_edgetpu.tflite`, otherwise Ultralytics will not recognize that it is an Edge TPU model.
## Running the model
-After exporting your model, you can run inference with it using the following code:
+Before you can run the model, you need to install the correct libraries.
-!!! note "Running the model"
+If `tensorflow` is installed, uninstall it with the following command:
+
+```bash
+pip uninstall tensorflow tensorflow-aarch64
+```
+
+Then install/update `tflite-runtime`:
+
+```bash
+pip install -U tflite-runtime
+```
+
+Now you can run inference using the following code:
+
+!!! example "Running the model"
=== "Python"
@@ -119,7 +133,7 @@ After exporting your model, you can run inference with it using the following co
from ultralytics import YOLO
# Load a model
- model = YOLO("path/to/edgetpu_model.tflite") # Load an official model or custom model
+            model = YOLO("path/to/<model_name>_full_integer_quant_edgetpu.tflite") # Load an official model or custom model
# Run Prediction
model.predict("path/to/source.png")
@@ -128,27 +142,30 @@ After exporting your model, you can run inference with it using the following co
=== "CLI"
```bash
- yolo predict model=path/to/edgetpu_model.tflite source=path/to/source.png # Load an official model or custom model
+    yolo predict model=path/to/<model_name>_full_integer_quant_edgetpu.tflite source=path/to/source.png # Load an official model or custom model
```
Find comprehensive information on the [Predict](../modes/predict.md) page for full prediction mode details.
-???+ warning "Important"
+!!! note "Inference with multiple Edge TPUs"
- You should run the model using `tflite-runtime` and not `tensorflow`.
- If `tensorflow` is installed, uninstall tensorflow with the following command:
+    If you have multiple Edge TPUs, you can use the following code to select a specific one.
- ```bash
- pip uninstall tensorflow tensorflow-aarch64
- ```
+ === "Python"
+
+ ```python
+ from ultralytics import YOLO
- Then install/update `tflite-runtime`:
+ # Load a model
+    model = YOLO("path/to/<model_name>_full_integer_quant_edgetpu.tflite") # Load an official model or custom model
- ```
- pip install -U tflite-runtime
- ```
+ # Run Prediction
+ model.predict("path/to/source.png") # Inference defaults to the first TPU
+
+ model.predict("path/to/source.png", device="tpu:0") # Select the first TPU
- If you want a `tflite-runtime` wheel for `tensorflow` 2.15.0 download it from [here](https://github.com/feranick/TFlite-builds/releases) and install it using `pip` or your package manager of choice.
+ model.predict("path/to/source.png", device="tpu:1") # Select the second TPU
+ ```
## FAQ
diff --git a/docs/en/guides/parking-management.md b/docs/en/guides/parking-management.md
index 6cf07e4847..b6140181ae 100644
--- a/docs/en/guides/parking-management.md
+++ b/docs/en/guides/parking-management.md
@@ -102,12 +102,10 @@ Parking management with [Ultralytics YOLO11](https://github.com/ultralytics/ultr
### Optional Arguments `ParkingManagement`
-| Name | Type | Default | Description |
-| ------------------------ | ------- | ------------- | -------------------------------------------------------------- |
-| `model` | `str` | `None` | Path to the YOLO11 model. |
-| `json_file` | `str` | `None` | Path to the JSON file, that have all parking coordinates data. |
-| `occupied_region_color` | `tuple` | `(0, 0, 255)` | RGB color for occupied regions. |
-| `available_region_color` | `tuple` | `(0, 255, 0)` | RGB color for available regions. |
+| Name | Type | Default | Description |
+| ----------- | ----- | ------- | -------------------------------------------------------------- |
+| `model` | `str` | `None` | Path to the YOLO11 model. |
+| `json_file` | `str` | `None`  | Path to the JSON file that contains all the parking coordinates data. |
### Arguments `model.track`
diff --git a/docs/en/help/CI.md b/docs/en/help/CI.md
index c63d678eb8..0f6b4c3a40 100644
--- a/docs/en/help/CI.md
+++ b/docs/en/help/CI.md
@@ -22,14 +22,18 @@ Here's a brief description of our CI actions:
Below is the table showing the status of these CI tests for our main repositories:
-| Repository | CI | Docker Deployment | Broken Links | CodeQL | PyPI and Docs Publishing |
-| --------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| [yolov3](https://github.com/ultralytics/yolov3) | [![YOLOv3 CI](https://github.com/ultralytics/yolov3/actions/workflows/ci-testing.yml/badge.svg)](https://github.com/ultralytics/yolov3/actions/workflows/ci-testing.yml) | [![Publish Docker Images](https://github.com/ultralytics/yolov3/actions/workflows/docker.yml/badge.svg)](https://github.com/ultralytics/yolov3/actions/workflows/docker.yml) | [![Check Broken links](https://github.com/ultralytics/yolov3/actions/workflows/links.yml/badge.svg)](https://github.com/ultralytics/yolov3/actions/workflows/links.yml) | [![CodeQL](https://github.com/ultralytics/yolov3/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/ultralytics/yolov3/actions/workflows/codeql-analysis.yml) | |
-| [yolov5](https://github.com/ultralytics/yolov5) | [![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml) | [![Publish Docker Images](https://github.com/ultralytics/yolov5/actions/workflows/docker.yml/badge.svg)](https://github.com/ultralytics/yolov5/actions/workflows/docker.yml) | [![Check Broken links](https://github.com/ultralytics/yolov5/actions/workflows/links.yml/badge.svg)](https://github.com/ultralytics/yolov5/actions/workflows/links.yml) | [![CodeQL](https://github.com/ultralytics/yolov5/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/ultralytics/yolov5/actions/workflows/codeql-analysis.yml) | |
-| [ultralytics](https://github.com/ultralytics/ultralytics) | [![ultralytics CI](https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml/badge.svg)](https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml) | [![Publish Docker Images](https://github.com/ultralytics/ultralytics/actions/workflows/docker.yaml/badge.svg)](https://github.com/ultralytics/ultralytics/actions/workflows/docker.yaml) | [![Check Broken links](https://github.com/ultralytics/ultralytics/actions/workflows/links.yml/badge.svg)](https://github.com/ultralytics/ultralytics/actions/workflows/links.yml) | [![CodeQL](https://github.com/ultralytics/ultralytics/actions/workflows/codeql.yaml/badge.svg)](https://github.com/ultralytics/ultralytics/actions/workflows/codeql.yaml) | [![Publish to PyPI and Deploy Docs](https://github.com/ultralytics/ultralytics/actions/workflows/publish.yml/badge.svg)](https://github.com/ultralytics/ultralytics/actions/workflows/publish.yml) |
-| [hub-sdk](https://github.com/ultralytics/hub-sdk) | [![HUB-SDK CI](https://github.com/ultralytics/hub-sdk/actions/workflows/ci.yml/badge.svg)](https://github.com/ultralytics/hub-sdk/actions/workflows/ci.yml) | | [![Check Broken links](https://github.com/ultralytics/hub-sdk/actions/workflows/links.yml/badge.svg)](https://github.com/ultralytics/hub-sdk/actions/workflows/links.yml) | [![CodeQL](https://github.com/ultralytics/hub-sdk/actions/workflows/codeql.yaml/badge.svg)](https://github.com/ultralytics/hub-sdk/actions/workflows/codeql.yaml) | [![Publish to PyPI](https://github.com/ultralytics/hub-sdk/actions/workflows/publish.yml/badge.svg)](https://github.com/ultralytics/hub-sdk/actions/workflows/publish.yml) |
-| [hub](https://github.com/ultralytics/hub) | [![HUB CI](https://github.com/ultralytics/hub/actions/workflows/ci.yaml/badge.svg)](https://github.com/ultralytics/hub/actions/workflows/ci.yaml) | | [![Check Broken links](https://github.com/ultralytics/hub/actions/workflows/links.yml/badge.svg)](https://github.com/ultralytics/hub/actions/workflows/links.yml) | | |
-| [docs](https://github.com/ultralytics/docs) | | | [![Check Broken links](https://github.com/ultralytics/docs/actions/workflows/links.yml/badge.svg)](https://github.com/ultralytics/docs/actions/workflows/links.yml)[![Check Domains](https://github.com/ultralytics/docs/actions/workflows/check_domains.yml/badge.svg)](https://github.com/ultralytics/docs/actions/workflows/check_domains.yml) | | [![pages-build-deployment](https://github.com/ultralytics/docs/actions/workflows/pages/pages-build-deployment/badge.svg)](https://github.com/ultralytics/docs/actions/workflows/pages/pages-build-deployment) |
+| Repository | CI | Docker Deployment | Broken Links | CodeQL | PyPI and Docs Publishing |
+| --------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [yolov3](https://github.com/ultralytics/yolov3) | [![YOLOv3 CI](https://github.com/ultralytics/yolov3/actions/workflows/ci-testing.yml/badge.svg)](https://github.com/ultralytics/yolov3/actions/workflows/ci-testing.yml) | [![Publish Docker Images](https://github.com/ultralytics/yolov3/actions/workflows/docker.yml/badge.svg)](https://github.com/ultralytics/yolov3/actions/workflows/docker.yml) | [![Check Broken links](https://github.com/ultralytics/yolov3/actions/workflows/links.yml/badge.svg)](https://github.com/ultralytics/yolov3/actions/workflows/links.yml) | [![CodeQL](https://github.com/ultralytics/yolov3/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/ultralytics/yolov3/actions/workflows/codeql-analysis.yml) | |
+| [yolov5](https://github.com/ultralytics/yolov5) | [![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml) | [![Publish Docker Images](https://github.com/ultralytics/yolov5/actions/workflows/docker.yml/badge.svg)](https://github.com/ultralytics/yolov5/actions/workflows/docker.yml) | [![Check Broken links](https://github.com/ultralytics/yolov5/actions/workflows/links.yml/badge.svg)](https://github.com/ultralytics/yolov5/actions/workflows/links.yml) | [![CodeQL](https://github.com/ultralytics/yolov5/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/ultralytics/yolov5/actions/workflows/codeql-analysis.yml) | |
+| [ultralytics](https://github.com/ultralytics/ultralytics) | [![ultralytics CI](https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml/badge.svg)](https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml) | [![Publish Docker Images](https://github.com/ultralytics/ultralytics/actions/workflows/docker.yaml/badge.svg)](https://github.com/ultralytics/ultralytics/actions/workflows/docker.yaml) | [![Check Broken links](https://github.com/ultralytics/ultralytics/actions/workflows/links.yml/badge.svg)](https://github.com/ultralytics/ultralytics/actions/workflows/links.yml) | [![CodeQL](https://github.com/ultralytics/ultralytics/actions/workflows/codeql.yaml/badge.svg)](https://github.com/ultralytics/ultralytics/actions/workflows/codeql.yaml) | [![Publish to PyPI and Deploy Docs](https://github.com/ultralytics/ultralytics/actions/workflows/publish.yml/badge.svg)](https://github.com/ultralytics/ultralytics/actions/workflows/publish.yml) |
+| [hub-sdk](https://github.com/ultralytics/hub-sdk) | [![HUB-SDK CI](https://github.com/ultralytics/hub-sdk/actions/workflows/ci.yml/badge.svg)](https://github.com/ultralytics/hub-sdk/actions/workflows/ci.yml) | | [![Check Broken links](https://github.com/ultralytics/hub-sdk/actions/workflows/links.yml/badge.svg)](https://github.com/ultralytics/hub-sdk/actions/workflows/links.yml) | [![CodeQL](https://github.com/ultralytics/hub-sdk/actions/workflows/codeql.yaml/badge.svg)](https://github.com/ultralytics/hub-sdk/actions/workflows/codeql.yaml) | [![Publish to PyPI](https://github.com/ultralytics/hub-sdk/actions/workflows/publish.yml/badge.svg)](https://github.com/ultralytics/hub-sdk/actions/workflows/publish.yml) |
+| [hub](https://github.com/ultralytics/hub) | [![HUB CI](https://github.com/ultralytics/hub/actions/workflows/ci.yaml/badge.svg)](https://github.com/ultralytics/hub/actions/workflows/ci.yaml) | | [![Check Broken links](https://github.com/ultralytics/hub/actions/workflows/links.yml/badge.svg)](https://github.com/ultralytics/hub/actions/workflows/links.yml) | | |
+| [mkdocs](https://github.com/ultralytics/mkdocs) | [![Ultralytics Actions](https://github.com/ultralytics/mkdocs/actions/workflows/format.yml/badge.svg)](https://github.com/ultralytics/mkdocs/actions/workflows/format.yml) | | | [![CodeQL](https://github.com/ultralytics/mkdocs/actions/workflows/github-code-scanning/codeql/badge.svg)](https://github.com/ultralytics/mkdocs/actions/workflows/github-code-scanning/codeql) | [![Publish to PyPI](https://github.com/ultralytics/mkdocs/actions/workflows/publish.yml/badge.svg)](https://github.com/ultralytics/mkdocs/actions/workflows/publish.yml) |
+| [thop](https://github.com/ultralytics/thop) | [![Ultralytics Actions](https://github.com/ultralytics/thop/actions/workflows/format.yml/badge.svg)](https://github.com/ultralytics/thop/actions/workflows/format.yml) | | | [![CodeQL](https://github.com/ultralytics/thop/actions/workflows/github-code-scanning/codeql/badge.svg)](https://github.com/ultralytics/thop/actions/workflows/github-code-scanning/codeql) | [![Publish to PyPI](https://github.com/ultralytics/thop/actions/workflows/publish.yml/badge.svg)](https://github.com/ultralytics/thop/actions/workflows/publish.yml) |
+| [actions](https://github.com/ultralytics/actions) | [![Ultralytics Actions](https://github.com/ultralytics/actions/actions/workflows/format.yml/badge.svg)](https://github.com/ultralytics/actions/actions/workflows/format.yml) | | | [![CodeQL](https://github.com/ultralytics/actions/actions/workflows/github-code-scanning/codeql/badge.svg)](https://github.com/ultralytics/actions/actions/workflows/github-code-scanning/codeql) | [![Publish to PyPI](https://github.com/ultralytics/actions/actions/workflows/publish.yml/badge.svg)](https://github.com/ultralytics/actions/actions/workflows/publish.yml) |
+| [docs](https://github.com/ultralytics/docs) | [![Ultralytics Actions](https://github.com/ultralytics/docs/actions/workflows/format.yml/badge.svg)](https://github.com/ultralytics/docs/actions/workflows/format.yml) | | [![Check Broken links](https://github.com/ultralytics/docs/actions/workflows/links.yml/badge.svg)](https://github.com/ultralytics/docs/actions/workflows/links.yml)[![Check Domains](https://github.com/ultralytics/docs/actions/workflows/check_domains.yml/badge.svg)](https://github.com/ultralytics/docs/actions/workflows/check_domains.yml) | | [![pages-build-deployment](https://github.com/ultralytics/docs/actions/workflows/pages/pages-build-deployment/badge.svg)](https://github.com/ultralytics/docs/actions/workflows/pages/pages-build-deployment) |
+| [handbook](https://github.com/ultralytics/handbook) | [![Ultralytics Actions](https://github.com/ultralytics/handbook/actions/workflows/format.yml/badge.svg)](https://github.com/ultralytics/handbook/actions/workflows/format.yml) | | [![Check Broken links](https://github.com/ultralytics/handbook/actions/workflows/links.yml/badge.svg)](https://github.com/ultralytics/handbook/actions/workflows/links.yml) | | [![pages-build-deployment](https://github.com/ultralytics/handbook/actions/workflows/pages/pages-build-deployment/badge.svg)](https://github.com/ultralytics/handbook/actions/workflows/pages/pages-build-deployment) |
Each badge shows the status of the last run of the corresponding CI test on the `main` branch of the respective repository. If a test fails, the badge will display a "failing" status, and if it passes, it will display a "passing" status.
diff --git a/docs/en/models/yolo-world.md b/docs/en/models/yolo-world.md
index 96a6e0b606..a9ea22a780 100644
--- a/docs/en/models/yolo-world.md
+++ b/docs/en/models/yolo-world.md
@@ -320,7 +320,7 @@ This approach provides a powerful means of customizing state-of-the-art object d
## Citations and Acknowledgements
-We extend our gratitude to the [Tencent AILab Computer Vision Center](https://ai.tencent.com/) for their pioneering work in real-time open-vocabulary object detection with YOLO-World:
+We extend our gratitude to the [Tencent AILab Computer Vision Center](https://www.tencent.com/) for their pioneering work in real-time open-vocabulary object detection with YOLO-World:
!!! quote ""
diff --git a/docs/en/yolov5/index.md b/docs/en/yolov5/index.md
index 17be5e24a0..ec52007485 100644
--- a/docs/en/yolov5/index.md
+++ b/docs/en/yolov5/index.md
@@ -22,11 +22,11 @@ keywords: YOLOv5, Ultralytics, object detection, computer vision, deep learning,
-Welcome to the Ultralytics' YOLOv5🚀 Documentation! YOLOv5, the fifth iteration of the revolutionary "You Only Look Once" [object detection](https://www.ultralytics.com/glossary/object-detection) model, is designed to deliver high-speed, high-accuracy results in real-time.
+Welcome to the Ultralytics YOLOv5 🚀 Documentation! YOLOv5, the fifth iteration of the revolutionary "You Only Look Once" object detection model, is designed to deliver high-speed, high-accuracy results in real-time.
-Built on PyTorch, this powerful [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) framework has garnered immense popularity for its versatility, ease of use, and high performance. Our documentation guides you through the installation process, explains the architectural nuances of the model, showcases various use-cases, and provides a series of detailed tutorials. These resources will help you harness the full potential of YOLOv5 for your [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) projects. Let's get started!
+Built on PyTorch, this powerful deep learning framework has garnered immense popularity for its versatility, ease of use, and high performance. Our documentation guides you through the installation process, explains the architectural nuances of the model, showcases various use-cases, and provides a series of detailed tutorials. These resources will help you harness the full potential of YOLOv5 for your computer vision projects. Let's get started!
diff --git a/docs/mkdocs_github_authors.yaml b/docs/mkdocs_github_authors.yaml
index 0e0423c248..2e20921385 100644
--- a/docs/mkdocs_github_authors.yaml
+++ b/docs/mkdocs_github_authors.yaml
@@ -76,6 +76,9 @@
79740115+0xSynapse@users.noreply.github.com:
avatar: https://avatars.githubusercontent.com/u/79740115?v=4
username: 0xSynapse
+91465467+lalayants@users.noreply.github.com:
+ avatar: https://avatars.githubusercontent.com/u/91465467?v=4
+ username: lalayants
Francesco.mttl@gmail.com:
avatar: https://avatars.githubusercontent.com/u/3855193?v=4
username: ambitious-octopus
diff --git a/mkdocs.yml b/mkdocs.yml
index 4071e81af9..4796c29dec 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -556,6 +556,7 @@ nav:
- utils: reference/nn/modules/utils.md
- tasks: reference/nn/tasks.md
- solutions:
+ - solutions: reference/solutions/solutions.md
- ai_gym: reference/solutions/ai_gym.md
- analytics: reference/solutions/analytics.md
- distance_calculation: reference/solutions/distance_calculation.md
diff --git a/pyproject.toml b/pyproject.toml
index 3fb80e62af..f6cb23204a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -26,7 +26,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "ultralytics"
dynamic = ["version"]
-description = "Ultralytics YOLO for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification."
+description = "Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification."
readme = "README.md"
requires-python = ">=3.8"
license = { "text" = "AGPL-3.0" }
diff --git a/tests/test_solutions.py b/tests/test_solutions.py
index d3ba2d5fc2..e01da6d818 100644
--- a/tests/test_solutions.py
+++ b/tests/test_solutions.py
@@ -17,10 +17,15 @@ def test_major_solutions():
cap = cv2.VideoCapture("solutions_ci_demo.mp4")
assert cap.isOpened(), "Error reading video file"
region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360)]
- counter = solutions.ObjectCounter(region=region_points, model="yolo11n.pt", show=False)
- heatmap = solutions.Heatmap(colormap=cv2.COLORMAP_PARULA, model="yolo11n.pt", show=False)
- speed = solutions.SpeedEstimator(region=region_points, model="yolo11n.pt", show=False)
- queue = solutions.QueueManager(region=region_points, model="yolo11n.pt", show=False)
+ counter = solutions.ObjectCounter(region=region_points, model="yolo11n.pt", show=False) # Test object counter
+ heatmap = solutions.Heatmap(colormap=cv2.COLORMAP_PARULA, model="yolo11n.pt", show=False) # Test heatmaps
+    speed = solutions.SpeedEstimator(region=region_points, model="yolo11n.pt", show=False) # Test speed estimation
+    queue = solutions.QueueManager(region=region_points, model="yolo11n.pt", show=False) # Test queue manager
+    line_analytics = solutions.Analytics(analytics_type="line", model="yolo11n.pt", show=False) # line analytics
+    pie_analytics = solutions.Analytics(analytics_type="pie", model="yolo11n.pt", show=False) # pie analytics
+    bar_analytics = solutions.Analytics(analytics_type="bar", model="yolo11n.pt", show=False) # bar analytics
+    area_analytics = solutions.Analytics(analytics_type="area", model="yolo11n.pt", show=False) # area analytics
+ frame_count = 0 # Required for analytics
while cap.isOpened():
success, im0 = cap.read()
if not success:
@@ -30,24 +35,23 @@ def test_major_solutions():
_ = heatmap.generate_heatmap(original_im0.copy())
_ = speed.estimate_speed(original_im0.copy())
_ = queue.process_queue(original_im0.copy())
+ _ = line_analytics.process_data(original_im0.copy(), frame_count)
+ _ = pie_analytics.process_data(original_im0.copy(), frame_count)
+ _ = bar_analytics.process_data(original_im0.copy(), frame_count)
+ _ = area_analytics.process_data(original_im0.copy(), frame_count)
cap.release()
- cv2.destroyAllWindows()
-
-@pytest.mark.slow
-def test_aigym():
- """Test the workouts monitoring solution."""
+ # Test workouts monitoring
safe_download(url=WORKOUTS_SOLUTION_DEMO)
- cap = cv2.VideoCapture("solution_ci_pose_demo.mp4")
- assert cap.isOpened(), "Error reading video file"
- gym = solutions.AIGym(line_width=2, kpts=[5, 11, 13])
- while cap.isOpened():
- success, im0 = cap.read()
+ cap1 = cv2.VideoCapture("solution_ci_pose_demo.mp4")
+ assert cap1.isOpened(), "Error reading video file"
+ gym = solutions.AIGym(line_width=2, kpts=[5, 11, 13], show=False)
+ while cap1.isOpened():
+ success, im0 = cap1.read()
if not success:
break
_ = gym.monitor(im0)
- cap.release()
- cv2.destroyAllWindows()
+ cap1.release()
@pytest.mark.slow
diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py
index 06ee07e308..9c0a6f3943 100644
--- a/ultralytics/__init__.py
+++ b/ultralytics/__init__.py
@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
-__version__ = "8.3.13"
+__version__ = "8.3.16"
import os
diff --git a/ultralytics/cfg/__init__.py b/ultralytics/cfg/__init__.py
index c8d8f44f02..e73610aaf5 100644
--- a/ultralytics/cfg/__init__.py
+++ b/ultralytics/cfg/__init__.py
@@ -1,6 +1,5 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
-import contextlib
import shutil
import subprocess
import sys
diff --git a/ultralytics/cfg/solutions/default.yaml b/ultralytics/cfg/solutions/default.yaml
index e4e1b845a0..a353fd2a21 100644
--- a/ultralytics/cfg/solutions/default.yaml
+++ b/ultralytics/cfg/solutions/default.yaml
@@ -15,3 +15,4 @@ down_angle: 90 # Workouts down_angle for counts, 90 is default value. You can ch
kpts: [6, 8, 10] # Keypoints for workouts monitoring, i.e. If you want to consider keypoints for pushups that have mostly values of [6, 8, 10].
colormap: # Colormap for heatmap, Only OPENCV supported colormaps can be used. By default COLORMAP_PARULA will be used for visualization.
analytics_type: "line" # Analytics type i.e "line", "pie", "bar" or "area" charts. By default, "line" analytics will be used for processing.
+json_file: # Parking system regions file path.
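The `json_file` entry added to the solutions defaults above, together with the trimmed `ParkingManagement` argument table earlier in this diff, means the parking solution is now configured with just a model and a regions file. A minimal usage sketch, assuming a regions JSON produced by the parking annotator and a local video; the file names are placeholders and the `process_data` call follows the solution's usual frame-by-frame pattern rather than anything added in this diff.

```python
import cv2

from ultralytics import solutions

# Placeholder paths: "bounding_boxes.json" would come from the parking-regions annotation step.
parking_manager = solutions.ParkingManagement(model="yolo11n.pt", json_file="bounding_boxes.json")

cap = cv2.VideoCapture("parking_lot.mp4")
assert cap.isOpened(), "Error reading video file"
while cap.isOpened():
    success, frame = cap.read()
    if not success:
        break
    frame = parking_manager.process_data(frame)  # annotate occupied vs. available regions
cap.release()
cv2.destroyAllWindows()
```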
diff --git a/ultralytics/data/split_dota.py b/ultralytics/data/split_dota.py
index f9acffe9bb..b745b3662c 100644
--- a/ultralytics/data/split_dota.py
+++ b/ultralytics/data/split_dota.py
@@ -13,9 +13,6 @@ from tqdm import tqdm
from ultralytics.data.utils import exif_size, img2label_paths
from ultralytics.utils.checks import check_requirements
-check_requirements("shapely")
-from shapely.geometry import Polygon
-
def bbox_iof(polygon1, bbox2, eps=1e-6):
"""
@@ -33,6 +30,9 @@ def bbox_iof(polygon1, bbox2, eps=1e-6):
Polygon format: [x1, y1, x2, y2, x3, y3, x4, y4].
Bounding box format: [x_min, y_min, x_max, y_max].
"""
+ check_requirements("shapely")
+ from shapely.geometry import Polygon
+
polygon1 = polygon1.reshape(-1, 4, 2)
lt_point = np.min(polygon1, axis=-2) # left-top
rb_point = np.max(polygon1, axis=-2) # right-bottom
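Moving `check_requirements("shapely")` and the `Polygon` import inside `bbox_iof` makes `shapely` a lazy dependency: importing `ultralytics.data.split_dota` no longer requires it, and the check only runs when the function is called. A small sketch of such a call, with array shapes assumed from the polygon and bounding-box formats documented above.

```python
import numpy as np

from ultralytics.data.split_dota import bbox_iof

# One quadrilateral [x1, y1, x2, y2, x3, y3, x4, y4] and one box [x_min, y_min, x_max, y_max].
polygons = np.array([[0, 0, 100, 0, 100, 100, 0, 100]], dtype=np.float32)
boxes = np.array([[50, 50, 150, 150]], dtype=np.float32)

iof = bbox_iof(polygons, boxes)  # shapely is checked and imported only inside this call
print(iof)  # intersection-over-foreground of each polygon with each box
```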
diff --git a/ultralytics/engine/exporter.py b/ultralytics/engine/exporter.py
index e9aa06cdc6..a42e0bb431 100644
--- a/ultralytics/engine/exporter.py
+++ b/ultralytics/engine/exporter.py
@@ -440,7 +440,7 @@ class Exporter:
"""YOLO ONNX export."""
requirements = ["onnx>=1.12.0"]
if self.args.simplify:
- requirements += ["onnxslim==0.1.34", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
+ requirements += ["onnxslim", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
check_requirements(requirements)
import onnx # noqa
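Dropping the `onnxslim==0.1.34` pin means `simplify=True` now installs the latest compatible `onnxslim` release at export time. A quick way to exercise this path (standard Ultralytics export API; the model file is a placeholder):

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")  # any detection model works here
# simplify=True triggers the onnxslim/onnxruntime requirement check shown above
model.export(format="onnx", simplify=True)
```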
diff --git a/ultralytics/nn/autobackend.py b/ultralytics/nn/autobackend.py
index 816f916ba7..8e36d54450 100644
--- a/ultralytics/nn/autobackend.py
+++ b/ultralytics/nn/autobackend.py
@@ -113,7 +113,7 @@ class AutoBackend(nn.Module):
fp16 &= pt or jit or onnx or xml or engine or nn_module or triton # FP16
nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH)
stride = 32 # default stride
- model, metadata = None, None
+ model, metadata, task = None, None, None
# Set device
cuda = torch.cuda.is_available() and device.type != "cpu" # use CUDA
@@ -334,11 +334,15 @@ class AutoBackend(nn.Module):
Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate
if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime
- LOGGER.info(f"Loading {w} for TensorFlow Lite Edge TPU inference...")
+ device = device[3:] if str(device).startswith("tpu") else ":0"
+ LOGGER.info(f"Loading {w} on device {device[1:]} for TensorFlow Lite Edge TPU inference...")
delegate = {"Linux": "libedgetpu.so.1", "Darwin": "libedgetpu.1.dylib", "Windows": "edgetpu.dll"}[
platform.system()
]
- interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])
+ interpreter = Interpreter(
+ model_path=w,
+ experimental_delegates=[load_delegate(delegate, options={"device": device})],
+ )
else: # TFLite
LOGGER.info(f"Loading {w} for TensorFlow Lite inference...")
interpreter = Interpreter(model_path=w) # load TFLite model
@@ -503,7 +507,7 @@ class AutoBackend(nn.Module):
# TensorRT
elif self.engine:
- if self.dynamic or im.shape != self.bindings["images"].shape:
+ if self.dynamic and im.shape != self.bindings["images"].shape:
if self.is_trt10:
self.context.set_input_shape("images", im.shape)
self.bindings["images"] = self.bindings["images"]._replace(shape=im.shape)
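The Edge TPU branch above now parses a `tpu:N` device string into the `options={"device": ...}` argument of `load_delegate`, which is what backs the `device="tpu:0"` / `device="tpu:1"` examples added to the Coral guide. A tiny illustration of just the string handling, mirroring the added line; it is not the full AutoBackend loader.

```python
def edgetpu_device_option(device):
    """Map a 'tpu:N' device string to the option string passed to tf.lite load_delegate."""
    device = str(device)
    return device[3:] if device.startswith("tpu") else ":0"  # 'tpu:1' -> ':1', anything else -> first TPU


assert edgetpu_device_option("tpu:0") == ":0"
assert edgetpu_device_option("tpu:1") == ":1"
assert edgetpu_device_option("cpu") == ":0"
```

Note the same hunk also tightens the TensorRT path: bindings are now reshaped only when the engine is dynamic and the input shape has actually changed.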
diff --git a/ultralytics/solutions/ai_gym.py b/ultralytics/solutions/ai_gym.py
index 26f22d7032..0d131bd9d6 100644
--- a/ultralytics/solutions/ai_gym.py
+++ b/ultralytics/solutions/ai_gym.py
@@ -1,16 +1,40 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
-from ultralytics.solutions.solutions import BaseSolution # Import a parent class
+from ultralytics.solutions.solutions import BaseSolution
from ultralytics.utils.plotting import Annotator
class AIGym(BaseSolution):
- """A class to manage the gym steps of people in a real-time video stream based on their poses."""
+ """
+    A class to monitor gym exercises of people in a real-time video stream based on their poses.
+
+ This class extends BaseSolution to monitor workouts using YOLO pose estimation models. It tracks and counts
+ repetitions of exercises based on predefined angle thresholds for up and down positions.
+
+ Attributes:
+ count (List[int]): Repetition counts for each detected person.
+ angle (List[float]): Current angle of the tracked body part for each person.
+ stage (List[str]): Current exercise stage ('up', 'down', or '-') for each person.
+ initial_stage (str | None): Initial stage of the exercise.
+ up_angle (float): Angle threshold for considering the 'up' position of an exercise.
+ down_angle (float): Angle threshold for considering the 'down' position of an exercise.
+ kpts (List[int]): Indices of keypoints used for angle calculation.
+ lw (int): Line width for drawing annotations.
+ annotator (Annotator): Object for drawing annotations on the image.
+
+ Methods:
+ monitor: Processes a frame to detect poses, calculate angles, and count repetitions.
+
+ Examples:
+ >>> gym = AIGym(model="yolov8n-pose.pt")
+ >>> image = cv2.imread("gym_scene.jpg")
+ >>> processed_image = gym.monitor(image)
+ >>> cv2.imshow("Processed Image", processed_image)
+ >>> cv2.waitKey(0)
+ """
def __init__(self, **kwargs):
- """Initialization function for AiGYM class, a child class of BaseSolution class, can be used for workouts
- monitoring.
- """
+ """Initializes AIGym for workout monitoring using pose estimation and predefined angles."""
# Check if the model name ends with '-pose'
if "model" in kwargs and "-pose" not in kwargs["model"]:
kwargs["model"] = "yolo11n-pose.pt"
@@ -31,12 +55,22 @@ class AIGym(BaseSolution):
def monitor(self, im0):
"""
- Monitor the workouts using Ultralytics YOLOv8 Pose Model: https://docs.ultralytics.com/tasks/pose/.
+ Monitors workouts using Ultralytics YOLO Pose Model.
+
+ This function processes an input image to track and analyze human poses for workout monitoring. It uses
+ the YOLO Pose model to detect keypoints, estimate angles, and count repetitions based on predefined
+ angle thresholds.
Args:
- im0 (ndarray): The input image that will be used for processing
- Returns
- im0 (ndarray): The processed image for more usage
+ im0 (ndarray): Input image for processing.
+
+ Returns:
+ (ndarray): Processed image with annotations for workout monitoring.
+
+ Examples:
+ >>> gym = AIGym()
+ >>> image = cv2.imread("workout.jpg")
+ >>> processed_image = gym.monitor(image)
"""
# Extract tracks
tracks = self.model.track(source=im0, persist=True, classes=self.CFG["classes"])[0]
diff --git a/ultralytics/solutions/analytics.py b/ultralytics/solutions/analytics.py
index 38489827af..aed7beed94 100644
--- a/ultralytics/solutions/analytics.py
+++ b/ultralytics/solutions/analytics.py
@@ -12,10 +12,41 @@ from ultralytics.solutions.solutions import BaseSolution # Import a parent clas
class Analytics(BaseSolution):
- """A class to create and update various types of charts (line, bar, pie, area) for visual analytics."""
+ """
+ A class for creating and updating various types of charts for visual analytics.
+
+ This class extends BaseSolution to provide functionality for generating line, bar, pie, and area charts
+ based on object detection and tracking data.
+
+ Attributes:
+ type (str): The type of analytics chart to generate ('line', 'bar', 'pie', or 'area').
+ x_label (str): Label for the x-axis.
+ y_label (str): Label for the y-axis.
+ bg_color (str): Background color of the chart frame.
+ fg_color (str): Foreground color of the chart frame.
+ title (str): Title of the chart window.
+ max_points (int): Maximum number of data points to display on the chart.
+ fontsize (int): Font size for text display.
+ color_cycle (cycle): Cyclic iterator for chart colors.
+ total_counts (int): Total count of detected objects (used for line charts).
+ clswise_count (Dict[str, int]): Dictionary for class-wise object counts.
+ fig (Figure): Matplotlib figure object for the chart.
+ ax (Axes): Matplotlib axes object for the chart.
+ canvas (FigureCanvas): Canvas for rendering the chart.
+
+ Methods:
+ process_data: Processes image data and updates the chart.
+ update_graph: Updates the chart with new data points.
+
+ Examples:
+ >>> analytics = Analytics(analytics_type="line")
+ >>> frame = cv2.imread("image.jpg")
+ >>> processed_frame = analytics.process_data(frame, frame_number=1)
+ >>> cv2.imshow("Analytics", processed_frame)
+ """
def __init__(self, **kwargs):
- """Initialize the Analytics class with various chart types."""
+ """Initialize Analytics class with various chart types for visual data representation."""
super().__init__(**kwargs)
self.type = self.CFG["analytics_type"] # extract type of analytics
@@ -31,8 +62,8 @@ class Analytics(BaseSolution):
figsize = (19.2, 10.8) # Set output image size 1920 * 1080
self.color_cycle = cycle(["#DD00BA", "#042AFF", "#FF4447", "#7D24FF", "#BD00FF"])
- self.total_counts = 0 # count variable for storing total counts i.e for line
- self.clswise_count = {} # dictionary for classwise counts
+ self.total_counts = 0 # count variable for storing total counts i.e. for line
+ self.clswise_count = {} # dictionary for class-wise counts
# Ensure line and area chart
if self.type in {"line", "area"}:
@@ -48,15 +79,28 @@ class Analytics(BaseSolution):
self.canvas = FigureCanvas(self.fig) # Set common axis properties
self.ax.set_facecolor(self.bg_color)
self.color_mapping = {}
- self.ax.axis("equal") if type == "pie" else None # Ensure pie chart is circular
+
+ if self.type == "pie": # Ensure pie chart is circular
+ self.ax.axis("equal")
def process_data(self, im0, frame_number):
"""
- Process the image data, run object tracking.
+ Processes image data and runs object tracking to update analytics charts.
Args:
- im0 (ndarray): Input image for processing.
- frame_number (int): Video frame # for plotting the data.
+ im0 (np.ndarray): Input image for processing.
+ frame_number (int): Video frame number for plotting the data.
+
+ Returns:
+ (np.ndarray): Processed image with updated analytics chart.
+
+ Raises:
+ ModuleNotFoundError: If an unsupported chart type is specified.
+
+ Examples:
+ >>> analytics = Analytics(analytics_type="line")
+ >>> frame = np.zeros((480, 640, 3), dtype=np.uint8)
+ >>> processed_frame = analytics.process_data(frame, frame_number=1)
"""
self.extract_tracks(im0) # Extract tracks
@@ -79,13 +123,22 @@ class Analytics(BaseSolution):
def update_graph(self, frame_number, count_dict=None, plot="line"):
"""
- Update the graph (line or area) with new data for single or multiple classes.
+ Updates the graph with new data for single or multiple classes.
Args:
frame_number (int): The current frame number.
- count_dict (dict, optional): Dictionary with class names as keys and counts as values for multiple classes.
- If None, updates a single line graph.
- plot (str): Type of the plot i.e. line, bar or area.
+ count_dict (Dict[str, int] | None): Dictionary with class names as keys and counts as values for multiple
+ classes. If None, updates a single line graph.
+ plot (str): Type of the plot. Options are 'line', 'bar', 'pie', or 'area'.
+
+ Returns:
+ (np.ndarray): Updated image containing the graph.
+
+ Examples:
+ >>> analytics = Analytics()
+ >>> frame_number = 10
+ >>> count_dict = {"person": 5, "car": 3}
+ >>> updated_image = analytics.update_graph(frame_number, count_dict, plot="bar")
"""
if count_dict is None:
# Single line update
diff --git a/ultralytics/solutions/distance_calculation.py b/ultralytics/solutions/distance_calculation.py
index 773b6086da..608aa97d7e 100644
--- a/ultralytics/solutions/distance_calculation.py
+++ b/ultralytics/solutions/distance_calculation.py
@@ -4,15 +4,41 @@ import math
import cv2
-from ultralytics.solutions.solutions import BaseSolution # Import a parent class
+from ultralytics.solutions.solutions import BaseSolution
from ultralytics.utils.plotting import Annotator, colors
class DistanceCalculation(BaseSolution):
- """A class to calculate distance between two objects in a real-time video stream based on their tracks."""
+ """
+ A class to calculate distance between two objects in a real-time video stream based on their tracks.
+
+ This class extends BaseSolution to provide functionality for selecting objects and calculating the distance
+ between them in a video stream using YOLO object detection and tracking.
+
+ Attributes:
+ left_mouse_count (int): Counter for left mouse button clicks.
+ selected_boxes (Dict[int, List[float]]): Dictionary to store selected bounding boxes and their track IDs.
+ annotator (Annotator): An instance of the Annotator class for drawing on the image.
+ boxes (List[List[float]]): List of bounding boxes for detected objects.
+ track_ids (List[int]): List of track IDs for detected objects.
+ clss (List[int]): List of class indices for detected objects.
+ names (List[str]): List of class names that the model can detect.
+ centroids (List[List[int]]): List to store centroids of selected bounding boxes.
+
+ Methods:
+ mouse_event_for_distance: Handles mouse events for selecting objects in the video stream.
+ calculate: Processes video frames and calculates the distance between selected objects.
+
+ Examples:
+ >>> distance_calc = DistanceCalculation()
+ >>> frame = cv2.imread("frame.jpg")
+ >>> processed_frame = distance_calc.calculate(frame)
+ >>> cv2.imshow("Distance Calculation", processed_frame)
+ >>> cv2.waitKey(0)
+ """
def __init__(self, **kwargs):
- """Initializes the DistanceCalculation class with the given parameters."""
+ """Initializes the DistanceCalculation class for measuring object distances in video streams."""
super().__init__(**kwargs)
# Mouse event information
@@ -21,14 +47,18 @@ class DistanceCalculation(BaseSolution):
def mouse_event_for_distance(self, event, x, y, flags, param):
"""
- Handles mouse events to select regions in a real-time video stream.
+ Handles mouse events to select regions in a real-time video stream for distance calculation.
Args:
- event (int): Type of mouse event (e.g., cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONDOWN, etc.).
+ event (int): Type of mouse event (e.g., cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONDOWN).
x (int): X-coordinate of the mouse pointer.
y (int): Y-coordinate of the mouse pointer.
- flags (int): Flags associated with the event (e.g., cv2.EVENT_FLAG_CTRLKEY, cv2.EVENT_FLAG_SHIFTKEY, etc.).
- param (dict): Additional parameters passed to the function.
+ flags (int): Flags associated with the event (e.g., cv2.EVENT_FLAG_CTRLKEY, cv2.EVENT_FLAG_SHIFTKEY).
+ param (Dict): Additional parameters passed to the function.
+
+ Examples:
+ >>> # Assuming 'dc' is an instance of DistanceCalculation
+ >>> cv2.setMouseCallback("window_name", dc.mouse_event_for_distance)
"""
if event == cv2.EVENT_LBUTTONDOWN:
self.left_mouse_count += 1
@@ -43,13 +73,23 @@ class DistanceCalculation(BaseSolution):
def calculate(self, im0):
"""
- Processes the video frame and calculates the distance between two bounding boxes.
+ Processes a video frame and calculates the distance between two selected bounding boxes.
+
+ This method extracts tracks from the input frame, annotates bounding boxes, and calculates the distance
+ between two user-selected objects if they have been chosen.
Args:
- im0 (ndarray): The image frame.
+ im0 (numpy.ndarray): The input image frame to process.
Returns:
- (ndarray): The processed image frame.
+ (numpy.ndarray): The processed image frame with annotations and distance calculations.
+
+ Examples:
+ >>> import numpy as np
+ >>> from ultralytics.solutions import DistanceCalculation
+ >>> dc = DistanceCalculation()
+ >>> frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
+ >>> processed_frame = dc.calculate(frame)
"""
self.annotator = Annotator(im0, line_width=self.line_width) # Initialize annotator
self.extract_tracks(im0) # Extract tracks
diff --git a/ultralytics/solutions/heatmap.py b/ultralytics/solutions/heatmap.py
index d7dcf71cff..39352a9bd7 100644
--- a/ultralytics/solutions/heatmap.py
+++ b/ultralytics/solutions/heatmap.py
@@ -3,15 +3,40 @@
import cv2
import numpy as np
-from ultralytics.solutions.object_counter import ObjectCounter # Import object counter class
+from ultralytics.solutions.object_counter import ObjectCounter
from ultralytics.utils.plotting import Annotator
class Heatmap(ObjectCounter):
- """A class to draw heatmaps in real-time video stream based on their tracks."""
+ """
+ A class to draw heatmaps in real-time video streams based on object tracks.
+
+ This class extends the ObjectCounter class to generate and visualize heatmaps of object movements in video
+ streams. It uses tracked object positions to create a cumulative heatmap effect over time.
+
+ Attributes:
+ initialized (bool): Flag indicating whether the heatmap has been initialized.
+ colormap (int): OpenCV colormap used for heatmap visualization.
+ heatmap (np.ndarray): Array storing the cumulative heatmap data.
+ annotator (Annotator): Object for drawing annotations on the image.
+
+ Methods:
+ heatmap_effect: Calculates and updates the heatmap effect for a given bounding box.
+ generate_heatmap: Generates and applies the heatmap effect to each frame.
+
+ Examples:
+ >>> from ultralytics.solutions import Heatmap
+ >>> heatmap = Heatmap(model="yolov8n.pt", colormap=cv2.COLORMAP_JET)
+    >>> frame = cv2.imread("frame.jpg")
+    >>> processed_frame = heatmap.generate_heatmap(frame)
+    >>> cv2.imshow("Heatmap", processed_frame)
+    >>> cv2.waitKey(0)
+ """
def __init__(self, **kwargs):
- """Initializes function for heatmap class with default values."""
+ """Initializes the Heatmap class for real-time video stream heatmap generation based on object tracks."""
super().__init__(**kwargs)
self.initialized = False # bool variable for heatmap initialization
@@ -23,10 +48,15 @@ class Heatmap(ObjectCounter):
def heatmap_effect(self, box):
"""
- Efficient calculation of heatmap area and effect location for applying colormap.
+ Efficiently calculates heatmap area and effect location for applying colormap.
Args:
- box (list): Bounding Box coordinates data [x0, y0, x1, y1]
+ box (List[float]): Bounding box coordinates [x0, y0, x1, y1].
+
+ Examples:
+ >>> heatmap = Heatmap()
+ >>> box = [100, 100, 200, 200]
+ >>> heatmap.heatmap_effect(box)
"""
x0, y0, x1, y1 = map(int, box)
radius_squared = (min(x1 - x0, y1 - y0) // 2) ** 2
@@ -48,9 +78,15 @@ class Heatmap(ObjectCounter):
Generate heatmap for each frame using Ultralytics.
Args:
- im0 (ndarray): Input image array for processing
+ im0 (np.ndarray): Input image array for processing.
+
Returns:
- im0 (ndarray): Processed image for further usage
+ (np.ndarray): Processed image with heatmap overlay and object counts (if region is specified).
+
+ Examples:
+ >>> heatmap = Heatmap()
+ >>> im0 = cv2.imread("image.jpg")
+ >>> result = heatmap.generate_heatmap(im0)
"""
if not self.initialized:
self.heatmap = np.zeros_like(im0, dtype=np.float32) * 0.99
@@ -70,16 +106,17 @@ class Heatmap(ObjectCounter):
self.store_classwise_counts(cls) # store classwise counts in dict
# Store tracking previous position and perform object counting
- prev_position = self.track_history[track_id][-2] if len(self.track_history[track_id]) > 1 else None
+ prev_position = None
+ if len(self.track_history[track_id]) > 1:
+ prev_position = self.track_history[track_id][-2]
self.count_objects(self.track_line, box, track_id, prev_position, cls) # Perform object counting
- self.display_counts(im0) if self.region is not None else None # Display the counts on the frame
+ if self.region is not None:
+ self.display_counts(im0) # Display the counts on the frame
# Normalize, apply colormap to heatmap and combine with original image
- im0 = (
- im0
- if self.track_data.id is None
- else cv2.addWeighted(
+ if self.track_data.id is not None:
+ im0 = cv2.addWeighted(
im0,
0.5,
cv2.applyColorMap(
@@ -88,7 +125,6 @@ class Heatmap(ObjectCounter):
0.5,
0,
)
- )
self.display_output(im0) # display output with base class function
return im0 # return output image for more usage
diff --git a/ultralytics/solutions/object_counter.py b/ultralytics/solutions/object_counter.py
index d576746421..6374920734 100644
--- a/ultralytics/solutions/object_counter.py
+++ b/ultralytics/solutions/object_counter.py
@@ -1,18 +1,40 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
-from shapely.geometry import LineString, Point
-
-from ultralytics.solutions.solutions import BaseSolution # Import a parent class
+from ultralytics.solutions.solutions import BaseSolution
from ultralytics.utils.plotting import Annotator, colors
class ObjectCounter(BaseSolution):
- """A class to manage the counting of objects in a real-time video stream based on their tracks."""
+ """
+ A class to manage the counting of objects in a real-time video stream based on their tracks.
+
+ This class extends the BaseSolution class and provides functionality for counting objects moving in and out of a
+ specified region in a video stream. It supports both polygonal and linear regions for counting.
+
+ Attributes:
+ in_count (int): Counter for objects moving inward.
+ out_count (int): Counter for objects moving outward.
+ counted_ids (List[int]): List of IDs of objects that have been counted.
+ classwise_counts (Dict[str, Dict[str, int]]): Dictionary for counts, categorized by object class.
+ region_initialized (bool): Flag indicating whether the counting region has been initialized.
+ show_in (bool): Flag to control display of inward count.
+ show_out (bool): Flag to control display of outward count.
+
+ Methods:
+ count_objects: Counts objects within a polygonal or linear region.
+ store_classwise_counts: Initializes class-wise counts if not already present.
+ display_counts: Displays object counts on the frame.
+ count: Processes input data (frames or object tracks) and updates counts.
+
+ Examples:
+ >>> counter = ObjectCounter()
+ >>> frame = cv2.imread("frame.jpg")
+ >>> processed_frame = counter.count(frame)
+ >>> print(f"Inward count: {counter.in_count}, Outward count: {counter.out_count}")
+ """
def __init__(self, **kwargs):
- """Initialization function for Count class, a child class of BaseSolution class, can be used for counting the
- objects.
- """
+ """Initializes the ObjectCounter class for real-time object counting in video streams."""
super().__init__(**kwargs)
self.in_count = 0 # Counter for objects moving inward
@@ -26,14 +48,23 @@ class ObjectCounter(BaseSolution):
def count_objects(self, track_line, box, track_id, prev_position, cls):
"""
- Helper function to count objects within a polygonal region.
+ Counts objects within a polygonal or linear region based on their tracks.
Args:
- track_line (dict): last 30 frame track record
- box (list): Bounding box data for specific track in current frame
- track_id (int): track ID of the object
- prev_position (tuple): last frame position coordinates of the track
- cls (int): Class index for classwise count updates
+ track_line (List[Tuple[float, float]]): Last 30 frames of track points (box centroids) for the object.
+ box (List[float]): Bounding box coordinates [x1, y1, x2, y2] for the specific track in the current frame.
+ track_id (int): Unique identifier for the tracked object.
+ prev_position (Tuple[float, float]): Last frame position coordinates (x, y) of the track.
+ cls (int): Class index for classwise count updates.
+
+ Examples:
+ >>> counter = ObjectCounter()
+ >>> track_line = [(100, 200), (110, 210), (120, 220)]
+ >>> box = [130, 230, 150, 250]
+ >>> track_id = 1
+ >>> prev_position = (120, 220)
+ >>> cls = 0
+ >>> counter.count_objects(track_line, box, track_id, prev_position, cls)
"""
if prev_position is None or track_id in self.counted_ids:
return
@@ -42,7 +73,7 @@ class ObjectCounter(BaseSolution):
dx = (box[0] - prev_position[0]) * (centroid.x - prev_position[0])
dy = (box[1] - prev_position[1]) * (centroid.y - prev_position[1])
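+ # dx/dy compare the object's displacement with the region centroid's offset from the previous position;
+ # positive values indicate movement toward the region and their signs below decide IN vs OUT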
- if len(self.region) >= 3 and self.r_s.contains(Point(track_line[-1])):
+ if len(self.region) >= 3 and self.r_s.contains(self.Point(track_line[-1])):
self.counted_ids.append(track_id)
# For polygon region
if dx > 0:
@@ -52,7 +83,7 @@ class ObjectCounter(BaseSolution):
self.out_count += 1
self.classwise_counts[self.names[cls]]["OUT"] += 1
- elif len(self.region) < 3 and LineString([prev_position, box[:2]]).intersects(self.l_s):
+ elif len(self.region) < 3 and self.LineString([prev_position, box[:2]]).intersects(self.r_s):
self.counted_ids.append(track_id)
# For linear region
if dx > 0 and dy > 0:
@@ -64,20 +95,34 @@ class ObjectCounter(BaseSolution):
def store_classwise_counts(self, cls):
"""
- Initialize class-wise counts if not already present.
+ Initialize class-wise counts for a specific object class if not already present.
Args:
- cls (int): Class index for classwise count updates
+ cls (int): Class index for classwise count updates.
+
+ This method ensures that the 'classwise_counts' dictionary contains an entry for the specified class,
+ initializing 'IN' and 'OUT' counts to zero if the class is not already present.
+
+ Examples:
+ >>> counter = ObjectCounter()
+ >>> counter.store_classwise_counts(0) # Initialize counts for class index 0
+ >>> print(counter.classwise_counts)
+ {'person': {'IN': 0, 'OUT': 0}}
"""
if self.names[cls] not in self.classwise_counts:
self.classwise_counts[self.names[cls]] = {"IN": 0, "OUT": 0}
def display_counts(self, im0):
"""
- Helper function to display object counts on the frame.
+ Displays object counts on the input image or frame.
Args:
- im0 (ndarray): The input image or frame
+ im0 (numpy.ndarray): The input image or frame to display counts on.
+
+ Examples:
+ >>> counter = ObjectCounter()
+ >>> frame = cv2.imread("image.jpg")
+ >>> counter.display_counts(frame)
"""
labels_dict = {
str.capitalize(key): f"{'IN ' + str(value['IN']) if self.show_in else ''} "
@@ -91,12 +136,21 @@ class ObjectCounter(BaseSolution):
def count(self, im0):
"""
- Processes input data (frames or object tracks) and updates counts.
+ Processes input data (frames or object tracks) and updates object counts.
+
+ This method initializes the counting region, extracts tracks, draws bounding boxes and regions, updates
+ object counts, and displays the results on the input image.
Args:
- im0 (ndarray): The input image that will be used for processing
- Returns
- im0 (ndarray): The processed image for more usage
+ im0 (numpy.ndarray): The input image or frame to be processed.
+
+ Returns:
+ (numpy.ndarray): The processed image with annotations and count information.
+
+ Examples:
+ >>> counter = ObjectCounter()
+ >>> frame = cv2.imread("path/to/image.jpg")
+ >>> processed_frame = counter.count(frame)
"""
if not self.region_initialized:
self.initialize_region()
@@ -122,7 +176,9 @@ class ObjectCounter(BaseSolution):
)
# store previous position of track for object counting
- prev_position = self.track_history[track_id][-2] if len(self.track_history[track_id]) > 1 else None
+ prev_position = None
+ if len(self.track_history[track_id]) > 1:
+ prev_position = self.track_history[track_id][-2]
self.count_objects(self.track_line, box, track_id, prev_position, cls) # Perform object counting
self.display_counts(im0) # Display the counts on the frame
diff --git a/ultralytics/solutions/parking_management.py b/ultralytics/solutions/parking_management.py
index ef58ad6274..fa815938ab 100644
--- a/ultralytics/solutions/parking_management.py
+++ b/ultralytics/solutions/parking_management.py
@@ -5,237 +5,232 @@ import json
import cv2
import numpy as np
-from ultralytics.utils.checks import check_imshow, check_requirements
+from ultralytics.solutions.solutions import LOGGER, BaseSolution, check_requirements
from ultralytics.utils.plotting import Annotator
class ParkingPtsSelection:
- """Class for selecting and managing parking zone points on images using a Tkinter-based UI."""
+ """
+ A class for selecting and managing parking zone points on images using a Tkinter-based UI.
+
+ This class provides functionality to upload an image, select points to define parking zones, and save the
+ selected points to a JSON file. It uses Tkinter for the graphical user interface.
+
+ Attributes:
+ tk (module): The Tkinter module for GUI operations.
+ filedialog (module): Tkinter's filedialog module for file selection operations.
+ messagebox (module): Tkinter's messagebox module for displaying message boxes.
+ master (tk.Tk): The main Tkinter window.
+ canvas (tk.Canvas): The canvas widget for displaying the image and drawing bounding boxes.
+ image (PIL.Image.Image): The uploaded image.
+ canvas_image (ImageTk.PhotoImage): The image displayed on the canvas.
+ rg_data (List[List[Tuple[int, int]]]): List of bounding boxes, each defined by 4 points.
+ current_box (List[Tuple[int, int]]): Temporary storage for the points of the current bounding box.
+ imgw (int): Original width of the uploaded image.
+ imgh (int): Original height of the uploaded image.
+ canvas_max_width (int): Maximum width of the canvas.
+ canvas_max_height (int): Maximum height of the canvas.
+
+ Methods:
+ setup_ui: Sets up the Tkinter UI components.
+ initialize_properties: Initializes the necessary properties.
+ upload_image: Uploads an image, resizes it to fit the canvas, and displays it.
+ on_canvas_click: Handles mouse clicks to add points for bounding boxes.
+ draw_box: Draws a bounding box on the canvas.
+ remove_last_bounding_box: Removes the last bounding box and redraws the canvas.
+ redraw_canvas: Redraws the canvas with the image and all bounding boxes.
+ save_to_json: Saves the bounding boxes to a JSON file.
+
+ Examples:
+ >>> parking_selector = ParkingPtsSelection()
+ >>> # Use the GUI to upload an image, select parking zones, and save the data
+ """
def __init__(self):
- """Initializes the UI for selecting parking zone points in a tkinter window."""
+ """Initializes the ParkingPtsSelection class, setting up UI and properties for parking zone point selection."""
check_requirements("tkinter")
+ import tkinter as tk
+ from tkinter import filedialog, messagebox
- import tkinter as tk # scope for multi-environment compatibility
+ self.tk, self.filedialog, self.messagebox = tk, filedialog, messagebox
+ self.setup_ui()
+ self.initialize_properties()
+ self.master.mainloop()
- self.tk = tk
- self.master = tk.Tk()
+ def setup_ui(self):
+ """Sets up the Tkinter UI components for the parking zone points selection interface."""
+ self.master = self.tk.Tk()
self.master.title("Ultralytics Parking Zones Points Selector")
-
- # Disable window resizing
self.master.resizable(False, False)
- # Setup canvas for image display
+ # Canvas for image display
self.canvas = self.tk.Canvas(self.master, bg="white")
+ self.canvas.pack(side=self.tk.BOTTOM)
- # Setup buttons
+ # Button frame with buttons
button_frame = self.tk.Frame(self.master)
button_frame.pack(side=self.tk.TOP)
- self.tk.Button(button_frame, text="Upload Image", command=self.upload_image).grid(row=0, column=0)
- self.tk.Button(button_frame, text="Remove Last BBox", command=self.remove_last_bounding_box).grid(
- row=0, column=1
- )
- self.tk.Button(button_frame, text="Save", command=self.save_to_json).grid(row=0, column=2)
-
- # Initialize properties
- self.image_path = None
- self.image = None
- self.canvas_image = None
- self.rg_data = [] # region coordinates
- self.current_box = []
- self.imgw = 0 # image width
- self.imgh = 0 # image height
+ for text, cmd in [
+ ("Upload Image", self.upload_image),
+ ("Remove Last BBox", self.remove_last_bounding_box),
+ ("Save", self.save_to_json),
+ ]:
+ self.tk.Button(button_frame, text=text, command=cmd).pack(side=self.tk.LEFT)
- # Constants
- self.canvas_max_width = 1280
- self.canvas_max_height = 720
-
- self.master.mainloop()
+ def initialize_properties(self):
+ """Initialize properties for image, canvas, bounding boxes, and dimensions."""
+ self.image = self.canvas_image = None
+ self.rg_data, self.current_box = [], []
+ self.imgw = self.imgh = 0
+ self.canvas_max_width, self.canvas_max_height = 1280, 720
def upload_image(self):
- """Upload an image and resize it to fit canvas."""
- from tkinter import filedialog
-
+ """Uploads and displays an image on the canvas, resizing it to fit within specified dimensions."""
from PIL import Image, ImageTk # scope because ImageTk requires tkinter package
- self.image_path = filedialog.askopenfilename(filetypes=[("Image Files", "*.png;*.jpg;*.jpeg")])
- if not self.image_path:
+ image_path = self.filedialog.askopenfilename(filetypes=[("Image Files", "*.png;*.jpg;*.jpeg")])
+ if not image_path:  # no file selected
return
- self.image = Image.open(self.image_path)
+ self.image = Image.open(image_path)
self.imgw, self.imgh = self.image.size
-
- # Calculate the aspect ratio and resize image
aspect_ratio = self.imgw / self.imgh
- if aspect_ratio > 1:
- # Landscape orientation
- canvas_width = min(self.canvas_max_width, self.imgw)
- canvas_height = int(canvas_width / aspect_ratio)
- else:
- # Portrait orientation
- canvas_height = min(self.canvas_max_height, self.imgh)
- canvas_width = int(canvas_height * aspect_ratio)
-
- # Check if canvas is already initialized
- if self.canvas:
- self.canvas.destroy() # Destroy previous canvas
-
- self.canvas = self.tk.Canvas(self.master, bg="white", width=canvas_width, height=canvas_height)
- resized_image = self.image.resize((canvas_width, canvas_height), Image.LANCZOS)
- self.canvas_image = ImageTk.PhotoImage(resized_image)
- self.canvas.create_image(0, 0, anchor=self.tk.NW, image=self.canvas_image)
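+ # Fit landscape images to the max canvas width and portrait images to the max canvas height, preserving aspect ratio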
+ canvas_width = (
+ min(self.canvas_max_width, self.imgw) if aspect_ratio > 1 else int(self.canvas_max_height * aspect_ratio)
+ )
+ canvas_height = (
+ min(self.canvas_max_height, self.imgh) if aspect_ratio <= 1 else int(canvas_width / aspect_ratio)
+ )
- self.canvas.pack(side=self.tk.BOTTOM)
+ self.canvas.config(width=canvas_width, height=canvas_height)
+ self.canvas_image = ImageTk.PhotoImage(self.image.resize((canvas_width, canvas_height), Image.LANCZOS))
+ self.canvas.create_image(0, 0, anchor=self.tk.NW, image=self.canvas_image)
self.canvas.bind("", self.on_canvas_click)
- # Reset bounding boxes and current box
- self.rg_data = []
- self.current_box = []
+ self.rg_data.clear()
+ self.current_box.clear()
def on_canvas_click(self, event):
- """Handle mouse clicks on canvas to create points for bounding boxes."""
+ """Handles mouse clicks to add points for bounding boxes on the canvas."""
self.current_box.append((event.x, event.y))
self.canvas.create_oval(event.x - 3, event.y - 3, event.x + 3, event.y + 3, fill="red")
-
if len(self.current_box) == 4:
- self.rg_data.append(self.current_box)
- [
- self.canvas.create_line(self.current_box[i], self.current_box[(i + 1) % 4], fill="blue", width=2)
- for i in range(4)
- ]
- self.current_box = []
+ self.rg_data.append(self.current_box.copy())
+ self.draw_box(self.current_box)
+ self.current_box.clear()
- def remove_last_bounding_box(self):
- """Remove the last drawn bounding box from canvas."""
- from tkinter import messagebox # scope for multi-environment compatibility
+ def draw_box(self, box):
+ """Draws a bounding box on the canvas using the provided coordinates."""
+ for i in range(4):
+ self.canvas.create_line(box[i], box[(i + 1) % 4], fill="blue", width=2)
- if self.rg_data:
- self.rg_data.pop() # Remove the last bounding box
- self.canvas.delete("all") # Clear the canvas
- self.canvas.create_image(0, 0, anchor=self.tk.NW, image=self.canvas_image) # Redraw the image
+ def remove_last_bounding_box(self):
+ """Removes the last bounding box from the list and redraws the canvas."""
+ if not self.rg_data:
+ self.messagebox.showwarning("Warning", "No bounding boxes to remove.")
+ return
+ self.rg_data.pop()
+ self.redraw_canvas()
- # Redraw all bounding boxes
- for box in self.rg_data:
- [self.canvas.create_line(box[i], box[(i + 1) % 4], fill="blue", width=2) for i in range(4)]
- messagebox.showinfo("Success", "Last bounding box removed.")
- else:
- messagebox.showwarning("Warning", "No bounding boxes to remove.")
+ def redraw_canvas(self):
+ """Redraws the canvas with the image and all bounding boxes."""
+ self.canvas.delete("all")
+ self.canvas.create_image(0, 0, anchor=self.tk.NW, image=self.canvas_image)
+ for box in self.rg_data:
+ self.draw_box(box)
def save_to_json(self):
- """Saves rescaled bounding boxes to 'bounding_boxes.json' based on image-to-canvas size ratio."""
- from tkinter import messagebox # scope for multi-environment compatibility
-
- rg_data = [] # regions data
- for box in self.rg_data:
- rs_box = [
- (
- int(x * self.imgw / self.canvas.winfo_width()), # width scaling
- int(y * self.imgh / self.canvas.winfo_height()), # height scaling
- )
- for x, y in box
- ]
- rg_data.append({"points": rs_box})
+ """Saves the selected parking zone points to a JSON file with scaled coordinates."""
+ scale_w, scale_h = self.imgw / self.canvas.winfo_width(), self.imgh / self.canvas.winfo_height()
+ data = [{"points": [(int(x * scale_w), int(y * scale_h)) for x, y in box]} for box in self.rg_data]
with open("bounding_boxes.json", "w") as f:
- json.dump(rg_data, f, indent=4)
+ json.dump(data, f, indent=4)
+ self.messagebox.showinfo("Success", "Bounding boxes saved to bounding_boxes.json")
- messagebox.showinfo("Success", "Bounding boxes saved to bounding_boxes.json")
+class ParkingManagement(BaseSolution):
+ """
+ Manages parking occupancy and availability using YOLO model for real-time monitoring and visualization.
-class ParkingManagement:
- """Manages parking occupancy and availability using YOLOv8 for real-time monitoring and visualization."""
+ This class extends BaseSolution to provide functionality for parking lot management, including detection of
+ occupied spaces, visualization of parking regions, and display of occupancy statistics.
- def __init__(
- self,
- model, # Ultralytics YOLO model file path
- json_file, # Parking management annotation file created from Parking Annotator
- occupied_region_color=(0, 0, 255), # occupied region color
- available_region_color=(0, 255, 0), # available region color
- ):
- """
- Initializes the parking management system with a YOLOv8 model and visualization settings.
+ Attributes:
+ json_file (str): Path to the JSON file containing parking region details.
+ json (List[Dict]): Loaded JSON data containing parking region information.
+ pr_info (Dict[str, int]): Dictionary storing parking information (Occupancy and Available spaces).
+ arc (Tuple[int, int, int]): RGB color tuple for available region visualization.
+ occ (Tuple[int, int, int]): RGB color tuple for occupied region visualization.
+ dc (Tuple[int, int, int]): RGB color tuple for centroid visualization of detected objects.
- Args:
- model (str): Path to the YOLOv8 model.
- json_file (str): file that have all parking slot points data
- occupied_region_color (tuple): RGB color tuple for occupied regions.
- available_region_color (tuple): RGB color tuple for available regions.
- """
- # Model initialization
- from ultralytics import YOLO
+ Methods:
+ process_data: Processes model data for parking lot management and visualization.
- self.model = YOLO(model)
+ Examples:
+ >>> from ultralytics.solutions import ParkingManagement
+ >>> parking_manager = ParkingManagement(model="yolov8n.pt", json_file="parking_regions.json")
+ >>> frame = cv2.imread("parking_lot.jpg")
+ >>> processed_frame = parking_manager.process_data(frame)
+ >>> print(f"Occupied spaces: {parking_manager.pr_info['Occupancy']}")
+ >>> print(f"Available spaces: {parking_manager.pr_info['Available']}")
+ """
- # Load JSON data
- with open(json_file) as f:
- self.json_data = json.load(f)
+ def __init__(self, **kwargs):
+ """Initializes the parking management system with a YOLO model and visualization settings."""
+ super().__init__(**kwargs)
- self.pr_info = {"Occupancy": 0, "Available": 0} # dictionary for parking information
+ self.json_file = self.CFG["json_file"] # Load JSON data
+ if self.json_file is None:
+ LOGGER.warning("❌ json_file argument missing. Parking region details required.")
+ raise ValueError("❌ Json file path can not be empty")
- self.occ = occupied_region_color
- self.arc = available_region_color
+ with open(self.json_file) as f:
+ self.json = json.load(f)
- self.env_check = check_imshow(warn=True) # check if environment supports imshow
+ self.pr_info = {"Occupancy": 0, "Available": 0} # dictionary for parking information
- def process_data(self, im0):
- """
- Process the model data for parking lot management.
+ self.arc = (0, 255, 0) # available region color (green)
+ self.occ = (0, 0, 255) # occupied region color (red)
+ self.dc = (255, 0, 189) # centroid color for each box
- Args:
- im0 (ndarray): inference image
+ def process_data(self, im0):
"""
- results = self.model.track(im0, persist=True, show=False) # object tracking
+ Processes the model data for parking lot management.
- es, fs = len(self.json_data), 0 # empty slots, filled slots
- annotator = Annotator(im0) # init annotator
+ This function analyzes the input image, extracts tracks, and determines the occupancy status of parking
+ regions defined in the JSON file. It annotates the image with occupied and available parking spots,
+ and updates the parking information.
- # extract tracks data
- if results[0].boxes.id is None:
- self.display_frames(im0)
- return im0
+ Args:
+ im0 (np.ndarray): The input inference image.
- boxes = results[0].boxes.xyxy.cpu().tolist()
- clss = results[0].boxes.cls.cpu().tolist()
+ Examples:
+ >>> parking_manager = ParkingManagement(json_file="parking_regions.json")
+ >>> image = cv2.imread("parking_lot.jpg")
+ >>> parking_manager.process_data(image)
+ """
+ self.extract_tracks(im0) # extract tracks from im0
+ es, fs = len(self.json), 0 # empty slots, filled slots
+ annotator = Annotator(im0, self.line_width) # init annotator
- for region in self.json_data:
+ for region in self.json:
# Convert points to a NumPy array with the correct dtype and reshape properly
pts_array = np.array(region["points"], dtype=np.int32).reshape((-1, 1, 2))
rg_occupied = False # occupied region initialization
- for box, cls in zip(boxes, clss):
- xc = int((box[0] + box[2]) / 2)
- yc = int((box[1] + box[3]) / 2)
- annotator.display_objects_labels(
- im0, self.model.names[int(cls)], (104, 31, 17), (255, 255, 255), xc, yc, 10
- )
+ for box, cls in zip(self.boxes, self.clss):
+ xc, yc = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)
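+ # pointPolygonTest returns a non-negative value when the box centroid lies inside or on the region polygon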
dist = cv2.pointPolygonTest(pts_array, (xc, yc), False)
if dist >= 0:
+ # cv2.circle(im0, (xc, yc), radius=self.line_width * 4, color=self.dc, thickness=-1)
+ annotator.display_objects_labels(
+ im0, self.model.names[int(cls)], (104, 31, 17), (255, 255, 255), xc, yc, 10
+ )
rg_occupied = True
break
- if rg_occupied:
- fs += 1
- es -= 1
-
+ if rg_occupied:
+ fs += 1
+ es -= 1
# Plotting regions
- color = self.occ if rg_occupied else self.arc
- cv2.polylines(im0, [pts_array], isClosed=True, color=color, thickness=2)
+ cv2.polylines(im0, [pts_array], isClosed=True, color=self.occ if rg_occupied else self.arc, thickness=2)
- self.pr_info["Occupancy"] = fs
- self.pr_info["Available"] = es
+ self.pr_info["Occupancy"], self.pr_info["Available"] = fs, es
annotator.display_analytics(im0, self.pr_info, (104, 31, 17), (255, 255, 255), 10)
-
- self.display_frames(im0)
- return im0
-
- def display_frames(self, im0):
- """
- Display frame.
-
- Args:
- im0 (ndarray): inference image
- """
- if self.env_check:
- cv2.imshow("Ultralytics Parking Manager", im0)
- # Break Window
- if cv2.waitKey(1) & 0xFF == ord("q"):
- return
+ self.display_output(im0) # display output with base class function
+ return im0 # return output image for more usage
diff --git a/ultralytics/solutions/queue_management.py b/ultralytics/solutions/queue_management.py
index 287f337dc5..ca0acb14f8 100644
--- a/ultralytics/solutions/queue_management.py
+++ b/ultralytics/solutions/queue_management.py
@@ -1,16 +1,40 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
-from shapely.geometry import Point
-
-from ultralytics.solutions.solutions import BaseSolution # Import a parent class
+from ultralytics.solutions.solutions import BaseSolution
from ultralytics.utils.plotting import Annotator, colors
class QueueManager(BaseSolution):
- """A class to manage the queue in a real-time video stream based on object tracks."""
+ """
+ Manages queue counting in real-time video streams based on object tracks.
+
+ This class extends BaseSolution to provide functionality for tracking and counting objects within a specified
+ region in video frames.
+
+ Attributes:
+ counts (int): The current count of objects in the queue.
+ rect_color (Tuple[int, int, int]): RGB color tuple for drawing the queue region rectangle.
+ region_length (int): The number of points defining the queue region.
+ annotator (Annotator): An instance of the Annotator class for drawing on frames.
+ track_line (List[Tuple[int, int]]): List of track line coordinates.
+ track_history (Dict[int, List[Tuple[int, int]]]): Dictionary storing tracking history for each object.
+
+ Methods:
+ initialize_region: Initializes the queue region.
+ process_queue: Processes a single frame for queue management.
+ extract_tracks: Extracts object tracks from the current frame.
+ store_tracking_history: Stores the tracking history for an object.
+ display_output: Displays the processed output.
+
+ Examples:
+ >>> queue_manager = QueueManager(source="video.mp4", region=[(100, 100), (200, 200), (300, 300)])
+ >>> for frame in video_stream:
+ ... processed_frame = queue_manager.process_queue(frame)
+ ... cv2.imshow("Queue Management", processed_frame)
+ """
def __init__(self, **kwargs):
- """Initializes the QueueManager with specified parameters for tracking and counting objects."""
+ """Initializes the QueueManager with parameters for tracking and counting objects in a video stream."""
super().__init__(**kwargs)
self.initialize_region()
self.counts = 0 # Queue counts Information
@@ -19,12 +43,31 @@ class QueueManager(BaseSolution):
def process_queue(self, im0):
"""
- Main function to start the queue management process.
+ Processes the queue management for a single frame of video.
Args:
- im0 (ndarray): The input image that will be used for processing
- Returns
- im0 (ndarray): The processed image for more usage
+ im0 (numpy.ndarray): Input image for processing, typically a frame from a video stream.
+
+ Returns:
+ (numpy.ndarray): Processed image with annotations, bounding boxes, and queue counts.
+
+ This method performs the following steps:
+ 1. Resets the queue count for the current frame.
+ 2. Initializes an Annotator object for drawing on the image.
+ 3. Extracts tracks from the image.
+ 4. Draws the counting region on the image.
+ 5. For each detected object:
+ - Draws bounding boxes and labels.
+ - Stores tracking history.
+ - Draws centroids and tracks.
+ - Checks if the object is inside the counting region and updates the count.
+ 6. Displays the queue count on the image.
+ 7. Displays the processed output.
+
+ Examples:
+ >>> queue_manager = QueueManager()
+ >>> frame = cv2.imread("frame.jpg")
+ >>> processed_frame = queue_manager.process_queue(frame)
"""
self.counts = 0 # Reset counts every frame
self.annotator = Annotator(im0, line_width=self.line_width) # Initialize annotator
@@ -48,8 +91,10 @@ class QueueManager(BaseSolution):
track_history = self.track_history.get(track_id, [])
# store previous position of track and check if the object is inside the counting region
- prev_position = track_history[-2] if len(track_history) > 1 else None
- if self.region_length >= 3 and prev_position and self.r_s.contains(Point(self.track_line[-1])):
+ prev_position = None
+ if len(track_history) > 1:
+ prev_position = track_history[-2]
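+ # Count the object when the queue region is a polygon (>= 3 points) and its latest track point lies inside it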
+ if self.region_length >= 3 and prev_position and self.r_s.contains(self.Point(self.track_line[-1])):
self.counts += 1
# Display queue counts
diff --git a/ultralytics/solutions/solutions.py b/ultralytics/solutions/solutions.py
index 71a92becfd..1af0c0ba09 100644
--- a/ultralytics/solutions/solutions.py
+++ b/ultralytics/solutions/solutions.py
@@ -9,21 +9,51 @@ from ultralytics import YOLO
from ultralytics.utils import LOGGER, yaml_load
from ultralytics.utils.checks import check_imshow, check_requirements
-check_requirements("shapely>=2.0.0")
-from shapely.geometry import LineString, Polygon
-
DEFAULT_SOL_CFG_PATH = Path(__file__).resolve().parents[1] / "cfg/solutions/default.yaml"
class BaseSolution:
- """A class to manage all the Ultralytics Solutions: https://docs.ultralytics.com/solutions/."""
+ """
+ A base class for managing Ultralytics Solutions.
+
+ This class provides core functionality for various Ultralytics Solutions, including model loading, object tracking,
+ and region initialization.
+
+ Attributes:
+ LineString (shapely.geometry.LineString): Class for creating line string geometries.
+ Polygon (shapely.geometry.Polygon): Class for creating polygon geometries.
+ Point (shapely.geometry.Point): Class for creating point geometries.
+ CFG (Dict): Configuration dictionary loaded from a YAML file and updated with kwargs.
+ region (List[Tuple[int, int]]): List of coordinate tuples defining a region of interest.
+ line_width (int): Width of lines used in visualizations.
+ model (ultralytics.YOLO): Loaded YOLO model instance.
+ names (Dict[int, str]): Dictionary mapping class indices to class names.
+ env_check (bool): Flag indicating whether the environment supports image display.
+ track_history (collections.defaultdict): Dictionary to store tracking history for each object.
+
+ Methods:
+ extract_tracks: Apply object tracking and extract tracks from an input image.
+ store_tracking_history: Store object tracking history for a given track ID and bounding box.
+ initialize_region: Initialize the counting region and line segment based on configuration.
+ display_output: Display the results of processing, including showing frames or saving results.
+
+ Examples:
+ >>> solution = BaseSolution(model="yolov8n.pt", region=[(0, 0), (100, 0), (100, 100), (0, 100)])
+ >>> solution.initialize_region()
+ >>> image = cv2.imread("image.jpg")
+ >>> solution.extract_tracks(image)
+ >>> solution.display_output(image)
+ """
def __init__(self, **kwargs):
- """
- Base initializer for all solutions.
+ """Initializes the BaseSolution class with configuration settings and YOLO model for Ultralytics solutions."""
+ check_requirements("shapely>=2.0.0")
+ from shapely.geometry import LineString, Point, Polygon
+
+ self.LineString = LineString
+ self.Polygon = Polygon
+ self.Point = Point
- Child classes should call this with necessary parameters.
- """
# Load config and update with args
self.CFG = yaml_load(DEFAULT_SOL_CFG_PATH)
self.CFG.update(kwargs)
@@ -42,10 +72,15 @@ class BaseSolution:
def extract_tracks(self, im0):
"""
- Apply object tracking and extract tracks.
+ Applies object tracking and extracts tracks from an input image or frame.
Args:
- im0 (ndarray): The input image or frame
+ im0 (np.ndarray): The input image or frame.
+
+ Examples:
+ >>> solution = BaseSolution()
+ >>> frame = cv2.imread("path/to/image.jpg")
+ >>> solution.extract_tracks(frame)
"""
self.tracks = self.model.track(source=im0, persist=True, classes=self.CFG["classes"])
@@ -62,11 +97,18 @@ class BaseSolution:
def store_tracking_history(self, track_id, box):
"""
- Store object tracking history.
+ Stores the tracking history of an object.
+
+ This method updates the tracking history for a given object by appending the center point of its
+ bounding box to the track line. It maintains a maximum of 30 points in the tracking history.
Args:
- track_id (int): The track ID of the object
- box (list): Bounding box coordinates of the object
+ track_id (int): The unique identifier for the tracked object.
+ box (List[float]): The bounding box coordinates of the object in the format [x1, y1, x2, y2].
+
+ Examples:
+ >>> solution = BaseSolution()
+ >>> solution.store_tracking_history(1, [100, 200, 300, 400])
"""
# Store tracking history
self.track_line = self.track_history[track_id]
@@ -75,19 +117,32 @@ class BaseSolution:
self.track_line.pop(0)
def initialize_region(self):
- """Initialize the counting region and line segment based on config."""
- self.region = [(20, 400), (1080, 404), (1080, 360), (20, 360)] if self.region is None else self.region
- self.r_s = Polygon(self.region) if len(self.region) >= 3 else LineString(self.region) # region segment
- self.l_s = LineString(
- [(self.region[0][0], self.region[0][1]), (self.region[1][0], self.region[1][1])]
- ) # line segment
+ """Initialize the counting region and line segment based on configuration settings."""
+ if self.region is None:
+ self.region = [(20, 400), (1080, 404), (1080, 360), (20, 360)]
+ self.r_s = (
+ self.Polygon(self.region) if len(self.region) >= 3 else self.LineString(self.region)
+ ) # region or line
def display_output(self, im0):
"""
Display the results of the processing, which could involve showing frames, printing counts, or saving results.
+ This method is responsible for visualizing the output of the object detection and tracking process. It displays
+ the processed frame with annotations, and allows for user interaction to close the display.
+
Args:
- im0 (ndarray): The input image or frame
+ im0 (numpy.ndarray): The input image or frame that has been processed and annotated.
+
+ Examples:
+ >>> solution = BaseSolution()
+ >>> frame = cv2.imread("path/to/image.jpg")
+ >>> solution.display_output(frame)
+
+ Notes:
+ - This method will only display output if the 'show' configuration is set to True and the environment
+ supports image display.
+ - The display can be closed by pressing the 'q' key.
"""
if self.CFG.get("show") and self.env_check:
cv2.imshow("Ultralytics Solutions", im0)
diff --git a/ultralytics/solutions/speed_estimation.py b/ultralytics/solutions/speed_estimation.py
index decd159b55..0c4bc5f057 100644
--- a/ultralytics/solutions/speed_estimation.py
+++ b/ultralytics/solutions/speed_estimation.py
@@ -4,15 +4,43 @@ from time import time
import numpy as np
-from ultralytics.solutions.solutions import BaseSolution, LineString
+from ultralytics.solutions.solutions import BaseSolution
from ultralytics.utils.plotting import Annotator, colors
class SpeedEstimator(BaseSolution):
- """A class to estimate the speed of objects in a real-time video stream based on their tracks."""
+ """
+ A class to estimate the speed of objects in a real-time video stream based on their tracks.
+
+ This class extends the BaseSolution class and provides functionality for estimating object speeds using
+ tracking data in video streams.
+
+ Attributes:
+ spd (Dict[int, float]): Dictionary storing speed data for tracked objects.
+ trkd_ids (List[int]): List of tracked object IDs that have already been speed-estimated.
+ trk_pt (Dict[int, float]): Dictionary storing previous timestamps for tracked objects.
+ trk_pp (Dict[int, Tuple[float, float]]): Dictionary storing previous positions for tracked objects.
+ annotator (Annotator): Annotator object for drawing on images.
+ region (List[Tuple[int, int]]): List of points defining the speed estimation region.
+ track_line (List[Tuple[float, float]]): List of points representing the object's track.
+ r_s (LineString): LineString object representing the speed estimation region.
+
+ Methods:
+ initialize_region: Initializes the speed estimation region.
+ estimate_speed: Estimates the speed of objects based on tracking data.
+ store_tracking_history: Stores the tracking history for an object.
+ extract_tracks: Extracts tracks from the current frame.
+ display_output: Displays the output with annotations.
+
+ Examples:
+ >>> estimator = SpeedEstimator()
+ >>> frame = cv2.imread("frame.jpg")
+ >>> processed_frame = estimator.estimate_speed(frame)
+ >>> cv2.imshow("Speed Estimation", processed_frame)
+ """
def __init__(self, **kwargs):
- """Initializes the SpeedEstimator with the given parameters."""
+ """Initializes the SpeedEstimator object with speed estimation parameters and data structures."""
super().__init__(**kwargs)
self.initialize_region() # Initialize speed region
@@ -27,9 +55,15 @@ class SpeedEstimator(BaseSolution):
Estimates the speed of objects based on tracking data.
Args:
- im0 (ndarray): The input image that will be used for processing
- Returns
- im0 (ndarray): The processed image for more usage
+ im0 (np.ndarray): Input image for processing, typically a BGR frame of shape (H, W, C).
+
+ Returns:
+ (np.ndarray): Processed image with speed estimations and annotations.
+
+ Examples:
+ >>> estimator = SpeedEstimator()
+ >>> image = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
+ >>> processed_image = estimator.estimate_speed(image)
"""
self.annotator = Annotator(im0, line_width=self.line_width) # Initialize annotator
self.extract_tracks(im0) # Extract tracks
@@ -56,7 +90,7 @@ class SpeedEstimator(BaseSolution):
)
# Calculate object speed and direction based on region intersection
- if LineString([self.trk_pp[track_id], self.track_line[-1]]).intersects(self.l_s):
+ if self.LineString([self.trk_pp[track_id], self.track_line[-1]]).intersects(self.r_s):
direction = "known"
else:
direction = "unknown"
diff --git a/ultralytics/solutions/streamlit_inference.py b/ultralytics/solutions/streamlit_inference.py
index ea85cffba3..dcae3add76 100644
--- a/ultralytics/solutions/streamlit_inference.py
+++ b/ultralytics/solutions/streamlit_inference.py
@@ -11,7 +11,7 @@ from ultralytics.utils.downloads import GITHUB_ASSETS_STEMS
def inference(model=None):
- """Runs real-time object detection on video input using Ultralytics YOLOv8 in a Streamlit application."""
+ """Performs real-time object detection on video input using YOLO in a Streamlit web application."""
check_requirements("streamlit>=1.29.0") # scope imports for faster ultralytics package load speeds
import streamlit as st
@@ -108,7 +108,7 @@ def inference(model=None):
st.warning("Failed to read frame from webcam. Please make sure the webcam is connected properly.")
break
- prev_time = time.time()
+ prev_time = time.time() # Store initial time for FPS calculation
# Store model predictions
if enable_trk == "Yes":
@@ -120,7 +120,6 @@ def inference(model=None):
# Calculate model FPS
curr_time = time.time()
fps = 1 / (curr_time - prev_time)
- prev_time = curr_time
# display frame
org_frame.image(frame, channels="BGR")
diff --git a/ultralytics/utils/torch_utils.py b/ultralytics/utils/torch_utils.py
index 0143b933d8..0dbc728e23 100644
--- a/ultralytics/utils/torch_utils.py
+++ b/ultralytics/utils/torch_utils.py
@@ -163,7 +163,7 @@ def select_device(device="", batch=0, newline=False, verbose=True):
Note:
Sets the 'CUDA_VISIBLE_DEVICES' environment variable for specifying which GPUs to use.
"""
- if isinstance(device, torch.device):
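+ # Return torch.device objects and TPU device strings (e.g. "tpu") unchanged; they need no CUDA_VISIBLE_DEVICES handling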
+ if isinstance(device, torch.device) or str(device).startswith("tpu"):
return device
s = f"Ultralytics {__version__} 🚀 Python-{PYTHON_VERSION} torch-{torch.__version__} "