@@ -119,10 +118,8 @@ See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for usage examp
| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l.pt) | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 |
| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x.pt) | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 |
-- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
-
-  Reproduce by `yolo val detect data=coco.yaml device=0`
-- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.
-
-  Reproduce by `yolo val detect data=coco.yaml batch=1 device=0|cpu`
+- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
+  Reproduce by `yolo val detect data=coco.yaml device=0`
+- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.
+  Reproduce by `yolo val detect data=coco.yaml batch=1 device=0|cpu`
@@ -138,10 +135,8 @@ See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for usage examp
| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-oiv7.pt) | 640 | 34.9 | 596.9 | 2.43 | 44.1 | 167.4 |
| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-oiv7.pt) | 640 | 36.3 | 860.6 | 3.56 | 68.7 | 260.6 |
-- **mAP<sup>val</sup>** values are for single-model single-scale on [Open Image V7](https://docs.ultralytics.com/datasets/detect/open-images-v7/) dataset.
-
-  Reproduce by `yolo val detect data=open-images-v7.yaml device=0`
-- **Speed** averaged over Open Image V7 val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.
-
-  Reproduce by `yolo val detect data=open-images-v7.yaml batch=1 device=0|cpu`
+- **mAP<sup>val</sup>** values are for single-model single-scale on [Open Image V7](https://docs.ultralytics.com/datasets/detect/open-images-v7/) dataset.
+  Reproduce by `yolo val detect data=open-images-v7.yaml device=0`
+- **Speed** averaged over Open Image V7 val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.
+  Reproduce by `yolo val detect data=open-images-v7.yaml batch=1 device=0|cpu`
@@ -157,10 +152,8 @@ See [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for usage e
| [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 |
| [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 |
-- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
-
-  Reproduce by `yolo val segment data=coco-seg.yaml device=0`
-- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.
-
-  Reproduce by `yolo val segment data=coco-seg.yaml batch=1 device=0|cpu`
+- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
+  Reproduce by `yolo val segment data=coco-seg.yaml device=0`
+- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.
+  Reproduce by `yolo val segment data=coco-seg.yaml batch=1 device=0|cpu`
@@ -177,11 +170,8 @@ See [Pose Docs](https://docs.ultralytics.com/tasks/pose/) for usage examples wit
| [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose.pt) | 640 | 69.2 | 90.2 | 1607.1 | 3.73 | 69.4 | 263.2 |
| [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose-p6.pt) | 1280 | 71.6 | 91.2 | 4088.7 | 10.04 | 99.1 | 1066.4 |
-- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO Keypoints val2017](http://cocodataset.org)
-  dataset.
-
-  Reproduce by `yolo val pose data=coco-pose.yaml device=0`
-- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.
-
-  Reproduce by `yolo val pose data=coco-pose.yaml batch=1 device=0|cpu`
+- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO Keypoints val2017](http://cocodataset.org) dataset.
+  Reproduce by `yolo val pose data=coco-pose.yaml device=0`
+- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.
+  Reproduce by `yolo val pose data=coco-pose.yaml batch=1 device=0|cpu`
@@ -197,10 +187,8 @@ See [Classification Docs](https://docs.ultralytics.com/tasks/classify/) for usag
| [YOLOv8l-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-cls.pt) | 224 | 78.0 | 94.1 | 163.0 | 0.87 | 37.5 | 99.7 |
| [YOLOv8x-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-cls.pt) | 224 | 78.4 | 94.3 | 232.0 | 1.01 | 57.4 | 154.8 |
-- **acc** values are model accuracies on the [ImageNet](https://www.image-net.org/) dataset validation set.
-
-  Reproduce by `yolo val classify data=path/to/ImageNet device=0`
-- **Speed** averaged over ImageNet val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.
-
-  Reproduce by `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu`
+- **acc** values are model accuracies on the [ImageNet](https://www.image-net.org/) dataset validation set.
+  Reproduce by `yolo val classify data=path/to/ImageNet device=0`
+- **Speed** averaged over ImageNet val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.
+  Reproduce by `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu`
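The Python-API equivalent of the `yolo val` reproduction commands above may be worth noting alongside the CLI. A minimal sketch, assuming the `ultralytics` package is installed and `coco.yaml` resolves to a local COCO download:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # any detection checkpoint from the tables above

# mAP: single-model single-scale on COCO val2017
# (CLI: yolo val detect data=coco.yaml device=0)
metrics = model.val(data="coco.yaml", device=0)
print(metrics.box.map)  # mAP50-95

# Speed: batch-1 timing (CLI: yolo val detect data=coco.yaml batch=1 device=0)
model.val(data="coco.yaml", batch=1, device=0)
```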
diff --git a/README.zh-CN.md b/README.zh-CN.md
index cc581c9874..7a3bf9cb92 100644
--- a/README.zh-CN.md
+++ b/README.zh-CN.md
@@ -4,8 +4,7 @@
-[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/)
-
+[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/)
@@ -119,10 +118,8 @@ success = model.export(format="onnx") # 将模型导出为 ONNX 格式
| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l.pt) | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 |
| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x.pt) | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 |
-- **mAP<sup>val</sup>** 值是基于单模型单尺度在 [COCO val2017](http://cocodataset.org) 数据集上的结果。
-
-  通过 `yolo val detect data=coco.yaml device=0` 复现
-- **速度** 是使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例对 COCO val 图像进行平均计算的。
-
-  通过 `yolo val detect data=coco.yaml batch=1 device=0|cpu` 复现
+- **mAP<sup>val</sup>** 值是基于单模型单尺度在 [COCO val2017](http://cocodataset.org) 数据集上的结果。
+  通过 `yolo val detect data=coco.yaml device=0` 复现
+- **速度** 是使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例对 COCO val 图像进行平均计算的。
+  通过 `yolo val detect data=coco.yaml batch=1 device=0|cpu` 复现
@@ -138,10 +135,8 @@ success = model.export(format="onnx") # 将模型导出为 ONNX 格式
| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-oiv7.pt) | 640 | 34.9 | 596.9 | 2.43 | 44.1 | 167.4 |
| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-oiv7.pt) | 640 | 36.3 | 860.6 | 3.56 | 68.7 | 260.6 |
-- **mAP<sup>验证</sup>** 值适用于在[Open Image V7](https://docs.ultralytics.com/datasets/detect/open-images-v7/)数据集上的单模型单尺度。
-
-  通过 `yolo val detect data=open-images-v7.yaml device=0` 以复现。
-- **速度** 在使用[Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/)实例对Open Image V7验证图像进行平均测算。
-
-  通过 `yolo val detect data=open-images-v7.yaml batch=1 device=0|cpu` 以复现。
+- **mAP<sup>验证</sup>** 值适用于在[Open Image V7](https://docs.ultralytics.com/datasets/detect/open-images-v7/)数据集上的单模型单尺度。
+  通过 `yolo val detect data=open-images-v7.yaml device=0` 以复现。
+- **速度** 在使用[Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/)实例对Open Image V7验证图像进行平均测算。
+  通过 `yolo val detect data=open-images-v7.yaml batch=1 device=0|cpu` 以复现。
@@ -157,10 +152,8 @@ success = model.export(format="onnx") # 将模型导出为 ONNX 格式
| [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 |
| [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 |
-- **mAP<sup>val</sup>** 值是基于单模型单尺度在 [COCO val2017](http://cocodataset.org) 数据集上的结果。
-
-  通过 `yolo val segment data=coco-seg.yaml device=0` 复现
-- **速度** 是使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例对 COCO val 图像进行平均计算的。
-
-  通过 `yolo val segment data=coco-seg.yaml batch=1 device=0|cpu` 复现
+- **mAP<sup>val</sup>** 值是基于单模型单尺度在 [COCO val2017](http://cocodataset.org) 数据集上的结果。
+  通过 `yolo val segment data=coco-seg.yaml device=0` 复现
+- **速度** 是使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例对 COCO val 图像进行平均计算的。
+  通过 `yolo val segment data=coco-seg.yaml batch=1 device=0|cpu` 复现
@@ -177,10 +170,8 @@ success = model.export(format="onnx") # 将模型导出为 ONNX 格式
| [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose.pt) | 640 | 69.2 | 90.2 | 1607.1 | 3.73 | 69.4 | 263.2 |
| [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose-p6.pt) | 1280 | 71.6 | 91.2 | 4088.7 | 10.04 | 99.1 | 1066.4 |
-- **mAP<sup>val</sup>** 值是基于单模型单尺度在 [COCO Keypoints val2017](http://cocodataset.org) 数据集上的结果。
-
-  通过 `yolo val pose data=coco-pose.yaml device=0` 复现
-- **速度** 是使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例对 COCO val 图像进行平均计算的。
-
-  通过 `yolo val pose data=coco-pose.yaml batch=1 device=0|cpu` 复现
+- **mAP<sup>val</sup>** 值是基于单模型单尺度在 [COCO Keypoints val2017](http://cocodataset.org) 数据集上的结果。
+  通过 `yolo val pose data=coco-pose.yaml device=0` 复现
+- **速度** 是使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例对 COCO val 图像进行平均计算的。
+  通过 `yolo val pose data=coco-pose.yaml batch=1 device=0|cpu` 复现
@@ -196,10 +187,8 @@ success = model.export(format="onnx") # 将模型导出为 ONNX 格式
| [YOLOv8l-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-cls.pt) | 224 | 78.0 | 94.1 | 163.0 | 0.87 | 37.5 | 99.7 |
| [YOLOv8x-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-cls.pt) | 224 | 78.4 | 94.3 | 232.0 | 1.01 | 57.4 | 154.8 |
-- **acc** 值是模型在 [ImageNet](https://www.image-net.org/) 数据集验证集上的准确率。
-
-  通过 `yolo val classify data=path/to/ImageNet device=0` 复现
-- **速度** 是使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例对 ImageNet val 图像进行平均计算的。
-
-  通过 `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu` 复现
+- **acc** 值是模型在 [ImageNet](https://www.image-net.org/) 数据集验证集上的准确率。
+  通过 `yolo val classify data=path/to/ImageNet device=0` 复现
+- **速度** 是使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例对 ImageNet val 图像进行平均计算的。
+  通过 `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu` 复现
diff --git a/docs/README.md b/docs/README.md
index bcf7e0f0c1..a5da59e1db 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -4,7 +4,7 @@ Ultralytics Docs are deployed to [https://docs.ultralytics.com](https://docs.ult
[![pages-build-deployment](https://github.com/ultralytics/docs/actions/workflows/pages/pages-build-deployment/badge.svg)](https://github.com/ultralytics/docs/actions/workflows/pages/pages-build-deployment) [![Check Broken links](https://github.com/ultralytics/docs/actions/workflows/links.yml/badge.svg)](https://github.com/ultralytics/docs/actions/workflows/links.yml)
-### Install Ultralytics package
+## Install Ultralytics package
[![PyPI version](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics)
@@ -32,7 +32,7 @@ This will install the ultralytics package and its dependencies in developer mode
Note that you may need to use the pip3 command instead of pip if you have multiple versions of Python installed on your system.
-### Building and Serving Locally
+## Building and Serving Locally
The `mkdocs serve` command is used to build and serve a local version of the MkDocs documentation site. It is typically used during the development and testing phase of a documentation project.
@@ -52,7 +52,7 @@ While the site is being served, you can make changes to the documentation files
To stop the serve command and terminate the local server, you can use the `CTRL+C` keyboard shortcut.
-### Building and Serving Multi-Language
+## Building and Serving Multi-Language
For multi-language MkDocs sites use the following additional steps:
@@ -81,7 +81,7 @@ For multi-language MkDocs sites use the following additional steps:
Note the above steps are combined into the Ultralytics [build_docs.py](https://github.com/ultralytics/ultralytics/blob/main/docs/build_docs.py) script.
-### Deploying Your Documentation Site
+## Deploying Your Documentation Site
To deploy your MkDocs documentation site, you will need to choose a hosting provider and a deployment method. Some popular options include GitHub Pages, GitLab Pages, and Amazon S3.
diff --git a/docs/en/guides/heatmaps.md b/docs/en/guides/heatmaps.md
index 2d65c6b5d6..360f72efec 100644
--- a/docs/en/guides/heatmaps.md
+++ b/docs/en/guides/heatmaps.md
@@ -35,12 +35,11 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
from ultralytics.solutions import heatmap
import cv2
- model = YOLO("yolov8s.pt")
+ model = YOLO("yolov8s.pt") # YOLOv8 custom/pretrained model
cap = cv2.VideoCapture("path/to/video/file.mp4")
- if not cap.isOpened():
- print("Error reading video file")
- exit(0)
+ assert cap.isOpened(), "Error reading video file"
+ # Heatmap Init
heatmap_obj = heatmap.Heatmap()
heatmap_obj.set_args(colormap=cv2.COLORMAP_CIVIDIS,
                         imw=cap.get(4),  # should be same as im0 width
@@ -52,7 +51,7 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
if not success:
exit(0)
results = model.track(im0, persist=True)
- frame = heatmap_obj.generate_heatmap(im0, tracks=results)
+ im0 = heatmap_obj.generate_heatmap(im0, tracks=results)
```
@@ -62,14 +61,13 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
from ultralytics.solutions import heatmap
import cv2
- model = YOLO("yolov8s.pt")
+ model = YOLO("yolov8s.pt") # YOLOv8 custom/pretrained model
cap = cv2.VideoCapture("path/to/video/file.mp4")
- if not cap.isOpened():
- print("Error reading video file")
- exit(0)
+ assert cap.isOpened(), "Error reading video file"
classes_for_heatmap = [0, 2]
+ # Heatmap init
heatmap_obj = heatmap.Heatmap()
heatmap_obj.set_args(colormap=cv2.COLORMAP_CIVIDIS,
                         imw=cap.get(4),  # should be same as im0 width
@@ -80,29 +78,28 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
success, im0 = cap.read()
if not success:
exit(0)
- results = model.track(im0, persist=True,
- classes=classes_for_heatmap)
- frame = heatmap_obj.generate_heatmap(im0, tracks=results)
+ results = model.track(im0, persist=True, classes=classes_for_heatmap)
+ im0 = heatmap_obj.generate_heatmap(im0, tracks=results)
```
=== "Heatmap with Save Output"
```python
from ultralytics import YOLO
- import heatmap
+ from ultralytics.solutions import heatmap
import cv2
- model = YOLO("yolov8n.pt")
+ model = YOLO("yolov8s.pt") # YOLOv8 custom/pretrained model
cap = cv2.VideoCapture("path/to/video/file.mp4")
- if not cap.isOpened():
- print("Error reading video file")
- exit(0)
+    assert cap.isOpened(), "Error reading video file"
+    classes_for_heatmap = [0, 2]  # classes to track for the heatmap (used in the loop below)
+ # Video writer
video_writer = cv2.VideoWriter("heatmap_output.avi",
cv2.VideoWriter_fourcc(*'mp4v'),
int(cap.get(5)),
(int(cap.get(3)), int(cap.get(4))))
+ # Heatmap init
heatmap_obj = heatmap.Heatmap()
heatmap_obj.set_args(colormap=cv2.COLORMAP_CIVIDIS,
                         imw=cap.get(4),  # should be same as im0 width
@@ -113,22 +110,55 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
success, im0 = cap.read()
if not success:
exit(0)
- results = model.track(im0, persist=True)
- frame = heatmap_obj.generate_heatmap(im0, tracks=results)
+ results = model.track(im0, persist=True, classes=classes_for_heatmap)
+ im0 = heatmap_obj.generate_heatmap(im0, tracks=results)
video_writer.write(im0)
- video_writer.release()
+ ```
+
+ === "Heatmap with Object Counting"
+ ```python
+ from ultralytics import YOLO
+ from ultralytics.solutions import heatmap
+ import cv2
+
+ model = YOLO("yolov8s.pt") # YOLOv8 custom/pretrained model
+
+ cap = cv2.VideoCapture("path/to/video/file.mp4") # Video file Path, webcam 0
+ assert cap.isOpened(), "Error reading video file"
+
+ # Region for object counting
+ count_reg_pts = [(20, 400), (1080, 404), (1080, 360), (20, 360)]
+
+ # Heatmap Init
+ heatmap_obj = heatmap.Heatmap()
+ heatmap_obj.set_args(colormap=cv2.COLORMAP_JET,
+                         imw=cap.get(4),  # should be same as im0 width
+                         imh=cap.get(3),  # should be same as im0 height
+ view_img=True,
+ count_reg_pts=count_reg_pts)
+
+ while cap.isOpened():
+ success, im0 = cap.read()
+ if not success:
+ exit(0)
+ results = model.track(im0, persist=True)
+ im0 = heatmap_obj.generate_heatmap(im0, tracks=results)
```
### Arguments `set_args`
-| Name | Type | Default | Description |
-|---------------|----------------|---------|--------------------------------|
-| view_img | `bool` | `False` | Display the frame with heatmap |
-| colormap | `cv2.COLORMAP` | `None` | cv2.COLORMAP for heatmap |
-| imw | `int` | `None` | Width of Heatmap |
-| imh | `int` | `None` | Height of Heatmap |
-| heatmap_alpha | `float` | `0.5` | Heatmap alpha value |
+| Name | Type | Default | Description |
+|---------------------|----------------|-----------------|---------------------------------|
+| view_img | `bool` | `False` | Display the frame with heatmap |
+| colormap | `cv2.COLORMAP` | `None` | cv2.COLORMAP for heatmap |
+| imw | `int` | `None` | Width of Heatmap |
+| imh | `int` | `None` | Height of Heatmap |
+| heatmap_alpha | `float` | `0.5` | Heatmap alpha value |
+| count_reg_pts | `list` | `None` | Object counting region points |
+| count_txt_thickness | `int` | `2` | Count values text size |
+| count_reg_color | `tuple` | `(255, 0, 255)` | Counting region color |
+| region_thickness | `int` | `5` | Counting region thickness value |
### Arguments `model.track`
@@ -140,3 +170,32 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
| `conf` | `float` | `0.3` | Confidence Threshold |
| `iou` | `float` | `0.5` | IOU Threshold |
| `classes` | `list` | `None` | filter results by class, i.e. classes=0, or classes=[0,2,3] |
+
+### Heatmap COLORMAPs
+
+| Colormap Name | Description |
+|---------------------------------|----------------------------------------|
+| `cv::COLORMAP_AUTUMN` | Autumn color map |
+| `cv::COLORMAP_BONE` | Bone color map |
+| `cv::COLORMAP_JET` | Jet color map |
+| `cv::COLORMAP_WINTER` | Winter color map |
+| `cv::COLORMAP_RAINBOW` | Rainbow color map |
+| `cv::COLORMAP_OCEAN` | Ocean color map |
+| `cv::COLORMAP_SUMMER` | Summer color map |
+| `cv::COLORMAP_SPRING` | Spring color map |
+| `cv::COLORMAP_COOL` | Cool color map |
+| `cv::COLORMAP_HSV` | HSV (Hue, Saturation, Value) color map |
+| `cv::COLORMAP_PINK` | Pink color map |
+| `cv::COLORMAP_HOT` | Hot color map |
+| `cv::COLORMAP_PARULA` | Parula color map |
+| `cv::COLORMAP_MAGMA` | Magma color map |
+| `cv::COLORMAP_INFERNO` | Inferno color map |
+| `cv::COLORMAP_PLASMA` | Plasma color map |
+| `cv::COLORMAP_VIRIDIS` | Viridis color map |
+| `cv::COLORMAP_CIVIDIS` | Cividis color map |
+| `cv::COLORMAP_TWILIGHT` | Twilight color map |
+| `cv::COLORMAP_TWILIGHT_SHIFTED` | Shifted Twilight color map |
+| `cv::COLORMAP_TURBO` | Turbo color map |
+| `cv::COLORMAP_DEEPGREEN` | Deep Green color map |
+
+These colormaps are commonly used for visualizing data with different color representations.
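Only the `colormap` argument changes when auditioning another entry from this table. A sketch reusing the tracking loop from the examples above (the weights and video path are placeholders):

```python
import cv2
from ultralytics import YOLO
from ultralytics.solutions import heatmap

model = YOLO("yolov8s.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")
assert cap.isOpened(), "Error reading video file"

heatmap_obj = heatmap.Heatmap()
heatmap_obj.set_args(colormap=cv2.COLORMAP_INFERNO,  # any colormap from the table above
                     imw=cap.get(4),  # same width/height convention as the examples above
                     imh=cap.get(3),
                     view_img=True)

while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        break
    results = model.track(im0, persist=True)
    im0 = heatmap_obj.generate_heatmap(im0, tracks=results)
cap.release()
```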
diff --git a/docs/en/guides/object-counting.md b/docs/en/guides/object-counting.md
index abc3607f12..c340701453 100644
--- a/docs/en/guides/object-counting.md
+++ b/docs/en/guides/object-counting.md
@@ -23,7 +23,6 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
| ![Conveyor Belt Packets Counting Using Ultralytics YOLOv8](https://github.com/RizwanMunawar/ultralytics/assets/62513924/70e2d106-510c-4c6c-a57a-d34a765aa757) | ![Fish Counting in Sea using Ultralytics YOLOv8](https://github.com/RizwanMunawar/ultralytics/assets/62513924/c60d047b-3837-435f-8d29-bb9fc95d2191) |
| Conveyor Belt Packets Counting Using Ultralytics YOLOv8 | Fish Counting in Sea using Ultralytics YOLOv8 |
-
!!! Example "Object Counting Example"
=== "Object Counting"
@@ -34,9 +33,7 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
model = YOLO("yolov8n.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")
- if not cap.isOpened():
- print("Error reading video file")
- exit(0)
+ assert cap.isOpened(), "Error reading video file"
counter = object_counter.ObjectCounter() # Init Object Counter
region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360)]
@@ -61,9 +58,7 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
model = YOLO("yolov8n.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")
- if not cap.isOpened():
- print("Error reading video file")
- exit(0)
+ assert cap.isOpened(), "Error reading video file"
classes_to_count = [0, 2]
counter = object_counter.ObjectCounter() # Init Object Counter
@@ -91,9 +86,7 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
model = YOLO("yolov8n.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")
- if not cap.isOpened():
- print("Error reading video file")
- exit(0)
+ assert cap.isOpened(), "Error reading video file"
video_writer = cv2.VideoWriter("object_counting.avi",
cv2.VideoWriter_fourcc(*'mp4v'),
@@ -134,7 +127,6 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
| track_thickness | `int` | `2` | Tracking line thickness |
| draw_tracks | `bool` | `False` | Draw Tracks lines |
-
### Arguments `model.track`
| Name | Type | Default | Description |
diff --git a/docs/en/guides/workouts-monitoring.md b/docs/en/guides/workouts-monitoring.md
index bd2eb49810..71e0124de6 100644
--- a/docs/en/guides/workouts-monitoring.md
+++ b/docs/en/guides/workouts-monitoring.md
@@ -23,7 +23,6 @@ Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://gi
| ![PushUps Counting](https://github.com/RizwanMunawar/ultralytics/assets/62513924/cf016a41-589f-420f-8a8c-2cc8174a16de) | ![PullUps Counting](https://github.com/RizwanMunawar/ultralytics/assets/62513924/cb20f316-fac2-4330-8445-dcf5ffebe329) |
| PushUps Counting | PullUps Counting |
-
!!! Example "Workouts Monitoring Example"
=== "Workouts Monitoring"
@@ -34,9 +33,7 @@ Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://gi
model = YOLO("yolov8n-pose.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")
- if not cap.isOpened():
- print("Error reading video file")
- exit(0)
+ assert cap.isOpened(), "Error reading video file"
gym_object = ai_gym.AIGym() # init AI GYM module
gym_object.set_args(line_thickness=2,
@@ -62,9 +59,7 @@ Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://gi
model = YOLO("yolov8n-pose.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")
- if not cap.isOpened():
- print("Error reading video file")
- exit(0)
+ assert cap.isOpened(), "Error reading video file"
video_writer = cv2.VideoWriter("workouts.avi",
cv2.VideoWriter_fourcc(*'mp4v'),
diff --git a/docs/en/yolov5/tutorials/clearml_logging_integration.md b/docs/en/yolov5/tutorials/clearml_logging_integration.md
index c40d16d571..dda96f8f36 100644
--- a/docs/en/yolov5/tutorials/clearml_logging_integration.md
+++ b/docs/en/yolov5/tutorials/clearml_logging_integration.md
@@ -90,7 +90,6 @@ This will capture:
- Mosaic per epoch
- Validation images per epoch
-
That's a lot, right? 🤯 Now, we can visualize all of this information in the ClearML UI to get an overview of our training progress. Add custom columns to the table view (e.g. mAP_0.5) so you can easily sort on the best performing model. Or select multiple experiments and directly compare them!

There's even more we can do with all of this information, like hyperparameter optimization and remote execution, so keep reading if you want to see how that works!
diff --git a/examples/YOLOv8-ONNXRuntime-Rust/README.md b/examples/YOLOv8-ONNXRuntime-Rust/README.md
index 6876c15e91..8961d9ce7e 100644
--- a/examples/YOLOv8-ONNXRuntime-Rust/README.md
+++ b/examples/YOLOv8-ONNXRuntime-Rust/README.md
@@ -155,8 +155,7 @@ cargo run --release -- --help
### Classification
-Running dynamic shape ONNX model on `CPU` with image size `--height 224 --width 224`.
-Saving plotted image in `runs` directory.
+Running dynamic shape ONNX model on `CPU` with image size `--height 224 --width 224`. Saving plotted image in `runs` directory.
```
cargo run --release -- --model ../assets/weights/yolov8m-cls-dyn.onnx --source ../assets/images/dog.jpg --height 224 --width 224 --plot --profile
diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py
index e1e4eed847..ed780b1edb 100644
--- a/ultralytics/__init__.py
+++ b/ultralytics/__init__.py
@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
-__version__ = '8.0.223'
+__version__ = '8.0.224'
from ultralytics.models import RTDETR, SAM, YOLO
from ultralytics.models.fastsam import FastSAM
diff --git a/ultralytics/cfg/models/README.md b/ultralytics/cfg/models/README.md
index 4749441d63..c022fb57a6 100644
--- a/ultralytics/cfg/models/README.md
+++ b/ultralytics/cfg/models/README.md
@@ -14,8 +14,7 @@ Model `*.yaml` files may be used directly in the Command Line Interface (CLI) wi
yolo task=detect mode=train model=yolov8n.yaml data=coco128.yaml epochs=100
```
-They may also be used directly in a Python environment, and accepts the same
-[arguments](https://docs.ultralytics.com/usage/cfg/) as in the CLI example above:
+They may also be used directly in a Python environment, and accept the same [arguments](https://docs.ultralytics.com/usage/cfg/) as in the CLI example above:
```python
from ultralytics import YOLO
diff --git a/ultralytics/solutions/heatmap.py b/ultralytics/solutions/heatmap.py
index 5f5172dcf3..80e8899f5d 100644
--- a/ultralytics/solutions/heatmap.py
+++ b/ultralytics/solutions/heatmap.py
@@ -1,14 +1,24 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
+from collections import defaultdict
+
import cv2
import numpy as np
+from ultralytics.utils.checks import check_requirements
+from ultralytics.utils.plotting import Annotator
+
+check_requirements('shapely>=2.0.0')
+
+from shapely.geometry import Polygon
+from shapely.geometry.point import Point
+
class Heatmap:
"""A class to draw heatmaps in real-time video stream based on their tracks."""
def __init__(self):
- """Initializes the heatmap class with default values for Visual, Image, track and heatmap parameters."""
+        """Initializes the heatmap class with default values for visual, image, track, count and heatmap parameters."""
# Visual Information
self.annotator = None
@@ -28,8 +38,28 @@ class Heatmap:
self.boxes = None
self.track_ids = None
self.clss = None
-
- def set_args(self, imw, imh, colormap=cv2.COLORMAP_JET, heatmap_alpha=0.5, view_img=False):
+ self.track_history = None
+
+ # Counting Info
+ self.count_reg_pts = None
+ self.count_region = None
+ self.in_counts = 0
+ self.out_counts = 0
+ self.count_list = []
+ self.count_txt_thickness = 0
+ self.count_reg_color = (0, 255, 0)
+ self.region_thickness = 5
+
+ def set_args(self,
+ imw,
+ imh,
+ colormap=cv2.COLORMAP_JET,
+ heatmap_alpha=0.5,
+ view_img=False,
+ count_reg_pts=None,
+ count_txt_thickness=2,
+ count_reg_color=(255, 0, 255),
+ region_thickness=5):
"""
Configures the heatmap colormap, width, height and display parameters.
@@ -39,6 +69,10 @@ class Heatmap:
imh (int): The height of the frame.
heatmap_alpha (float): alpha value for heatmap display
view_img (bool): Flag indicating frame display
+ count_reg_pts (list): Object counting region points
+ count_txt_thickness (int): Text thickness for object counting display
+ count_reg_color (RGB color): Color of object counting region
+ region_thickness (int): Object counting Region thickness
"""
self.imw = imw
self.imh = imh
@@ -46,8 +80,16 @@ class Heatmap:
self.heatmap_alpha = heatmap_alpha
self.view_img = view_img
- # Heatmap new frame
- self.heatmap = np.zeros((int(self.imw), int(self.imh)), dtype=np.float32)
+ self.heatmap = np.zeros((int(self.imw), int(self.imh)), dtype=np.float32) # Heatmap new frame
+
+ if count_reg_pts is not None:
+ self.track_history = defaultdict(list)
+ self.count_reg_pts = count_reg_pts
+ self.count_region = Polygon(self.count_reg_pts)
+
+ self.count_txt_thickness = count_txt_thickness # Counting text thickness
+ self.count_reg_color = count_reg_color
+ self.region_thickness = region_thickness
def extract_results(self, tracks):
"""
@@ -56,8 +98,6 @@ class Heatmap:
Args:
tracks (list): List of tracks obtained from the object tracking process.
"""
- if tracks[0].boxes.id is None:
- return
self.boxes = tracks[0].boxes.xyxy.cpu()
self.clss = tracks[0].boxes.cls.cpu().tolist()
self.track_ids = tracks[0].boxes.id.int().cpu().tolist()
@@ -70,15 +110,49 @@ class Heatmap:
im0 (nd array): Image
tracks (list): List of tracks obtained from the object tracking process.
"""
- self.extract_results(tracks)
self.im0 = im0
+ if tracks[0].boxes.id is None:
+ return self.im0
- for box, cls in zip(self.boxes, self.clss):
- self.heatmap[int(box[1]):int(box[3]), int(box[0]):int(box[2])] += 1
+ self.extract_results(tracks)
+ self.annotator = Annotator(self.im0, self.count_txt_thickness, None)
+
+ if self.count_reg_pts is not None:
+ # Draw counting region
+ self.annotator.draw_region(reg_pts=self.count_reg_pts,
+ color=self.count_reg_color,
+ thickness=self.region_thickness)
+
+ for box, cls, track_id in zip(self.boxes, self.clss, self.track_ids):
+ self.heatmap[int(box[1]):int(box[3]), int(box[0]):int(box[2])] += 1
+
+ # Store tracking hist
+ track_line = self.track_history[track_id]
+ track_line.append((float((box[0] + box[2]) / 2), float((box[1] + box[3]) / 2)))
+ if len(track_line) > 30:
+ track_line.pop(0)
+
+ # Count objects
+ if self.count_region.contains(Point(track_line[-1])):
+ if track_id not in self.count_list:
+ self.count_list.append(track_id)
+ if box[0] < self.count_region.centroid.x:
+ self.out_counts += 1
+ else:
+ self.in_counts += 1
+ else:
+ for box, cls in zip(self.boxes, self.clss):
+ self.heatmap[int(box[1]):int(box[3]), int(box[0]):int(box[2])] += 1
# Normalize, apply colormap to heatmap and combine with original image
heatmap_normalized = cv2.normalize(self.heatmap, None, 0, 255, cv2.NORM_MINMAX)
heatmap_colored = cv2.applyColorMap(heatmap_normalized.astype(np.uint8), self.colormap)
+
+ if self.count_reg_pts is not None:
+ incount_label = 'InCount : ' + f'{self.in_counts}'
+ outcount_label = 'OutCount : ' + f'{self.out_counts}'
+ self.annotator.count_labels(in_count=incount_label, out_count=outcount_label)
+
im0_with_heatmap = cv2.addWeighted(self.im0, 1 - self.heatmap_alpha, heatmap_colored, self.heatmap_alpha, 0)
if self.view_img:
@@ -94,6 +168,7 @@ class Heatmap:
im0_with_heatmap (nd array): Original Image with heatmap
"""
cv2.imshow('Ultralytics Heatmap', im0_with_heatmap)
+
if cv2.waitKey(1) & 0xFF == ord('q'):
return
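The counting branch in `generate_heatmap` reduces to a shapely point-in-polygon test plus a centroid comparison. A self-contained sketch of just that decision, with hypothetical coordinates but the same geometry calls as the patch:

```python
from shapely.geometry import Polygon
from shapely.geometry.point import Point

# Counting region, same shape as the docs examples
count_region = Polygon([(20, 400), (1080, 404), (1080, 360), (20, 360)])

# Hypothetical latest state for one tracked object
track_id = 7
box_x1 = 600.0                    # left edge (x1) of the object's box
latest_centroid = (620.0, 380.0)  # newest point in its track history

count_list, in_counts, out_counts = [], 0, 0

# Mirrors Heatmap.generate_heatmap: each track id is counted once, and the
# direction is decided by comparing box x1 to the region centroid's x
if count_region.contains(Point(latest_centroid)) and track_id not in count_list:
    count_list.append(track_id)
    if box_x1 < count_region.centroid.x:
        out_counts += 1
    else:
        in_counts += 1

print(in_counts, out_counts)  # -> 1 0
```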
diff --git a/ultralytics/solutions/object_counter.py b/ultralytics/solutions/object_counter.py
index acb60354c7..d6409d09c5 100644
--- a/ultralytics/solutions/object_counter.py
+++ b/ultralytics/solutions/object_counter.py
@@ -119,7 +119,8 @@ class ObjectCounter:
# Draw Tracks
track_line = self.track_history[track_id]
track_line.append((float((box[0] + box[2]) / 2), float((box[1] + box[3]) / 2)))
- track_line.pop(0) if len(track_line) > 30 else None
+ if len(track_line) > 30:
+ track_line.pop(0)
if self.draw_tracks:
self.annotator.draw_centroid_and_tracks(track_line,
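A possible alternative to the explicit `pop(0)` above is `collections.deque(maxlen=30)`, which evicts the oldest centroid automatically. A sketch of the equivalent bookkeeping, offered as a design note rather than what the patch does:

```python
from collections import defaultdict, deque

# deque(maxlen=30) drops the oldest point on append, replacing the
# `if len(track_line) > 30: track_line.pop(0)` pattern
track_history = defaultdict(lambda: deque(maxlen=30))

box = (100.0, 200.0, 140.0, 260.0)  # hypothetical x1, y1, x2, y2
track_line = track_history[3]       # track id 3
track_line.append(((box[0] + box[2]) / 2, (box[1] + box[3]) / 2))
```

The patch keeps a plain list, which stays directly indexable and converts cleanly for the drawing helpers, so the deque is a trade-off rather than a strict improvement.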
diff --git a/ultralytics/utils/plotting.py b/ultralytics/utils/plotting.py
index aebc4003d3..ff6607cf69 100644
--- a/ultralytics/utils/plotting.py
+++ b/ultralytics/utils/plotting.py
@@ -259,9 +259,9 @@ class Annotator:
return np.asarray(self.im)
# Object Counting Annotator
- def draw_region(self, reg_pts=None, color=(0, 255, 0)):
+ def draw_region(self, reg_pts=None, color=(0, 255, 0), thickness=5):
# Draw region line
- cv2.polylines(self.im, [np.array(reg_pts, dtype=np.int32)], isClosed=True, color=color, thickness=self.tf + 2)
+ cv2.polylines(self.im, [np.array(reg_pts, dtype=np.int32)], isClosed=True, color=color, thickness=thickness)
def draw_centroid_and_tracks(self, track, color=(255, 0, 255), track_thickness=2):
        # Draw centroid and track lines
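With `thickness` exposed as a parameter, callers now control the region outline independently of the annotator's font thickness (`self.tf`). A minimal usage sketch on a synthetic frame, with placeholder points:

```python
import numpy as np
from ultralytics.utils.plotting import Annotator

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # blank BGR frame
annotator = Annotator(frame, line_width=2)

# Outline thickness is passed explicitly instead of derived from self.tf + 2
annotator.draw_region(reg_pts=[(20, 400), (620, 400), (620, 360), (20, 360)],
                      color=(255, 0, 255),
                      thickness=5)
out = annotator.result()  # annotated frame as a numpy array
```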