diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 700d9efd5..f70d35208 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -111,22 +111,22 @@ jobs:
- name: Benchmark DetectionModel
shell: python
run: |
- from ultralytics.yolo.utils.benchmarks import benchmark
+ from ultralytics.utils.benchmarks import benchmark
benchmark(model='${{ matrix.model }}.pt', imgsz=160, half=False, hard_fail=0.26)
- name: Benchmark SegmentationModel
shell: python
run: |
- from ultralytics.yolo.utils.benchmarks import benchmark
+ from ultralytics.utils.benchmarks import benchmark
benchmark(model='${{ matrix.model }}-seg.pt', imgsz=160, half=False, hard_fail=0.30)
- name: Benchmark ClassificationModel
shell: python
run: |
- from ultralytics.yolo.utils.benchmarks import benchmark
+ from ultralytics.utils.benchmarks import benchmark
benchmark(model='${{ matrix.model }}-cls.pt', imgsz=160, half=False, hard_fail=0.36)
- name: Benchmark PoseModel
shell: python
run: |
- from ultralytics.yolo.utils.benchmarks import benchmark
+ from ultralytics.utils.benchmarks import benchmark
benchmark(model='${{ matrix.model }}-pose.pt', imgsz=160, half=False, hard_fail=0.17)
- name: Benchmark Summary
run: |
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index 72e221687..911f4bd0d 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -40,7 +40,7 @@ jobs:
import os
import pkg_resources as pkg
import ultralytics
- from ultralytics.yolo.utils.checks import check_latest_pypi_version
+ from ultralytics.utils.checks import check_latest_pypi_version
v_local = pkg.parse_version(ultralytics.__version__).release
v_pypi = pkg.parse_version(check_latest_pypi_version()).release
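For context, a hedged sketch of how this version check is typically completed into a publish gate; the comparison logic below is an illustrative assumption, not the repository's actual workflow code:

```python
import pkg_resources as pkg

import ultralytics
from ultralytics.utils.checks import check_latest_pypi_version

v_local = pkg.parse_version(ultralytics.__version__).release
v_pypi = pkg.parse_version(check_latest_pypi_version()).release

# Illustrative assumption: publish only when the local patch version is exactly
# one ahead of PyPI while major and minor versions are unchanged.
should_publish = v_local[:2] == v_pypi[:2] and v_local[2] == v_pypi[2] + 1
print(f'Local {v_local} vs PyPI {v_pypi} -> publish={should_publish}')
```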
diff --git a/README.md b/README.md
index bf7947f2a..ca91b0518 100644
--- a/README.md
+++ b/README.md
@@ -100,7 +100,7 @@ results = model("https://ultralytics.com/images/bus.jpg") # predict on an image
path = model.export(format="onnx") # export the model to ONNX format
```
-[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases). See YOLOv8 [Python Docs](https://docs.ultralytics.com/usage/python) for more examples.
+[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases). See YOLOv8 [Python Docs](https://docs.ultralytics.com/usage/python) for more examples.
@@ -110,7 +110,7 @@ YOLOv8 [Detect](https://docs.ultralytics.com/tasks/detect), [Segment](https://do
-All [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
+All [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
Detection
diff --git a/README.zh-CN.md b/README.zh-CN.md
index 872624907..c112ad3c8 100644
--- a/README.zh-CN.md
+++ b/README.zh-CN.md
@@ -100,7 +100,7 @@ results = model("https://ultralytics.com/images/bus.jpg") # 对图像进行预
success = model.export(format="onnx") # 将模型导出为 ONNX 格式
```
-[模型](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) 会自动从最新的 Ultralytics [发布版本](https://github.com/ultralytics/assets/releases)中下载。查看 YOLOv8 [Python 文档](https://docs.ultralytics.com/usage/python)以获取更多示例。
+[模型](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) 会自动从最新的 Ultralytics [发布版本](https://github.com/ultralytics/assets/releases)中下载。查看 YOLOv8 [Python 文档](https://docs.ultralytics.com/usage/python)以获取更多示例。
@@ -110,7 +110,7 @@ success = model.export(format="onnx") # 将模型导出为 ONNX 格式
-所有[模型](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models)在首次使用时会自动从最新的Ultralytics [发布版本](https://github.com/ultralytics/assets/releases)下载。
+所有[模型](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models)在首次使用时会自动从最新的Ultralytics [发布版本](https://github.com/ultralytics/assets/releases)下载。
检测
diff --git a/docs/build_reference.py b/docs/build_reference.py
index 65dcc6c0d..f94531035 100644
--- a/docs/build_reference.py
+++ b/docs/build_reference.py
@@ -10,7 +10,7 @@ import os
import re
from collections import defaultdict
from pathlib import Path
-from ultralytics.yolo.utils import ROOT
+from ultralytics.utils import ROOT
NEW_YAML_DIR = ROOT.parent
CODE_DIR = ROOT
@@ -39,7 +39,7 @@ def create_markdown(py_filepath, module_path, classes, functions):
with open(md_filepath, 'r') as file:
existing_content = file.read()
header_parts = existing_content.split('---', 2)
- if len(header_parts) >= 3:
+            if len(header_parts) >= 3 and ('description:' in header_parts[1] or 'comments:' in header_parts[1]):
header_content = f"{header_parts[0]}---{header_parts[1]}---\n\n"
module_path = module_path.replace('.__init__', '')
diff --git a/docs/datasets/detect/argoverse.md b/docs/datasets/detect/argoverse.md
index 8d0acbc04..139c9203e 100644
--- a/docs/datasets/detect/argoverse.md
+++ b/docs/datasets/detect/argoverse.md
@@ -29,12 +29,12 @@ The Argoverse dataset is widely used for training and evaluating deep learning m
## Dataset YAML
-A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. For the case of the Argoverse dataset, the `Argoverse.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/Argoverse.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/Argoverse.yaml).
+A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. For the case of the Argoverse dataset, the `Argoverse.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/Argoverse.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/Argoverse.yaml).
-!!! example "ultralytics/datasets/Argoverse.yaml"
+!!! example "ultralytics/cfg/datasets/Argoverse.yaml"
```yaml
- --8<-- "ultralytics/datasets/Argoverse.yaml"
+ --8<-- "ultralytics/cfg/datasets/Argoverse.yaml"
```
## Usage
diff --git a/docs/datasets/detect/coco.md b/docs/datasets/detect/coco.md
index 196977d4e..b296b7fbf 100644
--- a/docs/datasets/detect/coco.md
+++ b/docs/datasets/detect/coco.md
@@ -29,12 +29,12 @@ The COCO dataset is widely used for training and evaluating deep learning models
## Dataset YAML
-A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the COCO dataset, the `coco.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml).
+A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the COCO dataset, the `coco.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml).
-!!! example "ultralytics/datasets/coco.yaml"
+!!! example "ultralytics/cfg/datasets/coco.yaml"
```yaml
- --8<-- "ultralytics/datasets/coco.yaml"
+ --8<-- "ultralytics/cfg/datasets/coco.yaml"
```
## Usage
diff --git a/docs/datasets/detect/coco8.md b/docs/datasets/detect/coco8.md
index 5fb28234e..8cbb75a99 100644
--- a/docs/datasets/detect/coco8.md
+++ b/docs/datasets/detect/coco8.md
@@ -19,12 +19,12 @@ and [YOLOv8](https://github.com/ultralytics/ultralytics).
## Dataset YAML
-A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the COCO8 dataset, the `coco8.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco8.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco8.yaml).
+A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the COCO8 dataset, the `coco8.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8.yaml).
-!!! example "ultralytics/datasets/coco8.yaml"
+!!! example "ultralytics/cfg/datasets/coco8.yaml"
```yaml
- --8<-- "ultralytics/datasets/coco8.yaml"
+ --8<-- "ultralytics/cfg/datasets/coco8.yaml"
```
## Usage
diff --git a/docs/datasets/detect/globalwheat2020.md b/docs/datasets/detect/globalwheat2020.md
index 66ad45c92..9b9997403 100644
--- a/docs/datasets/detect/globalwheat2020.md
+++ b/docs/datasets/detect/globalwheat2020.md
@@ -28,12 +28,12 @@ The Global Wheat Head Dataset is widely used for training and evaluating deep le
## Dataset YAML
-A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. For the case of the Global Wheat Head Dataset, the `GlobalWheat2020.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/GlobalWheat2020.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/GlobalWheat2020.yaml).
+A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. For the case of the Global Wheat Head Dataset, the `GlobalWheat2020.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/GlobalWheat2020.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/GlobalWheat2020.yaml).
-!!! example "ultralytics/datasets/GlobalWheat2020.yaml"
+!!! example "ultralytics/cfg/datasets/GlobalWheat2020.yaml"
```yaml
- --8<-- "ultralytics/datasets/GlobalWheat2020.yaml"
+ --8<-- "ultralytics/cfg/datasets/GlobalWheat2020.yaml"
```
## Usage
diff --git a/docs/datasets/detect/index.md b/docs/datasets/detect/index.md
index 6a9cd614d..cf29cba5b 100644
--- a/docs/datasets/detect/index.md
+++ b/docs/datasets/detect/index.md
@@ -93,7 +93,7 @@ If you have your own dataset and would like to use it for training detection mod
You can easily convert labels from the popular COCO dataset format to the YOLO format using the following code snippet:
```python
-from ultralytics.yolo.data.converter import convert_coco
+from ultralytics.data.converter import convert_coco
convert_coco(labels_dir='../coco/annotations/')
```
diff --git a/docs/datasets/detect/objects365.md b/docs/datasets/detect/objects365.md
index 25a109962..85a65f18e 100644
--- a/docs/datasets/detect/objects365.md
+++ b/docs/datasets/detect/objects365.md
@@ -28,12 +28,12 @@ The Objects365 dataset is widely used for training and evaluating deep learning
## Dataset YAML
-A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. For the case of the Objects365 Dataset, the `Objects365.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/Objects365.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/Objects365.yaml).
+A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. For the case of the Objects365 Dataset, the `Objects365.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/Objects365.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/Objects365.yaml).
-!!! example "ultralytics/datasets/Objects365.yaml"
+!!! example "ultralytics/cfg/datasets/Objects365.yaml"
```yaml
- --8<-- "ultralytics/datasets/Objects365.yaml"
+ --8<-- "ultralytics/cfg/datasets/Objects365.yaml"
```
## Usage
diff --git a/docs/datasets/detect/sku-110k.md b/docs/datasets/detect/sku-110k.md
index 1d366b3c2..419dc724d 100644
--- a/docs/datasets/detect/sku-110k.md
+++ b/docs/datasets/detect/sku-110k.md
@@ -30,12 +30,12 @@ The SKU-110k dataset is widely used for training and evaluating deep learning mo
## Dataset YAML
-A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. For the case of the SKU-110K dataset, the `SKU-110K.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/SKU-110K.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/SKU-110K.yaml).
+A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. For the case of the SKU-110K dataset, the `SKU-110K.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/SKU-110K.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/SKU-110K.yaml).
-!!! example "ultralytics/datasets/SKU-110K.yaml"
+!!! example "ultralytics/cfg/datasets/SKU-110K.yaml"
```yaml
- --8<-- "ultralytics/datasets/SKU-110K.yaml"
+ --8<-- "ultralytics/cfg/datasets/SKU-110K.yaml"
```
## Usage
diff --git a/docs/datasets/detect/visdrone.md b/docs/datasets/detect/visdrone.md
index cd7a309d4..15b5995fc 100644
--- a/docs/datasets/detect/visdrone.md
+++ b/docs/datasets/detect/visdrone.md
@@ -26,12 +26,12 @@ The VisDrone dataset is widely used for training and evaluating deep learning mo
## Dataset YAML
-A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the Visdrone dataset, the `VisDrone.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/VisDrone.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/VisDrone.yaml).
+A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the Visdrone dataset, the `VisDrone.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/VisDrone.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/VisDrone.yaml).
-!!! example "ultralytics/datasets/VisDrone.yaml"
+!!! example "ultralytics/cfg/datasets/VisDrone.yaml"
```yaml
- --8<-- "ultralytics/datasets/VisDrone.yaml"
+ --8<-- "ultralytics/cfg/datasets/VisDrone.yaml"
```
## Usage
diff --git a/docs/datasets/detect/voc.md b/docs/datasets/detect/voc.md
index 4e5c54641..c9f599391 100644
--- a/docs/datasets/detect/voc.md
+++ b/docs/datasets/detect/voc.md
@@ -29,12 +29,12 @@ The VOC dataset is widely used for training and evaluating deep learning models
## Dataset YAML
-A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the VOC dataset, the `VOC.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/VOC.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/VOC.yaml).
+A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the VOC dataset, the `VOC.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/VOC.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/VOC.yaml).
-!!! example "ultralytics/datasets/VOC.yaml"
+!!! example "ultralytics/cfg/datasets/VOC.yaml"
```yaml
- --8<-- "ultralytics/datasets/VOC.yaml"
+ --8<-- "ultralytics/cfg/datasets/VOC.yaml"
```
## Usage
diff --git a/docs/datasets/detect/xview.md b/docs/datasets/detect/xview.md
index 92bac2f04..db956b9f8 100644
--- a/docs/datasets/detect/xview.md
+++ b/docs/datasets/detect/xview.md
@@ -32,12 +32,12 @@ The xView dataset is widely used for training and evaluating deep learning model
## Dataset YAML
-A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the xView dataset, the `xView.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/xView.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/xView.yaml).
+A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the xView dataset, the `xView.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/xView.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/xView.yaml).
-!!! example "ultralytics/datasets/xView.yaml"
+!!! example "ultralytics/cfg/datasets/xView.yaml"
```yaml
- --8<-- "ultralytics/datasets/xView.yaml"
+ --8<-- "ultralytics/cfg/datasets/xView.yaml"
```
## Usage
diff --git a/docs/datasets/pose/coco.md b/docs/datasets/pose/coco.md
index 217f2b08e..72d6eb6b7 100644
--- a/docs/datasets/pose/coco.md
+++ b/docs/datasets/pose/coco.md
@@ -30,12 +30,12 @@ The COCO-Pose dataset is specifically used for training and evaluating deep lear
## Dataset YAML
-A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the COCO-Pose dataset, the `coco-pose.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco-pose.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco-pose.yaml).
+A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the COCO-Pose dataset, the `coco-pose.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco-pose.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco-pose.yaml).
-!!! example "ultralytics/datasets/coco-pose.yaml"
+!!! example "ultralytics/cfg/datasets/coco-pose.yaml"
```yaml
- --8<-- "ultralytics/datasets/coco-pose.yaml"
+ --8<-- "ultralytics/cfg/datasets/coco-pose.yaml"
```
## Usage
diff --git a/docs/datasets/pose/coco8-pose.md b/docs/datasets/pose/coco8-pose.md
index a7a115b62..e13cd04ef 100644
--- a/docs/datasets/pose/coco8-pose.md
+++ b/docs/datasets/pose/coco8-pose.md
@@ -19,12 +19,12 @@ and [YOLOv8](https://github.com/ultralytics/ultralytics).
## Dataset YAML
-A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the COCO8-Pose dataset, the `coco8-pose.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco8-pose.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco8-pose.yaml).
+A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the COCO8-Pose dataset, the `coco8-pose.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8-pose.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8-pose.yaml).
-!!! example "ultralytics/datasets/coco8-pose.yaml"
+!!! example "ultralytics/cfg/datasets/coco8-pose.yaml"
```yaml
- --8<-- "ultralytics/datasets/coco8-pose.yaml"
+ --8<-- "ultralytics/cfg/datasets/coco8-pose.yaml"
```
## Usage
diff --git a/docs/datasets/pose/index.md b/docs/datasets/pose/index.md
index 27d57d8a0..b18012b09 100644
--- a/docs/datasets/pose/index.md
+++ b/docs/datasets/pose/index.md
@@ -120,7 +120,7 @@ If you have your own dataset and would like to use it for training pose estimati
Ultralytics provides a convenient conversion tool to convert labels from the popular COCO dataset format to YOLO format:
```python
-from ultralytics.yolo.data.converter import convert_coco
+from ultralytics.data.converter import convert_coco
convert_coco(labels_dir='../coco/annotations/', use_keypoints=True)
```
diff --git a/docs/datasets/segment/coco.md b/docs/datasets/segment/coco.md
index bfb72923a..c84a818ae 100644
--- a/docs/datasets/segment/coco.md
+++ b/docs/datasets/segment/coco.md
@@ -29,12 +29,12 @@ COCO-Seg is widely used for training and evaluating deep learning models in inst
## Dataset YAML
-A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the COCO-Seg dataset, the `coco.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml).
+A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the COCO-Seg dataset, the `coco.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml).
-!!! example "ultralytics/datasets/coco.yaml"
+!!! example "ultralytics/cfg/datasets/coco.yaml"
```yaml
- --8<-- "ultralytics/datasets/coco.yaml"
+ --8<-- "ultralytics/cfg/datasets/coco.yaml"
```
## Usage
diff --git a/docs/datasets/segment/coco8-seg.md b/docs/datasets/segment/coco8-seg.md
index 8b9721c4f..b35c4d5b8 100644
--- a/docs/datasets/segment/coco8-seg.md
+++ b/docs/datasets/segment/coco8-seg.md
@@ -19,12 +19,12 @@ and [YOLOv8](https://github.com/ultralytics/ultralytics).
## Dataset YAML
-A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the COCO8-Seg dataset, the `coco8-seg.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco8-seg.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco8-seg.yaml).
+A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It contains information about the dataset's paths, classes, and other relevant information. In the case of the COCO8-Seg dataset, the `coco8-seg.yaml` file is maintained at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8-seg.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco8-seg.yaml).
-!!! example "ultralytics/datasets/coco8-seg.yaml"
+!!! example "ultralytics/cfg/datasets/coco8-seg.yaml"
```yaml
- --8<-- "ultralytics/datasets/coco8-seg.yaml"
+ --8<-- "ultralytics/cfg/datasets/coco8-seg.yaml"
```
## Usage
diff --git a/docs/datasets/segment/index.md b/docs/datasets/segment/index.md
index 5bafefe6d..af2dc96b1 100644
--- a/docs/datasets/segment/index.md
+++ b/docs/datasets/segment/index.md
@@ -104,7 +104,7 @@ If you have your own dataset and would like to use it for training segmentation
You can easily convert labels from the popular COCO dataset format to the YOLO format using the following code snippet:
```python
-from ultralytics.yolo.data.converter import convert_coco
+from ultralytics.data.converter import convert_coco
convert_coco(labels_dir='../coco/annotations/', use_segments=True)
```
@@ -122,7 +122,7 @@ Auto-annotation is an essential feature that allows you to generate a segmentati
To auto-annotate your dataset using the Ultralytics framework, you can use the `auto_annotate` function as shown below:
```python
-from ultralytics.yolo.data.annotator import auto_annotate
+from ultralytics.data.annotator import auto_annotate
auto_annotate(data="path/to/images", det_model="yolov8x.pt", sam_model='sam_b.pt')
```
diff --git a/docs/hub/datasets.md b/docs/hub/datasets.md
index c1bdc38ef..77a49462b 100644
--- a/docs/hub/datasets.md
+++ b/docs/hub/datasets.md
@@ -34,7 +34,7 @@ The dataset YAML is the same standard YOLOv5 and YOLOv8 YAML format.
!!! example "coco8.yaml"
```yaml
- --8<-- "ultralytics/datasets/coco8.yaml"
+ --8<-- "ultralytics/cfg/datasets/coco8.yaml"
```
After zipping your dataset, you should validate it before uploading it to Ultralytics HUB. Ultralytics HUB conducts the dataset validation check post-upload, so by ensuring your dataset is correctly formatted and error-free ahead of time, you can forestall any setbacks due to dataset rejection.
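A minimal sketch of that pre-upload check, assuming the `check_dataset` helper exported from `ultralytics.hub` is available in your installed version:

```python
from ultralytics.hub import check_dataset

# Validate a zipped dataset locally before uploading it to Ultralytics HUB
check_dataset('path/to/coco8.zip')
```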
diff --git a/docs/models/fast-sam.md b/docs/models/fast-sam.md
index eefb58093..c360a29ac 100644
--- a/docs/models/fast-sam.md
+++ b/docs/models/fast-sam.md
@@ -42,7 +42,7 @@ To perform object detection on an image, use the `predict` method as shown below
```python
from ultralytics import FastSAM
-from ultralytics.yolo.fastsam import FastSAMPrompt
+from ultralytics.models.fastsam import FastSAMPrompt
# Define image path and inference device
IMAGE_PATH = 'ultralytics/assets/bus.jpg'
diff --git a/docs/models/mobile-sam.md b/docs/models/mobile-sam.md
index 94bc83db4..0a7083bb2 100644
--- a/docs/models/mobile-sam.md
+++ b/docs/models/mobile-sam.md
@@ -6,9 +6,9 @@ keywords: MobileSAM, Faster Segment Anything, Segment Anything, Segment Anything
![MobileSAM Logo](https://github.com/ChaoningZhang/MobileSAM/blob/master/assets/logo2.png?raw=true)
-# Faster Segment Anything (MobileSAM)
+# Mobile Segment Anything (MobileSAM)
-The MobileSAM paper is now available on [ResearchGate](https://www.researchgate.net/publication/371851844_Faster_Segment_Anything_Towards_Lightweight_SAM_for_Mobile_Applications) and [arXiv](https://arxiv.org/pdf/2306.14289.pdf). The most recent version will initially appear on ResearchGate due to the delayed content update on arXiv.
+The MobileSAM paper is now available on [arXiv](https://arxiv.org/pdf/2306.14289.pdf).
A demonstration of MobileSAM running on a CPU can be accessed at this [demo link](https://huggingface.co/spaces/dhkim2810/MobileSAM). The performance on a Mac i5 CPU takes approximately 3 seconds. On the Hugging Face demo, the interface and lower-performance CPUs contribute to a slower response, but it continues to function effectively.
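As a quick illustration of the workflow above, a minimal sketch of running MobileSAM through the Ultralytics `SAM` interface, assuming a local `mobile_sam.pt` weights file and using an illustrative point prompt:

```python
from ultralytics import SAM

# Load the MobileSAM weights and segment using a single point prompt
model = SAM('mobile_sam.pt')
model.predict('ultralytics/assets/bus.jpg', points=[900, 370], labels=[1])
```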
diff --git a/docs/models/sam.md b/docs/models/sam.md
index e9f9ac035..ae334ce61 100644
--- a/docs/models/sam.md
+++ b/docs/models/sam.md
@@ -88,7 +88,7 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t
=== "Prompt inference"
```python
- from ultralytics.vit.sam import Predictor as SAMPredictor
+ from ultralytics.models.sam import Predictor as SAMPredictor
# Create SAMPredictor
overrides = dict(conf=0.25, task='segment', mode='predict', imgsz=1024, model="mobile_sam.pt")
@@ -108,7 +108,7 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t
=== "Segment everything"
```python
- from ultralytics.vit.sam import Predictor as SAMPredictor
+ from ultralytics.models.sam import Predictor as SAMPredictor
# Create SAMPredictor
overrides = dict(conf=0.25, task='segment', mode='predict', imgsz=1024, model="mobile_sam.pt")
@@ -119,7 +119,7 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t
```
-- More additional args for `Segment everything` see [`Predictor/generate` Reference](../reference/vit/sam/predict.md).
+- For additional arguments for `Segment everything`, see the [`Predictor/generate` Reference](../reference/models/sam/predict.md).
## Available Models and Supported Tasks
@@ -184,7 +184,7 @@ Auto-annotation is a key feature of SAM, allowing users to generate a [segmentat
To auto-annotate your dataset with the Ultralytics framework, use the `auto_annotate` function as shown below:
```python
-from ultralytics.yolo.data.annotator import auto_annotate
+from ultralytics.data.annotator import auto_annotate
auto_annotate(data="path/to/images", det_model="yolov8x.pt", sam_model='sam_b.pt')
```
diff --git a/docs/modes/benchmark.md b/docs/modes/benchmark.md
index 0c5b35fae..9ce6221a4 100644
--- a/docs/modes/benchmark.md
+++ b/docs/modes/benchmark.md
@@ -27,7 +27,7 @@ full list of export arguments.
=== "Python"
```python
- from ultralytics.yolo.utils.benchmarks import benchmark
+ from ultralytics.utils.benchmarks import benchmark
# Benchmark on GPU
benchmark(model='yolov8n.pt', imgsz=640, half=False, device=0)
diff --git a/docs/modes/predict.md b/docs/modes/predict.md
index 8708933e6..7a6bf82e2 100644
--- a/docs/modes/predict.md
+++ b/docs/modes/predict.md
@@ -316,7 +316,7 @@ All supported arguments:
## Image and Video Formats
-YOLOv8 supports various image and video formats, as specified in [yolo/data/utils.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/yolo/data/utils.py). See the tables below for the valid suffixes and example predict commands.
+YOLOv8 supports various image and video formats, as specified in [data/utils.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/utils.py). See the tables below for the valid suffixes and example predict commands.
### Image Suffixes
@@ -451,7 +451,7 @@ operations are cached, meaning they're only calculated once per object, and thos
keypoints.data # raw probs tensor, (num_class, )
```
-Class reference documentation for `Results` module and its components can be found [here](../reference/yolo/engine/results.md)
+Class reference documentation for `Results` module and its components can be found [here](../reference/engine/results.md)
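For example, a minimal sketch of reading those cached attributes after a prediction run (the model and image below are illustrative):

```python
from ultralytics import YOLO

model = YOLO('yolov8n.pt')
results = model('https://ultralytics.com/images/bus.jpg')

for r in results:
    print(r.boxes.xyxy)  # bounding boxes in xyxy format
    print(r.boxes.conf)  # confidence scores
    print(r.boxes.cls)   # predicted class indices
```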
## Plotting results
diff --git a/docs/modes/track.md b/docs/modes/track.md
index 7b9a83a71..e0266b970 100644
--- a/docs/modes/track.md
+++ b/docs/modes/track.md
@@ -79,7 +79,7 @@ to [predict page](https://docs.ultralytics.com/modes/predict/).
### Tracker
We also support using a modified tracker config file: simply copy a config file, e.g. `custom_tracker.yaml`,
-from [ultralytics/tracker/cfg](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/tracker/cfg) and modify
+from [ultralytics/cfg/trackers](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/trackers) and modify
any configurations (except the `tracker_type`) you need to.
!!! example ""
@@ -97,5 +97,5 @@ any configurations(expect the `tracker_type`) you need to.
yolo track model=yolov8n.pt source="https://youtu.be/Zgi9g1ksQHc" tracker='custom_tracker.yaml'
```
-Please refer to [ultralytics/tracker/cfg](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/tracker/cfg)
+Please refer to [ultralytics/cfg/trackers](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/trackers)
page
\ No newline at end of file
diff --git a/docs/reference/cfg/__init__.md b/docs/reference/cfg/__init__.md
new file mode 100644
index 000000000..a30308c94
--- /dev/null
+++ b/docs/reference/cfg/__init__.md
@@ -0,0 +1,44 @@
+## cfg2dict
+---
+### ::: ultralytics.cfg.cfg2dict
+
+
+## get_cfg
+---
+### ::: ultralytics.cfg.get_cfg
+
+
+## _handle_deprecation
+---
+### ::: ultralytics.cfg._handle_deprecation
+
+
+## check_cfg_mismatch
+---
+### ::: ultralytics.cfg.check_cfg_mismatch
+
+
+## merge_equals_args
+---
+### ::: ultralytics.cfg.merge_equals_args
+
+
+## handle_yolo_hub
+---
+### ::: ultralytics.cfg.handle_yolo_hub
+
+
+## handle_yolo_settings
+---
+### ::: ultralytics.cfg.handle_yolo_settings
+
+
+## entrypoint
+---
+### ::: ultralytics.cfg.entrypoint
+
+
+## copy_default_cfg
+---
+### ::: ultralytics.cfg.copy_default_cfg
+
diff --git a/docs/reference/data/annotator.md b/docs/reference/data/annotator.md
new file mode 100644
index 000000000..6a5807412
--- /dev/null
+++ b/docs/reference/data/annotator.md
@@ -0,0 +1,4 @@
+## auto_annotate
+---
+### ::: ultralytics.data.annotator.auto_annotate
+
diff --git a/docs/reference/data/augment.md b/docs/reference/data/augment.md
new file mode 100644
index 000000000..a3dbfea8d
--- /dev/null
+++ b/docs/reference/data/augment.md
@@ -0,0 +1,94 @@
+## BaseTransform
+---
+### ::: ultralytics.data.augment.BaseTransform
+
+
+## Compose
+---
+### ::: ultralytics.data.augment.Compose
+
+
+## BaseMixTransform
+---
+### ::: ultralytics.data.augment.BaseMixTransform
+
+
+## Mosaic
+---
+### ::: ultralytics.data.augment.Mosaic
+
+
+## MixUp
+---
+### ::: ultralytics.data.augment.MixUp
+
+
+## RandomPerspective
+---
+### ::: ultralytics.data.augment.RandomPerspective
+
+
+## RandomHSV
+---
+### ::: ultralytics.data.augment.RandomHSV
+
+
+## RandomFlip
+---
+### ::: ultralytics.data.augment.RandomFlip
+
+
+## LetterBox
+---
+### ::: ultralytics.data.augment.LetterBox
+
+
+## CopyPaste
+---
+### ::: ultralytics.data.augment.CopyPaste
+
+
+## Albumentations
+---
+### ::: ultralytics.data.augment.Albumentations
+
+
+## Format
+---
+### ::: ultralytics.data.augment.Format
+
+
+## ClassifyLetterBox
+---
+### ::: ultralytics.data.augment.ClassifyLetterBox
+
+
+## CenterCrop
+---
+### ::: ultralytics.data.augment.CenterCrop
+
+
+## ToTensor
+---
+### ::: ultralytics.data.augment.ToTensor
+
+
+## v8_transforms
+---
+### ::: ultralytics.data.augment.v8_transforms
+
+
+## classify_transforms
+---
+### ::: ultralytics.data.augment.classify_transforms
+
+
+## hsv2colorjitter
+---
+### ::: ultralytics.data.augment.hsv2colorjitter
+
+
+## classify_albumentations
+---
+### ::: ultralytics.data.augment.classify_albumentations
+
diff --git a/docs/reference/data/base.md b/docs/reference/data/base.md
new file mode 100644
index 000000000..65aadce82
--- /dev/null
+++ b/docs/reference/data/base.md
@@ -0,0 +1,4 @@
+## BaseDataset
+---
+### ::: ultralytics.data.base.BaseDataset
+
diff --git a/docs/reference/data/build.md b/docs/reference/data/build.md
new file mode 100644
index 000000000..fa7e271cd
--- /dev/null
+++ b/docs/reference/data/build.md
@@ -0,0 +1,34 @@
+## InfiniteDataLoader
+---
+### ::: ultralytics.data.build.InfiniteDataLoader
+
+
+## _RepeatSampler
+---
+### ::: ultralytics.data.build._RepeatSampler
+
+
+## seed_worker
+---
+### ::: ultralytics.data.build.seed_worker
+
+
+## build_yolo_dataset
+---
+### ::: ultralytics.data.build.build_yolo_dataset
+
+
+## build_dataloader
+---
+### ::: ultralytics.data.build.build_dataloader
+
+
+## check_source
+---
+### ::: ultralytics.data.build.check_source
+
+
+## load_inference_source
+---
+### ::: ultralytics.data.build.load_inference_source
+
diff --git a/docs/reference/data/converter.md b/docs/reference/data/converter.md
new file mode 100644
index 000000000..c38e07884
--- /dev/null
+++ b/docs/reference/data/converter.md
@@ -0,0 +1,29 @@
+## coco91_to_coco80_class
+---
+### ::: ultralytics.data.converter.coco91_to_coco80_class
+
+
+## convert_coco
+---
+### ::: ultralytics.data.converter.convert_coco
+
+
+## rle2polygon
+---
+### ::: ultralytics.data.converter.rle2polygon
+
+
+## min_index
+---
+### ::: ultralytics.data.converter.min_index
+
+
+## merge_multi_segment
+---
+### ::: ultralytics.data.converter.merge_multi_segment
+
+
+## delete_dsstore
+---
+### ::: ultralytics.data.converter.delete_dsstore
+
diff --git a/docs/reference/data/dataset.md b/docs/reference/data/dataset.md
new file mode 100644
index 000000000..6d69959fe
--- /dev/null
+++ b/docs/reference/data/dataset.md
@@ -0,0 +1,14 @@
+## YOLODataset
+---
+### ::: ultralytics.data.dataset.YOLODataset
+
+
+## ClassificationDataset
+---
+### ::: ultralytics.data.dataset.ClassificationDataset
+
+
+## SemanticDataset
+---
+### ::: ultralytics.data.dataset.SemanticDataset
+
diff --git a/docs/reference/data/loaders.md b/docs/reference/data/loaders.md
new file mode 100644
index 000000000..3c98d8f44
--- /dev/null
+++ b/docs/reference/data/loaders.md
@@ -0,0 +1,39 @@
+## SourceTypes
+---
+### ::: ultralytics.data.loaders.SourceTypes
+
+
+## LoadStreams
+---
+### ::: ultralytics.data.loaders.LoadStreams
+
+
+## LoadScreenshots
+---
+### ::: ultralytics.data.loaders.LoadScreenshots
+
+
+## LoadImages
+---
+### ::: ultralytics.data.loaders.LoadImages
+
+
+## LoadPilAndNumpy
+---
+### ::: ultralytics.data.loaders.LoadPilAndNumpy
+
+
+## LoadTensor
+---
+### ::: ultralytics.data.loaders.LoadTensor
+
+
+## autocast_list
+---
+### ::: ultralytics.data.loaders.autocast_list
+
+
+## get_best_youtube_url
+---
+### ::: ultralytics.data.loaders.get_best_youtube_url
+
diff --git a/docs/reference/data/utils.md b/docs/reference/data/utils.md
new file mode 100644
index 000000000..6d471cce9
--- /dev/null
+++ b/docs/reference/data/utils.md
@@ -0,0 +1,69 @@
+## HUBDatasetStats
+---
+### ::: ultralytics.data.utils.HUBDatasetStats
+
+
+## img2label_paths
+---
+### ::: ultralytics.data.utils.img2label_paths
+
+
+## get_hash
+---
+### ::: ultralytics.data.utils.get_hash
+
+
+## exif_size
+---
+### ::: ultralytics.data.utils.exif_size
+
+
+## verify_image_label
+---
+### ::: ultralytics.data.utils.verify_image_label
+
+
+## polygon2mask
+---
+### ::: ultralytics.data.utils.polygon2mask
+
+
+## polygons2masks
+---
+### ::: ultralytics.data.utils.polygons2masks
+
+
+## polygons2masks_overlap
+---
+### ::: ultralytics.data.utils.polygons2masks_overlap
+
+
+## check_det_dataset
+---
+### ::: ultralytics.data.utils.check_det_dataset
+
+
+## check_cls_dataset
+---
+### ::: ultralytics.data.utils.check_cls_dataset
+
+
+## compress_one_image
+---
+### ::: ultralytics.data.utils.compress_one_image
+
+
+## delete_dsstore
+---
+### ::: ultralytics.data.utils.delete_dsstore
+
+
+## zip_directory
+---
+### ::: ultralytics.data.utils.zip_directory
+
+
+## autosplit
+---
+### ::: ultralytics.data.utils.autosplit
+
diff --git a/docs/reference/engine/exporter.md b/docs/reference/engine/exporter.md
new file mode 100644
index 000000000..662f74a67
--- /dev/null
+++ b/docs/reference/engine/exporter.md
@@ -0,0 +1,29 @@
+## Exporter
+---
+### ::: ultralytics.engine.exporter.Exporter
+
+
+## iOSDetectModel
+---
+### ::: ultralytics.engine.exporter.iOSDetectModel
+
+
+## export_formats
+---
+### ::: ultralytics.engine.exporter.export_formats
+
+
+## gd_outputs
+---
+### ::: ultralytics.engine.exporter.gd_outputs
+
+
+## try_export
+---
+### ::: ultralytics.engine.exporter.try_export
+
+
+## export
+---
+### ::: ultralytics.engine.exporter.export
+
diff --git a/docs/reference/engine/model.md b/docs/reference/engine/model.md
new file mode 100644
index 000000000..43433252e
--- /dev/null
+++ b/docs/reference/engine/model.md
@@ -0,0 +1,4 @@
+## YOLO
+---
+### ::: ultralytics.engine.model.YOLO
+
diff --git a/docs/reference/engine/predictor.md b/docs/reference/engine/predictor.md
new file mode 100644
index 000000000..8c3d3c34d
--- /dev/null
+++ b/docs/reference/engine/predictor.md
@@ -0,0 +1,4 @@
+## BasePredictor
+---
+### ::: ultralytics.engine.predictor.BasePredictor
+
diff --git a/docs/reference/engine/results.md b/docs/reference/engine/results.md
new file mode 100644
index 000000000..ec3a09fae
--- /dev/null
+++ b/docs/reference/engine/results.md
@@ -0,0 +1,29 @@
+## BaseTensor
+---
+### ::: ultralytics.engine.results.BaseTensor
+
+
+## Results
+---
+### ::: ultralytics.engine.results.Results
+
+
+## Boxes
+---
+### ::: ultralytics.engine.results.Boxes
+
+
+## Masks
+---
+### ::: ultralytics.engine.results.Masks
+
+
+## Keypoints
+---
+### ::: ultralytics.engine.results.Keypoints
+
+
+## Probs
+---
+### ::: ultralytics.engine.results.Probs
+
diff --git a/docs/reference/engine/trainer.md b/docs/reference/engine/trainer.md
new file mode 100644
index 000000000..e215a445d
--- /dev/null
+++ b/docs/reference/engine/trainer.md
@@ -0,0 +1,4 @@
+## BaseTrainer
+---
+### ::: ultralytics.engine.trainer.BaseTrainer
+
diff --git a/docs/reference/engine/validator.md b/docs/reference/engine/validator.md
new file mode 100644
index 000000000..192711462
--- /dev/null
+++ b/docs/reference/engine/validator.md
@@ -0,0 +1,4 @@
+## BaseValidator
+---
+### ::: ultralytics.engine.validator.BaseValidator
+
diff --git a/docs/reference/hub/__init__.md b/docs/reference/hub/__init__.md
index 767ec1485..93c9befb4 100644
--- a/docs/reference/hub/__init__.md
+++ b/docs/reference/hub/__init__.md
@@ -1,8 +1,3 @@
----
-description: Access Ultralytics HUB, manage API keys, train models, and export in various formats with ease using the HUB API.
-keywords: Ultralytics, YOLO, Docs HUB, API, login, logout, reset model, export model, check dataset, HUBDatasetStats, YOLO training, YOLO model
----
-
## login
---
### ::: ultralytics.hub.login
diff --git a/docs/reference/hub/auth.md b/docs/reference/hub/auth.md
index 2098e8eb5..d293b4cc8 100644
--- a/docs/reference/hub/auth.md
+++ b/docs/reference/hub/auth.md
@@ -1,8 +1,3 @@
----
-description: Learn how to use Ultralytics hub authentication in your projects with examples and guidelines from the Auth page on Ultralytics Docs.
-keywords: Ultralytics, ultralytics hub, api keys, authentication, collab accounts, requests, hub management, monitoring
----
-
## Auth
---
### ::: ultralytics.hub.auth.Auth
diff --git a/docs/reference/hub/session.md b/docs/reference/hub/session.md
index 8fe82c4f2..6863c5ab1 100644
--- a/docs/reference/hub/session.md
+++ b/docs/reference/hub/session.md
@@ -1,8 +1,3 @@
----
-description: Accelerate your AI development with the Ultralytics HUB Training Session. High-performance training of object detection models.
-keywords: YOLOv5, object detection, HUBTrainingSession, custom models, Ultralytics Docs
----
-
## HUBTrainingSession
---
### ::: ultralytics.hub.session.HUBTrainingSession
diff --git a/docs/reference/hub/utils.md b/docs/reference/hub/utils.md
index 9b896b048..2d78273a9 100644
--- a/docs/reference/hub/utils.md
+++ b/docs/reference/hub/utils.md
@@ -1,8 +1,3 @@
----
-description: Explore Ultralytics events, including 'request_with_credentials' and 'smart_request', to improve your project's performance and efficiency.
-keywords: Ultralytics, Hub Utils, API Documentation, Python, requests_with_progress, Events, classes, usage, examples
----
-
## Events
---
### ::: ultralytics.hub.utils.Events
diff --git a/docs/reference/models/fastsam/model.md b/docs/reference/models/fastsam/model.md
new file mode 100644
index 000000000..0dd097c8a
--- /dev/null
+++ b/docs/reference/models/fastsam/model.md
@@ -0,0 +1,4 @@
+## FastSAM
+---
+### ::: ultralytics.models.fastsam.model.FastSAM
+
diff --git a/docs/reference/models/fastsam/predict.md b/docs/reference/models/fastsam/predict.md
new file mode 100644
index 000000000..59080e7c0
--- /dev/null
+++ b/docs/reference/models/fastsam/predict.md
@@ -0,0 +1,4 @@
+## FastSAMPredictor
+---
+### ::: ultralytics.models.fastsam.predict.FastSAMPredictor
+
diff --git a/docs/reference/models/fastsam/prompt.md b/docs/reference/models/fastsam/prompt.md
new file mode 100644
index 000000000..a8e4df0eb
--- /dev/null
+++ b/docs/reference/models/fastsam/prompt.md
@@ -0,0 +1,4 @@
+## FastSAMPrompt
+---
+### ::: ultralytics.models.fastsam.prompt.FastSAMPrompt
+
diff --git a/docs/reference/models/fastsam/utils.md b/docs/reference/models/fastsam/utils.md
new file mode 100644
index 000000000..0b6659a3c
--- /dev/null
+++ b/docs/reference/models/fastsam/utils.md
@@ -0,0 +1,9 @@
+## adjust_bboxes_to_image_border
+---
+### ::: ultralytics.models.fastsam.utils.adjust_bboxes_to_image_border
+
+
+## bbox_iou
+---
+### ::: ultralytics.models.fastsam.utils.bbox_iou
+
diff --git a/docs/reference/models/fastsam/val.md b/docs/reference/models/fastsam/val.md
new file mode 100644
index 000000000..b72c06a56
--- /dev/null
+++ b/docs/reference/models/fastsam/val.md
@@ -0,0 +1,4 @@
+## FastSAMValidator
+---
+### ::: ultralytics.models.fastsam.val.FastSAMValidator
+
diff --git a/docs/reference/models/nas/model.md b/docs/reference/models/nas/model.md
new file mode 100644
index 000000000..48912a357
--- /dev/null
+++ b/docs/reference/models/nas/model.md
@@ -0,0 +1,4 @@
+## NAS
+---
+### ::: ultralytics.models.nas.model.NAS
+
diff --git a/docs/reference/models/nas/predict.md b/docs/reference/models/nas/predict.md
new file mode 100644
index 000000000..e54fb3ad8
--- /dev/null
+++ b/docs/reference/models/nas/predict.md
@@ -0,0 +1,4 @@
+## NASPredictor
+---
+### ::: ultralytics.models.nas.predict.NASPredictor
+
diff --git a/docs/reference/models/nas/val.md b/docs/reference/models/nas/val.md
new file mode 100644
index 000000000..150ee0ad4
--- /dev/null
+++ b/docs/reference/models/nas/val.md
@@ -0,0 +1,4 @@
+## NASValidator
+---
+### ::: ultralytics.models.nas.val.NASValidator
+
diff --git a/docs/reference/models/rtdetr/model.md b/docs/reference/models/rtdetr/model.md
new file mode 100644
index 000000000..3ad8c65d6
--- /dev/null
+++ b/docs/reference/models/rtdetr/model.md
@@ -0,0 +1,4 @@
+## RTDETR
+---
+### ::: ultralytics.models.rtdetr.model.RTDETR
+
diff --git a/docs/reference/models/rtdetr/predict.md b/docs/reference/models/rtdetr/predict.md
new file mode 100644
index 000000000..9fc8f5ad6
--- /dev/null
+++ b/docs/reference/models/rtdetr/predict.md
@@ -0,0 +1,4 @@
+## RTDETRPredictor
+---
+### ::: ultralytics.models.rtdetr.predict.RTDETRPredictor
+
diff --git a/docs/reference/models/rtdetr/train.md b/docs/reference/models/rtdetr/train.md
new file mode 100644
index 000000000..2c559c6d4
--- /dev/null
+++ b/docs/reference/models/rtdetr/train.md
@@ -0,0 +1,9 @@
+## RTDETRTrainer
+---
+### ::: ultralytics.models.rtdetr.train.RTDETRTrainer
+
+
+## train
+---
+### ::: ultralytics.models.rtdetr.train.train
+
diff --git a/docs/reference/models/rtdetr/val.md b/docs/reference/models/rtdetr/val.md
new file mode 100644
index 000000000..bffdf4cbb
--- /dev/null
+++ b/docs/reference/models/rtdetr/val.md
@@ -0,0 +1,9 @@
+## RTDETRDataset
+---
+### ::: ultralytics.models.rtdetr.val.RTDETRDataset
+
+
+## RTDETRValidator
+---
+### ::: ultralytics.models.rtdetr.val.RTDETRValidator
+
diff --git a/docs/reference/models/sam/amg.md b/docs/reference/models/sam/amg.md
new file mode 100644
index 000000000..8a4e56ffc
--- /dev/null
+++ b/docs/reference/models/sam/amg.md
@@ -0,0 +1,84 @@
+## MaskData
+---
+### ::: ultralytics.models.sam.amg.MaskData
+
+
+## is_box_near_crop_edge
+---
+### ::: ultralytics.models.sam.amg.is_box_near_crop_edge
+
+
+## box_xyxy_to_xywh
+---
+### ::: ultralytics.models.sam.amg.box_xyxy_to_xywh
+
+
+## batch_iterator
+---
+### ::: ultralytics.models.sam.amg.batch_iterator
+
+
+## mask_to_rle_pytorch
+---
+### ::: ultralytics.models.sam.amg.mask_to_rle_pytorch
+
+
+## rle_to_mask
+---
+### ::: ultralytics.models.sam.amg.rle_to_mask
+
+
+## area_from_rle
+---
+### ::: ultralytics.models.sam.amg.area_from_rle
+
+
+## calculate_stability_score
+---
+### ::: ultralytics.models.sam.amg.calculate_stability_score
+
+
+## build_point_grid
+---
+### ::: ultralytics.models.sam.amg.build_point_grid
+
+
+## build_all_layer_point_grids
+---
+### ::: ultralytics.models.sam.amg.build_all_layer_point_grids
+
+
+## generate_crop_boxes
+---
+### ::: ultralytics.models.sam.amg.generate_crop_boxes
+
+
+## uncrop_boxes_xyxy
+---
+### ::: ultralytics.models.sam.amg.uncrop_boxes_xyxy
+
+
+## uncrop_points
+---
+### ::: ultralytics.models.sam.amg.uncrop_points
+
+
+## uncrop_masks
+---
+### ::: ultralytics.models.sam.amg.uncrop_masks
+
+
+## remove_small_regions
+---
+### ::: ultralytics.models.sam.amg.remove_small_regions
+
+
+## coco_encode_rle
+---
+### ::: ultralytics.models.sam.amg.coco_encode_rle
+
+
+## batched_mask_to_box
+---
+### ::: ultralytics.models.sam.amg.batched_mask_to_box
+
diff --git a/docs/reference/models/sam/build.md b/docs/reference/models/sam/build.md
new file mode 100644
index 000000000..447ffd380
--- /dev/null
+++ b/docs/reference/models/sam/build.md
@@ -0,0 +1,29 @@
+## build_sam_vit_h
+---
+### ::: ultralytics.models.sam.build.build_sam_vit_h
+
+
+## build_sam_vit_l
+---
+### ::: ultralytics.models.sam.build.build_sam_vit_l
+
+
+## build_sam_vit_b
+---
+### ::: ultralytics.models.sam.build.build_sam_vit_b
+
+
+## build_mobile_sam
+---
+### ::: ultralytics.models.sam.build.build_mobile_sam
+
+
+## _build_sam
+---
+### ::: ultralytics.models.sam.build._build_sam
+
+
+## build_sam
+---
+### ::: ultralytics.models.sam.build.build_sam
+
diff --git a/docs/reference/models/sam/model.md b/docs/reference/models/sam/model.md
new file mode 100644
index 000000000..ed2220de9
--- /dev/null
+++ b/docs/reference/models/sam/model.md
@@ -0,0 +1,4 @@
+## SAM
+---
+### ::: ultralytics.models.sam.model.SAM
+
diff --git a/docs/reference/models/sam/modules/decoders.md b/docs/reference/models/sam/modules/decoders.md
new file mode 100644
index 000000000..3b37aaa21
--- /dev/null
+++ b/docs/reference/models/sam/modules/decoders.md
@@ -0,0 +1,9 @@
+## MaskDecoder
+---
+### ::: ultralytics.models.sam.modules.decoders.MaskDecoder
+
+
+## MLP
+---
+### ::: ultralytics.models.sam.modules.decoders.MLP
+
diff --git a/docs/reference/models/sam/modules/encoders.md b/docs/reference/models/sam/modules/encoders.md
new file mode 100644
index 000000000..82ade9958
--- /dev/null
+++ b/docs/reference/models/sam/modules/encoders.md
@@ -0,0 +1,49 @@
+## ImageEncoderViT
+---
+### ::: ultralytics.models.sam.modules.encoders.ImageEncoderViT
+
+
+## PromptEncoder
+---
+### ::: ultralytics.models.sam.modules.encoders.PromptEncoder
+
+
+## PositionEmbeddingRandom
+---
+### ::: ultralytics.models.sam.modules.encoders.PositionEmbeddingRandom
+
+
+## Block
+---
+### ::: ultralytics.models.sam.modules.encoders.Block
+
+
+## Attention
+---
+### ::: ultralytics.models.sam.modules.encoders.Attention
+
+
+## PatchEmbed
+---
+### ::: ultralytics.models.sam.modules.encoders.PatchEmbed
+
+
+## window_partition
+---
+### ::: ultralytics.models.sam.modules.encoders.window_partition
+
+
+## window_unpartition
+---
+### ::: ultralytics.models.sam.modules.encoders.window_unpartition
+
+
+## get_rel_pos
+---
+### ::: ultralytics.models.sam.modules.encoders.get_rel_pos
+
+
+## add_decomposed_rel_pos
+---
+### ::: ultralytics.models.sam.modules.encoders.add_decomposed_rel_pos
+
diff --git a/docs/reference/models/sam/modules/sam.md b/docs/reference/models/sam/modules/sam.md
new file mode 100644
index 000000000..9063f5e6b
--- /dev/null
+++ b/docs/reference/models/sam/modules/sam.md
@@ -0,0 +1,4 @@
+## Sam
+---
+### ::: ultralytics.models.sam.modules.sam.Sam
+
diff --git a/docs/reference/models/sam/modules/tiny_encoder.md b/docs/reference/models/sam/modules/tiny_encoder.md
new file mode 100644
index 000000000..813f69a84
--- /dev/null
+++ b/docs/reference/models/sam/modules/tiny_encoder.md
@@ -0,0 +1,54 @@
+## Conv2d_BN
+---
+### ::: ultralytics.models.sam.modules.tiny_encoder.Conv2d_BN
+
+
+## PatchEmbed
+---
+### ::: ultralytics.models.sam.modules.tiny_encoder.PatchEmbed
+
+
+## MBConv
+---
+### ::: ultralytics.models.sam.modules.tiny_encoder.MBConv
+
+
+## PatchMerging
+---
+### ::: ultralytics.models.sam.modules.tiny_encoder.PatchMerging
+
+
+## ConvLayer
+---
+### ::: ultralytics.models.sam.modules.tiny_encoder.ConvLayer
+
+
+## Mlp
+---
+### ::: ultralytics.models.sam.modules.tiny_encoder.Mlp
+
+
+## Attention
+---
+### ::: ultralytics.models.sam.modules.tiny_encoder.Attention
+
+
+## TinyViTBlock
+---
+### ::: ultralytics.models.sam.modules.tiny_encoder.TinyViTBlock
+
+
+## BasicLayer
+---
+### ::: ultralytics.models.sam.modules.tiny_encoder.BasicLayer
+
+
+## LayerNorm2d
+---
+### ::: ultralytics.models.sam.modules.tiny_encoder.LayerNorm2d
+
+
+## TinyViT
+---
+### ::: ultralytics.models.sam.modules.tiny_encoder.TinyViT
+
diff --git a/docs/reference/models/sam/modules/transformer.md b/docs/reference/models/sam/modules/transformer.md
new file mode 100644
index 000000000..70fd49764
--- /dev/null
+++ b/docs/reference/models/sam/modules/transformer.md
@@ -0,0 +1,14 @@
+## TwoWayTransformer
+---
+### ::: ultralytics.models.sam.modules.transformer.TwoWayTransformer
+
+
+## TwoWayAttentionBlock
+---
+### ::: ultralytics.models.sam.modules.transformer.TwoWayAttentionBlock
+
+
+## Attention
+---
+### ::: ultralytics.models.sam.modules.transformer.Attention
+
diff --git a/docs/reference/models/sam/predict.md b/docs/reference/models/sam/predict.md
new file mode 100644
index 000000000..038e1764e
--- /dev/null
+++ b/docs/reference/models/sam/predict.md
@@ -0,0 +1,4 @@
+## Predictor
+---
+### ::: ultralytics.models.sam.predict.Predictor
+
diff --git a/docs/reference/models/utils/loss.md b/docs/reference/models/utils/loss.md
new file mode 100644
index 000000000..5b4e016a0
--- /dev/null
+++ b/docs/reference/models/utils/loss.md
@@ -0,0 +1,9 @@
+## DETRLoss
+---
+### ::: ultralytics.models.utils.loss.DETRLoss
+
+
+## RTDETRDetectionLoss
+---
+### ::: ultralytics.models.utils.loss.RTDETRDetectionLoss
+
diff --git a/docs/reference/models/utils/ops.md b/docs/reference/models/utils/ops.md
new file mode 100644
index 000000000..ad639b2c6
--- /dev/null
+++ b/docs/reference/models/utils/ops.md
@@ -0,0 +1,14 @@
+## HungarianMatcher
+---
+### ::: ultralytics.models.utils.ops.HungarianMatcher
+
+
+## get_cdn_group
+---
+### ::: ultralytics.models.utils.ops.get_cdn_group
+
+
+## inverse_sigmoid
+---
+### ::: ultralytics.models.utils.ops.inverse_sigmoid
+
diff --git a/docs/reference/models/yolo/classify/predict.md b/docs/reference/models/yolo/classify/predict.md
new file mode 100644
index 000000000..ce1a5543f
--- /dev/null
+++ b/docs/reference/models/yolo/classify/predict.md
@@ -0,0 +1,9 @@
+## ClassificationPredictor
+---
+### ::: ultralytics.models.yolo.classify.predict.ClassificationPredictor
+
+
+## predict
+---
+### ::: ultralytics.models.yolo.classify.predict.predict
+
diff --git a/docs/reference/models/yolo/classify/train.md b/docs/reference/models/yolo/classify/train.md
new file mode 100644
index 000000000..0b505e680
--- /dev/null
+++ b/docs/reference/models/yolo/classify/train.md
@@ -0,0 +1,9 @@
+## ClassificationTrainer
+---
+### ::: ultralytics.models.yolo.classify.train.ClassificationTrainer
+
+
+## train
+---
+### ::: ultralytics.models.yolo.classify.train.train
+
diff --git a/docs/reference/models/yolo/classify/val.md b/docs/reference/models/yolo/classify/val.md
new file mode 100644
index 000000000..20038c5b0
--- /dev/null
+++ b/docs/reference/models/yolo/classify/val.md
@@ -0,0 +1,9 @@
+## ClassificationValidator
+---
+### ::: ultralytics.models.yolo.classify.val.ClassificationValidator
+
+
+## val
+---
+### ::: ultralytics.models.yolo.classify.val.val
+
diff --git a/docs/reference/models/yolo/detect/predict.md b/docs/reference/models/yolo/detect/predict.md
new file mode 100644
index 000000000..91b5438b0
--- /dev/null
+++ b/docs/reference/models/yolo/detect/predict.md
@@ -0,0 +1,9 @@
+## DetectionPredictor
+---
+### ::: ultralytics.models.yolo.detect.predict.DetectionPredictor
+
+
+## predict
+---
+### ::: ultralytics.models.yolo.detect.predict.predict
+
diff --git a/docs/reference/models/yolo/detect/train.md b/docs/reference/models/yolo/detect/train.md
new file mode 100644
index 000000000..a034a7a8f
--- /dev/null
+++ b/docs/reference/models/yolo/detect/train.md
@@ -0,0 +1,9 @@
+## DetectionTrainer
+---
+### ::: ultralytics.models.yolo.detect.train.DetectionTrainer
+
+
+## train
+---
+### ::: ultralytics.models.yolo.detect.train.train
+
diff --git a/docs/reference/models/yolo/detect/val.md b/docs/reference/models/yolo/detect/val.md
new file mode 100644
index 000000000..3ce049657
--- /dev/null
+++ b/docs/reference/models/yolo/detect/val.md
@@ -0,0 +1,9 @@
+## DetectionValidator
+---
+### ::: ultralytics.models.yolo.detect.val.DetectionValidator
+
+
+## val
+---
+### ::: ultralytics.models.yolo.detect.val.val
+
diff --git a/docs/reference/models/yolo/pose/predict.md b/docs/reference/models/yolo/pose/predict.md
new file mode 100644
index 000000000..589621e7a
--- /dev/null
+++ b/docs/reference/models/yolo/pose/predict.md
@@ -0,0 +1,9 @@
+## PosePredictor
+---
+### ::: ultralytics.models.yolo.pose.predict.PosePredictor
+
+
+## predict
+---
+### ::: ultralytics.models.yolo.pose.predict.predict
+
diff --git a/docs/reference/models/yolo/pose/train.md b/docs/reference/models/yolo/pose/train.md
new file mode 100644
index 000000000..f407029e1
--- /dev/null
+++ b/docs/reference/models/yolo/pose/train.md
@@ -0,0 +1,9 @@
+## PoseTrainer
+---
+### ::: ultralytics.models.yolo.pose.train.PoseTrainer
+
+
+## train
+---
+### ::: ultralytics.models.yolo.pose.train.train
+
diff --git a/docs/reference/models/yolo/pose/val.md b/docs/reference/models/yolo/pose/val.md
new file mode 100644
index 000000000..443cb84dc
--- /dev/null
+++ b/docs/reference/models/yolo/pose/val.md
@@ -0,0 +1,9 @@
+## PoseValidator
+---
+### ::: ultralytics.models.yolo.pose.val.PoseValidator
+
+
+## val
+---
+### ::: ultralytics.models.yolo.pose.val.val
+
diff --git a/docs/reference/models/yolo/segment/predict.md b/docs/reference/models/yolo/segment/predict.md
new file mode 100644
index 000000000..39b005fcb
--- /dev/null
+++ b/docs/reference/models/yolo/segment/predict.md
@@ -0,0 +1,9 @@
+## SegmentationPredictor
+---
+### ::: ultralytics.models.yolo.segment.predict.SegmentationPredictor
+
+
+## predict
+---
+### ::: ultralytics.models.yolo.segment.predict.predict
+
diff --git a/docs/reference/models/yolo/segment/train.md b/docs/reference/models/yolo/segment/train.md
new file mode 100644
index 000000000..36822ed44
--- /dev/null
+++ b/docs/reference/models/yolo/segment/train.md
@@ -0,0 +1,9 @@
+## SegmentationTrainer
+---
+### ::: ultralytics.models.yolo.segment.train.SegmentationTrainer
+
+
+## train
+---
+### ::: ultralytics.models.yolo.segment.train.train
+
diff --git a/docs/reference/models/yolo/segment/val.md b/docs/reference/models/yolo/segment/val.md
new file mode 100644
index 000000000..82afadd68
--- /dev/null
+++ b/docs/reference/models/yolo/segment/val.md
@@ -0,0 +1,9 @@
+## SegmentationValidator
+---
+### ::: ultralytics.models.yolo.segment.val.SegmentationValidator
+
+
+## val
+---
+### ::: ultralytics.models.yolo.segment.val.val
+
diff --git a/docs/reference/nn/autobackend.md b/docs/reference/nn/autobackend.md
index ccd10773e..6c4ffd1a7 100644
--- a/docs/reference/nn/autobackend.md
+++ b/docs/reference/nn/autobackend.md
@@ -1,8 +1,3 @@
----
-description: Ensure class names match filenames for easy imports. Use AutoBackend to automatically rename and refactor model files.
-keywords: AutoBackend, ultralytics, nn, autobackend, check class names, neural network
----
-
## AutoBackend
---
### ::: ultralytics.nn.autobackend.AutoBackend
diff --git a/docs/reference/nn/autoshape.md b/docs/reference/nn/autoshape.md
deleted file mode 100644
index b009e090a..000000000
--- a/docs/reference/nn/autoshape.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Detect 80+ object categories with bounding box coordinates and class probabilities using AutoShape in Ultralytics YOLO. Explore Detections now.
-keywords: Ultralytics, YOLO, docs, autoshape, detections, object detection, customized shapes, bounding boxes, computer vision
----
-
-## AutoShape
----
-### ::: ultralytics.nn.autoshape.AutoShape
-
-
-## Detections
----
-### ::: ultralytics.nn.autoshape.Detections
-
diff --git a/docs/reference/nn/modules/block.md b/docs/reference/nn/modules/block.md
index 3f1a54825..7e30b644c 100644
--- a/docs/reference/nn/modules/block.md
+++ b/docs/reference/nn/modules/block.md
@@ -1,8 +1,3 @@
----
-description: Explore ultralytics.nn.modules.block to build powerful YOLO object detection models. Master DFL, HGStem, SPP, CSP components and more.
-keywords: Ultralytics, NN Modules, Blocks, DFL, HGStem, SPP, C1, C2f, C3x, C3TR, GhostBottleneck, BottleneckCSP, Computer Vision
----
-
## DFL
---
### ::: ultralytics.nn.modules.block.DFL
diff --git a/docs/reference/nn/modules/conv.md b/docs/reference/nn/modules/conv.md
index 6a5df52c3..882354669 100644
--- a/docs/reference/nn/modules/conv.md
+++ b/docs/reference/nn/modules/conv.md
@@ -1,8 +1,3 @@
----
-description: Explore convolutional neural network modules & techniques such as LightConv, DWConv, ConvTranspose, GhostConv, CBAM & autopad with Ultralytics Docs.
-keywords: Ultralytics, Convolutional Neural Network, Conv2, DWConv, ConvTranspose, GhostConv, ChannelAttention, CBAM, autopad
----
-
## Conv
---
### ::: ultralytics.nn.modules.conv.Conv
diff --git a/docs/reference/nn/modules/head.md b/docs/reference/nn/modules/head.md
index 7a460b521..7f055e8b8 100644
--- a/docs/reference/nn/modules/head.md
+++ b/docs/reference/nn/modules/head.md
@@ -1,8 +1,3 @@
----
-description: 'Learn about Ultralytics YOLO modules: Segment, Classify, and RTDETRDecoder. Optimize object detection and classification in your project.'
-keywords: Ultralytics, YOLO, object detection, pose estimation, RTDETRDecoder, modules, classes, documentation
----
-
## Detect
---
### ::: ultralytics.nn.modules.head.Detect
diff --git a/docs/reference/nn/modules/transformer.md b/docs/reference/nn/modules/transformer.md
index a927f9b80..918178a87 100644
--- a/docs/reference/nn/modules/transformer.md
+++ b/docs/reference/nn/modules/transformer.md
@@ -1,8 +1,3 @@
----
-description: Explore the Ultralytics nn modules pages on Transformer and MLP blocks, LayerNorm2d, and Deformable Transformer Decoder Layer.
-keywords: Ultralytics, NN Modules, TransformerEncoderLayer, TransformerLayer, MLPBlock, LayerNorm2d, DeformableTransformerDecoderLayer, examples, code snippets, tutorials
----
-
## TransformerEncoderLayer
---
### ::: ultralytics.nn.modules.transformer.TransformerEncoderLayer
diff --git a/docs/reference/nn/modules/utils.md b/docs/reference/nn/modules/utils.md
index 94c4f3e51..740f58dda 100644
--- a/docs/reference/nn/modules/utils.md
+++ b/docs/reference/nn/modules/utils.md
@@ -1,8 +1,3 @@
----
-description: 'Learn about Ultralytics NN modules: get_clones, linear_init_, and multi_scale_deformable_attn_pytorch. Code examples and usage tips.'
-keywords: Ultralytics, NN Utils, Docs, PyTorch, bias initialization, linear initialization, multi-scale deformable attention
----
-
## _get_clones
---
### ::: ultralytics.nn.modules.utils._get_clones
diff --git a/docs/reference/nn/tasks.md b/docs/reference/nn/tasks.md
index 010cea15e..8285fa4d3 100644
--- a/docs/reference/nn/tasks.md
+++ b/docs/reference/nn/tasks.md
@@ -1,8 +1,3 @@
----
-description: Learn how to work with Ultralytics YOLO Detection, Segmentation & Classification Models, load weights and parse models in PyTorch.
-keywords: neural network, deep learning, computer vision, object detection, image segmentation, image classification, model ensemble, PyTorch
----
-
## BaseModel
---
### ::: ultralytics.nn.tasks.BaseModel
@@ -38,6 +33,11 @@ keywords: neural network, deep learning, computer vision, object detection, imag
### ::: ultralytics.nn.tasks.Ensemble


+## temporary_modules
+---
+### ::: ultralytics.nn.tasks.temporary_modules
+
+
## torch_safe_load
---
### ::: ultralytics.nn.tasks.torch_safe_load
diff --git a/docs/reference/tracker/track.md b/docs/reference/tracker/track.md
deleted file mode 100644
index 88db7f264..000000000
--- a/docs/reference/tracker/track.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-description: Learn how to register custom event-tracking and track predictions with Ultralytics YOLO via on_predict_start and register_tracker methods.
-keywords: Ultralytics YOLO, tracker registration, on_predict_start, object detection
----
-
-## on_predict_start
----
-### ::: ultralytics.tracker.track.on_predict_start
-
-
-## on_predict_postprocess_end
----
-### ::: ultralytics.tracker.track.on_predict_postprocess_end
-
-
-## register_tracker
----
-### ::: ultralytics.tracker.track.register_tracker
-
diff --git a/docs/reference/tracker/trackers/basetrack.md b/docs/reference/tracker/trackers/basetrack.md
deleted file mode 100644
index ab5cf58cd..000000000
--- a/docs/reference/tracker/trackers/basetrack.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: 'TrackState: A comprehensive guide to Ultralytics tracker''s BaseTrack for monitoring model performance. Improve your tracking capabilities now!'
-keywords: object detection, object tracking, Ultralytics YOLO, TrackState, workflow improvement
----
-
-## TrackState
----
-### ::: ultralytics.tracker.trackers.basetrack.TrackState
-
-
-## BaseTrack
----
-### ::: ultralytics.tracker.trackers.basetrack.BaseTrack
-
diff --git a/docs/reference/tracker/trackers/bot_sort.md b/docs/reference/tracker/trackers/bot_sort.md
deleted file mode 100644
index b53e9b143..000000000
--- a/docs/reference/tracker/trackers/bot_sort.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: '"Optimize tracking with Ultralytics BOTrack. Easily sort and track bots with BOTSORT. Streamline data collection for improved performance."'
-keywords: BOTrack, Ultralytics YOLO Docs, features, usage
----
-
-## BOTrack
----
-### ::: ultralytics.tracker.trackers.bot_sort.BOTrack
-
-
-## BOTSORT
----
-### ::: ultralytics.tracker.trackers.bot_sort.BOTSORT
-
diff --git a/docs/reference/tracker/trackers/byte_tracker.md b/docs/reference/tracker/trackers/byte_tracker.md
deleted file mode 100644
index 797be1db6..000000000
--- a/docs/reference/tracker/trackers/byte_tracker.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Learn how to track ByteAI model sizes and tips for model optimization with STrack, a byte tracking tool from Ultralytics.
-keywords: Byte Tracker, Ultralytics STrack, application monitoring, bytes sent, bytes received, code examples, setup instructions
----
-
-## STrack
----
-### ::: ultralytics.tracker.trackers.byte_tracker.STrack
-
-
-## BYTETracker
----
-### ::: ultralytics.tracker.trackers.byte_tracker.BYTETracker
-
diff --git a/docs/reference/tracker/utils/gmc.md b/docs/reference/tracker/utils/gmc.md
deleted file mode 100644
index 6441f071d..000000000
--- a/docs/reference/tracker/utils/gmc.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: '"Track Google Marketing Campaigns in GMC with Ultralytics Tracker. Learn to set up and use GMC for detailed analytics. Get started now."'
-keywords: Ultralytics, YOLO, object detection, tracker, optimization, models, documentation
----
-
-## GMC
----
-### ::: ultralytics.tracker.utils.gmc.GMC
-
diff --git a/docs/reference/tracker/utils/kalman_filter.md b/docs/reference/tracker/utils/kalman_filter.md
deleted file mode 100644
index fbe547305..000000000
--- a/docs/reference/tracker/utils/kalman_filter.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Improve object tracking with KalmanFilterXYAH in Ultralytics YOLO - an efficient and accurate algorithm for state estimation.
-keywords: KalmanFilterXYAH, Ultralytics Docs, Kalman filter algorithm, object tracking, computer vision, YOLO
----
-
-## KalmanFilterXYAH
----
-### ::: ultralytics.tracker.utils.kalman_filter.KalmanFilterXYAH
-
-
-## KalmanFilterXYWH
----
-### ::: ultralytics.tracker.utils.kalman_filter.KalmanFilterXYWH
-
diff --git a/docs/reference/tracker/utils/matching.md b/docs/reference/tracker/utils/matching.md
deleted file mode 100644
index 385b39ce8..000000000
--- a/docs/reference/tracker/utils/matching.md
+++ /dev/null
@@ -1,64 +0,0 @@
----
-description: Learn how to match and fuse object detections for accurate target tracking using Ultralytics' YOLO merge_matches, iou_distance, and embedding_distance.
-keywords: Ultralytics, multi-object tracking, object tracking, detection, recognition, matching, indices, iou distance, gate cost matrix, fuse iou, bbox ious
----
-
-## merge_matches
----
-### ::: ultralytics.tracker.utils.matching.merge_matches
-
-
-## _indices_to_matches
----
-### ::: ultralytics.tracker.utils.matching._indices_to_matches
-
-
-## linear_assignment
----
-### ::: ultralytics.tracker.utils.matching.linear_assignment
-
-
-## ious
----
-### ::: ultralytics.tracker.utils.matching.ious
-
-
-## iou_distance
----
-### ::: ultralytics.tracker.utils.matching.iou_distance
-
-
-## v_iou_distance
----
-### ::: ultralytics.tracker.utils.matching.v_iou_distance
-
-
-## embedding_distance
----
-### ::: ultralytics.tracker.utils.matching.embedding_distance
-
-
-## gate_cost_matrix
----
-### ::: ultralytics.tracker.utils.matching.gate_cost_matrix
-
-
-## fuse_motion
----
-### ::: ultralytics.tracker.utils.matching.fuse_motion
-
-
-## fuse_iou
----
-### ::: ultralytics.tracker.utils.matching.fuse_iou
-
-
-## fuse_score
----
-### ::: ultralytics.tracker.utils.matching.fuse_score
-
-
-## bbox_ious
----
-### ::: ultralytics.tracker.utils.matching.bbox_ious
-
diff --git a/docs/reference/trackers/basetrack.md b/docs/reference/trackers/basetrack.md
new file mode 100644
index 000000000..6118833a2
--- /dev/null
+++ b/docs/reference/trackers/basetrack.md
@@ -0,0 +1,9 @@
+## TrackState
+---
+### ::: ultralytics.trackers.basetrack.TrackState
+
+
+## BaseTrack
+---
+### ::: ultralytics.trackers.basetrack.BaseTrack
+
diff --git a/docs/reference/trackers/bot_sort.md b/docs/reference/trackers/bot_sort.md
new file mode 100644
index 000000000..d0dbc6994
--- /dev/null
+++ b/docs/reference/trackers/bot_sort.md
@@ -0,0 +1,9 @@
+## BOTrack
+---
+### ::: ultralytics.trackers.bot_sort.BOTrack
+
+
+## BOTSORT
+---
+### ::: ultralytics.trackers.bot_sort.BOTSORT
+
diff --git a/docs/reference/trackers/byte_tracker.md b/docs/reference/trackers/byte_tracker.md
new file mode 100644
index 000000000..f480c5a48
--- /dev/null
+++ b/docs/reference/trackers/byte_tracker.md
@@ -0,0 +1,9 @@
+## STrack
+---
+### ::: ultralytics.trackers.byte_tracker.STrack
+
+
+## BYTETracker
+---
+### ::: ultralytics.trackers.byte_tracker.BYTETracker
+
diff --git a/docs/reference/trackers/track.md b/docs/reference/trackers/track.md
new file mode 100644
index 000000000..bba3d0aa4
--- /dev/null
+++ b/docs/reference/trackers/track.md
@@ -0,0 +1,14 @@
+## on_predict_start
+---
+### ::: ultralytics.trackers.track.on_predict_start
+
+
+## on_predict_postprocess_end
+---
+### ::: ultralytics.trackers.track.on_predict_postprocess_end
+
+
+## register_tracker
+---
+### ::: ultralytics.trackers.track.register_tracker
+
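The pages added above re-home the tracker reference at `ultralytics.trackers.*` (the matching `ultralytics.tracker.*` pages are deleted above). A minimal, hedged import check of the paths these `:::` directives point at — only the module and symbol names visible in this diff are assumed to exist:

```python
# Smoke-check the renamed tracker modules documented above.
# Old (pages deleted above): ultralytics.tracker.track, ultralytics.tracker.trackers.byte_tracker, ...
# New (pages added above):   ultralytics.trackers.track, ultralytics.trackers.byte_tracker, ...
from ultralytics.trackers.track import on_predict_start, register_tracker
from ultralytics.trackers.byte_tracker import BYTETracker
from ultralytics.trackers.bot_sort import BOTSORT

print(BYTETracker.__module__)  # -> ultralytics.trackers.byte_tracker
```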
diff --git a/docs/reference/trackers/utils/gmc.md b/docs/reference/trackers/utils/gmc.md
new file mode 100644
index 000000000..299458a9d
--- /dev/null
+++ b/docs/reference/trackers/utils/gmc.md
@@ -0,0 +1,4 @@
+## GMC
+---
+### ::: ultralytics.trackers.utils.gmc.GMC
+
diff --git a/docs/reference/trackers/utils/kalman_filter.md b/docs/reference/trackers/utils/kalman_filter.md
new file mode 100644
index 000000000..3502d7477
--- /dev/null
+++ b/docs/reference/trackers/utils/kalman_filter.md
@@ -0,0 +1,9 @@
+## KalmanFilterXYAH
+---
+### ::: ultralytics.trackers.utils.kalman_filter.KalmanFilterXYAH
+
+
+## KalmanFilterXYWH
+---
+### ::: ultralytics.trackers.utils.kalman_filter.KalmanFilterXYWH
+
diff --git a/docs/reference/trackers/utils/matching.md b/docs/reference/trackers/utils/matching.md
new file mode 100644
index 000000000..a90eb129d
--- /dev/null
+++ b/docs/reference/trackers/utils/matching.md
@@ -0,0 +1,59 @@
+## merge_matches
+---
+### ::: ultralytics.trackers.utils.matching.merge_matches
+
+
+## _indices_to_matches
+---
+### ::: ultralytics.trackers.utils.matching._indices_to_matches
+
+
+## linear_assignment
+---
+### ::: ultralytics.trackers.utils.matching.linear_assignment
+
+
+## ious
+---
+### ::: ultralytics.trackers.utils.matching.ious
+
+
+## iou_distance
+---
+### ::: ultralytics.trackers.utils.matching.iou_distance
+
+
+## v_iou_distance
+---
+### ::: ultralytics.trackers.utils.matching.v_iou_distance
+
+
+## embedding_distance
+---
+### ::: ultralytics.trackers.utils.matching.embedding_distance
+
+
+## gate_cost_matrix
+---
+### ::: ultralytics.trackers.utils.matching.gate_cost_matrix
+
+
+## fuse_motion
+---
+### ::: ultralytics.trackers.utils.matching.fuse_motion
+
+
+## fuse_iou
+---
+### ::: ultralytics.trackers.utils.matching.fuse_iou
+
+
+## fuse_score
+---
+### ::: ultralytics.trackers.utils.matching.fuse_score
+
+
+## bbox_ious
+---
+### ::: ultralytics.trackers.utils.matching.bbox_ious
+
diff --git a/docs/reference/utils/__init__.md b/docs/reference/utils/__init__.md
new file mode 100644
index 000000000..bdf9f6d2f
--- /dev/null
+++ b/docs/reference/utils/__init__.md
@@ -0,0 +1,169 @@
+## SimpleClass
+---
+### ::: ultralytics.utils.SimpleClass
+
+
+## IterableSimpleNamespace
+---
+### ::: ultralytics.utils.IterableSimpleNamespace
+
+
+## EmojiFilter
+---
+### ::: ultralytics.utils.EmojiFilter
+
+
+## ThreadingLocked
+---
+### ::: ultralytics.utils.ThreadingLocked
+
+
+## TryExcept
+---
+### ::: ultralytics.utils.TryExcept
+
+
+## plt_settings
+---
+### ::: ultralytics.utils.plt_settings
+
+
+## set_logging
+---
+### ::: ultralytics.utils.set_logging
+
+
+## emojis
+---
+### ::: ultralytics.utils.emojis
+
+
+## yaml_save
+---
+### ::: ultralytics.utils.yaml_save
+
+
+## yaml_load
+---
+### ::: ultralytics.utils.yaml_load
+
+
+## yaml_print
+---
+### ::: ultralytics.utils.yaml_print
+
+
+## is_colab
+---
+### ::: ultralytics.utils.is_colab
+
+
+## is_kaggle
+---
+### ::: ultralytics.utils.is_kaggle
+
+
+## is_jupyter
+---
+### ::: ultralytics.utils.is_jupyter
+
+
+## is_docker
+---
+### ::: ultralytics.utils.is_docker
+
+
+## is_online
+---
+### ::: ultralytics.utils.is_online
+
+
+## is_pip_package
+---
+### ::: ultralytics.utils.is_pip_package
+
+
+## is_dir_writeable
+---
+### ::: ultralytics.utils.is_dir_writeable
+
+
+## is_pytest_running
+---
+### ::: ultralytics.utils.is_pytest_running
+
+
+## is_github_actions_ci
+---
+### ::: ultralytics.utils.is_github_actions_ci
+
+
+## is_git_dir
+---
+### ::: ultralytics.utils.is_git_dir
+
+
+## get_git_dir
+---
+### ::: ultralytics.utils.get_git_dir
+
+
+## get_git_origin_url
+---
+### ::: ultralytics.utils.get_git_origin_url
+
+
+## get_git_branch
+---
+### ::: ultralytics.utils.get_git_branch
+
+
+## get_default_args
+---
+### ::: ultralytics.utils.get_default_args
+
+
+## get_user_config_dir
+---
+### ::: ultralytics.utils.get_user_config_dir
+
+
+## colorstr
+---
+### ::: ultralytics.utils.colorstr
+
+
+## threaded
+---
+### ::: ultralytics.utils.threaded
+
+
+## set_sentry
+---
+### ::: ultralytics.utils.set_sentry
+
+
+## get_settings
+---
+### ::: ultralytics.utils.get_settings
+
+
+## set_settings
+---
+### ::: ultralytics.utils.set_settings
+
+
+## deprecation_warn
+---
+### ::: ultralytics.utils.deprecation_warn
+
+
+## clean_url
+---
+### ::: ultralytics.utils.clean_url
+
+
+## url2file
+---
+### ::: ultralytics.utils.url2file
+
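The utilities listed above are now documented at the top level of the `ultralytics.utils` package. A small hedged sketch of two of them; the file name and keys are hypothetical, and the `(file, data)` / `(file)` call patterns are assumptions rather than something this diff specifies:

```python
# Hedged usage sketch for helpers documented above in ultralytics.utils.
from ultralytics.utils import colorstr, yaml_load, yaml_save

yaml_save("example.yaml", {"lr0": 0.01, "epochs": 100})  # hypothetical file and keys
cfg = yaml_load("example.yaml")                          # read it back as a dict
print(colorstr("bold", "loaded:"), cfg)                  # colorized terminal output
```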
diff --git a/docs/reference/utils/autobatch.md b/docs/reference/utils/autobatch.md
new file mode 100644
index 000000000..f23bc2048
--- /dev/null
+++ b/docs/reference/utils/autobatch.md
@@ -0,0 +1,9 @@
+## check_train_batch_size
+---
+### ::: ultralytics.utils.autobatch.check_train_batch_size
+
+
+## autobatch
+---
+### ::: ultralytics.utils.autobatch.autobatch
+
diff --git a/docs/reference/utils/benchmarks.md b/docs/reference/utils/benchmarks.md
new file mode 100644
index 000000000..0e1d16669
--- /dev/null
+++ b/docs/reference/utils/benchmarks.md
@@ -0,0 +1,9 @@
+## ProfileModels
+---
+### ::: ultralytics.utils.benchmarks.ProfileModels
+
+
+## benchmark
+---
+### ::: ultralytics.utils.benchmarks.benchmark
+
diff --git a/docs/reference/utils/callbacks/base.md b/docs/reference/utils/callbacks/base.md
new file mode 100644
index 000000000..350d61a26
--- /dev/null
+++ b/docs/reference/utils/callbacks/base.md
@@ -0,0 +1,134 @@
+## on_pretrain_routine_start
+---
+### ::: ultralytics.utils.callbacks.base.on_pretrain_routine_start
+
+
+## on_pretrain_routine_end
+---
+### ::: ultralytics.utils.callbacks.base.on_pretrain_routine_end
+
+
+## on_train_start
+---
+### ::: ultralytics.utils.callbacks.base.on_train_start
+
+
+## on_train_epoch_start
+---
+### ::: ultralytics.utils.callbacks.base.on_train_epoch_start
+
+
+## on_train_batch_start
+---
+### ::: ultralytics.utils.callbacks.base.on_train_batch_start
+
+
+## optimizer_step
+---
+### ::: ultralytics.utils.callbacks.base.optimizer_step
+
+
+## on_before_zero_grad
+---
+### ::: ultralytics.utils.callbacks.base.on_before_zero_grad
+
+
+## on_train_batch_end
+---
+### ::: ultralytics.utils.callbacks.base.on_train_batch_end
+
+
+## on_train_epoch_end
+---
+### ::: ultralytics.utils.callbacks.base.on_train_epoch_end
+
+
+## on_fit_epoch_end
+---
+### ::: ultralytics.utils.callbacks.base.on_fit_epoch_end
+
+
+## on_model_save
+---
+### ::: ultralytics.utils.callbacks.base.on_model_save
+
+
+## on_train_end
+---
+### ::: ultralytics.utils.callbacks.base.on_train_end
+
+
+## on_params_update
+---
+### ::: ultralytics.utils.callbacks.base.on_params_update
+
+
+## teardown
+---
+### ::: ultralytics.utils.callbacks.base.teardown
+
+
+## on_val_start
+---
+### ::: ultralytics.utils.callbacks.base.on_val_start
+
+
+## on_val_batch_start
+---
+### ::: ultralytics.utils.callbacks.base.on_val_batch_start
+
+
+## on_val_batch_end
+---
+### ::: ultralytics.utils.callbacks.base.on_val_batch_end
+
+
+## on_val_end
+---
+### ::: ultralytics.utils.callbacks.base.on_val_end
+
+
+## on_predict_start
+---
+### ::: ultralytics.utils.callbacks.base.on_predict_start
+
+
+## on_predict_batch_start
+---
+### ::: ultralytics.utils.callbacks.base.on_predict_batch_start
+
+
+## on_predict_batch_end
+---
+### ::: ultralytics.utils.callbacks.base.on_predict_batch_end
+
+
+## on_predict_postprocess_end
+---
+### ::: ultralytics.utils.callbacks.base.on_predict_postprocess_end
+
+
+## on_predict_end
+---
+### ::: ultralytics.utils.callbacks.base.on_predict_end
+
+
+## on_export_start
+---
+### ::: ultralytics.utils.callbacks.base.on_export_start
+
+
+## on_export_end
+---
+### ::: ultralytics.utils.callbacks.base.on_export_end
+
+
+## get_default_callbacks
+---
+### ::: ultralytics.utils.callbacks.base.get_default_callbacks
+
+
+## add_integration_callbacks
+---
+### ::: ultralytics.utils.callbacks.base.add_integration_callbacks
+
diff --git a/docs/reference/utils/callbacks/clearml.md b/docs/reference/utils/callbacks/clearml.md
new file mode 100644
index 000000000..39ef7cc84
--- /dev/null
+++ b/docs/reference/utils/callbacks/clearml.md
@@ -0,0 +1,34 @@
+## _log_debug_samples
+---
+### ::: ultralytics.utils.callbacks.clearml._log_debug_samples
+
+
+## _log_plot
+---
+### ::: ultralytics.utils.callbacks.clearml._log_plot
+
+
+## on_pretrain_routine_start
+---
+### ::: ultralytics.utils.callbacks.clearml.on_pretrain_routine_start
+
+
+## on_train_epoch_end
+---
+### ::: ultralytics.utils.callbacks.clearml.on_train_epoch_end
+
+
+## on_fit_epoch_end
+---
+### ::: ultralytics.utils.callbacks.clearml.on_fit_epoch_end
+
+
+## on_val_end
+---
+### ::: ultralytics.utils.callbacks.clearml.on_val_end
+
+
+## on_train_end
+---
+### ::: ultralytics.utils.callbacks.clearml.on_train_end
+
diff --git a/docs/reference/utils/callbacks/comet.md b/docs/reference/utils/callbacks/comet.md
new file mode 100644
index 000000000..7f18c1a50
--- /dev/null
+++ b/docs/reference/utils/callbacks/comet.md
@@ -0,0 +1,119 @@
+## _get_comet_mode
+---
+### ::: ultralytics.utils.callbacks.comet._get_comet_mode
+
+
+## _get_comet_model_name
+---
+### ::: ultralytics.utils.callbacks.comet._get_comet_model_name
+
+
+## _get_eval_batch_logging_interval
+---
+### ::: ultralytics.utils.callbacks.comet._get_eval_batch_logging_interval
+
+
+## _get_max_image_predictions_to_log
+---
+### ::: ultralytics.utils.callbacks.comet._get_max_image_predictions_to_log
+
+
+## _scale_confidence_score
+---
+### ::: ultralytics.utils.callbacks.comet._scale_confidence_score
+
+
+## _should_log_confusion_matrix
+---
+### ::: ultralytics.utils.callbacks.comet._should_log_confusion_matrix
+
+
+## _should_log_image_predictions
+---
+### ::: ultralytics.utils.callbacks.comet._should_log_image_predictions
+
+
+## _get_experiment_type
+---
+### ::: ultralytics.utils.callbacks.comet._get_experiment_type
+
+
+## _create_experiment
+---
+### ::: ultralytics.utils.callbacks.comet._create_experiment
+
+
+## _fetch_trainer_metadata
+---
+### ::: ultralytics.utils.callbacks.comet._fetch_trainer_metadata
+
+
+## _scale_bounding_box_to_original_image_shape
+---
+### ::: ultralytics.utils.callbacks.comet._scale_bounding_box_to_original_image_shape
+
+
+## _format_ground_truth_annotations_for_detection
+---
+### ::: ultralytics.utils.callbacks.comet._format_ground_truth_annotations_for_detection
+
+
+## _format_prediction_annotations_for_detection
+---
+### ::: ultralytics.utils.callbacks.comet._format_prediction_annotations_for_detection
+
+
+## _fetch_annotations
+---
+### ::: ultralytics.utils.callbacks.comet._fetch_annotations
+
+
+## _create_prediction_metadata_map
+---
+### ::: ultralytics.utils.callbacks.comet._create_prediction_metadata_map
+
+
+## _log_confusion_matrix
+---
+### ::: ultralytics.utils.callbacks.comet._log_confusion_matrix
+
+
+## _log_images
+---
+### ::: ultralytics.utils.callbacks.comet._log_images
+
+
+## _log_image_predictions
+---
+### ::: ultralytics.utils.callbacks.comet._log_image_predictions
+
+
+## _log_plots
+---
+### ::: ultralytics.utils.callbacks.comet._log_plots
+
+
+## _log_model
+---
+### ::: ultralytics.utils.callbacks.comet._log_model
+
+
+## on_pretrain_routine_start
+---
+### ::: ultralytics.utils.callbacks.comet.on_pretrain_routine_start
+
+
+## on_train_epoch_end
+---
+### ::: ultralytics.utils.callbacks.comet.on_train_epoch_end
+
+
+## on_fit_epoch_end
+---
+### ::: ultralytics.utils.callbacks.comet.on_fit_epoch_end
+
+
+## on_train_end
+---
+### ::: ultralytics.utils.callbacks.comet.on_train_end
+
diff --git a/docs/reference/utils/callbacks/dvc.md b/docs/reference/utils/callbacks/dvc.md
new file mode 100644
index 000000000..3d0fcef47
--- /dev/null
+++ b/docs/reference/utils/callbacks/dvc.md
@@ -0,0 +1,49 @@
+## _logger_disabled
+---
+### ::: ultralytics.utils.callbacks.dvc._logger_disabled
+
+
+## _log_images
+---
+### ::: ultralytics.utils.callbacks.dvc._log_images
+
+
+## _log_plots
+---
+### ::: ultralytics.utils.callbacks.dvc._log_plots
+
+
+## _log_confusion_matrix
+---
+### ::: ultralytics.utils.callbacks.dvc._log_confusion_matrix
+
+
+## on_pretrain_routine_start
+---
+### ::: ultralytics.utils.callbacks.dvc.on_pretrain_routine_start
+
+
+## on_pretrain_routine_end
+---
+### ::: ultralytics.utils.callbacks.dvc.on_pretrain_routine_end
+
+
+## on_train_start
+---
+### ::: ultralytics.utils.callbacks.dvc.on_train_start
+
+
+## on_train_epoch_start
+---
+### ::: ultralytics.utils.callbacks.dvc.on_train_epoch_start
+
+
+## on_fit_epoch_end
+---
+### ::: ultralytics.utils.callbacks.dvc.on_fit_epoch_end
+
+
+## on_train_end
+---
+### ::: ultralytics.utils.callbacks.dvc.on_train_end
+
diff --git a/docs/reference/utils/callbacks/hub.md b/docs/reference/utils/callbacks/hub.md
new file mode 100644
index 000000000..ec99c0193
--- /dev/null
+++ b/docs/reference/utils/callbacks/hub.md
@@ -0,0 +1,39 @@
+## on_pretrain_routine_end
+---
+### ::: ultralytics.utils.callbacks.hub.on_pretrain_routine_end
+
+
+## on_fit_epoch_end
+---
+### ::: ultralytics.utils.callbacks.hub.on_fit_epoch_end
+
+
+## on_model_save
+---
+### ::: ultralytics.utils.callbacks.hub.on_model_save
+
+
+## on_train_end
+---
+### ::: ultralytics.utils.callbacks.hub.on_train_end
+
+
+## on_train_start
+---
+### ::: ultralytics.utils.callbacks.hub.on_train_start
+
+
+## on_val_start
+---
+### ::: ultralytics.utils.callbacks.hub.on_val_start
+
+
+## on_predict_start
+---
+### ::: ultralytics.utils.callbacks.hub.on_predict_start
+
+
+## on_export_start
+---
+### ::: ultralytics.utils.callbacks.hub.on_export_start
+
diff --git a/docs/reference/utils/callbacks/mlflow.md b/docs/reference/utils/callbacks/mlflow.md
new file mode 100644
index 000000000..77d11a067
--- /dev/null
+++ b/docs/reference/utils/callbacks/mlflow.md
@@ -0,0 +1,14 @@
+## on_pretrain_routine_end
+---
+### ::: ultralytics.utils.callbacks.mlflow.on_pretrain_routine_end
+
+
+## on_fit_epoch_end
+---
+### ::: ultralytics.utils.callbacks.mlflow.on_fit_epoch_end
+
+
+## on_train_end
+---
+### ::: ultralytics.utils.callbacks.mlflow.on_train_end
+
diff --git a/docs/reference/utils/callbacks/neptune.md b/docs/reference/utils/callbacks/neptune.md
new file mode 100644
index 000000000..a745c6a5d
--- /dev/null
+++ b/docs/reference/utils/callbacks/neptune.md
@@ -0,0 +1,39 @@
+## _log_scalars
+---
+### ::: ultralytics.utils.callbacks.neptune._log_scalars
+
+
+## _log_images
+---
+### ::: ultralytics.utils.callbacks.neptune._log_images
+
+
+## _log_plot
+---
+### ::: ultralytics.utils.callbacks.neptune._log_plot
+
+
+## on_pretrain_routine_start
+---
+### ::: ultralytics.utils.callbacks.neptune.on_pretrain_routine_start
+
+
+## on_train_epoch_end
+---
+### ::: ultralytics.utils.callbacks.neptune.on_train_epoch_end
+
+
+## on_fit_epoch_end
+---
+### ::: ultralytics.utils.callbacks.neptune.on_fit_epoch_end
+
+
+## on_val_end
+---
+### ::: ultralytics.utils.callbacks.neptune.on_val_end
+
+
+## on_train_end
+---
+### ::: ultralytics.utils.callbacks.neptune.on_train_end
+
diff --git a/docs/reference/utils/callbacks/raytune.md b/docs/reference/utils/callbacks/raytune.md
new file mode 100644
index 000000000..1cb2f901e
--- /dev/null
+++ b/docs/reference/utils/callbacks/raytune.md
@@ -0,0 +1,4 @@
+## on_fit_epoch_end
+---
+### ::: ultralytics.utils.callbacks.raytune.on_fit_epoch_end
+
diff --git a/docs/reference/utils/callbacks/tensorboard.md b/docs/reference/utils/callbacks/tensorboard.md
new file mode 100644
index 000000000..a22d76b8a
--- /dev/null
+++ b/docs/reference/utils/callbacks/tensorboard.md
@@ -0,0 +1,19 @@
+## _log_scalars
+---
+### ::: ultralytics.utils.callbacks.tensorboard._log_scalars
+
+
+## on_pretrain_routine_start
+---
+### ::: ultralytics.utils.callbacks.tensorboard.on_pretrain_routine_start
+
+
+## on_batch_end
+---
+### ::: ultralytics.utils.callbacks.tensorboard.on_batch_end
+
+
+## on_fit_epoch_end
+---
+### ::: ultralytics.utils.callbacks.tensorboard.on_fit_epoch_end
+
diff --git a/docs/reference/utils/callbacks/wb.md b/docs/reference/utils/callbacks/wb.md
new file mode 100644
index 000000000..87ff13417
--- /dev/null
+++ b/docs/reference/utils/callbacks/wb.md
@@ -0,0 +1,24 @@
+## _log_plots
+---
+### ::: ultralytics.utils.callbacks.wb._log_plots
+
+
+## on_pretrain_routine_start
+---
+### ::: ultralytics.utils.callbacks.wb.on_pretrain_routine_start
+
+
+## on_fit_epoch_end
+---
+### ::: ultralytics.utils.callbacks.wb.on_fit_epoch_end
+
+
+## on_train_epoch_end
+---
+### ::: ultralytics.utils.callbacks.wb.on_train_epoch_end
+
+
+## on_train_end
+---
+### ::: ultralytics.utils.callbacks.wb.on_train_end
+
diff --git a/docs/reference/utils/checks.md b/docs/reference/utils/checks.md
new file mode 100644
index 000000000..bf79ed77a
--- /dev/null
+++ b/docs/reference/utils/checks.md
@@ -0,0 +1,89 @@
+## is_ascii
+---
+### ::: ultralytics.utils.checks.is_ascii
+
+
+## check_imgsz
+---
+### ::: ultralytics.utils.checks.check_imgsz
+
+
+## check_version
+---
+### ::: ultralytics.utils.checks.check_version
+
+
+## check_latest_pypi_version
+---
+### ::: ultralytics.utils.checks.check_latest_pypi_version
+
+
+## check_pip_update_available
+---
+### ::: ultralytics.utils.checks.check_pip_update_available
+
+
+## check_font
+---
+### ::: ultralytics.utils.checks.check_font
+
+
+## check_python
+---
+### ::: ultralytics.utils.checks.check_python
+
+
+## check_requirements
+---
+### ::: ultralytics.utils.checks.check_requirements
+
+
+## check_torchvision
+---
+### ::: ultralytics.utils.checks.check_torchvision
+
+
+## check_suffix
+---
+### ::: ultralytics.utils.checks.check_suffix
+
+
+## check_yolov5u_filename
+---
+### ::: ultralytics.utils.checks.check_yolov5u_filename
+
+
+## check_file
+---
+### ::: ultralytics.utils.checks.check_file
+
+
+## check_yaml
+---
+### ::: ultralytics.utils.checks.check_yaml
+
+
+## check_imshow
+---
+### ::: ultralytics.utils.checks.check_imshow
+
+
+## check_yolo
+---
+### ::: ultralytics.utils.checks.check_yolo
+
+
+## check_amp
+---
+### ::: ultralytics.utils.checks.check_amp
+
+
+## git_describe
+---
+### ::: ultralytics.utils.checks.git_describe
+
+
+## print_args
+---
+### ::: ultralytics.utils.checks.print_args
+
diff --git a/docs/reference/utils/dist.md b/docs/reference/utils/dist.md
new file mode 100644
index 000000000..c5122a34a
--- /dev/null
+++ b/docs/reference/utils/dist.md
@@ -0,0 +1,19 @@
+## find_free_network_port
+---
+### ::: ultralytics.utils.dist.find_free_network_port
+
+
+## generate_ddp_file
+---
+### ::: ultralytics.utils.dist.generate_ddp_file
+
+
+## generate_ddp_command
+---
+### ::: ultralytics.utils.dist.generate_ddp_command
+
+
+## ddp_cleanup
+---
+### ::: ultralytics.utils.dist.ddp_cleanup
+
diff --git a/docs/reference/utils/downloads.md b/docs/reference/utils/downloads.md
new file mode 100644
index 000000000..8f9da2b20
--- /dev/null
+++ b/docs/reference/utils/downloads.md
@@ -0,0 +1,34 @@
+## is_url
+---
+### ::: ultralytics.utils.downloads.is_url
+
+
+## unzip_file
+---
+### ::: ultralytics.utils.downloads.unzip_file
+
+
+## check_disk_space
+---
+### ::: ultralytics.utils.downloads.check_disk_space
+
+
+## safe_download
+---
+### ::: ultralytics.utils.downloads.safe_download
+
+
+## get_github_assets
+---
+### ::: ultralytics.utils.downloads.get_github_assets
+
+
+## attempt_download_asset
+---
+### ::: ultralytics.utils.downloads.attempt_download_asset
+
+
+## download
+---
+### ::: ultralytics.utils.downloads.download
+
diff --git a/docs/reference/utils/errors.md b/docs/reference/utils/errors.md
new file mode 100644
index 000000000..a508c793a
--- /dev/null
+++ b/docs/reference/utils/errors.md
@@ -0,0 +1,4 @@
+## HUBModelError
+---
+### ::: ultralytics.utils.errors.HUBModelError
+
diff --git a/docs/reference/utils/files.md b/docs/reference/utils/files.md
new file mode 100644
index 000000000..0143b5ff1
--- /dev/null
+++ b/docs/reference/utils/files.md
@@ -0,0 +1,34 @@
+## WorkingDirectory
+---
+### ::: ultralytics.utils.files.WorkingDirectory
+
+
+## increment_path
+---
+### ::: ultralytics.utils.files.increment_path
+
+
+## file_age
+---
+### ::: ultralytics.utils.files.file_age
+
+
+## file_date
+---
+### ::: ultralytics.utils.files.file_date
+
+
+## file_size
+---
+### ::: ultralytics.utils.files.file_size
+
+
+## get_latest_run
+---
+### ::: ultralytics.utils.files.get_latest_run
+
+
+## make_dirs
+---
+### ::: ultralytics.utils.files.make_dirs
+
diff --git a/docs/reference/utils/instance.md b/docs/reference/utils/instance.md
new file mode 100644
index 000000000..03771c3bd
--- /dev/null
+++ b/docs/reference/utils/instance.md
@@ -0,0 +1,14 @@
+## Bboxes
+---
+### ::: ultralytics.utils.instance.Bboxes
+
+
+## Instances
+---
+### ::: ultralytics.utils.instance.Instances
+
+
+## _ntuple
+---
+### ::: ultralytics.utils.instance._ntuple
+
diff --git a/docs/reference/utils/loss.md b/docs/reference/utils/loss.md
new file mode 100644
index 000000000..164db8a75
--- /dev/null
+++ b/docs/reference/utils/loss.md
@@ -0,0 +1,39 @@
+## VarifocalLoss
+---
+### ::: ultralytics.utils.loss.VarifocalLoss
+
+
+## FocalLoss
+---
+### ::: ultralytics.utils.loss.FocalLoss
+
+
+## BboxLoss
+---
+### ::: ultralytics.utils.loss.BboxLoss
+
+
+## KeypointLoss
+---
+### ::: ultralytics.utils.loss.KeypointLoss
+
+
+## v8DetectionLoss
+---
+### ::: ultralytics.utils.loss.v8DetectionLoss
+
+
+## v8SegmentationLoss
+---
+### ::: ultralytics.utils.loss.v8SegmentationLoss
+
+
+## v8PoseLoss
+---
+### ::: ultralytics.utils.loss.v8PoseLoss
+
+
+## v8ClassificationLoss
+---
+### ::: ultralytics.utils.loss.v8ClassificationLoss
+
diff --git a/docs/reference/utils/metrics.md b/docs/reference/utils/metrics.md
new file mode 100644
index 000000000..4c7764a7b
--- /dev/null
+++ b/docs/reference/utils/metrics.md
@@ -0,0 +1,89 @@
+## ConfusionMatrix
+---
+### ::: ultralytics.utils.metrics.ConfusionMatrix
+
+
+## Metric
+---
+### ::: ultralytics.utils.metrics.Metric
+
+
+## DetMetrics
+---
+### ::: ultralytics.utils.metrics.DetMetrics
+
+
+## SegmentMetrics
+---
+### ::: ultralytics.utils.metrics.SegmentMetrics
+
+
+## PoseMetrics
+---
+### ::: ultralytics.utils.metrics.PoseMetrics
+
+
+## ClassifyMetrics
+---
+### ::: ultralytics.utils.metrics.ClassifyMetrics
+
+
+## box_area
+---
+### ::: ultralytics.utils.metrics.box_area
+
+
+## bbox_ioa
+---
+### ::: ultralytics.utils.metrics.bbox_ioa
+
+
+## box_iou
+---
+### ::: ultralytics.utils.metrics.box_iou
+
+
+## bbox_iou
+---
+### ::: ultralytics.utils.metrics.bbox_iou
+
+
+## mask_iou
+---
+### ::: ultralytics.utils.metrics.mask_iou
+
+
+## kpt_iou
+---
+### ::: ultralytics.utils.metrics.kpt_iou
+
+
+## smooth_BCE
+---
+### ::: ultralytics.utils.metrics.smooth_BCE
+
+
+## smooth
+---
+### ::: ultralytics.utils.metrics.smooth
+
+
+## plot_pr_curve
+---
+### ::: ultralytics.utils.metrics.plot_pr_curve
+
+
+## plot_mc_curve
+---
+### ::: ultralytics.utils.metrics.plot_mc_curve
+
+
+## compute_ap
+---
+### ::: ultralytics.utils.metrics.compute_ap
+
+
+## ap_per_class
+---
+### ::: ultralytics.utils.metrics.ap_per_class
+
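`ultralytics.utils.metrics`, added above, gathers the IoU helpers. A hedged worked example for `box_iou`; the pairwise (N, M) output and xyxy box convention are assumptions based on the usual signature, not stated in this diff:

```python
import torch

# Assumed pairwise IoU between two sets of xyxy boxes, as documented above.
from ultralytics.utils.metrics import box_iou

boxes_a = torch.tensor([[0.0, 0.0, 10.0, 10.0]])   # one 10x10 box
boxes_b = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                        [5.0, 5.0, 15.0, 15.0]])    # identical box + shifted box
iou = box_iou(boxes_a, boxes_b)                     # expected shape (1, 2)
print(iou)  # ~[[1.0000, 0.1429]]  (25 overlap / 175 union for the shifted pair)
```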
diff --git a/docs/reference/utils/ops.md b/docs/reference/utils/ops.md
new file mode 100644
index 000000000..0ed015abf
--- /dev/null
+++ b/docs/reference/utils/ops.md
@@ -0,0 +1,139 @@
+## Profile
+---
+### ::: ultralytics.utils.ops.Profile
+
+
+## coco80_to_coco91_class
+---
+### ::: ultralytics.utils.ops.coco80_to_coco91_class
+
+
+## segment2box
+---
+### ::: ultralytics.utils.ops.segment2box
+
+
+## scale_boxes
+---
+### ::: ultralytics.utils.ops.scale_boxes
+
+
+## make_divisible
+---
+### ::: ultralytics.utils.ops.make_divisible
+
+
+## non_max_suppression
+---
+### ::: ultralytics.utils.ops.non_max_suppression
+
+
+## clip_boxes
+---
+### ::: ultralytics.utils.ops.clip_boxes
+
+
+## clip_coords
+---
+### ::: ultralytics.utils.ops.clip_coords
+
+
+## scale_image
+---
+### ::: ultralytics.utils.ops.scale_image
+
+
+## xyxy2xywh
+---
+### ::: ultralytics.utils.ops.xyxy2xywh
+
+
+## xywh2xyxy
+---
+### ::: ultralytics.utils.ops.xywh2xyxy
+
+
+## xywhn2xyxy
+---
+### ::: ultralytics.utils.ops.xywhn2xyxy
+
+
+## xyxy2xywhn
+---
+### ::: ultralytics.utils.ops.xyxy2xywhn
+
+
+## xyn2xy
+---
+### ::: ultralytics.utils.ops.xyn2xy
+
+
+## xywh2ltwh
+---
+### ::: ultralytics.utils.ops.xywh2ltwh
+
+
+## xyxy2ltwh
+---
+### ::: ultralytics.utils.ops.xyxy2ltwh
+
+
+## ltwh2xywh
+---
+### ::: ultralytics.utils.ops.ltwh2xywh
+
+
+## ltwh2xyxy
+---
+### ::: ultralytics.utils.ops.ltwh2xyxy
+
+
+## segments2boxes
+---
+### ::: ultralytics.utils.ops.segments2boxes
+
+
+## resample_segments
+---
+### ::: ultralytics.utils.ops.resample_segments
+
+
+## crop_mask
+---
+### ::: ultralytics.utils.ops.crop_mask
+
+
+## process_mask_upsample
+---
+### ::: ultralytics.utils.ops.process_mask_upsample
+
+
+## process_mask
+---
+### ::: ultralytics.utils.ops.process_mask
+
+
+## process_mask_native
+---
+### ::: ultralytics.utils.ops.process_mask_native
+
+
+## scale_masks
+---
+### ::: ultralytics.utils.ops.scale_masks
+
+
+## scale_coords
+---
+### ::: ultralytics.utils.ops.scale_coords
+
+
+## masks2segments
+---
+### ::: ultralytics.utils.ops.masks2segments
+
+
+## clean_str
+---
+### ::: ultralytics.utils.ops.clean_str
+
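The coordinate converters above (`xyxy2xywh`, `xywh2xyxy`, and friends) are now documented under `ultralytics.utils.ops`. A hedged round-trip sketch; the (N, 4) tensor-in/tensor-out behavior is assumed rather than taken from this diff:

```python
import torch

# Assumed round-trip between corner (xyxy) and center (xywh) box formats
# using the converters documented above.
from ultralytics.utils.ops import xywh2xyxy, xyxy2xywh

xyxy = torch.tensor([[10.0, 20.0, 50.0, 80.0]])  # x1, y1, x2, y2
xywh = xyxy2xywh(xyxy)                           # -> [[30., 50., 40., 60.]] (cx, cy, w, h)
assert torch.allclose(xywh2xyxy(xywh), xyxy)     # converting back recovers the input
```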
diff --git a/docs/reference/utils/patches.md b/docs/reference/utils/patches.md
new file mode 100644
index 000000000..30ccb2b2e
--- /dev/null
+++ b/docs/reference/utils/patches.md
@@ -0,0 +1,19 @@
+## imread
+---
+### ::: ultralytics.utils.patches.imread
+
+
+## imwrite
+---
+### ::: ultralytics.utils.patches.imwrite
+
+
+## imshow
+---
+### ::: ultralytics.utils.patches.imshow
+
+
+## torch_save
+---
+### ::: ultralytics.utils.patches.torch_save
+
diff --git a/docs/reference/utils/plotting.md b/docs/reference/utils/plotting.md
new file mode 100644
index 000000000..fb641daeb
--- /dev/null
+++ b/docs/reference/utils/plotting.md
@@ -0,0 +1,39 @@
+## Colors
+---
+### ::: ultralytics.utils.plotting.Colors
+
+
+## Annotator
+---
+### ::: ultralytics.utils.plotting.Annotator
+
+
+## plot_labels
+---
+### ::: ultralytics.utils.plotting.plot_labels
+
+
+## save_one_box
+---
+### ::: ultralytics.utils.plotting.save_one_box
+
+
+## plot_images
+---
+### ::: ultralytics.utils.plotting.plot_images
+
+
+## plot_results
+---
+### ::: ultralytics.utils.plotting.plot_results
+
+
+## output_to_target
+---
+### ::: ultralytics.utils.plotting.output_to_target
+
+
+## feature_visualization
+---
+### ::: ultralytics.utils.plotting.feature_visualization
+
diff --git a/docs/reference/utils/tal.md b/docs/reference/utils/tal.md
new file mode 100644
index 000000000..99dc54355
--- /dev/null
+++ b/docs/reference/utils/tal.md
@@ -0,0 +1,29 @@
+## TaskAlignedAssigner
+---
+### ::: ultralytics.utils.tal.TaskAlignedAssigner
+
+
+## select_candidates_in_gts
+---
+### ::: ultralytics.utils.tal.select_candidates_in_gts
+
+
+## select_highest_overlaps
+---
+### ::: ultralytics.utils.tal.select_highest_overlaps
+
+
+## make_anchors
+---
+### ::: ultralytics.utils.tal.make_anchors
+
+
+## dist2bbox
+---
+### ::: ultralytics.utils.tal.dist2bbox
+
+
+## bbox2dist
+---
+### ::: ultralytics.utils.tal.bbox2dist
+
diff --git a/docs/reference/utils/torch_utils.md b/docs/reference/utils/torch_utils.md
new file mode 100644
index 000000000..2117cd978
--- /dev/null
+++ b/docs/reference/utils/torch_utils.md
@@ -0,0 +1,134 @@
+## ModelEMA
+---
+### ::: ultralytics.utils.torch_utils.ModelEMA
+
+
+## EarlyStopping
+---
+### ::: ultralytics.utils.torch_utils.EarlyStopping
+
+
+## torch_distributed_zero_first
+---
+### ::: ultralytics.utils.torch_utils.torch_distributed_zero_first
+
+
+## smart_inference_mode
+---
+### ::: ultralytics.utils.torch_utils.smart_inference_mode
+
+
+## get_cpu_info
+---
+### ::: ultralytics.utils.torch_utils.get_cpu_info
+
+
+## select_device
+---
+### ::: ultralytics.utils.torch_utils.select_device
+
+
+## time_sync
+---
+### ::: ultralytics.utils.torch_utils.time_sync
+
+
+## fuse_conv_and_bn
+---
+### ::: ultralytics.utils.torch_utils.fuse_conv_and_bn
+
+
+## fuse_deconv_and_bn
+---
+### ::: ultralytics.utils.torch_utils.fuse_deconv_and_bn
+
+
+## model_info
+---
+### ::: ultralytics.utils.torch_utils.model_info
+
+
+## get_num_params
+---
+### ::: ultralytics.utils.torch_utils.get_num_params
+
+
+## get_num_gradients
+---
+### ::: ultralytics.utils.torch_utils.get_num_gradients
+
+
+## model_info_for_loggers
+---
+### ::: ultralytics.utils.torch_utils.model_info_for_loggers
+
+
+## get_flops
+---
+### ::: ultralytics.utils.torch_utils.get_flops
+
+
+## get_flops_with_torch_profiler
+---
+### ::: ultralytics.utils.torch_utils.get_flops_with_torch_profiler
+
+
+## initialize_weights
+---
+### ::: ultralytics.utils.torch_utils.initialize_weights
+
+
+## scale_img
+---
+### ::: ultralytics.utils.torch_utils.scale_img
+
+
+## make_divisible
+---
+### ::: ultralytics.utils.torch_utils.make_divisible
+
+
+## copy_attr
+---
+### ::: ultralytics.utils.torch_utils.copy_attr
+
+
+## get_latest_opset
+---
+### ::: ultralytics.utils.torch_utils.get_latest_opset
+
+
+## intersect_dicts
+---
+### ::: ultralytics.utils.torch_utils.intersect_dicts
+
+
+## is_parallel
+---
+### ::: ultralytics.utils.torch_utils.is_parallel
+
+
+## de_parallel
+---
+### ::: ultralytics.utils.torch_utils.de_parallel
+
+
+## one_cycle
+---
+### ::: ultralytics.utils.torch_utils.one_cycle
+
+
+## init_seeds
+---
+### ::: ultralytics.utils.torch_utils.init_seeds
+
+
+## strip_optimizer
+---
+### ::: ultralytics.utils.torch_utils.strip_optimizer
+
+
+## profile
+---
+### ::: ultralytics.utils.torch_utils.profile
+
diff --git a/docs/reference/utils/tuner.md b/docs/reference/utils/tuner.md
new file mode 100644
index 000000000..40263c589
--- /dev/null
+++ b/docs/reference/utils/tuner.md
@@ -0,0 +1,4 @@
+## run_ray_tune
+---
+### ::: ultralytics.utils.tuner.run_ray_tune
+
diff --git a/docs/reference/vit/rtdetr/model.md b/docs/reference/vit/rtdetr/model.md
deleted file mode 100644
index f4446087c..000000000
--- a/docs/reference/vit/rtdetr/model.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: Learn about the RTDETR model in Ultralytics YOLO Docs and how it can be used for object detection with improved speed and accuracy. Find implementation details and more.
-keywords: RTDETR, Ultralytics, YOLO, object detection, speed, accuracy, implementation details
----
-
-## RTDETR
----
-### ::: ultralytics.vit.rtdetr.model.RTDETR
-
diff --git a/docs/reference/vit/rtdetr/predict.md b/docs/reference/vit/rtdetr/predict.md
deleted file mode 100644
index 032c2da59..000000000
--- a/docs/reference/vit/rtdetr/predict.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: Learn about the RTDETRPredictor class and how to use it for vision transformer object detection with Ultralytics YOLO.
-keywords: RTDETRPredictor, object detection, vision transformer, Ultralytics YOLO
----
-
-## RTDETRPredictor
----
-### ::: ultralytics.vit.rtdetr.predict.RTDETRPredictor
-
diff --git a/docs/reference/vit/rtdetr/train.md b/docs/reference/vit/rtdetr/train.md
deleted file mode 100644
index 03f33f7c8..000000000
--- a/docs/reference/vit/rtdetr/train.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Learn how to use RTDETRTrainer from Ultralytics YOLO Docs. Train object detection models with the latest VIT-based RTDETR system.
-keywords: RTDETRTrainer, Ultralytics YOLO Docs, object detection, VIT-based RTDETR system, train
----
-
-## RTDETRTrainer
----
-### ::: ultralytics.vit.rtdetr.train.RTDETRTrainer
-
-
-## train
----
-### ::: ultralytics.vit.rtdetr.train.train
-
diff --git a/docs/reference/vit/rtdetr/val.md b/docs/reference/vit/rtdetr/val.md
deleted file mode 100644
index 32359b32f..000000000
--- a/docs/reference/vit/rtdetr/val.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Documentation for RTDETRValidator data validation tool in Ultralytics RTDETRDataset.
-keywords: RTDETRDataset, RTDETRValidator, data validation, documentation
----
-
-## RTDETRDataset
----
-### ::: ultralytics.vit.rtdetr.val.RTDETRDataset
-
-
-## RTDETRValidator
----
-### ::: ultralytics.vit.rtdetr.val.RTDETRValidator
-
diff --git a/docs/reference/vit/sam/amg.md b/docs/reference/vit/sam/amg.md
deleted file mode 100644
index a5b5e4f82..000000000
--- a/docs/reference/vit/sam/amg.md
+++ /dev/null
@@ -1,89 +0,0 @@
----
-description: Explore and learn about functions in Ultralytics MaskData library such as mask_to_rle_pytorch, area_from_rle, generate_crop_boxes, and more.
-keywords: Ultralytics, SAM, MaskData, mask_to_rle_pytorch, area_from_rle, generate_crop_boxes, batched_mask_to_box, documentation
----
-
-## MaskData
----
-### ::: ultralytics.vit.sam.amg.MaskData
-
-
-## is_box_near_crop_edge
----
-### ::: ultralytics.vit.sam.amg.is_box_near_crop_edge
-
-
-## box_xyxy_to_xywh
----
-### ::: ultralytics.vit.sam.amg.box_xyxy_to_xywh
-
-
-## batch_iterator
----
-### ::: ultralytics.vit.sam.amg.batch_iterator
-
-
-## mask_to_rle_pytorch
----
-### ::: ultralytics.vit.sam.amg.mask_to_rle_pytorch
-
-
-## rle_to_mask
----
-### ::: ultralytics.vit.sam.amg.rle_to_mask
-
-
-## area_from_rle
----
-### ::: ultralytics.vit.sam.amg.area_from_rle
-
-
-## calculate_stability_score
----
-### ::: ultralytics.vit.sam.amg.calculate_stability_score
-
-
-## build_point_grid
----
-### ::: ultralytics.vit.sam.amg.build_point_grid
-
-
-## build_all_layer_point_grids
----
-### ::: ultralytics.vit.sam.amg.build_all_layer_point_grids
-
-
-## generate_crop_boxes
----
-### ::: ultralytics.vit.sam.amg.generate_crop_boxes
-
-
-## uncrop_boxes_xyxy
----
-### ::: ultralytics.vit.sam.amg.uncrop_boxes_xyxy
-
-
-## uncrop_points
----
-### ::: ultralytics.vit.sam.amg.uncrop_points
-
-
-## uncrop_masks
----
-### ::: ultralytics.vit.sam.amg.uncrop_masks
-
-
-## remove_small_regions
----
-### ::: ultralytics.vit.sam.amg.remove_small_regions
-
-
-## coco_encode_rle
----
-### ::: ultralytics.vit.sam.amg.coco_encode_rle
-
-
-## batched_mask_to_box
----
-### ::: ultralytics.vit.sam.amg.batched_mask_to_box
-
diff --git a/docs/reference/vit/sam/build.md b/docs/reference/vit/sam/build.md
deleted file mode 100644
index c44e48b51..000000000
--- a/docs/reference/vit/sam/build.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-description: Learn how to build SAM and VIT models with Ultralytics YOLO Docs. Enhance your understanding of computer vision models today!.
-keywords: SAM, VIT, computer vision models, build SAM models, build VIT models, Ultralytics YOLO Docs
----
-
-## build_sam_vit_h
----
-### ::: ultralytics.vit.sam.build.build_sam_vit_h
-
-
-## build_sam_vit_l
----
-### ::: ultralytics.vit.sam.build.build_sam_vit_l
-
-
-## build_sam_vit_b
----
-### ::: ultralytics.vit.sam.build.build_sam_vit_b
-
-
-## build_mobile_sam
----
-### ::: ultralytics.vit.sam.build.build_mobile_sam
-
-
-## _build_sam
----
-### ::: ultralytics.vit.sam.build._build_sam
-
-
-## build_sam
----
-### ::: ultralytics.vit.sam.build.build_sam
-
diff --git a/docs/reference/vit/sam/model.md b/docs/reference/vit/sam/model.md
deleted file mode 100644
index 4149847fc..000000000
--- a/docs/reference/vit/sam/model.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: Learn about the Ultralytics VIT SAM model for object detection and how it can help streamline your computer vision workflow. Check out the documentation for implementation details and examples.
-keywords: Ultralytics, VIT, SAM, object detection, computer vision, deep learning, implementation, examples
----
-
-## SAM
----
-### ::: ultralytics.vit.sam.model.SAM
-
diff --git a/docs/reference/vit/sam/modules/decoders.md b/docs/reference/vit/sam/modules/decoders.md
deleted file mode 100644
index 940d720c6..000000000
--- a/docs/reference/vit/sam/modules/decoders.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Learn about Ultralytics YOLO's MaskDecoder, Transformer architecture, MLP, mask prediction, and quality prediction.
-keywords: Ultralytics YOLO, MaskDecoder, Transformer architecture, mask prediction, image embeddings, prompt embeddings, multi-mask output, MLP, mask quality prediction
----
-
-## MaskDecoder
----
-### ::: ultralytics.vit.sam.modules.decoders.MaskDecoder
-
-
-## MLP
----
-### ::: ultralytics.vit.sam.modules.decoders.MLP
-
diff --git a/docs/reference/vit/sam/modules/encoders.md b/docs/reference/vit/sam/modules/encoders.md
deleted file mode 100644
index bd5760a97..000000000
--- a/docs/reference/vit/sam/modules/encoders.md
+++ /dev/null
@@ -1,54 +0,0 @@
----
-description: Learn about Ultralytics ViT encoder, position embeddings, attention, window partition, and more in our comprehensive documentation.
-keywords: Ultralytics YOLO, ViT Encoder, Position Embeddings, Attention, Window Partition, Rel Pos Encoding
----
-
-## ImageEncoderViT
----
-### ::: ultralytics.vit.sam.modules.encoders.ImageEncoderViT
-
-
-## PromptEncoder
----
-### ::: ultralytics.vit.sam.modules.encoders.PromptEncoder
-
-
-## PositionEmbeddingRandom
----
-### ::: ultralytics.vit.sam.modules.encoders.PositionEmbeddingRandom
-
-
-## Block
----
-### ::: ultralytics.vit.sam.modules.encoders.Block
-
-
-## Attention
----
-### ::: ultralytics.vit.sam.modules.encoders.Attention
-
-
-## PatchEmbed
----
-### ::: ultralytics.vit.sam.modules.encoders.PatchEmbed
-
-
-## window_partition
----
-### ::: ultralytics.vit.sam.modules.encoders.window_partition
-
-
-## window_unpartition
----
-### ::: ultralytics.vit.sam.modules.encoders.window_unpartition
-
-
-## get_rel_pos
----
-### ::: ultralytics.vit.sam.modules.encoders.get_rel_pos
-
-
-## add_decomposed_rel_pos
----
-### ::: ultralytics.vit.sam.modules.encoders.add_decomposed_rel_pos
-
diff --git a/docs/reference/vit/sam/modules/sam.md b/docs/reference/vit/sam/modules/sam.md
deleted file mode 100644
index 7ead8cb7f..000000000
--- a/docs/reference/vit/sam/modules/sam.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: Explore the Sam module in Ultralytics VIT, a PyTorch-based vision library, and learn how to improve your image classification and segmentation tasks.
-keywords: Ultralytics VIT, Sam module, PyTorch vision library, image classification, segmentation tasks
----
-
-## Sam
----
-### ::: ultralytics.vit.sam.modules.sam.Sam
-
diff --git a/docs/reference/vit/sam/modules/tiny_encoder.md b/docs/reference/vit/sam/modules/tiny_encoder.md
deleted file mode 100644
index eb20355ff..000000000
--- a/docs/reference/vit/sam/modules/tiny_encoder.md
+++ /dev/null
@@ -1,59 +0,0 @@
----
-description: Learn about the Conv2d_BN, MBConv, ConvLayer, Attention, BasicLayer, and TinyViT modules.
-keywords: Conv2d_BN, MBConv, ConvLayer, Attention, BasicLayer, TinyViT
----
-
-## Conv2d_BN
----
-### ::: ultralytics.vit.sam.modules.tiny_encoder.Conv2d_BN
-
-
-## PatchEmbed
----
-### ::: ultralytics.vit.sam.modules.tiny_encoder.PatchEmbed
-
-
-## MBConv
----
-### ::: ultralytics.vit.sam.modules.tiny_encoder.MBConv
-
-
-## PatchMerging
----
-### ::: ultralytics.vit.sam.modules.tiny_encoder.PatchMerging
-
-
-## ConvLayer
----
-### ::: ultralytics.vit.sam.modules.tiny_encoder.ConvLayer
-
-
-## Mlp
----
-### ::: ultralytics.vit.sam.modules.tiny_encoder.Mlp
-
-
-## Attention
----
-### ::: ultralytics.vit.sam.modules.tiny_encoder.Attention
-
-
-## TinyViTBlock
----
-### ::: ultralytics.vit.sam.modules.tiny_encoder.TinyViTBlock
-
-
-## BasicLayer
----
-### ::: ultralytics.vit.sam.modules.tiny_encoder.BasicLayer
-
-
-## LayerNorm2d
----
-### ::: ultralytics.vit.sam.modules.tiny_encoder.LayerNorm2d
-
-
-## TinyViT
----
-### ::: ultralytics.vit.sam.modules.tiny_encoder.TinyViT
-
diff --git a/docs/reference/vit/sam/modules/transformer.md b/docs/reference/vit/sam/modules/transformer.md
deleted file mode 100644
index e0d8eeb86..000000000
--- a/docs/reference/vit/sam/modules/transformer.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-description: Explore the Attention and TwoWayTransformer modules in Ultralytics YOLO documentation. Learn how to integrate them in your project efficiently.
-keywords: Ultralytics YOLO, Attention module, TwoWayTransformer module, Object Detection, Deep Learning
----
-
-## TwoWayTransformer
----
-### ::: ultralytics.vit.sam.modules.transformer.TwoWayTransformer
-
-
-## TwoWayAttentionBlock
----
-### ::: ultralytics.vit.sam.modules.transformer.TwoWayAttentionBlock
-
-
-## Attention
----
-### ::: ultralytics.vit.sam.modules.transformer.Attention
-
diff --git a/docs/reference/vit/sam/predict.md b/docs/reference/vit/sam/predict.md
deleted file mode 100644
index 35479518b..000000000
--- a/docs/reference/vit/sam/predict.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: The VIT SAM Predictor from Ultralytics provides object detection capabilities for YOLO. Learn how to use it and speed up your object detection models.
-keywords: Ultralytics, VIT SAM Predictor, object detection, YOLO
----
-
-## Predictor
----
-### ::: ultralytics.vit.sam.predict.Predictor
-
diff --git a/docs/reference/vit/utils/loss.md b/docs/reference/vit/utils/loss.md
deleted file mode 100644
index cd45d5f5f..000000000
--- a/docs/reference/vit/utils/loss.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: DETRLoss is a method for optimizing detection of objects in images. Learn how to use it in RTDETRDetectionLoss at Ultralytics Docs.
-keywords: DETRLoss, RTDETRDetectionLoss, Ultralytics, object detection, image classification, computer vision
----
-
-## DETRLoss
----
-### ::: ultralytics.vit.utils.loss.DETRLoss
-
-
-## RTDETRDetectionLoss
----
-### ::: ultralytics.vit.utils.loss.RTDETRDetectionLoss
-
diff --git a/docs/reference/vit/utils/ops.md b/docs/reference/vit/utils/ops.md
deleted file mode 100644
index e4660f094..000000000
--- a/docs/reference/vit/utils/ops.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-description: Learn about HungarianMatcher and inverse_sigmoid functions in the Ultralytics YOLO Docs. Improve your object detection skills today!.
-keywords: Ultralytics, YOLO, object detection, HungarianMatcher, inverse_sigmoid
----
-
-## HungarianMatcher
----
-### ::: ultralytics.vit.utils.ops.HungarianMatcher
-
-
-## get_cdn_group
----
-### ::: ultralytics.vit.utils.ops.get_cdn_group
-
-
-## inverse_sigmoid
----
-### ::: ultralytics.vit.utils.ops.inverse_sigmoid
-
diff --git a/docs/reference/yolo/cfg/__init__.md b/docs/reference/yolo/cfg/__init__.md
deleted file mode 100644
index 26f4e54ad..000000000
--- a/docs/reference/yolo/cfg/__init__.md
+++ /dev/null
@@ -1,49 +0,0 @@
----
-description: Explore Ultralytics YOLO's configuration functions and tools. Handle settings, manage defaults, and deal with deprecations in your YOLO configuration.
-keywords: Ultralytics, YOLO, configuration, cfg2dict, get_cfg, handle_deprecation, check_cfg_mismatch, merge_equals_args, handle_yolo_hub, handle_yolo_settings, entrypoint, copy_default_cfg
----
-
-## cfg2dict
----
-### ::: ultralytics.yolo.cfg.cfg2dict
-
-
-## get_cfg
----
-### ::: ultralytics.yolo.cfg.get_cfg
-
-
-## _handle_deprecation
----
-### ::: ultralytics.yolo.cfg._handle_deprecation
-
-
-## check_cfg_mismatch
----
-### ::: ultralytics.yolo.cfg.check_cfg_mismatch
-
-
-## merge_equals_args
----
-### ::: ultralytics.yolo.cfg.merge_equals_args
-
-
-## handle_yolo_hub
----
-### ::: ultralytics.yolo.cfg.handle_yolo_hub
-
-
-## handle_yolo_settings
----
-### ::: ultralytics.yolo.cfg.handle_yolo_settings
-
-
-## entrypoint
----
-### ::: ultralytics.yolo.cfg.entrypoint
-
-
-## copy_default_cfg
----
-### ::: ultralytics.yolo.cfg.copy_default_cfg
-
diff --git a/docs/reference/yolo/data/annotator.md b/docs/reference/yolo/data/annotator.md
deleted file mode 100644
index 0999d5866..000000000
--- a/docs/reference/yolo/data/annotator.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: Learn how to use auto_annotate in Ultralytics YOLO to generate annotations automatically for your dataset. Simplify object detection workflows.
-keywords: Ultralytics YOLO, Auto Annotator, AI, image annotation, object detection, labelling, tool
----
-
-## auto_annotate
----
-### ::: ultralytics.yolo.data.annotator.auto_annotate
-
diff --git a/docs/reference/yolo/data/augment.md b/docs/reference/yolo/data/augment.md
deleted file mode 100644
index 634f183c0..000000000
--- a/docs/reference/yolo/data/augment.md
+++ /dev/null
@@ -1,99 +0,0 @@
----
-description: Use Ultralytics YOLO Data Augmentation transforms with Base, MixUp, and Albumentations for object detection and classification.
-keywords: YOLO, data augmentation, transforms, BaseTransform, MixUp, RandomHSV, Albumentations, ToTensor, classify_transforms, classify_albumentations
----
-
-## BaseTransform
----
-### ::: ultralytics.yolo.data.augment.BaseTransform
-
-
-## Compose
----
-### ::: ultralytics.yolo.data.augment.Compose
-
-
-## BaseMixTransform
----
-### ::: ultralytics.yolo.data.augment.BaseMixTransform
-
-
-## Mosaic
----
-### ::: ultralytics.yolo.data.augment.Mosaic
-
-
-## MixUp
----
-### ::: ultralytics.yolo.data.augment.MixUp
-
-
-## RandomPerspective
----
-### ::: ultralytics.yolo.data.augment.RandomPerspective
-
-
-## RandomHSV
----
-### ::: ultralytics.yolo.data.augment.RandomHSV
-
-
-## RandomFlip
----
-### ::: ultralytics.yolo.data.augment.RandomFlip
-
-
-## LetterBox
----
-### ::: ultralytics.yolo.data.augment.LetterBox
-
-
-## CopyPaste
----
-### ::: ultralytics.yolo.data.augment.CopyPaste
-
-
-## Albumentations
----
-### ::: ultralytics.yolo.data.augment.Albumentations
-
-
-## Format
----
-### ::: ultralytics.yolo.data.augment.Format
-
-
-## ClassifyLetterBox
----
-### ::: ultralytics.yolo.data.augment.ClassifyLetterBox
-
-
-## CenterCrop
----
-### ::: ultralytics.yolo.data.augment.CenterCrop
-
-
-## ToTensor
----
-### ::: ultralytics.yolo.data.augment.ToTensor
-
-
-## v8_transforms
----
-### ::: ultralytics.yolo.data.augment.v8_transforms
-
-
-## classify_transforms
----
-### ::: ultralytics.yolo.data.augment.classify_transforms
-
-
-## hsv2colorjitter
----
-### ::: ultralytics.yolo.data.augment.hsv2colorjitter
-
-
-## classify_albumentations
----
-### ::: ultralytics.yolo.data.augment.classify_albumentations
-
diff --git a/docs/reference/yolo/data/base.md b/docs/reference/yolo/data/base.md
deleted file mode 100644
index 3742e672e..000000000
--- a/docs/reference/yolo/data/base.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: Learn about BaseDataset in Ultralytics YOLO, a flexible dataset class for object detection. Maximize your YOLO performance with custom datasets.
-keywords: BaseDataset, Ultralytics YOLO, object detection, real-world applications, documentation
----
-
-## BaseDataset
----
-### ::: ultralytics.yolo.data.base.BaseDataset
-
diff --git a/docs/reference/yolo/data/build.md b/docs/reference/yolo/data/build.md
deleted file mode 100644
index cfee450b5..000000000
--- a/docs/reference/yolo/data/build.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-description: Maximize YOLO performance with Ultralytics' InfiniteDataLoader, seed_worker, build_dataloader, and load_inference_source functions.
-keywords: Ultralytics, YOLO, object detection, data loading, build dataloader, load inference source
----
-
-## InfiniteDataLoader
----
-### ::: ultralytics.yolo.data.build.InfiniteDataLoader
-
-
-## _RepeatSampler
----
-### ::: ultralytics.yolo.data.build._RepeatSampler
-
-
-## seed_worker
----
-### ::: ultralytics.yolo.data.build.seed_worker
-
-
-## build_yolo_dataset
----
-### ::: ultralytics.yolo.data.build.build_yolo_dataset
-
-
-## build_dataloader
----
-### ::: ultralytics.yolo.data.build.build_dataloader
-
-
-## check_source
----
-### ::: ultralytics.yolo.data.build.check_source
-
-
-## load_inference_source
----
-### ::: ultralytics.yolo.data.build.load_inference_source
-
diff --git a/docs/reference/yolo/data/converter.md b/docs/reference/yolo/data/converter.md
deleted file mode 100644
index 6fd5d7e29..000000000
--- a/docs/reference/yolo/data/converter.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-description: Convert COCO-91 to COCO-80 class, RLE to polygon, and merge multi-segment images with Ultralytics YOLO data converter. Improve your object detection.
-keywords: Ultralytics, YOLO, converter, COCO91, COCO80, rle2polygon, merge_multi_segment, annotations
----
-
-## coco91_to_coco80_class
----
-### ::: ultralytics.yolo.data.converter.coco91_to_coco80_class
-
-
-## convert_coco
----
-### ::: ultralytics.yolo.data.converter.convert_coco
-
-
-## rle2polygon
----
-### ::: ultralytics.yolo.data.converter.rle2polygon
-
-
-## min_index
----
-### ::: ultralytics.yolo.data.converter.min_index
-
-
-## merge_multi_segment
----
-### ::: ultralytics.yolo.data.converter.merge_multi_segment
-
-
-## delete_dsstore
----
-### ::: ultralytics.yolo.data.converter.delete_dsstore
-
diff --git a/docs/reference/yolo/data/dataloaders/stream_loaders.md b/docs/reference/yolo/data/dataloaders/stream_loaders.md
deleted file mode 100644
index 40afd3d64..000000000
--- a/docs/reference/yolo/data/dataloaders/stream_loaders.md
+++ /dev/null
@@ -1,44 +0,0 @@
----
-description: 'Ultralytics YOLO Docs: Learn about stream loaders for image and tensor data, as well as autocasting techniques. Check out SourceTypes and more.'
-keywords: Ultralytics YOLO, data loaders, stream load images, screenshots, tensor data, autocast list, youtube URL retriever
----
-
-## SourceTypes
----
-### ::: ultralytics.yolo.data.dataloaders.stream_loaders.SourceTypes
-
-
-## LoadStreams
----
-### ::: ultralytics.yolo.data.dataloaders.stream_loaders.LoadStreams
-
-
-## LoadScreenshots
----
-### ::: ultralytics.yolo.data.dataloaders.stream_loaders.LoadScreenshots
-
-
-## LoadImages
----
-### ::: ultralytics.yolo.data.dataloaders.stream_loaders.LoadImages
-
-
-## LoadPilAndNumpy
----
-### ::: ultralytics.yolo.data.dataloaders.stream_loaders.LoadPilAndNumpy
-
-
-## LoadTensor
----
-### ::: ultralytics.yolo.data.dataloaders.stream_loaders.LoadTensor
-
-
-## autocast_list
----
-### ::: ultralytics.yolo.data.dataloaders.stream_loaders.autocast_list
-
-
-## get_best_youtube_url
----
-### ::: ultralytics.yolo.data.dataloaders.stream_loaders.get_best_youtube_url
-
diff --git a/docs/reference/yolo/data/dataloaders/v5augmentations.md b/docs/reference/yolo/data/dataloaders/v5augmentations.md
deleted file mode 100644
index 63df3692c..000000000
--- a/docs/reference/yolo/data/dataloaders/v5augmentations.md
+++ /dev/null
@@ -1,89 +0,0 @@
----
-description: Enhance image data with Albumentations CenterCrop, normalize, augment_hsv, replicate, random_perspective, cutout, & box_candidates.
-keywords: YOLO, object detection, data loaders, V5 augmentations, CenterCrop, normalize, random_perspective
----
-
-## Albumentations
----
-### ::: ultralytics.yolo.data.dataloaders.v5augmentations.Albumentations
-
-
-## LetterBox
----
-### ::: ultralytics.yolo.data.dataloaders.v5augmentations.LetterBox
-
-
-## CenterCrop
----
-### ::: ultralytics.yolo.data.dataloaders.v5augmentations.CenterCrop
-
-
-## ToTensor
----
-### ::: ultralytics.yolo.data.dataloaders.v5augmentations.ToTensor
-
-
-## normalize
----
-### ::: ultralytics.yolo.data.dataloaders.v5augmentations.normalize
-
-
-## denormalize
----
-### ::: ultralytics.yolo.data.dataloaders.v5augmentations.denormalize
-
-
-## augment_hsv
----
-### ::: ultralytics.yolo.data.dataloaders.v5augmentations.augment_hsv
-
-
-## hist_equalize
----
-### ::: ultralytics.yolo.data.dataloaders.v5augmentations.hist_equalize
-
-
-## replicate
----
-### ::: ultralytics.yolo.data.dataloaders.v5augmentations.replicate
-
-
-## letterbox
----
-### ::: ultralytics.yolo.data.dataloaders.v5augmentations.letterbox
-
-
-## random_perspective
----
-### ::: ultralytics.yolo.data.dataloaders.v5augmentations.random_perspective
-
-
-## copy_paste
----
-### ::: ultralytics.yolo.data.dataloaders.v5augmentations.copy_paste
-
-
-## cutout
----
-### ::: ultralytics.yolo.data.dataloaders.v5augmentations.cutout
-
-
-## mixup
----
-### ::: ultralytics.yolo.data.dataloaders.v5augmentations.mixup
-
-
-## box_candidates
----
-### ::: ultralytics.yolo.data.dataloaders.v5augmentations.box_candidates
-
-
-## classify_albumentations
----
-### ::: ultralytics.yolo.data.dataloaders.v5augmentations.classify_albumentations
-
-
-## classify_transforms
----
-### ::: ultralytics.yolo.data.dataloaders.v5augmentations.classify_transforms
-
diff --git a/docs/reference/yolo/data/dataloaders/v5loader.md b/docs/reference/yolo/data/dataloaders/v5loader.md
deleted file mode 100644
index 559869519..000000000
--- a/docs/reference/yolo/data/dataloaders/v5loader.md
+++ /dev/null
@@ -1,94 +0,0 @@
----
-description: Efficiently load images and labels to models using Ultralytics YOLO's InfiniteDataLoader, LoadScreenshots, and LoadStreams.
-keywords: YOLO, data loader, image classification, object detection, Ultralytics
----
-
-## InfiniteDataLoader
----
-### ::: ultralytics.yolo.data.dataloaders.v5loader.InfiniteDataLoader
-
-
-## _RepeatSampler
----
-### ::: ultralytics.yolo.data.dataloaders.v5loader._RepeatSampler
-
-
-## LoadScreenshots
----
-### ::: ultralytics.yolo.data.dataloaders.v5loader.LoadScreenshots
-
-
-## LoadImages
----
-### ::: ultralytics.yolo.data.dataloaders.v5loader.LoadImages
-
-
-## LoadStreams
----
-### ::: ultralytics.yolo.data.dataloaders.v5loader.LoadStreams
-
-
-## LoadImagesAndLabels
----
-### ::: ultralytics.yolo.data.dataloaders.v5loader.LoadImagesAndLabels
-
-
-## ClassificationDataset
----
-### ::: ultralytics.yolo.data.dataloaders.v5loader.ClassificationDataset
-
-
-## get_hash
----
-### ::: ultralytics.yolo.data.dataloaders.v5loader.get_hash
-
-
-## exif_size
----
-### ::: ultralytics.yolo.data.dataloaders.v5loader.exif_size
-
-
-## exif_transpose
----
-### ::: ultralytics.yolo.data.dataloaders.v5loader.exif_transpose
-
-
-## seed_worker
----
-### ::: ultralytics.yolo.data.dataloaders.v5loader.seed_worker
-
-
-## create_dataloader
----
-### ::: ultralytics.yolo.data.dataloaders.v5loader.create_dataloader
-
-
-## img2label_paths
----
-### ::: ultralytics.yolo.data.dataloaders.v5loader.img2label_paths
-
-
-## flatten_recursive
----
-### ::: ultralytics.yolo.data.dataloaders.v5loader.flatten_recursive
-
-
-## extract_boxes
----
-### ::: ultralytics.yolo.data.dataloaders.v5loader.extract_boxes
-
-
-## autosplit
----
-### ::: ultralytics.yolo.data.dataloaders.v5loader.autosplit
-
-
-## verify_image_label
----
-### ::: ultralytics.yolo.data.dataloaders.v5loader.verify_image_label
-
-
-## create_classification_dataloader
----
-### ::: ultralytics.yolo.data.dataloaders.v5loader.create_classification_dataloader
-
diff --git a/docs/reference/yolo/data/dataset.md b/docs/reference/yolo/data/dataset.md
deleted file mode 100644
index f42a0a11f..000000000
--- a/docs/reference/yolo/data/dataset.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-description: Create custom YOLOv5 datasets with Ultralytics YOLODataset and SemanticDataset. Streamline your object detection and segmentation projects.
-keywords: YOLODataset, SemanticDataset, Ultralytics YOLO Docs, Object Detection, Segmentation
----
-
-## YOLODataset
----
-### ::: ultralytics.yolo.data.dataset.YOLODataset
-
-
-## ClassificationDataset
----
-### ::: ultralytics.yolo.data.dataset.ClassificationDataset
-
-
-## SemanticDataset
----
-### ::: ultralytics.yolo.data.dataset.SemanticDataset
-
diff --git a/docs/reference/yolo/data/dataset_wrappers.md b/docs/reference/yolo/data/dataset_wrappers.md
deleted file mode 100644
index c56806df3..000000000
--- a/docs/reference/yolo/data/dataset_wrappers.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: Create a custom dataset of mixed and oriented rectangular objects with Ultralytics YOLO's MixAndRectDataset.
-keywords: Ultralytics YOLO, MixAndRectDataset, dataset wrapper, image-level annotations, object-level annotations, rectangular object detection
----
-
-## MixAndRectDataset
----
-### ::: ultralytics.yolo.data.dataset_wrappers.MixAndRectDataset
-
diff --git a/docs/reference/yolo/data/utils.md b/docs/reference/yolo/data/utils.md
deleted file mode 100644
index f0f2e2fad..000000000
--- a/docs/reference/yolo/data/utils.md
+++ /dev/null
@@ -1,69 +0,0 @@
----
-description: Efficiently handle data in YOLO with Ultralytics. Utilize HUBDatasetStats and customize dataset with these data utility functions.
-keywords: YOLOv4, Object Detection, Computer Vision, Deep Learning, Convolutional Neural Network, CNN, Ultralytics Docs
----
-
-## HUBDatasetStats
----
-### ::: ultralytics.yolo.data.utils.HUBDatasetStats
-
-
-## img2label_paths
----
-### ::: ultralytics.yolo.data.utils.img2label_paths
-
-
-## get_hash
----
-### ::: ultralytics.yolo.data.utils.get_hash
-
-
-## exif_size
----
-### ::: ultralytics.yolo.data.utils.exif_size
-
-
-## verify_image_label
----
-### ::: ultralytics.yolo.data.utils.verify_image_label
-
-
-## polygon2mask
----
-### ::: ultralytics.yolo.data.utils.polygon2mask
-
-
-## polygons2masks
----
-### ::: ultralytics.yolo.data.utils.polygons2masks
-
-
-## polygons2masks_overlap
----
-### ::: ultralytics.yolo.data.utils.polygons2masks_overlap
-
-
-## check_det_dataset
----
-### ::: ultralytics.yolo.data.utils.check_det_dataset
-
-
-## check_cls_dataset
----
-### ::: ultralytics.yolo.data.utils.check_cls_dataset
-
-
-## compress_one_image
----
-### ::: ultralytics.yolo.data.utils.compress_one_image
-
-
-## delete_dsstore
----
-### ::: ultralytics.yolo.data.utils.delete_dsstore
-
-
-## zip_directory
----
-### ::: ultralytics.yolo.data.utils.zip_directory
-
diff --git a/docs/reference/yolo/engine/exporter.md b/docs/reference/yolo/engine/exporter.md
deleted file mode 100644
index 37189d46f..000000000
--- a/docs/reference/yolo/engine/exporter.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-description: Learn how to export your YOLO model in various formats using Ultralytics' exporter package - iOS, GDC, and more.
-keywords: Ultralytics, YOLO, exporter, iOS detect model, gd_outputs, export
----
-
-## Exporter
----
-### ::: ultralytics.yolo.engine.exporter.Exporter
-
-
-## iOSDetectModel
----
-### ::: ultralytics.yolo.engine.exporter.iOSDetectModel
-
-
-## export_formats
----
-### ::: ultralytics.yolo.engine.exporter.export_formats
-
-
-## gd_outputs
----
-### ::: ultralytics.yolo.engine.exporter.gd_outputs
-
-
-## try_export
----
-### ::: ultralytics.yolo.engine.exporter.try_export
-
-
-## export
----
-### ::: ultralytics.yolo.engine.exporter.export
-
diff --git a/docs/reference/yolo/engine/model.md b/docs/reference/yolo/engine/model.md
deleted file mode 100644
index 9ba793280..000000000
--- a/docs/reference/yolo/engine/model.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: Discover the YOLO model of Ultralytics engine to simplify your object detection tasks with state-of-the-art models.
-keywords: YOLO, object detection, model, architecture, usage, customization, Ultralytics Docs
----
-
-## YOLO
----
-### ::: ultralytics.yolo.engine.model.YOLO
-
diff --git a/docs/reference/yolo/engine/predictor.md b/docs/reference/yolo/engine/predictor.md
deleted file mode 100644
index f4aed3c06..000000000
--- a/docs/reference/yolo/engine/predictor.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: '"The BasePredictor class in Ultralytics YOLO Engine predicts object detection in images and videos. Learn to implement YOLO with ease."'
-keywords: Ultralytics, YOLO, BasePredictor, Object Detection, Computer Vision, Fast Model, Insights
----
-
-## BasePredictor
----
-### ::: ultralytics.yolo.engine.predictor.BasePredictor
-
diff --git a/docs/reference/yolo/engine/results.md b/docs/reference/yolo/engine/results.md
deleted file mode 100644
index 38e568fa6..000000000
--- a/docs/reference/yolo/engine/results.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-description: Learn about BaseTensor & Boxes in Ultralytics YOLO Engine. Check out Ultralytics Docs for quality tutorials and resources on object detection.
-keywords: YOLO, Engine, Results, Masks, Probs, Ultralytics
----
-
-## BaseTensor
----
-### ::: ultralytics.yolo.engine.results.BaseTensor
-
-
-## Results
----
-### ::: ultralytics.yolo.engine.results.Results
-
-
-## Boxes
----
-### ::: ultralytics.yolo.engine.results.Boxes
-
-
-## Masks
----
-### ::: ultralytics.yolo.engine.results.Masks
-
-
-## Keypoints
----
-### ::: ultralytics.yolo.engine.results.Keypoints
-
-
-## Probs
----
-### ::: ultralytics.yolo.engine.results.Probs
-
diff --git a/docs/reference/yolo/engine/trainer.md b/docs/reference/yolo/engine/trainer.md
deleted file mode 100644
index e9a06e45b..000000000
--- a/docs/reference/yolo/engine/trainer.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: Train faster with mixed precision. Learn how to use BaseTrainer with Advanced Mixed Precision to optimize YOLOv3 and YOLOv4 models.
-keywords: Ultralytics YOLO, BaseTrainer, object detection models, training guide
----
-
-## BaseTrainer
----
-### ::: ultralytics.yolo.engine.trainer.BaseTrainer
-
diff --git a/docs/reference/yolo/engine/validator.md b/docs/reference/yolo/engine/validator.md
deleted file mode 100644
index c9355ea18..000000000
--- a/docs/reference/yolo/engine/validator.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: Ensure YOLOv5 models meet constraints and standards with the BaseValidator class. Learn how to use it here.
-keywords: Ultralytics, YOLO, BaseValidator, models, validation, object detection
----
-
-## BaseValidator
----
-### ::: ultralytics.yolo.engine.validator.BaseValidator
-
diff --git a/docs/reference/yolo/fastsam/model.md b/docs/reference/yolo/fastsam/model.md
deleted file mode 100644
index b84e9946a..000000000
--- a/docs/reference/yolo/fastsam/model.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: Learn how to use FastSAM in Ultralytics YOLO to improve object detection accuracy and speed.
-keywords: FastSAM, object detection, accuracy, speed, Ultralytics YOLO
----
-
-## FastSAM
----
-### ::: ultralytics.yolo.fastsam.model.FastSAM
-
diff --git a/docs/reference/yolo/fastsam/predict.md b/docs/reference/yolo/fastsam/predict.md
deleted file mode 100644
index 377ae25a5..000000000
--- a/docs/reference/yolo/fastsam/predict.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: FastSAMPredictor API reference and usage guide for the Ultralytics YOLO object detection library.
-keywords: FastSAMPredictor, API, reference, usage, guide, Ultralytics, YOLO, object detection, library
----
-
-## FastSAMPredictor
----
-### ::: ultralytics.yolo.fastsam.predict.FastSAMPredictor
-
diff --git a/docs/reference/yolo/fastsam/prompt.md b/docs/reference/yolo/fastsam/prompt.md
deleted file mode 100644
index e6fdc6dbf..000000000
--- a/docs/reference/yolo/fastsam/prompt.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: Learn how to use FastSAMPrompt in Ultralytics YOLO for fast and efficient object detection and tracking.
-keywords: FastSAMPrompt, Ultralytics YOLO, object detection, tracking, fast, efficient
----
-
-## FastSAMPrompt
----
-### ::: ultralytics.yolo.fastsam.prompt.FastSAMPrompt
-
diff --git a/docs/reference/yolo/fastsam/utils.md b/docs/reference/yolo/fastsam/utils.md
deleted file mode 100644
index 6031bbc8e..000000000
--- a/docs/reference/yolo/fastsam/utils.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Learn how to adjust bounding boxes to the image border in Ultralytics YOLO framework. Improve object detection accuracy by accounting for image borders.
-keywords: adjust_bboxes_to_image_border, Ultralytics YOLO, object detection, bounding boxes, image border
----
-
-## adjust_bboxes_to_image_border
----
-### ::: ultralytics.yolo.fastsam.utils.adjust_bboxes_to_image_border
-
-
-## bbox_iou
----
-### ::: ultralytics.yolo.fastsam.utils.bbox_iou
-
diff --git a/docs/reference/yolo/fastsam/val.md b/docs/reference/yolo/fastsam/val.md
deleted file mode 100644
index e4bb5476c..000000000
--- a/docs/reference/yolo/fastsam/val.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: Learn about the FastSAMValidator module in Ultralytics YOLO. Validate and evaluate Segment Anything Model (SAM) datasets for object detection models with ease.
-keywords: FastSAMValidator, Ultralytics YOLO, SAM datasets, object detection, validation, evaluation
----
-
-## FastSAMValidator
----
-### ::: ultralytics.yolo.fastsam.val.FastSAMValidator
-
diff --git a/docs/reference/yolo/nas/model.md b/docs/reference/yolo/nas/model.md
deleted file mode 100644
index 9c3465969..000000000
--- a/docs/reference/yolo/nas/model.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: Learn about the Neural Architecture Search (NAS) feature available in Ultralytics YOLO. Find out how NAS can improve object detection models and increase accuracy. Get started today!
-keywords: Ultralytics YOLO, object detection, NAS, Neural Architecture Search, model optimization, accuracy improvement
----
-
-## NAS
----
-### ::: ultralytics.yolo.nas.model.NAS
-
diff --git a/docs/reference/yolo/nas/predict.md b/docs/reference/yolo/nas/predict.md
deleted file mode 100644
index fb2be3ffb..000000000
--- a/docs/reference/yolo/nas/predict.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: Learn how to use NASPredictor in Ultralytics YOLO for deploying efficient CNN models with search algorithms in neural architecture search.
-keywords: Ultralytics YOLO, NASPredictor, neural architecture search, efficient CNN models, search algorithms
----
-
-## NASPredictor
----
-### ::: ultralytics.yolo.nas.predict.NASPredictor
-
diff --git a/docs/reference/yolo/nas/val.md b/docs/reference/yolo/nas/val.md
deleted file mode 100644
index 804ced3cf..000000000
--- a/docs/reference/yolo/nas/val.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: Learn about NASValidator in the Ultralytics YOLO Docs. Properly validate YOLO neural architecture search results for optimal performance.
-keywords: NASValidator, YOLO, neural architecture search, validation, performance, Ultralytics
----
-
-## NASValidator
----
-### ::: ultralytics.yolo.nas.val.NASValidator
-
diff --git a/docs/reference/yolo/utils/__init__.md b/docs/reference/yolo/utils/__init__.md
deleted file mode 100644
index 77b658429..000000000
--- a/docs/reference/yolo/utils/__init__.md
+++ /dev/null
@@ -1,169 +0,0 @@
----
-description: Uncover utility functions in Ultralytics YOLO. Handle YAML, threading, logging, error-checking, and platform identification. Enhance your YOLO development process.
-keywords: Ultralytics, YOLO, utils, SimpleClass, IterableSimpleNamespace, EmojiFilter, TryExcept, plt_settings, set_logging, emojis, yaml_save, yaml_load, yaml_print, is_colab, is_kaggle, is_jupyter, is_docker, is_online, is_pip_package, is_dir_writeable, is_pytest_running, is_github_actions_ci, is_git_dir, get_git_dir, get_git_origin_url, get_git_branch, get_default_args, get_user_config_dir, colorstr, threaded, set_sentry, get_settings, set_settings, deprecation_warn, clean_url, url2file
----
-
-## SimpleClass
----
-### ::: ultralytics.yolo.utils.SimpleClass
-
-
-## IterableSimpleNamespace
----
-### ::: ultralytics.yolo.utils.IterableSimpleNamespace
-
-
-## EmojiFilter
----
-### ::: ultralytics.yolo.utils.EmojiFilter
-
-
-## TryExcept
----
-### ::: ultralytics.yolo.utils.TryExcept
-
-
-## plt_settings
----
-### ::: ultralytics.yolo.utils.plt_settings
-
-
-## set_logging
----
-### ::: ultralytics.yolo.utils.set_logging
-
-
-## emojis
----
-### ::: ultralytics.yolo.utils.emojis
-
-
-## yaml_save
----
-### ::: ultralytics.yolo.utils.yaml_save
-
-
-## yaml_load
----
-### ::: ultralytics.yolo.utils.yaml_load
-
-
-## yaml_print
----
-### ::: ultralytics.yolo.utils.yaml_print
-
-
-## is_colab
----
-### ::: ultralytics.yolo.utils.is_colab
-
-
-## is_kaggle
----
-### ::: ultralytics.yolo.utils.is_kaggle
-
-
-## is_jupyter
----
-### ::: ultralytics.yolo.utils.is_jupyter
-
-
-## is_docker
----
-### ::: ultralytics.yolo.utils.is_docker
-
-
-## is_online
----
-### ::: ultralytics.yolo.utils.is_online
-
-
-## is_pip_package
----
-### ::: ultralytics.yolo.utils.is_pip_package
-
-
-## is_dir_writeable
----
-### ::: ultralytics.yolo.utils.is_dir_writeable
-
-
-## is_pytest_running
----
-### ::: ultralytics.yolo.utils.is_pytest_running
-
-
-## is_github_actions_ci
----
-### ::: ultralytics.yolo.utils.is_github_actions_ci
-
-
-## is_git_dir
----
-### ::: ultralytics.yolo.utils.is_git_dir
-
-
-## get_git_dir
----
-### ::: ultralytics.yolo.utils.get_git_dir
-
-
-## get_git_origin_url
----
-### ::: ultralytics.yolo.utils.get_git_origin_url
-
-
-## get_git_branch
----
-### ::: ultralytics.yolo.utils.get_git_branch
-
-
-## get_default_args
----
-### ::: ultralytics.yolo.utils.get_default_args
-
-
-## get_user_config_dir
----
-### ::: ultralytics.yolo.utils.get_user_config_dir
-
-
-## colorstr
----
-### ::: ultralytics.yolo.utils.colorstr
-
-
-## threaded
----
-### ::: ultralytics.yolo.utils.threaded
-
-
-## set_sentry
----
-### ::: ultralytics.yolo.utils.set_sentry
-
-
-## get_settings
----
-### ::: ultralytics.yolo.utils.get_settings
-
-
-## set_settings
----
-### ::: ultralytics.yolo.utils.set_settings
-
-
-## deprecation_warn
----
-### ::: ultralytics.yolo.utils.deprecation_warn
-
-
-## clean_url
----
-### ::: ultralytics.yolo.utils.clean_url
-
-
-## url2file
----
-### ::: ultralytics.yolo.utils.url2file
-
diff --git a/docs/reference/yolo/utils/autobatch.md b/docs/reference/yolo/utils/autobatch.md
deleted file mode 100644
index fe9ba660f..000000000
--- a/docs/reference/yolo/utils/autobatch.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Dynamically adjusts batch size to optimize GPU memory usage during training. Learn how to use check_train_batch_size with Ultralytics YOLO.
-keywords: YOLOv5, batch size, training, Ultralytics Autobatch, object detection, model performance
----
-
-## check_train_batch_size
----
-### ::: ultralytics.yolo.utils.autobatch.check_train_batch_size
-
-
-## autobatch
----
-### ::: ultralytics.yolo.utils.autobatch.autobatch
-
diff --git a/docs/reference/yolo/utils/benchmarks.md b/docs/reference/yolo/utils/benchmarks.md
deleted file mode 100644
index 7c3750209..000000000
--- a/docs/reference/yolo/utils/benchmarks.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Improve your YOLO's performance and measure its speed. Benchmark utility for YOLOv5.
-keywords: Ultralytics YOLO, ProfileModels, benchmark, model inference, detection
----
-
-## ProfileModels
----
-### ::: ultralytics.yolo.utils.benchmarks.ProfileModels
-
-
-## benchmark
----
-### ::: ultralytics.yolo.utils.benchmarks.benchmark
-
diff --git a/docs/reference/yolo/utils/callbacks/base.md b/docs/reference/yolo/utils/callbacks/base.md
deleted file mode 100644
index bdb3ae154..000000000
--- a/docs/reference/yolo/utils/callbacks/base.md
+++ /dev/null
@@ -1,139 +0,0 @@
----
-description: Learn about YOLO's callback functions from on_train_start to add_integration_callbacks. See how these callbacks modify and save models.
-keywords: YOLO, Ultralytics, callbacks, object detection, training, inference
----
-
-## on_pretrain_routine_start
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_pretrain_routine_start
-
-
-## on_pretrain_routine_end
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_pretrain_routine_end
-
-
-## on_train_start
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_train_start
-
-
-## on_train_epoch_start
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_train_epoch_start
-
-
-## on_train_batch_start
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_train_batch_start
-
-
-## optimizer_step
----
-### ::: ultralytics.yolo.utils.callbacks.base.optimizer_step
-
-
-## on_before_zero_grad
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_before_zero_grad
-
-
-## on_train_batch_end
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_train_batch_end
-
-
-## on_train_epoch_end
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_train_epoch_end
-
-
-## on_fit_epoch_end
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_fit_epoch_end
-
-
-## on_model_save
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_model_save
-
-
-## on_train_end
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_train_end
-
-
-## on_params_update
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_params_update
-
-
-## teardown
----
-### ::: ultralytics.yolo.utils.callbacks.base.teardown
-
-
-## on_val_start
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_val_start
-
-
-## on_val_batch_start
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_val_batch_start
-
-
-## on_val_batch_end
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_val_batch_end
-
-
-## on_val_end
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_val_end
-
-
-## on_predict_start
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_predict_start
-
-
-## on_predict_batch_start
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_predict_batch_start
-
-
-## on_predict_batch_end
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_predict_batch_end
-
-
-## on_predict_postprocess_end
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_predict_postprocess_end
-
-
-## on_predict_end
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_predict_end
-
-
-## on_export_start
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_export_start
-
-
-## on_export_end
----
-### ::: ultralytics.yolo.utils.callbacks.base.on_export_end
-
-
-## get_default_callbacks
----
-### ::: ultralytics.yolo.utils.callbacks.base.get_default_callbacks
-
-
-## add_integration_callbacks
----
-### ::: ultralytics.yolo.utils.callbacks.base.add_integration_callbacks
-
diff --git a/docs/reference/yolo/utils/callbacks/clearml.md b/docs/reference/yolo/utils/callbacks/clearml.md
deleted file mode 100644
index 54b8bc6af..000000000
--- a/docs/reference/yolo/utils/callbacks/clearml.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-description: Improve your YOLOv5 model training with callbacks from ClearML. Learn about log debug samples, pre-training routines, validation and more.
-keywords: Ultralytics YOLO, callbacks, log plots, epoch monitoring, training end events
----
-
-## _log_debug_samples
----
-### ::: ultralytics.yolo.utils.callbacks.clearml._log_debug_samples
-
-
-## _log_plot
----
-### ::: ultralytics.yolo.utils.callbacks.clearml._log_plot
-
-
-## on_pretrain_routine_start
----
-### ::: ultralytics.yolo.utils.callbacks.clearml.on_pretrain_routine_start
-
-
-## on_train_epoch_end
----
-### ::: ultralytics.yolo.utils.callbacks.clearml.on_train_epoch_end
-
-
-## on_fit_epoch_end
----
-### ::: ultralytics.yolo.utils.callbacks.clearml.on_fit_epoch_end
-
-
-## on_val_end
----
-### ::: ultralytics.yolo.utils.callbacks.clearml.on_val_end
-
-
-## on_train_end
----
-### ::: ultralytics.yolo.utils.callbacks.clearml.on_train_end
-
diff --git a/docs/reference/yolo/utils/callbacks/comet.md b/docs/reference/yolo/utils/callbacks/comet.md
deleted file mode 100644
index 1d4519195..000000000
--- a/docs/reference/yolo/utils/callbacks/comet.md
+++ /dev/null
@@ -1,124 +0,0 @@
----
-description: Learn about YOLO callbacks using the Comet.ml platform, enhancing object detection training and testing with custom logging and visualizations.
-keywords: Ultralytics, YOLO, callbacks, Comet ML, log images, log predictions, log plots, fetch metadata, fetch annotations, create experiment data, format experiment data
----
-
-## _get_comet_mode
----
-### ::: ultralytics.yolo.utils.callbacks.comet._get_comet_mode
-
-
-## _get_comet_model_name
----
-### ::: ultralytics.yolo.utils.callbacks.comet._get_comet_model_name
-
-
-## _get_eval_batch_logging_interval
----
-### ::: ultralytics.yolo.utils.callbacks.comet._get_eval_batch_logging_interval
-
-
-## _get_max_image_predictions_to_log
----
-### ::: ultralytics.yolo.utils.callbacks.comet._get_max_image_predictions_to_log
-
-
-## _scale_confidence_score
----
-### ::: ultralytics.yolo.utils.callbacks.comet._scale_confidence_score
-
-
-## _should_log_confusion_matrix
----
-### ::: ultralytics.yolo.utils.callbacks.comet._should_log_confusion_matrix
-
-
-## _should_log_image_predictions
----
-### ::: ultralytics.yolo.utils.callbacks.comet._should_log_image_predictions
-
-
-## _get_experiment_type
----
-### ::: ultralytics.yolo.utils.callbacks.comet._get_experiment_type
-
-
-## _create_experiment
----
-### ::: ultralytics.yolo.utils.callbacks.comet._create_experiment
-
-
-## _fetch_trainer_metadata
----
-### ::: ultralytics.yolo.utils.callbacks.comet._fetch_trainer_metadata
-
-
-## _scale_bounding_box_to_original_image_shape
----
-### ::: ultralytics.yolo.utils.callbacks.comet._scale_bounding_box_to_original_image_shape
-
-
-## _format_ground_truth_annotations_for_detection
----
-### ::: ultralytics.yolo.utils.callbacks.comet._format_ground_truth_annotations_for_detection
-
-
-## _format_prediction_annotations_for_detection
----
-### ::: ultralytics.yolo.utils.callbacks.comet._format_prediction_annotations_for_detection
-
-
-## _fetch_annotations
----
-### ::: ultralytics.yolo.utils.callbacks.comet._fetch_annotations
-
-
-## _create_prediction_metadata_map
----
-### ::: ultralytics.yolo.utils.callbacks.comet._create_prediction_metadata_map
-
-
-## _log_confusion_matrix
----
-### ::: ultralytics.yolo.utils.callbacks.comet._log_confusion_matrix
-
-
-## _log_images
----
-### ::: ultralytics.yolo.utils.callbacks.comet._log_images
-
-
-## _log_image_predictions
----
-### ::: ultralytics.yolo.utils.callbacks.comet._log_image_predictions
-
-
-## _log_plots
----
-### ::: ultralytics.yolo.utils.callbacks.comet._log_plots
-
-
-## _log_model
----
-### ::: ultralytics.yolo.utils.callbacks.comet._log_model
-
-
-## on_pretrain_routine_start
----
-### ::: ultralytics.yolo.utils.callbacks.comet.on_pretrain_routine_start
-
-
-## on_train_epoch_end
----
-### ::: ultralytics.yolo.utils.callbacks.comet.on_train_epoch_end
-
-
-## on_fit_epoch_end
----
-### ::: ultralytics.yolo.utils.callbacks.comet.on_fit_epoch_end
-
-
-## on_train_end
----
-### ::: ultralytics.yolo.utils.callbacks.comet.on_train_end
-
diff --git a/docs/reference/yolo/utils/callbacks/dvc.md b/docs/reference/yolo/utils/callbacks/dvc.md
deleted file mode 100644
index b32fc7a47..000000000
--- a/docs/reference/yolo/utils/callbacks/dvc.md
+++ /dev/null
@@ -1,54 +0,0 @@
----
-description: Explore Ultralytics YOLO Utils DVC Callbacks such as logging images, plots, confusion matrices, and training progress.
-keywords: Ultralytics, YOLO, Utils, DVC, Callbacks, images, plots, confusion matrices, training progress
----
-
-## _logger_disabled
----
-### ::: ultralytics.yolo.utils.callbacks.dvc._logger_disabled
-
-
-## _log_images
----
-### ::: ultralytics.yolo.utils.callbacks.dvc._log_images
-
-
-## _log_plots
----
-### ::: ultralytics.yolo.utils.callbacks.dvc._log_plots
-
-
-## _log_confusion_matrix
----
-### ::: ultralytics.yolo.utils.callbacks.dvc._log_confusion_matrix
-
-
-## on_pretrain_routine_start
----
-### ::: ultralytics.yolo.utils.callbacks.dvc.on_pretrain_routine_start
-
-
-## on_pretrain_routine_end
----
-### ::: ultralytics.yolo.utils.callbacks.dvc.on_pretrain_routine_end
-
-
-## on_train_start
----
-### ::: ultralytics.yolo.utils.callbacks.dvc.on_train_start
-
-
-## on_train_epoch_start
----
-### ::: ultralytics.yolo.utils.callbacks.dvc.on_train_epoch_start
-
-
-## on_fit_epoch_end
----
-### ::: ultralytics.yolo.utils.callbacks.dvc.on_fit_epoch_end
-
-
-## on_train_end
----
-### ::: ultralytics.yolo.utils.callbacks.dvc.on_train_end
-
diff --git a/docs/reference/yolo/utils/callbacks/hub.md b/docs/reference/yolo/utils/callbacks/hub.md
deleted file mode 100644
index 7b2419982..000000000
--- a/docs/reference/yolo/utils/callbacks/hub.md
+++ /dev/null
@@ -1,44 +0,0 @@
----
-description: Improve YOLOv5 model training with Ultralytics' on-train callbacks. Boost performance on-pretrain-routine-end, model-save, train/predict start.
-keywords: Ultralytics, YOLO, callbacks, on_pretrain_routine_end, on_fit_epoch_end, on_train_start, on_val_start, on_predict_start, on_export_start
----
-
-## on_pretrain_routine_end
----
-### ::: ultralytics.yolo.utils.callbacks.hub.on_pretrain_routine_end
-
-
-## on_fit_epoch_end
----
-### ::: ultralytics.yolo.utils.callbacks.hub.on_fit_epoch_end
-
-
-## on_model_save
----
-### ::: ultralytics.yolo.utils.callbacks.hub.on_model_save
-
-
-## on_train_end
----
-### ::: ultralytics.yolo.utils.callbacks.hub.on_train_end
-
-
-## on_train_start
----
-### ::: ultralytics.yolo.utils.callbacks.hub.on_train_start
-
-
-## on_val_start
----
-### ::: ultralytics.yolo.utils.callbacks.hub.on_val_start
-
-
-## on_predict_start
----
-### ::: ultralytics.yolo.utils.callbacks.hub.on_predict_start
-
-
-## on_export_start
----
-### ::: ultralytics.yolo.utils.callbacks.hub.on_export_start
-
diff --git a/docs/reference/yolo/utils/callbacks/mlflow.md b/docs/reference/yolo/utils/callbacks/mlflow.md
deleted file mode 100644
index 9d69d0fd7..000000000
--- a/docs/reference/yolo/utils/callbacks/mlflow.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-description: Track model performance and metrics with MLflow in YOLOv5. Use callbacks like on_pretrain_routine_end or on_train_end to log information.
-keywords: Ultralytics, YOLO, Utils, MLflow, callbacks, on_pretrain_routine_end, on_train_end, Tracking, Model Management, training
----
-
-## on_pretrain_routine_end
----
-### ::: ultralytics.yolo.utils.callbacks.mlflow.on_pretrain_routine_end
-
-
-## on_fit_epoch_end
----
-### ::: ultralytics.yolo.utils.callbacks.mlflow.on_fit_epoch_end
-
-
-## on_train_end
----
-### ::: ultralytics.yolo.utils.callbacks.mlflow.on_train_end
-
diff --git a/docs/reference/yolo/utils/callbacks/neptune.md b/docs/reference/yolo/utils/callbacks/neptune.md
deleted file mode 100644
index cadbfb515..000000000
--- a/docs/reference/yolo/utils/callbacks/neptune.md
+++ /dev/null
@@ -1,44 +0,0 @@
----
-description: Improve YOLOv5 training with Neptune, a powerful logging tool. Track metrics like images, plots, and epochs for better model performance.
-keywords: Ultralytics, YOLO, Neptune, Callbacks, log scalars, log images, log plots, training, validation
----
-
-## _log_scalars
----
-### ::: ultralytics.yolo.utils.callbacks.neptune._log_scalars
-
-
-## _log_images
----
-### ::: ultralytics.yolo.utils.callbacks.neptune._log_images
-
-
-## _log_plot
----
-### ::: ultralytics.yolo.utils.callbacks.neptune._log_plot
-
-
-## on_pretrain_routine_start
----
-### ::: ultralytics.yolo.utils.callbacks.neptune.on_pretrain_routine_start
-
-
-## on_train_epoch_end
----
-### ::: ultralytics.yolo.utils.callbacks.neptune.on_train_epoch_end
-
-
-## on_fit_epoch_end
----
-### ::: ultralytics.yolo.utils.callbacks.neptune.on_fit_epoch_end
-
-
-## on_val_end
----
-### ::: ultralytics.yolo.utils.callbacks.neptune.on_val_end
-
-
-## on_train_end
----
-### ::: ultralytics.yolo.utils.callbacks.neptune.on_train_end
-
diff --git a/docs/reference/yolo/utils/callbacks/raytune.md b/docs/reference/yolo/utils/callbacks/raytune.md
deleted file mode 100644
index 2771c2e22..000000000
--- a/docs/reference/yolo/utils/callbacks/raytune.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: '"Improve YOLO model performance with on_fit_epoch_end callback. Learn to integrate with Ray Tune for hyperparameter tuning. Ultralytics YOLO docs."'
-keywords: on_fit_epoch_end, Ultralytics YOLO, callback function, training, model tuning
----
-
-## on_fit_epoch_end
----
-### ::: ultralytics.yolo.utils.callbacks.raytune.on_fit_epoch_end
-
diff --git a/docs/reference/yolo/utils/callbacks/tensorboard.md b/docs/reference/yolo/utils/callbacks/tensorboard.md
deleted file mode 100644
index b5eea1be3..000000000
--- a/docs/reference/yolo/utils/callbacks/tensorboard.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-description: Learn how to monitor the training process with Tensorboard using Ultralytics YOLO's "_log_scalars" and "on_batch_end" methods.
-keywords: TensorBoard callbacks, YOLO training, ultralytics YOLO
----
-
-## _log_scalars
----
-### ::: ultralytics.yolo.utils.callbacks.tensorboard._log_scalars
-
-
-## on_pretrain_routine_start
----
-### ::: ultralytics.yolo.utils.callbacks.tensorboard.on_pretrain_routine_start
-
-
-## on_batch_end
----
-### ::: ultralytics.yolo.utils.callbacks.tensorboard.on_batch_end
-
-
-## on_fit_epoch_end
----
-### ::: ultralytics.yolo.utils.callbacks.tensorboard.on_fit_epoch_end
-
diff --git a/docs/reference/yolo/utils/callbacks/wb.md b/docs/reference/yolo/utils/callbacks/wb.md
deleted file mode 100644
index 1116adc89..000000000
--- a/docs/reference/yolo/utils/callbacks/wb.md
+++ /dev/null
@@ -1,29 +0,0 @@
----
-description: Learn how to use Ultralytics YOLO's built-in callbacks `on_pretrain_routine_start` and `on_train_epoch_end` for improved training performance.
-keywords: Ultralytics, YOLO, callbacks, weights, biases, training
----
-
-## _log_plots
----
-### ::: ultralytics.yolo.utils.callbacks.wb._log_plots
-
-
-## on_pretrain_routine_start
----
-### ::: ultralytics.yolo.utils.callbacks.wb.on_pretrain_routine_start
-
-
-## on_fit_epoch_end
----
-### ::: ultralytics.yolo.utils.callbacks.wb.on_fit_epoch_end
-
-
-## on_train_epoch_end
----
-### ::: ultralytics.yolo.utils.callbacks.wb.on_train_epoch_end
-
-
-## on_train_end
----
-### ::: ultralytics.yolo.utils.callbacks.wb.on_train_end
-
diff --git a/docs/reference/yolo/utils/checks.md b/docs/reference/yolo/utils/checks.md
deleted file mode 100644
index 48fd0bd94..000000000
--- a/docs/reference/yolo/utils/checks.md
+++ /dev/null
@@ -1,94 +0,0 @@
----
-description: 'Check functions for YOLO utils: image size, version, font, requirements, filename suffix, YAML file, YOLO, and Git version.'
-keywords: YOLO, Ultralytics, Utils, Checks, image sizing, version updates, font compatibility, Python requirements, file suffixes, YAML syntax, image showing, AMP
----
-
-## is_ascii
----
-### ::: ultralytics.yolo.utils.checks.is_ascii
-
-
-## check_imgsz
----
-### ::: ultralytics.yolo.utils.checks.check_imgsz
-
-
-## check_version
----
-### ::: ultralytics.yolo.utils.checks.check_version
-
-
-## check_latest_pypi_version
----
-### ::: ultralytics.yolo.utils.checks.check_latest_pypi_version
-
-
-## check_pip_update_available
----
-### ::: ultralytics.yolo.utils.checks.check_pip_update_available
-
-
-## check_font
----
-### ::: ultralytics.yolo.utils.checks.check_font
-
-
-## check_python
----
-### ::: ultralytics.yolo.utils.checks.check_python
-
-
-## check_requirements
----
-### ::: ultralytics.yolo.utils.checks.check_requirements
-
-
-## check_torchvision
----
-### ::: ultralytics.yolo.utils.checks.check_torchvision
-
-
-## check_suffix
----
-### ::: ultralytics.yolo.utils.checks.check_suffix
-
-
-## check_yolov5u_filename
----
-### ::: ultralytics.yolo.utils.checks.check_yolov5u_filename
-
-
-## check_file
----
-### ::: ultralytics.yolo.utils.checks.check_file
-
-
-## check_yaml
----
-### ::: ultralytics.yolo.utils.checks.check_yaml
-
-
-## check_imshow
----
-### ::: ultralytics.yolo.utils.checks.check_imshow
-
-
-## check_yolo
----
-### ::: ultralytics.yolo.utils.checks.check_yolo
-
-
-## check_amp
----
-### ::: ultralytics.yolo.utils.checks.check_amp
-
-
-## git_describe
----
-### ::: ultralytics.yolo.utils.checks.git_describe
-
-
-## print_args
----
-### ::: ultralytics.yolo.utils.checks.print_args
-
diff --git a/docs/reference/yolo/utils/dist.md b/docs/reference/yolo/utils/dist.md
deleted file mode 100644
index 3e5033eba..000000000
--- a/docs/reference/yolo/utils/dist.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-description: Learn how to find free network port and generate DDP (Distributed Data Parallel) command in Ultralytics YOLO with easy examples.
-keywords: ultralytics, YOLO, utils, dist, distributed deep learning, DDP file, DDP cleanup
----
-
-## find_free_network_port
----
-### ::: ultralytics.yolo.utils.dist.find_free_network_port
-
-
-## generate_ddp_file
----
-### ::: ultralytics.yolo.utils.dist.generate_ddp_file
-
-
-## generate_ddp_command
----
-### ::: ultralytics.yolo.utils.dist.generate_ddp_command
-
-
-## ddp_cleanup
----
-### ::: ultralytics.yolo.utils.dist.ddp_cleanup
-
diff --git a/docs/reference/yolo/utils/downloads.md b/docs/reference/yolo/utils/downloads.md
deleted file mode 100644
index 3e06f8f34..000000000
--- a/docs/reference/yolo/utils/downloads.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-description: Download and unzip YOLO pretrained models. Ultralytics YOLO docs utils.downloads.unzip_file, checks disk space, downloads and attempts assets.
-keywords: Ultralytics YOLO, downloads, trained models, datasets, weights, deep learning, computer vision
----
-
-## is_url
----
-### ::: ultralytics.yolo.utils.downloads.is_url
-
-
-## unzip_file
----
-### ::: ultralytics.yolo.utils.downloads.unzip_file
-
-
-## check_disk_space
----
-### ::: ultralytics.yolo.utils.downloads.check_disk_space
-
-
-## safe_download
----
-### ::: ultralytics.yolo.utils.downloads.safe_download
-
-
-## get_github_assets
----
-### ::: ultralytics.yolo.utils.downloads.get_github_assets
-
-
-## attempt_download_asset
----
-### ::: ultralytics.yolo.utils.downloads.attempt_download_asset
-
-
-## download
----
-### ::: ultralytics.yolo.utils.downloads.download
-
diff --git a/docs/reference/yolo/utils/errors.md b/docs/reference/yolo/utils/errors.md
deleted file mode 100644
index f3ddfc41f..000000000
--- a/docs/reference/yolo/utils/errors.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: Learn about HUBModelError in Ultralytics YOLO Docs. Resolve the error and get the most out of your YOLO model.
-keywords: HUBModelError, Ultralytics YOLO, YOLO Documentation, Object detection errors, YOLO Errors, HUBModelError Solutions
----
-
-## HUBModelError
----
-### ::: ultralytics.yolo.utils.errors.HUBModelError
-
diff --git a/docs/reference/yolo/utils/files.md b/docs/reference/yolo/utils/files.md
deleted file mode 100644
index bad47b43a..000000000
--- a/docs/reference/yolo/utils/files.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-description: 'Learn about Ultralytics YOLO files and directory utilities: WorkingDirectory, file_age, file_size, and make_dirs.'
-keywords: YOLO, object detection, file utils, file age, file size, working directory, make directories, Ultralytics Docs
----
-
-## WorkingDirectory
----
-### ::: ultralytics.yolo.utils.files.WorkingDirectory
-
-
-## increment_path
----
-### ::: ultralytics.yolo.utils.files.increment_path
-
-
-## file_age
----
-### ::: ultralytics.yolo.utils.files.file_age
-
-
-## file_date
----
-### ::: ultralytics.yolo.utils.files.file_date
-
-
-## file_size
----
-### ::: ultralytics.yolo.utils.files.file_size
-
-
-## get_latest_run
----
-### ::: ultralytics.yolo.utils.files.get_latest_run
-
-
-## make_dirs
----
-### ::: ultralytics.yolo.utils.files.make_dirs
-
diff --git a/docs/reference/yolo/utils/instance.md b/docs/reference/yolo/utils/instance.md
deleted file mode 100644
index 953fe222e..000000000
--- a/docs/reference/yolo/utils/instance.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-description: Learn about Bounding Boxes (Bboxes) and _ntuple in Ultralytics YOLO for object detection. Improve accuracy and speed with these powerful tools.
-keywords: Ultralytics, YOLO, Bboxes, _ntuple, object detection, instance segmentation
----
-
-## Bboxes
----
-### ::: ultralytics.yolo.utils.instance.Bboxes
-
-
-## Instances
----
-### ::: ultralytics.yolo.utils.instance.Instances
-
-
-## _ntuple
----
-### ::: ultralytics.yolo.utils.instance._ntuple
-
diff --git a/docs/reference/yolo/utils/loss.md b/docs/reference/yolo/utils/loss.md
deleted file mode 100644
index 7d89d6e5c..000000000
--- a/docs/reference/yolo/utils/loss.md
+++ /dev/null
@@ -1,44 +0,0 @@
----
-description: Learn about Varifocal Loss and Keypoint Loss in Ultralytics YOLO for advanced bounding box and pose estimation. Visit our docs for more.
-keywords: Ultralytics, YOLO, loss functions, object detection, keypoint detection, segmentation, classification
----
-
-## VarifocalLoss
----
-### ::: ultralytics.yolo.utils.loss.VarifocalLoss
-
-
-## FocalLoss
----
-### ::: ultralytics.yolo.utils.loss.FocalLoss
-
-
-## BboxLoss
----
-### ::: ultralytics.yolo.utils.loss.BboxLoss
-
-
-## KeypointLoss
----
-### ::: ultralytics.yolo.utils.loss.KeypointLoss
-
-
-## v8DetectionLoss
----
-### ::: ultralytics.yolo.utils.loss.v8DetectionLoss
-
-
-## v8SegmentationLoss
----
-### ::: ultralytics.yolo.utils.loss.v8SegmentationLoss
-
-
-## v8PoseLoss
----
-### ::: ultralytics.yolo.utils.loss.v8PoseLoss
-
-
-## v8ClassificationLoss
----
-### ::: ultralytics.yolo.utils.loss.v8ClassificationLoss
-
diff --git a/docs/reference/yolo/utils/metrics.md b/docs/reference/yolo/utils/metrics.md
deleted file mode 100644
index 204096dca..000000000
--- a/docs/reference/yolo/utils/metrics.md
+++ /dev/null
@@ -1,94 +0,0 @@
----
-description: Explore Ultralytics YOLO's FocalLoss, DetMetrics, PoseMetrics, ClassifyMetrics, and more with Ultralytics Metrics documentation.
-keywords: YOLOv5, metrics, losses, confusion matrix, detection metrics, pose metrics, classification metrics, intersection over area, intersection over union, keypoint intersection over union, average precision, per class average precision, Ultralytics Docs
----
-
-## ConfusionMatrix
----
-### ::: ultralytics.yolo.utils.metrics.ConfusionMatrix
-
-
-## Metric
----
-### ::: ultralytics.yolo.utils.metrics.Metric
-
-
-## DetMetrics
----
-### ::: ultralytics.yolo.utils.metrics.DetMetrics
-
-
-## SegmentMetrics
----
-### ::: ultralytics.yolo.utils.metrics.SegmentMetrics
-
-
-## PoseMetrics
----
-### ::: ultralytics.yolo.utils.metrics.PoseMetrics
-
-
-## ClassifyMetrics
----
-### ::: ultralytics.yolo.utils.metrics.ClassifyMetrics
-
-
-## box_area
----
-### ::: ultralytics.yolo.utils.metrics.box_area
-
-
-## bbox_ioa
----
-### ::: ultralytics.yolo.utils.metrics.bbox_ioa
-
-
-## box_iou
----
-### ::: ultralytics.yolo.utils.metrics.box_iou
-
-
-## bbox_iou
----
-### ::: ultralytics.yolo.utils.metrics.bbox_iou
-
-
-## mask_iou
----
-### ::: ultralytics.yolo.utils.metrics.mask_iou
-
-
-## kpt_iou
----
-### ::: ultralytics.yolo.utils.metrics.kpt_iou
-
-
-## smooth_BCE
----
-### ::: ultralytics.yolo.utils.metrics.smooth_BCE
-
-
-## smooth
----
-### ::: ultralytics.yolo.utils.metrics.smooth
-
-
-## plot_pr_curve
----
-### ::: ultralytics.yolo.utils.metrics.plot_pr_curve
-
-
-## plot_mc_curve
----
-### ::: ultralytics.yolo.utils.metrics.plot_mc_curve
-
-
-## compute_ap
----
-### ::: ultralytics.yolo.utils.metrics.compute_ap
-
-
-## ap_per_class
----
-### ::: ultralytics.yolo.utils.metrics.ap_per_class
-
diff --git a/docs/reference/yolo/utils/ops.md b/docs/reference/yolo/utils/ops.md
deleted file mode 100644
index ce4e4d593..000000000
--- a/docs/reference/yolo/utils/ops.md
+++ /dev/null
@@ -1,144 +0,0 @@
----
-description: Learn about various utility functions in Ultralytics YOLO, including x, y, width, height conversions, non-max suppression, and more.
-keywords: Ultralytics, YOLO, Utils Ops, Functions, coco80_to_coco91_class, scale_boxes, non_max_suppression, clip_coords, xyxy2xywh, xywhn2xyxy, xyn2xy, xyxy2ltwh, ltwh2xyxy, resample_segments, process_mask_upsample, process_mask_native, masks2segments, clean_str
----
-
-## Profile
----
-### ::: ultralytics.yolo.utils.ops.Profile
-
-
-## coco80_to_coco91_class
----
-### ::: ultralytics.yolo.utils.ops.coco80_to_coco91_class
-
-
-## segment2box
----
-### ::: ultralytics.yolo.utils.ops.segment2box
-
-
-## scale_boxes
----
-### ::: ultralytics.yolo.utils.ops.scale_boxes
-
-
-## make_divisible
----
-### ::: ultralytics.yolo.utils.ops.make_divisible
-
-
-## non_max_suppression
----
-### ::: ultralytics.yolo.utils.ops.non_max_suppression
-
-
-## clip_boxes
----
-### ::: ultralytics.yolo.utils.ops.clip_boxes
-
-
-## clip_coords
----
-### ::: ultralytics.yolo.utils.ops.clip_coords
-
-
-## scale_image
----
-### ::: ultralytics.yolo.utils.ops.scale_image
-
-
-## xyxy2xywh
----
-### ::: ultralytics.yolo.utils.ops.xyxy2xywh
-
-
-## xywh2xyxy
----
-### ::: ultralytics.yolo.utils.ops.xywh2xyxy
-
-
-## xywhn2xyxy
----
-### ::: ultralytics.yolo.utils.ops.xywhn2xyxy
-
-
-## xyxy2xywhn
----
-### ::: ultralytics.yolo.utils.ops.xyxy2xywhn
-
-
-## xyn2xy
----
-### ::: ultralytics.yolo.utils.ops.xyn2xy
-
-
-## xywh2ltwh
----
-### ::: ultralytics.yolo.utils.ops.xywh2ltwh
-
-
-## xyxy2ltwh
----
-### ::: ultralytics.yolo.utils.ops.xyxy2ltwh
-
-
-## ltwh2xywh
----
-### ::: ultralytics.yolo.utils.ops.ltwh2xywh
-
-
-## ltwh2xyxy
----
-### ::: ultralytics.yolo.utils.ops.ltwh2xyxy
-
-
-## segments2boxes
----
-### ::: ultralytics.yolo.utils.ops.segments2boxes
-
-
-## resample_segments
----
-### ::: ultralytics.yolo.utils.ops.resample_segments
-
-
-## crop_mask
----
-### ::: ultralytics.yolo.utils.ops.crop_mask
-
-
-## process_mask_upsample
----
-### ::: ultralytics.yolo.utils.ops.process_mask_upsample
-
-
-## process_mask
----
-### ::: ultralytics.yolo.utils.ops.process_mask
-
-
-## process_mask_native
----
-### ::: ultralytics.yolo.utils.ops.process_mask_native
-
-
-## scale_masks
----
-### ::: ultralytics.yolo.utils.ops.scale_masks
-
-
-## scale_coords
----
-### ::: ultralytics.yolo.utils.ops.scale_coords
-
-
-## masks2segments
----
-### ::: ultralytics.yolo.utils.ops.masks2segments
-
-
-## clean_str
----
-### ::: ultralytics.yolo.utils.ops.clean_str
-
diff --git a/docs/reference/yolo/utils/patches.md b/docs/reference/yolo/utils/patches.md
deleted file mode 100644
index 85ceefa32..000000000
--- a/docs/reference/yolo/utils/patches.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-description: Learn how to use the Ultralytics YOLO Utils package's imread and imshow functions. These functions are used for reading and writing image files. Try out our TorchSave feature today.
-keywords: imread, imshow, ultralytics, YOLO, image files, torchsave
----
-
-## imread
----
-### ::: ultralytics.yolo.utils.patches.imread
-
-
-## imwrite
----
-### ::: ultralytics.yolo.utils.patches.imwrite
-
-
-## imshow
----
-### ::: ultralytics.yolo.utils.patches.imshow
-
-
-## torch_save
----
-### ::: ultralytics.yolo.utils.patches.torch_save
-
diff --git a/docs/reference/yolo/utils/plotting.md b/docs/reference/yolo/utils/plotting.md
deleted file mode 100644
index 0fd0babd0..000000000
--- a/docs/reference/yolo/utils/plotting.md
+++ /dev/null
@@ -1,44 +0,0 @@
----
-description: 'Discover the power of YOLO''s plotting functions: Colors, Labels and Images. Code examples to output targets and visualize features. Check it now.'
-keywords: YOLO, object detection, plotting, visualization, annotator, save one box, plot results, feature visualization, Ultralytics
----
-
-## Colors
----
-### ::: ultralytics.yolo.utils.plotting.Colors
-
-
-## Annotator
----
-### ::: ultralytics.yolo.utils.plotting.Annotator
-
-
-## plot_labels
----
-### ::: ultralytics.yolo.utils.plotting.plot_labels
-
-
-## save_one_box
----
-### ::: ultralytics.yolo.utils.plotting.save_one_box
-
-
-## plot_images
----
-### ::: ultralytics.yolo.utils.plotting.plot_images
-
-
-## plot_results
----
-### ::: ultralytics.yolo.utils.plotting.plot_results
-
-
-## output_to_target
----
-### ::: ultralytics.yolo.utils.plotting.output_to_target
-
-
-## feature_visualization
----
-### ::: ultralytics.yolo.utils.plotting.feature_visualization
-
diff --git a/docs/reference/yolo/utils/tal.md b/docs/reference/yolo/utils/tal.md
deleted file mode 100644
index f5a665289..000000000
--- a/docs/reference/yolo/utils/tal.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-description: Improve your YOLO models with Ultralytics' TaskAlignedAssigner, select_highest_overlaps, and dist2bbox utilities. Streamline your workflow today.
-keywords: Ultralytics, YOLO, select_candidates_in_gts, make_anchors, bbox2dist, object detection, tracking
----
-
-## TaskAlignedAssigner
----
-### ::: ultralytics.yolo.utils.tal.TaskAlignedAssigner
-
-
-## select_candidates_in_gts
----
-### ::: ultralytics.yolo.utils.tal.select_candidates_in_gts
-
-
-## select_highest_overlaps
----
-### ::: ultralytics.yolo.utils.tal.select_highest_overlaps
-
-
-## make_anchors
----
-### ::: ultralytics.yolo.utils.tal.make_anchors
-
-
-## dist2bbox
----
-### ::: ultralytics.yolo.utils.tal.dist2bbox
-
-
-## bbox2dist
----
-### ::: ultralytics.yolo.utils.tal.bbox2dist
-
diff --git a/docs/reference/yolo/utils/torch_utils.md b/docs/reference/yolo/utils/torch_utils.md
deleted file mode 100644
index 6d24cac45..000000000
--- a/docs/reference/yolo/utils/torch_utils.md
+++ /dev/null
@@ -1,134 +0,0 @@
----
-description: Optimize your PyTorch models with Ultralytics YOLO's torch_utils functions such as ModelEMA, select_device, and is_parallel.
-keywords: Ultralytics YOLO, Torch, Utils, Pytorch, Object Detection
----
-
-## ModelEMA
----
-### ::: ultralytics.yolo.utils.torch_utils.ModelEMA
-
-
-## EarlyStopping
----
-### ::: ultralytics.yolo.utils.torch_utils.EarlyStopping
-
-
-## torch_distributed_zero_first
----
-### ::: ultralytics.yolo.utils.torch_utils.torch_distributed_zero_first
-
-
-## smart_inference_mode
----
-### ::: ultralytics.yolo.utils.torch_utils.smart_inference_mode
-
-
-## select_device
----
-### ::: ultralytics.yolo.utils.torch_utils.select_device
-
-
-## time_sync
----
-### ::: ultralytics.yolo.utils.torch_utils.time_sync
-
-
-## fuse_conv_and_bn
----
-### ::: ultralytics.yolo.utils.torch_utils.fuse_conv_and_bn
-
-
-## fuse_deconv_and_bn
----
-### ::: ultralytics.yolo.utils.torch_utils.fuse_deconv_and_bn
-
-
-## model_info
----
-### ::: ultralytics.yolo.utils.torch_utils.model_info
-
-
-## get_num_params
----
-### ::: ultralytics.yolo.utils.torch_utils.get_num_params
-
-
-## get_num_gradients
----
-### ::: ultralytics.yolo.utils.torch_utils.get_num_gradients
-
-
-## model_info_for_loggers
----
-### ::: ultralytics.yolo.utils.torch_utils.model_info_for_loggers
-
-
-## get_flops
----
-### ::: ultralytics.yolo.utils.torch_utils.get_flops
-
-
-## get_flops_with_torch_profiler
----
-### ::: ultralytics.yolo.utils.torch_utils.get_flops_with_torch_profiler
-
-
-## initialize_weights
----
-### ::: ultralytics.yolo.utils.torch_utils.initialize_weights
-
-
-## scale_img
----
-### ::: ultralytics.yolo.utils.torch_utils.scale_img
-
-
-## make_divisible
----
-### ::: ultralytics.yolo.utils.torch_utils.make_divisible
-
-
-## copy_attr
----
-### ::: ultralytics.yolo.utils.torch_utils.copy_attr
-
-
-## get_latest_opset
----
-### ::: ultralytics.yolo.utils.torch_utils.get_latest_opset
-
-
-## intersect_dicts
----
-### ::: ultralytics.yolo.utils.torch_utils.intersect_dicts
-
-
-## is_parallel
----
-### ::: ultralytics.yolo.utils.torch_utils.is_parallel
-
-
-## de_parallel
----
-### ::: ultralytics.yolo.utils.torch_utils.de_parallel
-
-
-## one_cycle
----
-### ::: ultralytics.yolo.utils.torch_utils.one_cycle
-
-
-## init_seeds
----
-### ::: ultralytics.yolo.utils.torch_utils.init_seeds
-
-
-## strip_optimizer
----
-### ::: ultralytics.yolo.utils.torch_utils.strip_optimizer
-
-
-## profile
----
-### ::: ultralytics.yolo.utils.torch_utils.profile
-
diff --git a/docs/reference/yolo/utils/tuner.md b/docs/reference/yolo/utils/tuner.md
deleted file mode 100644
index bf9300089..000000000
--- a/docs/reference/yolo/utils/tuner.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: Optimize YOLO models' hyperparameters with Ultralytics YOLO's `run_ray_tune` function using Ray Tune and ASHA scheduler.
-keywords: Ultralytics YOLO, Hyperparameter Tuning, Ray Tune, ASHAScheduler, Optimization, Object Detection
----
-
-## run_ray_tune
----
-### ::: ultralytics.yolo.utils.tuner.run_ray_tune
-
diff --git a/docs/reference/yolo/v8/classify/predict.md b/docs/reference/yolo/v8/classify/predict.md
deleted file mode 100644
index 83bd5d6bf..000000000
--- a/docs/reference/yolo/v8/classify/predict.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Learn how to use ClassificationPredictor in Ultralytics YOLOv8 for object classification tasks in a simple and efficient way.
-keywords: Ultralytics, YOLO, v8, Classify Predictor, object detection, classification, computer vision
----
-
-## ClassificationPredictor
----
-### ::: ultralytics.yolo.v8.classify.predict.ClassificationPredictor
-
-
-## predict
----
-### ::: ultralytics.yolo.v8.classify.predict.predict
-
diff --git a/docs/reference/yolo/v8/classify/train.md b/docs/reference/yolo/v8/classify/train.md
deleted file mode 100644
index f488eac15..000000000
--- a/docs/reference/yolo/v8/classify/train.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Train a custom image classification model using Ultralytics YOLOv8 with ClassificationTrainer. Boost accuracy and efficiency today.
-keywords: Ultralytics, YOLOv8, object detection, classification, training, API
----
-
-## ClassificationTrainer
----
-### ::: ultralytics.yolo.v8.classify.train.ClassificationTrainer
-
-
-## train
----
-### ::: ultralytics.yolo.v8.classify.train.train
-
diff --git a/docs/reference/yolo/v8/classify/val.md b/docs/reference/yolo/v8/classify/val.md
deleted file mode 100644
index 76fe5305a..000000000
--- a/docs/reference/yolo/v8/classify/val.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Ensure model classification accuracy with Ultralytics YOLO's ClassificationValidator. Validate and improve your model with ease.
-keywords: ClassificationValidator, Ultralytics YOLO, Validation, Data Science, Deep Learning
----
-
-## ClassificationValidator
----
-### ::: ultralytics.yolo.v8.classify.val.ClassificationValidator
-
-
-## val
----
-### ::: ultralytics.yolo.v8.classify.val.val
-
diff --git a/docs/reference/yolo/v8/detect/predict.md b/docs/reference/yolo/v8/detect/predict.md
deleted file mode 100644
index 63499d3c7..000000000
--- a/docs/reference/yolo/v8/detect/predict.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Detect and predict objects in images and videos using the Ultralytics YOLO v8 model with DetectionPredictor.
-keywords: detectionpredictor, ultralytics yolo, object detection, neural network, machine learning
----
-
-## DetectionPredictor
----
-### ::: ultralytics.yolo.v8.detect.predict.DetectionPredictor
-
-
-## predict
----
-### ::: ultralytics.yolo.v8.detect.predict.predict
-
diff --git a/docs/reference/yolo/v8/detect/train.md b/docs/reference/yolo/v8/detect/train.md
deleted file mode 100644
index 8cf2809b1..000000000
--- a/docs/reference/yolo/v8/detect/train.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Train and optimize custom object detection models with Ultralytics DetectionTrainer and train functions. Get started with YOLO v8 today.
-keywords: DetectionTrainer, Ultralytics YOLO, custom object detection, train models, AI applications
----
-
-## DetectionTrainer
----
-### ::: ultralytics.yolo.v8.detect.train.DetectionTrainer
-
-
-## train
----
-### ::: ultralytics.yolo.v8.detect.train.train
-
diff --git a/docs/reference/yolo/v8/detect/val.md b/docs/reference/yolo/v8/detect/val.md
deleted file mode 100644
index 06ef65f4f..000000000
--- a/docs/reference/yolo/v8/detect/val.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Validate YOLOv5 detections using this PyTorch module. Ensure model accuracy with NMS IOU threshold tuning and label mapping.
-keywords: detection, validator, YOLOv5, object detection, model improvement, Ultralytics Docs
----
-
-## DetectionValidator
----
-### ::: ultralytics.yolo.v8.detect.val.DetectionValidator
-
-
-## val
----
-### ::: ultralytics.yolo.v8.detect.val.val
-
diff --git a/docs/reference/yolo/v8/pose/predict.md b/docs/reference/yolo/v8/pose/predict.md
deleted file mode 100644
index f8ac26b30..000000000
--- a/docs/reference/yolo/v8/pose/predict.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Predict human pose coordinates and confidence scores using YOLOv5. Use on real-time video streams or static images.
-keywords: Ultralytics, YOLO, v8, documentation, PosePredictor, pose prediction, pose estimation, predict method
----
-
-## PosePredictor
----
-### ::: ultralytics.yolo.v8.pose.predict.PosePredictor
-
-
-## predict
----
-### ::: ultralytics.yolo.v8.pose.predict.predict
-
diff --git a/docs/reference/yolo/v8/pose/train.md b/docs/reference/yolo/v8/pose/train.md
deleted file mode 100644
index 8c988fe7b..000000000
--- a/docs/reference/yolo/v8/pose/train.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Boost posture detection using PoseTrainer and train models using train() API. Learn PoseLoss for ultra-fast and accurate pose detection with Ultralytics YOLO.
-keywords: PoseTrainer, human pose models, deep learning, computer vision, Ultralytics YOLO, v8
----
-
-## PoseTrainer
----
-### ::: ultralytics.yolo.v8.pose.train.PoseTrainer
-
-
-## train
----
-### ::: ultralytics.yolo.v8.pose.train.train
-
diff --git a/docs/reference/yolo/v8/pose/val.md b/docs/reference/yolo/v8/pose/val.md
deleted file mode 100644
index c26c255e3..000000000
--- a/docs/reference/yolo/v8/pose/val.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Ensure proper human poses in images with YOLOv8 Pose Validation, part of the Ultralytics YOLO v8 suite.
-keywords: PoseValidator, Ultralytics YOLO, object detection, pose analysis, validation
----
-
-## PoseValidator
----
-### ::: ultralytics.yolo.v8.pose.val.PoseValidator
-
-
-## val
----
-### ::: ultralytics.yolo.v8.pose.val.val
-
diff --git a/docs/reference/yolo/v8/segment/predict.md b/docs/reference/yolo/v8/segment/predict.md
deleted file mode 100644
index eadf463f6..000000000
--- a/docs/reference/yolo/v8/segment/predict.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: '"Use SegmentationPredictor in YOLOv8 for efficient object detection and segmentation. Explore Ultralytics YOLO Docs for more information."'
-keywords: Ultralytics YOLO, SegmentationPredictor, object detection, segmentation masks, predict
----
-
-## SegmentationPredictor
----
-### ::: ultralytics.yolo.v8.segment.predict.SegmentationPredictor
-
-
-## predict
----
-### ::: ultralytics.yolo.v8.segment.predict.predict
-
diff --git a/docs/reference/yolo/v8/segment/train.md b/docs/reference/yolo/v8/segment/train.md
deleted file mode 100644
index 20bcc91f8..000000000
--- a/docs/reference/yolo/v8/segment/train.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Learn about SegmentationTrainer and Train in Ultralytics YOLO v8 for efficient object detection models. Improve your training with Ultralytics Docs.
-keywords: SegmentationTrainer, Ultralytics YOLO, object detection, segmentation, train, tutorial, guide, code examples
----
-
-## SegmentationTrainer
----
-### ::: ultralytics.yolo.v8.segment.train.SegmentationTrainer
-
-
-## train
----
-### ::: ultralytics.yolo.v8.segment.train.train
-
diff --git a/docs/reference/yolo/v8/segment/val.md b/docs/reference/yolo/v8/segment/val.md
deleted file mode 100644
index 137302f88..000000000
--- a/docs/reference/yolo/v8/segment/val.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-description: Ensure segmentation quality on large datasets with SegmentationValidator. Review and visualize results with ease. Learn more at Ultralytics Docs.
-keywords: SegmentationValidator, YOLOv8, Ultralytics Docs, segmentation model, validation
----
-
-## SegmentationValidator
----
-### ::: ultralytics.yolo.v8.segment.val.SegmentationValidator
-
-
-## val
----
-### ::: ultralytics.yolo.v8.segment.val.val
-
diff --git a/docs/tasks/classify.md b/docs/tasks/classify.md
index 8d71093d8..d0a6349e9 100644
--- a/docs/tasks/classify.md
+++ b/docs/tasks/classify.md
@@ -15,16 +15,16 @@ of that class are located or what their exact shape is.
!!! tip "Tip"
- YOLOv8 Classify models use the `-cls` suffix, i.e. `yolov8n-cls.pt` and are pretrained on [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml).
+ YOLOv8 Classify models use the `-cls` suffix, i.e. `yolov8n-cls.pt` and are pretrained on [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml).
-## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models/v8)
+## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/v8)
YOLOv8 pretrained Classify models are shown here. Detect, Segment and Pose models are pretrained on
-the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml) dataset, while Classify
+the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml) dataset, while Classify
models are pretrained on
-the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml) dataset.
+the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset.
-[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest
+[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) download automatically from the latest
Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
| Model | size (pixels) | acc top1 | acc top5 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs (B) at 640 |
@@ -178,4 +178,4 @@ i.e. `yolo predict model=yolov8n-cls.onnx`. Usage examples are shown for your mo
| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n-cls_paddle_model/` | ✅ | `imgsz` |
| [ncnn](https://github.com/Tencent/ncnn) | `ncnn` | `yolov8n-cls_ncnn_model/` | ✅ | `imgsz`, `half` |
-See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page.
\ No newline at end of file
+See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page.
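The classify page above now points at the relocated `cfg/datasets` and `cfg/models` paths. As a minimal, hedged sketch of the workflow it describes (the `-cls` checkpoint name is the standard pretrained weight and downloads on first use; the image URL is a sample asset):

```python
from ultralytics import YOLO

# Load a pretrained YOLOv8 classification checkpoint (downloads automatically)
model = YOLO('yolov8n-cls.pt')

# Classify a sample image; results[0].probs holds the class probabilities
results = model('https://ultralytics.com/images/bus.jpg')
print(results[0].probs)
```

The same pattern applies to the detect, segment and pose pages below with the `yolov8n.pt`, `-seg` and `-pose` checkpoints.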
diff --git a/docs/tasks/detect.md b/docs/tasks/detect.md
index 6573b75b9..5c2dd07da 100644
--- a/docs/tasks/detect.md
+++ b/docs/tasks/detect.md
@@ -12,13 +12,13 @@ The output of an object detector is a set of bounding boxes that enclose the obj
!!! tip "Tip"
- YOLOv8 Detect models are the default YOLOv8 models, i.e. `yolov8n.pt` and are pretrained on [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml).
+ YOLOv8 Detect models are the default YOLOv8 models, i.e. `yolov8n.pt` and are pretrained on [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml).
-## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models/v8)
+## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/v8)
-YOLOv8 pretrained Detect models are shown here. Detect, Segment and Pose models are pretrained on the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml) dataset, while Classify models are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml) dataset.
+YOLOv8 pretrained Detect models are shown here. Detect, Segment and Pose models are pretrained on the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml) dataset, while Classify models are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset.
-[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
+[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
| Model | size (pixels) | mAPval 50-95 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs (B) |
|--------------------------------------------------------------------------------------|-----------------------|----------------------|--------------------------------|-------------------------------------|--------------------|-------------------|
@@ -169,4 +169,4 @@ Available YOLOv8 export formats are in the table below. You can predict or valid
| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | ✅ | `imgsz` |
| [ncnn](https://github.com/Tencent/ncnn) | `ncnn` | `yolov8n_ncnn_model/` | ✅ | `imgsz`, `half` |
-See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page.
\ No newline at end of file
+See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page.
diff --git a/docs/tasks/pose.md b/docs/tasks/pose.md
index ef8495dc7..9eeabed7c 100644
--- a/docs/tasks/pose.md
+++ b/docs/tasks/pose.md
@@ -17,16 +17,16 @@ parts of an object in a scene, and their location in relation to each other.
!!! tip "Tip"
- YOLOv8 _pose_ models use the `-pose` suffix, i.e. `yolov8n-pose.pt`. These models are trained on the [COCO keypoints](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco-pose.yaml) dataset and are suitable for a variety of pose estimation tasks.
+ YOLOv8 _pose_ models use the `-pose` suffix, i.e. `yolov8n-pose.pt`. These models are trained on the [COCO keypoints](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco-pose.yaml) dataset and are suitable for a variety of pose estimation tasks.
-## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models/v8)
+## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/v8)
YOLOv8 pretrained Pose models are shown here. Detect, Segment and Pose models are pretrained on
-the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml) dataset, while Classify
+the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml) dataset, while Classify
models are pretrained on
-the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml) dataset.
+the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset.
-[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest
+[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) download automatically from the latest
Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
| Model | size (pixels) | mAPpose 50-95 | mAPpose 50 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs (B) |
@@ -183,4 +183,4 @@ i.e. `yolo predict model=yolov8n-pose.onnx`. Usage examples are shown for your m
| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n-pose_paddle_model/` | ✅ | `imgsz` |
| [ncnn](https://github.com/Tencent/ncnn) | `ncnn` | `yolov8n-pose_ncnn_model/` | ✅ | `imgsz`, `half` |
-See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page.
\ No newline at end of file
+See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page.
diff --git a/docs/tasks/segment.md b/docs/tasks/segment.md
index 659ac8267..413ec7fbd 100644
--- a/docs/tasks/segment.md
+++ b/docs/tasks/segment.md
@@ -15,16 +15,16 @@ segmentation is useful when you need to know not only where objects are in an im
!!! tip "Tip"
- YOLOv8 Segment models use the `-seg` suffix, i.e. `yolov8n-seg.pt` and are pretrained on [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml).
+ YOLOv8 Segment models use the `-seg` suffix, i.e. `yolov8n-seg.pt` and are pretrained on [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml).
-## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models/v8)
+## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models/v8)
YOLOv8 pretrained Segment models are shown here. Detect, Segment and Pose models are pretrained on
-the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml) dataset, while Classify
+the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml) dataset, while Classify
models are pretrained on
-the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml) dataset.
+the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/ImageNet.yaml) dataset.
-[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest
+[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) download automatically from the latest
Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
| Model | size (pixels) | mAPbox 50-95 | mAPmask 50-95 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs (B) |
@@ -183,4 +183,4 @@ i.e. `yolo predict model=yolov8n-seg.onnx`. Usage examples are shown for your mo
| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n-seg_paddle_model/` | ✅ | `imgsz` |
| [ncnn](https://github.com/Tencent/ncnn) | `ncnn` | `yolov8n-seg_ncnn_model/` | ✅ | `imgsz`, `half` |
-See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page.
\ No newline at end of file
+See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page.
diff --git a/docs/usage/callbacks.md b/docs/usage/callbacks.md
index 7968fafdd..ddaccbc65 100644
--- a/docs/usage/callbacks.md
+++ b/docs/usage/callbacks.md
@@ -40,7 +40,7 @@ for (result, frame) in model.track/predict():
## All callbacks
-Here are all supported callbacks. See callbacks [source code](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/yolo/utils/callbacks/base.py) for additional details.
+Here are all supported callbacks. See callbacks [source code](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/base.py) for additional details.
### Trainer Callbacks
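The callbacks page now links to `ultralytics/utils/callbacks/base.py` for the full event list. A short, hedged sketch of registering one of those events through the public `add_callback` API (the event name `on_predict_start` is taken from that list; the weight file is the standard pretrained checkpoint):

```python
from ultralytics import YOLO

def on_predict_start(predictor):
    # Runs once before inference begins; receives the predictor object
    print(f'Predicting with {type(predictor).__name__}')

model = YOLO('yolov8n.pt')
model.add_callback('on_predict_start', on_predict_start)
model.predict('https://ultralytics.com/images/bus.jpg')
```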
diff --git a/docs/usage/cfg.md b/docs/usage/cfg.md
index b027da310..7d54176fd 100644
--- a/docs/usage/cfg.md
+++ b/docs/usage/cfg.md
@@ -38,7 +38,7 @@ Where:
- `MODE` (required) is one of `[train, val, predict, export, track, benchmark]`
- `ARGS` (optional) are any number of custom `arg=value` pairs like `imgsz=320` that override defaults.
For a full list of available `ARGS` see the [Configuration](cfg.md) page and `defaults.yaml`
- GitHub [source](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/yolo/cfg/default.yaml).
+ GitHub [source](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/default.yaml).
#### Tasks
diff --git a/docs/usage/cli.md b/docs/usage/cli.md
index 21879d7e8..84d922265 100644
--- a/docs/usage/cli.md
+++ b/docs/usage/cli.md
@@ -70,7 +70,7 @@ Where:
- `MODE` (required) is one of `[train, val, predict, export, track]`
- `ARGS` (optional) are any number of custom `arg=value` pairs like `imgsz=320` that override defaults.
For a full list of available `ARGS` see the [Configuration](cfg.md) page and `defaults.yaml`
- GitHub [source](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/yolo/cfg/default.yaml).
+ GitHub [source](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/default.yaml).
!!! warning "Warning"
diff --git a/docs/usage/engine.md b/docs/usage/engine.md
index 8f6444390..0ce8574ce 100644
--- a/docs/usage/engine.md
+++ b/docs/usage/engine.md
@@ -15,14 +15,14 @@ custom model and dataloader by just overriding these functions:
* `get_model(cfg, weights)` - The function that builds the model to be trained
* `get_dataloder()` - The function that builds the dataloader
- More details and source code can be found in [`BaseTrainer` Reference](../reference/yolo/engine/trainer.md)
+ More details and source code can be found in [`BaseTrainer` Reference](../reference/engine/trainer.md)
## DetectionTrainer
Here's how you can use the YOLOv8 `DetectionTrainer` and customize it.
```python
-from ultralytics.yolo.v8.detect import DetectionTrainer
+from ultralytics.models.yolo.detect import DetectionTrainer
trainer = DetectionTrainer(overrides={...})
trainer.train()
@@ -35,7 +35,7 @@ Let's customize the trainer **to train a custom detection model** that is not su
simply overloading the existing the `get_model` functionality:
```python
-from ultralytics.yolo.v8.detect import DetectionTrainer
+from ultralytics.models.yolo.detect import DetectionTrainer
class CustomTrainer(DetectionTrainer):
@@ -54,7 +54,7 @@ You now realize that you need to customize the trainer further to:
Here's how you can do it:
```python
-from ultralytics.yolo.v8.detect import DetectionTrainer
+from ultralytics.models.yolo.detect import DetectionTrainer
from ultralytics.nn.tasks import DetectionModel
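The engine page's snippets now import `DetectionTrainer` from `ultralytics.models.yolo.detect`. A self-contained sketch of the `get_model` override pattern it documents, under the assumption that the method keeps its `cfg`/`weights`/`verbose` signature (`coco8.yaml` is the small sample dataset config shipped with the package):

```python
from ultralytics.models.yolo.detect import DetectionTrainer
from ultralytics.nn.tasks import DetectionModel


class CustomTrainer(DetectionTrainer):

    def get_model(self, cfg=None, weights=None, verbose=True):
        # Build a standard DetectionModel; replace this with a custom nn.Module if needed
        model = DetectionModel(cfg, nc=self.data['nc'], verbose=verbose)
        if weights:
            model.load(weights)
        return model


trainer = CustomTrainer(overrides={'data': 'coco8.yaml', 'model': 'yolov8n.yaml', 'epochs': 1})
trainer.train()
```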
diff --git a/docs/usage/python.md b/docs/usage/python.md
index 2d8bb4c4e..7fc30e889 100644
--- a/docs/usage/python.md
+++ b/docs/usage/python.md
@@ -240,7 +240,7 @@ their specific use case based on their requirements for speed and accuracy.
Benchmark an official YOLOv8n model across all export formats.
```python
- from ultralytics.yolo.utils.benchmarks import benchmark
+ from ultralytics.utils.benchmarks import benchmark
# Benchmark
benchmark(model='yolov8n.pt', imgsz=640, half=False, device=0)
@@ -256,7 +256,7 @@ from `BaseTrainer`.
!!! tip "Detection Trainer Example"
```python
- from ultralytics.yolo import v8 import DetectionTrainer, DetectionValidator, DetectionPredictor
+ from ultralytics.models.yolo import DetectionTrainer, DetectionValidator, DetectionPredictor
# trainer
trainer = DetectionTrainer(overrides={})
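Alongside the import path change, the benchmark helper itself is unchanged; a minimal usage sketch under the new module layout (CPU device and the small 160-pixel image size are illustrative values, mirroring the CI benchmarks rather than this page):

```python
from ultralytics.utils.benchmarks import benchmark

# Benchmark a pretrained YOLOv8n checkpoint across the export formats
benchmark(model='yolov8n.pt', imgsz=160, half=False, device='cpu')
```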
diff --git a/examples/YOLOv8-ONNXRuntime/main.py b/examples/YOLOv8-ONNXRuntime/main.py
index 9c5910053..3051c473b 100644
--- a/examples/YOLOv8-ONNXRuntime/main.py
+++ b/examples/YOLOv8-ONNXRuntime/main.py
@@ -5,8 +5,8 @@ import numpy as np
import onnxruntime as ort
import torch
-from ultralytics.yolo.utils import ROOT, yaml_load
-from ultralytics.yolo.utils.checks import check_requirements, check_yaml
+from ultralytics.utils import ROOT, yaml_load
+from ultralytics.utils.checks import check_requirements, check_yaml
class Yolov8:
diff --git a/examples/YOLOv8-OpenCV-ONNX-Python/main.py b/examples/YOLOv8-OpenCV-ONNX-Python/main.py
index d1f635c27..3cecacaa3 100644
--- a/examples/YOLOv8-OpenCV-ONNX-Python/main.py
+++ b/examples/YOLOv8-OpenCV-ONNX-Python/main.py
@@ -3,8 +3,8 @@ import argparse
import cv2.dnn
import numpy as np
-from ultralytics.yolo.utils import ROOT, yaml_load
-from ultralytics.yolo.utils.checks import check_yaml
+from ultralytics.utils import ROOT, yaml_load
+from ultralytics.utils.checks import check_yaml
CLASSES = yaml_load(check_yaml('coco128.yaml'))['names']
diff --git a/examples/tutorial.ipynb b/examples/tutorial.ipynb
index 9bb715062..aa9b2b6f6 100644
--- a/examples/tutorial.ipynb
+++ b/examples/tutorial.ipynb
@@ -300,7 +300,7 @@
"name": "stdout",
"text": [
"Ultralytics YOLOv8.0.71 🚀 Python-3.9.16 torch-2.0.0+cu118 CUDA:0 (Tesla T4, 15102MiB)\n",
- "\u001b[34m\u001b[1myolo/engine/trainer: \u001b[0mtask=detect, mode=train, model=yolov8n.pt, data=coco128.yaml, epochs=3, patience=50, batch=16, imgsz=640, save=True, save_period=-1, cache=False, device=None, workers=8, project=None, name=None, exist_ok=False, pretrained=False, optimizer=SGD, verbose=True, seed=0, deterministic=True, single_cls=False, image_weights=False, rect=False, cos_lr=False, close_mosaic=0, resume=False, amp=True, overlap_mask=True, mask_ratio=4, dropout=0.0, val=True, split=val, save_json=False, save_hybrid=False, conf=None, iou=0.7, max_det=300, half=False, dnn=False, plots=True, source=None, show=False, save_txt=False, save_conf=False, save_crop=False, show_labels=True, show_conf=True, vid_stride=1, line_width=3, visualize=False, augment=False, agnostic_nms=False, classes=None, retina_masks=False, boxes=True, format=torchscript, keras=False, optimize=False, int8=False, dynamic=False, simplify=False, opset=None, workspace=4, nms=False, lr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, dfl=1.5, pose=12.0, kobj=1.0, label_smoothing=0.0, nbs=64, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0, cfg=None, v5loader=False, tracker=botsort.yaml, save_dir=runs/detect/train\n",
+ "\u001b[34m\u001b[1mengine/trainer: \u001b[0mtask=detect, mode=train, model=yolov8n.pt, data=coco128.yaml, epochs=3, patience=50, batch=16, imgsz=640, save=True, save_period=-1, cache=False, device=None, workers=8, project=None, name=None, exist_ok=False, pretrained=False, optimizer=SGD, verbose=True, seed=0, deterministic=True, single_cls=False, image_weights=False, rect=False, cos_lr=False, close_mosaic=0, resume=False, amp=True, overlap_mask=True, mask_ratio=4, dropout=0.0, val=True, split=val, save_json=False, save_hybrid=False, conf=None, iou=0.7, max_det=300, half=False, dnn=False, plots=True, source=None, show=False, save_txt=False, save_conf=False, save_crop=False, show_labels=True, show_conf=True, vid_stride=1, line_width=3, visualize=False, augment=False, agnostic_nms=False, classes=None, retina_masks=False, boxes=True, format=torchscript, keras=False, optimize=False, int8=False, dynamic=False, simplify=False, opset=None, workspace=4, nms=False, lr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, dfl=1.5, pose=12.0, kobj=1.0, label_smoothing=0.0, nbs=64, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0, cfg=None, tracker=botsort.yaml, save_dir=runs/detect/train\n",
"\n",
" from n params module arguments \n",
" 0 -1 1 464 ultralytics.nn.modules.Conv [3, 16, 3, 2] \n",
diff --git a/mkdocs.yml b/mkdocs.yml
index 75f8a74ab..09939920d 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -250,14 +250,78 @@ nav:
- 'Android': hub/app/android.md
- Inference API: hub/inference_api.md
- Reference:
+ - cfg:
+ - __init__: reference/cfg/__init__.md
+ - data:
+ - annotator: reference/data/annotator.md
+ - augment: reference/data/augment.md
+ - base: reference/data/base.md
+ - build: reference/data/build.md
+ - converter: reference/data/converter.md
+ - dataset: reference/data/dataset.md
+ - loaders: reference/data/loaders.md
+ - utils: reference/data/utils.md
+ - engine:
+ - exporter: reference/engine/exporter.md
+ - model: reference/engine/model.md
+ - predictor: reference/engine/predictor.md
+ - results: reference/engine/results.md
+ - trainer: reference/engine/trainer.md
+ - validator: reference/engine/validator.md
- hub:
- __init__: reference/hub/__init__.md
- auth: reference/hub/auth.md
- session: reference/hub/session.md
- utils: reference/hub/utils.md
+ - models:
+ - fastsam:
+ - model: reference/models/fastsam/model.md
+ - predict: reference/models/fastsam/predict.md
+ - prompt: reference/models/fastsam/prompt.md
+ - utils: reference/models/fastsam/utils.md
+ - val: reference/models/fastsam/val.md
+ - nas:
+ - model: reference/models/nas/model.md
+ - predict: reference/models/nas/predict.md
+ - val: reference/models/nas/val.md
+ - rtdetr:
+ - model: reference/models/rtdetr/model.md
+ - predict: reference/models/rtdetr/predict.md
+ - train: reference/models/rtdetr/train.md
+ - val: reference/models/rtdetr/val.md
+ - sam:
+ - amg: reference/models/sam/amg.md
+ - build: reference/models/sam/build.md
+ - model: reference/models/sam/model.md
+ - modules:
+ - decoders: reference/models/sam/modules/decoders.md
+ - encoders: reference/models/sam/modules/encoders.md
+ - sam: reference/models/sam/modules/sam.md
+ - tiny_encoder: reference/models/sam/modules/tiny_encoder.md
+ - transformer: reference/models/sam/modules/transformer.md
+ - predict: reference/models/sam/predict.md
+ - utils:
+ - loss: reference/models/utils/loss.md
+ - ops: reference/models/utils/ops.md
+ - yolo:
+ - classify:
+ - predict: reference/models/yolo/classify/predict.md
+ - train: reference/models/yolo/classify/train.md
+ - val: reference/models/yolo/classify/val.md
+ - detect:
+ - predict: reference/models/yolo/detect/predict.md
+ - train: reference/models/yolo/detect/train.md
+ - val: reference/models/yolo/detect/val.md
+ - pose:
+ - predict: reference/models/yolo/pose/predict.md
+ - train: reference/models/yolo/pose/train.md
+ - val: reference/models/yolo/pose/val.md
+ - segment:
+ - predict: reference/models/yolo/segment/predict.md
+ - train: reference/models/yolo/segment/train.md
+ - val: reference/models/yolo/segment/val.md
- nn:
- autobackend: reference/nn/autobackend.md
- - autoshape: reference/nn/autoshape.md
- modules:
- block: reference/nn/modules/block.md
- conv: reference/nn/modules/conv.md
@@ -265,115 +329,44 @@ nav:
- transformer: reference/nn/modules/transformer.md
- utils: reference/nn/modules/utils.md
- tasks: reference/nn/tasks.md
- - tracker:
- - track: reference/tracker/track.md
- - trackers:
- - basetrack: reference/tracker/trackers/basetrack.md
- - bot_sort: reference/tracker/trackers/bot_sort.md
- - byte_tracker: reference/tracker/trackers/byte_tracker.md
+ - trackers:
+ - basetrack: reference/trackers/basetrack.md
+ - bot_sort: reference/trackers/bot_sort.md
+ - byte_tracker: reference/trackers/byte_tracker.md
+ - track: reference/trackers/track.md
- utils:
- - gmc: reference/tracker/utils/gmc.md
- - kalman_filter: reference/tracker/utils/kalman_filter.md
- - matching: reference/tracker/utils/matching.md
- - vit:
- - rtdetr:
- - model: reference/vit/rtdetr/model.md
- - predict: reference/vit/rtdetr/predict.md
- - train: reference/vit/rtdetr/train.md
- - val: reference/vit/rtdetr/val.md
- - sam:
- - amg: reference/vit/sam/amg.md
- - build: reference/vit/sam/build.md
- - model: reference/vit/sam/model.md
- - modules:
- - decoders: reference/vit/sam/modules/decoders.md
- - encoders: reference/vit/sam/modules/encoders.md
- - sam: reference/vit/sam/modules/sam.md
- - tiny_encoder: reference/vit/sam/modules/tiny_encoder.md
- - transformer: reference/vit/sam/modules/transformer.md
- - predict: reference/vit/sam/predict.md
- - utils:
- - loss: reference/vit/utils/loss.md
- - ops: reference/vit/utils/ops.md
- - yolo:
- - cfg:
- - __init__: reference/yolo/cfg/__init__.md
- - data:
- - annotator: reference/yolo/data/annotator.md
- - augment: reference/yolo/data/augment.md
- - base: reference/yolo/data/base.md
- - build: reference/yolo/data/build.md
- - converter: reference/yolo/data/converter.md
- - dataloaders:
- - stream_loaders: reference/yolo/data/dataloaders/stream_loaders.md
- - v5augmentations: reference/yolo/data/dataloaders/v5augmentations.md
- - v5loader: reference/yolo/data/dataloaders/v5loader.md
- - dataset: reference/yolo/data/dataset.md
- - dataset_wrappers: reference/yolo/data/dataset_wrappers.md
- - utils: reference/yolo/data/utils.md
- - engine:
- - exporter: reference/yolo/engine/exporter.md
- - model: reference/yolo/engine/model.md
- - predictor: reference/yolo/engine/predictor.md
- - results: reference/yolo/engine/results.md
- - trainer: reference/yolo/engine/trainer.md
- - validator: reference/yolo/engine/validator.md
- - fastsam:
- - model: reference/yolo/fastsam/model.md
- - predict: reference/yolo/fastsam/predict.md
- - prompt: reference/yolo/fastsam/prompt.md
- - utils: reference/yolo/fastsam/utils.md
- - val: reference/yolo/fastsam/val.md
- - nas:
- - model: reference/yolo/nas/model.md
- - predict: reference/yolo/nas/predict.md
- - val: reference/yolo/nas/val.md
- - utils:
- - __init__: reference/yolo/utils/__init__.md
- - autobatch: reference/yolo/utils/autobatch.md
- - benchmarks: reference/yolo/utils/benchmarks.md
- - callbacks:
- - base: reference/yolo/utils/callbacks/base.md
- - clearml: reference/yolo/utils/callbacks/clearml.md
- - comet: reference/yolo/utils/callbacks/comet.md
- - dvc: reference/yolo/utils/callbacks/dvc.md
- - hub: reference/yolo/utils/callbacks/hub.md
- - mlflow: reference/yolo/utils/callbacks/mlflow.md
- - neptune: reference/yolo/utils/callbacks/neptune.md
- - raytune: reference/yolo/utils/callbacks/raytune.md
- - tensorboard: reference/yolo/utils/callbacks/tensorboard.md
- - wb: reference/yolo/utils/callbacks/wb.md
- - checks: reference/yolo/utils/checks.md
- - dist: reference/yolo/utils/dist.md
- - downloads: reference/yolo/utils/downloads.md
- - errors: reference/yolo/utils/errors.md
- - files: reference/yolo/utils/files.md
- - instance: reference/yolo/utils/instance.md
- - loss: reference/yolo/utils/loss.md
- - metrics: reference/yolo/utils/metrics.md
- - ops: reference/yolo/utils/ops.md
- - patches: reference/yolo/utils/patches.md
- - plotting: reference/yolo/utils/plotting.md
- - tal: reference/yolo/utils/tal.md
- - torch_utils: reference/yolo/utils/torch_utils.md
- - tuner: reference/yolo/utils/tuner.md
- - v8:
- - classify:
- - predict: reference/yolo/v8/classify/predict.md
- - train: reference/yolo/v8/classify/train.md
- - val: reference/yolo/v8/classify/val.md
- - detect:
- - predict: reference/yolo/v8/detect/predict.md
- - train: reference/yolo/v8/detect/train.md
- - val: reference/yolo/v8/detect/val.md
- - pose:
- - predict: reference/yolo/v8/pose/predict.md
- - train: reference/yolo/v8/pose/train.md
- - val: reference/yolo/v8/pose/val.md
- - segment:
- - predict: reference/yolo/v8/segment/predict.md
- - train: reference/yolo/v8/segment/train.md
- - val: reference/yolo/v8/segment/val.md
+ - gmc: reference/trackers/utils/gmc.md
+ - kalman_filter: reference/trackers/utils/kalman_filter.md
+ - matching: reference/trackers/utils/matching.md
+ - utils:
+ - __init__: reference/utils/__init__.md
+ - autobatch: reference/utils/autobatch.md
+ - benchmarks: reference/utils/benchmarks.md
+ - callbacks:
+ - base: reference/utils/callbacks/base.md
+ - clearml: reference/utils/callbacks/clearml.md
+ - comet: reference/utils/callbacks/comet.md
+ - dvc: reference/utils/callbacks/dvc.md
+ - hub: reference/utils/callbacks/hub.md
+ - mlflow: reference/utils/callbacks/mlflow.md
+ - neptune: reference/utils/callbacks/neptune.md
+ - raytune: reference/utils/callbacks/raytune.md
+ - tensorboard: reference/utils/callbacks/tensorboard.md
+ - wb: reference/utils/callbacks/wb.md
+ - checks: reference/utils/checks.md
+ - dist: reference/utils/dist.md
+ - downloads: reference/utils/downloads.md
+ - errors: reference/utils/errors.md
+ - files: reference/utils/files.md
+ - instance: reference/utils/instance.md
+ - loss: reference/utils/loss.md
+ - metrics: reference/utils/metrics.md
+ - ops: reference/utils/ops.md
+ - patches: reference/utils/patches.md
+ - plotting: reference/utils/plotting.md
+ - tal: reference/utils/tal.md
+ - torch_utils: reference/utils/torch_utils.md
+ - tuner: reference/utils/tuner.md
- Help:
- Help: help/index.md
@@ -413,13 +406,13 @@ plugins:
quick-start.md: quickstart.md
app.md: hub/app/index.md
sdk.md: index.md
- reference/base_pred.md: reference/yolo/engine/predictor.md
- reference/base_trainer.md: reference/yolo/engine/trainer.md
- reference/exporter.md: reference/yolo/engine/exporter.md
- reference/model.md: reference/yolo/engine/model.md
+ reference/base_pred.md: reference/engine/predictor.md
+ reference/base_trainer.md: reference/engine/trainer.md
+ reference/exporter.md: reference/engine/exporter.md
+ reference/model.md: reference/engine/model.md
reference/nn.md: reference/nn/modules/head.md
- reference/ops.md: reference/yolo/utils/ops.md
- reference/results.md: reference/yolo/engine/results.md
+ reference/ops.md: reference/utils/ops.md
+ reference/results.md: reference/engine/results.md
reference/base_val.md: index.md
tasks/classification.md: tasks/classify.md
tasks/detection.md: tasks/detect.md
diff --git a/setup.py b/setup.py
index 83bddf64f..e808aa7b1 100644
--- a/setup.py
+++ b/setup.py
@@ -70,5 +70,4 @@ setup(
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows', ],
keywords='machine-learning, deep-learning, vision, ML, DL, AI, YOLO, YOLOv3, YOLOv5, YOLOv8, HUB, Ultralytics',
- entry_points={
- 'console_scripts': ['yolo = ultralytics.yolo.cfg:entrypoint', 'ultralytics = ultralytics.yolo.cfg:entrypoint']})
+ entry_points={'console_scripts': ['yolo = ultralytics.cfg:entrypoint', 'ultralytics = ultralytics.cfg:entrypoint']})
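Both console scripts now resolve to `ultralytics.cfg:entrypoint`, the function added in the new `ultralytics/cfg/__init__.py` below. The same entrypoint can be exercised from Python through its `debug` argument, as the module's own `__main__` block does; a hedged sketch:

```python
from ultralytics.cfg import entrypoint

# Equivalent to running `yolo checks` from the command line
entrypoint(debug='yolo checks')
```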
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 24e9703d2..6cf68d122 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -5,7 +5,7 @@ from pathlib import Path
import pytest
-from ultralytics.yolo.utils import ONLINE, ROOT, SETTINGS
+from ultralytics.utils import ONLINE, ROOT, SETTINGS
WEIGHT_DIR = Path(SETTINGS['weights_dir'])
TASK_ARGS = [ # (task, model, data)
diff --git a/tests/test_engine.py b/tests/test_engine.py
index b0110442d..e98b90eaf 100644
--- a/tests/test_engine.py
+++ b/tests/test_engine.py
@@ -3,10 +3,10 @@
from pathlib import Path
from ultralytics import YOLO
-from ultralytics.yolo.cfg import get_cfg
-from ultralytics.yolo.engine.exporter import Exporter
-from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, SETTINGS
-from ultralytics.yolo.v8 import classify, detect, segment
+from ultralytics.cfg import get_cfg
+from ultralytics.engine.exporter import Exporter
+from ultralytics.models.yolo import classify, detect, segment
+from ultralytics.utils import DEFAULT_CFG, ROOT, SETTINGS
CFG_DET = 'yolov8n.yaml'
CFG_SEG = 'yolov8n-seg.yaml'
@@ -65,7 +65,6 @@ def test_detect():
def test_segment():
overrides = {'data': 'coco8-seg.yaml', 'model': CFG_SEG, 'imgsz': 32, 'epochs': 1, 'save': False}
CFG.data = 'coco8-seg.yaml'
- CFG.v5loader = False
# YOLO(CFG_SEG).train(**overrides) # works
# trainer
diff --git a/tests/test_python.py b/tests/test_python.py
index 10f24fd3d..6502cf59c 100644
--- a/tests/test_python.py
+++ b/tests/test_python.py
@@ -9,8 +9,8 @@ from PIL import Image
from torchvision.transforms import ToTensor
from ultralytics import RTDETR, YOLO
-from ultralytics.yolo.data.build import load_inference_source
-from ultralytics.yolo.utils import LINUX, ONLINE, ROOT, SETTINGS
+from ultralytics.data.build import load_inference_source
+from ultralytics.utils import LINUX, ONLINE, ROOT, SETTINGS
MODEL = Path(SETTINGS['weights_dir']) / 'yolov8n.pt'
CFG = 'yolov8n.yaml'
@@ -102,7 +102,7 @@ def test_val_scratch():
def test_amp():
if torch.cuda.is_available():
- from ultralytics.yolo.utils.checks import check_amp
+ from ultralytics.utils.checks import check_amp
model = YOLO(MODEL).model.cuda()
assert check_amp(model)
diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py
index a4d865298..3b1aabbc6 100644
--- a/ultralytics/__init__.py
+++ b/ultralytics/__init__.py
@@ -1,14 +1,13 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
-__version__ = '8.0.134'
+__version__ = '8.0.136'
+from ultralytics.engine.model import YOLO
from ultralytics.hub import start
-from ultralytics.vit.rtdetr import RTDETR
-from ultralytics.vit.sam import SAM
-from ultralytics.yolo.engine.model import YOLO
-from ultralytics.yolo.fastsam import FastSAM
-from ultralytics.yolo.nas import NAS
-from ultralytics.yolo.utils.checks import check_yolo as checks
-from ultralytics.yolo.utils.downloads import download
+from ultralytics.models import RTDETR, SAM
+from ultralytics.models.fastsam import FastSAM
+from ultralytics.models.nas import NAS
+from ultralytics.utils.checks import check_yolo as checks
+from ultralytics.utils.downloads import download
__all__ = '__version__', 'YOLO', 'NAS', 'SAM', 'FastSAM', 'RTDETR', 'checks', 'download', 'start' # allow simpler import
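With the flattened package layout, the public classes are importable directly from the package root; a quick sketch exercising the names listed in `__all__` above:

```python
import ultralytics
from ultralytics import NAS, RTDETR, SAM, FastSAM, YOLO, checks

print(ultralytics.__version__)  # '8.0.136' in this release
checks()                        # environment summary (alias of check_yolo)
model = YOLO('yolov8n.pt')      # detection checkpoint, downloads on first use
```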
diff --git a/ultralytics/cfg/__init__.py b/ultralytics/cfg/__init__.py
new file mode 100644
index 000000000..75703de99
--- /dev/null
+++ b/ultralytics/cfg/__init__.py
@@ -0,0 +1,421 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+
+import contextlib
+import re
+import shutil
+import sys
+from difflib import get_close_matches
+from pathlib import Path
+from types import SimpleNamespace
+from typing import Dict, List, Union
+
+from ultralytics.utils import (DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_PATH, LOGGER, ROOT, USER_CONFIG_DIR,
+ IterableSimpleNamespace, __version__, checks, colorstr, deprecation_warn, get_settings,
+ yaml_load, yaml_print)
+
+# Define valid tasks and modes
+MODES = 'train', 'val', 'predict', 'export', 'track', 'benchmark'
+TASKS = 'detect', 'segment', 'classify', 'pose'
+TASK2DATA = {'detect': 'coco8.yaml', 'segment': 'coco8-seg.yaml', 'classify': 'imagenet100', 'pose': 'coco8-pose.yaml'}
+TASK2MODEL = {
+ 'detect': 'yolov8n.pt',
+ 'segment': 'yolov8n-seg.pt',
+ 'classify': 'yolov8n-cls.pt',
+ 'pose': 'yolov8n-pose.pt'}
+TASK2METRIC = {
+ 'detect': 'metrics/mAP50-95(B)',
+ 'segment': 'metrics/mAP50-95(M)',
+ 'classify': 'metrics/accuracy_top1',
+ 'pose': 'metrics/mAP50-95(P)'}
+
+
+CLI_HELP_MSG = \
+ f"""
+ Arguments received: {str(['yolo'] + sys.argv[1:])}. Ultralytics 'yolo' commands use the following syntax:
+
+ yolo TASK MODE ARGS
+
+ Where TASK (optional) is one of {TASKS}
+ MODE (required) is one of {MODES}
+ ARGS (optional) are any number of custom 'arg=value' pairs like 'imgsz=320' that override defaults.
+ See all ARGS at https://docs.ultralytics.com/usage/cfg or with 'yolo cfg'
+
+ 1. Train a detection model for 10 epochs with an initial learning_rate of 0.01
+ yolo train data=coco128.yaml model=yolov8n.pt epochs=10 lr0=0.01
+
+ 2. Predict a YouTube video using a pretrained segmentation model at image size 320:
+ yolo predict model=yolov8n-seg.pt source='https://youtu.be/Zgi9g1ksQHc' imgsz=320
+
+ 3. Val a pretrained detection model at batch-size 1 and image size 640:
+ yolo val model=yolov8n.pt data=coco128.yaml batch=1 imgsz=640
+
+ 4. Export a YOLOv8n classification model to ONNX format at image size 224 by 128 (no TASK required)
+ yolo export model=yolov8n-cls.pt format=onnx imgsz=224,128
+
+ 5. Run special commands:
+ yolo help
+ yolo checks
+ yolo version
+ yolo settings
+ yolo copy-cfg
+ yolo cfg
+
+ Docs: https://docs.ultralytics.com
+ Community: https://community.ultralytics.com
+ GitHub: https://github.com/ultralytics/ultralytics
+ """
+
+# Define keys for arg type checks
+CFG_FLOAT_KEYS = 'warmup_epochs', 'box', 'cls', 'dfl', 'degrees', 'shear'
+CFG_FRACTION_KEYS = ('dropout', 'iou', 'lr0', 'lrf', 'momentum', 'weight_decay', 'warmup_momentum', 'warmup_bias_lr',
+ 'label_smoothing', 'hsv_h', 'hsv_s', 'hsv_v', 'translate', 'scale', 'perspective', 'flipud',
+ 'fliplr', 'mosaic', 'mixup', 'copy_paste', 'conf', 'iou', 'fraction') # fraction floats 0.0 - 1.0
+CFG_INT_KEYS = ('epochs', 'patience', 'batch', 'workers', 'seed', 'close_mosaic', 'mask_ratio', 'max_det', 'vid_stride',
+ 'line_width', 'workspace', 'nbs', 'save_period')
+CFG_BOOL_KEYS = ('save', 'exist_ok', 'verbose', 'deterministic', 'single_cls', 'rect', 'cos_lr', 'overlap_mask', 'val',
+ 'save_json', 'save_hybrid', 'half', 'dnn', 'plots', 'show', 'save_txt', 'save_conf', 'save_crop',
+ 'show_labels', 'show_conf', 'visualize', 'augment', 'agnostic_nms', 'retina_masks', 'boxes', 'keras',
+ 'optimize', 'int8', 'dynamic', 'simplify', 'nms', 'profile')
+
+
+def cfg2dict(cfg):
+ """
+ Convert a configuration object to a dictionary, whether it is a file path, a string, or a SimpleNamespace object.
+
+ Args:
+ cfg (str | Path | SimpleNamespace): Configuration object to be converted to a dictionary.
+
+ Returns:
+ cfg (dict): Configuration object in dictionary format.
+ """
+ if isinstance(cfg, (str, Path)):
+ cfg = yaml_load(cfg) # load dict
+ elif isinstance(cfg, SimpleNamespace):
+ cfg = vars(cfg) # convert to dict
+ return cfg
+
+
+def get_cfg(cfg: Union[str, Path, Dict, SimpleNamespace] = DEFAULT_CFG_DICT, overrides: Dict = None):
+ """
+ Load and merge configuration data from a file or dictionary.
+
+ Args:
+ cfg (str | Path | Dict | SimpleNamespace): Configuration data.
+ overrides (str | Dict | optional): Overrides in the form of a file name or a dictionary. Default is None.
+
+ Returns:
+ (SimpleNamespace): Training arguments namespace.
+ """
+ cfg = cfg2dict(cfg)
+
+ # Merge overrides
+ if overrides:
+ overrides = cfg2dict(overrides)
+ check_cfg_mismatch(cfg, overrides)
+ cfg = {**cfg, **overrides} # merge cfg and overrides dicts (prefer overrides)
+
+ # Special handling for numeric project/name
+ for k in 'project', 'name':
+ if k in cfg and isinstance(cfg[k], (int, float)):
+ cfg[k] = str(cfg[k])
+ if cfg.get('name') == 'model': # assign model to 'name' arg
+ cfg['name'] = cfg.get('model', '').split('.')[0]
+ LOGGER.warning(f"WARNING ⚠️ 'name=model' automatically updated to 'name={cfg['name']}'.")
+
+ # Type and Value checks
+ for k, v in cfg.items():
+ if v is not None: # None values may be from optional args
+ if k in CFG_FLOAT_KEYS and not isinstance(v, (int, float)):
+ raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. "
+ f"Valid '{k}' types are int (i.e. '{k}=0') or float (i.e. '{k}=0.5')")
+ elif k in CFG_FRACTION_KEYS:
+ if not isinstance(v, (int, float)):
+ raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. "
+ f"Valid '{k}' types are int (i.e. '{k}=0') or float (i.e. '{k}=0.5')")
+ if not (0.0 <= v <= 1.0):
+ raise ValueError(f"'{k}={v}' is an invalid value. "
+ f"Valid '{k}' values are between 0.0 and 1.0.")
+ elif k in CFG_INT_KEYS and not isinstance(v, int):
+ raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. "
+ f"'{k}' must be an int (i.e. '{k}=8')")
+ elif k in CFG_BOOL_KEYS and not isinstance(v, bool):
+ raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. "
+ f"'{k}' must be a bool (i.e. '{k}=True' or '{k}=False')")
+
+ # Return instance
+ return IterableSimpleNamespace(**cfg)
+
+
+def _handle_deprecation(custom):
+ """
+ Hardcoded function to handle deprecated config keys
+ """
+
+ for key in custom.copy().keys():
+ if key == 'hide_labels':
+ deprecation_warn(key, 'show_labels')
+ custom['show_labels'] = custom.pop('hide_labels') == 'False'
+ if key == 'hide_conf':
+ deprecation_warn(key, 'show_conf')
+ custom['show_conf'] = custom.pop('hide_conf') == 'False'
+ if key == 'line_thickness':
+ deprecation_warn(key, 'line_width')
+ custom['line_width'] = custom.pop('line_thickness')
+
+ return custom
+
+
+def check_cfg_mismatch(base: Dict, custom: Dict, e=None):
+ """
+ This function checks for any mismatched keys between a custom configuration list and a base configuration list.
+ If any mismatched keys are found, the function prints out similar keys from the base list and exits the program.
+
+ Args:
+ custom (dict): a dictionary of custom configuration options
+ base (dict): a dictionary of base configuration options
+ """
+ custom = _handle_deprecation(custom)
+ base, custom = (set(x.keys()) for x in (base, custom))
+ mismatched = [x for x in custom if x not in base]
+ if mismatched:
+ string = ''
+ for x in mismatched:
+ matches = get_close_matches(x, base) # key list
+ matches = [f'{k}={DEFAULT_CFG_DICT[k]}' if DEFAULT_CFG_DICT.get(k) is not None else k for k in matches]
+ match_str = f'Similar arguments are i.e. {matches}.' if matches else ''
+ string += f"'{colorstr('red', 'bold', x)}' is not a valid YOLO argument. {match_str}\n"
+ raise SyntaxError(string + CLI_HELP_MSG) from e
+
+
+def merge_equals_args(args: List[str]) -> List[str]:
+ """
+ Merges arguments around isolated '=' args in a list of strings.
+ The function considers cases where the first argument ends with '=' or the second starts with '=',
+ as well as when the middle one is an equals sign.
+
+ Args:
+ args (List[str]): A list of strings where each element is an argument.
+
+ Returns:
+ List[str]: A list of strings where the arguments around isolated '=' are merged.
+ """
+ new_args = []
+ for i, arg in enumerate(args):
+ if arg == '=' and 0 < i < len(args) - 1: # merge ['arg', '=', 'val']
+ new_args[-1] += f'={args[i + 1]}'
+ del args[i + 1]
+ elif arg.endswith('=') and i < len(args) - 1 and '=' not in args[i + 1]: # merge ['arg=', 'val']
+ new_args.append(f'{arg}{args[i + 1]}')
+ del args[i + 1]
+ elif arg.startswith('=') and i > 0: # merge ['arg', '=val']
+ new_args[-1] += arg
+ else:
+ new_args.append(arg)
+ return new_args
+
+
+def handle_yolo_hub(args: List[str]) -> None:
+ """
+ Handle Ultralytics HUB command-line interface (CLI) commands.
+
+ This function processes Ultralytics HUB CLI commands such as login and logout.
+ It should be called when executing a script with arguments related to HUB authentication.
+
+ Args:
+ args (List[str]): A list of command line arguments
+
+ Example:
+ python my_script.py hub login your_api_key
+ """
+ from ultralytics import hub
+
+ if args[0] == 'login':
+ key = args[1] if len(args) > 1 else ''
+ # Log in to Ultralytics HUB using the provided API key
+ hub.login(key)
+ elif args[0] == 'logout':
+ # Log out from Ultralytics HUB
+ hub.logout()
+
+
+def handle_yolo_settings(args: List[str]) -> None:
+ """
+ Handle YOLO settings command-line interface (CLI) commands.
+
+ This function processes YOLO settings CLI commands such as reset.
+ It should be called when executing a script with arguments related to YOLO settings management.
+
+ Args:
+ args (List[str]): A list of command line arguments for YOLO settings management.
+
+ Example:
+ python my_script.py yolo settings reset
+ """
+ path = USER_CONFIG_DIR / 'settings.yaml' # get SETTINGS YAML file path
+ if any(args) and args[0] == 'reset':
+ path.unlink() # delete the settings file
+ get_settings() # create new settings
+ LOGGER.info('Settings reset successfully') # inform the user that settings have been reset
+ yaml_print(path) # print the current settings
+
+
+def entrypoint(debug=''):
+ """
+ This function is the ultralytics package entrypoint, it's responsible for parsing the command line arguments passed
+ to the package.
+
+ This function allows for:
+ - passing mandatory YOLO args as a list of strings
+ - specifying the task to be performed, either 'detect', 'segment' or 'classify'
+ - specifying the mode, either 'train', 'val', 'test', or 'predict'
+ - running special modes like 'checks'
+ - passing overrides to the package's configuration
+
+ It uses the package's default cfg and initializes it using the passed overrides.
+ Then it calls the CLI function with the composed cfg
+ """
+ args = (debug.split(' ') if debug else sys.argv)[1:]
+ if not args: # no arguments passed
+ LOGGER.info(CLI_HELP_MSG)
+ return
+
+ special = {
+ 'help': lambda: LOGGER.info(CLI_HELP_MSG),
+ 'checks': checks.check_yolo,
+ 'version': lambda: LOGGER.info(__version__),
+ 'settings': lambda: handle_yolo_settings(args[1:]),
+ 'cfg': lambda: yaml_print(DEFAULT_CFG_PATH),
+ 'hub': lambda: handle_yolo_hub(args[1:]),
+ 'login': lambda: handle_yolo_hub(args),
+ 'copy-cfg': copy_default_cfg}
+ full_args_dict = {**DEFAULT_CFG_DICT, **{k: None for k in TASKS}, **{k: None for k in MODES}, **special}
+
+ # Define common mis-uses of special commands, i.e. -h, -help, --help
+ special.update({k[0]: v for k, v in special.items()}) # singular
+ special.update({k[:-1]: v for k, v in special.items() if len(k) > 1 and k.endswith('s')}) # singular
+ special = {**special, **{f'-{k}': v for k, v in special.items()}, **{f'--{k}': v for k, v in special.items()}}
+
+ overrides = {} # basic overrides, i.e. imgsz=320
+ for a in merge_equals_args(args): # merge spaces around '=' sign
+ if a.startswith('--'):
+ LOGGER.warning(f"WARNING ⚠️ '{a}' does not require leading dashes '--', updating to '{a[2:]}'.")
+ a = a[2:]
+ if a.endswith(','):
+ LOGGER.warning(f"WARNING ⚠️ '{a}' does not require trailing comma ',', updating to '{a[:-1]}'.")
+ a = a[:-1]
+ if '=' in a:
+ try:
+ re.sub(r' *= *', '=', a) # remove spaces around equals sign
+ k, v = a.split('=', 1) # split on first '=' sign
+ assert v, f"missing '{k}' value"
+ if k == 'cfg': # custom.yaml passed
+ LOGGER.info(f'Overriding {DEFAULT_CFG_PATH} with {v}')
+ overrides = {k: val for k, val in yaml_load(checks.check_yaml(v)).items() if k != 'cfg'}
+ else:
+ if v.lower() == 'none':
+ v = None
+ elif v.lower() == 'true':
+ v = True
+ elif v.lower() == 'false':
+ v = False
+ else:
+ with contextlib.suppress(Exception):
+ v = eval(v)
+ overrides[k] = v
+ except (NameError, SyntaxError, ValueError, AssertionError) as e:
+ check_cfg_mismatch(full_args_dict, {a: ''}, e)
+
+ elif a in TASKS:
+ overrides['task'] = a
+ elif a in MODES:
+ overrides['mode'] = a
+ elif a.lower() in special:
+ special[a.lower()]()
+ return
+ elif a in DEFAULT_CFG_DICT and isinstance(DEFAULT_CFG_DICT[a], bool):
+ overrides[a] = True # auto-True for default bool args, i.e. 'yolo show' sets show=True
+ elif a in DEFAULT_CFG_DICT:
+ raise SyntaxError(f"'{colorstr('red', 'bold', a)}' is a valid YOLO argument but is missing an '=' sign "
+ f"to set its value, i.e. try '{a}={DEFAULT_CFG_DICT[a]}'\n{CLI_HELP_MSG}")
+ else:
+ check_cfg_mismatch(full_args_dict, {a: ''})
+
+ # Check keys
+ check_cfg_mismatch(full_args_dict, overrides)
+
+ # Mode
+ mode = overrides.get('mode', None)
+ if mode is None:
+ mode = DEFAULT_CFG.mode or 'predict'
+ LOGGER.warning(f"WARNING ⚠️ 'mode' is missing. Valid modes are {MODES}. Using default 'mode={mode}'.")
+ elif mode not in MODES:
+ if mode not in ('checks', checks):
+ raise ValueError(f"Invalid 'mode={mode}'. Valid modes are {MODES}.\n{CLI_HELP_MSG}")
+ LOGGER.warning("WARNING ⚠️ 'yolo mode=checks' is deprecated. Use 'yolo checks' instead.")
+ checks.check_yolo()
+ return
+
+ # Task
+ task = overrides.pop('task', None)
+ if task:
+ if task not in TASKS:
+ raise ValueError(f"Invalid 'task={task}'. Valid tasks are {TASKS}.\n{CLI_HELP_MSG}")
+ if 'model' not in overrides:
+ overrides['model'] = TASK2MODEL[task]
+
+ # Model
+ model = overrides.pop('model', DEFAULT_CFG.model)
+ if model is None:
+ model = 'yolov8n.pt'
+ LOGGER.warning(f"WARNING ⚠️ 'model' is missing. Using default 'model={model}'.")
+ overrides['model'] = model
+ if 'rtdetr' in model.lower(): # guess architecture
+ from ultralytics import RTDETR
+ model = RTDETR(model) # no task argument
+ elif 'sam' in model.lower():
+ from ultralytics import SAM
+ model = SAM(model)
+ else:
+ from ultralytics import YOLO
+ model = YOLO(model, task=task)
+ if isinstance(overrides.get('pretrained'), str):
+ model.load(overrides['pretrained'])
+
+ # Task Update
+ if task != model.task:
+ if task:
+ LOGGER.warning(f"WARNING ⚠️ conflicting 'task={task}' passed with 'task={model.task}' model. "
+ f"Ignoring 'task={task}' and updating to 'task={model.task}' to match model.")
+ task = model.task
+
+ # Mode
+ if mode in ('predict', 'track') and 'source' not in overrides:
+ overrides['source'] = DEFAULT_CFG.source or \
+ (ROOT / 'assets' if (ROOT / 'assets').exists() else 'https://ultralytics.com/images/bus.jpg') # configured source, else local assets, else sample image
+ LOGGER.warning(f"WARNING ⚠️ 'source' is missing. Using default 'source={overrides['source']}'.")
+ elif mode in ('train', 'val'):
+ if 'data' not in overrides:
+ overrides['data'] = TASK2DATA.get(task or DEFAULT_CFG.task, DEFAULT_CFG.data)
+ LOGGER.warning(f"WARNING ⚠️ 'data' is missing. Using default 'data={overrides['data']}'.")
+ elif mode == 'export':
+ if 'format' not in overrides:
+ overrides['format'] = DEFAULT_CFG.format or 'torchscript'
+ LOGGER.warning(f"WARNING ⚠️ 'format' is missing. Using default 'format={overrides['format']}'.")
+
+ # Run command in python
+ # getattr(model, mode)(**vars(get_cfg(overrides=overrides))) # default args using default.yaml
+ getattr(model, mode)(**overrides) # default args from model
+
+
+# Special modes --------------------------------------------------------------------------------------------------------
+def copy_default_cfg():
+ """Copy and create a new default configuration file with '_copy' appended to its name."""
+ new_file = Path.cwd() / DEFAULT_CFG_PATH.name.replace('.yaml', '_copy.yaml')
+ shutil.copy2(DEFAULT_CFG_PATH, new_file)
+ LOGGER.info(f'{DEFAULT_CFG_PATH} copied to {new_file}\n'
+ f"Example YOLO command with this new custom cfg:\n yolo cfg='{new_file}' imgsz=320 batch=8")
+
+
+if __name__ == '__main__':
+ # Example Usage: entrypoint(debug='yolo predict model=yolov8n.pt')
+ entrypoint(debug='')
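For reference, the value coercion applied to `k=v` pairs in the `entrypoint()` hunk above can be exercised in isolation. `cast_cli_value` below is a hypothetical helper name, not part of the package; it simply mirrors the none/true/false keywords and literal-eval fallback shown in the diff:

```python
import contextlib

def cast_cli_value(v: str):
    """Mirror the k=v coercion in entrypoint(): keywords first, then literal eval, else the raw string."""
    if v.lower() == 'none':
        return None
    if v.lower() == 'true':
        return True
    if v.lower() == 'false':
        return False
    with contextlib.suppress(Exception):
        return eval(v)  # numbers, tuples, dicts, etc.
    return v  # plain strings such as 'yolov8n.pt' fall through unchanged

print(cast_cli_value('320'), cast_cli_value('False'), cast_cli_value('yolov8n.pt'))
# -> 320 False yolov8n.pt
```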
diff --git a/ultralytics/datasets/Argoverse.yaml b/ultralytics/cfg/datasets/Argoverse.yaml
similarity index 97%
rename from ultralytics/datasets/Argoverse.yaml
rename to ultralytics/cfg/datasets/Argoverse.yaml
index e78758a87..ddfdbb689 100644
--- a/ultralytics/datasets/Argoverse.yaml
+++ b/ultralytics/cfg/datasets/Argoverse.yaml
@@ -29,7 +29,7 @@ names:
download: |
import json
from tqdm import tqdm
- from ultralytics.yolo.utils.downloads import download
+ from ultralytics.utils.downloads import download
from pathlib import Path
def argoverse2yolo(set):
diff --git a/ultralytics/datasets/GlobalWheat2020.yaml b/ultralytics/cfg/datasets/GlobalWheat2020.yaml
similarity index 97%
rename from ultralytics/datasets/GlobalWheat2020.yaml
rename to ultralytics/cfg/datasets/GlobalWheat2020.yaml
index 533a0b7d8..165004f6d 100644
--- a/ultralytics/datasets/GlobalWheat2020.yaml
+++ b/ultralytics/cfg/datasets/GlobalWheat2020.yaml
@@ -32,7 +32,7 @@ names:
# Download script/URL (optional) ---------------------------------------------------------------------------------------
download: |
- from ultralytics.yolo.utils.downloads import download
+ from ultralytics.utils.downloads import download
from pathlib import Path
# Download
diff --git a/ultralytics/datasets/ImageNet.yaml b/ultralytics/cfg/datasets/ImageNet.yaml
similarity index 100%
rename from ultralytics/datasets/ImageNet.yaml
rename to ultralytics/cfg/datasets/ImageNet.yaml
diff --git a/ultralytics/datasets/Objects365.yaml b/ultralytics/cfg/datasets/Objects365.yaml
similarity index 98%
rename from ultralytics/datasets/Objects365.yaml
rename to ultralytics/cfg/datasets/Objects365.yaml
index 8065432e8..415eff983 100644
--- a/ultralytics/datasets/Objects365.yaml
+++ b/ultralytics/cfg/datasets/Objects365.yaml
@@ -386,9 +386,9 @@ names:
download: |
from tqdm import tqdm
- from ultralytics.yolo.utils.checks import check_requirements
- from ultralytics.yolo.utils.downloads import download
- from ultralytics.yolo.utils.ops import xyxy2xywhn
+ from ultralytics.utils.checks import check_requirements
+ from ultralytics.utils.downloads import download
+ from ultralytics.utils.ops import xyxy2xywhn
import numpy as np
from pathlib import Path
diff --git a/ultralytics/datasets/SKU-110K.yaml b/ultralytics/cfg/datasets/SKU-110K.yaml
similarity index 95%
rename from ultralytics/datasets/SKU-110K.yaml
rename to ultralytics/cfg/datasets/SKU-110K.yaml
index ced2e08a2..e6deac21e 100644
--- a/ultralytics/datasets/SKU-110K.yaml
+++ b/ultralytics/cfg/datasets/SKU-110K.yaml
@@ -27,8 +27,8 @@ download: |
import pandas as pd
from tqdm import tqdm
- from ultralytics.yolo.utils.downloads import download
- from ultralytics.yolo.utils.ops import xyxy2xywh
+ from ultralytics.utils.downloads import download
+ from ultralytics.utils.ops import xyxy2xywh
# Download
dir = Path(yaml['path']) # dataset root dir
diff --git a/ultralytics/datasets/VOC.yaml b/ultralytics/cfg/datasets/VOC.yaml
similarity index 98%
rename from ultralytics/datasets/VOC.yaml
rename to ultralytics/cfg/datasets/VOC.yaml
index f1c91d2ee..ab3438791 100644
--- a/ultralytics/datasets/VOC.yaml
+++ b/ultralytics/cfg/datasets/VOC.yaml
@@ -48,7 +48,7 @@ download: |
import xml.etree.ElementTree as ET
from tqdm import tqdm
- from ultralytics.yolo.utils.downloads import download
+ from ultralytics.utils.downloads import download
from pathlib import Path
def convert_label(path, lb_path, year, image_id):
diff --git a/ultralytics/datasets/VisDrone.yaml b/ultralytics/cfg/datasets/VisDrone.yaml
similarity index 98%
rename from ultralytics/datasets/VisDrone.yaml
rename to ultralytics/cfg/datasets/VisDrone.yaml
index a37782f06..a1a4a466c 100644
--- a/ultralytics/datasets/VisDrone.yaml
+++ b/ultralytics/cfg/datasets/VisDrone.yaml
@@ -32,7 +32,7 @@ download: |
import os
from pathlib import Path
- from ultralytics.yolo.utils.downloads import download
+ from ultralytics.utils.downloads import download
def visdrone2yolo(dir):
from PIL import Image
diff --git a/ultralytics/datasets/coco-pose.yaml b/ultralytics/cfg/datasets/coco-pose.yaml
similarity index 96%
rename from ultralytics/datasets/coco-pose.yaml
rename to ultralytics/cfg/datasets/coco-pose.yaml
index 82a2f1584..670d55bf6 100644
--- a/ultralytics/datasets/coco-pose.yaml
+++ b/ultralytics/cfg/datasets/coco-pose.yaml
@@ -23,7 +23,7 @@ names:
# Download script/URL (optional)
download: |
- from ultralytics.yolo.utils.downloads import download
+ from ultralytics.utils.downloads import download
from pathlib import Path
# Download labels
diff --git a/ultralytics/datasets/coco.yaml b/ultralytics/cfg/datasets/coco.yaml
similarity index 97%
rename from ultralytics/datasets/coco.yaml
rename to ultralytics/cfg/datasets/coco.yaml
index 0e6edf7d5..8a70a5b34 100644
--- a/ultralytics/datasets/coco.yaml
+++ b/ultralytics/cfg/datasets/coco.yaml
@@ -99,7 +99,7 @@ names:
# Download script/URL (optional)
download: |
- from ultralytics.yolo.utils.downloads import download
+ from ultralytics.utils.downloads import download
from pathlib import Path
# Download labels
diff --git a/ultralytics/datasets/coco128-seg.yaml b/ultralytics/cfg/datasets/coco128-seg.yaml
similarity index 100%
rename from ultralytics/datasets/coco128-seg.yaml
rename to ultralytics/cfg/datasets/coco128-seg.yaml
diff --git a/ultralytics/datasets/coco128.yaml b/ultralytics/cfg/datasets/coco128.yaml
similarity index 100%
rename from ultralytics/datasets/coco128.yaml
rename to ultralytics/cfg/datasets/coco128.yaml
diff --git a/ultralytics/datasets/coco8-pose.yaml b/ultralytics/cfg/datasets/coco8-pose.yaml
similarity index 100%
rename from ultralytics/datasets/coco8-pose.yaml
rename to ultralytics/cfg/datasets/coco8-pose.yaml
diff --git a/ultralytics/datasets/coco8-seg.yaml b/ultralytics/cfg/datasets/coco8-seg.yaml
similarity index 100%
rename from ultralytics/datasets/coco8-seg.yaml
rename to ultralytics/cfg/datasets/coco8-seg.yaml
diff --git a/ultralytics/datasets/coco8.yaml b/ultralytics/cfg/datasets/coco8.yaml
similarity index 100%
rename from ultralytics/datasets/coco8.yaml
rename to ultralytics/cfg/datasets/coco8.yaml
diff --git a/ultralytics/datasets/xView.yaml b/ultralytics/cfg/datasets/xView.yaml
similarity index 97%
rename from ultralytics/datasets/xView.yaml
rename to ultralytics/cfg/datasets/xView.yaml
index 6049f6f60..bdc2d9175 100644
--- a/ultralytics/datasets/xView.yaml
+++ b/ultralytics/cfg/datasets/xView.yaml
@@ -87,8 +87,8 @@ download: |
from PIL import Image
from tqdm import tqdm
- from ultralytics.yolo.data.dataloaders.v5loader import autosplit
- from ultralytics.yolo.utils.ops import xyxy2xywhn
+ from ultralytics.data.utils import autosplit
+ from ultralytics.utils.ops import xyxy2xywhn
def convert_labels(fname=Path('xView/xView_train.geojson')):
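All of the dataset YAML download snippets above now import the same helper from the flattened path. A minimal sketch of calling it directly, assuming network access and using the coco8 asset URL from coco8.yaml as an illustrative target:

```python
from pathlib import Path

from ultralytics.utils.downloads import download  # was ultralytics.yolo.utils.downloads

# Fetch and unzip a small illustrative asset into ./datasets (URL taken from coco8.yaml)
download('https://ultralytics.com/assets/coco8.zip', dir=Path('datasets'), unzip=True)
```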
diff --git a/ultralytics/yolo/cfg/default.yaml b/ultralytics/cfg/default.yaml
similarity index 97%
rename from ultralytics/yolo/cfg/default.yaml
rename to ultralytics/cfg/default.yaml
index 25e4001ae..5babd254a 100644
--- a/ultralytics/yolo/cfg/default.yaml
+++ b/ultralytics/cfg/default.yaml
@@ -110,8 +110,5 @@ copy_paste: 0.0 # (float) segment copy-paste (probability)
# Custom config.yaml ---------------------------------------------------------------------------------------------------
cfg: # (str, optional) for overriding defaults.yaml
-# Debug, do not modify -------------------------------------------------------------------------------------------------
-v5loader: False # (bool) use legacy YOLOv5 dataloader (deprecated)
-
# Tracker settings ------------------------------------------------------------------------------------------------------
tracker: botsort.yaml # (str) tracker type, choices=[botsort.yaml, bytetrack.yaml]
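With the `v5loader` debug flag dropped from `default.yaml`, the remaining keys, including the `tracker` setting retained above, are plain per-call overrides. A hedged sketch of selecting the alternate tracker config, which now lives under `ultralytics/cfg/trackers` per the renames further down:

```python
from ultralytics import YOLO

model = YOLO('yolov8n.pt')
# 'bytetrack.yaml' resolves against the bundled ultralytics/cfg/trackers configs
model.track(source='https://ultralytics.com/images/bus.jpg', tracker='bytetrack.yaml', save=False)
```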
diff --git a/ultralytics/models/README.md b/ultralytics/cfg/models/README.md
similarity index 100%
rename from ultralytics/models/README.md
rename to ultralytics/cfg/models/README.md
diff --git a/ultralytics/models/rt-detr/rtdetr-l.yaml b/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml
similarity index 100%
rename from ultralytics/models/rt-detr/rtdetr-l.yaml
rename to ultralytics/cfg/models/rt-detr/rtdetr-l.yaml
diff --git a/ultralytics/models/rt-detr/rtdetr-x.yaml b/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml
similarity index 100%
rename from ultralytics/models/rt-detr/rtdetr-x.yaml
rename to ultralytics/cfg/models/rt-detr/rtdetr-x.yaml
diff --git a/ultralytics/models/v3/yolov3-spp.yaml b/ultralytics/cfg/models/v3/yolov3-spp.yaml
similarity index 100%
rename from ultralytics/models/v3/yolov3-spp.yaml
rename to ultralytics/cfg/models/v3/yolov3-spp.yaml
diff --git a/ultralytics/models/v3/yolov3-tiny.yaml b/ultralytics/cfg/models/v3/yolov3-tiny.yaml
similarity index 100%
rename from ultralytics/models/v3/yolov3-tiny.yaml
rename to ultralytics/cfg/models/v3/yolov3-tiny.yaml
diff --git a/ultralytics/models/v3/yolov3.yaml b/ultralytics/cfg/models/v3/yolov3.yaml
similarity index 100%
rename from ultralytics/models/v3/yolov3.yaml
rename to ultralytics/cfg/models/v3/yolov3.yaml
diff --git a/ultralytics/models/v5/yolov5-p6.yaml b/ultralytics/cfg/models/v5/yolov5-p6.yaml
similarity index 100%
rename from ultralytics/models/v5/yolov5-p6.yaml
rename to ultralytics/cfg/models/v5/yolov5-p6.yaml
diff --git a/ultralytics/models/v5/yolov5.yaml b/ultralytics/cfg/models/v5/yolov5.yaml
similarity index 100%
rename from ultralytics/models/v5/yolov5.yaml
rename to ultralytics/cfg/models/v5/yolov5.yaml
diff --git a/ultralytics/models/v6/yolov6.yaml b/ultralytics/cfg/models/v6/yolov6.yaml
similarity index 100%
rename from ultralytics/models/v6/yolov6.yaml
rename to ultralytics/cfg/models/v6/yolov6.yaml
diff --git a/ultralytics/models/v8/yolov8-cls.yaml b/ultralytics/cfg/models/v8/yolov8-cls.yaml
similarity index 100%
rename from ultralytics/models/v8/yolov8-cls.yaml
rename to ultralytics/cfg/models/v8/yolov8-cls.yaml
diff --git a/ultralytics/models/v8/yolov8-p2.yaml b/ultralytics/cfg/models/v8/yolov8-p2.yaml
similarity index 100%
rename from ultralytics/models/v8/yolov8-p2.yaml
rename to ultralytics/cfg/models/v8/yolov8-p2.yaml
diff --git a/ultralytics/models/v8/yolov8-p6.yaml b/ultralytics/cfg/models/v8/yolov8-p6.yaml
similarity index 100%
rename from ultralytics/models/v8/yolov8-p6.yaml
rename to ultralytics/cfg/models/v8/yolov8-p6.yaml
diff --git a/ultralytics/models/v8/yolov8-pose-p6.yaml b/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml
similarity index 100%
rename from ultralytics/models/v8/yolov8-pose-p6.yaml
rename to ultralytics/cfg/models/v8/yolov8-pose-p6.yaml
diff --git a/ultralytics/models/v8/yolov8-pose.yaml b/ultralytics/cfg/models/v8/yolov8-pose.yaml
similarity index 100%
rename from ultralytics/models/v8/yolov8-pose.yaml
rename to ultralytics/cfg/models/v8/yolov8-pose.yaml
diff --git a/ultralytics/models/v8/yolov8-rtdetr.yaml b/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml
similarity index 100%
rename from ultralytics/models/v8/yolov8-rtdetr.yaml
rename to ultralytics/cfg/models/v8/yolov8-rtdetr.yaml
diff --git a/ultralytics/models/v8/yolov8-seg.yaml b/ultralytics/cfg/models/v8/yolov8-seg.yaml
similarity index 100%
rename from ultralytics/models/v8/yolov8-seg.yaml
rename to ultralytics/cfg/models/v8/yolov8-seg.yaml
diff --git a/ultralytics/models/v8/yolov8.yaml b/ultralytics/cfg/models/v8/yolov8.yaml
similarity index 100%
rename from ultralytics/models/v8/yolov8.yaml
rename to ultralytics/cfg/models/v8/yolov8.yaml
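Although the architecture YAMLs move to `ultralytics/cfg/models`, they are still resolved by bare name, so building a model from a config rather than a checkpoint is unchanged. A minimal sketch:

```python
from ultralytics import YOLO

model = YOLO('yolov8n.yaml')  # resolves to ultralytics/cfg/models/v8/yolov8.yaml at scale 'n'
model.info()                  # prints the layer/parameter summary for the freshly built model
```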
diff --git a/ultralytics/tracker/cfg/botsort.yaml b/ultralytics/cfg/trackers/botsort.yaml
similarity index 100%
rename from ultralytics/tracker/cfg/botsort.yaml
rename to ultralytics/cfg/trackers/botsort.yaml
diff --git a/ultralytics/tracker/cfg/bytetrack.yaml b/ultralytics/cfg/trackers/bytetrack.yaml
similarity index 100%
rename from ultralytics/tracker/cfg/bytetrack.yaml
rename to ultralytics/cfg/trackers/bytetrack.yaml
diff --git a/ultralytics/data/__init__.py b/ultralytics/data/__init__.py
new file mode 100644
index 000000000..6fa7e8458
--- /dev/null
+++ b/ultralytics/data/__init__.py
@@ -0,0 +1,8 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+
+from .base import BaseDataset
+from .build import build_dataloader, build_yolo_dataset, load_inference_source
+from .dataset import ClassificationDataset, SemanticDataset, YOLODataset
+
+__all__ = ('BaseDataset', 'ClassificationDataset', 'SemanticDataset', 'YOLODataset', 'build_yolo_dataset',
+ 'build_dataloader', 'load_inference_source')
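The new `ultralytics.data` package exposes the dataset and dataloader builders directly. A hedged sketch of the v8 data pipeline using only the names re-exported above (assumes `coco8.yaml` can be resolved or downloaded):

```python
from ultralytics.cfg import get_cfg
from ultralytics.data import build_dataloader, build_yolo_dataset
from ultralytics.data.utils import check_det_dataset

cfg = get_cfg()                          # defaults from ultralytics/cfg/default.yaml
data = check_det_dataset('coco8.yaml')   # resolves (and if needed downloads) the dataset definition
dataset = build_yolo_dataset(cfg, data['train'], 4, data, mode='train', stride=32)
loader = build_dataloader(dataset, 4, 0, shuffle=False, rank=-1)
batch = next(iter(loader))
print(len(dataset), sorted(batch.keys()))  # image count and the batch dict keys ('img', 'cls', ...)
```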
diff --git a/ultralytics/yolo/data/annotator.py b/ultralytics/data/annotator.py
similarity index 100%
rename from ultralytics/yolo/data/annotator.py
rename to ultralytics/data/annotator.py
diff --git a/ultralytics/yolo/data/augment.py b/ultralytics/data/augment.py
similarity index 99%
rename from ultralytics/yolo/data/augment.py
rename to ultralytics/data/augment.py
index d6881595b..12d09cff2 100644
--- a/ultralytics/yolo/data/augment.py
+++ b/ultralytics/data/augment.py
@@ -9,11 +9,12 @@ import numpy as np
import torch
import torchvision.transforms as T
-from ..utils import LOGGER, colorstr
-from ..utils.checks import check_version
-from ..utils.instance import Instances
-from ..utils.metrics import bbox_ioa
-from ..utils.ops import segment2box
+from ultralytics.utils import LOGGER, colorstr
+from ultralytics.utils.checks import check_version
+from ultralytics.utils.instance import Instances
+from ultralytics.utils.metrics import bbox_ioa
+from ultralytics.utils.ops import segment2box
+
from .utils import polygons2masks, polygons2masks_overlap
POSE_FLIPLR_INDEX = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
diff --git a/ultralytics/yolo/data/base.py b/ultralytics/data/base.py
similarity index 99%
rename from ultralytics/yolo/data/base.py
rename to ultralytics/data/base.py
index d2e9793c1..bfc3cc19e 100644
--- a/ultralytics/yolo/data/base.py
+++ b/ultralytics/data/base.py
@@ -15,7 +15,8 @@ import psutil
from torch.utils.data import Dataset
from tqdm import tqdm
-from ..utils import DEFAULT_CFG, LOCAL_RANK, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT
+from ultralytics.utils import DEFAULT_CFG, LOCAL_RANK, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT
+
from .utils import HELP_URL, IMG_FORMATS
diff --git a/ultralytics/yolo/data/build.py b/ultralytics/data/build.py
similarity index 94%
rename from ultralytics/yolo/data/build.py
rename to ultralytics/data/build.py
index 5499c76b7..8fd8602bc 100644
--- a/ultralytics/yolo/data/build.py
+++ b/ultralytics/data/build.py
@@ -9,12 +9,12 @@ import torch
from PIL import Image
from torch.utils.data import dataloader, distributed
-from ultralytics.yolo.data.dataloaders.stream_loaders import (LOADERS, LoadImages, LoadPilAndNumpy, LoadScreenshots,
- LoadStreams, LoadTensor, SourceTypes, autocast_list)
-from ultralytics.yolo.data.utils import IMG_FORMATS, VID_FORMATS
-from ultralytics.yolo.utils.checks import check_file
+from ultralytics.data.loaders import (LOADERS, LoadImages, LoadPilAndNumpy, LoadScreenshots, LoadStreams, LoadTensor,
+ SourceTypes, autocast_list)
+from ultralytics.data.utils import IMG_FORMATS, VID_FORMATS
+from ultralytics.utils import RANK, colorstr
+from ultralytics.utils.checks import check_file
-from ..utils import RANK, colorstr
from .dataset import YOLODataset
from .utils import PIN_MEMORY
diff --git a/ultralytics/yolo/data/converter.py b/ultralytics/data/converter.py
similarity index 98%
rename from ultralytics/yolo/data/converter.py
rename to ultralytics/data/converter.py
index c1278dd95..c6f8547f9 100644
--- a/ultralytics/yolo/data/converter.py
+++ b/ultralytics/data/converter.py
@@ -6,8 +6,8 @@ import cv2
import numpy as np
from tqdm import tqdm
-from ultralytics.yolo.utils.checks import check_requirements
-from ultralytics.yolo.utils.files import make_dirs
+from ultralytics.utils.checks import check_requirements
+from ultralytics.utils.files import make_dirs
def coco91_to_coco80_class():
diff --git a/ultralytics/tracker/utils/__init__.py b/ultralytics/data/dataloaders/__init__.py
similarity index 100%
rename from ultralytics/tracker/utils/__init__.py
rename to ultralytics/data/dataloaders/__init__.py
diff --git a/ultralytics/yolo/data/dataset.py b/ultralytics/data/dataset.py
similarity index 99%
rename from ultralytics/yolo/data/dataset.py
rename to ultralytics/data/dataset.py
index 17e6d47c1..575243f38 100644
--- a/ultralytics/yolo/data/dataset.py
+++ b/ultralytics/data/dataset.py
@@ -10,7 +10,8 @@ import torch
import torchvision
from tqdm import tqdm
-from ..utils import LOCAL_RANK, NUM_THREADS, TQDM_BAR_FORMAT, is_dir_writeable
+from ultralytics.utils import LOCAL_RANK, NUM_THREADS, TQDM_BAR_FORMAT, is_dir_writeable
+
from .augment import Compose, Format, Instances, LetterBox, classify_albumentations, classify_transforms, v8_transforms
from .base import BaseDataset
from .utils import HELP_URL, LOGGER, get_hash, img2label_paths, verify_image_label
diff --git a/ultralytics/yolo/data/dataloaders/stream_loaders.py b/ultralytics/data/loaders.py
similarity index 98%
rename from ultralytics/yolo/data/dataloaders/stream_loaders.py
rename to ultralytics/data/loaders.py
index d124a4351..f75f3f7f9 100644
--- a/ultralytics/yolo/data/dataloaders/stream_loaders.py
+++ b/ultralytics/data/loaders.py
@@ -15,9 +15,9 @@ import requests
import torch
from PIL import Image
-from ultralytics.yolo.data.utils import IMG_FORMATS, VID_FORMATS
-from ultralytics.yolo.utils import LOGGER, ROOT, is_colab, is_kaggle, ops
-from ultralytics.yolo.utils.checks import check_requirements
+from ultralytics.data.utils import IMG_FORMATS, VID_FORMATS
+from ultralytics.utils import LOGGER, ROOT, is_colab, is_kaggle, ops
+from ultralytics.utils.checks import check_requirements
@dataclass
@@ -318,11 +318,10 @@ class LoadTensor:
s = f'WARNING ⚠️ torch.Tensor inputs should be BCHW i.e. shape(1, 3, 640, 640) ' \
f'divisible by stride {stride}. Input shape{tuple(im.shape)} is incompatible.'
if len(im.shape) != 4:
- if len(im.shape) == 3:
- LOGGER.warning(s)
- im = im.unsqueeze(0)
- else:
+ if len(im.shape) != 3:
raise ValueError(s)
+ LOGGER.warning(s)
+ im = im.unsqueeze(0)
if im.shape[2] % stride or im.shape[3] % stride:
raise ValueError(s)
if im.max() > 1.0:
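The reworked `LoadTensor` shape check above inverts the nesting without changing behaviour. A standalone restatement (with a hypothetical `_check_bchw` name) that can be run to confirm the 3-D warn-and-unsqueeze path and the non-3-D/4-D error path are preserved:

```python
import torch

def _check_bchw(im: torch.Tensor, stride: int = 32) -> torch.Tensor:
    """Reproduce the refactored LoadTensor check: warn and add a batch dim for 3-D input, else raise."""
    s = (f'WARNING ⚠️ torch.Tensor inputs should be BCHW i.e. shape(1, 3, 640, 640) '
         f'divisible by stride {stride}. Input shape{tuple(im.shape)} is incompatible.')
    if len(im.shape) != 4:
        if len(im.shape) != 3:
            raise ValueError(s)
        print(s)               # stand-in for LOGGER.warning(s)
        im = im.unsqueeze(0)
    if im.shape[2] % stride or im.shape[3] % stride:
        raise ValueError(s)
    return im

print(_check_bchw(torch.zeros(3, 640, 640)).shape)  # -> torch.Size([1, 3, 640, 640]) after the warning
```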
diff --git a/ultralytics/yolo/data/scripts/download_weights.sh b/ultralytics/data/scripts/download_weights.sh
similarity index 70%
rename from ultralytics/yolo/data/scripts/download_weights.sh
rename to ultralytics/data/scripts/download_weights.sh
index 72502a366..87db31fe1 100755
--- a/ultralytics/yolo/data/scripts/download_weights.sh
+++ b/ultralytics/data/scripts/download_weights.sh
@@ -1,7 +1,7 @@
#!/bin/bash
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Download latest models from https://github.com/ultralytics/assets/releases
-# Example usage: bash ultralytics/yolo/data/scripts/download_weights.sh
+# Example usage: bash ultralytics/data/scripts/download_weights.sh
# parent
# └── weights
# ├── yolov8n.pt ← downloads here
@@ -9,9 +9,9 @@
# └── ...
python - < None:
python - <<EOF
-from ultralytics.yolo.utils.downloads import attempt_download_asset
+from ultralytics.utils.downloads import attempt_download_asset
diff --git a/ultralytics/yolo/engine/model.py b/ultralytics/engine/model.py
rename from ultralytics/yolo/engine/model.py
rename to ultralytics/engine/model.py
--- a/ultralytics/yolo/engine/model.py
+++ b/ultralytics/engine/model.py
fuse() -> None:
Fuses the model for faster inference.
- predict(source=None, stream=False, **kwargs) -> List[ultralytics.yolo.engine.results.Results]:
+ predict(source=None, stream=False, **kwargs) -> List[ultralytics.engine.results.Results]:
Performs prediction using the YOLO model.
Returns:
- list(ultralytics.yolo.engine.results.Results): The prediction results.
+ list(ultralytics.engine.results.Results): The prediction results.
"""
def __init__(self, model: Union[str, Path] = 'yolov8n.pt', task=None) -> None:
@@ -230,7 +229,7 @@ class YOLO:
Check the 'configuration' section in the documentation for all available options.
Returns:
- (List[ultralytics.yolo.engine.results.Results]): The prediction results.
+ (List[ultralytics.engine.results.Results]): The prediction results.
"""
if source is None:
source = ROOT / 'assets' if is_git_dir() else 'https://ultralytics.com/images/bus.jpg'
@@ -265,11 +264,11 @@ class YOLO:
**kwargs (optional): Additional keyword arguments for the tracking process.
Returns:
- (List[ultralytics.yolo.engine.results.Results]): The tracking results.
+ (List[ultralytics.engine.results.Results]): The tracking results.
"""
if not hasattr(self.predictor, 'trackers'):
- from ultralytics.tracker import register_tracker
+ from ultralytics.trackers import register_tracker
register_tracker(self, persist)
# ByteTrack-based method needs low confidence predictions as input
conf = kwargs.get('conf') or 0.1
@@ -315,7 +314,7 @@ class YOLO:
**kwargs : Any other args accepted by the validators. To see all args check 'configuration' section in docs
"""
self._check_is_pytorch_model()
- from ultralytics.yolo.utils.benchmarks import benchmark
+ from ultralytics.utils.benchmarks import benchmark
overrides = self.model.args.copy()
overrides.update(kwargs)
overrides['mode'] = 'benchmark'
@@ -389,7 +388,7 @@ class YOLO:
def tune(self, *args, **kwargs):
"""
- Runs hyperparameter tuning using Ray Tune. See ultralytics.yolo.utils.tuner.run_ray_tune for Args.
+ Runs hyperparameter tuning using Ray Tune. See ultralytics.utils.tuner.run_ray_tune for Args.
Returns:
(dict): A dictionary containing the results of the hyperparameter search.
@@ -398,7 +397,7 @@ class YOLO:
ModuleNotFoundError: If Ray Tune is not installed.
"""
self._check_is_pytorch_model()
- from ultralytics.yolo.utils.tuner import run_ray_tune
+ from ultralytics.utils.tuner import run_ray_tune
return run_ray_tune(self, *args, **kwargs)
@property
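A hedged smoke test of the renamed return type that the docstrings above now reference (downloads `yolov8n.pt` and the sample image on first run):

```python
from ultralytics import YOLO

results = YOLO('yolov8n.pt').predict('https://ultralytics.com/images/bus.jpg', imgsz=320, verbose=False)
print(type(results[0]))  # <class 'ultralytics.engine.results.Results'>
```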
diff --git a/ultralytics/yolo/engine/predictor.py b/ultralytics/engine/predictor.py
similarity index 97%
rename from ultralytics/yolo/engine/predictor.py
rename to ultralytics/engine/predictor.py
index 011d0abd0..142fe63c2 100644
--- a/ultralytics/yolo/engine/predictor.py
+++ b/ultralytics/engine/predictor.py
@@ -34,14 +34,14 @@ import cv2
import numpy as np
import torch
+from ultralytics.cfg import get_cfg
+from ultralytics.data import load_inference_source
+from ultralytics.data.augment import LetterBox, classify_transforms
from ultralytics.nn.autobackend import AutoBackend
-from ultralytics.yolo.cfg import get_cfg
-from ultralytics.yolo.data import load_inference_source
-from ultralytics.yolo.data.augment import LetterBox, classify_transforms
-from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, MACOS, SETTINGS, WINDOWS, callbacks, colorstr, ops
-from ultralytics.yolo.utils.checks import check_imgsz, check_imshow
-from ultralytics.yolo.utils.files import increment_path
-from ultralytics.yolo.utils.torch_utils import select_device, smart_inference_mode
+from ultralytics.utils import DEFAULT_CFG, LOGGER, MACOS, SETTINGS, WINDOWS, callbacks, colorstr, ops
+from ultralytics.utils.checks import check_imgsz, check_imshow
+from ultralytics.utils.files import increment_path
+from ultralytics.utils.torch_utils import select_device, smart_inference_mode
STREAM_WARNING = """
WARNING ⚠️ stream/video/webcam/dir predict source will accumulate results in RAM unless `stream=True` is passed,
diff --git a/ultralytics/yolo/engine/results.py b/ultralytics/engine/results.py
similarity index 99%
rename from ultralytics/yolo/engine/results.py
rename to ultralytics/engine/results.py
index e934730d6..7978b73b6 100644
--- a/ultralytics/yolo/engine/results.py
+++ b/ultralytics/engine/results.py
@@ -12,9 +12,9 @@ from pathlib import Path
import numpy as np
import torch
-from ultralytics.yolo.data.augment import LetterBox
-from ultralytics.yolo.utils import LOGGER, SimpleClass, deprecation_warn, ops
-from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box
+from ultralytics.data.augment import LetterBox
+from ultralytics.utils import LOGGER, SimpleClass, deprecation_warn, ops
+from ultralytics.utils.plotting import Annotator, colors, save_one_box
class BaseTensor(SimpleClass):
diff --git a/ultralytics/yolo/engine/trainer.py b/ultralytics/engine/trainer.py
similarity index 97%
rename from ultralytics/yolo/engine/trainer.py
rename to ultralytics/engine/trainer.py
index 144be9c8d..5c034cfc0 100644
--- a/ultralytics/yolo/engine/trainer.py
+++ b/ultralytics/engine/trainer.py
@@ -21,17 +21,17 @@ from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from tqdm import tqdm
+from ultralytics.cfg import get_cfg
+from ultralytics.data.utils import check_cls_dataset, check_det_dataset
from ultralytics.nn.tasks import attempt_load_one_weight, attempt_load_weights
-from ultralytics.yolo.cfg import get_cfg
-from ultralytics.yolo.data.utils import check_cls_dataset, check_det_dataset
-from ultralytics.yolo.utils import (DEFAULT_CFG, LOGGER, RANK, SETTINGS, TQDM_BAR_FORMAT, __version__, callbacks,
- clean_url, colorstr, emojis, yaml_save)
-from ultralytics.yolo.utils.autobatch import check_train_batch_size
-from ultralytics.yolo.utils.checks import check_amp, check_file, check_imgsz, print_args
-from ultralytics.yolo.utils.dist import ddp_cleanup, generate_ddp_command
-from ultralytics.yolo.utils.files import get_latest_run, increment_path
-from ultralytics.yolo.utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, init_seeds, one_cycle,
- select_device, strip_optimizer)
+from ultralytics.utils import (DEFAULT_CFG, LOGGER, RANK, SETTINGS, TQDM_BAR_FORMAT, __version__, callbacks, clean_url,
+ colorstr, emojis, yaml_save)
+from ultralytics.utils.autobatch import check_train_batch_size
+from ultralytics.utils.checks import check_amp, check_file, check_imgsz, print_args
+from ultralytics.utils.dist import ddp_cleanup, generate_ddp_command
+from ultralytics.utils.files import get_latest_run, increment_path
+from ultralytics.utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, init_seeds, one_cycle, select_device,
+ strip_optimizer)
class BaseTrainer:
@@ -244,7 +244,7 @@ class BaseTrainer:
metric_keys = self.validator.metrics.keys + self.label_loss_items(prefix='val')
self.metrics = dict(zip(metric_keys, [0] * len(metric_keys))) # TODO: init metrics for plot_results()?
self.ema = ModelEMA(self.model)
- if self.args.plots and not self.args.v5loader:
+ if self.args.plots:
self.plot_training_labels()
# Optimizer
diff --git a/ultralytics/yolo/engine/validator.py b/ultralytics/engine/validator.py
similarity index 95%
rename from ultralytics/yolo/engine/validator.py
rename to ultralytics/engine/validator.py
index a3faebf7c..4d0f9f5d5 100644
--- a/ultralytics/yolo/engine/validator.py
+++ b/ultralytics/engine/validator.py
@@ -25,14 +25,14 @@ from pathlib import Path
import torch
from tqdm import tqdm
+from ultralytics.cfg import get_cfg
+from ultralytics.data.utils import check_cls_dataset, check_det_dataset
from ultralytics.nn.autobackend import AutoBackend
-from ultralytics.yolo.cfg import get_cfg
-from ultralytics.yolo.data.utils import check_cls_dataset, check_det_dataset
-from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, RANK, SETTINGS, TQDM_BAR_FORMAT, callbacks, colorstr, emojis
-from ultralytics.yolo.utils.checks import check_imgsz
-from ultralytics.yolo.utils.files import increment_path
-from ultralytics.yolo.utils.ops import Profile
-from ultralytics.yolo.utils.torch_utils import de_parallel, select_device, smart_inference_mode
+from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK, SETTINGS, TQDM_BAR_FORMAT, callbacks, colorstr, emojis
+from ultralytics.utils.checks import check_imgsz
+from ultralytics.utils.files import increment_path
+from ultralytics.utils.ops import Profile
+from ultralytics.utils.torch_utils import de_parallel, select_device, smart_inference_mode
class BaseValidator:
diff --git a/ultralytics/hub/__init__.py b/ultralytics/hub/__init__.py
index 6059083af..6ddb85558 100644
--- a/ultralytics/hub/__init__.py
+++ b/ultralytics/hub/__init__.py
@@ -2,10 +2,10 @@
import requests
+from ultralytics.data.utils import HUBDatasetStats
from ultralytics.hub.auth import Auth
from ultralytics.hub.utils import PREFIX
-from ultralytics.yolo.data.utils import HUBDatasetStats
-from ultralytics.yolo.utils import LOGGER, SETTINGS, USER_CONFIG_DIR, yaml_save
+from ultralytics.utils import LOGGER, SETTINGS, USER_CONFIG_DIR, yaml_save
def login(api_key=''):
@@ -65,7 +65,7 @@ def reset_model(model_id=''):
def export_fmts_hub():
"""Returns a list of HUB-supported export formats."""
- from ultralytics.yolo.engine.exporter import export_formats
+ from ultralytics.engine.exporter import export_formats
return list(export_formats()['Argument'][1:]) + ['ultralytics_tflite', 'ultralytics_coreml']
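The HUB helper above now pulls its format table from `ultralytics.engine.exporter`; calling it is unchanged:

```python
from ultralytics.hub import export_fmts_hub

# Export arguments from the exporter table plus the two HUB-only formats appended above
print(export_fmts_hub())
```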
diff --git a/ultralytics/hub/auth.py b/ultralytics/hub/auth.py
index 960b3dc30..d8d728fb1 100644
--- a/ultralytics/hub/auth.py
+++ b/ultralytics/hub/auth.py
@@ -3,7 +3,7 @@
import requests
from ultralytics.hub.utils import HUB_API_ROOT, PREFIX, request_with_credentials
-from ultralytics.yolo.utils import LOGGER, SETTINGS, emojis, is_colab, set_settings
+from ultralytics.utils import LOGGER, SETTINGS, emojis, is_colab, set_settings
API_KEY_URL = 'https://hub.ultralytics.com/settings?tab=api+keys'
diff --git a/ultralytics/hub/session.py b/ultralytics/hub/session.py
index 1a96d209e..b7856e342 100644
--- a/ultralytics/hub/session.py
+++ b/ultralytics/hub/session.py
@@ -7,8 +7,8 @@ from time import sleep
import requests
from ultralytics.hub.utils import HUB_API_ROOT, PREFIX, smart_request
-from ultralytics.yolo.utils import LOGGER, __version__, checks, emojis, is_colab, threaded
-from ultralytics.yolo.utils.errors import HUBModelError
+from ultralytics.utils import LOGGER, __version__, checks, emojis, is_colab, threaded
+from ultralytics.utils.errors import HUBModelError
AGENT_NAME = f'python-{__version__}-colab' if is_colab() else f'python-{__version__}-local'
diff --git a/ultralytics/hub/utils.py b/ultralytics/hub/utils.py
index ecd64a95e..adf0b3f51 100644
--- a/ultralytics/hub/utils.py
+++ b/ultralytics/hub/utils.py
@@ -11,9 +11,8 @@ from pathlib import Path
import requests
from tqdm import tqdm
-from ultralytics.yolo.utils import (ENVIRONMENT, LOGGER, ONLINE, RANK, SETTINGS, TESTS_RUNNING, TQDM_BAR_FORMAT,
- TryExcept, __version__, colorstr, get_git_origin_url, is_colab, is_git_dir,
- is_pip_package)
+from ultralytics.utils import (ENVIRONMENT, LOGGER, ONLINE, RANK, SETTINGS, TESTS_RUNNING, TQDM_BAR_FORMAT, TryExcept,
+ __version__, colorstr, get_git_origin_url, is_colab, is_git_dir, is_pip_package)
PREFIX = colorstr('Ultralytics HUB: ')
HELP_MSG = 'If this issue persists please visit https://github.com/ultralytics/hub/issues for assistance.'
diff --git a/ultralytics/vit/__init__.py b/ultralytics/models/__init__.py
similarity index 69%
rename from ultralytics/vit/__init__.py
rename to ultralytics/models/__init__.py
index 8e96f915d..cca622266 100644
--- a/ultralytics/vit/__init__.py
+++ b/ultralytics/models/__init__.py
@@ -1,5 +1,3 @@
-# Ultralytics YOLO 🚀, AGPL-3.0 license
-
from .rtdetr import RTDETR
from .sam import SAM
diff --git a/ultralytics/yolo/fastsam/__init__.py b/ultralytics/models/fastsam/__init__.py
similarity index 100%
rename from ultralytics/yolo/fastsam/__init__.py
rename to ultralytics/models/fastsam/__init__.py
diff --git a/ultralytics/yolo/fastsam/model.py b/ultralytics/models/fastsam/model.py
similarity index 90%
rename from ultralytics/yolo/fastsam/model.py
rename to ultralytics/models/fastsam/model.py
index 36c7d4270..96ebc30d8 100644
--- a/ultralytics/yolo/fastsam/model.py
+++ b/ultralytics/models/fastsam/model.py
@@ -9,13 +9,13 @@ Usage - Predict:
results = model.predict('ultralytics/assets/bus.jpg')
"""
-from ultralytics.yolo.cfg import get_cfg
-from ultralytics.yolo.engine.exporter import Exporter
-from ultralytics.yolo.engine.model import YOLO
-from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, ROOT, is_git_dir
-from ultralytics.yolo.utils.checks import check_imgsz
+from ultralytics.cfg import get_cfg
+from ultralytics.engine.exporter import Exporter
+from ultralytics.engine.model import YOLO
+from ultralytics.utils import DEFAULT_CFG, LOGGER, ROOT, is_git_dir
+from ultralytics.utils.checks import check_imgsz
+from ultralytics.utils.torch_utils import model_info, smart_inference_mode
-from ...yolo.utils.torch_utils import model_info, smart_inference_mode
from .predict import FastSAMPredictor
@@ -41,7 +41,7 @@ class FastSAM(YOLO):
Check the 'configuration' section in the documentation for all available options.
Returns:
- (List[ultralytics.yolo.engine.results.Results]): The prediction results.
+ (List[ultralytics.engine.results.Results]): The prediction results.
"""
if source is None:
source = ROOT / 'assets' if is_git_dir() else 'https://ultralytics.com/images/bus.jpg'
diff --git a/ultralytics/yolo/fastsam/predict.py b/ultralytics/models/fastsam/predict.py
similarity index 92%
rename from ultralytics/yolo/fastsam/predict.py
rename to ultralytics/models/fastsam/predict.py
index 0a6ac277c..7453632cd 100644
--- a/ultralytics/yolo/fastsam/predict.py
+++ b/ultralytics/models/fastsam/predict.py
@@ -2,10 +2,10 @@
import torch
-from ultralytics.yolo.engine.results import Results
-from ultralytics.yolo.fastsam.utils import bbox_iou
-from ultralytics.yolo.utils import DEFAULT_CFG, ops
-from ultralytics.yolo.v8.detect.predict import DetectionPredictor
+from ultralytics.engine.results import Results
+from ultralytics.models.fastsam.utils import bbox_iou
+from ultralytics.models.yolo.detect.predict import DetectionPredictor
+from ultralytics.utils import DEFAULT_CFG, ops
class FastSAMPredictor(DetectionPredictor):
diff --git a/ultralytics/yolo/fastsam/prompt.py b/ultralytics/models/fastsam/prompt.py
similarity index 99%
rename from ultralytics/yolo/fastsam/prompt.py
rename to ultralytics/models/fastsam/prompt.py
index d34968d8e..6cee91df0 100644
--- a/ultralytics/yolo/fastsam/prompt.py
+++ b/ultralytics/models/fastsam/prompt.py
@@ -22,7 +22,7 @@ class FastSAMPrompt:
try:
import clip # for linear_assignment
except ImportError:
- from ultralytics.yolo.utils.checks import check_requirements
+ from ultralytics.utils.checks import check_requirements
check_requirements('git+https://github.com/openai/CLIP.git') # required before installing lap from source
import clip
self.clip = clip
diff --git a/ultralytics/yolo/fastsam/utils.py b/ultralytics/models/fastsam/utils.py
similarity index 100%
rename from ultralytics/yolo/fastsam/utils.py
rename to ultralytics/models/fastsam/utils.py
diff --git a/ultralytics/yolo/fastsam/val.py b/ultralytics/models/fastsam/val.py
similarity index 97%
rename from ultralytics/yolo/fastsam/val.py
rename to ultralytics/models/fastsam/val.py
index 250bd5e41..9bbae57ba 100644
--- a/ultralytics/yolo/fastsam/val.py
+++ b/ultralytics/models/fastsam/val.py
@@ -7,11 +7,11 @@ import numpy as np
import torch
import torch.nn.functional as F
-from ultralytics.yolo.utils import LOGGER, NUM_THREADS, ops
-from ultralytics.yolo.utils.checks import check_requirements
-from ultralytics.yolo.utils.metrics import SegmentMetrics, box_iou, mask_iou
-from ultralytics.yolo.utils.plotting import output_to_target, plot_images
-from ultralytics.yolo.v8.detect import DetectionValidator
+from ultralytics.models.yolo.detect import DetectionValidator
+from ultralytics.utils import LOGGER, NUM_THREADS, ops
+from ultralytics.utils.checks import check_requirements
+from ultralytics.utils.metrics import SegmentMetrics, box_iou, mask_iou
+from ultralytics.utils.plotting import output_to_target, plot_images
class FastSAMValidator(DetectionValidator):
diff --git a/ultralytics/yolo/nas/__init__.py b/ultralytics/models/nas/__init__.py
similarity index 100%
rename from ultralytics/yolo/nas/__init__.py
rename to ultralytics/models/nas/__init__.py
diff --git a/ultralytics/yolo/nas/model.py b/ultralytics/models/nas/model.py
similarity index 92%
rename from ultralytics/yolo/nas/model.py
rename to ultralytics/models/nas/model.py
index bfe7dcdfd..1fece728a 100644
--- a/ultralytics/yolo/nas/model.py
+++ b/ultralytics/models/nas/model.py
@@ -13,12 +13,12 @@ from pathlib import Path
import torch
-from ultralytics.yolo.cfg import get_cfg
-from ultralytics.yolo.engine.exporter import Exporter
-from ultralytics.yolo.utils import DEFAULT_CFG, DEFAULT_CFG_DICT, LOGGER, ROOT, is_git_dir
-from ultralytics.yolo.utils.checks import check_imgsz
+from ultralytics.cfg import get_cfg
+from ultralytics.engine.exporter import Exporter
+from ultralytics.utils import DEFAULT_CFG, DEFAULT_CFG_DICT, LOGGER, ROOT, is_git_dir
+from ultralytics.utils.checks import check_imgsz
+from ultralytics.utils.torch_utils import model_info, smart_inference_mode
-from ...yolo.utils.torch_utils import model_info, smart_inference_mode
from .predict import NASPredictor
from .val import NASValidator
@@ -65,7 +65,7 @@ class NAS:
Check the 'configuration' section in the documentation for all available options.
Returns:
- (List[ultralytics.yolo.engine.results.Results]): The prediction results.
+ (List[ultralytics.engine.results.Results]): The prediction results.
"""
if source is None:
source = ROOT / 'assets' if is_git_dir() else 'https://ultralytics.com/images/bus.jpg'
diff --git a/ultralytics/yolo/nas/predict.py b/ultralytics/models/nas/predict.py
similarity index 86%
rename from ultralytics/yolo/nas/predict.py
rename to ultralytics/models/nas/predict.py
index e135bc1ef..f5ae4308a 100644
--- a/ultralytics/yolo/nas/predict.py
+++ b/ultralytics/models/nas/predict.py
@@ -2,10 +2,10 @@
import torch
-from ultralytics.yolo.engine.predictor import BasePredictor
-from ultralytics.yolo.engine.results import Results
-from ultralytics.yolo.utils import ops
-from ultralytics.yolo.utils.ops import xyxy2xywh
+from ultralytics.engine.predictor import BasePredictor
+from ultralytics.engine.results import Results
+from ultralytics.utils import ops
+from ultralytics.utils.ops import xyxy2xywh
class NASPredictor(BasePredictor):
diff --git a/ultralytics/yolo/nas/val.py b/ultralytics/models/nas/val.py
similarity index 84%
rename from ultralytics/yolo/nas/val.py
rename to ultralytics/models/nas/val.py
index 474cf6bd0..05986c035 100644
--- a/ultralytics/yolo/nas/val.py
+++ b/ultralytics/models/nas/val.py
@@ -2,9 +2,9 @@
import torch
-from ultralytics.yolo.utils import ops
-from ultralytics.yolo.utils.ops import xyxy2xywh
-from ultralytics.yolo.v8.detect import DetectionValidator
+from ultralytics.models.yolo.detect import DetectionValidator
+from ultralytics.utils import ops
+from ultralytics.utils.ops import xyxy2xywh
__all__ = ['NASValidator']
diff --git a/ultralytics/vit/rtdetr/__init__.py b/ultralytics/models/rtdetr/__init__.py
similarity index 100%
rename from ultralytics/vit/rtdetr/__init__.py
rename to ultralytics/models/rtdetr/__init__.py
diff --git a/ultralytics/vit/rtdetr/model.py b/ultralytics/models/rtdetr/model.py
similarity index 94%
rename from ultralytics/vit/rtdetr/model.py
rename to ultralytics/models/rtdetr/model.py
index 259c7c976..19c903eda 100644
--- a/ultralytics/vit/rtdetr/model.py
+++ b/ultralytics/models/rtdetr/model.py
@@ -7,12 +7,12 @@ from pathlib import Path
import torch.nn as nn
+from ultralytics.cfg import get_cfg
+from ultralytics.engine.exporter import Exporter
from ultralytics.nn.tasks import RTDETRDetectionModel, attempt_load_one_weight, yaml_model_load
-from ultralytics.yolo.cfg import get_cfg
-from ultralytics.yolo.engine.exporter import Exporter
-from ultralytics.yolo.utils import DEFAULT_CFG, DEFAULT_CFG_DICT, LOGGER, RANK, ROOT, is_git_dir
-from ultralytics.yolo.utils.checks import check_imgsz
-from ultralytics.yolo.utils.torch_utils import model_info, smart_inference_mode
+from ultralytics.utils import DEFAULT_CFG, DEFAULT_CFG_DICT, LOGGER, RANK, ROOT, is_git_dir
+from ultralytics.utils.checks import check_imgsz
+from ultralytics.utils.torch_utils import model_info, smart_inference_mode
from .predict import RTDETRPredictor
from .train import RTDETRTrainer
@@ -72,7 +72,7 @@ class RTDETR:
Check the 'configuration' section in the documentation for all available options.
Returns:
- (List[ultralytics.yolo.engine.results.Results]): The prediction results.
+ (List[ultralytics.engine.results.Results]): The prediction results.
"""
if source is None:
source = ROOT / 'assets' if is_git_dir() else 'https://ultralytics.com/images/bus.jpg'
diff --git a/ultralytics/vit/rtdetr/predict.py b/ultralytics/models/rtdetr/predict.py
similarity index 89%
rename from ultralytics/vit/rtdetr/predict.py
rename to ultralytics/models/rtdetr/predict.py
index 77c02c24d..356098d0f 100644
--- a/ultralytics/vit/rtdetr/predict.py
+++ b/ultralytics/models/rtdetr/predict.py
@@ -2,10 +2,10 @@
import torch
-from ultralytics.yolo.data.augment import LetterBox
-from ultralytics.yolo.engine.predictor import BasePredictor
-from ultralytics.yolo.engine.results import Results
-from ultralytics.yolo.utils import ops
+from ultralytics.data.augment import LetterBox
+from ultralytics.engine.predictor import BasePredictor
+from ultralytics.engine.results import Results
+from ultralytics.utils import ops
class RTDETRPredictor(BasePredictor):
diff --git a/ultralytics/vit/rtdetr/train.py b/ultralytics/models/rtdetr/train.py
similarity index 95%
rename from ultralytics/vit/rtdetr/train.py
rename to ultralytics/models/rtdetr/train.py
index 54eeaf4aa..dc3bc7f90 100644
--- a/ultralytics/vit/rtdetr/train.py
+++ b/ultralytics/models/rtdetr/train.py
@@ -4,9 +4,9 @@ from copy import copy
import torch
+from ultralytics.models.yolo.detect import DetectionTrainer
from ultralytics.nn.tasks import RTDETRDetectionModel
-from ultralytics.yolo.utils import DEFAULT_CFG, RANK, colorstr
-from ultralytics.yolo.v8.detect import DetectionTrainer
+from ultralytics.utils import DEFAULT_CFG, RANK, colorstr
from .val import RTDETRDataset, RTDETRValidator
diff --git a/ultralytics/vit/rtdetr/val.py b/ultralytics/models/rtdetr/val.py
similarity index 96%
rename from ultralytics/vit/rtdetr/val.py
rename to ultralytics/models/rtdetr/val.py
index cfee29253..ff6855a5c 100644
--- a/ultralytics/vit/rtdetr/val.py
+++ b/ultralytics/models/rtdetr/val.py
@@ -6,10 +6,10 @@ import cv2
import numpy as np
import torch
-from ultralytics.yolo.data import YOLODataset
-from ultralytics.yolo.data.augment import Compose, Format, v8_transforms
-from ultralytics.yolo.utils import colorstr, ops
-from ultralytics.yolo.v8.detect import DetectionValidator
+from ultralytics.data import YOLODataset
+from ultralytics.data.augment import Compose, Format, v8_transforms
+from ultralytics.models.yolo.detect import DetectionValidator
+from ultralytics.utils import colorstr, ops
__all__ = 'RTDETRValidator', # tuple or list
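The relocated RT-DETR (and SAM) interfaces remain importable from the top-level package, which is what the architecture-guessing branch in the CLI `entrypoint()` earlier in this diff relies on. A minimal sketch (weights download on first use):

```python
from ultralytics import RTDETR  # SAM is exposed the same way

model = RTDETR('rtdetr-l.pt')
model.predict('https://ultralytics.com/images/bus.jpg')
```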
diff --git a/ultralytics/vit/sam/__init__.py b/ultralytics/models/sam/__init__.py
similarity index 100%
rename from ultralytics/vit/sam/__init__.py
rename to ultralytics/models/sam/__init__.py
diff --git a/ultralytics/vit/sam/amg.py b/ultralytics/models/sam/amg.py
similarity index 100%
rename from ultralytics/vit/sam/amg.py
rename to ultralytics/models/sam/amg.py
diff --git a/ultralytics/vit/sam/build.py b/ultralytics/models/sam/build.py
similarity index 98%
rename from ultralytics/vit/sam/build.py
rename to ultralytics/models/sam/build.py
index 3572c2e93..21da265c4 100644
--- a/ultralytics/vit/sam/build.py
+++ b/ultralytics/models/sam/build.py
@@ -10,7 +10,8 @@ from functools import partial
import torch
-from ...yolo.utils.downloads import attempt_download_asset
+from ultralytics.utils.downloads import attempt_download_asset
+
from .modules.decoders import MaskDecoder
from .modules.encoders import ImageEncoderViT, PromptEncoder
from .modules.sam import Sam
diff --git a/ultralytics/vit/sam/model.py b/ultralytics/models/sam/model.py
similarity index 96%
rename from ultralytics/vit/sam/model.py
rename to ultralytics/models/sam/model.py
index 925328ef7..4d7025143 100644
--- a/ultralytics/vit/sam/model.py
+++ b/ultralytics/models/sam/model.py
@@ -3,8 +3,8 @@
SAM model interface
"""
-from ultralytics.yolo.cfg import get_cfg
-from ultralytics.yolo.utils.torch_utils import model_info
+from ultralytics.cfg import get_cfg
+from ultralytics.utils.torch_utils import model_info
from .build import build_sam
from .predict import Predictor
diff --git a/ultralytics/vit/sam/modules/__init__.py b/ultralytics/models/sam/modules/__init__.py
similarity index 100%
rename from ultralytics/vit/sam/modules/__init__.py
rename to ultralytics/models/sam/modules/__init__.py
diff --git a/ultralytics/vit/sam/modules/decoders.py b/ultralytics/models/sam/modules/decoders.py
similarity index 100%
rename from ultralytics/vit/sam/modules/decoders.py
rename to ultralytics/models/sam/modules/decoders.py
diff --git a/ultralytics/vit/sam/modules/encoders.py b/ultralytics/models/sam/modules/encoders.py
similarity index 100%
rename from ultralytics/vit/sam/modules/encoders.py
rename to ultralytics/models/sam/modules/encoders.py
diff --git a/ultralytics/vit/sam/modules/sam.py b/ultralytics/models/sam/modules/sam.py
similarity index 100%
rename from ultralytics/vit/sam/modules/sam.py
rename to ultralytics/models/sam/modules/sam.py
diff --git a/ultralytics/vit/sam/modules/tiny_encoder.py b/ultralytics/models/sam/modules/tiny_encoder.py
similarity index 99%
rename from ultralytics/vit/sam/modules/tiny_encoder.py
rename to ultralytics/models/sam/modules/tiny_encoder.py
index e3f51017f..d00d1e814 100644
--- a/ultralytics/vit/sam/modules/tiny_encoder.py
+++ b/ultralytics/models/sam/modules/tiny_encoder.py
@@ -17,7 +17,7 @@ import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
-from ultralytics.yolo.utils.instance import to_2tuple
+from ultralytics.utils.instance import to_2tuple
class Conv2d_BN(torch.nn.Sequential):
@@ -50,7 +50,7 @@ class Conv2d_BN(torch.nn.Sequential):
# NOTE: This module and timm package is needed only for training.
-# from ultralytics.yolo.utils.checks import check_requirements
+# from ultralytics.utils.checks import check_requirements
# check_requirements('timm')
# from timm.models.layers import DropPath as TimmDropPath
# from timm.models.layers import trunc_normal_
diff --git a/ultralytics/vit/sam/modules/transformer.py b/ultralytics/models/sam/modules/transformer.py
similarity index 100%
rename from ultralytics/vit/sam/modules/transformer.py
rename to ultralytics/models/sam/modules/transformer.py
diff --git a/ultralytics/vit/sam/predict.py b/ultralytics/models/sam/predict.py
similarity index 98%
rename from ultralytics/vit/sam/predict.py
rename to ultralytics/models/sam/predict.py
index c6db86e3f..8f98d58b1 100644
--- a/ultralytics/vit/sam/predict.py
+++ b/ultralytics/models/sam/predict.py
@@ -5,11 +5,11 @@ import torch
import torch.nn.functional as F
import torchvision
-from ultralytics.yolo.data.augment import LetterBox
-from ultralytics.yolo.engine.predictor import BasePredictor
-from ultralytics.yolo.engine.results import Results
-from ultralytics.yolo.utils import DEFAULT_CFG, ops
-from ultralytics.yolo.utils.torch_utils import select_device
+from ultralytics.data.augment import LetterBox
+from ultralytics.engine.predictor import BasePredictor
+from ultralytics.engine.results import Results
+from ultralytics.utils import DEFAULT_CFG, ops
+from ultralytics.utils.torch_utils import select_device
from .amg import (batch_iterator, batched_mask_to_box, build_all_layer_point_grids, calculate_stability_score,
generate_crop_boxes, is_box_near_crop_edge, remove_small_regions, uncrop_boxes_xyxy, uncrop_masks)
diff --git a/ultralytics/vit/utils/__init__.py b/ultralytics/models/utils/__init__.py
similarity index 100%
rename from ultralytics/vit/utils/__init__.py
rename to ultralytics/models/utils/__init__.py
diff --git a/ultralytics/vit/utils/loss.py b/ultralytics/models/utils/loss.py
similarity index 98%
rename from ultralytics/vit/utils/loss.py
rename to ultralytics/models/utils/loss.py
index cb2de206f..db6fd6315 100644
--- a/ultralytics/vit/utils/loss.py
+++ b/ultralytics/models/utils/loss.py
@@ -4,9 +4,10 @@ import torch
import torch.nn as nn
import torch.nn.functional as F
-from ultralytics.vit.utils.ops import HungarianMatcher
-from ultralytics.yolo.utils.loss import FocalLoss, VarifocalLoss
-from ultralytics.yolo.utils.metrics import bbox_iou
+from ultralytics.utils.loss import FocalLoss, VarifocalLoss
+from ultralytics.utils.metrics import bbox_iou
+
+from .ops import HungarianMatcher
class DETRLoss(nn.Module):
diff --git a/ultralytics/vit/utils/ops.py b/ultralytics/models/utils/ops.py
similarity index 99%
rename from ultralytics/vit/utils/ops.py
rename to ultralytics/models/utils/ops.py
index 4b3793192..e7f829b61 100644
--- a/ultralytics/vit/utils/ops.py
+++ b/ultralytics/models/utils/ops.py
@@ -5,8 +5,8 @@ import torch.nn as nn
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
-from ultralytics.yolo.utils.metrics import bbox_iou
-from ultralytics.yolo.utils.ops import xywh2xyxy, xyxy2xywh
+from ultralytics.utils.metrics import bbox_iou
+from ultralytics.utils.ops import xywh2xyxy, xyxy2xywh
class HungarianMatcher(nn.Module):
diff --git a/ultralytics/models/yolo/__init__.py b/ultralytics/models/yolo/__init__.py
new file mode 100644
index 000000000..a88c60b88
--- /dev/null
+++ b/ultralytics/models/yolo/__init__.py
@@ -0,0 +1,5 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+
+from ultralytics.models.yolo import classify, detect, pose, segment
+
+__all__ = 'classify', 'segment', 'detect', 'pose'
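The new `ultralytics.models.yolo` namespace replaces the old `ultralytics.yolo.v8` task package, which is why the trainers below switch from `v8.detect.*` to `yolo.detect.*`. A quick check of the mapping:

```python
from ultralytics.models import yolo

print(yolo.detect.DetectionValidator.__module__)        # -> ultralytics.models.yolo.detect.val
print(yolo.classify.ClassificationTrainer.__module__)   # -> ultralytics.models.yolo.classify.train
```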
diff --git a/ultralytics/models/yolo/classify/__init__.py b/ultralytics/models/yolo/classify/__init__.py
new file mode 100644
index 000000000..84e7114ea
--- /dev/null
+++ b/ultralytics/models/yolo/classify/__init__.py
@@ -0,0 +1,7 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+
+from ultralytics.models.yolo.classify.predict import ClassificationPredictor, predict
+from ultralytics.models.yolo.classify.train import ClassificationTrainer, train
+from ultralytics.models.yolo.classify.val import ClassificationValidator, val
+
+__all__ = 'ClassificationPredictor', 'predict', 'ClassificationTrainer', 'train', 'ClassificationValidator', 'val'
diff --git a/ultralytics/yolo/v8/classify/predict.py b/ultralytics/models/yolo/classify/predict.py
similarity index 91%
rename from ultralytics/yolo/v8/classify/predict.py
rename to ultralytics/models/yolo/classify/predict.py
index fb486e292..a21b3fcd5 100644
--- a/ultralytics/yolo/v8/classify/predict.py
+++ b/ultralytics/models/yolo/classify/predict.py
@@ -2,9 +2,9 @@
import torch
-from ultralytics.yolo.engine.predictor import BasePredictor
-from ultralytics.yolo.engine.results import Results
-from ultralytics.yolo.utils import DEFAULT_CFG, ROOT
+from ultralytics.engine.predictor import BasePredictor
+from ultralytics.engine.results import Results
+from ultralytics.utils import DEFAULT_CFG, ROOT
class ClassificationPredictor(BasePredictor):
diff --git a/ultralytics/yolo/v8/classify/train.py b/ultralytics/models/yolo/classify/train.py
similarity index 92%
rename from ultralytics/yolo/v8/classify/train.py
rename to ultralytics/models/yolo/classify/train.py
index 72feb5591..21101b5e5 100644
--- a/ultralytics/yolo/v8/classify/train.py
+++ b/ultralytics/models/yolo/classify/train.py
@@ -3,13 +3,13 @@
import torch
import torchvision
+from ultralytics.data import ClassificationDataset, build_dataloader
+from ultralytics.engine.trainer import BaseTrainer
+from ultralytics.models import yolo
from ultralytics.nn.tasks import ClassificationModel, attempt_load_one_weight
-from ultralytics.yolo import v8
-from ultralytics.yolo.data import ClassificationDataset, build_dataloader
-from ultralytics.yolo.engine.trainer import BaseTrainer
-from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, RANK, colorstr
-from ultralytics.yolo.utils.plotting import plot_images, plot_results
-from ultralytics.yolo.utils.torch_utils import is_parallel, strip_optimizer, torch_distributed_zero_first
+from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK, colorstr
+from ultralytics.utils.plotting import plot_images, plot_results
+from ultralytics.utils.torch_utils import is_parallel, strip_optimizer, torch_distributed_zero_first
class ClassificationTrainer(BaseTrainer):
@@ -98,7 +98,7 @@ class ClassificationTrainer(BaseTrainer):
def get_validator(self):
"""Returns an instance of ClassificationValidator for validation."""
self.loss_names = ['loss']
- return v8.classify.ClassificationValidator(self.test_loader, self.save_dir)
+ return yolo.classify.ClassificationValidator(self.test_loader, self.save_dir)
def label_loss_items(self, loss_items=None, prefix='train'):
"""
diff --git a/ultralytics/yolo/v8/classify/val.py b/ultralytics/models/yolo/classify/val.py
similarity index 93%
rename from ultralytics/yolo/v8/classify/val.py
rename to ultralytics/models/yolo/classify/val.py
index f56dea0a2..76c470036 100644
--- a/ultralytics/yolo/v8/classify/val.py
+++ b/ultralytics/models/yolo/classify/val.py
@@ -2,11 +2,11 @@
import torch
-from ultralytics.yolo.data import ClassificationDataset, build_dataloader
-from ultralytics.yolo.engine.validator import BaseValidator
-from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER
-from ultralytics.yolo.utils.metrics import ClassifyMetrics, ConfusionMatrix
-from ultralytics.yolo.utils.plotting import plot_images
+from ultralytics.data import ClassificationDataset, build_dataloader
+from ultralytics.engine.validator import BaseValidator
+from ultralytics.utils import DEFAULT_CFG, LOGGER
+from ultralytics.utils.metrics import ClassifyMetrics, ConfusionMatrix
+from ultralytics.utils.plotting import plot_images
class ClassificationValidator(BaseValidator):
diff --git a/ultralytics/yolo/v8/detect/__init__.py b/ultralytics/models/yolo/detect/__init__.py
similarity index 100%
rename from ultralytics/yolo/v8/detect/__init__.py
rename to ultralytics/models/yolo/detect/__init__.py
diff --git a/ultralytics/yolo/v8/detect/predict.py b/ultralytics/models/yolo/detect/predict.py
similarity index 90%
rename from ultralytics/yolo/v8/detect/predict.py
rename to ultralytics/models/yolo/detect/predict.py
index 31e8a9f28..b7bf16bbe 100644
--- a/ultralytics/yolo/v8/detect/predict.py
+++ b/ultralytics/models/yolo/detect/predict.py
@@ -2,9 +2,9 @@
import torch
-from ultralytics.yolo.engine.predictor import BasePredictor
-from ultralytics.yolo.engine.results import Results
-from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, ops
+from ultralytics.engine.predictor import BasePredictor
+from ultralytics.engine.results import Results
+from ultralytics.utils import DEFAULT_CFG, ROOT, ops
class DetectionPredictor(BasePredictor):
diff --git a/ultralytics/yolo/v8/detect/train.py b/ultralytics/models/yolo/detect/train.py
similarity index 72%
rename from ultralytics/yolo/v8/detect/train.py
rename to ultralytics/models/yolo/detect/train.py
index 1b475ed0d..e697a059b 100644
--- a/ultralytics/yolo/v8/detect/train.py
+++ b/ultralytics/models/yolo/detect/train.py
@@ -3,21 +3,21 @@ from copy import copy
import numpy as np
+from ultralytics.data import build_dataloader, build_yolo_dataset
+from ultralytics.engine.trainer import BaseTrainer
+from ultralytics.models import yolo
from ultralytics.nn.tasks import DetectionModel
-from ultralytics.yolo import v8
-from ultralytics.yolo.data import build_dataloader, build_yolo_dataset
-from ultralytics.yolo.data.dataloaders.v5loader import create_dataloader
-from ultralytics.yolo.engine.trainer import BaseTrainer
-from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, RANK, colorstr
-from ultralytics.yolo.utils.plotting import plot_images, plot_labels, plot_results
-from ultralytics.yolo.utils.torch_utils import de_parallel, torch_distributed_zero_first
+from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK
+from ultralytics.utils.plotting import plot_images, plot_labels, plot_results
+from ultralytics.utils.torch_utils import de_parallel, torch_distributed_zero_first
# BaseTrainer python usage
class DetectionTrainer(BaseTrainer):
def build_dataset(self, img_path, mode='train', batch=None):
- """Build YOLO Dataset
+ """
+ Build YOLO Dataset.
Args:
img_path (str): Path to the folder containing images.
@@ -28,27 +28,7 @@ class DetectionTrainer(BaseTrainer):
return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, rect=mode == 'val', stride=gs)
def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode='train'):
- """TODO: manage splits differently."""
- # Calculate stride - check if model is initialized
- if self.args.v5loader:
- LOGGER.warning("WARNING ⚠️ 'v5loader' feature is deprecated and will be removed soon. You can train using "
- 'the default YOLOv8 dataloader instead, no argument is needed.')
- gs = max(int(de_parallel(self.model).stride.max() if self.model else 0), 32)
- return create_dataloader(path=dataset_path,
- imgsz=self.args.imgsz,
- batch_size=batch_size,
- stride=gs,
- hyp=vars(self.args),
- augment=mode == 'train',
- cache=self.args.cache,
- pad=0 if mode == 'train' else 0.5,
- rect=self.args.rect or mode == 'val',
- rank=rank,
- workers=self.args.workers,
- close_mosaic=self.args.close_mosaic != 0,
- prefix=colorstr(f'{mode}: '),
- shuffle=mode == 'train',
- seed=self.args.seed)[0]
+ """Construct and return dataloader."""
assert mode in ['train', 'val']
with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
dataset = self.build_dataset(dataset_path, mode, batch_size)
@@ -84,7 +64,7 @@ class DetectionTrainer(BaseTrainer):
def get_validator(self):
"""Returns a DetectionValidator for YOLO model validation."""
self.loss_names = 'box_loss', 'cls_loss', 'dfl_loss'
- return v8.detect.DetectionValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))
+ return yolo.detect.DetectionValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))
def label_loss_items(self, loss_items=None, prefix='train'):
"""
diff --git a/ultralytics/yolo/v8/detect/val.py b/ultralytics/models/yolo/detect/val.py
similarity index 87%
rename from ultralytics/yolo/v8/detect/val.py
rename to ultralytics/models/yolo/detect/val.py
index 77d346ca4..f4109f575 100644
--- a/ultralytics/yolo/v8/detect/val.py
+++ b/ultralytics/models/yolo/detect/val.py
@@ -6,14 +6,13 @@ from pathlib import Path
import numpy as np
import torch
-from ultralytics.yolo.data import build_dataloader, build_yolo_dataset
-from ultralytics.yolo.data.dataloaders.v5loader import create_dataloader
-from ultralytics.yolo.engine.validator import BaseValidator
-from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, colorstr, ops
-from ultralytics.yolo.utils.checks import check_requirements
-from ultralytics.yolo.utils.metrics import ConfusionMatrix, DetMetrics, box_iou
-from ultralytics.yolo.utils.plotting import output_to_target, plot_images
-from ultralytics.yolo.utils.torch_utils import de_parallel
+from ultralytics.data import build_dataloader, build_yolo_dataset
+from ultralytics.engine.validator import BaseValidator
+from ultralytics.utils import DEFAULT_CFG, LOGGER, ops
+from ultralytics.utils.checks import check_requirements
+from ultralytics.utils.metrics import ConfusionMatrix, DetMetrics, box_iou
+from ultralytics.utils.plotting import output_to_target, plot_images
+from ultralytics.utils.torch_utils import de_parallel
class DetectionValidator(BaseValidator):
@@ -186,28 +185,9 @@ class DetectionValidator(BaseValidator):
return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, stride=gs)
def get_dataloader(self, dataset_path, batch_size):
- """TODO: manage splits differently."""
- # Calculate stride - check if model is initialized
- if self.args.v5loader:
- LOGGER.warning("WARNING ⚠️ 'v5loader' feature is deprecated and will be removed soon. You can train using "
- 'the default YOLOv8 dataloader instead, no argument is needed.')
- gs = max(int(de_parallel(self.model).stride if self.model else 0), 32)
- return create_dataloader(path=dataset_path,
- imgsz=self.args.imgsz,
- batch_size=batch_size,
- stride=gs,
- hyp=vars(self.args),
- cache=False,
- pad=0.5,
- rect=self.args.rect,
- workers=self.args.workers,
- prefix=colorstr(f'{self.args.mode}: '),
- shuffle=False,
- seed=self.args.seed)[0]
-
+ """Construct and return dataloader."""
dataset = self.build_dataset(dataset_path, batch=batch_size, mode='val')
- dataloader = build_dataloader(dataset, batch_size, self.args.workers, shuffle=False, rank=-1)
- return dataloader
+ return build_dataloader(dataset, batch_size, self.args.workers, shuffle=False, rank=-1) # return dataloader
def plot_val_samples(self, batch, ni):
"""Plot validation image samples."""
diff --git a/ultralytics/yolo/v8/pose/__init__.py b/ultralytics/models/yolo/pose/__init__.py
similarity index 100%
rename from ultralytics/yolo/v8/pose/__init__.py
rename to ultralytics/models/yolo/pose/__init__.py
diff --git a/ultralytics/yolo/v8/pose/predict.py b/ultralytics/models/yolo/pose/predict.py
similarity index 92%
rename from ultralytics/yolo/v8/pose/predict.py
rename to ultralytics/models/yolo/pose/predict.py
index ad3246e11..fe7f383ae 100644
--- a/ultralytics/yolo/v8/pose/predict.py
+++ b/ultralytics/models/yolo/pose/predict.py
@@ -1,8 +1,8 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
-from ultralytics.yolo.engine.results import Results
-from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, ops
-from ultralytics.yolo.v8.detect.predict import DetectionPredictor
+from ultralytics.engine.results import Results
+from ultralytics.models.yolo.detect.predict import DetectionPredictor
+from ultralytics.utils import DEFAULT_CFG, ROOT, ops
class PosePredictor(DetectionPredictor):
diff --git a/ultralytics/yolo/v8/pose/train.py b/ultralytics/models/yolo/pose/train.py
similarity index 89%
rename from ultralytics/yolo/v8/pose/train.py
rename to ultralytics/models/yolo/pose/train.py
index af3043c19..df4a4af5b 100644
--- a/ultralytics/yolo/v8/pose/train.py
+++ b/ultralytics/models/yolo/pose/train.py
@@ -2,14 +2,14 @@
from copy import copy
+from ultralytics.models import yolo
from ultralytics.nn.tasks import PoseModel
-from ultralytics.yolo import v8
-from ultralytics.yolo.utils import DEFAULT_CFG
-from ultralytics.yolo.utils.plotting import plot_images, plot_results
+from ultralytics.utils import DEFAULT_CFG
+from ultralytics.utils.plotting import plot_images, plot_results
# BaseTrainer python usage
-class PoseTrainer(v8.detect.DetectionTrainer):
+class PoseTrainer(yolo.detect.DetectionTrainer):
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
"""Initialize a PoseTrainer object with specified configurations and overrides."""
@@ -34,7 +34,7 @@ class PoseTrainer(v8.detect.DetectionTrainer):
def get_validator(self):
"""Returns an instance of the PoseValidator class for validation."""
self.loss_names = 'box_loss', 'pose_loss', 'kobj_loss', 'cls_loss', 'dfl_loss'
- return v8.pose.PoseValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))
+ return yolo.pose.PoseValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))
def plot_training_samples(self, batch, ni):
"""Plot a batch of training samples with annotated class labels, bounding boxes, and keypoints."""
diff --git a/ultralytics/yolo/v8/pose/val.py b/ultralytics/models/yolo/pose/val.py
similarity index 96%
rename from ultralytics/yolo/v8/pose/val.py
rename to ultralytics/models/yolo/pose/val.py
index f3fc1ac87..1fd5f3293 100644
--- a/ultralytics/yolo/v8/pose/val.py
+++ b/ultralytics/models/yolo/pose/val.py
@@ -5,11 +5,11 @@ from pathlib import Path
import numpy as np
import torch
-from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, ops
-from ultralytics.yolo.utils.checks import check_requirements
-from ultralytics.yolo.utils.metrics import OKS_SIGMA, PoseMetrics, box_iou, kpt_iou
-from ultralytics.yolo.utils.plotting import output_to_target, plot_images
-from ultralytics.yolo.v8.detect import DetectionValidator
+from ultralytics.models.yolo.detect import DetectionValidator
+from ultralytics.utils import DEFAULT_CFG, LOGGER, ops
+from ultralytics.utils.checks import check_requirements
+from ultralytics.utils.metrics import OKS_SIGMA, PoseMetrics, box_iou, kpt_iou
+from ultralytics.utils.plotting import output_to_target, plot_images
class PoseValidator(DetectionValidator):
diff --git a/ultralytics/yolo/v8/segment/__init__.py b/ultralytics/models/yolo/segment/__init__.py
similarity index 100%
rename from ultralytics/yolo/v8/segment/__init__.py
rename to ultralytics/models/yolo/segment/__init__.py
diff --git a/ultralytics/yolo/v8/segment/predict.py b/ultralytics/models/yolo/segment/predict.py
similarity index 93%
rename from ultralytics/yolo/v8/segment/predict.py
rename to ultralytics/models/yolo/segment/predict.py
index 0b6ebc494..cef9d7afa 100644
--- a/ultralytics/yolo/v8/segment/predict.py
+++ b/ultralytics/models/yolo/segment/predict.py
@@ -2,9 +2,9 @@
import torch
-from ultralytics.yolo.engine.results import Results
-from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, ops
-from ultralytics.yolo.v8.detect.predict import DetectionPredictor
+from ultralytics.engine.results import Results
+from ultralytics.models.yolo.detect.predict import DetectionPredictor
+from ultralytics.utils import DEFAULT_CFG, ROOT, ops
class SegmentationPredictor(DetectionPredictor):
diff --git a/ultralytics/yolo/v8/segment/train.py b/ultralytics/models/yolo/segment/train.py
similarity index 87%
rename from ultralytics/yolo/v8/segment/train.py
rename to ultralytics/models/yolo/segment/train.py
index ab66cf061..e239c2636 100644
--- a/ultralytics/yolo/v8/segment/train.py
+++ b/ultralytics/models/yolo/segment/train.py
@@ -1,14 +1,14 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
from copy import copy
+from ultralytics.models import yolo
from ultralytics.nn.tasks import SegmentationModel
-from ultralytics.yolo import v8
-from ultralytics.yolo.utils import DEFAULT_CFG, RANK
-from ultralytics.yolo.utils.plotting import plot_images, plot_results
+from ultralytics.utils import DEFAULT_CFG, RANK
+from ultralytics.utils.plotting import plot_images, plot_results
# BaseTrainer python usage
-class SegmentationTrainer(v8.detect.DetectionTrainer):
+class SegmentationTrainer(yolo.detect.DetectionTrainer):
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
"""Initialize a SegmentationTrainer object with given arguments."""
@@ -28,7 +28,7 @@ class SegmentationTrainer(v8.detect.DetectionTrainer):
def get_validator(self):
"""Return an instance of SegmentationValidator for validation of YOLO model."""
self.loss_names = 'box_loss', 'seg_loss', 'cls_loss', 'dfl_loss'
- return v8.segment.SegmentationValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))
+ return yolo.segment.SegmentationValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))
def plot_training_samples(self, batch, ni):
"""Creates a plot of training sample images with labels and box coordinates."""
diff --git a/ultralytics/yolo/v8/segment/val.py b/ultralytics/models/yolo/segment/val.py
similarity index 97%
rename from ultralytics/yolo/v8/segment/val.py
rename to ultralytics/models/yolo/segment/val.py
index 73c2fe834..5735d3fa6 100644
--- a/ultralytics/yolo/v8/segment/val.py
+++ b/ultralytics/models/yolo/segment/val.py
@@ -7,11 +7,11 @@ import numpy as np
import torch
import torch.nn.functional as F
-from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, NUM_THREADS, ops
-from ultralytics.yolo.utils.checks import check_requirements
-from ultralytics.yolo.utils.metrics import SegmentMetrics, box_iou, mask_iou
-from ultralytics.yolo.utils.plotting import output_to_target, plot_images
-from ultralytics.yolo.v8.detect import DetectionValidator
+from ultralytics.models.yolo.detect import DetectionValidator
+from ultralytics.utils import DEFAULT_CFG, LOGGER, NUM_THREADS, ops
+from ultralytics.utils.checks import check_requirements
+from ultralytics.utils.metrics import SegmentMetrics, box_iou, mask_iou
+from ultralytics.utils.plotting import output_to_target, plot_images
class SegmentationValidator(DetectionValidator):
diff --git a/ultralytics/nn/autobackend.py b/ultralytics/nn/autobackend.py
index 78776bd24..6a8e387fa 100644
--- a/ultralytics/nn/autobackend.py
+++ b/ultralytics/nn/autobackend.py
@@ -16,10 +16,10 @@ import torch
import torch.nn as nn
from PIL import Image
-from ultralytics.yolo.utils import ARM64, LINUX, LOGGER, ROOT, yaml_load
-from ultralytics.yolo.utils.checks import check_requirements, check_suffix, check_version, check_yaml
-from ultralytics.yolo.utils.downloads import attempt_download_asset, is_url
-from ultralytics.yolo.utils.ops import xywh2xyxy
+from ultralytics.utils import ARM64, LINUX, LOGGER, ROOT, yaml_load
+from ultralytics.utils.checks import check_requirements, check_suffix, check_version, check_yaml
+from ultralytics.utils.downloads import attempt_download_asset, is_url
+from ultralytics.utils.ops import xywh2xyxy
def check_class_names(names):
@@ -34,7 +34,7 @@ def check_class_names(names):
raise KeyError(f'{n}-class dataset requires class indices 0-{n - 1}, but you have invalid class indices '
f'{min(names.keys())}-{max(names.keys())} defined in your dataset YAML.')
if isinstance(names[0], str) and names[0].startswith('n0'): # imagenet class codes, i.e. 'n01440764'
- map = yaml_load(ROOT / 'datasets/ImageNet.yaml')['map'] # human-readable names
+ map = yaml_load(ROOT / 'cfg/datasets/ImageNet.yaml')['map'] # human-readable names
names = {k: map[v] for k, v in names.items()}
return names
@@ -210,7 +210,7 @@ class AutoBackend(nn.Module):
LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')
import tensorflow as tf
- from ultralytics.yolo.engine.exporter import gd_outputs
+ from ultralytics.engine.exporter import gd_outputs
def wrap_frozen_graph(gd, inputs, outputs):
"""Wrap frozen graphs for deployment."""
@@ -284,7 +284,7 @@ class AutoBackend(nn.Module):
"""
raise NotImplementedError('Triton Inference Server is not currently supported.')
else:
- from ultralytics.yolo.engine.exporter import export_formats
+ from ultralytics.engine.exporter import export_formats
raise TypeError(f"model='{w}' is not a supported model format. "
'See https://docs.ultralytics.com/modes/predict for help.'
f'\n\n{export_formats()}')
@@ -476,7 +476,7 @@ class AutoBackend(nn.Module):
"""
# Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
# types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]
- from ultralytics.yolo.engine.exporter import export_formats
+ from ultralytics.engine.exporter import export_formats
sf = list(export_formats().Suffix) # export suffixes
if not is_url(p, check=False) and not isinstance(p, str):
check_suffix(p, sf) # checks
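The model-type check above relies on `export_formats()` exposing a `Suffix` column, as the `list(export_formats().Suffix)` call implies. A small sketch of that suffix matching under the same assumption (the helper name is illustrative):

```python
from ultralytics.engine.exporter import export_formats


def matching_suffixes(path):
    """Return the known export suffixes that the given model path ends with."""
    suffixes = list(export_formats().Suffix)  # e.g. '.pt', '.torchscript', '.onnx', ...
    return [s for s in suffixes if str(path).endswith(s)]
```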
diff --git a/ultralytics/nn/autoshape.py b/ultralytics/nn/autoshape.py
deleted file mode 100644
index d557f7806..000000000
--- a/ultralytics/nn/autoshape.py
+++ /dev/null
@@ -1,244 +0,0 @@
-# Ultralytics YOLO 🚀, AGPL-3.0 license
-"""
-Common modules
-"""
-
-from copy import copy
-from pathlib import Path
-
-import cv2
-import numpy as np
-import requests
-import torch
-import torch.nn as nn
-from PIL import Image, ImageOps
-from torch.cuda import amp
-
-from ultralytics.nn.autobackend import AutoBackend
-from ultralytics.yolo.data.augment import LetterBox
-from ultralytics.yolo.utils import LOGGER, colorstr
-from ultralytics.yolo.utils.files import increment_path
-from ultralytics.yolo.utils.ops import Profile, make_divisible, non_max_suppression, scale_boxes, xyxy2xywh
-from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box
-from ultralytics.yolo.utils.torch_utils import copy_attr, smart_inference_mode
-
-
-class AutoShape(nn.Module):
- """YOLOv8 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS."""
- conf = 0.25 # NMS confidence threshold
- iou = 0.45 # NMS IoU threshold
- agnostic = False # NMS class-agnostic
- multi_label = False # NMS multiple labels per box
- classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs
- max_det = 1000 # maximum number of detections per image
- amp = False # Automatic Mixed Precision (AMP) inference
-
- def __init__(self, model, verbose=True):
- """Initializes object and copies attributes from model object."""
- super().__init__()
- if verbose:
- LOGGER.info('Adding AutoShape... ')
- copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes
- self.dmb = isinstance(model, AutoBackend) # DetectMultiBackend() instance
- self.pt = not self.dmb or model.pt # PyTorch model
- self.model = model.eval()
- if self.pt:
- m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect()
- m.inplace = False # Detect.inplace=False for safe multithread inference
- m.export = True # do not output loss values
-
- def _apply(self, fn):
- """Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers."""
- self = super()._apply(fn)
- if self.pt:
- m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect()
- m.stride = fn(m.stride)
- m.grid = list(map(fn, m.grid))
- if isinstance(m.anchor_grid, list):
- m.anchor_grid = list(map(fn, m.anchor_grid))
- return self
-
- @smart_inference_mode()
- def forward(self, ims, size=640, augment=False, profile=False):
- """Inference from various sources. For size(height=640, width=1280), RGB images example inputs are:."""
- # file: ims = 'data/images/zidane.jpg' # str or PosixPath
- # URI: = 'https://ultralytics.com/images/zidane.jpg'
- # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
- # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3)
- # numpy: = np.zeros((640,1280,3)) # HWC
- # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
- # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
-
- dt = (Profile(), Profile(), Profile())
- with dt[0]:
- if isinstance(size, int): # expand
- size = (size, size)
- p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device) # param
- autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference
- if isinstance(ims, torch.Tensor): # torch
- with amp.autocast(autocast):
- return self.model(ims.to(p.device).type_as(p), augment=augment) # inference
-
- # Preprocess
- n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims]) # number, list of images
- shape0, shape1, files = [], [], [] # image and inference shapes, filenames
- for i, im in enumerate(ims):
- f = f'image{i}' # filename
- if isinstance(im, (str, Path)): # filename or uri
- im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
- im = np.asarray(ImageOps.exif_transpose(im))
- elif isinstance(im, Image.Image): # PIL Image
- im, f = np.asarray(ImageOps.exif_transpose(im)), getattr(im, 'filename', f) or f
- files.append(Path(f).with_suffix('.jpg').name)
- if im.shape[0] < 5: # image in CHW
- im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
- im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input
- s = im.shape[:2] # HWC
- shape0.append(s) # image shape
- g = max(size) / max(s) # gain
- shape1.append([y * g for y in s])
- ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
- shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] if self.pt else size # inf shape
- x = [LetterBox(shape1, auto=False)(image=im)['img'] for im in ims] # pad
- x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW
- x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32
-
- with amp.autocast(autocast):
- # Inference
- with dt[1]:
- y = self.model(x, augment=augment) # forward
-
- # Postprocess
- with dt[2]:
- y = non_max_suppression(y if self.dmb else y[0],
- self.conf,
- self.iou,
- self.classes,
- self.agnostic,
- self.multi_label,
- max_det=self.max_det) # NMS
- for i in range(n):
- scale_boxes(shape1, y[i][:, :4], shape0[i])
-
- return Detections(ims, y, files, dt, self.names, x.shape)
-
-
-class Detections:
- """ YOLOv8 detections class for inference results"""
-
- def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None):
- """Initialize object attributes for YOLO detection results."""
- super().__init__()
- d = pred[0].device # device
- gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims] # normalizations
- self.ims = ims # list of images as numpy arrays
- self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
- self.names = names # class names
- self.files = files # image filenames
- self.times = times # profiling times
- self.xyxy = pred # xyxy pixels
- self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
- self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
- self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
- self.n = len(self.pred) # number of images (batch size)
- self.t = tuple(x.t / self.n * 1E3 for x in times) # timestamps (ms)
- self.s = tuple(shape) # inference BCHW shape
-
- def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')):
- """Return performance metrics and optionally cropped/save images or results."""
- s, crops = '', []
- for i, (im, pred) in enumerate(zip(self.ims, self.pred)):
- s += f'\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string
- if pred.shape[0]:
- for c in pred[:, -1].unique():
- n = (pred[:, -1] == c).sum() # detections per class
- s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
- s = s.rstrip(', ')
- if show or save or render or crop:
- annotator = Annotator(im, example=str(self.names))
- for *box, conf, cls in reversed(pred): # xyxy, confidence, class
- label = f'{self.names[int(cls)]} {conf:.2f}'
- if crop:
- file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None
- crops.append({
- 'box': box,
- 'conf': conf,
- 'cls': cls,
- 'label': label,
- 'im': save_one_box(box, im, file=file, save=save)})
- else: # all others
- annotator.box_label(box, label if labels else '', color=colors(cls))
- im = annotator.im
- else:
- s += '(no detections)'
-
- im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
- if show:
- im.show(self.files[i]) # show
- if save:
- f = self.files[i]
- im.save(save_dir / f) # save
- if i == self.n - 1:
- LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}")
- if render:
- self.ims[i] = np.asarray(im)
- if pprint:
- s = s.lstrip('\n')
- return f'{s}\nSpeed: %.1fms preprocess, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t
- if crop:
- if save:
- LOGGER.info(f'Saved results to {save_dir}\n')
- return crops
-
- def show(self, labels=True):
- """Displays YOLO results with detected bounding boxes."""
- self._run(show=True, labels=labels) # show results
-
- def save(self, labels=True, save_dir='runs/detect/exp', exist_ok=False):
- """Save detection results with optional labels to specified directory."""
- save_dir = increment_path(save_dir, exist_ok, mkdir=True) # increment save_dir
- self._run(save=True, labels=labels, save_dir=save_dir) # save results
-
- def crop(self, save=True, save_dir='runs/detect/exp', exist_ok=False):
- """Crops images into detections and saves them if 'save' is True."""
- save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None
- return self._run(crop=True, save=save, save_dir=save_dir) # crop results
-
- def render(self, labels=True):
- """Renders detected objects and returns images."""
- self._run(render=True, labels=labels) # render results
- return self.ims
-
- def pandas(self):
- """Return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])."""
- import pandas
- new = copy(self) # return copy
- ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns
- cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns
- for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
- a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update
- setattr(new, k, [pandas.DataFrame(x, columns=c) for x in a])
- return new
-
- def tolist(self):
- """Return a list of Detections objects, i.e. 'for result in results.tolist():'."""
- r = range(self.n) # iterable
- x = [Detections([self.ims[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r]
- # for d in x:
- # for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
- # setattr(d, k, getattr(d, k)[0]) # pop out of list
- return x
-
- def print(self):
- """Print the results of the `self._run()` function."""
- LOGGER.info(self.__str__())
-
- def __len__(self): # override len(results)
- return self.n
-
- def __str__(self): # override print(results)
- return self._run(pprint=True) # print results
-
- def __repr__(self):
- """Returns a printable representation of the object."""
- return f'YOLOv8 {self.__class__} instance\n' + self.__str__()
diff --git a/ultralytics/nn/modules/head.py b/ultralytics/nn/modules/head.py
index 3b70d7025..e31ae8b16 100644
--- a/ultralytics/nn/modules/head.py
+++ b/ultralytics/nn/modules/head.py
@@ -9,7 +9,7 @@ import torch
import torch.nn as nn
from torch.nn.init import constant_, xavier_uniform_
-from ultralytics.yolo.utils.tal import dist2bbox, make_anchors
+from ultralytics.utils.tal import dist2bbox, make_anchors
from .block import DFL, Proto
from .conv import Conv
@@ -219,7 +219,7 @@ class RTDETRDecoder(nn.Module):
self._reset_parameters()
def forward(self, x, batch=None):
- from ultralytics.vit.utils.ops import get_cdn_group
+ from ultralytics.models.utils.ops import get_cdn_group
# input projection and embedding
feats, shapes = self._get_encoder_input(x)
diff --git a/ultralytics/nn/tasks.py b/ultralytics/nn/tasks.py
index cf1e9fe27..22425fd01 100644
--- a/ultralytics/nn/tasks.py
+++ b/ultralytics/nn/tasks.py
@@ -11,11 +11,11 @@ from ultralytics.nn.modules import (AIFI, ASFF2, ASFF3, C1, C2, C3, C3TR, SPP, S
C3Ghost, C3x, Classify, Concat, Conv, Conv2, ConvTranspose, Detect, DWConv,
DWConvTranspose2d, Focus, GhostBottleneck, GhostConv, HGBlock, HGStem, Pose, RepC3,
RepConv, RTDETRDecoder, Segment)
-from ultralytics.yolo.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, colorstr, emojis, yaml_load
-from ultralytics.yolo.utils.checks import check_requirements, check_suffix, check_yaml
-from ultralytics.yolo.utils.loss import v8ClassificationLoss, v8DetectionLoss, v8PoseLoss, v8SegmentationLoss
-from ultralytics.yolo.utils.plotting import feature_visualization
-from ultralytics.yolo.utils.torch_utils import (fuse_conv_and_bn, fuse_deconv_and_bn, initialize_weights,
+from ultralytics.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, colorstr, emojis, yaml_load
+from ultralytics.utils.checks import check_requirements, check_suffix, check_yaml
+from ultralytics.utils.loss import v8ClassificationLoss, v8DetectionLoss, v8PoseLoss, v8SegmentationLoss
+from ultralytics.utils.plotting import feature_visualization
+from ultralytics.utils.torch_utils import (fuse_conv_and_bn, fuse_deconv_and_bn, initialize_weights,
intersect_dicts, make_divisible, model_info, scale_img, time_sync)
try:
@@ -412,7 +412,7 @@ class RTDETRDetectionModel(DetectionModel):
def init_criterion(self):
"""Compute the classification loss between predictions and true labels."""
- from ultralytics.vit.utils.loss import RTDETRDetectionLoss
+ from ultralytics.models.utils.loss import RTDETRDetectionLoss
return RTDETRDetectionLoss(nc=self.nc, use_vfl=True)
@@ -498,6 +498,45 @@ class Ensemble(nn.ModuleList):
# Functions ------------------------------------------------------------------------------------------------------------
+@contextlib.contextmanager
+def temporary_modules(modules=None):
+ """
+ Context manager for temporarily adding or modifying modules in Python's module cache (`sys.modules`).
+
+ This function can be used to change the module paths during runtime. It's useful when refactoring code,
+ where you've moved a module from one location to another, but you still want to support the old import
+ paths for backwards compatibility.
+
+ Args:
+ modules (dict, optional): A dictionary mapping old module paths to new module paths.
+
+ Example:
+ with temporary_modules({'old.module.path': 'new.module.path'}):
+ import old.module.path # this will now import new.module.path
+
+ Note:
+ The changes are only in effect inside the context manager and are undone once the context manager exits.
+ Be aware that directly manipulating `sys.modules` can lead to unpredictable results, especially in larger
+ applications or libraries. Use this function with caution.
+ """
+ if not modules:
+ modules = {}
+
+ import importlib
+ import sys
+ try:
+ # Set modules in sys.modules under their old name
+ for old, new in modules.items():
+ sys.modules[old] = importlib.import_module(new)
+
+ yield
+ finally:
+ # Remove the temporary module paths
+ for old in modules:
+ if old in sys.modules:
+ del sys.modules[old]
+
+
def torch_safe_load(weight):
"""
This function attempts to load a PyTorch model with the torch.load() function. If a ModuleNotFoundError is raised,
@@ -510,12 +549,17 @@ def torch_safe_load(weight):
Returns:
(dict): The loaded PyTorch model.
"""
- from ultralytics.yolo.utils.downloads import attempt_download_asset
+ from ultralytics.utils.downloads import attempt_download_asset
check_suffix(file=weight, suffix='.pt')
file = attempt_download_asset(weight) # search online if missing locally
try:
- return torch.load(file, map_location='cpu'), file # load
+ with temporary_modules({
+ 'ultralytics.yolo.utils': 'ultralytics.utils',
+ 'ultralytics.yolo.v8': 'ultralytics.models.yolo',
+ 'ultralytics.yolo.data': 'ultralytics.data'}): # for legacy 8.0 Classify and Pose models
+ return torch.load(file, map_location='cpu'), file # load
+
except ModuleNotFoundError as e: # e.name is missing module name
if e.name == 'models':
raise TypeError(
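The `temporary_modules` mapping used inside `torch_safe_load` is what keeps 8.0-era checkpoints loadable: `torch.load` unpickles classes by looking up their saved module paths in `sys.modules`, so aliasing the old `ultralytics.yolo.*` names to the new packages is sufficient. A hedged sketch of the same call outside the loader (the weights filename is illustrative):

```python
import torch

from ultralytics.nn.tasks import temporary_modules

# Alias pre-refactor package names so classes pickled into legacy checkpoints still resolve
with temporary_modules({'ultralytics.yolo.utils': 'ultralytics.utils',
                        'ultralytics.yolo.v8': 'ultralytics.models.yolo',
                        'ultralytics.yolo.data': 'ultralytics.data'}):
    ckpt = torch.load('yolov8n-cls.pt', map_location='cpu')
```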
diff --git a/ultralytics/tracker/trackers/__init__.py b/ultralytics/tracker/trackers/__init__.py
deleted file mode 100644
index a0fd890e9..000000000
--- a/ultralytics/tracker/trackers/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# Ultralytics YOLO 🚀, AGPL-3.0 license
-
-from .bot_sort import BOTSORT
-from .byte_tracker import BYTETracker
-
-__all__ = 'BOTSORT', 'BYTETracker' # allow simpler import
diff --git a/ultralytics/tracker/README.md b/ultralytics/trackers/README.md
similarity index 95%
rename from ultralytics/tracker/README.md
rename to ultralytics/trackers/README.md
index 26ec0c324..843bd2245 100644
--- a/ultralytics/tracker/README.md
+++ b/ultralytics/trackers/README.md
@@ -69,7 +69,7 @@ while True:
## Change tracker parameters
-You can change the tracker parameters by eding the `tracker.yaml` file which is located in the ultralytics/tracker/cfg folder.
+You can change the tracker parameters by editing the `tracker.yaml` file which is located in the ultralytics/cfg/trackers folder.
## Command Line Interface (CLI)
@@ -81,6 +81,6 @@ yolo segment track source=... tracker=...
yolo pose track source=... tracker=...
```
-By default, trackers will use the configuration in `ultralytics/tracker/cfg`.
+By default, trackers will use the configuration in `ultralytics/cfg/trackers`.
We also support using a modified tracker config file. Please refer to the tracker config files
-in `ultralytics/tracker/cfg`.
+in `ultralytics/cfg/trackers`.
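For the Python API, the relocated defaults behave the same way: the `tracker` argument accepts a config name from `ultralytics/cfg/trackers` or a path to a modified copy. A short sketch, assuming the stock `bytetrack.yaml` config (source URL reused from the usage examples elsewhere in this diff):

```python
from ultralytics import YOLO

model = YOLO('yolov8n.pt')
# 'bytetrack.yaml' resolves against ultralytics/cfg/trackers; a custom file path also works
results = model.track(source='https://youtu.be/Zgi9g1ksQHc', tracker='bytetrack.yaml')
```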
diff --git a/ultralytics/tracker/__init__.py b/ultralytics/trackers/__init__.py
similarity index 70%
rename from ultralytics/tracker/__init__.py
rename to ultralytics/trackers/__init__.py
index 13d3903e7..46e178e42 100644
--- a/ultralytics/tracker/__init__.py
+++ b/ultralytics/trackers/__init__.py
@@ -1,6 +1,7 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
+from .bot_sort import BOTSORT
+from .byte_tracker import BYTETracker
from .track import register_tracker
-from .trackers import BOTSORT, BYTETracker
__all__ = 'register_tracker', 'BOTSORT', 'BYTETracker' # allow simpler import
diff --git a/ultralytics/tracker/trackers/basetrack.py b/ultralytics/trackers/basetrack.py
similarity index 100%
rename from ultralytics/tracker/trackers/basetrack.py
rename to ultralytics/trackers/basetrack.py
diff --git a/ultralytics/tracker/trackers/bot_sort.py b/ultralytics/trackers/bot_sort.py
similarity index 98%
rename from ultralytics/tracker/trackers/bot_sort.py
rename to ultralytics/trackers/bot_sort.py
index 10e88682d..168b9424e 100644
--- a/ultralytics/tracker/trackers/bot_sort.py
+++ b/ultralytics/trackers/bot_sort.py
@@ -4,11 +4,11 @@ from collections import deque
import numpy as np
-from ..utils import matching
-from ..utils.gmc import GMC
-from ..utils.kalman_filter import KalmanFilterXYWH
from .basetrack import TrackState
from .byte_tracker import BYTETracker, STrack
+from .utils import matching
+from .utils.gmc import GMC
+from .utils.kalman_filter import KalmanFilterXYWH
class BOTrack(STrack):
diff --git a/ultralytics/tracker/trackers/byte_tracker.py b/ultralytics/trackers/byte_tracker.py
similarity index 99%
rename from ultralytics/tracker/trackers/byte_tracker.py
rename to ultralytics/trackers/byte_tracker.py
index 6034cdc9d..04958cda6 100644
--- a/ultralytics/tracker/trackers/byte_tracker.py
+++ b/ultralytics/trackers/byte_tracker.py
@@ -2,9 +2,9 @@
import numpy as np
-from ..utils import matching
-from ..utils.kalman_filter import KalmanFilterXYAH
from .basetrack import BaseTrack, TrackState
+from .utils import matching
+from .utils.kalman_filter import KalmanFilterXYAH
class STrack(BaseTrack):
diff --git a/ultralytics/tracker/track.py b/ultralytics/trackers/track.py
similarity index 92%
rename from ultralytics/tracker/track.py
rename to ultralytics/trackers/track.py
index d08abfc7a..cfb4b08ad 100644
--- a/ultralytics/tracker/track.py
+++ b/ultralytics/trackers/track.py
@@ -4,10 +4,11 @@ from functools import partial
import torch
-from ultralytics.yolo.utils import IterableSimpleNamespace, yaml_load
-from ultralytics.yolo.utils.checks import check_yaml
+from ultralytics.utils import IterableSimpleNamespace, yaml_load
+from ultralytics.utils.checks import check_yaml
-from .trackers import BOTSORT, BYTETracker
+from .bot_sort import BOTSORT
+from .byte_tracker import BYTETracker
TRACKER_MAP = {'bytetrack': BYTETracker, 'botsort': BOTSORT}
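With the intermediate `trackers` subpackage removed, both tracker classes import directly from `ultralytics.trackers`, and `TRACKER_MAP` is a plain name-to-class lookup. A minimal sketch (the 'bytetrack' key mirrors the map above; constructor arguments are omitted because they are not part of this diff):

```python
from ultralytics.trackers import BOTSORT, BYTETracker

TRACKER_MAP = {'bytetrack': BYTETracker, 'botsort': BOTSORT}
tracker_cls = TRACKER_MAP['bytetrack']  # -> BYTETracker class, constructed later by the caller
```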
diff --git a/ultralytics/trackers/utils/__init__.py b/ultralytics/trackers/utils/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/ultralytics/tracker/utils/gmc.py b/ultralytics/trackers/utils/gmc.py
similarity index 99%
rename from ultralytics/tracker/utils/gmc.py
rename to ultralytics/trackers/utils/gmc.py
index a5c910d3b..0529be4b4 100644
--- a/ultralytics/tracker/utils/gmc.py
+++ b/ultralytics/trackers/utils/gmc.py
@@ -5,7 +5,7 @@ import copy
import cv2
import numpy as np
-from ultralytics.yolo.utils import LOGGER
+from ultralytics.utils import LOGGER
class GMC:
diff --git a/ultralytics/tracker/utils/kalman_filter.py b/ultralytics/trackers/utils/kalman_filter.py
similarity index 100%
rename from ultralytics/tracker/utils/kalman_filter.py
rename to ultralytics/trackers/utils/kalman_filter.py
diff --git a/ultralytics/tracker/utils/matching.py b/ultralytics/trackers/utils/matching.py
similarity index 99%
rename from ultralytics/tracker/utils/matching.py
rename to ultralytics/trackers/utils/matching.py
index 0b22b3de8..02a322d88 100644
--- a/ultralytics/tracker/utils/matching.py
+++ b/ultralytics/trackers/utils/matching.py
@@ -11,7 +11,7 @@ try:
assert lap.__version__ # verify package is not directory
except (ImportError, AssertionError, AttributeError):
- from ultralytics.yolo.utils.checks import check_requirements
+ from ultralytics.utils.checks import check_requirements
check_requirements('lapx>=0.5.2') # update to lap package from https://github.com/rathaROG/lapx
import lap
diff --git a/ultralytics/utils/__init__.py b/ultralytics/utils/__init__.py
new file mode 100644
index 000000000..44559a69d
--- /dev/null
+++ b/ultralytics/utils/__init__.py
@@ -0,0 +1,809 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+
+import contextlib
+import inspect
+import logging.config
+import os
+import platform
+import re
+import subprocess
+import sys
+import threading
+import urllib
+import uuid
+from pathlib import Path
+from types import SimpleNamespace
+from typing import Union
+
+import cv2
+import matplotlib.pyplot as plt
+import numpy as np
+import torch
+import yaml
+
+from ultralytics import __version__
+
+# PyTorch Multi-GPU DDP Constants
+RANK = int(os.getenv('RANK', -1))
+LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
+WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
+
+# Other Constants
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1] # YOLO
+DEFAULT_CFG_PATH = ROOT / 'cfg/default.yaml'
+NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads
+AUTOINSTALL = str(os.getenv('YOLO_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode
+VERBOSE = str(os.getenv('YOLO_VERBOSE', True)).lower() == 'true' # global verbose mode
+TQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format
+LOGGING_NAME = 'ultralytics'
+MACOS, LINUX, WINDOWS = (platform.system() == x for x in ['Darwin', 'Linux', 'Windows']) # environment booleans
+ARM64 = platform.machine() in ('arm64', 'aarch64') # ARM64 booleans
+HELP_MSG = \
+ """
+ Usage examples for running YOLOv8:
+
+ 1. Install the ultralytics package:
+
+ pip install ultralytics
+
+ 2. Use the Python SDK:
+
+ from ultralytics import YOLO
+
+ # Load a model
+ model = YOLO('yolov8n.yaml') # build a new model from scratch
+ model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training)
+
+ # Use the model
+ results = model.train(data="coco128.yaml", epochs=3) # train the model
+ results = model.val() # evaluate model performance on the validation set
+ results = model('https://ultralytics.com/images/bus.jpg') # predict on an image
+ success = model.export(format='onnx') # export the model to ONNX format
+
+ 3. Use the command line interface (CLI):
+
+ YOLOv8 'yolo' CLI commands use the following syntax:
+
+ yolo TASK MODE ARGS
+
+ Where TASK (optional) is one of [detect, segment, classify]
+ MODE (required) is one of [train, val, predict, export]
+ ARGS (optional) are any number of custom 'arg=value' pairs like 'imgsz=320' that override defaults.
+ See all ARGS at https://docs.ultralytics.com/usage/cfg or with 'yolo cfg'
+
+ - Train a detection model for 10 epochs with an initial learning_rate of 0.01
+ yolo detect train data=coco128.yaml model=yolov8n.pt epochs=10 lr0=0.01
+
+ - Predict a YouTube video using a pretrained segmentation model at image size 320:
+ yolo segment predict model=yolov8n-seg.pt source='https://youtu.be/Zgi9g1ksQHc' imgsz=320
+
+ - Val a pretrained detection model at batch-size 1 and image size 640:
+ yolo detect val model=yolov8n.pt data=coco128.yaml batch=1 imgsz=640
+
+ - Export a YOLOv8n classification model to ONNX format at image size 224 by 128 (no TASK required)
+ yolo export model=yolov8n-cls.pt format=onnx imgsz=224,128
+
+ - Run special commands:
+ yolo help
+ yolo checks
+ yolo version
+ yolo settings
+ yolo copy-cfg
+ yolo cfg
+
+ Docs: https://docs.ultralytics.com
+ Community: https://community.ultralytics.com
+ GitHub: https://github.com/ultralytics/ultralytics
+ """
+
+# Settings
+torch.set_printoptions(linewidth=320, precision=4, profile='default')
+np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
+cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
+os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads
+os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' # for deterministic training
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress verbose TF compiler warnings in Colab
+
+
+class SimpleClass:
+ """
+ Ultralytics SimpleClass is a base class providing helpful string representation, error reporting, and attribute
+ access methods for easier debugging and usage.
+ """
+
+ def __str__(self):
+ """Return a human-readable string representation of the object."""
+ attr = []
+ for a in dir(self):
+ v = getattr(self, a)
+ if not callable(v) and not a.startswith('_'):
+ if isinstance(v, SimpleClass):
+ # Display only the module and class name for subclasses
+ s = f'{a}: {v.__module__}.{v.__class__.__name__} object'
+ else:
+ s = f'{a}: {repr(v)}'
+ attr.append(s)
+ return f'{self.__module__}.{self.__class__.__name__} object with attributes:\n\n' + '\n'.join(attr)
+
+ def __repr__(self):
+ """Return a machine-readable string representation of the object."""
+ return self.__str__()
+
+ def __getattr__(self, attr):
+ """Custom attribute access error message with helpful information."""
+ name = self.__class__.__name__
+ raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}")
+
+
+class IterableSimpleNamespace(SimpleNamespace):
+ """
+ Ultralytics IterableSimpleNamespace is an extension class of SimpleNamespace that adds iterable functionality and
+ enables usage with dict() and for loops.
+ """
+
+ def __iter__(self):
+ """Return an iterator of key-value pairs from the namespace's attributes."""
+ return iter(vars(self).items())
+
+ def __str__(self):
+ """Return a human-readable string representation of the object."""
+ return '\n'.join(f'{k}={v}' for k, v in vars(self).items())
+
+ def __getattr__(self, attr):
+ """Custom attribute access error message with helpful information."""
+ name = self.__class__.__name__
+ raise AttributeError(f"""
+ '{name}' object has no attribute '{attr}'. This may be caused by a modified or out of date ultralytics
+ 'default.yaml' file.\nPlease update your code with 'pip install -U ultralytics' and if necessary replace
+ {DEFAULT_CFG_PATH} with the latest version from
+ https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/default.yaml
+ """)
+
+ def get(self, key, default=None):
+ """Return the value of the specified key if it exists; otherwise, return the default value."""
+ return getattr(self, key, default)
+
+
+def plt_settings(rcparams=None, backend='Agg'):
+ """
+ Decorator to temporarily set rc parameters and the backend for a plotting function.
+
+ Usage:
+ decorator: @plt_settings({"font.size": 12})
+ context manager: with plt_settings({"font.size": 12}):
+
+ Args:
+ rcparams (dict): Dictionary of rc parameters to set.
+ backend (str, optional): Name of the backend to use. Defaults to 'Agg'.
+
+ Returns:
+ (Callable): Decorated function with temporarily set rc parameters and backend. This decorator can be
+ applied to any function that needs to have specific matplotlib rc parameters and backend for its execution.
+ """
+
+ if rcparams is None:
+ rcparams = {'font.size': 11}
+
+ def decorator(func):
+ """Decorator to apply temporary rc parameters and backend to a function."""
+
+ def wrapper(*args, **kwargs):
+ """Sets rc parameters and backend, calls the original function, and restores the settings."""
+ original_backend = plt.get_backend()
+ plt.switch_backend(backend)
+
+ with plt.rc_context(rcparams):
+ result = func(*args, **kwargs)
+
+ plt.switch_backend(original_backend)
+ return result
+
+ return wrapper
+
+ return decorator
+
+
+def set_logging(name=LOGGING_NAME, verbose=True):
+ """Sets up logging for the given name."""
+ rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings
+ level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR
+ logging.config.dictConfig({
+ 'version': 1,
+ 'disable_existing_loggers': False,
+ 'formatters': {
+ name: {
+ 'format': '%(message)s'}},
+ 'handlers': {
+ name: {
+ 'class': 'logging.StreamHandler',
+ 'formatter': name,
+ 'level': level}},
+ 'loggers': {
+ name: {
+ 'level': level,
+ 'handlers': [name],
+ 'propagate': False}}})
+
+
+def emojis(string=''):
+ """Return platform-dependent emoji-safe version of string."""
+ return string.encode().decode('ascii', 'ignore') if WINDOWS else string
+
+
+class EmojiFilter(logging.Filter):
+ """
+ A custom logging filter class for removing emojis in log messages.
+
+ This filter is particularly useful for ensuring compatibility with Windows terminals
+ that may not support the display of emojis in log messages.
+ """
+
+ def filter(self, record):
+ """Filter logs by emoji unicode characters on windows."""
+ record.msg = emojis(record.msg)
+ return super().filter(record)
+
+
+# Set logger
+set_logging(LOGGING_NAME, verbose=VERBOSE) # run before defining LOGGER
+LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)
+if WINDOWS: # emoji-safe logging
+ LOGGER.addFilter(EmojiFilter())
+
+
+class ThreadingLocked:
+ """
+ A decorator class for ensuring thread-safe execution of a function or method.
+ This class can be used as a decorator to make sure that if the decorated function
+ is called from multiple threads, only one thread at a time will be able to execute the function.
+
+ Attributes:
+ lock (threading.Lock): A lock object used to manage access to the decorated function.
+
+ Usage:
+ @ThreadingLocked()
+ def my_function():
+ # Your code here
+ pass
+ """
+
+ def __init__(self):
+ self.lock = threading.Lock()
+
+ def __call__(self, f):
+ from functools import wraps
+
+ @wraps(f)
+ def decorated(*args, **kwargs):
+ with self.lock:
+ return f(*args, **kwargs)
+
+ return decorated
+
+
+def yaml_save(file='data.yaml', data=None):
+ """
+ Save YAML data to a file.
+
+ Args:
+ file (str, optional): File name. Default is 'data.yaml'.
+ data (dict): Data to save in YAML format.
+
+ Returns:
+ (None): Data is saved to the specified file.
+ """
+ if data is None:
+ data = {}
+ file = Path(file)
+ if not file.parent.exists():
+ # Create parent directories if they don't exist
+ file.parent.mkdir(parents=True, exist_ok=True)
+
+ # Convert Path objects to strings
+ for k, v in data.items():
+ if isinstance(v, Path):
+ data[k] = str(v)
+
+ # Dump data to file in YAML format
+ with open(file, 'w') as f:
+ yaml.safe_dump(data, f, sort_keys=False, allow_unicode=True)
+
+
+def yaml_load(file='data.yaml', append_filename=False):
+ """
+ Load YAML data from a file.
+
+ Args:
+ file (str, optional): File name. Default is 'data.yaml'.
+ append_filename (bool): Add the YAML filename to the YAML dictionary. Default is False.
+
+ Returns:
+ (dict): YAML data and file name.
+ """
+ with open(file, errors='ignore', encoding='utf-8') as f:
+ s = f.read() # string
+
+ # Remove special characters
+ if not s.isprintable():
+ s = re.sub(r'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]+', '', s)
+
+ # Add YAML filename to dict and return
+ return {**yaml.safe_load(s), 'yaml_file': str(file)} if append_filename else yaml.safe_load(s)
+
+
+def yaml_print(yaml_file: Union[str, Path, dict]) -> None:
+ """
+ Pretty prints a yaml file or a yaml-formatted dictionary.
+
+ Args:
+ yaml_file: The file path of the yaml file or a yaml-formatted dictionary.
+
+ Returns:
+ None
+ """
+ yaml_dict = yaml_load(yaml_file) if isinstance(yaml_file, (str, Path)) else yaml_file
+ dump = yaml.dump(yaml_dict, sort_keys=False, allow_unicode=True)
+ LOGGER.info(f"Printing '{colorstr('bold', 'black', yaml_file)}'\n\n{dump}")
+
+
+# Default configuration
+DEFAULT_CFG_DICT = yaml_load(DEFAULT_CFG_PATH)
+for k, v in DEFAULT_CFG_DICT.items():
+ if isinstance(v, str) and v.lower() == 'none':
+ DEFAULT_CFG_DICT[k] = None
+DEFAULT_CFG_KEYS = DEFAULT_CFG_DICT.keys()
+DEFAULT_CFG = IterableSimpleNamespace(**DEFAULT_CFG_DICT)
+
+
+def is_colab():
+ """
+ Check if the current script is running inside a Google Colab notebook.
+
+ Returns:
+ (bool): True if running inside a Colab notebook, False otherwise.
+ """
+ return 'COLAB_RELEASE_TAG' in os.environ or 'COLAB_BACKEND_VERSION' in os.environ
+
+
+def is_kaggle():
+ """
+ Check if the current script is running inside a Kaggle kernel.
+
+ Returns:
+ (bool): True if running inside a Kaggle kernel, False otherwise.
+ """
+ return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com'
+
+
+def is_jupyter():
+ """
+ Check if the current script is running inside a Jupyter Notebook.
+ Verified on Colab, Jupyterlab, Kaggle, Paperspace.
+
+ Returns:
+ (bool): True if running inside a Jupyter Notebook, False otherwise.
+ """
+ with contextlib.suppress(Exception):
+ from IPython import get_ipython
+ return get_ipython() is not None
+ return False
+
+
+def is_docker() -> bool:
+ """
+ Determine if the script is running inside a Docker container.
+
+ Returns:
+ (bool): True if the script is running inside a Docker container, False otherwise.
+ """
+ file = Path('/proc/self/cgroup')
+ if file.exists():
+ with open(file) as f:
+ return 'docker' in f.read()
+ else:
+ return False
+
+
+def is_online() -> bool:
+ """
+ Check internet connectivity by attempting to connect to a known online host.
+
+ Returns:
+ (bool): True if connection is successful, False otherwise.
+ """
+ import socket
+
+ for host in '1.1.1.1', '8.8.8.8', '223.5.5.5': # Cloudflare, Google, AliDNS:
+ try:
+ test_connection = socket.create_connection(address=(host, 53), timeout=2)
+ except (socket.timeout, socket.gaierror, OSError):
+ continue
+ else:
+ # If the connection was successful, close it to avoid a ResourceWarning
+ test_connection.close()
+ return True
+ return False
+
+
+ONLINE = is_online()
+
+
+def is_pip_package(filepath: str = __name__) -> bool:
+ """
+ Determines if the file at the given filepath is part of a pip package.
+
+ Args:
+ filepath (str): The filepath to check.
+
+ Returns:
+ (bool): True if the file is part of a pip package, False otherwise.
+ """
+ import importlib.util
+
+ # Get the spec for the module
+ spec = importlib.util.find_spec(filepath)
+
+ # Return whether the spec is not None and the origin is not None (indicating it is a package)
+ return spec is not None and spec.origin is not None
+
+
+def is_dir_writeable(dir_path: Union[str, Path]) -> bool:
+ """
+ Check if a directory is writeable.
+
+ Args:
+ dir_path (str | Path): The path to the directory.
+
+ Returns:
+ (bool): True if the directory is writeable, False otherwise.
+ """
+ return os.access(str(dir_path), os.W_OK)
+
+
+def is_pytest_running():
+ """
+ Determines whether pytest is currently running or not.
+
+ Returns:
+ (bool): True if pytest is running, False otherwise.
+ """
+ return ('PYTEST_CURRENT_TEST' in os.environ) or ('pytest' in sys.modules) or ('pytest' in Path(sys.argv[0]).stem)
+
+
+def is_github_actions_ci() -> bool:
+ """
+ Determine if the current environment is a GitHub Actions CI Python runner.
+
+ Returns:
+ (bool): True if the current environment is a GitHub Actions CI Python runner, False otherwise.
+ """
+ return 'GITHUB_ACTIONS' in os.environ and 'RUNNER_OS' in os.environ and 'RUNNER_TOOL_CACHE' in os.environ
+
+
+def is_git_dir():
+ """
+ Determines whether the current file is part of a git repository.
+    If the current file is not part of a git repository, returns False.
+
+ Returns:
+ (bool): True if current file is part of a git repository.
+ """
+ return get_git_dir() is not None
+
+
+def get_git_dir():
+ """
+ Determines whether the current file is part of a git repository and if so, returns the repository root directory.
+ If the current file is not part of a git repository, returns None.
+
+ Returns:
+ (Path | None): Git root directory if found or None if not found.
+ """
+ for d in Path(__file__).parents:
+ if (d / '.git').is_dir():
+ return d
+ return None # no .git dir found
+
+
+def get_git_origin_url():
+ """
+ Retrieves the origin URL of a git repository.
+
+ Returns:
+ (str | None): The origin URL of the git repository.
+ """
+ if is_git_dir():
+ with contextlib.suppress(subprocess.CalledProcessError):
+ origin = subprocess.check_output(['git', 'config', '--get', 'remote.origin.url'])
+ return origin.decode().strip()
+ return None # if not git dir or on error
+
+
+def get_git_branch():
+ """
+ Returns the current git branch name. If not in a git repository, returns None.
+
+ Returns:
+ (str | None): The current git branch name.
+ """
+ if is_git_dir():
+ with contextlib.suppress(subprocess.CalledProcessError):
+ origin = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
+ return origin.decode().strip()
+ return None # if not git dir or on error
+
+
+def get_default_args(func):
+ """Returns a dictionary of default arguments for a function.
+
+ Args:
+ func (callable): The function to inspect.
+
+ Returns:
+ (dict): A dictionary where each key is a parameter name, and each value is the default value of that parameter.
+ """
+ signature = inspect.signature(func)
+ return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
+
+
+def get_user_config_dir(sub_dir='Ultralytics'):
+ """
+ Get the user config directory.
+
+ Args:
+ sub_dir (str): The name of the subdirectory to create.
+
+ Returns:
+ (Path): The path to the user config directory.
+ """
+ # Return the appropriate config directory for each operating system
+ if WINDOWS:
+ path = Path.home() / 'AppData' / 'Roaming' / sub_dir
+ elif MACOS: # macOS
+ path = Path.home() / 'Library' / 'Application Support' / sub_dir
+ elif LINUX:
+ path = Path.home() / '.config' / sub_dir
+ else:
+ raise ValueError(f'Unsupported operating system: {platform.system()}')
+
+ # GCP and AWS lambda fix, only /tmp is writeable
+ if not is_dir_writeable(str(path.parent)):
+ path = Path('/tmp') / sub_dir
+ LOGGER.warning(f"WARNING ⚠️ user config directory is not writeable, defaulting to '{path}'.")
+
+ # Create the subdirectory if it does not exist
+ path.mkdir(parents=True, exist_ok=True)
+
+ return path
+
+
+USER_CONFIG_DIR = Path(os.getenv('YOLO_CONFIG_DIR', get_user_config_dir())) # Ultralytics settings dir
+SETTINGS_YAML = USER_CONFIG_DIR / 'settings.yaml'
+
+
+def colorstr(*input):
+ """Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')."""
+ *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string
+ colors = {
+ 'black': '\033[30m', # basic colors
+ 'red': '\033[31m',
+ 'green': '\033[32m',
+ 'yellow': '\033[33m',
+ 'blue': '\033[34m',
+ 'magenta': '\033[35m',
+ 'cyan': '\033[36m',
+ 'white': '\033[37m',
+ 'bright_black': '\033[90m', # bright colors
+ 'bright_red': '\033[91m',
+ 'bright_green': '\033[92m',
+ 'bright_yellow': '\033[93m',
+ 'bright_blue': '\033[94m',
+ 'bright_magenta': '\033[95m',
+ 'bright_cyan': '\033[96m',
+ 'bright_white': '\033[97m',
+ 'end': '\033[0m', # misc
+ 'bold': '\033[1m',
+ 'underline': '\033[4m'}
+ return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
+
+
+class TryExcept(contextlib.ContextDecorator):
+ """YOLOv8 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager."""
+
+ def __init__(self, msg='', verbose=True):
+ """Initialize TryExcept class with optional message and verbosity settings."""
+ self.msg = msg
+ self.verbose = verbose
+
+ def __enter__(self):
+ """Executes when entering TryExcept context, initializes instance."""
+ pass
+
+ def __exit__(self, exc_type, value, traceback):
+ """Defines behavior when exiting a 'with' block, prints error message if necessary."""
+ if self.verbose and value:
+ print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}"))
+ return True
+
+
+def threaded(func):
+ """Multi-threads a target function and returns thread. Usage: @threaded decorator."""
+
+ def wrapper(*args, **kwargs):
+ """Multi-threads a given function and returns the thread."""
+ thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
+ thread.start()
+ return thread
+
+ return wrapper
+
+
+def set_sentry():
+ """
+ Initialize the Sentry SDK for error tracking and reporting. Only used if sentry_sdk package is installed and
+ sync=True in settings. Run 'yolo settings' to see and update settings YAML file.
+
+ Conditions required to send errors (ALL conditions must be met or no errors will be reported):
+ - sentry_sdk package is installed
+ - sync=True in YOLO settings
+ - pytest is not running
+ - running in a pip package installation
+ - running in a non-git directory
+ - running with rank -1 or 0
+ - online environment
+ - CLI used to run package (checked with 'yolo' as the name of the main CLI command)
+
+ The function also configures Sentry SDK to ignore KeyboardInterrupt and FileNotFoundError
+ exceptions and to exclude events with 'out of memory' in their exception message.
+
+ Additionally, the function sets custom tags and user information for Sentry events.
+ """
+
+ def before_send(event, hint):
+ """
+ Modify the event before sending it to Sentry based on specific exception types and messages.
+
+ Args:
+ event (dict): The event dictionary containing information about the error.
+ hint (dict): A dictionary containing additional information about the error.
+
+ Returns:
+            (dict): The modified event or None if the event should not be sent to Sentry.
+ """
+ if 'exc_info' in hint:
+ exc_type, exc_value, tb = hint['exc_info']
+ if exc_type in (KeyboardInterrupt, FileNotFoundError) \
+ or 'out of memory' in str(exc_value):
+ return None # do not send event
+
+ event['tags'] = {
+ 'sys_argv': sys.argv[0],
+ 'sys_argv_name': Path(sys.argv[0]).name,
+ 'install': 'git' if is_git_dir() else 'pip' if is_pip_package() else 'other',
+ 'os': ENVIRONMENT}
+ return event
+
+ if SETTINGS['sync'] and \
+ RANK in (-1, 0) and \
+ Path(sys.argv[0]).name == 'yolo' and \
+ not TESTS_RUNNING and \
+ ONLINE and \
+ is_pip_package() and \
+ not is_git_dir():
+
+ # If sentry_sdk package is not installed then return and do not use Sentry
+ try:
+ import sentry_sdk # noqa
+ except ImportError:
+ return
+
+ sentry_sdk.init(
+ dsn='https://5ff1556b71594bfea135ff0203a0d290@o4504521589325824.ingest.sentry.io/4504521592406016',
+ debug=False,
+ traces_sample_rate=1.0,
+ release=__version__,
+ environment='production', # 'dev' or 'production'
+ before_send=before_send,
+ ignore_errors=[KeyboardInterrupt, FileNotFoundError])
+ sentry_sdk.set_user({'id': SETTINGS['uuid']}) # SHA-256 anonymized UUID hash
+
+ # Disable all sentry logging
+ for logger in 'sentry_sdk', 'sentry_sdk.errors':
+ logging.getLogger(logger).setLevel(logging.CRITICAL)
+
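Because `sync=True` in the settings is one of the mandatory conditions listed above, error reporting can be switched off entirely by disabling sync (using `set_settings`, defined a little further down); a small sketch:

```python
from ultralytics.utils import set_settings

set_settings({'sync': False})  # persisted to settings.yaml; set_sentry() will no-op on subsequent runs
```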
+
+def get_settings(file=SETTINGS_YAML, version='0.0.3'):
+ """
+ Loads a global Ultralytics settings YAML file or creates one with default values if it does not exist.
+
+ Args:
+ file (Path): Path to the Ultralytics settings YAML file. Defaults to 'settings.yaml' in the USER_CONFIG_DIR.
+ version (str): Settings version. If min settings version not met, new default settings will be saved.
+
+ Returns:
+ (dict): Dictionary of settings key-value pairs.
+ """
+ import hashlib
+
+ from ultralytics.utils.checks import check_version
+ from ultralytics.utils.torch_utils import torch_distributed_zero_first
+
+ git_dir = get_git_dir()
+ root = git_dir or Path()
+ datasets_root = (root.parent if git_dir and is_dir_writeable(root.parent) else root).resolve()
+ defaults = {
+ 'datasets_dir': str(datasets_root / 'datasets'), # default datasets directory.
+ 'weights_dir': str(root / 'weights'), # default weights directory.
+ 'runs_dir': str(root / 'runs'), # default runs directory.
+ 'uuid': hashlib.sha256(str(uuid.getnode()).encode()).hexdigest(), # SHA-256 anonymized UUID hash
+ 'sync': True, # sync analytics to help with YOLO development
+ 'api_key': '', # Ultralytics HUB API key (https://hub.ultralytics.com/)
+ 'settings_version': version} # Ultralytics settings version
+
+ with torch_distributed_zero_first(RANK):
+ if not file.exists():
+ yaml_save(file, defaults)
+ settings = yaml_load(file)
+
+ # Check that settings keys and types match defaults
+ correct = \
+ settings \
+ and settings.keys() == defaults.keys() \
+ and all(type(a) == type(b) for a, b in zip(settings.values(), defaults.values())) \
+ and check_version(settings['settings_version'], version)
+ if not correct:
+ LOGGER.warning('WARNING ⚠️ Ultralytics settings reset to defaults. This is normal and may be due to a '
+ 'recent ultralytics package update, but may have overwritten previous settings. '
+ f"\nView and update settings with 'yolo settings' or at '{file}'")
+            settings = defaults # reset settings to defaults (previous values are discarded)
+ yaml_save(file, settings) # save updated defaults
+
+ return settings
+
+
+def set_settings(kwargs, file=SETTINGS_YAML):
+ """
+    Update the global SETTINGS dictionary with the given key-value pairs and persist the result to the settings
+    YAML file.
+ """
+ SETTINGS.update(kwargs)
+ yaml_save(file, SETTINGS)
+
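For reference, a sketch of how these settings are typically read and updated from Python (the directory below is an arbitrary example path):

```python
from ultralytics.utils import SETTINGS, set_settings

print(SETTINGS['datasets_dir'])                   # resolved default, e.g. <repo parent>/datasets
set_settings({'datasets_dir': '/data/datasets'})  # example override, persisted to settings.yaml
```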
+
+def deprecation_warn(arg, new_arg, version=None):
+ """Issue a deprecation warning when a deprecated argument is used, suggesting an updated argument."""
+ if not version:
+        version = float(__version__[:3]) + 0.2 # default: deprecate two minor releases after the current version
+ LOGGER.warning(f"WARNING ⚠️ '{arg}' is deprecated and will be removed in 'ultralytics {version}' in the future. "
+ f"Please use '{new_arg}' instead.")
+
+
+def clean_url(url):
+ """Strip auth from URL, i.e. https://url.com/file.txt?auth -> https://url.com/file.txt."""
+ url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/
+ return urllib.parse.unquote(url).split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth
+
+
+def url2file(url):
+ """Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt."""
+ return Path(clean_url(url)).name
+
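A quick sketch of the two URL helpers on the example URL from their docstrings:

```python
from ultralytics.utils import clean_url, url2file

url = 'https://url.com/file.txt?auth'
print(clean_url(url))  # https://url.com/file.txt
print(url2file(url))   # file.txt
```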
+
+# Run below code on utils init ------------------------------------------------------------------------------------
+
+# Check first-install steps
+PREFIX = colorstr('Ultralytics: ')
+SETTINGS = get_settings()
+DATASETS_DIR = Path(SETTINGS['datasets_dir']) # global datasets directory
+ENVIRONMENT = 'Colab' if is_colab() else 'Kaggle' if is_kaggle() else 'Jupyter' if is_jupyter() else \
+ 'Docker' if is_docker() else platform.system()
+TESTS_RUNNING = is_pytest_running() or is_github_actions_ci()
+set_sentry()
+
+# Apply monkey patches only when the top-level entry script is located inside the ultralytics package directory
+from .patches import imread, imshow, imwrite
+
+# torch.save = torch_save
+if Path(inspect.stack()[0].filename).parent.parent.as_posix() in inspect.stack()[-1].filename:
+ cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow
diff --git a/ultralytics/yolo/utils/autobatch.py b/ultralytics/utils/autobatch.py
similarity index 96%
rename from ultralytics/yolo/utils/autobatch.py
rename to ultralytics/utils/autobatch.py
index 0645f81eb..252f3db81 100644
--- a/ultralytics/yolo/utils/autobatch.py
+++ b/ultralytics/utils/autobatch.py
@@ -8,8 +8,8 @@ from copy import deepcopy
import numpy as np
import torch
-from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, colorstr
-from ultralytics.yolo.utils.torch_utils import profile
+from ultralytics.utils import DEFAULT_CFG, LOGGER, colorstr
+from ultralytics.utils.torch_utils import profile
def check_train_batch_size(model, imgsz=640, amp=True):
diff --git a/ultralytics/yolo/utils/benchmarks.py b/ultralytics/utils/benchmarks.py
similarity index 96%
rename from ultralytics/yolo/utils/benchmarks.py
rename to ultralytics/utils/benchmarks.py
index e84a1a6fd..c35fde462 100644
--- a/ultralytics/yolo/utils/benchmarks.py
+++ b/ultralytics/utils/benchmarks.py
@@ -3,7 +3,7 @@
Benchmark YOLO model formats for speed and accuracy
Usage:
- from ultralytics.yolo.utils.benchmarks import ProfileModels, benchmark
+ from ultralytics.utils.benchmarks import ProfileModels, benchmark
ProfileModels(['yolov8n.yaml', 'yolov8s.yaml']).profile()
run_benchmarks(model='yolov8n.pt', imgsz=160)
@@ -34,13 +34,13 @@ import torch.cuda
from tqdm import tqdm
from ultralytics import YOLO
-from ultralytics.yolo.cfg import TASK2DATA, TASK2METRIC
-from ultralytics.yolo.engine.exporter import export_formats
-from ultralytics.yolo.utils import LINUX, LOGGER, MACOS, ROOT, SETTINGS
-from ultralytics.yolo.utils.checks import check_requirements, check_yolo
-from ultralytics.yolo.utils.downloads import download
-from ultralytics.yolo.utils.files import file_size
-from ultralytics.yolo.utils.torch_utils import select_device
+from ultralytics.cfg import TASK2DATA, TASK2METRIC
+from ultralytics.engine.exporter import export_formats
+from ultralytics.utils import LINUX, LOGGER, MACOS, ROOT, SETTINGS
+from ultralytics.utils.checks import check_requirements, check_yolo
+from ultralytics.utils.downloads import download
+from ultralytics.utils.files import file_size
+from ultralytics.utils.torch_utils import select_device
def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
diff --git a/ultralytics/yolo/utils/callbacks/__init__.py b/ultralytics/utils/callbacks/__init__.py
similarity index 100%
rename from ultralytics/yolo/utils/callbacks/__init__.py
rename to ultralytics/utils/callbacks/__init__.py
diff --git a/ultralytics/yolo/utils/callbacks/base.py b/ultralytics/utils/callbacks/base.py
similarity index 100%
rename from ultralytics/yolo/utils/callbacks/base.py
rename to ultralytics/utils/callbacks/base.py
diff --git a/ultralytics/yolo/utils/callbacks/clearml.py b/ultralytics/utils/callbacks/clearml.py
similarity index 97%
rename from ultralytics/yolo/utils/callbacks/clearml.py
rename to ultralytics/utils/callbacks/clearml.py
index 2cfdd73e0..a5a01d5ba 100644
--- a/ultralytics/yolo/utils/callbacks/clearml.py
+++ b/ultralytics/utils/callbacks/clearml.py
@@ -5,8 +5,8 @@ import re
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
-from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING
-from ultralytics.yolo.utils.torch_utils import model_info_for_loggers
+from ultralytics.utils import LOGGER, TESTS_RUNNING
+from ultralytics.utils.torch_utils import model_info_for_loggers
try:
import clearml
diff --git a/ultralytics/yolo/utils/callbacks/comet.py b/ultralytics/utils/callbacks/comet.py
similarity index 98%
rename from ultralytics/yolo/utils/callbacks/comet.py
rename to ultralytics/utils/callbacks/comet.py
index 94aeb8f64..036644298 100644
--- a/ultralytics/yolo/utils/callbacks/comet.py
+++ b/ultralytics/utils/callbacks/comet.py
@@ -3,8 +3,8 @@
import os
from pathlib import Path
-from ultralytics.yolo.utils import LOGGER, RANK, TESTS_RUNNING, ops
-from ultralytics.yolo.utils.torch_utils import model_info_for_loggers
+from ultralytics.utils import LOGGER, RANK, TESTS_RUNNING, ops
+from ultralytics.utils.torch_utils import model_info_for_loggers
try:
import comet_ml
diff --git a/ultralytics/yolo/utils/callbacks/dvc.py b/ultralytics/utils/callbacks/dvc.py
similarity index 97%
rename from ultralytics/yolo/utils/callbacks/dvc.py
rename to ultralytics/utils/callbacks/dvc.py
index 138100c8d..ce3076e5f 100644
--- a/ultralytics/yolo/utils/callbacks/dvc.py
+++ b/ultralytics/utils/callbacks/dvc.py
@@ -3,8 +3,8 @@ import os
import pkg_resources as pkg
-from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING
-from ultralytics.yolo.utils.torch_utils import model_info_for_loggers
+from ultralytics.utils import LOGGER, TESTS_RUNNING
+from ultralytics.utils.torch_utils import model_info_for_loggers
try:
from importlib.metadata import version
diff --git a/ultralytics/yolo/utils/callbacks/hub.py b/ultralytics/utils/callbacks/hub.py
similarity index 96%
rename from ultralytics/yolo/utils/callbacks/hub.py
rename to ultralytics/utils/callbacks/hub.py
index e3b34272e..edb8078ad 100644
--- a/ultralytics/yolo/utils/callbacks/hub.py
+++ b/ultralytics/utils/callbacks/hub.py
@@ -4,8 +4,8 @@ import json
from time import time
from ultralytics.hub.utils import PREFIX, events
-from ultralytics.yolo.utils import LOGGER
-from ultralytics.yolo.utils.torch_utils import model_info_for_loggers
+from ultralytics.utils import LOGGER
+from ultralytics.utils.torch_utils import model_info_for_loggers
def on_pretrain_routine_end(trainer):
diff --git a/ultralytics/yolo/utils/callbacks/mlflow.py b/ultralytics/utils/callbacks/mlflow.py
similarity index 97%
rename from ultralytics/yolo/utils/callbacks/mlflow.py
rename to ultralytics/utils/callbacks/mlflow.py
index 6c4b798b9..76bc0c1fb 100644
--- a/ultralytics/yolo/utils/callbacks/mlflow.py
+++ b/ultralytics/utils/callbacks/mlflow.py
@@ -4,7 +4,7 @@ import os
import re
from pathlib import Path
-from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING, colorstr
+from ultralytics.utils import LOGGER, TESTS_RUNNING, colorstr
try:
import mlflow
diff --git a/ultralytics/yolo/utils/callbacks/neptune.py b/ultralytics/utils/callbacks/neptune.py
similarity index 96%
rename from ultralytics/yolo/utils/callbacks/neptune.py
rename to ultralytics/utils/callbacks/neptune.py
index be6434124..da5ccf422 100644
--- a/ultralytics/yolo/utils/callbacks/neptune.py
+++ b/ultralytics/utils/callbacks/neptune.py
@@ -3,8 +3,8 @@
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
-from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING
-from ultralytics.yolo.utils.torch_utils import model_info_for_loggers
+from ultralytics.utils import LOGGER, TESTS_RUNNING
+from ultralytics.utils.torch_utils import model_info_for_loggers
try:
import neptune
diff --git a/ultralytics/yolo/utils/callbacks/raytune.py b/ultralytics/utils/callbacks/raytune.py
similarity index 100%
rename from ultralytics/yolo/utils/callbacks/raytune.py
rename to ultralytics/utils/callbacks/raytune.py
diff --git a/ultralytics/yolo/utils/callbacks/tensorboard.py b/ultralytics/utils/callbacks/tensorboard.py
similarity index 95%
rename from ultralytics/yolo/utils/callbacks/tensorboard.py
rename to ultralytics/utils/callbacks/tensorboard.py
index a436b9ce9..7ddf1bb44 100644
--- a/ultralytics/yolo/utils/callbacks/tensorboard.py
+++ b/ultralytics/utils/callbacks/tensorboard.py
@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
-from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING, colorstr
+from ultralytics.utils import LOGGER, TESTS_RUNNING, colorstr
try:
from torch.utils.tensorboard import SummaryWriter
diff --git a/ultralytics/yolo/utils/callbacks/wb.py b/ultralytics/utils/callbacks/wb.py
similarity index 94%
rename from ultralytics/yolo/utils/callbacks/wb.py
rename to ultralytics/utils/callbacks/wb.py
index 4b4c29b77..605a22841 100644
--- a/ultralytics/yolo/utils/callbacks/wb.py
+++ b/ultralytics/utils/callbacks/wb.py
@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
-from ultralytics.yolo.utils import TESTS_RUNNING
-from ultralytics.yolo.utils.torch_utils import model_info_for_loggers
+from ultralytics.utils import TESTS_RUNNING
+from ultralytics.utils.torch_utils import model_info_for_loggers
try:
import wandb as wb
diff --git a/ultralytics/yolo/utils/checks.py b/ultralytics/utils/checks.py
similarity index 97%
rename from ultralytics/yolo/utils/checks.py
rename to ultralytics/utils/checks.py
index 80e2cc34d..6d8bd8e3b 100644
--- a/ultralytics/yolo/utils/checks.py
+++ b/ultralytics/utils/checks.py
@@ -20,9 +20,9 @@ import requests
import torch
from matplotlib import font_manager
-from ultralytics.yolo.utils import (AUTOINSTALL, LOGGER, ONLINE, ROOT, USER_CONFIG_DIR, ThreadingLocked, TryExcept,
- clean_url, colorstr, downloads, emojis, is_colab, is_docker, is_jupyter, is_kaggle,
- is_online, is_pip_package, url2file)
+from ultralytics.utils import (AUTOINSTALL, LOGGER, ONLINE, ROOT, USER_CONFIG_DIR, ThreadingLocked, TryExcept,
+ clean_url, colorstr, downloads, emojis, is_colab, is_docker, is_jupyter, is_kaggle,
+ is_online, is_pip_package, url2file)
def is_ascii(s) -> bool:
@@ -325,9 +325,7 @@ def check_file(file, suffix='', download=True, hard=True):
downloads.safe_download(url=url, file=file, unzip=False)
return file
else: # search
- files = []
- for d in 'models', 'datasets', 'tracker/cfg', 'yolo/cfg': # search directories
- files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file
+ files = glob.glob(str(ROOT / 'cfg' / '**' / file), recursive=True) # find file
if not files and hard:
raise FileNotFoundError(f"'{file}' does not exist")
elif len(files) > 1 and hard:
@@ -357,7 +355,7 @@ def check_imshow(warn=False):
def check_yolo(verbose=True, device=''):
"""Return a human-readable YOLO software and hardware summary."""
- from ultralytics.yolo.utils.torch_utils import select_device
+ from ultralytics.utils.torch_utils import select_device
if is_jupyter():
if check_requirements('wandb', install=False):
diff --git a/ultralytics/yolo/utils/dist.py b/ultralytics/utils/dist.py
similarity index 97%
rename from ultralytics/yolo/utils/dist.py
rename to ultralytics/utils/dist.py
index 6de029f5c..11900985a 100644
--- a/ultralytics/yolo/utils/dist.py
+++ b/ultralytics/utils/dist.py
@@ -29,7 +29,7 @@ def generate_ddp_file(trainer):
content = f'''overrides = {vars(trainer.args)} \nif __name__ == "__main__":
from {module} import {name}
- from ultralytics.yolo.utils import DEFAULT_CFG_DICT
+ from ultralytics.utils import DEFAULT_CFG_DICT
cfg = DEFAULT_CFG_DICT.copy()
cfg.update(save_dir='') # handle the extra key 'save_dir'
diff --git a/ultralytics/yolo/utils/downloads.py b/ultralytics/utils/downloads.py
similarity index 98%
rename from ultralytics/yolo/utils/downloads.py
rename to ultralytics/utils/downloads.py
index c13192157..0958f35bd 100644
--- a/ultralytics/yolo/utils/downloads.py
+++ b/ultralytics/utils/downloads.py
@@ -13,7 +13,7 @@ import requests
import torch
from tqdm import tqdm
-from ultralytics.yolo.utils import LOGGER, checks, clean_url, emojis, is_online, url2file
+from ultralytics.utils import LOGGER, checks, clean_url, emojis, is_online, url2file
GITHUB_ASSET_NAMES = [f'yolov8{k}{suffix}.pt' for k in 'nsmlx' for suffix in ('', '6', '-cls', '-seg', '-pose')] + \
[f'yolov5{k}u.pt' for k in 'nsmlx'] + \
@@ -164,7 +164,7 @@ def safe_download(url,
if method == 'torch':
torch.hub.download_url_to_file(url, f, progress=progress)
else:
- from ultralytics.yolo.utils import TQDM_BAR_FORMAT
+ from ultralytics.utils import TQDM_BAR_FORMAT
with request.urlopen(url) as response, tqdm(total=int(response.getheader('Content-Length', 0)),
desc=desc,
disable=not progress,
@@ -212,7 +212,7 @@ def get_github_assets(repo='ultralytics/assets', version='latest'):
def attempt_download_asset(file, repo='ultralytics/assets', release='v0.0.0'):
"""Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.2', etc."""
- from ultralytics.yolo.utils import SETTINGS # scoped for circular import
+ from ultralytics.utils import SETTINGS # scoped for circular import
# YOLOv3/5u updates
file = str(file)
diff --git a/ultralytics/yolo/utils/errors.py b/ultralytics/utils/errors.py
similarity index 86%
rename from ultralytics/yolo/utils/errors.py
rename to ultralytics/utils/errors.py
index 7163d4d25..5a7643186 100644
--- a/ultralytics/yolo/utils/errors.py
+++ b/ultralytics/utils/errors.py
@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
-from ultralytics.yolo.utils import emojis
+from ultralytics.utils import emojis
class HUBModelError(Exception):
diff --git a/ultralytics/yolo/utils/files.py b/ultralytics/utils/files.py
similarity index 100%
rename from ultralytics/yolo/utils/files.py
rename to ultralytics/utils/files.py
diff --git a/ultralytics/yolo/utils/instance.py b/ultralytics/utils/instance.py
similarity index 100%
rename from ultralytics/yolo/utils/instance.py
rename to ultralytics/utils/instance.py
diff --git a/ultralytics/yolo/utils/loss.py b/ultralytics/utils/loss.py
similarity index 98%
rename from ultralytics/yolo/utils/loss.py
rename to ultralytics/utils/loss.py
index 71ed0a526..1da0586c8 100644
--- a/ultralytics/yolo/utils/loss.py
+++ b/ultralytics/utils/loss.py
@@ -4,9 +4,9 @@ import torch
import torch.nn as nn
import torch.nn.functional as F
-from ultralytics.yolo.utils.metrics import OKS_SIGMA
-from ultralytics.yolo.utils.ops import crop_mask, xywh2xyxy, xyxy2xywh
-from ultralytics.yolo.utils.tal import TaskAlignedAssigner, dist2bbox, make_anchors
+from ultralytics.utils.metrics import OKS_SIGMA
+from ultralytics.utils.ops import crop_mask, xywh2xyxy, xyxy2xywh
+from ultralytics.utils.tal import TaskAlignedAssigner, dist2bbox, make_anchors
from .metrics import bbox_iou
from .tal import bbox2dist
diff --git a/ultralytics/yolo/utils/metrics.py b/ultralytics/utils/metrics.py
similarity index 99%
rename from ultralytics/yolo/utils/metrics.py
rename to ultralytics/utils/metrics.py
index cd903213f..cdf933131 100644
--- a/ultralytics/yolo/utils/metrics.py
+++ b/ultralytics/utils/metrics.py
@@ -10,7 +10,7 @@ import matplotlib.pyplot as plt
import numpy as np
import torch
-from ultralytics.yolo.utils import LOGGER, SimpleClass, TryExcept, plt_settings
+from ultralytics.utils import LOGGER, SimpleClass, TryExcept, plt_settings
OKS_SIGMA = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89]) / 10.0
diff --git a/ultralytics/yolo/utils/ops.py b/ultralytics/utils/ops.py
similarity index 99%
rename from ultralytics/yolo/utils/ops.py
rename to ultralytics/utils/ops.py
index bb9ca49a8..38f4eb0ee 100644
--- a/ultralytics/yolo/utils/ops.py
+++ b/ultralytics/utils/ops.py
@@ -11,7 +11,7 @@ import torch
import torch.nn.functional as F
import torchvision
-from ultralytics.yolo.utils import LOGGER
+from ultralytics.utils import LOGGER
from .metrics import box_iou
diff --git a/ultralytics/yolo/utils/patches.py b/ultralytics/utils/patches.py
similarity index 100%
rename from ultralytics/yolo/utils/patches.py
rename to ultralytics/utils/patches.py
diff --git a/ultralytics/yolo/utils/plotting.py b/ultralytics/utils/plotting.py
similarity index 99%
rename from ultralytics/yolo/utils/plotting.py
rename to ultralytics/utils/plotting.py
index 0236a7943..968163608 100644
--- a/ultralytics/yolo/utils/plotting.py
+++ b/ultralytics/utils/plotting.py
@@ -13,7 +13,7 @@ from PIL import Image, ImageDraw, ImageFont
from PIL import __version__ as pil_version
from scipy.ndimage import gaussian_filter1d
-from ultralytics.yolo.utils import LOGGER, TryExcept, plt_settings, threaded
+from ultralytics.utils import LOGGER, TryExcept, plt_settings, threaded
from .checks import check_font, check_version, is_ascii
from .files import increment_path
diff --git a/ultralytics/yolo/utils/tal.py b/ultralytics/utils/tal.py
similarity index 100%
rename from ultralytics/yolo/utils/tal.py
rename to ultralytics/utils/tal.py
diff --git a/ultralytics/yolo/utils/torch_utils.py b/ultralytics/utils/torch_utils.py
similarity index 97%
rename from ultralytics/yolo/utils/torch_utils.py
rename to ultralytics/utils/torch_utils.py
index 0ab1f4e66..77235aed7 100644
--- a/ultralytics/yolo/utils/torch_utils.py
+++ b/ultralytics/utils/torch_utils.py
@@ -17,8 +17,8 @@ import torch.nn as nn
import torch.nn.functional as F
import torchvision
-from ultralytics.yolo.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, RANK, __version__
-from ultralytics.yolo.utils.checks import check_requirements, check_version
+from ultralytics.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, RANK, __version__
+from ultralytics.utils.checks import check_requirements, check_version
try:
import thop
@@ -213,7 +213,7 @@ def model_info_for_loggers(trainer):
'model/speed_PyTorch(ms)': 18.755}
"""
if trainer.args.profile: # profile ONNX and TensorRT times
- from ultralytics.yolo.utils.benchmarks import ProfileModels
+ from ultralytics.utils.benchmarks import ProfileModels
results = ProfileModels([trainer.last], device=trainer.device).profile()[0]
results.pop('model/name')
else: # only return PyTorch times from most recent validation
@@ -387,7 +387,7 @@ def strip_optimizer(f: Union[str, Path] = 'best.pt', s: str = '') -> None:
Usage:
from pathlib import Path
- from ultralytics.yolo.utils.torch_utils import strip_optimizer
+ from ultralytics.utils.torch_utils import strip_optimizer
for f in Path('/Users/glennjocher/Downloads/weights').rglob('*.pt'):
strip_optimizer(f)
"""
@@ -398,6 +398,12 @@ def strip_optimizer(f: Union[str, Path] = 'best.pt', s: str = '') -> None:
import pickle
x = torch.load(f, map_location=torch.device('cpu'))
+ if 'model' not in x:
+ LOGGER.info(f'Skipping {f}, not a valid Ultralytics model.')
+ return
+
+ if hasattr(x['model'], 'args'):
+ x['model'].args = dict(x['model'].args) # convert from IterableSimpleNamespace to dict
args = {**DEFAULT_CFG_DICT, **x['train_args']} if 'train_args' in x else None # combine args
if x.get('ema'):
x['model'] = x['ema'] # replace model with ema
diff --git a/ultralytics/yolo/utils/tuner.py b/ultralytics/utils/tuner.py
similarity index 97%
rename from ultralytics/yolo/utils/tuner.py
rename to ultralytics/utils/tuner.py
index 54f10b054..c40983c49 100644
--- a/ultralytics/yolo/utils/tuner.py
+++ b/ultralytics/utils/tuner.py
@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
-from ultralytics.yolo.cfg import TASK2DATA, TASK2METRIC
-from ultralytics.yolo.utils import DEFAULT_CFG_DICT, LOGGER, NUM_THREADS
+from ultralytics.cfg import TASK2DATA, TASK2METRIC
+from ultralytics.utils import DEFAULT_CFG_DICT, LOGGER, NUM_THREADS
def run_ray_tune(model,
diff --git a/ultralytics/yolo/cfg/__init__.py b/ultralytics/yolo/cfg/__init__.py
index 71a902276..5ea5519b6 100644
--- a/ultralytics/yolo/cfg/__init__.py
+++ b/ultralytics/yolo/cfg/__init__.py
@@ -1,421 +1,10 @@
-# Ultralytics YOLO 🚀, AGPL-3.0 license
-
-import contextlib
-import re
-import shutil
+import importlib
import sys
-from difflib import get_close_matches
-from pathlib import Path
-from types import SimpleNamespace
-from typing import Dict, List, Union
-
-from ultralytics.yolo.utils import (DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_PATH, LOGGER, ROOT, USER_CONFIG_DIR,
- IterableSimpleNamespace, __version__, checks, colorstr, deprecation_warn,
- get_settings, yaml_load, yaml_print)
-
-# Define valid tasks and modes
-MODES = 'train', 'val', 'predict', 'export', 'track', 'benchmark'
-TASKS = 'detect', 'segment', 'classify', 'pose'
-TASK2DATA = {'detect': 'coco8.yaml', 'segment': 'coco8-seg.yaml', 'classify': 'imagenet100', 'pose': 'coco8-pose.yaml'}
-TASK2MODEL = {
- 'detect': 'yolov8n.pt',
- 'segment': 'yolov8n-seg.pt',
- 'classify': 'yolov8n-cls.pt',
- 'pose': 'yolov8n-pose.pt'}
-TASK2METRIC = {
- 'detect': 'metrics/mAP50-95(B)',
- 'segment': 'metrics/mAP50-95(M)',
- 'classify': 'metrics/accuracy_top1',
- 'pose': 'metrics/mAP50-95(P)'}
-
-
-CLI_HELP_MSG = \
- f"""
- Arguments received: {str(['yolo'] + sys.argv[1:])}. Ultralytics 'yolo' commands use the following syntax:
-
- yolo TASK MODE ARGS
-
- Where TASK (optional) is one of {TASKS}
- MODE (required) is one of {MODES}
- ARGS (optional) are any number of custom 'arg=value' pairs like 'imgsz=320' that override defaults.
- See all ARGS at https://docs.ultralytics.com/usage/cfg or with 'yolo cfg'
-
- 1. Train a detection model for 10 epochs with an initial learning_rate of 0.01
- yolo train data=coco128.yaml model=yolov8n.pt epochs=10 lr0=0.01
-
- 2. Predict a YouTube video using a pretrained segmentation model at image size 320:
- yolo predict model=yolov8n-seg.pt source='https://youtu.be/Zgi9g1ksQHc' imgsz=320
-
- 3. Val a pretrained detection model at batch-size 1 and image size 640:
- yolo val model=yolov8n.pt data=coco128.yaml batch=1 imgsz=640
-
- 4. Export a YOLOv8n classification model to ONNX format at image size 224 by 128 (no TASK required)
- yolo export model=yolov8n-cls.pt format=onnx imgsz=224,128
-
- 5. Run special commands:
- yolo help
- yolo checks
- yolo version
- yolo settings
- yolo copy-cfg
- yolo cfg
-
- Docs: https://docs.ultralytics.com
- Community: https://community.ultralytics.com
- GitHub: https://github.com/ultralytics/ultralytics
- """
-
-# Define keys for arg type checks
-CFG_FLOAT_KEYS = 'warmup_epochs', 'box', 'cls', 'dfl', 'degrees', 'shear'
-CFG_FRACTION_KEYS = ('dropout', 'iou', 'lr0', 'lrf', 'momentum', 'weight_decay', 'warmup_momentum', 'warmup_bias_lr',
- 'label_smoothing', 'hsv_h', 'hsv_s', 'hsv_v', 'translate', 'scale', 'perspective', 'flipud',
- 'fliplr', 'mosaic', 'mixup', 'copy_paste', 'conf', 'iou', 'fraction') # fraction floats 0.0 - 1.0
-CFG_INT_KEYS = ('epochs', 'patience', 'batch', 'workers', 'seed', 'close_mosaic', 'mask_ratio', 'max_det', 'vid_stride',
- 'line_width', 'workspace', 'nbs', 'save_period')
-CFG_BOOL_KEYS = ('save', 'exist_ok', 'verbose', 'deterministic', 'single_cls', 'rect', 'cos_lr', 'overlap_mask', 'val',
- 'save_json', 'save_hybrid', 'half', 'dnn', 'plots', 'show', 'save_txt', 'save_conf', 'save_crop',
- 'show_labels', 'show_conf', 'visualize', 'augment', 'agnostic_nms', 'retina_masks', 'boxes', 'keras',
- 'optimize', 'int8', 'dynamic', 'simplify', 'nms', 'v5loader', 'profile')
-
-
-def cfg2dict(cfg):
- """
- Convert a configuration object to a dictionary, whether it is a file path, a string, or a SimpleNamespace object.
-
- Args:
- cfg (str | Path | SimpleNamespace): Configuration object to be converted to a dictionary.
-
- Returns:
- cfg (dict): Configuration object in dictionary format.
- """
- if isinstance(cfg, (str, Path)):
- cfg = yaml_load(cfg) # load dict
- elif isinstance(cfg, SimpleNamespace):
- cfg = vars(cfg) # convert to dict
- return cfg
-
-
-def get_cfg(cfg: Union[str, Path, Dict, SimpleNamespace] = DEFAULT_CFG_DICT, overrides: Dict = None):
- """
- Load and merge configuration data from a file or dictionary.
-
- Args:
- cfg (str | Path | Dict | SimpleNamespace): Configuration data.
- overrides (str | Dict | optional): Overrides in the form of a file name or a dictionary. Default is None.
-
- Returns:
- (SimpleNamespace): Training arguments namespace.
- """
- cfg = cfg2dict(cfg)
-
- # Merge overrides
- if overrides:
- overrides = cfg2dict(overrides)
- check_cfg_mismatch(cfg, overrides)
- cfg = {**cfg, **overrides} # merge cfg and overrides dicts (prefer overrides)
-
- # Special handling for numeric project/name
- for k in 'project', 'name':
- if k in cfg and isinstance(cfg[k], (int, float)):
- cfg[k] = str(cfg[k])
- if cfg.get('name') == 'model': # assign model to 'name' arg
- cfg['name'] = cfg.get('model', '').split('.')[0]
- LOGGER.warning(f"WARNING ⚠️ 'name=model' automatically updated to 'name={cfg['name']}'.")
-
- # Type and Value checks
- for k, v in cfg.items():
- if v is not None: # None values may be from optional args
- if k in CFG_FLOAT_KEYS and not isinstance(v, (int, float)):
- raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. "
- f"Valid '{k}' types are int (i.e. '{k}=0') or float (i.e. '{k}=0.5')")
- elif k in CFG_FRACTION_KEYS:
- if not isinstance(v, (int, float)):
- raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. "
- f"Valid '{k}' types are int (i.e. '{k}=0') or float (i.e. '{k}=0.5')")
- if not (0.0 <= v <= 1.0):
- raise ValueError(f"'{k}={v}' is an invalid value. "
- f"Valid '{k}' values are between 0.0 and 1.0.")
- elif k in CFG_INT_KEYS and not isinstance(v, int):
- raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. "
- f"'{k}' must be an int (i.e. '{k}=8')")
- elif k in CFG_BOOL_KEYS and not isinstance(v, bool):
- raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. "
- f"'{k}' must be a bool (i.e. '{k}=True' or '{k}=False')")
-
- # Return instance
- return IterableSimpleNamespace(**cfg)
-
-
-def _handle_deprecation(custom):
- """
- Hardcoded function to handle deprecated config keys
- """
-
- for key in custom.copy().keys():
- if key == 'hide_labels':
- deprecation_warn(key, 'show_labels')
- custom['show_labels'] = custom.pop('hide_labels') == 'False'
- if key == 'hide_conf':
- deprecation_warn(key, 'show_conf')
- custom['show_conf'] = custom.pop('hide_conf') == 'False'
- if key == 'line_thickness':
- deprecation_warn(key, 'line_width')
- custom['line_width'] = custom.pop('line_thickness')
-
- return custom
-
-
-def check_cfg_mismatch(base: Dict, custom: Dict, e=None):
- """
- This function checks for any mismatched keys between a custom configuration list and a base configuration list.
- If any mismatched keys are found, the function prints out similar keys from the base list and exits the program.
-
- Args:
- custom (dict): a dictionary of custom configuration options
- base (dict): a dictionary of base configuration options
- """
- custom = _handle_deprecation(custom)
- base, custom = (set(x.keys()) for x in (base, custom))
- mismatched = [x for x in custom if x not in base]
- if mismatched:
- string = ''
- for x in mismatched:
- matches = get_close_matches(x, base) # key list
- matches = [f'{k}={DEFAULT_CFG_DICT[k]}' if DEFAULT_CFG_DICT.get(k) is not None else k for k in matches]
- match_str = f'Similar arguments are i.e. {matches}.' if matches else ''
- string += f"'{colorstr('red', 'bold', x)}' is not a valid YOLO argument. {match_str}\n"
- raise SyntaxError(string + CLI_HELP_MSG) from e
-
-
-def merge_equals_args(args: List[str]) -> List[str]:
- """
- Merges arguments around isolated '=' args in a list of strings.
- The function considers cases where the first argument ends with '=' or the second starts with '=',
- as well as when the middle one is an equals sign.
-
- Args:
- args (List[str]): A list of strings where each element is an argument.
-
- Returns:
- List[str]: A list of strings where the arguments around isolated '=' are merged.
- """
- new_args = []
- for i, arg in enumerate(args):
- if arg == '=' and 0 < i < len(args) - 1: # merge ['arg', '=', 'val']
- new_args[-1] += f'={args[i + 1]}'
- del args[i + 1]
- elif arg.endswith('=') and i < len(args) - 1 and '=' not in args[i + 1]: # merge ['arg=', 'val']
- new_args.append(f'{arg}{args[i + 1]}')
- del args[i + 1]
- elif arg.startswith('=') and i > 0: # merge ['arg', '=val']
- new_args[-1] += arg
- else:
- new_args.append(arg)
- return new_args
-
-
-def handle_yolo_hub(args: List[str]) -> None:
- """
- Handle Ultralytics HUB command-line interface (CLI) commands.
-
- This function processes Ultralytics HUB CLI commands such as login and logout.
- It should be called when executing a script with arguments related to HUB authentication.
-
- Args:
- args (List[str]): A list of command line arguments
-
- Example:
- python my_script.py hub login your_api_key
- """
- from ultralytics import hub
-
- if args[0] == 'login':
- key = args[1] if len(args) > 1 else ''
- # Log in to Ultralytics HUB using the provided API key
- hub.login(key)
- elif args[0] == 'logout':
- # Log out from Ultralytics HUB
- hub.logout()
-
-
-def handle_yolo_settings(args: List[str]) -> None:
- """
- Handle YOLO settings command-line interface (CLI) commands.
-
- This function processes YOLO settings CLI commands such as reset.
- It should be called when executing a script with arguments related to YOLO settings management.
-
- Args:
- args (List[str]): A list of command line arguments for YOLO settings management.
-
- Example:
- python my_script.py yolo settings reset
- """
- path = USER_CONFIG_DIR / 'settings.yaml' # get SETTINGS YAML file path
- if any(args) and args[0] == 'reset':
- path.unlink() # delete the settings file
- get_settings() # create new settings
- LOGGER.info('Settings reset successfully') # inform the user that settings have been reset
- yaml_print(path) # print the current settings
-
-
-def entrypoint(debug=''):
- """
- This function is the ultralytics package entrypoint, it's responsible for parsing the command line arguments passed
- to the package.
-
- This function allows for:
- - passing mandatory YOLO args as a list of strings
- - specifying the task to be performed, either 'detect', 'segment' or 'classify'
- - specifying the mode, either 'train', 'val', 'test', or 'predict'
- - running special modes like 'checks'
- - passing overrides to the package's configuration
-
- It uses the package's default cfg and initializes it using the passed overrides.
- Then it calls the CLI function with the composed cfg
- """
- args = (debug.split(' ') if debug else sys.argv)[1:]
- if not args: # no arguments passed
- LOGGER.info(CLI_HELP_MSG)
- return
-
- special = {
- 'help': lambda: LOGGER.info(CLI_HELP_MSG),
- 'checks': checks.check_yolo,
- 'version': lambda: LOGGER.info(__version__),
- 'settings': lambda: handle_yolo_settings(args[1:]),
- 'cfg': lambda: yaml_print(DEFAULT_CFG_PATH),
- 'hub': lambda: handle_yolo_hub(args[1:]),
- 'login': lambda: handle_yolo_hub(args),
- 'copy-cfg': copy_default_cfg}
- full_args_dict = {**DEFAULT_CFG_DICT, **{k: None for k in TASKS}, **{k: None for k in MODES}, **special}
-
- # Define common mis-uses of special commands, i.e. -h, -help, --help
- special.update({k[0]: v for k, v in special.items()}) # singular
- special.update({k[:-1]: v for k, v in special.items() if len(k) > 1 and k.endswith('s')}) # singular
- special = {**special, **{f'-{k}': v for k, v in special.items()}, **{f'--{k}': v for k, v in special.items()}}
-
- overrides = {} # basic overrides, i.e. imgsz=320
- for a in merge_equals_args(args): # merge spaces around '=' sign
- if a.startswith('--'):
- LOGGER.warning(f"WARNING ⚠️ '{a}' does not require leading dashes '--', updating to '{a[2:]}'.")
- a = a[2:]
- if a.endswith(','):
- LOGGER.warning(f"WARNING ⚠️ '{a}' does not require trailing comma ',', updating to '{a[:-1]}'.")
- a = a[:-1]
- if '=' in a:
- try:
- re.sub(r' *= *', '=', a) # remove spaces around equals sign
- k, v = a.split('=', 1) # split on first '=' sign
- assert v, f"missing '{k}' value"
- if k == 'cfg': # custom.yaml passed
- LOGGER.info(f'Overriding {DEFAULT_CFG_PATH} with {v}')
- overrides = {k: val for k, val in yaml_load(checks.check_yaml(v)).items() if k != 'cfg'}
- else:
- if v.lower() == 'none':
- v = None
- elif v.lower() == 'true':
- v = True
- elif v.lower() == 'false':
- v = False
- else:
- with contextlib.suppress(Exception):
- v = eval(v)
- overrides[k] = v
- except (NameError, SyntaxError, ValueError, AssertionError) as e:
- check_cfg_mismatch(full_args_dict, {a: ''}, e)
-
- elif a in TASKS:
- overrides['task'] = a
- elif a in MODES:
- overrides['mode'] = a
- elif a.lower() in special:
- special[a.lower()]()
- return
- elif a in DEFAULT_CFG_DICT and isinstance(DEFAULT_CFG_DICT[a], bool):
- overrides[a] = True # auto-True for default bool args, i.e. 'yolo show' sets show=True
- elif a in DEFAULT_CFG_DICT:
- raise SyntaxError(f"'{colorstr('red', 'bold', a)}' is a valid YOLO argument but is missing an '=' sign "
- f"to set its value, i.e. try '{a}={DEFAULT_CFG_DICT[a]}'\n{CLI_HELP_MSG}")
- else:
- check_cfg_mismatch(full_args_dict, {a: ''})
-
- # Check keys
- check_cfg_mismatch(full_args_dict, overrides)
-
- # Mode
- mode = overrides.get('mode', None)
- if mode is None:
- mode = DEFAULT_CFG.mode or 'predict'
- LOGGER.warning(f"WARNING ⚠️ 'mode' is missing. Valid modes are {MODES}. Using default 'mode={mode}'.")
- elif mode not in MODES:
- if mode not in ('checks', checks):
- raise ValueError(f"Invalid 'mode={mode}'. Valid modes are {MODES}.\n{CLI_HELP_MSG}")
- LOGGER.warning("WARNING ⚠️ 'yolo mode=checks' is deprecated. Use 'yolo checks' instead.")
- checks.check_yolo()
- return
-
- # Task
- task = overrides.pop('task', None)
- if task:
- if task not in TASKS:
- raise ValueError(f"Invalid 'task={task}'. Valid tasks are {TASKS}.\n{CLI_HELP_MSG}")
- if 'model' not in overrides:
- overrides['model'] = TASK2MODEL[task]
-
- # Model
- model = overrides.pop('model', DEFAULT_CFG.model)
- if model is None:
- model = 'yolov8n.pt'
- LOGGER.warning(f"WARNING ⚠️ 'model' is missing. Using default 'model={model}'.")
- overrides['model'] = model
- if 'rtdetr' in model.lower(): # guess architecture
- from ultralytics import RTDETR
- model = RTDETR(model) # no task argument
- elif 'sam' in model.lower():
- from ultralytics import SAM
- model = SAM(model)
- else:
- from ultralytics import YOLO
- model = YOLO(model, task=task)
- if isinstance(overrides.get('pretrained'), str):
- model.load(overrides['pretrained'])
-
- # Task Update
- if task != model.task:
- if task:
- LOGGER.warning(f"WARNING ⚠️ conflicting 'task={task}' passed with 'task={model.task}' model. "
- f"Ignoring 'task={task}' and updating to 'task={model.task}' to match model.")
- task = model.task
-
- # Mode
- if mode in ('predict', 'track') and 'source' not in overrides:
- overrides['source'] = DEFAULT_CFG.source or ROOT / 'assets' if (ROOT / 'assets').exists() \
- else 'https://ultralytics.com/images/bus.jpg'
- LOGGER.warning(f"WARNING ⚠️ 'source' is missing. Using default 'source={overrides['source']}'.")
- elif mode in ('train', 'val'):
- if 'data' not in overrides:
- overrides['data'] = TASK2DATA.get(task or DEFAULT_CFG.task, DEFAULT_CFG.data)
- LOGGER.warning(f"WARNING ⚠️ 'data' is missing. Using default 'data={overrides['data']}'.")
- elif mode == 'export':
- if 'format' not in overrides:
- overrides['format'] = DEFAULT_CFG.format or 'torchscript'
- LOGGER.warning(f"WARNING ⚠️ 'format' is missing. Using default 'format={overrides['format']}'.")
-
- # Run command in python
- # getattr(model, mode)(**vars(get_cfg(overrides=overrides))) # default args using default.yaml
- getattr(model, mode)(**overrides) # default args from model
-
-# Special modes --------------------------------------------------------------------------------------------------------
-def copy_default_cfg():
- """Copy and create a new default configuration file with '_copy' appended to its name."""
- new_file = Path.cwd() / DEFAULT_CFG_PATH.name.replace('.yaml', '_copy.yaml')
- shutil.copy2(DEFAULT_CFG_PATH, new_file)
- LOGGER.info(f'{DEFAULT_CFG_PATH} copied to {new_file}\n'
- f"Example YOLO command with this new custom cfg:\n yolo cfg='{new_file}' imgsz=320 batch=8")
+from ultralytics.utils import LOGGER
+# Set modules in sys.modules under their old name
+sys.modules['ultralytics.yolo.cfg'] = importlib.import_module('ultralytics.cfg')
-if __name__ == '__main__':
- # Example Usage: entrypoint(debug='yolo predict model=yolov8n.pt')
- entrypoint(debug='')
+LOGGER.warning("WARNING ⚠️ 'ultralytics.yolo.cfg' is deprecated since '8.0.136' and will be removed in '8.1.0'. "
+ "Please use 'ultralytics.cfg' instead.")
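To illustrate what this shim buys downstream code, the old import path keeps resolving (to the relocated `ultralytics.cfg` module) while logging the deprecation warning; a sketch assuming ultralytics 8.0.136+ is installed:

```python
from ultralytics.yolo.cfg import TASK2DATA              # old path: still works, but logs the warning above
from ultralytics.cfg import TASK2DATA as TASK2DATA_NEW  # new canonical path

assert TASK2DATA is TASK2DATA_NEW  # both names point at the same object via the sys.modules alias
```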
diff --git a/ultralytics/yolo/data/__init__.py b/ultralytics/yolo/data/__init__.py
index f1d9deeed..f68391ef0 100644
--- a/ultralytics/yolo/data/__init__.py
+++ b/ultralytics/yolo/data/__init__.py
@@ -1,9 +1,17 @@
-# Ultralytics YOLO 🚀, AGPL-3.0 license
+import importlib
+import sys
-from .base import BaseDataset
-from .build import build_dataloader, build_yolo_dataset, load_inference_source
-from .dataset import ClassificationDataset, SemanticDataset, YOLODataset
-from .dataset_wrappers import MixAndRectDataset
+from ultralytics.utils import LOGGER
-__all__ = ('BaseDataset', 'ClassificationDataset', 'MixAndRectDataset', 'SemanticDataset', 'YOLODataset',
- 'build_yolo_dataset', 'build_dataloader', 'load_inference_source')
+# Set modules in sys.modules under their old name
+sys.modules['ultralytics.yolo.data'] = importlib.import_module('ultralytics.data')
+# This is needed for updating older cls models; without it, the update approach shown in the warning below won't work.
+sys.modules['ultralytics.yolo.data.augment'] = importlib.import_module('ultralytics.data.augment')
+
+DATA_WARNING = """WARNING ⚠️ 'ultralytics.yolo.data' is deprecated since '8.0.136' and will be removed in '8.1.0'. Please use 'ultralytics.data' instead.
+Note this warning may be related to loading older models. You can update your model to current structure with:
+ import torch
+ ckpt = torch.load("model.pt") # applies to both official and custom models
+ torch.save(ckpt, "updated-model.pt")
+"""
+LOGGER.warning(DATA_WARNING)
diff --git a/ultralytics/yolo/data/dataloaders/v5augmentations.py b/ultralytics/yolo/data/dataloaders/v5augmentations.py
deleted file mode 100644
index 8e0b3e2fd..000000000
--- a/ultralytics/yolo/data/dataloaders/v5augmentations.py
+++ /dev/null
@@ -1,407 +0,0 @@
-# Ultralytics YOLO 🚀, AGPL-3.0 license
-"""
-Image augmentation functions
-"""
-
-import math
-import random
-
-import cv2
-import numpy as np
-import torch
-import torchvision.transforms as T
-import torchvision.transforms.functional as TF
-
-from ultralytics.yolo.utils import LOGGER, colorstr
-from ultralytics.yolo.utils.checks import check_version
-from ultralytics.yolo.utils.metrics import bbox_ioa
-from ultralytics.yolo.utils.ops import resample_segments, segment2box, xywhn2xyxy
-
-IMAGENET_MEAN = 0.485, 0.456, 0.406 # RGB mean
-IMAGENET_STD = 0.229, 0.224, 0.225 # RGB standard deviation
-
-
-class Albumentations:
- # YOLOv5 Albumentations class (optional, only used if package is installed)
- def __init__(self, size=640):
- """Instantiate object with image augmentations for YOLOv5."""
- self.transform = None
- prefix = colorstr('albumentations: ')
- try:
- import albumentations as A
- check_version(A.__version__, '1.0.3', hard=True) # version requirement
-
- T = [
- A.RandomResizedCrop(height=size, width=size, scale=(0.8, 1.0), ratio=(0.9, 1.11), p=0.0),
- A.Blur(p=0.01),
- A.MedianBlur(p=0.01),
- A.ToGray(p=0.01),
- A.CLAHE(p=0.01),
- A.RandomBrightnessContrast(p=0.0),
- A.RandomGamma(p=0.0),
- A.ImageCompression(quality_lower=75, p=0.0)] # transforms
- self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))
-
- LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
- except ImportError: # package not installed, skip
- pass
- except Exception as e:
- LOGGER.info(f'{prefix}{e}')
-
- def __call__(self, im, labels, p=1.0):
- """Transforms input image and labels with probability 'p'."""
- if self.transform and random.random() < p:
- new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed
- im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])])
- return im, labels
-
-
-def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False):
- """Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = (x - mean) / std."""
- return TF.normalize(x, mean, std, inplace=inplace)
-
-
-def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD):
- """Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = x * std + mean."""
- for i in range(3):
- x[:, i] = x[:, i] * std[i] + mean[i]
- return x
-
-
-def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):
- """HSV color-space augmentation."""
- if hgain or sgain or vgain:
- r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
- hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))
- dtype = im.dtype # uint8
-
- x = np.arange(0, 256, dtype=r.dtype)
- lut_hue = ((x * r[0]) % 180).astype(dtype)
- lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
- lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
-
- im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
- cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed
-
-
-def hist_equalize(im, clahe=True, bgr=False):
- """Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255."""
- yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
- if clahe:
- c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
- yuv[:, :, 0] = c.apply(yuv[:, :, 0])
- else:
- yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram
- return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB
-
-
-def replicate(im, labels):
- """Replicate labels."""
- h, w = im.shape[:2]
- boxes = labels[:, 1:].astype(int)
- x1, y1, x2, y2 = boxes.T
- s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
- for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
- x1b, y1b, x2b, y2b = boxes[i]
- bh, bw = y2b - y1b, x2b - x1b
- yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
- x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
- im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax]
- labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
-
- return im, labels
-
-
-def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
- """Resize and pad image while meeting stride-multiple constraints."""
- shape = im.shape[:2] # current shape [height, width]
- if isinstance(new_shape, int):
- new_shape = (new_shape, new_shape)
-
- # Scale ratio (new / old)
- r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
- if not scaleup: # only scale down, do not scale up (for better val mAP)
- r = min(r, 1.0)
-
- # Compute padding
- ratio = r, r # width, height ratios
- new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
- dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
- if auto: # minimum rectangle
- dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding
- elif scaleFill: # stretch
- dw, dh = 0.0, 0.0
- new_unpad = (new_shape[1], new_shape[0])
- ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
-
- dw /= 2 # divide padding into 2 sides
- dh /= 2
-
- if shape[::-1] != new_unpad: # resize
- im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
- top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
- left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
- im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
- return im, ratio, (dw, dh)
-
-
-def random_perspective(im,
- targets=(),
- segments=(),
- degrees=10,
- translate=.1,
- scale=.1,
- shear=10,
- perspective=0.0,
- border=(0, 0)):
- # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))
- # targets = [cls, xyxy]
-
- height = im.shape[0] + border[0] * 2 # shape(h,w,c)
- width = im.shape[1] + border[1] * 2
-
- # Center
- C = np.eye(3)
- C[0, 2] = -im.shape[1] / 2 # x translation (pixels)
- C[1, 2] = -im.shape[0] / 2 # y translation (pixels)
-
- # Perspective
- P = np.eye(3)
- P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
- P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
-
- # Rotation and Scale
- R = np.eye(3)
- a = random.uniform(-degrees, degrees)
- # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
- s = random.uniform(1 - scale, 1 + scale)
- # s = 2 ** random.uniform(-scale, scale)
- R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
-
- # Shear
- S = np.eye(3)
- S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
- S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
-
- # Translation
- T = np.eye(3)
- T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
- T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
-
- # Combined rotation matrix
- M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
- if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
- if perspective:
- im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
- else: # affine
- im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
-
- # Visualize
- # import matplotlib.pyplot as plt
- # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
- # ax[0].imshow(im[:, :, ::-1]) # base
- # ax[1].imshow(im2[:, :, ::-1]) # warped
-
- # Transform label coordinates
- n = len(targets)
- if n:
- use_segments = any(x.any() for x in segments)
- new = np.zeros((n, 4))
- if use_segments: # warp segments
- segments = resample_segments(segments) # upsample
- for i, segment in enumerate(segments):
- xy = np.ones((len(segment), 3))
- xy[:, :2] = segment
- xy = xy @ M.T # transform
- xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine
-
- # Clip
- new[i] = segment2box(xy, width, height)
-
- else: # warp boxes
- xy = np.ones((n * 4, 3))
- xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
- xy = xy @ M.T # transform
- xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine
-
- # Create new boxes
- x = xy[:, [0, 2, 4, 6]]
- y = xy[:, [1, 3, 5, 7]]
- new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
-
- # Clip
- new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
- new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)
-
- # Filter candidates
- i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
- targets = targets[i]
- targets[:, 1:5] = new[i]
-
- return im, targets
-
-
-def copy_paste(im, labels, segments, p=0.5):
- """Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)."""
- n = len(segments)
- if p and n:
- h, w, c = im.shape # height, width, channels
- im_new = np.zeros(im.shape, np.uint8)
-
- # Calculate ioa first then select indexes randomly
- boxes = np.stack([w - labels[:, 3], labels[:, 2], w - labels[:, 1], labels[:, 4]], axis=-1) # (n, 4)
- ioa = bbox_ioa(boxes, labels[:, 1:5]) # intersection over area
- indexes = np.nonzero((ioa < 0.30).all(1))[0] # (N, )
- n = len(indexes)
- for j in random.sample(list(indexes), k=round(p * n)):
- l, box, s = labels[j], boxes[j], segments[j]
- labels = np.concatenate((labels, [[l[0], *box]]), 0)
- segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))
- cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED)
-
- result = cv2.flip(im, 1) # augment segments (flip left-right)
- i = cv2.flip(im_new, 1).astype(bool)
- im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug
-
- return im, labels, segments
-
-
-def cutout(im, labels, p=0.5):
- """Applies image cutout augmentation https://arxiv.org/abs/1708.04552."""
- if random.random() < p:
- h, w = im.shape[:2]
- scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
- for s in scales:
- mask_h = random.randint(1, int(h * s)) # create random masks
- mask_w = random.randint(1, int(w * s))
-
- # Box
- xmin = max(0, random.randint(0, w) - mask_w // 2)
- ymin = max(0, random.randint(0, h) - mask_h // 2)
- xmax = min(w, xmin + mask_w)
- ymax = min(h, ymin + mask_h)
-
- # Apply random color mask
- im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
-
- # Return unobscured labels
- if len(labels) and s > 0.03:
- box = np.array([[xmin, ymin, xmax, ymax]], dtype=np.float32)
- ioa = bbox_ioa(box, xywhn2xyxy(labels[:, 1:5], w, h))[0] # intersection over area
- labels = labels[ioa < 0.60] # remove >60% obscured labels
-
- return labels
-
-
-def mixup(im, labels, im2, labels2):
- """Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf."""
- r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0
- im = (im * r + im2 * (1 - r)).astype(np.uint8)
- labels = np.concatenate((labels, labels2), 0)
- return im, labels
-
-
-def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)
- # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
- w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
- w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
- ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
- return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
-
-
-def classify_albumentations(
- augment=True,
- size=224,
- scale=(0.08, 1.0),
- ratio=(0.75, 1.0 / 0.75), # 0.75, 1.33
- hflip=0.5,
- vflip=0.0,
- jitter=0.4,
- mean=IMAGENET_MEAN,
- std=IMAGENET_STD,
- auto_aug=False):
- # YOLOv5 classification Albumentations (optional, only used if package is installed)
- prefix = colorstr('albumentations: ')
- try:
- import albumentations as A
- from albumentations.pytorch import ToTensorV2
- check_version(A.__version__, '1.0.3', hard=True) # version requirement
- if augment: # Resize and crop
- T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)]
- if auto_aug:
- # TODO: implement AugMix, AutoAug & RandAug in albumentation
- LOGGER.info(f'{prefix}auto augmentations are currently not supported')
- else:
- if hflip > 0:
- T += [A.HorizontalFlip(p=hflip)]
- if vflip > 0:
- T += [A.VerticalFlip(p=vflip)]
- if jitter > 0:
- jitter = float(jitter)
- T += [A.ColorJitter(jitter, jitter, jitter, 0)] # brightness, contrast, satuaration, 0 hue
- else: # Use fixed crop for eval set (reproducibility)
- T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]
- T += [A.Normalize(mean=mean, std=std), ToTensorV2()] # Normalize and convert to Tensor
- LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
- return A.Compose(T)
-
- except ImportError: # package not installed, skip
- LOGGER.warning(f'{prefix}⚠️ not found, install with `pip install albumentations` (recommended)')
- except Exception as e:
- LOGGER.info(f'{prefix}{e}')
-
-
-def classify_transforms(size=224):
- """Transforms to apply if albumentations not installed."""
- assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)'
- # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
- return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
-
-
-class LetterBox:
- # YOLOv5 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
- def __init__(self, size=(640, 640), auto=False, stride=32):
-        """Initializes LetterBox to resize and pad an image to a specified (h, w) size for YOLOv5 preprocessing."""
- super().__init__()
- self.h, self.w = (size, size) if isinstance(size, int) else size
- self.auto = auto # pass max size integer, automatically solve for short side using stride
- self.stride = stride # used with auto
-
- def __call__(self, im): # im = np.array HWC
- imh, imw = im.shape[:2]
- r = min(self.h / imh, self.w / imw) # ratio of new/old
- h, w = round(imh * r), round(imw * r) # resized image
-        hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else (self.h, self.w)
-        top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)
-        im_out = np.full((hs, ws, 3), 114, dtype=im.dtype)
- im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
- return im_out
-
-
-class CenterCrop:
- # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()])
- def __init__(self, size=640):
-        """Initializes CenterCrop to crop and resize an image to a specified (h, w) size for YOLOv5 preprocessing."""
- super().__init__()
- self.h, self.w = (size, size) if isinstance(size, int) else size
-
- def __call__(self, im): # im = np.array HWC
- imh, imw = im.shape[:2]
- m = min(imh, imw) # min dimension
- top, left = (imh - m) // 2, (imw - m) // 2
- return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR)
-
-
-class ToTensor:
- # YOLOv5 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
- def __init__(self, half=False):
- """Initialize ToTensor class for YOLOv5 image preprocessing."""
- super().__init__()
- self.half = half
-
- def __call__(self, im): # im = np.array HWC in BGR order
- im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous
- im = torch.from_numpy(im) # to torch
- im = im.half() if self.half else im.float() # uint8 to fp16/32
- im /= 255.0 # 0-255 to 0.0-1.0
- return im
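
The classification preprocessing removed above (CenterCrop followed by ToTensor, composed via torchvision's T.Compose) reduces to a center crop, a resize, a BGR-to-RGB/HWC-to-CHW conversion and a 0-1 scale. Below is a minimal, self-contained sketch of that path, assuming only OpenCV, NumPy and PyTorch; the function name classify_preprocess is illustrative and not part of the library.

# Sketch of the classification preprocessing performed by the deleted CenterCrop + ToTensor classes.
# Illustrative only: classify_preprocess is a hypothetical name, not an Ultralytics API.
import cv2
import numpy as np
import torch


def classify_preprocess(im: np.ndarray, size: int = 224) -> torch.Tensor:
    imh, imw = im.shape[:2]
    m = min(imh, imw)  # shortest side
    top, left = (imh - m) // 2, (imw - m) // 2  # center-crop offsets
    im = cv2.resize(im[top:top + m, left:left + m], (size, size), interpolation=cv2.INTER_LINEAR)
    im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1])  # HWC to CHW, BGR to RGB
    return torch.from_numpy(im).float() / 255.0  # uint8 0-255 to float32 0.0-1.0


x = classify_preprocess(np.zeros((480, 640, 3), dtype=np.uint8))  # -> torch.Size([3, 224, 224])
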
diff --git a/ultralytics/yolo/data/dataloaders/v5loader.py b/ultralytics/yolo/data/dataloaders/v5loader.py
deleted file mode 100644
index 96549ddeb..000000000
--- a/ultralytics/yolo/data/dataloaders/v5loader.py
+++ /dev/null
@@ -1,1109 +0,0 @@
-# Ultralytics YOLO 🚀, AGPL-3.0 license
-"""
-Dataloaders and dataset utils
-"""
-
-import contextlib
-import glob
-import hashlib
-import math
-import os
-import random
-import shutil
-import time
-from itertools import repeat
-from multiprocessing.pool import ThreadPool
-from pathlib import Path
-from threading import Thread
-from urllib.parse import urlparse
-
-import cv2
-import numpy as np
-import psutil
-import torch
-import torchvision
-from PIL import ExifTags, Image, ImageOps
-from torch.utils.data import DataLoader, Dataset, dataloader, distributed
-from tqdm import tqdm
-
-from ultralytics.yolo.utils import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, is_colab, is_dir_writeable,
- is_kaggle)
-from ultralytics.yolo.utils.checks import check_requirements
-from ultralytics.yolo.utils.ops import clean_str, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn
-from ultralytics.yolo.utils.torch_utils import torch_distributed_zero_first
-
-from .v5augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste,
- letterbox, mixup, random_perspective)
-
-# Parameters
-HELP_URL = 'See https://docs.ultralytics.com/yolov5/tutorials/train_custom_data'
-IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes
-VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes
-LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
-RANK = int(os.getenv('RANK', -1))
-PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders
-
-# Get orientation exif tag
-for orientation in ExifTags.TAGS.keys():
- if ExifTags.TAGS[orientation] == 'Orientation':
- break
-
-
-def get_hash(paths):
- """Returns a single hash value of a list of paths (files or dirs)."""
- size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes
- h = hashlib.sha256(str(size).encode()) # hash sizes
- h.update(''.join(paths).encode()) # hash paths
- return h.hexdigest() # return hash
-
-
-def exif_size(img):
- """Returns exif-corrected PIL size."""
- s = img.size # (width, height)
- with contextlib.suppress(Exception):
- rotation = dict(img._getexif().items())[orientation]
- if rotation in [6, 8]: # rotation 270 or 90
- s = (s[1], s[0])
- return s
-
-
-def exif_transpose(image):
- """
- Transpose a PIL image accordingly if it has an EXIF Orientation tag.
- Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()
-
- :param image: The image to transpose.
- :return: An image.
- """
- exif = image.getexif()
- orientation = exif.get(0x0112, 1) # default 1
- if orientation > 1:
- method = {
- 2: Image.FLIP_LEFT_RIGHT,
- 3: Image.ROTATE_180,
- 4: Image.FLIP_TOP_BOTTOM,
- 5: Image.TRANSPOSE,
- 6: Image.ROTATE_270,
- 7: Image.TRANSVERSE,
- 8: Image.ROTATE_90}.get(orientation)
- if method is not None:
- image = image.transpose(method)
- del exif[0x0112]
- image.info['exif'] = exif.tobytes()
- return image
-
-
-def seed_worker(worker_id):
- """Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader."""
- worker_seed = torch.initial_seed() % 2 ** 32
- np.random.seed(worker_seed)
- random.seed(worker_seed)
-
-
-def create_dataloader(path,
- imgsz,
- batch_size,
- stride,
- single_cls=False,
- hyp=None,
- augment=False,
- cache=False,
- pad=0.0,
- rect=False,
- rank=-1,
- workers=8,
- image_weights=False,
- close_mosaic=False,
- min_items=0,
- prefix='',
- shuffle=False,
- seed=0):
- if rect and shuffle:
- LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False')
- shuffle = False
- with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
- dataset = LoadImagesAndLabels(
- path,
- imgsz,
- batch_size,
- augment=augment, # augmentation
- hyp=hyp, # hyperparameters
- rect=rect, # rectangular batches
- cache_images=cache,
- single_cls=single_cls,
- stride=int(stride),
- pad=pad,
- image_weights=image_weights,
- min_items=min_items,
- prefix=prefix)
-
- batch_size = min(batch_size, len(dataset))
- nd = torch.cuda.device_count() # number of CUDA devices
- nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers
- sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
- loader = DataLoader if image_weights or close_mosaic else InfiniteDataLoader # DataLoader allows attribute updates
- generator = torch.Generator()
- generator.manual_seed(6148914691236517205 + seed + RANK)
- return loader(dataset,
- batch_size=batch_size,
- shuffle=shuffle and sampler is None,
- num_workers=nw,
- sampler=sampler,
- pin_memory=PIN_MEMORY,
- collate_fn=LoadImagesAndLabels.collate_fn,
- worker_init_fn=seed_worker,
- generator=generator), dataset
-
-
-class InfiniteDataLoader(dataloader.DataLoader):
- """Dataloader that reuses workers
-
- Uses same syntax as vanilla DataLoader
- """
-
- def __init__(self, *args, **kwargs):
- """Dataloader that reuses workers for same syntax as vanilla DataLoader."""
- super().__init__(*args, **kwargs)
- object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
- self.iterator = super().__iter__()
-
- def __len__(self):
- """Returns the length of batch_sampler's sampler."""
- return len(self.batch_sampler.sampler)
-
- def __iter__(self):
- """Creates a sampler that infinitely repeats."""
- for _ in range(len(self)):
- yield next(self.iterator)
-
-
-class _RepeatSampler:
- """Sampler that repeats forever
-
- Args:
- sampler (Dataset.sampler): The sampler to repeat.
- """
-
- def __init__(self, sampler):
- """Sampler that repeats dataset samples infinitely."""
- self.sampler = sampler
-
- def __iter__(self):
- """Infinite loop iterating over a given sampler."""
- while True:
- yield from iter(self.sampler)
-
-
-class LoadScreenshots:
- # YOLOv5 screenshot dataloader, i.e. `python detect.py --source "screen 0 100 100 512 256"`
- def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None):
- """source = [screen_number left top width height] (pixels)."""
- check_requirements('mss')
- import mss
-
- source, *params = source.split()
- self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0
- if len(params) == 1:
- self.screen = int(params[0])
- elif len(params) == 4:
- left, top, width, height = (int(x) for x in params)
- elif len(params) == 5:
- self.screen, left, top, width, height = (int(x) for x in params)
- self.img_size = img_size
- self.stride = stride
- self.transforms = transforms
- self.auto = auto
- self.mode = 'stream'
- self.frame = 0
- self.sct = mss.mss()
-
- # Parse monitor shape
- monitor = self.sct.monitors[self.screen]
- self.top = monitor['top'] if top is None else (monitor['top'] + top)
- self.left = monitor['left'] if left is None else (monitor['left'] + left)
- self.width = width or monitor['width']
- self.height = height or monitor['height']
- self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height}
-
- def __iter__(self):
- """Iterates over objects with the same structure as the monitor attribute."""
- return self
-
- def __next__(self):
- """mss screen capture: get raw pixels from the screen as np array."""
- im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR
- s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: '
-
- if self.transforms:
- im = self.transforms(im0) # transforms
- else:
- im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize
- im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
- im = np.ascontiguousarray(im) # contiguous
- self.frame += 1
- return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s
-
-
-class LoadImages:
- # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
- def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
- """Initialize instance variables and check for valid input."""
- if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line
- path = Path(path).read_text().rsplit()
- files = []
- for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:
- p = str(Path(p).resolve())
- if '*' in p:
- files.extend(sorted(glob.glob(p, recursive=True))) # glob
- elif os.path.isdir(p):
- files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir
- elif os.path.isfile(p):
- files.append(p) # files
- else:
- raise FileNotFoundError(f'{p} does not exist')
-
- images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
- videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
- ni, nv = len(images), len(videos)
-
- self.img_size = img_size
- self.stride = stride
- self.files = images + videos
- self.nf = ni + nv # number of files
- self.video_flag = [False] * ni + [True] * nv
- self.mode = 'image'
- self.auto = auto
- self.transforms = transforms # optional
- self.vid_stride = vid_stride # video frame-rate stride
- if any(videos):
- self._new_video(videos[0]) # new video
- else:
- self.cap = None
- assert self.nf > 0, f'No images or videos found in {p}. ' \
- f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
-
- def __iter__(self):
- """Returns an iterator object for iterating over images or videos found in a directory."""
- self.count = 0
- return self
-
- def __next__(self):
- """Iterator's next item, performs transformation on image and returns path, transformed image, original image, capture and size."""
- if self.count == self.nf:
- raise StopIteration
- path = self.files[self.count]
-
- if self.video_flag[self.count]:
- # Read video
- self.mode = 'video'
- for _ in range(self.vid_stride):
- self.cap.grab()
- ret_val, im0 = self.cap.retrieve()
- while not ret_val:
- self.count += 1
- self.cap.release()
- if self.count == self.nf: # last video
- raise StopIteration
- path = self.files[self.count]
- self._new_video(path)
- ret_val, im0 = self.cap.read()
-
- self.frame += 1
- # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False
- s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '
-
- else:
- # Read image
- self.count += 1
- im0 = cv2.imread(path) # BGR
- assert im0 is not None, f'Image Not Found {path}'
- s = f'image {self.count}/{self.nf} {path}: '
-
- if self.transforms:
- im = self.transforms(im0) # transforms
- else:
- im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize
- im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
- im = np.ascontiguousarray(im) # contiguous
-
- return path, im, im0, self.cap, s
-
- def _new_video(self, path):
- """Create a new video capture object."""
- self.frame = 0
- self.cap = cv2.VideoCapture(path)
- self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)
- self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees
- # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493
-
- def _cv2_rotate(self, im):
- """Rotate a cv2 video manually."""
- if self.orientation == 0:
- return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
- elif self.orientation == 180:
- return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
- elif self.orientation == 90:
- return cv2.rotate(im, cv2.ROTATE_180)
- return im
-
- def __len__(self):
- """Returns the number of files in the class instance."""
- return self.nf # number of files
-
-
-class LoadStreams:
- # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
- def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
- """Initialize YOLO detector with optional transforms and check input shapes."""
- torch.backends.cudnn.benchmark = True # faster for fixed-size inference
- self.mode = 'stream'
- self.img_size = img_size
- self.stride = stride
- self.vid_stride = vid_stride # video frame-rate stride
- sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]
- n = len(sources)
- self.sources = [clean_str(x) for x in sources] # clean source names for later
- self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
- for i, s in enumerate(sources): # index, source
- # Start thread to read frames from video stream
- st = f'{i + 1}/{n}: {s}... '
- if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video
- # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc'
- check_requirements(('pafy', 'youtube_dl==2020.12.2'))
- import pafy
- s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL
- s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam
- if s == 0:
- assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.'
- assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.'
- cap = cv2.VideoCapture(s)
- assert cap.isOpened(), f'{st}Failed to open {s}'
- w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
- h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
- fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan
- self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback
- self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback
-
- _, self.imgs[i] = cap.read() # guarantee first frame
- self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)
- LOGGER.info(f'{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)')
- self.threads[i].start()
- LOGGER.info('') # newline
-
- # Check for common shapes
- s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs])
- self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
- self.auto = auto and self.rect
- self.transforms = transforms # optional
- if not self.rect:
- LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.')
-
- def update(self, i, cap, stream):
- """Read stream `i` frames in daemon thread."""
- n, f = 0, self.frames[i] # frame number, frame array
- while cap.isOpened() and n < f:
- n += 1
- cap.grab() # .read() = .grab() followed by .retrieve()
- if n % self.vid_stride == 0:
- success, im = cap.retrieve()
- if success:
- self.imgs[i] = im
- else:
- LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.')
- self.imgs[i] = np.zeros_like(self.imgs[i])
- cap.open(stream) # re-open stream if signal was lost
- time.sleep(0.0) # wait time
-
- def __iter__(self):
- """Iterator that returns the class instance."""
- self.count = -1
- return self
-
- def __next__(self):
- """Return a tuple containing transformed and resized image data."""
- self.count += 1
- if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit
- cv2.destroyAllWindows()
- raise StopIteration
-
- im0 = self.imgs.copy()
- if self.transforms:
- im = np.stack([self.transforms(x) for x in im0]) # transforms
- else:
- im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize
- im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW
- im = np.ascontiguousarray(im) # contiguous
-
- return self.sources, im, im0, None, ''
-
- def __len__(self):
- """Returns the number of sources as the length of the object."""
- return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years
-
-
-def img2label_paths(img_paths):
- """Define label paths as a function of image paths."""
- sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}' # /images/, /labels/ substrings
- return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
-
-
-class LoadImagesAndLabels(Dataset):
- """YOLOv5 train_loader/val_loader, loads images and labels for training and validation."""
- cache_version = 0.6 # dataset labels *.cache version
- rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4]
-
- def __init__(self,
- path,
- img_size=640,
- batch_size=16,
- augment=False,
- hyp=None,
- rect=False,
- image_weights=False,
- cache_images=False,
- single_cls=False,
- stride=32,
- pad=0.0,
- min_items=0,
- prefix=''):
- self.img_size = img_size
- self.augment = augment
- self.hyp = hyp
- self.image_weights = image_weights
- self.rect = False if image_weights else rect
- self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
- self.mosaic_border = [-img_size // 2, -img_size // 2]
- self.stride = stride
- self.path = path
- self.albumentations = Albumentations(size=img_size) if augment else None
-
- try:
- f = [] # image files
- for p in path if isinstance(path, list) else [path]:
- p = Path(p) # os-agnostic
- if p.is_dir(): # dir
- f += glob.glob(str(p / '**' / '*.*'), recursive=True)
- # f = list(p.rglob('*.*')) # pathlib
- elif p.is_file(): # file
- with open(p) as t:
- t = t.read().strip().splitlines()
- parent = str(p.parent) + os.sep
- f += [x.replace('./', parent, 1) if x.startswith('./') else x for x in t] # to global path
- # f += [p.parent / x.lstrip(os.sep) for x in t] # to global path (pathlib)
- else:
- raise FileNotFoundError(f'{prefix}{p} does not exist')
- self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)
- # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
- assert self.im_files, f'{prefix}No images found'
- except Exception as e:
- raise FileNotFoundError(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') from e
-
- # Check cache
- self.label_files = img2label_paths(self.im_files) # labels
- cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
- try:
- cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict
- assert cache['version'] == self.cache_version # matches current version
- assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash
- except (FileNotFoundError, AssertionError, AttributeError):
- cache, exists = self.cache_labels(cache_path, prefix), False # run cache ops
-
- # Display cache
- nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total
- if exists and LOCAL_RANK in (-1, 0):
- d = f'Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt'
- tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results
- if cache['msgs']:
- LOGGER.info('\n'.join(cache['msgs'])) # display warnings
- assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}'
-
- # Read cache
- [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items
- labels, shapes, self.segments = zip(*cache.values())
- nl = len(np.concatenate(labels, 0)) # number of labels
- assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. {HELP_URL}'
- self.labels = list(labels)
- self.shapes = np.array(shapes)
- self.im_files = list(cache.keys()) # update
- self.label_files = img2label_paths(cache.keys()) # update
-
- # Filter images
- if min_items:
- include = np.array([len(x) >= min_items for x in self.labels]).nonzero()[0].astype(int)
- LOGGER.info(f'{prefix}{n - len(include)}/{n} images filtered from dataset')
- self.im_files = [self.im_files[i] for i in include]
- self.label_files = [self.label_files[i] for i in include]
- self.labels = [self.labels[i] for i in include]
- self.segments = [self.segments[i] for i in include]
- self.shapes = self.shapes[include] # wh
-
- # Create indices
- n = len(self.shapes) # number of images
- bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index
- nb = bi[-1] + 1 # number of batches
- self.batch = bi # batch index of image
- self.n = n
- self.indices = range(n)
-
- # Update labels
- include_class = [] # filter labels to include only these classes (optional)
- include_class_array = np.array(include_class).reshape(1, -1)
- for i, (label, segment) in enumerate(zip(self.labels, self.segments)):
- if include_class:
- j = (label[:, 0:1] == include_class_array).any(1)
- self.labels[i] = label[j]
- if segment:
- self.segments[i] = [segment[si] for si, idx in enumerate(j) if idx]
- if single_cls: # single-class training, merge all classes into 0
- self.labels[i][:, 0] = 0
-
- # Rectangular Training
- if self.rect:
- # Sort by aspect ratio
- s = self.shapes # wh
- ar = s[:, 1] / s[:, 0] # aspect ratio
- irect = ar.argsort()
- self.im_files = [self.im_files[i] for i in irect]
- self.label_files = [self.label_files[i] for i in irect]
- self.labels = [self.labels[i] for i in irect]
- self.segments = [self.segments[i] for i in irect]
- self.shapes = s[irect] # wh
- ar = ar[irect]
-
- # Set training image shapes
- shapes = [[1, 1]] * nb
- for i in range(nb):
- ari = ar[bi == i]
- mini, maxi = ari.min(), ari.max()
- if maxi < 1:
- shapes[i] = [maxi, 1]
- elif mini > 1:
- shapes[i] = [1, 1 / mini]
-
- self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
-
- # Cache images into RAM/disk for faster training
- if cache_images == 'ram' and not self.check_cache_ram(prefix=prefix):
- cache_images = False
- self.ims = [None] * n
- self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files]
- if cache_images:
- b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes
- self.im_hw0, self.im_hw = [None] * n, [None] * n
- fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image
- with ThreadPool(NUM_THREADS) as pool:
- results = pool.imap(fcn, range(n))
- pbar = tqdm(enumerate(results), total=n, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0)
- for i, x in pbar:
- if cache_images == 'disk':
- b += self.npy_files[i].stat().st_size
- else: # 'ram'
- self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i)
- b += self.ims[i].nbytes
- pbar.desc = f'{prefix}Caching images ({b / gb:.1f}GB {cache_images})'
- pbar.close()
-
- def check_cache_ram(self, safety_margin=0.1, prefix=''):
- """Check image caching requirements vs available memory."""
- b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes
- n = min(self.n, 30) # extrapolate from 30 random images
- for _ in range(n):
- im = cv2.imread(random.choice(self.im_files)) # sample image
- ratio = self.img_size / max(im.shape[0], im.shape[1]) # max(h, w) # ratio
- b += im.nbytes * ratio ** 2
- mem_required = b * self.n / n # GB required to cache dataset into RAM
- mem = psutil.virtual_memory()
- cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question
- if not cache:
- LOGGER.info(f'{prefix}{mem_required / gb:.1f}GB RAM required, '
- f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, '
- f"{'caching images ✅' if cache else 'not caching images ⚠️'}")
- return cache
-
- def cache_labels(self, path=Path('./labels.cache'), prefix=''):
- """Cache labels and save as numpy file for next time."""
- # Cache dataset labels, check images and read shapes
- if path.exists():
- path.unlink() # remove *.cache file if exists
- x = {} # dict
- nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages
- desc = f'{prefix}Scanning {path.parent / path.stem}...'
- total = len(self.im_files)
- with ThreadPool(NUM_THREADS) as pool:
- results = pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix)))
- pbar = tqdm(results, desc=desc, total=total, bar_format=TQDM_BAR_FORMAT)
- for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
- nm += nm_f
- nf += nf_f
- ne += ne_f
- nc += nc_f
- if im_file:
- x[im_file] = [lb, shape, segments]
- if msg:
- msgs.append(msg)
- pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt'
- pbar.close()
-
- if msgs:
- LOGGER.info('\n'.join(msgs))
- if nf == 0:
- LOGGER.warning(f'{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}')
- x['hash'] = get_hash(self.label_files + self.im_files)
- x['results'] = nf, nm, ne, nc, len(self.im_files)
- x['msgs'] = msgs # warnings
- x['version'] = self.cache_version # cache version
- if is_dir_writeable(path.parent):
- np.save(str(path), x) # save cache for next time
- path.with_suffix('.cache.npy').rename(path) # remove .npy suffix
- LOGGER.info(f'{prefix}New cache created: {path}')
- else:
- LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable') # not writeable
- return x
-
- def __len__(self):
- """Returns the length of 'im_files' attribute."""
- return len(self.im_files)
-
- def __getitem__(self, index):
- """Get a sample and its corresponding label, filename and shape from the dataset."""
- index = self.indices[index] # linear, shuffled, or image_weights
-
- hyp = self.hyp
- mosaic = self.mosaic and random.random() < hyp['mosaic']
- if mosaic:
- # Load mosaic
- img, labels = self.load_mosaic(index)
- shapes = None
-
- # MixUp augmentation
- if random.random() < hyp['mixup']:
- img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1)))
-
- else:
- # Load image
- img, (h0, w0), (h, w) = self.load_image(index)
-
- # Letterbox
- shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
- img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
- shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
-
- labels = self.labels[index].copy()
- if labels.size: # normalized xywh to pixel xyxy format
- labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
-
- if self.augment:
- img, labels = random_perspective(img,
- labels,
- degrees=hyp['degrees'],
- translate=hyp['translate'],
- scale=hyp['scale'],
- shear=hyp['shear'],
- perspective=hyp['perspective'])
-
- nl = len(labels) # number of labels
- if nl:
- labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)
-
- if self.augment:
- # Albumentations
- img, labels = self.albumentations(img, labels)
- nl = len(labels) # update after albumentations
-
- # HSV color-space
- augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
-
- # Flip up-down
- if random.random() < hyp['flipud']:
- img = np.flipud(img)
- if nl:
- labels[:, 2] = 1 - labels[:, 2]
-
- # Flip left-right
- if random.random() < hyp['fliplr']:
- img = np.fliplr(img)
- if nl:
- labels[:, 1] = 1 - labels[:, 1]
-
- # Cutouts
- # labels = cutout(img, labels, p=0.5)
- # nl = len(labels) # update after cutout
-
- labels_out = torch.zeros((nl, 6))
- if nl:
- labels_out[:, 1:] = torch.from_numpy(labels)
-
- # Convert
- img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
- img = np.ascontiguousarray(img)
-
- return torch.from_numpy(img), labels_out, self.im_files[index], shapes
-
- def load_image(self, i):
- """Loads 1 image from dataset index 'i', returns (im, original hw, resized hw)."""
- im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i],
- if im is None: # not cached in RAM
- if fn.exists(): # load npy
- im = np.load(fn)
- else: # read image
- im = cv2.imread(f) # BGR
- assert im is not None, f'Image Not Found {f}'
- h0, w0 = im.shape[:2] # orig hw
- r = self.img_size / max(h0, w0) # ratio
- if r != 1: # if sizes are not equal
- interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA
- im = cv2.resize(im, (math.ceil(w0 * r), math.ceil(h0 * r)), interpolation=interp)
- return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized
- return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized
-
- def cache_images_to_disk(self, i):
- """Saves an image as an *.npy file for faster loading."""
- f = self.npy_files[i]
- if not f.exists():
- np.save(f.as_posix(), cv2.imread(self.im_files[i]))
-
- def load_mosaic(self, index):
- """YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic."""
- labels4, segments4 = [], []
- s = self.img_size
- yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y
- indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
- random.shuffle(indices)
- for i, index in enumerate(indices):
- # Load image
- img, _, (h, w) = self.load_image(index)
-
- # Place img in img4
- if i == 0: # top left
- img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
- x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
- x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
- elif i == 1: # top right
- x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
- x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
- elif i == 2: # bottom left
- x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
- x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
- elif i == 3: # bottom right
- x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
- x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
-
- img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
- padw = x1a - x1b
- padh = y1a - y1b
-
- # Labels
- labels, segments = self.labels[index].copy(), self.segments[index].copy()
- if labels.size:
- labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
- segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
- labels4.append(labels)
- segments4.extend(segments)
-
- # Concat/clip labels
- labels4 = np.concatenate(labels4, 0)
- for x in (labels4[:, 1:], *segments4):
- np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
- # img4, labels4 = replicate(img4, labels4) # replicate
-
- # Augment
- img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
- img4, labels4 = random_perspective(img4,
- labels4,
- segments4,
- degrees=self.hyp['degrees'],
- translate=self.hyp['translate'],
- scale=self.hyp['scale'],
- shear=self.hyp['shear'],
- perspective=self.hyp['perspective'],
- border=self.mosaic_border) # border to remove
-
- return img4, labels4
-
- def load_mosaic9(self, index):
- """YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic."""
- labels9, segments9 = [], []
- s = self.img_size
- indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices
- random.shuffle(indices)
- hp, wp = -1, -1 # height, width previous
- for i, index in enumerate(indices):
- # Load image
- img, _, (h, w) = self.load_image(index)
-
- # Place img in img9
- if i == 0: # center
-                img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 9 tiles
- h0, w0 = h, w
- c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
- elif i == 1: # top
- c = s, s - h, s + w, s
- elif i == 2: # top right
- c = s + wp, s - h, s + wp + w, s
- elif i == 3: # right
- c = s + w0, s, s + w0 + w, s + h
- elif i == 4: # bottom right
- c = s + w0, s + hp, s + w0 + w, s + hp + h
- elif i == 5: # bottom
- c = s + w0 - w, s + h0, s + w0, s + h0 + h
- elif i == 6: # bottom left
- c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
- elif i == 7: # left
- c = s - w, s + h0 - h, s, s + h0
- elif i == 8: # top left
- c = s - w, s + h0 - hp - h, s, s + h0 - hp
-
- padx, pady = c[:2]
- x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords
-
- # Labels
- labels, segments = self.labels[index].copy(), self.segments[index].copy()
- if labels.size:
- labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
- segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
- labels9.append(labels)
- segments9.extend(segments)
-
- # Image
- img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
- hp, wp = h, w # height, width previous
-
- # Offset
- yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y
- img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
-
- # Concat/clip labels
- labels9 = np.concatenate(labels9, 0)
- labels9[:, [1, 3]] -= xc
- labels9[:, [2, 4]] -= yc
- c = np.array([xc, yc]) # centers
- segments9 = [x - c for x in segments9]
-
- for x in (labels9[:, 1:], *segments9):
- np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
- # img9, labels9 = replicate(img9, labels9) # replicate
-
- # Augment
- img9, labels9, segments9 = copy_paste(img9, labels9, segments9, p=self.hyp['copy_paste'])
- img9, labels9 = random_perspective(img9,
- labels9,
- segments9,
- degrees=self.hyp['degrees'],
- translate=self.hyp['translate'],
- scale=self.hyp['scale'],
- shear=self.hyp['shear'],
- perspective=self.hyp['perspective'],
- border=self.mosaic_border) # border to remove
-
- return img9, labels9
-
- @staticmethod
- def collate_fn(batch):
- """YOLOv8 collate function, outputs dict."""
- im, label, path, shapes = zip(*batch) # transposed
- for i, lb in enumerate(label):
- lb[:, 0] = i # add target image index for build_targets()
- batch_idx, cls, bboxes = torch.cat(label, 0).split((1, 1, 4), dim=1)
- return {
- 'ori_shape': tuple((x[0] if x else None) for x in shapes),
- 'ratio_pad': tuple((x[1] if x else None) for x in shapes),
- 'im_file': path,
- 'img': torch.stack(im, 0),
- 'cls': cls,
- 'bboxes': bboxes,
- 'batch_idx': batch_idx.view(-1)}
-
- @staticmethod
- def collate_fn_old(batch):
- """YOLOv5 original collate function."""
- im, label, path, shapes = zip(*batch) # transposed
- for i, lb in enumerate(label):
- lb[:, 0] = i # add target image index for build_targets()
- return torch.stack(im, 0), torch.cat(label, 0), path, shapes
-
-
-# Ancillary functions --------------------------------------------------------------------------------------------------
-def flatten_recursive(path=DATASETS_DIR / 'coco128'):
- """Flatten a recursive directory by bringing all files to top level."""
- new_path = Path(f'{str(path)}_flat')
- if os.path.exists(new_path):
- shutil.rmtree(new_path) # delete output folder
- os.makedirs(new_path) # make new output folder
- for file in tqdm(glob.glob(f'{str(Path(path))}/**/*.*', recursive=True)):
- shutil.copyfile(file, new_path / Path(file).name)
-
-
-def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders import *; extract_boxes()
- # Convert detection dataset into classification dataset, with one directory per class
- path = Path(path) # images dir
- shutil.rmtree(path / 'classification') if (path / 'classification').is_dir() else None # remove existing
- files = list(path.rglob('*.*'))
- n = len(files) # number of files
- for im_file in tqdm(files, total=n):
- if im_file.suffix[1:] in IMG_FORMATS:
- # Image
- im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
- h, w = im.shape[:2]
-
- # Labels
- lb_file = Path(img2label_paths([str(im_file)])[0])
- if Path(lb_file).exists():
- with open(lb_file) as f:
- lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
-
- for j, x in enumerate(lb):
- c = int(x[0]) # class
- f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
- if not f.parent.is_dir():
- f.parent.mkdir(parents=True)
-
- b = x[1:] * [w, h, w, h] # box
- # B[2:] = b[2:].max() # rectangle to square
- b[2:] = b[2:] * 1.2 + 3 # pad
- b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
-
- b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
- b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
- assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
-
-
-def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
- """Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
- Usage: from utils.dataloaders import *; autosplit()
- Arguments
- path: Path to images directory
- weights: Train, val, test weights (list, tuple)
- annotated_only: Only use images with an annotated txt file
- """
- path = Path(path) # images dir
- files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only
- n = len(files) # number of files
- random.seed(0) # for reproducibility
- indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
-
- txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
- for x in txt:
- if (path.parent / x).exists():
- (path.parent / x).unlink() # remove existing
-
- print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
- for i, img in tqdm(zip(indices, files), total=n):
- if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
- with open(path.parent / txt[i], 'a') as f:
- f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n') # add image to txt file
-
-
-def verify_image_label(args):
- """Verify one image-label pair."""
- im_file, lb_file, prefix = args
- nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments
- try:
- # Verify images
- im = Image.open(im_file)
- im.verify() # PIL verify
- shape = exif_size(im) # image size
- assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
- assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
- if im.format.lower() in ('jpg', 'jpeg'):
- with open(im_file, 'rb') as f:
- f.seek(-2, 2)
- if f.read() != b'\xff\xd9': # corrupt JPEG
- ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)
- msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved'
-
- # Verify labels
- if os.path.isfile(lb_file):
- nf = 1 # label found
- with open(lb_file) as f:
- lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
- if any(len(x) > 6 for x in lb): # is segment
- classes = np.array([x[0] for x in lb], dtype=np.float32)
- segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...)
- lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
- lb = np.array(lb, dtype=np.float32)
- nl = len(lb)
- if nl:
- assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected'
- assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}'
- assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}'
- _, i = np.unique(lb, axis=0, return_index=True)
- if len(i) < nl: # duplicate row check
- lb = lb[i] # remove duplicates
- if segments:
- segments = [segments[x] for x in i]
- msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed'
- else:
- ne = 1 # label empty
- lb = np.zeros((0, 5), dtype=np.float32)
- else:
- nm = 1 # label missing
- lb = np.zeros((0, 5), dtype=np.float32)
- return im_file, lb, shape, segments, nm, nf, ne, nc, msg
- except Exception as e:
- nc = 1
- msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}'
- return [None, None, None, None, nm, nf, ne, nc, msg]
-
-
-# Classification dataloaders -------------------------------------------------------------------------------------------
-class ClassificationDataset(torchvision.datasets.ImageFolder):
- """
- YOLOv5 Classification Dataset.
- Arguments
- root: Dataset path
- transform: torchvision transforms, used by default
- album_transform: Albumentations transforms, used if installed
- """
-
- def __init__(self, root, augment, imgsz, cache=False):
- """Initialize YOLO dataset with root, augmentation, image size, and cache parameters."""
- super().__init__(root=root)
- self.torch_transforms = classify_transforms(imgsz)
- self.album_transforms = classify_albumentations(augment, imgsz) if augment else None
- self.cache_ram = cache is True or cache == 'ram'
- self.cache_disk = cache == 'disk'
- self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples] # file, index, npy, im
-
- def __getitem__(self, i):
-        """Returns the transformed image and class index for dataset item 'i'."""
- f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image
- if self.cache_ram and im is None:
- im = self.samples[i][3] = cv2.imread(f)
- elif self.cache_disk:
- if not fn.exists(): # load npy
- np.save(fn.as_posix(), cv2.imread(f))
- im = np.load(fn)
- else: # read image
- im = cv2.imread(f) # BGR
- if self.album_transforms:
- sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))['image']
- else:
- sample = self.torch_transforms(im)
- return sample, j
-
-
-def create_classification_dataloader(path,
- imgsz=224,
- batch_size=16,
- augment=True,
- cache=False,
- rank=-1,
- workers=8,
- shuffle=True):
- """Returns Dataloader object to be used with YOLOv5 Classifier."""
- with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
- dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache)
- batch_size = min(batch_size, len(dataset))
- nd = torch.cuda.device_count()
- nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers])
- sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
- generator = torch.Generator()
- generator.manual_seed(6148914691236517205 + RANK)
- return InfiniteDataLoader(dataset,
- batch_size=batch_size,
- shuffle=shuffle and sampler is None,
- num_workers=nw,
- sampler=sampler,
- pin_memory=PIN_MEMORY,
- worker_init_fn=seed_worker,
- generator=generator) # or DataLoader(persistent_workers=True)
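
The deleted create_dataloader() above ties reproducible shuffling to two pieces: a torch.Generator seeded with a fixed base value and a seed_worker() hook that reseeds NumPy and random inside every dataloader worker. The sketch below shows that pattern on a toy TensorDataset; the dataset, batch size and worker count are placeholders rather than values taken from the deleted code.

# Reproducible-shuffling pattern used by the removed create_dataloader():
# seed each worker from torch.initial_seed() and drive shuffling with a seeded Generator.
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset


def seed_worker(worker_id):
    """Reseed NumPy and random in each worker (same idea as the deleted helper)."""
    worker_seed = torch.initial_seed() % 2 ** 32
    np.random.seed(worker_seed)
    random.seed(worker_seed)


dataset = TensorDataset(torch.arange(64).float())  # toy dataset, illustrative only
generator = torch.Generator()
generator.manual_seed(6148914691236517205)  # the original adds `+ seed + RANK` for DDP
loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=2,
                    worker_init_fn=seed_worker, generator=generator)
# Iterating `loader` now yields the same batch order on every run with these seeds.
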
diff --git a/ultralytics/yolo/data/dataset_wrappers.py b/ultralytics/yolo/data/dataset_wrappers.py
deleted file mode 100644
index 72a6fb57a..000000000
--- a/ultralytics/yolo/data/dataset_wrappers.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Ultralytics YOLO 🚀, AGPL-3.0 license
-
-import collections
-from copy import deepcopy
-
-from .augment import LetterBox
-
-
-class MixAndRectDataset:
- """
- A dataset class that applies mosaic and mixup transformations as well as rectangular training.
-
- Attributes:
- dataset: The base dataset.
- imgsz: The size of the images in the dataset.
- """
-
- def __init__(self, dataset):
- """
- Args:
- dataset (BaseDataset): The base dataset to apply transformations to.
- """
- self.dataset = dataset
- self.imgsz = dataset.imgsz
-
- def __len__(self):
- """Returns the number of items in the dataset."""
- return len(self.dataset)
-
- def __getitem__(self, index):
- """
- Applies mosaic, mixup and rectangular training transformations to an item in the dataset.
-
- Args:
- index (int): Index of the item in the dataset.
-
- Returns:
- (dict): A dictionary containing the transformed item data.
- """
- labels = deepcopy(self.dataset[index])
- for transform in self.dataset.transforms.tolist():
- # Mosaic and mixup
- if hasattr(transform, 'get_indexes'):
- indexes = transform.get_indexes(self.dataset)
- if not isinstance(indexes, collections.abc.Sequence):
- indexes = [indexes]
- labels['mix_labels'] = [deepcopy(self.dataset[index]) for index in indexes]
- if self.dataset.rect and isinstance(transform, LetterBox):
- transform.new_shape = self.dataset.batch_shapes[self.dataset.batch[index]]
- labels = transform(labels)
- if 'mix_labels' in labels:
- labels.pop('mix_labels')
- return labels
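
The removed MixAndRectDataset relies on a small duck-typed protocol: any transform that exposes get_indexes() has the extra samples it requests pre-fetched into labels['mix_labels'] before it is called. Below is a minimal sketch of that protocol with throwaway stand-ins; ToyMixUp and the list-of-dicts dataset are illustrative only and not part of the library.

# Duck-typed mix protocol used by the deleted wrapper: transforms with get_indexes()
# receive the extra samples they asked for under labels['mix_labels'].
import random
from copy import deepcopy


class ToyMixUp:
    def get_indexes(self, dataset):
        """Request one extra sample index to mix with."""
        return random.randint(0, len(dataset) - 1)

    def __call__(self, labels):
        """Consume the pre-fetched samples placed under 'mix_labels'."""
        labels['mixed_with'] = [m['idx'] for m in labels.pop('mix_labels')]
        return labels


def apply_transforms(dataset, transforms, index):
    labels = deepcopy(dataset[index])
    for t in transforms:
        if hasattr(t, 'get_indexes'):  # pre-fetch extra samples for mixing transforms
            idxs = t.get_indexes(dataset)
            idxs = idxs if isinstance(idxs, (list, tuple)) else [idxs]
            labels['mix_labels'] = [deepcopy(dataset[i]) for i in idxs]
        labels = t(labels)
    return labels


dataset = [{'idx': i} for i in range(10)]  # toy entries standing in for image labels
print(apply_transforms(dataset, [ToyMixUp()], 0))  # e.g. {'idx': 0, 'mixed_with': [7]}
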
diff --git a/ultralytics/yolo/engine/__init__.py b/ultralytics/yolo/engine/__init__.py
index e69de29bb..794efcd0c 100644
--- a/ultralytics/yolo/engine/__init__.py
+++ b/ultralytics/yolo/engine/__init__.py
@@ -0,0 +1,10 @@
+import importlib
+import sys
+
+from ultralytics.utils import LOGGER
+
+# Set modules in sys.modules under their old name
+sys.modules['ultralytics.yolo.engine'] = importlib.import_module('ultralytics.engine')
+
+LOGGER.warning("WARNING ⚠️ 'ultralytics.yolo.engine' is deprecated since '8.0.136' and will be removed in '8.1.0'. "
+ "Please use 'ultralytics.engine' instead.")
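
The ten lines added above keep the legacy import path working by registering the new module object under the old dotted name in sys.modules, so code that still imports ultralytics.yolo.engine resolves to ultralytics.engine and only sees a deprecation warning. The same mechanism can be demonstrated in isolation with a made-up alias for a standard-library module; legacy_json below is purely illustrative.

# sys.modules aliasing as used by the shim above, shown with a throwaway name.
import importlib
import sys

sys.modules['legacy_json'] = importlib.import_module('json')  # register old name -> new module

import legacy_json  # resolves via sys.modules; no legacy_json.py exists anywhere

assert legacy_json is sys.modules['json']  # both names point at the same module object
print(legacy_json.dumps({'ok': True}))  # {"ok": true}
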
diff --git a/ultralytics/yolo/utils/__init__.py b/ultralytics/yolo/utils/__init__.py
index 5160322c3..71557b0a7 100644
--- a/ultralytics/yolo/utils/__init__.py
+++ b/ultralytics/yolo/utils/__init__.py
@@ -1,809 +1,15 @@
-# Ultralytics YOLO 🚀, AGPL-3.0 license
-
-import contextlib
-import inspect
-import logging.config
-import os
-import platform
-import re
-import subprocess
+import importlib
import sys
-import threading
-import urllib
-import uuid
-from pathlib import Path
-from types import SimpleNamespace
-from typing import Union
-
-import cv2
-import matplotlib.pyplot as plt
-import numpy as np
-import torch
-import yaml
-
-from ultralytics import __version__
-
-# PyTorch Multi-GPU DDP Constants
-RANK = int(os.getenv('RANK', -1))
-LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
-WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
-
-# Other Constants
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[2] # YOLO
-DEFAULT_CFG_PATH = ROOT / 'yolo/cfg/default.yaml'
-NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads
-AUTOINSTALL = str(os.getenv('YOLO_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode
-VERBOSE = str(os.getenv('YOLO_VERBOSE', True)).lower() == 'true' # global verbose mode
-TQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format
-LOGGING_NAME = 'ultralytics'
-MACOS, LINUX, WINDOWS = (platform.system() == x for x in ['Darwin', 'Linux', 'Windows']) # environment booleans
-ARM64 = platform.machine() in ('arm64', 'aarch64') # ARM64 booleans
-HELP_MSG = \
- """
- Usage examples for running YOLOv8:
-
- 1. Install the ultralytics package:
-
- pip install ultralytics
-
- 2. Use the Python SDK:
-
- from ultralytics import YOLO
-
- # Load a model
- model = YOLO('yolov8n.yaml') # build a new model from scratch
- model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training)
-
- # Use the model
- results = model.train(data="coco128.yaml", epochs=3) # train the model
- results = model.val() # evaluate model performance on the validation set
- results = model('https://ultralytics.com/images/bus.jpg') # predict on an image
- success = model.export(format='onnx') # export the model to ONNX format
-
- 3. Use the command line interface (CLI):
-
- YOLOv8 'yolo' CLI commands use the following syntax:
-
- yolo TASK MODE ARGS
-
- Where TASK (optional) is one of [detect, segment, classify]
- MODE (required) is one of [train, val, predict, export]
- ARGS (optional) are any number of custom 'arg=value' pairs like 'imgsz=320' that override defaults.
- See all ARGS at https://docs.ultralytics.com/usage/cfg or with 'yolo cfg'
-
- - Train a detection model for 10 epochs with an initial learning_rate of 0.01
- yolo detect train data=coco128.yaml model=yolov8n.pt epochs=10 lr0=0.01
-
- - Predict a YouTube video using a pretrained segmentation model at image size 320:
- yolo segment predict model=yolov8n-seg.pt source='https://youtu.be/Zgi9g1ksQHc' imgsz=320
-
- - Val a pretrained detection model at batch-size 1 and image size 640:
- yolo detect val model=yolov8n.pt data=coco128.yaml batch=1 imgsz=640
-
- - Export a YOLOv8n classification model to ONNX format at image size 224 by 128 (no TASK required)
- yolo export model=yolov8n-cls.pt format=onnx imgsz=224,128
-
- - Run special commands:
- yolo help
- yolo checks
- yolo version
- yolo settings
- yolo copy-cfg
- yolo cfg
-
- Docs: https://docs.ultralytics.com
- Community: https://community.ultralytics.com
- GitHub: https://github.com/ultralytics/ultralytics
- """
-
-# Settings
-torch.set_printoptions(linewidth=320, precision=4, profile='default')
-np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
-cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
-os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads
-os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' # for deterministic training
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress verbose TF compiler warnings in Colab
-
-
-class SimpleClass:
- """
- Ultralytics SimpleClass is a base class providing helpful string representation, error reporting, and attribute
- access methods for easier debugging and usage.
- """
-
- def __str__(self):
- """Return a human-readable string representation of the object."""
- attr = []
- for a in dir(self):
- v = getattr(self, a)
- if not callable(v) and not a.startswith('_'):
- if isinstance(v, SimpleClass):
- # Display only the module and class name for subclasses
- s = f'{a}: {v.__module__}.{v.__class__.__name__} object'
- else:
- s = f'{a}: {repr(v)}'
- attr.append(s)
- return f'{self.__module__}.{self.__class__.__name__} object with attributes:\n\n' + '\n'.join(attr)
-
- def __repr__(self):
- """Return a machine-readable string representation of the object."""
- return self.__str__()
-
- def __getattr__(self, attr):
- """Custom attribute access error message with helpful information."""
- name = self.__class__.__name__
- raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}")
-
-
-class IterableSimpleNamespace(SimpleNamespace):
- """
- Ultralytics IterableSimpleNamespace is an extension class of SimpleNamespace that adds iterable functionality and
- enables usage with dict() and for loops.
- """
-
- def __iter__(self):
- """Return an iterator of key-value pairs from the namespace's attributes."""
- return iter(vars(self).items())
-
- def __str__(self):
- """Return a human-readable string representation of the object."""
- return '\n'.join(f'{k}={v}' for k, v in vars(self).items())
-
- def __getattr__(self, attr):
- """Custom attribute access error message with helpful information."""
- name = self.__class__.__name__
- raise AttributeError(f"""
- '{name}' object has no attribute '{attr}'. This may be caused by a modified or out of date ultralytics
- 'default.yaml' file.\nPlease update your code with 'pip install -U ultralytics' and if necessary replace
- {DEFAULT_CFG_PATH} with the latest version from
- https://github.com/ultralytics/ultralytics/blob/main/ultralytics/yolo/cfg/default.yaml
- """)
-
- def get(self, key, default=None):
- """Return the value of the specified key if it exists; otherwise, return the default value."""
- return getattr(self, key, default)
-
-
-def plt_settings(rcparams=None, backend='Agg'):
- """
- Decorator to temporarily set rc parameters and the backend for a plotting function.
-
- Usage:
- decorator: @plt_settings({"font.size": 12})
- context manager: with plt_settings({"font.size": 12}):
-
- Args:
- rcparams (dict): Dictionary of rc parameters to set.
- backend (str, optional): Name of the backend to use. Defaults to 'Agg'.
-
- Returns:
- (Callable): Decorated function with temporarily set rc parameters and backend. This decorator can be
- applied to any function that needs to have specific matplotlib rc parameters and backend for its execution.
- """
-
- if rcparams is None:
- rcparams = {'font.size': 11}
-
- def decorator(func):
- """Decorator to apply temporary rc parameters and backend to a function."""
-
- def wrapper(*args, **kwargs):
- """Sets rc parameters and backend, calls the original function, and restores the settings."""
- original_backend = plt.get_backend()
- plt.switch_backend(backend)
-
- with plt.rc_context(rcparams):
- result = func(*args, **kwargs)
-
- plt.switch_backend(original_backend)
- return result
-
- return wrapper
-
- return decorator
-
-
-def set_logging(name=LOGGING_NAME, verbose=True):
- """Sets up logging for the given name."""
- rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings
- level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR
- logging.config.dictConfig({
- 'version': 1,
- 'disable_existing_loggers': False,
- 'formatters': {
- name: {
- 'format': '%(message)s'}},
- 'handlers': {
- name: {
- 'class': 'logging.StreamHandler',
- 'formatter': name,
- 'level': level}},
- 'loggers': {
- name: {
- 'level': level,
- 'handlers': [name],
- 'propagate': False}}})
-
-
-def emojis(string=''):
- """Return platform-dependent emoji-safe version of string."""
- return string.encode().decode('ascii', 'ignore') if WINDOWS else string
-
-
-class EmojiFilter(logging.Filter):
- """
- A custom logging filter class for removing emojis in log messages.
-
- This filter is particularly useful for ensuring compatibility with Windows terminals
- that may not support the display of emojis in log messages.
- """
-
- def filter(self, record):
- """Filter logs by emoji unicode characters on windows."""
- record.msg = emojis(record.msg)
- return super().filter(record)
-
-
-# Set logger
-set_logging(LOGGING_NAME, verbose=VERBOSE) # run before defining LOGGER
-LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)
-if WINDOWS: # emoji-safe logging
- LOGGER.addFilter(EmojiFilter())
-
-
-class ThreadingLocked:
- """
- A decorator class for ensuring thread-safe execution of a function or method.
- This class can be used as a decorator to make sure that if the decorated function
- is called from multiple threads, only one thread at a time will be able to execute the function.
-
- Attributes:
- lock (threading.Lock): A lock object used to manage access to the decorated function.
-
- Usage:
- @ThreadingLocked()
- def my_function():
- # Your code here
- pass
- """
-
- def __init__(self):
- self.lock = threading.Lock()
-
- def __call__(self, f):
- from functools import wraps
-
- @wraps(f)
- def decorated(*args, **kwargs):
- with self.lock:
- return f(*args, **kwargs)
-
- return decorated
-
-
-def yaml_save(file='data.yaml', data=None):
- """
- Save YAML data to a file.
-
- Args:
- file (str, optional): File name. Default is 'data.yaml'.
- data (dict): Data to save in YAML format.
-
- Returns:
- (None): Data is saved to the specified file.
- """
- if data is None:
- data = {}
- file = Path(file)
- if not file.parent.exists():
- # Create parent directories if they don't exist
- file.parent.mkdir(parents=True, exist_ok=True)
-
- # Convert Path objects to strings
- for k, v in data.items():
- if isinstance(v, Path):
- data[k] = str(v)
-
- # Dump data to file in YAML format
- with open(file, 'w') as f:
- yaml.safe_dump(data, f, sort_keys=False, allow_unicode=True)
-
-
-def yaml_load(file='data.yaml', append_filename=False):
- """
- Load YAML data from a file.
-
- Args:
- file (str, optional): File name. Default is 'data.yaml'.
- append_filename (bool): Add the YAML filename to the YAML dictionary. Default is False.
-
- Returns:
- (dict): YAML data and file name.
- """
- with open(file, errors='ignore', encoding='utf-8') as f:
- s = f.read() # string
-
- # Remove special characters
- if not s.isprintable():
- s = re.sub(r'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]+', '', s)
-
- # Add YAML filename to dict and return
- return {**yaml.safe_load(s), 'yaml_file': str(file)} if append_filename else yaml.safe_load(s)
-
-
-def yaml_print(yaml_file: Union[str, Path, dict]) -> None:
- """
- Pretty prints a yaml file or a yaml-formatted dictionary.
-
- Args:
- yaml_file: The file path of the yaml file or a yaml-formatted dictionary.
-
- Returns:
- None
- """
- yaml_dict = yaml_load(yaml_file) if isinstance(yaml_file, (str, Path)) else yaml_file
- dump = yaml.dump(yaml_dict, sort_keys=False, allow_unicode=True)
- LOGGER.info(f"Printing '{colorstr('bold', 'black', yaml_file)}'\n\n{dump}")
-
-
-# Default configuration
-DEFAULT_CFG_DICT = yaml_load(DEFAULT_CFG_PATH)
-for k, v in DEFAULT_CFG_DICT.items():
- if isinstance(v, str) and v.lower() == 'none':
- DEFAULT_CFG_DICT[k] = None
-DEFAULT_CFG_KEYS = DEFAULT_CFG_DICT.keys()
-DEFAULT_CFG = IterableSimpleNamespace(**DEFAULT_CFG_DICT)
-
-
-def is_colab():
- """
- Check if the current script is running inside a Google Colab notebook.
-
- Returns:
- (bool): True if running inside a Colab notebook, False otherwise.
- """
- return 'COLAB_RELEASE_TAG' in os.environ or 'COLAB_BACKEND_VERSION' in os.environ
-
-
-def is_kaggle():
- """
- Check if the current script is running inside a Kaggle kernel.
-
- Returns:
- (bool): True if running inside a Kaggle kernel, False otherwise.
- """
- return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com'
-
-
-def is_jupyter():
- """
- Check if the current script is running inside a Jupyter Notebook.
- Verified on Colab, Jupyterlab, Kaggle, Paperspace.
-
- Returns:
- (bool): True if running inside a Jupyter Notebook, False otherwise.
- """
- with contextlib.suppress(Exception):
- from IPython import get_ipython
- return get_ipython() is not None
- return False
-
-
-def is_docker() -> bool:
- """
- Determine if the script is running inside a Docker container.
-
- Returns:
- (bool): True if the script is running inside a Docker container, False otherwise.
- """
- file = Path('/proc/self/cgroup')
- if file.exists():
- with open(file) as f:
- return 'docker' in f.read()
- else:
- return False
-
-
-def is_online() -> bool:
- """
- Check internet connectivity by attempting to connect to a known online host.
-
- Returns:
- (bool): True if connection is successful, False otherwise.
- """
- import socket
-
- for host in '1.1.1.1', '8.8.8.8', '223.5.5.5': # Cloudflare, Google, AliDNS:
- try:
- test_connection = socket.create_connection(address=(host, 53), timeout=2)
- except (socket.timeout, socket.gaierror, OSError):
- continue
- else:
- # If the connection was successful, close it to avoid a ResourceWarning
- test_connection.close()
- return True
- return False
-
-
-ONLINE = is_online()
-
-
-def is_pip_package(filepath: str = __name__) -> bool:
- """
- Determines if the file at the given filepath is part of a pip package.
-
- Args:
- filepath (str): The filepath to check.
-
- Returns:
- (bool): True if the file is part of a pip package, False otherwise.
- """
- import importlib.util
-
- # Get the spec for the module
- spec = importlib.util.find_spec(filepath)
-
- # Return whether the spec is not None and the origin is not None (indicating it is a package)
- return spec is not None and spec.origin is not None
-
-
-def is_dir_writeable(dir_path: Union[str, Path]) -> bool:
- """
- Check if a directory is writeable.
-
- Args:
- dir_path (str | Path): The path to the directory.
-
- Returns:
- (bool): True if the directory is writeable, False otherwise.
- """
- return os.access(str(dir_path), os.W_OK)
-
-
-def is_pytest_running():
- """
- Determines whether pytest is currently running or not.
-
- Returns:
- (bool): True if pytest is running, False otherwise.
- """
- return ('PYTEST_CURRENT_TEST' in os.environ) or ('pytest' in sys.modules) or ('pytest' in Path(sys.argv[0]).stem)
-
-
-def is_github_actions_ci() -> bool:
- """
- Determine if the current environment is a GitHub Actions CI Python runner.
-
- Returns:
- (bool): True if the current environment is a GitHub Actions CI Python runner, False otherwise.
- """
- return 'GITHUB_ACTIONS' in os.environ and 'RUNNER_OS' in os.environ and 'RUNNER_TOOL_CACHE' in os.environ
-
-
-def is_git_dir():
- """
- Determines whether the current file is part of a git repository.
-
-    Returns:
-        (bool): True if the current file is part of a git repository, False otherwise.
- """
- return get_git_dir() is not None
-
-
-def get_git_dir():
- """
- Determines whether the current file is part of a git repository and if so, returns the repository root directory.
- If the current file is not part of a git repository, returns None.
-
- Returns:
- (Path | None): Git root directory if found or None if not found.
- """
- for d in Path(__file__).parents:
- if (d / '.git').is_dir():
- return d
- return None # no .git dir found
-
-
-def get_git_origin_url():
- """
- Retrieves the origin URL of a git repository.
-
- Returns:
- (str | None): The origin URL of the git repository.
- """
- if is_git_dir():
- with contextlib.suppress(subprocess.CalledProcessError):
- origin = subprocess.check_output(['git', 'config', '--get', 'remote.origin.url'])
- return origin.decode().strip()
- return None # if not git dir or on error
-
-
-def get_git_branch():
- """
- Returns the current git branch name. If not in a git repository, returns None.
-
- Returns:
- (str | None): The current git branch name.
- """
- if is_git_dir():
- with contextlib.suppress(subprocess.CalledProcessError):
- origin = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
- return origin.decode().strip()
- return None # if not git dir or on error
-
-
-def get_default_args(func):
- """Returns a dictionary of default arguments for a function.
-
- Args:
- func (callable): The function to inspect.
-
- Returns:
- (dict): A dictionary where each key is a parameter name, and each value is the default value of that parameter.
- """
- signature = inspect.signature(func)
- return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
-
-
-def get_user_config_dir(sub_dir='Ultralytics'):
- """
- Get the user config directory.
-
- Args:
- sub_dir (str): The name of the subdirectory to create.
-
- Returns:
- (Path): The path to the user config directory.
- """
- # Return the appropriate config directory for each operating system
- if WINDOWS:
- path = Path.home() / 'AppData' / 'Roaming' / sub_dir
- elif MACOS: # macOS
- path = Path.home() / 'Library' / 'Application Support' / sub_dir
- elif LINUX:
- path = Path.home() / '.config' / sub_dir
- else:
- raise ValueError(f'Unsupported operating system: {platform.system()}')
-
- # GCP and AWS lambda fix, only /tmp is writeable
- if not is_dir_writeable(str(path.parent)):
- path = Path('/tmp') / sub_dir
- LOGGER.warning(f"WARNING ⚠️ user config directory is not writeable, defaulting to '{path}'.")
-
- # Create the subdirectory if it does not exist
- path.mkdir(parents=True, exist_ok=True)
-
- return path
-
-
-USER_CONFIG_DIR = Path(os.getenv('YOLO_CONFIG_DIR', get_user_config_dir())) # Ultralytics settings dir
-SETTINGS_YAML = USER_CONFIG_DIR / 'settings.yaml'
-
-
-def colorstr(*input):
- """Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')."""
- *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string
- colors = {
- 'black': '\033[30m', # basic colors
- 'red': '\033[31m',
- 'green': '\033[32m',
- 'yellow': '\033[33m',
- 'blue': '\033[34m',
- 'magenta': '\033[35m',
- 'cyan': '\033[36m',
- 'white': '\033[37m',
- 'bright_black': '\033[90m', # bright colors
- 'bright_red': '\033[91m',
- 'bright_green': '\033[92m',
- 'bright_yellow': '\033[93m',
- 'bright_blue': '\033[94m',
- 'bright_magenta': '\033[95m',
- 'bright_cyan': '\033[96m',
- 'bright_white': '\033[97m',
- 'end': '\033[0m', # misc
- 'bold': '\033[1m',
- 'underline': '\033[4m'}
- return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
-
-
-class TryExcept(contextlib.ContextDecorator):
- """YOLOv8 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager."""
-
- def __init__(self, msg='', verbose=True):
- """Initialize TryExcept class with optional message and verbosity settings."""
- self.msg = msg
- self.verbose = verbose
-
- def __enter__(self):
- """Executes when entering TryExcept context, initializes instance."""
- pass
-
- def __exit__(self, exc_type, value, traceback):
- """Defines behavior when exiting a 'with' block, prints error message if necessary."""
- if self.verbose and value:
- print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}"))
- return True
-
-
-def threaded(func):
- """Multi-threads a target function and returns thread. Usage: @threaded decorator."""
-
- def wrapper(*args, **kwargs):
- """Multi-threads a given function and returns the thread."""
- thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
- thread.start()
- return thread
-
- return wrapper
-
-
-def set_sentry():
- """
- Initialize the Sentry SDK for error tracking and reporting. Only used if sentry_sdk package is installed and
- sync=True in settings. Run 'yolo settings' to see and update settings YAML file.
-
- Conditions required to send errors (ALL conditions must be met or no errors will be reported):
- - sentry_sdk package is installed
- - sync=True in YOLO settings
- - pytest is not running
- - running in a pip package installation
- - running in a non-git directory
- - running with rank -1 or 0
- - online environment
- - CLI used to run package (checked with 'yolo' as the name of the main CLI command)
-
- The function also configures Sentry SDK to ignore KeyboardInterrupt and FileNotFoundError
- exceptions and to exclude events with 'out of memory' in their exception message.
-
- Additionally, the function sets custom tags and user information for Sentry events.
- """
-
- def before_send(event, hint):
- """
- Modify the event before sending it to Sentry based on specific exception types and messages.
-
- Args:
- event (dict): The event dictionary containing information about the error.
- hint (dict): A dictionary containing additional information about the error.
-
- Returns:
- dict: The modified event or None if the event should not be sent to Sentry.
- """
- if 'exc_info' in hint:
- exc_type, exc_value, tb = hint['exc_info']
- if exc_type in (KeyboardInterrupt, FileNotFoundError) \
- or 'out of memory' in str(exc_value):
- return None # do not send event
-
- event['tags'] = {
- 'sys_argv': sys.argv[0],
- 'sys_argv_name': Path(sys.argv[0]).name,
- 'install': 'git' if is_git_dir() else 'pip' if is_pip_package() else 'other',
- 'os': ENVIRONMENT}
- return event
-
- if SETTINGS['sync'] and \
- RANK in (-1, 0) and \
- Path(sys.argv[0]).name == 'yolo' and \
- not TESTS_RUNNING and \
- ONLINE and \
- is_pip_package() and \
- not is_git_dir():
-
- # If sentry_sdk package is not installed then return and do not use Sentry
- try:
- import sentry_sdk # noqa
- except ImportError:
- return
-
- sentry_sdk.init(
- dsn='https://5ff1556b71594bfea135ff0203a0d290@o4504521589325824.ingest.sentry.io/4504521592406016',
- debug=False,
- traces_sample_rate=1.0,
- release=__version__,
- environment='production', # 'dev' or 'production'
- before_send=before_send,
- ignore_errors=[KeyboardInterrupt, FileNotFoundError])
- sentry_sdk.set_user({'id': SETTINGS['uuid']}) # SHA-256 anonymized UUID hash
-
- # Disable all sentry logging
- for logger in 'sentry_sdk', 'sentry_sdk.errors':
- logging.getLogger(logger).setLevel(logging.CRITICAL)
-
-
-def get_settings(file=SETTINGS_YAML, version='0.0.3'):
- """
- Loads a global Ultralytics settings YAML file or creates one with default values if it does not exist.
-
- Args:
- file (Path): Path to the Ultralytics settings YAML file. Defaults to 'settings.yaml' in the USER_CONFIG_DIR.
- version (str): Settings version. If min settings version not met, new default settings will be saved.
-
- Returns:
- (dict): Dictionary of settings key-value pairs.
- """
- import hashlib
-
- from ultralytics.yolo.utils.checks import check_version
- from ultralytics.yolo.utils.torch_utils import torch_distributed_zero_first
-
- git_dir = get_git_dir()
- root = git_dir or Path()
- datasets_root = (root.parent if git_dir and is_dir_writeable(root.parent) else root).resolve()
- defaults = {
- 'datasets_dir': str(datasets_root / 'datasets'), # default datasets directory.
- 'weights_dir': str(root / 'weights'), # default weights directory.
- 'runs_dir': str(root / 'runs'), # default runs directory.
- 'uuid': hashlib.sha256(str(uuid.getnode()).encode()).hexdigest(), # SHA-256 anonymized UUID hash
- 'sync': True, # sync analytics to help with YOLO development
- 'api_key': '', # Ultralytics HUB API key (https://hub.ultralytics.com/)
- 'settings_version': version} # Ultralytics settings version
-
- with torch_distributed_zero_first(RANK):
- if not file.exists():
- yaml_save(file, defaults)
- settings = yaml_load(file)
-
- # Check that settings keys and types match defaults
- correct = \
- settings \
- and settings.keys() == defaults.keys() \
- and all(type(a) == type(b) for a, b in zip(settings.values(), defaults.values())) \
- and check_version(settings['settings_version'], version)
- if not correct:
- LOGGER.warning('WARNING ⚠️ Ultralytics settings reset to defaults. This is normal and may be due to a '
- 'recent ultralytics package update, but may have overwritten previous settings. '
- f"\nView and update settings with 'yolo settings' or at '{file}'")
-            settings = defaults  # reset settings to defaults
- yaml_save(file, settings) # save updated defaults
-
- return settings
-
-
-def set_settings(kwargs, file=SETTINGS_YAML):
- """
-    Update the global SETTINGS dictionary with the given key-value pairs and save them to the settings YAML file.
- """
- SETTINGS.update(kwargs)
- yaml_save(file, SETTINGS)
-
-
-def deprecation_warn(arg, new_arg, version=None):
- """Issue a deprecation warning when a deprecated argument is used, suggesting an updated argument."""
- if not version:
- version = float(__version__[:3]) + 0.2 # deprecate after 2nd major release
- LOGGER.warning(f"WARNING ⚠️ '{arg}' is deprecated and will be removed in 'ultralytics {version}' in the future. "
- f"Please use '{new_arg}' instead.")
-
-
-def clean_url(url):
- """Strip auth from URL, i.e. https://url.com/file.txt?auth -> https://url.com/file.txt."""
- url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/
- return urllib.parse.unquote(url).split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth
-
-
-def url2file(url):
- """Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt."""
- return Path(clean_url(url)).name
-
-
-# Run below code on yolo/utils init ------------------------------------------------------------------------------------
-# Check first-install steps
-PREFIX = colorstr('Ultralytics: ')
-SETTINGS = get_settings()
-DATASETS_DIR = Path(SETTINGS['datasets_dir']) # global datasets directory
-ENVIRONMENT = 'Colab' if is_colab() else 'Kaggle' if is_kaggle() else 'Jupyter' if is_jupyter() else \
- 'Docker' if is_docker() else platform.system()
-TESTS_RUNNING = is_pytest_running() or is_github_actions_ci()
-set_sentry()
-
-# Apply monkey patches if the script is being run from within the parent directory of the script's location
-from .patches import imread, imshow, imwrite
-
-# torch.save = torch_save
-if Path(inspect.stack()[0].filename).parent.parent.as_posix() in inspect.stack()[-1].filename:
-    cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow
+from ultralytics.utils import LOGGER
+
+# Set modules in sys.modules under their old name
+sys.modules['ultralytics.yolo.utils'] = importlib.import_module('ultralytics.utils')
+
+UTILS_WARNING = """WARNING ⚠️ 'ultralytics.yolo.utils' is deprecated since '8.0.136' and will be removed in '8.1.0'. Please use 'ultralytics.utils' instead.
+Note this warning may be related to loading older models. You can update your model to current structure with:
+    import torch
+    ckpt = torch.load("model.pt")  # applies to both official and custom models
+    torch.save(ckpt, "updated-model.pt")
+"""
+LOGGER.warning(UTILS_WARNING)
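
The replacement ultralytics/yolo/utils/__init__.py above keeps legacy imports working by overwriting its own entry in sys.modules while it executes, so Python's import machinery hands back the new ultralytics.utils module under the old name. A minimal sketch of the observable effect, illustrative only and not part of this diff:

    import sys

    import ultralytics.yolo.utils  # runs the shim above and logs the deprecation warning

    # Both names now point at the same module object, so legacy statements such as
    # 'from ultralytics.yolo.utils import LOGGER' keep resolving against ultralytics.utils.
    assert sys.modules['ultralytics.yolo.utils'] is sys.modules['ultralytics.utils']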
diff --git a/ultralytics/yolo/v8/__init__.py b/ultralytics/yolo/v8/__init__.py
index adc0351ba..51adf814c 100644
--- a/ultralytics/yolo/v8/__init__.py
+++ b/ultralytics/yolo/v8/__init__.py
@@ -1,5 +1,10 @@
-# Ultralytics YOLO 🚀, AGPL-3.0 license
-
-from ultralytics.yolo.v8 import classify, detect, pose, segment
-
-__all__ = 'classify', 'segment', 'detect', 'pose'
+import importlib
+import sys
+
+from ultralytics.utils import LOGGER
+
+# Set modules in sys.modules under their old name
+sys.modules['ultralytics.yolo.v8'] = importlib.import_module('ultralytics.models.yolo')
+
+LOGGER.warning("WARNING ⚠️ 'ultralytics.yolo.v8' is deprecated since '8.0.136' and will be removed in '8.1.0'. "
+               "Please use 'ultralytics.models.yolo' instead.")
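
Downstream code that imported the task modules from the old v8 package can migrate by switching to the path named in the deprecation warning. A hypothetical before/after, assuming the classify, detect, pose and segment submodules keep their names under ultralytics.models.yolo (their diffs are not shown here):

    # Deprecated path: still resolves through the sys.modules alias above, with a warning
    from ultralytics.yolo.v8 import classify, detect, pose, segment  # noqa: F401

    # Preferred path named in the deprecation message
    from ultralytics.models.yolo import classify, detect, pose, segment  # noqa: F401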
diff --git a/ultralytics/yolo/v8/classify/__init__.py b/ultralytics/yolo/v8/classify/__init__.py
deleted file mode 100644
index 2f049ed33..000000000
--- a/ultralytics/yolo/v8/classify/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# Ultralytics YOLO 🚀, AGPL-3.0 license
-
-from ultralytics.yolo.v8.classify.predict import ClassificationPredictor, predict
-from ultralytics.yolo.v8.classify.train import ClassificationTrainer, train
-from ultralytics.yolo.v8.classify.val import ClassificationValidator, val
-
-__all__ = 'ClassificationPredictor', 'predict', 'ClassificationTrainer', 'train', 'ClassificationValidator', 'val'
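
The classify package initializer deleted above has no direct replacement in this part of the diff; its class-based entry points are expected to live under the aliased package instead. An assumed migration sketch (the lowercase predict/train/val helpers from the old __all__ are not assumed to survive the move):

    # Old location, removed by this diff:
    #   from ultralytics.yolo.v8.classify import ClassificationPredictor, ClassificationTrainer, ClassificationValidator
    # Assumed new location under the 'ultralytics.models.yolo' package aliased above:
    from ultralytics.models.yolo.classify import ClassificationPredictor, ClassificationTrainer, ClassificationValidator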