diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index 8f62cb54f..8cf5e41f9 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -28,7 +28,7 @@ jobs:
       - name: Set up Python environment
         uses: actions/setup-python@v5
         with:
-          python-version: '3.10'
+          python-version: '3.11'
           cache: 'pip' # caching pip dependencies
       - name: Install dependencies
         run: |
@@ -66,6 +66,7 @@ jobs:
         env:
           PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
           INDEXNOW_KEY: ${{ secrets.INDEXNOW_KEY_DOCS }}
+          WEGLOT_KEY: ${{ secrets.WEGLOT_KEY_DOCS }}
         run: |
           python docs/build_docs.py
           git config --global user.name "Glenn Jocher"
diff --git a/docs/build_docs.py b/docs/build_docs.py
index 9c9d75ed5..b0c8880ea 100644
--- a/docs/build_docs.py
+++ b/docs/build_docs.py
@@ -23,14 +23,17 @@ Usage:
 Note:
     - This script is built to be run in an environment where Python and MkDocs are installed and properly configured.
 """
-
+import os
 import re
 import shutil
 import subprocess
 from pathlib import Path

+from tqdm import tqdm
+
 DOCS = Path(__file__).parent.resolve()
 SITE = DOCS.parent / "site"
+LANGUAGES = True


 def build_docs():
@@ -44,9 +47,10 @@ def build_docs():
     subprocess.run(f"mkdocs build -f {DOCS}/mkdocs.yml", check=True, shell=True)

     # Build other localized documentations
-    for file in DOCS.glob("mkdocs_*.yml"):
-        print(f"Building MkDocs site with configuration file: {file}")
-        subprocess.run(f"mkdocs build -f {file}", check=True, shell=True)
+    if LANGUAGES:
+        for file in DOCS.glob("mkdocs_*.yml"):
+            print(f"Building MkDocs site with configuration file: {file}")
+            subprocess.run(f"mkdocs build -f {file}", check=True, shell=True)
     print(f"Site built at {SITE}")


@@ -100,19 +104,51 @@ def update_page_title(file_path: Path, new_title: str):
         file.write(updated_content)


+def update_html_head(key=""):
+    """Update the HTML head section of each file."""
+    html_files = Path(SITE).rglob("*.html")
+    for html_file in tqdm(html_files, desc="Processing HTML files"):
+        with html_file.open("r", encoding="utf-8") as file:
+            html_content = file.read()
+
+        script = f"""
+<script type="text/javascript" src="https://cdn.weglot.com/weglot.min.js"></script>
+<script>
+    Weglot.initialize({{
+        api_key: '{key}'
+    }});
+</script>
+"""
+        if script in html_content:  # script already in HTML file
+            return
+
+        head_end_index = html_content.lower().rfind("</head>")
+        if head_end_index != -1:
+            # Add the specified JavaScript to the HTML file just before the end of the head tag.
+            new_html_content = html_content[:head_end_index] + script + html_content[head_end_index:]
+            with html_file.open("w", encoding="utf-8") as file:
+                file.write(new_html_content)
+
+
 def main():
     # Build the docs
     build_docs()

+    # Update titles
+    update_page_title(SITE / "404.html", new_title="Ultralytics Docs - Not Found")
+
     # Update .md in href links
-    update_html_links()
+    if LANGUAGES:
+        update_html_links()
+
+    # Update HTML file head section
+    key = os.environ.get("WEGLOT_KEY")
+    if not LANGUAGES and key:
+        update_html_head(key)

     # Show command to serve built website
     print('Serve site at http://localhost:8000 with "python -m http.server --directory site"')

-    # Update titles
-    update_page_title(SITE / "404.html", new_title="Ultralytics Docs - Not Found")
-

 if __name__ == "__main__":
     main()
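The new `update_html_head()` patches the already-built HTML with a plain string splice rather than an MkDocs plugin hook. A minimal standalone sketch of that splice logic, using a toy HTML string and a placeholder snippet (not the real Weglot tag):

```python
# Sketch of the head injection performed by update_html_head() above.
html = "<html><head><title>Docs</title></head><body></body></html>"
snippet = "<script>/* placeholder for the Weglot tag */</script>"

i = html.lower().rfind("</head>")  # locate the closing head tag, case-insensitively
if i != -1:
    html = html[:i] + snippet + html[i:]  # splice the snippet just before </head>

print(html)  # ...<script>/* placeholder for the Weglot tag */</script></head>...
```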
diff --git a/docs/en/datasets/detect/open-images-v7.md b/docs/en/datasets/detect/open-images-v7.md
index 93d323ff9..95b396171 100644
--- a/docs/en/datasets/detect/open-images-v7.md
+++ b/docs/en/datasets/detect/open-images-v7.md
@@ -8,6 +8,15 @@ keywords: Open Images V7, object detection, segmentation masks, visual relations

 [Open Images V7](https://storage.googleapis.com/openimages/web/index.html) is a versatile and expansive dataset championed by Google. Aimed at propelling research in the realm of computer vision, it boasts a vast collection of images annotated with a plethora of data, including image-level labels, object bounding boxes, object segmentation masks, visual relationships, and localized narratives.

+## Open Images V7 Pretrained Models
+| Model                                                                                      | size<br>(pixels) | mAP<sup>val</sup><br>50-95 | Speed<br>CPU ONNX<br>(ms) | Speed<br>A100 TensorRT<br>(ms) | params<br>(M) | FLOPs<br>(B) |
+|--------------------------------------------------------------------------------------------|------------------|----------------------------|---------------------------|--------------------------------|---------------|--------------|
+| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v8.1.0/yolov8n-oiv7.pt) | 640              | 18.4                       | 142.4                     | 1.21                           | 3.5           | 10.5         |
+| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v8.1.0/yolov8s-oiv7.pt) | 640              | 27.7                       | 183.1                     | 1.40                           | 11.4          | 29.7         |
+| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v8.1.0/yolov8m-oiv7.pt) | 640              | 33.6                       | 408.5                     | 2.26                           | 26.2          | 80.6         |
+| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v8.1.0/yolov8l-oiv7.pt) | 640              | 34.9                       | 596.9                     | 2.43                           | 44.1          | 167.4        |
+| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v8.1.0/yolov8x-oiv7.pt) | 640              | 36.3                       | 860.6                     | 3.56                           | 68.7          | 260.6        |
+
 ![Open Images V7 classes visual](https://user-images.githubusercontent.com/26833433/258660358-2dc07771-ec08-4d11-b24a-f66e07550050.png)

 ## Key Features
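The checkpoints in the new table load with the standard Ultralytics API; a quick sketch (the weights auto-download from the release URL on first use):

```python
from ultralytics import YOLO

# Load an Open Images V7 pretrained checkpoint from the table above
model = YOLO("yolov8n-oiv7.pt")

# Run inference on a sample image
results = model("https://ultralytics.com/images/bus.jpg")
print(results[0].boxes)
```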
diff --git a/docs/en/guides/object-counting.md b/docs/en/guides/object-counting.md
index bb44d3970..2ebf00716 100644
--- a/docs/en/guides/object-counting.md
+++ b/docs/en/guides/object-counting.md
@@ -174,21 +174,21 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly

 | Name                | Type        | Default                    | Description                                   |
 |---------------------|-------------|----------------------------|-----------------------------------------------|
-| view_img            | `bool`      | `False`                    | Display frames with counts                    |
-| view_in_counts      | `bool`      | `True`                     | Display incounts only on video frame          |
-| view_out_counts     | `bool`      | `True`                     | Display outcounts only on video frame         |
-| line_thickness      | `int`       | `2`                        | Increase bounding boxes thickness             |
-| reg_pts             | `list`      | `[(20, 400), (1260, 400)]` | Points defining the Region Area               |
-| classes_names       | `dict`      | `model.model.names`        | Dictionary of Class Names                     |
-| region_color        | `RGB Color` | `(255, 0, 255)`            | Color of the Object counting Region or Line   |
-| track_thickness     | `int`       | `2`                        | Thickness of Tracking Lines                   |
-| draw_tracks         | `bool`      | `False`                    | Enable drawing Track lines                    |
-| track_color         | `RGB Color` | `(0, 255, 0)`              | Color for each track line                     |
-| line_dist_thresh    | `int`       | `15`                       | Euclidean Distance threshold for line counter |
-| count_txt_thickness | `int`       | `2`                        | Thickness of Object counts text               |
-| count_txt_color     | `RGB Color` | `(0, 0, 0)`                | Foreground color for Object counts text       |
-| count_color         | `RGB Color` | `(255, 255, 255)`          | Background color for Object counts text       |
-| region_thickness    | `int`       | `5`                        | Thickness for object counter region or line   |
+| `view_img`            | `bool`      | `False`                    | Display frames with counts                    |
+| `view_in_counts`      | `bool`      | `True`                     | Display incounts only on video frame          |
+| `view_out_counts`     | `bool`      | `True`                     | Display outcounts only on video frame         |
+| `line_thickness`      | `int`       | `2`                        | Increase bounding boxes thickness             |
+| `reg_pts`             | `list`      | `[(20, 400), (1260, 400)]` | Points defining the Region Area               |
+| `classes_names`       | `dict`      | `model.model.names`        | Dictionary of Class Names                     |
+| `region_color`        | `RGB Color` | `(255, 0, 255)`            | Color of the Object counting Region or Line   |
+| `track_thickness`     | `int`       | `2`                        | Thickness of Tracking Lines                   |
+| `draw_tracks`         | `bool`      | `False`                    | Enable drawing Track lines                    |
+| `track_color`         | `RGB Color` | `(0, 255, 0)`              | Color for each track line                     |
+| `line_dist_thresh`    | `int`       | `15`                       | Euclidean Distance threshold for line counter |
+| `count_txt_thickness` | `int`       | `2`                        | Thickness of Object counts text               |
+| `count_txt_color`     | `RGB Color` | `(0, 0, 0)`                | Foreground color for Object counts text       |
+| `count_color`         | `RGB Color` | `(255, 255, 255)`          | Background color for Object counts text       |
+| `region_thickness`    | `int`       | `5`                        | Thickness for object counter region or line   |

 ### Arguments `model.track`
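For context on how these arguments are consumed, a minimal sketch assuming the `ObjectCounter.set_args()` / `start_counting()` interface of the 8.1-era `ultralytics.solutions` API (video path is a placeholder):

```python
import cv2
from ultralytics import YOLO
from ultralytics.solutions import object_counter

model = YOLO("yolov8n.pt")
counter = object_counter.ObjectCounter()
counter.set_args(
    view_img=True,
    reg_pts=[(20, 400), (1260, 400)],  # default counting line from the table above
    classes_names=model.names,
    draw_tracks=True,
)

cap = cv2.VideoCapture("path/to/video.mp4")  # placeholder path
while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        break
    tracks = model.track(im0, persist=True, show=False)  # track objects frame by frame
    im0 = counter.start_counting(im0, tracks)  # draw region, tracks and counts
cap.release()
cv2.destroyAllWindows()
```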
diff --git a/docs/en/models/fast-sam.md b/docs/en/models/fast-sam.md
index 5e881d6ff..8e2f3487b 100644
--- a/docs/en/models/fast-sam.md
+++ b/docs/en/models/fast-sam.md
@@ -34,10 +34,10 @@ FastSAM is designed to address the limitations of the [Segment Anything Model (S

 This table presents the available models with their specific pre-trained weights, the tasks they support, and their compatibility with different operating modes like [Inference](../modes/predict.md), [Validation](../modes/val.md), [Training](../modes/train.md), and [Export](../modes/export.md), indicated by ✅ emojis for supported modes and ❌ emojis for unsupported modes.

-| Model Type | Pre-trained Weights | Tasks Supported                              | Inference | Validation | Training | Export |
-|------------|---------------------|----------------------------------------------|-----------|------------|----------|--------|
-| FastSAM-s  | `FastSAM-s.pt`      | [Instance Segmentation](../tasks/segment.md) | ✅         | ❌          | ❌        | ✅      |
-| FastSAM-x  | `FastSAM-x.pt`      | [Instance Segmentation](../tasks/segment.md) | ✅         | ❌          | ❌        | ✅      |
+| Model Type | Pre-trained Weights                                                                          | Tasks Supported                              | Inference | Validation | Training | Export |
+|------------|----------------------------------------------------------------------------------------------|----------------------------------------------|-----------|------------|----------|--------|
+| FastSAM-s  | [FastSAM-s.pt](https://github.com/ultralytics/assets/releases/download/v8.1.0/FastSAM-s.pt) | [Instance Segmentation](../tasks/segment.md) | ✅         | ❌          | ❌        | ✅      |
+| FastSAM-x  | [FastSAM-x.pt](https://github.com/ultralytics/assets/releases/download/v8.1.0/FastSAM-x.pt) | [Instance Segmentation](../tasks/segment.md) | ✅         | ❌          | ❌        | ✅      |

 ## Usage Examples
diff --git a/docs/en/models/mobile-sam.md b/docs/en/models/mobile-sam.md
index dfed1dda7..bd97e031e 100644
--- a/docs/en/models/mobile-sam.md
+++ b/docs/en/models/mobile-sam.md
@@ -20,9 +20,9 @@ MobileSAM is trained on a single GPU with a 100k dataset (1% of the original ima

 This table presents the available models with their specific pre-trained weights, the tasks they support, and their compatibility with different operating modes like [Inference](../modes/predict.md), [Validation](../modes/val.md), [Training](../modes/train.md), and [Export](../modes/export.md), indicated by ✅ emojis for supported modes and ❌ emojis for unsupported modes.

-| Model Type | Pre-trained Weights | Tasks Supported                              | Inference | Validation | Training | Export |
-|------------|---------------------|----------------------------------------------|-----------|------------|----------|--------|
-| MobileSAM  | `mobile_sam.pt`     | [Instance Segmentation](../tasks/segment.md) | ✅         | ❌          | ❌        | ❌      |
+| Model Type | Pre-trained Weights                                                                            | Tasks Supported                              | Inference | Validation | Training | Export |
+|------------|------------------------------------------------------------------------------------------------|----------------------------------------------|-----------|------------|----------|--------|
+| MobileSAM  | [mobile_sam.pt](https://github.com/ultralytics/assets/releases/download/v8.1.0/mobile_sam.pt) | [Instance Segmentation](../tasks/segment.md) | ✅         | ❌          | ❌        | ❌      |

 ## Adapting from SAM to MobileSAM
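Both linked checkpoints load through the documented model classes; a short sketch (image path is a placeholder):

```python
from ultralytics import SAM, FastSAM

# FastSAM weights from the table above
fast_model = FastSAM("FastSAM-s.pt")
results = fast_model("path/to/image.jpg", retina_masks=True, imgsz=1024, conf=0.4, iou=0.9)

# MobileSAM weights load through the SAM class, here with a point prompt
mobile_model = SAM("mobile_sam.pt")
results = mobile_model("path/to/image.jpg", points=[900, 370], labels=[1])
```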
diff --git a/docs/en/models/rtdetr.md b/docs/en/models/rtdetr.md
index 782af2ebd..4224b918d 100644
--- a/docs/en/models/rtdetr.md
+++ b/docs/en/models/rtdetr.md
@@ -63,10 +63,10 @@ This example provides simple RT-DETR training and inference examples. For full

 This table presents the model types, the specific pre-trained weights, the tasks supported by each model, and the various modes ([Train](../modes/train.md), [Val](../modes/val.md), [Predict](../modes/predict.md), [Export](../modes/export.md)) that are supported, indicated by ✅ emojis.

-| Model Type          | Pre-trained Weights | Tasks Supported                        | Inference | Validation | Training | Export |
-|---------------------|---------------------|----------------------------------------|-----------|------------|----------|--------|
-| RT-DETR Large       | `rtdetr-l.pt`       | [Object Detection](../tasks/detect.md) | ✅         | ✅          | ✅        | ✅      |
-| RT-DETR Extra-Large | `rtdetr-x.pt`       | [Object Detection](../tasks/detect.md) | ✅         | ✅          | ✅        | ✅      |
+| Model Type          | Pre-trained Weights                                                                        | Tasks Supported                        | Inference | Validation | Training | Export |
+|---------------------|--------------------------------------------------------------------------------------------|----------------------------------------|-----------|------------|----------|--------|
+| RT-DETR Large       | [rtdetr-l.pt](https://github.com/ultralytics/assets/releases/download/v8.1.0/rtdetr-l.pt) | [Object Detection](../tasks/detect.md) | ✅         | ✅          | ✅        | ✅      |
+| RT-DETR Extra-Large | [rtdetr-x.pt](https://github.com/ultralytics/assets/releases/download/v8.1.0/rtdetr-x.pt) | [Object Detection](../tasks/detect.md) | ✅         | ✅          | ✅        | ✅      |

 ## Citations and Acknowledgements
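The relinked RT-DETR weights work the same way; a quick sketch (image path is a placeholder):

```python
from ultralytics import RTDETR

# RT-DETR-l weights from the table above
model = RTDETR("rtdetr-l.pt")
model.info()  # display model information

# Run inference
results = model("path/to/image.jpg")
```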
diff --git a/docs/en/models/sam.md b/docs/en/models/sam.md
index 654726d41..e931b9e1d 100644
--- a/docs/en/models/sam.md
+++ b/docs/en/models/sam.md
@@ -29,10 +29,10 @@ For an in-depth look at the Segment Anything Model and the SA-1B dataset, please

 This table presents the available models with their specific pre-trained weights, the tasks they support, and their compatibility with different operating modes like [Inference](../modes/predict.md), [Validation](../modes/val.md), [Training](../modes/train.md), and [Export](../modes/export.md), indicated by ✅ emojis for supported modes and ❌ emojis for unsupported modes.

-| Model Type | Pre-trained Weights | Tasks Supported                              | Inference | Validation | Training | Export |
-|------------|---------------------|----------------------------------------------|-----------|------------|----------|--------|
-| SAM base   | `sam_b.pt`          | [Instance Segmentation](../tasks/segment.md) | ✅         | ❌          | ❌        | ❌      |
-| SAM large  | `sam_l.pt`          | [Instance Segmentation](../tasks/segment.md) | ✅         | ❌          | ❌        | ❌      |
+| Model Type | Pre-trained Weights                                                                  | Tasks Supported                              | Inference | Validation | Training | Export |
+|------------|----------------------------------------------------------------------------------------|----------------------------------------------|-----------|------------|----------|--------|
+| SAM base   | [sam_b.pt](https://github.com/ultralytics/assets/releases/download/v8.1.0/sam_b.pt) | [Instance Segmentation](../tasks/segment.md) | ✅         | ❌          | ❌        | ❌      |
+| SAM large  | [sam_l.pt](https://github.com/ultralytics/assets/releases/download/v8.1.0/sam_l.pt) | [Instance Segmentation](../tasks/segment.md) | ✅         | ❌          | ❌        | ❌      |

 ## How to Use SAM: Versatility and Power in Image Segmentation
diff --git a/ultralytics/utils/ops.py b/ultralytics/utils/ops.py
index 1deb9efba..53a180902 100644
--- a/ultralytics/utils/ops.py
+++ b/ultralytics/utils/ops.py
@@ -547,7 +547,7 @@ def xywhr2xyxyxyxy(rboxes):
     be in degrees from 0 to 90.

     Args:
-        center (numpy.ndarray | torch.Tensor): Input data in [cx, cy, w, h, rotation] format of shape (n, 5) or (b, n, 5).
+        rboxes (numpy.ndarray | torch.Tensor): Input data in [cx, cy, w, h, rotation] format of shape (n, 5) or (b, n, 5).

     Returns:
         (numpy.ndarray | torch.Tensor): Converted corner points of shape (n, 4, 2) or (b, n, 4, 2).
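The corrected `rboxes` argument name matches the function signature; a small sketch of the conversion, with rotation in degrees per the docstring above:

```python
import torch

from ultralytics.utils import ops

# One rotated box in [cx, cy, w, h, rotation] format (rotation in degrees per the docstring)
rboxes = torch.tensor([[50.0, 50.0, 40.0, 20.0, 30.0]])
corners = ops.xywhr2xyxyxyxy(rboxes)
print(corners.shape)  # torch.Size([1, 4, 2]) -- four (x, y) corner points per box
```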