From 1a497f174c21993e2464725d96482e2a4a52758b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 2 Sep 2024 10:08:02 +0200
Subject: [PATCH 01/17] Bump slackapi/slack-github-action from 1.26.0 to 1.27.0
in /.github/workflows (#15945)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/ci.yaml | 2 +-
.github/workflows/docker.yaml | 2 +-
.github/workflows/publish.yml | 4 ++--
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index c079a5a075..11bc4c7771 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -344,7 +344,7 @@ jobs:
steps:
- name: Check for failure and notify
if: (needs.HUB.result == 'failure' || needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure' || needs.GPU.result == 'failure' || needs.RaspberryPi.result == 'failure' || needs.Conda.result == 'failure' ) && github.repository == 'ultralytics/ultralytics' && (github.event_name == 'schedule' || github.event_name == 'push')
- uses: slackapi/slack-github-action@v1.26.0
+ uses: slackapi/slack-github-action@v1.27.0
with:
payload: |
{"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n"}
diff --git a/.github/workflows/docker.yaml b/.github/workflows/docker.yaml
index 6cd7b5ca46..5242d9efa6 100644
--- a/.github/workflows/docker.yaml
+++ b/.github/workflows/docker.yaml
@@ -175,7 +175,7 @@ jobs:
- name: Notify on failure
if: github.event_name == 'push' && failure() # do not notify on cancelled() as cancelling is performed by hand
- uses: slackapi/slack-github-action@v1.26.0
+ uses: slackapi/slack-github-action@v1.27.0
with:
payload: |
{"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n"}
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index e0bb3fa38e..3cb83f27fd 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -127,7 +127,7 @@ jobs:
echo "PR_TITLE=$PR_TITLE" >> $GITHUB_ENV
- name: Notify on Slack (Success)
if: success() && github.event_name == 'push' && steps.check_pypi.outputs.increment == 'True'
- uses: slackapi/slack-github-action@v1.26.0
+ uses: slackapi/slack-github-action@v1.27.0
with:
payload: |
{"text": " GitHub Actions success for ${{ github.workflow }} ✅\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* NEW '${{ github.repository }} ${{ steps.check_pypi.outputs.current_tag }}' pip package published 😃\n*Job Status:* ${{ job.status }}\n*Pull Request:* ${{ env.PR_TITLE }}\n"}
@@ -135,7 +135,7 @@ jobs:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }}
- name: Notify on Slack (Failure)
if: failure()
- uses: slackapi/slack-github-action@v1.26.0
+ uses: slackapi/slack-github-action@v1.27.0
with:
payload: |
{"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n*Job Status:* ${{ job.status }}\n*Pull Request:* ${{ env.PR_TITLE }}\n"}
From 5b3e60b4525f1b757bf4b1b8d78a273f7133e358 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 2 Sep 2024 20:00:02 +0200
Subject: [PATCH 02/17] Continue on Conda CI error (#15958)
---
.github/workflows/ci.yaml | 1 +
1 file changed, 1 insertion(+)
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 11bc4c7771..0ea3a5a1ec 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -269,6 +269,7 @@ jobs:
Conda:
if: github.repository == 'ultralytics/ultralytics' && (github.event_name == 'schedule' || github.event.inputs.conda == 'true')
+ continue-on-error: true
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
From 5a6db149e45a4ff4efef6d970a87e0693b8271e2 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Tue, 3 Sep 2024 11:10:09 +0200
Subject: [PATCH 03/17] Update `test_workflow` to ONNX (#15974)
---
tests/test_python.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/test_python.py b/tests/test_python.py
index f15dd48eff..8fbab54cf2 100644
--- a/tests/test_python.py
+++ b/tests/test_python.py
@@ -202,7 +202,7 @@ def test_workflow():
model.train(data="coco8.yaml", epochs=1, imgsz=32, optimizer="SGD")
model.val(imgsz=32)
model.predict(SOURCE, imgsz=32)
- model.export(format="torchscript")
+ model.export(format="onnx")
def test_predict_callback_and_setup():
From 7c7f456710d8f4c538453fda96fb0ea1d4e84d7a Mon Sep 17 00:00:00 2001
From: Laughing <61612323+Laughing-q@users.noreply.github.com>
Date: Tue, 3 Sep 2024 18:03:01 +0800
Subject: [PATCH 04/17] Fix `torch.cuda.amp.GradScaler` warning (#15978)
Co-authored-by: UltralyticsAssistant
---
ultralytics/engine/trainer.py | 5 ++++-
ultralytics/utils/torch_utils.py | 1 +
2 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/ultralytics/engine/trainer.py b/ultralytics/engine/trainer.py
index c5b8a13f30..2d5fc62461 100644
--- a/ultralytics/engine/trainer.py
+++ b/ultralytics/engine/trainer.py
@@ -42,6 +42,7 @@ from ultralytics.utils.checks import check_amp, check_file, check_imgsz, check_m
from ultralytics.utils.dist import ddp_cleanup, generate_ddp_command
from ultralytics.utils.files import get_latest_run
from ultralytics.utils.torch_utils import (
+ TORCH_2_4,
EarlyStopping,
ModelEMA,
autocast,
@@ -265,7 +266,9 @@ class BaseTrainer:
if RANK > -1 and world_size > 1: # DDP
dist.broadcast(self.amp, src=0) # broadcast the tensor from rank 0 to all other ranks (returns None)
self.amp = bool(self.amp) # as boolean
- self.scaler = torch.cuda.amp.GradScaler(enabled=self.amp)
+ self.scaler = (
+ torch.amp.GradScaler("cuda", enabled=self.amp) if TORCH_2_4 else torch.cuda.amp.GradScaler(enabled=self.amp)
+ )
if world_size > 1:
self.model = nn.parallel.DistributedDataParallel(self.model, device_ids=[RANK], find_unused_parameters=True)
diff --git a/ultralytics/utils/torch_utils.py b/ultralytics/utils/torch_utils.py
index c2338e184b..16bcddadd0 100644
--- a/ultralytics/utils/torch_utils.py
+++ b/ultralytics/utils/torch_utils.py
@@ -40,6 +40,7 @@ except ImportError:
TORCH_1_9 = check_version(torch.__version__, "1.9.0")
TORCH_1_13 = check_version(torch.__version__, "1.13.0")
TORCH_2_0 = check_version(torch.__version__, "2.0.0")
+TORCH_2_4 = check_version(torch.__version__, "2.4.0")
TORCHVISION_0_10 = check_version(TORCHVISION_VERSION, "0.10.0")
TORCHVISION_0_11 = check_version(TORCHVISION_VERSION, "0.11.0")
TORCHVISION_0_13 = check_version(TORCHVISION_VERSION, "0.13.0")
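The gist of the change above, as a minimal standalone sketch (assuming PyTorch and `packaging` are installed; the inline check is a simplified stand-in for the repo's `check_version` helper):

    import torch
    from packaging.version import parse

    # torch.cuda.amp.GradScaler emits a FutureWarning on PyTorch >= 2.4,
    # where torch.amp.GradScaler("cuda", ...) is the replacement API.
    TORCH_2_4 = parse(torch.__version__) >= parse("2.4.0")

    amp = True  # whether mixed precision is enabled
    scaler = (
        torch.amp.GradScaler("cuda", enabled=amp)
        if TORCH_2_4
        else torch.cuda.amp.GradScaler(enabled=amp)
    )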
From b2ab667f0086c501a5fc4620049c8bf3c9646e85 Mon Sep 17 00:00:00 2001
From: TechWolf21
Date: Tue, 3 Sep 2024 16:08:23 +0530
Subject: [PATCH 05/17] Fix queue `counts` (#15971)
Co-authored-by: UltralyticsAssistant
Co-authored-by: Muhammad Rizwan Munawar
Co-authored-by: RizwanMunawar
Co-authored-by: Glenn Jocher
---
ultralytics/solutions/queue_management.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/ultralytics/solutions/queue_management.py b/ultralytics/solutions/queue_management.py
index f3536d30eb..16df28b1bb 100644
--- a/ultralytics/solutions/queue_management.py
+++ b/ultralytics/solutions/queue_management.py
@@ -89,7 +89,7 @@ class QueueManager:
"""Extracts and processes tracks for queue management in a video stream."""
# Initialize annotator and draw the queue region
self.annotator = Annotator(self.im0, self.tf, self.names)
-
+ self.counts = 0 # Reset counts every frame
if tracks[0].boxes.id is not None:
boxes = tracks[0].boxes.xyxy.cpu()
clss = tracks[0].boxes.cls.cpu().tolist()
@@ -132,7 +132,6 @@ class QueueManager:
txt_color=self.count_txt_color,
)
- self.counts = 0 # Reset counts after displaying
self.display_frames()
def display_frames(self):
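Resetting at the top of each frame, rather than after display, guarantees the shown total reflects only the current frame. A toy illustration of the counter lifecycle (hypothetical helper, not the actual QueueManager API):

    counts = 0

    def process_frame(boxes_in_region):
        """Count objects inside the queue region for a single frame."""
        global counts
        counts = 0  # reset first, so stale totals never carry over
        for _box in boxes_in_region:
            counts += 1
        return counts

    assert process_frame(["a", "b", "c"]) == 3
    assert process_frame([]) == 0  # an empty frame reports 0, not a stale 3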
From 3fc14b5fec93f365e7a2318e410b79f879cf5004 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Tue, 3 Sep 2024 12:43:48 +0200
Subject: [PATCH 06/17] `ultralytics 8.2.87` Ray `shutdown()` workers after
tuning (#15976)
---
tests/test_python.py | 2 +-
ultralytics/__init__.py | 2 +-
ultralytics/utils/tuner.py | 9 +++++++--
3 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/tests/test_python.py b/tests/test_python.py
index 8fbab54cf2..f15dd48eff 100644
--- a/tests/test_python.py
+++ b/tests/test_python.py
@@ -202,7 +202,7 @@ def test_workflow():
model.train(data="coco8.yaml", epochs=1, imgsz=32, optimizer="SGD")
model.val(imgsz=32)
model.predict(SOURCE, imgsz=32)
- model.export(format="onnx")
+ model.export(format="torchscript")
def test_predict_callback_and_setup():
diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py
index e1bbd65619..37925ac9e7 100644
--- a/ultralytics/__init__.py
+++ b/ultralytics/__init__.py
@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
-__version__ = "8.2.86"
+__version__ = "8.2.87"
import os
diff --git a/ultralytics/utils/tuner.py b/ultralytics/utils/tuner.py
index 1d52e3e0b4..1329bfe6ec 100644
--- a/ultralytics/utils/tuner.py
+++ b/ultralytics/utils/tuner.py
@@ -143,5 +143,10 @@ def run_ray_tune(
# Run the hyperparameter search
tuner.fit()
- # Return the results of the hyperparameter search
- return tuner.get_results()
+ # Get the results of the hyperparameter search
+ results = tuner.get_results()
+
+ # Shut down Ray to clean up workers
+ ray.shutdown()
+
+ return results
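The capture-then-shutdown pattern generalizes to any Ray Tune run. A self-contained sketch (assuming `ray[tune]` is installed; the objective is a toy function standing in for the YOLO trainable):

    import ray
    from ray import tune

    def objective(config):
        # Toy trainable: return a final metric dict for this trial.
        return {"score": -((config["lr"] - 0.01) ** 2)}

    tuner = tune.Tuner(objective, param_space={"lr": tune.uniform(1e-4, 1e-1)})
    results = tuner.fit()  # run the hyperparameter search
    ray.shutdown()  # stop Ray worker processes before using the results
    print(results.get_best_result(metric="score", mode="max").config)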
From 782d5bddc65a53aaeab0ee6d83f4ee5d7590cc46 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Tue, 3 Sep 2024 22:00:52 +0200
Subject: [PATCH 07/17] Update `mkdocs-ultralytics-plugin>=0.1.8` (#15991)
---
pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index 9fae825d80..00366df58f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -94,7 +94,7 @@ dev = [
"mkdocstrings[python]",
"mkdocs-jupyter", # notebooks
"mkdocs-redirects", # 301 redirects
- "mkdocs-ultralytics-plugin>=0.1.6", # for meta descriptions and images, dates and authors
+ "mkdocs-ultralytics-plugin>=0.1.8", # for meta descriptions and images, dates and authors
"mkdocs-macros-plugin>=1.0.5" # duplicating content (i.e. export tables) in multiple places
]
export = [
From 5dce4917e0ce01f18e25256c8082b7baac05120f Mon Sep 17 00:00:00 2001
From: Lakshantha Dissanayake
Date: Tue, 3 Sep 2024 13:15:06 -0700
Subject: [PATCH 08/17] Remove Raspberry Pi CI reboot (#15987)
Co-authored-by: Glenn Jocher
---
.github/workflows/ci.yaml | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 0ea3a5a1ec..b25f89790e 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -264,8 +264,9 @@ jobs:
run: |
cat benchmarks.log
echo "$(cat benchmarks.log)" >> $GITHUB_STEP_SUMMARY
- - name: Reboot # run a reboot command in the background to free resources for next run and not crash main thread
- run: sudo bash -c "sleep 10; reboot" &
+ # The below is fixed in: https://github.com/ultralytics/ultralytics/pull/15987
+ # - name: Reboot # run a reboot command in the background to free resources for next run and not crash main thread
+ # run: sudo bash -c "sleep 10; reboot" &
Conda:
if: github.repository == 'ultralytics/ultralytics' && (github.event_name == 'schedule' || github.event.inputs.conda == 'true')
From 7a7ff4ccab06604c2e101eefdd1c7c1644132dac Mon Sep 17 00:00:00 2001
From: Lorna
Date: Wed, 4 Sep 2024 16:22:16 +0800
Subject: [PATCH 09/17] Fix 3 `Objects365.yaml` class names (#16002)
---
ultralytics/cfg/datasets/Objects365.yaml | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/ultralytics/cfg/datasets/Objects365.yaml b/ultralytics/cfg/datasets/Objects365.yaml
index 9b117206f0..4994fd5f29 100644
--- a/ultralytics/cfg/datasets/Objects365.yaml
+++ b/ultralytics/cfg/datasets/Objects365.yaml
@@ -113,7 +113,7 @@ names:
95: Pot
96: Cow
97: Cake
- 98: Dinning Table
+ 98: Dining Table
99: Sheep
100: Hanger
101: Blackboard/Whiteboard
@@ -304,7 +304,7 @@ names:
286: Hammer
287: Cue
288: Avocado
- 289: Hamimelon
+ 289: Hami melon
290: Flask
291: Mushroom
292: Screwdriver
@@ -328,7 +328,7 @@ names:
310: Dishwasher
311: Crab
312: Hoverboard
- 313: Meat ball
+ 313: Meatball
314: Rice Cooker
315: Tuba
316: Calculator
From fcb73595eca116cbcdd4823bbbeabd16181b3314 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 4 Sep 2024 11:18:27 +0200
Subject: [PATCH 10/17] Add UltralyticsAssistant to mkdocs_github_authors.yaml
(#15992)
Co-authored-by: UltralyticsAssistant
---
docs/mkdocs_github_authors.yaml | 3 +++
1 file changed, 3 insertions(+)
diff --git a/docs/mkdocs_github_authors.yaml b/docs/mkdocs_github_authors.yaml
index 6453f86569..b323b4a39e 100644
--- a/docs/mkdocs_github_authors.yaml
+++ b/docs/mkdocs_github_authors.yaml
@@ -115,6 +115,9 @@ sometimesocrazy@gmail.com:
stormsson@users.noreply.github.com:
avatar: https://avatars.githubusercontent.com/u/1133032?v=4
username: stormsson
+web@ultralytics.com:
+ avatar: https://avatars.githubusercontent.com/u/135830346?v=4
+ username: UltralyticsAssistant
xinwang614@gmail.com:
avatar: https://avatars.githubusercontent.com/u/17264618?v=4
username: GreatV
From 88102eb5085a668fff0171fdaa6297af65c2d010 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 4 Sep 2024 14:14:06 +0200
Subject: [PATCH 11/17] Skip `test_workflow` on Windows CI (#16003)
Signed-off-by: UltralyticsAssistant
Co-authored-by: UltralyticsAssistant
---
tests/test_python.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/tests/test_python.py b/tests/test_python.py
index f15dd48eff..aa18029d75 100644
--- a/tests/test_python.py
+++ b/tests/test_python.py
@@ -196,13 +196,14 @@ def test_all_model_yamls():
YOLO(m.name)
+@pytest.mark.skipif(WINDOWS, reason="Windows slow CI export bug https://github.com/ultralytics/ultralytics/pull/16003")
def test_workflow():
"""Test the complete workflow including training, validation, prediction, and exporting."""
model = YOLO(MODEL)
model.train(data="coco8.yaml", epochs=1, imgsz=32, optimizer="SGD")
model.val(imgsz=32)
model.predict(SOURCE, imgsz=32)
- model.export(format="torchscript")
+ model.export(format="torchscript") # WARNING: Windows slow CI export bug
def test_predict_callback_and_setup():
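For reference, the platform-gated skip in isolation (here `WINDOWS` is a simplified stand-in for the constant the test suite imports from `ultralytics.utils`):

    import platform

    import pytest

    WINDOWS = platform.system() == "Windows"

    @pytest.mark.skipif(WINDOWS, reason="slow export on Windows CI runners")
    def test_export_workflow():
        """Runs everywhere except Windows, where pytest reports a skip."""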
From 1e604e0d1c74863001debdb1992471af4b030868 Mon Sep 17 00:00:00 2001
From: Lakshantha Dissanayake
Date: Thu, 5 Sep 2024 05:18:01 -0700
Subject: [PATCH 12/17] Update TFLite > LiteRT docs links (#16020)
Co-authored-by: UltralyticsAssistant
Co-authored-by: Glenn Jocher
---
docs/en/integrations/tflite.md | 14 +++++++-------
docs/mkdocs_github_authors.yaml | 3 +++
2 files changed, 10 insertions(+), 7 deletions(-)
diff --git a/docs/en/integrations/tflite.md b/docs/en/integrations/tflite.md
index d1bba90346..01c40df2e2 100644
--- a/docs/en/integrations/tflite.md
+++ b/docs/en/integrations/tflite.md
@@ -16,7 +16,7 @@ The TensorFlow Lite or TFLite export format allows you to optimize your [Ultraly
## Why should you export to TFLite?
-Introduced by Google in May 2017 as part of their TensorFlow framework, [TensorFlow Lite](https://www.tensorflow.org/lite/guide), or TFLite for short, is an open-source deep learning framework designed for on-device inference, also known as edge computing. It gives developers the necessary tools to execute their trained models on mobile, embedded, and IoT devices, as well as traditional computers.
+Introduced by Google in May 2017 as part of their TensorFlow framework, [TensorFlow Lite](https://ai.google.dev/edge/litert), or TFLite for short, is an open-source deep learning framework designed for on-device inference, also known as edge computing. It gives developers the necessary tools to execute their trained models on mobile, embedded, and IoT devices, as well as traditional computers.
TensorFlow Lite is compatible with a wide range of platforms, including embedded Linux, Android, iOS, and MCU. Exporting your model to TFLite makes your applications faster, more reliable, and capable of running offline.
@@ -107,17 +107,17 @@ For more details about the export process, visit the [Ultralytics documentation
After successfully exporting your Ultralytics YOLOv8 models to TFLite format, you can now deploy them. The primary and recommended first step for running a TFLite model is to utilize the YOLO("model.tflite") method, as outlined in the previous usage code snippet. However, for in-depth instructions on deploying your TFLite models in various other settings, take a look at the following resources:
-- **[Android](https://www.tensorflow.org/lite/android/quickstart)**: A quick start guide for integrating TensorFlow Lite into Android applications, providing easy-to-follow steps for setting up and running machine learning models.
+- **[Android](https://ai.google.dev/edge/litert/android)**: A quick start guide for integrating TensorFlow Lite into Android applications, providing easy-to-follow steps for setting up and running machine learning models.
-- **[iOS](https://www.tensorflow.org/lite/guide/ios)**: Check out this detailed guide for developers on integrating and deploying TensorFlow Lite models in iOS applications, offering step-by-step instructions and resources.
+- **[iOS](https://ai.google.dev/edge/litert/ios/quickstart)**: Check out this detailed guide for developers on integrating and deploying TensorFlow Lite models in iOS applications, offering step-by-step instructions and resources.
-- **[End-To-End Examples](https://www.tensorflow.org/lite/examples)**: This page provides an overview of various TensorFlow Lite examples, showcasing practical applications and tutorials designed to help developers implement TensorFlow Lite in their machine learning projects on mobile and edge devices.
+- **[End-To-End Examples](https://github.com/tensorflow/examples/tree/master/lite/examples)**: This page provides an overview of various TensorFlow Lite examples, showcasing practical applications and tutorials designed to help developers implement TensorFlow Lite in their machine learning projects on mobile and edge devices.
## Summary
In this guide, we focused on how to export to TFLite format. By converting your Ultralytics YOLOv8 models to TFLite model format, you can improve the efficiency and speed of YOLOv8 models, making them more effective and suitable for edge computing environments.
-For further details on usage, visit the [TFLite official documentation](https://www.tensorflow.org/lite/guide).
+For further details on usage, visit the [TFLite official documentation](https://ai.google.dev/edge/litert).
Also, if you're curious about other Ultralytics YOLOv8 integrations, make sure to check out our [integration guide page](../integrations/index.md). You'll find tons of helpful info and insights waiting for you there.
@@ -159,7 +159,7 @@ TensorFlow Lite (TFLite) is an open-source deep learning framework designed for
- **Platform compatibility**: Supports Android, iOS, embedded Linux, and MCU.
- **Performance**: Utilizes hardware acceleration to optimize model speed and efficiency.
-To learn more, check out the [TFLite guide](https://www.tensorflow.org/lite/guide).
+To learn more, check out the [TFLite guide](https://ai.google.dev/edge/litert).
### Is it possible to run YOLOv8 TFLite models on Raspberry Pi?
@@ -171,7 +171,7 @@ For further optimizations, you might consider using [Coral Edge TPU](https://cor
Yes, TFLite supports deployment on microcontrollers with limited resources. TFLite's core runtime requires only 16 KB of memory on an Arm Cortex M3 and can run basic YOLOv8 models. This makes it suitable for deployment on devices with minimal computational power and memory.
-To get started, visit the [TFLite Micro for Microcontrollers guide](https://www.tensorflow.org/lite/microcontrollers).
+To get started, visit the [TFLite Micro for Microcontrollers guide](https://ai.google.dev/edge/litert/microcontrollers/overview).
### What platforms are compatible with TFLite exported YOLOv8 models?
diff --git a/docs/mkdocs_github_authors.yaml b/docs/mkdocs_github_authors.yaml
index b323b4a39e..33ed82983b 100644
--- a/docs/mkdocs_github_authors.yaml
+++ b/docs/mkdocs_github_authors.yaml
@@ -91,6 +91,9 @@ jpedrofonseca_94@hotmail.com:
k-2feng@hotmail.com:
avatar: null
username: null
+lakshantha@ultralytics.com:
+ avatar: https://avatars.githubusercontent.com/u/20147381?v=4
+ username: lakshanthad
lakshanthad@yahoo.com:
avatar: https://avatars.githubusercontent.com/u/20147381?v=4
username: lakshanthad
From 95d54828bbd3fd3ba7df9ff7c075568f0b668506 Mon Sep 17 00:00:00 2001
From: Muhammad Rizwan Munawar
Date: Thu, 5 Sep 2024 17:34:40 +0500
Subject: [PATCH 13/17] Add https://youtu.be/w4yHORvDBw0 to docs (#16031)
Co-authored-by: Glenn Jocher
---
docs/en/guides/coral-edge-tpu-on-raspberry-pi.md | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/docs/en/guides/coral-edge-tpu-on-raspberry-pi.md b/docs/en/guides/coral-edge-tpu-on-raspberry-pi.md
index 21b9589d5c..4e5a02d3f7 100644
--- a/docs/en/guides/coral-edge-tpu-on-raspberry-pi.md
+++ b/docs/en/guides/coral-edge-tpu-on-raspberry-pi.md
@@ -14,6 +14,17 @@ keywords: Coral Edge TPU, Raspberry Pi, YOLOv8, Ultralytics, TensorFlow Lite, ML
The Coral Edge TPU is a compact device that adds an Edge TPU coprocessor to your system. It enables low-power, high-performance ML inference for TensorFlow Lite models. Read more at the [Coral Edge TPU home page](https://coral.ai/products/accelerator).
+ Watch: How to Run Inference on Raspberry Pi using Google Coral Edge TPU
## Boost Raspberry Pi Model Performance with Coral Edge TPU
Many people want to run their models on an embedded or mobile device such as a Raspberry Pi, since they are very power efficient and can be used in many different applications. However, the inference performance on these devices is usually poor even when using formats like [onnx](../integrations/onnx.md) or [openvino](../integrations/openvino.md). The Coral Edge TPU is a great solution to this problem, since it can be used with a Raspberry Pi and accelerate inference performance greatly.
From ac2c2be8f34b0e75077866dfdf1a863af7140aad Mon Sep 17 00:00:00 2001
From: Ultralytics Assistant
<135830346+UltralyticsAssistant@users.noreply.github.com>
Date: Fri, 6 Sep 2024 03:54:35 +0800
Subject: [PATCH 14/17] Ultralytics Code Refactor
https://ultralytics.com/actions (#16047)
Co-authored-by: Glenn Jocher
---
ultralytics/data/converter.py | 11 +++----
ultralytics/hub/google/__init__.py | 4 +--
ultralytics/hub/session.py | 2 +-
ultralytics/models/fastsam/predict.py | 2 +-
ultralytics/models/sam/modules/blocks.py | 8 ++---
ultralytics/models/sam/modules/decoders.py | 9 +++---
ultralytics/models/sam/modules/encoders.py | 5 ++-
ultralytics/models/sam/modules/sam.py | 36 ++++++++-------------
ultralytics/models/yolo/classify/predict.py | 8 ++---
ultralytics/nn/modules/activation.py | 3 +-
ultralytics/utils/__init__.py | 6 ++--
ultralytics/utils/checks.py | 13 ++++----
12 files changed, 45 insertions(+), 62 deletions(-)
diff --git a/ultralytics/data/converter.py b/ultralytics/data/converter.py
index 400e928bc0..1ba09e0b90 100644
--- a/ultralytics/data/converter.py
+++ b/ultralytics/data/converter.py
@@ -370,13 +370,10 @@ def convert_segment_masks_to_yolo_seg(masks_dir, output_dir, classes):
├─ mask_yolo_03.txt
└─ mask_yolo_04.txt
"""
- import os
-
pixel_to_class_mapping = {i + 1: i for i in range(classes)}
- for mask_filename in os.listdir(masks_dir):
- if mask_filename.endswith(".png"):
- mask_path = os.path.join(masks_dir, mask_filename)
- mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE) # Read the mask image in grayscale
+ for mask_path in Path(masks_dir).iterdir():
+ if mask_path.suffix == ".png":
+ mask = cv2.imread(str(mask_path), cv2.IMREAD_GRAYSCALE) # Read the mask image in grayscale
img_height, img_width = mask.shape # Get image dimensions
LOGGER.info(f"Processing {mask_path} imgsz = {img_height} x {img_width}")
@@ -406,7 +403,7 @@ def convert_segment_masks_to_yolo_seg(masks_dir, output_dir, classes):
yolo_format.append(round(point[1] / img_height, 6))
yolo_format_data.append(yolo_format)
# Save Ultralytics YOLO format data to file
- output_path = os.path.join(output_dir, os.path.splitext(mask_filename)[0] + ".txt")
+ output_path = Path(output_dir) / f"{mask_path.stem}.txt"
with open(output_path, "w") as file:
for item in yolo_format_data:
line = " ".join(map(str, item))
diff --git a/ultralytics/hub/google/__init__.py b/ultralytics/hub/google/__init__.py
index 7531b7b575..9090297a71 100644
--- a/ultralytics/hub/google/__init__.py
+++ b/ultralytics/hub/google/__init__.py
@@ -136,12 +136,12 @@ class GCPRegions:
sorted_results = sorted(results, key=lambda x: x[1])
if verbose:
- print(f"{'Region':<25} {'Location':<35} {'Tier':<5} {'Latency (ms)'}")
+ print(f"{'Region':<25} {'Location':<35} {'Tier':<5} Latency (ms)")
for region, mean, std, min_, max_ in sorted_results:
tier, city, country = self.regions[region]
location = f"{city}, {country}"
if mean == float("inf"):
- print(f"{region:<25} {location:<35} {tier:<5} {'Timeout'}")
+ print(f"{region:<25} {location:<35} {tier:<5} Timeout")
else:
print(f"{region:<25} {location:<35} {tier:<5} {mean:.0f} ± {std:.0f} ({min_:.0f} - {max_:.0f})")
print(f"\nLowest latency region{'s' if top > 1 else ''}:")
diff --git a/ultralytics/hub/session.py b/ultralytics/hub/session.py
index d93c962814..b112d29ff1 100644
--- a/ultralytics/hub/session.py
+++ b/ultralytics/hub/session.py
@@ -346,7 +346,7 @@ class HUBTrainingSession:
"""
weights = Path(weights)
if not weights.is_file():
- last = weights.with_name("last" + weights.suffix)
+ last = weights.with_name(f"last{weights.suffix}")
if final and last.is_file():
LOGGER.warning(
f"{PREFIX} WARNING ⚠️ Model 'best.pt' not found, copying 'last.pt' to 'best.pt' and uploading. "
diff --git a/ultralytics/models/fastsam/predict.py b/ultralytics/models/fastsam/predict.py
index 0dce968a9b..9910237b0f 100644
--- a/ultralytics/models/fastsam/predict.py
+++ b/ultralytics/models/fastsam/predict.py
@@ -93,7 +93,7 @@ class FastSAMPredictor(SegmentationPredictor):
else torch.zeros(len(result), dtype=torch.bool, device=self.device)
)
for point, label in zip(points, labels):
- point_idx[torch.nonzero(masks[:, point[1], point[0]], as_tuple=True)[0]] = True if label else False
+ point_idx[torch.nonzero(masks[:, point[1], point[0]], as_tuple=True)[0]] = bool(label)
idx |= point_idx
if texts is not None:
if isinstance(texts, str):
diff --git a/ultralytics/models/sam/modules/blocks.py b/ultralytics/models/sam/modules/blocks.py
index 0615037467..026443c69f 100644
--- a/ultralytics/models/sam/modules/blocks.py
+++ b/ultralytics/models/sam/modules/blocks.py
@@ -736,7 +736,7 @@ class PositionEmbeddingSine(nn.Module):
self.num_pos_feats = num_pos_feats // 2
self.temperature = temperature
self.normalize = normalize
- if scale is not None and normalize is False:
+ if scale is not None and not normalize:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
@@ -763,8 +763,7 @@ class PositionEmbeddingSine(nn.Module):
def encode_boxes(self, x, y, w, h):
"""Encodes box coordinates and dimensions into positional embeddings for detection."""
pos_x, pos_y = self._encode_xy(x, y)
- pos = torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1)
- return pos
+ return torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1)
encode = encode_boxes # Backwards compatibility
@@ -775,8 +774,7 @@ class PositionEmbeddingSine(nn.Module):
assert bx == by and nx == ny and bx == bl and nx == nl
pos_x, pos_y = self._encode_xy(x.flatten(), y.flatten())
pos_x, pos_y = pos_x.reshape(bx, nx, -1), pos_y.reshape(by, ny, -1)
- pos = torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2)
- return pos
+ return torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2)
@torch.no_grad()
def forward(self, x: torch.Tensor):
diff --git a/ultralytics/models/sam/modules/decoders.py b/ultralytics/models/sam/modules/decoders.py
index cd4cc0c18c..7c27ca176b 100644
--- a/ultralytics/models/sam/modules/decoders.py
+++ b/ultralytics/models/sam/modules/decoders.py
@@ -435,9 +435,9 @@ class SAM2MaskDecoder(nn.Module):
upscaled_embedding = act1(ln1(dc1(src) + feat_s1))
upscaled_embedding = act2(dc2(upscaled_embedding) + feat_s0)
- hyper_in_list: List[torch.Tensor] = []
- for i in range(self.num_mask_tokens):
- hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
+ hyper_in_list: List[torch.Tensor] = [
+ self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]) for i in range(self.num_mask_tokens)
+ ]
hyper_in = torch.stack(hyper_in_list, dim=1)
b, c, h, w = upscaled_embedding.shape
masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
@@ -459,8 +459,7 @@ class SAM2MaskDecoder(nn.Module):
stability_delta = self.dynamic_multimask_stability_delta
area_i = torch.sum(mask_logits > stability_delta, dim=-1).float()
area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float()
- stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0)
- return stability_scores
+ return torch.where(area_u > 0, area_i / area_u, 1.0)
def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores):
"""
diff --git a/ultralytics/models/sam/modules/encoders.py b/ultralytics/models/sam/modules/encoders.py
index 22934222a1..7fa7b405d3 100644
--- a/ultralytics/models/sam/modules/encoders.py
+++ b/ultralytics/models/sam/modules/encoders.py
@@ -491,12 +491,11 @@ class ImageEncoder(nn.Module):
features, pos = features[: -self.scalp], pos[: -self.scalp]
src = features[-1]
- output = {
+ return {
"vision_features": src,
"vision_pos_enc": pos,
"backbone_fpn": features,
}
- return output
class FpnNeck(nn.Module):
@@ -577,7 +576,7 @@ class FpnNeck(nn.Module):
self.convs.append(current)
self.fpn_interp_model = fpn_interp_model
- assert fuse_type in ["sum", "avg"]
+ assert fuse_type in {"sum", "avg"}
self.fuse_type = fuse_type
# levels to have top-down features in its outputs
diff --git a/ultralytics/models/sam/modules/sam.py b/ultralytics/models/sam/modules/sam.py
index b638ddc534..c902153f17 100644
--- a/ultralytics/models/sam/modules/sam.py
+++ b/ultralytics/models/sam/modules/sam.py
@@ -671,26 +671,19 @@ class SAM2Model(torch.nn.Module):
t_rel = self.num_maskmem - t_pos # how many frames before current frame
if t_rel == 1:
# for t_rel == 1, we take the last frame (regardless of r)
- if not track_in_reverse:
- # the frame immediately before this frame (i.e. frame_idx - 1)
- prev_frame_idx = frame_idx - t_rel
- else:
- # the frame immediately after this frame (i.e. frame_idx + 1)
- prev_frame_idx = frame_idx + t_rel
+ prev_frame_idx = frame_idx + t_rel if track_in_reverse else frame_idx - t_rel
+ elif not track_in_reverse:
+ # first find the nearest frame among every r-th frames before this frame
+ # for r=1, this would be (frame_idx - 2)
+ prev_frame_idx = ((frame_idx - 2) // r) * r
+ # then seek further among every r-th frames
+ prev_frame_idx = prev_frame_idx - (t_rel - 2) * r
else:
- # for t_rel >= 2, we take the memory frame from every r-th frames
- if not track_in_reverse:
- # first find the nearest frame among every r-th frames before this frame
- # for r=1, this would be (frame_idx - 2)
- prev_frame_idx = ((frame_idx - 2) // r) * r
- # then seek further among every r-th frames
- prev_frame_idx = prev_frame_idx - (t_rel - 2) * r
- else:
- # first find the nearest frame among every r-th frames after this frame
- # for r=1, this would be (frame_idx + 2)
- prev_frame_idx = -(-(frame_idx + 2) // r) * r
- # then seek further among every r-th frames
- prev_frame_idx = prev_frame_idx + (t_rel - 2) * r
+ # first find the nearest frame among every r-th frames after this frame
+ # for r=1, this would be (frame_idx + 2)
+ prev_frame_idx = -(-(frame_idx + 2) // r) * r
+ # then seek further among every r-th frames
+ prev_frame_idx = prev_frame_idx + (t_rel - 2) * r
out = output_dict["non_cond_frame_outputs"].get(prev_frame_idx, None)
if out is None:
# If an unselected conditioning frame is among the last (self.num_maskmem - 1)
@@ -739,7 +732,7 @@ class SAM2Model(torch.nn.Module):
if out is not None:
pos_and_ptrs.append((t_diff, out["obj_ptr"]))
# If we have at least one object pointer, add them to the across attention
- if len(pos_and_ptrs) > 0:
+ if pos_and_ptrs:
pos_list, ptrs_list = zip(*pos_and_ptrs)
# stack object pointers along dim=0 into [ptr_seq_len, B, C] shape
obj_ptrs = torch.stack(ptrs_list, dim=0)
@@ -930,12 +923,11 @@ class SAM2Model(torch.nn.Module):
def _use_multimask(self, is_init_cond_frame, point_inputs):
"""Determines whether to use multiple mask outputs in the SAM head based on configuration and inputs."""
num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(1)
- multimask_output = (
+ return (
self.multimask_output_in_sam
and (is_init_cond_frame or self.multimask_output_for_tracking)
and (self.multimask_min_pt_num <= num_pts <= self.multimask_max_pt_num)
)
- return multimask_output
def _apply_non_overlapping_constraints(self, pred_masks):
"""Applies non-overlapping constraints to masks, keeping highest scoring object per location."""
diff --git a/ultralytics/models/yolo/classify/predict.py b/ultralytics/models/yolo/classify/predict.py
index 266075c6c1..596931a176 100644
--- a/ultralytics/models/yolo/classify/predict.py
+++ b/ultralytics/models/yolo/classify/predict.py
@@ -53,7 +53,7 @@ class ClassificationPredictor(BasePredictor):
if not isinstance(orig_imgs, list): # input images are a torch.Tensor, not a list
orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
- results = []
- for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0]):
- results.append(Results(orig_img, path=img_path, names=self.model.names, probs=pred))
- return results
+ return [
+ Results(orig_img, path=img_path, names=self.model.names, probs=pred)
+ for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0])
+ ]
diff --git a/ultralytics/nn/modules/activation.py b/ultralytics/nn/modules/activation.py
index 25cca2a508..aaf636e762 100644
--- a/ultralytics/nn/modules/activation.py
+++ b/ultralytics/nn/modules/activation.py
@@ -18,5 +18,4 @@ class AGLU(nn.Module):
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Compute the forward pass of the Unified activation function."""
lam = torch.clamp(self.lambd, min=0.0001)
- y = torch.exp((1 / lam) * self.act((self.kappa * x) - torch.log(lam)))
- return y # for AGLU simply return y * input
+ return torch.exp((1 / lam) * self.act((self.kappa * x) - torch.log(lam)))
diff --git a/ultralytics/utils/__init__.py b/ultralytics/utils/__init__.py
index 616639abbb..7328a5fdb2 100644
--- a/ultralytics/utils/__init__.py
+++ b/ultralytics/utils/__init__.py
@@ -1160,9 +1160,9 @@ def vscode_msg(ext="ultralytics.ultralytics-snippets") -> str:
obs_file = path / ".obsolete" # file tracks uninstalled extensions, while source directory remains
installed = any(path.glob(f"{ext}*")) and ext not in (obs_file.read_text("utf-8") if obs_file.exists() else "")
return (
- f"{colorstr('VS Code:')} view Ultralytics VS Code Extension ⚡ at https://docs.ultralytics.com/integrations/vscode"
- if not installed
- else ""
+ ""
+ if installed
+ else f"{colorstr('VS Code:')} view Ultralytics VS Code Extension ⚡ at https://docs.ultralytics.com/integrations/vscode"
)
diff --git a/ultralytics/utils/checks.py b/ultralytics/utils/checks.py
index 80c13ad787..6b308bc146 100644
--- a/ultralytics/utils/checks.py
+++ b/ultralytics/utils/checks.py
@@ -226,13 +226,12 @@ def check_version(
if not required: # if required is '' or None
return True
- if "sys_platform" in required: # i.e. required='<2.4.0,>=1.8.0; sys_platform == "win32"'
- if (
- (WINDOWS and "win32" not in required)
- or (LINUX and "linux" not in required)
- or (MACOS and "macos" not in required and "darwin" not in required)
- ):
- return True
+ if "sys_platform" in required and ( # i.e. required='<2.4.0,>=1.8.0; sys_platform == "win32"'
+ (WINDOWS and "win32" not in required)
+ or (LINUX and "linux" not in required)
+ or (MACOS and "macos" not in required and "darwin" not in required)
+ ):
+ return True
op = ""
version = ""
From 2a73bf7046d1c8eb37a51c52b1db2aaee2a09120 Mon Sep 17 00:00:00 2001
From: Ultralytics Assistant
<135830346+UltralyticsAssistant@users.noreply.github.com>
Date: Fri, 6 Sep 2024 04:47:15 +0800
Subject: [PATCH 15/17] Update URLs to redirects (#16048)
---
CONTRIBUTING.md | 4 +--
README.md | 24 ++++++++--------
README.zh-CN.md | 24 ++++++++--------
docs/README.md | 8 +++---
docs/coming_soon_template.md | 8 +++---
docs/en/datasets/classify/imagenet10.md | 2 +-
docs/en/datasets/classify/index.md | 2 +-
docs/en/datasets/detect/coco8.md | 6 ++--
docs/en/datasets/detect/roboflow-100.md | 4 +--
docs/en/datasets/obb/dota8.md | 4 +--
docs/en/datasets/pose/coco8-pose.md | 4 +--
docs/en/datasets/pose/index.md | 4 +--
docs/en/datasets/pose/tiger-pose.md | 8 +++---
docs/en/datasets/segment/carparts-seg.md | 8 +++---
docs/en/datasets/segment/coco8-seg.md | 6 ++--
docs/en/datasets/segment/crack-seg.md | 6 ++--
docs/en/datasets/segment/package-seg.md | 6 ++--
.../guides/data-collection-and-annotation.md | 2 +-
docs/en/guides/defining-project-goals.md | 2 +-
docs/en/guides/docker-quickstart.md | 4 +--
docs/en/guides/hyperparameter-tuning.md | 2 +-
docs/en/guides/model-deployment-options.md | 2 +-
docs/en/guides/model-deployment-practices.md | 2 +-
docs/en/guides/model-evaluation-insights.md | 2 +-
.../model-monitoring-and-maintenance.md | 2 +-
docs/en/guides/model-testing.md | 2 +-
docs/en/guides/model-training-tips.md | 2 +-
docs/en/guides/nvidia-jetson.md | 2 +-
...ng-openvino-latency-vs-throughput-modes.md | 2 +-
.../en/guides/preprocessing_annotated_data.md | 2 +-
docs/en/guides/raspberry-pi.md | 2 +-
docs/en/guides/steps-of-a-cv-project.md | 2 +-
docs/en/guides/streamlit-live-inference.md | 2 +-
docs/en/guides/triton-inference-server.md | 10 +++----
docs/en/guides/yolo-common-issues.md | 6 ++--
docs/en/guides/yolo-performance-metrics.md | 2 +-
docs/en/help/CI.md | 6 ++--
docs/en/help/FAQ.md | 4 +--
docs/en/help/code_of_conduct.md | 4 +--
docs/en/help/contributing.md | 4 +--
docs/en/help/minimum_reproducible_example.md | 2 +-
docs/en/help/privacy.md | 6 ++--
docs/en/help/security.md | 10 +++----
docs/en/hub/api/index.md | 6 ++--
docs/en/hub/app/android.md | 16 +++++------
docs/en/hub/cloud-training.md | 4 +--
docs/en/hub/datasets.md | 14 +++++-----
docs/en/hub/index.md | 10 +++----
docs/en/hub/inference-api.md | 14 +++++-----
docs/en/hub/integrations.md | 24 ++++++++--------
docs/en/hub/models.md | 24 ++++++++--------
docs/en/hub/pro.md | 2 +-
docs/en/hub/projects.md | 6 ++--
docs/en/hub/quickstart.md | 6 ++--
docs/en/hub/teams.md | 2 +-
docs/en/index.md | 10 +++----
docs/en/integrations/clearml.md | 2 +-
docs/en/integrations/comet.md | 4 +--
docs/en/integrations/coreml.md | 6 ++--
docs/en/integrations/dvc.md | 2 +-
docs/en/integrations/edge-tpu.md | 2 +-
docs/en/integrations/index.md | 16 +++++------
docs/en/integrations/mlflow.md | 2 +-
docs/en/integrations/neural-magic.md | 2 +-
docs/en/integrations/ray-tune.md | 2 +-
docs/en/integrations/roboflow.md | 28 +++++++++----------
docs/en/integrations/tensorboard.md | 2 +-
docs/en/models/sam.md | 2 +-
docs/en/models/yolo-world.md | 4 +--
docs/en/models/yolov10.md | 4 +--
docs/en/models/yolov5.md | 2 +-
docs/en/models/yolov8.md | 2 +-
docs/en/models/yolov9.md | 2 +-
docs/en/modes/train.md | 2 +-
docs/en/quickstart.md | 2 +-
docs/en/tasks/detect.md | 2 +-
docs/en/tasks/pose.md | 2 +-
docs/en/tasks/segment.md | 2 +-
.../docker_image_quickstart_tutorial.md | 2 +-
.../google_cloud_quickstart_tutorial.md | 2 +-
docs/en/yolov5/index.md | 4 +--
.../tutorials/hyperparameter_evolution.md | 2 +-
docs/en/yolov5/tutorials/model_ensembling.md | 2 +-
docs/en/yolov5/tutorials/model_export.md | 6 ++--
.../tutorials/model_pruning_and_sparsity.md | 2 +-
.../en/yolov5/tutorials/multi_gpu_training.md | 2 +-
.../tutorials/pytorch_hub_model_loading.md | 4 +--
.../roboflow_datasets_integration.md | 12 ++++----
.../tutorials/test_time_augmentation.md | 4 +--
docs/en/yolov5/tutorials/train_custom_data.md | 12 ++++----
.../transfer_learning_with_frozen_layers.md | 2 +-
ultralytics/cfg/models/README.md | 2 +-
92 files changed, 253 insertions(+), 253 deletions(-)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 0c564dadef..d884e43b4a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -6,7 +6,7 @@ keywords: Ultralytics, YOLO, open-source, contribution, pull request, code of co
# Contributing to Ultralytics Open-Source Projects
-Welcome! We're thrilled that you're considering contributing to our [Ultralytics](https://ultralytics.com) [open-source](https://github.com/ultralytics) projects. Your involvement not only helps enhance the quality of our repositories but also benefits the entire community. This guide provides clear guidelines and best practices to help you get started.
+Welcome! We're thrilled that you're considering contributing to our [Ultralytics](https://www.ultralytics.com/) [open-source](https://github.com/ultralytics) projects. Your involvement not only helps enhance the quality of our repositories but also benefits the entire community. This guide provides clear guidelines and best practices to help you get started.
@@ -131,7 +131,7 @@ We encourage all contributors to familiarize themselves with the terms of the AG
## Conclusion
-Thank you for your interest in contributing to [Ultralytics](https://ultralytics.com) [open-source](https://github.com/ultralytics) YOLO projects. Your participation is essential in shaping the future of our software and building a vibrant community of innovation and collaboration. Whether you're enhancing code, reporting bugs, or suggesting new features, your contributions are invaluable.
+Thank you for your interest in contributing to [Ultralytics](https://www.ultralytics.com/) [open-source](https://github.com/ultralytics) YOLO projects. Your participation is essential in shaping the future of our software and building a vibrant community of innovation and collaboration. Whether you're enhancing code, reporting bugs, or suggesting new features, your contributions are invaluable.
We're excited to see your ideas come to life and appreciate your commitment to advancing object detection technology. Together, let's continue to grow and innovate in this exciting open-source journey. Happy coding! 🚀🌟
diff --git a/README.md b/README.md
index 0505701dd8..1cc1686e67 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
-[Ultralytics](https://ultralytics.com) [YOLOv8](https://github.com/ultralytics/ultralytics) is a cutting-edge, state-of-the-art (SOTA) model that builds upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and flexibility. YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection and tracking, instance segmentation, image classification and pose estimation tasks.
+[Ultralytics](https://www.ultralytics.com/) [YOLOv8](https://github.com/ultralytics/ultralytics) is a cutting-edge, state-of-the-art (SOTA) model that builds upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and flexibility. YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection and tracking, instance segmentation, image classification and pose estimation tasks.
We hope that the resources here will help you get the most out of YOLOv8. Please browse the YOLOv8 Docs for details, raise an issue on GitHub for support, questions, or discussions, become a member of the Ultralytics Discord, Reddit and Forums!
-To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license).
+To request an Enterprise License please complete the form at [Ultralytics Licensing](https://www.ultralytics.com/license).
@@ -103,7 +103,7 @@ See YOLOv8 [Python Docs](https://docs.ultralytics.com/usage/python) for more exa
### Notebooks
-Ultralytics provides interactive notebooks for YOLOv8, covering training, validation, tracking, and more. Each notebook is paired with a [YouTube](https://youtube.com/ultralytics?sub_confirmation=1) tutorial, making it easy to learn and implement advanced YOLOv8 features.
+Ultralytics provides interactive notebooks for YOLOv8, covering training, validation, tracking, and more. Each notebook is paired with a [YouTube](https://www.youtube.com/ultralytics?sub_confirmation=1) tutorial, making it easy to learn and implement advanced YOLOv8 features.
| Docs | Notebook | YouTube |
| ---------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
@@ -134,7 +134,7 @@ See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for usage examp
| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l.pt) | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 |
| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x.pt) | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 |
-- **mAPval** values are for single-model single-scale on [COCO val2017](https://cocodataset.org) dataset. Reproduce by `yolo val detect data=coco.yaml device=0`
+- **mAPval** values are for single-model single-scale on [COCO val2017](https://cocodataset.org/) dataset. Reproduce by `yolo val detect data=coco.yaml device=0`
- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. Reproduce by `yolo val detect data=coco.yaml batch=1 device=0|cpu`
@@ -168,7 +168,7 @@ See [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for usage e
| [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 |
| [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 |
-- **mAPval** values are for single-model single-scale on [COCO val2017](https://cocodataset.org) dataset. Reproduce by `yolo val segment data=coco-seg.yaml device=0`
+- **mAPval** values are for single-model single-scale on [COCO val2017](https://cocodataset.org/) dataset. Reproduce by `yolo val segment data=coco-seg.yaml device=0`
- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. Reproduce by `yolo val segment data=coco-seg.yaml batch=1 device=0|cpu`
@@ -186,7 +186,7 @@ See [Pose Docs](https://docs.ultralytics.com/tasks/pose/) for usage examples wit
| [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-pose.pt) | 640 | 69.2 | 90.2 | 1607.1 | 3.73 | 69.4 | 263.2 |
| [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-pose-p6.pt) | 1280 | 71.6 | 91.2 | 4088.7 | 10.04 | 99.1 | 1066.4 |
-- **mAPval** values are for single-model single-scale on [COCO Keypoints val2017](https://cocodataset.org) dataset. Reproduce by `yolo val pose data=coco-pose.yaml device=0`
+- **mAPval** values are for single-model single-scale on [COCO Keypoints val2017](https://cocodataset.org/) dataset. Reproduce by `yolo val pose data=coco-pose.yaml device=0`
- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. Reproduce by `yolo val pose data=coco-pose.yaml batch=1 device=0|cpu`
@@ -255,14 +255,14 @@ Our key integrations with leading AI platforms extend the functionality of Ultra
## Ultralytics HUB
-Experience seamless AI with [Ultralytics HUB](https://ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now!
+Experience seamless AI with [Ultralytics HUB](https://www.ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://www.ultralytics.com/app-install). Start your journey for **Free** now!
## Contribute
-We love your input! YOLOv5 and YOLOv8 would not be possible without help from our community. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing) to get started, and fill out our [Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you 🙏 to all our contributors!
+We love your input! YOLOv5 and YOLOv8 would not be possible without help from our community. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing) to get started, and fill out our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you 🙏 to all our contributors!
@@ -273,12 +273,12 @@ We love your input! YOLOv5 and YOLOv8 would not be possible without help from ou
Ultralytics offers two licensing options to accommodate diverse use cases:
-- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/licenses/) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for more details.
-- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://ultralytics.com/license).
+- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/license) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for more details.
+- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://www.ultralytics.com/license).
## Contact
-For Ultralytics bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues). Become a member of the Ultralytics [Discord](https://ultralytics.com/discord), [Reddit](https://reddit.com/r/ultralytics), or [Forums](https://community.ultralytics.com) for asking questions, sharing projects, learning discussions, or for help with all things Ultralytics!
+For Ultralytics bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues). Become a member of the Ultralytics [Discord](https://discord.com/invite/ultralytics), [Reddit](https://www.reddit.com/r/ultralytics/), or [Forums](https://community.ultralytics.com/) for asking questions, sharing projects, learning discussions, or for help with all things Ultralytics!
diff --git a/docs/README.md b/docs/README.md
index b3766abe9e..296b5d5a07 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -3,7 +3,7 @@
# 📚 Ultralytics Docs
-[Ultralytics](https://ultralytics.com) Docs are the gateway to understanding and utilizing our cutting-edge machine learning tools. These documents are deployed to [https://docs.ultralytics.com](https://docs.ultralytics.com) for your convenience.
+[Ultralytics](https://www.ultralytics.com/) Docs are the gateway to understanding and utilizing our cutting-edge machine learning tools. These documents are deployed to [https://docs.ultralytics.com](https://docs.ultralytics.com) for your convenience.
[](https://github.com/ultralytics/docs/actions/workflows/pages/pages-build-deployment)
[](https://github.com/ultralytics/docs/actions/workflows/links.yml)
@@ -113,7 +113,7 @@ Choose a hosting provider and deployment method for your MkDocs documentation:
## 💡 Contribute
-We cherish the community's input as it drives Ultralytics open-source initiatives. Dive into the [Contributing Guide](https://docs.ultralytics.com/help/contributing) and share your thoughts via our [Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey). A heartfelt thank you 🙏 to each contributor!
+We cherish the community's input as it drives Ultralytics open-source initiatives. Dive into the [Contributing Guide](https://docs.ultralytics.com/help/contributing) and share your thoughts via our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey). A heartfelt thank you 🙏 to each contributor!

@@ -122,11 +122,11 @@ We cherish the community's input as it drives Ultralytics open-source initiative
Ultralytics Docs presents two licensing options:
- **AGPL-3.0 License**: Perfect for academia and open collaboration. Details are in the [LICENSE](https://github.com/ultralytics/docs/blob/main/LICENSE) file.
-- **Enterprise License**: Tailored for commercial usage, offering a seamless blend of Ultralytics technology in your products. Learn more at [Ultralytics Licensing](https://ultralytics.com/license).
+- **Enterprise License**: Tailored for commercial usage, offering a seamless blend of Ultralytics technology in your products. Learn more at [Ultralytics Licensing](https://www.ultralytics.com/license).
## ✉️ Contact
-For Ultralytics bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues). Become a member of the Ultralytics [Discord](https://ultralytics.com/discord), [Reddit](https://reddit.com/r/ultralytics), or [Forums](https://community.ultralytics.com) for asking questions, sharing projects, learning discussions, or for help with all things Ultralytics!
+For Ultralytics bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues). Become a member of the Ultralytics [Discord](https://discord.com/invite/ultralytics), [Reddit](https://www.reddit.com/r/ultralytics/), or [Forums](https://community.ultralytics.com/) for asking questions, sharing projects, learning discussions, or for help with all things Ultralytics!
diff --git a/docs/coming_soon_template.md b/docs/coming_soon_template.md
index 1b610d4d5c..3f0840d453 100644
--- a/docs/coming_soon_template.md
+++ b/docs/coming_soon_template.md
@@ -5,7 +5,7 @@ keywords: Ultralytics, coming soon, under construction, new features, AI updates
# Under Construction 🏗️🌟
-Welcome to the [Ultralytics](https://ultralytics.com) "Under Construction" page! Here, we're hard at work developing the next generation of AI and ML innovations. This page serves as a teaser for the exciting updates and new features we're eager to share with you!
+Welcome to the [Ultralytics](https://www.ultralytics.com/) "Under Construction" page! Here, we're hard at work developing the next generation of AI and ML innovations. This page serves as a teaser for the exciting updates and new features we're eager to share with you!
## Exciting New Features on the Way 🎉
@@ -17,13 +17,13 @@ Welcome to the [Ultralytics](https://ultralytics.com) "Under Construction" page!
This placeholder page is your first stop for upcoming developments. Keep an eye out for:
-- **Newsletter:** Subscribe [here](https://ultralytics.com/#newsletter) for the latest news.
+- **Newsletter:** Subscribe [here](https://www.ultralytics.com/#newsletter) for the latest news.
- **Social Media:** Follow us [here](https://www.linkedin.com/company/ultralytics) for updates and teasers.
-- **Blog:** Visit our [blog](https://ultralytics.com/blog) for detailed insights.
+- **Blog:** Visit our [blog](https://www.ultralytics.com/blog) for detailed insights.
## We Value Your Input 🗣️
-Your feedback shapes our future releases. Share your thoughts and suggestions [here](https://ultralytics.com/survey).
+Your feedback shapes our future releases. Share your thoughts and suggestions [here](https://www.ultralytics.com/survey).
## Thank You, Community! 🌍
diff --git a/docs/en/datasets/classify/imagenet10.md b/docs/en/datasets/classify/imagenet10.md
index cc9c9ec7e6..38764c89ec 100644
--- a/docs/en/datasets/classify/imagenet10.md
+++ b/docs/en/datasets/classify/imagenet10.md
@@ -6,7 +6,7 @@ keywords: ImageNet10, ImageNet, Ultralytics, CI tests, sanity checks, training p
# ImageNet10 Dataset
-The [ImageNet10](https://github.com/ultralytics/assets/releases/download/v0.0.0/imagenet10.zip) dataset is a small-scale subset of the [ImageNet](https://www.image-net.org/) database, developed by [Ultralytics](https://ultralytics.com) and designed for CI tests, sanity checks, and fast testing of training pipelines. This dataset is composed of the first image in the training set and the first image from the validation set of the first 10 classes in ImageNet. Although significantly smaller, it retains the structure and diversity of the original ImageNet dataset.
+The [ImageNet10](https://github.com/ultralytics/assets/releases/download/v0.0.0/imagenet10.zip) dataset is a small-scale subset of the [ImageNet](https://www.image-net.org/) database, developed by [Ultralytics](https://www.ultralytics.com/) and designed for CI tests, sanity checks, and fast testing of training pipelines. This dataset is composed of the first image in the training set and the first image from the validation set of the first 10 classes in ImageNet. Although significantly smaller, it retains the structure and diversity of the original ImageNet dataset.
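Because ImageNet10 is tiny, a single training epoch is enough to exercise the full pipeline. A minimal sketch (the `imagenet10` dataset key is resolved and downloaded automatically by Ultralytics):

```python
from ultralytics import YOLO

# Load a pretrained YOLOv8 classification model
model = YOLO("yolov8n-cls.pt")

# One quick epoch on ImageNet10 as a pipeline sanity check
results = model.train(data="imagenet10", epochs=1, imgsz=224)
```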
## Key Features
diff --git a/docs/en/datasets/classify/index.md b/docs/en/datasets/classify/index.md
index 357138759f..58aaaecd0c 100644
--- a/docs/en/datasets/classify/index.md
+++ b/docs/en/datasets/classify/index.md
@@ -8,7 +8,7 @@ keywords: YOLO, image classification, dataset structure, CIFAR-10, Ultralytics,
### Dataset Structure for YOLO Classification Tasks
-For [Ultralytics](https://ultralytics.com) YOLO classification tasks, the dataset must be organized in a specific split-directory structure under the `root` directory to facilitate proper training, testing, and optional validation processes. This structure includes separate directories for training (`train`) and testing (`test`) phases, with an optional directory for validation (`val`).
+For [Ultralytics](https://www.ultralytics.com/) YOLO classification tasks, the dataset must be organized in a specific split-directory structure under the `root` directory to facilitate proper training, testing, and optional validation processes. This structure includes separate directories for training (`train`) and testing (`test`) phases, with an optional directory for validation (`val`).
Each of these directories should contain one subdirectory for each class in the dataset. The subdirectories are named after the corresponding class and contain all the images for that class. Ensure that each image file is named uniquely and stored in a common format such as JPEG or PNG.
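For example, with a layout like `root/train/<class>/*.jpg` and `root/test/<class>/*.jpg`, training only requires pointing `data` at the root directory. A minimal sketch; the path below is illustrative:

```python
from ultralytics import YOLO

model = YOLO("yolov8n-cls.pt")

# `data` points at the dataset root; the train/test (and optional val)
# splits and class subdirectories are discovered automatically
results = model.train(data="path/to/root", epochs=10, imgsz=224)
```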
diff --git a/docs/en/datasets/detect/coco8.md b/docs/en/datasets/detect/coco8.md
index cae9e673d6..c1df16ee22 100644
--- a/docs/en/datasets/detect/coco8.md
+++ b/docs/en/datasets/detect/coco8.md
@@ -8,7 +8,7 @@ keywords: COCO8, Ultralytics, dataset, object detection, YOLOv8, training, valid
## Introduction
-[Ultralytics](https://ultralytics.com) COCO8 is a small, but versatile object detection dataset composed of the first 8 images of the COCO train 2017 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging object detection models, or for experimenting with new detection approaches. With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets.
+[Ultralytics](https://www.ultralytics.com/) COCO8 is a small but versatile object detection dataset composed of the first 8 images of the COCO train 2017 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging object detection models, or for experimenting with new detection approaches. With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets.
-This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com) and [YOLOv8](https://github.com/ultralytics/ultralytics).
+This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLOv8](https://github.com/ultralytics/ultralytics).
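As a quick sanity check, training a detection model on COCO8 takes only a few lines; a minimal sketch using the bundled `coco8.yaml`:

```python
from ultralytics import YOLO

# Train a pretrained YOLOv8 detection model on the 8-image COCO8 dataset
model = YOLO("yolov8n.pt")
results = model.train(data="coco8.yaml", epochs=3, imgsz=640)
```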
## Dataset YAML
@@ -124,7 +124,7 @@ For a comprehensive list of available arguments, refer to the model [Training](.
### Why should I use Ultralytics HUB for managing my COCO8 training?
-Ultralytics HUB is an all-in-one web tool designed to simplify the training and deployment of YOLO models, including the Ultralytics YOLOv8 models on the COCO8 dataset. It offers cloud training, real-time tracking, and seamless dataset management. HUB allows you to start training with a single click and avoids the complexities of manual setups. Discover more about [Ultralytics HUB](https://hub.ultralytics.com) and its benefits.
+Ultralytics HUB is an all-in-one web tool designed to simplify the training and deployment of YOLO models, including the Ultralytics YOLOv8 models on the COCO8 dataset. It offers cloud training, real-time tracking, and seamless dataset management. HUB allows you to start training with a single click and avoids the complexities of manual setups. Discover more about [Ultralytics HUB](https://hub.ultralytics.com/) and its benefits.
### What are the benefits of using mosaic augmentation in training with the COCO8 dataset?
diff --git a/docs/en/datasets/detect/roboflow-100.md b/docs/en/datasets/detect/roboflow-100.md
index f27591232d..253b640f4b 100644
--- a/docs/en/datasets/detect/roboflow-100.md
+++ b/docs/en/datasets/detect/roboflow-100.md
@@ -95,7 +95,7 @@ For more ideas and inspiration on real-world applications, be sure to check out
## Usage
-The Roboflow 100 dataset is available on both [GitHub](https://github.com/roboflow/roboflow-100-benchmark) and [Roboflow Universe](https://universe.roboflow.com/roboflow-100).
+The Roboflow 100 dataset is available on both [GitHub](https://github.com/roboflow/roboflow-100-benchmark) and [Roboflow Universe](https://universe.roboflow.com/roboflow-100?ref=ultralytics).
You can access it directly from the Roboflow 100 GitHub repository. On Roboflow Universe, you can also download individual datasets by clicking the export button within each dataset.
@@ -197,7 +197,7 @@ This setup allows for extensive and varied testing of models across different re
### How do I access and download the Roboflow 100 dataset?
-The **Roboflow 100** dataset is accessible on [GitHub](https://github.com/roboflow/roboflow-100-benchmark) and [Roboflow Universe](https://universe.roboflow.com/roboflow-100). You can download the entire dataset from GitHub or select individual datasets on Roboflow Universe using the export button.
+The **Roboflow 100** dataset is accessible on [GitHub](https://github.com/roboflow/roboflow-100-benchmark) and [Roboflow Universe](https://universe.roboflow.com/roboflow-100?ref=ultralytics). You can download the entire dataset from GitHub or select individual datasets on Roboflow Universe using the export button.
### What should I include when citing the Roboflow 100 dataset in my research?
diff --git a/docs/en/datasets/obb/dota8.md b/docs/en/datasets/obb/dota8.md
index 2271978e11..0bfa723ae8 100644
--- a/docs/en/datasets/obb/dota8.md
+++ b/docs/en/datasets/obb/dota8.md
@@ -8,9 +8,9 @@ keywords: DOTA8 dataset, Ultralytics, YOLOv8, object detection, debugging, train
## Introduction
-[Ultralytics](https://ultralytics.com) DOTA8 is a small, but versatile oriented object detection dataset composed of the first 8 images of 8 images of the split DOTAv1 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging object detection models, or for experimenting with new detection approaches. With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets.
+[Ultralytics](https://www.ultralytics.com/) DOTA8 is a small but versatile oriented object detection dataset composed of the first 8 images of the split DOTAv1 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging object detection models, or for experimenting with new detection approaches. With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets.
-This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com) and [YOLOv8](https://github.com/ultralytics/ultralytics).
+This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLOv8](https://github.com/ultralytics/ultralytics).
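A minimal sketch for an oriented-bounding-box sanity run, using the bundled `dota8.yaml`:

```python
from ultralytics import YOLO

# Use the OBB model variant for oriented bounding boxes
model = YOLO("yolov8n-obb.pt")
results = model.train(data="dota8.yaml", epochs=3, imgsz=640)
```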
## Dataset YAML
diff --git a/docs/en/datasets/pose/coco8-pose.md b/docs/en/datasets/pose/coco8-pose.md
index e5b2eb8657..dfa8e30123 100644
--- a/docs/en/datasets/pose/coco8-pose.md
+++ b/docs/en/datasets/pose/coco8-pose.md
@@ -8,9 +8,9 @@ keywords: COCO8-Pose, Ultralytics, pose detection dataset, object detection, YOL
## Introduction
-[Ultralytics](https://ultralytics.com) COCO8-Pose is a small, but versatile pose detection dataset composed of the first 8 images of the COCO train 2017 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging object detection models, or for experimenting with new detection approaches. With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets.
+[Ultralytics](https://www.ultralytics.com/) COCO8-Pose is a small but versatile pose detection dataset composed of the first 8 images of the COCO train 2017 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging pose estimation models, or for experimenting with new detection approaches. With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets.
-This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com) and [YOLOv8](https://github.com/ultralytics/ultralytics).
+This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLOv8](https://github.com/ultralytics/ultralytics).
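A minimal pose-training sketch on COCO8-Pose, using the bundled `coco8-pose.yaml`:

```python
from ultralytics import YOLO

# Pose estimation uses the -pose model variant
model = YOLO("yolov8n-pose.pt")
results = model.train(data="coco8-pose.yaml", epochs=3, imgsz=640)
```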
## Dataset YAML
diff --git a/docs/en/datasets/pose/index.md b/docs/en/datasets/pose/index.md
index 57c20dcb7c..a2e7623420 100644
--- a/docs/en/datasets/pose/index.md
+++ b/docs/en/datasets/pose/index.md
@@ -101,7 +101,7 @@ This section outlines the datasets that are compatible with Ultralytics YOLO for
### COCO8-Pose
-- **Description**: [Ultralytics](https://ultralytics.com) COCO8-Pose is a small, but versatile pose detection dataset composed of the first 8 images of the COCO train 2017 set, 4 for training and 4 for validation.
+- **Description**: [Ultralytics](https://www.ultralytics.com/) COCO8-Pose is a small but versatile pose detection dataset composed of the first 8 images of the COCO train 2017 set, 4 for training and 4 for validation.
- **Label Format**: Same as Ultralytics YOLO format as described above, with keypoints for human poses.
- **Number of Classes**: 1 (Human).
- **Keypoints**: 17 keypoints including nose, eyes, ears, shoulders, elbows, wrists, hips, knees, and ankles.
@@ -111,7 +111,7 @@ This section outlines the datasets that are compatible with Ultralytics YOLO for
### Tiger-Pose
-- **Description**: [Ultralytics](https://ultralytics.com) This animal pose dataset comprises 263 images sourced from a [YouTube Video](https://www.youtube.com/watch?v=MIBAT6BGE6U&pp=ygUbVGlnZXIgd2Fsa2luZyByZWZlcmVuY2UubXA0), with 210 images allocated for training and 53 for validation.
+- **Description**: This [Ultralytics](https://www.ultralytics.com/) animal pose dataset comprises 263 images sourced from a [YouTube Video](https://www.youtube.com/watch?v=MIBAT6BGE6U&pp=ygUbVGlnZXIgd2Fsa2luZyByZWZlcmVuY2UubXA0), with 210 images allocated for training and 53 for validation.
- **Label Format**: Same as Ultralytics YOLO format as described above, with 12 keypoints for animal pose and no visible dimension.
- **Number of Classes**: 1 (Tiger).
- **Keypoints**: 12 keypoints.
diff --git a/docs/en/datasets/pose/tiger-pose.md b/docs/en/datasets/pose/tiger-pose.md
index 457e8fefe7..3e8b55665a 100644
--- a/docs/en/datasets/pose/tiger-pose.md
+++ b/docs/en/datasets/pose/tiger-pose.md
@@ -8,11 +8,11 @@ keywords: Ultralytics, Tiger-Pose, dataset, pose estimation, YOLOv8, training da
## Introduction
-[Ultralytics](https://ultralytics.com) introduces the Tiger-Pose dataset, a versatile collection designed for pose estimation tasks. This dataset comprises 263 images sourced from a [YouTube Video](https://www.youtube.com/watch?v=MIBAT6BGE6U&pp=ygUbVGlnZXIgd2Fsa2luZyByZWZlcmVuY2UubXA0), with 210 images allocated for training and 53 for validation. It serves as an excellent resource for testing and troubleshooting pose estimation algorithm.
+[Ultralytics](https://www.ultralytics.com/) introduces the Tiger-Pose dataset, a versatile collection designed for pose estimation tasks. This dataset comprises 263 images sourced from a [YouTube Video](https://www.youtube.com/watch?v=MIBAT6BGE6U&pp=ygUbVGlnZXIgd2Fsa2luZyByZWZlcmVuY2UubXA0), with 210 images allocated for training and 53 for validation. It serves as an excellent resource for testing and troubleshooting pose estimation algorithms.
Despite its manageable size of 210 training images, the Tiger-Pose dataset offers diversity, making it suitable for assessing training pipelines, identifying potential errors, and serving as a valuable preliminary step before working with larger datasets for pose estimation.
-This dataset is intended for use with [Ultralytics HUB](https://hub.ultralytics.com) and [YOLOv8](https://github.com/ultralytics/ultralytics).
+This dataset is intended for use with [Ultralytics HUB](https://hub.ultralytics.com/) and [YOLOv8](https://github.com/ultralytics/ultralytics).
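A minimal training sketch on Tiger-Pose, using the bundled `tiger-pose.yaml`; the epoch count is illustrative:

```python
from ultralytics import YOLO

model = YOLO("yolov8n-pose.pt")
results = model.train(data="tiger-pose.yaml", epochs=100, imgsz=640)
```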
@@ -101,7 +101,7 @@ The dataset has been released available under the [AGPL-3.0 License](https://git
### What is the Ultralytics Tiger-Pose dataset used for?
-The Ultralytics Tiger-Pose dataset is designed for pose estimation tasks, consisting of 263 images sourced from a [YouTube video](https://www.youtube.com/watch?v=MIBAT6BGE6U&pp=ygUbVGlnZXIgd2Fsa2luZyByZWZlcmVuY2UubXA0). The dataset is divided into 210 training images and 53 validation images. It is particularly useful for testing, training, and refining pose estimation algorithms using [Ultralytics HUB](https://hub.ultralytics.com) and [YOLOv8](https://github.com/ultralytics/ultralytics).
+The Ultralytics Tiger-Pose dataset is designed for pose estimation tasks, consisting of 263 images sourced from a [YouTube video](https://www.youtube.com/watch?v=MIBAT6BGE6U&pp=ygUbVGlnZXIgd2Fsa2luZyByZWZlcmVuY2UubXA0). The dataset is divided into 210 training images and 53 validation images. It is particularly useful for testing, training, and refining pose estimation algorithms using [Ultralytics HUB](https://hub.ultralytics.com/) and [YOLOv8](https://github.com/ultralytics/ultralytics).
### How do I train a YOLOv8 model on the Tiger-Pose dataset?
@@ -161,4 +161,4 @@ To perform inference using a YOLOv8 model trained on the Tiger-Pose dataset, you
### What are the benefits of using the Tiger-Pose dataset for pose estimation?
-The Tiger-Pose dataset, despite its manageable size of 210 images for training, provides a diverse collection of images that are ideal for testing pose estimation pipelines. The dataset helps identify potential errors and acts as a preliminary step before working with larger datasets. Additionally, the dataset supports the training and refinement of pose estimation algorithms using advanced tools like [Ultralytics HUB](https://hub.ultralytics.com) and [YOLOv8](https://github.com/ultralytics/ultralytics), enhancing model performance and accuracy.
+The Tiger-Pose dataset, despite its manageable size of 210 images for training, provides a diverse collection of images that are ideal for testing pose estimation pipelines. The dataset helps identify potential errors and acts as a preliminary step before working with larger datasets. Additionally, the dataset supports the training and refinement of pose estimation algorithms using advanced tools like [Ultralytics HUB](https://hub.ultralytics.com/) and [YOLOv8](https://github.com/ultralytics/ultralytics), enhancing model performance and accuracy.
diff --git a/docs/en/datasets/segment/carparts-seg.md b/docs/en/datasets/segment/carparts-seg.md
index d5799954be..f0d020ff46 100644
--- a/docs/en/datasets/segment/carparts-seg.md
+++ b/docs/en/datasets/segment/carparts-seg.md
@@ -6,7 +6,7 @@ keywords: Carparts Segmentation Dataset, Roboflow, computer vision, automotive A
# Roboflow Universe Carparts Segmentation Dataset
-The [Roboflow](https://roboflow.com/?ref=ultralytics) [Carparts Segmentation Dataset](https://universe.roboflow.com/gianmarco-russo-vt9xr/car-seg-un1pm) is a curated collection of images and videos designed for computer vision applications, specifically focusing on segmentation tasks related to car parts. This dataset provides a diverse set of visuals captured from multiple perspectives, offering valuable annotated examples for training and testing segmentation models.
+The [Roboflow](https://roboflow.com/?ref=ultralytics) [Carparts Segmentation Dataset](https://universe.roboflow.com/gianmarco-russo-vt9xr/car-seg-un1pm?ref=ultralytics) is a curated collection of images and videos designed for computer vision applications, specifically focusing on segmentation tasks related to car parts. This dataset provides a diverse set of visuals captured from multiple perspectives, offering valuable annotated examples for training and testing segmentation models.
Whether you're working on automotive research, developing AI solutions for vehicle maintenance, or exploring computer vision applications, the Carparts Segmentation Dataset serves as a valuable resource for enhancing accuracy and efficiency in your projects.
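A minimal segmentation-training sketch on this dataset, assuming the bundled `carparts-seg.yaml` configuration:

```python
from ultralytics import YOLO

# Instance segmentation uses the -seg model variant
model = YOLO("yolov8n-seg.pt")
results = model.train(data="carparts-seg.yaml", epochs=100, imgsz=640)
```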
@@ -100,13 +100,13 @@ If you integrate the Carparts Segmentation dataset into your research or develop
}
```
-We extend our thanks to the Roboflow team for their dedication in developing and managing the Carparts Segmentation dataset, a valuable resource for vehicle maintenance and research projects. For additional details about the Carparts Segmentation dataset and its creators, please visit the [CarParts Segmentation Dataset Page](https://universe.roboflow.com/gianmarco-russo-vt9xr/car-seg-un1pm).
+We extend our thanks to the Roboflow team for their dedication to developing and managing the Carparts Segmentation dataset, a valuable resource for vehicle maintenance and research projects. For additional details about the Carparts Segmentation dataset and its creators, please visit the [CarParts Segmentation Dataset Page](https://universe.roboflow.com/gianmarco-russo-vt9xr/car-seg-un1pm?ref=ultralytics).
## FAQ
### What is the Roboflow Carparts Segmentation Dataset?
-The [Roboflow Carparts Segmentation Dataset](https://universe.roboflow.com/gianmarco-russo-vt9xr/car-seg-un1pm) is a curated collection of images and videos specifically designed for car part segmentation tasks in computer vision. This dataset includes a diverse range of visuals captured from multiple perspectives, making it an invaluable resource for training and testing segmentation models for automotive applications.
+The [Roboflow Carparts Segmentation Dataset](https://universe.roboflow.com/gianmarco-russo-vt9xr/car-seg-un1pm?ref=ultralytics) is a curated collection of images and videos specifically designed for car part segmentation tasks in computer vision. This dataset includes a diverse range of visuals captured from multiple perspectives, making it an invaluable resource for training and testing segmentation models for automotive applications.
### How can I use the Carparts Segmentation Dataset with Ultralytics YOLOv8?
@@ -157,4 +157,4 @@ The dataset configuration file for the Carparts Segmentation dataset, `carparts-
The Carparts Segmentation Dataset provides rich, annotated data essential for developing high-accuracy segmentation models in automotive computer vision. This dataset's diversity and detailed annotations improve model training, making it ideal for applications like vehicle maintenance automation, enhancing vehicle safety systems, and supporting autonomous driving technologies. Partnering with a robust dataset accelerates AI development and ensures better model performance.
-For more details, visit the [CarParts Segmentation Dataset Page](https://universe.roboflow.com/gianmarco-russo-vt9xr/car-seg-un1pm).
+For more details, visit the [CarParts Segmentation Dataset Page](https://universe.roboflow.com/gianmarco-russo-vt9xr/car-seg-un1pm?ref=ultralytics).
diff --git a/docs/en/datasets/segment/coco8-seg.md b/docs/en/datasets/segment/coco8-seg.md
index f22d6a68a3..e4aa6bef84 100644
--- a/docs/en/datasets/segment/coco8-seg.md
+++ b/docs/en/datasets/segment/coco8-seg.md
@@ -8,9 +8,9 @@ keywords: COCO8-Seg, Ultralytics, segmentation dataset, YOLOv8, COCO 2017, model
## Introduction
-[Ultralytics](https://ultralytics.com) COCO8-Seg is a small, but versatile instance segmentation dataset composed of the first 8 images of the COCO train 2017 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging segmentation models, or for experimenting with new detection approaches. With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets.
+[Ultralytics](https://www.ultralytics.com/) COCO8-Seg is a small but versatile instance segmentation dataset composed of the first 8 images of the COCO train 2017 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging segmentation models, or for experimenting with new segmentation approaches. With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets.
-This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com) and [YOLOv8](https://github.com/ultralytics/ultralytics).
+This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLOv8](https://github.com/ultralytics/ultralytics).
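A minimal sketch for a segmentation sanity run on COCO8-Seg, using the bundled `coco8-seg.yaml`:

```python
from ultralytics import YOLO

model = YOLO("yolov8n-seg.pt")
results = model.train(data="coco8-seg.yaml", epochs=3, imgsz=640)
```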
## Dataset YAML
@@ -82,7 +82,7 @@ We would like to acknowledge the COCO Consortium for creating and maintaining th
### What is the COCO8-Seg dataset, and how is it used in Ultralytics YOLOv8?
-The **COCO8-Seg dataset** is a compact instance segmentation dataset by Ultralytics, consisting of the first 8 images from the COCO train 2017 set—4 images for training and 4 for validation. This dataset is tailored for testing and debugging segmentation models or experimenting with new detection methods. It is particularly useful with Ultralytics [YOLOv8](https://github.com/ultralytics/ultralytics) and [HUB](https://hub.ultralytics.com) for rapid iteration and pipeline error-checking before scaling to larger datasets. For detailed usage, refer to the model [Training](../../modes/train.md) page.
+The **COCO8-Seg dataset** is a compact instance segmentation dataset by Ultralytics, consisting of the first 8 images from the COCO train 2017 set—4 images for training and 4 for validation. This dataset is tailored for testing and debugging segmentation models or experimenting with new detection methods. It is particularly useful with Ultralytics [YOLOv8](https://github.com/ultralytics/ultralytics) and [HUB](https://hub.ultralytics.com/) for rapid iteration and pipeline error-checking before scaling to larger datasets. For detailed usage, refer to the model [Training](../../modes/train.md) page.
### How can I train a YOLOv8n-seg model using the COCO8-Seg dataset?
diff --git a/docs/en/datasets/segment/crack-seg.md b/docs/en/datasets/segment/crack-seg.md
index 5fa99dfbbf..32113dfc6d 100644
--- a/docs/en/datasets/segment/crack-seg.md
+++ b/docs/en/datasets/segment/crack-seg.md
@@ -6,7 +6,7 @@ keywords: Roboflow, Crack Segmentation Dataset, Ultralytics, transportation safe
# Roboflow Universe Crack Segmentation Dataset
-The [Roboflow](https://roboflow.com/?ref=ultralytics) [Crack Segmentation Dataset](https://universe.roboflow.com/university-bswxt/crack-bphdr) stands out as an extensive resource designed specifically for individuals involved in transportation and public safety studies. It is equally beneficial for those working on the development of self-driving car models or simply exploring computer vision applications for recreational purposes.
+The [Roboflow](https://roboflow.com/?ref=ultralytics) [Crack Segmentation Dataset](https://universe.roboflow.com/university-bswxt/crack-bphdr?ref=ultralytics) stands out as an extensive resource designed specifically for individuals involved in transportation and public safety studies. It is equally beneficial for those working on the development of self-driving car models or simply exploring computer vision applications for recreational purposes.
Comprising a total of 4029 static images captured from diverse road and wall scenarios, this dataset emerges as a valuable asset for tasks related to crack segmentation. Whether you are delving into the intricacies of transportation research or seeking to enhance the accuracy of your self-driving car models, this dataset provides a rich and varied collection of images to support your endeavors.
@@ -90,13 +90,13 @@ If you incorporate the crack segmentation dataset into your research or developm
}
```
-We would like to acknowledge the Roboflow team for creating and maintaining the Crack Segmentation dataset as a valuable resource for the road safety and research projects. For more information about the Crack segmentation dataset and its creators, visit the [Crack Segmentation Dataset Page](https://universe.roboflow.com/university-bswxt/crack-bphdr).
+We would like to acknowledge the Roboflow team for creating and maintaining the Crack Segmentation dataset as a valuable resource for road safety and research projects. For more information about the Crack Segmentation dataset and its creators, visit the [Crack Segmentation Dataset Page](https://universe.roboflow.com/university-bswxt/crack-bphdr?ref=ultralytics).
## FAQ
### What is the Roboflow Crack Segmentation Dataset?
-The [Roboflow Crack Segmentation Dataset](https://universe.roboflow.com/university-bswxt/crack-bphdr) is a comprehensive collection of 4029 static images designed specifically for transportation and public safety studies. It is ideal for tasks such as self-driving car model development and infrastructure maintenance. The dataset includes training, testing, and validation sets, aiding in accurate crack detection and segmentation.
+The [Roboflow Crack Segmentation Dataset](https://universe.roboflow.com/university-bswxt/crack-bphdr?ref=ultralytics) is a comprehensive collection of 4029 static images designed specifically for transportation and public safety studies. It is ideal for tasks such as self-driving car model development and infrastructure maintenance. The dataset includes training, testing, and validation sets, aiding in accurate crack detection and segmentation.
### How do I train a model using the Crack Segmentation Dataset with Ultralytics YOLOv8?
diff --git a/docs/en/datasets/segment/package-seg.md b/docs/en/datasets/segment/package-seg.md
index bf88410fb6..2aec99a21f 100644
--- a/docs/en/datasets/segment/package-seg.md
+++ b/docs/en/datasets/segment/package-seg.md
@@ -6,7 +6,7 @@ keywords: Roboflow, Package Segmentation Dataset, computer vision, package ident
# Roboflow Universe Package Segmentation Dataset
-The [Roboflow](https://roboflow.com/?ref=ultralytics) [Package Segmentation Dataset](https://universe.roboflow.com/factorypackage/factory_package) is a curated collection of images specifically tailored for tasks related to package segmentation in the field of computer vision. This dataset is designed to assist researchers, developers, and enthusiasts working on projects related to package identification, sorting, and handling.
+The [Roboflow](https://roboflow.com/?ref=ultralytics) [Package Segmentation Dataset](https://universe.roboflow.com/factorypackage/factory_package?ref=ultralytics) is a curated collection of images specifically tailored for tasks related to package segmentation in the field of computer vision. This dataset is designed to assist researchers, developers, and enthusiasts working on projects related to package identification, sorting, and handling.
Containing a diverse set of images showcasing various packages in different contexts and environments, the dataset serves as a valuable resource for training and evaluating segmentation models. Whether you are engaged in logistics, warehouse automation, or any application requiring precise package analysis, the Package Segmentation Dataset provides a targeted and comprehensive set of images to enhance the performance of your computer vision algorithms.
@@ -89,13 +89,13 @@ If you integrate the crack segmentation dataset into your research or developmen
}
```
-We express our gratitude to the Roboflow team for their efforts in creating and maintaining the Package Segmentation dataset, a valuable asset for logistics and research projects. For additional details about the Package Segmentation dataset and its creators, please visit the [Package Segmentation Dataset Page](https://universe.roboflow.com/factorypackage/factory_package).
+We express our gratitude to the Roboflow team for their efforts in creating and maintaining the Package Segmentation dataset, a valuable asset for logistics and research projects. For additional details about the Package Segmentation dataset and its creators, please visit the [Package Segmentation Dataset Page](https://universe.roboflow.com/factorypackage/factory_package?ref=ultralytics).
## FAQ
### What is the Roboflow Package Segmentation Dataset and how can it help in computer vision projects?
-The [Roboflow Package Segmentation Dataset](https://universe.roboflow.com/factorypackage/factory_package) is a curated collection of images tailored for tasks involving package segmentation. It includes diverse images of packages in various contexts, making it invaluable for training and evaluating segmentation models. This dataset is particularly useful for applications in logistics, warehouse automation, and any project requiring precise package analysis. It helps optimize logistics and enhance vision models for accurate package identification and sorting.
+The [Roboflow Package Segmentation Dataset](https://universe.roboflow.com/factorypackage/factory_package?ref=ultralytics) is a curated collection of images tailored for tasks involving package segmentation. It includes diverse images of packages in various contexts, making it invaluable for training and evaluating segmentation models. This dataset is particularly useful for applications in logistics, warehouse automation, and any project requiring precise package analysis. It helps optimize logistics and enhance vision models for accurate package identification and sorting.
### How do I train an Ultralytics YOLOv8 model on the Package Segmentation Dataset?
diff --git a/docs/en/guides/data-collection-and-annotation.md b/docs/en/guides/data-collection-and-annotation.md
index 2a7cb149f8..7939d12a42 100644
--- a/docs/en/guides/data-collection-and-annotation.md
+++ b/docs/en/guides/data-collection-and-annotation.md
@@ -137,7 +137,7 @@ Bouncing your ideas and queries off other computer vision enthusiasts can help a
### Where to Find Help and Support
- **GitHub Issues:** Visit the YOLOv8 GitHub repository and use the [Issues tab](https://github.com/ultralytics/ultralytics/issues) to raise questions, report bugs, and suggest features. The community and maintainers are there to help with any issues you face.
-- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://ultralytics.com/discord/) to connect with other users and developers, get support, share knowledge, and brainstorm ideas.
+- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://discord.com/invite/ultralytics) to connect with other users and developers, get support, share knowledge, and brainstorm ideas.
### Official Documentation
diff --git a/docs/en/guides/defining-project-goals.md b/docs/en/guides/defining-project-goals.md
index 3282cfe2d5..fcd32f12f2 100644
--- a/docs/en/guides/defining-project-goals.md
+++ b/docs/en/guides/defining-project-goals.md
@@ -115,7 +115,7 @@ Connecting with other computer vision enthusiasts can be incredibly helpful for
### Community Support Channels
- **GitHub Issues:** Head over to the YOLOv8 GitHub repository. You can use the [Issues tab](https://github.com/ultralytics/ultralytics/issues) to raise questions, report bugs, and suggest features. The community and maintainers can assist with specific problems you encounter.
-- **Ultralytics Discord Server:** Become part of the [Ultralytics Discord server](https://ultralytics.com/discord/). Connect with fellow users and developers, seek support, exchange knowledge, and discuss ideas.
+- **Ultralytics Discord Server:** Become part of the [Ultralytics Discord server](https://discord.com/invite/ultralytics). Connect with fellow users and developers, seek support, exchange knowledge, and discuss ideas.
### Comprehensive Guides and Documentation
diff --git a/docs/en/guides/docker-quickstart.md b/docs/en/guides/docker-quickstart.md
index 90b86ed6d4..6d08fac0b5 100644
--- a/docs/en/guides/docker-quickstart.md
+++ b/docs/en/guides/docker-quickstart.md
@@ -10,7 +10,7 @@ keywords: Ultralytics, Docker, Quickstart Guide, CPU support, GPU support, NVIDI
-This guide serves as a comprehensive introduction to setting up a Docker environment for your Ultralytics projects. [Docker](https://docker.com/) is a platform for developing, shipping, and running applications in containers. It is particularly beneficial for ensuring that the software will always run the same, regardless of where it's deployed. For more details, visit the Ultralytics Docker repository on [Docker Hub](https://hub.docker.com/r/ultralytics/ultralytics).
+This guide serves as a comprehensive introduction to setting up a Docker environment for your Ultralytics projects. [Docker](https://www.docker.com/) is a platform for developing, shipping, and running applications in containers. It is particularly beneficial for ensuring that the software will always run the same, regardless of where it's deployed. For more details, visit the Ultralytics Docker repository on [Docker Hub](https://hub.docker.com/r/ultralytics/ultralytics).
[](https://hub.docker.com/r/ultralytics/ultralytics)
[](https://hub.docker.com/r/ultralytics/ultralytics)
@@ -27,7 +27,7 @@ This guide serves as a comprehensive introduction to setting up a Docker environ
## Prerequisites
-- Make sure Docker is installed on your system. If not, you can download and install it from [Docker's website](https://www.docker.com/products/docker-desktop).
+- Make sure Docker is installed on your system. If not, you can download and install it from [Docker's website](https://www.docker.com/products/docker-desktop/).
- Ensure that your system has an NVIDIA GPU and NVIDIA drivers are installed.
---
diff --git a/docs/en/guides/hyperparameter-tuning.md b/docs/en/guides/hyperparameter-tuning.md
index 0915956572..ec95d2b8e1 100644
--- a/docs/en/guides/hyperparameter-tuning.md
+++ b/docs/en/guides/hyperparameter-tuning.md
@@ -204,7 +204,7 @@ The hyperparameter tuning process in Ultralytics YOLO is simplified yet powerful
2. [YOLOv5 Hyperparameter Evolution Guide](../yolov5/tutorials/hyperparameter_evolution.md)
3. [Efficient Hyperparameter Tuning with Ray Tune and YOLOv8](../integrations/ray-tune.md)
-For deeper insights, you can explore the `Tuner` class source code and accompanying documentation. Should you have any questions, feature requests, or need further assistance, feel free to reach out to us on [GitHub](https://github.com/ultralytics/ultralytics/issues/new/choose) or [Discord](https://ultralytics.com/discord).
+For deeper insights, you can explore the `Tuner` class source code and accompanying documentation. Should you have any questions or feature requests, or need further assistance, feel free to reach out to us on [GitHub](https://github.com/ultralytics/ultralytics/issues/new/choose) or [Discord](https://discord.com/invite/ultralytics).
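A minimal sketch of the `Tuner`-driven workflow via `model.tune()`; the argument values are illustrative:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")

# Evolve hyperparameters over 300 trials of 30 epochs each, skipping
# plots, checkpoints, and final-epoch validation to keep trials fast
model.tune(data="coco8.yaml", epochs=30, iterations=300, optimizer="AdamW", plots=False, save=False, val=False)
```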
## FAQ
diff --git a/docs/en/guides/model-deployment-options.md b/docs/en/guides/model-deployment-options.md
index 713a3dabf8..353dcabd68 100644
--- a/docs/en/guides/model-deployment-options.md
+++ b/docs/en/guides/model-deployment-options.md
@@ -288,7 +288,7 @@ When you're getting started with YOLOv8, having a helpful community and support
- **GitHub Discussions:** The YOLOv8 repository on GitHub has a "Discussions" section where you can ask questions, report issues, and suggest improvements.
-- **Ultralytics Discord Server:** Ultralytics has a [Discord server](https://ultralytics.com/discord/) where you can interact with other users and developers.
+- **Ultralytics Discord Server:** Ultralytics has a [Discord server](https://discord.com/invite/ultralytics) where you can interact with other users and developers.
### Official Documentation and Resources
diff --git a/docs/en/guides/model-deployment-practices.md b/docs/en/guides/model-deployment-practices.md
index fae69cbd44..60d95963be 100644
--- a/docs/en/guides/model-deployment-practices.md
+++ b/docs/en/guides/model-deployment-practices.md
@@ -122,7 +122,7 @@ Being part of a community of computer vision enthusiasts can help you solve prob
### Community Resources
- **GitHub Issues:** Explore the [YOLOv8 GitHub repository](https://github.com/ultralytics/ultralytics/issues) and use the Issues tab to ask questions, report bugs, and suggest new features. The community and maintainers are very active and ready to help.
-- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://ultralytics.com/discord/) to chat with other users and developers, get support, and share your experiences.
+- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://discord.com/invite/ultralytics) to chat with other users and developers, get support, and share your experiences.
### Official Documentation
diff --git a/docs/en/guides/model-evaluation-insights.md b/docs/en/guides/model-evaluation-insights.md
index feb31ad353..08c3da64e1 100644
--- a/docs/en/guides/model-evaluation-insights.md
+++ b/docs/en/guides/model-evaluation-insights.md
@@ -128,7 +128,7 @@ Sharing your ideas and questions with other computer vision enthusiasts can insp
### Finding Help and Support
- **GitHub Issues:** Explore the YOLOv8 GitHub repository and use the [Issues tab](https://github.com/ultralytics/ultralytics/issues) to ask questions, report bugs, and suggest features. The community and maintainers are available to assist with any issues you encounter.
-- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://ultralytics.com/discord/) to connect with other users and developers, get support, share knowledge, and brainstorm ideas.
+- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://discord.com/invite/ultralytics) to connect with other users and developers, get support, share knowledge, and brainstorm ideas.
### Official Documentation
diff --git a/docs/en/guides/model-monitoring-and-maintenance.md b/docs/en/guides/model-monitoring-and-maintenance.md
index 7864c66c98..ab5e417a3b 100644
--- a/docs/en/guides/model-monitoring-and-maintenance.md
+++ b/docs/en/guides/model-monitoring-and-maintenance.md
@@ -124,7 +124,7 @@ Joining a community of computer vision enthusiasts can help you solve problems a
### Community Resources
- **GitHub Issues:** Check out the [YOLOv8 GitHub repository](https://github.com/ultralytics/ultralytics/issues) and use the Issues tab to ask questions, report bugs, and suggest new features. The community and maintainers are highly active and supportive.
-- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://ultralytics.com/discord/) to chat with other users and developers, get support, and share your experiences.
+- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://discord.com/invite/ultralytics) to chat with other users and developers, get support, and share your experiences.
### Official Documentation
diff --git a/docs/en/guides/model-testing.md b/docs/en/guides/model-testing.md
index 718d1d1115..71ff69b0be 100644
--- a/docs/en/guides/model-testing.md
+++ b/docs/en/guides/model-testing.md
@@ -129,7 +129,7 @@ Becoming part of a community of computer vision enthusiasts can aid in solving p
### Community Resources
- **GitHub Issues:** Explore the [YOLOv8 GitHub repository](https://github.com/ultralytics/ultralytics/issues) and use the Issues tab to ask questions, report bugs, and suggest new features. The community and maintainers are very active and ready to help.
-- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://ultralytics.com/discord/) to chat with other users and developers, get support, and share your experiences.
+- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://discord.com/invite/ultralytics) to chat with other users and developers, get support, and share your experiences.
### Official Documentation
diff --git a/docs/en/guides/model-training-tips.md b/docs/en/guides/model-training-tips.md
index 20aaefa725..2efa53e70d 100644
--- a/docs/en/guides/model-training-tips.md
+++ b/docs/en/guides/model-training-tips.md
@@ -147,7 +147,7 @@ Being part of a community of computer vision enthusiasts can help you solve prob
### Community Resources
- **GitHub Issues:** Visit the [YOLOv8 GitHub repository](https://github.com/ultralytics/ultralytics/issues) and use the Issues tab to ask questions, report bugs, and suggest new features. The community and maintainers are very active and ready to help.
-- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://ultralytics.com/discord/) to chat with other users and developers, get support, and share your experiences.
+- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://discord.com/invite/ultralytics) to chat with other users and developers, get support, and share your experiences.
### Official Documentation
diff --git a/docs/en/guides/nvidia-jetson.md b/docs/en/guides/nvidia-jetson.md
index 504bfa9096..7e6bd41181 100644
--- a/docs/en/guides/nvidia-jetson.md
+++ b/docs/en/guides/nvidia-jetson.md
@@ -54,7 +54,7 @@ The first step after getting your hands on an NVIDIA Jetson device is to flash N
1. If you own an official NVIDIA Development Kit such as the Jetson Orin Nano Developer Kit, you can [download an image and prepare an SD card with JetPack for booting the device](https://developer.nvidia.com/embedded/learn/get-started-jetson-orin-nano-devkit).
2. If you own any other NVIDIA Development Kit, you can [flash JetPack to the device using SDK Manager](https://docs.nvidia.com/sdk-manager/install-with-sdkm-jetson/index.html).
-3. If you own a Seeed Studio reComputer J4012 device, you can [flash JetPack to the included SSD](https://wiki.seeedstudio.com/reComputer_J4012_Flash_Jetpack) and if you own a Seeed Studio reComputer J1020 v2 device, you can [flash JetPack to the eMMC/ SSD](https://wiki.seeedstudio.com/reComputer_J2021_J202_Flash_Jetpack).
+3. If you own a Seeed Studio reComputer J4012 device, you can [flash JetPack to the included SSD](https://wiki.seeedstudio.com/reComputer_J4012_Flash_Jetpack/), and if you own a Seeed Studio reComputer J1020 v2 device, you can [flash JetPack to the eMMC/SSD](https://wiki.seeedstudio.com/reComputer_J2021_J202_Flash_Jetpack/).
4. If you own any other third-party device powered by the NVIDIA Jetson module, we recommend following [command-line flashing](https://docs.nvidia.com/jetson/archives/r35.5.0/DeveloperGuide/IN/QuickStart.html).
!!! Note
diff --git a/docs/en/guides/optimizing-openvino-latency-vs-throughput-modes.md b/docs/en/guides/optimizing-openvino-latency-vs-throughput-modes.md
index a9acfb123d..92c840f3a4 100644
--- a/docs/en/guides/optimizing-openvino-latency-vs-throughput-modes.md
+++ b/docs/en/guides/optimizing-openvino-latency-vs-throughput-modes.md
@@ -115,7 +115,7 @@ Balancing latency and throughput optimization requires understanding your applic
- **Latency Optimization:** Ideal for real-time applications requiring immediate responses (e.g., consumer-grade apps).
- **Throughput Optimization:** Best for scenarios with many concurrent inferences, maximizing resource use (e.g., large-scale deployments).
-Using OpenVINO's high-level performance hints and multi-device modes can help strike the right balance. Choose the appropriate [OpenVINO Performance hints](https://docs.ultralytics.com/integrations/openvino#openvino-performance-hints) based on your specific requirements.
+Using OpenVINO's high-level performance hints and multi-device modes can help strike the right balance. Choose the appropriate [OpenVINO Performance hints](https://docs.ultralytics.com/integrations/openvino/#openvino-performance-hints) based on your specific requirements.
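As a sketch of how a hint is applied, assuming a YOLOv8 model already exported to an OpenVINO IR directory (the path below is illustrative):

```python
import openvino as ov

core = ov.Core()
ov_model = core.read_model("yolov8n_openvino_model/yolov8n.xml")

# Choose one hint per deployment target: LATENCY for single-stream
# responsiveness, THROUGHPUT for many concurrent inferences
latency_model = core.compile_model(ov_model, "CPU", {"PERFORMANCE_HINT": "LATENCY"})
throughput_model = core.compile_model(ov_model, "CPU", {"PERFORMANCE_HINT": "THROUGHPUT"})
```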
### Can I use Ultralytics YOLO models with other AI frameworks besides OpenVINO?
diff --git a/docs/en/guides/preprocessing_annotated_data.md b/docs/en/guides/preprocessing_annotated_data.md
index ef771a28ae..36fcf9f9c1 100644
--- a/docs/en/guides/preprocessing_annotated_data.md
+++ b/docs/en/guides/preprocessing_annotated_data.md
@@ -133,7 +133,7 @@ Having discussions about your project with other computer vision enthusiasts can
### Channels to Connect with the Community
- **GitHub Issues:** Visit the YOLOv8 GitHub repository and use the [Issues tab](https://github.com/ultralytics/ultralytics/issues) to raise questions, report bugs, and suggest features. The community and maintainers are there to help with any issues you face.
-- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://ultralytics.com/discord/) to connect with other users and developers, get support, share knowledge, and brainstorm ideas.
+- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://discord.com/invite/ultralytics) to connect with other users and developers, get support, share knowledge, and brainstorm ideas.
### Official Documentation
diff --git a/docs/en/guides/raspberry-pi.md b/docs/en/guides/raspberry-pi.md
index 1d4b1f9dff..997c08547b 100644
--- a/docs/en/guides/raspberry-pi.md
+++ b/docs/en/guides/raspberry-pi.md
@@ -6,7 +6,7 @@ keywords: Ultralytics, YOLOv8, Raspberry Pi, setup, guide, benchmarks, computer
# Quick Start Guide: Raspberry Pi with Ultralytics YOLOv8
-This comprehensive guide provides a detailed walkthrough for deploying Ultralytics YOLOv8 on [Raspberry Pi](https://www.raspberrypi.com) devices. Additionally, it showcases performance benchmarks to demonstrate the capabilities of YOLOv8 on these small and powerful devices.
+This comprehensive guide provides a detailed walkthrough for deploying Ultralytics YOLOv8 on [Raspberry Pi](https://www.raspberrypi.com/) devices. Additionally, it showcases performance benchmarks to demonstrate the capabilities of YOLOv8 on these small and powerful devices.
diff --git a/docs/en/guides/steps-of-a-cv-project.md b/docs/en/guides/steps-of-a-cv-project.md
index 3b98171d30..ec9b84ff2f 100644
--- a/docs/en/guides/steps-of-a-cv-project.md
+++ b/docs/en/guides/steps-of-a-cv-project.md
@@ -189,7 +189,7 @@ Connecting with a community of computer vision enthusiasts can help you tackle a
### Community Resources
- **GitHub Issues:** Check out the [YOLOv8 GitHub repository](https://github.com/ultralytics/ultralytics/issues) and use the Issues tab to ask questions, report bugs, and suggest new features. The active community and maintainers are there to help with specific issues.
-- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://ultralytics.com/discord/) to interact with other users and developers, get support, and share insights.
+- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://discord.com/invite/ultralytics) to interact with other users and developers, get support, and share insights.
### Official Documentation
diff --git a/docs/en/guides/streamlit-live-inference.md b/docs/en/guides/streamlit-live-inference.md
index d6a356136d..24388eb302 100644
--- a/docs/en/guides/streamlit-live-inference.md
+++ b/docs/en/guides/streamlit-live-inference.md
@@ -86,7 +86,7 @@ Engage with the community to learn more, troubleshoot issues, and share your pro
### Where to Find Help and Support
- **GitHub Issues:** Visit the [Ultralytics GitHub repository](https://github.com/ultralytics/ultralytics/issues) to raise questions, report bugs, and suggest features.
-- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://ultralytics.com/discord/) to connect with other users and developers, get support, share knowledge, and brainstorm ideas.
+- **Ultralytics Discord Server:** Join the [Ultralytics Discord server](https://discord.com/invite/ultralytics) to connect with other users and developers, get support, share knowledge, and brainstorm ideas.
### Official Documentation
diff --git a/docs/en/guides/triton-inference-server.md b/docs/en/guides/triton-inference-server.md
index dc69e9f390..1879bf78f3 100644
--- a/docs/en/guides/triton-inference-server.md
+++ b/docs/en/guides/triton-inference-server.md
@@ -6,7 +6,7 @@ keywords: Triton Inference Server, YOLOv8, Ultralytics, NVIDIA, deep learning, A
# Triton Inference Server with Ultralytics YOLOv8
-The [Triton Inference Server](https://developer.nvidia.com/nvidia-triton-inference-server) (formerly known as TensorRT Inference Server) is an open-source software solution developed by NVIDIA. It provides a cloud inference solution optimized for NVIDIA GPUs. Triton simplifies the deployment of AI models at scale in production. Integrating Ultralytics YOLOv8 with Triton Inference Server allows you to deploy scalable, high-performance deep learning inference workloads. This guide provides steps to set up and test the integration.
+The [Triton Inference Server](https://developer.nvidia.com/triton-inference-server) (formerly known as TensorRT Inference Server) is an open-source software solution developed by NVIDIA. It provides a cloud inference solution optimized for NVIDIA GPUs. Triton simplifies the deployment of AI models at scale in production. Integrating Ultralytics YOLOv8 with Triton Inference Server allows you to deploy scalable, high-performance deep learning inference workloads. This guide provides steps to set up and test the integration.
@@ -147,7 +147,7 @@ By following the above steps, you can deploy and run Ultralytics YOLOv8 models e
### How do I set up Ultralytics YOLOv8 with NVIDIA Triton Inference Server?
-Setting up [Ultralytics YOLOv8](https://docs.ultralytics.com/models/yolov8) with [NVIDIA Triton Inference Server](https://developer.nvidia.com/nvidia-triton-inference-server) involves a few key steps:
+Setting up [Ultralytics YOLOv8](https://docs.ultralytics.com/models/yolov8) with [NVIDIA Triton Inference Server](https://developer.nvidia.com/triton-inference-server) involves a few key steps:
1. **Export YOLOv8 to ONNX format**:
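A minimal export sketch; `dynamic=True` keeps input shapes flexible for Triton's dynamic batching:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
onnx_file = model.export(format="onnx", dynamic=True)
```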
@@ -213,7 +213,7 @@ This setup can help you efficiently deploy YOLOv8 models at scale on Triton Infe
### What benefits does using Ultralytics YOLOv8 with NVIDIA Triton Inference Server offer?
-Integrating [Ultralytics YOLOv8](../models/yolov8.md) with [NVIDIA Triton Inference Server](https://developer.nvidia.com/nvidia-triton-inference-server) provides several advantages:
+Integrating [Ultralytics YOLOv8](../models/yolov8.md) with [NVIDIA Triton Inference Server](https://developer.nvidia.com/triton-inference-server) provides several advantages:
- **Scalable AI Inference**: Triton allows serving multiple models from a single server instance, supporting dynamic model loading and unloading, making it highly scalable for diverse AI workloads.
- **High Performance**: Optimized for NVIDIA GPUs, Triton Inference Server ensures high-speed inference operations, perfect for real-time applications such as object detection.
@@ -223,7 +223,7 @@ For detailed instructions on setting up and running YOLOv8 with Triton, you can
### Why should I export my YOLOv8 model to ONNX format before using Triton Inference Server?
-Using ONNX (Open Neural Network Exchange) format for your [Ultralytics YOLOv8](../models/yolov8.md) model before deploying it on [NVIDIA Triton Inference Server](https://developer.nvidia.com/nvidia-triton-inference-server) offers several key benefits:
+Using ONNX (Open Neural Network Exchange) format for your [Ultralytics YOLOv8](../models/yolov8.md) model before deploying it on [NVIDIA Triton Inference Server](https://developer.nvidia.com/triton-inference-server) offers several key benefits:
- **Interoperability**: ONNX format supports transfer between different deep learning frameworks (such as PyTorch, TensorFlow), ensuring broader compatibility.
- **Optimization**: Many deployment environments, including Triton, optimize for ONNX, enabling faster inference and better performance.
@@ -242,7 +242,7 @@ You can follow the steps in the [exporting guide](../modes/export.md) to complet
### Can I run inference using the Ultralytics YOLOv8 model on Triton Inference Server?
-Yes, you can run inference using the [Ultralytics YOLOv8](../models/yolov8.md) model on [NVIDIA Triton Inference Server](https://developer.nvidia.com/nvidia-triton-inference-server). Once your model is set up in the Triton Model Repository and the server is running, you can load and run inference on your model as follows:
+Yes, you can run inference using the [Ultralytics YOLOv8](../models/yolov8.md) model on [NVIDIA Triton Inference Server](https://developer.nvidia.com/triton-inference-server). Once your model is set up in the Triton Model Repository and the server is running, you can load and run inference on your model as follows:
```python
from ultralytics import YOLO
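
# A sketch assuming a Triton server is serving a model named "yolo" on
# port 8000; the URL, model name, and image path below are illustrative
model = YOLO("http://localhost:8000/yolo", task="detect")

# Run inference against the remote Triton model
results = model("path/to/image.jpg")
```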
diff --git a/docs/en/guides/yolo-common-issues.md b/docs/en/guides/yolo-common-issues.md
index 849b44c42a..77351eaa06 100644
--- a/docs/en/guides/yolo-common-issues.md
+++ b/docs/en/guides/yolo-common-issues.md
@@ -121,7 +121,7 @@ You can access these metrics from the training logs or by using tools like Tenso
- [TensorBoard](https://www.tensorflow.org/tensorboard): TensorBoard is a popular choice for visualizing training metrics, including loss, accuracy, and more. You can integrate it with your YOLOv8 training process.
- [Comet](https://bit.ly/yolov8-readme-comet): Comet provides an extensive toolkit for experiment tracking and comparison. It allows you to track metrics, hyperparameters, and even model weights. Integration with YOLO models is also straightforward, providing you with a complete overview of your experiment cycle.
-- [Ultralytics HUB](https://hub.ultralytics.com): Ultralytics HUB offers a specialized environment for tracking YOLO models, giving you a one-stop platform to manage metrics, datasets, and even collaborate with your team. Given its tailored focus on YOLO, it offers more customized tracking options.
+- [Ultralytics HUB](https://hub.ultralytics.com/): Ultralytics HUB offers a specialized environment for tracking YOLO models, giving you a one-stop platform to manage metrics, datasets, and even collaborate with your team. Given its tailored focus on YOLO, it offers more customized tracking options.
Each of these tools offers its own set of advantages, so you may want to consider the specific needs of your project when making a choice.
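For example, a minimal sketch of TensorBoard-backed tracking, assuming the TensorBoard integration is enabled in your Ultralytics settings and using the small `coco8.yaml` demo dataset:

```python
from ultralytics import YOLO

# Metrics are written to the run directory (runs/detect/train by default)
model = YOLO("yolov8n.pt")
model.train(data="coco8.yaml", epochs=3)

# Then, in a terminal: tensorboard --logdir runs/detect/train
```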
@@ -270,7 +270,7 @@ Engaging with a community of like-minded individuals can significantly enhance y
**GitHub Issues:** The YOLOv8 repository on GitHub has an [Issues tab](https://github.com/ultralytics/ultralytics/issues) where you can ask questions, report bugs, and suggest new features. The community and maintainers are active here, and it's a great place to get help with specific problems.
-**Ultralytics Discord Server:** Ultralytics has a [Discord server](https://ultralytics.com/discord/) where you can interact with other users and the developers.
+**Ultralytics Discord Server:** Ultralytics has a [Discord server](https://discord.com/invite/ultralytics) where you can interact with other users and the developers.
### Official Documentation and Resources
@@ -312,7 +312,7 @@ This sets the training process to the first GPU. Consult the `nvidia-smi` comman
### How can I monitor and track my YOLOv8 model training progress?
-Tracking and visualizing training progress can be efficiently managed through tools like [TensorBoard](https://www.tensorflow.org/tensorboard), [Comet](https://bit.ly/yolov8-readme-comet), and [Ultralytics HUB](https://hub.ultralytics.com). These tools allow you to log and visualize metrics such as loss, precision, recall, and mAP. Implementing [early stopping](#continuous-monitoring-parameters) based on these metrics can also help achieve better training outcomes.
+Tracking and visualizing training progress can be efficiently managed through tools like [TensorBoard](https://www.tensorflow.org/tensorboard), [Comet](https://bit.ly/yolov8-readme-comet), and [Ultralytics HUB](https://hub.ultralytics.com/). These tools allow you to log and visualize metrics such as loss, precision, recall, and mAP. Implementing [early stopping](#continuous-monitoring-parameters) based on these metrics can also help achieve better training outcomes.
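As a rough sketch of early stopping in practice (the `patience` value below is an arbitrary example):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
# patience: epochs to wait without validation improvement before stopping early
model.train(data="coco8.yaml", epochs=100, patience=20)
```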
### What should I do if YOLOv8 is not recognizing my dataset format?
diff --git a/docs/en/guides/yolo-performance-metrics.md b/docs/en/guides/yolo-performance-metrics.md
index ad59d4eb9d..d885b9eab3 100644
--- a/docs/en/guides/yolo-performance-metrics.md
+++ b/docs/en/guides/yolo-performance-metrics.md
@@ -159,7 +159,7 @@ Tapping into a community of enthusiasts and experts can amplify your journey wit
- **GitHub Issues:** The YOLOv8 repository on GitHub has an [Issues tab](https://github.com/ultralytics/ultralytics/issues) where you can ask questions, report bugs, and suggest new features. The community and maintainers are active here, and it's a great place to get help with specific problems.
-- **Ultralytics Discord Server:** Ultralytics has a [Discord server](https://ultralytics.com/discord/) where you can interact with other users and the developers.
+- **Ultralytics Discord Server:** Ultralytics has a [Discord server](https://discord.com/invite/ultralytics) where you can interact with other users and the developers.
### Official Documentation and Resources:
diff --git a/docs/en/help/CI.md b/docs/en/help/CI.md
index 0cea46c329..93b1ad3222 100644
--- a/docs/en/help/CI.md
+++ b/docs/en/help/CI.md
@@ -40,9 +40,9 @@ Remember, a successful CI test does not mean that everything is perfect. It is a
Code coverage is a metric that represents the percentage of your codebase that is executed when your tests run. It provides insight into how well your tests exercise your code and can be crucial in identifying untested parts of your application. A high code coverage percentage is often associated with a lower likelihood of bugs. However, it's essential to understand that code coverage does not guarantee the absence of defects. It merely indicates which parts of the code have been executed by the tests.
-### Integration with [codecov.io](https://codecov.io/)
+### Integration with [codecov.io](https://about.codecov.io/)
-At Ultralytics, we have integrated our repositories with [codecov.io](https://codecov.io/), a popular online platform for measuring and visualizing code coverage. Codecov provides detailed insights, coverage comparisons between commits, and visual overlays directly on your code, indicating which lines were covered.
+At Ultralytics, we have integrated our repositories with [codecov.io](https://about.codecov.io/), a popular online platform for measuring and visualizing code coverage. Codecov provides detailed insights, coverage comparisons between commits, and visual overlays directly on your code, indicating which lines were covered.
By integrating with Codecov, we aim to maintain and improve the quality of our code by focusing on areas that might be prone to errors or need further testing.
@@ -84,4 +84,4 @@ Automated [PyPI publishing](https://github.com/ultralytics/ultralytics/actions/w
### How does Ultralytics measure code coverage and why is it important?
-Ultralytics measures code coverage by integrating with [Codecov](https://codecov.io/github/ultralytics/ultralytics), providing insights into how much of the codebase is executed during tests. High code coverage can indicate well-tested code, helping to uncover untested areas that might be prone to bugs. Detailed code coverage metrics can be explored via badges displayed on our main repositories or directly on [Codecov](https://codecov.io/gh/ultralytics/ultralytics).
+Ultralytics measures code coverage by integrating with [Codecov](https://app.codecov.io/github/ultralytics/ultralytics), providing insights into how much of the codebase is executed during tests. High code coverage can indicate well-tested code, helping to uncover untested areas that might be prone to bugs. Detailed code coverage metrics can be explored via badges displayed on our main repositories or directly on [Codecov](https://app.codecov.io/gh/ultralytics/ultralytics).
diff --git a/docs/en/help/FAQ.md b/docs/en/help/FAQ.md
index 5165a953d9..24472e77f4 100644
--- a/docs/en/help/FAQ.md
+++ b/docs/en/help/FAQ.md
@@ -6,7 +6,7 @@ keywords: Ultralytics, YOLO, FAQ, object detection, hardware requirements, fine-
# Ultralytics YOLO Frequently Asked Questions (FAQ)
-This FAQ section addresses common questions and issues users might encounter while working with [Ultralytics](https://ultralytics.com) YOLO repositories.
+This FAQ section addresses common questions and issues users might encounter while working with [Ultralytics](https://www.ultralytics.com/) YOLO repositories.
## FAQ
@@ -222,7 +222,7 @@ Ultralytics provides a wealth of resources to help you get started and master th
- 💻 [GitHub repository](https://github.com/ultralytics/ultralytics): Source code, example scripts, and community contributions.
- ✍️ [Ultralytics blog](https://www.ultralytics.com/blog): In-depth articles, use cases, and technical insights.
- 💬 [Community forums](https://community.ultralytics.com/): Connect with other users, ask questions, and share your experiences.
-- 🎥 [YouTube channel](https://youtube.com/ultralytics?sub_confirmation=1): Video tutorials, demos, and webinars on various Ultralytics topics.
+- 🎥 [YouTube channel](https://www.youtube.com/ultralytics?sub_confirmation=1): Video tutorials, demos, and webinars on various Ultralytics topics.
These resources provide code examples, real-world use cases, and step-by-step guides for various tasks using Ultralytics models.
diff --git a/docs/en/help/code_of_conduct.md b/docs/en/help/code_of_conduct.md
index c8638cc61f..625ed601e4 100644
--- a/docs/en/help/code_of_conduct.md
+++ b/docs/en/help/code_of_conduct.md
@@ -78,7 +78,7 @@ Community leaders will follow these Community Impact Guidelines in determining t
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
-Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity).
+Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/inclusion).
For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.
@@ -104,6 +104,6 @@ Contributing to Ultralytics means engaging positively and respectfully with othe
### Where can I find additional information about the Ultralytics Code of Conduct?
-For more comprehensive details about the Ultralytics Code of Conduct, including reporting guidelines and enforcement policies, you can visit the [Contributor Covenant homepage](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html) or check the [FAQ section of Contributor Covenant](https://www.contributor-covenant.org/faq). Learn more about Ultralytics' goals and initiatives on [our brand page](https://www.ultralytics.com/brand) and [about page](https://www.ultralytics.com/about).
+For more comprehensive details about the Ultralytics Code of Conduct, including reporting guidelines and enforcement policies, you can visit the [Contributor Covenant homepage](https://www.contributor-covenant.org/version/2/0/code_of_conduct/) or check the [FAQ section of Contributor Covenant](https://www.contributor-covenant.org/faq/). Learn more about Ultralytics' goals and initiatives on [our brand page](https://www.ultralytics.com/brand) and [about page](https://www.ultralytics.com/about).
Should you have more questions or need further assistance, check our [Help Center](../help/FAQ.md) and [Contributing Guide](../help/contributing.md) for more information.
diff --git a/docs/en/help/contributing.md b/docs/en/help/contributing.md
index a4c23e99dd..637c1ae86e 100644
--- a/docs/en/help/contributing.md
+++ b/docs/en/help/contributing.md
@@ -6,7 +6,7 @@ keywords: Ultralytics, YOLO, open-source, contribution, pull request, code of co
# Contributing to Ultralytics Open-Source Projects
-Welcome! We're thrilled that you're considering contributing to our [Ultralytics](https://ultralytics.com) [open-source](https://github.com/ultralytics) projects. Your involvement not only helps enhance the quality of our repositories but also benefits the entire community. This guide provides clear guidelines and best practices to help you get started.
+Welcome! We're thrilled that you're considering contributing to our [Ultralytics](https://www.ultralytics.com/) [open-source](https://github.com/ultralytics) projects. Your involvement not only helps enhance the quality of our repositories but also benefits the entire community. This guide provides clear guidelines and best practices to help you get started.
@@ -133,7 +133,7 @@ We encourage all contributors to familiarize themselves with the terms of the AG
## Conclusion
-Thank you for your interest in contributing to [Ultralytics](https://ultralytics.com) [open-source](https://github.com/ultralytics) YOLO projects. Your participation is essential in shaping the future of our software and building a vibrant community of innovation and collaboration. Whether you're enhancing code, reporting bugs, or suggesting new features, your contributions are invaluable.
+Thank you for your interest in contributing to [Ultralytics](https://www.ultralytics.com/) [open-source](https://github.com/ultralytics) YOLO projects. Your participation is essential in shaping the future of our software and building a vibrant community of innovation and collaboration. Whether you're enhancing code, reporting bugs, or suggesting new features, your contributions are invaluable.
We're excited to see your ideas come to life and appreciate your commitment to advancing object detection technology. Together, let's continue to grow and innovate in this exciting open-source journey. Happy coding! 🚀🌟
diff --git a/docs/en/help/minimum_reproducible_example.md b/docs/en/help/minimum_reproducible_example.md
index 92eb629938..eb4e25368c 100644
--- a/docs/en/help/minimum_reproducible_example.md
+++ b/docs/en/help/minimum_reproducible_example.md
@@ -6,7 +6,7 @@ keywords: Ultralytics, YOLO, Minimum Reproducible Example, MRE, bug report, issu
# Creating a Minimum Reproducible Example for Bug Reports in Ultralytics YOLO Repositories
-When submitting a bug report for [Ultralytics](https://ultralytics.com) [YOLO](https://github.com/ultralytics) repositories, it's essential to provide a [Minimum Reproducible Example (MRE)](https://stackoverflow.com/help/minimal-reproducible-example). An MRE is a small, self-contained piece of code that demonstrates the problem you're experiencing. Providing an MRE helps maintainers and contributors understand the issue and work on a fix more efficiently. This guide explains how to create an MRE when submitting bug reports to Ultralytics YOLO repositories.
+When submitting a bug report for [Ultralytics](https://www.ultralytics.com/) [YOLO](https://github.com/ultralytics) repositories, it's essential to provide a [Minimum Reproducible Example (MRE)](https://stackoverflow.com/help/minimal-reproducible-example). An MRE is a small, self-contained piece of code that demonstrates the problem you're experiencing. Providing an MRE helps maintainers and contributors understand the issue and work on a fix more efficiently. This guide explains how to create an MRE when submitting bug reports to Ultralytics YOLO repositories.
## 1. Isolate the Problem
diff --git a/docs/en/help/privacy.md b/docs/en/help/privacy.md
index 0453569e3d..a053f199fe 100644
--- a/docs/en/help/privacy.md
+++ b/docs/en/help/privacy.md
@@ -7,7 +7,7 @@ keywords: Ultralytics, data collection, YOLO, Python package, Google Analytics,
## Overview
-[Ultralytics](https://ultralytics.com) is dedicated to the continuous enhancement of the user experience and the capabilities of our Python package, including the advanced YOLO models we develop. Our approach involves the gathering of anonymized usage statistics and crash reports, helping us identify opportunities for improvement and ensuring the reliability of our software. This transparency document outlines what data we collect, its purpose, and the choice you have regarding this data collection.
+[Ultralytics](https://www.ultralytics.com/) is dedicated to the continuous enhancement of the user experience and the capabilities of our Python package, including the advanced YOLO models we develop. Our approach involves the gathering of anonymized usage statistics and crash reports, helping us identify opportunities for improvement and ensuring the reliability of our software. This transparency document outlines what data we collect, its purpose, and the choices you have regarding this data collection.
## Anonymized Google Analytics
@@ -37,7 +37,7 @@ We take several measures to ensure the privacy and security of the data you entr
## Sentry Crash Reporting
-[Sentry](https://sentry.io/) is a developer-centric error tracking software that aids in identifying, diagnosing, and resolving issues in real-time, ensuring the robustness and reliability of applications. Within our package, it plays a crucial role by providing insights through crash reporting, significantly contributing to the stability and ongoing refinement of our software.
+[Sentry](https://sentry.io/welcome/) is a developer-centric error-tracking platform that aids in identifying, diagnosing, and resolving issues in real time, ensuring the robustness and reliability of applications. Within our package, it plays a crucial role by providing insights through crash reporting, significantly contributing to the stability and ongoing refinement of our software.
!!! Note
@@ -138,7 +138,7 @@ Ultralytics takes user privacy seriously. We design our data collection practice
## Questions or Concerns
-If you have any questions or concerns about our data collection practices, please reach out to us via our [contact form](https://ultralytics.com/contact) or via [support@ultralytics.com](mailto:support@ultralytics.com). We are dedicated to ensuring our users feel informed and confident in their privacy when using our package.
+If you have any questions or concerns about our data collection practices, please reach out to us via our [contact form](https://www.ultralytics.com/contact) or via [support@ultralytics.com](mailto:support@ultralytics.com). We are dedicated to ensuring our users feel informed and confident in their privacy when using our package.
## FAQ
diff --git a/docs/en/help/security.md b/docs/en/help/security.md
index 553a0b2408..39fe3829ff 100644
--- a/docs/en/help/security.md
+++ b/docs/en/help/security.md
@@ -5,7 +5,7 @@ keywords: Ultralytics security policy, Snyk scanning, CodeQL scanning, Dependabo
# Ultralytics Security Policy
-At [Ultralytics](https://ultralytics.com), the security of our users' data and systems is of utmost importance. To ensure the safety and security of our [open-source projects](https://github.com/ultralytics), we have implemented several measures to detect and prevent security vulnerabilities.
+At [Ultralytics](https://www.ultralytics.com/), the security of our users' data and systems is of utmost importance. To ensure the safety and security of our [open-source projects](https://github.com/ultralytics), we have implemented several measures to detect and prevent security vulnerabilities.
## Snyk Scanning
@@ -15,7 +15,7 @@ We utilize [Snyk](https://snyk.io/advisor/python/ultralytics) to conduct compreh
## GitHub CodeQL Scanning
-Our security strategy includes GitHub's [CodeQL](https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/about-code-scanning-with-codeql) scanning. CodeQL delves deep into our codebase, identifying complex vulnerabilities like SQL injection and XSS by analyzing the code's semantic structure. This advanced level of analysis ensures early detection and resolution of potential security risks.
+Our security strategy includes GitHub's [CodeQL](https://docs.github.com/en/code-security/code-scanning/introduction-to-code-scanning/about-code-scanning-with-codeql) scanning. CodeQL delves deep into our codebase, identifying complex vulnerabilities like SQL injection and XSS by analyzing the code's semantic structure. This advanced level of analysis ensures early detection and resolution of potential security risks.
[](https://github.com/ultralytics/ultralytics/actions/workflows/codeql.yaml)
@@ -31,7 +31,7 @@ We employ GitHub [secret scanning](https://docs.github.com/en/code-security/secr
We enable private vulnerability reporting, allowing users to discreetly report potential security issues. This approach facilitates responsible disclosure, ensuring vulnerabilities are handled securely and efficiently.
-If you suspect or discover a security vulnerability in any of our repositories, please let us know immediately. You can reach out to us directly via our [contact form](https://ultralytics.com/contact) or via [security@ultralytics.com](mailto:security@ultralytics.com). Our security team will investigate and respond as soon as possible.
+If you suspect or discover a security vulnerability in any of our repositories, please let us know immediately. You can reach out to us directly via our [contact form](https://www.ultralytics.com/contact) or via [security@ultralytics.com](mailto:security@ultralytics.com). Our security team will investigate and respond as soon as possible.
We appreciate your help in keeping all Ultralytics open-source projects secure and safe for everyone 🙏.
@@ -57,7 +57,7 @@ To see the Snyk badge and learn more about its deployment, check the [Snyk Scann
### What is CodeQL and how does it enhance security for Ultralytics?
-[CodeQL](https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/about-code-scanning-with-codeql) is a security analysis tool integrated into Ultralytics' workflow via GitHub. It delves deep into the codebase to identify complex vulnerabilities such as SQL injection and Cross-Site Scripting (XSS). CodeQL analyzes the semantic structure of the code to provide an advanced level of security, ensuring early detection and mitigation of potential risks.
+[CodeQL](https://docs.github.com/en/code-security/code-scanning/introduction-to-code-scanning/about-code-scanning-with-codeql) is a security analysis tool integrated into Ultralytics' workflow via GitHub. It delves deep into the codebase to identify complex vulnerabilities such as SQL injection and Cross-Site Scripting (XSS). CodeQL analyzes the semantic structure of the code to provide an advanced level of security, ensuring early detection and mitigation of potential risks.
For more information on how CodeQL is used, visit the [GitHub CodeQL Scanning section](#github-codeql-scanning).
@@ -69,6 +69,6 @@ For more details, explore the [GitHub Dependabot Alerts section](#github-dependa
### How does Ultralytics handle private vulnerability reporting?
-Ultralytics encourages users to report potential security issues through private channels. Users can report vulnerabilities discreetly via the [contact form](https://ultralytics.com/contact) or by emailing [security@ultralytics.com](mailto:security@ultralytics.com). This ensures responsible disclosure and allows the security team to investigate and address vulnerabilities securely and efficiently.
+Ultralytics encourages users to report potential security issues through private channels. Users can report vulnerabilities discreetly via the [contact form](https://www.ultralytics.com/contact) or by emailing [security@ultralytics.com](mailto:security@ultralytics.com). This ensures responsible disclosure and allows the security team to investigate and address vulnerabilities securely and efficiently.
For more information on private vulnerability reporting, refer to the [Private Vulnerability Reporting section](#private-vulnerability-reporting).
diff --git a/docs/en/hub/api/index.md b/docs/en/hub/api/index.md
index b417161514..9ae12c3db5 100644
--- a/docs/en/hub/api/index.md
+++ b/docs/en/hub/api/index.md
@@ -17,13 +17,13 @@ Welcome to the Ultralytics "Under Construction" page! Here, we're hard at work d
This placeholder page is your first stop for upcoming developments. Keep an eye out for:
-- **Newsletter:** Subscribe [here](https://ultralytics.com/#newsletter) for the latest news.
+- **Newsletter:** Subscribe [here](https://www.ultralytics.com/#newsletter) for the latest news.
- **Social Media:** Follow us [here](https://www.linkedin.com/company/ultralytics) for updates and teasers.
-- **Blog:** Visit our [blog](https://ultralytics.com/blog) for detailed insights.
+- **Blog:** Visit our [blog](https://www.ultralytics.com/blog) for detailed insights.
## We Value Your Input 🗣️
-Your feedback shapes our future releases. Share your thoughts and suggestions [here](https://ultralytics.com/contact).
+Your feedback shapes our future releases. Share your thoughts and suggestions [here](https://www.ultralytics.com/contact).
## Thank You, Community! 🌍
diff --git a/docs/en/hub/app/android.md b/docs/en/hub/app/android.md
index c3c19b0c17..365180545d 100644
--- a/docs/en/hub/app/android.md
+++ b/docs/en/hub/app/android.md
@@ -60,7 +60,7 @@ INT8 (or 8-bit integer) quantization further reduces the model's size and comput
## Delegates and Performance Variability
-Different delegates are available on Android devices to accelerate model inference. These delegates include CPU, [GPU](https://www.tensorflow.org/lite/android/delegates/gpu), [Hexagon](https://www.tensorflow.org/lite/android/delegates/hexagon) and [NNAPI](https://www.tensorflow.org/lite/android/delegates/nnapi). The performance of these delegates varies depending on the device's hardware vendor, product line, and specific chipsets used in the device.
+Different delegates are available on Android devices to accelerate model inference. These delegates include CPU, [GPU](https://ai.google.dev/edge/litert/android/gpu), [Hexagon](https://developer.android.com/ndk/guides/neuralnetworks/migration-guide) and [NNAPI](https://developer.android.com/ndk/guides/neuralnetworks/migration-guide). The performance of these delegates varies depending on the device's hardware vendor, product line, and specific chipsets used in the device.
1. **CPU**: The default option, with reasonable performance on most devices.
2. **GPU**: Utilizes the device's GPU for faster inference. It can provide a significant performance boost on devices with powerful GPUs.
@@ -69,13 +69,13 @@ Different delegates are available on Android devices to accelerate model inferen
Here's a table showing the primary vendors, their product lines, popular devices, and supported delegates:
-| Vendor | Product Lines | Popular Devices | Delegates Supported |
-| --------------------------------------- | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------ |
-| [Qualcomm](https://www.qualcomm.com/) | [Snapdragon (e.g., 800 series)](https://www.qualcomm.com/snapdragon) | [Samsung Galaxy S21](https://www.samsung.com/global/galaxy/galaxy-s21-5g/), [OnePlus 9](https://www.oneplus.com/9), [Google Pixel 6](https://store.google.com/product/pixel_6) | CPU, GPU, Hexagon, NNAPI |
-| [Samsung](https://www.samsung.com/) | [Exynos (e.g., Exynos 2100)](https://www.samsung.com/semiconductor/minisite/exynos/) | [Samsung Galaxy S21 (Global version)](https://www.samsung.com/global/galaxy/galaxy-s21-5g/) | CPU, GPU, NNAPI |
-| [MediaTek](https://i.mediatek.com/) | [Dimensity (e.g., Dimensity 1200)](https://i.mediatek.com/dimensity-1200) | [Realme GT](https://www.realme.com/global/realme-gt), [Xiaomi Redmi Note](https://www.mi.com/en/phone/redmi/note-list) | CPU, GPU, NNAPI |
-| [HiSilicon](https://www.hisilicon.com/) | [Kirin (e.g., Kirin 990)](https://www.hisilicon.com/en/products/Kirin) | [Huawei P40 Pro](https://consumer.huawei.com/en/phones/p40-pro/), [Huawei Mate 30 Pro](https://consumer.huawei.com/en/phones/mate30-pro/) | CPU, GPU, NNAPI |
-| [NVIDIA](https://www.nvidia.com/) | [Tegra (e.g., Tegra X1)](https://developer.nvidia.com/content/tegra-x1) | [NVIDIA Shield TV](https://www.nvidia.com/en-us/shield/shield-tv/), [Nintendo Switch](https://www.nintendo.com/switch/) | CPU, GPU, NNAPI |
+| Vendor | Product Lines | Popular Devices | Delegates Supported |
+| ----------------------------------------- | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------ |
+| [Qualcomm](https://www.qualcomm.com/) | [Snapdragon (e.g., 800 series)](https://www.qualcomm.com/snapdragon/overview) | [Samsung Galaxy S21](https://www.samsung.com/global/galaxy/galaxy-s21-5g/), [OnePlus 9](https://www.oneplus.com/9), [Google Pixel 6](https://store.google.com/product/pixel_6) | CPU, GPU, Hexagon, NNAPI |
+| [Samsung](https://www.samsung.com/) | [Exynos (e.g., Exynos 2100)](https://www.samsung.com/semiconductor/minisite/exynos/) | [Samsung Galaxy S21 (Global version)](https://www.samsung.com/global/galaxy/galaxy-s21-5g/) | CPU, GPU, NNAPI |
+| [MediaTek](https://i.mediatek.com/) | [Dimensity (e.g., Dimensity 1200)](https://i.mediatek.com/dimensity-1200) | [Realme GT](https://www.realme.com/global/realme-gt), [Xiaomi Redmi Note](https://www.mi.com/global/phone/redmi/note-list) | CPU, GPU, NNAPI |
+| [HiSilicon](https://www.hisilicon.com/cn) | [Kirin (e.g., Kirin 990)](https://www.hisilicon.com/en/products/Kirin) | [Huawei P40 Pro](https://consumer.huawei.com/en/phones/), [Huawei Mate 30 Pro](https://consumer.huawei.com/en/phones/) | CPU, GPU, NNAPI |
+| [NVIDIA](https://www.nvidia.com/) | [Tegra (e.g., Tegra X1)](https://developer.nvidia.com/content/tegra-x1) | [NVIDIA Shield TV](https://www.nvidia.com/en-us/shield/shield-tv/), [Nintendo Switch](https://www.nintendo.com/switch/) | CPU, GPU, NNAPI |
Please note that the list of devices mentioned is not exhaustive and may vary depending on the specific chipsets and device models. Always test your models on your target devices to ensure compatibility and optimal performance.
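If your target device leans on the GPU or NNAPI delegates above, a quantized TFLite export is a sensible starting point. A minimal sketch, assuming the `coco8.yaml` demo dataset as INT8 calibration data:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
# INT8 quantization shrinks the model for mobile delegates; the data argument
# supplies calibration images (coco8.yaml is an assumption for illustration)
model.export(format="tflite", int8=True, data="coco8.yaml")
```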
diff --git a/docs/en/hub/cloud-training.md b/docs/en/hub/cloud-training.md
index 9d09a18fc0..42dd681080 100644
--- a/docs/en/hub/cloud-training.md
+++ b/docs/en/hub/cloud-training.md
@@ -6,9 +6,9 @@ keywords: Ultralytics HUB, cloud training, model training, Pro Plan, easy AI set
# Ultralytics HUB Cloud Training
-We've listened to the high demand and widespread interest and are thrilled to unveil [Ultralytics HUB](https://ultralytics.com/hub) Cloud Training, offering a single-click training experience for our [Pro](./pro.md) users!
+We've listened to the high demand and widespread interest and are thrilled to unveil [Ultralytics HUB](https://www.ultralytics.com/hub) Cloud Training, offering a single-click training experience for our [Pro](./pro.md) users!
-[Ultralytics HUB](https://ultralytics.com/hub) [Pro](./pro.md) users can finetune [Ultralytics HUB](https://ultralytics.com/hub) models on a custom dataset using our Cloud Training solution, making the model training process easy. Say goodbye to complex setups and hello to streamlined workflows with [Ultralytics HUB](https://ultralytics.com/hub)'s intuitive interface.
+[Ultralytics HUB](https://www.ultralytics.com/hub) [Pro](./pro.md) users can fine-tune [Ultralytics HUB](https://www.ultralytics.com/hub) models on a custom dataset using our Cloud Training solution, making the model training process easy. Say goodbye to complex setups and hello to streamlined workflows with [Ultralytics HUB](https://www.ultralytics.com/hub)'s intuitive interface.
-Introducing [Ultralytics](https://ultralytics.com) [YOLOv8](https://github.com/ultralytics/ultralytics), the latest version of the acclaimed real-time object detection and image segmentation model. YOLOv8 is built on cutting-edge advancements in deep learning and computer vision, offering unparalleled performance in terms of speed and accuracy. Its streamlined design makes it suitable for various applications and easily adaptable to different hardware platforms, from edge devices to cloud APIs.
+Introducing [Ultralytics](https://www.ultralytics.com/) [YOLOv8](https://github.com/ultralytics/ultralytics), the latest version of the acclaimed real-time object detection and image segmentation model. YOLOv8 is built on cutting-edge advancements in deep learning and computer vision, offering unparalleled performance in terms of speed and accuracy. Its streamlined design makes it suitable for various applications and easily adaptable to different hardware platforms, from edge devices to cloud APIs.
Explore the YOLOv8 Docs, a comprehensive resource designed to help you understand and utilize its features and capabilities. Whether you are a seasoned machine learning practitioner or new to the field, this hub aims to maximize YOLOv8's potential in your projects.
@@ -83,14 +83,14 @@ Explore the YOLOv8 Docs, a comprehensive resource designed to help you understan
- [YOLOv7](https://github.com/WongKinYiu/yolov7) added additional tasks such as pose estimation on the COCO keypoints dataset.
- [YOLOv8](https://github.com/ultralytics/ultralytics) is the latest version of YOLO by Ultralytics. As a cutting-edge, state-of-the-art (SOTA) model, YOLOv8 builds on the success of previous versions, introducing new features and improvements for enhanced performance, flexibility, and efficiency. YOLOv8 supports a full range of vision AI tasks, including [detection](tasks/detect.md), [segmentation](tasks/segment.md), [pose estimation](tasks/pose.md), [tracking](modes/track.md), and [classification](tasks/classify.md). This versatility allows users to leverage YOLOv8's capabilities across diverse applications and domains.
- [YOLOv9](models/yolov9.md) introduces innovative methods like Programmable Gradient Information (PGI) and the Generalized Efficient Layer Aggregation Network (GELAN).
-- [YOLOv10](models/yolov10.md) is created by researchers from [Tsinghua University](https://www.tsinghua.edu.cn/en/) using the [Ultralytics](https://ultralytics.com/) [Python package](https://pypi.org/project/ultralytics/). This version provides real-time [object detection](tasks/detect.md) advancements by introducing an End-to-End head that eliminates Non-Maximum Suppression (NMS) requirements.
+- [YOLOv10](models/yolov10.md) was created by researchers from [Tsinghua University](https://www.tsinghua.edu.cn/en/) using the [Ultralytics](https://www.ultralytics.com/) [Python package](https://pypi.org/project/ultralytics/). This version provides real-time [object detection](tasks/detect.md) advancements by introducing an End-to-End head that eliminates Non-Maximum Suppression (NMS) requirements.
## YOLO Licenses: How is Ultralytics YOLO licensed?
Ultralytics offers two licensing options to accommodate diverse use cases:
-- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/licenses/) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for more details.
-- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://ultralytics.com/license).
+- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/license) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for more details.
+- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://www.ultralytics.com/license).
Our licensing strategy is designed to ensure that any improvements to our open-source projects are returned to the community. We hold the principles of open source close to our hearts ❤️, and our mission is to guarantee that our contributions can be utilized and expanded upon in ways that are beneficial to all.
@@ -133,7 +133,7 @@ Ultralytics offers two licensing options for YOLO:
- **AGPL-3.0 License**: This open-source license is ideal for educational and non-commercial use, promoting open collaboration.
- **Enterprise License**: This is designed for commercial applications, allowing seamless integration of Ultralytics software into commercial products without the restrictions of the AGPL-3.0 license.
-For more details, visit our [Licensing](https://ultralytics.com/license) page.
+For more details, visit our [Licensing](https://www.ultralytics.com/license) page.
### How can Ultralytics YOLO be used for real-time object tracking?
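In short, tracking runs through the same Python API as prediction. A minimal sketch (the video path is a placeholder; `bytetrack.yaml` is one of the bundled tracker configs):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
# Track objects across frames of a video stream
results = model.track(source="path/to/video.mp4", tracker="bytetrack.yaml", show=True)
```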
diff --git a/docs/en/integrations/clearml.md b/docs/en/integrations/clearml.md
index f32622de7b..566a6db8bd 100644
--- a/docs/en/integrations/clearml.md
+++ b/docs/en/integrations/clearml.md
@@ -8,7 +8,7 @@ keywords: YOLOv8, ClearML, MLOps, Ultralytics, machine learning, object detectio
MLOps bridges the gap between creating and deploying machine learning models in real-world settings. It focuses on efficient deployment, scalability, and ongoing management to ensure models perform well in practical applications.
-[Ultralytics YOLOv8](https://ultralytics.com) effortlessly integrates with ClearML, streamlining and enhancing your object detection model's training and management. This guide will walk you through the integration process, detailing how to set up ClearML, manage experiments, automate model management, and collaborate effectively.
+[Ultralytics YOLOv8](https://www.ultralytics.com/) effortlessly integrates with ClearML, streamlining and enhancing your object detection model's training and management. This guide will walk you through the integration process, detailing how to set up ClearML, manage experiments, automate model management, and collaborate effectively.
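As a taste of what the integration looks like in code, here is a minimal sketch; the project and task names are placeholders, and training assumes the small `coco8.yaml` demo dataset:

```python
from clearml import Task

from ultralytics import YOLO

# Initializing a ClearML task before training lets the integration capture the run
task = Task.init(project_name="yolov8-demo", task_name="first-experiment")
model = YOLO("yolov8n.pt")
model.train(data="coco8.yaml", epochs=3)
```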
## ClearML
diff --git a/docs/en/integrations/comet.md b/docs/en/integrations/comet.md
index f2db7f459c..a705985449 100644
--- a/docs/en/integrations/comet.md
+++ b/docs/en/integrations/comet.md
@@ -8,7 +8,7 @@ keywords: YOLOv8, Comet ML, logging, machine learning, training, model checkpoin
Logging key training details such as parameters, metrics, image predictions, and model checkpoints is essential in machine learning—it keeps your project transparent, your progress measurable, and your results repeatable.
-[Ultralytics YOLOv8](https://ultralytics.com) seamlessly integrates with Comet ML, efficiently capturing and optimizing every aspect of your YOLOv8 object detection model's training process. In this guide, we'll cover the installation process, Comet ML setup, real-time insights, custom logging, and offline usage, ensuring that your YOLOv8 training is thoroughly documented and fine-tuned for outstanding results.
+[Ultralytics YOLOv8](https://www.ultralytics.com/) seamlessly integrates with Comet ML, efficiently capturing and optimizing every aspect of your YOLOv8 object detection model's training process. In this guide, we'll cover the installation process, Comet ML setup, real-time insights, custom logging, and offline usage, ensuring that your YOLOv8 training is thoroughly documented and fine-tuned for outstanding results.
## Comet ML
@@ -16,7 +16,7 @@ Logging key training details such as parameters, metrics, image predictions, and
-[Comet ML](https://www.comet.ml/) is a platform for tracking, comparing, explaining, and optimizing machine learning models and experiments. It allows you to log metrics, parameters, media, and more during your model training and monitor your experiments through an aesthetically pleasing web interface. Comet ML helps data scientists iterate more rapidly, enhances transparency and reproducibility, and aids in the development of production models.
+[Comet ML](https://www.comet.com/site/) is a platform for tracking, comparing, explaining, and optimizing machine learning models and experiments. It allows you to log metrics, parameters, media, and more during your model training and monitor your experiments through an aesthetically pleasing web interface. Comet ML helps data scientists iterate more rapidly, enhances transparency and reproducibility, and aids in the development of production models.
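A minimal sketch of what this looks like end to end; the Comet project name is a placeholder, and `comet_ml.init()` assumes your API key is already configured:

```python
import comet_ml

from ultralytics import YOLO

comet_ml.init(project_name="yolov8-comet-demo")  # placeholder project name
model = YOLO("yolov8n.pt")
model.train(data="coco8.yaml", epochs=3)  # metrics stream to Comet automatically
```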
## Harnessing the Power of YOLOv8 and Comet ML
diff --git a/docs/en/integrations/coreml.md b/docs/en/integrations/coreml.md
index 4f210084ba..2f22e6c444 100644
--- a/docs/en/integrations/coreml.md
+++ b/docs/en/integrations/coreml.md
@@ -113,9 +113,9 @@ Having successfully exported your Ultralytics YOLOv8 models to CoreML, the next
- **[CoreML Tools](https://apple.github.io/coremltools/docs-guides/)**: This guide includes instructions and examples to convert models from TensorFlow, PyTorch, and other libraries to Core ML.
-- **[ML and Vision](https://developer.apple.com/videos/ml-vision)**: A collection of comprehensive videos that cover various aspects of using and implementing CoreML models.
+- **[ML and Vision](https://developer.apple.com/videos/)**: A collection of comprehensive videos that cover various aspects of using and implementing CoreML models.
-- **[Integrating a Core ML Model into Your App](https://developer.apple.com/documentation/coreml/integrating_a_core_ml_model_into_your_app)**: A comprehensive guide on integrating a CoreML model into an iOS application, detailing steps from preparing the model to implementing it in the app for various functionalities.
+- **[Integrating a Core ML Model into Your App](https://developer.apple.com/documentation/coreml/integrating-a-core-ml-model-into-your-app)**: A comprehensive guide on integrating a CoreML model into an iOS application, detailing steps from preparing the model to implementing it in the app for various functionalities.
## Summary
@@ -169,7 +169,7 @@ CoreML provides numerous advantages for deploying [Ultralytics YOLOv8](https://g
- **Ease of Integration**: Offers a seamless integration experience with Apple's ecosystems, including iOS, macOS, watchOS, and tvOS.
- **Versatility**: Supports a wide range of machine learning tasks such as image analysis, audio processing, and natural language processing using the CoreML framework.
-For more details on integrating your CoreML model into an iOS app, check out the guide on [Integrating a Core ML Model into Your App](https://developer.apple.com/documentation/coreml/integrating_a_core_ml_model_into_your_app).
+For more details on integrating your CoreML model into an iOS app, check out the guide on [Integrating a Core ML Model into Your App](https://developer.apple.com/documentation/coreml/integrating-a-core-ml-model-into-your-app).
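For reference, the export that precedes the Xcode integration is a single call. A minimal sketch:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
# Produces a Core ML package you can add to an Xcode project
coreml_path = model.export(format="coreml")
```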
### What are the deployment options for YOLOv8 models exported to CoreML?
diff --git a/docs/en/integrations/dvc.md b/docs/en/integrations/dvc.md
index 00588fb129..7e1325b9d1 100644
--- a/docs/en/integrations/dvc.md
+++ b/docs/en/integrations/dvc.md
@@ -8,7 +8,7 @@ keywords: YOLOv8, DVCLive, experiment tracking, machine learning, model training
Experiment tracking in machine learning is critical to model development and evaluation. It involves recording and analyzing various parameters, metrics, and outcomes from numerous training runs. This process is essential for understanding model performance and making data-driven decisions to refine and optimize models.
-Integrating DVCLive with [Ultralytics YOLOv8](https://ultralytics.com) transforms the way experiments are tracked and managed. This integration offers a seamless solution for automatically logging key experiment details, comparing results across different runs, and visualizing data for in-depth analysis. In this guide, we'll understand how DVCLive can be used to streamline the process.
+Integrating DVCLive with [Ultralytics YOLOv8](https://www.ultralytics.com/) transforms the way experiments are tracked and managed. This integration offers a seamless solution for automatically logging key experiment details, comparing results across different runs, and visualizing data for in-depth analysis. In this guide, we'll understand how DVCLive can be used to streamline the process.
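The integration is largely hands-off: with `dvclive` installed, an ordinary training run is logged automatically. A minimal sketch, assuming the `coco8.yaml` demo dataset:

```python
from ultralytics import YOLO

# With dvclive installed (pip install dvclive), each epoch is logged automatically
model = YOLO("yolov8n.pt")
model.train(data="coco8.yaml", epochs=3)
```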
## DVCLive
diff --git a/docs/en/integrations/edge-tpu.md b/docs/en/integrations/edge-tpu.md
index cb277ed385..71941ef337 100644
--- a/docs/en/integrations/edge-tpu.md
+++ b/docs/en/integrations/edge-tpu.md
@@ -6,7 +6,7 @@ keywords: YOLOv8, TFLite Edge TPU, TensorFlow Lite, model export, machine learni
# Learn to Export to TFLite Edge TPU Format From YOLOv8 Model
-Deploying computer vision models on devices with limited computational power, such as mobile or embedded systems, can be tricky. Using a model format that is optimized for faster performance simplifies the process. The [TensorFlow Lite](https://www.tensorflow.org/lite) [Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) or TFLite Edge TPU model format is designed to use minimal power while delivering fast performance for neural networks.
+Deploying computer vision models on devices with limited computational power, such as mobile or embedded systems, can be tricky. Using a model format that is optimized for faster performance simplifies the process. The [TensorFlow Lite](https://ai.google.dev/edge/litert) [Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) or TFLite Edge TPU model format is designed to use minimal power while delivering fast performance for neural networks.
The TFLite Edge TPU export feature allows you to optimize your [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models for high-speed, low-power inference. In this guide, we'll walk you through converting your models to the TFLite Edge TPU format, making it easier for your models to perform well on various mobile and embedded devices.
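The conversion itself is a one-line export. A minimal sketch (Edge TPU compilation is assumed to require a Linux environment):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
model.export(format="edgetpu")  # produces a TFLite model compiled for the Edge TPU
```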
diff --git a/docs/en/integrations/index.md b/docs/en/integrations/index.md
index 5a07c966d7..92251e75ba 100644
--- a/docs/en/integrations/index.md
+++ b/docs/en/integrations/index.md
@@ -35,7 +35,7 @@ Welcome to the Ultralytics Integrations page! This page provides an overview of
- [MLFlow](mlflow.md): Streamline the entire ML lifecycle of Ultralytics models, from experimentation and reproducibility to deployment.
-- [Ultralytics HUB](https://hub.ultralytics.com): Access and contribute to a community of pre-trained Ultralytics models.
+- [Ultralytics HUB](https://hub.ultralytics.com/): Access and contribute to a community of pre-trained Ultralytics models.
- [Neptune](https://neptune.ai/): Maintain a comprehensive log of your ML experiments with Ultralytics in this metadata store designed for MLOps.
@@ -65,7 +65,7 @@ Welcome to the Ultralytics Integrations page! This page provides an overview of
- [TorchScript](torchscript.md): Developed as part of the [PyTorch](https://pytorch.org/) framework, TorchScript enables efficient execution and deployment of machine learning models in various production environments without the need for Python dependencies.
-- [ONNX](onnx.md): An open-source format created by [Microsoft](https://www.microsoft.com) for facilitating the transfer of AI models between various frameworks, enhancing the versatility and deployment flexibility of Ultralytics models.
+- [ONNX](onnx.md): An open-source format created by [Microsoft](https://www.microsoft.com/) for facilitating the transfer of AI models between various frameworks, enhancing the versatility and deployment flexibility of Ultralytics models.
- [OpenVINO](openvino.md): Intel's toolkit for optimizing and deploying computer vision models efficiently across various Intel CPU and GPU platforms.
@@ -73,15 +73,15 @@ Welcome to the Ultralytics Integrations page! This page provides an overview of
- [CoreML](coreml.md): CoreML, developed by [Apple](https://www.apple.com/), is a framework designed for efficiently integrating machine learning models into applications across iOS, macOS, watchOS, and tvOS, using Apple's hardware for effective and secure model deployment.
-- [TF SavedModel](tf-savedmodel.md): Developed by [Google](https://www.google.com), TF SavedModel is a universal serialization format for TensorFlow models, enabling easy sharing and deployment across a wide range of platforms, from servers to edge devices.
+- [TF SavedModel](tf-savedmodel.md): Developed by [Google](https://www.google.com/), TF SavedModel is a universal serialization format for TensorFlow models, enabling easy sharing and deployment across a wide range of platforms, from servers to edge devices.
-- [TF GraphDef](tf-graphdef.md): Developed by [Google](https://www.google.com), GraphDef is TensorFlow's format for representing computation graphs, enabling optimized execution of machine learning models across diverse hardware.
+- [TF GraphDef](tf-graphdef.md): Developed by [Google](https://www.google.com/), GraphDef is TensorFlow's format for representing computation graphs, enabling optimized execution of machine learning models across diverse hardware.
-- [TFLite](tflite.md): Developed by [Google](https://www.google.com), TFLite is a lightweight framework for deploying machine learning models on mobile and edge devices, ensuring fast, efficient inference with minimal memory footprint.
+- [TFLite](tflite.md): Developed by [Google](https://www.google.com/), TFLite is a lightweight framework for deploying machine learning models on mobile and edge devices, ensuring fast, efficient inference with minimal memory footprint.
-- [TFLite Edge TPU](edge-tpu.md): Developed by [Google](https://www.google.com) for optimizing TensorFlow Lite models on Edge TPUs, this model format ensures high-speed, efficient edge computing.
+- [TFLite Edge TPU](edge-tpu.md): Developed by [Google](https://www.google.com/) for optimizing TensorFlow Lite models on Edge TPUs, this model format ensures high-speed, efficient edge computing.
-- [TF.js](tfjs.md): Developed by [Google](https://www.google.com) to facilitate machine learning in browsers and Node.js, TF.js allows JavaScript-based deployment of ML models.
+- [TF.js](tfjs.md): Developed by [Google](https://www.google.com/) to facilitate machine learning in browsers and Node.js, TF.js allows JavaScript-based deployment of ML models.
- [PaddlePaddle](paddlepaddle.md): An open-source deep learning platform by [Baidu](https://www.baidu.com/), PaddlePaddle enables the efficient deployment of AI models and focuses on the scalability of industrial applications.
@@ -111,7 +111,7 @@ Let's collaborate to make the Ultralytics YOLO ecosystem more expansive and feat
### What is Ultralytics HUB, and how does it streamline the ML workflow?
-Ultralytics HUB is a cloud-based platform designed to make machine learning (ML) workflows for Ultralytics models seamless and efficient. By using this tool, you can easily upload datasets, train models, perform real-time tracking, and deploy YOLOv8 models without needing extensive coding skills. You can explore the key features on the [Ultralytics HUB](https://hub.ultralytics.com) page and get started quickly with our [Quickstart](https://docs.ultralytics.com/hub/quickstart/) guide.
+Ultralytics HUB is a cloud-based platform designed to make machine learning (ML) workflows for Ultralytics models seamless and efficient. By using this tool, you can easily upload datasets, train models, perform real-time tracking, and deploy YOLOv8 models without needing extensive coding skills. You can explore the key features on the [Ultralytics HUB](https://hub.ultralytics.com/) page and get started quickly with our [Quickstart](https://docs.ultralytics.com/hub/quickstart/) guide.
### How do I integrate Ultralytics YOLO models with Roboflow for dataset management?
diff --git a/docs/en/integrations/mlflow.md b/docs/en/integrations/mlflow.md
index 2b51bb47b1..23a8ad47fb 100644
--- a/docs/en/integrations/mlflow.md
+++ b/docs/en/integrations/mlflow.md
@@ -10,7 +10,7 @@ keywords: MLflow, Ultralytics YOLO, machine learning, experiment tracking, metri
## Introduction
-Experiment logging is a crucial aspect of machine learning workflows that enables tracking of various metrics, parameters, and artifacts. It helps to enhance model reproducibility, debug issues, and improve model performance. [Ultralytics](https://ultralytics.com) YOLO, known for its real-time object detection capabilities, now offers integration with [MLflow](https://mlflow.org/), an open-source platform for complete machine learning lifecycle management.
+Experiment logging is a crucial aspect of machine learning workflows that enables tracking of various metrics, parameters, and artifacts. It helps to enhance model reproducibility, debug issues, and improve model performance. [Ultralytics](https://www.ultralytics.com/) YOLO, known for its real-time object detection capabilities, now offers integration with [MLflow](https://mlflow.org/), an open-source platform for complete machine learning lifecycle management.
This documentation page is a comprehensive guide to setting up and utilizing the MLflow logging capabilities for your Ultralytics YOLO project.
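As a minimal sketch of the setup described in this guide, the logger can be toggled through the package settings (the training arguments below are arbitrary examples):

```python
from ultralytics import YOLO, settings

settings.update({"mlflow": True})  # enable the MLflow logger in Ultralytics settings
model = YOLO("yolov8n.pt")
model.train(data="coco8.yaml", epochs=3)  # run details are logged to MLflow
```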
diff --git a/docs/en/integrations/neural-magic.md b/docs/en/integrations/neural-magic.md
index 65e91ece81..7d4d5bed74 100644
--- a/docs/en/integrations/neural-magic.md
+++ b/docs/en/integrations/neural-magic.md
@@ -6,7 +6,7 @@ keywords: YOLOv8, DeepSparse, Neural Magic, model optimization, object detection
# Optimizing YOLOv8 Inferences with Neural Magic's DeepSparse Engine
-When deploying object detection models like [Ultralytics YOLOv8](https://ultralytics.com) on various hardware, you can bump into unique issues like optimization. This is where YOLOv8's integration with Neural Magic's DeepSparse Engine steps in. It transforms the way YOLOv8 models are executed and enables GPU-level performance directly on CPUs.
+When deploying object detection models like [Ultralytics YOLOv8](https://www.ultralytics.com/) on various hardware, you can run into unique challenges, such as optimizing inference performance. This is where YOLOv8's integration with Neural Magic's DeepSparse Engine steps in. It transforms the way YOLOv8 models are executed and enables GPU-level performance directly on CPUs.
This guide shows you how to deploy YOLOv8 using Neural Magic's DeepSparse, how to run inferences, and also how to benchmark performance to ensure it is optimized.
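As a rough sketch of the flow, assuming a YOLOv8 model already exported to ONNX and DeepSparse's YOLOv8 pipeline task (the file paths here are placeholders):

```python
from deepsparse import Pipeline

# Create a sparsity-aware inference pipeline from an exported ONNX model
pipeline = Pipeline.create(task="yolov8", model_path="yolov8n.onnx")
results = pipeline(images=["image.jpg"])  # CPU inference via DeepSparse
```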
diff --git a/docs/en/integrations/ray-tune.md b/docs/en/integrations/ray-tune.md
index a3a86dc874..55d77a64ed 100644
--- a/docs/en/integrations/ray-tune.md
+++ b/docs/en/integrations/ray-tune.md
@@ -10,7 +10,7 @@ Hyperparameter tuning is vital in achieving peak model performance by discoverin
## Accelerate Tuning with Ultralytics YOLOv8 and Ray Tune
-[Ultralytics YOLOv8](https://ultralytics.com) incorporates Ray Tune for hyperparameter tuning, streamlining the optimization of YOLOv8 model hyperparameters. With Ray Tune, you can utilize advanced search strategies, parallelism, and early stopping to expedite the tuning process.
+[Ultralytics YOLOv8](https://www.ultralytics.com/) incorporates Ray Tune for hyperparameter tuning, streamlining the optimization of YOLOv8 model hyperparameters. With Ray Tune, you can utilize advanced search strategies, parallelism, and early stopping to expedite the tuning process.
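A minimal sketch of kicking off a Ray Tune search from the Python API; the epoch budget is an arbitrary example, and the default search space is assumed:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
# use_ray=True hands the hyperparameter search off to Ray Tune
result_grid = model.tune(data="coco8.yaml", use_ray=True, epochs=10)
```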
### Ray Tune
diff --git a/docs/en/integrations/roboflow.md b/docs/en/integrations/roboflow.md
index e53a4841cb..a60081c83b 100644
--- a/docs/en/integrations/roboflow.md
+++ b/docs/en/integrations/roboflow.md
@@ -12,10 +12,10 @@ keywords: Roboflow, YOLOv8, data labeling, computer vision, model training, mode
Ultralytics offers two licensing options:
- - The [AGPL-3.0 License](https://github.com/ultralytics/ultralytics/blob/main/LICENSE), an [OSI-approved](https://opensource.org/licenses/) open-source license ideal for students and enthusiasts.
- - The [Enterprise License](https://ultralytics.com/license) for businesses seeking to incorporate our AI models into their products and services.
+ - The [AGPL-3.0 License](https://github.com/ultralytics/ultralytics/blob/main/LICENSE), an [OSI-approved](https://opensource.org/license) open-source license ideal for students and enthusiasts.
+ - The [Enterprise License](https://www.ultralytics.com/license) for businesses seeking to incorporate our AI models into their products and services.
- For more details see [Ultralytics Licensing](https://ultralytics.com/license).
+ For more details see [Ultralytics Licensing](https://www.ultralytics.com/license).
In this guide, we are going to showcase how to find, label, and organize data for use in training a custom Ultralytics YOLOv8 model. Use the table of contents below to jump directly to a specific section:
@@ -27,7 +27,7 @@ In this guide, we are going to showcase how to find, label, and organize data fo
- Upload custom YOLOv8 model weights for testing and deployment
- Gather Data for Training a Custom YOLOv8 Model
-Roboflow provides two services that can help you collect data for YOLOv8 models: [Universe](https://universe.roboflow.com/?ref=ultralytics) and [Collect](https://roboflow.com/collect?ref=ultralytics).
+Roboflow provides two services that can help you collect data for YOLOv8 models: [Universe](https://universe.roboflow.com/?ref=ultralytics) and [Collect](https://github.com/roboflow/roboflow-collect?ref=ultralytics).
Universe is an online repository with over 250,000 vision datasets totalling over 100 million images.
@@ -47,13 +47,13 @@ For YOLOv8, select "YOLOv8" as the export format:
-Universe also has a page that aggregates all [public fine-tuned YOLOv8 models uploaded to Roboflow](https://universe.roboflow.com/search?q=model:yolov8). You can use this page to explore pre-trained models you can use for testing or [for automated data labeling](https://docs.roboflow.com/annotate/use-roboflow-annotate/model-assisted-labeling) or to prototype with [Roboflow inference](https://roboflow.com/inference?ref=ultralytics).
+Universe also has a page that aggregates all [public fine-tuned YOLOv8 models uploaded to Roboflow](https://universe.roboflow.com/search?q=model%3Ayolov8&ref=ultralytics). You can use this page to explore pre-trained models you can use for testing or [for automated data labeling](https://docs.roboflow.com/annotate/use-roboflow-annotate/model-assisted-labeling?ref=ultralytics) or to prototype with [Roboflow inference](https://github.com/roboflow/inference?ref=ultralytics).
If you want to gather images yourself, try [Collect](https://github.com/roboflow/roboflow-collect), an open source project that allows you to automatically gather images using a webcam on the edge. You can use text or image prompts with Collect to instruct what data should be collected, allowing you to capture only the useful data you need to build your vision model.
## Upload, Convert and Label Data for YOLOv8 Format
-[Roboflow Annotate](https://docs.roboflow.com/annotate/use-roboflow-annotate) is an online annotation tool for use in labeling images for object detection, classification, and segmentation.
+[Roboflow Annotate](https://docs.roboflow.com/annotate/use-roboflow-annotate?ref=ultralytics) is an online annotation tool for labeling images for object detection, classification, and segmentation.
To label data for a YOLOv8 object detection, instance segmentation, or classification model, first create a project in Roboflow.
@@ -127,7 +127,7 @@ You can narrow your search to images with a particular tag using the "Tags" sele
-Before you start training a model with your dataset, we recommend using Roboflow [Health Check](https://docs.roboflow.com/datasets/dataset-health-check), a web tool that provides an insight into your dataset and how you can improve the dataset prior to training a vision model.
+Before you start training a model with your dataset, we recommend using Roboflow [Health Check](https://docs.roboflow.com/datasets/dataset-health-check?ref=ultralytics), a web tool that provides insights into your dataset and how you can improve it before training a vision model.
To use Health Check, click the "Health Check" sidebar link. A list of statistics will appear that show the average size of images in your dataset, class balance, a heatmap of where annotations are in your images, and more.
@@ -157,7 +157,7 @@ When your dataset version has been generated, you can export your data into a ra
-You are now ready to train YOLOv8 on a custom dataset. Follow this [written guide](https://blog.roboflow.com/how-to-train-yolov8-on-a-custom-dataset/) and [YouTube video](https://www.youtube.com/watch?v=wuZtUMEiKWY) for step-by-step instructions or refer to the [Ultralytics documentation](../modes/train.md).
+You are now ready to train YOLOv8 on a custom dataset. Follow this [written guide](https://blog.roboflow.com/how-to-train-yolov8-on-a-custom-dataset/?ref=ultralytics) and [YouTube video](https://www.youtube.com/watch?v=wuZtUMEiKWY) for step-by-step instructions or refer to the [Ultralytics documentation](../modes/train.md).
## Upload Custom YOLOv8 Model Weights for Testing and Deployment
@@ -178,7 +178,7 @@ dataset = project.version(VERSION).download("yolov8")
project.version(dataset.version).deploy(model_type="yolov8", model_path=f"{HOME}/runs/detect/train/")
```
-In this code, replace the project ID and version ID with the values for your account and project. [Learn how to retrieve your Roboflow API key](https://docs.roboflow.com/api-reference/authentication#retrieve-an-api-key).
+In this code, replace the project ID and version ID with the values for your account and project. [Learn how to retrieve your Roboflow API key](https://docs.roboflow.com/api-reference/authentication?ref=ultralytics#retrieve-an-api-key).
When you run the code above, you will be asked to authenticate. Then, your model will be uploaded and an API will be created for your project. This process can take up to 30 minutes to complete.
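For context, the authentication and download flow referenced here typically looks like the sketch below, assuming the `roboflow` package; the workspace and project identifiers are illustrative placeholders:

```python
import roboflow

rf = roboflow.Roboflow(api_key="YOUR_API_KEY")  # authenticate with your API key
project = rf.workspace("your-workspace").project("your-project")  # placeholders
dataset = project.version(1).download("yolov8")  # export in YOLOv8 format
```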
@@ -188,7 +188,7 @@ To test your model and find deployment instructions for supported SDKs, go to th
-You can also use your uploaded model as a [labeling assistant](https://docs.roboflow.com/annotate/use-roboflow-annotate/model-assisted-labeling). This feature uses your trained model to recommend annotations on images uploaded to Roboflow.
+You can also use your uploaded model as a [labeling assistant](https://docs.roboflow.com/annotate/use-roboflow-annotate/model-assisted-labeling?ref=ultralytics). This feature uses your trained model to recommend annotations on images uploaded to Roboflow.
## How to Evaluate YOLOv8 Models
@@ -227,9 +227,9 @@ You can use Vector Analysis to:
Want to learn more about using Roboflow for creating YOLOv8 models? The following resources may be helpful in your work.
- [Train YOLOv8 on a Custom Dataset](https://github.com/roboflow/notebooks/blob/main/notebooks/train-yolov8-object-detection-on-custom-dataset.ipynb): Follow our interactive notebook that shows you how to train a YOLOv8 model on a custom dataset.
-- [Autodistill](https://autodistill.github.io/autodistill/): Use large foundation vision models to label data for specific models. You can label images for use in training YOLOv8 classification, detection, and segmentation models with Autodistill.
-- [Supervision](https://roboflow.github.io/supervision/): A Python package with helpful utilities for use in working with computer vision models. You can use supervision to filter detections, compute confusion matrices, and more, all in a few lines of Python code.
-- [Roboflow Blog](https://blog.roboflow.com/): The Roboflow Blog features over 500 articles on computer vision, covering topics from how to train a YOLOv8 model to annotation best practices.
+- [Autodistill](https://docs.autodistill.com/): Use large foundation vision models to label data for specific models. You can label images for use in training YOLOv8 classification, detection, and segmentation models with Autodistill.
+- [Supervision](https://supervision.roboflow.com/?ref=ultralytics): A Python package with helpful utilities for working with computer vision models. You can use supervision to filter detections, compute confusion matrices, and more, all in a few lines of Python code.
+- [Roboflow Blog](https://blog.roboflow.com/?ref=ultralytics): The Roboflow Blog features over 500 articles on computer vision, covering topics from how to train a YOLOv8 model to annotation best practices.
- [Roboflow YouTube channel](https://www.youtube.com/@Roboflow): Browse dozens of in-depth computer vision guides on our YouTube channel, covering topics from training YOLOv8 models to automated image labeling.
## Project Showcase
@@ -250,7 +250,7 @@ Labeling data for YOLOv8 models using Roboflow is straightforward with Roboflow
### What services does Roboflow offer for collecting YOLOv8 training data?
-Roboflow provides two key services for collecting YOLOv8 training data: [Universe](https://universe.roboflow.com/?ref=ultralytics) and [Collect](https://roboflow.com/collect?ref=ultralytics). Universe offers access to over 250,000 vision datasets, while Collect helps you gather images using a webcam and automated prompts.
+Roboflow provides two key services for collecting YOLOv8 training data: [Universe](https://universe.roboflow.com/?ref=ultralytics) and [Collect](https://github.com/roboflow/roboflow-collect?ref=ultralytics). Universe offers access to over 250,000 vision datasets, while Collect helps you gather images using a webcam and automated prompts.
### How can I manage and analyze my YOLOv8 dataset using Roboflow?
diff --git a/docs/en/integrations/tensorboard.md b/docs/en/integrations/tensorboard.md
index f9f4916c7e..333a3085ec 100644
--- a/docs/en/integrations/tensorboard.md
+++ b/docs/en/integrations/tensorboard.md
@@ -6,7 +6,7 @@ keywords: YOLOv8, TensorBoard, model training, visualization, machine learning,
# Gain Visual Insights with YOLOv8's Integration with TensorBoard
-Understanding and fine-tuning computer vision models like [Ultralytics' YOLOv8](https://ultralytics.com) becomes more straightforward when you take a closer look at their training processes. Model training visualization helps with getting insights into the model's learning patterns, performance metrics, and overall behavior. YOLOv8's integration with TensorBoard makes this process of visualization and analysis easier and enables more efficient and informed adjustments to the model.
+Understanding and fine-tuning computer vision models like [Ultralytics' YOLOv8](https://www.ultralytics.com/) becomes more straightforward when you take a closer look at their training processes. Model training visualization helps with getting insights into the model's learning patterns, performance metrics, and overall behavior. YOLOv8's integration with TensorBoard makes this process of visualization and analysis easier and enables more efficient and informed adjustments to the model.
This guide covers how to use TensorBoard with YOLOv8. You'll learn about various visualizations, from tracking metrics to analyzing model graphs. These tools will help you understand your YOLOv8 model's performance better.
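A minimal sketch of the workflow this page documents, assuming the `tensorboard` key exists in the Ultralytics settings of the installed version:

```python
from ultralytics import YOLO, settings

settings.update({"tensorboard": True})  # enable TensorBoard event logging
model = YOLO("yolov8n.pt")
model.train(data="coco8.yaml", epochs=3)  # events land under the run directory
# Inspect afterwards with: tensorboard --logdir runs/detect/train
```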
diff --git a/docs/en/models/sam.md b/docs/en/models/sam.md
index b19b968019..f7fc410390 100644
--- a/docs/en/models/sam.md
+++ b/docs/en/models/sam.md
@@ -23,7 +23,7 @@ SAM's advanced design allows it to adapt to new image distributions and tasks wi
- **The SA-1B Dataset:** Introduced by the Segment Anything project, the SA-1B dataset features over 1 billion masks on 11 million images. As the largest segmentation dataset to date, it provides SAM with a diverse and large-scale training data source.
- **Zero-Shot Performance:** SAM displays outstanding zero-shot performance across various segmentation tasks, making it a ready-to-use tool for diverse applications with minimal need for prompt engineering.
-For an in-depth look at the Segment Anything Model and the SA-1B dataset, please visit the [Segment Anything website](https://segment-anything.com) and check out the research paper [Segment Anything](https://arxiv.org/abs/2304.02643).
+For an in-depth look at the Segment Anything Model and the SA-1B dataset, please visit the [Segment Anything website](https://segment-anything.com/) and check out the research paper [Segment Anything](https://arxiv.org/abs/2304.02643).
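A minimal inference sketch for the model described above, assuming the `sam_b.pt` weights; the image path and box prompt are illustrative:

```python
from ultralytics import SAM

model = SAM("sam_b.pt")
results = model("path/to/image.jpg", bboxes=[100, 100, 400, 400])  # box prompt
```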
## Available Models, Supported Tasks, and Operating Modes
diff --git a/docs/en/models/yolo-world.md b/docs/en/models/yolo-world.md
index e45f391529..b5521694df 100644
--- a/docs/en/models/yolo-world.md
+++ b/docs/en/models/yolo-world.md
@@ -6,7 +6,7 @@ keywords: YOLO-World, Ultralytics, open-vocabulary detection, YOLOv8, real-time
# YOLO-World Model
-The YOLO-World Model introduces an advanced, real-time [Ultralytics](https://ultralytics.com) [YOLOv8](yolov8.md)-based approach for Open-Vocabulary Detection tasks. This innovation enables the detection of any object within an image based on descriptive texts. By significantly lowering computational demands while preserving competitive performance, YOLO-World emerges as a versatile tool for numerous vision-based applications.
+The YOLO-World Model introduces an advanced, real-time [Ultralytics](https://www.ultralytics.com/) [YOLOv8](yolov8.md)-based approach for Open-Vocabulary Detection tasks. This innovation enables the detection of any object within an image based on descriptive texts. By significantly lowering computational demands while preserving competitive performance, YOLO-World emerges as a versatile tool for numerous vision-based applications.
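A minimal open-vocabulary sketch, assuming the `yolov8s-world.pt` weights; the prompt classes and image path are illustrative:

```python
from ultralytics import YOLOWorld

model = YOLOWorld("yolov8s-world.pt")
model.set_classes(["person", "bus"])  # define the vocabulary at runtime
results = model.predict("path/to/image.jpg")
```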
@@ -275,7 +275,7 @@ This approach provides a powerful means of customizing state-of-the-art object d
| Dataset | Type | Samples | Boxes | Annotation Files |
| ----------------------------------------------------------------- | --------- | ------- | ----- | ------------------------------------------------------------------------------------------------------------------------------------------ |
| [Objects365v1](https://opendatalab.com/OpenDataLab/Objects365_v1) | Detection | 609k | 9621k | [objects365_train.json](https://opendatalab.com/OpenDataLab/Objects365_v1) |
-| [GQA](https://nlp.stanford.edu/data/gqa/images.zip) | Grounding | 621k | 3681k | [final_mixed_train_no_coco.json](https://huggingface.co/GLIPModel/GLIP/blob/main/mdetr_annotations/final_mixed_train_no_coco.json) |
+| [GQA](https://downloads.cs.stanford.edu/nlp/data/gqa/images.zip) | Grounding | 621k | 3681k | [final_mixed_train_no_coco.json](https://huggingface.co/GLIPModel/GLIP/blob/main/mdetr_annotations/final_mixed_train_no_coco.json) |
| [Flickr30k](https://shannon.cs.illinois.edu/DenotationGraph/) | Grounding | 149k | 641k | [final_flickr_separateGT_train.json](https://huggingface.co/GLIPModel/GLIP/blob/main/mdetr_annotations/final_flickr_separateGT_train.json) |
- Val data
diff --git a/docs/en/models/yolov10.md b/docs/en/models/yolov10.md
index 482c5cda39..fb99f4d1ad 100644
--- a/docs/en/models/yolov10.md
+++ b/docs/en/models/yolov10.md
@@ -6,7 +6,7 @@ keywords: YOLOv10, real-time object detection, NMS-free, deep learning, Tsinghua
# YOLOv10: Real-Time End-to-End Object Detection
-YOLOv10, built on the [Ultralytics](https://ultralytics.com) [Python package](https://pypi.org/project/ultralytics/) by researchers at [Tsinghua University](https://www.tsinghua.edu.cn/en/), introduces a new approach to real-time object detection, addressing both the post-processing and model architecture deficiencies found in previous YOLO versions. By eliminating non-maximum suppression (NMS) and optimizing various model components, YOLOv10 achieves state-of-the-art performance with significantly reduced computational overhead. Extensive experiments demonstrate its superior accuracy-latency trade-offs across multiple model scales.
+YOLOv10, built on the [Ultralytics](https://www.ultralytics.com/) [Python package](https://pypi.org/project/ultralytics/) by researchers at [Tsinghua University](https://www.tsinghua.edu.cn/en/), introduces a new approach to real-time object detection, addressing both the post-processing and model architecture deficiencies found in previous YOLO versions. By eliminating non-maximum suppression (NMS) and optimizing various model components, YOLOv10 achieves state-of-the-art performance with significantly reduced computational overhead. Extensive experiments demonstrate its superior accuracy-latency trade-offs across multiple model scales.
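A minimal sketch, assuming YOLOv10 weights are supported by the installed `ultralytics` version; the image path is illustrative. Note that no separate NMS step is needed at inference time:

```python
from ultralytics import YOLO

model = YOLO("yolov10n.pt")
results = model.predict("path/to/image.jpg")  # NMS-free end-to-end inference
```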
@@ -223,7 +223,7 @@ YOLOv10 sets a new standard in real-time object detection by addressing the shor
## Citations and Acknowledgements
-We would like to acknowledge the YOLOv10 authors from [Tsinghua University](https://www.tsinghua.edu.cn/en/) for their extensive research and significant contributions to the [Ultralytics](https://ultralytics.com) framework:
+We would like to acknowledge the YOLOv10 authors from [Tsinghua University](https://www.tsinghua.edu.cn/en/) for their extensive research and significant contributions to the [Ultralytics](https://www.ultralytics.com/) framework:
!!! Quote ""
diff --git a/docs/en/models/yolov5.md b/docs/en/models/yolov5.md
index 9927d06c5d..57b562423a 100644
--- a/docs/en/models/yolov5.md
+++ b/docs/en/models/yolov5.md
@@ -111,7 +111,7 @@ If you use YOLOv5 or YOLOv5u in your research, please cite the Ultralytics YOLOv
}
```
-Please note that YOLOv5 models are provided under [AGPL-3.0](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) and [Enterprise](https://ultralytics.com/license) licenses.
+Please note that YOLOv5 models are provided under [AGPL-3.0](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) and [Enterprise](https://www.ultralytics.com/license) licenses.
## FAQ
diff --git a/docs/en/models/yolov8.md b/docs/en/models/yolov8.md
index 72ee275099..aecbc157c0 100644
--- a/docs/en/models/yolov8.md
+++ b/docs/en/models/yolov8.md
@@ -183,7 +183,7 @@ If you use the YOLOv8 model or any other software from this repository in your w
}
```
-Please note that the DOI is pending and will be added to the citation once it is available. YOLOv8 models are provided under [AGPL-3.0](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) and [Enterprise](https://ultralytics.com/license) licenses.
+Please note that the DOI is pending and will be added to the citation once it is available. YOLOv8 models are provided under [AGPL-3.0](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) and [Enterprise](https://www.ultralytics.com/license) licenses.
## FAQ
diff --git a/docs/en/models/yolov9.md b/docs/en/models/yolov9.md
index 3cefff6f25..2a32176086 100644
--- a/docs/en/models/yolov9.md
+++ b/docs/en/models/yolov9.md
@@ -6,7 +6,7 @@ keywords: YOLOv9, object detection, real-time, PGI, GELAN, deep learning, MS COC
# YOLOv9: A Leap Forward in Object Detection Technology
-YOLOv9 marks a significant advancement in real-time object detection, introducing groundbreaking techniques such as Programmable Gradient Information (PGI) and the Generalized Efficient Layer Aggregation Network (GELAN). This model demonstrates remarkable improvements in efficiency, accuracy, and adaptability, setting new benchmarks on the MS COCO dataset. The YOLOv9 project, while developed by a separate open-source team, builds upon the robust codebase provided by [Ultralytics](https://ultralytics.com) [YOLOv5](yolov5.md), showcasing the collaborative spirit of the AI research community.
+YOLOv9 marks a significant advancement in real-time object detection, introducing groundbreaking techniques such as Programmable Gradient Information (PGI) and the Generalized Efficient Layer Aggregation Network (GELAN). This model demonstrates remarkable improvements in efficiency, accuracy, and adaptability, setting new benchmarks on the MS COCO dataset. The YOLOv9 project, while developed by a separate open-source team, builds upon the robust codebase provided by [Ultralytics](https://www.ultralytics.com/) [YOLOv5](yolov5.md), showcasing the collaborative spirit of the AI research community.
## Supported Environments
-Ultralytics provides a range of ready-to-use environments, each pre-installed with essential dependencies such as [CUDA](https://developer.nvidia.com/cuda), [CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/), and [PyTorch](https://pytorch.org/), to kickstart your projects.
+Ultralytics provides a range of ready-to-use environments, each pre-installed with essential dependencies such as [CUDA](https://developer.nvidia.com/cuda-zone), [CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/), and [PyTorch](https://pytorch.org/), to kickstart your projects.
- **Free GPU Notebooks**:
- **Google Cloud**: [GCP Quickstart Guide](../environments/google_cloud_quickstart_tutorial.md)
diff --git a/docs/en/yolov5/tutorials/test_time_augmentation.md b/docs/en/yolov5/tutorials/test_time_augmentation.md
index 17c9dd5802..d2b959171c 100644
--- a/docs/en/yolov5/tutorials/test_time_augmentation.md
+++ b/docs/en/yolov5/tutorials/test_time_augmentation.md
@@ -125,7 +125,7 @@ Done. (0.156s)
### PyTorch Hub TTA
-TTA is automatically integrated into all [YOLOv5 PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5) models, and can be accessed by passing `augment=True` at inference time.
+TTA is automatically integrated into all [YOLOv5 PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/) models, and can be accessed by passing `augment=True` at inference time.
```python
import torch
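# A minimal TTA sketch, assuming the standard YOLOv5 hub entrypoint; the image
# URL is illustrative.
model = torch.hub.load("ultralytics/yolov5", "yolov5s")  # load pretrained model
results = model("https://ultralytics.com/images/zidane.jpg", augment=True)  # TTA
results.print()  # summarize detections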
@@ -149,7 +149,7 @@ You can customize the TTA ops applied in the YOLOv5 `forward_augment()` method [
## Supported Environments
-Ultralytics provides a range of ready-to-use environments, each pre-installed with essential dependencies such as [CUDA](https://developer.nvidia.com/cuda), [CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/), and [PyTorch](https://pytorch.org/), to kickstart your projects.
+Ultralytics provides a range of ready-to-use environments, each pre-installed with essential dependencies such as [CUDA](https://developer.nvidia.com/cuda-zone), [CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/), and [PyTorch](https://pytorch.org/), to kickstart your projects.
- **Free GPU Notebooks**:
- **Google Cloud**: [GCP Quickstart Guide](../environments/google_cloud_quickstart_tutorial.md)
diff --git a/docs/en/yolov5/tutorials/train_custom_data.md b/docs/en/yolov5/tutorials/train_custom_data.md
index 80dea1be45..69888785cd 100644
--- a/docs/en/yolov5/tutorials/train_custom_data.md
+++ b/docs/en/yolov5/tutorials/train_custom_data.md
@@ -29,10 +29,10 @@ Creating a custom model to detect your objects is an iterative process of collec
Ultralytics offers two licensing options:
- - The [AGPL-3.0 License](https://github.com/ultralytics/ultralytics/blob/main/LICENSE), an [OSI-approved](https://opensource.org/licenses/) open-source license ideal for students and enthusiasts.
- - The [Enterprise License](https://ultralytics.com/license) for businesses seeking to incorporate our AI models into their products and services.
+ - The [AGPL-3.0 License](https://github.com/ultralytics/ultralytics/blob/main/LICENSE), an [OSI-approved](https://opensource.org/license) open-source license ideal for students and enthusiasts.
+ - The [Enterprise License](https://www.ultralytics.com/license) for businesses seeking to incorporate our AI models into their products and services.
- For more details see [Ultralytics Licensing](https://ultralytics.com/license).
+ For more details see [Ultralytics Licensing](https://www.ultralytics.com/license).
YOLOv5 models must be trained on labelled data in order to learn classes of objects in that data. There are two options for creating your dataset before you start training:
@@ -209,7 +209,7 @@ Once your model is trained you can use your best checkpoint `best.pt` to:
## Supported Environments
-Ultralytics provides a range of ready-to-use environments, each pre-installed with essential dependencies such as [CUDA](https://developer.nvidia.com/cuda), [CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/), and [PyTorch](https://pytorch.org/), to kickstart your projects.
+Ultralytics provides a range of ready-to-use environments, each pre-installed with essential dependencies such as [CUDA](https://developer.nvidia.com/cuda-zone), [CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/), and [PyTorch](https://pytorch.org/), to kickstart your projects.
- **Free GPU Notebooks**:
- **Google Cloud**: [GCP Quickstart Guide](../environments/google_cloud_quickstart_tutorial.md)
@@ -269,6 +269,6 @@ To convert annotated data to YOLOv5 format using Roboflow:
Ultralytics offers two licensing options:
- **AGPL-3.0 License**: An open-source license suitable for non-commercial use, ideal for students and enthusiasts.
-- **Enterprise License**: Tailored for businesses seeking to integrate YOLOv5 into commercial products and services. For detailed information, visit our [Licensing page](https://ultralytics.com/license).
+- **Enterprise License**: Tailored for businesses seeking to integrate YOLOv5 into commercial products and services. For detailed information, visit our [Licensing page](https://www.ultralytics.com/license).
-For more details, refer to our guide on [Ultralytics Licensing](https://ultralytics.com/license).
+For more details, refer to our guide on [Ultralytics Licensing](https://www.ultralytics.com/license).
diff --git a/docs/en/yolov5/tutorials/transfer_learning_with_frozen_layers.md b/docs/en/yolov5/tutorials/transfer_learning_with_frozen_layers.md
index be779d4f23..be6a087015 100644
--- a/docs/en/yolov5/tutorials/transfer_learning_with_frozen_layers.md
+++ b/docs/en/yolov5/tutorials/transfer_learning_with_frozen_layers.md
@@ -139,7 +139,7 @@ Interestingly, the more modules are frozen the less GPU memory is required to tr
## Supported Environments
-Ultralytics provides a range of ready-to-use environments, each pre-installed with essential dependencies such as [CUDA](https://developer.nvidia.com/cuda), [CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/), and [PyTorch](https://pytorch.org/), to kickstart your projects.
+Ultralytics provides a range of ready-to-use environments, each pre-installed with essential dependencies such as [CUDA](https://developer.nvidia.com/cuda-zone), [CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/), and [PyTorch](https://pytorch.org/), to kickstart your projects.
- **Free GPU Notebooks**:
- **Google Cloud**: [GCP Quickstart Guide](../environments/google_cloud_quickstart_tutorial.md)
diff --git a/ultralytics/cfg/models/README.md b/ultralytics/cfg/models/README.md
index d699257dd4..274a594eb8 100644
--- a/ultralytics/cfg/models/README.md
+++ b/ultralytics/cfg/models/README.md
@@ -1,6 +1,6 @@
## Models
-Welcome to the [Ultralytics](https://ultralytics.com) Models directory! Here you will find a wide variety of pre-configured model configuration files (`*.yaml`s) that can be used to create custom YOLO models. The models in this directory have been expertly crafted and fine-tuned by the Ultralytics team to provide the best performance for a wide range of object detection and image segmentation tasks.
+Welcome to the [Ultralytics](https://www.ultralytics.com/) Models directory! Here you will find a wide variety of pre-configured model configuration files (`*.yaml`s) that can be used to create custom YOLO models. The models in this directory have been expertly crafted and fine-tuned by the Ultralytics team to provide the best performance for a wide range of object detection and image segmentation tasks.
These model configurations cover a wide range of scenarios, from simple object detection to more complex tasks like instance segmentation and object tracking. They are also designed to run efficiently on a variety of hardware platforms, from CPUs to GPUs. Whether you are a seasoned machine learning practitioner or just getting started with YOLO, this directory provides a great starting point for your custom model development needs.
From 41dfd65cc16fcfb5fb164179b6d367dcac787680 Mon Sep 17 00:00:00 2001
From: Burhan <62214284+Burhan-Q@users.noreply.github.com>
Date: Thu, 5 Sep 2024 16:50:14 -0400
Subject: [PATCH 16/17] Allows any PyTorch install except `torch==2.4.0` on
Windows (#16019)
Co-authored-by: Glenn Jocher
---
pyproject.toml | 2 +-
ultralytics/utils/torch_utils.py | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index 00366df58f..03e557e941 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -71,8 +71,8 @@ dependencies = [
"pyyaml>=5.3.1",
"requests>=2.23.0",
"scipy>=1.4.1",
- "torch>=1.8.0,<2.4.0; sys_platform == 'win32'", # Windows CPU errors https://github.com/ultralytics/ultralytics/issues/15049
"torch>=1.8.0",
+ "torch>=1.8.0,!=2.4.0; sys_platform == 'win32'", # Windows CPU errors w/ 2.4.0 https://github.com/ultralytics/ultralytics/issues/15049
"torchvision>=0.9.0",
"tqdm>=4.64.0", # progress bars
"psutil", # system utilization
diff --git a/ultralytics/utils/torch_utils.py b/ultralytics/utils/torch_utils.py
index 16bcddadd0..7cde9dc7a8 100644
--- a/ultralytics/utils/torch_utils.py
+++ b/ultralytics/utils/torch_utils.py
@@ -45,9 +45,9 @@ TORCHVISION_0_10 = check_version(TORCHVISION_VERSION, "0.10.0")
TORCHVISION_0_11 = check_version(TORCHVISION_VERSION, "0.11.0")
TORCHVISION_0_13 = check_version(TORCHVISION_VERSION, "0.13.0")
TORCHVISION_0_18 = check_version(TORCHVISION_VERSION, "0.18.0")
-if WINDOWS and torch.__version__[:3] == "2.4": # reject all versions of 2.4 on Windows
+if WINDOWS and check_version(torch.__version__, "==2.4.0"): # reject version 2.4.0 on Windows
LOGGER.warning(
- "WARNING ⚠️ Known issue with torch>=2.4.0 on Windows with CPU, recommend downgrading to torch<=2.3.1 to resolve "
+ "WARNING ⚠️ Known issue with torch==2.4.0 on Windows with CPU, recommend upgrading to torch>=2.4.1 to resolve "
"https://github.com/ultralytics/ultralytics/issues/15049"
)
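The switch from the string-prefix check to `check_version` matters because the old `torch.__version__[:3] == "2.4"` test also rejected fixed releases like 2.4.1. A small sketch of the new exact-match semantics:

```python
from ultralytics.utils.checks import check_version

print(check_version("2.4.0", "==2.4.0"))  # True  -> warning fires
print(check_version("2.4.1", "==2.4.0"))  # False -> patched release passes
```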
From 4673fae31d7f39902250982a33a67fa651cc7b5d Mon Sep 17 00:00:00 2001
From: Muhammad Rizwan Munawar
Date: Fri, 6 Sep 2024 02:00:55 +0500
Subject: [PATCH 17/17] `ultralytics 8.2.88` Update `distance-calculation` to
pixels (#15984)
Co-authored-by: UltralyticsAssistant
Co-authored-by: Glenn Jocher
---
docs/en/guides/distance-calculation.md | 19 ++---
ultralytics/__init__.py | 2 +-
ultralytics/solutions/distance_calculation.py | 75 +++++--------------
ultralytics/utils/plotting.py | 34 ++++-----
4 files changed, 43 insertions(+), 87 deletions(-)
diff --git a/docs/en/guides/distance-calculation.md b/docs/en/guides/distance-calculation.md
index dbc15a4a2a..761d4ea9cf 100644
--- a/docs/en/guides/distance-calculation.md
+++ b/docs/en/guides/distance-calculation.md
@@ -30,8 +30,7 @@ Measuring the gap between two objects is known as distance calculation within a
## Advantages of Distance Calculation?
- **Localization Precision:** Enhances accurate spatial positioning in computer vision tasks.
-- **Size Estimation:** Allows estimation of physical sizes for better contextual understanding.
-- **Scene Understanding:** Contributes to a 3D understanding of the environment for improved decision-making.
+- **Size Estimation:** Allows estimation of object size for better contextual understanding.
???+ tip "Distance Calculation"
@@ -85,14 +84,13 @@ Measuring the gap between two objects is known as distance calculation within a
### Arguments `DistanceCalculation()`
-| `Name` | `Type` | `Default` | Description |
-| ------------------ | ------- | --------------- | --------------------------------------------------------- |
-| `names` | `dict` | `None` | Dictionary of classes names. |
-| `pixels_per_meter` | `int` | `10` | Conversion factor from pixels to meters. |
-| `view_img` | `bool` | `False` | Flag to indicate if the video stream should be displayed. |
-| `line_thickness` | `int` | `2` | Thickness of the lines drawn on the image. |
-| `line_color` | `tuple` | `(255, 255, 0)` | Color of the lines drawn on the image (BGR format). |
-| `centroid_color` | `tuple` | `(255, 0, 255)` | Color of the centroids drawn (BGR format). |
+| `Name` | `Type` | `Default` | Description |
+| ---------------- | ------- | --------------- | --------------------------------------------------------- |
+| `names`          | `dict`  | `None`          | Dictionary of class names.                                 |
+| `view_img` | `bool` | `False` | Flag to indicate if the video stream should be displayed. |
+| `line_thickness` | `int` | `2` | Thickness of the lines drawn on the image. |
+| `line_color` | `tuple` | `(255, 255, 0)` | Color of the lines drawn on the image (BGR format). |
+| `centroid_color` | `tuple` | `(255, 0, 255)` | Color of the centroids drawn (BGR format). |
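Tying these arguments together, a minimal usage sketch matching the constructor in this patch; the weights and video path are illustrative:

```python
import cv2
from ultralytics import YOLO
from ultralytics.solutions.distance_calculation import DistanceCalculation

model = YOLO("yolov8n.pt")
dist = DistanceCalculation(names=model.names, view_img=True)

cap = cv2.VideoCapture("path/to/video.mp4")
while cap.isOpened():
    ok, im0 = cap.read()
    if not ok:
        break
    tracks = model.track(im0, persist=True, show=False)  # track objects
    im0 = dist.start_process(im0, tracks)  # mouse-select two boxes to measure
cap.release()
cv2.destroyAllWindows()
```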
### Arguments `model.track`
@@ -133,7 +131,6 @@ To delete points drawn during distance calculation with Ultralytics YOLOv8, you
The key arguments for initializing the `DistanceCalculation` class in Ultralytics YOLOv8 include:
- `names`: Dictionary mapping class indices to class names.
-- `pixels_per_meter`: Conversion factor from pixels to meters.
- `view_img`: Flag to indicate if the video stream should be displayed.
- `line_thickness`: Thickness of the lines drawn on the image.
- `line_color`: Color of the lines drawn on the image (BGR format).
diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py
index 37925ac9e7..c5d3a82d51 100644
--- a/ultralytics/__init__.py
+++ b/ultralytics/__init__.py
@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
-__version__ = "8.2.87"
+__version__ = "8.2.88"
import os
diff --git a/ultralytics/solutions/distance_calculation.py b/ultralytics/solutions/distance_calculation.py
index 35a791ca5f..dccd1687c6 100644
--- a/ultralytics/solutions/distance_calculation.py
+++ b/ultralytics/solutions/distance_calculation.py
@@ -14,18 +14,16 @@ class DistanceCalculation:
def __init__(
self,
names,
- pixels_per_meter=10,
view_img=False,
line_thickness=2,
- line_color=(255, 255, 0),
- centroid_color=(255, 0, 255),
+ line_color=(255, 0, 255),
+ centroid_color=(104, 31, 17),
):
"""
Initializes the DistanceCalculation class with the given parameters.
Args:
names (dict): Dictionary of class names.
- pixels_per_meter (int, optional): Conversion factor from pixels to meters. Defaults to 10.
view_img (bool, optional): Flag to indicate if the video stream should be displayed. Defaults to False.
line_thickness (int, optional): Thickness of the lines drawn on the image. Defaults to 2.
-line_color (tuple, optional): Color of the lines drawn on the image (BGR format). Defaults to (255, 255, 0).
+line_color (tuple, optional): Color of the lines drawn on the image (BGR format). Defaults to (255, 0, 255).
@@ -39,7 +37,6 @@ class DistanceCalculation:
self.centroid_color = centroid_color
# Prediction & tracking information
- self.clss = None
self.names = names
self.boxes = None
self.line_thickness = line_thickness
@@ -47,7 +44,6 @@ class DistanceCalculation:
# Distance calculation information
self.centroids = []
- self.pixel_per_meter = pixels_per_meter
# Mouse event information
self.left_mouse_count = 0
@@ -55,6 +51,7 @@ class DistanceCalculation:
# Check if environment supports imshow
self.env_check = check_imshow(warn=True)
+ self.window_name = "Ultralytics Solutions"
def mouse_event_for_distance(self, event, x, y, flags, param):
"""
@@ -78,46 +75,6 @@ class DistanceCalculation:
self.selected_boxes = {}
self.left_mouse_count = 0
- def extract_tracks(self, tracks):
- """
- Extracts tracking results from the provided data.
-
- Args:
- tracks (list): List of tracks obtained from the object tracking process.
- """
- self.boxes = tracks[0].boxes.xyxy.cpu()
- self.clss = tracks[0].boxes.cls.cpu().tolist()
- self.trk_ids = tracks[0].boxes.id.int().cpu().tolist()
-
- @staticmethod
- def calculate_centroid(box):
- """
- Calculates the centroid of a bounding box.
-
- Args:
- box (list): Bounding box coordinates [x1, y1, x2, y2].
-
- Returns:
- (tuple): Centroid coordinates (x, y).
- """
- return int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2)
-
- def calculate_distance(self, centroid1, centroid2):
- """
- Calculates the distance between two centroids.
-
- Args:
- centroid1 (tuple): Coordinates of the first centroid (x, y).
- centroid2 (tuple): Coordinates of the second centroid (x, y).
-
- Returns:
- (tuple): Distance in meters and millimeters.
- """
- pixel_distance = math.sqrt((centroid1[0] - centroid2[0]) ** 2 + (centroid1[1] - centroid2[1]) ** 2)
- distance_m = pixel_distance / self.pixel_per_meter
- distance_mm = distance_m * 1000
- return distance_m, distance_mm
-
def start_process(self, im0, tracks):
"""
Processes the video frame and calculates the distance between two bounding boxes.
@@ -135,10 +92,13 @@ class DistanceCalculation:
self.display_frames()
return im0
- self.extract_tracks(tracks)
+ self.boxes = tracks[0].boxes.xyxy.cpu()
+ clss = tracks[0].boxes.cls.cpu().tolist()
+ self.trk_ids = tracks[0].boxes.id.int().cpu().tolist()
+
self.annotator = Annotator(self.im0, line_width=self.line_thickness)
- for box, cls, track_id in zip(self.boxes, self.clss, self.trk_ids):
+ for box, cls, track_id in zip(self.boxes, clss, self.trk_ids):
self.annotator.box_label(box, color=colors(int(cls), True), label=self.names[int(cls)])
if len(self.selected_boxes) == 2:
@@ -147,12 +107,15 @@ class DistanceCalculation:
self.selected_boxes[track_id] = box
if len(self.selected_boxes) == 2:
- self.centroids = [self.calculate_centroid(self.selected_boxes[trk_id]) for trk_id in self.selected_boxes]
-
- distance_m, distance_mm = self.calculate_distance(self.centroids[0], self.centroids[1])
- self.annotator.plot_distance_and_line(
- distance_m, distance_mm, self.centroids, self.line_color, self.centroid_color
+            # Store centroids of the two user-selected boxes
+ self.centroids.extend(
+ [[int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2)] for box in self.selected_boxes.values()]
+ )
+            # Calculate the Euclidean distance between the two centroids in pixels
+ pixels_distance = math.sqrt(
+ (self.centroids[0][0] - self.centroids[1][0]) ** 2 + (self.centroids[0][1] - self.centroids[1][1]) ** 2
)
+ self.annotator.plot_distance_and_line(pixels_distance, self.centroids, self.line_color, self.centroid_color)
self.centroids = []
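The expression above is the plain Euclidean distance between the two centroids; `math.hypot` is an equivalent spelling. A standalone check with illustrative centroids:

```python
import math

c1, c2 = (100, 200), (340, 160)  # illustrative centroid coordinates (x, y)
d = math.sqrt((c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2)
assert math.isclose(d, math.hypot(c1[0] - c2[0], c1[1] - c2[1]))
print(f"{d:.2f} px")
```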
@@ -163,9 +126,9 @@ class DistanceCalculation:
def display_frames(self):
"""Displays the current frame with annotations."""
- cv2.namedWindow("Ultralytics Distance Estimation")
- cv2.setMouseCallback("Ultralytics Distance Estimation", self.mouse_event_for_distance)
- cv2.imshow("Ultralytics Distance Estimation", self.im0)
+ cv2.namedWindow(self.window_name)
+ cv2.setMouseCallback(self.window_name, self.mouse_event_for_distance)
+ cv2.imshow(self.window_name, self.im0)
if cv2.waitKey(1) & 0xFF == ord("q"):
return
diff --git a/ultralytics/utils/plotting.py b/ultralytics/utils/plotting.py
index 5fc7d0e741..dfaa21490a 100644
--- a/ultralytics/utils/plotting.py
+++ b/ultralytics/utils/plotting.py
@@ -756,39 +756,35 @@ class Annotator:
self.im, label, (int(mask[0][0]) - text_size[0] // 2, int(mask[0][1])), 0, self.sf, txt_color, self.tf
)
- def plot_distance_and_line(self, distance_m, distance_mm, centroids, line_color, centroid_color):
+ def plot_distance_and_line(self, pixels_distance, centroids, line_color, centroid_color):
"""
Plot the distance and line on frame.
Args:
- distance_m (float): Distance between two bbox centroids in meters.
- distance_mm (float): Distance between two bbox centroids in millimeters.
+ pixels_distance (float): Pixels distance between two bbox centroids.
centroids (list): Bounding box centroids data.
line_color (RGB): Distance line color.
centroid_color (RGB): Bounding box centroid color.
"""
- (text_width_m, text_height_m), _ = cv2.getTextSize(f"Distance M: {distance_m:.2f}m", 0, self.sf, self.tf)
- cv2.rectangle(self.im, (15, 25), (15 + text_width_m + 10, 25 + text_height_m + 20), line_color, -1)
- cv2.putText(
- self.im,
- f"Distance M: {distance_m:.2f}m",
- (20, 50),
- 0,
- self.sf,
- centroid_color,
- self.tf,
- cv2.LINE_AA,
+ # Get the text size
+        (text_width, text_height), _ = cv2.getTextSize(
+            f"Pixels Distance: {pixels_distance:.2f}", 0, self.sf, self.tf
)
- (text_width_mm, text_height_mm), _ = cv2.getTextSize(f"Distance MM: {distance_mm:.2f}mm", 0, self.sf, self.tf)
- cv2.rectangle(self.im, (15, 75), (15 + text_width_mm + 10, 75 + text_height_mm + 20), line_color, -1)
+ # Define corners with 10-pixel margin and draw rectangle
+ top_left = (15, 25)
+        bottom_right = (15 + text_width + 20, 25 + text_height + 20)
+ cv2.rectangle(self.im, top_left, bottom_right, centroid_color, -1)
+
+ # Calculate the position for the text with a 10-pixel margin and draw text
+        text_position = (top_left[0] + 10, top_left[1] + text_height + 10)
cv2.putText(
self.im,
- f"Distance MM: {distance_mm:.2f}mm",
- (20, 100),
+ f"Pixels Distance: {pixels_distance:.2f}",
+ text_position,
0,
self.sf,
- centroid_color,
+ (255, 255, 255),
self.tf,
cv2.LINE_AA,
)
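The measure-pad-draw pattern in this hunk, shown in isolation; the label, colors, scale, and thickness below are illustrative:

```python
import cv2
import numpy as np

im = np.zeros((240, 640, 3), dtype=np.uint8)  # blank frame to draw on
label, sf, tf = "Pixels Distance: 123.45", 1.0, 2
(w, h), _ = cv2.getTextSize(label, 0, sf, tf)  # measure the rendered text
top_left = (15, 25)
bottom_right = (15 + w + 20, 25 + h + 20)  # 10 px margin on each side
cv2.rectangle(im, top_left, bottom_right, (104, 31, 17), -1)  # filled background
text_pos = (top_left[0] + 10, top_left[1] + h + 10)
cv2.putText(im, label, text_pos, 0, sf, (255, 255, 255), tf, cv2.LINE_AA)
cv2.imwrite("label_box.png", im)
```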