diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml
index 5ca3abefba..7e0dadc80a 100644
--- a/.github/workflows/cla.yml
+++ b/.github/workflows/cla.yml
@@ -30,7 +30,7 @@ jobs:
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          # Must be repository secret PAT
-          PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
+          PERSONAL_ACCESS_TOKEN: ${{ secrets._GITHUB_TOKEN }}
        with:
          path-to-signatures: "signatures/version1/cla.json"
          path-to-document: "https://docs.ultralytics.com/help/CLA" # CLA document
diff --git a/.github/workflows/docker.yaml b/.github/workflows/docker.yaml
index 0f0fe5e942..8d9f749e19 100644
--- a/.github/workflows/docker.yaml
+++ b/.github/workflows/docker.yaml
@@ -182,8 +182,9 @@ jobs:
    steps:
      - name: Trigger Additional GitHub Actions
        env:
-          GH_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
+          GH_TOKEN: ${{ secrets._GITHUB_TOKEN }}
        run: |
+          sleep 60
          gh workflow run deploy_cloud_run.yml \
            --repo ultralytics/assistant \
            --ref main
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index 02bc506a14..5c8baafc0a 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -34,7 +34,7 @@ jobs:
        uses: actions/checkout@v4
        with:
          repository: ${{ github.event.pull_request.head.repo.full_name || github.repository }}
-          token: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}
+          token: ${{ secrets.GITHUB_TOKEN }}
          ref: ${{ github.head_ref || github.ref }}
          fetch-depth: 0
      - name: Set up Python
@@ -94,5 +94,5 @@ jobs:
        else
          LATEST_HASH=$(git rev-parse --short=7 HEAD)
          git commit -m "Update Docs for 'ultralytics ${{ steps.check_pypi.outputs.version }} - $LATEST_HASH'"
-          git push https://${{ secrets.PERSONAL_ACCESS_TOKEN }}@github.com/ultralytics/docs.git gh-pages
+          git push https://${{ secrets._GITHUB_TOKEN }}@github.com/ultralytics/docs.git gh-pages
        fi
diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml
index 516450f876..f1e6ba908e 100644
--- a/.github/workflows/format.yml
+++ b/.github/workflows/format.yml
@@ -20,15 +20,14 @@ jobs:
      - name: Run Ultralytics Formatting
        uses: ultralytics/actions@main
        with:
-          token: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} # note GITHUB_TOKEN automatically generated
+          token: ${{ secrets._GITHUB_TOKEN }} # note GITHUB_TOKEN automatically generated
          labels: true # autolabel issues and PRs
          python: true # format Python code and docstrings
          prettier: true # format YAML, JSON, Markdown and CSS
          spelling: true # check spelling
          links: false # check broken links
          summary: true # print PR summary with GPT4o (requires 'openai_api_key')
-          openai_azure_api_key: ${{ secrets.OPENAI_AZURE_API_KEY }}
-          openai_azure_endpoint: ${{ secrets.OPENAI_AZURE_ENDPOINT }}
+          openai_api_key: ${{ secrets.OPENAI_API_KEY }}
          first_issue_response: |
            👋 Hello @${{ github.actor }}, thank you for your interest in Ultralytics 🚀! We recommend a visit to the [Docs](https://docs.ultralytics.com) for new users where you can find many [Python](https://docs.ultralytics.com/usage/python/) and [CLI](https://docs.ultralytics.com/usage/cli/) usage examples and where many of the most common questions may already be answered.
diff --git a/.github/workflows/merge-main-into-prs.yml b/.github/workflows/merge-main-into-prs.yml
index 347ec1b99c..68ecf94723 100644
--- a/.github/workflows/merge-main-into-prs.yml
+++ b/.github/workflows/merge-main-into-prs.yml
@@ -33,7 +33,7 @@ jobs:
          import os
          import time

-          g = Github("${{ secrets.PERSONAL_ACCESS_TOKEN }}")
+          g = Github("${{ secrets._GITHUB_TOKEN }}")
          repo = g.get_repo("${{ github.repository }}")

          # Fetch the default branch name
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index d59dd901ab..e82aecc5a0 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -23,7 +23,7 @@ jobs:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
-          token: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} # use your PAT here
+          token: ${{ secrets._GITHUB_TOKEN }} # use your PAT here
      - name: Git config
        run: |
          git config --global user.name "UltralyticsAssistant"
@@ -36,7 +36,7 @@
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip wheel
-          pip install requests build twine toml
+          pip install ultralytics-actions build twine toml
      - name: Check PyPI version
        shell: python
        run: |
@@ -103,15 +103,14 @@ jobs:
        if: (github.event_name == 'push' || github.event.inputs.pypi == 'true') && steps.check_pypi.outputs.increment == 'True'
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-          GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}
+          GITHUB_TOKEN: ${{ secrets._GITHUB_TOKEN }}
          CURRENT_TAG: ${{ steps.check_pypi.outputs.current_tag }}
          PREVIOUS_TAG: ${{ steps.check_pypi.outputs.previous_tag }}
-        run: |
-          curl -s "https://raw.githubusercontent.com/ultralytics/actions/main/utils/summarize_release.py" | python -
+        run: ultralytics-actions-summarize-release
        shell: bash
      - name: Extract PR Details
        env:
-          GH_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}
+          GH_TOKEN: ${{ secrets._GITHUB_TOKEN }}
        run: |
          # Check if the event is a pull request or pull_request_target
          if [ "${{ github.event_name }}" = "pull_request" ] || [ "${{ github.event_name }}" = "pull_request_target" ]; then
diff --git a/docker/Dockerfile-runner b/docker/Dockerfile-runner
index 642f1a1bae..539f0aa03e 100644
--- a/docker/Dockerfile-runner
+++ b/docker/Dockerfile-runner
@@ -17,8 +17,8 @@ ENV PYTHONUNBUFFERED=1 \
WORKDIR /actions-runner

# Download and unpack the latest runner from https://github.com/actions/runner
-RUN FILENAME=actions-runner-linux-x64-2.317.0.tar.gz && \
-    curl -o $FILENAME -L https://github.com/actions/runner/releases/download/v2.317.0/$FILENAME && \
+RUN FILENAME=actions-runner-linux-x64-2.320.0.tar.gz && \
+    curl -o $FILENAME -L https://github.com/actions/runner/releases/download/v2.320.0/$FILENAME && \
    tar xzf $FILENAME && \
    rm $FILENAME
diff --git a/docs/en/guides/conda-quickstart.md b/docs/en/guides/conda-quickstart.md
index e37e89911f..b958c85f27 100644
--- a/docs/en/guides/conda-quickstart.md
+++ b/docs/en/guides/conda-quickstart.md
@@ -37,7 +37,7 @@ This guide provides a comprehensive introduction to setting up a Conda environme
First, let's create a new Conda environment. Open your terminal and run the following command:

```bash
-conda create --name ultralytics-env python=3.8 -y
+conda create --name ultralytics-env python=3.11 -y
```

Activate the new environment:
@@ -135,7 +135,7 @@ Congratulations! You have successfully set up a Conda environment, installed the
Setting up a Conda environment for Ultralytics projects is straightforward and ensures smooth package management.
First, create a new Conda environment using the following command:

```bash
-conda create --name ultralytics-env python=3.8 -y
+conda create --name ultralytics-env python=3.11 -y
```

Then, activate the new environment with:
diff --git a/docs/en/guides/distance-calculation.md b/docs/en/guides/distance-calculation.md
index b0b12f919b..009899ae3c 100644
--- a/docs/en/guides/distance-calculation.md
+++ b/docs/en/guides/distance-calculation.md
@@ -43,12 +43,9 @@ Measuring the gap between two objects is known as distance calculation within a
        ```python
        import cv2

-        from ultralytics import YOLO, solutions
+        from ultralytics import solutions

-        model = YOLO("yolo11n.pt")
-        names = model.model.names
-
-        cap = cv2.VideoCapture("path/to/video/file.mp4")
+        cap = cv2.VideoCapture("Path/to/video/file.mp4")
        assert cap.isOpened(), "Error reading video file"

        w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
@@ -56,16 +53,14 @@ Measuring the gap between two objects is known as distance calculation within a
        video_writer = cv2.VideoWriter("distance_calculation.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

        # Init distance-calculation obj
-        dist_obj = solutions.DistanceCalculation(names=names, view_img=True)
+        distance = solutions.DistanceCalculation(model="yolo11n.pt", show=True)

        while cap.isOpened():
            success, im0 = cap.read()
            if not success:
                print("Video frame is empty or video processing has been successfully completed.")
                break
-
-            tracks = model.track(im0, persist=True, show=False)
-            im0 = dist_obj.start_process(im0, tracks)
+            im0 = distance.calculate(im0)
            video_writer.write(im0)

        cap.release()
@@ -84,13 +79,11 @@ Measuring the gap between two objects is known as distance calculation within a

### Arguments `DistanceCalculation()`

-| `Name`           | `Type`  | `Default`       | Description                                                |
-| ---------------- | ------- | --------------- | ---------------------------------------------------------- |
-| `names`          | `dict`  | `None`          | Dictionary of classes names.                               |
-| `view_img`       | `bool`  | `False`         | Flag to indicate if the video stream should be displayed.  |
-| `line_thickness` | `int`   | `2`             | Thickness of the lines drawn on the image.                 |
-| `line_color`     | `tuple` | `(255, 255, 0)` | Color of the lines drawn on the image (BGR format).        |
-| `centroid_color` | `tuple` | `(255, 0, 255)` | Color of the centroids drawn (BGR format).                 |
+| `Name`       | `Type` | `Default` | Description                                           |
+| ------------ | ------ | --------- | ----------------------------------------------------- |
+| `model`      | `str`  | `None`    | Path to Ultralytics YOLO Model File                   |
+| `line_width` | `int`  | `2`       | Line thickness for bounding boxes.                    |
+| `show`       | `bool` | `False`   | Flag to control whether to display the video stream.  |

### Arguments `model.track`
@@ -122,10 +115,8 @@ To delete points drawn during distance calculation with Ultralytics YOLO11, you

The key arguments for initializing the `DistanceCalculation` class in Ultralytics YOLO11 include:

-- `names`: Dictionary mapping class indices to class names.
-- `view_img`: Flag to indicate if the video stream should be displayed.
-- `line_thickness`: Thickness of the lines drawn on the image.
-- `line_color`: Color of the lines drawn on the image (BGR format).
-- `centroid_color`: Color of the centroids (BGR format).
+- `model`: Model file path.
+- `show`: Flag to indicate if the video stream should be displayed.
+- `line_width`: Thickness of bounding box and the lines drawn on the image.
For an exhaustive list and default values, see the [arguments of DistanceCalculation](#arguments-distancecalculation).
diff --git a/docs/en/guides/heatmaps.md b/docs/en/guides/heatmaps.md
index f33993134f..7919bc7d94 100644
--- a/docs/en/guides/heatmaps.md
+++ b/docs/en/guides/heatmaps.md
@@ -222,6 +222,7 @@ A heatmap generated with [Ultralytics YOLO11](https://github.com/ultralytics/ult

| Name         | Type   | Default            | Description                                                        |
| ------------ | ------ | ------------------ | ------------------------------------------------------------------ |
+| `model`      | `str`  | `None`             | Path to Ultralytics YOLO Model File                                |
| `colormap`   | `int`  | `cv2.COLORMAP_JET` | Colormap to use for the heatmap.                                   |
| `show`       | `bool` | `False`            | Whether to display the image with the heatmap overlay.             |
| `show_in`    | `bool` | `True`             | Whether to display the count of objects entering the region.       |
diff --git a/docs/en/guides/model-deployment-practices.md b/docs/en/guides/model-deployment-practices.md
index 603371c1fb..5f6d2730aa 100644
--- a/docs/en/guides/model-deployment-practices.md
+++ b/docs/en/guides/model-deployment-practices.md
@@ -106,7 +106,7 @@ When deploying [machine learning](https://www.ultralytics.com/glossary/machine-l
 - **Profile the Inference Pipeline:** Identifying bottlenecks in the inference pipeline can help pinpoint the source of delays. Use profiling tools to analyze each step of the inference process, identifying and addressing any stages that cause significant delays, such as inefficient layers or data transfer issues.
 - **Use Appropriate Precision:** Using higher precision than necessary can slow down inference times. Experiment with using lower precision, such as FP16 (half-precision), instead of FP32 (full-precision). While FP16 can reduce inference time, also keep in mind that it can impact model accuracy.

-If you are facing this issue while deploying YOLO11, consider that YOLO11 offers [various model sizes](../models/yolov8.md), such as YOLO11n (nano) for devices with lower memory capacity and YOLOv8x (extra-large) for more powerful GPUs. Choosing the right model variant for your hardware can help balance memory usage and processing time.
+If you are facing this issue while deploying YOLO11, consider that YOLO11 offers [various model sizes](../models/yolo11.md), such as YOLO11n (nano) for devices with lower memory capacity and YOLO11x (extra-large) for more powerful GPUs. Choosing the right model variant for your hardware can help balance memory usage and processing time.

Also keep in mind that the size of the input images directly impacts memory usage and processing time. Lower resolutions reduce memory usage and speed up inference, while higher resolutions improve accuracy but require more memory and processing power.
diff --git a/docs/en/guides/raspberry-pi.md b/docs/en/guides/raspberry-pi.md
index 96e903b5b3..a834d8f074 100644
--- a/docs/en/guides/raspberry-pi.md
+++ b/docs/en/guides/raspberry-pi.md
@@ -1,12 +1,12 @@
 ---
comments: true
-description: Learn how to deploy Ultralytics YOLOv8 on Raspberry Pi with our comprehensive guide. Get performance benchmarks, setup instructions, and best practices.
-keywords: Ultralytics, YOLOv8, Raspberry Pi, setup, guide, benchmarks, computer vision, object detection, NCNN, Docker, camera modules
+description: Learn how to deploy Ultralytics YOLO11 on Raspberry Pi with our comprehensive guide. Get performance benchmarks, setup instructions, and best practices.
+keywords: Ultralytics, YOLO11, Raspberry Pi, setup, guide, benchmarks, computer vision, object detection, NCNN, Docker, camera modules
 ---

-# Quick Start Guide: Raspberry Pi with Ultralytics YOLOv8
+# Quick Start Guide: Raspberry Pi with Ultralytics YOLO11

-This comprehensive guide provides a detailed walkthrough for deploying Ultralytics YOLOv8 on [Raspberry Pi](https://www.raspberrypi.com/) devices. Additionally, it showcases performance benchmarks to demonstrate the capabilities of YOLOv8 on these small and powerful devices.
+This comprehensive guide provides a detailed walkthrough for deploying Ultralytics YOLO11 on [Raspberry Pi](https://www.raspberrypi.com/) devices. Additionally, it showcases performance benchmarks to demonstrate the capabilities of YOLO11 on these small and powerful devices.
@@ -56,7 +56,7 @@ There are two ways of setting up Ultralytics package on Raspberry Pi to build yo
### Start with Docker
-The fastest way to get started with Ultralytics YOLOv8 on Raspberry Pi is to run with pre-built docker image for Raspberry Pi.
+The fastest way to get started with Ultralytics YOLO11 on Raspberry Pi is to run with pre-built docker image for Raspberry Pi.
Execute the below command to pull the Docker container and run on Raspberry Pi. This is based on [arm64v8/debian](https://hub.docker.com/r/arm64v8/debian) docker image which contains Debian 12 (Bookworm) in a Python3 environment.
@@ -98,7 +98,7 @@ Out of all the model export formats supported by Ultralytics, [NCNN](https://doc
## Convert Model to NCNN and Run Inference
-The YOLOv8n model in PyTorch format is converted to NCNN to run inference with the exported model.
+The YOLO11n model in PyTorch format is converted to NCNN to run inference with the exported model.
!!! example
@@ -107,14 +107,14 @@ The YOLOv8n model in PyTorch format is converted to NCNN to run inference with t
```python
from ultralytics import YOLO
- # Load a YOLOv8n PyTorch model
- model = YOLO("yolov8n.pt")
+ # Load a YOLO11n PyTorch model
+ model = YOLO("yolo11n.pt")
# Export the model to NCNN format
- model.export(format="ncnn") # creates 'yolov8n_ncnn_model'
+ model.export(format="ncnn") # creates 'yolo11n_ncnn_model'
# Load the exported NCNN model
- ncnn_model = YOLO("yolov8n_ncnn_model")
+ ncnn_model = YOLO("yolo11n_ncnn_model")
# Run inference
results = ncnn_model("https://ultralytics.com/images/bus.jpg")
@@ -123,102 +123,62 @@ The YOLOv8n model in PyTorch format is converted to NCNN to run inference with t
=== "CLI"
```bash
- # Export a YOLOv8n PyTorch model to NCNN format
- yolo export model=yolov8n.pt format=ncnn # creates 'yolov8n_ncnn_model'
+ # Export a YOLO11n PyTorch model to NCNN format
+ yolo export model=yolo11n.pt format=ncnn # creates 'yolo11n_ncnn_model'
# Run inference with the exported model
- yolo predict model='yolov8n_ncnn_model' source='https://ultralytics.com/images/bus.jpg'
+ yolo predict model='yolo11n_ncnn_model' source='https://ultralytics.com/images/bus.jpg'
```
!!! tip
For more details about supported export options, visit the [Ultralytics documentation page on deployment options](https://docs.ultralytics.com/guides/model-deployment-options/).
-## Raspberry Pi 5 vs Raspberry Pi 4 YOLOv8 Benchmarks
+## Raspberry Pi 5 YOLO11 Benchmarks
-YOLOv8 benchmarks were run by the Ultralytics team on nine different model formats measuring speed and [accuracy](https://www.ultralytics.com/glossary/accuracy): PyTorch, TorchScript, ONNX, OpenVINO, TF SavedModel, TF GraphDef, TF Lite, PaddlePaddle, NCNN. Benchmarks were run on both Raspberry Pi 5 and Raspberry Pi 4 at FP32 [precision](https://www.ultralytics.com/glossary/precision) with default input image size of 640.
-
-!!! note
-
- We have only included benchmarks for YOLOv8n and YOLOv8s models because other models sizes are too big to run on the Raspberry Pis and does not offer decent performance.
+YOLO11 benchmarks were run by the Ultralytics team on nine different model formats measuring speed and [accuracy](https://www.ultralytics.com/glossary/accuracy): PyTorch, TorchScript, ONNX, OpenVINO, TF SavedModel, TF GraphDef, TF Lite, PaddlePaddle, NCNN. Benchmarks were run on a Raspberry Pi 5 at FP32 [precision](https://www.ultralytics.com/glossary/precision) with default input image size of 640.
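For readers who want to reproduce numbers like these on their own device, a minimal sketch using the `benchmark` utility from `ultralytics.utils.benchmarks`; the dataset, image size, and `device="cpu"` here are illustrative choices, not the exact configuration used for the table below.

```python
from ultralytics.utils.benchmarks import benchmark

# Benchmark YOLO11n across the exportable formats on this device.
# data/imgsz/device are illustrative placeholders, not the exact benchmark settings.
benchmark(model="yolo11n.pt", data="coco8.yaml", imgsz=640, half=False, device="cpu")
```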
### Comparison Chart
-!!! tip "Performance"
-
- === "YOLOv8n"
-
-
@@ -105,7 +163,7 @@ Ultralytics YOLO is the latest advancement in the acclaimed YOLO (You Only Look
Getting started with YOLO is quick and straightforward. You can install the Ultralytics package using [pip](https://pypi.org/project/ultralytics/) and get up and running in minutes. Here's a basic installation command:
-!!! example
+!!! example "Installation using pip"
=== "CLI"
@@ -121,11 +179,11 @@ Training a custom YOLO model on your dataset involves a few detailed steps:
1. Prepare your annotated dataset.
2. Configure the training parameters in a YAML file.
-3. Use the `yolo train` command to start training.
+3. Use the `yolo TASK train` command to start training. (Each `TASK` has its own argument)
-Here's example code:
+Here's example code for the Object Detection Task:
-!!! example
+!!! example "Train Example for Object Detection Task"
=== "Python"
@@ -143,7 +201,7 @@ Here's example code:
```bash
# Train a YOLO model from the command line
- yolo train data=path/to/dataset.yaml epochs=100 imgsz=640
+ yolo detect train data=path/to/dataset.yaml epochs=100 imgsz=640
```
For a detailed walkthrough, check out our [Train a Model](modes/train.md) guide, which includes examples and tips for optimizing your training process.
@@ -161,7 +219,7 @@ For more details, visit our [Licensing](https://www.ultralytics.com/license) pag
Ultralytics YOLO supports efficient and customizable multi-object tracking. To utilize tracking capabilities, you can use the `yolo track` command as shown below:
-!!! example
+!!! example "Example for Object Tracking on a Video"
=== "Python"
diff --git a/docs/en/integrations/comet.md b/docs/en/integrations/comet.md
index 2774b2fed9..24d69c0445 100644
--- a/docs/en/integrations/comet.md
+++ b/docs/en/integrations/comet.md
@@ -50,17 +50,21 @@ After installing the required packages, you'll need to sign up, get a [Comet API
Then, you can initialize your Comet project. Comet will automatically detect the API key and proceed with the setup.
-```python
-import comet_ml
+!!! example "Initialize Comet project"
-comet_ml.login(project_name="comet-example-yolov8-coco128")
-```
+ === "Python"
+
+ ```python
+ import comet_ml
+
+ comet_ml.login(project_name="comet-example-yolo11-coco128")
+ ```
If you are using a Google Colab notebook, the code above will prompt you to enter your API key for initialization.
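For orientation, a minimal end-to-end sketch that combines this initialization with the training call shown in the Usage section below; the `epochs=3` value is only a placeholder for a quick run.

```python
import comet_ml

from ultralytics import YOLO

# Initialize Comet; the API key is picked up from the environment or the login prompt
comet_ml.login(project_name="comet-example-yolo11-coco128")

# Train YOLO11 and let the Comet integration log metrics automatically
model = YOLO("yolo11n.pt")
model.train(
    data="coco8.yaml",
    project="comet-example-yolo11-coco128",
    epochs=3,  # placeholder value for a quick smoke test
    batch=32,
    save_period=1,
    save_json=True,
)
```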
## Usage
-Before diving into the usage instructions, be sure to check out the range of [YOLO11 models offered by Ultralytics](../models/index.md). This will help you choose the most appropriate model for your project requirements.
+Before diving into the usage instructions, be sure to check out the range of [YOLO11 models offered by Ultralytics](../models/yolo11.md). This will help you choose the most appropriate model for your project requirements.
!!! example "Usage"
@@ -75,7 +79,7 @@ Before diving into the usage instructions, be sure to check out the range of [YO
# Train the model
results = model.train(
data="coco8.yaml",
- project="comet-example-yolov8-coco128",
+ project="comet-example-yolo11-coco128",
batch=32,
save_period=1,
save_json=True,
@@ -200,7 +204,7 @@ To integrate Comet ML with Ultralytics YOLO11, follow these steps:
```python
import comet_ml
- comet_ml.login(project_name="comet-example-yolov8-coco128")
+ comet_ml.login(project_name="comet-example-yolo11-coco128")
```
4. **Train your YOLO11 model and log metrics**:
@@ -211,7 +215,7 @@ To integrate Comet ML with Ultralytics YOLO11, follow these steps:
model = YOLO("yolo11n.pt")
results = model.train(
data="coco8.yaml",
- project="comet-example-yolov8-coco128",
+ project="comet-example-yolo11-coco128",
batch=32,
save_period=1,
save_json=True,
diff --git a/docs/en/integrations/weights-biases.md b/docs/en/integrations/weights-biases.md
index 9777632e4c..55eee2eeb4 100644
--- a/docs/en/integrations/weights-biases.md
+++ b/docs/en/integrations/weights-biases.md
@@ -210,7 +210,7 @@ These features help in tracking experiments, optimizing models, and collaboratin
After running your training script with W&B integration:
1. A link to your W&B dashboard will be provided in the console output.
-2. Click on the link or go to [wandb.ai](https://wandb.ai) and log in to your account.
+2. Click on the link or go to [wandb.ai](https://wandb.ai/) and log in to your account.
3. Navigate to your project to view detailed metrics, visualizations, and model performance data.
The dashboard offers insights into your model's training process, allowing you to analyze and improve your YOLO11 models effectively.
diff --git a/docs/en/models/sam-2.md b/docs/en/models/sam-2.md
index 562a130029..5120498e24 100644
--- a/docs/en/models/sam-2.md
+++ b/docs/en/models/sam-2.md
@@ -142,11 +142,20 @@ SAM 2 can be utilized across a broad spectrum of tasks, including real-time vide
# Display model information (optional)
model.info()
- # Segment with bounding box prompt
+ # Run inference with bboxes prompt
results = model("path/to/image.jpg", bboxes=[100, 100, 200, 200])
- # Segment with point prompt
- results = model("path/to/image.jpg", points=[150, 150], labels=[1])
+ # Run inference with single point
+ results = model(points=[900, 370], labels=[1])
+
+ # Run inference with multiple points
+ results = model(points=[[400, 370], [900, 370]], labels=[1, 1])
+
+ # Run inference with multiple points prompt per object
+ results = model(points=[[[400, 370], [900, 370]]], labels=[[1, 1]])
+
+ # Run inference with negative points prompt
+ results = model(points=[[[400, 370], [900, 370]]], labels=[[1, 0]])
```
#### Segment Everything
diff --git a/docs/en/models/sam.md b/docs/en/models/sam.md
index 1a5c0db4a7..f9acad72df 100644
--- a/docs/en/models/sam.md
+++ b/docs/en/models/sam.md
@@ -59,16 +59,16 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t
results = model("ultralytics/assets/zidane.jpg", bboxes=[439, 437, 524, 709])
# Run inference with single point
- results = predictor(points=[900, 370], labels=[1])
+ results = model(points=[900, 370], labels=[1])
# Run inference with multiple points
- results = predictor(points=[[400, 370], [900, 370]], labels=[1, 1])
+ results = model(points=[[400, 370], [900, 370]], labels=[1, 1])
# Run inference with multiple points prompt per object
- results = predictor(points=[[[400, 370], [900, 370]]], labels=[[1, 1]])
+ results = model(points=[[[400, 370], [900, 370]]], labels=[[1, 1]])
# Run inference with negative points prompt
- results = predictor(points=[[[400, 370], [900, 370]]], labels=[[1, 0]])
+ results = model(points=[[[400, 370], [900, 370]]], labels=[[1, 0]])
```
!!! example "Segment everything"
diff --git a/examples/YOLOv8-OpenCV-int8-tflite-Python/main.py b/examples/YOLOv8-OpenCV-int8-tflite-Python/main.py
index 70bccfa186..46d7fb4272 100644
--- a/examples/YOLOv8-OpenCV-int8-tflite-Python/main.py
+++ b/examples/YOLOv8-OpenCV-int8-tflite-Python/main.py
@@ -188,38 +188,48 @@ class Yolov8TFLite:
Returns:
numpy.ndarray: The input image with detections drawn on it.
"""
+ # Transpose predictions outside the loop
+ output = [np.transpose(pred) for pred in output]
+
boxes = []
scores = []
class_ids = []
+
+ # Vectorize extraction of bounding boxes, scores, and class IDs
for pred in output:
- pred = np.transpose(pred)
- for box in pred:
- x, y, w, h = box[:4]
- x1 = x - w / 2
- y1 = y - h / 2
- boxes.append([x1, y1, w, h])
- idx = np.argmax(box[4:])
- scores.append(box[idx + 4])
- class_ids.append(idx)
+ x, y, w, h = pred[:, 0], pred[:, 1], pred[:, 2], pred[:, 3]
+ x1 = x - w / 2
+ y1 = y - h / 2
+ boxes.extend(np.column_stack([x1, y1, w, h]))
+
+ # Argmax and score extraction for all predictions at once
+ idx = np.argmax(pred[:, 4:], axis=1)
+ scores.extend(pred[np.arange(pred.shape[0]), idx + 4])
+ class_ids.extend(idx)
+
+ # Precompute gain and pad once
+ img_height, img_width = input_image.shape[:2]
+ gain = min(img_width / self.img_width, img_height / self.img_height)
+ pad = (
+ round((img_width - self.img_width * gain) / 2 - 0.1),
+ round((img_height - self.img_height * gain) / 2 - 0.1),
+ )
+ # Non-Maximum Suppression (NMS) in one go
indices = cv2.dnn.NMSBoxes(boxes, scores, self.confidence_thres, self.iou_thres)
- for i in indices:
- # Get the box, score, and class ID corresponding to the index
+ # Process selected indices
+ for i in indices.flatten():
box = boxes[i]
- gain = min(img_width / self.img_width, img_height / self.img_height)
- pad = (
- round((img_width - self.img_width * gain) / 2 - 0.1),
- round((img_height - self.img_height * gain) / 2 - 0.1),
- )
box[0] = (box[0] - pad[0]) / gain
box[1] = (box[1] - pad[1]) / gain
box[2] = box[2] / gain
box[3] = box[3] / gain
+
score = scores[i]
class_id = class_ids[i]
+
if score > 0.25:
- print(box, score, class_id)
# Draw the detection on the input image
self.draw_detections(input_image, box, score, class_id)
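To make the vectorized postprocessing above easier to follow in isolation, here is a self-contained sketch on a dummy prediction tensor; the array shapes and thresholds are assumptions for illustration, not values taken from the example script.

```python
import cv2
import numpy as np

# Dummy YOLO-style head output: one (84, N) tensor -> transpose to (N, 84)
rng = np.random.default_rng(0)
output = [rng.random((84, 50), dtype=np.float32)]
output = [np.transpose(pred) for pred in output]

boxes, scores, class_ids = [], [], []
for pred in output:
    x, y, w, h = pred[:, 0], pred[:, 1], pred[:, 2], pred[:, 3]
    x1, y1 = x - w / 2, y - h / 2  # convert box centers to top-left corners
    boxes.extend(np.column_stack([x1, y1, w, h]).tolist())
    idx = np.argmax(pred[:, 4:], axis=1)  # best class per prediction, all rows at once
    scores.extend(float(s) for s in pred[np.arange(pred.shape[0]), idx + 4])
    class_ids.extend(int(i) for i in idx)

# Single NMS pass over every candidate box
indices = cv2.dnn.NMSBoxes(boxes, scores, 0.25, 0.45)
print(f"{len(indices)} boxes kept after NMS")
```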
diff --git a/examples/tutorial.ipynb b/examples/tutorial.ipynb
index 0992abf460..c52b03c990 100644
--- a/examples/tutorial.ipynb
+++ b/examples/tutorial.ipynb
@@ -582,7 +582,7 @@
"from ultralytics import YOLO\n",
"\n",
"model = YOLO('yolo11n-obb.pt') # load a pretrained YOLO OBB model\n",
- "model.train(data='coco8-dota.yaml', epochs=3) # train the model\n",
+ "model.train(data='dota8.yaml', epochs=3) # train the model\n",
"model('https://ultralytics.com/images/bus.jpg') # predict on an image"
],
"metadata": {
diff --git a/mkdocs.yml b/mkdocs.yml
index ee1a83766c..771084066f 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -162,7 +162,7 @@ nav:
- solutions/index.md
- Guides:
- guides/index.md
- - Live Inference 🚀 NEW: guides/streamlit-live-inference.md # for promotion of new pages
+ - YOLO11 🚀 NEW: models/yolo11.md # for promotion of new pages
- Languages:
- 🇬🇧 English: https://ultralytics.com/docs/
- 🇨🇳 简体中文: https://docs.ultralytics.com/zh/
diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py
index 5360c25e18..06ee07e308 100644
--- a/ultralytics/__init__.py
+++ b/ultralytics/__init__.py
@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
-__version__ = "8.3.12"
+__version__ = "8.3.13"
import os
diff --git a/ultralytics/cfg/__init__.py b/ultralytics/cfg/__init__.py
index 2eb7ff1c01..c8d8f44f02 100644
--- a/ultralytics/cfg/__init__.py
+++ b/ultralytics/cfg/__init__.py
@@ -639,7 +639,7 @@ def smart_value(v):
else:
try:
return eval(v)
- except: # noqa E722
+ except Exception:
return v
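The repeated `except:` → `except Exception:` changes in this PR are behaviorally subtle; a small sketch of the pattern used in `smart_value()` shows what the narrower clause buys (the function name here is illustrative only).

```python
def smart_eval(v):
    """Illustrative mirror of smart_value(): fall back to the raw string on ordinary errors."""
    try:
        return eval(v)
    except Exception:  # unlike a bare 'except', KeyboardInterrupt/SystemExit still propagate
        return v


print(smart_eval("3 * 4"))       # 12
print(smart_eval("not-a-name"))  # returns the original string unchanged
```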
diff --git a/ultralytics/data/utils.py b/ultralytics/data/utils.py
index 3748ac2db5..6307c4e46b 100644
--- a/ultralytics/data/utils.py
+++ b/ultralytics/data/utils.py
@@ -65,7 +65,7 @@ def exif_size(img: Image.Image):
rotation = exif.get(274, None) # the EXIF key for the orientation tag is 274
if rotation in {6, 8}: # rotation 270 or 90
s = s[1], s[0]
- except: # noqa E722
+ except Exception:
pass
return s
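For context on the `exif_size()` cleanup, a standalone sketch of the same orientation handling with Pillow; the helper name is hypothetical.

```python
from PIL import Image


def oriented_size(path):
    """Hypothetical helper mirroring exif_size(): swap W/H for 90/270-degree EXIF rotations."""
    img = Image.open(path)
    s = img.size  # (width, height)
    try:
        rotation = img.getexif().get(274, None)  # 274 is the EXIF orientation tag
        if rotation in {6, 8}:  # 270 or 90 degrees
            s = s[1], s[0]
    except Exception:
        pass
    return s
```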
diff --git a/ultralytics/engine/exporter.py b/ultralytics/engine/exporter.py
index b2c0329168..dab9c69e60 100644
--- a/ultralytics/engine/exporter.py
+++ b/ultralytics/engine/exporter.py
@@ -965,7 +965,7 @@ class Exporter:
f'--out_dir "{Path(f).parent}" '
"--show_operations "
"--search_delegate "
- "--delegate_search_step 3 "
+ "--delegate_search_step 30 "
"--timeout_sec 180 "
f'"{tflite_model}"'
)
diff --git a/ultralytics/models/sam/predict.py b/ultralytics/models/sam/predict.py
index 978f7cfd68..4002e092b6 100644
--- a/ultralytics/models/sam/predict.py
+++ b/ultralytics/models/sam/predict.py
@@ -235,7 +235,42 @@ class Predictor(BasePredictor):
"""
features = self.get_im_features(im) if self.features is None else self.features
- src_shape, dst_shape = self.batch[1][0].shape[:2], im.shape[2:]
+ bboxes, points, labels, masks = self._prepare_prompts(im.shape[2:], bboxes, points, labels, masks)
+ points = (points, labels) if points is not None else None
+ # Embed prompts
+ sparse_embeddings, dense_embeddings = self.model.prompt_encoder(points=points, boxes=bboxes, masks=masks)
+
+ # Predict masks
+ pred_masks, pred_scores = self.model.mask_decoder(
+ image_embeddings=features,
+ image_pe=self.model.prompt_encoder.get_dense_pe(),
+ sparse_prompt_embeddings=sparse_embeddings,
+ dense_prompt_embeddings=dense_embeddings,
+ multimask_output=multimask_output,
+ )
+
+ # (N, d, H, W) --> (N*d, H, W), (N, d) --> (N*d, )
+ # `d` could be 1 or 3 depends on `multimask_output`.
+ return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)
+
+ def _prepare_prompts(self, dst_shape, bboxes=None, points=None, labels=None, masks=None):
+ """
+ Prepares and transforms the input prompts for processing based on the destination shape.
+
+ Args:
+ dst_shape (tuple): The target shape (height, width) for the prompts.
+ bboxes (np.ndarray | List | None): Bounding boxes in XYXY format with shape (N, 4).
+ points (np.ndarray | List | None): Points indicating object locations with shape (N, 2) or (N, num_points, 2), in pixels.
+ labels (np.ndarray | List | None): Point prompt labels with shape (N,) or (N, num_points). 1 for foreground, 0 for background.
+ masks (List | np.ndarray, Optional): Masks for the objects, where each mask is a 2D array.
+
+ Raises:
+ AssertionError: If the number of points don't match the number of labels, in case labels were passed.
+
+ Returns:
+ (tuple): A tuple containing transformed bounding boxes, points, labels, and masks.
+ """
+ src_shape = self.batch[1][0].shape[:2]
r = 1.0 if self.segment_all else min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1])
# Transform input prompts
if points is not None:
@@ -258,23 +293,7 @@ class Predictor(BasePredictor):
bboxes *= r
if masks is not None:
masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device).unsqueeze(1)
-
- points = (points, labels) if points is not None else None
- # Embed prompts
- sparse_embeddings, dense_embeddings = self.model.prompt_encoder(points=points, boxes=bboxes, masks=masks)
-
- # Predict masks
- pred_masks, pred_scores = self.model.mask_decoder(
- image_embeddings=features,
- image_pe=self.model.prompt_encoder.get_dense_pe(),
- sparse_prompt_embeddings=sparse_embeddings,
- dense_prompt_embeddings=dense_embeddings,
- multimask_output=multimask_output,
- )
-
- # (N, d, H, W) --> (N*d, H, W), (N, d) --> (N*d, )
- # `d` could be 1 or 3 depends on `multimask_output`.
- return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)
+ return bboxes, points, labels, masks
def generate(
self,
@@ -693,34 +712,7 @@ class SAM2Predictor(Predictor):
"""
features = self.get_im_features(im) if self.features is None else self.features
- src_shape, dst_shape = self.batch[1][0].shape[:2], im.shape[2:]
- r = 1.0 if self.segment_all else min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1])
- # Transform input prompts
- if points is not None:
- points = torch.as_tensor(points, dtype=torch.float32, device=self.device)
- points = points[None] if points.ndim == 1 else points
- # Assuming labels are all positive if users don't pass labels.
- if labels is None:
- labels = torch.ones(points.shape[0])
- labels = torch.as_tensor(labels, dtype=torch.int32, device=self.device)
- points *= r
- # (N, 2) --> (N, 1, 2), (N, ) --> (N, 1)
- points, labels = points[:, None], labels[:, None]
- if bboxes is not None:
- bboxes = torch.as_tensor(bboxes, dtype=torch.float32, device=self.device)
- bboxes = bboxes[None] if bboxes.ndim == 1 else bboxes
- bboxes = bboxes.view(-1, 2, 2) * r
- bbox_labels = torch.tensor([[2, 3]], dtype=torch.int32, device=bboxes.device).expand(len(bboxes), -1)
- # NOTE: merge "boxes" and "points" into a single "points" input
- # (where boxes are added at the beginning) to model.sam_prompt_encoder
- if points is not None:
- points = torch.cat([bboxes, points], dim=1)
- labels = torch.cat([bbox_labels, labels], dim=1)
- else:
- points, labels = bboxes, bbox_labels
- if masks is not None:
- masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device).unsqueeze(1)
-
+ bboxes, points, labels, masks = self._prepare_prompts(im.shape[2:], bboxes, points, labels, masks)
points = (points, labels) if points is not None else None
sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder(
@@ -744,6 +736,36 @@ class SAM2Predictor(Predictor):
# `d` could be 1 or 3 depends on `multimask_output`.
return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)
+ def _prepare_prompts(self, dst_shape, bboxes=None, points=None, labels=None, masks=None):
+ """
+ Prepares and transforms the input prompts for processing based on the destination shape.
+
+ Args:
+ dst_shape (tuple): The target shape (height, width) for the prompts.
+ bboxes (np.ndarray | List | None): Bounding boxes in XYXY format with shape (N, 4).
+ points (np.ndarray | List | None): Points indicating object locations with shape (N, 2) or (N, num_points, 2), in pixels.
+ labels (np.ndarray | List | None): Point prompt labels with shape (N,) or (N, num_points). 1 for foreground, 0 for background.
+ masks (List | np.ndarray, Optional): Masks for the objects, where each mask is a 2D array.
+
+ Raises:
+ AssertionError: If the number of points don't match the number of labels, in case labels were passed.
+
+ Returns:
+ (tuple): A tuple containing transformed bounding boxes, points, labels, and masks.
+ """
+ bboxes, points, labels, masks = super()._prepare_prompts(dst_shape, bboxes, points, labels, masks)
+ if bboxes is not None:
+ bboxes = bboxes.view(-1, 2, 2)
+ bbox_labels = torch.tensor([[2, 3]], dtype=torch.int32, device=bboxes.device).expand(len(bboxes), -1)
+ # NOTE: merge "boxes" and "points" into a single "points" input
+ # (where boxes are added at the beginning) to model.sam_prompt_encoder
+ if points is not None:
+ points = torch.cat([bboxes, points], dim=1)
+ labels = torch.cat([bbox_labels, labels], dim=1)
+ else:
+ points, labels = bboxes, bbox_labels
+ return bboxes, points, labels, masks
+
def set_image(self, image):
"""
Preprocesses and sets a single image for inference using the SAM2 model.
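The `_prepare_prompts()` refactor above centralizes the prompt rescaling; a small sketch of just that coordinate transform, with made-up source and destination shapes.

```python
import numpy as np

# Hypothetical shapes: original frame (H, W) and letterboxed model input (H, W)
src_shape, dst_shape = (1080, 810), (1024, 1024)
r = min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1])  # same ratio as _prepare_prompts

points = np.array([[400, 370], [900, 370]], dtype=np.float32)  # (N, 2) point prompts in source pixels
points_in_model_space = points * r  # bboxes are scaled by the same factor
print(points_in_model_space)
```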
diff --git a/ultralytics/models/yolo/classify/train.py b/ultralytics/models/yolo/classify/train.py
index e51349fa98..9ff353858b 100644
--- a/ultralytics/models/yolo/classify/train.py
+++ b/ultralytics/models/yolo/classify/train.py
@@ -8,7 +8,7 @@ from ultralytics.data import ClassificationDataset, build_dataloader
from ultralytics.engine.trainer import BaseTrainer
from ultralytics.models import yolo
from ultralytics.nn.tasks import ClassificationModel
-from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK, colorstr
+from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK
from ultralytics.utils.plotting import plot_images, plot_results
from ultralytics.utils.torch_utils import is_parallel, strip_optimizer, torch_distributed_zero_first
@@ -141,7 +141,6 @@ class ClassificationTrainer(BaseTrainer):
self.metrics = self.validator(model=f)
self.metrics.pop("fitness", None)
self.run_callbacks("on_fit_epoch_end")
- LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}")
def plot_training_samples(self, batch, ni):
"""Plots training samples with their annotations."""
diff --git a/ultralytics/nn/autobackend.py b/ultralytics/nn/autobackend.py
index 78949cb631..12977f0184 100644
--- a/ultralytics/nn/autobackend.py
+++ b/ultralytics/nn/autobackend.py
@@ -46,7 +46,7 @@ def default_class_names(data=None):
if data:
try:
return yaml_load(check_yaml(data))["names"]
- except: # noqa E722
+ except Exception:
pass
return {i: f"class{i}" for i in range(999)} # return default if above errors
diff --git a/ultralytics/nn/tasks.py b/ultralytics/nn/tasks.py
index 407021c82a..12de1cfbf6 100644
--- a/ultralytics/nn/tasks.py
+++ b/ultralytics/nn/tasks.py
@@ -963,7 +963,6 @@ def parse_model(d, ch, verbose=True): # model_dict, input_channels(3)
args[j] = locals()[a] if a in locals() else ast.literal_eval(a)
except ValueError:
pass
-
n = n_ = max(round(n * depth), 1) if n > 1 else n # depth gain
if m in {
Classify,
@@ -1102,7 +1101,7 @@ def guess_model_scale(model_path):
(str): The size character of the model's scale, which can be n, s, m, l, or x.
"""
try:
- return re.search(r"yolo[v]?\d+([nslmx])", Path(model_path).stem).group(1) # n, s, m, l, or x
+ return re.search(r"yolo[v]?\d+([nslmx])", Path(model_path).stem).group(1) # noqa, returns n, s, m, l, or x
except AttributeError:
return ""
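A quick sanity check of the `guess_model_scale()` pattern on a few filenames; the wrapper function is only for illustration.

```python
import re
from pathlib import Path


def scale_of(model_path):
    """Illustrative wrapper around the same regex used by guess_model_scale()."""
    try:
        return re.search(r"yolo[v]?\d+([nslmx])", Path(model_path).stem).group(1)
    except AttributeError:
        return ""


print(scale_of("yolov8n.pt"))    # 'n'
print(scale_of("yolo11x.yaml"))  # 'x'
print(scale_of("model.pt"))      # '' (no scale in the name)
```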
@@ -1139,7 +1138,7 @@ def guess_model_task(model):
if isinstance(model, dict):
try:
return cfg2task(model)
- except: # noqa E722
+ except Exception:
pass
# Guess from PyTorch model
@@ -1147,12 +1146,12 @@ def guess_model_task(model):
for x in "model.args", "model.model.args", "model.model.model.args":
try:
return eval(x)["task"]
- except: # noqa E722
+ except Exception:
pass
for x in "model.yaml", "model.model.yaml", "model.model.model.yaml":
try:
return cfg2task(eval(x))
- except: # noqa E722
+ except Exception:
pass
for m in model.modules():
diff --git a/ultralytics/solutions/analytics.py b/ultralytics/solutions/analytics.py
index ade3431bf1..38489827af 100644
--- a/ultralytics/solutions/analytics.py
+++ b/ultralytics/solutions/analytics.py
@@ -61,11 +61,11 @@ class Analytics(BaseSolution):
self.extract_tracks(im0) # Extract tracks
if self.type == "line":
- for box in self.boxes:
+ for _ in self.boxes:
self.total_counts += 1
im0 = self.update_graph(frame_number=frame_number)
self.total_counts = 0
- elif self.type == "pie" or self.type == "bar" or self.type == "area":
+ elif self.type in {"pie", "bar", "area"}:
self.clswise_count = {}
for box, cls in zip(self.boxes, self.clss):
if self.names[int(cls)] in self.clswise_count:
diff --git a/ultralytics/solutions/distance_calculation.py b/ultralytics/solutions/distance_calculation.py
index dccd1687c6..773b6086da 100644
--- a/ultralytics/solutions/distance_calculation.py
+++ b/ultralytics/solutions/distance_calculation.py
@@ -4,55 +4,21 @@ import math
import cv2
-from ultralytics.utils.checks import check_imshow
+from ultralytics.solutions.solutions import BaseSolution # Import a parent class
from ultralytics.utils.plotting import Annotator, colors
-class DistanceCalculation:
+class DistanceCalculation(BaseSolution):
"""A class to calculate distance between two objects in a real-time video stream based on their tracks."""
- def __init__(
- self,
- names,
- view_img=False,
- line_thickness=2,
- line_color=(255, 0, 255),
- centroid_color=(104, 31, 17),
- ):
- """
- Initializes the DistanceCalculation class with the given parameters.
-
- Args:
- names (dict): Dictionary of classes names.
- view_img (bool, optional): Flag to indicate if the video stream should be displayed. Defaults to False.
- line_thickness (int, optional): Thickness of the lines drawn on the image. Defaults to 2.
- line_color (tuple, optional): Color of the lines drawn on the image (BGR format). Defaults to (255, 255, 0).
- centroid_color (tuple, optional): Color of the centroids drawn (BGR format). Defaults to (255, 0, 255).
- """
- # Visual & image information
- self.im0 = None
- self.annotator = None
- self.view_img = view_img
- self.line_color = line_color
- self.centroid_color = centroid_color
-
- # Prediction & tracking information
- self.names = names
- self.boxes = None
- self.line_thickness = line_thickness
- self.trk_ids = None
-
- # Distance calculation information
- self.centroids = []
+ def __init__(self, **kwargs):
+ """Initializes the DistanceCalculation class with the given parameters."""
+ super().__init__(**kwargs)
# Mouse event information
self.left_mouse_count = 0
self.selected_boxes = {}
- # Check if environment supports imshow
- self.env_check = check_imshow(warn=True)
- self.window_name = "Ultralytics Solutions"
-
def mouse_event_for_distance(self, event, x, y, flags, param):
"""
Handles mouse events to select regions in a real-time video stream.
@@ -67,7 +33,7 @@ class DistanceCalculation:
if event == cv2.EVENT_LBUTTONDOWN:
self.left_mouse_count += 1
if self.left_mouse_count <= 2:
- for box, track_id in zip(self.boxes, self.trk_ids):
+ for box, track_id in zip(self.boxes, self.track_ids):
if box[0] < x < box[2] and box[1] < y < box[3] and track_id not in self.selected_boxes:
self.selected_boxes[track_id] = box
@@ -75,30 +41,21 @@ class DistanceCalculation:
self.selected_boxes = {}
self.left_mouse_count = 0
- def start_process(self, im0, tracks):
+ def calculate(self, im0):
"""
Processes the video frame and calculates the distance between two bounding boxes.
Args:
im0 (ndarray): The image frame.
- tracks (list): List of tracks obtained from the object tracking process.
Returns:
(ndarray): The processed image frame.
"""
- self.im0 = im0
- if tracks[0].boxes.id is None:
- if self.view_img:
- self.display_frames()
- return im0
+ self.annotator = Annotator(im0, line_width=self.line_width) # Initialize annotator
+ self.extract_tracks(im0) # Extract tracks
- self.boxes = tracks[0].boxes.xyxy.cpu()
- clss = tracks[0].boxes.cls.cpu().tolist()
- self.trk_ids = tracks[0].boxes.id.int().cpu().tolist()
-
- self.annotator = Annotator(self.im0, line_width=self.line_thickness)
-
- for box, cls, track_id in zip(self.boxes, clss, self.trk_ids):
+ # Iterate over bounding boxes, track ids and classes index
+ for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
self.annotator.box_label(box, color=colors(int(cls), True), label=self.names[int(cls)])
if len(self.selected_boxes) == 2:
@@ -115,25 +72,11 @@ class DistanceCalculation:
pixels_distance = math.sqrt(
(self.centroids[0][0] - self.centroids[1][0]) ** 2 + (self.centroids[0][1] - self.centroids[1][1]) ** 2
)
- self.annotator.plot_distance_and_line(pixels_distance, self.centroids, self.line_color, self.centroid_color)
+ self.annotator.plot_distance_and_line(pixels_distance, self.centroids)
self.centroids = []
- if self.view_img and self.env_check:
- self.display_frames()
-
- return im0
-
- def display_frames(self):
- """Displays the current frame with annotations."""
- cv2.namedWindow(self.window_name)
- cv2.setMouseCallback(self.window_name, self.mouse_event_for_distance)
- cv2.imshow(self.window_name, self.im0)
-
- if cv2.waitKey(1) & 0xFF == ord("q"):
- return
-
+ self.display_output(im0) # display output with base class function
+ cv2.setMouseCallback("Ultralytics Solutions", self.mouse_event_for_distance)
-if __name__ == "__main__":
- names = {0: "person", 1: "car"} # example class names
- distance_calculation = DistanceCalculation(names)
+ return im0 # return output image for more usage
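With the class now built on `BaseSolution`, usage matches the updated docs example earlier in this PR; a minimal sketch, with a placeholder video path.

```python
import cv2

from ultralytics import solutions

cap = cv2.VideoCapture("path/to/video/file.mp4")  # placeholder path
distance = solutions.DistanceCalculation(model="yolo11n.pt", show=True)

while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        break
    im0 = distance.calculate(im0)  # tracking, annotation, and mouse-driven selection handled internally

cap.release()
cv2.destroyAllWindows()
```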
diff --git a/ultralytics/solutions/heatmap.py b/ultralytics/solutions/heatmap.py
index 30d1817d76..d7dcf71cff 100644
--- a/ultralytics/solutions/heatmap.py
+++ b/ultralytics/solutions/heatmap.py
@@ -52,7 +52,8 @@ class Heatmap(ObjectCounter):
Returns:
im0 (ndarray): Processed image for further usage
"""
- self.heatmap = np.zeros_like(im0, dtype=np.float32) * 0.99 if not self.initialized else self.heatmap
+ if not self.initialized:
+ self.heatmap = np.zeros_like(im0, dtype=np.float32) * 0.99
self.initialized = True # Initialize heatmap only once
self.annotator = Annotator(im0, line_width=self.line_width) # Initialize annotator
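For completeness, a hedged usage sketch that exercises the one-time heatmap initialization above; it assumes the `generate_heatmap()` entry point from the heatmaps guide and uses a placeholder video path.

```python
import cv2

from ultralytics import solutions

cap = cv2.VideoCapture("path/to/video/file.mp4")  # placeholder path
heatmap = solutions.Heatmap(model="yolo11n.pt", colormap=cv2.COLORMAP_JET, show=False)

while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        break
    im0 = heatmap.generate_heatmap(im0)  # assumed entry point; heatmap buffer initialized on first frame

cap.release()
```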
diff --git a/ultralytics/solutions/object_counter.py b/ultralytics/solutions/object_counter.py
index 7d9bb8c9f4..d576746421 100644
--- a/ultralytics/solutions/object_counter.py
+++ b/ultralytics/solutions/object_counter.py
@@ -112,13 +112,13 @@ class ObjectCounter(BaseSolution):
# Iterate over bounding boxes, track ids and classes index
for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
# Draw bounding box and counting region
- self.annotator.box_label(box, label=self.names[cls], color=colors(track_id, True))
+ self.annotator.box_label(box, label=self.names[cls], color=colors(cls, True))
self.store_tracking_history(track_id, box) # Store track history
self.store_classwise_counts(cls) # store classwise counts in dict
# Draw tracks of objects
self.annotator.draw_centroid_and_tracks(
- self.track_line, color=colors(int(track_id), True), track_thickness=self.line_width
+ self.track_line, color=colors(int(cls), True), track_thickness=self.line_width
)
# store previous position of track for object counting
diff --git a/ultralytics/utils/__init__.py b/ultralytics/utils/__init__.py
index 0ae25a8980..6e19188ca8 100644
--- a/ultralytics/utils/__init__.py
+++ b/ultralytics/utils/__init__.py
@@ -526,7 +526,7 @@ def read_device_model() -> str:
try:
with open("/proc/device-tree/model") as f:
return f.read()
- except: # noqa E722
+ except Exception:
return ""
@@ -584,7 +584,7 @@ def is_docker() -> bool:
try:
with open("/proc/self/cgroup") as f:
return "docker" in f.read()
- except: # noqa E722
+ except Exception:
return False
@@ -623,7 +623,7 @@ def is_online() -> bool:
for dns in ("1.1.1.1", "8.8.8.8"): # check Cloudflare and Google DNS
socket.create_connection(address=(dns, 80), timeout=2.0).close()
return True
- except: # noqa E722
+ except Exception:
return False
diff --git a/ultralytics/utils/callbacks/tensorboard.py b/ultralytics/utils/callbacks/tensorboard.py
index f0ff02fa89..5f4e0f0260 100644
--- a/ultralytics/utils/callbacks/tensorboard.py
+++ b/ultralytics/utils/callbacks/tensorboard.py
@@ -50,7 +50,7 @@ def _log_tensorboard_graph(trainer):
LOGGER.info(f"{PREFIX}model graph visualization added ✅")
return
- except: # noqa E722
+ except Exception:
# Fallback to TorchScript export steps (RTDETR)
try:
model = deepcopy(de_parallel(trainer.model))
diff --git a/ultralytics/utils/checks.py b/ultralytics/utils/checks.py
index 76455e2329..c483e31366 100644
--- a/ultralytics/utils/checks.py
+++ b/ultralytics/utils/checks.py
@@ -277,7 +277,7 @@ def check_latest_pypi_version(package_name="ultralytics"):
response = requests.get(f"https://pypi.org/pypi/{package_name}/json", timeout=3)
if response.status_code == 200:
return response.json()["info"]["version"]
- except: # noqa E722
+ except Exception:
return None
@@ -299,7 +299,7 @@ def check_pip_update_available():
f"Update with 'pip install -U ultralytics'"
)
return True
- except: # noqa E722
+ except Exception:
pass
return False
@@ -715,7 +715,7 @@ def git_describe(path=ROOT): # path must be a directory
"""Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe."""
try:
return subprocess.check_output(f"git -C {path} describe --tags --long --always", shell=True).decode()[:-1]
- except: # noqa E722
+ except Exception:
return ""
diff --git a/ultralytics/utils/downloads.py b/ultralytics/utils/downloads.py
index f356f47bb1..be182f40b2 100644
--- a/ultralytics/utils/downloads.py
+++ b/ultralytics/utils/downloads.py
@@ -60,7 +60,7 @@ def is_url(url, check=False):
with request.urlopen(url) as response:
return response.getcode() == 200 # check if exists online
return True
- except: # noqa E722
+ except Exception:
return False
diff --git a/ultralytics/utils/metrics.py b/ultralytics/utils/metrics.py
index fc9862dd36..2b80c02fe1 100644
--- a/ultralytics/utils/metrics.py
+++ b/ultralytics/utils/metrics.py
@@ -598,7 +598,7 @@ def ap_per_class(
# AP from recall-precision curve
for j in range(tp.shape[1]):
ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
- if plot and j == 0:
+ if j == 0:
prec_values.append(np.interp(x, mrec, mpre)) # precision at mAP@0.5
prec_values = np.array(prec_values) # (nc, 1000)
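The change above means the interpolated precision curve is collected even when plotting is disabled; a tiny sketch of that interpolation step with made-up recall/precision values.

```python
import numpy as np

mrec = np.array([0.0, 0.2, 0.5, 0.8, 1.0])  # made-up recall envelope for one class
mpre = np.array([1.0, 0.9, 0.8, 0.6, 0.3])  # made-up precision envelope
x = np.linspace(0, 1, 1000)  # fixed recall grid, as used by ap_per_class()

prec_values_row = np.interp(x, mrec, mpre)  # precision sampled across the recall grid at IoU 0.5
print(prec_values_row.shape)  # (1000,)
```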
diff --git a/ultralytics/utils/plotting.py b/ultralytics/utils/plotting.py
index b622bcc8cb..6e257634d7 100644
--- a/ultralytics/utils/plotting.py
+++ b/ultralytics/utils/plotting.py
@@ -804,31 +804,30 @@ class Annotator:
self.im, label, (int(mask[0][0]) - text_size[0] // 2, int(mask[0][1])), 0, self.sf, txt_color, self.tf
)
- def plot_distance_and_line(self, pixels_distance, centroids, line_color, centroid_color):
+ def plot_distance_and_line(
+ self, pixels_distance, centroids, line_color=(104, 31, 17), centroid_color=(255, 0, 255)
+ ):
"""
Plot the distance and line on frame.
Args:
pixels_distance (float): Pixels distance between two bbox centroids.
centroids (list): Bounding box centroids data.
- line_color (tuple): RGB distance line color.
- centroid_color (tuple): RGB bounding box centroid color.
+ line_color (tuple, optional): Distance line color.
+ centroid_color (tuple, optional): Bounding box centroid color.
"""
# Get the text size
- (text_width_m, text_height_m), _ = cv2.getTextSize(
- f"Pixels Distance: {pixels_distance:.2f}", 0, self.sf, self.tf
- )
+ text = f"Pixels Distance: {pixels_distance:.2f}"
+ (text_width_m, text_height_m), _ = cv2.getTextSize(text, 0, self.sf, self.tf)
# Define corners with 10-pixel margin and draw rectangle
- top_left = (15, 25)
- bottom_right = (15 + text_width_m + 20, 25 + text_height_m + 20)
- cv2.rectangle(self.im, top_left, bottom_right, centroid_color, -1)
+ cv2.rectangle(self.im, (15, 25), (15 + text_width_m + 20, 25 + text_height_m + 20), line_color, -1)
# Calculate the position for the text with a 10-pixel margin and draw text
- text_position = (top_left[0] + 10, top_left[1] + text_height_m + 10)
+ text_position = (25, 25 + text_height_m + 10)
cv2.putText(
self.im,
- f"Pixels Distance: {pixels_distance:.2f}",
+ text,
text_position,
0,
self.sf,
@@ -1118,7 +1117,7 @@ def plot_images(
im[y : y + h, x : x + w, :][mask] = (
im[y : y + h, x : x + w, :][mask] * 0.4 + np.array(color) * 0.6
)
- except: # noqa E722
+ except Exception:
pass
annotator.fromarray(im)
if not save:
@@ -1156,16 +1155,16 @@ def plot_results(file="path/to/results.csv", dir="", segment=False, pose=False,
save_dir = Path(file).parent if file else Path(dir)
if classify:
fig, ax = plt.subplots(2, 2, figsize=(6, 6), tight_layout=True)
- index = [1, 4, 2, 3]
+ index = [2, 5, 3, 4]
elif segment:
fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True)
- index = [1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]
+ index = [2, 3, 4, 5, 6, 7, 10, 11, 14, 15, 16, 17, 8, 9, 12, 13]
elif pose:
fig, ax = plt.subplots(2, 9, figsize=(21, 6), tight_layout=True)
- index = [1, 2, 3, 4, 5, 6, 7, 10, 11, 14, 15, 16, 17, 18, 8, 9, 12, 13]
+ index = [2, 3, 4, 5, 6, 7, 8, 11, 12, 15, 16, 17, 18, 19, 9, 10, 13, 14]
else:
fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
- index = [1, 2, 3, 4, 5, 8, 9, 10, 6, 7]
+ index = [2, 3, 4, 5, 6, 9, 10, 11, 7, 8]
ax = ax.ravel()
files = list(save_dir.glob("results*.csv"))
assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot."
diff --git a/ultralytics/utils/torch_utils.py b/ultralytics/utils/torch_utils.py
index 52e812757a..0143b933d8 100644
--- a/ultralytics/utils/torch_utils.py
+++ b/ultralytics/utils/torch_utils.py
@@ -119,7 +119,7 @@ def get_cpu_info():
info = cpuinfo.get_cpu_info() # info dict
string = info.get(k[0] if k[0] in info else k[1] if k[1] in info else k[2], "unknown")
PERSISTENT_CACHE["cpu_info"] = string.replace("(R)", "").replace("CPU ", "").replace("@ ", "")
- except: # noqa E722
+ except Exception:
pass
return PERSISTENT_CACHE.get("cpu_info", "unknown")