diff --git a/docs/en/guides/sahi-tiled-inference.md b/docs/en/guides/sahi-tiled-inference.md index ec48d95b7..f6c342e01 100644 --- a/docs/en/guides/sahi-tiled-inference.md +++ b/docs/en/guides/sahi-tiled-inference.md @@ -72,11 +72,11 @@ Here's how to import the necessary modules and download a YOLO11 model and some ```python from sahi.utils.file import download_from_url -from sahi.utils.yolov8 import download_yolov8s_model +from sahi.utils.ultralytics import download_yolo11n_model # Download YOLO11 model -model_path = "models/yolo11s.pt" -download_yolov8s_model(model_path) +model_path = "models/yolo11n.pt" +download_yolo11n_model(model_path) # Download test images download_from_url( @@ -99,8 +99,8 @@ You can instantiate a YOLO11 model for object detection like this: from sahi import AutoDetectionModel detection_model = AutoDetectionModel.from_pretrained( - model_type="yolov8", - model_path=yolov8_model_path, + model_type="ultralytics", + model_path=model_path, confidence_threshold=0.3, device="cpu", # or 'cuda:0' ) @@ -169,7 +169,7 @@ For batch prediction on a directory of images: from sahi.predict import predict predict( - model_type="yolov8", + model_type="ultralytics", model_path="path/to/yolo11n.pt", model_device="cpu", # or 'cuda:0' model_confidence_threshold=0.4, @@ -218,11 +218,11 @@ Then, download a YOLO11 model and test images: ```python from sahi.utils.file import download_from_url -from sahi.utils.yolov8 import download_yolov8s_model +from sahi.utils.ultralytics import download_yolo11n_model # Download YOLO11 model -model_path = "models/yolo11s.pt" -download_yolov8s_model(model_path) +model_path = "models/yolo11n.pt" +download_yolo11n_model(model_path) # Download test images download_from_url( @@ -280,7 +280,7 @@ Example for batch prediction: from sahi.predict import predict predict( - model_type="yolov8", + model_type="ultralytics", model_path="path/to/yolo11n.pt", model_device="cpu", # or 'cuda:0' model_confidence_threshold=0.4, diff --git 
a/examples/YOLOv8-Region-Counter/readme.md b/examples/YOLOv8-Region-Counter/readme.md index a0811359e..b0ab5b95a 100644 --- a/examples/YOLOv8-Region-Counter/readme.md +++ b/examples/YOLOv8-Region-Counter/readme.md @@ -1,7 +1,14 @@ # Regions Counting Using YOLOv8 (Inference on Video) -- Region counting is a method employed to tally the objects within a specified area, allowing for more sophisticated analyses when multiple regions are considered. These regions can be adjusted interactively using a Left Mouse Click, and the counting process occurs in real time. -- Regions can be adjusted to suit the user's preferences and requirements. +> **Region Counter** is now part of **[Ultralytics Solutions](https://docs.ultralytics.com/solutions/)**, offering improved features and regular updates! + +🔗 **[Explore Object Counting in Regions Here](https://docs.ultralytics.com/guides/region-counting/)** + +> 🔔 **Notice:** + +> The GitHub example will remain available but **will no longer be actively maintained**. For the latest updates and improvements, please use the official [link](https://docs.ultralytics.com/guides/region-counting/). Thank you! + +Region counting is a method employed to tally the objects within a specified area, allowing for more sophisticated analyses when multiple regions are considered. These regions can be adjusted interactively using a Left Mouse Click, and the counting process occurs in real time. Regions can be adjusted to suit the user's preferences and requirements.

diff --git a/examples/YOLOv8-SAHI-Inference-Video/readme.md b/examples/YOLOv8-SAHI-Inference-Video/readme.md index 525aca5ac..4dc169b3e 100644 --- a/examples/YOLOv8-SAHI-Inference-Video/readme.md +++ b/examples/YOLOv8-SAHI-Inference-Video/readme.md @@ -1,11 +1,11 @@ -# YOLOv8 with SAHI (Inference on Video) +# YOLO11 with SAHI (Inference on Video) -[SAHI](https://docs.ultralytics.com/guides/sahi-tiled-inference/) is designed to optimize object detection algorithms for large-scale and high-resolution imagery. It partitions images into manageable slices, performs object detection on each slice, and then stitches the results back together. This tutorial will guide you through the process of running YOLOv8 inference on video files with the aid of SAHI. +[SAHI](https://docs.ultralytics.com/guides/sahi-tiled-inference/) is designed to optimize object detection algorithms for large-scale and high-resolution imagery. It partitions images into manageable slices, performs object detection on each slice, and then stitches the results back together. This tutorial will guide you through the process of running YOLO11 inference on video files with the aid of SAHI. 
## Table of Contents - [Step 1: Install the Required Libraries](#step-1-install-the-required-libraries) -- [Step 2: Run the Inference with SAHI using Ultralytics YOLOv8](#step-2-run-the-inference-with-sahi-using-ultralytics-yolov8) +- [Step 2: Run the Inference with SAHI using Ultralytics YOLO11](#step-2-run-the-inference-with-sahi-using-ultralytics-yolo11) - [Usage Options](#usage-options) - [FAQ](#faq) @@ -18,13 +18,13 @@ Clone the repository, install dependencies and `cd` to this local directory for git clone https://github.com/ultralytics/ultralytics # Install dependencies -pip install sahi ultralytics +pip install -U sahi ultralytics # cd to local directory cd ultralytics/examples/YOLOv8-SAHI-Inference-Video ``` -## Step 2: Run the Inference with SAHI using Ultralytics YOLOv8 +## Step 2: Run the Inference with SAHI using Ultralytics YOLO11 Here are the basic commands for running the inference: @@ -33,14 +33,14 @@ Here are the basic commands for running the inference: python yolov8_sahi.py --source "path/to/video.mp4" --save-img #if you want to change model file -python yolov8_sahi.py --source "path/to/video.mp4" --save-img --weights "yolov8n.pt" +python yolov8_sahi.py --source "path/to/video.mp4" --save-img --weights "yolo11n.pt" ``` ## Usage Options - `--source`: Specifies the path to the video file you want to run inference on. - `--save-img`: Flag to save the detection results as images. -- `--weights`: Specifies a different YOLOv8 model file (e.g., `yolov8n.pt`, `yolov8s.pt`, `yolov8m.pt`, `yolov8l.pt`, `yolov8x.pt`). +- `--weights`: Specifies a different YOLO11 model file (e.g., `yolo11n.pt`, `yolo11s.pt`, `yolo11m.pt`, `yolo11l.pt`, `yolo11x.pt`). ## FAQ @@ -48,9 +48,9 @@ python yolov8_sahi.py --source "path/to/video.mp4" --save-img --weights "yolov8n SAHI stands for Slicing Aided Hyper Inference. It is a library designed to optimize object detection algorithms for large-scale and high-resolution images. 
The library source code is available on [GitHub](https://github.com/obss/sahi). -**2. Why use SAHI with YOLOv8?** +**2. Why use SAHI with YOLO11?** -SAHI can handle large-scale images by slicing them into smaller, more manageable sizes without compromising the detection quality. This makes it a great companion to YOLOv8, especially when working with high-resolution videos. +SAHI can handle large-scale images by slicing them into smaller, more manageable sizes without compromising the detection quality. This makes it a great companion to YOLO11, especially when working with high-resolution videos. **3. How do I debug issues?** @@ -66,4 +66,4 @@ Yes, you can specify different YOLO model weights using the `--weights` option. **5. Where can I find more information?** -For a full guide to YOLOv8 with SAHI see [https://docs.ultralytics.com/guides/sahi-tiled-inference](https://docs.ultralytics.com/guides/sahi-tiled-inference/). +For a full guide to YOLO11 with SAHI see [https://docs.ultralytics.com/guides/sahi-tiled-inference](https://docs.ultralytics.com/guides/sahi-tiled-inference/). 
diff --git a/examples/YOLOv8-SAHI-Inference-Video/yolov8_sahi.py b/examples/YOLOv8-SAHI-Inference-Video/yolov8_sahi.py index 9e0ba13d9..40eaf2ef2 100644 --- a/examples/YOLOv8-SAHI-Inference-Video/yolov8_sahi.py +++ b/examples/YOLOv8-SAHI-Inference-Video/yolov8_sahi.py @@ -6,32 +6,32 @@ from pathlib import Path import cv2 from sahi import AutoDetectionModel from sahi.predict import get_sliced_prediction -from sahi.utils.yolov8 import download_yolov8s_model +from sahi.utils.ultralytics import download_yolo11n_model from ultralytics.utils.files import increment_path from ultralytics.utils.plotting import Annotator, colors class SAHIInference: - """Runs YOLOv8 and SAHI for object detection on video with options to view, save, and track results.""" + """Runs Ultralytics YOLO11 and SAHI for object detection on video with options to view, save, and track results.""" def __init__(self): - """Initializes the SAHIInference class for performing sliced inference using SAHI with YOLOv8 models.""" + """Initializes the SAHIInference class for performing sliced inference using SAHI with YOLO11 models.""" self.detection_model = None def load_model(self, weights): - """Loads a YOLOv8 model with specified weights for object detection using SAHI.""" - yolov8_model_path = f"models/{weights}" - download_yolov8s_model(yolov8_model_path) + """Loads a YOLO11 model with specified weights for object detection using SAHI.""" + yolo11_model_path = f"models/{weights}" + download_yolo11n_model(yolo11_model_path) self.detection_model = AutoDetectionModel.from_pretrained( - model_type="yolov8", model_path=yolov8_model_path, confidence_threshold=0.3, device="cpu" + model_type="ultralytics", model_path=yolo11_model_path, confidence_threshold=0.3, device="cpu" ) def inference( - self, weights="yolov8n.pt", source="test.mp4", view_img=False, save_img=False, exist_ok=False, track=False + self, weights="yolo11n.pt", source="test.mp4", view_img=False, save_img=False, exist_ok=False, track=False ): """ - 
Run object detection on a video using YOLOv8 and SAHI. + Run object detection on a video using YOLO11 and SAHI. Args: weights (str): Model weights path. @@ -93,7 +93,7 @@ class SAHIInference: def parse_opt(self): """Parse command line arguments.""" parser = argparse.ArgumentParser() - parser.add_argument("--weights", type=str, default="yolov8n.pt", help="initial weights path") + parser.add_argument("--weights", type=str, default="yolo11n.pt", help="initial weights path") parser.add_argument("--source", type=str, required=True, help="video file path") parser.add_argument("--view-img", action="store_true", help="show results") parser.add_argument("--save-img", action="store_true", help="save results")