From 15060e13cf71eb12467598dab49584e1e26f3db5 Mon Sep 17 00:00:00 2001
From: Muhammad Rizwan Munawar
Date: Wed, 1 May 2024 18:55:12 +0500
Subject: [PATCH] Add FastSAM and YOLO-World tracking docs (#10733)

Co-authored-by: Glenn Jocher
---
 docs/en/guides/parking-management.md | 12 +++++++++---
 docs/en/integrations/google-colab.md |  6 +++---
 docs/en/models/fast-sam.md           | 24 ++++++++++++++++++++++++
 docs/en/models/yolo-world.md         | 25 +++++++++++++++++++++++++
 4 files changed, 61 insertions(+), 6 deletions(-)

diff --git a/docs/en/guides/parking-management.md b/docs/en/guides/parking-management.md
index 43ae97fda0..e68e0194e5 100644
--- a/docs/en/guides/parking-management.md
+++ b/docs/en/guides/parking-management.md
@@ -64,11 +64,16 @@ root.mainloop()
     # Path to JSON file, created with the point selection app above
     polygon_json_path = "bounding_boxes.json"
 
-    # Video Capture
+    # Video capture
     cap = cv2.VideoCapture("Path/to/video/file.mp4")
     assert cap.isOpened(), "Error reading video file"
-    w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
-    video_writer = cv2.VideoWriter("parking management.avi", cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+    w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH,
+                                           cv2.CAP_PROP_FRAME_HEIGHT,
+                                           cv2.CAP_PROP_FPS))
+
+    # Video writer
+    video_writer = cv2.VideoWriter("parking management.avi",
+                                   cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
 
     # Initialize parking management object
     management = ParkingManagement(model_path="yolov8n.pt")
@@ -77,6 +82,7 @@ root.mainloop()
         ret, im0 = cap.read()
         if not ret:
             break
+
         json_data = management.parking_regions_extraction(polygon_json_path)
         results = management.model.track(im0, persist=True, show=False)
 
diff --git a/docs/en/integrations/google-colab.md b/docs/en/integrations/google-colab.md
index 6df3fe894a..05707936da 100644
--- a/docs/en/integrations/google-colab.md
+++ b/docs/en/integrations/google-colab.md
@@ -18,7 +18,7 @@ You can use Google Colab regardless of the specifications and configurations of
 
 ## Training YOLOv8 Using Google Colaboratory
 
-Training YOLOv8 models on Google Colab is pretty straightforward. Thanks to the integration, you can access the [Google Colab YOLOv8 Notebook]](https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb) and start training your model immediately. For a detailed understanding of the model training process and best practices, refer to our [YOLOv8 Model Training guide](../modes/train.md).
+Training YOLOv8 models on Google Colab is straightforward. Thanks to the integration, you can access the [Google Colab YOLOv8 Notebook](https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb) and start training your model immediately. For a detailed understanding of the model training process and best practices, refer to our [YOLOv8 Model Training guide](../modes/train.md).
 
 Sign in to your Google account and run the notebook's cells to train your model.
@@ -69,7 +69,7 @@ Now, let's look at some of the standout features that make Google Colab a go-to
 
 ![Runtime Settings](https://github.com/ultralytics/ultralytics/assets/25847604/89a71a81-2784-4182-ad53-f024807d1dcc)
 
-- **Colaboration:** Google Colab makes collaborating and working with other developers easy. You can easily share your notebooks with others and perform edits in real-time.
+- **Collaboration:** Google Colab makes collaborating and working with other developers easy. You can easily share your notebooks with others and edit them in real time.
 
 - **Custom Environment:** Users can install dependencies, configure the system, and use shell commands directly in the notebook.
 
@@ -103,7 +103,7 @@ If you’d like to dive deeper into Google Colab, here are a few resources to gu
 
 ## Summary
 
-We’ve dicsussed how you can easily experiment with Ultralytics YOLOv8 models on Google Colab. You can use Google Colab to train and evaluate your models on GPUs and TPUs with a few clicks.
+We’ve discussed how you can easily experiment with Ultralytics YOLOv8 models on Google Colab. You can use Google Colab to train and evaluate your models on GPUs and TPUs with a few clicks.
 
 For more details, visit [Google Colab’s FAQ page](https://research.google.com/colaboratory/intl/en-GB/faq.html).
 
diff --git a/docs/en/models/fast-sam.md b/docs/en/models/fast-sam.md
index 1428ad9c30..4e5d77a10a 100644
--- a/docs/en/models/fast-sam.md
+++ b/docs/en/models/fast-sam.md
@@ -119,6 +119,30 @@ Validation of the model on a dataset can be done as follows:
 
 Please note that FastSAM only supports detection and segmentation of a single class of object. This means it will recognize and segment all objects as the same class. Therefore, when preparing the dataset, you need to convert all object category IDs to 0.
 
+### Track Usage
+
+To perform object tracking on a video, use the `track` method as shown below:
+
+!!! Example
+
+    === "Python"
+
+        ```python
+        from ultralytics import FastSAM
+
+        # Create a FastSAM model
+        model = FastSAM('FastSAM-s.pt')  # or FastSAM-x.pt
+
+        # Track with a FastSAM model on a video
+        results = model.track(source="path/to/video.mp4", imgsz=640)
+        ```
+
+    === "CLI"
+
+        ```bash
+        yolo segment track model=FastSAM-s.pt source="path/to/video/file.mp4" imgsz=640
+        ```
+
 ## FastSAM Official Usage
 
 FastSAM is also available directly from the [https://github.com/CASIA-IVA-Lab/FastSAM](https://github.com/CASIA-IVA-Lab/FastSAM) repository. Here is a brief overview of the typical steps you might take to use FastSAM:
diff --git a/docs/en/models/yolo-world.md b/docs/en/models/yolo-world.md
index 33fc277d59..93e78ceec4 100644
--- a/docs/en/models/yolo-world.md
+++ b/docs/en/models/yolo-world.md
@@ -152,6 +152,31 @@ Model validation on a dataset is streamlined as follows:
 
         yolo val model=yolov8s-world.pt data=coco8.yaml imgsz=640
         ```
 
+### Track Usage
+
+Object tracking with a YOLO-World model on a video is streamlined as follows:
+
+!!! Example
+
+    === "Python"
+
+        ```python
+        from ultralytics import YOLO
+
+        # Create a YOLO-World model
+        model = YOLO('yolov8s-world.pt')  # or select yolov8m/l-world.pt for different sizes
+
+        # Track with a YOLO-World model on a video
+        results = model.track(source="path/to/video.mp4")
+        ```
+
+    === "CLI"
+
+        ```bash
+        # Track with a YOLO-World model on a video with a specified image size
+        yolo track model=yolov8s-world.pt imgsz=640 source="path/to/video/file.mp4"
+        ```
+
 !!! Note
 
     The YOLO-World models provided by Ultralytics come pre-configured with [COCO dataset](../datasets/detect/coco.md) categories as part of their offline vocabulary, enhancing efficiency for immediate application. This integration allows the YOLOv8-World models to directly recognize and predict the 80 standard categories defined in the COCO dataset without requiring additional setup or customization.
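
Beyond the tracking examples added by this patch, YOLO-World's open vocabulary can be narrowed before tracking so that only the classes of interest are followed across frames. Below is a minimal sketch, separate from the patch itself: it assumes the `set_classes` method documented on the YOLO-World page carries over from `predict` to `track`, and the class list and video path are placeholders.

```python
from ultralytics import YOLO

# Create a YOLO-World model
model = YOLO('yolov8s-world.pt')

# Narrow the open vocabulary so only these classes are detected and tracked
model.set_classes(['person', 'bus'])

# Track the reduced class set on a video; detections keep their IDs across frames
results = model.track(source="path/to/video.mp4", imgsz=640)
```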