Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Branch: pull/14059/head^2
Author: Glenn Jocher, committed by GitHub (5 months ago)
Parent: ff63a56a42
Commit: 691b5daccb
16 changed files:
  1. examples/YOLOv8-Action-Recognition/action_recognition.py (32)
  2. examples/YOLOv8-OpenCV-int8-tflite-Python/main.py (1)
  3. tests/conftest.py (22)
  4. tests/test_cli.py (16)
  5. tests/test_cuda.py (10)
  6. tests/test_engine.py (10)
  7. tests/test_explorer.py (8)
  8. tests/test_exports.py (36)
  9. tests/test_integrations.py (14)
  10. tests/test_python.py (74)
  11. ultralytics/data/explorer/explorer.py (3)
  12. ultralytics/hub/session.py (2)
  13. ultralytics/nn/modules/block.py (1)
  14. ultralytics/solutions/ai_gym.py (6)
  15. ultralytics/utils/__init__.py (1)
  16. ultralytics/utils/metrics.py (1)

@ -69,7 +69,7 @@ class TorchVisionVideoClassifier:
"""
return list(TorchVisionVideoClassifier.model_name_to_model_and_weights.keys())
def preprocess_crops_for_video_cls(self, crops: List[np.ndarray], input_size: list = [224, 224]) -> torch.Tensor:
def preprocess_crops_for_video_cls(self, crops: List[np.ndarray], input_size: list = None) -> torch.Tensor:
"""
Preprocess a list of crops for video classification.
@ -80,6 +80,8 @@ class TorchVisionVideoClassifier:
Returns:
torch.Tensor: Preprocessed crops as a tensor with dimensions (1, T, C, H, W).
"""
if input_size is None:
input_size = [224, 224]
from torchvision.transforms import v2
transform = v2.Compose(
@ -156,7 +158,7 @@ class HuggingFaceVideoClassifier:
model = model.half()
self.model = model.eval()
def preprocess_crops_for_video_cls(self, crops: List[np.ndarray], input_size: list = [224, 224]) -> torch.Tensor:
def preprocess_crops_for_video_cls(self, crops: List[np.ndarray], input_size: list = None) -> torch.Tensor:
"""
Preprocess a list of crops for video classification.
@ -167,6 +169,8 @@ class HuggingFaceVideoClassifier:
Returns:
torch.Tensor: Preprocessed crops as a tensor (1, T, C, H, W).
"""
if input_size is None:
input_size = [224, 224]
from torchvision.transforms import v2
transform = v2.Compose(
@ -266,15 +270,7 @@ def run(
video_cls_overlap_ratio: float = 0.25,
fp16: bool = False,
video_classifier_model: str = "microsoft/xclip-base-patch32",
labels: List[str] = [
"walking",
"running",
"brushing teeth",
"looking into phone",
"weight lifting",
"cooking",
"sitting",
],
labels: List[str] = None,
) -> None:
"""
Run action recognition on a video source using YOLO for object detection and a video classifier.
@ -295,6 +291,16 @@ def run(
Returns:
None
"""
if labels is None:
labels = [
"walking",
"running",
"brushing teeth",
"looking into phone",
"weight lifting",
"cooking",
"sitting",
]
# Initialize models and device
device = select_device(device)
yolo_model = YOLO(weights).to(device)
@ -312,9 +318,7 @@ def run(
# Initialize video capture
if source.startswith("http") and urlparse(source).hostname in {"www.youtube.com", "youtube.com", "youtu.be"}:
source = get_best_youtube_url(source)
elif source.endswith(".mp4"):
pass
else:
elif not source.endswith(".mp4"):
raise ValueError("Invalid source. Supported sources are YouTube URLs and MP4 files.")
cap = cv2.VideoCapture(source)
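The changes above swap the mutable defaults `input_size: list = [224, 224]` and the `labels` list for a `None` sentinel because Python evaluates default values once, at definition time, so a mutable default is shared across calls. A minimal, self-contained sketch of the pitfall and the fix (function names are illustrative, not from this commit):

from typing import List, Optional

def collect_buggy(item: str, items: list = []) -> list:
    """Appends to a default list that is created once and shared by every call (pitfall)."""
    items.append(item)
    return items

def collect_fixed(item: str, items: Optional[List[str]] = None) -> List[str]:
    """Uses a None sentinel so each call without `items` gets a fresh list."""
    if items is None:
        items = []
    items.append(item)
    return items

print(collect_buggy("a"))  # ['a']
print(collect_buggy("b"))  # ['a', 'b']  <- the default list persisted between calls
print(collect_fixed("a"))  # ['a']
print(collect_fixed("b"))  # ['b']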

@ -18,6 +18,7 @@ class LetterBox:
def __init__(
self, new_shape=(img_width, img_height), auto=False, scaleFill=False, scaleup=True, center=True, stride=32
):
"""Initializes LetterBox with parameters for reshaping and transforming image while maintaining aspect ratio."""
self.new_shape = new_shape
self.auto = auto
self.scaleFill = scaleFill

@ -11,18 +11,24 @@ def pytest_addoption(parser):
Add custom command-line options to pytest.
Args:
parser (pytest.config.Parser): The pytest parser object.
parser (pytest.config.Parser): The pytest parser object for adding custom command-line options.
Returns:
(None)
"""
parser.addoption("--slow", action="store_true", default=False, help="Run slow tests")
def pytest_collection_modifyitems(config, items):
"""
Modify the list of test items to remove tests marked as slow if the --slow option is not provided.
Modify the list of test items to exclude tests marked as slow if the --slow option is not specified.
Args:
config (pytest.config.Config): The pytest config object.
items (list): List of test items to be executed.
config (pytest.config.Config): The pytest configuration object that provides access to command-line options.
items (list): The list of collected pytest item objects to be modified based on the presence of --slow option.
Returns:
(None) The function modifies the 'items' list in place, and does not return a value.
"""
if not config.getoption("--slow"):
# Remove the item entirely from the list of test items if it's marked as 'slow'
@ -38,6 +44,9 @@ def pytest_sessionstart(session):
Args:
session (pytest.Session): The pytest session object.
Returns:
(None)
"""
from ultralytics.utils.torch_utils import init_seeds
@ -54,9 +63,12 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config):
and directories used during testing.
Args:
terminalreporter (pytest.terminal.TerminalReporter): The terminal reporter object.
terminalreporter (pytest.terminal.TerminalReporter): The terminal reporter object used for terminal output.
exitstatus (int): The exit status of the test run.
config (pytest.config.Config): The pytest config object.
Returns:
(None)
"""
from ultralytics.utils import WEIGHTS_DIR
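The conftest.py docstrings above describe the `--slow` gating used across these tests. A minimal standalone sketch of that pattern, assuming slow-marked items are removed from collection as the inline comment suggests (not the file's full implementation):

def pytest_addoption(parser):
    """Register the --slow flag so slow tests only run when explicitly requested."""
    parser.addoption("--slow", action="store_true", default=False, help="Run slow tests")

def pytest_collection_modifyitems(config, items):
    """Drop tests marked @pytest.mark.slow unless --slow was passed on the command line."""
    if not config.getoption("--slow"):
        items[:] = [item for item in items if "slow" not in item.keywords]

With this in place, `pytest` skips the slow suite and `pytest --slow` runs everything.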

@ -20,7 +20,7 @@ def run(cmd):
def test_special_modes():
"""Test various special command modes of YOLO."""
"""Test various special command-line modes for YOLO functionality."""
run("yolo help")
run("yolo checks")
run("yolo version")
@ -30,30 +30,30 @@ def test_special_modes():
@pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
def test_train(task, model, data):
"""Test YOLO training for a given task, model, and data."""
"""Test YOLO training for different tasks, models, and datasets."""
run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 cache=disk")
@pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
def test_val(task, model, data):
"""Test YOLO validation for a given task, model, and data."""
"""Test YOLO validation process for specified task, model, and data using a shell command."""
run(f"yolo val {task} model={model} data={data} imgsz=32 save_txt save_json")
@pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
def test_predict(task, model, data):
"""Test YOLO prediction on sample assets for a given task and model."""
"""Test YOLO prediction on provided sample assets for specified task and model."""
run(f"yolo predict model={model} source={ASSETS} imgsz=32 save save_crop save_txt")
@pytest.mark.parametrize("model", MODELS)
def test_export(model):
"""Test exporting a YOLO model to different formats."""
"""Test exporting a YOLO model to TorchScript format."""
run(f"yolo export model={model} format=torchscript imgsz=32")
def test_rtdetr(task="detect", model="yolov8n-rtdetr.yaml", data="coco8.yaml"):
"""Test the RTDETR functionality with the Ultralytics framework."""
"""Test the RTDETR functionality within Ultralytics for detection tasks using specified model and data."""
# Warning: must use imgsz=640 (note also add comma, spaces, fraction=0.25 args to test single-image training)
run(f"yolo train {task} model={model} data={data} --imgsz= 160 epochs =1, cache = disk fraction=0.25")
run(f"yolo predict {task} model={model} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
@ -61,7 +61,7 @@ def test_rtdetr(task="detect", model="yolov8n-rtdetr.yaml", data="coco8.yaml"):
@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="MobileSAM with CLIP is not supported in Python 3.12")
def test_fastsam(task="segment", model=WEIGHTS_DIR / "FastSAM-s.pt", data="coco8-seg.yaml"):
"""Test FastSAM segmentation functionality within Ultralytics."""
"""Test FastSAM model for segmenting objects in images using various prompts within Ultralytics."""
source = ASSETS / "bus.jpg"
run(f"yolo segment val {task} model={model} data={data} imgsz=32")
@ -99,7 +99,7 @@ def test_fastsam(task="segment", model=WEIGHTS_DIR / "FastSAM-s.pt", data="coco8
def test_mobilesam():
"""Test MobileSAM segmentation functionality using Ultralytics."""
"""Test MobileSAM segmentation with point prompts using Ultralytics."""
from ultralytics import SAM
# Load the model
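The test_cli.py docstrings all describe tests that drive YOLO through its command line via the `run()` helper named in the hunk header. A plausible minimal form of such a helper, an assumption rather than the file's actual code, shells out and fails the test on a non-zero exit status:

import subprocess

def run(cmd: str) -> None:
    """Execute a shell command and raise CalledProcessError if it exits non-zero."""
    subprocess.run(cmd.split(), check=True)

# e.g. run("yolo help"), run("yolo checks"), run("yolo version") as in test_special_modes above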

@ -32,7 +32,7 @@ def test_checks():
],
)
def test_export_engine_matrix(task, dynamic, int8, half, batch):
"""Test YOLO exports to TensorRT format."""
"""Test YOLO model export to TensorRT format for various configurations and run inference."""
file = YOLO(TASK2MODEL[task]).export(
format="engine",
imgsz=32,
@ -51,7 +51,7 @@ def test_export_engine_matrix(task, dynamic, int8, half, batch):
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_train():
"""Test model training on a minimal dataset."""
"""Test model training on a minimal dataset using available CUDA devices."""
device = 0 if CUDA_DEVICE_COUNT == 1 else [0, 1]
YOLO(MODEL).train(data="coco8.yaml", imgsz=64, epochs=1, device=device) # requires imgsz>=64
@ -59,7 +59,7 @@ def test_train():
@pytest.mark.slow
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_predict_multiple_devices():
"""Validate model prediction on multiple devices."""
"""Validate model prediction consistency across CPU and CUDA devices."""
model = YOLO("yolov8n.pt")
model = model.cpu()
assert str(model.device) == "cpu"
@ -84,7 +84,7 @@ def test_predict_multiple_devices():
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_autobatch():
"""Check batch size for YOLO model using autobatch."""
"""Check optimal batch size for YOLO model training using autobatch utility."""
from ultralytics.utils.autobatch import check_train_batch_size
check_train_batch_size(YOLO(MODEL).model.cuda(), imgsz=128, amp=True)
@ -103,7 +103,7 @@ def test_utils_benchmarks():
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_predict_sam():
"""Test SAM model prediction with various prompts."""
"""Test SAM model predictions using different prompts, including bounding boxes and point annotations."""
from ultralytics import SAM
from ultralytics.models.sam import Predictor as SAMPredictor

@ -12,12 +12,12 @@ from ultralytics.utils import ASSETS, DEFAULT_CFG, WEIGHTS_DIR
def test_func(*args): # noqa
"""Test function callback."""
"""Test function callback for evaluating YOLO model performance metrics."""
print("callback test passed")
def test_export():
"""Test model exporting functionality."""
"""Tests the model exporting function by adding a callback and asserting its execution."""
exporter = Exporter()
exporter.add_callback("on_export_start", test_func)
assert test_func in exporter.callbacks["on_export_start"], "callback test failed"
@ -26,7 +26,7 @@ def test_export():
def test_detect():
"""Test object detection functionality."""
"""Test YOLO object detection training, validation, and prediction functionality."""
overrides = {"data": "coco8.yaml", "model": "yolov8n.yaml", "imgsz": 32, "epochs": 1, "save": False}
cfg = get_cfg(DEFAULT_CFG)
cfg.data = "coco8.yaml"
@ -65,7 +65,7 @@ def test_detect():
def test_segment():
"""Test image segmentation functionality."""
"""Tests image segmentation training, validation, and prediction pipelines using YOLO models."""
overrides = {"data": "coco8-seg.yaml", "model": "yolov8n-seg.yaml", "imgsz": 32, "epochs": 1, "save": False}
cfg = get_cfg(DEFAULT_CFG)
cfg.data = "coco8-seg.yaml"
@ -104,7 +104,7 @@ def test_segment():
def test_classify():
"""Test image classification functionality."""
"""Test image classification including training, validation, and prediction phases."""
overrides = {"data": "imagenet10", "model": "yolov8n-cls.yaml", "imgsz": 32, "epochs": 1, "save": False}
cfg = get_cfg(DEFAULT_CFG)
cfg.data = "imagenet10"

@ -9,7 +9,7 @@ from ultralytics.utils import ASSETS
@pytest.mark.slow
def test_similarity():
"""Test similarity calculations and SQL queries for correctness and response length."""
"""Test the correctness and response length of similarity calculations and SQL queries in the Explorer."""
exp = Explorer(data="coco8.yaml")
exp.create_embeddings_table()
similar = exp.get_similar(idx=1)
@ -26,7 +26,7 @@ def test_similarity():
@pytest.mark.slow
def test_det():
"""Test detection functionalities and ensure the embedding table has bounding boxes."""
"""Test detection functionalities and verify embedding table includes bounding boxes."""
exp = Explorer(data="coco8.yaml", model="yolov8n.pt")
exp.create_embeddings_table(force=True)
assert len(exp.table.head()["bboxes"]) > 0
@ -39,7 +39,7 @@ def test_det():
@pytest.mark.slow
def test_seg():
"""Test segmentation functionalities and verify the embedding table includes masks."""
"""Test segmentation functionalities and ensure the embedding table includes segmentation masks."""
exp = Explorer(data="coco8-seg.yaml", model="yolov8n-seg.pt")
exp.create_embeddings_table(force=True)
assert len(exp.table.head()["masks"]) > 0
@ -51,7 +51,7 @@ def test_seg():
@pytest.mark.slow
def test_pose():
"""Test pose estimation functionalities and check the embedding table for keypoints."""
"""Test pose estimation functionality and verify the embedding table includes keypoints."""
exp = Explorer(data="coco8-pose.yaml", model="yolov8n-pose.pt")
exp.create_embeddings_table(force=True)
assert len(exp.table.head()["keypoints"]) > 0

@ -21,13 +21,13 @@ from ultralytics.utils.torch_utils import TORCH_1_9, TORCH_1_13
def test_export_torchscript():
"""Test YOLO exports to TorchScript format."""
"""Test YOLO model exporting to TorchScript format for compatibility and correctness."""
file = YOLO(MODEL).export(format="torchscript", optimize=False, imgsz=32)
YOLO(file)(SOURCE, imgsz=32) # exported model inference
def test_export_onnx():
"""Test YOLO exports to ONNX format."""
"""Test YOLO model export to ONNX format with dynamic axes."""
file = YOLO(MODEL).export(format="onnx", dynamic=True, imgsz=32)
YOLO(file)(SOURCE, imgsz=32) # exported model inference
@ -35,7 +35,7 @@ def test_export_onnx():
@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="OpenVINO not supported in Python 3.12")
@pytest.mark.skipif(not TORCH_1_13, reason="OpenVINO requires torch>=1.13")
def test_export_openvino():
"""Test YOLO exports to OpenVINO format."""
"""Test YOLO exports to OpenVINO format for model inference compatibility."""
file = YOLO(MODEL).export(format="openvino", imgsz=32)
YOLO(file)(SOURCE, imgsz=32) # exported model inference
@ -52,7 +52,7 @@ def test_export_openvino():
],
)
def test_export_openvino_matrix(task, dynamic, int8, half, batch):
"""Test YOLO exports to OpenVINO format."""
"""Test YOLO model exports to OpenVINO under various configuration matrix conditions."""
file = YOLO(TASK2MODEL[task]).export(
format="openvino",
imgsz=32,
@ -76,7 +76,7 @@ def test_export_openvino_matrix(task, dynamic, int8, half, batch):
"task, dynamic, int8, half, batch, simplify", product(TASKS, [True, False], [False], [False], [1, 2], [True, False])
)
def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify):
"""Test YOLO exports to ONNX format."""
"""Test YOLO exports to ONNX format with various configurations and parameters."""
file = YOLO(TASK2MODEL[task]).export(
format="onnx",
imgsz=32,
@ -93,7 +93,7 @@ def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify):
@pytest.mark.slow
@pytest.mark.parametrize("task, dynamic, int8, half, batch", product(TASKS, [False], [False], [False], [1, 2]))
def test_export_torchscript_matrix(task, dynamic, int8, half, batch):
"""Test YOLO exports to TorchScript format."""
"""Tests YOLO model exports to TorchScript format under varied configurations."""
file = YOLO(TASK2MODEL[task]).export(
format="torchscript",
imgsz=32,
@ -119,7 +119,7 @@ def test_export_torchscript_matrix(task, dynamic, int8, half, batch):
],
)
def test_export_coreml_matrix(task, dynamic, int8, half, batch):
"""Test YOLO exports to CoreML format."""
"""Test YOLO exports to CoreML format with various parameter configurations."""
file = YOLO(TASK2MODEL[task]).export(
format="coreml",
imgsz=32,
@ -144,7 +144,7 @@ def test_export_coreml_matrix(task, dynamic, int8, half, batch):
],
)
def test_export_tflite_matrix(task, dynamic, int8, half, batch):
"""Test YOLO exports to TFLite format."""
"""Test YOLO exports to TFLite format considering various export configurations."""
file = YOLO(TASK2MODEL[task]).export(
format="tflite",
imgsz=32,
@ -162,7 +162,7 @@ def test_export_tflite_matrix(task, dynamic, int8, half, batch):
@pytest.mark.skipif(IS_RASPBERRYPI, reason="CoreML not supported on Raspberry Pi")
@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="CoreML not supported in Python 3.12")
def test_export_coreml():
"""Test YOLO exports to CoreML format."""
"""Test YOLO exports to CoreML format, optimized for macOS only."""
if MACOS:
file = YOLO(MODEL).export(format="coreml", imgsz=32)
YOLO(file)(SOURCE, imgsz=32) # model prediction only supported on macOS for nms=False models
@ -173,11 +173,7 @@ def test_export_coreml():
@pytest.mark.skipif(not checks.IS_PYTHON_MINIMUM_3_10, reason="TFLite export requires Python>=3.10")
@pytest.mark.skipif(not LINUX, reason="Test disabled as TF suffers from install conflicts on Windows and macOS")
def test_export_tflite():
"""
Test YOLO exports to TFLite format.
Note TF suffers from install conflicts on Windows and macOS.
"""
"""Test YOLO exports to TFLite format under specific OS and Python version conditions."""
model = YOLO(MODEL)
file = model.export(format="tflite", imgsz=32)
YOLO(file)(SOURCE, imgsz=32)
@ -186,11 +182,7 @@ def test_export_tflite():
@pytest.mark.skipif(True, reason="Test disabled")
@pytest.mark.skipif(not LINUX, reason="TF suffers from install conflicts on Windows and macOS")
def test_export_pb():
"""
Test YOLO exports to *.pb format.
Note TF suffers from install conflicts on Windows and macOS.
"""
"""Test YOLO exports to TensorFlow's Protobuf (*.pb) format."""
model = YOLO(MODEL)
file = model.export(format="pb", imgsz=32)
YOLO(file)(SOURCE, imgsz=32)
@ -198,11 +190,7 @@ def test_export_pb():
@pytest.mark.skipif(True, reason="Test disabled as Paddle protobuf and ONNX protobuf requirementsk conflict.")
def test_export_paddle():
"""
Test YOLO exports to Paddle format.
Note Paddle protobuf requirements conflicting with onnx protobuf requirements.
"""
"""Test YOLO exports to Paddle format, noting protobuf conflicts with ONNX."""
YOLO(MODEL).export(format="paddle", imgsz=32)

@ -16,7 +16,7 @@ from ultralytics.utils.checks import check_requirements
@pytest.mark.skipif(not check_requirements("ray", install=False), reason="ray[tune] not installed")
def test_model_ray_tune():
"""Tune YOLO model with Ray optimization library."""
"""Tune YOLO model using Ray for hyperparameter optimization."""
YOLO("yolov8n-cls.yaml").tune(
use_ray=True, data="imagenet10", grace_period=1, iterations=1, imgsz=32, epochs=1, plots=False, device="cpu"
)
@ -24,7 +24,7 @@ def test_model_ray_tune():
@pytest.mark.skipif(not check_requirements("mlflow", install=False), reason="mlflow not installed")
def test_mlflow():
"""Test training with MLflow tracking enabled."""
"""Test training with MLflow tracking enabled (see https://mlflow.org/ for details)."""
SETTINGS["mlflow"] = True
YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu")
@ -32,9 +32,9 @@ def test_mlflow():
@pytest.mark.skipif(True, reason="Test failing in scheduled CI https://github.com/ultralytics/ultralytics/pull/8868")
@pytest.mark.skipif(not check_requirements("mlflow", install=False), reason="mlflow not installed")
def test_mlflow_keep_run_active():
"""Ensure MLflow run status matches MLFLOW_KEEP_RUN_ACTIVE environment variable settings."""
import mlflow
"""Test training with MLflow tracking enabled."""
SETTINGS["mlflow"] = True
run_name = "Test Run"
os.environ["MLFLOW_RUN"] = run_name
@ -62,7 +62,11 @@ def test_mlflow_keep_run_active():
@pytest.mark.skipif(not check_requirements("tritonclient", install=False), reason="tritonclient[all] not installed")
def test_triton():
"""Test NVIDIA Triton Server functionalities."""
"""
Test NVIDIA Triton Server functionalities with YOLO model.
See https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tritonserver.
"""
check_requirements("tritonclient[all]")
from tritonclient.http import InferenceServerClient # noqa
@ -114,7 +118,7 @@ def test_triton():
@pytest.mark.skipif(not check_requirements("pycocotools", install=False), reason="pycocotools not installed")
def test_pycocotools():
"""Validate model predictions using pycocotools."""
"""Validate YOLO model predictions on COCO dataset using pycocotools."""
from ultralytics.models.yolo.detect import DetectionValidator
from ultralytics.models.yolo.pose import PoseValidator
from ultralytics.models.yolo.segment import SegmentationValidator

@ -38,7 +38,7 @@ def test_model_forward():
def test_model_methods():
"""Test various methods and properties of the YOLO model."""
"""Test various methods and properties of the YOLO model to ensure correct functionality."""
model = YOLO(MODEL)
# Model methods
@ -58,7 +58,7 @@ def test_model_methods():
def test_model_profile():
"""Test profiling of the YOLO model with 'profile=True' argument."""
"""Test profiling of the YOLO model with `profile=True` to assess performance and resource usage."""
from ultralytics.nn.tasks import DetectionModel
model = DetectionModel() # build model
@ -68,7 +68,7 @@ def test_model_profile():
@pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
def test_predict_txt():
"""Test YOLO predictions with sources (file, dir, glob, recursive glob) specified in a text file."""
"""Tests YOLO predictions with file, directory, and pattern sources listed in a text file."""
txt_file = TMP / "sources.txt"
with open(txt_file, "w") as f:
for x in [ASSETS / "bus.jpg", ASSETS, ASSETS / "*", ASSETS / "**/*.jpg"]:
@ -78,7 +78,7 @@ def test_predict_txt():
@pytest.mark.parametrize("model_name", MODELS)
def test_predict_img(model_name):
"""Test YOLO prediction on various types of image sources."""
"""Test YOLO model predictions on various image input types and sources, including online images."""
model = YOLO(WEIGHTS_DIR / model_name)
im = cv2.imread(str(SOURCE)) # uint8 numpy array
assert len(model(source=Image.open(SOURCE), save=True, verbose=True, imgsz=32)) == 1 # PIL
@ -100,12 +100,12 @@ def test_predict_img(model_name):
@pytest.mark.parametrize("model", MODELS)
def test_predict_visualize(model):
"""Test model predict methods with 'visualize=True' arguments."""
"""Test model prediction methods with 'visualize=True' to generate and display prediction visualizations."""
YOLO(WEIGHTS_DIR / model)(SOURCE, imgsz=32, visualize=True)
def test_predict_grey_and_4ch():
"""Test YOLO prediction on SOURCE converted to greyscale and 4-channel images."""
"""Test YOLO prediction on SOURCE converted to greyscale and 4-channel images with various filenames."""
im = Image.open(SOURCE)
directory = TMP / "im4"
directory.mkdir(parents=True, exist_ok=True)
@ -132,11 +132,7 @@ def test_predict_grey_and_4ch():
@pytest.mark.slow
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
def test_youtube():
"""
Test YouTube inference.
Note: ConnectionError may occur during this test due to network instability or YouTube server availability.
"""
"""Test YOLO model on a YouTube video stream, handling potential network-related errors."""
model = YOLO(MODEL)
try:
model.predict("https://youtu.be/G17sBkb38XQ", imgsz=96, save=True)
@ -149,9 +145,9 @@ def test_youtube():
@pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
def test_track_stream():
"""
Test streaming tracking (short 10 frame video) with non-default ByteTrack tracker.
Tests streaming tracking on a short 10 frame video using ByteTrack tracker and different GMC methods.
Note imgsz=160 required for tracking for higher confidence and better matches
Note imgsz=160 required for tracking for higher confidence and better matches.
"""
video_url = "https://ultralytics.com/assets/decelera_portrait_min.mov"
model = YOLO(MODEL)
@ -175,21 +171,21 @@ def test_val():
def test_train_scratch():
"""Test training the YOLO model from scratch."""
"""Test training the YOLO model from scratch using the provided configuration."""
model = YOLO(CFG)
model.train(data="coco8.yaml", epochs=2, imgsz=32, cache="disk", batch=-1, close_mosaic=1, name="model")
model(SOURCE)
def test_train_pretrained():
"""Test training the YOLO model from a pre-trained state."""
"""Test training of the YOLO model starting from a pre-trained checkpoint."""
model = YOLO(WEIGHTS_DIR / "yolov8n-seg.pt")
model.train(data="coco8-seg.yaml", epochs=1, imgsz=32, cache="ram", copy_paste=0.5, mixup=0.5, name=0)
model(SOURCE)
def test_all_model_yamls():
"""Test YOLO model creation for all available YAML configurations."""
"""Test YOLO model creation for all available YAML configurations in the `cfg/models` directory."""
for m in (ROOT / "cfg" / "models").rglob("*.yaml"):
if "rtdetr" in m.name:
if TORCH_1_9: # torch<=1.8 issue - TypeError: __init__() got an unexpected keyword argument 'batch_first'
@ -208,7 +204,7 @@ def test_workflow():
def test_predict_callback_and_setup():
"""Test callback functionality during YOLO prediction."""
"""Test callback functionality during YOLO prediction setup and execution."""
def on_predict_batch_end(predictor):
"""Callback function that handles operations at the end of a prediction batch."""
@ -232,7 +228,7 @@ def test_predict_callback_and_setup():
@pytest.mark.parametrize("model", MODELS)
def test_results(model):
"""Test various result formats for the YOLO model."""
"""Ensure YOLO model predictions can be processed and printed in various formats."""
results = YOLO(WEIGHTS_DIR / model)([SOURCE, SOURCE], imgsz=160)
for r in results:
r = r.cpu().numpy()
@ -247,7 +243,7 @@ def test_results(model):
def test_labels_and_crops():
"""Test output from prediction args for saving detection labels and crops."""
"""Test output from prediction args for saving YOLO detection labels and crops; ensures accurate saving."""
imgs = [SOURCE, ASSETS / "zidane.jpg"]
results = YOLO(WEIGHTS_DIR / "yolov8n.pt")(imgs, imgsz=160, save_txt=True, save_crop=True)
save_path = Path(results[0].save_dir)
@ -270,7 +266,7 @@ def test_labels_and_crops():
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
def test_data_utils():
"""Test utility functions in ultralytics/data/utils.py."""
"""Test utility functions in ultralytics/data/utils.py, including dataset stats and auto-splitting."""
from ultralytics.data.utils import HUBDatasetStats, autosplit
from ultralytics.utils.downloads import zip_directory
@ -290,7 +286,7 @@ def test_data_utils():
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
def test_data_converter():
"""Test dataset converters."""
"""Test dataset conversion functions from COCO to YOLO format and class mappings."""
from ultralytics.data.converter import coco80_to_coco91_class, convert_coco
file = "instances_val2017.json"
@ -300,7 +296,7 @@ def test_data_converter():
def test_data_annotator():
"""Test automatic data annotation."""
"""Automatically annotate data using specified detection and segmentation models."""
from ultralytics.data.annotator import auto_annotate
auto_annotate(
@ -323,7 +319,7 @@ def test_events():
def test_cfg_init():
"""Test configuration initialization utilities."""
"""Test configuration initialization utilities from the 'ultralytics.cfg' module."""
from ultralytics.cfg import check_dict_alignment, copy_default_cfg, smart_value
with contextlib.suppress(SyntaxError):
@ -334,7 +330,7 @@ def test_cfg_init():
def test_utils_init():
"""Test initialization utilities."""
"""Test initialization utilities in the Ultralytics library."""
from ultralytics.utils import get_git_branch, get_git_origin_url, get_ubuntu_version, is_github_action_running
get_ubuntu_version()
@ -344,7 +340,7 @@ def test_utils_init():
def test_utils_checks():
"""Test various utility checks."""
"""Test various utility checks for filenames, git status, requirements, image sizes, and versions."""
checks.check_yolov5u_filename("yolov5n.pt")
checks.git_describe(ROOT)
checks.check_requirements() # check requirements.txt
@ -356,14 +352,14 @@ def test_utils_checks():
@pytest.mark.skipif(WINDOWS, reason="Windows profiling is extremely slow (cause unknown)")
def test_utils_benchmarks():
"""Test model benchmarking."""
"""Benchmark model performance using 'ProfileModels' from 'ultralytics.utils.benchmarks'."""
from ultralytics.utils.benchmarks import ProfileModels
ProfileModels(["yolov8n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
def test_utils_torchutils():
"""Test Torch utility functions."""
"""Test Torch utility functions including profiling and FLOP calculations."""
from ultralytics.nn.modules.conv import Conv
from ultralytics.utils.torch_utils import get_flops_with_torch_profiler, profile, time_sync
@ -378,14 +374,14 @@ def test_utils_torchutils():
@pytest.mark.slow
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
def test_utils_downloads():
"""Test file download utilities."""
"""Test file download utilities from ultralytics.utils.downloads."""
from ultralytics.utils.downloads import get_google_drive_file_info
get_google_drive_file_info("https://drive.google.com/file/d/1cqT-cJgANNrhIHCrEufUYhQ4RqiWG_lJ/view?usp=drive_link")
def test_utils_ops():
"""Test various operations utilities."""
"""Test utility operations functions for coordinate transformation and normalization."""
from ultralytics.utils.ops import (
ltwh2xywh,
ltwh2xyxy,
@ -414,7 +410,7 @@ def test_utils_ops():
def test_utils_files():
"""Test file handling utilities."""
"""Test file handling utilities including file age, date, and paths with spaces."""
from ultralytics.utils.files import file_age, file_date, get_latest_run, spaces_in_path
file_age(SOURCE)
@ -429,7 +425,7 @@ def test_utils_files():
@pytest.mark.slow
def test_utils_patches_torch_save():
"""Test torch_save backoff when _torch_save throws RuntimeError."""
"""Test torch_save backoff when _torch_save raises RuntimeError to ensure robustness."""
from unittest.mock import MagicMock, patch
from ultralytics.utils.patches import torch_save
@ -444,7 +440,7 @@ def test_utils_patches_torch_save():
def test_nn_modules_conv():
"""Test Convolutional Neural Network modules."""
"""Test Convolutional Neural Network modules including CBAM, Conv2, and ConvTranspose."""
from ultralytics.nn.modules.conv import CBAM, Conv2, ConvTranspose, DWConvTranspose2d, Focus
c1, c2 = 8, 16 # input and output channels
@ -463,7 +459,7 @@ def test_nn_modules_conv():
def test_nn_modules_block():
"""Test Neural Network block modules."""
"""Test various blocks in neural network modules including C1, C3TR, BottleneckCSP, C3Ghost, and C3x."""
from ultralytics.nn.modules.block import C1, C3TR, BottleneckCSP, C3Ghost, C3x
c1, c2 = 8, 16 # input and output channels
@ -479,7 +475,7 @@ def test_nn_modules_block():
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
def test_hub():
"""Test Ultralytics HUB functionalities."""
"""Test Ultralytics HUB functionalities (e.g. export formats, logout)."""
from ultralytics.hub import export_fmts_hub, logout
from ultralytics.hub.utils import smart_request
@ -490,7 +486,7 @@ def test_hub():
@pytest.fixture
def image():
"""Loads an image from a predefined source using OpenCV."""
"""Load and return an image from a predefined source using OpenCV."""
return cv2.imread(str(SOURCE))
@ -504,7 +500,7 @@ def image():
],
)
def test_classify_transforms_train(image, auto_augment, erasing, force_color_jitter):
"""Tests classification transforms during training with various augmentation settings."""
"""Tests classification transforms during training with various augmentations to ensure proper functionality."""
from ultralytics.data.augment import classify_augmentations
transform = classify_augmentations(
@ -533,7 +529,7 @@ def test_classify_transforms_train(image, auto_augment, erasing, force_color_jit
@pytest.mark.slow
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
def test_model_tune():
"""Tune YOLO model for performance."""
"""Tune YOLO model for performance improvement."""
YOLO("yolov8n-pose.pt").tune(data="coco8-pose.yaml", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
YOLO("yolov8n-cls.pt").tune(data="imagenet10", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
@ -550,7 +546,7 @@ def test_model_embeddings():
@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="YOLOWorld with CLIP is not supported in Python 3.12")
def test_yolo_world():
"""Tests YOLO world models with different configurations, including classes, detection, and training scenarios."""
"""Tests YOLO world models with CLIP support, including detection and training scenarios."""
model = YOLO("yolov8s-world.pt") # no YOLOv8n-world model yet
model.set_classes(["tree", "window"])
model(SOURCE, conf=0.01)
@ -581,7 +577,7 @@ def test_yolo_world():
def test_yolov10():
"""A simple test for yolov10 for now."""
"""Test YOLOv10 model training, validation, and prediction steps with minimal configurations."""
model = YOLO("yolov10n.yaml")
# train/val/predict
model.train(data="coco8.yaml", epochs=1, imgsz=32, close_mosaic=1, cache="disk")

@ -22,6 +22,7 @@ from .utils import get_sim_index_schema, get_table_schema, plot_query_result, pr
class ExplorerDataset(YOLODataset):
def __init__(self, *args, data: dict = None, **kwargs) -> None:
"""Initializes the ExplorerDataset with the provided data arguments, extending the YOLODataset class."""
super().__init__(*args, data=data, **kwargs)
def load_image(self, i: int) -> Union[Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]], Tuple[None, None, None]]:
@ -59,6 +60,7 @@ class Explorer:
model: str = "yolov8n.pt",
uri: str = USER_CONFIG_DIR / "explorer",
) -> None:
"""Initializes the Explorer class with dataset path, model, and URI for database connection."""
# Note duckdb==0.10.0 bug https://github.com/ultralytics/ultralytics/pull/8181
checks.check_requirements(["lancedb>=0.4.3", "duckdb<=0.9.2"])
import lancedb
@ -416,6 +418,7 @@ class Explorer:
def _check_imgs_or_idxs(
self, img: Union[str, np.ndarray, List[str], List[np.ndarray], None], idx: Union[None, int, List[int]]
) -> List[np.ndarray]:
"""Determines whether to fetch images or indexes based on provided arguments and returns image paths."""
if img is None and idx is None:
raise ValueError("Either img or idx must be provided.")
if img is not None and idx is not None:

@ -230,6 +230,8 @@ class HUBTrainingSession:
*args,
**kwargs,
):
"""Attempts to execute `request_func` with retries, timeout handling, optional threading, and progress."""
def retry_request():
"""Attempts to call `request_func` with retries, timeout, and optional threading."""
t0 = time.time() # Record the start time for the timeout
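The new HUBTrainingSession docstring describes a wrapper that retries `request_func` with timeout handling. A generic sketch of that retry-with-backoff pattern, not the HUB implementation itself (names and defaults are assumptions):

import time

def call_with_retries(request_func, *args, retries=3, timeout=30, **kwargs):
    """Call request_func, retrying on failure until retries or the overall timeout run out."""
    t0 = time.time()  # record the start time for the timeout
    for attempt in range(retries):
        if time.time() - t0 > timeout:
            break  # give up once the overall timeout elapses
        try:
            return request_func(*args, **kwargs)
        except Exception as err:  # in practice, catch the specific request/connection error
            print(f"attempt {attempt + 1}/{retries} failed: {err}")
            time.sleep(2**attempt)  # simple exponential backoff
    return None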

@ -712,6 +712,7 @@ class RepVGGDW(torch.nn.Module):
"""RepVGGDW is a class that represents a depth wise separable convolutional block in RepVGG architecture."""
def __init__(self, ed) -> None:
"""Initializes RepVGGDW with depthwise separable convolutional layers for efficient processing."""
super().__init__()
self.conv = Conv(ed, ed, 7, 1, 3, g=ed, act=False)
self.conv1 = Conv(ed, ed, 3, 1, 1, g=ed, act=False)

@ -53,9 +53,9 @@ class AIGym:
# Check if environment supports imshow
self.env_check = check_imshow(warn=True)
self.count = list()
self.angle = list()
self.stage = list()
self.count = []
self.angle = []
self.stage = []
def start_counting(self, im0, results):
"""

@ -305,7 +305,6 @@ class ThreadingLocked:
@ThreadingLocked()
def my_function():
# Your code here
pass
```
"""

@ -1224,6 +1224,7 @@ class ClassifyMetrics(SimpleClass):
class OBBMetrics(SimpleClass):
def __init__(self, save_dir=Path("."), plot=False, on_plot=None, names=()) -> None:
"""Initialize an OBBMetrics instance with directory, plotting, callback, and class names."""
self.save_dir = save_dir
self.plot = plot
self.on_plot = on_plot
