`ultralytics 8.1.44` add `IS_RASPBERRYPI` and constants refactor (#9827)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Branches/tags: pull/9346/head^2, v8.1.44
Authored by Glenn Jocher, committed by GitHub 8 months ago
parent 3f34a7c3af
commit 7d891a4aa4
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
42 files changed (changed lines in parentheses):
  1. tests/test_engine.py (3)
  2. tests/test_integrations.py (1)
  3. tests/test_python.py (6)
  4. ultralytics/__init__.py (5)
  5. ultralytics/cfg/models/v9/yolov9c.yaml (2)
  6. ultralytics/cfg/models/v9/yolov9e-seg.yaml (1)
  7. ultralytics/cfg/models/v9/yolov9e.yaml (1)
  8. ultralytics/data/__init__.py (11)
  9. ultralytics/data/augment.py (4)
  10. ultralytics/data/base.py (2)
  11. ultralytics/data/build.py (2)
  12. ultralytics/data/converter.py (7)
  13. ultralytics/data/dataset.py (4)
  14. ultralytics/data/explorer/explorer.py (11)
  15. ultralytics/data/loaders.py (6)
  16. ultralytics/data/utils.py (2)
  17. ultralytics/engine/model.py (3)
  18. ultralytics/engine/trainer.py (1)
  19. ultralytics/hub/auth.py (6)
  20. ultralytics/hub/session.py (6)
  21. ultralytics/hub/utils.py (12)
  22. ultralytics/models/rtdetr/val.py (2)
  23. ultralytics/models/sam/modules/tiny_encoder.py (4)
  24. ultralytics/models/sam/modules/transformer.py (2)
  25. ultralytics/models/sam/predict.py (24)
  26. ultralytics/models/yolo/detect/val.py (2)
  27. ultralytics/models/yolo/model.py (2)
  28. ultralytics/models/yolo/obb/val.py (2)
  29. ultralytics/models/yolo/world/train_world.py (4)
  30. ultralytics/nn/modules/__init__.py (16)
  31. ultralytics/nn/modules/head.py (2)
  32. ultralytics/nn/tasks.py (14)
  33. ultralytics/solutions/object_counter.py (1)
  34. ultralytics/trackers/byte_tracker.py (2)
  35. ultralytics/trackers/utils/kalman_filter.py (8)
  36. ultralytics/trackers/utils/matching.py (2)
  37. ultralytics/utils/__init__.py (58)
  38. ultralytics/utils/callbacks/hub.py (5)
  39. ultralytics/utils/callbacks/mlflow.py (2)
  40. ultralytics/utils/callbacks/tensorboard.py (1)
  41. ultralytics/utils/checks.py (24)
  42. ultralytics/utils/torch_utils.py (2)
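The bulk of the change sits in ultralytics/utils/__init__.py: environment checks such as is_colab(), is_kaggle(), is_pip_package() and the new is_raspberrypi() are now evaluated once at import time and exposed as module-level constants (IS_COLAB, IS_KAGGLE, IS_PIP_PACKAGE, IS_RASPBERRYPI, GIT_DIR, IS_GIT_DIR, ...) that callers import instead of re-calling the helpers. A minimal sketch of the pattern, with a placeholder helper body (the real check differs):

import os


def is_colab() -> bool:
    """Placeholder environment check; the real helper inspects Colab-specific env vars."""
    return "COLAB_RELEASE_TAG" in os.environ or "COLAB_BACKEND_VERSION" in os.environ


# Evaluated once at import time; downstream modules do
# `from ultralytics.utils import IS_COLAB` instead of calling is_colab() repeatedly.
IS_COLAB = is_colab()

if IS_COLAB:
    print("Running in Google Colab")

The same one-time evaluation replaces repeated get_git_dir()/is_git_dir() calls in SettingsManager, set_sentry(), and the hub and checks modules, as the hunks below show.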

@ -2,6 +2,7 @@
import sys
from unittest import mock
from ultralytics import YOLO
from ultralytics.cfg import get_cfg
from ultralytics.engine.exporter import Exporter
@ -52,7 +53,7 @@ def test_detect():
pred.add_callback("on_predict_start", test_func)
assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
# Confirm there is no issue with sys.argv being empty.
with mock.patch.object(sys, 'argv', []):
with mock.patch.object(sys, "argv", []):
result = pred(source=ASSETS, model=f"{MODEL}.pt")
assert len(result), "predictor test failed"

@ -34,6 +34,7 @@ def test_mlflow():
@pytest.mark.skipif(not check_requirements("mlflow", install=False), reason="mlflow not installed")
def test_mlflow_keep_run_active():
import os
import mlflow
"""Test training with MLflow tracking enabled."""

@ -514,7 +514,8 @@ def test_utils_files():
@pytest.mark.slow
def test_utils_patches_torch_save():
"""Test torch_save backoff when _torch_save throws RuntimeError."""
from unittest.mock import patch, MagicMock
from unittest.mock import MagicMock, patch
from ultralytics.utils.patches import torch_save
mock = MagicMock(side_effect=RuntimeError)
@ -651,9 +652,8 @@ def test_yolo_world():
from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch
model = YOLO("yolov8s-worldv2.yaml") # no YOLOv8n-world model yet
data = dict(train=dict(yolo_data=["coco8.yaml"]), val=dict(yolo_data=["coco8.yaml"]))
model.train(
data=data,
data={"train": {"yolo_data": ["coco8.yaml"]}, "val": {"yolo_data": ["coco8.yaml"]}},
epochs=2,
imgsz=32,
cache="disk",

@ -1,15 +1,16 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
__version__ = "8.1.43"
__version__ = "8.1.44"
from ultralytics.data.explorer.explorer import Explorer
from ultralytics.models import RTDETR, SAM, YOLO, YOLOWorld
from ultralytics.models.fastsam import FastSAM
from ultralytics.models.nas import NAS
from ultralytics.utils import ASSETS, SETTINGS as settings
from ultralytics.utils import ASSETS, SETTINGS
from ultralytics.utils.checks import check_yolo as checks
from ultralytics.utils.downloads import download
settings = SETTINGS
__all__ = (
"__version__",
"ASSETS",

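Note that the __init__.py hunk above replaces the aliased import (SETTINGS as settings) with a plain module-level assignment, settings = SETTINGS, so the public entry point `from ultralytics import settings` keeps working. A quick usage sketch (the printed value depends on your local settings file):

from ultralytics import settings

# `settings` is the same SettingsManager instance as SETTINGS, just re-exported.
print(settings["runs_dir"])          # read a stored setting
settings.update(runs_dir="runs")     # persist an update, same API as before the change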
@ -35,4 +35,4 @@ head:
- [[-1, 9], 1, Concat, [1]] # cat head P5
- [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 21 (P5/32-large)
- [[15, 18, 21], 1, Detect, [nc]] # DDetect(P3, P4, P5)
- [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)

@ -58,5 +58,4 @@ head:
- [[-1, 29], 1, Concat, [1]] # cat head P5
- [-1, 1, RepNCSPELAN4, [512, 1024, 512, 2]] # 41 (P5/32-large)
# segment
- [[35, 38, 41], 1, Segment, [nc, 32, 256]] # Segment (P3, P4, P5)

@ -58,5 +58,4 @@ head:
- [[-1, 29], 1, Concat, [1]] # cat head P5
- [-1, 1, RepNCSPELAN4, [512, 1024, 512, 2]] # 41 (P5/32-large)
# detect
- [[35, 38, 41], 1, Detect, [nc]] # Detect(P3, P4, P5)

@ -1,19 +1,14 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
from .base import BaseDataset
from .build import (
build_dataloader,
build_yolo_dataset,
build_grounding,
load_inference_source,
)
from .build import build_dataloader, build_grounding, build_yolo_dataset, load_inference_source
from .dataset import (
ClassificationDataset,
GroundingDataset,
SemanticDataset,
YOLOConcatDataset,
YOLODataset,
YOLOMultiModalDataset,
GroundingDataset,
YOLOConcatDataset,
)
__all__ = (

@ -20,7 +20,7 @@ from .utils import polygons2masks, polygons2masks_overlap
DEFAULT_MEAN = (0.0, 0.0, 0.0)
DEFAULT_STD = (1.0, 1.0, 1.0)
DEFAULT_CROP_FTACTION = 1.0
DEFAULT_CROP_FRACTION = 1.0
# TODO: we might need a BaseTransform to make all these augments be compatible with both classification and semantic
@ -1134,7 +1134,7 @@ def classify_transforms(
mean=DEFAULT_MEAN,
std=DEFAULT_STD,
interpolation=Image.BILINEAR,
crop_fraction: float = DEFAULT_CROP_FTACTION,
crop_fraction: float = DEFAULT_CROP_FRACTION,
):
"""
Classification transforms for evaluation/inference. Inspired by timm/data/transforms_factory.py.

@ -15,7 +15,7 @@ import psutil
from torch.utils.data import Dataset
from ultralytics.utils import DEFAULT_CFG, LOCAL_RANK, LOGGER, NUM_THREADS, TQDM
from .utils import HELP_URL, FORMATS_HELP_MSG, IMG_FORMATS
from .utils import FORMATS_HELP_MSG, HELP_URL, IMG_FORMATS
class BaseDataset(Dataset):

@ -22,7 +22,7 @@ from ultralytics.data.loaders import (
from ultralytics.data.utils import IMG_FORMATS, VID_FORMATS
from ultralytics.utils import RANK, colorstr
from ultralytics.utils.checks import check_file
from .dataset import YOLODataset, YOLOMultiModalDataset, GroundingDataset
from .dataset import GroundingDataset, YOLODataset, YOLOMultiModalDataset
from .utils import PIN_MEMORY

@ -519,11 +519,12 @@ def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt"):
..
NNN.txt
"""
from tqdm import tqdm
from ultralytics import SAM
from ultralytics.data import YOLODataset
from ultralytics.utils.ops import xywh2xyxy
from ultralytics.utils import LOGGER
from ultralytics import SAM
from tqdm import tqdm
from ultralytics.utils.ops import xywh2xyxy
# NOTE: add placeholder to pass class index check
dataset = YOLODataset(im_dir, data=dict(names=list(range(1000))))

@ -30,10 +30,10 @@ from .utils import (
LOGGER,
get_hash,
img2label_paths,
verify_image,
verify_image_label,
load_dataset_cache_file,
save_dataset_cache_file,
verify_image,
verify_image_label,
)
# Ultralytics dataset *.cache version, >= 1.0.0 for YOLOv8

@ -15,7 +15,7 @@ from ultralytics.data.augment import Format
from ultralytics.data.dataset import YOLODataset
from ultralytics.data.utils import check_det_dataset
from ultralytics.models.yolo.model import YOLO
from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR
from ultralytics.utils import LOGGER, USER_CONFIG_DIR, IterableSimpleNamespace, checks
from .utils import get_sim_index_schema, get_table_schema, plot_query_result, prompt_sql_query, sanitize_batch
@ -203,7 +203,8 @@ class Explorer:
table = self.table.to_arrow() # noqa NOTE: Don't comment this. This line is used by DuckDB
if not query.startswith("SELECT") and not query.startswith("WHERE"):
raise ValueError(
f"Query must start with SELECT or WHERE. You can either pass the entire query or just the WHERE clause. found {query}"
f"Query must start with SELECT or WHERE. You can either pass the entire query or just the WHERE "
f"clause. found {query}"
)
if query.startswith("WHERE"):
query = f"SELECT * FROM 'table' {query}"
@ -318,13 +319,13 @@ class Explorer:
Args:
max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
top_k (float): Percentage of the closest data points to consider when counting. Used to apply limit when running
top_k (float): Percentage of the closest data points to consider when counting. Used to apply limit.
vector search. Defaults: None.
force (bool): Whether to overwrite the existing similarity index or not. Defaults to True.
Returns:
(pandas.DataFrame): A dataframe containing the similarity index. Each row corresponds to an image, and columns
include indices of similar images and their respective distances.
(pandas.DataFrame): A dataframe containing the similarity index. Each row corresponds to an image,
and columns include indices of similar images and their respective distances.
Example:
```python

@ -15,8 +15,8 @@ import requests
import torch
from PIL import Image
from ultralytics.data.utils import IMG_FORMATS, VID_FORMATS, FORMATS_HELP_MSG
from ultralytics.utils import LOGGER, is_colab, is_kaggle, ops
from ultralytics.data.utils import FORMATS_HELP_MSG, IMG_FORMATS, VID_FORMATS
from ultralytics.utils import IS_COLAB, IS_KAGGLE, LOGGER, ops
from ultralytics.utils.checks import check_requirements
@ -87,7 +87,7 @@ class LoadStreams:
# YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/LNwODJXcvt4'
s = get_best_youtube_url(s)
s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam
if s == 0 and (is_colab() or is_kaggle()):
if s == 0 and (IS_COLAB or IS_KAGGLE):
raise NotImplementedError(
"'source=0' webcam not supported in Colab and Kaggle notebooks. "
"Try running 'source=0' in a local environment."

@ -27,9 +27,9 @@ from ultralytics.utils import (
clean_url,
colorstr,
emojis,
is_dir_writeable,
yaml_load,
yaml_save,
is_dir_writeable,
)
from ultralytics.utils.checks import check_file, check_font, is_ascii
from ultralytics.utils.downloads import download, safe_download, unzip_file

@ -321,9 +321,10 @@ class Model(nn.Module):
AssertionError: If the model is not a PyTorch model.
"""
self._check_is_pytorch_model()
from ultralytics import __version__
from datetime import datetime
from ultralytics import __version__
updates = {
"date": datetime.now().isoformat(),
"version": __version__,

@ -464,6 +464,7 @@ class BaseTrainer:
def save_model(self):
"""Save model training checkpoints with additional metadata."""
import io
import pandas as pd # scope for faster 'import ultralytics'
# Serialize ckpt to a byte buffer once (faster than repeated torch.save() calls)
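The trainer.py hunk above moves pandas into save_model() scope so plain `import ultralytics` stays fast; a small, self-contained sketch of that deferred-import pattern (hypothetical function, not the real BaseTrainer.save_model):

import time


def save_model():
    # Function-scoped import: the cost of importing pandas is paid on the first
    # checkpoint save instead of at `import ultralytics` time.
    t0 = time.perf_counter()
    import pandas as pd  # noqa: F401

    print(f"pandas imported lazily in {time.perf_counter() - t0:.3f}s")


save_model()  # first call pays the import cost; later calls hit the sys.modules cache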

@ -3,7 +3,7 @@
import requests
from ultralytics.hub.utils import HUB_API_ROOT, HUB_WEB_ROOT, PREFIX, request_with_credentials
from ultralytics.utils import LOGGER, SETTINGS, emojis, is_colab
from ultralytics.utils import IS_COLAB, LOGGER, SETTINGS, emojis
API_KEY_URL = f"{HUB_WEB_ROOT}/settings?tab=api+keys"
@ -50,7 +50,7 @@ class Auth:
# Attempt to authenticate with the provided API key
success = self.authenticate()
# If the API key is not provided and the environment is a Google Colab notebook
elif is_colab():
elif IS_COLAB:
# Attempt to authenticate using browser cookies
success = self.auth_with_cookies()
else:
@ -109,7 +109,7 @@ class Auth:
Returns:
(bool): True if authentication is successful, False otherwise.
"""
if not is_colab():
if not IS_COLAB:
return False # Currently only works with Colab
try:
authn = request_with_credentials(f"{HUB_API_ROOT}/v1/auth/auto")

@ -7,11 +7,11 @@ from pathlib import Path
import requests
from ultralytics.hub.utils import HUB_WEB_ROOT, HELP_MSG, PREFIX, TQDM
from ultralytics.utils import LOGGER, SETTINGS, __version__, checks, emojis, is_colab
from ultralytics.hub.utils import HELP_MSG, HUB_WEB_ROOT, PREFIX, TQDM
from ultralytics.utils import IS_COLAB, LOGGER, SETTINGS, __version__, checks, emojis
from ultralytics.utils.errors import HUBModelError
AGENT_NAME = f"python-{__version__}-colab" if is_colab() else f"python-{__version__}-local"
AGENT_NAME = f"python-{__version__}-colab" if IS_COLAB else f"python-{__version__}-local"
class HUBTrainingSession:

@ -12,6 +12,9 @@ import requests
from ultralytics.utils import (
ARGV,
ENVIRONMENT,
IS_COLAB,
IS_GIT_DIR,
IS_PIP_PACKAGE,
LOGGER,
ONLINE,
RANK,
@ -22,9 +25,6 @@ from ultralytics.utils import (
__version__,
colorstr,
get_git_origin_url,
is_colab,
is_git_dir,
is_pip_package,
)
from ultralytics.utils.downloads import GITHUB_ASSETS_NAMES
@ -48,7 +48,7 @@ def request_with_credentials(url: str) -> any:
Raises:
OSError: If the function is not run in a Google Colab environment.
"""
if not is_colab():
if not IS_COLAB:
raise OSError("request_with_credentials() must run in a Colab environment")
from google.colab import output # noqa
from IPython import display # noqa
@ -189,7 +189,7 @@ class Events:
self.t = 0.0 # rate limit timer (seconds)
self.metadata = {
"cli": Path(ARGV[0]).name == "yolo",
"install": "git" if is_git_dir() else "pip" if is_pip_package() else "other",
"install": "git" if IS_GIT_DIR else "pip" if IS_PIP_PACKAGE else "other",
"python": ".".join(platform.python_version_tuple()[:2]), # i.e. 3.10
"version": __version__,
"env": ENVIRONMENT,
@ -201,7 +201,7 @@ class Events:
and RANK in {-1, 0}
and not TESTS_RUNNING
and ONLINE
and (is_pip_package() or get_git_origin_url() == "https://github.com/ultralytics/ultralytics.git")
and (IS_PIP_PACKAGE or get_git_origin_url() == "https://github.com/ultralytics/ultralytics.git")
)
def __call__(self, cfg):

@ -125,7 +125,7 @@ class RTDETRValidator(DetectionValidator):
bbox = ops.xywh2xyxy(bbox) # target boxes
bbox[..., [0, 2]] *= ori_shape[1] # native-space pred
bbox[..., [1, 3]] *= ori_shape[0] # native-space pred
return dict(cls=cls, bbox=bbox, ori_shape=ori_shape, imgsz=imgsz, ratio_pad=ratio_pad)
return {"cls": cls, "bbox": bbox, "ori_shape": ori_shape, "imgsz": imgsz, "ratio_pad": ratio_pad}
def _prepare_pred(self, pred, pbatch):
"""Prepares and returns a batch with transformed bounding boxes and class labels."""

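This hunk, and the matching ones in yolo/detect/val.py and yolo/obb/val.py below, swaps the dict(...) constructor for a dict literal. The literal avoids a global name lookup plus a function call on every batch; an illustrative micro-benchmark of the difference:

import timeit

# Constructor form: looks up the `dict` name and calls it on every evaluation.
print(timeit.timeit("dict(cls=1, bbox=2, ori_shape=3, imgsz=4, ratio_pad=5)", number=1_000_000))
# Literal form: built directly by the interpreter, typically measurably faster.
print(timeit.timeit("{'cls': 1, 'bbox': 2, 'ori_shape': 3, 'imgsz': 4, 'ratio_pad': 5}", number=1_000_000))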
@ -584,9 +584,9 @@ class TinyViT(nn.Module):
img_size (int, optional): The input image size. Defaults to 224.
in_chans (int, optional): Number of input channels. Defaults to 3.
num_classes (int, optional): Number of classification classes. Defaults to 1000.
embed_dims (List[int], optional): List of embedding dimensions for each layer. Defaults to [96, 192, 384, 768].
embed_dims (List[int], optional): List of embedding dimensions per layer. Defaults to [96, 192, 384, 768].
depths (List[int], optional): List of depths for each layer. Defaults to [2, 2, 6, 2].
num_heads (List[int], optional): List of number of attention heads for each layer. Defaults to [3, 6, 12, 24].
num_heads (List[int], optional): List of number of attention heads per layer. Defaults to [3, 6, 12, 24].
window_sizes (List[int], optional): List of window sizes for each layer. Defaults to [7, 7, 14, 7].
mlp_ratio (float, optional): Ratio of MLP hidden dimension to embedding dimension. Defaults to 4.
drop_rate (float, optional): Dropout rate. Defaults to 0.

@ -222,7 +222,7 @@ class Attention(nn.Module):
downsample_rate (int, optional): The factor by which the internal dimensions are downsampled. Defaults to 1.
Raises:
AssertionError: If 'num_heads' does not evenly divide the internal dimension (embedding_dim / downsample_rate).
AssertionError: If 'num_heads' does not evenly divide the internal dim (embedding_dim / downsample_rate).
"""
super().__init__()
self.embedding_dim = embedding_dim

@ -127,10 +127,10 @@ class Predictor(BasePredictor):
Args:
im (torch.Tensor): The preprocessed input image in tensor format, with shape (N, C, H, W).
bboxes (np.ndarray | List, optional): Bounding boxes with shape (N, 4), in XYXY format.
points (np.ndarray | List, optional): Points indicating object locations with shape (N, 2), in pixel coordinates.
labels (np.ndarray | List, optional): Labels for point prompts, shape (N, ). 1 for foreground and 0 for background.
masks (np.ndarray, optional): Low-resolution masks from previous predictions. Shape should be (N, H, W). For SAM, H=W=256.
multimask_output (bool, optional): Flag to return multiple masks. Helpful for ambiguous prompts. Defaults to False.
points (np.ndarray | List, optional): Points indicating object locations with shape (N, 2), in pixels.
labels (np.ndarray | List, optional): Labels for point prompts, shape (N, ). 1 = foreground, 0 = background.
masks (np.ndarray, optional): Low-resolution masks from previous predictions shape (N,H,W). For SAM H=W=256.
multimask_output (bool, optional): Flag to return multiple masks. Helpful for ambiguous prompts.
Returns:
(tuple): Contains the following three elements.
@ -156,10 +156,10 @@ class Predictor(BasePredictor):
Args:
im (torch.Tensor): The preprocessed input image in tensor format, with shape (N, C, H, W).
bboxes (np.ndarray | List, optional): Bounding boxes with shape (N, 4), in XYXY format.
points (np.ndarray | List, optional): Points indicating object locations with shape (N, 2), in pixel coordinates.
labels (np.ndarray | List, optional): Labels for point prompts, shape (N, ). 1 for foreground and 0 for background.
masks (np.ndarray, optional): Low-resolution masks from previous predictions. Shape should be (N, H, W). For SAM, H=W=256.
multimask_output (bool, optional): Flag to return multiple masks. Helpful for ambiguous prompts. Defaults to False.
points (np.ndarray | List, optional): Points indicating object locations with shape (N, 2), in pixels.
labels (np.ndarray | List, optional): Labels for point prompts, shape (N, ). 1 = foreground, 0 = background.
masks (np.ndarray, optional): Low-resolution masks from previous predictions shape (N,H,W). For SAM H=W=256.
multimask_output (bool, optional): Flag to return multiple masks. Helpful for ambiguous prompts.
Returns:
(tuple): Contains the following three elements.
@ -230,7 +230,7 @@ class Predictor(BasePredictor):
im (torch.Tensor): Input tensor representing the preprocessed image with dimensions (N, C, H, W).
crop_n_layers (int): Specifies the number of layers for additional mask predictions on image crops.
Each layer produces 2**i_layer number of image crops.
crop_overlap_ratio (float): Determines the extent of overlap between crops. Scaled down in subsequent layers.
crop_overlap_ratio (float): Determines the overlap between crops. Scaled down in subsequent layers.
crop_downscale_factor (int): Scaling factor for the number of sampled points-per-side in each layer.
point_grids (list[np.ndarray], optional): Custom grids for point sampling normalized to [0,1].
Used in the nth crop layer.
@ -240,7 +240,7 @@ class Predictor(BasePredictor):
conf_thres (float): Confidence threshold [0,1] for filtering based on the model's mask quality prediction.
stability_score_thresh (float): Stability threshold [0,1] for mask filtering based on mask stability.
stability_score_offset (float): Offset value for calculating stability score.
crop_nms_thresh (float): IoU cutoff for Non-Maximum Suppression (NMS) to remove duplicate masks between crops.
crop_nms_thresh (float): IoU cutoff for NMS to remove duplicate masks between crops.
Returns:
(tuple): A tuple containing segmented masks, confidence scores, and bounding boxes.
@ -351,8 +351,8 @@ class Predictor(BasePredictor):
"""
Post-processes SAM's inference outputs to generate object detection masks and bounding boxes.
The method scales masks and boxes to the original image size and applies a threshold to the mask predictions. The
SAM model uses advanced architecture and promptable segmentation tasks to achieve real-time performance.
The method scales masks and boxes to the original image size and applies a threshold to the mask predictions.
The SAM model uses advanced architecture and promptable segmentation tasks to achieve real-time performance.
Args:
preds (tuple): The output from SAM model inference, containing masks, scores, and optional bounding boxes.

@ -106,7 +106,7 @@ class DetectionValidator(BaseValidator):
if len(cls):
bbox = ops.xywh2xyxy(bbox) * torch.tensor(imgsz, device=self.device)[[1, 0, 1, 0]] # target boxes
ops.scale_boxes(imgsz, bbox, ori_shape, ratio_pad=ratio_pad) # native-space labels
return dict(cls=cls, bbox=bbox, ori_shape=ori_shape, imgsz=imgsz, ratio_pad=ratio_pad)
return {"cls": cls, "bbox": bbox, "ori_shape": ori_shape, "imgsz": imgsz, "ratio_pad": ratio_pad}
def _prepare_pred(self, pred, pbatch):
"""Prepares a batch of images and annotations for validation."""

@ -5,7 +5,7 @@ from pathlib import Path
from ultralytics.engine.model import Model
from ultralytics.models import yolo
from ultralytics.nn.tasks import ClassificationModel, DetectionModel, OBBModel, PoseModel, SegmentationModel, WorldModel
from ultralytics.utils import yaml_load, ROOT
from ultralytics.utils import ROOT, yaml_load
class YOLO(Model):

@ -78,7 +78,7 @@ class OBBValidator(DetectionValidator):
if len(cls):
bbox[..., :4].mul_(torch.tensor(imgsz, device=self.device)[[1, 0, 1, 0]]) # target boxes
ops.scale_boxes(imgsz, bbox, ori_shape, ratio_pad=ratio_pad, xywh=True) # native-space labels
return dict(cls=cls, bbox=bbox, ori_shape=ori_shape, imgsz=imgsz, ratio_pad=ratio_pad)
return {"cls": cls, "bbox": bbox, "ori_shape": ori_shape, "imgsz": imgsz, "ratio_pad": ratio_pad}
def _prepare_pred(self, pred, pbatch):
"""Prepares and returns a batch for OBB validation with scaled and padded bounding boxes."""

@ -1,8 +1,8 @@
from ultralytics.data import build_yolo_dataset, build_grounding, YOLOConcatDataset
from ultralytics.data import YOLOConcatDataset, build_grounding, build_yolo_dataset
from ultralytics.data.utils import check_det_dataset
from ultralytics.models.yolo.world import WorldTrainer
from ultralytics.utils.torch_utils import de_parallel
from ultralytics.utils import DEFAULT_CFG
from ultralytics.utils.torch_utils import de_parallel
class WorldTrainerFromScratch(WorldTrainer):

@ -24,27 +24,27 @@ from .block import (
C3TR,
DFL,
SPP,
SPPELAN,
SPPF,
ADown,
BNContrastiveHead,
Bottleneck,
BottleneckCSP,
C2f,
C2fAttn,
ImagePoolingAttn,
C3Ghost,
C3x,
CBFuse,
CBLinear,
ContrastiveHead,
GhostBottleneck,
HGBlock,
HGStem,
ImagePoolingAttn,
Proto,
RepC3,
ResNetLayer,
ContrastiveHead,
BNContrastiveHead,
RepNCSPELAN4,
ADown,
SPPELAN,
CBFuse,
CBLinear,
ResNetLayer,
Silence,
)
from .conv import (

@ -8,7 +8,7 @@ import torch.nn as nn
from torch.nn.init import constant_, xavier_uniform_
from ultralytics.utils.tal import TORCH_1_10, dist2bbox, dist2rbox, make_anchors
from .block import DFL, Proto, ContrastiveHead, BNContrastiveHead
from .block import DFL, BNContrastiveHead, ContrastiveHead, Proto
from .conv import Conv
from .transformer import MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer
from .utils import bias_init_with_prob, linear_init

@ -15,14 +15,17 @@ from ultralytics.nn.modules import (
C3TR,
OBB,
SPP,
SPPELAN,
SPPF,
ADown,
Bottleneck,
BottleneckCSP,
C2f,
C2fAttn,
ImagePoolingAttn,
C3Ghost,
C3x,
CBFuse,
CBLinear,
Classify,
Concat,
Conv,
@ -36,19 +39,16 @@ from ultralytics.nn.modules import (
GhostConv,
HGBlock,
HGStem,
ImagePoolingAttn,
Pose,
RepC3,
RepConv,
RepNCSPELAN4,
ResNetLayer,
RTDETRDecoder,
Segment,
WorldDetect,
RepNCSPELAN4,
ADown,
SPPELAN,
CBFuse,
CBLinear,
Silence,
WorldDetect,
)
from ultralytics.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, colorstr, emojis, yaml_load
from ultralytics.utils.checks import check_requirements, check_suffix, check_yaml

@ -1,6 +1,7 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
from collections import defaultdict
import cv2
from ultralytics.utils.checks import check_imshow, check_requirements

@ -5,8 +5,8 @@ import numpy as np
from .basetrack import BaseTrack, TrackState
from .utils import matching
from .utils.kalman_filter import KalmanFilterXYAH
from ..utils.ops import xywh2ltwh
from ..utils import LOGGER
from ..utils.ops import xywh2ltwh
class STrack(BaseTrack):

@ -39,8 +39,8 @@ class KalmanFilterXYAH:
and height h.
Returns:
(tuple[ndarray, ndarray]): Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional) of
the new track. Unobserved velocities are initialized to 0 mean.
(tuple[ndarray, ndarray]): Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional)
of the new track. Unobserved velocities are initialized to 0 mean.
"""
mean_pos = measurement
mean_vel = np.zeros_like(mean_pos)
@ -235,8 +235,8 @@ class KalmanFilterXYWH(KalmanFilterXYAH):
measurement (ndarray): Bounding box coordinates (x, y, w, h) with center position (x, y), width, and height.
Returns:
(tuple[ndarray, ndarray]): Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional) of
the new track. Unobserved velocities are initialized to 0 mean.
(tuple[ndarray, ndarray]): Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional)
of the new track. Unobserved velocities are initialized to 0 mean.
"""
mean_pos = measurement
mean_vel = np.zeros_like(mean_pos)

@ -4,7 +4,7 @@ import numpy as np
import scipy
from scipy.spatial.distance import cdist
from ultralytics.utils.metrics import bbox_ioa, batch_probiou
from ultralytics.utils.metrics import batch_probiou, bbox_ioa
try:
import lap # for linear_assignment

@ -495,9 +495,6 @@ def is_online() -> bool:
return False
ONLINE = is_online()
def is_pip_package(filepath: str = __name__) -> bool:
"""
Determines if the file at the given filepath is part of a pip package.
@ -550,17 +547,6 @@ def is_github_action_running() -> bool:
return "GITHUB_ACTIONS" in os.environ and "GITHUB_WORKFLOW" in os.environ and "RUNNER_OS" in os.environ
def is_git_dir():
"""
Determines whether the current file is part of a git repository. If the current file is not part of a git
repository, returns None.
Returns:
(bool): True if current file is part of a git repository.
"""
return get_git_dir() is not None
def get_git_dir():
"""
Determines whether the current file is part of a git repository and if so, returns the repository root directory. If
@ -574,6 +560,17 @@ def get_git_dir():
return d
def is_git_dir():
"""
Determines whether the current file is part of a git repository. If the current file is not part of a git
repository, returns None.
Returns:
(bool): True if current file is part of a git repository.
"""
return GIT_DIR is not None
def get_git_origin_url():
"""
Retrieves the origin URL of a git repository.
@ -581,7 +578,7 @@ def get_git_origin_url():
Returns:
(str | None): The origin URL of the git repository or None if not git directory.
"""
if is_git_dir():
if IS_GIT_DIR:
with contextlib.suppress(subprocess.CalledProcessError):
origin = subprocess.check_output(["git", "config", "--get", "remote.origin.url"])
return origin.decode().strip()
@ -594,7 +591,7 @@ def get_git_branch():
Returns:
(str | None): The current git branch name or None if not a git directory.
"""
if is_git_dir():
if IS_GIT_DIR:
with contextlib.suppress(subprocess.CalledProcessError):
origin = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
return origin.decode().strip()
@ -660,6 +657,16 @@ def get_user_config_dir(sub_dir="Ultralytics"):
return path
# Define constants (required below)
ONLINE = is_online()
IS_COLAB = is_colab()
IS_DOCKER = is_docker()
IS_JUPYTER = is_jupyter()
IS_KAGGLE = is_kaggle()
IS_PIP_PACKAGE = is_pip_package()
IS_RASPBERRYPI = is_raspberrypi()
GIT_DIR = get_git_dir()
IS_GIT_DIR = is_git_dir()
USER_CONFIG_DIR = Path(os.getenv("YOLO_CONFIG_DIR") or get_user_config_dir()) # Ultralytics settings dir
SETTINGS_YAML = USER_CONFIG_DIR / "settings.yaml"
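The constants block above is where IS_RASPBERRYPI from the PR title lands; the is_raspberrypi() helper itself falls outside the hunks shown here. A plausible detection sketch, offered only as an assumption about how such a check might read (the real helper may differ):

from pathlib import Path


def is_raspberrypi() -> bool:
    """Assumed implementation: look for 'Raspberry Pi' in the device-tree model string."""
    try:
        return "Raspberry Pi" in Path("/proc/device-tree/model").read_text()
    except OSError:
        return False


IS_RASPBERRYPI = is_raspberrypi()  # evaluated once, alongside IS_COLAB, IS_DOCKER, etc.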
@ -886,7 +893,7 @@ def set_sentry():
event["tags"] = {
"sys_argv": ARGV[0],
"sys_argv_name": Path(ARGV[0]).name,
"install": "git" if is_git_dir() else "pip" if is_pip_package() else "other",
"install": "git" if IS_GIT_DIR else "pip" if IS_PIP_PACKAGE else "other",
"os": ENVIRONMENT,
}
return event
@ -897,8 +904,8 @@ def set_sentry():
and Path(ARGV[0]).name == "yolo"
and not TESTS_RUNNING
and ONLINE
and is_pip_package()
and not is_git_dir()
and IS_PIP_PACKAGE
and not IS_GIT_DIR
):
# If sentry_sdk package is not installed then return and do not use Sentry
try:
@ -937,9 +944,8 @@ class SettingsManager(dict):
from ultralytics.utils.checks import check_version
from ultralytics.utils.torch_utils import torch_distributed_zero_first
git_dir = get_git_dir()
root = git_dir or Path()
datasets_root = (root.parent if git_dir and is_dir_writeable(root.parent) else root).resolve()
root = GIT_DIR or Path()
datasets_root = (root.parent if GIT_DIR and is_dir_writeable(root.parent) else root).resolve()
self.file = Path(file)
self.version = version
@ -1043,13 +1049,13 @@ WEIGHTS_DIR = Path(SETTINGS["weights_dir"]) # global weights directory
RUNS_DIR = Path(SETTINGS["runs_dir"]) # global runs directory
ENVIRONMENT = (
"Colab"
if is_colab()
if IS_COLAB
else "Kaggle"
if is_kaggle()
if IS_KAGGLE
else "Jupyter"
if is_jupyter()
if IS_JUPYTER
else "Docker"
if is_docker()
if IS_DOCKER
else platform.system()
)
TESTS_RUNNING = is_pytest_running() or is_github_action_running()

@ -12,10 +12,7 @@ def on_pretrain_routine_end(trainer):
session = getattr(trainer, "hub_session", None)
if session:
# Start timer for upload rate limit
session.timers = {
"metrics": time(),
"ckpt": time(),
} # start timer on session.rate_limit
session.timers = {"metrics": time(), "ckpt": time()} # start timer on session.rate_limit
def on_fit_epoch_end(trainer):

@ -58,7 +58,7 @@ def on_pretrain_routine_end(trainer):
MLFLOW_TRACKING_URI: The URI for MLflow tracking. If not set, defaults to 'runs/mlflow'.
MLFLOW_EXPERIMENT_NAME: The name of the MLflow experiment. If not set, defaults to trainer.args.project.
MLFLOW_RUN: The name of the MLflow run. If not set, defaults to trainer.args.name.
MLFLOW_KEEP_RUN_ACTIVE: Boolean indicating whether to keep the MLflow run active after the end of the training phase.
MLFLOW_KEEP_RUN_ACTIVE: Boolean indicating whether to keep the MLflow run active after the end of training.
"""
global mlflow

@ -15,6 +15,7 @@ try:
# Imports below only required if TensorBoard enabled
import warnings
from copy import deepcopy
from ultralytics.utils.torch_utils import de_parallel, torch
except (ImportError, AssertionError, TypeError, AttributeError):

@ -22,10 +22,15 @@ import torch
from ultralytics.utils import (
ASSETS,
AUTOINSTALL,
IS_COLAB,
IS_DOCKER,
IS_JUPYTER,
IS_KAGGLE,
IS_PIP_PACKAGE,
LINUX,
LOGGER,
PYTHON_VERSION,
ONLINE,
PYTHON_VERSION,
ROOT,
TORCHVISION_VERSION,
USER_CONFIG_DIR,
@ -37,12 +42,7 @@ from ultralytics.utils import (
colorstr,
downloads,
emojis,
is_colab,
is_docker,
is_github_action_running,
is_jupyter,
is_kaggle,
is_pip_package,
url2file,
)
@ -277,7 +277,7 @@ def check_pip_update_available():
Returns:
(bool): True if an update is available, False otherwise.
"""
if ONLINE and is_pip_package():
if ONLINE and IS_PIP_PACKAGE:
with contextlib.suppress(Exception):
from ultralytics import __version__
@ -528,7 +528,7 @@ def check_imshow(warn=False):
"""Check if environment supports image displays."""
try:
if LINUX:
assert "DISPLAY" in os.environ and not is_docker() and not is_colab() and not is_kaggle()
assert "DISPLAY" in os.environ and not IS_DOCKER and not IS_COLAB and not IS_KAGGLE
cv2.imshow("test", np.zeros((8, 8, 3), dtype=np.uint8)) # show a small 8-pixel image
cv2.waitKey(1)
cv2.destroyAllWindows()
@ -546,10 +546,10 @@ def check_yolo(verbose=True, device=""):
from ultralytics.utils.torch_utils import select_device
if is_jupyter():
if IS_JUPYTER:
if check_requirements("wandb", install=False):
os.system("pip uninstall -y wandb") # uninstall wandb: unwanted account creation prompt with infinite hang
if is_colab():
if IS_COLAB:
shutil.rmtree("sample_data", ignore_errors=True) # remove colab /sample_data directory
if verbose:
@ -574,7 +574,7 @@ def collect_system_info():
import psutil
from ultralytics.utils import ENVIRONMENT, is_git_dir
from ultralytics.utils import ENVIRONMENT, IS_GIT_DIR
from ultralytics.utils.torch_utils import get_cpu_info
ram_info = psutil.virtual_memory().total / (1024**3) # Convert bytes to GB
@ -583,7 +583,7 @@ def collect_system_info():
f"\n{'OS':<20}{platform.platform()}\n"
f"{'Environment':<20}{ENVIRONMENT}\n"
f"{'Python':<20}{PYTHON_VERSION}\n"
f"{'Install':<20}{'git' if is_git_dir() else 'pip' if is_pip_package() else 'other'}\n"
f"{'Install':<20}{'git' if IS_GIT_DIR else 'pip' if IS_PIP_PACKAGE else 'other'}\n"
f"{'RAM':<20}{ram_info:.2f} GB\n"
f"{'CPU':<20}{get_cpu_info()}\n"
f"{'CUDA':<20}{torch.version.cuda if torch and torch.cuda.is_available() else None}\n"

@ -21,8 +21,8 @@ from ultralytics.utils import (
LOGGER,
PYTHON_VERSION,
TORCHVISION_VERSION,
colorstr,
__version__,
colorstr,
)
from ultralytics.utils.checks import check_version
