`ultralytics 8.1.44` add `IS_RASPBERRYPI` and constants refactor (#9827)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Refs: pull/9346/head^2, v8.1.44
Glenn Jocher authored 11 months ago; committed by GitHub
parent 3f34a7c3af
commit 7d891a4aa4
43 changed files (changed lines in parentheses):

  1. tests/test_engine.py (3)
  2. tests/test_integrations.py (1)
  3. tests/test_python.py (6)
  4. ultralytics/__init__.py (5)
  5. ultralytics/cfg/models/v9/yolov9c-seg.yaml (2)
  6. ultralytics/cfg/models/v9/yolov9c.yaml (2)
  7. ultralytics/cfg/models/v9/yolov9e-seg.yaml (5)
  8. ultralytics/cfg/models/v9/yolov9e.yaml (5)
  9. ultralytics/data/__init__.py (11)
 10. ultralytics/data/augment.py (4)
 11. ultralytics/data/base.py (2)
 12. ultralytics/data/build.py (2)
 13. ultralytics/data/converter.py (7)
 14. ultralytics/data/dataset.py (4)
 15. ultralytics/data/explorer/explorer.py (11)
 16. ultralytics/data/loaders.py (6)
 17. ultralytics/data/utils.py (2)
 18. ultralytics/engine/model.py (3)
 19. ultralytics/engine/trainer.py (1)
 20. ultralytics/hub/auth.py (6)
 21. ultralytics/hub/session.py (6)
 22. ultralytics/hub/utils.py (12)
 23. ultralytics/models/rtdetr/val.py (2)
 24. ultralytics/models/sam/modules/tiny_encoder.py (4)
 25. ultralytics/models/sam/modules/transformer.py (2)
 26. ultralytics/models/sam/predict.py (24)
 27. ultralytics/models/yolo/detect/val.py (2)
 28. ultralytics/models/yolo/model.py (2)
 29. ultralytics/models/yolo/obb/val.py (2)
 30. ultralytics/models/yolo/world/train_world.py (4)
 31. ultralytics/nn/modules/__init__.py (16)
 32. ultralytics/nn/modules/head.py (2)
 33. ultralytics/nn/tasks.py (14)
 34. ultralytics/solutions/object_counter.py (1)
 35. ultralytics/trackers/byte_tracker.py (2)
 36. ultralytics/trackers/utils/kalman_filter.py (8)
 37. ultralytics/trackers/utils/matching.py (2)
 38. ultralytics/utils/__init__.py (58)
 39. ultralytics/utils/callbacks/hub.py (5)
 40. ultralytics/utils/callbacks/mlflow.py (2)
 41. ultralytics/utils/callbacks/tensorboard.py (1)
 42. ultralytics/utils/checks.py (24)
 43. ultralytics/utils/torch_utils.py (2)

tests/test_engine.py
@@ -2,6 +2,7 @@
 import sys
 from unittest import mock
 from ultralytics import YOLO
 from ultralytics.cfg import get_cfg
 from ultralytics.engine.exporter import Exporter
@@ -52,7 +53,7 @@ def test_detect():
 pred.add_callback("on_predict_start", test_func)
 assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
 # Confirm there is no issue with sys.argv being empty.
-with mock.patch.object(sys, 'argv', []):
+with mock.patch.object(sys, "argv", []):
 result = pred(source=ASSETS, model=f"{MODEL}.pt")
 assert len(result), "predictor test failed"

tests/test_integrations.py
@@ -34,6 +34,7 @@ def test_mlflow():
 @pytest.mark.skipif(not check_requirements("mlflow", install=False), reason="mlflow not installed")
 def test_mlflow_keep_run_active():
 import os
 import mlflow
 """Test training with MLflow tracking enabled."""

tests/test_python.py
@@ -514,7 +514,8 @@ def test_utils_files():
 @pytest.mark.slow
 def test_utils_patches_torch_save():
 """Test torch_save backoff when _torch_save throws RuntimeError."""
-from unittest.mock import patch, MagicMock
+from unittest.mock import MagicMock, patch
 from ultralytics.utils.patches import torch_save
 mock = MagicMock(side_effect=RuntimeError)
@@ -651,9 +652,8 @@ def test_yolo_world():
 from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch
 model = YOLO("yolov8s-worldv2.yaml")  # no YOLOv8n-world model yet
-data = dict(train=dict(yolo_data=["coco8.yaml"]), val=dict(yolo_data=["coco8.yaml"]))
 model.train(
-data=data,
+data={"train": {"yolo_data": ["coco8.yaml"]}, "val": {"yolo_data": ["coco8.yaml"]}},
 epochs=2,
 imgsz=32,
 cache="disk",
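For reference, a minimal sketch of the YOLO-World training call exercised by this test after the change, with the multi-source data mapping inlined. The `trainer=` keyword on `Model.train` is an assumption here (it is not visible in the hunk above); everything else mirrors the diff:

```python
from ultralytics import YOLO
from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch

# Train a small YOLO-World model from scratch on the tiny coco8 dataset;
# train/val sources are given as nested dicts of dataset lists.
model = YOLO("yolov8s-worldv2.yaml")  # no YOLOv8n-world model yet
model.train(
    data={"train": {"yolo_data": ["coco8.yaml"]}, "val": {"yolo_data": ["coco8.yaml"]}},
    epochs=2,
    imgsz=32,
    cache="disk",
    trainer=WorldTrainerFromScratch,  # assumed keyword; see the import used in the test
)
```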

ultralytics/__init__.py
@@ -1,15 +1,16 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
-__version__ = "8.1.43"
+__version__ = "8.1.44"
 from ultralytics.data.explorer.explorer import Explorer
 from ultralytics.models import RTDETR, SAM, YOLO, YOLOWorld
 from ultralytics.models.fastsam import FastSAM
 from ultralytics.models.nas import NAS
-from ultralytics.utils import ASSETS, SETTINGS as settings
+from ultralytics.utils import ASSETS, SETTINGS
 from ultralytics.utils.checks import check_yolo as checks
 from ultralytics.utils.downloads import download
+settings = SETTINGS
 __all__ = (
 "__version__",
 "ASSETS",

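The `__init__.py` hunk above replaces the `SETTINGS as settings` import alias with a plain import plus an explicit `settings = SETTINGS` assignment, so the public alias still points at the same object. A minimal sketch of what this preserves (illustrative, not part of the diff):

```python
# Both import forms should still refer to the same settings object after this change.
from ultralytics import settings  # public alias, re-exported via `settings = SETTINGS`
from ultralytics.utils import SETTINGS  # canonical settings instance

assert settings is SETTINGS
```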
ultralytics/cfg/models/v9/yolov9c-seg.yaml
@@ -35,4 +35,4 @@ head:
 - [[-1, 9], 1, Concat, [1]] # cat head P5
 - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 21 (P5/32-large)
 - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Segment(P3, P4, P5)

ultralytics/cfg/models/v9/yolov9c.yaml
@@ -35,4 +35,4 @@ head:
 - [[-1, 9], 1, Concat, [1]] # cat head P5
 - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 21 (P5/32-large)
-- [[15, 18, 21], 1, Detect, [nc]] # DDetect(P3, P4, P5)
+- [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)

ultralytics/cfg/models/v9/yolov9e-seg.yaml
@@ -17,13 +17,13 @@ backbone:
 - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]] # 7
 - [-1, 1, ADown, [1024]] # 8-P5/32
 - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]] # 9
 - [1, 1, CBLinear, [[64]]] # 10
 - [3, 1, CBLinear, [[64, 128]]] # 11
 - [5, 1, CBLinear, [[64, 128, 256]]] # 12
 - [7, 1, CBLinear, [[64, 128, 256, 512]]] # 13
 - [9, 1, CBLinear, [[64, 128, 256, 512, 1024]]] # 14
 - [0, 1, Conv, [64, 3, 2]] # 15-P1/2
 - [[10, 11, 12, 13, 14, -1], 1, CBFuse, [[0, 0, 0, 0, 0]]] # 16
 - [-1, 1, Conv, [128, 3, 2]] # 17-P2/4
@@ -58,5 +58,4 @@ head:
 - [[-1, 29], 1, Concat, [1]] # cat head P5
 - [-1, 1, RepNCSPELAN4, [512, 1024, 512, 2]] # 41 (P5/32-large)
-# segment
 - [[35, 38, 41], 1, Segment, [nc, 32, 256]] # Segment (P3, P4, P5)

ultralytics/cfg/models/v9/yolov9e.yaml
@@ -17,13 +17,13 @@ backbone:
 - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]] # 7
 - [-1, 1, ADown, [1024]] # 8-P5/32
 - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]] # 9
 - [1, 1, CBLinear, [[64]]] # 10
 - [3, 1, CBLinear, [[64, 128]]] # 11
 - [5, 1, CBLinear, [[64, 128, 256]]] # 12
 - [7, 1, CBLinear, [[64, 128, 256, 512]]] # 13
 - [9, 1, CBLinear, [[64, 128, 256, 512, 1024]]] # 14
 - [0, 1, Conv, [64, 3, 2]] # 15-P1/2
 - [[10, 11, 12, 13, 14, -1], 1, CBFuse, [[0, 0, 0, 0, 0]]] # 16
 - [-1, 1, Conv, [128, 3, 2]] # 17-P2/4
@@ -58,5 +58,4 @@ head:
 - [[-1, 29], 1, Concat, [1]] # cat head P5
 - [-1, 1, RepNCSPELAN4, [512, 1024, 512, 2]] # 41 (P5/32-large)
-# detect
 - [[35, 38, 41], 1, Detect, [nc]] # Detect(P3, P4, P5)

ultralytics/data/__init__.py
@@ -1,19 +1,14 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 from .base import BaseDataset
-from .build import (
-build_dataloader,
-build_yolo_dataset,
-build_grounding,
-load_inference_source,
-)
+from .build import build_dataloader, build_grounding, build_yolo_dataset, load_inference_source
 from .dataset import (
 ClassificationDataset,
+GroundingDataset,
 SemanticDataset,
+YOLOConcatDataset,
 YOLODataset,
 YOLOMultiModalDataset,
-GroundingDataset,
-YOLOConcatDataset,
 )
 __all__ = (

ultralytics/data/augment.py
@@ -20,7 +20,7 @@ from .utils import polygons2masks, polygons2masks_overlap
 DEFAULT_MEAN = (0.0, 0.0, 0.0)
 DEFAULT_STD = (1.0, 1.0, 1.0)
-DEFAULT_CROP_FTACTION = 1.0
+DEFAULT_CROP_FRACTION = 1.0
 # TODO: we might need a BaseTransform to make all these augments be compatible with both classification and semantic
@@ -1134,7 +1134,7 @@ def classify_transforms(
 mean=DEFAULT_MEAN,
 std=DEFAULT_STD,
 interpolation=Image.BILINEAR,
-crop_fraction: float = DEFAULT_CROP_FTACTION,
+crop_fraction: float = DEFAULT_CROP_FRACTION,
 ):
 """
 Classification transforms for evaluation/inference. Inspired by timm/data/transforms_factory.py.

ultralytics/data/base.py
@@ -15,7 +15,7 @@ import psutil
 from torch.utils.data import Dataset
 from ultralytics.utils import DEFAULT_CFG, LOCAL_RANK, LOGGER, NUM_THREADS, TQDM
-from .utils import HELP_URL, FORMATS_HELP_MSG, IMG_FORMATS
+from .utils import FORMATS_HELP_MSG, HELP_URL, IMG_FORMATS
 class BaseDataset(Dataset):

ultralytics/data/build.py
@@ -22,7 +22,7 @@ from ultralytics.data.loaders import (
 from ultralytics.data.utils import IMG_FORMATS, VID_FORMATS
 from ultralytics.utils import RANK, colorstr
 from ultralytics.utils.checks import check_file
-from .dataset import YOLODataset, YOLOMultiModalDataset, GroundingDataset
+from .dataset import GroundingDataset, YOLODataset, YOLOMultiModalDataset
 from .utils import PIN_MEMORY

ultralytics/data/converter.py
@@ -519,11 +519,12 @@ def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt"):
 ..
 NNN.txt
 """
+from tqdm import tqdm
+from ultralytics import SAM
 from ultralytics.data import YOLODataset
-from ultralytics.utils.ops import xywh2xyxy
 from ultralytics.utils import LOGGER
-from ultralytics import SAM
+from ultralytics.utils.ops import xywh2xyxy
-from tqdm import tqdm
 # NOTE: add placeholder to pass class index check
 dataset = YOLODataset(im_dir, data=dict(names=list(range(1000))))

ultralytics/data/dataset.py
@@ -30,10 +30,10 @@ from .utils import (
 LOGGER,
 get_hash,
 img2label_paths,
-verify_image,
-verify_image_label,
 load_dataset_cache_file,
 save_dataset_cache_file,
+verify_image,
+verify_image_label,
 )
 # Ultralytics dataset *.cache version, >= 1.0.0 for YOLOv8

ultralytics/data/explorer/explorer.py
@@ -15,7 +15,7 @@ from ultralytics.data.augment import Format
 from ultralytics.data.dataset import YOLODataset
 from ultralytics.data.utils import check_det_dataset
 from ultralytics.models.yolo.model import YOLO
-from ultralytics.utils import LOGGER, IterableSimpleNamespace, checks, USER_CONFIG_DIR
+from ultralytics.utils import LOGGER, USER_CONFIG_DIR, IterableSimpleNamespace, checks
 from .utils import get_sim_index_schema, get_table_schema, plot_query_result, prompt_sql_query, sanitize_batch
@@ -203,7 +203,8 @@ class Explorer:
 table = self.table.to_arrow()  # noqa NOTE: Don't comment this. This line is used by DuckDB
 if not query.startswith("SELECT") and not query.startswith("WHERE"):
 raise ValueError(
-f"Query must start with SELECT or WHERE. You can either pass the entire query or just the WHERE clause. found {query}"
+f"Query must start with SELECT or WHERE. You can either pass the entire query or just the WHERE "
+f"clause. found {query}"
 )
 if query.startswith("WHERE"):
 query = f"SELECT * FROM 'table' {query}"
@@ -318,13 +319,13 @@ class Explorer:
 Args:
 max_dist (float): maximum L2 distance between the embeddings to consider. Defaults to 0.2.
-top_k (float): Percentage of the closest data points to consider when counting. Used to apply limit when running
+top_k (float): Percentage of the closest data points to consider when counting. Used to apply limit.
 vector search. Defaults: None.
 force (bool): Whether to overwrite the existing similarity index or not. Defaults to True.
 Returns:
-(pandas.DataFrame): A dataframe containing the similarity index. Each row corresponds to an image, and columns
-include indices of similar images and their respective distances.
+(pandas.DataFrame): A dataframe containing the similarity index. Each row corresponds to an image,
+and columns include indices of similar images and their respective distances.
 Example:
 ```python

ultralytics/data/loaders.py
@@ -15,8 +15,8 @@ import requests
 import torch
 from PIL import Image
-from ultralytics.data.utils import IMG_FORMATS, VID_FORMATS, FORMATS_HELP_MSG
+from ultralytics.data.utils import FORMATS_HELP_MSG, IMG_FORMATS, VID_FORMATS
-from ultralytics.utils import LOGGER, is_colab, is_kaggle, ops
+from ultralytics.utils import IS_COLAB, IS_KAGGLE, LOGGER, ops
 from ultralytics.utils.checks import check_requirements
@@ -87,7 +87,7 @@ class LoadStreams:
 # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/LNwODJXcvt4'
 s = get_best_youtube_url(s)
 s = eval(s) if s.isnumeric() else s  # i.e. s = '0' local webcam
-if s == 0 and (is_colab() or is_kaggle()):
+if s == 0 and (IS_COLAB or IS_KAGGLE):
 raise NotImplementedError(
 "'source=0' webcam not supported in Colab and Kaggle notebooks. "
 "Try running 'source=0' in a local environment."

ultralytics/data/utils.py
@@ -27,9 +27,9 @@ from ultralytics.utils import (
 clean_url,
 colorstr,
 emojis,
+is_dir_writeable,
 yaml_load,
 yaml_save,
-is_dir_writeable,
 )
 from ultralytics.utils.checks import check_file, check_font, is_ascii
 from ultralytics.utils.downloads import download, safe_download, unzip_file

ultralytics/engine/model.py
@@ -321,9 +321,10 @@ class Model(nn.Module):
 AssertionError: If the model is not a PyTorch model.
 """
 self._check_is_pytorch_model()
-from ultralytics import __version__
 from datetime import datetime
+from ultralytics import __version__
 updates = {
 "date": datetime.now().isoformat(),
 "version": __version__,

ultralytics/engine/trainer.py
@@ -464,6 +464,7 @@ class BaseTrainer:
 def save_model(self):
 """Save model training checkpoints with additional metadata."""
 import io
 import pandas as pd  # scope for faster 'import ultralytics'
 # Serialize ckpt to a byte buffer once (faster than repeated torch.save() calls)

ultralytics/hub/auth.py
@@ -3,7 +3,7 @@
 import requests
 from ultralytics.hub.utils import HUB_API_ROOT, HUB_WEB_ROOT, PREFIX, request_with_credentials
-from ultralytics.utils import LOGGER, SETTINGS, emojis, is_colab
+from ultralytics.utils import IS_COLAB, LOGGER, SETTINGS, emojis
 API_KEY_URL = f"{HUB_WEB_ROOT}/settings?tab=api+keys"
@@ -50,7 +50,7 @@ class Auth:
 # Attempt to authenticate with the provided API key
 success = self.authenticate()
 # If the API key is not provided and the environment is a Google Colab notebook
-elif is_colab():
+elif IS_COLAB:
 # Attempt to authenticate using browser cookies
 success = self.auth_with_cookies()
 else:
@@ -109,7 +109,7 @@ class Auth:
 Returns:
 (bool): True if authentication is successful, False otherwise.
 """
-if not is_colab():
+if not IS_COLAB:
 return False  # Currently only works with Colab
 try:
 authn = request_with_credentials(f"{HUB_API_ROOT}/v1/auth/auto")

ultralytics/hub/session.py
@@ -7,11 +7,11 @@ from pathlib import Path
 import requests
-from ultralytics.hub.utils import HUB_WEB_ROOT, HELP_MSG, PREFIX, TQDM
+from ultralytics.hub.utils import HELP_MSG, HUB_WEB_ROOT, PREFIX, TQDM
-from ultralytics.utils import LOGGER, SETTINGS, __version__, checks, emojis, is_colab
+from ultralytics.utils import IS_COLAB, LOGGER, SETTINGS, __version__, checks, emojis
 from ultralytics.utils.errors import HUBModelError
-AGENT_NAME = f"python-{__version__}-colab" if is_colab() else f"python-{__version__}-local"
+AGENT_NAME = f"python-{__version__}-colab" if IS_COLAB else f"python-{__version__}-local"
 class HUBTrainingSession:

ultralytics/hub/utils.py
@@ -12,6 +12,9 @@ import requests
 from ultralytics.utils import (
 ARGV,
 ENVIRONMENT,
+IS_COLAB,
+IS_GIT_DIR,
+IS_PIP_PACKAGE,
 LOGGER,
 ONLINE,
 RANK,
@@ -22,9 +25,6 @@ from ultralytics.utils import (
 __version__,
 colorstr,
 get_git_origin_url,
-is_colab,
-is_git_dir,
-is_pip_package,
 )
 from ultralytics.utils.downloads import GITHUB_ASSETS_NAMES
@@ -48,7 +48,7 @@ def request_with_credentials(url: str) -> any:
 Raises:
 OSError: If the function is not run in a Google Colab environment.
 """
-if not is_colab():
+if not IS_COLAB:
 raise OSError("request_with_credentials() must run in a Colab environment")
 from google.colab import output  # noqa
 from IPython import display  # noqa
@@ -189,7 +189,7 @@ class Events:
 self.t = 0.0  # rate limit timer (seconds)
 self.metadata = {
 "cli": Path(ARGV[0]).name == "yolo",
-"install": "git" if is_git_dir() else "pip" if is_pip_package() else "other",
+"install": "git" if IS_GIT_DIR else "pip" if IS_PIP_PACKAGE else "other",
 "python": ".".join(platform.python_version_tuple()[:2]),  # i.e. 3.10
 "version": __version__,
 "env": ENVIRONMENT,
@@ -201,7 +201,7 @@ class Events:
 and RANK in {-1, 0}
 and not TESTS_RUNNING
 and ONLINE
-and (is_pip_package() or get_git_origin_url() == "https://github.com/ultralytics/ultralytics.git")
+and (IS_PIP_PACKAGE or get_git_origin_url() == "https://github.com/ultralytics/ultralytics.git")
 )
 def __call__(self, cfg):

ultralytics/models/rtdetr/val.py
@@ -125,7 +125,7 @@ class RTDETRValidator(DetectionValidator):
 bbox = ops.xywh2xyxy(bbox)  # target boxes
 bbox[..., [0, 2]] *= ori_shape[1]  # native-space pred
 bbox[..., [1, 3]] *= ori_shape[0]  # native-space pred
-return dict(cls=cls, bbox=bbox, ori_shape=ori_shape, imgsz=imgsz, ratio_pad=ratio_pad)
+return {"cls": cls, "bbox": bbox, "ori_shape": ori_shape, "imgsz": imgsz, "ratio_pad": ratio_pad}
 def _prepare_pred(self, pred, pbatch):
 """Prepares and returns a batch with transformed bounding boxes and class labels."""

ultralytics/models/sam/modules/tiny_encoder.py
@@ -584,9 +584,9 @@ class TinyViT(nn.Module):
 img_size (int, optional): The input image size. Defaults to 224.
 in_chans (int, optional): Number of input channels. Defaults to 3.
 num_classes (int, optional): Number of classification classes. Defaults to 1000.
-embed_dims (List[int], optional): List of embedding dimensions for each layer. Defaults to [96, 192, 384, 768].
+embed_dims (List[int], optional): List of embedding dimensions per layer. Defaults to [96, 192, 384, 768].
 depths (List[int], optional): List of depths for each layer. Defaults to [2, 2, 6, 2].
-num_heads (List[int], optional): List of number of attention heads for each layer. Defaults to [3, 6, 12, 24].
+num_heads (List[int], optional): List of number of attention heads per layer. Defaults to [3, 6, 12, 24].
 window_sizes (List[int], optional): List of window sizes for each layer. Defaults to [7, 7, 14, 7].
 mlp_ratio (float, optional): Ratio of MLP hidden dimension to embedding dimension. Defaults to 4.
 drop_rate (float, optional): Dropout rate. Defaults to 0.

ultralytics/models/sam/modules/transformer.py
@@ -222,7 +222,7 @@ class Attention(nn.Module):
 downsample_rate (int, optional): The factor by which the internal dimensions are downsampled. Defaults to 1.
 Raises:
-AssertionError: If 'num_heads' does not evenly divide the internal dimension (embedding_dim / downsample_rate).
+AssertionError: If 'num_heads' does not evenly divide the internal dim (embedding_dim / downsample_rate).
 """
 super().__init__()
 self.embedding_dim = embedding_dim

ultralytics/models/sam/predict.py
@@ -127,10 +127,10 @@ class Predictor(BasePredictor):
 Args:
 im (torch.Tensor): The preprocessed input image in tensor format, with shape (N, C, H, W).
 bboxes (np.ndarray | List, optional): Bounding boxes with shape (N, 4), in XYXY format.
-points (np.ndarray | List, optional): Points indicating object locations with shape (N, 2), in pixel coordinates.
+points (np.ndarray | List, optional): Points indicating object locations with shape (N, 2), in pixels.
-labels (np.ndarray | List, optional): Labels for point prompts, shape (N, ). 1 for foreground and 0 for background.
+labels (np.ndarray | List, optional): Labels for point prompts, shape (N, ). 1 = foreground, 0 = background.
-masks (np.ndarray, optional): Low-resolution masks from previous predictions. Shape should be (N, H, W). For SAM, H=W=256.
+masks (np.ndarray, optional): Low-resolution masks from previous predictions shape (N,H,W). For SAM H=W=256.
-multimask_output (bool, optional): Flag to return multiple masks. Helpful for ambiguous prompts. Defaults to False.
+multimask_output (bool, optional): Flag to return multiple masks. Helpful for ambiguous prompts.
 Returns:
 (tuple): Contains the following three elements.
@@ -156,10 +156,10 @@ class Predictor(BasePredictor):
 Args:
 im (torch.Tensor): The preprocessed input image in tensor format, with shape (N, C, H, W).
 bboxes (np.ndarray | List, optional): Bounding boxes with shape (N, 4), in XYXY format.
-points (np.ndarray | List, optional): Points indicating object locations with shape (N, 2), in pixel coordinates.
+points (np.ndarray | List, optional): Points indicating object locations with shape (N, 2), in pixels.
-labels (np.ndarray | List, optional): Labels for point prompts, shape (N, ). 1 for foreground and 0 for background.
+labels (np.ndarray | List, optional): Labels for point prompts, shape (N, ). 1 = foreground, 0 = background.
-masks (np.ndarray, optional): Low-resolution masks from previous predictions. Shape should be (N, H, W). For SAM, H=W=256.
+masks (np.ndarray, optional): Low-resolution masks from previous predictions shape (N,H,W). For SAM H=W=256.
-multimask_output (bool, optional): Flag to return multiple masks. Helpful for ambiguous prompts. Defaults to False.
+multimask_output (bool, optional): Flag to return multiple masks. Helpful for ambiguous prompts.
 Returns:
 (tuple): Contains the following three elements.
@@ -230,7 +230,7 @@ class Predictor(BasePredictor):
 im (torch.Tensor): Input tensor representing the preprocessed image with dimensions (N, C, H, W).
 crop_n_layers (int): Specifies the number of layers for additional mask predictions on image crops.
 Each layer produces 2**i_layer number of image crops.
-crop_overlap_ratio (float): Determines the extent of overlap between crops. Scaled down in subsequent layers.
+crop_overlap_ratio (float): Determines the overlap between crops. Scaled down in subsequent layers.
 crop_downscale_factor (int): Scaling factor for the number of sampled points-per-side in each layer.
 point_grids (list[np.ndarray], optional): Custom grids for point sampling normalized to [0,1].
 Used in the nth crop layer.
@@ -240,7 +240,7 @@ class Predictor(BasePredictor):
 conf_thres (float): Confidence threshold [0,1] for filtering based on the model's mask quality prediction.
 stability_score_thresh (float): Stability threshold [0,1] for mask filtering based on mask stability.
 stability_score_offset (float): Offset value for calculating stability score.
-crop_nms_thresh (float): IoU cutoff for Non-Maximum Suppression (NMS) to remove duplicate masks between crops.
+crop_nms_thresh (float): IoU cutoff for NMS to remove duplicate masks between crops.
 Returns:
 (tuple): A tuple containing segmented masks, confidence scores, and bounding boxes.
@@ -351,8 +351,8 @@ class Predictor(BasePredictor):
 """
 Post-processes SAM's inference outputs to generate object detection masks and bounding boxes.
-The method scales masks and boxes to the original image size and applies a threshold to the mask predictions. The
+The method scales masks and boxes to the original image size and applies a threshold to the mask predictions.
-SAM model uses advanced architecture and promptable segmentation tasks to achieve real-time performance.
+The SAM model uses advanced architecture and promptable segmentation tasks to achieve real-time performance.
 Args:
 preds (tuple): The output from SAM model inference, containing masks, scores, and optional bounding boxes.

ultralytics/models/yolo/detect/val.py
@@ -106,7 +106,7 @@ class DetectionValidator(BaseValidator):
 if len(cls):
 bbox = ops.xywh2xyxy(bbox) * torch.tensor(imgsz, device=self.device)[[1, 0, 1, 0]]  # target boxes
 ops.scale_boxes(imgsz, bbox, ori_shape, ratio_pad=ratio_pad)  # native-space labels
-return dict(cls=cls, bbox=bbox, ori_shape=ori_shape, imgsz=imgsz, ratio_pad=ratio_pad)
+return {"cls": cls, "bbox": bbox, "ori_shape": ori_shape, "imgsz": imgsz, "ratio_pad": ratio_pad}
 def _prepare_pred(self, pred, pbatch):
 """Prepares a batch of images and annotations for validation."""

ultralytics/models/yolo/model.py
@@ -5,7 +5,7 @@ from pathlib import Path
 from ultralytics.engine.model import Model
 from ultralytics.models import yolo
 from ultralytics.nn.tasks import ClassificationModel, DetectionModel, OBBModel, PoseModel, SegmentationModel, WorldModel
-from ultralytics.utils import yaml_load, ROOT
+from ultralytics.utils import ROOT, yaml_load
 class YOLO(Model):

ultralytics/models/yolo/obb/val.py
@@ -78,7 +78,7 @@ class OBBValidator(DetectionValidator):
 if len(cls):
 bbox[..., :4].mul_(torch.tensor(imgsz, device=self.device)[[1, 0, 1, 0]])  # target boxes
 ops.scale_boxes(imgsz, bbox, ori_shape, ratio_pad=ratio_pad, xywh=True)  # native-space labels
-return dict(cls=cls, bbox=bbox, ori_shape=ori_shape, imgsz=imgsz, ratio_pad=ratio_pad)
+return {"cls": cls, "bbox": bbox, "ori_shape": ori_shape, "imgsz": imgsz, "ratio_pad": ratio_pad}
 def _prepare_pred(self, pred, pbatch):
 """Prepares and returns a batch for OBB validation with scaled and padded bounding boxes."""

ultralytics/models/yolo/world/train_world.py
@@ -1,8 +1,8 @@
-from ultralytics.data import build_yolo_dataset, build_grounding, YOLOConcatDataset
+from ultralytics.data import YOLOConcatDataset, build_grounding, build_yolo_dataset
 from ultralytics.data.utils import check_det_dataset
 from ultralytics.models.yolo.world import WorldTrainer
-from ultralytics.utils.torch_utils import de_parallel
 from ultralytics.utils import DEFAULT_CFG
+from ultralytics.utils.torch_utils import de_parallel
 class WorldTrainerFromScratch(WorldTrainer):

ultralytics/nn/modules/__init__.py
@@ -24,27 +24,27 @@ from .block import (
 C3TR,
 DFL,
 SPP,
+SPPELAN,
 SPPF,
+ADown,
+BNContrastiveHead,
 Bottleneck,
 BottleneckCSP,
 C2f,
 C2fAttn,
-ImagePoolingAttn,
 C3Ghost,
 C3x,
+CBFuse,
+CBLinear,
+ContrastiveHead,
 GhostBottleneck,
 HGBlock,
 HGStem,
+ImagePoolingAttn,
 Proto,
 RepC3,
-ResNetLayer,
-ContrastiveHead,
-BNContrastiveHead,
 RepNCSPELAN4,
-ADown,
+ResNetLayer,
-SPPELAN,
-CBFuse,
-CBLinear,
 Silence,
 )
 from .conv import (

ultralytics/nn/modules/head.py
@@ -8,7 +8,7 @@ import torch.nn as nn
 from torch.nn.init import constant_, xavier_uniform_
 from ultralytics.utils.tal import TORCH_1_10, dist2bbox, dist2rbox, make_anchors
-from .block import DFL, Proto, ContrastiveHead, BNContrastiveHead
+from .block import DFL, BNContrastiveHead, ContrastiveHead, Proto
 from .conv import Conv
 from .transformer import MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer
 from .utils import bias_init_with_prob, linear_init

ultralytics/nn/tasks.py
@@ -15,14 +15,17 @@ from ultralytics.nn.modules import (
 C3TR,
 OBB,
 SPP,
+SPPELAN,
 SPPF,
+ADown,
 Bottleneck,
 BottleneckCSP,
 C2f,
 C2fAttn,
-ImagePoolingAttn,
 C3Ghost,
 C3x,
+CBFuse,
+CBLinear,
 Classify,
 Concat,
 Conv,
@@ -36,19 +39,16 @@ from ultralytics.nn.modules import (
 GhostConv,
 HGBlock,
 HGStem,
+ImagePoolingAttn,
 Pose,
 RepC3,
 RepConv,
+RepNCSPELAN4,
 ResNetLayer,
 RTDETRDecoder,
 Segment,
-WorldDetect,
-RepNCSPELAN4,
-ADown,
-SPPELAN,
-CBFuse,
-CBLinear,
 Silence,
+WorldDetect,
 )
 from ultralytics.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, colorstr, emojis, yaml_load
 from ultralytics.utils.checks import check_requirements, check_suffix, check_yaml

ultralytics/solutions/object_counter.py
@@ -1,6 +1,7 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 from collections import defaultdict
 import cv2
 from ultralytics.utils.checks import check_imshow, check_requirements

ultralytics/trackers/byte_tracker.py
@@ -5,8 +5,8 @@ import numpy as np
 from .basetrack import BaseTrack, TrackState
 from .utils import matching
 from .utils.kalman_filter import KalmanFilterXYAH
-from ..utils.ops import xywh2ltwh
 from ..utils import LOGGER
+from ..utils.ops import xywh2ltwh
 class STrack(BaseTrack):

ultralytics/trackers/utils/kalman_filter.py
@@ -39,8 +39,8 @@ class KalmanFilterXYAH:
 and height h.
 Returns:
-(tuple[ndarray, ndarray]): Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional) of
-the new track. Unobserved velocities are initialized to 0 mean.
+(tuple[ndarray, ndarray]): Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional)
+of the new track. Unobserved velocities are initialized to 0 mean.
 """
 mean_pos = measurement
 mean_vel = np.zeros_like(mean_pos)
@@ -235,8 +235,8 @@ class KalmanFilterXYWH(KalmanFilterXYAH):
 measurement (ndarray): Bounding box coordinates (x, y, w, h) with center position (x, y), width, and height.
 Returns:
-(tuple[ndarray, ndarray]): Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional) of
-the new track. Unobserved velocities are initialized to 0 mean.
+(tuple[ndarray, ndarray]): Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional)
+of the new track. Unobserved velocities are initialized to 0 mean.
 """
 mean_pos = measurement
 mean_vel = np.zeros_like(mean_pos)

ultralytics/trackers/utils/matching.py
@@ -4,7 +4,7 @@ import numpy as np
 import scipy
 from scipy.spatial.distance import cdist
-from ultralytics.utils.metrics import bbox_ioa, batch_probiou
+from ultralytics.utils.metrics import batch_probiou, bbox_ioa
 try:
 import lap  # for linear_assignment

ultralytics/utils/__init__.py
@@ -495,9 +495,6 @@ def is_online() -> bool:
 return False
-ONLINE = is_online()
 def is_pip_package(filepath: str = __name__) -> bool:
 """
 Determines if the file at the given filepath is part of a pip package.
@@ -550,17 +547,6 @@ def is_github_action_running() -> bool:
 return "GITHUB_ACTIONS" in os.environ and "GITHUB_WORKFLOW" in os.environ and "RUNNER_OS" in os.environ
-def is_git_dir():
-"""
-Determines whether the current file is part of a git repository. If the current file is not part of a git
-repository, returns None.
-Returns:
-(bool): True if current file is part of a git repository.
-"""
-return get_git_dir() is not None
 def get_git_dir():
 """
 Determines whether the current file is part of a git repository and if so, returns the repository root directory. If
@@ -574,6 +560,17 @@ def get_git_dir():
 return d
+def is_git_dir():
+"""
+Determines whether the current file is part of a git repository. If the current file is not part of a git
+repository, returns None.
+Returns:
+(bool): True if current file is part of a git repository.
+"""
+return GIT_DIR is not None
 def get_git_origin_url():
 """
 Retrieves the origin URL of a git repository.
@@ -581,7 +578,7 @@ def get_git_origin_url():
 Returns:
 (str | None): The origin URL of the git repository or None if not git directory.
 """
-if is_git_dir():
+if IS_GIT_DIR:
 with contextlib.suppress(subprocess.CalledProcessError):
 origin = subprocess.check_output(["git", "config", "--get", "remote.origin.url"])
 return origin.decode().strip()
@@ -594,7 +591,7 @@ def get_git_branch():
 Returns:
 (str | None): The current git branch name or None if not a git directory.
 """
-if is_git_dir():
+if IS_GIT_DIR:
 with contextlib.suppress(subprocess.CalledProcessError):
 origin = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
 return origin.decode().strip()
@@ -660,6 +657,16 @@ def get_user_config_dir(sub_dir="Ultralytics"):
 return path
+# Define constants (required below)
+ONLINE = is_online()
+IS_COLAB = is_colab()
+IS_DOCKER = is_docker()
+IS_JUPYTER = is_jupyter()
+IS_KAGGLE = is_kaggle()
+IS_PIP_PACKAGE = is_pip_package()
+IS_RASPBERRYPI = is_raspberrypi()
+GIT_DIR = get_git_dir()
+IS_GIT_DIR = is_git_dir()
 USER_CONFIG_DIR = Path(os.getenv("YOLO_CONFIG_DIR") or get_user_config_dir())  # Ultralytics settings dir
 SETTINGS_YAML = USER_CONFIG_DIR / "settings.yaml"
@@ -886,7 +893,7 @@ def set_sentry():
 event["tags"] = {
 "sys_argv": ARGV[0],
 "sys_argv_name": Path(ARGV[0]).name,
-"install": "git" if is_git_dir() else "pip" if is_pip_package() else "other",
+"install": "git" if IS_GIT_DIR else "pip" if IS_PIP_PACKAGE else "other",
 "os": ENVIRONMENT,
 }
 return event
@@ -897,8 +904,8 @@ def set_sentry():
 and Path(ARGV[0]).name == "yolo"
 and not TESTS_RUNNING
 and ONLINE
-and is_pip_package()
+and IS_PIP_PACKAGE
-and not is_git_dir()
+and not IS_GIT_DIR
 ):
 # If sentry_sdk package is not installed then return and do not use Sentry
 try:
@@ -937,9 +944,8 @@ class SettingsManager(dict):
 from ultralytics.utils.checks import check_version
 from ultralytics.utils.torch_utils import torch_distributed_zero_first
-git_dir = get_git_dir()
-root = git_dir or Path()
-datasets_root = (root.parent if git_dir and is_dir_writeable(root.parent) else root).resolve()
+root = GIT_DIR or Path()
+datasets_root = (root.parent if GIT_DIR and is_dir_writeable(root.parent) else root).resolve()
 self.file = Path(file)
 self.version = version
@@ -1043,13 +1049,13 @@ WEIGHTS_DIR = Path(SETTINGS["weights_dir"])  # global weights directory
 RUNS_DIR = Path(SETTINGS["runs_dir"])  # global runs directory
 ENVIRONMENT = (
 "Colab"
-if is_colab()
+if IS_COLAB
 else "Kaggle"
-if is_kaggle()
+if IS_KAGGLE
 else "Jupyter"
-if is_jupyter()
+if IS_JUPYTER
 else "Docker"
-if is_docker()
+if IS_DOCKER
 else platform.system()
 )
 TESTS_RUNNING = is_pytest_running() or is_github_action_running()
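The hunk above is the core of the constants refactor: environment checks such as `is_colab()`, `is_pip_package()` and the new `is_raspberrypi()` are now evaluated once when `ultralytics.utils` is imported and exposed as module-level constants. A minimal usage sketch (illustrative only, not part of the diff):

```python
# Environment flags are plain booleans computed at import time,
# so call sites test a constant instead of re-running a helper function.
from ultralytics.utils import GIT_DIR, IS_COLAB, IS_GIT_DIR, IS_RASPBERRYPI

if IS_RASPBERRYPI:
    print("Running on a Raspberry Pi")
if IS_GIT_DIR:
    print(f"Inside a git checkout at {GIT_DIR}")
if not IS_COLAB:
    print("Not a Colab notebook, so 'source=0' webcam streaming is allowed")
```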

ultralytics/utils/callbacks/hub.py
@@ -12,10 +12,7 @@ def on_pretrain_routine_end(trainer):
 session = getattr(trainer, "hub_session", None)
 if session:
 # Start timer for upload rate limit
-session.timers = {
-"metrics": time(),
-"ckpt": time(),
-}  # start timer on session.rate_limit
+session.timers = {"metrics": time(), "ckpt": time()}  # start timer on session.rate_limit
 def on_fit_epoch_end(trainer):

ultralytics/utils/callbacks/mlflow.py
@@ -58,7 +58,7 @@ def on_pretrain_routine_end(trainer):
 MLFLOW_TRACKING_URI: The URI for MLflow tracking. If not set, defaults to 'runs/mlflow'.
 MLFLOW_EXPERIMENT_NAME: The name of the MLflow experiment. If not set, defaults to trainer.args.project.
 MLFLOW_RUN: The name of the MLflow run. If not set, defaults to trainer.args.name.
-MLFLOW_KEEP_RUN_ACTIVE: Boolean indicating whether to keep the MLflow run active after the end of the training phase.
+MLFLOW_KEEP_RUN_ACTIVE: Boolean indicating whether to keep the MLflow run active after the end of training.
 """
 global mlflow

ultralytics/utils/callbacks/tensorboard.py
@@ -15,6 +15,7 @@ try:
 # Imports below only required if TensorBoard enabled
 import warnings
 from copy import deepcopy
 from ultralytics.utils.torch_utils import de_parallel, torch
 except (ImportError, AssertionError, TypeError, AttributeError):

ultralytics/utils/checks.py
@@ -22,10 +22,15 @@ import torch
 from ultralytics.utils import (
 ASSETS,
 AUTOINSTALL,
+IS_COLAB,
+IS_DOCKER,
+IS_JUPYTER,
+IS_KAGGLE,
+IS_PIP_PACKAGE,
 LINUX,
 LOGGER,
-PYTHON_VERSION,
 ONLINE,
+PYTHON_VERSION,
 ROOT,
 TORCHVISION_VERSION,
 USER_CONFIG_DIR,
@@ -37,12 +42,7 @@ from ultralytics.utils import (
 colorstr,
 downloads,
 emojis,
-is_colab,
-is_docker,
 is_github_action_running,
-is_jupyter,
-is_kaggle,
-is_pip_package,
 url2file,
 )
@@ -277,7 +277,7 @@ def check_pip_update_available():
 Returns:
 (bool): True if an update is available, False otherwise.
 """
-if ONLINE and is_pip_package():
+if ONLINE and IS_PIP_PACKAGE:
 with contextlib.suppress(Exception):
 from ultralytics import __version__
@@ -528,7 +528,7 @@ def check_imshow(warn=False):
 """Check if environment supports image displays."""
 try:
 if LINUX:
-assert "DISPLAY" in os.environ and not is_docker() and not is_colab() and not is_kaggle()
+assert "DISPLAY" in os.environ and not IS_DOCKER and not IS_COLAB and not IS_KAGGLE
 cv2.imshow("test", np.zeros((8, 8, 3), dtype=np.uint8))  # show a small 8-pixel image
 cv2.waitKey(1)
 cv2.destroyAllWindows()
@@ -546,10 +546,10 @@ def check_yolo(verbose=True, device=""):
 from ultralytics.utils.torch_utils import select_device
-if is_jupyter():
+if IS_JUPYTER:
 if check_requirements("wandb", install=False):
 os.system("pip uninstall -y wandb")  # uninstall wandb: unwanted account creation prompt with infinite hang
-if is_colab():
+if IS_COLAB:
 shutil.rmtree("sample_data", ignore_errors=True)  # remove colab /sample_data directory
 if verbose:
@@ -574,7 +574,7 @@ def collect_system_info():
 import psutil
-from ultralytics.utils import ENVIRONMENT, is_git_dir
+from ultralytics.utils import ENVIRONMENT, IS_GIT_DIR
 from ultralytics.utils.torch_utils import get_cpu_info
 ram_info = psutil.virtual_memory().total / (1024**3)  # Convert bytes to GB
@@ -583,7 +583,7 @@ def collect_system_info():
 f"\n{'OS':<20}{platform.platform()}\n"
 f"{'Environment':<20}{ENVIRONMENT}\n"
 f"{'Python':<20}{PYTHON_VERSION}\n"
-f"{'Install':<20}{'git' if is_git_dir() else 'pip' if is_pip_package() else 'other'}\n"
+f"{'Install':<20}{'git' if IS_GIT_DIR else 'pip' if IS_PIP_PACKAGE else 'other'}\n"
 f"{'RAM':<20}{ram_info:.2f} GB\n"
 f"{'CPU':<20}{get_cpu_info()}\n"
 f"{'CUDA':<20}{torch.version.cuda if torch and torch.cuda.is_available() else None}\n"

ultralytics/utils/torch_utils.py
@@ -21,8 +21,8 @@ from ultralytics.utils import (
 LOGGER,
 PYTHON_VERSION,
 TORCHVISION_VERSION,
-colorstr,
 __version__,
+colorstr,
 )
 from ultralytics.utils.checks import check_version
