`ultralytics 8.1.15` add Python 3.12 compatibility (#8210)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: Johnny <johnnynuca14@gmail.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
pull/8234/head v8.1.15
Glenn Jocher committed by GitHub
commit fbed8499da (parent e38c7a3022)
GPG Key ID: B5690EEEBB952194
13 changed files (lines changed in parentheses):
  1. .github/workflows/ci.yaml (5)
  2. pyproject.toml (12)
  3. tests/test_cli.py (8)
  4. tests/test_integrations.py (12)
  5. tests/test_python.py (2)
  6. ultralytics/__init__.py (2)
  7. ultralytics/engine/exporter.py (6)
  8. ultralytics/engine/trainer.py (2)
  9. ultralytics/utils/benchmarks.py (11)
  10. ultralytics/utils/callbacks/wb.py (9)
  11. ultralytics/utils/checks.py (11)
  12. ultralytics/utils/ops.py (2)
  13. ultralytics/utils/torch_utils.py (5)

@@ -179,7 +179,7 @@ jobs:
if [ "${{ matrix.torch }}" == "1.8.0" ]; then
torch="torch==1.8.0 torchvision==0.9.0"
fi
pip install -e . $torch pytest-cov "coremltools>=7.0" --extra-index-url https://download.pytorch.org/whl/cpu
pip install -e . $torch pytest-cov "coremltools>=7.0; platform_system != 'Windows' and python_version <= '3.11'" --extra-index-url https://download.pytorch.org/whl/cpu
- name: Check environment
run: |
yolo checks
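The quoted requirement now carries a PEP 508 environment marker, so pip simply skips coremltools on Windows and on Python 3.12+; the conda job below and pyproject.toml reuse the same marker. A minimal sketch of how such a marker evaluates on the running interpreter, assuming the third-party packaging library is installed:

    # Evaluate the same PEP 508 marker that pip checks before installing coremltools.
    from packaging.markers import Marker

    marker = Marker("platform_system != 'Windows' and python_version <= '3.11'")
    print(marker.evaluate())  # False on Windows or on Python 3.12+, True otherwise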
@@ -249,7 +249,8 @@ jobs:
conda install -c pytorch -c conda-forge pytorch torchvision ultralytics openvino
- name: Install pip packages
run: |
pip install pytest 'coremltools>=7.0'
# CoreML must be installed before export due to protobuf error from AutoInstall
pip install pytest "coremltools>=7.0; platform_system != 'Windows' and python_version <= '3.11'"
- name: Check environment
run: |
conda list

@@ -52,6 +52,7 @@ classifiers = [
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Software Development",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
@@ -64,7 +65,6 @@ classifiers = [
# Required dependencies ------------------------------------------------------------------------------------------------
dependencies = [
"matplotlib>=3.3.0",
"numpy>=1.22.2",
"opencv-python>=4.6.0",
"pillow>=7.1.2",
"pyyaml>=5.3.1",
@@ -97,10 +97,10 @@ dev = [
]
export = [
"onnx>=1.12.0", # ONNX export
"coremltools>=7.0; platform_system != 'Windows'", # CoreML only supported on macOS and Linux
"openvino-dev>=2023.0", # OpenVINO export
"tensorflow<=2.13.1", # TF bug https://github.com/ultralytics/ultralytics/issues/5161
"tensorflowjs>=3.9.0", # TF.js export, automatically installs tensorflow
"coremltools>=7.0; platform_system != 'Windows' and python_version <= '3.11'", # CoreML supported on macOS and Linux
"openvino-dev>=2023.0; python_version <= '3.11'", # OpenVINO export
"tensorflow<=2.13.1; python_version <= '3.11'", # TF bug https://github.com/ultralytics/ultralytics/issues/5161
"tensorflowjs>=3.9.0; python_version <= '3.11'", # TF.js export, automatically installs tensorflow
]
explorer = [
"lancedb", # vector search
@@ -120,7 +120,7 @@ extra = [
"hub-sdk>=0.0.2", # Ultralytics HUB
"ipython", # interactive notebook
"albumentations>=1.0.3", # training augmentations
"pycocotools>=2.0.6", # COCO mAP
"pycocotools>=2.0.7", # COCO mAP
]
[project.urls]
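After installation, the effect of these markers can be checked by evaluating the recorded requirements for the export extra on the current interpreter. A hedged sketch, assuming ultralytics and packaging are installed:

    # List the requirements that `pip install "ultralytics[export]"` would apply here.
    from importlib.metadata import requires
    from packaging.requirements import Requirement

    for line in requires("ultralytics") or []:
        req = Requirement(line)
        if req.marker and not req.marker.evaluate({"extra": "export"}):
            continue  # e.g. coremltools/openvino-dev/tensorflow are skipped on Python 3.12
        print(req.name)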

@@ -4,11 +4,10 @@ import subprocess
import pytest
from ultralytics.utils import ASSETS, WEIGHTS_DIR
from ultralytics.utils.checks import cuda_device_count, cuda_is_available
from ultralytics.utils import ASSETS, WEIGHTS_DIR, checks
CUDA_IS_AVAILABLE = cuda_is_available()
CUDA_DEVICE_COUNT = cuda_device_count()
CUDA_IS_AVAILABLE = checks.cuda_is_available()
CUDA_DEVICE_COUNT = checks.cuda_device_count()
TASK_ARGS = [
("detect", "yolov8n", "coco8.yaml"),
("segment", "yolov8n-seg", "coco8-seg.yaml"),
@@ -70,6 +69,7 @@ def test_rtdetr(task="detect", model="yolov8n-rtdetr.yaml", data="coco8.yaml"):
run(f"yolo predict {task} model={model} source={ASSETS / 'bus.jpg'} imgsz=640 save save_crop save_txt")
@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="MobileSAM Clip is not supported in Python 3.12")
def test_fastsam(task="segment", model=WEIGHTS_DIR / "FastSAM-s.pt", data="coco8-seg.yaml"):
"""Test FastSAM segmentation functionality within Ultralytics."""
source = ASSETS / "bus.jpg"
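The skip uses the new checks.IS_PYTHON_3_12 constant (defined in ultralytics/utils/checks.py further down); the same gate guards the OpenVINO and CoreML export tests in tests/test_python.py below. A minimal sketch of the pattern, with an illustrative test name and body:

    import pytest
    from ultralytics.utils import checks

    @pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="dependency not yet available on Python 3.12")
    def test_py311_only_feature():
        """Runs on Python 3.11 and earlier; reported as skipped on 3.12."""
        assert True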

@@ -41,16 +41,16 @@ def test_triton():
# Create variables
model_name = "yolo"
triton_repo_path = TMP / "triton_repo"
triton_model_path = triton_repo_path / model_name
triton_repo = TMP / "triton_repo" # Triton repo path
triton_model = triton_repo / model_name # Triton model path
# Export model to ONNX
f = YOLO(MODEL).export(format="onnx", dynamic=True)
# Prepare Triton repo
(triton_model_path / "1").mkdir(parents=True, exist_ok=True)
Path(f).rename(triton_model_path / "1" / "model.onnx")
(triton_model_path / "config.pbtxt").touch()
(triton_model / "1").mkdir(parents=True, exist_ok=True)
Path(f).rename(triton_model / "1" / "model.onnx")
(triton_model / "config.pbtxt").touch()
# Define image https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tritonserver
tag = "nvcr.io/nvidia/tritonserver:23.09-py3" # 6.4 GB
@@ -61,7 +61,7 @@ def test_triton():
# Run the Triton server and capture the container ID
container_id = (
subprocess.check_output(
f"docker run -d --rm -v {triton_repo_path}:/models -p 8000:8000 {tag} tritonserver --model-repository=/models",
f"docker run -d --rm -v {triton_repo}:/models -p 8000:8000 {tag} tritonserver --model-repository=/models",
shell=True,
)
.decode("utf-8")
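For context, once tritonserver is up the test consumes the prepared repository over HTTP. A rough sketch of that step; the URL and model name mirror the values above, and the image source is illustrative:

    from ultralytics import YOLO

    # Load the served model by URL; the task must be given since there are no local weights.
    model = YOLO("http://localhost:8000/yolo", task="detect")
    results = model("https://ultralytics.com/images/bus.jpg")  # any image source works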

@@ -218,12 +218,14 @@ def test_export_onnx():
YOLO(f)(SOURCE) # exported model inference
@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="OpenVINO not supported in Python 3.12")
def test_export_openvino():
"""Test exporting the YOLO model to OpenVINO format."""
f = YOLO(MODEL).export(format="openvino")
YOLO(f)(SOURCE) # exported model inference
@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="CoreML not supported in Python 3.12")
def test_export_coreml():
"""Test exporting the YOLO model to CoreML format."""
if not WINDOWS: # RuntimeError: BlobWriter not loaded with coremltools 7.0 on windows

@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
__version__ = "8.1.14"
__version__ = "8.1.15"
from ultralytics.data.explorer.explorer import Explorer
from ultralytics.models import RTDETR, SAM, YOLO, YOLOWorld

@@ -81,7 +81,7 @@ from ultralytics.utils import (
get_default_args,
yaml_save,
)
from ultralytics.utils.checks import check_imgsz, check_is_path_safe, check_requirements, check_version
from ultralytics.utils.checks import PYTHON_VERSION, check_imgsz, check_is_path_safe, check_requirements, check_version
from ultralytics.utils.downloads import attempt_download_asset, get_github_assets
from ultralytics.utils.files import file_size, spaces_in_path
from ultralytics.utils.ops import Profile
@@ -609,10 +609,8 @@ class Exporter:
ct_model = cto.palettize_weights(ct_model, config=config)
if self.args.nms and self.model.task == "detect":
if mlmodel:
import platform
# coremltools<=6.2 NMS export requires Python<3.11
check_version(platform.python_version(), "<3.11", name="Python ", hard=True)
check_version(PYTHON_VERSION, "<3.11", name="Python ", hard=True)
weights_dir = None
else:
ct_model.save(str(f)) # save otherwise weights_dir does not exist
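The replacement reads the interpreter version from the new PYTHON_VERSION constant and keeps the existing check_version gate; with hard=True the call raises when the constraint is not satisfied rather than returning False. A brief sketch:

    from ultralytics.utils.checks import PYTHON_VERSION, check_version

    # mlmodel NMS export needs coremltools<=6.2, which in turn needs Python<3.11,
    # so this raises on 3.11+; with hard=False it would only return a bool.
    check_version(PYTHON_VERSION, "<3.11", name="Python ", hard=True)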

@@ -285,7 +285,7 @@ class BaseTrainer:
batch_size = self.batch_size // max(world_size, 1)
self.train_loader = self.get_dataloader(self.trainset, batch_size=batch_size, rank=RANK, mode="train")
if RANK in (-1, 0):
# NOTE: When training DOTA dataset, double batch size could get OOM cause some images got more than 2000 objects.
# Note: When training DOTA dataset, double batch size could get OOM on images with >2000 objects.
self.test_loader = self.get_dataloader(
self.testset, batch_size=batch_size if self.args.task == "obb" else batch_size * 2, rank=-1, mode="val"
)

@@ -36,7 +36,7 @@ from ultralytics import YOLO
from ultralytics.cfg import TASK2DATA, TASK2METRIC
from ultralytics.engine.exporter import export_formats
from ultralytics.utils import ASSETS, LINUX, LOGGER, MACOS, TQDM, WEIGHTS_DIR
from ultralytics.utils.checks import check_requirements, check_yolo
from ultralytics.utils.checks import IS_PYTHON_3_12, check_requirements, check_yolo
from ultralytics.utils.files import file_size
from ultralytics.utils.torch_utils import select_device
@@ -90,6 +90,8 @@ def benchmark(
assert model.task != "obb", "TensorFlow GraphDef not supported for OBB task"
elif i in {5, 10}: # CoreML and TF.js
assert MACOS or LINUX, "export only supported on macOS and Linux"
if i in {3, 5}: # CoreML and OpenVINO
assert not IS_PYTHON_3_12, "CoreML and OpenVINO not supported on Python 3.12"
if "cpu" in device.type:
assert cpu, "inference not supported on CPU"
if "cuda" in device.type:
@@ -147,8 +149,7 @@ class ProfileModels:
"""
ProfileModels class for profiling different models on ONNX and TensorRT.
This class profiles the performance of different models, provided their paths. The profiling includes parameters such as
model speed and FLOPs.
This class profiles the performance of different models, returning results such as model speed and FLOPs.
Attributes:
paths (list): Paths of the models to profile.
@@ -188,9 +189,9 @@ class ProfileModels:
num_warmup_runs (int, optional): Number of warmup runs before the actual profiling starts. Default is 10.
min_time (float, optional): Minimum time in seconds for profiling a model. Default is 60.
imgsz (int, optional): Size of the image used during profiling. Default is 640.
half (bool, optional): Flag to indicate whether to use half-precision floating point for profiling. Default is True.
half (bool, optional): Flag to indicate whether to use half-precision floating point for profiling.
trt (bool, optional): Flag to indicate whether to profile using TensorRT. Default is True.
device (torch.device, optional): Device used for profiling. If None, it is determined automatically. Default is None.
device (torch.device, optional): Device used for profiling. If None, it is determined automatically.
"""
self.paths = paths
self.num_timed_runs = num_timed_runs
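For completeness, the class documented here is typically driven as below; a minimal sketch that assumes a local yolov8n.pt and an ONNX-capable environment:

    from ultralytics.utils.benchmarks import ProfileModels

    # Profile speed and FLOPs for the given weights (TensorRT profiling disabled here).
    ProfileModels(["yolov8n.pt"], imgsz=640, trt=False).profile()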

@@ -23,8 +23,9 @@ def _custom_table(x, y, classes, title="Precision Recall Curve", x_title="Recall
"""
Create and log a custom metric visualization to wandb.plot.pr_curve.
This function crafts a custom metric visualization that mimics the behavior of wandb's default precision-recall curve
while allowing for enhanced customization. The visual metric is useful for monitoring model performance across different classes.
This function crafts a custom metric visualization that mimics the behavior of wandb's default precision-recall
curve while allowing for enhanced customization. The visual metric is useful for monitoring model performance across
different classes.
Args:
x (List): Values for the x-axis; expected to have length N.
@@ -64,8 +65,8 @@ def _plot_curve(
Args:
x (np.ndarray): Data points for the x-axis with length N.
y (np.ndarray): Corresponding data points for the y-axis with shape CxN, where C represents the number of classes.
names (list, optional): Names of the classes corresponding to the y-axis data; length C. Defaults to an empty list.
y (np.ndarray): Corresponding data points for the y-axis with shape CxN, where C is the number of classes.
names (list, optional): Names of the classes corresponding to the y-axis data; length C. Defaults to [].
id (str, optional): Unique identifier for the logged data in wandb. Defaults to 'precision-recall'.
title (str, optional): Title for the visualization plot. Defaults to 'Precision Recall Curve'.
x_title (str, optional): Label for the x-axis. Defaults to 'Recall'.

@@ -9,7 +9,6 @@ import platform
import re
import shutil
import subprocess
import sys
import time
from importlib import metadata
from pathlib import Path
@@ -46,6 +45,8 @@ from ultralytics.utils import (
url2file,
)
PYTHON_VERSION = platform.python_version()
def parse_requirements(file_path=ROOT.parent / "requirements.txt", package=""):
"""
@@ -329,7 +330,7 @@ def check_python(minimum: str = "3.8.0") -> bool:
Returns:
(bool): Whether the installed Python version meets the minimum constraints.
"""
return check_version(platform.python_version(), minimum, name="Python ", hard=True)
return check_version(PYTHON_VERSION, minimum, name="Python ", hard=True)
@TryExcept()
@@ -580,7 +581,7 @@ def collect_system_info():
LOGGER.info(
f"\n{'OS':<20}{platform.platform()}\n"
f"{'Environment':<20}{ENVIRONMENT}\n"
f"{'Python':<20}{sys.version.split()[0]}\n"
f"{'Python':<20}{PYTHON_VERSION}\n"
f"{'Install':<20}{'git' if is_git_dir() else 'pip' if is_pip_package() else 'other'}\n"
f"{'RAM':<20}{ram_info:.2f} GB\n"
f"{'CPU':<20}{get_cpu_info()}\n"
@@ -722,3 +723,7 @@ def cuda_is_available() -> bool:
(bool): True if one or more NVIDIA GPUs are available, False otherwise.
"""
return cuda_device_count() > 0
# Define constants
IS_PYTHON_3_12 = check_version(PYTHON_VERSION, "==3.12", name="Python ", hard=False)
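With these changes the interpreter details come from module-level constants instead of ad-hoc platform/sys calls. A small usage sketch:

    from ultralytics.utils.checks import IS_PYTHON_3_12, PYTHON_VERSION, check_python

    print(PYTHON_VERSION)  # e.g. '3.11.7'
    print(IS_PYTHON_3_12)  # soft check_version('==3.12') result as a bool
    check_python("3.8.0")  # hard check: raises if the interpreter is older than 3.8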

@@ -546,7 +546,7 @@ def xywhr2xyxyxyxy(rboxes):
be in degrees from 0 to 90.
Args:
rboxes (numpy.ndarray | torch.Tensor): Input data in [cx, cy, w, h, rotation] format of shape (n, 5) or (b, n, 5).
rboxes (numpy.ndarray | torch.Tensor): Boxes in [cx, cy, w, h, rotation] format of shape (n, 5) or (b, n, 5).
Returns:
(numpy.ndarray | torch.Tensor): Converted corner points of shape (n, 4, 2) or (b, n, 4, 2).
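A brief usage sketch of the converter whose docstring is tightened here; the values are illustrative, and a zero rotation sidesteps any degrees-versus-radians ambiguity:

    import numpy as np
    from ultralytics.utils.ops import xywhr2xyxyxyxy

    rboxes = np.array([[50.0, 50.0, 20.0, 10.0, 0.0]])  # one box as (cx, cy, w, h, rotation)
    corners = xywhr2xyxyxyxy(rboxes)
    print(corners.shape)  # (1, 4, 2): four corner points per box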

@@ -2,7 +2,6 @@
import math
import os
import platform
import random
import time
from contextlib import contextmanager
@@ -18,7 +17,7 @@ import torch.nn.functional as F
import torchvision
from ultralytics.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, __version__
from ultralytics.utils.checks import check_version
from ultralytics.utils.checks import PYTHON_VERSION, check_version
try:
import thop
@@ -103,7 +102,7 @@ def select_device(device="", batch=0, newline=False, verbose=True):
if isinstance(device, torch.device):
return device
s = f"Ultralytics YOLOv{__version__} 🚀 Python-{platform.python_version()} torch-{torch.__version__} "
s = f"Ultralytics YOLOv{__version__} 🚀 Python-{PYTHON_VERSION} torch-{torch.__version__} "
device = str(device).lower()
for remove in "cuda:", "none", "(", ")", "[", "]", "'", " ":
device = device.replace(remove, "") # to string, 'cuda:0' -> '0' and '(0, 1)' -> '0,1'
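And the banner assembled here is what users see from select_device; a minimal usage sketch with example device strings:

    from ultralytics.utils.torch_utils import select_device

    device = select_device("cpu")  # also accepts '', '0', '0,1' or a torch.device; logs the banner when verbose
    print(device)                  # torch.device('cpu')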
