Prevent final val if val=False (#16776)

Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
pull/16772/head^2
Glenn Jocher 2 months ago committed by GitHub
parent 28eb65694c
commit 84fd8f6e49
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 2
      ultralytics/models/yolo/detect/val.py
  2. 3
      ultralytics/utils/autobatch.py

@@ -75,7 +75,7 @@ class DetectionValidator(BaseValidator):
     )  # is COCO
     self.is_lvis = isinstance(val, str) and "lvis" in val and not self.is_coco  # is LVIS
     self.class_map = converter.coco80_to_coco91_class() if self.is_coco else list(range(len(model.names)))
-    self.args.save_json |= (self.is_coco or self.is_lvis) and not self.training  # run on final val if training COCO
+    self.args.save_json |= self.args.val and (self.is_coco or self.is_lvis) and not self.training  # run final val
     self.names = model.names
     self.nc = len(model.names)
     self.metrics.names = self.names

@@ -1,6 +1,7 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 """Functions for estimating the best YOLO batch size to use a fraction of the available CUDA memory in PyTorch."""
+import os
 from copy import deepcopy

 import numpy as np
@@ -57,7 +58,7 @@ def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch):
     # Inspect CUDA memory
     gb = 1 << 30  # bytes to GiB (1024 ** 3)
-    d = str(device).upper()  # 'CUDA:0'
+    d = f"CUDA:{os.getenv('CUDA_VISIBLE_DEVICES', '0').strip()[0]}"  # 'CUDA:0'
     properties = torch.cuda.get_device_properties(device)  # device properties
     t = properties.total_memory / gb  # GiB total
     r = torch.cuda.memory_reserved(device) / gb  # GiB reserved

Loading…
Cancel
Save