@@ -1,6 +1,7 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 """Functions for estimating the best YOLO batch size to use a fraction of the available CUDA memory in PyTorch."""
 
+import os
 from copy import deepcopy
 
 import numpy as np
@@ -57,7 +58,7 @@ def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch):
 
     # Inspect CUDA memory
     gb = 1 << 30  # bytes to GiB (1024 ** 3)
-    d = str(device).upper()  # 'CUDA:0'
+    d = f"CUDA:{os.getenv('CUDA_VISIBLE_DEVICES', '0').strip()[0]}"  # 'CUDA:0'
     properties = torch.cuda.get_device_properties(device)  # device properties
     t = properties.total_memory / gb  # GiB total
     r = torch.cuda.memory_reserved(device) / gb  # GiB reserved
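
For reference, a minimal standalone sketch (illustration only, not part of the patch) of what the new label line computes, assuming CUDA_VISIBLE_DEVICES is either unset or set to a comma-separated list of single-digit GPU indices:

import os

# Fall back to "0" when CUDA_VISIBLE_DEVICES is unset; otherwise take the first
# character of the stripped value, e.g. CUDA_VISIBLE_DEVICES="3,1" -> "CUDA:3".
d = f"CUDA:{os.getenv('CUDA_VISIBLE_DEVICES', '0').strip()[0]}"
print(d)  # "CUDA:0" when the variable is unset

Unlike the removed str(device).upper() form, the label is now derived from the CUDA_VISIBLE_DEVICES environment variable rather than from the torch device string.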