`ultralytics 8.2.94` Apple MPS train memory display (#16272)

Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Authored by Quet Almahdi Morris, committed via GitHub 2 months ago
parent 87296e9e75
commit fa6362a6f5
1. .github/workflows/format.yml (6 changed lines)
2. .github/workflows/publish.yml (14 changed lines)
3. ultralytics/__init__.py (2 changed lines)
4. ultralytics/engine/trainer.py (42 changed lines)

.github/workflows/format.yml
@@ -38,12 +38,12 @@ jobs:
Join the Ultralytics community where it suits you best. For real-time chat, head to [Discord](https://ultralytics.com/discord) 🎧. Prefer in-depth discussions? Check out [Discourse](https://community.ultralytics.com). Or dive into threads on our [Subreddit](https://reddit.com/r/ultralytics) to share knowledge with the community.
-## Install
+## Upgrade
-Pip install the `ultralytics` package including all [requirements](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) in a [**Python>=3.8**](https://www.python.org/) environment with [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/).
+Upgrade to the latest `ultralytics` package including all [requirements](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) in a [**Python>=3.8**](https://www.python.org/) environment with [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/) to verify your issue is not already resolved in the latest version:
```bash
-pip install ultralytics
+pip install -U ultralytics
```
## Environments

.github/workflows/publish.yml
@@ -85,6 +85,12 @@ jobs:
if publish:
print('Ready to publish new version to PyPI ✅.')
id: check_pypi
+      - name: Publish to PyPI
+        continue-on-error: true
+        if: (github.event_name == 'push' || github.event.inputs.pypi == 'true') && steps.check_pypi.outputs.increment == 'True'
+        run: |
+          python -m build
+          python -m twine upload dist/* -u __token__ -p ${{ secrets.PYPI_TOKEN }}
- name: Publish new tag
if: (github.event_name == 'push' || github.event.inputs.pypi == 'true') && steps.check_pypi.outputs.increment == 'True'
run: |
@@ -100,14 +106,6 @@ jobs:
run: |
curl -s "https://raw.githubusercontent.com/ultralytics/actions/main/utils/summarize_release.py" | python -
shell: bash
-      - name: Publish to PyPI
-        continue-on-error: true
-        if: (github.event_name == 'push' || github.event.inputs.pypi == 'true') && steps.check_pypi.outputs.increment == 'True'
-        env:
-          PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
-        run: |
-          python -m build
-          python -m twine upload dist/* -u __token__ -p $PYPI_TOKEN
- name: Extract PR Details
env:
GH_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}

ultralytics/__init__.py
@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
__version__ = "8.2.93"
__version__ = "8.2.94"
import os

ultralytics/engine/trainer.py
@@ -28,7 +28,6 @@ from ultralytics.utils import (
DEFAULT_CFG,
LOCAL_RANK,
LOGGER,
-MACOS,
RANK,
TQDM,
__version__,
@@ -409,13 +408,17 @@ class BaseTrainer:
break
# Log
mem = f"{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G" # (GB)
loss_len = self.tloss.shape[0] if len(self.tloss.shape) else 1
losses = self.tloss if loss_len > 1 else torch.unsqueeze(self.tloss, 0)
if RANK in {-1, 0}:
loss_length = self.tloss.shape[0] if len(self.tloss.shape) else 1
pbar.set_description(
("%11s" * 2 + "%11.4g" * (2 + loss_len))
% (f"{epoch + 1}/{self.epochs}", mem, *losses, batch["cls"].shape[0], batch["img"].shape[-1])
("%11s" * 2 + "%11.4g" * (2 + loss_length))
% (
f"{epoch + 1}/{self.epochs}",
f"{self._get_memory():.3g}G", # (GB) GPU memory util
*(self.tloss if loss_length > 1 else torch.unsqueeze(self.tloss, 0)), # losses
batch["cls"].shape[0], # batch size, i.e. 8
batch["img"].shape[-1], # imgsz, i.e 640
)
)
self.run_callbacks("on_batch_end")
if self.args.plots and ni in self.plot_idx:
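A minimal, self-contained sketch of how this reworked progress-bar line is assembled; the loss vector, batch size, and image size below are invented placeholders, and a constant stands in for `self._get_memory()`:

```python
# Sketch only: reproduces the "%11s"/"%11.4g" description format outside BaseTrainer.
import torch

epoch, epochs = 0, 100
tloss = torch.tensor([1.234, 2.345, 0.987])  # placeholder running-mean losses, e.g. box/cls/dfl
loss_length = tloss.shape[0] if len(tloss.shape) else 1
mem_gb = 3.21  # placeholder for self._get_memory(), in GB

desc = ("%11s" * 2 + "%11.4g" * (2 + loss_length)) % (
    f"{epoch + 1}/{epochs}",  # epoch counter column
    f"{mem_gb:.3g}G",  # accelerator memory column
    *(tloss if loss_length > 1 else torch.unsqueeze(tloss, 0)),  # one column per loss component
    16,  # batch size column (placeholder for batch["cls"].shape[0])
    640,  # image size column (placeholder for batch["img"].shape[-1])
)
print(desc)  # prints one aligned row: epoch, memory, losses, batch size, imgsz
```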
@@ -453,11 +456,7 @@
self.scheduler.last_epoch = self.epoch # do not move
self.stop |= epoch >= self.epochs # stop if exceeded epochs
self.run_callbacks("on_fit_epoch_end")
-gc.collect()
-if MACOS and self.device.type == "mps":
-torch.mps.empty_cache()  # clear unified memory at end of epoch, may help MPS' management of 'unlimited' virtual memory
-else:
-torch.cuda.empty_cache()  # clear GPU memory at end of epoch, may help reduce CUDA out of memory errors
+self._clear_memory()
# Early Stopping
if RANK != -1: # if DDP training
@@ -478,14 +477,29 @@
if self.args.plots:
self.plot_metrics()
self.run_callbacks("on_train_end")
+self._clear_memory()
+self.run_callbacks("teardown")
+def _get_memory(self):
+"""Get accelerator memory utilization in GB."""
+if self.device.type == "mps":
+memory = torch.mps.driver_allocated_memory()
+elif self.device.type == "cpu":
+memory = 0
+else:
+memory = torch.cuda.memory_reserved()
+return memory / 1e9
+def _clear_memory(self):
+"""Clear accelerator memory on different platforms."""
gc.collect()
-if MACOS and self.device.type == "mps":
+if self.device.type == "mps":
torch.mps.empty_cache()
+elif self.device.type == "cpu":
+return
else:
torch.cuda.empty_cache()
-self.run_callbacks("teardown")
def read_results_csv(self):
"""Read results.csv into a dict using pandas."""
import pandas as pd # scope for faster 'import ultralytics'
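For experimentation outside `BaseTrainer`, the two new helpers can be restated as free functions; this is a sketch under the assumption that the installed PyTorch build exposes `torch.mps` (PyTorch >= 2.0):

```python
# Hedged sketch: the diff's _get_memory/_clear_memory logic as standalone functions.
import gc

import torch


def get_memory(device: torch.device) -> float:
    """Return accelerator memory usage in GB for the given device (0 on CPU)."""
    if device.type == "mps":
        memory = torch.mps.driver_allocated_memory()  # unified memory held by the MPS driver, in bytes
    elif device.type == "cpu":
        memory = 0
    else:
        memory = torch.cuda.memory_reserved()  # bytes reserved by the CUDA caching allocator
    return memory / 1e9


def clear_memory(device: torch.device) -> None:
    """Run garbage collection and empty the accelerator cache where one exists."""
    gc.collect()
    if device.type == "mps":
        torch.mps.empty_cache()
    elif device.type == "cpu":
        return  # nothing to clear on CPU
    else:
        torch.cuda.empty_cache()


if torch.backends.mps.is_available():
    device = torch.device("mps")
elif torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

print(f"{get_memory(device):.3g}G")  # same figure the progress bar now shows
clear_memory(device)
```

Returning 0 on CPU and early-returning from the cache clear keeps both the progress-bar column and the end-of-epoch cleanup uniform across CUDA, MPS, and CPU runs, which is the point of replacing the CUDA-only `torch.cuda.memory_reserved()` call.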
