Merge branch 'main' into action-recog

Branch: action-recog
fatih c. akyon committed via GitHub 4 months ago
commit e559bc0119
5 changed files:

1. docs/en/reference/nn/modules/activation.md (16 changes)
2. mkdocs.yml (1 change)
3. ultralytics/data/dataset.py (6 changes)
4. ultralytics/engine/trainer.py (7 changes)
5. ultralytics/nn/modules/activation.py (22 changes)

docs/en/reference/nn/modules/activation.md
@@ -0,0 +1,16 @@
+---
+description: Explore activation functions in Ultralytics, including the Unified activation function and other custom implementations for neural networks.
+keywords: ultralytics, activation functions, neural networks, Unified activation, AGLU, SiLU, ReLU, PyTorch, deep learning, custom activations
+---
+
+# Reference for `ultralytics/nn/modules/activation.py`
+
+!!! Note
+
+    This file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/nn/modules/activation.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/nn/modules/activation.py). If you spot a problem please help fix it by [contributing](https://docs.ultralytics.com/help/contributing/) a [Pull Request](https://github.com/ultralytics/ultralytics/edit/main/ultralytics/nn/modules/activation.py) 🛠. Thank you 🙏!
+
+<br>
+
+## ::: ultralytics.nn.modules.activation.AGLU
+
+<br><br>

mkdocs.yml
@@ -537,6 +537,7 @@ nav:
 - nn:
     - autobackend: reference/nn/autobackend.md
     - modules:
+        - activation: reference/nn/modules/activation.md
         - block: reference/nn/modules/block.md
         - conv: reference/nn/modules/conv.md
         - head: reference/nn/modules/head.md

ultralytics/data/dataset.py
@@ -431,6 +431,12 @@ class ClassificationDataset:
         self.samples = self.samples[: round(len(self.samples) * args.fraction)]
         self.prefix = colorstr(f"{prefix}: ") if prefix else ""
         self.cache_ram = args.cache is True or str(args.cache).lower() == "ram"  # cache images into RAM
+        if self.cache_ram:
+            LOGGER.warning(
+                "WARNING ⚠ Classification `cache_ram` training has known memory leak in "
+                "https://github.com/ultralytics/ultralytics/issues/9824, setting `cache_ram=False`."
+            )
+            self.cache_ram = False
         self.cache_disk = str(args.cache).lower() == "disk"  # cache images on hard drive as uncompressed *.npy files
         self.samples = self.verify_images()  # filter out bad images
         self.samples = [list(x) + [Path(x[0]).with_suffix(".npy"), None] for x in self.samples]  # file, index, npy, im
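For reference, the caching logic this hunk adds reduces to a small decision table over the `cache` argument, with RAM caching force-disabled because of the leak tracked in issue 9824. A minimal sketch; `resolve_cache_flags` is a hypothetical standalone helper for illustration, not an Ultralytics API:

```python
def resolve_cache_flags(cache):
    """Map cache=True/False/"ram"/"disk" to the (cache_ram, cache_disk) pair."""
    cache_ram = cache is True or str(cache).lower() == "ram"
    if cache_ram:
        # RAM caching is overridden to False due to the memory leak in issue 9824
        cache_ram = False
    cache_disk = str(cache).lower() == "disk"
    return cache_ram, cache_disk


print(resolve_cache_flags("ram"))   # (False, False) -- RAM cache requested but disabled
print(resolve_cache_flags(True))    # (False, False) -- same override applies
print(resolve_cache_flags("disk"))  # (False, True)
print(resolve_cache_flags(False))   # (False, False)
```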

ultralytics/engine/trainer.py
@@ -41,7 +41,6 @@ from ultralytics.utils.checks import check_amp, check_file, check_imgsz, check_m
 from ultralytics.utils.dist import ddp_cleanup, generate_ddp_command
 from ultralytics.utils.files import get_latest_run
 from ultralytics.utils.torch_utils import (
-    TORCH_1_13,
     EarlyStopping,
     ModelEMA,
     autocast,
@@ -266,11 +265,7 @@ class BaseTrainer:
         if RANK > -1 and world_size > 1:  # DDP
             dist.broadcast(self.amp, src=0)  # broadcast the tensor from rank 0 to all other ranks (returns None)
         self.amp = bool(self.amp)  # as boolean
-        self.scaler = (
-            torch.amp.GradScaler("cuda", enabled=self.amp)
-            if TORCH_1_13
-            else torch.cuda.amp.GradScaler(enabled=self.amp)
-        )
+        self.scaler = torch.cuda.amp.GradScaler(enabled=self.amp)
         if world_size > 1:
             self.model = nn.parallel.DistributedDataParallel(self.model, device_ids=[RANK], find_unused_parameters=True)
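The retained `torch.cuda.amp.GradScaler` follows the standard mixed-precision pattern: scale the loss before `backward()`, step the optimizer through the scaler (which unscales gradients and skips the step on inf/NaN), then update the scale factor. A minimal self-contained sketch with a toy model, not trainer code; the scaler is a no-op when CUDA is unavailable:

```python
import torch
import torch.nn as nn

device = "cuda" if torch.cuda.is_available() else "cpu"
model = nn.Linear(10, 1).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
scaler = torch.cuda.amp.GradScaler(enabled=device == "cuda")  # disabled => pass-through

x = torch.randn(8, 10, device=device)
y = torch.randn(8, 1, device=device)
optimizer.zero_grad()
with torch.cuda.amp.autocast(enabled=device == "cuda"):
    loss = nn.functional.mse_loss(model(x), y)
scaler.scale(loss).backward()  # scale the loss to avoid fp16 gradient underflow
scaler.step(optimizer)         # unscale gradients, skip step on inf/NaN, else step
scaler.update()                # adjust the scale factor for the next iteration
```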

ultralytics/nn/modules/activation.py
@@ -0,0 +1,22 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+"""Activation modules."""
+
+import torch
+import torch.nn as nn
+
+
+class AGLU(nn.Module):
+    """Unified activation function module from https://github.com/kostas1515/AGLU."""
+
+    def __init__(self, device=None, dtype=None) -> None:
+        """Initialize the Unified activation function."""
+        super().__init__()
+        self.act = nn.Softplus(beta=-1.0)
+        self.lambd = nn.Parameter(nn.init.uniform_(torch.empty(1, device=device, dtype=dtype)))  # lambda parameter
+        self.kappa = nn.Parameter(nn.init.uniform_(torch.empty(1, device=device, dtype=dtype)))  # kappa parameter
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        """Compute the forward pass of the Unified activation function."""
+        lam = torch.clamp(self.lambd, min=0.0001)
+        y = torch.exp((1 / lam) * self.act((self.kappa * x) - torch.log(lam)))
+        return y  # for AGLU simply return y * input
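Since `nn.Softplus(beta=-1.0)` computes `-ln(1 + e^(-z))`, the forward pass above simplifies algebraically to `y = (1 + lambd * exp(-kappa * x)) ** (-1 / lambd)`, a generalized logistic curve whose shape is learned through `lambd` and `kappa`. A minimal usage sketch, assuming the module path added in this commit:

```python
import torch

from ultralytics.nn.modules.activation import AGLU

act = AGLU()
x = torch.linspace(-3.0, 3.0, steps=7)
print(act(x))  # outputs in (0, 1): a sigmoid-like gate with learnable shape
```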