diff --git a/ultralytics/engine/trainer.py b/ultralytics/engine/trainer.py
index 4415ba94eb..7184082240 100644
--- a/ultralytics/engine/trainer.py
+++ b/ultralytics/engine/trainer.py
@@ -41,7 +41,6 @@ from ultralytics.utils.checks import check_amp, check_file, check_imgsz, check_m
 from ultralytics.utils.dist import ddp_cleanup, generate_ddp_command
 from ultralytics.utils.files import get_latest_run
 from ultralytics.utils.torch_utils import (
-    TORCH_1_13,
     EarlyStopping,
     ModelEMA,
     autocast,
@@ -266,11 +265,7 @@ class BaseTrainer:
         if RANK > -1 and world_size > 1:  # DDP
             dist.broadcast(self.amp, src=0)  # broadcast the tensor from rank 0 to all other ranks (returns None)
         self.amp = bool(self.amp)  # as boolean
-        self.scaler = (
-            torch.amp.GradScaler("cuda", enabled=self.amp)
-            if TORCH_1_13
-            else torch.cuda.amp.GradScaler(enabled=self.amp)
-        )
+        self.scaler = torch.cuda.amp.GradScaler(enabled=self.amp)
         if world_size > 1:
             self.model = nn.parallel.DistributedDataParallel(self.model, device_ids=[RANK], find_unused_parameters=True)
 
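Note: the second hunk removes the TORCH_1_13 version gate and constructs the legacy torch.cuda.amp.GradScaler unconditionally. For context, below is a minimal standalone sketch (not the patch's code) of a feature-detection alternative that picks whichever GradScaler spelling the installed torch actually exposes; amp_enabled is a hypothetical stand-in for BaseTrainer's self.amp flag.

import torch

# Hypothetical stand-in for the trainer's self.amp; enable AMP only when CUDA is present.
amp_enabled = torch.cuda.is_available()

# Probe for the newer generic class rather than pinning a version string:
# recent torch releases expose torch.amp.GradScaler(device, ...), while older
# ones only provide the CUDA-specific torch.cuda.amp.GradScaler.
if hasattr(torch.amp, "GradScaler"):
    scaler = torch.amp.GradScaler("cuda", enabled=amp_enabled)
else:
    scaler = torch.cuda.amp.GradScaler(enabled=amp_enabled)

# Standard AMP training-step pattern with either scaler (torch API):
#   scaler.scale(loss).backward()
#   scaler.step(optimizer)
#   scaler.update()

Probing for the attribute sidesteps guessing exactly which release introduced the torch.amp spelling, which is why a hasattr check is often more robust than a hand-maintained version flag like TORCH_1_13.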