[Fix] Fix no attribute 'samples_per_gpu' bug in `auto_scale_lr` (#7862)

* Fix

* update docstring
pull/7864/head
jbwang1997 3 years ago committed by GitHub
parent 1376e77e6e
commit 41ba2f4535
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 8
      mmdet/apis/train.py
  2. 2
      tools/train.py

@@ -96,10 +96,10 @@ def auto_scale_lr(cfg, distributed, logger):
num_gpus = len(cfg.gpu_ids)
# calculate the batch size
batch_size = num_gpus * cfg.data.samples_per_gpu
logger.info(f'You are using {num_gpus} GPU(s) '
f'and {cfg.data.samples_per_gpu} samples per GPU. '
f'Total batch size is {batch_size}.')
samples_per_gpu = cfg.data.train_dataloader.samples_per_gpu
batch_size = num_gpus * samples_per_gpu
logger.info(f'Training with {num_gpus} GPU(s) with {samples_per_gpu} '
f'samples per GPU. The total batch size is {batch_size}.')
if batch_size != base_batch_size:
# scale LR with

@@ -125,7 +125,7 @@ def main():
'"auto_scale_lr.enable" or '
'"auto_scale_lr.base_batch_size" in your'
' configuration file. Please update all the '
'configuration files to mmdet >= 2.23.1.')
'configuration files to mmdet >= 2.24.0.')
# set multi-process settings
setup_multi_processes(cfg)

Loading…
Cancel
Save