schedule_1x.py

# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.optim.optimizer.optimizer_wrapper import OptimWrapper
from mmengine.optim.scheduler.lr_scheduler import LinearLR, MultiStepLR
from mmengine.runner.loops import EpochBasedTrainLoop, TestLoop, ValLoop
from torch.optim.sgd import SGD

# training schedule for 1x (12 epochs, validating after every epoch)
train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=12, val_interval=1)
val_cfg = dict(type=ValLoop)
test_cfg = dict(type=TestLoop)

# learning rate: linear warmup over the first 500 iterations,
# then step decay at epochs 8 and 11
param_scheduler = [
    dict(type=LinearLR, start_factor=0.001, by_epoch=False, begin=0, end=500),
    dict(
        type=MultiStepLR,
        begin=0,
        end=12,
        by_epoch=True,
        milestones=[8, 11],
        gamma=0.1)
]
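# Worked example of the resulting LR trajectory (derived from the settings
# above and the base lr of 0.02 set in optim_wrapper below):
#   iters 0-500:  ramps linearly from 0.02 * 0.001 = 2e-5 up to 0.02
#   epochs 0-7:   0.02
#   epochs 8-10:  0.02 * 0.1 = 2e-3
#   epoch 11:     0.02 * 0.01 = 2e-4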

# optimizer
optim_wrapper = dict(
    type=OptimWrapper,
    optimizer=dict(type=SGD, lr=0.02, momentum=0.9, weight_decay=0.0001))

# Default setting for scaling LR automatically
#   - `enable`: whether automatic LR scaling is enabled by default.
#   - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
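
# Usage sketch (an illustrative assumption, not part of this file): a
# downstream new-style config can inherit this schedule with mmengine's
# `read_base` and override fields. The 2x values below (24 epochs,
# milestones shifted to [16, 22]) are hypothetical examples.
#
#     from mmengine.config import read_base
#
#     with read_base():
#         from .schedule_1x import *  # pulls in train_cfg, param_scheduler, ...
#
#     # stretch the 1x schedule to 2x: double the epochs, shift decay steps
#     train_cfg.update(max_epochs=24)
#     param_scheduler[1].update(end=24, milestones=[16, 22])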