# rtmdet_l_8xb32-300e_coco.py
# RTMDet detection config in the MMDetection / MMEngine config format.
  1. _base_ = [
  2. '../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py',
  3. '../_base_/datasets/coco_detection.py', './rtmdet_tta.py'
  4. ]
  5. model = dict(
  6. type='RTMDet',
  7. data_preprocessor=dict(
  8. type='DetDataPreprocessor',
  9. mean=[103.53, 116.28, 123.675],
  10. std=[57.375, 57.12, 58.395],
  11. bgr_to_rgb=False,
  12. batch_augments=None),
  13. backbone=dict(
  14. type='CSPNeXt',
  15. arch='P5',
  16. expand_ratio=0.5,
  17. deepen_factor=1,
  18. widen_factor=1,
  19. channel_attention=True,
  20. norm_cfg=dict(type='SyncBN'),
  21. act_cfg=dict(type='SiLU', inplace=True)),
  22. neck=dict(
  23. type='CSPNeXtPAFPN',
  24. in_channels=[256, 512, 1024],
  25. out_channels=256,
  26. num_csp_blocks=3,
  27. expand_ratio=0.5,
  28. norm_cfg=dict(type='SyncBN'),
  29. act_cfg=dict(type='SiLU', inplace=True)),
  30. bbox_head=dict(
  31. type='RTMDetSepBNHead',
  32. num_classes=1,
  33. in_channels=256,
  34. stacked_convs=2,
  35. feat_channels=256,
  36. anchor_generator=dict(
  37. type='MlvlPointGenerator', offset=0, strides=[8, 16, 32]),
  38. bbox_coder=dict(type='DistancePointBBoxCoder'),
  39. loss_cls=dict(
  40. type='QualityFocalLoss',
  41. use_sigmoid=True,
  42. beta=2.0,
  43. loss_weight=1.0),
  44. loss_bbox=dict(type='SIoULoss', loss_weight=2.0),
  45. with_objectness=False,
  46. exp_on_reg=True,
  47. share_conv=True,
  48. pred_kernel_size=1,
  49. norm_cfg=dict(type='SyncBN'),
  50. act_cfg=dict(type='SiLU', inplace=True)),
  51. train_cfg=dict(
  52. assigner=dict(type='DynamicSoftLabelAssigner', topk=13),
  53. allowed_border=-1,
  54. pos_weight=-1,
  55. debug=False),
  56. test_cfg=dict(
  57. nms_pre=30000,
  58. min_bbox_size=0,
  59. score_thr=0.001,
  60. nms=dict(type='nms', iou_threshold=0.65),
  61. max_per_img=300),
  62. )
  63. train_pipeline = [
  64. dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
  65. dict(type='LoadAnnotations', with_bbox=True),
  66. dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0),
  67. dict(
  68. type='RandomResize',
  69. scale=(1280, 1280),
  70. ratio_range=(0.1, 2.0),
  71. keep_ratio=True),
  72. dict(type='RandomCrop', crop_size=(640, 640)),
  73. dict(type='YOLOXHSVRandomAug'),
  74. dict(type='RandomFlip', prob=0.5),
  75. dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
  76. dict(
  77. type='CachedMixUp',
  78. img_scale=(640, 640),
  79. ratio_range=(1.0, 1.0),
  80. max_cached_images=20,
  81. pad_val=(114, 114, 114)),
  82. dict(type='PackDetInputs')
  83. ]
  84. train_pipeline_stage2 = [
  85. dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
  86. dict(type='LoadAnnotations', with_bbox=True),
  87. dict(
  88. type='RandomResize',
  89. scale=(640, 640),
  90. ratio_range=(0.1, 2.0),
  91. keep_ratio=True),
  92. dict(type='RandomCrop', crop_size=(640, 640)),
  93. dict(type='YOLOXHSVRandomAug'),
  94. dict(type='RandomFlip', prob=0.5),
  95. dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
  96. dict(type='PackDetInputs')
  97. ]
  98. test_pipeline = [
  99. dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
  100. dict(type='Resize', scale=(640, 640), keep_ratio=True),
  101. dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
  102. dict(type='LoadAnnotations', with_bbox=True),
  103. dict(
  104. type='PackDetInputs',
  105. meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
  106. 'scale_factor'))
  107. ]
  108. train_dataloader = dict(
  109. batch_size=16,
  110. num_workers=10,
  111. batch_sampler=None,
  112. pin_memory=True,
  113. dataset=dict(pipeline=train_pipeline))
  114. val_dataloader = dict(
  115. batch_size=1, num_workers=10, dataset=dict(pipeline=test_pipeline))
  116. test_dataloader = val_dataloader
  117. max_epochs = 240
  118. stage2_num_epochs = 20
  119. base_lr = 0.002
  120. interval = 5
  121. train_cfg = dict(
  122. max_epochs=max_epochs,
  123. val_interval=interval,
  124. dynamic_intervals=[(max_epochs - stage2_num_epochs, 1)])
  125. val_evaluator = dict(proposal_nums=(100, 1, 10))
  126. test_evaluator = val_evaluator
  127. # optimizer
  128. optim_wrapper = dict(
  129. _delete_=True,
  130. type='OptimWrapper',
  131. optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05),
  132. paramwise_cfg=dict(
  133. norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))
  134. # learning rate
  135. param_scheduler = [
  136. dict(
  137. type='LinearLR',
  138. start_factor=1.0e-5,
  139. by_epoch=False,
  140. begin=0,
  141. end=1000),
  142. dict(
  143. # use cosine lr from 150 to 300 epoch
  144. type='CosineAnnealingLR',
  145. eta_min=base_lr * 0.05,
  146. begin=max_epochs // 3,
  147. end=max_epochs,
  148. T_max=max_epochs // 3,
  149. by_epoch=True,
  150. convert_to_iter_based=True),
  151. ]
  152. # hooks
  153. default_hooks = dict(
  154. checkpoint=dict(
  155. interval=interval,
  156. save_best='auto',
  157. type='CheckpointHook',
  158. # max_keep_ckpts=3 # only keep latest 3 checkpoints
  159. ))
  160. custom_hooks = [
  161. dict(
  162. type='EMAHook',
  163. ema_type='ExpMomentumEMA',
  164. momentum=0.0002,
  165. update_buffers=True,
  166. priority=49),
  167. dict(
  168. type='PipelineSwitchHook',
  169. switch_epoch=max_epochs - stage2_num_epochs,
  170. switch_pipeline=train_pipeline_stage2)
  171. ]