# mask2former_swin-l-p4-w12-384-in21k_8xb2-8e_youtubevis2021.py
_base_ = ['./mask2former_r50_8xb2-8e_youtubevis2021.py']
depths = [2, 2, 18, 2]
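# Swap the base config's ResNet-50 backbone for Swin-L (window size 12,
# 384x384 pre-training resolution) and widen the track head inputs to match.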
model = dict(
    type='Mask2FormerVideo',
    backbone=dict(
        _delete_=True,
        type='SwinTransformer',
        pretrain_img_size=384,
        embed_dims=192,
        depths=depths,
        num_heads=[6, 12, 24, 48],
        window_size=12,
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.3,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        with_cp=False,
        convert_weights=True,
        frozen_stages=-1,
        init_cfg=None),
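    # Swin-L stage output channels (embed_dims * 2**stage): 192, 384, 768, 1536.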
    track_head=dict(
        type='Mask2FormerTrackHead',
        in_channels=[192, 384, 768, 1536],
        num_queries=200),
    init_cfg=dict(
        type='Pretrained',
        checkpoint=  # noqa: E251
        'https://download.openmmlab.com/mmdetection/v3.0/mask2former/'
        'mask2former_swin-l-p4-w12-384-in21k_16xb1-lsj-100e_coco-panoptic/'
        'mask2former_swin-l-p4-w12-384-in21k_16xb1-lsj-100e_coco-panoptic_'
        '20220407_104949-82f8d28d.pth'))
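# Weights are initialised from the image-level Mask2Former Swin-L
# COCO-panoptic checkpoint above before fine-tuning on YouTube-VIS 2021.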

# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
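# lr_mult scales the base learning rate and decay_mult scales the weight decay
# for every parameter whose name matches a key in custom_keys below.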
custom_keys = {
    'backbone': dict(lr_mult=0.1, decay_mult=1.0),
    'backbone.patch_embed.norm': backbone_norm_multi,
    'backbone.norm': backbone_norm_multi,
    'absolute_pos_embed': backbone_embed_multi,
    'relative_position_bias_table': backbone_embed_multi,
    'query_embed': embed_multi,
    'query_feat': embed_multi,
    'level_embed': embed_multi
}
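# MMEngine's default optimizer constructor matches these keys as substrings of
# parameter names, with longer (more specific) keys taking precedence, so
# 'backbone.norm' overrides the plain 'backbone' entry.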
custom_keys.update({
    f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
    for stage_id, num_blocks in enumerate(depths)
    for block_id in range(num_blocks)
})
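# Expands to one entry per Swin block norm layer, 'backbone.stages.0.blocks.0.norm'
# through 'backbone.stages.3.blocks.1.norm' (2 + 2 + 18 + 2 = 24 entries).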
custom_keys.update({
    f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
    for stage_id in range(len(depths) - 1)
})
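# Only stages 0-2 have a downsample (patch merging) layer, hence
# range(len(depths) - 1).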

# optimizer
optim_wrapper = dict(
    paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
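# Typical launch, assuming the standard MMDetection tools layout ('8xb2' in the
# config name means 8 GPUs x 2 samples per GPU):
#   bash tools/dist_train.sh path/to/mask2former_swin-l-p4-w12-384-in21k_8xb2-8e_youtubevis2021.py 8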