coco_detection.py

# dataset settings
dataset_type = 'CocoDataset'
data_root = '../../../media/tricolops/T7/Dataset/coco_format_bd/'
metainfo = dict(
    classes=('barcode', ),
    palette=[(220, 20, 60)])
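# Note: `metainfo` overrides the default 80 COCO classes so that CocoDataset
# loads this single-class barcode dataset; `palette` is only the color used
# when visualizing boxes of that class.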
# Example of using a different file client.
# Method 1: simply set the data root and let the file I/O module
# automatically infer the backend from the path prefix
# (LMDB and Memcached are not supported yet).
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: use `backend_args` (`file_client_args` in versions before 3.0.0rc6).
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
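# With `keep_ratio=True`, `Resize` rescales each image so that it fits inside
# 1333 x 800 while preserving its aspect ratio; `RandomFlip` then mirrors the
# image (and its boxes) horizontally with probability 0.5.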
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have ground-truth annotations, remove this step.
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
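# `meta_keys` lists the per-image metadata packed for evaluation, e.g.
# `scale_factor` and `ori_shape`, which are needed to map the predicted
# boxes back onto the original image size.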
train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='Train/Train.json',
        data_prefix=dict(img='Train/'),
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        metainfo=metainfo,
        pipeline=train_pipeline,
        backend_args=backend_args))
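# `AspectRatioBatchSampler` groups images with similar aspect ratios (wider
# than tall vs. taller than wide) into the same batch to reduce padding;
# `filter_cfg` drops training images that have no ground-truth boxes or are
# smaller than 32 px on a side.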
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        metainfo=metainfo,
        ann_file='Val/Val.json',
        data_prefix=dict(img='Val/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader
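# No separate test annotations are configured here, so the validation split
# doubles as the test split.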
val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'Val/Val.json',
    metric='bbox',
    format_only=False,
    backend_args=backend_args)
test_evaluator = val_evaluator
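# `CocoMetric` computes the COCO-style bbox mAP against the Val annotations;
# `format_only=False` means the metrics are actually evaluated rather than
# only dumping prediction files for submission.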
# Inference on the test dataset and formatting of the output results
# for submission:
# test_dataloader = dict(
#     batch_size=1,
#     num_workers=2,
#     persistent_workers=True,
#     drop_last=False,
#     sampler=dict(type='DefaultSampler', shuffle=False),
#     dataset=dict(
#         type=dataset_type,
#         data_root=data_root,
#         ann_file=data_root + 'annotations/image_info_test-dev2017.json',
#         data_prefix=dict(img='test2017/'),
#         test_mode=True,
#         pipeline=test_pipeline))
# test_evaluator = dict(
#     type='CocoMetric',
#     metric='bbox',
#     format_only=True,
#     ann_file=data_root + 'annotations/image_info_test-dev2017.json',
#     outfile_prefix='./work_dirs/coco_detection/test')
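# Usage sketch (file names below are assumptions, not part of this config):
# a model config inherits these dataset settings through MMEngine's `_base_`
# mechanism, e.g. in a hypothetical `faster-rcnn_r50_fpn_1x_barcode.py`:
# _base_ = [
#     './coco_detection.py',        # this file: data, pipelines, evaluators
#     './faster-rcnn_r50_fpn.py',   # model definition (assumed name)
#     './schedule_1x.py',           # optimizer and LR schedule (assumed name)
#     './default_runtime.py',       # logging, checkpoints, hooks (assumed name)
# ]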