# Mask R-CNN (Swin-T backbone, FPN) config for single-class 'barcode'
# instance segmentation, MMDetection 3.x style (base configs flattened).
# Flattened from:
#   '../_base_/models/mask_rcnn_r50_fpn.py'
#   '../swin/mask-rcnn_swin-t-p4-w7_fpn_1x_coco.py'
# model settings
model = dict(
    type='MaskRCNN',
    # Normalizes with ImageNet mean/std, converts BGR->RGB, pads images and
    # masks to a multiple of 32 so FPN strides divide evenly.
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_mask=True,
        pad_size_divisor=32),
    # Swin-Tiny backbone (embed 96, depths 2-2-6-2, window 7).
    backbone=dict(
        type='SwinTransformer',
        embed_dims=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.2,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        with_cp=False,
        # Convert official Swin checkpoint keys to MMDet naming on load.
        convert_weights=True,
    ),
    # FPN over the four Swin stages; 5th output level for RPN stride 64.
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 384, 768],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        # num_classes=1: single 'barcode' category (see metainfo below).
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=1,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        mask_head=dict(
            type='FCNMaskHead',
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            num_classes=1,
            loss_mask=dict(
                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            mask_size=28,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100,
            mask_thr_binary=0.5)))
# Flattened from '../_base_/datasets/coco_instance.py'
# dataset settings
dataset_type = 'CocoDataset'
# Root holding train.json / val.json and the images (data_prefix is '').
data_root = 'barcode_coco'
# Single-class instance segmentation: only 'barcode'.
metainfo = {
    'classes': ('barcode',),
    'palette': [
        (220, 20, 60),
    ]
}
# NOTE(review): img_norm_cfg is the legacy MMDet 2.x normalization key; in
# this 3.x-style config normalization is handled by model.data_preprocessor
# above, so this dict appears unused here — kept only for reference.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# None -> default local-file backend.
backend_args = None
# Training pipeline: load image -> load boxes+masks -> aspect-ratio-keeping
# resize to fit (1333, 800) -> random horizontal flip -> pack for the model.
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
# Val/test pipeline: deterministic (no flip); annotations are loaded so the
# CocoMetric evaluator can compute bbox/segm mAP.
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have a gt annotation, delete this step.
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
# Training dataloader: batch size 1 per worker process, shuffled; images with
# no GT or shorter side < 32 px are filtered out.
train_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    # Groups images of similar aspect ratio into the same batch.
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        metainfo=metainfo,
        ann_file='train.json',
        data_prefix=dict(img=''),
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=train_pipeline,
        backend_args=backend_args))
# Validation dataloader: sequential, no filtering, test-mode dataset.
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        metainfo=metainfo,
        ann_file='val.json',
        data_prefix=dict(img=''),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
# Testing reuses the validation split and loader.
test_dataloader = val_dataloader
# COCO-style evaluation on both box and mask AP against val.json.
val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + '/val.json',
    metric=['bbox', 'segm'],
    format_only=False,
    backend_args=backend_args)
test_evaluator = val_evaluator
# Flattened from '../_base_/schedules/schedule_1x.py'
# optimizer (original base value, superseded by optim_wrapper below)
# optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
# Training loop: 50 epochs, run validation every 5 epochs.
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=50, val_interval=5)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
    # Linear warmup over the first 500 iterations.
    dict(
        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
    # Step decay at epochs 8 and 11 (inherited from the 1x/12-epoch schedule).
    # Fix: 'end' was 12 while train_cfg.max_epochs is 50, so the scheduler's
    # active window did not cover the run; extended to 50. The LR trajectory
    # is unchanged — it stays at the twice-decayed value after epoch 11.
    # NOTE(review): consider rescaling milestones (e.g. [33, 45]) if a decay
    # later in the 50-epoch run is actually desired.
    dict(
        type='MultiStepLR',
        begin=0,
        end=50,
        by_epoch=True,
        milestones=[8, 11],
        gamma=0.1)
]
# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Automatic LR scaling (disabled). `base_batch_size` is the reference total
# batch size the configured lr corresponds to.
# NOTE(review): base_batch_size is set to 2 here, not the usual 16
# (8 GPUs x 2 imgs/GPU) that lr=0.02 was tuned for, and the dataloader
# batch_size is 1 — confirm the intended effective batch size before
# enabling auto scaling.
auto_scale_lr = dict(enable=False, base_batch_size=2)
# Flattened from '../_base_/default_runtime.py'
default_scope = 'mmdet'
# Standard runtime hooks: timing, logging every 50 iters, LR stepping,
# checkpoint every epoch, distributed-sampler seeding, visualization.
default_hooks = dict(
    timer=dict(type='IterTimerHook'),
    logger=dict(type='LoggerHook', interval=50),
    param_scheduler=dict(type='ParamSchedulerHook'),
    checkpoint=dict(type='CheckpointHook', interval=1),
    sampler_seed=dict(type='DistSamplerSeedHook'),
    visualization=dict(type='DetVisualizationHook'))
env_cfg = dict(
    cudnn_benchmark=False,
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    dist_cfg=dict(backend='nccl'),
)
# Visualization: write artifacts to the local work dir only.
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
    type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)
log_level = 'INFO'
# No pretrained checkpoint loaded; start fresh, do not resume.
load_from = None
resume = False
# lr_config = dict(warmup_iters=1000, step=[8, 11])
# Notes on deviations from the base configs:
#   - data directory changed (see data_root above)
#   - data_preprocessor kept in the default PyTorch style; the commented
#     override below would switch to Caffe-style normalization instead:
# model = dict(
#     # use caffe img_norm
#     data_preprocessor=dict(
#         mean=[103.530, 116.280, 123.675],
#         std=[1.0, 1.0, 1.0],
#         bgr_to_rgb=False))
#   - train data changed