youtubevis2coco.py 5.5 KB

# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
from collections import defaultdict

import mmengine
from tqdm import tqdm


def parse_args():
    parser = argparse.ArgumentParser(
        description='YouTube-VIS to COCO Video format')
    parser.add_argument(
        '-i',
        '--input',
        help='root directory of YouTube-VIS annotations',
    )
    parser.add_argument(
        '-o',
        '--output',
        help='directory to save coco formatted label file',
    )
    parser.add_argument(
        '--version',
        choices=['2019', '2021'],
        help='The version of YouTube-VIS Dataset',
    )
    return parser.parse_args()


def convert_vis(ann_dir, save_dir, dataset_version, mode='train'):
  29. """Convert YouTube-VIS dataset in COCO style.
  30. Args:
  31. ann_dir (str): The path of YouTube-VIS dataset.
  32. save_dir (str): The path to save `VIS`.
  33. dataset_version (str): The version of dataset. Options are '2019',
  34. '2021'.
  35. mode (str): Convert train dataset or validation dataset or test
  36. dataset. Options are 'train', 'valid', 'test'. Default: 'train'.
  37. """
  38. assert dataset_version in ['2019', '2021']
  39. assert mode in ['train', 'valid', 'test']
  40. VIS = defaultdict(list)
  41. records = dict(vid_id=1, img_id=1, ann_id=1, global_instance_id=1)
  42. obj_num_classes = dict()
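    # Load the official annotation file. The two releases use different
    # layouts: 2019 ships a single ``{mode}.json`` per split, while 2021
    # nests an ``instances.json`` under each split directory.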
    if dataset_version == '2019':
        official_anns = mmengine.load(osp.join(ann_dir, f'{mode}.json'))
    elif dataset_version == '2021':
        official_anns = mmengine.load(
            osp.join(ann_dir, mode, 'instances.json'))
    VIS['categories'] = copy.deepcopy(official_anns['categories'])
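    # Only the train split comes with instance annotations; the valid and
    # test splits are converted with videos and images only.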
    has_annotations = mode == 'train'
    if has_annotations:
        vid_to_anns = defaultdict(list)
        for ann_info in official_anns['annotations']:
            vid_to_anns[ann_info['video_id']].append(ann_info)

    video_infos = official_anns['videos']
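    # Walk through every video: register the video itself, then register
    # each frame as a COCO ``image`` and its objects as ``annotations``.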
    for video_info in tqdm(video_infos):
        video_name = video_info['file_names'][0].split(os.sep)[0]
        video = dict(
            id=video_info['id'],
            name=video_name,
            width=video_info['width'],
            height=video_info['height'])
        VIS['videos'].append(video)

        num_frames = len(video_info['file_names'])
        width = video_info['width']
        height = video_info['height']

        if has_annotations:
            ann_infos_in_video = vid_to_anns[video_info['id']]
            instance_id_maps = dict()

        for frame_id in range(num_frames):
            image = dict(
                file_name=video_info['file_names'][frame_id],
                height=height,
                width=width,
                id=records['img_id'],
                frame_id=frame_id,
                video_id=video_info['id'])
            VIS['images'].append(image)
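            # Convert every object annotated in this frame. ``bboxes``,
            # ``segmentations`` and ``areas`` are per-frame lists with
            # ``None`` entries for frames where the object is absent.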
            if has_annotations:
                for ann_info in ann_infos_in_video:
                    bbox = ann_info['bboxes'][frame_id]
                    if bbox is None:
                        continue
                    category_id = ann_info['category_id']
                    track_id = ann_info['id']
                    segmentation = ann_info['segmentations'][frame_id]
                    area = ann_info['areas'][frame_id]
                    assert isinstance(category_id, int)
                    assert isinstance(track_id, int)
                    assert segmentation is not None
                    assert area is not None
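                    # Map the per-track annotation id to a sequential,
                    # globally unique instance id.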
                    if track_id in instance_id_maps:
                        instance_id = instance_id_maps[track_id]
                    else:
                        instance_id = records['global_instance_id']
                        records['global_instance_id'] += 1
                        instance_id_maps[track_id] = instance_id
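                    # One COCO-style annotation per visible object per frame.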
                    ann = dict(
                        id=records['ann_id'],
                        video_id=video_info['id'],
                        image_id=records['img_id'],
                        category_id=category_id,
                        instance_id=instance_id,
                        bbox=bbox,
                        segmentation=segmentation,
                        area=area,
                        iscrowd=ann_info['iscrowd'])
                    if category_id not in obj_num_classes:
                        obj_num_classes[category_id] = 1
                    else:
                        obj_num_classes[category_id] += 1
                    VIS['annotations'].append(ann)
                    records['ann_id'] += 1
            records['img_id'] += 1
        records['vid_id'] += 1
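    # Dump the converted annotations and print summary statistics.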
    if not osp.isdir(save_dir):
        os.makedirs(save_dir)
    mmengine.dump(
        VIS, osp.join(save_dir, f'youtube_vis_{dataset_version}_{mode}.json'))
    print(f'-----YouTube VIS {dataset_version} {mode}------')
    print(f'{records["vid_id"] - 1} videos')
    print(f'{records["img_id"] - 1} images')
    if has_annotations:
        print(f'{records["ann_id"] - 1} objects')
        print(f'{records["global_instance_id"] - 1} instances')
    print('-----------------------')
    if has_annotations:
        for i in range(1, len(VIS['categories']) + 1):
            class_name = VIS['categories'][i - 1]['name']
            # ``get`` guards against classes with no objects in this split.
            print(f'Class {i} {class_name} has '
                  f'{obj_num_classes.get(i, 0)} objects.')


def main():
    args = parse_args()
    for sub_set in ['train', 'valid', 'test']:
        convert_vis(args.input, args.output, args.version, sub_set)


if __name__ == '__main__':
    main()
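# Example usage (a sketch, not part of the original script; the directory
# paths below are placeholders for wherever the official annotation files
# live):
#
#   python youtubevis2coco.py \
#       -i data/youtube_vis_2019/annotations \
#       -o data/youtube_vis_2019/annotations \
#       --version 2019
#
# ``main()`` converts all three splits in turn, writing
# ``youtube_vis_2019_train.json``, ``youtube_vis_2019_valid.json`` and
# ``youtube_vis_2019_test.json`` into the output directory.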