base_semseg_dataset.py

# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
from typing import Callable, Dict, List, Optional, Sequence, Union

import mmengine
import mmengine.fileio as fileio
import numpy as np
from mmengine.dataset import BaseDataset, Compose

from mmdet.registry import DATASETS


@DATASETS.register_module()
class BaseSegDataset(BaseDataset):
  12. """Custom dataset for semantic segmentation. An example of file structure
  13. is as followed.
  14. .. code-block:: none
  15. ├── data
  16. │ ├── my_dataset
  17. │ │ ├── img_dir
  18. │ │ │ ├── train
  19. │ │ │ │ ├── xxx{img_suffix}
  20. │ │ │ │ ├── yyy{img_suffix}
  21. │ │ │ │ ├── zzz{img_suffix}
  22. │ │ │ ├── val
  23. │ │ ├── ann_dir
  24. │ │ │ ├── train
  25. │ │ │ │ ├── xxx{seg_map_suffix}
  26. │ │ │ │ ├── yyy{seg_map_suffix}
  27. │ │ │ │ ├── zzz{seg_map_suffix}
  28. │ │ │ ├── val
  29. The img/gt_semantic_seg pair of BaseSegDataset should be of the same
  30. except suffix. A valid img/gt_semantic_seg filename pair should be like
  31. ``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included
  32. in the suffix). If split is given, then ``xxx`` is specified in txt file.
  33. Otherwise, all files in ``img_dir/``and ``ann_dir`` will be loaded.
  34. Please refer to ``docs/en/tutorials/new_dataset.md`` for more details.
  35. Args:
  36. ann_file (str): Annotation file path. Defaults to ''.
  37. metainfo (dict, optional): Meta information for dataset, such as
  38. specify classes to load. Defaults to None.
  39. data_root (str, optional): The root directory for ``data_prefix`` and
  40. ``ann_file``. Defaults to None.
  41. data_prefix (dict, optional): Prefix for training data. Defaults to
  42. dict(img_path=None, seg_map_path=None).
  43. img_suffix (str): Suffix of images. Default: '.jpg'
  44. seg_map_suffix (str): Suffix of segmentation maps. Default: '.png'
  45. filter_cfg (dict, optional): Config for filter data. Defaults to None.
  46. indices (int or Sequence[int], optional): Support using first few
  47. data in annotation file to facilitate training/testing on a smaller
  48. dataset. Defaults to None which means using all ``data_infos``.
  49. serialize_data (bool, optional): Whether to hold memory using
  50. serialized objects, when enabled, data loader workers can use
  51. shared RAM from master process instead of making a copy. Defaults
  52. to True.
  53. pipeline (list, optional): Processing pipeline. Defaults to [].
  54. test_mode (bool, optional): ``test_mode=True`` means in test phase.
  55. Defaults to False.
  56. lazy_init (bool, optional): Whether to load annotation during
  57. instantiation. In some cases, such as visualization, only the meta
  58. information of the dataset is needed, which is not necessary to
  59. load annotation file. ``Basedataset`` can skip load annotations to
  60. save time by set ``lazy_init=True``. Defaults to False.
  61. use_label_map (bool, optional): Whether to use label map.
  62. Defaults to False.
  63. max_refetch (int, optional): If ``Basedataset.prepare_data`` get a
  64. None img. The maximum extra number of cycles to get a valid
  65. image. Defaults to 1000.
  66. backend_args (dict, Optional): Arguments to instantiate a file backend.
  67. See https://mmengine.readthedocs.io/en/latest/api/fileio.htm
  68. for details. Defaults to None.
  69. Notes: mmcv>=2.0.0rc4 required.
  70. """
    METAINFO: dict = dict()

    def __init__(self,
                 ann_file: str = '',
                 img_suffix='.jpg',
                 seg_map_suffix='.png',
                 metainfo: Optional[dict] = None,
                 data_root: Optional[str] = None,
                 data_prefix: dict = dict(img_path='', seg_map_path=''),
                 filter_cfg: Optional[dict] = None,
                 indices: Optional[Union[int, Sequence[int]]] = None,
                 serialize_data: bool = True,
                 pipeline: List[Union[dict, Callable]] = [],
                 test_mode: bool = False,
                 lazy_init: bool = False,
                 use_label_map: bool = False,
                 max_refetch: int = 1000,
                 backend_args: Optional[dict] = None) -> None:

        self.img_suffix = img_suffix
        self.seg_map_suffix = seg_map_suffix
        self.backend_args = backend_args.copy() if backend_args else None

        self.data_root = data_root
        self.data_prefix = copy.copy(data_prefix)
        self.ann_file = ann_file
        self.filter_cfg = copy.deepcopy(filter_cfg)
        self._indices = indices
        self.serialize_data = serialize_data
        self.test_mode = test_mode
        self.max_refetch = max_refetch
        self.data_list: List[dict] = []
        self.data_bytes: np.ndarray

        # Set meta information.
        self._metainfo = self._load_metainfo(copy.deepcopy(metainfo))

        # Get label map for custom classes
        new_classes = self._metainfo.get('classes', None)
        self.label_map = self.get_label_map(
            new_classes) if use_label_map else None
        self._metainfo.update(dict(label_map=self.label_map))

        # Update palette based on label map or generate palette
        # if it is not defined
        updated_palette = self._update_palette()
        self._metainfo.update(dict(palette=updated_palette))

        # Join paths.
        if self.data_root is not None:
            self._join_prefix()

        # Build pipeline.
        self.pipeline = Compose(pipeline)
        # Fully initialize the dataset.
        if not lazy_init:
            self.full_init()

        if test_mode:
            assert self._metainfo.get('classes') is not None, \
                'dataset metainfo `classes` should be specified when testing'

    @classmethod
    def get_label_map(cls,
                      new_classes: Optional[Sequence] = None
                      ) -> Union[Dict, None]:
        """Require label mapping.

        The ``label_map`` is a dictionary whose keys are the old label ids
        and whose values are the new label ids. It is used for changing pixel
        labels in ``load_annotations``. ``label_map`` is not None if and only
        if the old classes in ``cls.METAINFO`` are not equal to the new
        classes in ``self._metainfo`` and neither of them is None.

        Args:
            new_classes (list, tuple, optional): The new classes name from
                metainfo. Defaults to None.

        Returns:
            dict, optional: The mapping from old classes in cls.METAINFO to
            new classes in self._metainfo.
        """
        old_classes = cls.METAINFO.get('classes', None)
        if (new_classes is not None and old_classes is not None
                and list(new_classes) != list(old_classes)):

            label_map = {}
            if not set(new_classes).issubset(cls.METAINFO['classes']):
                raise ValueError(
                    f'new classes {new_classes} is not a '
                    f'subset of classes {old_classes} in METAINFO.')
            for i, c in enumerate(old_classes):
                if c not in new_classes:
                    # 0 is background
                    label_map[i] = 0
                else:
                    label_map[i] = new_classes.index(c)
            return label_map
        else:
            return None
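
    # A worked example of ``get_label_map`` (illustrative only): assuming a
    # hypothetical subclass whose METAINFO classes are
    # ('background', 'cat', 'dog') and whose user-specified classes are
    # ('background', 'dog'), 'background' maps to new index 0, 'cat' is
    # collapsed to 0 (background), and 'dog' maps to new index 1, so the
    # returned mapping is {0: 0, 1: 0, 2: 1}.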

    def _update_palette(self) -> list:
        """Update palette after loading metainfo.

        If the length of the palette equals the number of classes, just
        return the palette. If the palette is not defined, it will be
        randomly generated. If the classes are customized by the user, the
        matching subset of the palette is returned.

        Returns:
            Sequence: Palette for current dataset.
        """
        palette = self._metainfo.get('palette', [])
        classes = self._metainfo.get('classes', [])
        # palette matches classes
        if len(palette) == len(classes):
            return palette

        if len(palette) == 0:
            # Get random state before setting the seed, and restore the
            # random state later, to prevent loss of randomness, as the
            # palette may be different in each iteration if not specified.
            # See: https://github.com/open-mmlab/mmdetection/issues/5844
            state = np.random.get_state()
            np.random.seed(42)
            # random palette
            new_palette = np.random.randint(
                0, 255, size=(len(classes), 3)).tolist()
            np.random.set_state(state)
        elif len(palette) >= len(classes) and self.label_map is not None:
            new_palette = []
            # return subset of palette
            for old_id, new_id in sorted(
                    self.label_map.items(), key=lambda x: x[1]):
                # 0 is background
                if new_id != 0:
                    new_palette.append(palette[old_id])
            new_palette = type(palette)(new_palette)
        elif len(palette) >= len(classes):
            # Allow the palette to be longer than classes.
            return palette
        else:
            raise ValueError('palette does not match classes '
                             f'as metainfo is {self._metainfo}.')
        return new_palette
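
    # Continuing the illustrative example above for ``_update_palette``: with
    # a hypothetical METAINFO palette [[0, 0, 0], [0, 255, 0], [255, 0, 0]]
    # and the label map {0: 0, 1: 0, 2: 1}, only entries with a non-zero new
    # id keep their color, so the returned palette is [[255, 0, 0]].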

    def load_data_list(self) -> List[dict]:
        """Load annotation from directory or annotation file.

        Returns:
            list[dict]: All data info of dataset.
        """
        data_list = []
        img_dir = self.data_prefix.get('img_path', None)
        ann_dir = self.data_prefix.get('seg_map_path', None)
        if not osp.isdir(self.ann_file) and self.ann_file:
            assert osp.isfile(self.ann_file), \
                f'Failed to load `ann_file` {self.ann_file}'
            lines = mmengine.list_from_file(
                self.ann_file, backend_args=self.backend_args)
            for line in lines:
                img_name = line.strip()
                data_info = dict(
                    img_path=osp.join(img_dir, img_name + self.img_suffix))
                if ann_dir is not None:
                    seg_map = img_name + self.seg_map_suffix
                    data_info['seg_map_path'] = osp.join(ann_dir, seg_map)
                data_info['label_map'] = self.label_map
                data_list.append(data_info)
        else:
            for img in fileio.list_dir_or_file(
                    dir_path=img_dir,
                    list_dir=False,
                    suffix=self.img_suffix,
                    recursive=True,
                    backend_args=self.backend_args):
                data_info = dict(img_path=osp.join(img_dir, img))
                if ann_dir is not None:
                    seg_map = img.replace(self.img_suffix,
                                          self.seg_map_suffix)
                    data_info['seg_map_path'] = osp.join(ann_dir, seg_map)
                data_info['label_map'] = self.label_map
                data_list.append(data_info)
            data_list = sorted(data_list, key=lambda x: x['img_path'])
        return data_list
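

# A minimal usage sketch, not part of the upstream module: a hypothetical
# two-class subclass instantiated with ``lazy_init=True`` so the resolved
# meta information (classes, palette, label_map) can be inspected without any
# image or annotation files on disk. The paths below are placeholders.
if __name__ == '__main__':

    @DATASETS.register_module()
    class ToySegDataset(BaseSegDataset):
        METAINFO = dict(
            classes=('background', 'foreground'),
            palette=[[0, 0, 0], [255, 0, 0]])

    dataset = ToySegDataset(
        data_root='data/my_dataset',
        data_prefix=dict(
            img_path='img_dir/train', seg_map_path='ann_dir/train'),
        img_suffix='.jpg',
        seg_map_suffix='.png',
        lazy_init=True)
    # ``full_init`` (and hence ``load_data_list``) is deferred, so this only
    # prints the resolved meta information.
    print(dataset.metainfo)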