# cityscapes_metric.py
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import shutil
import tempfile
from collections import OrderedDict
from typing import Dict, Optional, Sequence

import mmcv
import numpy as np
from mmengine.dist import is_main_process
from mmengine.evaluator import BaseMetric
from mmengine.logging import MMLogger

from mmdet.registry import METRICS

try:
    import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval  # noqa: E501
    import cityscapesscripts.helpers.labels as CSLabels

    from mmdet.evaluation.functional import evaluateImgLists
    HAS_CITYSCAPESAPI = True
except ImportError:
    HAS_CITYSCAPESAPI = False
@METRICS.register_module()
class CityScapesMetric(BaseMetric):
    """CityScapes metric for instance segmentation.

    Args:
        outfile_prefix (str): The prefix of the txt and png files. The txt
            and png files will be saved in a directory whose path is
            "outfile_prefix/results/".
        seg_prefix (str, optional): Path to the directory which contains the
            cityscapes instance segmentation masks. It is necessary for
            training and validation. It can be None when running inference
            on the test dataset. Defaults to None.
        format_only (bool): Format the output results without performing
            evaluation. It is useful when you want to format the results
            to a specific format and submit them to the test server.
            Defaults to False.
        collect_device (str): Device name used for collecting results from
            different ranks during distributed training. Must be 'cpu' or
            'gpu'. Defaults to 'cpu'.
        prefix (str, optional): The prefix that will be added to the metric
            names to disambiguate homonymous metrics of different evaluators.
            If prefix is not provided in the argument, self.default_prefix
            will be used instead. Defaults to None.
        dump_matches (bool): Whether to dump the matches.json file during
            evaluation. Defaults to False.
        file_client_args (dict, optional): Arguments to instantiate the
            corresponding backend in mmdet <= 3.0.0rc6. Defaults to None.
        backend_args (dict, optional): Arguments to instantiate the
            corresponding backend. Defaults to None.
    """
    default_prefix: Optional[str] = 'cityscapes'

    def __init__(self,
                 outfile_prefix: str,
                 seg_prefix: Optional[str] = None,
                 format_only: bool = False,
                 collect_device: str = 'cpu',
                 prefix: Optional[str] = None,
                 dump_matches: bool = False,
                 file_client_args: Optional[dict] = None,
                 backend_args: Optional[dict] = None) -> None:

        if not HAS_CITYSCAPESAPI:
            raise RuntimeError('Failed to import `cityscapesscripts`. '
                               'Please try to install the official '
                               'cityscapesscripts with '
                               '"pip install cityscapesscripts"')
        super().__init__(collect_device=collect_device, prefix=prefix)

        self.tmp_dir = None
        self.format_only = format_only
        if self.format_only:
            assert outfile_prefix is not None, (
                'outfile_prefix must not be None when format_only is True, '
                'otherwise the result files will be saved to a temp '
                'directory which will be cleaned up at the end.')
        else:
            assert seg_prefix is not None, (
                '`seg_prefix` is necessary when computing the CityScapes '
                'metrics')

        if outfile_prefix is None:
            self.tmp_dir = tempfile.TemporaryDirectory()
            self.outfile_prefix = osp.join(self.tmp_dir.name, 'results')
        else:
            # the directory to save predicted instance segmentation masks
            self.outfile_prefix = osp.join(outfile_prefix, 'results')  # type: ignore # yapf: disable # noqa: E501

        dir_name = osp.expanduser(self.outfile_prefix)

        if osp.exists(dir_name) and is_main_process():
            logger: MMLogger = MMLogger.get_current_instance()
            logger.info('remove previous results.')
            shutil.rmtree(dir_name)
        os.makedirs(dir_name, exist_ok=True)
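        # e.g. with outfile_prefix='./work_dirs/cityscapes_metric' (a
        # hypothetical path), all txt/png results land in
        # './work_dirs/cityscapes_metric/results/'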
        self.backend_args = backend_args
        if file_client_args is not None:
            raise RuntimeError(
                'The `file_client_args` is deprecated, '
                'please use `backend_args` instead, please refer to '
                'https://github.com/open-mmlab/mmdetection/blob/main/configs/_base_/datasets/coco_detection.py'  # noqa: E501
            )

        self.seg_prefix = seg_prefix
        self.dump_matches = dump_matches
    def __del__(self) -> None:
        """Clean up the results if necessary."""
        if self.tmp_dir is not None:
            self.tmp_dir.cleanup()

    # TODO: data_batch is no longer needed, consider adjusting the
    #  parameter position
    def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
        """Process one batch of data samples and predictions. The processed
        results should be stored in ``self.results``, which will be used to
        compute the metrics when all batches have been processed.

        Args:
            data_batch (dict): A batch of data from the dataloader.
            data_samples (Sequence[dict]): A batch of data samples that
                contain annotations and predictions.
        """
        for data_sample in data_samples:
            # parse pred
            result = dict()
            pred = data_sample['pred_instances']
            filename = data_sample['img_path']
            basename = osp.splitext(osp.basename(filename))[0]
            pred_txt = osp.join(self.outfile_prefix, basename + '_pred.txt')
            result['pred_txt'] = pred_txt
            labels = pred['labels'].cpu().numpy()
            masks = pred['masks'].cpu().numpy().astype(np.uint8)
            if 'mask_scores' in pred:
                # some detectors use different scores for bbox and mask
                mask_scores = pred['mask_scores'].cpu().numpy()
            else:
                mask_scores = pred['scores'].cpu().numpy()

            with open(pred_txt, 'w') as f:
                for i, (label, mask, mask_score) in enumerate(
                        zip(labels, masks, mask_scores)):
                    class_name = self.dataset_meta['classes'][label]
                    class_id = CSLabels.name2label[class_name].id
                    png_filename = osp.join(
                        self.outfile_prefix,
                        basename + f'_{i}_{class_name}.png')
                    mmcv.imwrite(mask, png_filename)
                    f.write(f'{osp.basename(png_filename)} '
                            f'{class_id} {mask_score}\n')
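            # each line of *_pred.txt follows the cityscapes submission
            # format '<mask png basename> <label id> <confidence>', e.g.
            #   frankfurt_000000_000294_leftImg8bit_0_car.png 26 0.9871
            # (example values; 26 is the cityscapes label id of 'car')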
            # parse gt
            gt = dict()
            img_path = filename.replace('leftImg8bit.png',
                                        'gtFine_instanceIds.png')
            gt['file_name'] = img_path.replace('leftImg8bit', 'gtFine')
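            # e.g. '.../leftImg8bit/val/frankfurt/x_leftImg8bit.png' maps to
            # '.../gtFine/val/frankfurt/x_gtFine_instanceIds.png'
            # (hypothetical path following the standard cityscapes layout)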
            self.results.append((gt, result))
    def compute_metrics(self, results: list) -> Dict[str, float]:
        """Compute the metrics from processed results.

        Args:
            results (list): The processed results of each batch.

        Returns:
            Dict[str, float]: The computed metrics. The keys are the names of
            the metrics, and the values are corresponding results.
        """
        logger: MMLogger = MMLogger.get_current_instance()

        if self.format_only:
            logger.info(
                f'results are saved to {osp.dirname(self.outfile_prefix)}')
            return OrderedDict()
        logger.info('starting to compute metrics')

        # set global states in cityscapes evaluation API
        gt_instances_file = osp.join(self.outfile_prefix, 'gtInstances.json')  # type: ignore # yapf: disable # noqa: E501
        CSEval.args.JSONOutput = False
        CSEval.args.colorized = False
        CSEval.args.gtInstancesFile = gt_instances_file

        # split gt and prediction list
        gts, preds = zip(*results)
        groundTruthImgList = [gt['file_name'] for gt in gts]
        predictionImgList = [pred['pred_txt'] for pred in preds]
        CSEval_results = evaluateImgLists(
            predictionImgList,
            groundTruthImgList,
            CSEval.args,
            self.backend_args,
            dump_matches=self.dump_matches)['averages']

        eval_results = OrderedDict()
        eval_results['mAP'] = CSEval_results['allAp']
        eval_results['AP@50'] = CSEval_results['allAp50%']
        return eval_results
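

# A minimal offline smoke test (not part of the original module). This is a
# sketch under the assumptions that `torch` is installed and a local
# cityscapes `gtFine` tree exists at the hypothetical paths below:
if __name__ == '__main__':
    import torch
    metric = CityScapesMetric(
        outfile_prefix='./work_dirs/cityscapes_demo',
        seg_prefix='data/cityscapes/gtFine/val')
    metric.dataset_meta = dict(
        classes=('person', 'rider', 'car', 'truck', 'bus', 'train',
                 'motorcycle', 'bicycle'))
    data_sample = dict(
        img_path=('data/cityscapes/leftImg8bit/val/frankfurt/'
                  'frankfurt_000000_000294_leftImg8bit.png'),
        pred_instances=dict(
            labels=torch.tensor([2]),  # index 2 -> 'car'
            scores=torch.tensor([0.98]),
            masks=torch.zeros(1, 1024, 2048, dtype=torch.uint8)))
    metric.process({}, [data_sample])
    # evaluating requires the matching gtFine files to exist on disk
    print(metric.compute_metrics(metric.results))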