segment_anything_model.py

import collections
import threading

import imgviz
import numpy as np
import onnxruntime
import skimage

from ..logger import logger
from . import _utils


class SegmentAnythingModel:
    def __init__(self, encoder_path, decoder_path):
        self._image_size = 1024

        # ONNX Runtime sessions for the SAM image encoder and prompt decoder.
        self._encoder_session = onnxruntime.InferenceSession(encoder_path)
        self._decoder_session = onnxruntime.InferenceSession(decoder_path)

        # Cache of image embeddings, keyed by the raw image bytes.
        self._lock = threading.Lock()
        self._image_embedding_cache = collections.OrderedDict()

        self._thread = None

    def set_image(self, image: np.ndarray):
        with self._lock:
            self._image = image
            self._image_embedding = self._image_embedding_cache.get(
                self._image.tobytes()
            )

        # If the embedding is not cached yet, compute it in a background thread.
        if self._image_embedding is None:
            self._thread = threading.Thread(
                target=self._compute_and_cache_image_embedding
            )
            self._thread.start()

    def _compute_and_cache_image_embedding(self):
        with self._lock:
            logger.debug("Computing image embedding...")
            self._image_embedding = _compute_image_embedding(
                image_size=self._image_size,
                encoder_session=self._encoder_session,
                image=self._image,
            )
            # Keep the cache bounded; evict the oldest embedding first.
            if len(self._image_embedding_cache) > 10:
                self._image_embedding_cache.popitem(last=False)
            self._image_embedding_cache[self._image.tobytes()] = self._image_embedding
            logger.debug("Done computing image embedding.")

    def _get_image_embedding(self):
        # Wait for a pending embedding computation before reading the result.
        if self._thread is not None:
            self._thread.join()
            self._thread = None
        with self._lock:
            return self._image_embedding

    def predict_mask_from_points(self, points, point_labels):
        return _compute_mask_from_points(
            image_size=self._image_size,
            decoder_session=self._decoder_session,
            image=self._image,
            image_embedding=self._get_image_embedding(),
            points=points,
            point_labels=point_labels,
        )

    def predict_polygon_from_points(self, points, point_labels):
        mask = self.predict_mask_from_points(points=points, point_labels=point_labels)
        return _utils.compute_polygon_from_mask(mask=mask)


def _compute_scale_to_resize_image(image_size, image):
    # Scale so that the longer image side becomes `image_size` pixels.
    height, width = image.shape[:2]
    if width > height:
        scale = image_size / width
        new_height = int(round(height * scale))
        new_width = image_size
    else:
        scale = image_size / height
        new_height = image_size
        new_width = int(round(width * scale))
    return scale, new_height, new_width


def _resize_image(image_size, image):
    scale, new_height, new_width = _compute_scale_to_resize_image(
        image_size=image_size, image=image
    )
    scaled_image = imgviz.resize(
        image,
        height=new_height,
        width=new_width,
        backend="pillow",
    ).astype(np.float32)
    return scale, scaled_image


def _compute_image_embedding(image_size, encoder_session, image):
    image = imgviz.asrgb(image)

    # Resize so the longer side is `image_size`, normalize with SAM's pixel
    # mean/std, then zero-pad the shorter side to a square encoder input.
    scale, x = _resize_image(image_size, image)
    x = (x - np.array([123.675, 116.28, 103.53], dtype=np.float32)) / np.array(
        [58.395, 57.12, 57.375], dtype=np.float32
    )
    x = np.pad(
        x,
        (
            (0, image_size - x.shape[0]),
            (0, image_size - x.shape[1]),
            (0, 0),
        ),
    )

    # HWC -> NCHW, then run the image encoder.
    x = x.transpose(2, 0, 1)[None, :, :, :]
    output = encoder_session.run(output_names=None, input_feed={"x": x})
    image_embedding = output[0]
    return image_embedding


def _compute_mask_from_points(
    image_size, decoder_session, image, image_embedding, points, point_labels
):
    input_point = np.array(points, dtype=np.float32)
    input_label = np.array(point_labels, dtype=np.int32)

    # Append the padding point ([0, 0] with label -1) expected by the SAM
    # ONNX decoder when no box prompt is given.
    onnx_coord = np.concatenate([input_point, np.array([[0.0, 0.0]])], axis=0)[
        None, :, :
    ]
    onnx_label = np.concatenate([input_label, np.array([-1])], axis=0)[None, :].astype(
        np.float32
    )

    # Map point coordinates from the original image to the resized image.
    scale, new_height, new_width = _compute_scale_to_resize_image(
        image_size=image_size, image=image
    )
    onnx_coord = (
        onnx_coord.astype(float)
        * (new_width / image.shape[1], new_height / image.shape[0])
    ).astype(np.float32)

    # No low-resolution mask prompt is supplied.
    onnx_mask_input = np.zeros((1, 1, 256, 256), dtype=np.float32)
    onnx_has_mask_input = np.array([-1], dtype=np.float32)

    decoder_inputs = {
        "image_embeddings": image_embedding,
        "point_coords": onnx_coord,
        "point_labels": onnx_label,
        "mask_input": onnx_mask_input,
        "has_mask_input": onnx_has_mask_input,
        "orig_im_size": np.array(image.shape[:2], dtype=np.float32),
    }
    masks, _, _ = decoder_session.run(None, decoder_inputs)
    mask = masks[0, 0]  # (1, 1, H, W) -> (H, W)
    mask = mask > 0.0

    # Drop connected components smaller than 5% of the mask area.
    MIN_SIZE_RATIO = 0.05
    skimage.morphology.remove_small_objects(
        mask, min_size=mask.sum() * MIN_SIZE_RATIO, out=mask
    )

    if 0:  # debug: visualize the predicted mask over the image
        imgviz.io.imsave("mask.jpg", imgviz.label2rgb(mask, imgviz.rgb2gray(image)))

    return mask
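

# Usage sketch (not part of the original module): a minimal, hedged example of
# driving SegmentAnythingModel. The ONNX paths, image path, and click
# coordinates below are hypothetical placeholders; point_labels follow the SAM
# convention of 1 for a foreground click and 0 for a background click.
#
#     model = SegmentAnythingModel(
#         encoder_path="sam_encoder.onnx",  # hypothetical path
#         decoder_path="sam_decoder.onnx",  # hypothetical path
#     )
#     image = imgviz.io.imread("example.jpg")  # hypothetical HWC uint8 image
#     model.set_image(image)  # embedding is computed in a background thread
#
#     # A single foreground click at (x=100, y=150):
#     mask = model.predict_mask_from_points(points=[[100, 150]], point_labels=[1])
#     polygon = model.predict_polygon_from_points(points=[[100, 150]], point_labels=[1])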