# point_sampling.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
# pyre-unsafe
import cv2
import numpy as np
import torch
from PIL import Image as PILImage
from pycocotools import mask as mask_util
from sam3.train.data.sam3_image_dataset import Datapoint
from torchvision.ops import masks_to_boxes


def sample_points_from_rle(rle, n_points, mode, box=None, normalize=True):
    """
    Sample random points from a mask provided in COCO RLE format.
    'mode' is in ["centered", "random_mask", "random_box"]:
        "centered": points are sampled farthest from the mask edges and each other
        "random_mask": points are sampled uniformly from the mask
        "random_box": points are sampled uniformly from the annotation's box
    'box' must be provided if 'mode' is "random_box".
    If 'normalize' is True, points are in [0, 1], relative to the mask's h, w.
    """
    mask = np.ascontiguousarray(mask_util.decode(rle))
    points = sample_points_from_mask(mask, n_points, mode, box)
    if normalize:
        h, w = mask.shape
        norm = np.array([w, h, 1.0])[None, :]
        points = points / norm
    return points
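

# Illustrative usage sketch (not part of the original pipeline); the demo
# values below are assumptions. `mask_util.encode` expects a Fortran-ordered
# uint8 array, and the returned points are normalized (x, y, label) rows:
#
#   demo_mask = np.zeros((8, 8), dtype=np.uint8, order="F")
#   demo_mask[2:6, 2:6] = 1
#   demo_rle = mask_util.encode(demo_mask)
#   pts = sample_points_from_rle(demo_rle, n_points=3, mode="random_mask")
#   # pts.shape == (3, 3); columns are x/w, y/h, and a label of 1.0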


def sample_points_from_mask(mask, n_points, mode, box=None):
    if mode == "centered":
        points = center_positive_sample(mask, n_points)
    elif mode == "random_mask":
        points = uniform_positive_sample(mask, n_points)
    elif mode == "random_box":
        assert box is not None, "'random_box' mode requires a provided box."
        points = uniform_sample_from_box(mask, box, n_points)
    else:
        raise ValueError(f"Unknown point sampling mode {mode}.")
    return points


def uniform_positive_sample(mask, n_points):
    """
    Samples positive points uniformly from the mask. Only integer pixel
    coordinates are sampled.
    """
    # Sampling directly from the uncompressed RLE would be faster but is
    # likely unnecessary.
    mask_points = np.stack(np.nonzero(mask), axis=0).transpose(1, 0)
    assert len(mask_points) > 0, "Can't sample positive points from an empty mask."
    selected_idxs = np.random.randint(low=0, high=len(mask_points), size=n_points)
    selected_points = mask_points[selected_idxs]
    selected_points = selected_points[:, ::-1]  # (y, x) -> (x, y)
    labels = np.ones((len(selected_points), 1))
    selected_points = np.concatenate([selected_points, labels], axis=1)
    return selected_points
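

# For example (toy input, assumed for illustration): if the mask's only
# foreground pixel is at row 4, column 7, every returned row is
# [7.0, 4.0, 1.0] -- the (y, x) -> (x, y) flip plus the positive label.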


def center_positive_sample(mask, n_points):
    """
    Samples points farthest from mask edges (by distance transform),
    with subsequent points also farthest from each other. Each new point
    sampled is treated as an edge for future points. Edges of the image are
    treated as edges of the mask.
    """
    # Pad the mask by one pixel on each side so the distance transform
    # avoids image edges
    padded_mask = np.pad(mask, 1)
    points = []
    for _ in range(n_points):
        # Check the padded mask, since selected points are zeroed out of it
        assert np.max(padded_mask) > 0, "Can't sample positive points from an empty mask."
        dist = cv2.distanceTransform(padded_mask, cv2.DIST_L2, 0)
        point = np.unravel_index(dist.argmax(), dist.shape)
        # Mark the selected point as background so the next point avoids it
        padded_mask[point[0], point[1]] = 0
        points.append(point[::-1])  # (y, x) -> (x, y)
    points = np.stack(points, axis=0)
    points = points - 1  # Subtract the left/top padding of 1
    labels = np.ones((len(points), 1))
    points = np.concatenate([points, labels], axis=1)
    return points
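

# Behavior sketch (assumed toy input): for a 5x5 all-ones mask, the first
# point lands at (2, 2) -- the pixel farthest from every edge -- and zeroing
# it pushes later picks away from both the borders and the points chosen so far.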


def uniform_sample_from_box(mask, box, n_points):
    """
    Sample points uniformly from the provided box. The points' labels
    are determined by the provided mask. Does not guarantee a positive
    point is sampled. The box is assumed unnormalized, in XYXY format.
    Points are sampled at integer values.
    """
    # Since the lower/right edges are exclusive, ceil can be applied to all edges
    int_box = np.ceil(box)
    x = np.random.randint(low=int_box[0], high=int_box[2], size=n_points)
    y = np.random.randint(low=int_box[1], high=int_box[3], size=n_points)
    labels = mask[y, x]
    points = np.stack([x, y, labels], axis=1)
    return points
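

# Sketch (assumed values): for box = [10.0, 20.0, 14.0, 24.0], x is drawn from
# {10, ..., 13} and y from {20, ..., 23}; each label is mask[y, x], so points
# that miss the object come back labeled 0 (negative).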


def rescale_box_xyxy(box, factor, imsize=None):
    """
    Rescale a box provided in unnormalized XYXY format, keeping the center fixed.
    If imsize (h, w) is provided, clamp the result to the image.
    """
    cx, cy = (box[0] + box[2]) / 2, (box[1] + box[3]) / 2
    w, h = box[2] - box[0], box[3] - box[1]
    new_w, new_h = factor * w, factor * h
    new_x0, new_y0 = cx - new_w / 2, cy - new_h / 2
    new_x1, new_y1 = cx + new_w / 2, cy + new_h / 2
    if imsize is not None:
        new_x0 = max(min(new_x0, imsize[1]), 0)
        new_x1 = max(min(new_x1, imsize[1]), 0)
        new_y0 = max(min(new_y0, imsize[0]), 0)
        new_y1 = max(min(new_y1, imsize[0]), 0)
    return [new_x0, new_y0, new_x1, new_y1]
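

# Worked example (assumed numbers): rescale_box_xyxy([10, 10, 30, 50], 1.5)
# keeps the center (20, 30), scales w=20 -> 30 and h=40 -> 60, and returns
# [5.0, 0.0, 35.0, 60.0]; with imsize=(55, 100), y1 would be clamped to 55.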


def noise_box(box, im_size, box_noise_std, box_noise_max, min_box_area):
    if box_noise_std <= 0.0:
        return box
    # Gaussian noise per coordinate, scaled by the matching box side length
    noise = box_noise_std * torch.randn(size=(4,))
    w, h = box[2] - box[0], box[3] - box[1]
    scale_factor = torch.tensor([w, h, w, h])
    noise = noise * scale_factor
    if box_noise_max is not None:
        noise = torch.clamp(noise, -box_noise_max, box_noise_max)
    input_box = box + noise
    # Clamp to the image bounds
    img_clamp = torch.tensor([im_size[1], im_size[0], im_size[1], im_size[0]])
    input_box = torch.maximum(input_box, torch.zeros_like(input_box))
    input_box = torch.minimum(input_box, img_clamp)
    # Fall back to the original box if noising shrank it below the minimum area
    if (input_box[2] - input_box[0]) * (input_box[3] - input_box[1]) <= min_box_area:
        return box
    return input_box
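

# Sketch (assumed numbers): for a 100x80 box with box_noise_std=0.1, the
# x-coordinates move by roughly N(0, 10px) and the y-coordinates by N(0, 8px),
# truncated to +/- box_noise_max pixels and clamped to the image; a result
# whose area falls at or below min_box_area reverts to the original box.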


class RandomGeometricInputsAPI:
    """
    For geometric queries, replaces the input box or points with a random
    one sampled from the GT mask. Segments must be provided for objects
    that are targets of geometric queries, and must be binary masks. Existing
    point and box queries in the datapoint are ignored and completely replaced.
    Points and boxes are sampled in XYXY format in absolute pixel space.
    Geometric queries are currently identified as any query whose query text
    equals a set value.

    Args:
        num_points (int or (int, int)): how many points to sample. If a tuple,
            sample a random number of points uniformly over the inclusive range.
        box_chance (float): fraction of the time a box is sampled. A box replaces
            one sampled point.
        box_noise_std (float): if greater than 0, add noise with this std to the
            sampled boxes. Noise is relative to the length of the box side.
        box_noise_max (int): if not None, truncate any box noise larger than this
            in absolute pixels.
        resample_box_from_mask (bool): if True, any sampled box is determined
            by finding the extrema of the provided mask. If False, the bbox
            provided in the target object is used.
        point_sample_mode (str): one of ["centered", "random_mask", "random_box"],
            controlling how points are sampled:
            "centered": points are sampled farthest from the mask edges and each other
            "random_mask": points are sampled uniformly from the mask
            "random_box": points are sampled uniformly from the annotation's box
            Note that "centered" may be too slow for on-line generation.
        sample_box_scale_factor (float): rescale the box around its center by
            this factor before sampling points in "random_box" mode.
        geometric_query_str (str): the string in query_text that marks a
            geometric query.
        minimum_box_area (float): sampled boxes whose area is this size or smaller
            after noising fall back to the original box. It is the caller's
            responsibility to avoid original boxes that violate necessary area
            bounds.
        concat_points (bool): if True, any sampled points are appended to existing
            ones instead of replacing them.
    """

    def __init__(
        self,
        num_points,
        box_chance,
        box_noise_std=0.0,
        box_noise_max=None,
        minimum_box_area=0.0,
        resample_box_from_mask=False,
        point_sample_mode="random_mask",
        sample_box_scale_factor=1.0,
        geometric_query_str="geometric",
        concat_points=False,
    ):
        self.num_points = num_points
        if not isinstance(self.num_points, int):
            # Convert from the inclusive range to the exclusive range expected
            # by torch.randint; copy to a list first so tuples are also accepted
            self.num_points = list(self.num_points)
            self.num_points[1] += 1
            self.num_points = tuple(self.num_points)
        self.box_chance = box_chance
        self.box_noise_std = box_noise_std
        self.box_noise_max = box_noise_max
        self.minimum_box_area = minimum_box_area
        self.resample_box_from_mask = resample_box_from_mask
        self.point_sample_mode = point_sample_mode
        assert point_sample_mode in [
            "centered",
            "random_mask",
            "random_box",
        ], "Unknown point sample mode."
        self.geometric_query_str = geometric_query_str
        self.concat_points = concat_points
        self.sample_box_scale_factor = sample_box_scale_factor

    def _sample_num_points_and_if_box(self):
        if isinstance(self.num_points, tuple):
            n_points = torch.randint(
                low=self.num_points[0], high=self.num_points[1], size=(1,)
            ).item()
        else:
            n_points = self.num_points
        if self.box_chance > 0.0:
            use_box = torch.rand(size=(1,)).item() < self.box_chance
            n_points -= int(use_box)  # the box stands in for one point
        else:
            use_box = False
        return n_points, use_box

    def _get_original_box(self, target_object):
        if not self.resample_box_from_mask:
            return target_object.bbox
        mask = target_object.segment
        return masks_to_boxes(mask[None, :, :])[0]

    def _get_target_object(self, datapoint, query):
        img = datapoint.images[query.image_id]
        targets = query.object_ids_output
        assert len(targets) == 1, (
            "Geometric queries only support a single target object."
        )
        target_idx = targets[0]
        return img.objects[target_idx]

    def __call__(self, datapoint, **kwargs):
        for query in datapoint.find_queries:
            if query.query_text != self.geometric_query_str:
                continue
            target_object = self._get_target_object(datapoint, query)
            n_points, use_box = self._sample_num_points_and_if_box()
            box = self._get_original_box(target_object)
            mask = target_object.segment
            if n_points > 0:
                # FIXME: converting to numpy and back to reuse code is awkward,
                # but this all runs on the CPU in the dataloader worker anyway,
                # so it should not matter.
                if self.sample_box_scale_factor != 1.0:
                    sample_box = rescale_box_xyxy(
                        box.numpy(), self.sample_box_scale_factor, mask.shape
                    )
                else:
                    sample_box = box.numpy()
                input_points = sample_points_from_mask(
                    mask.numpy(),
                    n_points,
                    self.point_sample_mode,
                    sample_box,
                )
                input_points = torch.as_tensor(input_points)
                input_points = input_points[None, :, :]
                if self.concat_points and query.input_points is not None:
                    input_points = torch.cat([query.input_points, input_points], dim=1)
            else:
                input_points = query.input_points if self.concat_points else None
            if use_box:
                w, h = datapoint.images[query.image_id].size
                input_box = noise_box(
                    box,
                    (h, w),
                    box_noise_std=self.box_noise_std,
                    box_noise_max=self.box_noise_max,
                    min_box_area=self.minimum_box_area,
                )
                input_box = input_box[None, :]
            else:
                input_box = query.input_bbox if self.concat_points else None
            query.input_points = input_points
            query.input_bbox = input_box
        return datapoint
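

# Hypothetical usage sketch (the transform list and datapoint come from the
# surrounding sam3 training config, not from this file; values are assumed):
#
#   geo_transform = RandomGeometricInputsAPI(
#       num_points=(1, 3),   # sample 1-3 points, inclusive
#       box_chance=0.5,      # half the time, one point is swapped for a box
#       box_noise_std=0.1,
#       box_noise_max=20,
#       point_sample_mode="random_mask",
#   )
#   datapoint = geo_transform(datapoint)  # rewrites query.input_points / input_bbox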


class RandomizeInputBbox:
    """
    Simplified version of the geometric transform that only deals with input boxes.
    """

    def __init__(
        self,
        box_noise_std=0.0,
        box_noise_max=None,
        minimum_box_area=0.0,
    ):
        self.box_noise_std = box_noise_std
        self.box_noise_max = box_noise_max
        self.minimum_box_area = minimum_box_area

    def __call__(self, datapoint: Datapoint, **kwargs):
        for query in datapoint.find_queries:
            if query.input_bbox is None:
                continue
            img = datapoint.images[query.image_id].data
            if isinstance(img, PILImage.Image):
                w, h = img.size
            else:
                assert isinstance(img, torch.Tensor)
                h, w = img.shape[-2:]
            for box_id in range(query.input_bbox.shape[0]):
                query.input_bbox[box_id, :] = noise_box(
                    query.input_bbox[box_id, :].view(4),
                    (h, w),
                    box_noise_std=self.box_noise_std,
                    box_noise_max=self.box_noise_max,
                    min_box_area=self.minimum_box_area,
                ).view(1, 4)
        return datapoint
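

if __name__ == "__main__":
    # Minimal smoke test (illustrative sketch only, not part of the training
    # pipeline): exercises the samplers on an assumed synthetic mask.
    demo_mask = np.zeros((16, 16), dtype=np.uint8)
    demo_mask[4:12, 4:12] = 1
    demo_box = np.array([4.0, 4.0, 12.0, 12.0])
    print("uniform:", uniform_positive_sample(demo_mask, n_points=2))
    print("centered:", center_positive_sample(demo_mask, n_points=2))
    print("from box:", uniform_sample_from_box(demo_mask, demo_box, n_points=2))
    print("rescaled:", rescale_box_xyxy(demo_box, 1.25, imsize=(16, 16)))
    print(
        "noised:",
        noise_box(
            torch.from_numpy(demo_box),
            (16, 16),
            box_noise_std=0.1,
            box_noise_max=5,
            min_box_area=1.0,
        ),
    )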