# sam3_tracker_utils.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
# pyre-unsafe
import numpy as np
import torch
import torch.nn.functional as F

from sam3.model.edt import edt_triton


def sample_box_points(
    masks: torch.Tensor,
    noise: float = 0.1,  # SAM default
    noise_bound: int = 20,  # SAM default
    top_left_label: int = 2,
    bottom_right_label: int = 3,
) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Sample a noised version of the top left and bottom right corners of a given `bbox`.

    Inputs:
    - masks: [B, 1, H, W] tensor
    - noise: noise as a fraction of box width and height, dtype=float
    - noise_bound: maximum amount of noise (in pure pixels), dtype=int

    Returns:
    - box_coords: [B, num_pt, 2], contains (x, y) coordinates of top left and bottom right box corners, dtype=torch.float
    - box_labels: [B, num_pt], label 2 is reserved for top left and 3 for bottom right corners, dtype=torch.int32
    """
    device = masks.device
    box_coords = mask_to_box(masks)
    B, _, H, W = masks.shape
    box_labels = torch.tensor(
        [top_left_label, bottom_right_label], dtype=torch.int, device=device
    ).repeat(B)
    if noise > 0.0:
        if not isinstance(noise_bound, torch.Tensor):
            noise_bound = torch.tensor(noise_bound, device=device)
        bbox_w = box_coords[..., 2] - box_coords[..., 0]
        bbox_h = box_coords[..., 3] - box_coords[..., 1]
        max_dx = torch.min(bbox_w * noise, noise_bound)
        max_dy = torch.min(bbox_h * noise, noise_bound)
        box_noise = 2 * torch.rand(B, 1, 4, device=device) - 1
        box_noise = box_noise * torch.stack((max_dx, max_dy, max_dx, max_dy), dim=-1)
        box_coords = box_coords + box_noise
        img_bounds = (
            torch.tensor([W, H, W, H], device=device) - 1
        )  # uncentered pixel coords
        box_coords.clamp_(torch.zeros_like(img_bounds), img_bounds)  # in-place clamping

    box_coords = box_coords.reshape(-1, 2, 2)  # always 2 points
    box_labels = box_labels.reshape(-1, 2)
    return box_coords, box_labels
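

# --- Illustrative usage (editor's sketch; `_demo_sample_box_points` is not part
# of the original API) ---
# A minimal example of sampling noisy box prompts from a batch of boolean masks;
# shapes follow the docstring above.
def _demo_sample_box_points():
    masks = torch.zeros(2, 1, 128, 128, dtype=torch.bool)
    masks[0, 0, 20:60, 30:90] = True  # one rectangular object per sample
    masks[1, 0, 40:80, 10:50] = True
    coords, labels = sample_box_points(masks)
    assert coords.shape == (2, 2, 2)  # (x, y) for top-left and bottom-right corners
    assert labels.shape == (2, 2)  # label 2 (top left) and 3 (bottom right)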


def mask_to_box(masks: torch.Tensor):
    """
    Compute the bounding box of each input mask.

    Inputs:
    - masks: [B, 1, H, W] tensor
    Returns:
    - box_coords: [B, 1, 4] tensor, contains (x, y) coordinates of the top left and
      bottom right box corners, dtype=torch.int32 (all-zero boxes for empty masks)
    """
    B, _, h, w = masks.shape
    device = masks.device
    mask_area = masks.sum(dim=(-1, -2))
    xs = torch.arange(w, device=device, dtype=torch.int32)
    ys = torch.arange(h, device=device, dtype=torch.int32)
    grid_xs, grid_ys = torch.meshgrid(xs, ys, indexing="xy")
    grid_xs = grid_xs[None, None, ...].expand(B, 1, h, w)
    grid_ys = grid_ys[None, None, ...].expand(B, 1, h, w)
    min_xs, _ = torch.min(torch.where(masks, grid_xs, w).flatten(-2), dim=-1)
    max_xs, _ = torch.max(torch.where(masks, grid_xs, -1).flatten(-2), dim=-1)
    min_ys, _ = torch.min(torch.where(masks, grid_ys, h).flatten(-2), dim=-1)
    max_ys, _ = torch.max(torch.where(masks, grid_ys, -1).flatten(-2), dim=-1)
    bbox_coords = torch.stack((min_xs, min_ys, max_xs, max_ys), dim=-1)
    bbox_coords = torch.where(
        mask_area[..., None] > 0, bbox_coords, torch.zeros_like(bbox_coords)
    )
    return bbox_coords
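

# Editor's note / quick check (illustrative): for a mask with foreground in
# rows 3..4 and columns 5..7, mask_to_box returns [x_min, y_min, x_max, y_max]:
#
#   m = torch.zeros(1, 1, 10, 10, dtype=torch.bool)
#   m[0, 0, 3:5, 5:8] = True
#   mask_to_box(m)  # tensor([[[5, 3, 7, 4]]], dtype=torch.int32)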


def sample_random_points_from_errors(gt_masks, pred_masks, num_pt=1):
    """
    Sample `num_pt` random points (along with their labels) independently from the error regions.

    Inputs:
    - gt_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool
    - pred_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool or None
    - num_pt: int, number of points to sample independently for each of the B error maps

    Outputs:
    - points: [B, num_pt, 2], dtype=torch.float, contains (x, y) coordinates of each sampled point
    - labels: [B, num_pt], dtype=torch.int32, where 1 means positive clicks and 0 means
      negative clicks
    """
    if pred_masks is None:  # if pred_masks is not provided, treat it as empty
        pred_masks = torch.zeros_like(gt_masks)
    assert gt_masks.dtype == torch.bool and gt_masks.size(1) == 1
    assert pred_masks.dtype == torch.bool and pred_masks.shape == gt_masks.shape
    assert num_pt >= 0

    B, _, H_im, W_im = gt_masks.shape
    device = gt_masks.device
    # false positive region, a new point sampled in this region should have
    # negative label to correct the FP error
    fp_masks = ~gt_masks & pred_masks
    # false negative region, a new point sampled in this region should have
    # positive label to correct the FN error
    fn_masks = gt_masks & ~pred_masks
    # whether the prediction completely matches the ground truth on each mask
    all_correct = torch.all((gt_masks == pred_masks).flatten(2), dim=2)
    all_correct = all_correct[..., None, None]

    # channel 0 is the FP map, while channel 1 is the FN map
    pts_noise = torch.rand(B, num_pt, H_im, W_im, 2, device=device)
    # sample a negative new click from the FP region or a positive new click
    # from the FN region, depending on where the maximum falls;
    # in case the predictions are all correct (no FP or FN), we just
    # sample a negative click from the background region
    pts_noise[..., 0] *= fp_masks | (all_correct & ~gt_masks)
    pts_noise[..., 1] *= fn_masks
    pts_idx = pts_noise.flatten(2).argmax(dim=2)
    labels = (pts_idx % 2).to(torch.int32)
    pts_idx = pts_idx // 2
    pts_x = pts_idx % W_im
    pts_y = pts_idx // W_im
    points = torch.stack([pts_x, pts_y], dim=2).to(torch.float)
    return points, labels
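

# --- Illustrative usage (editor's sketch; `_demo_sample_random_points_from_errors`
# is not part of the original API) ---
# Corrective-click simulation: with a GT mask and an empty prediction, every
# error pixel is a false negative, so all sampled clicks come back positive.
def _demo_sample_random_points_from_errors():
    gt = torch.zeros(1, 1, 64, 64, dtype=torch.bool)
    gt[0, 0, 16:48, 16:48] = True
    points, labels = sample_random_points_from_errors(gt, pred_masks=None, num_pt=3)
    assert points.shape == (1, 3, 2) and labels.shape == (1, 3)
    assert bool((labels == 1).all())  # only FN errors exist here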


def sample_one_point_from_error_center(gt_masks, pred_masks, padding=True):
    """
    Sample 1 random point (along with its label) from the center of each error region,
    that is, the point with the largest distance to the boundary of each error region.
    This is the RITM sampling method from
    https://github.com/saic-vul/ritm_interactive_segmentation/blob/master/isegm/inference/clicker.py

    Inputs:
    - gt_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool
    - pred_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool or None
    - padding: if True, pad with a 1-px boundary for the distance transform

    Outputs:
    - points: [B, 1, 2], dtype=torch.float, contains (x, y) coordinates of each sampled point
    - labels: [B, 1], dtype=torch.int32, where 1 means positive clicks and 0 means negative clicks
    """
    if pred_masks is None:
        pred_masks = torch.zeros_like(gt_masks)
    assert gt_masks.dtype == torch.bool and gt_masks.size(1) == 1
    assert pred_masks.dtype == torch.bool and pred_masks.shape == gt_masks.shape

    B, _, H, W = gt_masks.shape
    # false positive region, a new point sampled in this region should have
    # negative label to correct the FP error
    fp_masks = (~gt_masks & pred_masks).squeeze(1)
    # false negative region, a new point sampled in this region should have
    # positive label to correct the FN error
    fn_masks = (gt_masks & ~pred_masks).squeeze(1)

    if padding:
        padded_fp_masks = torch.zeros(
            B, H + 2, W + 2, dtype=fp_masks.dtype, device=fp_masks.device
        )
        padded_fp_masks[:, 1 : H + 1, 1 : W + 1] = fp_masks
        padded_fn_masks = torch.zeros(
            B, H + 2, W + 2, dtype=fp_masks.dtype, device=fp_masks.device
        )
        padded_fn_masks[:, 1 : H + 1, 1 : W + 1] = fn_masks
    else:
        padded_fp_masks = fp_masks
        padded_fn_masks = fn_masks

    # Euclidean distance transform of each error region (distance to its boundary)
    fn_mask_dt = edt_triton(padded_fn_masks)
    fp_mask_dt = edt_triton(padded_fp_masks)
    if padding:
        fn_mask_dt = fn_mask_dt[:, 1:-1, 1:-1]
        fp_mask_dt = fp_mask_dt[:, 1:-1, 1:-1]

    # take the point with the largest distance to its region's boundary; a deeper
    # FN center yields a positive click, a deeper FP center a negative one
    fn_max, fn_argmax = fn_mask_dt.reshape(B, -1).max(dim=-1)
    fp_max, fp_argmax = fp_mask_dt.reshape(B, -1).max(dim=-1)
    is_positive = fn_max > fp_max
    chosen = torch.where(is_positive, fn_argmax, fp_argmax)
    points_x = chosen % W
    points_y = chosen // W
    labels = is_positive.to(torch.int32)  # int32, as promised in the docstring
    points = torch.stack([points_x, points_y], -1).to(torch.float)
    return points.unsqueeze(1), labels.unsqueeze(1)


def sample_one_point_from_error_center_slow(gt_masks, pred_masks, padding=True):
    """
    Sample 1 random point (along with its label) from the center of each error region,
    that is, the point with the largest distance to the boundary of each error region.
    This is the RITM sampling method from
    https://github.com/saic-vul/ritm_interactive_segmentation/blob/master/isegm/inference/clicker.py

    Inputs:
    - gt_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool
    - pred_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool or None
    - padding: if True, pad with a 1-px boundary for the distance transform

    Outputs:
    - points: [B, 1, 2], dtype=torch.float, contains (x, y) coordinates of each sampled point
    - labels: [B, 1], dtype=torch.int32, where 1 means positive clicks and 0 means negative clicks
    """
    import cv2  # delay OpenCV import to avoid an unnecessary dependency

    if pred_masks is None:
        pred_masks = torch.zeros_like(gt_masks)
    assert gt_masks.dtype == torch.bool and gt_masks.size(1) == 1
    assert pred_masks.dtype == torch.bool and pred_masks.shape == gt_masks.shape

    B, _, _, W_im = gt_masks.shape
    device = gt_masks.device
    # false positive region, a new point sampled in this region should have
    # negative label to correct the FP error
    fp_masks = ~gt_masks & pred_masks
    # false negative region, a new point sampled in this region should have
    # positive label to correct the FN error
    fn_masks = gt_masks & ~pred_masks
    fp_masks = fp_masks.cpu().numpy()
    fn_masks = fn_masks.cpu().numpy()

    points = torch.zeros(B, 1, 2, dtype=torch.float)
    labels = torch.ones(B, 1, dtype=torch.int32)
    for b in range(B):
        fn_mask = fn_masks[b, 0]
        fp_mask = fp_masks[b, 0]
        if padding:
            fn_mask = np.pad(fn_mask, ((1, 1), (1, 1)), "constant")
            fp_mask = np.pad(fp_mask, ((1, 1), (1, 1)), "constant")
        # compute the distance of each point in the FN/FP region to its boundary
        fn_mask_dt = cv2.distanceTransform(fn_mask.astype(np.uint8), cv2.DIST_L2, 0)
        fp_mask_dt = cv2.distanceTransform(fp_mask.astype(np.uint8), cv2.DIST_L2, 0)
        if padding:
            fn_mask_dt = fn_mask_dt[1:-1, 1:-1]
            fp_mask_dt = fp_mask_dt[1:-1, 1:-1]
        # take the point in the FN/FP region with the largest distance to its boundary
        fn_mask_dt_flat = fn_mask_dt.reshape(-1)
        fp_mask_dt_flat = fp_mask_dt.reshape(-1)
        fn_argmax = np.argmax(fn_mask_dt_flat)
        fp_argmax = np.argmax(fp_mask_dt_flat)
        is_positive = fn_mask_dt_flat[fn_argmax] > fp_mask_dt_flat[fp_argmax]
        pt_idx = fn_argmax if is_positive else fp_argmax
        points[b, 0, 0] = pt_idx % W_im  # x
        points[b, 0, 1] = pt_idx // W_im  # y
        labels[b, 0] = int(is_positive)

    points = points.to(device)
    labels = labels.to(device)
    return points, labels
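

# --- Illustrative consistency check (editor's sketch; `_check_center_samplers_agree`
# is not part of the original API) ---
# Both samplers implement the same RITM center sampling, so on identical inputs
# they should produce the same click polarity (argmax tie-breaking aside). Note
# the fast path assumes a device supported by edt_triton (triton typically
# requires a GPU), and the slow path requires OpenCV.
def _check_center_samplers_agree():
    gt = torch.zeros(1, 1, 32, 32, dtype=torch.bool)
    gt[0, 0, 8:24, 8:24] = True
    pred = torch.zeros_like(gt)  # empty prediction -> one square FN region
    _, lbl_fast = sample_one_point_from_error_center(gt, pred)
    _, lbl_slow = sample_one_point_from_error_center_slow(gt, pred)
    assert torch.equal(lbl_fast.cpu(), lbl_slow.cpu())  # both positive clicks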


def get_next_point(gt_masks, pred_masks, method):
    """Sample the next corrective click, either uniformly from the error regions
    ("uniform") or at the error-region center as in RITM ("center")."""
    if method == "uniform":
        return sample_random_points_from_errors(gt_masks, pred_masks)
    elif method == "center":
        return sample_one_point_from_error_center(gt_masks, pred_masks)
    else:
        raise ValueError(f"unknown sampling method {method}")


def select_closest_cond_frames(
    frame_idx, cond_frame_outputs, max_cond_frame_num, keep_first_cond_frame=False
):
    """
    Select up to `max_cond_frame_num` conditioning frames from `cond_frame_outputs`
    that are temporally closest to the current frame at `frame_idx`. Here, we take
    - a) the closest conditioning frame before `frame_idx` (if any);
    - b) the closest conditioning frame after `frame_idx` (if any);
    - c) any other temporally closest conditioning frames until reaching a total
         of `max_cond_frame_num` conditioning frames.

    Outputs:
    - selected_outputs: selected items (keys & values) from `cond_frame_outputs`.
    - unselected_outputs: items (keys & values) not selected in `cond_frame_outputs`.
    """
    if max_cond_frame_num == -1 or len(cond_frame_outputs) <= max_cond_frame_num:
        selected_outputs = cond_frame_outputs
        unselected_outputs = {}
    else:
        assert max_cond_frame_num >= 2, "we should allow using 2+ conditioning frames"
        selected_outputs = {}

        if keep_first_cond_frame:
            # keep the earliest conditioning frame before `frame_idx` (if any)
            idx_first = min(
                (t for t in cond_frame_outputs if t < frame_idx), default=None
            )
            if idx_first is None:
                # maybe we are tracking in reverse
                idx_first = max(
                    (t for t in cond_frame_outputs if t > frame_idx), default=None
                )
            if idx_first is not None:
                selected_outputs[idx_first] = cond_frame_outputs[idx_first]

        # the closest conditioning frame before `frame_idx` (if any)
        idx_before = max((t for t in cond_frame_outputs if t < frame_idx), default=None)
        if idx_before is not None:
            selected_outputs[idx_before] = cond_frame_outputs[idx_before]

        # the closest conditioning frame at or after `frame_idx` (if any)
        idx_after = min((t for t in cond_frame_outputs if t >= frame_idx), default=None)
        if idx_after is not None:
            selected_outputs[idx_after] = cond_frame_outputs[idx_after]

        # add other temporally closest conditioning frames until reaching a total
        # of `max_cond_frame_num` conditioning frames.
        num_remain = max_cond_frame_num - len(selected_outputs)
        inds_remain = sorted(
            (t for t in cond_frame_outputs if t not in selected_outputs),
            key=lambda x: abs(x - frame_idx),
        )[:num_remain]
        selected_outputs.update((t, cond_frame_outputs[t]) for t in inds_remain)
        unselected_outputs = {
            t: v for t, v in cond_frame_outputs.items() if t not in selected_outputs
        }

    return selected_outputs, unselected_outputs
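

# --- Worked example (editor's sketch; `_demo_select_closest_cond_frames` is not
# part of the original API) ---
# With conditioning outputs on frames {0, 5, 10, 20} and frame_idx=12,
# max_cond_frame_num=3 keeps the closest frame before (10), the closest frame
# at/after (20), and then the temporally closest remaining frame: 5 is
# |12-5|=7 away vs. |12-0|=12 for frame 0, so 5 is kept and 0 is unselected.
def _demo_select_closest_cond_frames():
    cond = {0: "f0", 5: "f5", 10: "f10", 20: "f20"}
    selected, unselected = select_closest_cond_frames(12, cond, max_cond_frame_num=3)
    assert set(selected) == {5, 10, 20} and set(unselected) == {0}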


def get_1d_sine_pe(pos_inds, dim, temperature=10000):
    """
    Get 1D sine positional embedding as in the original Transformer paper.
    """
    pe_dim = dim // 2
    dim_t = torch.arange(pe_dim, dtype=torch.float32, device=pos_inds.device)
    dim_t = temperature ** (2 * (dim_t // 2) / pe_dim)

    pos_embed = pos_inds.unsqueeze(-1) / dim_t
    pos_embed = torch.cat([pos_embed.sin(), pos_embed.cos()], dim=-1)
    return pos_embed
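

# --- Illustrative usage (editor's sketch; `_demo_get_1d_sine_pe` is not part of
# the original API) ---
# Embedding 4 (possibly fractional) positions into 256-dim features: the first
# dim//2 channels are sines, the last dim//2 are cosines.
def _demo_get_1d_sine_pe():
    pos = torch.tensor([0.0, 1.0, 2.5, 7.0])
    pe = get_1d_sine_pe(pos, dim=256)
    assert pe.shape == (4, 256)
    assert torch.allclose(pe[0, :128], torch.zeros(128))  # sin(0) == 0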


def get_best_gt_match_from_multimasks(pred_multimasks, gt_masks, pred_scores=None):
    """
    Get the mask with the best match to the GT masks (based on IoU) from `pred_multimasks`.
    Optionally, use `pred_scores` to break ties in case all IoUs are zeros.
    """
    assert pred_multimasks.ndim == 4 and gt_masks.ndim == 4
    if pred_multimasks.size(1) == 1:
        return pred_multimasks  # only a single mask channel, nothing to select

    pred_multimasks_binary = pred_multimasks > 0
    area_i = torch.sum(pred_multimasks_binary & gt_masks, dim=(2, 3)).float()
    area_u = torch.sum(pred_multimasks_binary | gt_masks, dim=(2, 3)).float()
    ious = area_i / torch.clamp(area_u, min=1.0)
    # In case all IoUs are zeros (e.g. because the GT mask is empty), use pred_scores
    # to break ties and select the best mask (checked per sample, over all channels)
    if pred_scores is not None:
        has_nonzero_ious = torch.any(ious > 0, dim=-1, keepdim=True).expand_as(ious)
        scores = torch.where(has_nonzero_ious, ious, pred_scores)
    else:
        scores = ious
    # Finally, take the best mask prediction (with the highest score)
    best_scores_inds = torch.argmax(scores, dim=-1)
    batch_inds = torch.arange(scores.size(0), device=scores.device)
    best_pred_mask = pred_multimasks[batch_inds, best_scores_inds].unsqueeze(1)
    return best_pred_mask
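

# --- Illustrative usage (editor's sketch; `_demo_get_best_gt_match_from_multimasks`
# is not part of the original API) ---
# Select, per sample, the multimask channel that best overlaps the GT. Here
# channel 1 covers the GT square exactly, so it is the one returned.
def _demo_get_best_gt_match_from_multimasks():
    gt = torch.zeros(1, 1, 16, 16, dtype=torch.bool)
    gt[0, 0, 4:12, 4:12] = True
    preds = torch.full((1, 3, 16, 16), -1.0)  # 3 candidate mask logit maps
    preds[0, 1, 4:12, 4:12] = 1.0  # channel 1 matches the GT region
    best = get_best_gt_match_from_multimasks(preds, gt)
    assert best.shape == (1, 1, 16, 16)
    assert torch.equal(best[0, 0] > 0, gt[0, 0])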


def fill_holes_in_mask_scores(mask, max_area, fill_holes=True, remove_sprinkles=True):
    """
    A post processor to fill small holes in mask scores with area under `max_area`.
    Holes are those small connected components in either the background or the foreground.

    Note that it relies on the "cc_torch" package to find connected components quickly.
    You can install it via the following command (`TORCH_CUDA_ARCH_LIST="8.0 9.0"`
    covers A100 and H100 GPUs):
    ```
    pip uninstall -y cc_torch; TORCH_CUDA_ARCH_LIST="8.0 9.0" pip install git+https://github.com/ronghanghu/cc_torch
    ```
    Otherwise, it will fall back to a slightly slower triton implementation, or to
    skimage if the tensor is on CPU.
    """
    if max_area <= 0:
        return mask  # nothing to fill in this case

    if fill_holes:
        # We remove small connected components in the background by changing them to
        # foreground with a small positive mask score (0.1).
        mask_bg = mask <= 0
        bg_area_thresh = max_area
        _, areas_bg = _get_connected_components_with_padding(mask_bg)
        small_components_bg = mask_bg & (areas_bg <= bg_area_thresh)
        mask = torch.where(small_components_bg, 0.1, mask)

    if remove_sprinkles:
        # We remove small connected components in the foreground by changing them to
        # background with a small negative mask score (-0.1). Here we only remove
        # connected components whose areas are under both `max_area` and half of the
        # entire mask's area. This removes sprinkles while avoiding filtering out
        # tiny objects that we want to track.
        mask_fg = mask > 0
        fg_area_thresh = torch.sum(mask_fg, dim=(2, 3), keepdim=True, dtype=torch.int32)
        fg_area_thresh.floor_divide_(2).clamp_(max=max_area)
        _, areas_fg = _get_connected_components_with_padding(mask_fg)
        small_components_fg = mask_fg & (areas_fg <= fg_area_thresh)
        mask = torch.where(small_components_fg, -0.1, mask)

    return mask
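

# --- Illustrative usage (editor's sketch; `_demo_fill_holes_in_mask_scores` is not
# part of the original API) ---
# A foreground square with a small interior hole: with a large-enough `max_area`,
# the hole becomes a small background component and is filled with a positive
# score. Assumes sam3.perflib.connected_components is available.
def _demo_fill_holes_in_mask_scores():
    mask = torch.full((1, 1, 32, 32), -1.0)
    mask[0, 0, 4:28, 4:28] = 1.0  # foreground square
    mask[0, 0, 14:16, 14:16] = -1.0  # a 2x2 hole inside the foreground
    out = fill_holes_in_mask_scores(mask, max_area=8)
    assert bool((out[0, 0, 14:16, 14:16] > 0).all())  # hole filled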


def _get_connected_components_with_padding(mask):
    """Get connected components from masks (possibly padding them to an even size)."""
    from sam3.perflib.connected_components import connected_components

    mask = mask.to(torch.uint8)
    _, _, H, W = mask.shape
    # make sure both height and width are even (to be compatible with cc_torch)
    pad_h = H % 2
    pad_w = W % 2
    if pad_h == 0 and pad_w == 0:
        labels, counts = connected_components(mask)
    else:
        # pad the mask to make its height and width even;
        # padding format is (padding_left, padding_right, padding_top, padding_bottom)
        mask_pad = F.pad(mask, (0, pad_w, 0, pad_h), mode="constant", value=0)
        labels, counts = connected_components(mask_pad)
        labels = labels[:, :, :H, :W]
        counts = counts[:, :, :H, :W]
    return labels, counts