# transforms.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import warnings

import torch
import torch.nn as nn
import torch.nn.functional as F

from torchvision.transforms import Normalize, Resize, ToTensor
  10. class SAM2Transforms(nn.Module):
  11. def __init__(
  12. self, resolution, mask_threshold, max_hole_area=0.0, max_sprinkle_area=0.0
  13. ):
  14. """
  15. Transforms for SAM2.
  16. """
  17. super().__init__()
  18. self.resolution = resolution
  19. self.mask_threshold = mask_threshold
  20. self.max_hole_area = max_hole_area
  21. self.max_sprinkle_area = max_sprinkle_area
  22. self.mean = [0.485, 0.456, 0.406]
  23. self.std = [0.229, 0.224, 0.225]
  24. self.to_tensor = ToTensor()
  25. self.transforms = torch.jit.script(
  26. nn.Sequential(
  27. Resize((self.resolution, self.resolution)),
  28. Normalize(self.mean, self.std),
  29. )
  30. )
  31. def __call__(self, x):
  32. x = self.to_tensor(x)
  33. return self.transforms(x)
  34. def forward_batch(self, img_list):
  35. img_batch = [self.transforms(self.to_tensor(img)) for img in img_list]
  36. img_batch = torch.stack(img_batch, dim=0)
  37. return img_batch
  38. def transform_coords(
  39. self, coords: torch.Tensor, normalize=False, orig_hw=None
  40. ) -> torch.Tensor:
  41. """
  42. Expects a torch tensor with length 2 in the last dimension. The coordinates can be in absolute image or normalized coordinates,
  43. If the coords are in absolute image coordinates, normalize should be set to True and original image size is required.
  44. Returns
  45. Un-normalized coordinates in the range of [0, 1] which is expected by the SAM2 model.
  46. """
  47. if normalize:
  48. assert orig_hw is not None
  49. h, w = orig_hw
  50. coords = coords.clone()
  51. coords[..., 0] = coords[..., 0] / w
  52. coords[..., 1] = coords[..., 1] / h
  53. coords = coords * self.resolution # unnormalize coords
  54. return coords
  55. def transform_boxes(
  56. self, boxes: torch.Tensor, normalize=False, orig_hw=None
  57. ) -> torch.Tensor:
  58. """
  59. Expects a tensor of shape Bx4. The coordinates can be in absolute image or normalized coordinates,
  60. if the coords are in absolute image coordinates, normalize should be set to True and original image size is required.
  61. """
  62. boxes = self.transform_coords(boxes.reshape(-1, 2, 2), normalize, orig_hw)
  63. return boxes
  64. def postprocess_masks(self, masks: torch.Tensor, orig_hw) -> torch.Tensor:
  65. """
  66. Perform PostProcessing on output masks.
  67. """
  68. from sam2.utils.misc import get_connected_components
  69. masks = masks.float()
  70. input_masks = masks
  71. mask_flat = masks.flatten(0, 1).unsqueeze(1) # flatten as 1-channel image
  72. try:
  73. if self.max_hole_area > 0:
  74. # Holes are those connected components in background with area <= self.fill_hole_area
  75. # (background regions are those with mask scores <= self.mask_threshold)
  76. labels, areas = get_connected_components(
  77. mask_flat <= self.mask_threshold
  78. )
  79. is_hole = (labels > 0) & (areas <= self.max_hole_area)
  80. is_hole = is_hole.reshape_as(masks)
  81. # We fill holes with a small positive mask score (10.0) to change them to foreground.
  82. masks = torch.where(is_hole, self.mask_threshold + 10.0, masks)
  83. if self.max_sprinkle_area > 0:
  84. labels, areas = get_connected_components(
  85. mask_flat > self.mask_threshold
  86. )
  87. is_hole = (labels > 0) & (areas <= self.max_sprinkle_area)
  88. is_hole = is_hole.reshape_as(masks)
  89. # We fill holes with negative mask score (-10.0) to change them to background.
  90. masks = torch.where(is_hole, self.mask_threshold - 10.0, masks)
  91. except Exception as e:
  92. # Skip the post-processing step if the CUDA kernel fails
  93. warnings.warn(
  94. f"{e}\n\nSkipping the post-processing step due to the error above. You can "
  95. "still use SAM 2 and it's OK to ignore the error above, although some post-processing "
  96. "functionality may be limited (which doesn't affect the results in most cases; see "
  97. "https://github.com/facebookresearch/segment-anything-2/blob/main/INSTALL.md).",
  98. category=UserWarning,
  99. stacklevel=2,
  100. )
  101. masks = input_masks
  102. masks = F.interpolate(masks, orig_hw, mode="bilinear", align_corners=False)
  103. return masks