# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
# pyre-unsafe

"""Various utility models"""

import copy
import math
import weakref
from collections.abc import Iterator
from contextlib import AbstractContextManager
from enum import auto, Enum
from typing import Dict, List, Optional, Union

import numpy as np
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from typing_extensions import override


def inverse_sigmoid(x, eps=1e-3):
    """
    The inverse of the sigmoid activation function.

    Note: it may run into numerical issues in fp16 when eps is small.
    """
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)
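

# Illustrative round-trip check for inverse_sigmoid (a hypothetical demo
# helper, not part of the original module): sigmoid(inverse_sigmoid(p))
# recovers p for inputs away from the eps clamp.
def _demo_inverse_sigmoid():
    p = torch.tensor([0.1, 0.5, 0.9])
    x = inverse_sigmoid(p)
    assert torch.allclose(torch.sigmoid(x), p, atol=1e-4)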


class MultiheadAttentionWrapper(nn.MultiheadAttention):
    """nn.MultiheadAttention that always forces need_weights=False, so the
    per-head attention weights are never computed or returned."""

    def forward(self, *args, **kwargs):
        kwargs["need_weights"] = False
        return super().forward(*args, **kwargs)


class DotProductScoring(torch.nn.Module):
    def __init__(
        self,
        d_model,
        d_proj,
        prompt_mlp=None,
        clamp_logits=True,
        clamp_max_val=12.0,
    ):
        super().__init__()
        self.d_proj = d_proj
        assert isinstance(prompt_mlp, torch.nn.Module) or prompt_mlp is None
        self.prompt_mlp = prompt_mlp  # an optional MLP projection for the prompt
        self.prompt_proj = torch.nn.Linear(d_model, d_proj)
        self.hs_proj = torch.nn.Linear(d_model, d_proj)
        self.scale = float(1.0 / np.sqrt(d_proj))
        self.clamp_logits = clamp_logits
        if self.clamp_logits:
            self.clamp_max_val = clamp_max_val

    def mean_pool_text(self, prompt, prompt_mask):
        # is_valid has shape (seq, bs, 1), where 1 is valid and 0 is padding
        is_valid = (~prompt_mask).float().permute(1, 0)[..., None]
        # num_valid has shape (bs, 1)
        num_valid = torch.clamp(torch.sum(is_valid, dim=0), min=1.0)
        # mean pool over all the valid tokens -- pooled_prompt has shape (bs, d_model)
        pooled_prompt = (prompt * is_valid).sum(dim=0) / num_valid
        return pooled_prompt

    def forward(self, hs, prompt, prompt_mask):
        # hs has shape (num_layer, bs, num_query, d_model)
        # prompt has shape (seq, bs, d_model)
        # prompt_mask has shape (bs, seq), where True marks padding and False
        # marks a valid token (it is inverted in mean_pool_text)
        assert hs.dim() == 4 and prompt.dim() == 3 and prompt_mask.dim() == 2
        # apply MLP on prompt if specified
        if self.prompt_mlp is not None:
            prompt = self.prompt_mlp(prompt)
        # first, get the mean-pooled version of the prompt
        pooled_prompt = self.mean_pool_text(prompt, prompt_mask)
        # then, project pooled_prompt and hs to d_proj dimensions
        proj_pooled_prompt = self.prompt_proj(pooled_prompt)  # (bs, d_proj)
        proj_hs = self.hs_proj(hs)  # (num_layer, bs, num_query, d_proj)
        # finally, get dot-product scores of shape (num_layer, bs, num_query, 1)
        scores = torch.matmul(proj_hs, proj_pooled_prompt.unsqueeze(-1))
        scores *= self.scale
        # clamp scores to a max value to avoid numerical issues in loss or matcher
        if self.clamp_logits:
            scores.clamp_(min=-self.clamp_max_val, max=self.clamp_max_val)
        return scores
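

# Minimal shape sketch for DotProductScoring (a hypothetical demo helper;
# the dimensions are illustrative assumptions, not taken from the original):
def _demo_dot_product_scoring():
    num_layer, bs, num_query, seq, d_model = 2, 3, 5, 7, 16
    scoring = DotProductScoring(d_model=d_model, d_proj=8)
    hs = torch.randn(num_layer, bs, num_query, d_model)
    prompt = torch.randn(seq, bs, d_model)
    prompt_mask = torch.zeros(bs, seq, dtype=torch.bool)  # no padding
    scores = scoring(hs, prompt, prompt_mask)
    assert scores.shape == (num_layer, bs, num_query, 1)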


class LayerScale(nn.Module):
    def __init__(
        self,
        dim: int,
        init_values: Union[float, Tensor] = 1e-5,
        inplace: bool = False,
    ) -> None:
        super().__init__()
        self.inplace = inplace
        self.gamma = nn.Parameter(init_values * torch.ones(dim))

    def forward(self, x: Tensor) -> Tensor:
        return x.mul_(self.gamma) if self.inplace else x * self.gamma
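

# LayerScale multiplies the last dimension by a learnable per-channel gamma
# initialized to a small value. A quick sketch (hypothetical demo helper):
def _demo_layer_scale():
    ls = LayerScale(dim=16, init_values=1e-5)
    x = torch.randn(4, 16)
    assert torch.allclose(ls(x), x * 1e-5)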


class LayerNorm2d(nn.Module):
    """LayerNorm over the channel dimension of an NCHW tensor."""

    def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(num_channels))
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        u = x.mean(1, keepdim=True)
        s = (x - u).pow(2).mean(1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.eps)
        x = self.weight[:, None, None] * x + self.bias[:, None, None]
        return x
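

# Sketch of LayerNorm2d on an NCHW feature map (hypothetical demo helper with
# illustrative shapes): each spatial location is normalized across channels.
def _demo_layer_norm_2d():
    ln = LayerNorm2d(num_channels=8)
    x = torch.randn(2, 8, 4, 4)
    y = ln(x)
    assert y.shape == x.shape
    # per-location channel mean is ~0 after normalization
    assert torch.allclose(y.mean(dim=1), torch.zeros(2, 4, 4), atol=1e-5)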


class TransformerWrapper(nn.Module):
    def __init__(
        self,
        encoder,
        decoder,
        d_model: int,
        two_stage_type="none",  # only ["none"] is supported for now
        pos_enc_at_input_dec=True,
    ):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.num_queries = decoder.num_queries if decoder is not None else None
        self.pos_enc_at_input_dec = pos_enc_at_input_dec
        # for two stage
        assert two_stage_type in ["none"], "unknown two_stage_type: {}".format(
            two_stage_type
        )
        self.two_stage_type = two_stage_type
        self._reset_parameters()
        self.d_model = d_model

    def _reset_parameters(self):
        for n, p in self.named_parameters():
            if p.dim() > 1:
                if (
                    "box_embed" not in n
                    and "query_embed" not in n
                    and "reference_points" not in n
                ):
                    nn.init.xavier_uniform_(p)


class MLP(nn.Module):
    """Very simple multi-layer perceptron (also called FFN)"""

    def __init__(
        self,
        input_dim: int,
        hidden_dim: int,
        output_dim: int,
        num_layers: int,
        dropout: float = 0.0,
        residual: bool = False,
        out_norm: Optional[nn.Module] = None,
    ):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(
            nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
        )
        self.drop = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
        # whether to add the output as a residual connection to the input
        if residual and input_dim != output_dim:
            raise ValueError("residual is only supported if input_dim == output_dim")
        self.residual = residual
        # whether to apply a normalization layer to the output
        assert isinstance(out_norm, nn.Module) or out_norm is None
        self.out_norm = out_norm or nn.Identity()

    def forward(self, x):
        orig_x = x
        for i, layer in enumerate(self.layers):
            x = self.drop(F.relu(layer(x))) if i < self.num_layers - 1 else layer(x)
        if self.residual:
            x = x + orig_x
        x = self.out_norm(x)
        return x
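

# Quick MLP sketch (hypothetical demo helper with illustrative dimensions):
# ReLU between layers, no activation on the last layer.
def _demo_mlp():
    mlp = MLP(input_dim=16, hidden_dim=32, output_dim=4, num_layers=3)
    x = torch.randn(2, 16)
    assert mlp(x).shape == (2, 4)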


def get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])


def get_clones_seq(module, N):
    return nn.Sequential(*[copy.deepcopy(module) for _ in range(N)])


def get_activation_fn(activation):
    """Return an activation function given a string"""
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return F.gelu
    if activation == "glu":
        return F.glu
    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")


def get_activation_module(activation):
    """Return an activation module class given a string"""
    if activation == "relu":
        return nn.ReLU
    if activation == "gelu":
        return nn.GELU
    if activation == "glu":
        return nn.GLU
    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
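

# Tiny sanity sketch for the factory helpers above (hypothetical demo helper):
def _demo_factories():
    layers = get_clones(nn.Linear(4, 4), N=3)
    assert len(layers) == 3 and layers[0] is not layers[1]  # deep copies
    assert get_activation_fn("relu") is F.relu
    assert get_activation_module("gelu") is nn.GELU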


def get_valid_ratio(mask):
    # mask has shape (bs, H, W), with True at padded positions
    _, H, W = mask.shape
    valid_H = torch.sum(~mask[:, :, 0], 1)
    valid_W = torch.sum(~mask[:, 0, :], 1)
    valid_ratio_h = valid_H.float() / H
    valid_ratio_w = valid_W.float() / W
    valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
    return valid_ratio
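

# Sketch of get_valid_ratio on a padded mask (hypothetical demo helper): an
# image that fills the top-left 3x2 region of a 4x4 padded canvas.
def _demo_get_valid_ratio():
    mask = torch.ones(1, 4, 4, dtype=torch.bool)  # True = padding
    mask[:, :3, :2] = False  # valid region: 3 rows x 2 cols
    ratio = get_valid_ratio(mask)  # (bs, 2), ordered as (w_ratio, h_ratio)
    assert torch.allclose(ratio, torch.tensor([[0.5, 0.75]]))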


def gen_sineembed_for_position(pos_tensor, num_feats=256):
    assert num_feats % 2 == 0
    num_feats = num_feats // 2
    # n_query, bs, _ = pos_tensor.size()
    # sineembed_tensor = torch.zeros(n_query, bs, 256)
    scale = 2 * math.pi
    dim_t = torch.arange(num_feats, dtype=torch.float32, device=pos_tensor.device)
    dim_t = 10000 ** (2 * (torch.div(dim_t, 2, rounding_mode="floor")) / num_feats)
    x_embed = pos_tensor[:, :, 0] * scale
    y_embed = pos_tensor[:, :, 1] * scale
    pos_x = x_embed[:, :, None] / dim_t
    pos_y = y_embed[:, :, None] / dim_t
    pos_x = torch.stack(
        (pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3
    ).flatten(2)
    pos_y = torch.stack(
        (pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3
    ).flatten(2)
    if pos_tensor.size(-1) == 2:
        pos = torch.cat((pos_y, pos_x), dim=2)
    elif pos_tensor.size(-1) == 4:
        w_embed = pos_tensor[:, :, 2] * scale
        pos_w = w_embed[:, :, None] / dim_t
        pos_w = torch.stack(
            (pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3
        ).flatten(2)
        h_embed = pos_tensor[:, :, 3] * scale
        pos_h = h_embed[:, :, None] / dim_t
        pos_h = torch.stack(
            (pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3
        ).flatten(2)
        pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2)
    else:
        raise ValueError("Unknown pos_tensor shape(-1):{}".format(pos_tensor.size(-1)))
    return pos
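

# Shape sketch for gen_sineembed_for_position (hypothetical demo helper with
# illustrative sizes): each coordinate contributes num_feats // 2 sine-cosine
# features, so xy inputs yield num_feats dims and xywh inputs yield twice that.
def _demo_gen_sineembed():
    nq, bs = 5, 2
    xy = torch.rand(nq, bs, 2)
    xywh = torch.rand(nq, bs, 4)
    assert gen_sineembed_for_position(xy, num_feats=256).shape == (nq, bs, 256)
    assert gen_sineembed_for_position(xywh, num_feats=256).shape == (nq, bs, 512)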


class SAM3Output(list):
    """
    A class representing the output of a SAM3 model.

    It provides an iterable interface that supports different iteration modes:
    iterating over all steps per stage, over the last step per stage, or over
    the flattened output.

    Attributes:
        output: The output of the SAM3 model, represented as a list of lists.
        iter_mode: The current iteration mode.

    Example:
        >>> output = [[1, 2], [3, 4], [5, 6]]
        >>> sam3_output = SAM3Output(output)
        >>> for step in sam3_output:
        ...     print(step)
        [1, 2]
        [3, 4]
        [5, 6]
        >>> with SAM3Output.iteration_mode(
        ...     sam3_output, SAM3Output.IterMode.LAST_STEP_PER_STAGE
        ... ) as sam3_last_step_out:
        ...     for step in sam3_last_step_out:
        ...         print(step)
        2
        4
        6
        >>> with SAM3Output.iteration_mode(
        ...     sam3_output, SAM3Output.IterMode.FLATTENED
        ... ) as sam3_flattened_out:
        ...     for step in sam3_flattened_out:
        ...         print(step)
        1
        2
        3
        4
        5
        6
    """

    class IterMode(Enum):
        # Defines the type of iterator over outputs.
        ALL_STEPS_PER_STAGE = auto()
        LAST_STEP_PER_STAGE = auto()
        # Returns each interactivity step as if it were a separate stage
        # (this is used in the SAM3Image model)
        FLATTENED = auto()

    def __init__(
        self,
        output: Optional[List[List[Dict]]] = None,
        iter_mode: IterMode = IterMode.ALL_STEPS_PER_STAGE,
        loss_stages: Optional[List[int]] = None,
    ):
        if output is not None:
            assert (
                isinstance(output, list)
                and len(output) > 0
                and isinstance(output[0], list)
            ), "Expected output to be a list of lists"
            self.output = output
        else:
            self.output = []
        assert isinstance(iter_mode, SAM3Output.IterMode), (
            f"iter_mode should be of enum type 'SAM3Output.IterMode'. Got {type(iter_mode)}"
        )
        self.iter_mode = iter_mode
        # We create a weak reference to self to be used in the lambda functions.
        # This avoids cyclic references and lets SAM3Output be garbage collected.
        self_ref = weakref.ref(self)
        self._mode2iter = {
            SAM3Output.IterMode.ALL_STEPS_PER_STAGE: lambda: iter(self_ref().output),
            SAM3Output.IterMode.LAST_STEP_PER_STAGE: lambda: (
                inner_list[-1] for inner_list in self_ref().output
            ),
            SAM3Output.IterMode.FLATTENED: lambda: (
                element for inner_list in self_ref().output for element in inner_list
            ),
        }
        self.loss_stages = loss_stages

    @override
    def __iter__(self) -> Iterator:
        return self._mode2iter[self.iter_mode]()

    def __getitem__(self, index):
        """
        Returns the item at the specified index.

        Args:
            index (int): The index of the item to return.

        Returns:
            list or element: The item at the specified index.
        """
        assert isinstance(index, int), f"index should be an integer. Got {type(index)}"
        if self.iter_mode == SAM3Output.IterMode.ALL_STEPS_PER_STAGE:
            return self.output[index]
        elif self.iter_mode == SAM3Output.IterMode.LAST_STEP_PER_STAGE:
            return self.output[index][-1]
        elif self.iter_mode == SAM3Output.IterMode.FLATTENED:
            if index == -1:
                return self.output[-1][-1]
            else:
                flattened_output = sum(self.output, [])
                return flattened_output[index]

    class _IterationMode(AbstractContextManager):
        """
        A context manager that temporarily changes the iteration mode of a
        SAM3Output object. This class is used internally by the
        SAM3Output.iteration_mode method.
        """

        def __init__(
            self, model_output: "SAM3Output", iter_mode: "SAM3Output.IterMode"
        ):
            self._model_output = model_output
            self._orig_iter_mode = model_output.iter_mode
            self._new_iter_mode = iter_mode

        @override
        def __enter__(self) -> "SAM3Output":
            self._model_output.iter_mode = self._new_iter_mode
            return self._model_output

        @override
        def __exit__(self, exc_type, exc_value, traceback):
            self._model_output.iter_mode = self._orig_iter_mode
            return super().__exit__(exc_type, exc_value, traceback)

    @staticmethod
    def iteration_mode(
        model_output: "SAM3Output", iter_mode: IterMode
    ) -> _IterationMode:
        """
        Returns a context manager that temporarily changes the iteration mode
        of the SAM3Output object.

        Args:
            model_output: The SAM3Output object.
            iter_mode: The new iteration mode.

        Returns:
            SAM3Output._IterationMode: A context manager that changes the
            iteration mode of the SAM3Output object.
        """
        return SAM3Output._IterationMode(model_output=model_output, iter_mode=iter_mode)

    def append(self, item: list):
        assert isinstance(item, list), (
            f"Only list items are supported. Got {type(item)}"
        )
        self.output.append(item)

    def __repr__(self):
        return self.output.__repr__()

    def __len__(self):
        if self.iter_mode in [
            SAM3Output.IterMode.ALL_STEPS_PER_STAGE,
            SAM3Output.IterMode.LAST_STEP_PER_STAGE,
        ]:
            return len(self.output)
        elif self.iter_mode == SAM3Output.IterMode.FLATTENED:
            flattened_output = sum(self.output, [])
            return len(flattened_output)
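

# Illustrative use of SAM3Output (hypothetical demo helper; the dict payloads
# are toy placeholders): temporarily switch iteration modes with the context
# manager, which restores the original mode on exit.
def _demo_sam3_output():
    out = SAM3Output([[{"step": 0}, {"step": 1}], [{"step": 0}]])
    assert len(out) == 2  # two stages in the default mode
    with SAM3Output.iteration_mode(out, SAM3Output.IterMode.FLATTENED) as flat:
        assert len(flat) == 3  # three steps total
    assert out.iter_mode == SAM3Output.IterMode.ALL_STEPS_PER_STAGE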