basic.py

# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
# pyre-unsafe
"""
Transforms and data augmentation for both image + bbox.
"""
import math
import random
from typing import Iterable

import PIL
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F

from sam3.model.box_ops import box_xyxy_to_cxcywh
from sam3.model.data_misc import interpolate

def crop(image, target, region):
    cropped_image = F.crop(image, *region)

    target = target.copy()
    # region follows torchvision's (top, left, height, width) convention
    i, j, h, w = region

    # should we do something wrt the original size?
    target["size"] = torch.tensor([h, w])

    fields = ["labels", "area", "iscrowd", "positive_map"]

    if "boxes" in target:
        boxes = target["boxes"]
        max_size = torch.as_tensor([w, h], dtype=torch.float32)
        cropped_boxes = boxes - torch.as_tensor([j, i, j, i], dtype=torch.float32)
        cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
        cropped_boxes = cropped_boxes.clamp(min=0)
        area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
        target["boxes"] = cropped_boxes.reshape(-1, 4)
        target["area"] = area
        fields.append("boxes")

    if "input_boxes" in target:
        boxes = target["input_boxes"]
        max_size = torch.as_tensor([w, h], dtype=torch.float32)
        cropped_boxes = boxes - torch.as_tensor([j, i, j, i], dtype=torch.float32)
        cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
        cropped_boxes = cropped_boxes.clamp(min=0)
        target["input_boxes"] = cropped_boxes.reshape(-1, 4)

    if "masks" in target:
        # FIXME should we update the area here if there are no boxes?
        target["masks"] = target["masks"][:, i : i + h, j : j + w]
        fields.append("masks")

    # remove elements whose boxes or masks have zero area
    if "boxes" in target or "masks" in target:
        # favor box selection when deciding which elements to keep;
        # this is compatible with the previous implementation
        if "boxes" in target:
            cropped_boxes = target["boxes"].reshape(-1, 2, 2)
            keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
        else:
            keep = target["masks"].flatten(1).any(1)

        for field in fields:
            if field in target:
                target[field] = target[field][keep]

    return cropped_image, target

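# A worked example of the crop geometry above: region (top=10, left=20, h=100,
# w=200) keeps image rows 10:110 and columns 20:220; a box [25.0, 5.0, 60.0, 50.0]
# is shifted by (-20, -10) to [5.0, -5.0, 40.0, 40.0] and then clipped to
# [5.0, 0.0, 40.0, 40.0]. A box lying entirely outside the region clips to zero
# area and is dropped, together with its entries in the other annotation fields.
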
def hflip(image, target):
    flipped_image = F.hflip(image)

    w, h = image.size

    target = target.copy()
    if "boxes" in target:
        boxes = target["boxes"]
        # (x0, y0, x1, y1) -> (w - x1, y0, w - x0, y1)
        boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor(
            [-1, 1, -1, 1]
        ) + torch.as_tensor([w, 0, w, 0])
        target["boxes"] = boxes

    if "input_boxes" in target:
        boxes = target["input_boxes"]
        boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor(
            [-1, 1, -1, 1]
        ) + torch.as_tensor([w, 0, w, 0])
        target["input_boxes"] = boxes

    if "masks" in target:
        target["masks"] = target["masks"].flip(-1)

    if "text_input" in target:
        # swap "left" and "right" in the text prompt so it still matches the
        # flipped image
        text_input = (
            target["text_input"]
            .replace("left", "[TMP]")
            .replace("right", "left")
            .replace("[TMP]", "right")
        )
        target["text_input"] = text_input

    return flipped_image, target

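# For instance, in a 640-pixel-wide image the box [100.0, 120.0, 300.0, 400.0]
# flips to [340.0, 120.0, 540.0, 400.0]: the new x0 is w minus the old x1 and
# the new x1 is w minus the old x0, while the y coordinates are unchanged.
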
def resize(image, target, size, max_size=None, square=False):
    # size can be min_size (scalar) or (w, h) tuple

    def get_size_with_aspect_ratio(image_size, size, max_size=None):
        w, h = image_size
        if max_size is not None:
            min_original_size = float(min((w, h)))
            max_original_size = float(max((w, h)))
            if max_original_size / min_original_size * size > max_size:
                size = int(round(max_size * min_original_size / max_original_size))

        if (w <= h and w == size) or (h <= w and h == size):
            return (h, w)

        if w < h:
            ow = size
            oh = int(size * h / w)
        else:
            oh = size
            ow = int(size * w / h)

        return (oh, ow)

    def get_size(image_size, size, max_size=None):
        if isinstance(size, (list, tuple)):
            return size[::-1]
        else:
            return get_size_with_aspect_ratio(image_size, size, max_size)

    if square:
        size = size, size
    else:
        size = get_size(image.size, size, max_size)

    rescaled_image = F.resize(image, size)

    if target is None:
        return rescaled_image, None

    ratios = tuple(
        float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)
    )
    ratio_width, ratio_height = ratios

    target = target.copy()
    if "boxes" in target:
        boxes = target["boxes"]
        scaled_boxes = boxes * torch.as_tensor(
            [ratio_width, ratio_height, ratio_width, ratio_height], dtype=torch.float32
        )
        target["boxes"] = scaled_boxes

    if "input_boxes" in target:
        boxes = target["input_boxes"]
        scaled_boxes = boxes * torch.as_tensor(
            [ratio_width, ratio_height, ratio_width, ratio_height], dtype=torch.float32
        )
        target["input_boxes"] = scaled_boxes

    if "area" in target:
        area = target["area"]
        scaled_area = area * (ratio_width * ratio_height)
        target["area"] = scaled_area

    h, w = size
    target["size"] = torch.tensor([h, w])

    if "masks" in target:
        target["masks"] = (
            interpolate(target["masks"][:, None].float(), size, mode="nearest")[:, 0]
            > 0.5
        )

    return rescaled_image, target

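# Numeric illustration of the sizing rules: with size=800 and max_size=1333, a
# 640x480 (w x h) image maps to (oh, ow) = (800, 1066), i.e. the short side
# becomes 800 and the long side stays under max_size, while a 2000x500 image
# would overshoot (2000 / 500 * 800 > 1333), so size is first capped to 333 and
# the output is (333, 1332). Passing a (w, h) tuple bypasses this logic.
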
def pad(image, target, padding):
    if len(padding) == 2:
        # assumes that we only pad on the bottom and right
        padded_image = F.pad(image, (0, 0, padding[0], padding[1]))
    else:
        # left, top, right, bottom
        padded_image = F.pad(image, (padding[0], padding[1], padding[2], padding[3]))

    if target is None:
        return padded_image, None

    target = target.copy()
    w, h = padded_image.size
    # should we do something wrt the original size?
    target["size"] = torch.tensor([h, w])

    if "boxes" in target and len(padding) == 4:
        boxes = target["boxes"]
        boxes = boxes + torch.as_tensor(
            [padding[0], padding[1], padding[0], padding[1]], dtype=torch.float32
        )
        target["boxes"] = boxes

    if "input_boxes" in target and len(padding) == 4:
        boxes = target["input_boxes"]
        boxes = boxes + torch.as_tensor(
            [padding[0], padding[1], padding[0], padding[1]], dtype=torch.float32
        )
        target["input_boxes"] = boxes

    if "masks" in target:
        if len(padding) == 2:
            target["masks"] = torch.nn.functional.pad(
                target["masks"], (0, padding[0], 0, padding[1])
            )
        else:
            target["masks"] = torch.nn.functional.pad(
                target["masks"], (padding[0], padding[2], padding[1], padding[3])
            )

    return padded_image, target

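# Padding semantics in short: a 2-tuple (px, py) pads only the right and bottom
# edges, so boxes keep their coordinates, while a 4-tuple (left, top, right,
# bottom) shifts every box by (+left, +top). E.g. pad(img, target, (8, 4, 0, 0))
# moves the box [100.0, 120.0, 300.0, 400.0] to [108.0, 124.0, 308.0, 404.0].
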
class RandomCrop:
    def __init__(self, size):
        self.size = size

    def __call__(self, img, target):
        region = T.RandomCrop.get_params(img, self.size)
        return crop(img, target, region)

class RandomSizeCrop:
    def __init__(self, min_size: int, max_size: int, respect_boxes: bool = False):
        self.min_size = min_size
        self.max_size = max_size
        self.respect_boxes = respect_boxes  # if True we can't crop a box out

    def __call__(self, img: PIL.Image.Image, target: dict):
        init_boxes = len(target["boxes"])
        init_boxes_tensor = target["boxes"].clone()
        if self.respect_boxes and init_boxes > 0:
            # crop-size bounds, clamped to the image dimensions
            minW, minH, maxW, maxH = (
                min(img.width, self.min_size),
                min(img.height, self.min_size),
                min(img.width, self.max_size),
                min(img.height, self.max_size),
            )
            # to overlap every box, the crop's x-range must reach past the
            # rightmost left edge (minX) and start before the leftmost right
            # edge (maxX), with a 10-pixel margin; same for y with minY/maxY
            minX, minY = (
                target["boxes"][:, 0].max().item() + 10.0,
                target["boxes"][:, 1].max().item() + 10.0,
            )
            minX = min(img.width, minX)
            minY = min(img.height, minY)
            maxX, maxY = (
                target["boxes"][:, 2].min().item() - 10,
                target["boxes"][:, 3].min().item() - 10,
            )
            maxX = max(0.0, maxX)
            maxY = max(0.0, maxY)
            minW = max(minW, minX - maxX)
            minH = max(minH, minY - maxY)
            w = random.uniform(minW, max(minW, maxW))
            h = random.uniform(minH, max(minH, maxH))
            if minX > maxX:
                # i = random.uniform(max(0, minX - w + 1), max(maxX, max(0, minX - w + 1)))
                i = random.uniform(max(0, minX - w), max(maxX, max(0, minX - w)))
            else:
                i = random.uniform(
                    max(0, minX - w + 1), max(maxX - 1, max(0, minX - w + 1))
                )
            if minY > maxY:
                # j = random.uniform(max(0, minY - h + 1), max(maxY, max(0, minY - h + 1)))
                j = random.uniform(max(0, minY - h), max(maxY, max(0, minY - h)))
            else:
                j = random.uniform(
                    max(0, minY - h + 1), max(maxY - 1, max(0, minY - h + 1))
                )
            # i is the horizontal offset and j the vertical one, so the
            # (top, left, height, width) region is [j, i, h, w]
            result_img, result_target = crop(img, target, [j, i, h, w])
            assert len(result_target["boxes"]) == init_boxes, (
                f"img_w={img.width}\timg_h={img.height}\tminX={minX}\tminY={minY}\t"
                f"maxX={maxX}\tmaxY={maxY}\tminW={minW}\tminH={minH}\tmaxW={maxW}\t"
                f"maxH={maxH}\tw={w}\th={h}\ti={i}\tj={j}\t"
                f"init_boxes={init_boxes_tensor}\tresults={result_target['boxes']}"
            )
            return result_img, result_target
        else:
            w = random.randint(self.min_size, min(img.width, self.max_size))
            h = random.randint(self.min_size, min(img.height, self.max_size))
            region = T.RandomCrop.get_params(img, (h, w))
            result_img, result_target = crop(img, target, region)
            return result_img, result_target

class CenterCrop:
    def __init__(self, size):
        self.size = size

    def __call__(self, img, target):
        image_width, image_height = img.size
        crop_height, crop_width = self.size
        crop_top = int(round((image_height - crop_height) / 2.0))
        crop_left = int(round((image_width - crop_width) / 2.0))
        return crop(img, target, (crop_top, crop_left, crop_height, crop_width))

class RandomHorizontalFlip:
    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, img, target):
        if random.random() < self.p:
            return hflip(img, target)
        return img, target

class RandomResize:
    def __init__(self, sizes, max_size=None, square=False):
        if isinstance(sizes, int):
            sizes = (sizes,)
        assert isinstance(sizes, Iterable)
        self.sizes = list(sizes)
        self.max_size = max_size
        self.square = square

    def __call__(self, img, target=None):
        size = random.choice(self.sizes)
        return resize(img, target, size, self.max_size, square=self.square)

class RandomPad:
    def __init__(self, max_pad):
        self.max_pad = max_pad

    def __call__(self, img, target):
        pad_x = random.randint(0, self.max_pad)
        pad_y = random.randint(0, self.max_pad)
        return pad(img, target, (pad_x, pad_y))

class PadToSize:
    def __init__(self, size):
        self.size = size

    def __call__(self, img, target):
        w, h = img.size
        pad_x = self.size - w
        pad_y = self.size - h
        assert pad_x >= 0 and pad_y >= 0
        pad_left = random.randint(0, pad_x)
        pad_right = pad_x - pad_left
        pad_top = random.randint(0, pad_y)
        pad_bottom = pad_y - pad_top
        return pad(img, target, (pad_left, pad_top, pad_right, pad_bottom))

class Identity:
    def __call__(self, img, target):
        return img, target

class RandomSelect:
    """
    Randomly selects between transforms1 and transforms2,
    with probability p for transforms1 and (1 - p) for transforms2.
    """

    def __init__(self, transforms1=None, transforms2=None, p=0.5):
        self.transforms1 = transforms1 or Identity()
        self.transforms2 = transforms2 or Identity()
        self.p = p

    def __call__(self, img, target):
        if random.random() < self.p:
            return self.transforms1(img, target)
        return self.transforms2(img, target)

class ToTensor:
    def __call__(self, img, target):
        return F.to_tensor(img), target

class RandomErasing:
    def __init__(self, *args, **kwargs):
        self.eraser = T.RandomErasing(*args, **kwargs)

    def __call__(self, img, target):
        return self.eraser(img), target

class Normalize:
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, image, target=None):
        image = F.normalize(image, mean=self.mean, std=self.std)
        if target is None:
            return image, None

        target = target.copy()
        h, w = image.shape[-2:]
        if "boxes" in target:
            boxes = target["boxes"]
            boxes = box_xyxy_to_cxcywh(boxes)
            boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
            target["boxes"] = boxes

        if "input_boxes" in target:
            boxes = target["input_boxes"]
            boxes = box_xyxy_to_cxcywh(boxes)
            boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
            target["input_boxes"] = boxes

        return image, target

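# Note that Normalize does double duty: besides normalizing pixel values, it
# converts boxes from absolute xyxy to normalized cxcywh. In a 640x480 image,
# [100.0, 120.0, 300.0, 400.0] becomes cxcywh [200, 260, 200, 280], then
# roughly [0.3125, 0.5417, 0.3125, 0.5833] after dividing by [w, h, w, h].
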
class RemoveDifficult:
    def __init__(self, enabled=False):
        self.remove_difficult = enabled

    def __call__(self, image, target=None):
        if target is None:
            return image, None

        target = target.copy()
        # keep everything unless removal is enabled; otherwise drop crowd boxes
        keep = ~target["iscrowd"].to(torch.bool) | (not self.remove_difficult)
        if "boxes" in target:
            target["boxes"] = target["boxes"][keep]
        target["labels"] = target["labels"][keep]
        target["iscrowd"] = target["iscrowd"][keep]
        return image, target

class Compose:
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        for t in self.transforms:
            image, target = t(image, target)
        return image, target

    def __repr__(self):
        format_string = self.__class__.__name__ + "("
        for t in self.transforms:
            format_string += "\n"
            format_string += "    {0}".format(t)
        format_string += "\n)"
        return format_string

def get_random_resize_scales(size, min_size, rounded):
    stride = 128 if rounded else 32
    min_size = int(stride * math.ceil(min_size / stride))
    scales = list(range(min_size, size + 1, stride))
    return scales


def get_random_resize_max_size(size, ratio=5 / 3):
    max_size = round(ratio * size)
    return max_size
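

# A minimal usage sketch of the transforms above. The resize range and the
# normalization constants are illustrative assumptions (the mean/std are the
# standard ImageNet values), not values taken from this repo's configs.
if __name__ == "__main__":
    from PIL import Image

    scales = get_random_resize_scales(size=1024, min_size=480, rounded=False)
    max_size = get_random_resize_max_size(1024)  # 5/3 * 1024 ~= 1707
    transform = Compose(
        [
            RandomHorizontalFlip(p=0.5),
            RandomResize(scales, max_size=max_size),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    img = Image.new("RGB", (640, 480))
    target = {
        "boxes": torch.tensor([[100.0, 120.0, 300.0, 400.0]]),
        "labels": torch.tensor([1]),
        "area": torch.tensor([200.0 * 280.0]),
        "iscrowd": torch.tensor([0]),
    }
    out_img, out_target = transform(img, target)
    # out_img is a CxHxW tensor; boxes are now normalized cxcywh
    print(out_img.shape, out_target["boxes"])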