vos_inference.py 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320
  1. # Copyright (c) Meta Platforms, Inc. and affiliates.
  2. # All rights reserved.
  3. # This source code is licensed under the license found in the
  4. # LICENSE file in the root directory of this source tree.
  5. import argparse
  6. import os
  7. import numpy as np
  8. import torch
  9. from PIL import Image
  10. from sam2.build_sam import build_sam2_video_predictor
# The indexed-color palette used by DAVIS 2017 annotation PNGs: a flat
# sequence of RGB byte triplets where entry i is the display color for
# object id i (id 0 = black background). Used as a fallback when input
# masks carry no palette of their own.
DAVIS_PALETTE = b"\x00\x00\x00\x80\x00\x00\x00\x80\x00\x80\x80\x00\x00\x00\x80\x80\x00\x80\x00\x80\x80\x80\x80\x80@\x00\x00\xc0\x00\x00@\x80\x00\xc0\x80\x00@\x00\x80\xc0\x00\x80@\x80\x80\xc0\x80\x80\x00@\x00\x80@\x00\x00\xc0\x00\x80\xc0\x00\x00@\x80\x80@\x80\x00\xc0\x80\x80\xc0\x80@@\x00\xc0@\x00@\xc0\x00\xc0\xc0\x00@@\x80\xc0@\x80@\xc0\x80\xc0\xc0\x80\x00\x00@\x80\x00@\x00\x80@\x80\x80@\x00\x00\xc0\x80\x00\xc0\x00\x80\xc0\x80\x80\xc0@\x00@\xc0\x00@@\x80@\xc0\x80@@\x00\xc0\xc0\x00\xc0@\x80\xc0\xc0\x80\xc0\x00@@\x80@@\x00\xc0@\x80\xc0@\x00@\xc0\x80@\xc0\x00\xc0\xc0\x80\xc0\xc0@@@\xc0@@@\xc0@\xc0\xc0@@@\xc0\xc0@\xc0@\xc0\xc0\xc0\xc0\xc0 \x00\x00\xa0\x00\x00 \x80\x00\xa0\x80\x00 \x00\x80\xa0\x00\x80 \x80\x80\xa0\x80\x80`\x00\x00\xe0\x00\x00`\x80\x00\xe0\x80\x00`\x00\x80\xe0\x00\x80`\x80\x80\xe0\x80\x80 @\x00\xa0@\x00 \xc0\x00\xa0\xc0\x00 @\x80\xa0@\x80 \xc0\x80\xa0\xc0\x80`@\x00\xe0@\x00`\xc0\x00\xe0\xc0\x00`@\x80\xe0@\x80`\xc0\x80\xe0\xc0\x80 \x00@\xa0\x00@ \x80@\xa0\x80@ \x00\xc0\xa0\x00\xc0 \x80\xc0\xa0\x80\xc0`\x00@\xe0\x00@`\x80@\xe0\x80@`\x00\xc0\xe0\x00\xc0`\x80\xc0\xe0\x80\xc0 @@\xa0@@ \xc0@\xa0\xc0@ @\xc0\xa0@\xc0 \xc0\xc0\xa0\xc0\xc0`@@\xe0@@`\xc0@\xe0\xc0@`@\xc0\xe0@\xc0`\xc0\xc0\xe0\xc0\xc0\x00 \x00\x80 \x00\x00\xa0\x00\x80\xa0\x00\x00 \x80\x80 \x80\x00\xa0\x80\x80\xa0\x80@ \x00\xc0 \x00@\xa0\x00\xc0\xa0\x00@ \x80\xc0 \x80@\xa0\x80\xc0\xa0\x80\x00`\x00\x80`\x00\x00\xe0\x00\x80\xe0\x00\x00`\x80\x80`\x80\x00\xe0\x80\x80\xe0\x80@`\x00\xc0`\x00@\xe0\x00\xc0\xe0\x00@`\x80\xc0`\x80@\xe0\x80\xc0\xe0\x80\x00 @\x80 @\x00\xa0@\x80\xa0@\x00 \xc0\x80 \xc0\x00\xa0\xc0\x80\xa0\xc0@ @\xc0 @@\xa0@\xc0\xa0@@ \xc0\xc0 \xc0@\xa0\xc0\xc0\xa0\xc0\x00`@\x80`@\x00\xe0@\x80\xe0@\x00`\xc0\x80`\xc0\x00\xe0\xc0\x80\xe0\xc0@`@\xc0`@@\xe0@\xc0\xe0@@`\xc0\xc0`\xc0@\xe0\xc0\xc0\xe0\xc0 \x00\xa0 \x00 \xa0\x00\xa0\xa0\x00 \x80\xa0 \x80 \xa0\x80\xa0\xa0\x80` \x00\xe0 \x00`\xa0\x00\xe0\xa0\x00` \x80\xe0 \x80`\xa0\x80\xe0\xa0\x80 `\x00\xa0`\x00 \xe0\x00\xa0\xe0\x00 `\x80\xa0`\x80 \xe0\x80\xa0\xe0\x80``\x00\xe0`\x00`\xe0\x00\xe0\xe0\x00``\x80\xe0`\x80`\xe0\x80\xe0\xe0\x80 @\xa0 @ \xa0@\xa0\xa0@ \xc0\xa0 \xc0 \xa0\xc0\xa0\xa0\xc0` @\xe0 @`\xa0@\xe0\xa0@` \xc0\xe0 \xc0`\xa0\xc0\xe0\xa0\xc0 `@\xa0`@ \xe0@\xa0\xe0@ `\xc0\xa0`\xc0 \xe0\xc0\xa0\xe0\xc0``@\xe0`@`\xe0@\xe0\xe0@``\xc0\xe0`\xc0`\xe0\xc0\xe0\xe0\xc0"
  13. def load_ann_png(path):
  14. """Load a PNG file as a mask and its palette."""
  15. mask = Image.open(path)
  16. palette = mask.getpalette()
  17. mask = np.array(mask).astype(np.uint8)
  18. return mask, palette
  19. def save_ann_png(path, mask, palette):
  20. """Save a mask as a PNG file with the given palette."""
  21. assert mask.dtype == np.uint8
  22. assert mask.ndim == 2
  23. output_mask = Image.fromarray(mask)
  24. output_mask.putpalette(palette)
  25. output_mask.save(path)
  26. def get_per_obj_mask(mask):
  27. """Split a mask into per-object masks."""
  28. object_ids = np.unique(mask)
  29. object_ids = object_ids[object_ids > 0].tolist()
  30. per_obj_mask = {object_id: (mask == object_id) for object_id in object_ids}
  31. return per_obj_mask
  32. def put_per_obj_mask(per_obj_mask, height, width):
  33. """Combine per-object masks into a single mask."""
  34. mask = np.zeros((height, width), dtype=np.uint8)
  35. object_ids = sorted(per_obj_mask)[::-1]
  36. for object_id in object_ids:
  37. object_mask = per_obj_mask[object_id]
  38. object_mask = object_mask.reshape(height, width)
  39. mask[object_mask] = object_id
  40. return mask
  41. def load_masks_from_dir(input_mask_dir, video_name, frame_name, per_obj_png_file):
  42. """Load masks from a directory as a dict of per-object masks."""
  43. if not per_obj_png_file:
  44. input_mask_path = os.path.join(input_mask_dir, video_name, f"{frame_name}.png")
  45. input_mask, input_palette = load_ann_png(input_mask_path)
  46. per_obj_input_mask = get_per_obj_mask(input_mask)
  47. else:
  48. per_obj_input_mask = {}
  49. # each object is a directory in "{object_id:%03d}" format
  50. for object_name in os.listdir(os.path.join(input_mask_dir, video_name)):
  51. object_id = int(object_name)
  52. input_mask_path = os.path.join(
  53. input_mask_dir, video_name, object_name, f"{frame_name}.png"
  54. )
  55. input_mask, input_palette = load_ann_png(input_mask_path)
  56. per_obj_input_mask[object_id] = input_mask > 0
  57. return per_obj_input_mask, input_palette
  58. def save_masks_to_dir(
  59. output_mask_dir,
  60. video_name,
  61. frame_name,
  62. per_obj_output_mask,
  63. height,
  64. width,
  65. per_obj_png_file,
  66. output_palette,
  67. ):
  68. """Save masks to a directory as PNG files."""
  69. os.makedirs(os.path.join(output_mask_dir, video_name), exist_ok=True)
  70. if not per_obj_png_file:
  71. output_mask = put_per_obj_mask(per_obj_output_mask, height, width)
  72. output_mask_path = os.path.join(
  73. output_mask_dir, video_name, f"{frame_name}.png"
  74. )
  75. save_ann_png(output_mask_path, output_mask, output_palette)
  76. else:
  77. for object_id, object_mask in per_obj_output_mask.items():
  78. object_name = f"{object_id:03d}"
  79. os.makedirs(
  80. os.path.join(output_mask_dir, video_name, object_name),
  81. exist_ok=True,
  82. )
  83. output_mask = object_mask.reshape(height, width).astype(np.uint8)
  84. output_mask_path = os.path.join(
  85. output_mask_dir, video_name, object_name, f"{frame_name}.png"
  86. )
  87. save_ann_png(output_mask_path, output_mask, output_palette)
@torch.inference_mode()
@torch.autocast(device_type="cuda", dtype=torch.bfloat16)
def vos_inference(
    predictor,
    base_video_dir,
    input_mask_dir,
    output_mask_dir,
    video_name,
    score_thresh=0.0,
    use_all_masks=False,
    per_obj_png_file=False,
):
    """Run VOS inference on a single video with the given predictor.

    Args:
        predictor: a SAM 2 video predictor (see build_sam2_video_predictor).
        base_video_dir: directory holding one subdirectory of JPEG frames per video.
        input_mask_dir: directory holding the input PNG masks.
        output_mask_dir: directory where output PNG masks are written.
        video_name: name of the video subdirectory to process.
        score_thresh: logit threshold above which a pixel counts as foreground.
        use_all_masks: if True, all available input masks are fed to the model;
            otherwise only frame 0's mask is used.
        per_obj_png_file: if True, masks are one PNG per object (SA-V format)
            instead of one combined PNG per frame (DAVIS format).
    """
    # load the video frames and initialize the inference state on this video
    video_dir = os.path.join(base_video_dir, video_name)
    frame_names = [
        os.path.splitext(p)[0]
        for p in os.listdir(video_dir)
        if os.path.splitext(p)[-1] in [".jpg", ".jpeg", ".JPG", ".JPEG"]
    ]
    # sort numerically — assumes frame file stems are integers like "00001"; TODO confirm
    frame_names.sort(key=lambda p: int(os.path.splitext(p)[0]))
    inference_state = predictor.init_state(
        video_path=video_dir, async_loading_frames=False
    )
    # original video resolution, used when writing output masks
    height = inference_state["video_height"]
    width = inference_state["video_width"]
    input_palette = None
    # fetch mask inputs from input_mask_dir (either only mask for the first frame, or all available masks)
    if not use_all_masks:
        # use only the first video's ground-truth mask as the input mask
        input_frame_inds = [0]
    else:
        # use all mask files available in the input_mask_dir as the input masks
        if not per_obj_png_file:
            input_frame_inds = [
                idx
                for idx, name in enumerate(frame_names)
                if os.path.exists(
                    os.path.join(input_mask_dir, video_name, f"{name}.png")
                )
            ]
        else:
            # per-object layout: a frame index may appear once per object here
            input_frame_inds = [
                idx
                for object_name in os.listdir(os.path.join(input_mask_dir, video_name))
                for idx, name in enumerate(frame_names)
                if os.path.exists(
                    os.path.join(input_mask_dir, video_name, object_name, f"{name}.png")
                )
            ]
    # deduplicate frame indices and process them in temporal order
    input_frame_inds = sorted(set(input_frame_inds))
    # add those input masks to SAM 2 inference state before propagation
    for input_frame_idx in input_frame_inds:
        per_obj_input_mask, input_palette = load_masks_from_dir(
            input_mask_dir=input_mask_dir,
            video_name=video_name,
            frame_name=frame_names[input_frame_idx],
            per_obj_png_file=per_obj_png_file,
        )
        for object_id, object_mask in per_obj_input_mask.items():
            predictor.add_new_mask(
                inference_state=inference_state,
                frame_idx=input_frame_idx,
                obj_id=object_id,
                mask=object_mask,
            )
    # run propagation throughout the video and collect the results in a dict
    os.makedirs(os.path.join(output_mask_dir, video_name), exist_ok=True)
    # fall back to the DAVIS palette when the input masks carried no palette
    output_palette = input_palette or DAVIS_PALETTE
    video_segments = {}  # video_segments contains the per-frame segmentation results
    for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(
        inference_state
    ):
        # binarize each object's logits at score_thresh to get boolean masks
        per_obj_output_mask = {
            out_obj_id: (out_mask_logits[i] > score_thresh).cpu().numpy()
            for i, out_obj_id in enumerate(out_obj_ids)
        }
        video_segments[out_frame_idx] = per_obj_output_mask
    # write the output masks as palette PNG files to output_mask_dir
    for out_frame_idx, per_obj_output_mask in video_segments.items():
        save_masks_to_dir(
            output_mask_dir=output_mask_dir,
            video_name=video_name,
            frame_name=frame_names[out_frame_idx],
            per_obj_output_mask=per_obj_output_mask,
            height=height,
            width=width,
            per_obj_png_file=per_obj_png_file,
            output_palette=output_palette,
        )
  178. def main():
  179. parser = argparse.ArgumentParser()
  180. parser.add_argument(
  181. "--sam2_cfg",
  182. type=str,
  183. default="sam2_hiera_b+.yaml",
  184. help="SAM 2 model configuration file",
  185. )
  186. parser.add_argument(
  187. "--sam2_checkpoint",
  188. type=str,
  189. default="./checkpoints/sam2_hiera_b+.pt",
  190. help="path to the SAM 2 model checkpoint",
  191. )
  192. parser.add_argument(
  193. "--base_video_dir",
  194. type=str,
  195. required=True,
  196. help="directory containing videos (as JPEG files) to run VOS prediction on",
  197. )
  198. parser.add_argument(
  199. "--input_mask_dir",
  200. type=str,
  201. required=True,
  202. help="directory containing input masks (as PNG files) of each video",
  203. )
  204. parser.add_argument(
  205. "--video_list_file",
  206. type=str,
  207. default=None,
  208. help="text file containing the list of video names to run VOS prediction on",
  209. )
  210. parser.add_argument(
  211. "--output_mask_dir",
  212. type=str,
  213. required=True,
  214. help="directory to save the output masks (as PNG files)",
  215. )
  216. parser.add_argument(
  217. "--score_thresh",
  218. type=float,
  219. default=0.0,
  220. help="threshold for the output mask logits (default: 0.0)",
  221. )
  222. parser.add_argument(
  223. "--use_all_masks",
  224. action="store_true",
  225. help="whether to use all available PNG files in input_mask_dir "
  226. "(default without this flag: just the first PNG file as input to the SAM 2 model; "
  227. "usually we don't need this flag, since semi-supervised VOS evaluation usually takes input from the first frame only)",
  228. )
  229. parser.add_argument(
  230. "--per_obj_png_file",
  231. action="store_true",
  232. help="whether use separate per-object PNG files for input and output masks "
  233. "(default without this flag: all object masks are packed into a single PNG file on each frame following DAVIS format; "
  234. "note that the SA-V dataset stores each object mask as an individual PNG file and requires this flag)",
  235. )
  236. parser.add_argument(
  237. "--apply_postprocessing",
  238. action="store_true",
  239. help="whether to apply postprocessing (e.g. hole-filling) to the output masks "
  240. "(we don't apply such post-processing in the SAM 2 model evaluation)",
  241. )
  242. args = parser.parse_args()
  243. # if we use per-object PNG files, they could possibly overlap in inputs and outputs
  244. hydra_overrides_extra = [
  245. "++model.non_overlap_masks=" + ("false" if args.per_obj_png_file else "true")
  246. ]
  247. predictor = build_sam2_video_predictor(
  248. config_file=args.sam2_cfg,
  249. ckpt_path=args.sam2_checkpoint,
  250. apply_postprocessing=args.apply_postprocessing,
  251. hydra_overrides_extra=hydra_overrides_extra,
  252. )
  253. if args.use_all_masks:
  254. print("using all available masks in input_mask_dir as input to the SAM 2 model")
  255. else:
  256. print(
  257. "using only the first frame's mask in input_mask_dir as input to the SAM 2 model"
  258. )
  259. # if a video list file is provided, read the video names from the file
  260. # (otherwise, we use all subdirectories in base_video_dir)
  261. if args.video_list_file is not None:
  262. with open(args.video_list_file, "r") as f:
  263. video_names = [v.strip() for v in f.readlines()]
  264. else:
  265. video_names = [
  266. p
  267. for p in os.listdir(args.base_video_dir)
  268. if os.path.isdir(os.path.join(args.base_video_dir, p))
  269. ]
  270. print(f"running VOS prediction on {len(video_names)} videos:\n{video_names}")
  271. for n_video, video_name in enumerate(video_names):
  272. print(f"\n{n_video + 1}/{len(video_names)} - running on {video_name}")
  273. vos_inference(
  274. predictor=predictor,
  275. base_video_dir=args.base_video_dir,
  276. input_mask_dir=args.input_mask_dir,
  277. output_mask_dir=args.output_mask_dir,
  278. video_name=video_name,
  279. score_thresh=args.score_thresh,
  280. use_all_masks=args.use_all_masks,
  281. per_obj_png_file=args.per_obj_png_file,
  282. )
  283. print(
  284. f"completed VOS prediction on {len(video_names)} videos -- "
  285. f"output masks saved to {args.output_mask_dir}"
  286. )
  287. if __name__ == "__main__":
  288. main()