# vos_inference.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import os
from collections import defaultdict

import numpy as np
import torch
from PIL import Image

from sam2.build_sam import build_sam2_video_predictor
# the PNG palette for DAVIS 2017 dataset
DAVIS_PALETTE = b"\x00\x00\x00\x80\x00\x00\x00\x80\x00\x80\x80\x00\x00\x00\x80\x80\x00\x80\x00\x80\x80\x80\x80\x80@\x00\x00\xc0\x00\x00@\x80\x00\xc0\x80\x00@\x00\x80\xc0\x00\x80@\x80\x80\xc0\x80\x80\x00@\x00\x80@\x00\x00\xc0\x00\x80\xc0\x00\x00@\x80\x80@\x80\x00\xc0\x80\x80\xc0\x80@@\x00\xc0@\x00@\xc0\x00\xc0\xc0\x00@@\x80\xc0@\x80@\xc0\x80\xc0\xc0\x80\x00\x00@\x80\x00@\x00\x80@\x80\x80@\x00\x00\xc0\x80\x00\xc0\x00\x80\xc0\x80\x80\xc0@\x00@\xc0\x00@@\x80@\xc0\x80@@\x00\xc0\xc0\x00\xc0@\x80\xc0\xc0\x80\xc0\x00@@\x80@@\x00\xc0@\x80\xc0@\x00@\xc0\x80@\xc0\x00\xc0\xc0\x80\xc0\xc0@@@\xc0@@@\xc0@\xc0\xc0@@@\xc0\xc0@\xc0@\xc0\xc0\xc0\xc0\xc0 \x00\x00\xa0\x00\x00 \x80\x00\xa0\x80\x00 \x00\x80\xa0\x00\x80 \x80\x80\xa0\x80\x80`\x00\x00\xe0\x00\x00`\x80\x00\xe0\x80\x00`\x00\x80\xe0\x00\x80`\x80\x80\xe0\x80\x80 @\x00\xa0@\x00 \xc0\x00\xa0\xc0\x00 @\x80\xa0@\x80 \xc0\x80\xa0\xc0\x80`@\x00\xe0@\x00`\xc0\x00\xe0\xc0\x00`@\x80\xe0@\x80`\xc0\x80\xe0\xc0\x80 \x00@\xa0\x00@ \x80@\xa0\x80@ \x00\xc0\xa0\x00\xc0 \x80\xc0\xa0\x80\xc0`\x00@\xe0\x00@`\x80@\xe0\x80@`\x00\xc0\xe0\x00\xc0`\x80\xc0\xe0\x80\xc0 @@\xa0@@ \xc0@\xa0\xc0@ @\xc0\xa0@\xc0 \xc0\xc0\xa0\xc0\xc0`@@\xe0@@`\xc0@\xe0\xc0@`@\xc0\xe0@\xc0`\xc0\xc0\xe0\xc0\xc0\x00 \x00\x80 \x00\x00\xa0\x00\x80\xa0\x00\x00 \x80\x80 \x80\x00\xa0\x80\x80\xa0\x80@ \x00\xc0 \x00@\xa0\x00\xc0\xa0\x00@ \x80\xc0 \x80@\xa0\x80\xc0\xa0\x80\x00`\x00\x80`\x00\x00\xe0\x00\x80\xe0\x00\x00`\x80\x80`\x80\x00\xe0\x80\x80\xe0\x80@`\x00\xc0`\x00@\xe0\x00\xc0\xe0\x00@`\x80\xc0`\x80@\xe0\x80\xc0\xe0\x80\x00 @\x80 @\x00\xa0@\x80\xa0@\x00 \xc0\x80 \xc0\x00\xa0\xc0\x80\xa0\xc0@ @\xc0 @@\xa0@\xc0\xa0@@ \xc0\xc0 \xc0@\xa0\xc0\xc0\xa0\xc0\x00`@\x80`@\x00\xe0@\x80\xe0@\x00`\xc0\x80`\xc0\x00\xe0\xc0\x80\xe0\xc0@`@\xc0`@@\xe0@\xc0\xe0@@`\xc0\xc0`\xc0@\xe0\xc0\xc0\xe0\xc0 \x00\xa0 \x00 \xa0\x00\xa0\xa0\x00 \x80\xa0 \x80 \xa0\x80\xa0\xa0\x80` \x00\xe0 \x00`\xa0\x00\xe0\xa0\x00` \x80\xe0 \x80`\xa0\x80\xe0\xa0\x80 `\x00\xa0`\x00 \xe0\x00\xa0\xe0\x00 `\x80\xa0`\x80 \xe0\x80\xa0\xe0\x80``\x00\xe0`\x00`\xe0\x00\xe0\xe0\x00``\x80\xe0`\x80`\xe0\x80\xe0\xe0\x80 @\xa0 @ \xa0@\xa0\xa0@ \xc0\xa0 \xc0 \xa0\xc0\xa0\xa0\xc0` @\xe0 @`\xa0@\xe0\xa0@` \xc0\xe0 \xc0`\xa0\xc0\xe0\xa0\xc0 `@\xa0`@ \xe0@\xa0\xe0@ `\xc0\xa0`\xc0 \xe0\xc0\xa0\xe0\xc0``@\xe0`@`\xe0@\xe0\xe0@``\xc0\xe0`\xc0`\xe0\xc0\xe0\xe0\xc0"
  14. def load_ann_png(path):
  15. """Load a PNG file as a mask and its palette."""
  16. mask = Image.open(path)
  17. palette = mask.getpalette()
  18. mask = np.array(mask).astype(np.uint8)
  19. return mask, palette
  20. def save_ann_png(path, mask, palette):
  21. """Save a mask as a PNG file with the given palette."""
  22. assert mask.dtype == np.uint8
  23. assert mask.ndim == 2
  24. output_mask = Image.fromarray(mask)
  25. output_mask.putpalette(palette)
  26. output_mask.save(path)
  27. def get_per_obj_mask(mask):
  28. """Split a mask into per-object masks."""
  29. object_ids = np.unique(mask)
  30. object_ids = object_ids[object_ids > 0].tolist()
  31. per_obj_mask = {object_id: (mask == object_id) for object_id in object_ids}
  32. return per_obj_mask
  33. def put_per_obj_mask(per_obj_mask, height, width):
  34. """Combine per-object masks into a single mask."""
  35. mask = np.zeros((height, width), dtype=np.uint8)
  36. object_ids = sorted(per_obj_mask)[::-1]
  37. for object_id in object_ids:
  38. object_mask = per_obj_mask[object_id]
  39. object_mask = object_mask.reshape(height, width)
  40. mask[object_mask] = object_id
  41. return mask
  42. def load_masks_from_dir(
  43. input_mask_dir, video_name, frame_name, per_obj_png_file, allow_missing=False
  44. ):
  45. """Load masks from a directory as a dict of per-object masks."""
  46. if not per_obj_png_file:
  47. input_mask_path = os.path.join(input_mask_dir, video_name, f"{frame_name}.png")
  48. if allow_missing and not os.path.exists(input_mask_path):
  49. return {}, None
  50. input_mask, input_palette = load_ann_png(input_mask_path)
  51. per_obj_input_mask = get_per_obj_mask(input_mask)
  52. else:
  53. per_obj_input_mask = {}
  54. input_palette = None
  55. # each object is a directory in "{object_id:%03d}" format
  56. for object_name in os.listdir(os.path.join(input_mask_dir, video_name)):
  57. object_id = int(object_name)
  58. input_mask_path = os.path.join(
  59. input_mask_dir, video_name, object_name, f"{frame_name}.png"
  60. )
  61. if allow_missing and not os.path.exists(input_mask_path):
  62. continue
  63. input_mask, input_palette = load_ann_png(input_mask_path)
  64. per_obj_input_mask[object_id] = input_mask > 0
  65. return per_obj_input_mask, input_palette
  66. def save_masks_to_dir(
  67. output_mask_dir,
  68. video_name,
  69. frame_name,
  70. per_obj_output_mask,
  71. height,
  72. width,
  73. per_obj_png_file,
  74. output_palette,
  75. ):
  76. """Save masks to a directory as PNG files."""
  77. os.makedirs(os.path.join(output_mask_dir, video_name), exist_ok=True)
  78. if not per_obj_png_file:
  79. output_mask = put_per_obj_mask(per_obj_output_mask, height, width)
  80. output_mask_path = os.path.join(
  81. output_mask_dir, video_name, f"{frame_name}.png"
  82. )
  83. save_ann_png(output_mask_path, output_mask, output_palette)
  84. else:
  85. for object_id, object_mask in per_obj_output_mask.items():
  86. object_name = f"{object_id:03d}"
  87. os.makedirs(
  88. os.path.join(output_mask_dir, video_name, object_name),
  89. exist_ok=True,
  90. )
  91. output_mask = object_mask.reshape(height, width).astype(np.uint8)
  92. output_mask_path = os.path.join(
  93. output_mask_dir, video_name, object_name, f"{frame_name}.png"
  94. )
  95. save_ann_png(output_mask_path, output_mask, output_palette)
@torch.inference_mode()
@torch.autocast(device_type="cuda", dtype=torch.bfloat16)
def vos_inference(
    predictor,
    base_video_dir,
    input_mask_dir,
    output_mask_dir,
    video_name,
    score_thresh=0.0,
    use_all_masks=False,
    per_obj_png_file=False,
):
    """Run VOS inference on a single video with the given predictor.

    All tracked objects must appear in the first input-mask frame; a new
    object id showing up only in a later input frame raises RuntimeError
    (use `vos_separate_inference_per_object` for such datasets).

    Args:
        predictor: SAM 2 video predictor (from `build_sam2_video_predictor`).
        base_video_dir: directory with one subdirectory of JPEG frames per video.
        input_mask_dir: directory with input PNG masks for each video.
        output_mask_dir: directory where output PNG masks are written.
        video_name: name of the video subdirectory to process.
        score_thresh: logit threshold for binarizing output masks.
        use_all_masks: if True, feed every available input-mask frame to the
            model instead of only the first frame.
        per_obj_png_file: if True, use one PNG per object (SA-V layout)
            instead of a single combined PNG per frame (DAVIS layout).
    """
    # load the video frames and initialize the inference state on this video
    video_dir = os.path.join(base_video_dir, video_name)
    frame_names = [
        os.path.splitext(p)[0]
        for p in os.listdir(video_dir)
        if os.path.splitext(p)[-1] in [".jpg", ".jpeg", ".JPG", ".JPEG"]
    ]
    # frame names are numeric (e.g. "00012"); sort numerically, not lexically
    frame_names.sort(key=lambda p: int(os.path.splitext(p)[0]))
    inference_state = predictor.init_state(
        video_path=video_dir, async_loading_frames=False
    )
    # original video resolution, as recorded by the predictor at load time
    height = inference_state["video_height"]
    width = inference_state["video_width"]
    input_palette = None

    # fetch mask inputs from input_mask_dir (either only mask for the first frame, or all available masks)
    if not use_all_masks:
        # use only the first video's ground-truth mask as the input mask
        input_frame_inds = [0]
    else:
        # use all mask files available in the input_mask_dir as the input masks
        if not per_obj_png_file:
            input_frame_inds = [
                idx
                for idx, name in enumerate(frame_names)
                if os.path.exists(
                    os.path.join(input_mask_dir, video_name, f"{name}.png")
                )
            ]
        else:
            # per-object layout: a frame counts if ANY object has a PNG for it
            # (duplicates across objects are removed by the sorted(set(...)) below)
            input_frame_inds = [
                idx
                for object_name in os.listdir(os.path.join(input_mask_dir, video_name))
                for idx, name in enumerate(frame_names)
                if os.path.exists(
                    os.path.join(input_mask_dir, video_name, object_name, f"{name}.png")
                )
            ]
        # check and make sure we got at least one input frame
        if len(input_frame_inds) == 0:
            raise RuntimeError(
                f"In {video_name=}, got no input masks in {input_mask_dir=}. "
                "Please make sure the input masks are available in the correct format."
            )
        input_frame_inds = sorted(set(input_frame_inds))

    # add those input masks to SAM 2 inference state before propagation
    object_ids_set = None
    for input_frame_idx in input_frame_inds:
        try:
            per_obj_input_mask, input_palette = load_masks_from_dir(
                input_mask_dir=input_mask_dir,
                video_name=video_name,
                frame_name=frame_names[input_frame_idx],
                per_obj_png_file=per_obj_png_file,
            )
        except FileNotFoundError as e:
            raise RuntimeError(
                f"In {video_name=}, failed to load input mask for frame {input_frame_idx=}. "
                "Please add the `--track_object_appearing_later_in_video` flag "
                "for VOS datasets that don't have all objects to track appearing "
                "in the first frame (such as LVOS or YouTube-VOS)."
            ) from e
        # get the list of object ids to track from the first input frame
        if object_ids_set is None:
            object_ids_set = set(per_obj_input_mask)
        for object_id, object_mask in per_obj_input_mask.items():
            # check and make sure no new object ids appear only in later frames
            if object_id not in object_ids_set:
                raise RuntimeError(
                    f"In {video_name=}, got a new {object_id=} appearing only in a "
                    f"later {input_frame_idx=} (but not appearing in the first frame). "
                    "Please add the `--track_object_appearing_later_in_video` flag "
                    "for VOS datasets that don't have all objects to track appearing "
                    "in the first frame (such as LVOS or YouTube-VOS)."
                )
            predictor.add_new_mask(
                inference_state=inference_state,
                frame_idx=input_frame_idx,
                obj_id=object_id,
                mask=object_mask,
            )

    # check and make sure we have at least one object to track
    if object_ids_set is None or len(object_ids_set) == 0:
        raise RuntimeError(
            f"In {video_name=}, got no object ids on {input_frame_inds=}. "
            "Please add the `--track_object_appearing_later_in_video` flag "
            "for VOS datasets that don't have all objects to track appearing "
            "in the first frame (such as LVOS or YouTube-VOS)."
        )

    # run propagation throughout the video and collect the results in a dict
    os.makedirs(os.path.join(output_mask_dir, video_name), exist_ok=True)
    # reuse the input PNG's palette for the outputs when available
    output_palette = input_palette or DAVIS_PALETTE
    video_segments = {}  # video_segments contains the per-frame segmentation results
    for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(
        inference_state
    ):
        # binarize each object's logits at score_thresh
        per_obj_output_mask = {
            out_obj_id: (out_mask_logits[i] > score_thresh).cpu().numpy()
            for i, out_obj_id in enumerate(out_obj_ids)
        }
        video_segments[out_frame_idx] = per_obj_output_mask

    # write the output masks as palette PNG files to output_mask_dir
    for out_frame_idx, per_obj_output_mask in video_segments.items():
        save_masks_to_dir(
            output_mask_dir=output_mask_dir,
            video_name=video_name,
            frame_name=frame_names[out_frame_idx],
            per_obj_output_mask=per_obj_output_mask,
            height=height,
            width=width,
            per_obj_png_file=per_obj_png_file,
            output_palette=output_palette,
        )
@torch.inference_mode()
@torch.autocast(device_type="cuda", dtype=torch.bfloat16)
def vos_separate_inference_per_object(
    predictor,
    base_video_dir,
    input_mask_dir,
    output_mask_dir,
    video_name,
    score_thresh=0.0,
    use_all_masks=False,
    per_obj_png_file=False,
):
    """
    Run VOS inference on a single video with the given predictor.

    Unlike `vos_inference`, this function runs inference separately for each
    object in a video, which can be applied to datasets like LVOS or
    YouTube-VOS that don't have all objects to track appearing in the first
    frame (i.e. some objects might appear only later in the video).

    For each object, propagation starts from that object's earliest input
    frame. The per-object score maps are then merged into per-frame masks
    (with the predictor's non-overlap constraint applied unless
    `per_obj_png_file` is set) and written as palettized PNGs.
    """
    # load the video frames and initialize the inference state on this video
    video_dir = os.path.join(base_video_dir, video_name)
    frame_names = [
        os.path.splitext(p)[0]
        for p in os.listdir(video_dir)
        if os.path.splitext(p)[-1] in [".jpg", ".jpeg", ".JPG", ".JPEG"]
    ]
    # frame names are numeric (e.g. "00012"); sort numerically, not lexically
    frame_names.sort(key=lambda p: int(os.path.splitext(p)[0]))
    inference_state = predictor.init_state(
        video_path=video_dir, async_loading_frames=False
    )
    height = inference_state["video_height"]
    width = inference_state["video_width"]
    input_palette = None

    # collect all the object ids and their input masks
    inputs_per_object = defaultdict(dict)  # object_id -> {frame_idx: bool mask}
    for idx, name in enumerate(frame_names):
        # in per-object mode every frame is probed (missing files are allowed);
        # in combined mode only frames with an existing PNG are loaded
        if per_obj_png_file or os.path.exists(
            os.path.join(input_mask_dir, video_name, f"{name}.png")
        ):
            per_obj_input_mask, input_palette = load_masks_from_dir(
                input_mask_dir=input_mask_dir,
                video_name=video_name,
                frame_name=frame_names[idx],
                per_obj_png_file=per_obj_png_file,
                allow_missing=True,
            )
            for object_id, object_mask in per_obj_input_mask.items():
                # skip empty masks
                if not np.any(object_mask):
                    continue
                # if `use_all_masks=False`, we only use the first mask for each object
                if len(inputs_per_object[object_id]) > 0 and not use_all_masks:
                    continue
                print(f"adding mask from frame {idx} as input for {object_id=}")
                inputs_per_object[object_id][idx] = object_mask

    # run inference separately for each object in the video
    object_ids = sorted(inputs_per_object)
    output_scores_per_object = defaultdict(dict)  # object_id -> {frame_idx: logits}
    for object_id in object_ids:
        # add those input masks to SAM 2 inference state before propagation
        input_frame_inds = sorted(inputs_per_object[object_id])
        # clear the previous object's prompts/memory before tracking this one
        predictor.reset_state(inference_state)
        for input_frame_idx in input_frame_inds:
            predictor.add_new_mask(
                inference_state=inference_state,
                frame_idx=input_frame_idx,
                obj_id=object_id,
                mask=inputs_per_object[object_id][input_frame_idx],
            )
        # run propagation throughout the video and collect the results in a dict
        # (forward only, starting from this object's earliest input frame)
        for out_frame_idx, _, out_mask_logits in predictor.propagate_in_video(
            inference_state,
            start_frame_idx=min(input_frame_inds),
            reverse=False,
        ):
            obj_scores = out_mask_logits.cpu().numpy()
            output_scores_per_object[object_id][out_frame_idx] = obj_scores

    # post-processing: consolidate the per-object scores into per-frame masks
    os.makedirs(os.path.join(output_mask_dir, video_name), exist_ok=True)
    # reuse the input PNG's palette for the outputs when available
    output_palette = input_palette or DAVIS_PALETTE
    video_segments = {}  # video_segments contains the per-frame segmentation results
    for frame_idx in range(len(frame_names)):
        # frames an object was never propagated to keep a very negative fill
        # value, so thresholding below yields an empty mask there
        scores = torch.full(
            size=(len(object_ids), 1, height, width),
            fill_value=-1024.0,
            dtype=torch.float32,
        )
        for i, object_id in enumerate(object_ids):
            if frame_idx in output_scores_per_object[object_id]:
                scores[i] = torch.from_numpy(
                    output_scores_per_object[object_id][frame_idx]
                )
        if not per_obj_png_file:
            # combined-PNG outputs cannot represent overlaps; let the predictor
            # resolve them across objects
            scores = predictor._apply_non_overlapping_constraints(scores)
        per_obj_output_mask = {
            object_id: (scores[i] > score_thresh).cpu().numpy()
            for i, object_id in enumerate(object_ids)
        }
        video_segments[frame_idx] = per_obj_output_mask

    # write the output masks as palette PNG files to output_mask_dir
    for frame_idx, per_obj_output_mask in video_segments.items():
        save_masks_to_dir(
            output_mask_dir=output_mask_dir,
            video_name=video_name,
            frame_name=frame_names[frame_idx],
            per_obj_output_mask=per_obj_output_mask,
            height=height,
            width=width,
            per_obj_png_file=per_obj_png_file,
            output_palette=output_palette,
        )
def main():
    """CLI entry point: run SAM 2 VOS inference over a set of videos.

    Builds the video predictor from the given config/checkpoint, then runs
    either `vos_inference` (all objects present in the first frame) or
    `vos_separate_inference_per_object` (objects may appear later, selected
    via --track_object_appearing_later_in_video) on every video, writing
    palettized PNG masks to --output_mask_dir.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--sam2_cfg",
        type=str,
        default="configs/sam2.1/sam2.1_hiera_b+.yaml",
        help="SAM 2 model configuration file",
    )
    parser.add_argument(
        "--sam2_checkpoint",
        type=str,
        default="./checkpoints/sam2.1_hiera_b+.pt",
        help="path to the SAM 2 model checkpoint",
    )
    parser.add_argument(
        "--base_video_dir",
        type=str,
        required=True,
        help="directory containing videos (as JPEG files) to run VOS prediction on",
    )
    parser.add_argument(
        "--input_mask_dir",
        type=str,
        required=True,
        help="directory containing input masks (as PNG files) of each video",
    )
    parser.add_argument(
        "--video_list_file",
        type=str,
        default=None,
        help="text file containing the list of video names to run VOS prediction on",
    )
    parser.add_argument(
        "--output_mask_dir",
        type=str,
        required=True,
        help="directory to save the output masks (as PNG files)",
    )
    parser.add_argument(
        "--score_thresh",
        type=float,
        default=0.0,
        help="threshold for the output mask logits (default: 0.0)",
    )
    parser.add_argument(
        "--use_all_masks",
        action="store_true",
        help="whether to use all available PNG files in input_mask_dir "
        "(default without this flag: just the first PNG file as input to the SAM 2 model; "
        "usually we don't need this flag, since semi-supervised VOS evaluation usually takes input from the first frame only)",
    )
    parser.add_argument(
        "--per_obj_png_file",
        action="store_true",
        help="whether use separate per-object PNG files for input and output masks "
        "(default without this flag: all object masks are packed into a single PNG file on each frame following DAVIS format; "
        "note that the SA-V dataset stores each object mask as an individual PNG file and requires this flag)",
    )
    parser.add_argument(
        "--apply_postprocessing",
        action="store_true",
        help="whether to apply postprocessing (e.g. hole-filling) to the output masks "
        "(we don't apply such post-processing in the SAM 2 model evaluation)",
    )
    parser.add_argument(
        "--track_object_appearing_later_in_video",
        action="store_true",
        help="whether to track objects that appear later in the video (i.e. not on the first frame; "
        "some VOS datasets like LVOS or YouTube-VOS don't have all objects appearing in the first frame)",
    )
    args = parser.parse_args()

    # if we use per-object PNG files, they could possibly overlap in inputs and outputs
    # (so the model's non-overlap constraint is disabled in that mode)
    hydra_overrides_extra = [
        "++model.non_overlap_masks=" + ("false" if args.per_obj_png_file else "true")
    ]
    predictor = build_sam2_video_predictor(
        config_file=args.sam2_cfg,
        ckpt_path=args.sam2_checkpoint,
        apply_postprocessing=args.apply_postprocessing,
        hydra_overrides_extra=hydra_overrides_extra,
    )

    if args.use_all_masks:
        print("using all available masks in input_mask_dir as input to the SAM 2 model")
    else:
        print(
            "using only the first frame's mask in input_mask_dir as input to the SAM 2 model"
        )
    # if a video list file is provided, read the video names from the file
    # (otherwise, we use all subdirectories in base_video_dir)
    if args.video_list_file is not None:
        with open(args.video_list_file, "r") as f:
            video_names = [v.strip() for v in f.readlines()]
    else:
        video_names = [
            p
            for p in os.listdir(args.base_video_dir)
            if os.path.isdir(os.path.join(args.base_video_dir, p))
        ]
    print(f"running VOS prediction on {len(video_names)} videos:\n{video_names}")

    for n_video, video_name in enumerate(video_names):
        print(f"\n{n_video + 1}/{len(video_names)} - running on {video_name}")
        if not args.track_object_appearing_later_in_video:
            vos_inference(
                predictor=predictor,
                base_video_dir=args.base_video_dir,
                input_mask_dir=args.input_mask_dir,
                output_mask_dir=args.output_mask_dir,
                video_name=video_name,
                score_thresh=args.score_thresh,
                use_all_masks=args.use_all_masks,
                per_obj_png_file=args.per_obj_png_file,
            )
        else:
            vos_separate_inference_per_object(
                predictor=predictor,
                base_video_dir=args.base_video_dir,
                input_mask_dir=args.input_mask_dir,
                output_mask_dir=args.output_mask_dir,
                video_name=video_name,
                score_thresh=args.score_thresh,
                use_all_masks=args.use_all_masks,
                per_obj_png_file=args.per_obj_png_file,
            )
    print(
        f"completed VOS prediction on {len(video_names)} videos -- "
        f"output masks saved to {args.output_mask_dir}"
    )
# script entry point
if __name__ == "__main__":
    main()