# build_sam.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging

import torch
from hydra import compose
from hydra.utils import instantiate
from huggingface_hub import hf_hub_download
from omegaconf import OmegaConf
  11. def build_sam2(
  12. config_file,
  13. ckpt_path=None,
  14. device="cuda",
  15. mode="eval",
  16. hydra_overrides_extra=[],
  17. apply_postprocessing=True,
  18. ):
  19. if apply_postprocessing:
  20. hydra_overrides_extra = hydra_overrides_extra.copy()
  21. hydra_overrides_extra += [
  22. # dynamically fall back to multi-mask if the single mask is not stable
  23. "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true",
  24. "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05",
  25. "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
  26. ]
  27. # Read config and init model
  28. cfg = compose(config_name=config_file, overrides=hydra_overrides_extra)
  29. OmegaConf.resolve(cfg)
  30. model = instantiate(cfg.model, _recursive_=True)
  31. _load_checkpoint(model, ckpt_path)
  32. model = model.to(device)
  33. if mode == "eval":
  34. model.eval()
  35. return model
  36. def build_sam2_video_predictor(
  37. config_file,
  38. ckpt_path=None,
  39. device="cuda",
  40. mode="eval",
  41. hydra_overrides_extra=[],
  42. apply_postprocessing=True,
  43. ):
  44. hydra_overrides = [
  45. "++model._target_=sam2.sam2_video_predictor.SAM2VideoPredictor",
  46. ]
  47. if apply_postprocessing:
  48. hydra_overrides_extra = hydra_overrides_extra.copy()
  49. hydra_overrides_extra += [
  50. # dynamically fall back to multi-mask if the single mask is not stable
  51. "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true",
  52. "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05",
  53. "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
  54. # the sigmoid mask logits on interacted frames with clicks in the memory encoder so that the encoded masks are exactly as what users see from clicking
  55. "++model.binarize_mask_from_pts_for_mem_enc=true",
  56. # fill small holes in the low-res masks up to `fill_hole_area` (before resizing them to the original video resolution)
  57. "++model.fill_hole_area=8",
  58. ]
  59. hydra_overrides.extend(hydra_overrides_extra)
  60. # Read config and init model
  61. cfg = compose(config_name=config_file, overrides=hydra_overrides)
  62. OmegaConf.resolve(cfg)
  63. model = instantiate(cfg.model, _recursive_=True)
  64. _load_checkpoint(model, ckpt_path)
  65. model = model.to(device)
  66. if mode == "eval":
  67. model.eval()
  68. return model
  69. def build_sam2_hf(model_id, **kwargs):
  70. model_id_to_filenames = {
  71. "facebook/sam2-hiera-tiny": ("sam2_hiera_t.yaml", "sam2_hiera_tiny.pt"),
  72. "facebook/sam2-hiera-small": ("sam2_hiera_s.yaml", "sam2_hiera_small.pt"),
  73. "facebook/sam2-hiera-base-plus": ("sam2_hiera_b+.yaml", "sam2_hiera_base_plus.pt"),
  74. "facebook/sam2-hiera-large": ("sam2_hiera_l.yaml", "sam2_hiera_large.pt"),
  75. }
  76. config_name, checkpoint_name = model_id_to_filenames[model_id]
  77. config_file = hf_hub_download(repo_id=model_id, filename=config_name)
  78. ckpt_path = hf_hub_download(repo_id=model_id, filename=checkpoint_name)
  79. return build_sam2_video_predictor(config_file=config_file, ckpt_path=ckpt_path, **kwargs)
  80. def build_sam2_video_predictor_hf(model_id, **kwargs):
  81. config_file = hf_hub_download(repo_id=model_id, filename=f"{model_id}.yaml")
  82. ckpt_path = hf_hub_download(repo_id=model_id, filename=f"{model_id}.pt")
  83. return build_sam2_video_predictor(config_file=config_file, ckpt_path=ckpt_path, **kwargs)
  84. def _load_checkpoint(model, ckpt_path):
  85. if ckpt_path is not None:
  86. sd = torch.load(ckpt_path, map_location="cpu")["model"]
  87. missing_keys, unexpected_keys = model.load_state_dict(sd)
  88. if missing_keys:
  89. logging.error(missing_keys)
  90. raise RuntimeError()
  91. if unexpected_keys:
  92. logging.error(unexpected_keys)
  93. raise RuntimeError()
  94. logging.info("Loaded checkpoint sucessfully")