# build_sam.py
  1. # Copyright (c) Meta Platforms, Inc. and affiliates.
  2. # All rights reserved.
  3. # This source code is licensed under the license found in the
  4. # LICENSE file in the root directory of this source tree.
  5. import logging
  6. import torch
  7. from hydra import compose
  8. from hydra.utils import instantiate
  9. from omegaconf import OmegaConf
  10. HF_MODEL_ID_TO_FILENAMES = {
  11. "facebook/sam2-hiera-tiny": (
  12. "configs/sam2/sam2_hiera_t.yaml",
  13. "sam2_hiera_tiny.pt",
  14. ),
  15. "facebook/sam2-hiera-small": (
  16. "configs/sam2/sam2_hiera_s.yaml",
  17. "sam2_hiera_small.pt",
  18. ),
  19. "facebook/sam2-hiera-base-plus": (
  20. "configs/sam2/sam2_hiera_b+.yaml",
  21. "sam2_hiera_base_plus.pt",
  22. ),
  23. "facebook/sam2-hiera-large": (
  24. "configs/sam2/sam2_hiera_l.yaml",
  25. "sam2_hiera_large.pt",
  26. ),
  27. "facebook/sam2.1-hiera-tiny": (
  28. "configs/sam2.1/sam2.1_hiera_t.yaml",
  29. "sam2.1_hiera_tiny.pt",
  30. ),
  31. "facebook/sam2.1-hiera-small": (
  32. "configs/sam2.1/sam2.1_hiera_s.yaml",
  33. "sam2.1_hiera_small.pt",
  34. ),
  35. "facebook/sam2.1-hiera-base-plus": (
  36. "configs/sam2.1/sam2.1_hiera_b+.yaml",
  37. "sam2.1_hiera_base_plus.pt",
  38. ),
  39. "facebook/sam2.1-hiera-large": (
  40. "configs/sam2.1/sam2.1_hiera_l.yaml",
  41. "sam2.1_hiera_large.pt",
  42. ),
  43. }
  44. def build_sam2(
  45. config_file,
  46. ckpt_path=None,
  47. device="cuda",
  48. mode="eval",
  49. hydra_overrides_extra=[],
  50. apply_postprocessing=True,
  51. **kwargs,
  52. ):
  53. if apply_postprocessing:
  54. hydra_overrides_extra = hydra_overrides_extra.copy()
  55. hydra_overrides_extra += [
  56. # dynamically fall back to multi-mask if the single mask is not stable
  57. "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true",
  58. "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05",
  59. "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
  60. ]
  61. # Read config and init model
  62. cfg = compose(config_name=config_file, overrides=hydra_overrides_extra)
  63. OmegaConf.resolve(cfg)
  64. model = instantiate(cfg.model, _recursive_=True)
  65. _load_checkpoint(model, ckpt_path)
  66. model = model.to(device)
  67. if mode == "eval":
  68. model.eval()
  69. return model
  70. def build_sam2_video_predictor(
  71. config_file,
  72. ckpt_path=None,
  73. device="cuda",
  74. mode="eval",
  75. hydra_overrides_extra=[],
  76. apply_postprocessing=True,
  77. **kwargs,
  78. ):
  79. hydra_overrides = [
  80. "++model._target_=sam2.sam2_video_predictor.SAM2VideoPredictor",
  81. ]
  82. if apply_postprocessing:
  83. hydra_overrides_extra = hydra_overrides_extra.copy()
  84. hydra_overrides_extra += [
  85. # dynamically fall back to multi-mask if the single mask is not stable
  86. "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true",
  87. "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05",
  88. "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
  89. # the sigmoid mask logits on interacted frames with clicks in the memory encoder so that the encoded masks are exactly as what users see from clicking
  90. "++model.binarize_mask_from_pts_for_mem_enc=true",
  91. # fill small holes in the low-res masks up to `fill_hole_area` (before resizing them to the original video resolution)
  92. "++model.fill_hole_area=8",
  93. ]
  94. hydra_overrides.extend(hydra_overrides_extra)
  95. # Read config and init model
  96. cfg = compose(config_name=config_file, overrides=hydra_overrides)
  97. OmegaConf.resolve(cfg)
  98. model = instantiate(cfg.model, _recursive_=True)
  99. _load_checkpoint(model, ckpt_path)
  100. model = model.to(device)
  101. if mode == "eval":
  102. model.eval()
  103. return model
  104. def _hf_download(model_id):
  105. from huggingface_hub import hf_hub_download
  106. config_name, checkpoint_name = HF_MODEL_ID_TO_FILENAMES[model_id]
  107. ckpt_path = hf_hub_download(repo_id=model_id, filename=checkpoint_name)
  108. return config_name, ckpt_path
  109. def build_sam2_hf(model_id, **kwargs):
  110. config_name, ckpt_path = _hf_download(model_id)
  111. return build_sam2(config_file=config_name, ckpt_path=ckpt_path, **kwargs)
  112. def build_sam2_video_predictor_hf(model_id, **kwargs):
  113. config_name, ckpt_path = _hf_download(model_id)
  114. return build_sam2_video_predictor(
  115. config_file=config_name, ckpt_path=ckpt_path, **kwargs
  116. )
  117. def _load_checkpoint(model, ckpt_path):
  118. if ckpt_path is not None:
  119. sd = torch.load(ckpt_path, map_location="cpu", weights_only=True)["model"]
  120. missing_keys, unexpected_keys = model.load_state_dict(sd)
  121. if missing_keys:
  122. logging.error(missing_keys)
  123. raise RuntimeError()
  124. if unexpected_keys:
  125. logging.error(unexpected_keys)
  126. raise RuntimeError()
  127. logging.info("Loaded checkpoint sucessfully")