# build_sam.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
  5. import logging
  6. import torch
  7. from hydra import compose
  8. from hydra.utils import instantiate
  9. from omegaconf import OmegaConf
  10. def build_sam2(
  11. config_file,
  12. ckpt_path=None,
  13. device="cuda",
  14. mode="eval",
  15. hydra_overrides_extra=[],
  16. apply_postprocessing=True,
  17. ):
  18. if apply_postprocessing:
  19. hydra_overrides_extra = hydra_overrides_extra.copy()
  20. hydra_overrides_extra += [
  21. # dynamically fall back to multi-mask if the single mask is not stable
  22. "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true",
  23. "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05",
  24. "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
  25. ]
  26. # Read config and init model
  27. cfg = compose(config_name=config_file, overrides=hydra_overrides_extra)
  28. OmegaConf.resolve(cfg)
  29. model = instantiate(cfg.model, _recursive_=True)
  30. _load_checkpoint(model, ckpt_path)
  31. model = model.to(device)
  32. if mode == "eval":
  33. model.eval()
  34. return model
  35. def build_sam2_video_predictor(
  36. config_file,
  37. ckpt_path=None,
  38. device="cuda",
  39. mode="eval",
  40. hydra_overrides_extra=[],
  41. apply_postprocessing=True,
  42. ):
  43. hydra_overrides = [
  44. "++model._target_=sam2.sam2_video_predictor.SAM2VideoPredictor",
  45. ]
  46. if apply_postprocessing:
  47. hydra_overrides_extra = hydra_overrides_extra.copy()
  48. hydra_overrides_extra += [
  49. # dynamically fall back to multi-mask if the single mask is not stable
  50. "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true",
  51. "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05",
  52. "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
  53. # the sigmoid mask logits on interacted frames with clicks in the memory encoder so that the encoded masks are exactly as what users see from clicking
  54. "++model.binarize_mask_from_pts_for_mem_enc=true",
  55. # fill small holes in the low-res masks up to `fill_hole_area` (before resizing them to the original video resolution)
  56. "++model.fill_hole_area=8",
  57. ]
  58. hydra_overrides.extend(hydra_overrides_extra)
  59. # Read config and init model
  60. cfg = compose(config_name=config_file, overrides=hydra_overrides)
  61. OmegaConf.resolve(cfg)
  62. model = instantiate(cfg.model, _recursive_=True)
  63. _load_checkpoint(model, ckpt_path)
  64. model = model.to(device)
  65. if mode == "eval":
  66. model.eval()
  67. return model
  68. def build_sam2_hf(model_id, **kwargs):
  69. from huggingface_hub import hf_hub_download
  70. model_id_to_filenames = {
  71. "facebook/sam2-hiera-tiny": ("sam2_hiera_t.yaml", "sam2_hiera_tiny.pt"),
  72. "facebook/sam2-hiera-small": ("sam2_hiera_s.yaml", "sam2_hiera_small.pt"),
  73. "facebook/sam2-hiera-base-plus": ("sam2_hiera_b+.yaml", "sam2_hiera_base_plus.pt"),
  74. "facebook/sam2-hiera-large": ("sam2_hiera_l.yaml", "sam2_hiera_large.pt"),
  75. }
  76. config_name, checkpoint_name = model_id_to_filenames[model_id]
  77. ckpt_path = hf_hub_download(repo_id=model_id, filename=checkpoint_name)
  78. return build_sam2(config_file=config_name, ckpt_path=ckpt_path, **kwargs)
  79. def build_sam2_video_predictor_hf(model_id, **kwargs):
  80. from huggingface_hub import hf_hub_download
  81. model_id_to_filenames = {
  82. "facebook/sam2-hiera-tiny": ("sam2_hiera_t.yaml", "sam2_hiera_tiny.pt"),
  83. "facebook/sam2-hiera-small": ("sam2_hiera_s.yaml", "sam2_hiera_small.pt"),
  84. "facebook/sam2-hiera-base-plus": ("sam2_hiera_b+.yaml", "sam2_hiera_base_plus.pt"),
  85. "facebook/sam2-hiera-large": ("sam2_hiera_l.yaml", "sam2_hiera_large.pt"),
  86. }
  87. config_name, checkpoint_name = model_id_to_filenames[model_id]
  88. ckpt_path = hf_hub_download(repo_id=model_id, filename=checkpoint_name)
  89. return build_sam2_video_predictor(config_file=config_name, ckpt_path=ckpt_path, **kwargs)
  90. def _load_checkpoint(model, ckpt_path):
  91. if ckpt_path is not None:
  92. sd = torch.load(ckpt_path, map_location="cpu")["model"]
  93. missing_keys, unexpected_keys = model.load_state_dict(sd)
  94. if missing_keys:
  95. logging.error(missing_keys)
  96. raise RuntimeError()
  97. if unexpected_keys:
  98. logging.error(unexpected_keys)
  99. raise RuntimeError()
  100. logging.info("Loaded checkpoint sucessfully")