# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
# pyre-unsafe
import json
import os
import tempfile
from collections import defaultdict
from typing import Dict, Optional, Sequence, Tuple

import numpy as np
import pycocotools.mask

from sam3.eval.cgf1_eval import CGF1_METRICS
from sam3.eval.conversion_util import (
    convert_ytbvis_to_cocovid_gt,
    convert_ytbvis_to_cocovid_pred,
)
from sam3.eval.hota_eval_toolkit.run_ytvis_eval import run_ytvis_eval
from sam3.eval.teta_eval_toolkit import config, Evaluator, metrics
from sam3.eval.teta_eval_toolkit.datasets import COCO, TAO
from sam3.eval.ytvis_coco_wrapper import YTVIS
from sam3.eval.ytvis_eval import VideoDemoF1Eval, YTVISeval
from sam3.train.nms_helper import process_frame_level_nms, process_track_level_nms


def _get_metric_index(metric_name: str, iou_threshold: Optional[float] = None) -> int:
    """
    Find the index of a metric in CGF1_METRICS by name and IoU threshold.

    Args:
        metric_name: Name of the metric (e.g., "cgF1", "precision", "recall")
        iou_threshold: IoU threshold (None for average over 0.5:0.95, or a specific value like 0.5, 0.75)

    Returns:
        Index of the metric in CGF1_METRICS

    Raises:
        ValueError: If the metric is not found
    """
    for idx, metric in enumerate(CGF1_METRICS):
        if metric.name == metric_name and metric.iou_threshold == iou_threshold:
            return idx
    raise ValueError(
        f"Metric '{metric_name}' with IoU threshold {iou_threshold} not found in CGF1_METRICS"
    )
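

# Illustrative lookup (hypothetical CGF1_METRICS contents, for intuition only): if the list
# contained entries whose (name, iou_threshold) were ("cgF1", None), ("cgF1", 0.5), and
# ("IL_MCC", None) in that order, then _get_metric_index("cgF1", 0.5) would return 1, while a
# name/threshold combination that is absent would raise ValueError.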


class BasePredFileEvaluator:
    """A base class for evaluating a prediction file."""

    pass


class YTVISPredFileEvaluator(BasePredFileEvaluator):
    """Evaluate class mAP for YT-VIS prediction files."""

    def __init__(
        self,
        gt_ann_file: str,
        dataset_name: str = "video",
        iou_types: Optional[Sequence[str]] = None,
    ):
        self.gt_ann_file = gt_ann_file
        self.dataset_name = dataset_name
        self.iou_types = list(iou_types) if iou_types is not None else ["bbox", "segm"]
        assert all(iou_type in ["bbox", "segm"] for iou_type in self.iou_types)

    def evaluate(self, pred_file: str) -> Tuple[Dict[str, float], Dict]:
        # use our internal video evaluation toolkit for the YT-VIS pred file
        # (i.e. the same one we're using for video phrase AP)
        results = {}
        use_cats = True  # YT-VIS mAP evaluation uses categories
        ytvisGT = YTVIS(self.gt_ann_file, ignore_gt_cats=not use_cats)
        # the original YT-VIS GT annotations have uncompressed RLEs ("counts" is an integer list)
        # rather than compressed RLEs ("counts" is a string), so we first convert them here.
        if "segm" in self.iou_types:
            for ann in ytvisGT.dataset["annotations"]:
                ann["segmentations"] = [
                    _compress_rle(rle) for rle in ann["segmentations"]
                ]
        with open(pred_file) as f:
            dt = json.load(f)
        # Our prediction file saves "video_id" and absolute (unnormalized) boxes.
        # Note that we should use the official (original) YT-VIS annotations (i.e. the ones
        # saved via "scripts/datasets/training/ytvis_split.py", instead of the ones saved
        # via "scripts/api_db_to_ytvis_json.py") in this evaluator, which contain absolute
        # box coordinates in their GT annotations.
        for d in dt:
            d["image_id"] = d["video_id"]
        ytvisDT = ytvisGT.loadRes(dt)
        for iou_type in self.iou_types:
            ytvisEval = YTVISeval(ytvisGT, ytvisDT, iou_type)
            # set the area ranges for small, medium, and large objects (using
            # absolute pixel areas) as in the official YT-VIS evaluation toolkit:
            # https://github.com/achalddave/ytvosapi/blob/eca601117c9f86bad084cb91f1d918e9ab665a75/PythonAPI/ytvostools/ytvoseval.py#L538
            ytvisEval.params.areaRng = [
                [0**2, 1e5**2],
                [0**2, 128**2],
                [128**2, 256**2],
                [256**2, 1e5**2],
            ]
            ytvisEval.params.areaRngLbl = ["all", "small", "medium", "large"]
            ytvisEval.params.useCats = use_cats
            ytvisEval.evaluate()
            ytvisEval.accumulate()
            ytvisEval.summarize()
            result_key = f"{self.dataset_name}_{'mask' if iou_type == 'segm' else 'bbox'}_mAP_50_95"
            results[result_key] = ytvisEval.stats[0]
        # video-NP level results not supported for `YTVISPredFileEvaluator` yet
        video_np_level_results = {}
        return results, video_np_level_results


class VideoPhraseApEvaluator(BasePredFileEvaluator):
    """Evaluate Video Phrase AP with YT-VIS format prediction and GT files."""

    def __init__(
        self,
        gt_ann_file: str,
        dataset_name: str = "video",
        iou_types: Optional[Sequence[str]] = None,
    ):
        self.gt_ann_file = gt_ann_file
        self.dataset_name = dataset_name
        self.iou_types = list(iou_types) if iou_types is not None else ["bbox", "segm"]
        assert all(iou_type in ["bbox", "segm"] for iou_type in self.iou_types)

    def evaluate(self, pred_file: str) -> Tuple[Dict[str, float], Dict]:
        with open(self.gt_ann_file) as f:
            gt = json.load(f)
        with open(pred_file) as f:
            dt = json.load(f)
        # For phrase AP and demo F1 evaluation, we need to remap each pair of (video_id, category_id) to
        # a new unique video_id, so that we don't mix detections from different categories under `useCat=False`
        gt, dt = remap_video_category_pairs_to_unique_video_ids(gt, dt)
        if "segm" in self.iou_types:
            for ann in gt["annotations"]:
                ann["segmentations"] = [
                    _compress_rle(rle) for rle in ann["segmentations"]
                ]
        for d in dt:
            d["image_id"] = d["video_id"]
        results = {}
        use_cats = False  # Phrase AP evaluation does not use categories
        ytvisGT = YTVIS(annotation_file=None, ignore_gt_cats=not use_cats)
        ytvisGT.dataset = gt
        ytvisGT.createIndex()
        ytvisDT = ytvisGT.loadRes(dt)
        for iou_type in self.iou_types:
            phraseApEval = YTVISeval(ytvisGT, ytvisDT, iou_type)
            # set the area ranges for small, medium, and large objects (using
            # absolute pixel areas) as in the official YT-VIS evaluation toolkit:
            # https://github.com/achalddave/ytvosapi/blob/eca601117c9f86bad084cb91f1d918e9ab665a75/PythonAPI/ytvostools/ytvoseval.py#L538
            phraseApEval.params.areaRng = [
                [0**2, 1e5**2],
                [0**2, 128**2],
                [128**2, 256**2],
                [256**2, 1e5**2],
            ]
            phraseApEval.params.areaRngLbl = ["all", "small", "medium", "large"]
            phraseApEval.params.useCats = use_cats
            phraseApEval.evaluate()
            phraseApEval.accumulate()
            phraseApEval.summarize()
            result_prefix = f"{self.dataset_name}"
            result_prefix += f"_{'mask' if iou_type == 'segm' else 'bbox'}_phrase_ap"
            # fetch Phrase AP results from the corresponding indices in `phraseApEval.stats`
            # (see `_summarizeDets` in https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/cocoeval.py)
            results[result_prefix + "_50_95"] = phraseApEval.stats[0]  # IoU=0.5:0.95
            results[result_prefix + "_50"] = phraseApEval.stats[1]  # IoU=0.5
            results[result_prefix + "_75"] = phraseApEval.stats[2]  # IoU=0.75
        # video-NP level results not supported for `VideoPhraseApEvaluator` yet
        video_np_level_results = {}
        return results, video_np_level_results


class VideoCGF1Evaluator(BasePredFileEvaluator):
    """Evaluate Video Demo F1 with YT-VIS format prediction and GT files."""

    def __init__(
        self,
        gt_ann_file: str,
        dataset_name: str = "video",
        prob_thresh: float = 0.5,
        iou_types: Optional[Sequence[str]] = None,
    ):
        self.gt_ann_file = gt_ann_file
        self.dataset_name = dataset_name
        self.prob_thresh = prob_thresh
        self.iou_types = list(iou_types) if iou_types is not None else ["bbox", "segm"]
        assert all(iou_type in ["bbox", "segm"] for iou_type in self.iou_types)

    def evaluate(self, pred_file: str) -> Tuple[Dict[str, float], Dict]:
        with open(self.gt_ann_file) as f:
            gt = json.load(f)
        with open(pred_file) as f:
            dt = json.load(f)
        # IL_MCC and CG-F1 can only be computed if the GT JSON has a "video_np_pairs" key
        compute_ilmcc_and_cgf1 = "video_np_pairs" in gt
        if not compute_ilmcc_and_cgf1:
            print(
                f"Warning: IL_MCC and CG-F1 are not computed for {pred_file=} as the GT JSON does not have a 'video_np_pairs' key"
            )
        # For phrase AP and demo F1 evaluation, we need to remap each pair of (video_id, category_id) to
        # a new unique video_id, so that we don't mix detections from different categories under `useCat=False`
        gt, dt = remap_video_category_pairs_to_unique_video_ids(
            gt, dt, add_negative_np_pairs=compute_ilmcc_and_cgf1
        )
        if "segm" in self.iou_types:
            for ann in gt["annotations"]:
                ann["segmentations"] = [
                    _compress_rle(rle) for rle in ann["segmentations"]
                ]
        for d in dt:
            d["image_id"] = d["video_id"]
        results = {}
        use_cats = False  # Demo F1 evaluation does not use categories
        ytvisGT = YTVIS(annotation_file=None, ignore_gt_cats=not use_cats)
        ytvisGT.dataset = gt
        ytvisGT.createIndex()
        ytvisDT = ytvisGT.loadRes(dt)
        video_np_level_results = {}
        for iou_type in self.iou_types:
            demoF1Eval = VideoDemoF1Eval(ytvisGT, ytvisDT, iou_type, self.prob_thresh)
            demoF1Eval.params.useCats = use_cats
            demoF1Eval.params.areaRng = [[0**2, 1e5**2]]
            demoF1Eval.params.areaRngLbl = ["all"]
            demoF1Eval.params.maxDets = [100000]
            demoF1Eval.evaluate()
            demoF1Eval.accumulate()
            demoF1Eval.summarize()
            result_prefix = f"{self.dataset_name}"
            result_prefix += f"_{'mask' if iou_type == 'segm' else 'bbox'}_demo"
            stats = demoF1Eval.stats
            if compute_ilmcc_and_cgf1:
                # Average IoU threshold (0.5:0.95)
                cgf1_micro_avg_idx = _get_metric_index("cgF1", None)
                positive_micro_f1_avg_idx = _get_metric_index("positive_micro_F1", None)
                ilmcc_avg_idx = _get_metric_index("IL_MCC", None)
                results[result_prefix + "_cgf1_micro_50_95"] = stats[cgf1_micro_avg_idx]
                results[result_prefix + "_ilmcc_50_95"] = stats[ilmcc_avg_idx]
                results[result_prefix + "_positive_micro_f1_50_95"] = stats[
                    positive_micro_f1_avg_idx
                ]
                # IoU = 0.5
                cgf1_micro_50_idx = _get_metric_index("cgF1", 0.5)
                positive_micro_f1_50_idx = _get_metric_index("positive_micro_F1", 0.5)
                results[result_prefix + "_cgf1_micro_50"] = stats[cgf1_micro_50_idx]
                results[result_prefix + "_ilmcc_50"] = float(
                    np.array(stats[cgf1_micro_50_idx])
                    / np.array(stats[positive_micro_f1_50_idx])
                )
                results[result_prefix + "_positive_micro_f1_50"] = stats[
                    positive_micro_f1_50_idx
                ]
                # IoU = 0.75
                cgf1_micro_75_idx = _get_metric_index("cgF1", 0.75)
                positive_micro_f1_75_idx = _get_metric_index("positive_micro_F1", 0.75)
                results[result_prefix + "_cgf1_micro_75"] = stats[cgf1_micro_75_idx]
                results[result_prefix + "_ilmcc_75"] = float(
                    np.array(stats[cgf1_micro_75_idx])
                    / np.array(stats[positive_micro_f1_75_idx])
                )
                results[result_prefix + "_positive_micro_f1_75"] = stats[
                    positive_micro_f1_75_idx
                ]
            self.extract_video_np_level_results(demoF1Eval, video_np_level_results)
        return results, video_np_level_results

    def extract_video_np_level_results(self, demoF1Eval, video_np_level_results):
        """Aggregate statistics for video-level metrics."""
        num_iou_thrs = len(demoF1Eval.params.iouThrs)
        iou_50_index = int(np.where(demoF1Eval.params.iouThrs == 0.5)[0])
        iou_75_index = int(np.where(demoF1Eval.params.iouThrs == 0.75)[0])
        result_prefix = "mask" if demoF1Eval.params.iouType == "segm" else "bbox"
        assert len(demoF1Eval.evalImgs) == len(demoF1Eval.cocoGt.dataset["images"])
        for i, video in enumerate(demoF1Eval.cocoGt.dataset["images"]):
            # the original video id and category id before remapping
            video_id = video["orig_video_id"]
            category_id = video["orig_category_id"]
            eval_img_dict = demoF1Eval.evalImgs[i]
            TPs = eval_img_dict.get("TPs", np.zeros(num_iou_thrs, dtype=np.int64))
            FPs = eval_img_dict.get("FPs", np.zeros(num_iou_thrs, dtype=np.int64))
            FNs = eval_img_dict.get("FNs", np.zeros(num_iou_thrs, dtype=np.int64))
            assert len(TPs) == len(FPs) == len(FNs) == num_iou_thrs
            # F1 = 2*TP / (2*TP + FP + FN), and we set F1 to 1.0 if the denominator is 0
            denominator = 2 * TPs + FPs + FNs
            F1s = np.where(denominator > 0, 2 * TPs / np.maximum(denominator, 1), 1.0)
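            # Worked example (illustrative numbers): with TPs=3, FPs=1, FNs=2 at a given IoU
            # threshold, F1 = 2*3 / (2*3 + 1 + 2) = 6/9 ≈ 0.667; when the denominator is 0
            # (no GT and no predictions), F1 is defined as 1.0 by the np.where above.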
            local_results = {
                f"{result_prefix}_TP_50_95": float(TPs.mean()),
                f"{result_prefix}_FP_50_95": float(FPs.mean()),
                f"{result_prefix}_FN_50_95": float(FNs.mean()),
                f"{result_prefix}_F1_50_95": float(F1s.mean()),
                f"{result_prefix}_TP_50": float(TPs[iou_50_index]),
                f"{result_prefix}_FP_50": float(FPs[iou_50_index]),
                f"{result_prefix}_FN_50": float(FNs[iou_50_index]),
                f"{result_prefix}_F1_50": float(F1s[iou_50_index]),
                f"{result_prefix}_TP_75": float(TPs[iou_75_index]),
                f"{result_prefix}_FP_75": float(FPs[iou_75_index]),
                f"{result_prefix}_FN_75": float(FNs[iou_75_index]),
                f"{result_prefix}_F1_75": float(F1s[iou_75_index]),
            }
            if (video_id, category_id) not in video_np_level_results:
                video_np_level_results[(video_id, category_id)] = {}
            video_np_level_results[(video_id, category_id)].update(local_results)


class VideoTetaEvaluator(BasePredFileEvaluator):
    """Evaluate TETA metric using YouTubeVIS format prediction and GT files."""

    def __init__(
        self,
        gt_ann_file: str,
        dataset_name: str = "video",
        tracker_name: str = "Sam3",
        nms_threshold: float = 0.5,
        nms_strategy: str = "none",  # "track", "frame", or "none"
        prob_thresh: float = 0.5,
        is_exhaustive: bool = False,
        use_mask: bool = False,
        num_parallel_cores: int = 8,
    ):
        self.gt_ann_file = gt_ann_file
        self.dataset_name = dataset_name
        self.tracker_name = tracker_name
        self.nms_threshold = nms_threshold
        self.nms_strategy = nms_strategy.lower()  # convert to lowercase for consistency
        self.prob_thresh = prob_thresh
        self.metric_prefix = "TETA"
        self.is_exhaustive = is_exhaustive
        self.use_mask = use_mask
        self.num_parallel_cores = num_parallel_cores
        # Verify that the NMS strategy is valid
        valid_strategies = ["track", "frame", "none"]
        print("current nms_strategy:", self.nms_strategy)
        if self.nms_strategy not in valid_strategies:
            raise ValueError(
                f"Invalid NMS strategy: {self.nms_strategy}. Must be one of {valid_strategies}"
            )
        print(f"Initialized VideoTetaEvaluator with NMS strategy: {self.nms_strategy}")
        print(f"Probability threshold set to: {self.prob_thresh}")
        print(f"Dataset exhaustivity set to: {self.is_exhaustive}")
        print(f"Tracker name set to: {self.tracker_name}")
        print(f"Dataset name set to: {self.dataset_name}")
        print(f"Use mask set to: {self.use_mask}")

    def process_predictions(self, pred_file: str, tmp_dir: str) -> str:
        """Process predictions with the selected NMS strategy."""
        with open(pred_file, "r") as f:
            raw_preds = json.load(f)
        print(f"Processing predictions with {self.nms_strategy} NMS strategy")
        # Filter by score threshold
        if self.prob_thresh > 0:
            raw_preds = [d for d in raw_preds if d["score"] >= self.prob_thresh]
            print(
                f"Filtered to {len(raw_preds)} predictions with score >= {self.prob_thresh}"
            )
        # Group predictions by video_id
        video_groups = defaultdict(list)
        for pred in raw_preds:
            video_groups[pred["video_id"]].append(pred)
        # Process based on NMS strategy
        if self.nms_strategy == "track":
            process_track_level_nms(video_groups, nms_threshold=self.nms_threshold)
        elif self.nms_strategy == "frame":
            process_frame_level_nms(video_groups, nms_threshold=self.nms_threshold)
        elif self.nms_strategy == "none":
            print("Skipping NMS processing as strategy is set to 'none'")
            # No processing needed for the "none" strategy
        # Save processed predictions
        processed_preds = [
            track for tracks in video_groups.values() for track in tracks
        ]
        processed_path = os.path.join(tmp_dir, "processed_preds.json")
        with open(processed_path, "w") as f:
            json.dump(processed_preds, f)
        print(f"Saved processed predictions to {processed_path}")
        return processed_path

    def evaluate(self, pred_file: str) -> Tuple[Dict[str, float], Dict]:
        """Main evaluation method."""
        print(f"Evaluating TETA Metric with {self.nms_strategy.upper()} NMS strategy")
        with tempfile.TemporaryDirectory() as tmp_dir:
            # Process predictions first
            processed_pred_file = self.process_predictions(pred_file, tmp_dir)
            # Convert GT to COCO-vid format
            gt_dir = os.path.join(tmp_dir, "gt")
            os.makedirs(gt_dir, exist_ok=True)
            gt_coco_path = os.path.join(gt_dir, "annotations.json")
            convert_ytbvis_to_cocovid_gt(self.gt_ann_file, gt_coco_path)
            # Convert processed predictions to COCO-vid format
            pred_dir = os.path.join(tmp_dir, "predictions")
            tracker_dir = os.path.join(pred_dir, self.tracker_name)
            os.makedirs(tracker_dir, exist_ok=True)
            pred_coco_path = os.path.join(tracker_dir, "track_results_cocofmt.json")
            convert_ytbvis_to_cocovid_pred(
                youtubevis_pred_path=processed_pred_file,
                converted_dataset_path=gt_coco_path,
                output_path=pred_coco_path,
            )
            # Configure the TETA evaluator
            default_eval_config = config.get_default_eval_config()
            default_eval_config["PRINT_ONLY_COMBINED"] = True
            default_eval_config["DISPLAY_LESS_PROGRESS"] = True
            default_eval_config["OUTPUT_TEMP_RAW_DATA"] = True
            default_eval_config["NUM_PARALLEL_CORES"] = self.num_parallel_cores
            default_dataset_config = config.get_default_dataset_config()
            default_dataset_config["TRACKERS_TO_EVAL"] = [self.tracker_name]
            default_dataset_config["GT_FOLDER"] = gt_dir
            default_dataset_config["OUTPUT_FOLDER"] = pred_dir
            default_dataset_config["TRACKER_SUB_FOLDER"] = tracker_dir
            default_dataset_config["USE_MASK"] = self.use_mask
            evaluator = Evaluator(default_eval_config)
            if self.is_exhaustive:
                dataset_list = [COCO(default_dataset_config)]
                dataset_parsing_key = "COCO"
            else:
                dataset_list = [TAO(default_dataset_config)]
                dataset_parsing_key = "TAO"
            # Run evaluation
            eval_results, _ = evaluator.evaluate(
                dataset_list, [metrics.TETA(exhaustive=self.is_exhaustive)]
            )
            # Extract and format results: entries 0-9 of the TETA score list correspond to
            # teta, loc_a, assoc_a, cls_a, loc_re, loc_pr, assoc_re, assoc_pr, cls_re, cls_pr
            teta_scores = eval_results[dataset_parsing_key]["TETA"]
            box_or_mask = "mask" if self.use_mask else "bbox"
            metric_names = [
                "teta",
                "loc_a",
                "assoc_a",
                "cls_a",
                "loc_re",
                "loc_pr",
                "assoc_re",
                "assoc_pr",
                "cls_re",
                "cls_pr",
            ]
            results = {
                f"{self.dataset_name}_{box_or_mask}_{name}": float(teta_scores[idx])
                for idx, name in enumerate(metric_names)
            }
            # video-NP level results not supported for `VideoTetaEvaluator` yet
            video_np_level_results = {}
            return results, video_np_level_results


class VideoPhraseHotaEvaluator(BasePredFileEvaluator):
    """Evaluate Video Phrase HOTA with YT-VIS format prediction and GT files."""

    def __init__(
        self,
        gt_ann_file: str,
        dataset_name: str = "video",
        prob_thresh: float = 0.5,
        iou_types: Optional[Sequence[str]] = None,
        compute_video_mot_hota: bool = False,
    ):
        self.gt_ann_file = gt_ann_file
        self.dataset_name = dataset_name
        self.prob_thresh = prob_thresh
        self.metric_prefix = "phrase"
        # the list of metrics to collect from the HOTA evaluation results
        self.metric_to_collect = [
            "HOTA",
            "DetA",
            "AssA",
            "DetRe",
            "DetPr",
            "AssRe",
            "AssPr",
            "LocA",
            "OWTA",
        ]
        self.iou_types = list(iou_types) if iou_types is not None else ["bbox", "segm"]
        assert all(iou_type in ["bbox", "segm"] for iou_type in self.iou_types)
        # If True, compute video MOT HOTA, aggregating predictions/GT from all categories.
        self.compute_video_mot_hota = compute_video_mot_hota

    def evaluate(self, pred_file: str) -> Tuple[Dict[str, float], Dict]:
        # use the YT-VIS evaluation toolkit in TrackEval
        with open(self.gt_ann_file) as f:
            gt = json.load(f)
        with open(pred_file) as f:
            dt = json.load(f)
        # keep only predictions with score above the probability threshold
        dt = [d for d in dt if d["score"] > self.prob_thresh]
        for d in dt:
            assert len(d["areas"]) == len(d["bboxes"])
            assert len(d["areas"]) == len(d["segmentations"])
            # remove empty boxes (otherwise they would count as false positives when
            # computing per-frame detection accuracy in the HOTA evaluation)
            for t in range(len(d["bboxes"])):
                bbox = d["bboxes"][t]
                if d["areas"][t] == 0 or bbox is None or all(x == 0 for x in bbox):
                    d["segmentations"][t] = None
                    d["bboxes"][t] = None
                    d["areas"][t] = None
            # check that box occurrence and mask occurrence are consistent
            for bbox, mask, area in zip(d["bboxes"], d["segmentations"], d["areas"]):
                assert (area is None) == (bbox is None)
                assert (area is None) == (mask is None)
            # set all scores to 1.0 for HOTA evaluation (just like Demo F1, the exact score
            # value is not used in HOTA metrics; a prediction counts as a detection
            # as long as its score is above the threshold)
            d["score"] = 1.0
        # fill in missing height/width in the GT annotations from the video info
        gt = _fill_in_ann_height_width(gt)
        if not self.compute_video_mot_hota:
            # remap the GT and DT annotations for phrase HOTA evaluation
            gt, dt = self._remap_gt_dt(gt, dt)
        else:
            # Compute video-level MOT HOTA
            # Apply track-level NMS
            video_groups = defaultdict(list)
            for pred in dt:
                video_groups[pred["video_id"]].append(pred)
            process_track_level_nms(video_groups, nms_threshold=0.5)
            dt = [track for tracks in video_groups.values() for track in tracks]
            # Remap GT track ids for class-agnostic HOTA
            gt, dt = remap_gt_dt_class_agnostic(gt, dt)
        # run the HOTA evaluation using TrackEval on the remapped (video_id, category_id) pairs
        out_dict = {}
        video_np_level_results = {}
        for iou_type in self.iou_types:
            output_res, _ = run_ytvis_eval(
                args=[
                    "--METRICS",
                    "HOTA",
                    "--IOU_TYPE",
                    iou_type,
                    "--DATASET_NAME",
                    self.dataset_name,
                    "--USE_PARALLEL",
                    "True",
                    "--NUM_PARALLEL_CORES",
                    "8",
                    "--PLOT_CURVES",
                    "False",
                    "--LOG_ON_ERROR",
                    "None",
                    "--PRINT_ONLY_COMBINED",
                    "True",
                    "--OUTPUT_SUMMARY",
                    "False",
                    "--OUTPUT_DETAILED",
                    "False",
                    "--TIME_PROGRESS",
                    "False",
                    "--PRINT_CONFIG",
                    "False",
                ],
                gt_json=gt,
                dt_json=dt,
            )
            self.extract_video_np_level_results(
                iou_type=iou_type,
                remapped_gt=gt,
                raw_results=output_res[self.dataset_name]["tracker"],
                video_np_level_results=video_np_level_results,
            )

            def _summarize_results(output_res, iou_type, field, suffix):
                eval_res = output_res[self.dataset_name]["tracker"][field]
                result_prefix = f"{self.dataset_name}_{'mask' if iou_type == 'segm' else 'bbox'}_{suffix}"
                for metric_name in self.metric_to_collect:
                    eval_res_hota = eval_res["cls_comb_cls_av"]["HOTA"]
                    result_key = f"{result_prefix}_{self.metric_prefix}_{metric_name}"
                    result_value = float(np.mean(eval_res_hota[metric_name]))
                    out_dict[result_key] = result_value

            _summarize_results(output_res, iou_type, "COMBINED_SEQ", "all")
            if "COMBINED_SEQ_CHALLENGING" in output_res[self.dataset_name]["tracker"]:
                _summarize_results(
                    output_res, iou_type, "COMBINED_SEQ_CHALLENGING", "challenging"
                )
        return out_dict, video_np_level_results

    def _remap_gt_dt(self, gt, dt):
        # For phrase HOTA evaluation, we need to remap each pair of (video_id, category_id) to
        # a new unique video_id, so that we don't mix detections from different categories
        gt, dt = remap_video_category_pairs_to_unique_video_ids(gt, dt)
        # We further map all the categories to category_id=1 in the HOTA evaluation toolkit
        # for phrase HOTA (similar to "useCat=False" for video phrase AP)
        remapped_category_id = 1
        gt["categories"] = [
            {
                "supercategory": "object",
                "id": remapped_category_id,
                "name": "_REMAPPED_FOR_PHRASE_METRICS_",
            }
        ]
        for ann in gt["annotations"]:
            ann["category_id"] = remapped_category_id
        for d in dt:
            d["category_id"] = remapped_category_id
        # To be compatible with the TrackEval YT-VIS evaluation toolkit, we need to give
        # unique filenames to each remapped video, so we add the remapped video_id as a prefix.
        for video in gt["videos"]:
            new_video_id = video["id"]
            video["file_names"] = [
                f"remapped_vid_{new_video_id:012d}/{name}"
                for name in video["file_names"]
            ]
        return gt, dt

    def extract_video_np_level_results(
        self, iou_type, remapped_gt, raw_results, video_np_level_results
    ):
        """Aggregate statistics for video-level metrics."""
        result_prefix = "mask" if iou_type == "segm" else "bbox"
        for video in remapped_gt["videos"]:
            # the original video id and category id before remapping
            video_id = video["orig_video_id"]
            category_id = video["orig_category_id"]
            video_key = f"remapped_vid_{video['id']:012d}"
            results = raw_results[video_key]["_REMAPPED_FOR_PHRASE_METRICS_"]["HOTA"]
            local_results = {}
            for metric_name in self.metric_to_collect:
                result_key = f"{result_prefix}_{metric_name}"
                local_results[result_key] = float(results[metric_name].mean())
            if (video_id, category_id) not in video_np_level_results:
                video_np_level_results[(video_id, category_id)] = {}
            video_np_level_results[(video_id, category_id)].update(local_results)


class VideoClassBasedHotaEvaluator(VideoPhraseHotaEvaluator):
    def __init__(
        self,
        gt_ann_file: str,
        dataset_name: str = "video",
        prob_thresh: float = 0.5,
    ):
        super().__init__(gt_ann_file, dataset_name, prob_thresh)
        self.metric_prefix = "class"

    def _remap_gt_dt(self, gt, dt):
        return gt, dt  # no remapping needed for class-based HOTA evaluation

    def extract_video_np_level_results(self, *args, **kwargs):
        pass  # no video-NP level results for class-based HOTA evaluation


def _compress_rle(rle):
    """Convert RLEs from uncompressed (integer list) to compressed (string) format."""
    if rle is None:
        return None
    if isinstance(rle["counts"], list):
        rle = pycocotools.mask.frPyObjects(rle, rle["size"][0], rle["size"][1])
        rle["counts"] = rle["counts"].decode()
    return rle
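

# Illustrative input/output (values are made up for the example): an uncompressed RLE such as
# {"size": [2, 2], "counts": [1, 2, 1]} is re-encoded by pycocotools into a compressed RLE whose
# "counts" field is a short ASCII string, which is the format the evaluation toolkits above expect.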


def remap_video_category_pairs_to_unique_video_ids(
    gt_json, dt_json, add_negative_np_pairs=False
):
    """
    Remap each pair of (video_id, category_id) to a new unique video_id. This is useful
    for phrase AP and demo F1 evaluation on videos, where we have `useCat=False` and
    rely on separating different NPs (from the same video) into different new video ids,
    so that we don't mix detections from different categories in computeIoU under `useCat=False`.

    This is consistent with how we do phrase AP and demo F1 evaluation on images, where we
    use a remapped unique coco_image_id for each image-NP pair (based on its query["id"] in
    CustomCocoDetectionAPI.load_queries in modulated_detection_api.py).
    """
    # collect the unique video_id-category_id pairs
    video_id_to_video = {v["id"]: v for v in gt_json["videos"]}
    video_id_category_id_pairs = set()
    for pred in dt_json:
        video_id_category_id_pairs.add((pred["video_id"], pred["category_id"]))
    for ann in gt_json["annotations"]:
        video_id_category_id_pairs.add((ann["video_id"], ann["category_id"]))
    # assign the video_id-category_id pairs to unique video ids
    video_id_category_id_pairs = sorted(video_id_category_id_pairs)
    video_id_category_id_to_new_video_id = {
        pair: (i + 1) for i, pair in enumerate(video_id_category_id_pairs)
    }
    # also map the negative NP pairs -- this is needed for IL_MCC and CG-F1 evaluation
    if add_negative_np_pairs:
        for vnp in gt_json["video_np_pairs"]:
            pair = (vnp["video_id"], vnp["category_id"])
            if pair not in video_id_category_id_to_new_video_id:
                video_id_category_id_to_new_video_id[pair] = (
                    len(video_id_category_id_to_new_video_id) + 1
                )
    # map the "video_id" in predictions
    for pred in dt_json:
        pred["video_id"] = video_id_category_id_to_new_video_id[
            (pred["video_id"], pred["category_id"])
        ]
    # map the "video_id" in gt_json["annotations"]
    for ann in gt_json["annotations"]:
        ann["video_id"] = video_id_category_id_to_new_video_id[
            (ann["video_id"], ann["category_id"])
        ]
    # map and duplicate gt_json["videos"]
    new_videos = []
    for (
        video_id,
        category_id,
    ), new_video_id in video_id_category_id_to_new_video_id.items():
        video = video_id_to_video[video_id].copy()
        video["id"] = new_video_id
        # preserve the original video_id and category_id of each remapped video entry,
        # so that we can associate sample-level eval metrics with the original video-NP pairs
        video["orig_video_id"] = video_id
        video["orig_category_id"] = category_id
        new_videos.append(video)
    gt_json["videos"] = new_videos
    return gt_json, dt_json
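

# A small illustration of the remapping (hypothetical ids): if video 7 has GT or predictions for
# categories 3 and 5, the pairs (7, 3) and (7, 5) become two separate entries in gt_json["videos"]
# with new ids 1 and 2, each carrying orig_video_id=7 and orig_category_id=3 or 5 respectively.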


def remap_gt_dt_class_agnostic(gt, dt):
    """
    For class-agnostic HOTA, merge all GT tracks for each video (across NPs),
    ensure unique track_ids, and set all category_id to 1.
    Also, add orig_video_id and orig_category_id for compatibility.
    """
    # 1. Remap all GT track_ids to be unique per video
    gt_anns_by_video = defaultdict(list)
    for ann in gt["annotations"]:
        gt_anns_by_video[ann["video_id"]].append(ann)
    # Ensure unique track ids across tracks of all videos
    next_tid = 1
    for _, anns in gt_anns_by_video.items():
        # Map old track_ids to new unique ones
        old_to_new_tid = {}
        for ann in anns:
            old_tid = ann["id"]
            if old_tid not in old_to_new_tid:
                old_to_new_tid[old_tid] = next_tid
                next_tid += 1
            ann["id"] = old_to_new_tid[old_tid]
            # Set category_id to 1 for class-agnostic
            ann["category_id"] = 1
    # Set all GT categories to a single category
    gt["categories"] = [
        {
            "supercategory": "object",
            "id": 1,
            "name": "_REMAPPED_FOR_PHRASE_METRICS_",
        }
    ]
    # Add orig_video_id and orig_category_id to each video for compatibility
    anns_by_video = defaultdict(list)
    for ann in gt["annotations"]:
        anns_by_video[ann["video_id"]].append(ann)
    for video in gt["videos"]:
        video["orig_video_id"] = video["id"]
        # Use the first annotation's original category_id if available, else None
        orig_cat = (
            anns_by_video[video["id"]][0]["category_id"]
            if anns_by_video[video["id"]]
            else None
        )
        video["orig_category_id"] = orig_cat
        video["file_names"] = [
            f"remapped_vid_{video['id']:012d}/{name}" for name in video["file_names"]
        ]
    # Set all DT category_id to 1
    for d in dt:
        d["category_id"] = 1
    return gt, dt
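

# Illustration of the class-agnostic track id remapping (hypothetical ids): if video 1 has GT
# tracks with ids {4, 9} and video 2 has a GT track with id 4, the remapped ids become 1 and 2
# for video 1 and 3 for video 2, so track ids end up unique across all videos.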


def _fill_in_ann_height_width(gt_json):
    """Fill in missing height/width in GT annotations from the corresponding video info."""
    video_id_to_video = {v["id"]: v for v in gt_json["videos"]}
    for ann in gt_json["annotations"]:
        if "height" not in ann or "width" not in ann:
            video = video_id_to_video[ann["video_id"]]
            if "height" not in ann:
                ann["height"] = video["height"]
            if "width" not in ann:
                ann["width"] = video["width"]
    return gt_json
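

# Minimal usage sketch (paths below are placeholders, not real files): a prediction JSON in
# YT-VIS format is evaluated against a GT JSON by instantiating one of the evaluators above
# and calling `evaluate`, e.g.
#
#   evaluator = VideoCGF1Evaluator(gt_ann_file="/path/to/gt.json")
#   results, video_np_level_results = evaluator.evaluate("/path/to/predictions.json")
#
# `results` holds dataset-level metrics keyed by name, and `video_np_level_results` is keyed
# by the original (video_id, category_id) pairs.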