"""
Computes COCO-style evaluation metrics (Average Precision and Average Recall)
for a set of predictions against a COCO-format ground truth, using pycocotools.
"""

import json

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

# Configuration: input/output paths and the IoU type to evaluate.
GROUND_TRUTH_JSON = "ground_truth.json"
PREDICTIONS_JSON = "predictions.json"
IOU_TYPE = "segm"  # "segm", "bbox", or "keypoints"
OUTPUT_PATH = "results_ap.json"


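# Each prediction entry is expected to follow the standard COCO "results"
# format: a dict with "image_id", "category_id", "score", and either a
# "bbox" ([x, y, width, height]) or a "segmentation" (polygon / RLE),
# matching the IoU type being evaluated. Illustrative example of one entry:
#
#   {"image_id": 1, "category_id": 3, "score": 0.91, "bbox": [10.0, 20.0, 50.0, 40.0]}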
def _load_predictions_for_coco(gt_coco: COCO, predictions_json_path: str):
    """
    Loads predictions into COCO's result format.

    Args:
        gt_coco (COCO): COCO object initialized with ground-truth annotations.
        predictions_json_path (str): Path to the predictions JSON file.

    Returns:
        COCO: A COCO results object that can be passed to COCOeval.
    """
    with open(predictions_json_path, "r") as f:
        data = json.load(f)

    # Accept either a bare list of result dicts or a full COCO-style dict
    # with an "annotations" section.
    if isinstance(data, list):
        anns = data
    elif isinstance(data, dict) and "annotations" in data:
        anns = data["annotations"]
    else:
        raise ValueError("Predictions must be a list or a dict with an 'annotations' key.")

    # COCOeval needs a confidence score per prediction; default to 1.0 if missing.
    for ann in anns:
        if "score" not in ann:
            ann["score"] = 1.0

    return gt_coco.loadRes(anns)


def compute_ap_map(ground_truth_json: str, predictions_json: str, iou_type: str = "segm"):
    """
    Computes COCO-style AP/mAP and AR metrics.

    Args:
        ground_truth_json (str): Path to the COCO-format ground-truth file.
        predictions_json (str): Path to the predictions file.
        iou_type (str): Type of evaluation ("segm", "bbox", or "keypoints").

    Returns:
        dict: Dictionary containing AP and AR values across IoU thresholds,
            object sizes, and maximum detection counts.
    """
    gt_coco = COCO(ground_truth_json)
    pred_coco = _load_predictions_for_coco(gt_coco, predictions_json)

    # Run the standard COCO evaluation pipeline (summarize() prints the table).
    coco_eval = COCOeval(gt_coco, pred_coco, iou_type)
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()

    # Map COCOeval.stats (in summarize() order) to named metrics.
    stats = coco_eval.stats
    results = {
        "AP[0.50:0.95]": float(stats[0]),
        "AP@0.50": float(stats[1]),
        "AP@0.75": float(stats[2]),
        "AP_small": float(stats[3]),
        "AP_medium": float(stats[4]),
        "AP_large": float(stats[5]),
        "AR@1": float(stats[6]),
        "AR@10": float(stats[7]),
        "AR@100": float(stats[8]),
        "AR_small": float(stats[9]),
        "AR_medium": float(stats[10]),
        "AR_large": float(stats[11]),
    }
    return results
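
# Note: the 12-element stats layout above matches COCOeval.summarize() for
# "bbox" and "segm" evaluation; "keypoints" reports a shorter summary, so the
# mapping in compute_ap_map would need to be adapted for that IoU type.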


if __name__ == "__main__":
    scores = compute_ap_map(GROUND_TRUTH_JSON, PREDICTIONS_JSON, IOU_TYPE)

    # Persist the metrics as JSON when an output path is configured.
    if OUTPUT_PATH:
        with open(OUTPUT_PATH, "w") as f:
            json.dump(scores, f, indent=2)