From 489575e965972d1a3b0a028b768a8bb352177d9f Mon Sep 17 00:00:00 2001
From: Yazooliu <39296687+Yazooliu@users.noreply.github.com>
Date: Tue, 19 Dec 2023 16:05:15 +0800
Subject: [PATCH] Update evaluation.py MRR metrics

---
 research/DuReader-Retrieval-Baseline/metric/evaluation.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/research/DuReader-Retrieval-Baseline/metric/evaluation.py b/research/DuReader-Retrieval-Baseline/metric/evaluation.py
index 777b692..3d6e47b 100644
--- a/research/DuReader-Retrieval-Baseline/metric/evaluation.py
+++ b/research/DuReader-Retrieval-Baseline/metric/evaluation.py
@@ -4,6 +4,7 @@
 python msmarco_eval_ranking.py
 Creation Date : 06/12/2018
 Last Modified : 1/21/2019
+                12/19/2023 update for MRR metric
 Authors : Daniel Campos, Rutger van Haasteren
 """
 import sys
@@ -132,7 +133,7 @@ def compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passag
     if len(ranking) == 0:
         raise IOError("No matching QIDs found. Are you sure you are scoring the evaluation set?")
 
-    MRR = MRR / len(qids_to_relevant_passageids)
+    MRR = MRR / len(qids_to_ranked_candidate_passages)
     recall_top1 = len(recall_q_top1) * 1.0 / len(qids_to_relevant_passageids)
     recall_top50 = len(recall_q_top50) * 1.0 / len(qids_to_relevant_passageids)
     recall_all = len(recall_q_all) * 1.0 / len(qids_to_relevant_passageids)
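
The one-line fix above divides the accumulated reciprocal-rank sum by the number of queries that actually appear in the candidate ranking, rather than by the number of queries carrying relevance labels, so MRR is averaged only over queries that were scored. Below is a minimal, self-contained sketch of that distinction, not the repository's code: the dictionary names mirror the patched function's parameters, the MaxMRRRank = 10 cutoff follows the MS MARCO evaluation convention, and the toy data is invented for illustration.

    # Sketch only: illustrates why the MRR denominator matters when the
    # candidate file covers fewer queries than the reference file.
    MaxMRRRank = 10  # reciprocal rank is counted only within the top 10

    def mrr(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
        """Average reciprocal rank over the queries that were actually ranked."""
        total = 0.0
        for qid, candidates in qids_to_ranked_candidate_passages.items():
            relevant = set(qids_to_relevant_passageids.get(qid, ()))
            for rank, pid in enumerate(candidates[:MaxMRRRank], start=1):
                if pid in relevant:
                    total += 1.0 / rank
                    break
        # Patched behavior: average over ranked queries. Dividing by
        # len(qids_to_relevant_passageids) instead would silently deflate
        # MRR whenever some labeled queries are missing from the ranking.
        return total / len(qids_to_ranked_candidate_passages)

    # Toy example: 3 queries have relevance labels, but only 2 were ranked.
    relevant = {1: [11], 2: [22], 3: [33]}
    ranked = {1: [11, 12], 2: [21, 22]}
    print(mrr(relevant, ranked))  # (1/1 + 1/2) / 2 = 0.75

With the pre-patch denominator the same toy run would report 1.5 / 3 = 0.5, penalizing the ranking for queries it never attempted.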