# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Post-processing utilities for question answering.
"""
import json
import re
import string
from collections import Counter
from statistics import mean

from utils import ensure_file_path


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r'\b(a|an|the)\b', ' ', text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    """Compute token-level F1 between a prediction and a single ground truth."""
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    """Return True if the normalized prediction equals the normalized ground truth."""
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Take the best metric value over all acceptable ground-truth answers."""
    scores_for_ground_truths = []
    for ground_truth in ground_truths:
        score = metric_fn(prediction, ground_truth)
        scores_for_ground_truths.append(score)
    return max(scores_for_ground_truths)


def calc_score(gold_answers, predictions):
    """Compute aggregated and per-example EM/F1 for parallel lists of gold answers and predictions."""
    em_lst = []
    f1_lst = []
    for ground_truths, prediction in zip(gold_answers, predictions):
        em_lst.append(metric_max_over_ground_truths(exact_match_score, prediction, ground_truths))
        f1_lst.append(metric_max_over_ground_truths(f1_score, prediction, ground_truths))
    aggregated_em = mean(em_lst)
    aggregated_f1 = mean(f1_lst)
    return aggregated_em, aggregated_f1, em_lst, f1_lst


def output_pred(predictions, references, em_lst, f1_lst, output_pred_path, header_dict=None):
    """Write per-example predictions, references and scores to a JSON-lines file."""
    assert len(predictions) == len(references) == len(em_lst) == len(f1_lst)
    ensure_file_path(output_pred_path)
    with open(output_pred_path, 'w', encoding='utf-8') as f:
        if header_dict:
            f.write(json.dumps(header_dict) + '\n')
        for pred, ref_lst, em, f1 in zip(predictions, references, em_lst, f1_lst):
            output_dict = {
                'pred': pred,
                'ref_lst': ref_lst,
                'em': em,
                'f1': f1
            }
            f.write(json.dumps(output_dict) + '\n')
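

# Illustrative usage sketch (not part of the original module): the gold answers,
# predictions and output path below are made-up examples, shown only to demonstrate
# how calc_score and output_pred are intended to be called together.
if __name__ == "__main__":
    gold_answers = [["Paris", "paris, france"], ["1969"]]   # hypothetical references
    predictions = ["Paris", "in 1969"]                      # hypothetical model outputs
    em, f1, em_lst, f1_lst = calc_score(gold_answers, predictions)
    print(f"EM={em:.3f}  F1={f1:.3f}")
    # Writes one JSON line per example, preceded by an optional header line.
    output_pred(predictions, gold_answers, em_lst, f1_lst,
                'predictions.jsonl', header_dict={'em': em, 'f1': f1})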