"""Official evaluation script for v1.1 of the SQuAD dataset."""

import argparse
import json
import re
import string
import sys
from collections import Counter

def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
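
# Quick sanity check (illustrative, not part of the official script):
#   normalize_answer("The Eiffel Tower!")  ->  "eiffel tower"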

def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    # No overlapping tokens: return 0 early to avoid dividing by zero.
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
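
# Worked example (illustrative): prediction "in the 1990s" vs. ground truth
# "the 1990s" normalize to ["in", "1990s"] and ["1990s"]. One token overlaps,
# so precision = 1/2, recall = 1/1, and F1 = 2 * 0.5 * 1.0 / 1.5 ≈ 0.667.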

def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
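
# Note that the comparison happens after normalization, so for example
# "The 1990s" and "1990s!" count as an exact match.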

def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    # Score the prediction against every reference answer and keep the best,
    # since matching any one of the ground truths counts as correct.
    scores_for_ground_truths = []
    for ground_truth in ground_truths:
        score = metric_fn(prediction, ground_truth)
        scores_for_ground_truths.append(score)
    return max(scores_for_ground_truths)
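
# Example (illustrative): with ground truths ["Denver Broncos", "Broncos"],
# the prediction "Broncos" scores exact_match 1 via the second reference,
# even though it only partially matches the first.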

def evaluate(dataset, predictions):
    f1 = exact_match = total = 0
    for article in dataset:
        for paragraph in article["paragraphs"]:
            for qa in paragraph["qas"]:
                total += 1
                if qa["id"] not in predictions:
                    message = "Unanswered question " + qa["id"] + " will receive score 0."
                    print(message, file=sys.stderr)
                    continue
                ground_truths = [x["text"] for x in qa["answers"]]
                prediction = predictions[qa["id"]]
                exact_match += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
                f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    exact_match = 100.0 * exact_match / total
    f1 = 100.0 * f1 / total

    return {"exact_match": exact_match, "f1": f1}

if __name__ == "__main__":
    expected_version = "1.1"
    parser = argparse.ArgumentParser(description="Evaluation for SQuAD " + expected_version)
    parser.add_argument("dataset_file", help="Dataset file")
    parser.add_argument("prediction_file", help="Prediction File")
    args = parser.parse_args()
    with open(args.dataset_file) as dataset_file:
        dataset_json = json.load(dataset_file)
        if dataset_json["version"] != expected_version:
            print(
                "Evaluation expects v-" + expected_version + ", but got dataset with v-" + dataset_json["version"],
                file=sys.stderr,
            )
        dataset = dataset_json["data"]
    with open(args.prediction_file) as prediction_file:
        predictions = json.load(prediction_file)
    print(json.dumps(evaluate(dataset, predictions)))
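
# Usage sketch (hypothetical file names; output numbers are illustrative):
#   $ python evaluate-v1.1.py dev-v1.1.json predictions.json
#   {"exact_match": 80.5, "f1": 88.2}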