evaluate.py 
92 lines · 3.2 KB
"""Official evaluation script for v1.1 of the SQuAD dataset."""

import argparse
import json
import re
import string
import sys
from collections import Counter


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    scores_for_ground_truths = []
    for ground_truth in ground_truths:
        score = metric_fn(prediction, ground_truth)
        scores_for_ground_truths.append(score)
    return max(scores_for_ground_truths)


def evaluate(dataset, predictions):
    f1 = exact_match = total = 0
    for article in dataset:
        for paragraph in article["paragraphs"]:
            for qa in paragraph["qas"]:
                total += 1
                if qa["id"] not in predictions:
                    message = "Unanswered question " + qa["id"] + " will receive score 0."
                    print(message, file=sys.stderr)
                    continue
                ground_truths = [x["text"] for x in qa["answers"]]
                prediction = predictions[qa["id"]]
                exact_match += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
                f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    exact_match = 100.0 * exact_match / total
    f1 = 100.0 * f1 / total

    return {"exact_match": exact_match, "f1": f1}


if __name__ == "__main__":
    expected_version = "1.1"
    parser = argparse.ArgumentParser(description="Evaluation for SQuAD " + expected_version)
    parser.add_argument("dataset_file", help="Dataset file")
    parser.add_argument("prediction_file", help="Prediction File")
    args = parser.parse_args()
    with open(args.dataset_file) as dataset_file:
        dataset_json = json.load(dataset_file)
        if dataset_json["version"] != expected_version:
            print(
                "Evaluation expects v-" + expected_version + ", but got dataset with v-" + dataset_json["version"],
                file=sys.stderr,
            )
        dataset = dataset_json["data"]
    with open(args.prediction_file) as prediction_file:
        predictions = json.load(prediction_file)
    print(json.dumps(evaluate(dataset, predictions)))

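A minimal usage sketch, assuming the file above is saved as evaluate.py and is importable; the CLI file names and the tiny dataset/prediction dicts below are invented for illustration and are not part of SQuAD:

# From the command line, the script takes two positional arguments
# (file names here are illustrative):
#   python evaluate.py dev-v1.1.json predictions.json
#
# Programmatic use of evaluate() with a hand-made, SQuAD-shaped example.
# Importing is safe because the CLI code is guarded by `if __name__ == "__main__":`.
from evaluate import evaluate

dataset = [
    {
        "paragraphs": [
            {
                "qas": [
                    {
                        "id": "q1",
                        "question": "What color is the sky?",
                        "answers": [{"text": "blue", "answer_start": 0}],
                    }
                ]
            }
        ]
    }
]
predictions = {"q1": "the blue"}

# normalize_answer() strips the article "the" from the prediction,
# so both metrics come out to 100.0 for this toy example.
print(evaluate(dataset, predictions))  # -> {'exact_match': 100.0, 'f1': 100.0}

Keys in the prediction file map question ids to answer strings, and the dataset follows the article/paragraph/qas structure of the SQuAD v1.1 "data" field; only the "text" field of each answer is used by the script.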