# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""seqeval metric."""

import importlib
from typing import List, Optional, Union

from seqeval.metrics import accuracy_score, classification_report

import datasets


_CITATION = """\
@inproceedings{ramshaw-marcus-1995-text,
    title = "Text Chunking using Transformation-Based Learning",
    author = "Ramshaw, Lance  and
      Marcus, Mitch",
    booktitle = "Third Workshop on Very Large Corpora",
    year = "1995",
    url = "https://www.aclweb.org/anthology/W95-0107",
}
@misc{seqeval,
  title={{seqeval}: A Python framework for sequence labeling evaluation},
  url={https://github.com/chakki-works/seqeval},
  note={Software available from https://github.com/chakki-works/seqeval},
  author={Hiroki Nakayama},
  year={2018},
}
"""

_DESCRIPTION = """\
seqeval is a Python framework for sequence labeling evaluation.
seqeval can evaluate the performance of chunking tasks such as named-entity recognition, part-of-speech tagging, semantic role labeling and so on.

It is well tested against the Perl script conlleval, which can be used for
measuring the performance of a system that has processed the CoNLL-2000 shared task data.

seqeval supports the following formats:
IOB1
IOB2
IOE1
IOE2
IOBES

See the README.md file at https://github.com/chakki-works/seqeval for more information.
"""

_KWARGS_DESCRIPTION = """
Produces labelling scores along with their sufficient statistics
from a source against one or more references.

Args:
    predictions: List of List of predicted labels (Estimated targets as returned by a tagger)
    references: List of List of reference labels (Ground truth (correct) target values)
    suffix: True if the IOB prefix is after type, False otherwise. default: False
    scheme: Specify target tagging scheme. Should be one of ["IOB1", "IOB2", "IOE1", "IOE2", "IOBES", "BILOU"].
        default: None
    mode: Whether to count correct entity labels with incorrect I/B tags as true positives or not.
        If you want to count only exact matches, pass mode="strict". default: None.
    sample_weight: Array-like of shape (n_samples,), weights for individual samples. default: None
    zero_division: Which value to substitute as a metric value when encountering zero division. Should be one of 0, 1,
        "warn". "warn" acts as 0, but a warning is also raised.

Returns:
    'scores': dict. Summary of the scores for overall and per type
        Overall:
            'accuracy': accuracy,
            'precision': precision,
            'recall': recall,
            'f1': F1 score, also known as balanced F-score or F-measure,
        Per type:
            'precision': precision,
            'recall': recall,
            'f1': F1 score, also known as balanced F-score or F-measure,
            'number': the number of occurrences of the type in the references
Examples:

    >>> predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
    >>> references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
    >>> seqeval = datasets.load_metric("seqeval")
    >>> results = seqeval.compute(predictions=predictions, references=references)
    >>> print(list(results.keys()))
    ['MISC', 'PER', 'overall_precision', 'overall_recall', 'overall_f1', 'overall_accuracy']
    >>> print(results["overall_f1"])
    0.5
    >>> print(results["PER"]["f1"])
    1.0
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Seqeval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/chakki-works/seqeval",
            inputs_description=_KWARGS_DESCRIPTION,
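            # Both inputs are batches of tag sequences: one list of string labels per example.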
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="label"), id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="label"), id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/chakki-works/seqeval"],
            reference_urls=["https://github.com/chakki-works/seqeval"],
        )

    def _compute(
        self,
        predictions,
        references,
        suffix: bool = False,
        scheme: Optional[str] = None,
        mode: Optional[str] = None,
        sample_weight: Optional[List[int]] = None,
        zero_division: Union[str, int] = "warn",
    ):
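        # Resolve a scheme given by name (e.g. "IOB2") to the matching class
        # in seqeval.scheme; an unknown name raises a ValueError below.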
        if scheme is not None:
            try:
                scheme_module = importlib.import_module("seqeval.scheme")
                scheme = getattr(scheme_module, scheme)
            except AttributeError:
                raise ValueError(f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {scheme}")
        report = classification_report(
            y_true=references,
            y_pred=predictions,
            suffix=suffix,
            output_dict=True,
            scheme=scheme,
            mode=mode,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
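        # Keep only the per-type entries plus the micro average; the macro and
        # weighted averages are not part of the returned summary.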
        report.pop("macro avg")
        report.pop("weighted avg")
        overall_score = report.pop("micro avg")

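        # Flatten seqeval's report into {type: {precision, recall, f1, number}},
        # renaming "f1-score"/"support" to the keys documented above.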
        scores = {
            type_name: {
                "precision": score["precision"],
                "recall": score["recall"],
                "f1": score["f1-score"],
                "number": score["support"],
            }
            for type_name, score in report.items()
        }
        scores["overall_precision"] = overall_score["precision"]
        scores["overall_recall"] = overall_score["recall"]
        scores["overall_f1"] = overall_score["f1-score"]
        scores["overall_accuracy"] = accuracy_score(y_true=references, y_pred=predictions)

        return scores
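

# ---------------------------------------------------------------------------
# Usage sketch, not part of the metric script itself: a minimal example that
# assumes this metric is loadable through the (now legacy) datasets.load_metric
# API; newer codebases expose the same metric via evaluate.load("seqeval").
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    seqeval_metric = datasets.load_metric("seqeval")
    predictions = [["O", "O", "B-MISC", "I-MISC", "I-MISC", "I-MISC", "O"], ["B-PER", "I-PER", "O"]]
    references = [["O", "O", "O", "B-MISC", "I-MISC", "I-MISC", "O"], ["B-PER", "I-PER", "O"]]
    # Default mode: seqeval's default (conlleval-style) entity matching.
    print(seqeval_metric.compute(predictions=predictions, references=references))
    # Strict mode with an explicit scheme counts only exact span-and-type matches.
    print(seqeval_metric.compute(predictions=predictions, references=references, mode="strict", scheme="IOB2"))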