transformers / test_tokenization_bert.py
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import os
import unittest

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    BertTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google-bert/bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
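
The behavior exercised by setUp and test_full_tokenizer above can be reproduced outside the test harness with the same tiny vocabulary. A minimal standalone sketch, assuming only that transformers is installed (the temporary directory and vocab file name are illustrative, not part of the test suite):

import os
import tempfile

from transformers.models.bert.tokenization_bert import BertTokenizer

# Same toy vocabulary that setUp writes to disk for the tests.
vocab_tokens = [
    "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]",
    "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest",
]

with tempfile.TemporaryDirectory() as tmpdir:
    vocab_file = os.path.join(tmpdir, "vocab.txt")
    with open(vocab_file, "w", encoding="utf-8") as f:
        f.write("\n".join(vocab_tokens) + "\n")

    # Default BertTokenizer lowercases and strips accents, so "UNwantéd"
    # becomes "unwanted" before WordPiece splits it against the vocab.
    tokenizer = BertTokenizer(vocab_file)
    print(tokenizer.tokenize("UNwant\u00E9d,running"))
    # expected, as asserted in test_full_tokenizer:
    # ['un', '##want', '##ed', ',', 'runn', '##ing']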