# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import itertools
import json
import os
import unittest

from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

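    # With the toy merges written in setUp, "lower newer" segments as l o w er Ġ n e w er,
    # where "Ġ" (\u0120) is the byte-level BPE encoding of a space.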
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("FacebookAI/roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

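    # RoBERTa's byte-level BPE encodes a leading space as "Ġ" (U+0120); `add_prefix_space`
    # controls whether such a space is prepended before tokenization, so the checks below
    # inspect the first character of the first decoded token.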
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should be 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should be 1 everywhere, so its average over the sequence length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

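    # With `trim_offsets=True` the post-processor excludes the leading space ("Ġ") from the
    # reported character offsets, while `trim_offsets=False` keeps it, which is why the second
    # token's span starts one character earlier in the non-trimmed cases below.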
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )