transformers / test_tokenization_gpt2.py
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import json
import os
import unittest

from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_jinja, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


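# Shared tokenizer test suite for GPT-2's byte-level BPE, run against both the slow
# Python implementation (GPT2Tokenizer) and the Rust-backed fast one (GPT2TokenizerFast),
# using a tiny toy vocabulary written to a temporary directory.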
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
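        # "\u0120" is the byte-level BPE symbol ("Ġ") that GPT-2 uses to mark a token
        # beginning with a space, so "\u0120low" stands for " low".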
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

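        # Write the toy vocab and merges files where from_pretrained expects to find them.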
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
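        # With add_prefix_space=True the input is treated as " lower newer"; the toy merges
        # collapse " lower" into ["Ġlow", "er"], while " newer" stays mostly character-level
        # because no "Ġ n" merge rule exists in the toy merges list.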
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level BPE
        # and get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

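                # GPT-2 defines no padding token by default, so requesting padding should raise a ValueError.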
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
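        # With an explicit pad token set, both max_length and dynamic padding should work on the slow tokenizer.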
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncation=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncation=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
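        # With add_bos_token=True, every encoded sequence should start (and decode) with the custom BOS token.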
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertTrue(decode_s.startswith(bos_token))
        self.assertTrue(all(d.startswith(bos_token) for d in decode_s2))

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
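        # The special tokens mask should flag exactly the BOS tokens added by the tokenizer;
        # stripping the flagged positions must recover the plain (no-special-tokens) encodings.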
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)

    @require_jinja
    def test_tokenization_for_chat(self):
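        # The expected ids below are reference values for the toy vocabulary from setUp,
        # run through the tokenizer's default chat template; they are not real GPT-2 ids.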
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname)
        test_chats = [
            [{"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}],
            [
                {"role": "system", "content": "You are a helpful chatbot."},
                {"role": "user", "content": "Hello!"},
                {"role": "assistant", "content": "Nice to meet you."},
            ],
            [{"role": "assistant", "content": "Nice to meet you."}, {"role": "user", "content": "Hello!"}],
        ]
        tokenized_chats = [tokenizer.apply_chat_template(test_chat) for test_chat in test_chats]
        # fmt: off
        expected_tokens = [[20, 1, 20, 10, 20, 4, 3, 10, 20, 10, 20, 3, 0, 20, 20, 20, 0, 10, 20, 20, 20, 6, 20, 1, 6, 20, 20, 20, 3, 0, 0, 1, 20, 20],
                          [20, 1, 20, 10, 20, 4, 3, 10, 20, 10, 20, 3, 0, 20, 20, 20, 0, 10, 20, 20, 20, 6, 20, 1, 6, 20, 20, 20, 3, 0, 0, 1, 20, 20, 20, 7, 20, 3, 10, 6, 1, 10, 20, 3, 3, 6, 10, 20, 1, 20, 20, 20],
                          [20, 7, 20, 3, 10, 6, 1, 10, 20, 3, 3, 6, 10, 20, 1, 20, 20, 20, 20, 3, 0, 0, 1, 20, 20]]
        # fmt: on
        for tokenized_chat, expected_tokens in zip(tokenized_chats, expected_tokens):
            self.assertListEqual(tokenized_chat, expected_tokens)


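# OPT reuses GPT-2's byte-level BPE tokenizer, which is why these OPT tests live in this file.
# They check fast/slow parity and serialization round-trips against the facebook/opt-350m checkpoint.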
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439

        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"

        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

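        # Reloading from the saved directory should reproduce exactly the same ids.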
        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"

        tokens_ids = tokenizer.encode(
            text,
        )
        # Same as above
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)

        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]

        text = "A photo of a cat"
        tokens_ids = tokenizer.encode(
            text,
        )
        # We changed the bos token
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
