# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import json
import os
import unittest

from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_jinja, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
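        # A sketch of how this toy vocab/merges pair tokenizes the test text: byte-level
        # BPE maps a leading space to "\u0120", so with add_prefix_space=True the string
        # "lower newer" is processed as " lower newer" and merged as
        #   " lower": \u0120 l o w e r -> \u0120l o w e r -> \u0120lo w e r -> \u0120low e r -> \u0120low er
        #   " newer": \u0120 n e w e r -> \u0120 n e w er   (no merge rule reaches "\u0120newer")
        # giving the `bpe_tokens` checked in `test_full_tokenizer` below.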
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level BPE and to get
        # both GPT-2 and RoBERTa to work at the same time (mostly an issue of adding a
        # space before the string).
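        # Byte-level BPE is whitespace-sensitive: "hello" and " hello" tokenize differently,
        # and add_prefix_space=True prepends a space so the first word of a string is
        # tokenized the same way as a word in the middle of a sentence.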
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

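        # padding="max_length" pads every sequence to the given max_length, whereas
        # padding=True only pads up to the longest sequence in the batch, which is where
        # the expected widths asserted below (30/60 vs. 33/52) come from.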
        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncation=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncation=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
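        # With add_bos_token=True the slow tokenizer prepends bos_token_id (here the custom
        # "$$$" token) to every encoded sequence, for single strings and for batches alike,
        # which is what the id and decode assertions below check.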

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertTrue(decode_s.startswith(bos_token))
        self.assertTrue(all(d.startswith(bos_token) for d in decode_s2))

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)

    @require_jinja
    def test_tokenization_for_chat(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname)
        test_chats = [
            [{"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}],
            [
                {"role": "system", "content": "You are a helpful chatbot."},
                {"role": "user", "content": "Hello!"},
                {"role": "assistant", "content": "Nice to meet you."},
            ],
            [{"role": "assistant", "content": "Nice to meet you."}, {"role": "user", "content": "Hello!"}],
        ]
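        # apply_chat_template renders each conversation with a Jinja chat template (hence the
        # @require_jinja marker) and then tokenizes the rendered string; GPT-2 defines no
        # template of its own, so this exercises the library's default/fallback template.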
        tokenized_chats = [tokenizer.apply_chat_template(test_chat) for test_chat in test_chats]
        # fmt: off
        expected_tokens = [[20, 1, 20, 10, 20, 4, 3, 10, 20, 10, 20, 3, 0, 20, 20, 20, 0, 10, 20, 20, 20, 6, 20, 1, 6, 20, 20, 20, 3, 0, 0, 1, 20, 20],
                           [20, 1, 20, 10, 20, 4, 3, 10, 20, 10, 20, 3, 0, 20, 20, 20, 0, 10, 20, 20, 20, 6, 20, 1, 6, 20, 20, 20, 3, 0, 0, 1, 20, 20, 20, 7, 20, 3, 10, 6, 1, 10, 20, 3, 3, 6, 10, 20, 1, 20, 20, 20],
                           [20, 7, 20, 3, 10, 6, 1, 10, 20, 3, 3, 6, 10, 20, 1, 20, 20, 20, 20, 3, 0, 0, 1, 20, 20]]
        # fmt: on
        for tokenized_chat, expected_tokens in zip(tokenized_chats, expected_tokens):
            self.assertListEqual(tokenized_chat, expected_tokens)


@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439

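        # from_slow=True forces the fast tokenizer to be rebuilt by converting the slow
        # tokenizer files instead of loading a pre-serialized tokenizer.json.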
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"

        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        tokens_ids = tokenizer.encode(
            text,
        )
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
324tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
325text = "A photo of a cat"
326
327tokens_ids = tokenizer.encode(
328text,
329)
330# Same as above
331self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])
332
333@unittest.skip("This test is failing because of a bug in the fast tokenizer")
334def test_users_can_modify_bos(self):
335tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
336
337tokenizer.bos_token = "bos"
338tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
339
340text = "A photo of a cat"
341tokens_ids = tokenizer.encode(
342text,
343)
344# We changed the bos token
345self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
346tokenizer.save_pretrained("./tok")
347tokenizer = AutoTokenizer.from_pretrained("./tok")
348self.assertTrue(tokenizer.is_fast)
349tokens_ids = tokenizer.encode(
350text,
351)
352self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758])
353