# coding=utf-8
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tempfile
import unittest

from datasets import load_dataset

from transformers import (
    AddedToken,
    GemmaTokenizer,
    GemmaTokenizerFast,
    is_torch_available,
)
from transformers.convert_slow_tokenizer import convert_slow_tokenizer
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_jinja,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


if is_torch_available():
    pass


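# Runs the shared TokenizerTesterMixin suite against the slow (SentencePiece-based)
# GemmaTokenizer and the fast GemmaTokenizerFast, using the local SentencePiece
# fixture as the test vocabulary.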
@require_sentencepiece
@require_tokenizers
class GemmaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GemmaTokenizer
    rust_tokenizer_class = GemmaTokenizerFast

    test_rust_tokenizer = False
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GemmaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.pad_token = tokenizer.eos_token
        tokenizer.save_pretrained(self.tmpdirname)

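    # Only runs when `test_seq2seq` is enabled: checks that truncation to `max_length`
    # is applied and that no decoder inputs are produced.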
    @require_torch
    def test_batch_tokenization(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                try:
                    batch = tokenizer(
                        text=text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                # max_target_length will default to max_length if not specified
                batch = tokenizer(text, max_length=3, return_tensors="pt")
                self.assertEqual(batch.input_ids.shape[1], 3)

                batch_encoder_only = tokenizer(text=text, max_length=3, max_target_length=10, return_tensors="pt")
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)

    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
    def test_save_slow_from_fast_and_reload_fast(self):
        pass

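    # Checks that tokens passed via `additional_special_tokens` at load time are recognized
    # by the fast tokenizer (their id appears in the encoded output) and, when the slow
    # tokenizer is tested, that slow, fast, and converted tokenizers produce identical encodings.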
    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")

                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name,
                        additional_special_tokens=added_tokens,
                        **kwargs,  # , from_slow=True <- unfortunately too slow to convert
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )

                    p_output = tokenizer_p.encode("Hey this is a <special> token")

                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")

                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)

    @slow
    def test_tokenizer_integration(self):
        expected_encoding = {'input_ids': [[2, 158434, 591, 84193, 3836, 685, 6599, 31223, 235290, 140247, 578, 6599, 31223, 235290, 145139, 235290, 3491, 235275, 6572, 3311, 235290, 38197, 109959, 591, 25894, 235269, 162174, 235290, 235284, 235269, 1791, 6362, 12481, 235269, 1576, 18622, 235269, 2900, 1136, 86684, 235269, 29092, 4632, 16994, 604, 13146, 14944, 40371, 591, 19700, 235327, 235275, 578, 13146, 14944, 25511, 591, 235300, 12474, 235275, 675, 1163, 235248, 235304, 235284, 235340, 229903, 5377, 575, 235248, 235274, 235276, 235276, 235340, 17044, 578, 5271, 1061, 118345, 1865, 125247, 235269, 8745, 111226, 578, 176888, 235265], [2, 25894, 603, 6869, 577, 953, 235290, 8297, 5271, 209099, 41642, 774, 748, 78253, 2793, 731, 51506, 34346, 611, 2145, 2731, 578, 1833, 4807, 575, 832, 16630, 235265], [2, 651, 4320, 8426, 25341, 36271, 1163, 573, 27894, 5929, 235265]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # fmt: skip
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="hf-internal-testing/dummy-gemma",
            revision="",
            padding=False,
        )

    @unittest.skip("worker 'gw4' crashed on CI, passing locally.")
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    @unittest.skip("worker 'gw4' crashed on CI, passing locally.")
    def test_subword_regularization_tokenizer(self):
        pass

    @unittest.skip("This test will be removed from main @LysandreJik")
    def test_pretrained_model_lists(self):
        pass

    @unittest.skip("Skipping")
    def test_torch_encode_plus_sent_to_model(self):
        pass


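# Parity tests between the slow and fast tokenizers loaded from the
# "hf-internal-testing/dummy-gemma" checkpoint, with the EOS token overridden to "<s>".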
@require_torch
@require_sentencepiece
@require_tokenizers
class GemmaIntegrationTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        checkpoint_name = "hf-internal-testing/dummy-gemma"
        cls.tokenizer: GemmaTokenizer = GemmaTokenizer.from_pretrained(
            checkpoint_name, eos_token="<s>"
        )  # add this token
        cls.rust_tokenizer = GemmaTokenizerFast.from_pretrained(
            checkpoint_name, eos_token="<s>", from_slow=True
        )  # add this token
        return cls

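    # Note: the name below does not start with "test_", so unittest does not collect this method.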
    @require_torch
    def integration_tests(self):
        inputs = self.tokenizer(
            ["The following string should be properly encoded: Hello.", "But ird and ปี ird ด"],
            return_tensors="pt",
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                "input_ids": [
                    [2, 450, 1494, 1347, 881, 367, 6284, 18511, 29901, 15043, 29889],
                    [2, 1205, 29871, 1823, 322, 29871, 31010, 30691, 1678, 1823, 1678, 30718],
                ],
                "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
            },
        )

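    # The `add_eos_token` flag should behave the same for the slow and fast tokenizers:
    # when enabled, the EOS id (204 for the overridden "<s>" token) is appended.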
    def test_fast_special_tokens(self):
        slow_tokenizer = self.tokenizer
        fast_tokenizer = self.rust_tokenizer
        slow = slow_tokenizer.encode("A sample test", add_special_tokens=True)
        assert slow == [2, 235280, 6453, 2121]

        fast_tokenizer.add_eos_token = False
        fast = fast_tokenizer.encode("A sample test", add_special_tokens=True)
        assert fast == [2, 235280, 6453, 2121]

        fast_tokenizer.add_eos_token = True
        fast = fast_tokenizer.encode("A sample test", add_special_tokens=True)
        assert fast == [2, 235280, 6453, 2121, 204]

        slow_tokenizer.add_eos_token = True
        slow = slow_tokenizer.encode("A sample test", add_special_tokens=True)
        assert slow == [2, 235280, 6453, 2121, 204]

        self.tokenizer.add_eos_token = False
        self.rust_tokenizer.add_eos_token = False

    @unittest.skip("Not super important and always failing. Let's skip it")
    @slow
    def test_conversion(self):
        # This is excruciatingly slow since it has to recreate the entire merge
        # list from the original vocabulary in spm
        self.rust_tokenizer.save_pretrained("./out")
        with tempfile.TemporaryDirectory() as dirname:
            self.rust_tokenizer.save_pretrained(dirname)

            with open(os.path.join(dirname, "tokenizer.json"), "r") as f:
                old_serialized = f.read()

        new_tokenizer = convert_slow_tokenizer(self.tokenizer)
        with tempfile.NamedTemporaryFile() as f:
            new_tokenizer.save(f.name)
            # Re-opening since `f` is in bytes.
            new_serialized = open(f.name, "r").read()
            with open("out_tokenizer.json", "w") as g:
                g.write(new_serialized)

            self.assertEqual(old_serialized, new_serialized)

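    # Round-trip encode/decode parity between the slow and fast tokenizers, covering
    # byte-fallback characters, multiple inner spaces, leading spaces, and empty input.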
    def test_simple_encode_decode(self):
        pyth_tokenizer = self.tokenizer
        rust_tokenizer = self.rust_tokenizer

        self.tokenizer.add_eos_token = False
        self.rust_tokenizer.add_eos_token = False

        self.assertEqual(pyth_tokenizer.encode("This is a test"), [2, 1596, 603, 476, 2121])
        self.assertEqual(rust_tokenizer.encode("This is a test"), [2, 1596, 603, 476, 2121])
        self.assertEqual(pyth_tokenizer.decode([2, 1596, 603, 476, 2121], skip_special_tokens=True), "This is a test")
        self.assertEqual(rust_tokenizer.decode([2, 1596, 603, 476, 2121], skip_special_tokens=True), "This is a test")

        # bytefallback showcase
        self.assertEqual(pyth_tokenizer.encode("生活的真谛是"), [2, 122182, 235710, 245467, 235427])  # fmt: skip
        self.assertEqual(rust_tokenizer.encode("生活的真谛是"), [2, 122182, 235710, 245467, 235427])  # fmt: skip
        self.assertEqual(
            pyth_tokenizer.decode([2, 122182, 235710, 245467, 235427], skip_special_tokens=True),
            "生活的真谛是",
        )
        self.assertEqual(
            rust_tokenizer.decode([2, 122182, 235710, 245467, 235427], skip_special_tokens=True),
            "生活的真谛是",
        )

        # Inner spaces showcase (139 is the two-space token, 140 the three-space token)
        self.assertEqual(pyth_tokenizer.encode("Hi  Hello"), [2, 2151, 139, 4521])
        self.assertEqual(rust_tokenizer.encode("Hi  Hello"), [2, 2151, 139, 4521])
        self.assertEqual(pyth_tokenizer.decode([2, 2151, 139, 4521], skip_special_tokens=True), "Hi  Hello")
        self.assertEqual(rust_tokenizer.decode([2, 2151, 139, 4521], skip_special_tokens=True), "Hi  Hello")

        self.assertEqual(pyth_tokenizer.encode("Hi   Hello"), [2, 2151, 140, 4521])
        self.assertEqual(rust_tokenizer.encode("Hi   Hello"), [2, 2151, 140, 4521])
        self.assertEqual(pyth_tokenizer.decode([2, 2151, 140, 4521], skip_special_tokens=True), "Hi   Hello")
        self.assertEqual(rust_tokenizer.decode([2, 2151, 140, 4521], skip_special_tokens=True), "Hi   Hello")

        self.assertEqual(pyth_tokenizer.encode(""), [2])
        self.assertEqual(rust_tokenizer.encode(""), [2])

        self.assertEqual(pyth_tokenizer.encode(" "), [2, 235248])
        self.assertEqual(rust_tokenizer.encode(" "), [2, 235248])

        self.assertEqual(pyth_tokenizer.encode("  "), [2, 139])
        self.assertEqual(rust_tokenizer.encode("  "), [2, 139])

        self.assertEqual(pyth_tokenizer.encode(" Hello"), [2, 25957])
        self.assertEqual(rust_tokenizer.encode(" Hello"), [2, 25957])

    def test_no_differences_decode(self):
        self.tokenizer.add_eos_token = False
        self.rust_tokenizer.add_eos_token = False
        pyth_tokenizer = self.tokenizer
        rust_tokenizer = self.rust_tokenizer

        self.assertEqual(pyth_tokenizer.decode([869]), "og")
        self.assertEqual(rust_tokenizer.decode([869]), "og")

        self.assertEqual(pyth_tokenizer.decode([30112, 869]), " expenditureog")
        self.assertEqual(rust_tokenizer.decode([30112, 869]), " expenditureog")

    def test_no_differences_special_tokens(self):
        pyth_tokenizer = self.tokenizer
        rust_tokenizer = self.rust_tokenizer
        self.assertEqual(pyth_tokenizer.encode(""), [2])
        self.assertEqual(rust_tokenizer.encode(""), [2])

        self.assertEqual(pyth_tokenizer.encode("<s>"), [2, 204])
        self.assertEqual(rust_tokenizer.encode("<s>"), [2, 204])

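    # Large-scale slow/fast parity check over the code_x_glue (Go) and XNLI datasets.
    # Opt in with RUN_TOKENIZER_INTEGRATION=1; it is far too slow for regular CI.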
    @unittest.skipIf(
        os.getenv("RUN_TOKENIZER_INTEGRATION", "0") == "0",
        "RUN_TOKENIZER_INTEGRATION=1 to run tokenizer integration tests",
    )
    def test_integration_test_xnli(self):
        import tqdm

        pyth_tokenizer = self.tokenizer
        rust_tokenizer = self.rust_tokenizer

        dataset = load_dataset("code_x_glue_ct_code_to_text", "go")
        for item in tqdm.tqdm(dataset["validation"]):
            string = item["code"]
            encoded1 = pyth_tokenizer.encode(string)
            encoded2 = rust_tokenizer.encode(string)

            self.assertEqual(encoded1, encoded2)

            decoded1 = pyth_tokenizer.decode(encoded1, skip_special_tokens=True)
            decoded2 = rust_tokenizer.decode(encoded1, skip_special_tokens=True)

            self.assertEqual(decoded1, decoded2)

        dataset = load_dataset("xnli", "all_languages")

        for item in tqdm.tqdm(dataset["train"]):
            for string in item["premise"].values():
                encoded1 = pyth_tokenizer.encode(string)
                encoded2 = rust_tokenizer.encode(string)

                self.assertEqual(encoded1, encoded2)

                decoded1 = pyth_tokenizer.decode(encoded1, skip_special_tokens=True)
                decoded2 = rust_tokenizer.decode(encoded2, skip_special_tokens=True)

                self.assertEqual(decoded1, decoded2)

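    # Added special tokens are currently stripped of surrounding spaces on both sides;
    # this test pins down the resulting encode/decode behaviour around "<REPR_END>" and "<s>".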
    def test_special_token_special_word(self):
        # the word inform should be split as ['in', 'form']
        tokenizer = GemmaTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")
        tokenizer.add_tokens([AddedToken("<REPR_END>", rstrip=True, lstrip=True)], special_tokens=False)
        out1 = tokenizer.decode(
            tokenizer.encode("<REPR_END>inform", add_special_tokens=False), spaces_between_special_tokens=False
        )
        self.assertEqual(out1, "<REPR_END>inform")
        out2 = tokenizer.decode(
            tokenizer.encode("<REPR_END>inform", add_special_tokens=False), spaces_between_special_tokens=True
        )
        # decoding strips the added prefix space.
        self.assertEqual(out2, "<REPR_END> inform")
        input_ids = tokenizer.encode("<REPR_END>inform", add_special_tokens=False)
        self.assertEqual(input_ids, [256000, 43910])

        out2 = tokenizer.decode(
            tokenizer.encode(" <REPR_END> inform", add_special_tokens=False), spaces_between_special_tokens=False
        )
        # TODO @ArthurZ currently we strip left and right, so this will not keep the spaces
        self.assertEqual(out2, "<REPR_END>inform")

        ### Let's make sure decoding does not add extra spaces here and there
        # TODO @ArthurZ this should be affected by the lstrip/rstrip/single word /normalize refactoring
        # Since currently we always strip left and right of the token, results are as such
        input_ids = tokenizer.encode("<s> Hello<s>how", add_special_tokens=False)
        self.assertEqual(input_ids, [204, 25957, 204, 1139])
        tokens = tokenizer.tokenize("<s> Hello<s>how", add_special_tokens=False)
        self.assertEqual(tokens, ["<s>", "▁Hello", "<s>", "how"])
        decoded_tokens = tokenizer.decode(input_ids)
        self.assertEqual(decoded_tokens, "<s> Hello<s>how")

        # Let's make sure that if there are any spaces, we don't remove them!
        input_ids = tokenizer.encode(" <s> Hello<s> how", add_special_tokens=False)
        self.assertEqual(input_ids, [235248, 204, 25957, 204, 1368])
        tokens = tokenizer.tokenize(" <s> Hello<s> how", add_special_tokens=False)
        self.assertEqual(tokens, ["▁", "<s>", "▁Hello", "<s>", "▁how"])
        decoded_tokens = tokenizer.decode(input_ids)
        self.assertEqual(decoded_tokens, " <s> Hello<s> how")

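    # Since the dummy prefix space is disabled for Gemma, tokenize() should match the raw
    # sp_model output on whitespace-only and metaspace-only inputs.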
    def test_some_edge_cases(self):
        tokenizer = GemmaTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")

        sp_tokens = tokenizer.sp_model.encode("<s>>", out_type=str)
        self.assertEqual(sp_tokens, ["<s>", ">"])
        tokens = tokenizer.tokenize("<s>>")
        self.assertEqual(sp_tokens, tokens)
        self.assertEqual(tokens, ["<s>", ">"])

        tokens = tokenizer.tokenize("")
        self.assertEqual(tokens, [])
        self.assertEqual(tokens, tokenizer.sp_model.encode("", out_type=str))

        tokens = tokenizer.tokenize(" ")
        self.assertEqual(tokens, ["▁"])
        # a dummy prefix space is not added by the sp_model as it was de-activated
        self.assertEqual(tokens, tokenizer.sp_model.encode(" ", out_type=str))

        tokens = tokenizer.tokenize("▁")
        self.assertEqual(tokens, ["▁"])
        # a dummy prefix space is not added by the sp_model as it was de-activated
        self.assertEqual(tokens, tokenizer.sp_model.encode("▁", out_type=str))

        tokens = tokenizer.tokenize(" ▁")
        self.assertEqual(tokens, ["▁▁"])
        # a dummy prefix space is not added by the sp_model as it was de-activated
        self.assertEqual(tokens, tokenizer.sp_model.encode("▁▁", out_type=str))

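    # The chat template applied to simple conversations should reproduce fixed,
    # pre-computed token sequences for the dummy checkpoint.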
    @require_jinja
    def test_tokenization_for_chat(self):
        tokenizer = GemmaTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")

        test_chats = [
            [{"role": "user", "content": "Hello!"}],
            [
                {"role": "user", "content": "Hello!"},
                {"role": "assistant", "content": "Nice to meet you."},
            ],
            [{"role": "user", "content": "Hello!"}],
        ]
        # Matt: The third test case tests the default system message, but if this is ever changed in the
        # class/repo code then that test will fail, and the case will need to be updated.
        tokenized_chats = [tokenizer.apply_chat_template(test_chat) for test_chat in test_chats]
        expected_tokens = [[235322, 235371, 571, 235298, 2997, 73786, 1645, 108, 4521, 149907, 235371, 571, 235298, 615, 73786, 108], [235322, 235371, 571, 235298, 2997, 73786, 1645, 108, 4521, 149907, 235371, 571, 235298, 615, 73786, 108, 235322, 235371, 571, 235298, 2997, 73786, 105776, 108, 7731, 577, 4664, 692, 35606, 235371, 571, 235298, 615, 73786, 108], [235322, 235371, 571, 235298, 2997, 73786, 1645, 108, 4521, 149907, 235371, 571, 235298, 615, 73786, 108]]  # fmt: skip
        for tokenized_chat, expected_tokens in zip(tokenized_chats, expected_tokens):
            self.assertListEqual(tokenized_chat, expected_tokens)


@require_sentencepiece
@require_tokenizers
class CommonSpmIntegrationTests(unittest.TestCase):
    """
    A class that groups important tests to make sure that we properly handle the special tokens.
    """

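    # Tabs, repeated spaces, newlines, and emoji should tokenize to the same ids and
    # tokens with the fast and slow tokenizers, and decode back with "<bos>" prepended.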
    def test_edge_case_tabulation(self):
        fast_tokenizer = GemmaTokenizerFast.from_pretrained("hf-internal-testing/dummy-gemma")
        slow_tokenizer = GemmaTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")
        input_text = "Hey<eos>. \t\t \n\nyou  é  @#😈  🤗!       , 1234 15 5,61"
        EXPECTED_IDS = [ 2, 6750, 1, 235265, 235248, 255969, 235248, 109, 4747, 139, 235335, 139, 216311, 241316, 139, 239880, 235341, 144, 235269, 235248, 235274, 235284, 235304, 235310, 235248, 235274, 235308, 235248, 235308, 235269, 235318, 235274]  # fmt: skip
        EXPECTED_TOKENS = [ "Hey", "<eos>", ".", "▁", "\t\t", "▁", "\n\n", "you", "▁▁", "é", "▁▁", "@#", "😈", "▁▁", "🤗", "!", "▁▁▁▁▁▁▁", ",", "▁", "1", "2", "3", "4", "▁", "1", "5", "▁", "5", ",", "6", "1"]  # fmt: skip

        tokens = fast_tokenizer.tokenize(input_text)
        with self.subTest("fast tokenize"):
            self.assertEqual(tokens, EXPECTED_TOKENS)

        tokens = slow_tokenizer.tokenize(input_text)
        with self.subTest("slow tokenize"):
            self.assertEqual(tokens, EXPECTED_TOKENS)

        input_ids = fast_tokenizer.encode(input_text)
        with self.subTest("fast encode"):
            self.assertEqual(input_ids, EXPECTED_IDS)

        input_ids = slow_tokenizer.encode(input_text)
        with self.subTest("slow encode"):
            self.assertEqual(input_ids, EXPECTED_IDS)

        text = fast_tokenizer.decode(EXPECTED_IDS)
        with self.subTest("fast decode"):
            self.assertEqual(text, "<bos>Hey<eos>. \t\t \n\nyou  é  @#😈  🤗!       , 1234 15 5,61")

        text = slow_tokenizer.decode(EXPECTED_IDS)
        with self.subTest("slow decode"):
            self.assertEqual(text, "<bos>Hey<eos>. \t\t \n\nyou  é  @#😈  🤗!       , 1234 15 5,61")

        input_text = "\t\t\t\t \n\n61"
        EXPECTED_IDS = [2, 255971, 235248, 109, 235318, 235274]
        EXPECTED_TOKENS = ["\t\t\t\t", "▁", "\n\n", "6", "1"]

        tokens = fast_tokenizer.tokenize(input_text)
        with self.subTest("fast tokenize"):
            self.assertEqual(tokens, EXPECTED_TOKENS)

        tokens = slow_tokenizer.tokenize(input_text)
        with self.subTest("slow tokenize"):
            self.assertEqual(tokens, EXPECTED_TOKENS)

        input_ids = fast_tokenizer.encode(input_text)
        with self.subTest("fast encode"):
            self.assertEqual(input_ids, EXPECTED_IDS)

        input_ids = slow_tokenizer.encode(input_text)
        with self.subTest("slow encode"):
            self.assertEqual(input_ids, EXPECTED_IDS)

        text = fast_tokenizer.decode(EXPECTED_IDS)
        with self.subTest("fast decode"):
            self.assertEqual(text, "<bos>\t\t\t\t \n\n61")

        text = slow_tokenizer.decode(EXPECTED_IDS)
        with self.subTest("slow decode"):
            self.assertEqual(text, "<bos>\t\t\t\t \n\n61")