# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import shutil
import tempfile
import unittest
from typing import List

from transformers import (
    AddedToken,
    LayoutXLMTokenizerFast,
    SpecialTokensMixin,
    is_tf_available,
    is_torch_available,
    logging,
)
from transformers.models.layoutxlm.tokenization_layoutxlm import LayoutXLMTokenizer
from transformers.testing_utils import (
    get_tests_dir,
    is_pt_tf_cross_test,
    require_pandas,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)

from ...test_tokenization_common import (
    SMALL_TRAINING_CORPUS,
    TokenizerTesterMixin,
    filter_non_english,
    merge_model_tokenizer_mappings,
)


logger = logging.get_logger(__name__)
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
@require_pandas
class LayoutXLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutXLMTokenizer
    rust_tokenizer_class = LayoutXLMTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_non_english
    test_seq2seq = False
    test_sentencepiece = True
    maxDiff = None

    def get_words_and_boxes(self):
        words = ["a", "weirdly", "test"]
        boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]

        return words, boxes

    def get_words_and_boxes_batch(self):
        words = [["a", "weirdly", "test"], ["hello", "my", "name", "is", "bob"]]
        boxes = [
            [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
            [[961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69]],
        ]

        return words, boxes

    def get_question_words_and_boxes(self):
        question = "what's his name?"
        words = ["a", "weirdly", "test"]
        boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]

        return question, words, boxes

    def get_question_words_and_boxes_batch(self):
        questions = ["what's his name?", "how is he called?"]
        words = [["a", "weirdly", "test"], ["what", "a", "laif", "gastn"]]
        boxes = [
            [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
            [[256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69]],
        ]

        return questions, words, boxes

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = LayoutXLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

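    # Illustrative sanity check added for clarity (not part of the upstream suite): this tokenizer's
    # `__call__` expects pre-split words together with one bounding box per word, so the fixture
    # helpers defined above must stay aligned. It only relies on helpers defined in this class.
    def test_words_and_boxes_helpers_are_aligned(self):
        words, boxes = self.get_words_and_boxes()
        self.assertEqual(len(words), len(boxes))

        batch_words, batch_boxes = self.get_words_and_boxes_batch()
        for words_example, boxes_example in zip(batch_words, batch_boxes):
            self.assertEqual(len(words_example), len(boxes_example))
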
    # override test in `test_tokenization_common.py` because of the required input format of the `__call__` method of
    # this tokenizer
    def test_save_sentencepiece_tokenizer(self) -> None:
        if not self.test_sentencepiece or not self.test_slow_tokenizer:
            return
        # We want to verify that we will be able to save the tokenizer even if the original files that were used to
        # build the tokenizer have been deleted in the meantime.
        words, boxes = self.get_words_and_boxes()

        tokenizer_slow_1 = self.get_tokenizer()
        encoding_tokenizer_slow_1 = tokenizer_slow_1(
            words,
            boxes=boxes,
        )

        tmpdirname_1 = tempfile.mkdtemp()
        tmpdirname_2 = tempfile.mkdtemp()

        tokenizer_slow_1.save_pretrained(tmpdirname_1)
        tokenizer_slow_2 = self.tokenizer_class.from_pretrained(tmpdirname_1)
        encoding_tokenizer_slow_2 = tokenizer_slow_2(
            words,
            boxes=boxes,
        )

        shutil.rmtree(tmpdirname_1)
        tokenizer_slow_2.save_pretrained(tmpdirname_2)

        tokenizer_slow_3 = self.tokenizer_class.from_pretrained(tmpdirname_2)
        encoding_tokenizer_slow_3 = tokenizer_slow_3(
            words,
            boxes=boxes,
        )
        shutil.rmtree(tmpdirname_2)

        self.assertEqual(encoding_tokenizer_slow_1, encoding_tokenizer_slow_2)
        self.assertEqual(encoding_tokenizer_slow_1, encoding_tokenizer_slow_3)

    def test_split_special_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/layoutxlm-base")
        _, _, boxes = self.get_question_words_and_boxes()
        special_token = "[SPECIAL_TOKEN]"
        tokenizer.add_special_tokens({"additional_special_tokens": [special_token]})
        encoded_special_token = tokenizer.tokenize(special_token, boxes=boxes, add_special_tokens=False)
        self.assertEqual(len(encoded_special_token), 1)

        encoded_split_special_token = tokenizer.tokenize(
            special_token, add_special_tokens=False, split_special_tokens=True, boxes=boxes
        )
        self.assertTrue(len(encoded_split_special_token) > 1)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/layoutxlm-base")

        question, words, boxes = self.get_question_words_and_boxes()

        text = tokenizer.encode(
            question.split(),
            boxes=[tokenizer.pad_token_box for _ in range(len(question.split()))],
            add_special_tokens=False,
        )
        text_2 = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)

        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_pair == [0] + text + [2] + [2] + text_2 + [2]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                words, boxes = self.get_words_and_boxes()
                words[1] = tokenizer_r.mask_token
                tokens = tokenizer_r.encode_plus(
                    words,
                    boxes=boxes,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                expected_results = [
                    ((0, 0), tokenizer_r.cls_token),
                    ((0, 1), "▁a"),
                    ((0, 6), tokenizer_r.mask_token),
                    ((0, 4), "▁test"),
                    ((0, 0), tokenizer_r.sep_token),
                ]

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_add_special_tokens(self):
        tokenizers: List[LayoutXLMTokenizer] = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                special_token_box = [1000, 1000, 1000, 1000]

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(
                    [special_token], boxes=[special_token_box], add_special_tokens=False
                )
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_add_tokens_tokenizer(self):
        tokenizers: List[LayoutXLMTokenizer] = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa", "bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                words = "aaaaa bbbbbb low cccccccccdddddddd l".split()
                boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]

                tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                words = ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l".split()
                boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]

                tokens = tokenizer.encode(
                    words,
                    boxes=boxes,
                    add_special_tokens=False,
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-2], tokens[-3])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-2], tokenizer.pad_token_id)

    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes()

                new_toks = [AddedToken("[ABC]", normalized=False), AddedToken("[DEF]", normalized=False)]
                tokenizer.add_tokens(new_toks)
                input = "[ABC][DEF][ABC][DEF]"
                if self.space_between_special_tokens:
                    output = "[ABC] [DEF] [ABC] [DEF]"
                else:
                    output = input
                encoded = tokenizer.encode(input.split(), boxes=boxes, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])

    def test_encode_plus_with_padding(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes()

                # check correct behaviour if no pad_token_id exists and add it eventually
                self._check_no_pad_token_padding(tokenizer, words)

                padding_size = 10
                padding_idx = tokenizer.pad_token_id

                encoded_sequence = tokenizer.encode_plus(words, boxes=boxes, return_special_tokens_mask=True)
                input_ids = encoded_sequence["input_ids"]
                special_tokens_mask = encoded_sequence["special_tokens_mask"]
                sequence_length = len(input_ids)

                # Test 'longest' and 'no_padding' don't do anything
                tokenizer.padding_side = "right"

                not_padded_sequence = tokenizer.encode_plus(
                    words,
                    boxes=boxes,
                    padding=False,
                    return_special_tokens_mask=True,
                )
                not_padded_input_ids = not_padded_sequence["input_ids"]

                not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
                not_padded_sequence_length = len(not_padded_input_ids)

                self.assertTrue(sequence_length == not_padded_sequence_length)
                self.assertTrue(input_ids == not_padded_input_ids)
                self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask)

                not_padded_sequence = tokenizer.encode_plus(
                    words,
                    boxes=boxes,
                    padding=False,
                    return_special_tokens_mask=True,
                )
                not_padded_input_ids = not_padded_sequence["input_ids"]

                not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
                not_padded_sequence_length = len(not_padded_input_ids)

                self.assertTrue(sequence_length == not_padded_sequence_length)
                self.assertTrue(input_ids == not_padded_input_ids)
                self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask)

                # Test right padding
                tokenizer.padding_side = "right"

                right_padded_sequence = tokenizer.encode_plus(
                    words,
                    boxes=boxes,
                    max_length=sequence_length + padding_size,
                    padding="max_length",
                    return_special_tokens_mask=True,
                )
                right_padded_input_ids = right_padded_sequence["input_ids"]

                right_padded_special_tokens_mask = right_padded_sequence["special_tokens_mask"]
                right_padded_sequence_length = len(right_padded_input_ids)

                self.assertTrue(sequence_length + padding_size == right_padded_sequence_length)
                self.assertTrue(input_ids + [padding_idx] * padding_size == right_padded_input_ids)
                self.assertTrue(special_tokens_mask + [1] * padding_size == right_padded_special_tokens_mask)

                # Test left padding
                tokenizer.padding_side = "left"
                left_padded_sequence = tokenizer.encode_plus(
                    words,
                    boxes=boxes,
                    max_length=sequence_length + padding_size,
                    padding="max_length",
                    return_special_tokens_mask=True,
                )
                left_padded_input_ids = left_padded_sequence["input_ids"]
                left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"]
                left_padded_sequence_length = len(left_padded_input_ids)

                self.assertTrue(sequence_length + padding_size == left_padded_sequence_length)
                self.assertTrue([padding_idx] * padding_size + input_ids == left_padded_input_ids)
                self.assertTrue([1] * padding_size + special_tokens_mask == left_padded_special_tokens_mask)

                if "token_type_ids" in tokenizer.model_input_names:
                    token_type_ids = encoded_sequence["token_type_ids"]
                    left_padded_token_type_ids = left_padded_sequence["token_type_ids"]
                    right_padded_token_type_ids = right_padded_sequence["token_type_ids"]

                    assert token_type_ids + [0] * padding_size == right_padded_token_type_ids
                    assert [0] * padding_size + token_type_ids == left_padded_token_type_ids

                if "attention_mask" in tokenizer.model_input_names:
                    attention_mask = encoded_sequence["attention_mask"]
                    right_padded_attention_mask = right_padded_sequence["attention_mask"]
                    left_padded_attention_mask = left_padded_sequence["attention_mask"]

                    self.assertTrue(attention_mask + [0] * padding_size == right_padded_attention_mask)
                    self.assertTrue([0] * padding_size + attention_mask == left_padded_attention_mask)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes()

                tokens = []
                for word in words:
                    tokens.extend(tokenizer.tokenize(word))
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                output_text = "a weirdly test"
                self.assertEqual(text_2, output_text)

    def test_mask_output(self):
        tokenizers = self.get_tokenizers(fast=False, do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes()

                if (
                    tokenizer.build_inputs_with_special_tokens.__qualname__.split(".")[0] != "PreTrainedTokenizer"
                    and "token_type_ids" in tokenizer.model_input_names
                ):
                    information = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True)
                    sequences, mask = information["input_ids"], information["token_type_ids"]
                    self.assertEqual(len(sequences), len(mask))

    def test_number_of_added_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # test 1: single sequence
                words, boxes = self.get_words_and_boxes()

                sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
                attached_sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=True)

                # Method is implemented (e.g. not GPT-2)
                if len(attached_sequences) != 2:
                    self.assertEqual(
                        tokenizer.num_special_tokens_to_add(pair=False), len(attached_sequences) - len(sequences)
                    )

                # test 2: two sequences
                question, words, boxes = self.get_question_words_and_boxes()

                sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=False)
                attached_sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=True)

                # Method is implemented (e.g. not GPT-2)
                if len(attached_sequences) != 2:
                    self.assertEqual(
                        tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences)
                    )

    def test_padding_to_max_length(self):
        """We keep this test for backward compatibility but it should be removed when `pad_to_max_length` is deprecated"""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes()
                padding_size = 10

                # check correct behaviour if no pad_token_id exists and add it eventually
                self._check_no_pad_token_padding(tokenizer, words)

                padding_idx = tokenizer.pad_token_id

                # Check that it correctly pads when a maximum length is specified along with the padding flag set to True
                tokenizer.padding_side = "right"
                encoded_sequence = tokenizer.encode(words, boxes=boxes)
                sequence_length = len(encoded_sequence)
                # FIXME: the next line should be padding(max_length) to avoid warning
                padded_sequence = tokenizer.encode(
                    words, boxes=boxes, max_length=sequence_length + padding_size, pad_to_max_length=True
                )
                padded_sequence_length = len(padded_sequence)
                assert sequence_length + padding_size == padded_sequence_length
                assert encoded_sequence + [padding_idx] * padding_size == padded_sequence

                # Check that nothing is done when a maximum length is not specified
                encoded_sequence = tokenizer.encode(words, boxes=boxes)
                sequence_length = len(encoded_sequence)

                tokenizer.padding_side = "right"
                padded_sequence_right = tokenizer.encode(words, boxes=boxes, pad_to_max_length=True)
                padded_sequence_right_length = len(padded_sequence_right)
                assert sequence_length == padded_sequence_right_length
                assert encoded_sequence == padded_sequence_right

    def test_padding(self, max_length=50):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
                pad_token_id = tokenizer_p.pad_token_id

                # Encode - Simple input
                words, boxes = self.get_words_and_boxes()
                input_r = tokenizer_r.encode(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
                input_p = tokenizer_p.encode(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
                self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
                input_r = tokenizer_r.encode(words, boxes=boxes, max_length=max_length, padding="max_length")
                input_p = tokenizer_p.encode(words, boxes=boxes, max_length=max_length, padding="max_length")
                self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)

                input_r = tokenizer_r.encode(words, boxes=boxes, padding="longest")
                input_p = tokenizer_p.encode(words, boxes=boxes, padding=True)
                self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)

                # Encode - Pair input
                question, words, boxes = self.get_question_words_and_boxes()
                input_r = tokenizer_r.encode(
                    question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
                )
                input_p = tokenizer_p.encode(
                    question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
                )
                self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
                input_r = tokenizer_r.encode(question, words, boxes=boxes, max_length=max_length, padding="max_length")
                input_p = tokenizer_p.encode(question, words, boxes=boxes, max_length=max_length, padding="max_length")
                self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
                input_r = tokenizer_r.encode(question, words, boxes=boxes, padding=True)
                input_p = tokenizer_p.encode(question, words, boxes=boxes, padding="longest")
                self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)

                # Encode_plus - Simple input
                words, boxes = self.get_words_and_boxes()
                input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
                input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
                input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length")
                input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length")
                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])

                input_r = tokenizer_r.encode_plus(words, boxes=boxes, padding="longest")
                input_p = tokenizer_p.encode_plus(words, boxes=boxes, padding=True)
                self.assert_padded_input_match(
                    input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
                )

                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])

                # Encode_plus - Pair input
                question, words, boxes = self.get_question_words_and_boxes()
                input_r = tokenizer_r.encode_plus(
                    question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
                )
                input_p = tokenizer_p.encode_plus(
                    question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True
                )
                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
                input_r = tokenizer_r.encode_plus(
                    question, words, boxes=boxes, max_length=max_length, padding="max_length"
                )
                input_p = tokenizer_p.encode_plus(
                    question, words, boxes=boxes, max_length=max_length, padding="max_length"
                )
                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
                input_r = tokenizer_r.encode_plus(question, words, boxes=boxes, padding="longest")
                input_p = tokenizer_p.encode_plus(question, words, boxes=boxes, padding=True)
                self.assert_padded_input_match(
                    input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
                )
                self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])

                # Batch_encode_plus - Simple input
                words, boxes = self.get_words_and_boxes_batch()

                input_r = tokenizer_r.batch_encode_plus(
                    words,
                    boxes=boxes,
                    max_length=max_length,
                    pad_to_max_length=True,
                )
                input_p = tokenizer_p.batch_encode_plus(
                    words,
                    boxes=boxes,
                    max_length=max_length,
                    pad_to_max_length=True,
                )
                self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)

                input_r = tokenizer_r.batch_encode_plus(
                    words,
                    boxes=boxes,
                    max_length=max_length,
                    padding="max_length",
                )
                input_p = tokenizer_p.batch_encode_plus(
                    words,
                    boxes=boxes,
                    max_length=max_length,
                    padding="max_length",
                )
                self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)

                input_r = tokenizer_r.batch_encode_plus(
                    words,
                    boxes=boxes,
                    max_length=max_length,
                    padding="longest",
                )
                input_p = tokenizer_p.batch_encode_plus(
                    words,
                    boxes=boxes,
                    max_length=max_length,
                    padding=True,
                )
                self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)

                input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes, padding="longest")
                input_p = tokenizer_p.batch_encode_plus(words, boxes=boxes, padding=True)
                self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)

                # Batch_encode_plus - Pair input
                questions, words, boxes = self.get_question_words_and_boxes_batch()

                input_r = tokenizer_r.batch_encode_plus(
                    list(zip(questions, words)),
                    is_pair=True,
                    boxes=boxes,
                    max_length=max_length,
                    truncation=True,
                    padding="max_length",
                )
                input_p = tokenizer_p.batch_encode_plus(
                    list(zip(questions, words)),
                    is_pair=True,
                    boxes=boxes,
                    max_length=max_length,
                    truncation=True,
                    padding="max_length",
                )
                self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)

                input_r = tokenizer_r.batch_encode_plus(
                    list(zip(questions, words)),
                    is_pair=True,
                    boxes=boxes,
                    padding=True,
                )
                input_p = tokenizer_p.batch_encode_plus(
                    list(zip(questions, words)),
                    is_pair=True,
                    boxes=boxes,
                    padding="longest",
                )
                self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)

                # Using pad on single examples after tokenization
                words, boxes = self.get_words_and_boxes()
                input_r = tokenizer_r.encode_plus(words, boxes=boxes)
                input_r = tokenizer_r.pad(input_r)

                input_p = tokenizer_r.encode_plus(words, boxes=boxes)
                input_p = tokenizer_r.pad(input_p)

                self.assert_padded_input_match(
                    input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
                )

                # Using pad on single examples after tokenization
                input_r = tokenizer_r.encode_plus(words, boxes=boxes)
                input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")

                input_p = tokenizer_r.encode_plus(words, boxes=boxes)
                input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")

                self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)

                # Using pad after tokenization
                words, boxes = self.get_words_and_boxes_batch()
                input_r = tokenizer_r.batch_encode_plus(
                    words,
                    boxes=boxes,
                )
                input_r = tokenizer_r.pad(input_r)

                input_p = tokenizer_r.batch_encode_plus(
                    words,
                    boxes=boxes,
                )
                input_p = tokenizer_r.pad(input_p)

                self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)

                # Using pad after tokenization
                words, boxes = self.get_words_and_boxes_batch()
                input_r = tokenizer_r.batch_encode_plus(
                    words,
                    boxes=boxes,
                )
                input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")

                input_p = tokenizer_r.batch_encode_plus(
                    words,
                    boxes=boxes,
                )
                input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")

                self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)

    def test_padding_warning_message_fast_tokenizer(self):
        if not self.test_rust_tokenizer:
            return

        words, boxes = self.get_words_and_boxes_batch()

        tokenizer_fast = self.get_rust_tokenizer()

        encoding_fast = tokenizer_fast(
            words,
            boxes=boxes,
        )

        with self.assertLogs("transformers", level="WARNING") as cm:
            tokenizer_fast.pad(encoding_fast)
        self.assertEqual(len(cm.records), 1)
        self.assertIn(
            "Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to"
            " encode the text followed by a call to the `pad` method to get a padded encoding.",
            cm.records[0].message,
        )

        if not self.test_slow_tokenizer:
            return

        tokenizer_slow = self.get_tokenizer()

        encoding_slow = tokenizer_slow(
            words,
            boxes=boxes,
        )

        with self.assertLogs(level="WARNING") as cm:
            # We want to assert there are no warnings, but the 'assertLogs' method does not support that.
            # Therefore, we are adding a dummy warning, and then we will assert it is the only warning.
            logger.warning("Dummy warning")
            tokenizer_slow.pad(encoding_slow)
        self.assertEqual(len(cm.records), 1)
        self.assertIn(
            "Dummy warning",
            cm.records[0].message,
        )

    def test_call(self):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Test not batched
                words, boxes = self.get_words_and_boxes()
                encoded_sequences_1 = tokenizer.encode_plus(words, boxes=boxes)
                encoded_sequences_2 = tokenizer(words, boxes=boxes)
                self.assertEqual(encoded_sequences_1, encoded_sequences_2)

                # Test not batched pairs
                question, words, boxes = self.get_question_words_and_boxes()
                encoded_sequences_1 = tokenizer.encode_plus(words, boxes=boxes)
                encoded_sequences_2 = tokenizer(words, boxes=boxes)
                self.assertEqual(encoded_sequences_1, encoded_sequences_2)

                # Test batched
                words, boxes = self.get_words_and_boxes_batch()
                encoded_sequences_1 = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes)
                encoded_sequences_2 = tokenizer(words, boxes=boxes)
                self.assertEqual(encoded_sequences_1, encoded_sequences_2)

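    # Illustrative sketch added for clarity (not part of the upstream suite): encodings produced by this
    # tokenizer also carry a "bbox" entry (it is among the keys compared in
    # test_tokenization_python_rust_equals below); here we assume it stays aligned with the input ids.
    def test_bbox_aligned_with_input_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes()
                encoding = tokenizer(words, boxes=boxes)
                self.assertEqual(len(encoding["bbox"]), len(encoding["input_ids"]))
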
    def test_batch_encode_plus_batch_sequence_length(self):
        # Tests that all encoded values have the correct size
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes_batch()

                encoded_sequences = [
                    tokenizer.encode_plus(words_example, boxes=boxes_example)
                    for words_example, boxes_example in zip(words, boxes)
                ]
                encoded_sequences_batch = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes, padding=False)
                self.assertListEqual(
                    encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
                )

                maximum_length = len(
                    max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len)
                )

                # check correct behaviour if no pad_token_id exists and add it eventually
                self._check_no_pad_token_padding(tokenizer, words)

                encoded_sequences_padded = [
                    tokenizer.encode_plus(
                        words_example, boxes=boxes_example, max_length=maximum_length, padding="max_length"
                    )
                    for words_example, boxes_example in zip(words, boxes)
                ]

                encoded_sequences_batch_padded = tokenizer.batch_encode_plus(
                    words, is_pair=False, boxes=boxes, padding=True
                )
                self.assertListEqual(
                    encoded_sequences_padded,
                    self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded),
                )

                # check 'longest' is insensitive to a max length
                encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(
                    words, is_pair=False, boxes=boxes, padding=True
                )
                encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
                    words, is_pair=False, boxes=boxes, max_length=maximum_length + 10, padding="longest"
                )
                for key in encoded_sequences_batch_padded_1.keys():
                    self.assertListEqual(
                        encoded_sequences_batch_padded_1[key],
                        encoded_sequences_batch_padded_2[key],
                    )

                # check 'no_padding' is insensitive to a max length
                encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(
                    words, is_pair=False, boxes=boxes, padding=False
                )
                encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
                    words, is_pair=False, boxes=boxes, max_length=maximum_length + 10, padding=False
                )
                for key in encoded_sequences_batch_padded_1.keys():
                    self.assertListEqual(
                        encoded_sequences_batch_padded_1[key],
                        encoded_sequences_batch_padded_2[key],
                    )

    @unittest.skip("batch_encode_plus does not handle overflowing tokens.")
    def test_batch_encode_plus_overflowing_tokens(self):
        pass

    def test_batch_encode_plus_padding(self):
        # Test that padded sequences are equivalent between batch_encode_plus and encode_plus

        # Right padding tests
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes_batch()

                max_length = 100

                # check correct behaviour if no pad_token_id exists and add it eventually
                self._check_no_pad_token_padding(tokenizer, words)

                encoded_sequences = [
                    tokenizer.encode_plus(
                        words_example, boxes=boxes_example, max_length=max_length, padding="max_length"
                    )
                    for words_example, boxes_example in zip(words, boxes)
                ]
                encoded_sequences_batch = tokenizer.batch_encode_plus(
                    words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length"
                )
                self.assertListEqual(
                    encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
                )

        # Left padding tests
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokenizer.padding_side = "left"
                words, boxes = self.get_words_and_boxes_batch()

                max_length = 100

                # check correct behaviour if no pad_token_id exists and add it eventually
                self._check_no_pad_token_padding(tokenizer, words)

                encoded_sequences = [
                    tokenizer.encode_plus(
                        words_example, boxes=boxes_example, max_length=max_length, padding="max_length"
                    )
                    for words_example, boxes_example in zip(words, boxes)
                ]
                encoded_sequences_batch = tokenizer.batch_encode_plus(
                    words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length"
                )
                self.assertListEqual(
                    encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
                )

    def test_padding_to_multiple_of(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                if tokenizer.pad_token is None:
                    self.skipTest("No padding token.")
                else:
                    words, boxes = self.get_words_and_boxes()

                    # empty_tokens = tokenizer([""], [[]], padding=True, pad_to_multiple_of=8)
                    normal_tokens = tokenizer(words, boxes=boxes, padding=True, pad_to_multiple_of=8)
                    # for key, value in empty_tokens.items():
                    #     self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
                    for key, value in normal_tokens.items():
                        self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")

                    normal_tokens = tokenizer(words, boxes=boxes, pad_to_multiple_of=8)
                    for key, value in normal_tokens.items():
                        self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")

                    # Should also work with truncation
                    normal_tokens = tokenizer(words, boxes=boxes, padding=True, truncation=True, pad_to_multiple_of=8)
                    for key, value in normal_tokens.items():
                        self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")

                    # truncation to something which is not a multiple of pad_to_multiple_of raises an error
                    self.assertRaises(
                        ValueError,
                        tokenizer.__call__,
                        words,
                        boxes=boxes,
                        padding=True,
                        truncation=True,
                        max_length=12,
                        pad_to_multiple_of=8,
                    )

    def test_tokenizer_slow_store_full_signature(self):
        signature = inspect.signature(self.tokenizer_class.__init__)
        tokenizer = self.get_tokenizer()

        for parameter_name, parameter in signature.parameters.items():
            if parameter.default != inspect.Parameter.empty:
                self.assertIn(parameter_name, tokenizer.init_kwargs)

    def test_build_inputs_with_special_tokens(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Input tokens id
                words, boxes = self.get_words_and_boxes()
                input_simple = tokenizer_p.encode(words, boxes=boxes, add_special_tokens=False)
                input_pair = tokenizer_p.encode(words, boxes=boxes, add_special_tokens=False)

                # Generate output
                output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple)
                output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple)
                self.assertEqual(output_p, output_r)

                # Generate pair output
                output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair)
                output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair)
                self.assertEqual(output_p, output_r)

    def test_special_tokens_mask_input_pairs(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes()
                encoded_sequence = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    words,
                    boxes=boxes,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                    # add_prefix_space=False,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)

    def test_special_tokens_mask(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes()
                # Testing single inputs
                encoded_sequence = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    words, boxes=boxes, add_special_tokens=True, return_special_tokens_mask=True
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]]
                self.assertEqual(encoded_sequence, filtered_sequence)

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                words, boxes = self.get_words_and_boxes()
                tmpdirname = tempfile.mkdtemp()

                before_tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
                before_vocab = tokenizer.get_vocab()
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
                after_vocab = after_tokenizer.get_vocab()
                self.assertListEqual(before_tokens, after_tokens)
                self.assertDictEqual(before_vocab, after_vocab)

                shutil.rmtree(tmpdirname)

    @unittest.skip("Not implemented")
    def test_right_and_left_truncation(self):
        pass

    def test_right_and_left_padding(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes()
                sequence = "Sequence"
                padding_size = 10

                # check correct behaviour if no pad_token_id exists and add it eventually
                self._check_no_pad_token_padding(tokenizer, sequence)

                padding_idx = tokenizer.pad_token_id

                # RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
                tokenizer.padding_side = "right"
                encoded_sequence = tokenizer.encode(words, boxes=boxes)
                sequence_length = len(encoded_sequence)
                padded_sequence = tokenizer.encode(
                    words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length"
                )
                padded_sequence_length = len(padded_sequence)
                assert sequence_length + padding_size == padded_sequence_length
                assert encoded_sequence + [padding_idx] * padding_size == padded_sequence

                # LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
                tokenizer.padding_side = "left"
                encoded_sequence = tokenizer.encode(words, boxes=boxes)
                sequence_length = len(encoded_sequence)
                padded_sequence = tokenizer.encode(
                    words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length"
                )
                padded_sequence_length = len(padded_sequence)
                assert sequence_length + padding_size == padded_sequence_length
                assert [padding_idx] * padding_size + encoded_sequence == padded_sequence

                # RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding'
                encoded_sequence = tokenizer.encode(words, boxes=boxes)
                sequence_length = len(encoded_sequence)

                tokenizer.padding_side = "right"
                padded_sequence_right = tokenizer.encode(words, boxes=boxes, padding=True)
                padded_sequence_right_length = len(padded_sequence_right)
                assert sequence_length == padded_sequence_right_length
                assert encoded_sequence == padded_sequence_right

                tokenizer.padding_side = "left"
                padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding="longest")
                padded_sequence_left_length = len(padded_sequence_left)
                assert sequence_length == padded_sequence_left_length
                assert encoded_sequence == padded_sequence_left

                tokenizer.padding_side = "right"
                padded_sequence_right = tokenizer.encode(words, boxes=boxes)
                padded_sequence_right_length = len(padded_sequence_right)
                assert sequence_length == padded_sequence_right_length
                assert encoded_sequence == padded_sequence_right

                tokenizer.padding_side = "left"
                padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding=False)
                padded_sequence_left_length = len(padded_sequence_left)
                assert sequence_length == padded_sequence_left_length
                assert encoded_sequence == padded_sequence_left

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # test 1: single sequence
                words, boxes = self.get_words_and_boxes()

                output = tokenizer(words, boxes=boxes, return_token_type_ids=True)

                # Assert that the token type IDs have the same length as the input IDs
                self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"]))

                # Assert that the token type IDs have the same length as the attention mask
                self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"]))

                self.assertIn(0, output["token_type_ids"])
                self.assertNotIn(1, output["token_type_ids"])

                # test 2: two sequences (question + words)
                question, words, boxes = self.get_question_words_and_boxes()

                output = tokenizer(question, words, boxes, return_token_type_ids=True)

                # Assert that the token type IDs have the same length as the input IDs
                self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"]))

                # Assert that the token type IDs have the same length as the attention mask
                self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"]))

                self.assertIn(0, output["token_type_ids"])
                self.assertNotIn(1, output["token_type_ids"])

    def test_offsets_mapping(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = ["a", "wonderful", "test"]
                boxes = [[1, 8, 12, 20] for _ in range(len(text))]

                # No pair
                tokens_with_offsets = tokenizer_r.encode_plus(
                    text,
                    boxes=boxes,
                    return_special_tokens_mask=True,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                added_tokens = tokenizer_r.num_special_tokens_to_add(False)
                offsets = tokens_with_offsets["offset_mapping"]

                # Assert there is the same number of tokens and offsets
                self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))

                # Assert there are exactly `added_tokens` special tokens
                self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)

                # Pairs
                text = "what's his name"
                pair = ["a", "wonderful", "test"]
                boxes = [[1, 8, 12, 20] for _ in range(len(pair))]
                tokens_with_offsets = tokenizer_r.encode_plus(
                    text,
                    pair,
                    boxes=boxes,
                    return_special_tokens_mask=True,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                added_tokens = tokenizer_r.num_special_tokens_to_add(True)
                offsets = tokens_with_offsets["offset_mapping"]

                # Assert there is the same number of tokens and offsets
                self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))

                # Assert there are exactly `added_tokens` special tokens
                self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import MODEL_MAPPING, TOKENIZER_MAPPING

        MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)

        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
                    return

                config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
                config = config_class()

                if config.is_encoder_decoder or config.pad_token_id is None:
                    return

                model = model_class(config)

                # Make sure the model contains at least the full vocabulary size in its embedding matrix
                is_using_common_embeddings = hasattr(model.get_input_embeddings(), "weight")
                assert (
                    (model.get_input_embeddings().weight.shape[0] >= len(tokenizer))
                    if is_using_common_embeddings
                    else True
                )

                # Build sequence
                words, boxes = self.get_words_and_boxes()
                encoded_sequence = tokenizer.encode_plus(words, boxes=boxes, return_tensors="pt")
                batch_encoded_sequence = tokenizer.batch_encode_plus(
                    [words, words], [boxes, boxes], return_tensors="pt"
                )
                # This should not fail

                with torch.no_grad():  # saves some time
                    model(**encoded_sequence)
                    model(**batch_encoded_sequence)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        words, boxes = self.get_words_and_boxes()

        ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=True)
        rust_ids = rust_tokenizer.encode(words, boxes=boxes, add_special_tokens=True)
        self.assertListEqual(ids, rust_ids)

    def test_tokenization_python_rust_equals(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                words, boxes = self.get_words_and_boxes()

                # Ensure basic input match
                input_p = tokenizer_p.encode_plus(words, boxes=boxes)
                input_r = tokenizer_r.encode_plus(words, boxes=boxes)

                for key in filter(
                    lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
                ):
                    self.assertSequenceEqual(input_p[key], input_r[key])

                input_pairs_p = tokenizer_p.encode_plus(words, boxes=boxes)
                input_pairs_r = tokenizer_r.encode_plus(words, boxes=boxes)

                for key in filter(
                    lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
                ):
                    self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key])

                words = ["hello" for _ in range(1000)]
                boxes = [[1000, 1000, 1000, 1000] for _ in range(1000)]

                # Ensure truncation match
                input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=512, truncation=True)
                input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=512, truncation=True)

                for key in filter(
                    lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
                ):
                    self.assertSequenceEqual(input_p[key], input_r[key])

                # Ensure truncation with stride match
                input_p = tokenizer_p.encode_plus(
                    words, boxes=boxes, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
                )
                input_r = tokenizer_r.encode_plus(
                    words, boxes=boxes, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
                )

                for key in filter(
                    lambda x: x in ["input_ids", "token_type_ids", "attention_mask", "bbox"], input_p.keys()
                ):
                    self.assertSequenceEqual(input_p[key], input_r[key][0])

    def test_embeded_special_tokens(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                words, boxes = self.get_words_and_boxes()
                tokens_r = tokenizer_r.encode_plus(
                    words,
                    boxes=boxes,
                    add_special_tokens=True,
                )
                tokens_p = tokenizer_p.encode_plus(
                    words,
                    boxes=boxes,
                    add_special_tokens=True,
                )

                for key in tokens_p.keys():
                    self.assertEqual(tokens_r[key], tokens_p[key])

                if "token_type_ids" in tokens_r:
                    self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_r, tokens_p)

    def test_compare_add_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False)

                words, boxes = self.get_words_and_boxes()
                # tokenize()
                no_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=False)
                with_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=True)
                self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)

                # encode()
                no_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=False)
                with_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=True)
                self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)

                # encode_plus()
                no_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=False)
                with_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=True)
                for key in no_special_tokens.keys():
                    self.assertEqual(
                        len(no_special_tokens[key]),
                        len(with_special_tokens[key]) - simple_num_special_tokens_to_add,
                    )

                # batch_encode_plus()
                words, boxes = self.get_words_and_boxes_batch()

                no_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=False)
                with_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=True)
                for key in no_special_tokens.keys():
                    for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]):
                        self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add)

    @slow
    def test_layoutxlm_truncation_integration_test(self):
        words, boxes = self.get_words_and_boxes()

        tokenizer = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base", model_max_length=512)

        for i in range(12, 512):
            new_encoded_inputs = tokenizer.encode(words, boxes=boxes, max_length=i, truncation=True)

            # Ensure that the input IDs are less than the max length defined.
            self.assertLessEqual(len(new_encoded_inputs), i)

        tokenizer.model_max_length = 20
        new_encoded_inputs = tokenizer.encode(words, boxes=boxes, truncation=True)
        dropped_encoded_inputs = tokenizer.encode(words, boxes=boxes, truncation=True)

        # Ensure that the input IDs are still truncated when no max_length is specified
        self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs)
        self.assertLessEqual(len(new_encoded_inputs), 20)

    @is_pt_tf_cross_test
    def test_batch_encode_plus_tensors(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes_batch()

                # A tensor cannot be built from sequences that are not the same size
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="pt")
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="tf")

                if tokenizer.pad_token_id is None:
                    self.assertRaises(
                        ValueError,
                        tokenizer.batch_encode_plus,
                        words,
                        boxes=boxes,
                        padding=True,
                        return_tensors="pt",
                    )
                    self.assertRaises(
                        ValueError,
                        tokenizer.batch_encode_plus,
                        words,
                        boxes=boxes,
                        padding="longest",
                        return_tensors="tf",
                    )
                else:
                    pytorch_tensor = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True, return_tensors="pt")
                    tensorflow_tensor = tokenizer.batch_encode_plus(
                        words, boxes=boxes, padding="longest", return_tensors="tf"
                    )
                    encoded_sequences = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True)

                    for key in encoded_sequences.keys():
                        pytorch_value = pytorch_tensor[key].tolist()
                        tensorflow_value = tensorflow_tensor[key].numpy().tolist()
                        encoded_value = encoded_sequences[key]

                        self.assertEqual(pytorch_value, tensorflow_value, encoded_value)

    def test_sequence_ids(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            if not tokenizer.is_fast:
                continue
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                seq_0 = "Test this method."
                seq_1 = ["With", "these", "inputs."]
                boxes = [[1000, 1000, 1000, 1000] for _ in range(len(seq_1))]

                # We want sequence 0 and sequence 1 to be tagged
                # respectively with 0 and 1 token ids
                # (regardless of whether the model uses token type ids)
                # We rely on this assumption in the QA pipeline, among other places
                output = tokenizer(seq_0.split(), boxes=boxes)
                self.assertIn(0, output.sequence_ids())

1450output = tokenizer(seq_0, seq_1, boxes=boxes)1451self.assertIn(0, output.sequence_ids())1452self.assertIn(1, output.sequence_ids())1453
1454if tokenizer.num_special_tokens_to_add(pair=True):1455self.assertIn(None, output.sequence_ids())1456
    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                words = "Hey this is a <special> token".split()
                boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
                r_output = tokenizer_r.encode(words, boxes=boxes)

                special_token_id = tokenizer_r.encode(
                    ["<special>"], boxes=[1000, 1000, 1000, 1000], add_special_tokens=False
                )[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )

                    words = "Hey this is a <special> token".split()
                    boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]

                    p_output = tokenizer_p.encode(words, boxes=boxes)
                    cr_output = tokenizer_cr.encode(words, boxes=boxes)

                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
    def test_training_new_tokenizer(self):
        # This feature only exists for fast tokenizers
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_rust_tokenizer()
        new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100)
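        # train_new_from_iterator trains a brand-new tokenizer of the same type (and with the same processing
        # configuration) on SMALL_TRAINING_CORPUS, here with a target vocabulary size of 100.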
        # Test we can use the new tokenizer with something not seen during training
        text = [["this", "is", "the"], ["how", "are", "you"]]
        boxes = [[[1, 2, 3, 4], [5, 6, 7, 8], [1, 3, 4, 8]], [[5, 6, 7, 8], [4, 5, 6, 7], [3, 9, 2, 7]]]
        inputs = new_tokenizer(text, boxes=boxes)
        self.assertEqual(len(inputs["input_ids"]), 2)
        decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
        expected_result = "this is the"

        if tokenizer.backend_tokenizer.normalizer is not None:
            expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
        self.assertEqual(expected_result, decoded_input)

        # We check that the parameters of the tokenizer remained the same
        # Check we have the same number of added_tokens for both pair and non-pair inputs.
        self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False))
        self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True))

        # Check we have the correct max_length for both pair and non-pair inputs.
        self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence)
        self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair)

        # Assert the set of special tokens matches, as we didn't ask to change them
        self.assertSequenceEqual(
            tokenizer.all_special_tokens_extended,
            new_tokenizer.all_special_tokens_extended,
        )

        self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map)
    def test_training_new_tokenizer_with_special_tokens_change(self):
        # This feature only exists for fast tokenizers
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_rust_tokenizer()
        # Test with a special tokens map
        class_signature = inspect.signature(tokenizer.__class__)
        if "cls_token" in class_signature.parameters:
            new_tokenizer = tokenizer.train_new_from_iterator(
                SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: "<cls>"}
            )
            cls_id = new_tokenizer.get_vocab()["<cls>"]
            self.assertEqual(new_tokenizer.cls_token, "<cls>")
            self.assertEqual(new_tokenizer.cls_token_id, cls_id)

        # Create a new mapping from the special tokens defined in the original tokenizer
        special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy()
        special_tokens_list.remove("additional_special_tokens")
        special_tokens_map = {}
        for token in special_tokens_list:
            # Get the private one to avoid unnecessary warnings.
            if getattr(tokenizer, f"_{token}") is not None:
                special_token = getattr(tokenizer, token)
                special_tokens_map[special_token] = f"{special_token}a"
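        # For an XLM-R-style vocabulary this typically yields something like
        # {"<s>": "<s>a", "</s>": "</s>a", "<unk>": "<unk>a", ...}: every defined special token is remapped
        # to a new string with an "a" appended.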
        # Train new tokenizer
        new_tokenizer = tokenizer.train_new_from_iterator(
            SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map
        )

        # Check the changes
        for token in special_tokens_list:
            # Get the private one to avoid unnecessary warnings.
            if getattr(tokenizer, f"_{token}") is None:
                continue
            special_token = getattr(tokenizer, token)
            if special_token in special_tokens_map:
                new_special_token = getattr(new_tokenizer, token)
                self.assertEqual(special_tokens_map[special_token], new_special_token)

                new_id = new_tokenizer.get_vocab()[new_special_token]
                self.assertEqual(getattr(new_tokenizer, f"{token}_id"), new_id)

        # Check if the AddedToken / string format has been kept
        for special_token in tokenizer.all_special_tokens_extended:
            if isinstance(special_token, AddedToken) and special_token.content not in special_tokens_map:
                # The special token must appear identically in the list of the new tokenizer.
                self.assertTrue(
                    special_token in new_tokenizer.all_special_tokens_extended,
                    f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}",
                )
            elif isinstance(special_token, AddedToken):
                # The special token must appear in the list of the new tokenizer as an object of type AddedToken
                # with the same parameters as the old AddedToken, except for the content that the user has
                # requested to change.
                special_token_str = special_token.content
                new_special_token_str = special_tokens_map[special_token_str]

                find = False
                for candidate in new_tokenizer.all_special_tokens_extended:
                    if (
                        isinstance(candidate, AddedToken)
                        and candidate.content == new_special_token_str
                        and candidate.lstrip == special_token.lstrip
                        and candidate.rstrip == special_token.rstrip
                        and candidate.normalized == special_token.normalized
                        and candidate.single_word == special_token.single_word
                    ):
                        find = True
                        break
                self.assertTrue(
                    find,
                    f"'{new_special_token_str}' doesn't appear in the list "
                    f"'{new_tokenizer.all_special_tokens_extended}' as an AddedToken with the same parameters as "
                    f"'{special_token}' in the list {tokenizer.all_special_tokens_extended}",
                )
            elif special_token not in special_tokens_map:
                # The special token must appear identically in the list of the new tokenizer.
                self.assertTrue(
                    special_token in new_tokenizer.all_special_tokens_extended,
                    f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}",
                )
            else:
                # The special token must appear in the list of the new tokenizer as an object of type string.
                self.assertTrue(special_tokens_map[special_token] in new_tokenizer.all_special_tokens_extended)

        # Test we can use the new tokenizer with something not seen during training
        words = [["this", "is"], ["hello", "🤗"]]
        boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[1, 2, 3, 4], [5, 6, 7, 8]]]
        inputs = new_tokenizer(words, boxes=boxes)
        self.assertEqual(len(inputs["input_ids"]), 2)
        decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
        expected_result = "this is"

        if tokenizer.backend_tokenizer.normalizer is not None:
            expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
        self.assertEqual(expected_result, decoded_input)
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            # only test prepare_for_model for the slow tokenizer
            if tokenizer.__class__.__name__ == "LayoutXLMTokenizerFast":
                continue
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes()
                prepared_input_dict = tokenizer.prepare_for_model(words, boxes=boxes, add_special_tokens=True)

                input_dict = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
    def test_padding_different_model_input_name(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
                pad_token_id = tokenizer_p.pad_token_id

                words, boxes = self.get_words_and_boxes_batch()

                input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes)
                input_p = tokenizer_r.batch_encode_plus(words, boxes=boxes)
                # rename encoded batch to "inputs"
                input_r["inputs"] = input_r[tokenizer_r.model_input_names[0]]
                del input_r[tokenizer_r.model_input_names[0]]

                input_p["inputs"] = input_p[tokenizer_p.model_input_names[0]]
                del input_p[tokenizer_p.model_input_names[0]]
                # Renaming `input_ids` to `inputs`
                tokenizer_r.model_input_names = ["inputs"] + tokenizer_r.model_input_names[1:]
                tokenizer_p.model_input_names = ["inputs"] + tokenizer_p.model_input_names[1:]

                input_r = tokenizer_r.pad(input_r, padding="longest")
                input_p = tokenizer_r.pad(input_p, padding="longest")

                max_length = len(input_p["inputs"][0])
                self.assert_batch_padded_input_match(
                    input_r, input_p, max_length, pad_token_id, model_main_input_name="inputs"
                )
    def test_batch_encode_dynamic_overflowing(self):
        """
        When calling batch_encode with multiple sequences, it can return a different number of
        overflowing encodings for each sequence:
        [
          Sequence 1: [Encoding 1, Encoding 2],
          Sequence 2: [Encoding 1],
          Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
        ]
        This needs to be padded so that it can be represented as a tensor.
        """
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"):
                if is_torch_available():
                    returned_tensor = "pt"
                elif is_tf_available():
                    returned_tensor = "tf"
                else:
                    returned_tensor = "jax"

                # Single example
                words, boxes = self.get_words_and_boxes()
                tokens = tokenizer.encode_plus(
                    words,
                    boxes=boxes,
                    max_length=6,
                    padding=True,
                    truncation=True,
                    return_tensors=returned_tensor,
                    return_overflowing_tokens=True,
                )
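                # Everything except "bbox" comes back as a 2D tensor of shape (num_overflowed_chunks, seq_len);
                # "bbox" carries a 4-coordinate box per token, so it has an extra trailing dimension.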
                for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
                    if key != "bbox":
                        self.assertEqual(len(tokens[key].shape), 2)
                    else:
                        self.assertEqual(len(tokens[key].shape), 3)

                # Batch of examples
                # For these 2 examples, 3 training examples will be created
                words, boxes = self.get_words_and_boxes_batch()
                tokens = tokenizer.batch_encode_plus(
                    words,
                    boxes=boxes,
                    max_length=6,
                    padding=True,
                    truncation="only_first",
                    return_tensors=returned_tensor,
                    return_overflowing_tokens=True,
                )

                for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
                    if key != "bbox":
                        self.assertEqual(len(tokens[key].shape), 2)
                        self.assertEqual(tokens[key].shape[-1], 6)
                    else:
                        self.assertEqual(len(tokens[key].shape), 3)
                        self.assertEqual(tokens[key].shape[-1], 4)
    # overwrite from test_tokenization_common to speed up test
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-layoutxlm", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
1813@unittest.skip("TO DO: overwrite this very extensive test.")1814def test_alignement_methods(self):1815pass1816
1817@unittest.skip("layoutxlm tokenizer requires boxes besides sequences.")1818def test_maximum_encoding_length_pair_input(self):1819pass1820
1821@unittest.skip("layoutxlm tokenizer requires boxes besides sequences.")1822def test_maximum_encoding_length_single_input(self):1823pass1824
1825@unittest.skip("layoutxlm tokenizer requires boxes besides sequences.")1826def test_pretokenized_inputs(self):1827pass1828
1829@unittest.skip("layoutxlm tokenizer always expects pretokenized inputs.")1830def test_compare_pretokenized_inputs(self):1831pass1832
1833@unittest.skip("layoutxlm fast tokenizer does not support prepare_for_model")1834def test_compare_prepare_for_model(self):1835pass1836
    @slow
    def test_only_label_first_subword(self):
        words = ["hello", "niels"]
        boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
        word_labels = [0, 1]
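        # In the expected label sequences below, -100 (the default ignore index for the loss) marks special
        # tokens and, when only_label_first_subword=True, every subword piece after the first one of a word.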
        # test slow tokenizer
        tokenizer_p = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base")
        encoding = tokenizer_p(words, boxes=boxes, word_labels=word_labels)
        self.assertListEqual(encoding.labels, [-100, 0, -100, 1, -100, -100])

        tokenizer_p = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base", only_label_first_subword=False)
        encoding = tokenizer_p(words, boxes=boxes, word_labels=word_labels)
        self.assertListEqual(encoding.labels, [-100, 0, 0, 1, 1, -100])

        # test fast tokenizer
        tokenizer_r = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base")
        encoding = tokenizer_r(words, boxes=boxes, word_labels=word_labels)
        self.assertListEqual(encoding.labels, [-100, 0, -100, 1, -100, -100])
        tokenizer_r = LayoutXLMTokenizerFast.from_pretrained(
            "microsoft/layoutxlm-base", only_label_first_subword=False
        )
        encoding = tokenizer_r(words, boxes=boxes, word_labels=word_labels)
        self.assertListEqual(encoding.labels, [-100, 0, 0, 1, 1, -100])
    @slow
    def test_layoutxlm_integration_test(self):
        tokenizer_p = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base")
        tokenizer_r = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base")

        # There are 3 cases:
        # CASE 1: document image classification (training + inference), document image token classification
        # (inference), in which case only words and normalized bounding boxes are provided to the tokenizer
        # CASE 2: document image token classification (training),
        # in which case one also provides word labels to the tokenizer
        # CASE 3: document image visual question answering (inference),
        # in which case one also provides a question to the tokenizer

        # We need to test all 3 cases, both on batched and non-batched inputs.
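        # Roughly, the three call patterns exercised below are:
        #   CASE 1: tokenizer(words, boxes=boxes)
        #   CASE 2: tokenizer(words, boxes=boxes, word_labels=word_labels)
        #   CASE 3: tokenizer(question, words, boxes)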
        # CASE 1: not batched
        words, boxes = self.get_words_and_boxes()
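        # In the expected results below, each sentencepiece token is paired with the box of the word it came
        # from: [0, 0, 0, 0] for <s> and padding, the word's box repeated for every one of its subword pieces,
        # and [1000, 1000, 1000, 1000] for </s>.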
        expected_results = {'input_ids': [0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}  # fmt: skip

        encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20)
        encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20)
        self.assertDictEqual(dict(encoding_p), expected_results)
        self.assertDictEqual(dict(encoding_r), expected_results)
        # CASE 1: batched
        words, boxes = self.get_words_and_boxes_batch()

        expected_results = {'input_ids': [[0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 33600, 31, 759, 9351, 83, 21895, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # fmt: skip

        encoding_p = tokenizer_p(words, boxes=boxes, padding="max_length", max_length=20)
        encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20)
        self.assertDictEqual(dict(encoding_p), expected_results)
        self.assertDictEqual(dict(encoding_r), expected_results)
        # CASE 2: not batched
        words, boxes = self.get_words_and_boxes()
        word_labels = [1, 2, 3]

        expected_results = {'input_ids': [0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'labels': [-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}  # fmt: skip

        encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
        encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
        self.assertDictEqual(dict(encoding_p), expected_results)
        self.assertDictEqual(dict(encoding_r), expected_results)
        # CASE 2: batched
        words, boxes = self.get_words_and_boxes_batch()
        word_labels = [[1, 2, 3], [2, 46, 17, 22, 3]]

        expected_results = {'input_ids': [[0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 33600, 31, 759, 9351, 83, 21895, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'labels': [[-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], [-100, 2, -100, 46, 17, 22, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # fmt: skip

        encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
        encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20)
        self.assertDictEqual(dict(encoding_p), expected_results)
        self.assertDictEqual(dict(encoding_r), expected_results)
        # CASE 3: not batched
        question, words, boxes = self.get_question_words_and_boxes()

        expected_results = {'input_ids': [0, 2367, 25, 7, 1919, 9351, 32, 2, 2, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]}  # fmt: skip

        encoding_p = tokenizer_p(question, words, boxes, padding="max_length", max_length=20)
        encoding_r = tokenizer_r(question, words, boxes, padding="max_length", max_length=20)
        self.assertDictEqual(dict(encoding_p), expected_results)
        self.assertDictEqual(dict(encoding_r), expected_results)
        # CASE 3: batched
        questions, words, boxes = self.get_question_words_and_boxes_batch()

        expected_results = {'input_ids': [[0, 2367, 25, 7, 1919, 9351, 32, 2, 2, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1], [0, 3642, 83, 764, 35839, 32, 2, 2, 2367, 10, 21, 3190, 53496, 19, 2, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [336, 42, 353, 57], [34, 42, 66, 69], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]}  # fmt: skip

        encoding_p = tokenizer_p(questions, words, boxes, padding="max_length", max_length=20)
        encoding_r = tokenizer_r(questions, words, boxes, padding="max_length", max_length=20)
        self.assertDictEqual(dict(encoding_p), expected_results)
        self.assertDictEqual(dict(encoding_r), expected_results)
1937@unittest.skip("Doesn't support another framework than PyTorch")1938def test_np_encode_plus_sent_to_model(self):1939pass1940
1941@unittest.skip("Doesn't use SentencePiece")1942def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):1943pass1944
1945@unittest.skip("Doesn't use SentencePiece")1946def test_sentencepiece_tokenize_and_decode(self):1947pass1948
1949@unittest.skip("Chat is not supported")1950def test_chat_template(self):1951pass1952