# coding=utf-8
# Copyright 2018 HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import tempfile
import unittest

from transformers import AddedToken, CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
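# NOTE: FRAMEWORK is not referenced in this module; presumably it is kept for
# parity with other tokenizer tests that pass `return_tensors=FRAMEWORK`.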


@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @unittest.skip(
        "Token maps are not equal because the probability of '<unk>NOTUSED' was set to -100, so the fast tokenizer never encodes it"
    )
    def test_special_tokens_map_equal(self):
        return

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1  # 1 is the offset id, but in the spm vocab it's 3
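        # Background (based on CamembertTokenizer's fairseq-style mapping): the
        # slow tokenizer reserves the first ids for "<s>NOTUSED", "<pad>",
        # "</s>NOTUSED" and "<unk>" ahead of the SentencePiece vocab, so ids
        # are shifted relative to the raw spm model.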

        self.assertEqual(self.get_tokenizer().convert_tokens_to_ids(token), token_id)
        self.assertEqual(self.get_tokenizer().convert_ids_to_tokens(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_005)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
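        # Note: vocab_size (1_000) counts only the underlying SentencePiece
        # model; get_vocab() above also includes the added special tokens
        # (e.g. "<mask>"), which is why it reports 1_005 keys.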

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` as for `slow`: spm returns
        # the raw token instead of `<unk>` in EncodeAsPieces, so we compare ids
        # converted back to tokens rather than `tokenizer.tokenize(sequence)`.
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        expected_encoding = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # fmt: skip
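        # Sanity notes on the expected ids (for camembert-base): the leading 5
        # and trailing 6 should correspond to the <s> and </s> ids, and the 1s
        # padding the shorter sequence are <pad>, matching the zeroed tail of
        # its attention mask.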

        # CamemBERT is a French model, so we also use French texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="almanach/camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )

    # Overwritten because we have to use `from_slow` (the pretrained tokenizer.json online is wrong: it has a hole)
    def test_added_tokens_serialization(self):
        self.maxDiff = None

        # Utility to test the added vocab
        def _test_added_vocab_and_eos(expected, tokenizer_class, expected_eos, temp_dir):
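            # Reloads a tokenizer from `temp_dir` and checks that the added-token
            # maps survived the round-trip. Note: `new_eos` is captured from the
            # enclosing scope (assigned right after this helper is defined).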
            tokenizer = tokenizer_class.from_pretrained(temp_dir)
            self.assertTrue(str(expected_eos) not in tokenizer.additional_special_tokens)
            self.assertIn(new_eos, tokenizer.added_tokens_decoder.values())
            self.assertEqual(tokenizer.added_tokens_decoder[tokenizer.eos_token_id], new_eos)
            self.assertDictEqual(expected, tokenizer.added_tokens_decoder)
            return tokenizer

        new_eos = AddedToken("[NEW_EOS]", rstrip=False, lstrip=True, normalized=False)
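        # (lstrip=True lets the token absorb whitespace on its left when
        # matching, and normalized=False makes it match against the raw,
        # un-normalized input, as is typical for special tokens.)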
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                # Load a slow tokenizer from the hub, initialized with the new token so the fast tokenizer also includes it
                tokenizer = self.tokenizer_class.from_pretrained(pretrained_name, eos_token=new_eos)
                EXPECTED_ADDED_TOKENS_DECODER = tokenizer.added_tokens_decoder
                with self.subTest("Hub -> Slow: Test loading a slow tokenizer from the hub"):
                    self.assertEqual(tokenizer._eos_token, new_eos)
                    self.assertIn(new_eos, list(tokenizer.added_tokens_decoder.values()))

                with tempfile.TemporaryDirectory() as tmp_dir_2:
                    tokenizer.save_pretrained(tmp_dir_2)
                    with self.subTest(
                        "Hub -> Slow -> Slow: Test saving this slow tokenizer and reloading it in the slow class"
                    ):
                        _test_added_vocab_and_eos(
                            EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_2
                        )

                    if self.rust_tokenizer_class is not None:
                        with self.subTest(
                            "Hub -> Slow -> Fast: Test saving this slow tokenizer and reloading it in the fast class"
                        ):
                            tokenizer_fast = _test_added_vocab_and_eos(
                                EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_2
                            )
                            with tempfile.TemporaryDirectory() as tmp_dir_3:
                                tokenizer_fast.save_pretrained(tmp_dir_3)
                                with self.subTest(
                                    "Hub -> Slow -> Fast -> Fast: Test saving this fast tokenizer and reloading it in the fast class"
                                ):
                                    _test_added_vocab_and_eos(
                                        EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_3
                                    )

                                with self.subTest(
                                    "Hub -> Slow -> Fast -> Slow: Test saving this fast tokenizer and reloading it in the slow class"
                                ):
                                    _test_added_vocab_and_eos(
                                        EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_3
                                    )

                with self.subTest("Hub -> Fast: Test loading a fast tokenizer from the hub"):
                    if self.rust_tokenizer_class is not None:
                        tokenizer_fast = self.rust_tokenizer_class.from_pretrained(
                            pretrained_name, eos_token=new_eos, from_slow=True
                        )
                        self.assertEqual(tokenizer_fast._eos_token, new_eos)
                        self.assertIn(new_eos, list(tokenizer_fast.added_tokens_decoder.values()))
                        # We can't test the following strictly, because for BC we kept the default rstrip/lstrip
                        # in slow but not in fast. Will revisit once normalization is alright.
                        with self.subTest("Hub -> Fast == Hub -> Slow: make sure slow and fast tokenizer match"):
                            self.assertDictEqual(EXPECTED_ADDED_TOKENS_DECODER, tokenizer_fast.added_tokens_decoder)

                        EXPECTED_ADDED_TOKENS_DECODER = tokenizer_fast.added_tokens_decoder
                        with tempfile.TemporaryDirectory() as tmp_dir_4:
                            tokenizer_fast.save_pretrained(tmp_dir_4)
                            with self.subTest("Hub -> Fast -> Fast: saving Fast1 locally and loading"):
                                _test_added_vocab_and_eos(
                                    EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_4
                                )

                            with self.subTest("Hub -> Fast -> Slow: saving Fast1 locally and loading"):
                                _test_added_vocab_and_eos(
                                    EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_4
                                )