transformers / test_tokenization_camembert.py
# coding=utf-8
# Copyright 2018 HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import tempfile
import unittest

from transformers import AddedToken, CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"


@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @unittest.skip(
        "Token maps are not equal because someone set the probability of ('<unk>NOTUSED', -100), so it's never encoded for fast"
    )
    def test_special_tokens_map_equal(self):
        return

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1  # 1 is the offset id, but in the spm vocab it's 3

        self.assertEqual(self.get_tokenizer().convert_tokens_to_ids(token), token_id)
        self.assertEqual(self.get_tokenizer().convert_ids_to_tokens(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_005)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` as for `slow`,
        # because spm gives back the raw token instead of `<unk>` in EncodeAsPieces.
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        expected_encoding = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # fmt: skip

        # CamemBERT is a French model, so we also use French texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="almanach/camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )

    # Overwritten because we have to load from slow (the online pretrained tokenizer is wrong: its tokenizer.json has a hole)
    def test_added_tokens_serialization(self):
        self.maxDiff = None

        # Utility to test the added vocab
        def _test_added_vocab_and_eos(expected, tokenizer_class, expected_eos, temp_dir):
            tokenizer = tokenizer_class.from_pretrained(temp_dir)
            self.assertTrue(str(expected_eos) not in tokenizer.additional_special_tokens)
            self.assertIn(new_eos, tokenizer.added_tokens_decoder.values())
            self.assertEqual(tokenizer.added_tokens_decoder[tokenizer.eos_token_id], new_eos)
            self.assertDictEqual(expected, tokenizer.added_tokens_decoder)
            return tokenizer

        new_eos = AddedToken("[NEW_EOS]", rstrip=False, lstrip=True, normalized=False)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                # Load a slow tokenizer from the hub, initializing it with the new token so the fast tokenizer also includes it
                tokenizer = self.tokenizer_class.from_pretrained(pretrained_name, eos_token=new_eos)
                EXPECTED_ADDED_TOKENS_DECODER = tokenizer.added_tokens_decoder
                with self.subTest("Hub -> Slow: Test loading a slow tokenizer from the hub"):
                    self.assertEqual(tokenizer._eos_token, new_eos)
                    self.assertIn(new_eos, list(tokenizer.added_tokens_decoder.values()))

                with tempfile.TemporaryDirectory() as tmp_dir_2:
                    tokenizer.save_pretrained(tmp_dir_2)
                    with self.subTest(
                        "Hub -> Slow -> Slow: Test saving this slow tokenizer and reloading it in the slow class"
                    ):
                        _test_added_vocab_and_eos(
                            EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_2
                        )

                    if self.rust_tokenizer_class is not None:
                        with self.subTest(
                            "Hub -> Slow -> Fast: Test saving this slow tokenizer and reloading it in the fast class"
                        ):
                            tokenizer_fast = _test_added_vocab_and_eos(
                                EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_2
                            )
                            with tempfile.TemporaryDirectory() as tmp_dir_3:
                                tokenizer_fast.save_pretrained(tmp_dir_3)
                                with self.subTest(
                                    "Hub -> Slow -> Fast -> Fast: Test saving this fast tokenizer and reloading it in the fast class"
                                ):
                                    _test_added_vocab_and_eos(
                                        EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_3
                                    )

                                with self.subTest(
                                    "Hub -> Slow -> Fast -> Slow: Test saving this slow tokenizer and reloading it in the slow class"
                                ):
                                    _test_added_vocab_and_eos(
                                        EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_3
                                    )

                with self.subTest("Hub -> Fast: Test loading a fast tokenizer from the hub"):
                    if self.rust_tokenizer_class is not None:
                        tokenizer_fast = self.rust_tokenizer_class.from_pretrained(
                            pretrained_name, eos_token=new_eos, from_slow=True
                        )
                        self.assertEqual(tokenizer_fast._eos_token, new_eos)
                        self.assertIn(new_eos, list(tokenizer_fast.added_tokens_decoder.values()))
                        # We can't fully test the following because, for BC, we kept the default rstrip/lstrip in slow but not in fast. Will revisit once normalization is sorted out.
                        with self.subTest("Hub -> Fast == Hub -> Slow: make sure slow and fast tokenizer match"):
                            self.assertDictEqual(EXPECTED_ADDED_TOKENS_DECODER, tokenizer_fast.added_tokens_decoder)

                        EXPECTED_ADDED_TOKENS_DECODER = tokenizer_fast.added_tokens_decoder
                        with tempfile.TemporaryDirectory() as tmp_dir_4:
                            tokenizer_fast.save_pretrained(tmp_dir_4)
                            with self.subTest("Hub -> Fast -> Fast: saving Fast1 locally and loading"):
                                _test_added_vocab_and_eos(
                                    EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_4
                                )

                            with self.subTest("Hub -> Fast -> Slow: saving Fast1 locally and loading"):
                                _test_added_vocab_and_eos(
                                    EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_4
                                )

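# Usage note: this module is normally run with pytest from the root of a transformers checkout, e.g.
#   RUN_SLOW=1 python -m pytest tests/models/camembert/test_tokenization_camembert.py -v
# The path above is assumed from the standard tests/models/<model> layout of the transformers repo;
# RUN_SLOW=1 enables the @slow-decorated integration test, which downloads the pretrained
# "almanach/camembert-base" tokenizer files from the hub.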