test_tokenization_bert_japanese.py
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import os
import pickle
import unittest

from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
    VOCAB_FILES_NAMES,
    BertJapaneseTokenizer,
    CharacterTokenizer,
    JumanppTokenizer,
    MecabTokenizer,
    SudachiTokenizer,
    WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi_projection

from ...test_tokenization_common import TokenizerTesterMixin


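# Most of the test classes below are gated by the `@custom_tokenizers` marker because they need
# optional Japanese tokenization backends (a MeCab binding with its dictionaries, SudachiPy, and
# a Juman++ binding). Assumption, not part of the original file: in recent transformers versions
# this marker is typically enabled via the RUN_CUSTOM_TOKENIZERS environment variable, e.g.
#   RUN_CUSTOM_TOKENIZERS=1 python -m pytest tests/models/bert_japanese/test_tokenization_bert_japanese.py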
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
            "アップルストア",
            "外国",
            "##人",
            "参政",
            "##権",
            "此れ",
            "は",
            "猫",
            "です",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

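    # The expected ids asserted below are indices into `vocab_tokens` from setUp above,
    # e.g. "こんにちは" -> 3, "こん" -> 4, "##ばんは" -> 9, "世界" -> 10, "、" -> 12, "。" -> 14.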
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    def test_mecab_full_tokenizer_with_mecab_kwargs(self):
        tokenizer = self.tokenizer_class(
            self.vocab_file, word_tokenizer_type="mecab", mecab_kwargs={"mecab_dic": "ipadic"}
        )

        text = "アップルストア"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["アップルストア"])

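    # MecabTokenizer is the word-level pre-tokenizer. The next few tests only vary the dictionary
    # backend (ipadic vs. unidic_lite vs. unidic), which changes how a compound such as
    # "アップルストア" is segmented: ipadic keeps it whole, unidic splits it into "アップル" / "ストア".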
    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            import unidic

            self.assertTrue(
                os.path.isdir(unidic.DICDIR),
                "The content of unidic was not downloaded. Run `python -m unidic download` before running this test case. Note that this requires 2.1GB on disk.",
            )
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # If the dictionary does not exist on the system, the constructor above raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )

    @require_sudachi_projection
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi_projection
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            [" ",  "\t",  "アップル",  "ストア",  "で",  "iPhone",  "8",  " ",  "が",  " ",  " ",  "\n ",  "発売",  "さ",  "れ",  "た",  " ",  "。",  " ",  " "],
        )
        # fmt: on

    @require_sudachi_projection
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi_projection
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi_projection
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi_projection
    def test_sudachi_full_tokenizer_with_sudachi_kwargs_split_mode_B(self):
        tokenizer = self.tokenizer_class(
            self.vocab_file, word_tokenizer_type="sudachi", sudachi_kwargs={"sudachi_split_mode": "B"}
        )

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "##人", "参政", "##権"])

    @require_sudachi_projection
    def test_sudachi_tokenizer_projection(self):
        tokenizer = SudachiTokenizer(
            sudachi_dict_type="core", sudachi_split_mode="A", sudachi_projection="normalized_nouns"
        )

        self.assertListEqual(tokenizer.tokenize("これはねこです。"), ["此れ", "は", "猫", "です", "。"])

    @require_sudachi_projection
    def test_sudachi_full_tokenizer_with_sudachi_kwargs_sudachi_projection(self):
        tokenizer = self.tokenizer_class(
            self.vocab_file, word_tokenizer_type="sudachi", sudachi_kwargs={"sudachi_projection": "normalized_nouns"}
        )

        self.assertListEqual(tokenizer.tokenize("これはねこです。"), ["此れ", "は", "猫", "です", "。"])

    @require_sudachi_projection
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),[" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "])  # fmt: skip

    @require_sudachi_projection
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),[" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "])  # fmt: skip

    @require_sudachi_projection
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

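    # In the Juman++ tests below, whitespace from the input survives as explicit "\u3000" tokens;
    # the trim_whitespace=True variants drop those tokens instead.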
    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"])  # fmt: skip

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],)  # fmt: skip

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],)  # fmt: skip

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_full_tokenizer_with_jumanpp_kwargs_trim_whitespace(self):
        tokenizer = self.tokenizer_class(
            self.vocab_file, word_tokenizer_type="jumanpp", jumanpp_kwargs={"trim_whitespace": True}
        )

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )

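    # WordpieceTokenizer operates on an already word-segmented string: continuation pieces inside
    # a word carry the "##" prefix, and a word that cannot be built from the vocab becomes "[UNK]".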
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]  # fmt: skip

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])  # fmt: skip

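    # This checkpoint uses a sentencepiece-style subword tokenizer, hence the "▁" word-boundary
    # markers in the expected tokens below.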
    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])  # fmt: skip

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


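# Same suite as above, but with subword_tokenizer_type="character": every kana/kanji character
# becomes its own token instead of a wordpiece.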
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"])  # fmt: skip
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


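# Loading a checkpoint with a mismatched tokenizer class should still work, but must log a warning
# in both directions (BertTokenizer <-> BertJapaneseTokenizer).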
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "google-bert/bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
