# coding=utf-8
# Copyright 2024 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tokenization classes. Copied from bert."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import re
import unicodedata

from absl import flags
import six
import tensorflow.compat.v1 as tf

FLAGS = flags.FLAGS

flags.DEFINE_bool(
    "preserve_unused_tokens", False,
    "If True, Wordpiece tokenization will not be applied to words in the vocab."
)

_UNUSED_TOKEN_RE = re.compile("^\\[unused\\d+\\]$")


def preserve_token(token, vocab):
  """Returns True if the token should forgo tokenization and be preserved."""
  if not FLAGS.preserve_unused_tokens:
    return False
  if token not in vocab:
    return False
  return bool(_UNUSED_TOKEN_RE.search(token))
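
# Illustrative sketch, not part of the original module: with
# --preserve_unused_tokens=True and a vocab that contains "[unused0]",
#   preserve_token("[unused0]", {"[unused0]": 0})  -> True
#   preserve_token("hello", {"hello": 1})          -> False  (not an [unusedN] token)
#   preserve_token("[unused0]", {})                -> False  (not in the vocab)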


def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
  """Checks whether the casing config is consistent with the checkpoint name."""

  # The casing has to be passed in by the user and there is no explicit check
  # as to whether it matches the checkpoint. The casing information probably
  # should have been stored in the bert_config.json file, but it's not, so
  # we have to heuristically detect it to validate.

  if not init_checkpoint:
    return

  m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
  if m is None:
    return

  model_name = m.group(1)

  lower_models = [
      "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
      "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
  ]

  cased_models = [
      "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
      "multi_cased_L-12_H-768_A-12"
  ]

  is_bad_config = False
  if model_name in lower_models and not do_lower_case:
    is_bad_config = True
    actual_flag = "False"
    case_name = "lowercased"
    opposite_flag = "True"

  if model_name in cased_models and do_lower_case:
    is_bad_config = True
    actual_flag = "True"
    case_name = "cased"
    opposite_flag = "False"

  if is_bad_config:
    raise ValueError(
        "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
        "However, `%s` seems to be a %s model, so you "
        "should pass in `--do_lower_case=%s` so that the fine-tuning matches "
        "how the model was pre-trained. If this error is wrong, please "
        "just comment out this check." % (actual_flag, init_checkpoint,
                                          model_name, case_name, opposite_flag))
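
# Illustrative sketch with assumed paths (not from the original file): calling
#   validate_case_matches_checkpoint(
#       do_lower_case=False,
#       init_checkpoint="uncased_L-12_H-768_A-12/bert_model.ckpt")
# raises the ValueError above, since the checkpoint name indicates an uncased
# (lowercased) model while the flag requests cased behavior.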


def convert_to_unicode(text):
  """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
  if six.PY3:
    if isinstance(text, str):
      return text
    elif isinstance(text, bytes):
      return text.decode("utf-8", "ignore")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  elif six.PY2:
    if isinstance(text, str):
      return text.decode("utf-8", "ignore")
    elif isinstance(text, unicode):
      return text
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  else:
    raise ValueError("Not running on Python 2 or Python 3?")


def printable_text(text):
  """Returns text encoded in a way suitable for print or `tf.logging`."""

  # These functions want `str` for both Python 2 and Python 3, but in one case
  # it's a Unicode string and in the other it's a byte string.
  if six.PY3:
    if isinstance(text, str):
      return text
    elif isinstance(text, bytes):
      return text.decode("utf-8", "ignore")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  elif six.PY2:
    if isinstance(text, str):
      return text
    elif isinstance(text, unicode):
      return text.encode("utf-8")
    else:
      raise ValueError("Unsupported string type: %s" % (type(text)))
  else:
    raise ValueError("Not running on Python 2 or Python 3?")


def load_vocab(vocab_file):
  """Loads a vocabulary file into a dictionary."""
  vocab = collections.OrderedDict()
  with tf.io.gfile.GFile(vocab_file, "r") as reader:
    while True:
      token = convert_to_unicode(reader.readline())
      if not token:
        break
      token = token.strip()
      if token not in vocab:
        vocab[token] = len(vocab)
  return vocab
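
# Illustrative sketch (assumed file contents): a vocab file lists one token per
# line, and each token's id is its line index. For a file containing
#   [PAD]
#   [UNK]
#   the
# load_vocab returns OrderedDict([("[PAD]", 0), ("[UNK]", 1), ("the", 2)]).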


def convert_by_vocab(vocab, items):
  """Converts a sequence of [tokens|ids] using the vocab."""
  output = []
  for item in items:
    output.append(vocab[item])
  return output


def convert_tokens_to_ids(vocab, tokens):
  return convert_by_vocab(vocab, tokens)


def convert_ids_to_tokens(inv_vocab, ids):
  return convert_by_vocab(inv_vocab, ids)


def whitespace_tokenize(text):
  """Runs basic whitespace cleaning and splitting on a piece of text."""
  text = text.strip()
  if not text:
    return []
  tokens = text.split()
  return tokens


class FullTokenizer(object):
  """Runs end-to-end tokenization."""

  def __init__(self, vocab_file, do_lower_case=True):
    self.vocab = load_vocab(vocab_file)
    self.inv_vocab = {v: k for k, v in self.vocab.items()}
    self.basic_tokenizer = BasicTokenizer(
        do_lower_case=do_lower_case, vocab=self.vocab)
    self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

  def tokenize(self, text):
    split_tokens = []
    for token in self.basic_tokenizer.tokenize(text):
      if preserve_token(token, self.vocab):
        split_tokens.append(token)
        continue
      for sub_token in self.wordpiece_tokenizer.tokenize(token):
        split_tokens.append(sub_token)

    return split_tokens

  def convert_tokens_to_ids(self, tokens):
    return convert_by_vocab(self.vocab, tokens)

  def convert_ids_to_tokens(self, ids):
    return convert_by_vocab(self.inv_vocab, ids)
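
# Illustrative usage sketch (the vocab path and the exact output are
# assumptions, not part of the original module):
#   tokenizer = FullTokenizer(vocab_file="/path/to/vocab.txt", do_lower_case=True)
#   tokens = tokenizer.tokenize(u"John Johanson's house")
#   ids = tokenizer.convert_tokens_to_ids(tokens)
# With the standard BERT uncased vocab this yields tokens such as
# ["john", "johan", "##son", "'", "s", "house"]; the exact split depends on
# the vocabulary used.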


class BasicTokenizer(object):
  """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

  def __init__(self, do_lower_case=True, vocab=tuple()):
    """Constructs a BasicTokenizer.

    Args:
      do_lower_case: Whether to lower case the input.
      vocab: A container of tokens to not mutate during tokenization.
    """
    self.do_lower_case = do_lower_case
    self.vocab = vocab

  def tokenize(self, text):
    """Tokenizes a piece of text."""
    text = convert_to_unicode(text)
    text = self._clean_text(text)

    # This was added on November 1st, 2018 for the multilingual and Chinese
    # models. This is also applied to the English models now, but it doesn't
    # matter since the English models were not trained on any Chinese data
    # and generally don't have any Chinese data in them (there are Chinese
    # characters in the vocabulary because Wikipedia does have some Chinese
    # words in the English Wikipedia).
    text = self._tokenize_chinese_chars(text)

    orig_tokens = whitespace_tokenize(text)
    split_tokens = []
    for token in orig_tokens:
      if preserve_token(token, self.vocab):
        split_tokens.append(token)
        continue
      if self.do_lower_case:
        token = token.lower()
        token = self._run_strip_accents(token)
      split_tokens.extend(self._run_split_on_punc(token))

    output_tokens = whitespace_tokenize(" ".join(split_tokens))
    return output_tokens
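
  # Illustrative sketch: with do_lower_case=True,
  #   BasicTokenizer().tokenize(u" Hello, World! ")
  # yields ["hello", ",", "world", "!"] -- whitespace is collapsed, text is
  # lowercased, accents are stripped, and punctuation becomes its own token.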

  def _run_strip_accents(self, text):
    """Strips accents from a piece of text."""
    text = unicodedata.normalize("NFD", text)
    output = []
    for char in text:
      cat = unicodedata.category(char)
      if cat == "Mn":
        continue
      output.append(char)
    return "".join(output)

  def _run_split_on_punc(self, text):
    """Splits punctuation on a piece of text."""
    chars = list(text)
    i = 0
    start_new_word = True
    output = []
    while i < len(chars):
      char = chars[i]
      if _is_punctuation(char):
        output.append([char])
        start_new_word = True
      else:
        if start_new_word:
          output.append([])
        start_new_word = False
        output[-1].append(char)
      i += 1

    return ["".join(x) for x in output]
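
  # Illustrative sketch: _run_split_on_punc(u"hello,world!") returns
  # ["hello", ",", "world", "!"]; each punctuation character becomes its own
  # group and starts a new group for the characters that follow it.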

  def _tokenize_chinese_chars(self, text):
    """Adds whitespace around any CJK character."""
    output = []
    for char in text:
      cp = ord(char)
      if self._is_chinese_char(cp):
        output.append(" ")
        output.append(char)
        output.append(" ")
      else:
        output.append(char)
    return "".join(output)

  def _is_chinese_char(self, cp):
    """Checks whether CP is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and are handled
    # like all of the other languages.
    if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
        (cp >= 0x3400 and cp <= 0x4DBF) or  #
        (cp >= 0x20000 and cp <= 0x2A6DF) or  #
        (cp >= 0x2A700 and cp <= 0x2B73F) or  #
        (cp >= 0x2B740 and cp <= 0x2B81F) or  #
        (cp >= 0x2B820 and cp <= 0x2CEAF) or
        (cp >= 0xF900 and cp <= 0xFAFF) or  #
        (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
      return True

    return False

  def _clean_text(self, text):
    """Performs invalid character removal and whitespace cleanup on text."""
    output = []
    for char in text:
      cp = ord(char)
      if cp == 0 or cp == 0xfffd or _is_control(char):
        continue
      if _is_whitespace(char):
        output.append(" ")
      else:
        output.append(char)
    return "".join(output)


class WordpieceTokenizer(object):
  """Runs WordPiece tokenization."""

  def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
    self.vocab = vocab
    self.unk_token = unk_token
    self.max_input_chars_per_word = max_input_chars_per_word

  def tokenize(self, text):
    """Tokenizes a piece of text into its word pieces.

    This uses a greedy longest-match-first algorithm to perform tokenization
    using the given vocabulary.

    For example:
      input = "unaffable"
      output = ["un", "##aff", "##able"]

    Args:
      text: A single token or whitespace separated tokens. This should have
        already been passed through `BasicTokenizer`.

    Returns:
      A list of wordpiece tokens.
    """

    text = convert_to_unicode(text)

    output_tokens = []
    for token in whitespace_tokenize(text):
      chars = list(token)
      if len(chars) > self.max_input_chars_per_word:
        output_tokens.append(self.unk_token)
        continue

      is_bad = False
      start = 0
      sub_tokens = []
      while start < len(chars):
        end = len(chars)
        cur_substr = None
        while start < end:
          substr = "".join(chars[start:end])
          if start > 0:
            substr = "##" + substr
          if substr in self.vocab:
            cur_substr = substr
            break
          end -= 1
        if cur_substr is None:
          is_bad = True
          break
        sub_tokens.append(cur_substr)
        start = end

      if is_bad:
        output_tokens.append(self.unk_token)
      else:
        output_tokens.extend(sub_tokens)
    return output_tokens
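
# Illustrative sketch (assumed toy vocab, not from the original file): with a
# vocab containing {"un", "##aff", "##able"},
#   WordpieceTokenizer(vocab).tokenize(u"unaffable")
# returns ["un", "##aff", "##able"], while a token that cannot be covered by
# the vocab, e.g. u"xyz", comes back as ["[UNK]"].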


def _is_whitespace(char):
  """Checks whether `char` is a whitespace character."""
  # \t, \n, and \r are technically control characters but we treat them
  # as whitespace since they are generally considered as such.
  if char == " " or char == "\t" or char == "\n" or char == "\r":
    return True
  cat = unicodedata.category(char)
  if cat == "Zs":
    return True
  return False


def _is_control(char):
  """Checks whether `char` is a control character."""
  # These are technically control characters but we count them as whitespace
  # characters.
  if char == "\t" or char == "\n" or char == "\r":
    return False
  cat = unicodedata.category(char)
  if cat in ("Cc", "Cf"):
    return True
  return False


def _is_punctuation(char):
  """Checks whether `char` is a punctuation character."""
  cp = ord(char)
  # We treat all non-letter/number ASCII as punctuation.
  # Characters such as "^", "$", and "`" are not in the Unicode
  # Punctuation class but we treat them as punctuation anyways, for
  # consistency.
  if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
      (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
    return True
  cat = unicodedata.category(char)
  if cat.startswith("P"):
    return True
  return False
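

# The block below is a hedged, self-contained usage sketch added for
# illustration; it is not part of the original module. It writes a tiny
# made-up vocabulary to a temporary file and runs FullTokenizer end to end.
if __name__ == "__main__":
  import tempfile

  # absl flags must be parsed before FLAGS.preserve_unused_tokens is read.
  FLAGS(["tokenization_demo"])

  # Assumed toy vocabulary; real BERT vocab files ship with the pretrained
  # checkpoints.
  demo_tokens = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "un", "##aff", "##able",
                 "hello", ",", "world", "!"]
  with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as f:
    f.write("\n".join(demo_tokens) + "\n")
    demo_vocab_file = f.name

  demo_tokenizer = FullTokenizer(vocab_file=demo_vocab_file, do_lower_case=True)
  print(demo_tokenizer.tokenize(u"Hello, unaffable world!"))
  # Expected with this toy vocab:
  # ['hello', ',', 'un', '##aff', '##able', 'world', '!']
  print(demo_tokenizer.convert_tokens_to_ids(["hello", ",", "world", "!"]))
  # Expected: [7, 8, 9, 10]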