CSS-LM

tokenization_xlnet.py · 343 lines · 13.6 KB
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for XLNet model."""


import logging
import os
import unicodedata
from shutil import copyfile
from typing import List, Optional

from .tokenization_utils import PreTrainedTokenizer


logger = logging.getLogger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-spiece.model",
        "xlnet-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    """
    Constructs an XLNet tokenizer. Based on `SentencePiece <https://github.com/google/sentencepiece>`__.

    This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the methods. Users
    should refer to the superclass for more information regarding methods.

    Args:
        vocab_file (:obj:`string`):
            `SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a .spm extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to lowercase the input when tokenizing.
        remove_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to strip the text when tokenizing (removing excess spaces before and after the string).
        keep_accents (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to keep accents when tokenizing.
        bos_token (:obj:`string`, `optional`, defaults to "<s>"):
            The beginning of sequence token that was used during pre-training. Can be used as a sequence classifier
            token.

            .. note::

                When building a sequence using special tokens, this is not the token that is used for the beginning
                of sequence. The token used is the :obj:`cls_token`.
        eos_token (:obj:`string`, `optional`, defaults to "</s>"):
            The end of sequence token.

            .. note::

                When building a sequence using special tokens, this is not the token that is used for the end
                of sequence. The token used is the :obj:`sep_token`.
        unk_token (:obj:`string`, `optional`, defaults to "<unk>"):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (:obj:`string`, `optional`, defaults to "<sep>"):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering.
            When building a sequence with special tokens, it is appended after each sequence, before the final
            :obj:`cls_token`.
        pad_token (:obj:`string`, `optional`, defaults to "<pad>"):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (:obj:`string`, `optional`, defaults to "<cls>"):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the last token of the sequence when built with
            special tokens.
        mask_token (:obj:`string`, `optional`, defaults to "<mask>"):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        additional_special_tokens (:obj:`List[str]`, `optional`, defaults to :obj:`["<eop>", "<eod>"]`):
            Additional special tokens used by the tokenizer.

    Attributes:
        sp_model (:obj:`SentencePieceProcessor`):
            The `SentencePiece` processor that is used for every conversion (string, tokens and IDs).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLNetTokenizer: https://github.com/google/sentencepiece "
                "pip install sentencepiece"
            )
            raise

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it here and reload it in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLNetTokenizer: https://github.com/google/sentencepiece "
                "pip install sentencepiece"
            )
            raise
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

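    # Illustration (not part of the original file): with the default flags (remove_space=True,
    # keep_accents=False, do_lower_case=False), preprocess_text collapses whitespace, normalizes
    # ``/'' quotes and strips combining accents, e.g.
    #
    #     tokenizer.preprocess_text(" Héllo  ``world'' ")   # -> 'Hello "world"'
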
    def _tokenize(self, text, sample=False):
        """ Tokenize a string. """
        text = self.preprocess_text(text)

        if not sample:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1)
        new_pieces = []
        for piece in pieces:
            # Split a trailing comma off pieces that end in "<digit>," so the comma becomes its own piece.
            if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

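    # Illustration (not part of the original file): the digit/comma branch in _tokenize re-encodes
    # pieces that end in "<digit>," so that the comma becomes its own piece. The exact sub-words
    # depend on the loaded SentencePiece model, but schematically a piece such as "▁2015," comes
    # back as something like ["▁2015", ","].
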
    def _convert_token_to_id(self, token):
        """ Converts a token (str) to an id using the vocab. """
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

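    # Illustration (not part of the original file): convert_tokens_to_string joins the pieces and
    # maps the SentencePiece word-boundary marker back to a space, e.g.
    #
    #     tokenizer.convert_tokens_to_string(["▁Hello", "▁world", "!"])   # -> 'Hello world!'
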
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks
        by concatenating and adding special tokens.
        An XLNet sequence has the following format:

        - single sequence: ``X <sep> <cls>``
        - pair of sequences: ``A <sep> B <sep> <cls>``

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
                Optional second list of IDs for sequence pairs.

        Returns:
            :obj:`List[int]`: list of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

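    # Illustration (not part of the original file): the ids below are placeholders; the real values
    # of sep_token_id and cls_token_id come from the loaded vocabulary.
    #
    #     tokenizer.build_inputs_with_special_tokens([10, 11])
    #     # -> [10, 11, sep_token_id, cls_token_id]
    #     tokenizer.build_inputs_with_special_tokens([10, 11], [20])
    #     # -> [10, 11, sep_token_id, 20, sep_token_id, cls_token_id]
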
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``prepare_for_model`` methods.

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of ids.
            token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Set to True if the token list is already formatted with special tokens for the model.

        Returns:
            :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

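    # Illustration (not part of the original file): the mask mirrors build_inputs_with_special_tokens,
    # marking the positions of the added <sep>/<cls> tokens with 1, e.g.
    #
    #     tokenizer.get_special_tokens_mask([10, 11], [20])   # -> [0, 0, 1, 0, 1, 1]
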
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
        An XLNet sequence pair mask has the following format:

        ::

            0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 2
            | first sequence    | second sequence     | CLS segment ID

        If :obj:`token_ids_1` is :obj:`None`, only the first portion of the mask (0's) is returned.

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of ids.
            token_ids_1 (:obj:`List[int]`, `optional`, defaults to :obj:`None`):
                Optional second list of IDs for sequence pairs.

        Returns:
            :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
            sequence(s).
        """
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

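    # Illustration (not part of the original file): segment 0 covers the first sequence and its
    # <sep>, segment 1 covers the second sequence and its <sep>, and the trailing 2 is the CLS
    # segment id, e.g.
    #
    #     tokenizer.create_token_type_ids_from_sequences([10, 11], [20])   # -> [0, 0, 0, 1, 1, 2]
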
    def save_vocabulary(self, save_directory):
        """
        Save the sentencepiece vocabulary (copy original file) and special tokens file to a directory.

        Args:
            save_directory (:obj:`str`):
                The directory in which to save the vocabulary.

        Returns:
            :obj:`Tuple(str)`: Paths to the files saved.
        """
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)

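
# Usage sketch (illustrative, not part of the original module), assuming a trained SentencePiece
# model at the hypothetical path "spiece.model". Because of the relative import at the top of the
# file, this only runs when the module is executed as part of its package
# (e.g. ``python -m <package>.tokenization_xlnet``).
if __name__ == "__main__":
    tokenizer = XLNetTokenizer("spiece.model")  # hypothetical vocab file path
    pieces = tokenizer.tokenize('He said, "2015, what a year."')  # sub-word pieces, model-dependent
    ids = tokenizer.convert_tokens_to_ids(pieces)
    print(pieces)
    print(tokenizer.build_inputs_with_special_tokens(ids))  # ids + <sep> + <cls>
    print(tokenizer.create_token_type_ids_from_sequences(ids))  # segment ids ending in 2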