CSS-LM
modeling_encoder_decoder.py · 318 lines · 16.8 KB
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Classes to support Encoder-Decoder architectures """


import logging
from typing import Optional

from .configuration_encoder_decoder import EncoderDecoderConfig
from .configuration_utils import PretrainedConfig
from .modeling_utils import PreTrainedModel


logger = logging.getLogger(__name__)


class EncoderDecoderModel(PreTrainedModel):
    r"""
        :class:`~transformers.EncoderDecoderModel` is a generic model class that will be
        instantiated as a transformer architecture with one of the base model
        classes of the library as encoder and another one as
        decoder when created with the `AutoModel.from_pretrained(pretrained_model_name_or_path)`
        class method for the encoder and `AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path)` class method for the decoder.
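
        Example (a minimal illustrative sketch; it builds a randomly initialized Bert2Bert from two
        ``BertConfig`` objects, so no pretrained weights are loaded)::

            >>> from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel
            >>> # compose an encoder-decoder config; `is_decoder=True` marks the second config as the
            >>> # decoder so that cross-attention layers are added to it
            >>> config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig(is_decoder=True))
            >>> model = EncoderDecoderModel(config=config)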
    """
    config_class = EncoderDecoderConfig
    base_model_prefix = "encoder_decoder"

    def __init__(
        self,
        config: Optional[PretrainedConfig] = None,
        encoder: Optional[PreTrainedModel] = None,
        decoder: Optional[PreTrainedModel] = None,
    ):
        assert config is not None or (
            encoder is not None and decoder is not None
        ), "Either a configuration or an encoder and a decoder has to be provided"
        if config is None:
            config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
        else:
            assert isinstance(config, self.config_class), "config: {} has to be of type {}".format(
                config, self.config_class
            )
        # initialize with config
        super().__init__(config)

        if encoder is None:
            from .modeling_auto import AutoModel

            encoder = AutoModel.from_config(config.encoder)

        if decoder is None:
            from .modeling_auto import AutoModelForCausalLM

            decoder = AutoModelForCausalLM.from_config(config.decoder)

        self.encoder = encoder
        self.decoder = decoder
        assert (
            self.encoder.get_output_embeddings() is None
        ), "The encoder {} should not have an LM head. Please use a model without an LM head".format(self.encoder)

    def tie_weights(self):
        # for now, no weight tying in encoder-decoder models
        pass

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    def get_input_embeddings(self):
        return self.encoder.get_input_embeddings()

    def get_output_embeddings(self):
        return self.decoder.get_output_embeddings()

    @classmethod
    def from_encoder_decoder_pretrained(
        cls,
        encoder_pretrained_model_name_or_path: Optional[str] = None,
        decoder_pretrained_model_name_or_path: Optional[str] = None,
        *model_args,
        **kwargs
    ) -> PreTrainedModel:
        r""" Instantiates an encoder and a decoder from one or two base classes of the library from pre-trained model checkpoints.


        The model is set in evaluation mode by default using `model.eval()` (dropout modules are deactivated).
        To train the model, you first need to set it back in training mode with `model.train()`.

        Params:
            encoder_pretrained_model_name_or_path (:obj:`str`, `optional`, defaults to `None`):
                Information necessary to initiate the encoder. Either:

                - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
                - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
                - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/encoder``.
                - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to ``True`` and a configuration object should be provided as the ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint to a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.

            decoder_pretrained_model_name_or_path (:obj:`str`, `optional`, defaults to `None`):
                Information necessary to initiate the decoder. Either:

                - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
                - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
                - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/decoder``.
                - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to ``True`` and a configuration object should be provided as the ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint to a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.

            model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method.

            kwargs: (`optional`) Remaining dictionary of keyword arguments.
                Can be used to update the configuration object (after it has been loaded) and to initiate the model (e.g. ``output_attentions=True``). Keyword arguments prefixed with ``encoder_`` or ``decoder_`` are stripped of their prefix and forwarded to the encoder and decoder ``from_pretrained`` calls, respectively.

        Examples::

            >>> from transformers import EncoderDecoderModel
            >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained('bert-base-uncased', 'bert-base-uncased') # initialize Bert2Bert
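
        Keyword arguments prefixed with ``encoder_`` or ``decoder_`` are stripped of their prefix and
        forwarded to the corresponding ``from_pretrained`` call. A minimal illustrative sketch (not part
        of the original example) that passes a custom decoder config this way::

            >>> from transformers import AutoConfig
            >>> decoder_config = AutoConfig.from_pretrained('bert-base-uncased', is_decoder=True)
            >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained(
            ...     'bert-base-uncased', 'bert-base-uncased', decoder_config=decoder_config
            ... )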
        """

        kwargs_encoder = {
            argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
        }

        kwargs_decoder = {
            argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
        }

        # Load and initialize the encoder and decoder
        # The distinction between encoder and decoder at the model level is made
        # by the value of the flag `is_decoder` that we need to set correctly.
        encoder = kwargs_encoder.pop("model", None)
        if encoder is None:
            assert (
                encoder_pretrained_model_name_or_path is not None
            ), "If `encoder_model` is not defined as an argument, an `encoder_pretrained_model_name_or_path` has to be defined"
            from .modeling_auto import AutoModel

            encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
        encoder.config.is_decoder = False

        decoder = kwargs_decoder.pop("model", None)
        if decoder is None:
            assert (
                decoder_pretrained_model_name_or_path is not None
            ), "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has to be defined"
            from .modeling_auto import AutoModelForCausalLM

            if "config" not in kwargs_decoder:
                from .configuration_auto import AutoConfig

                decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path)
                if decoder_config.is_decoder is False:
                    logger.info(
                        f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
                    )
                    decoder_config.is_decoder = True

                kwargs_decoder["config"] = decoder_config

            if kwargs_decoder["config"].is_decoder is False:
                logger.warning(
                    f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, make sure that the attribute `is_decoder` of `decoder_config` passed to `.from_encoder_decoder_pretrained(...)` is set to `True` or do not pass a `decoder_config` to `.from_encoder_decoder_pretrained(...)`"
                )

            decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)

        return cls(encoder=encoder, decoder=decoder)

    def forward(
        self,
        input_ids=None,
        inputs_embeds=None,
        attention_mask=None,
        head_mask=None,
        encoder_outputs=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        decoder_head_mask=None,
        decoder_inputs_embeds=None,
        labels=None,
        **kwargs,
    ):
        """
198
        Args:
199
            input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
200
                Indices of input sequence tokens in the vocabulary for the encoder.
201
                Indices can be obtained using :class:`transformers.PretrainedTokenizer`.
202
                See :func:`transformers.PreTrainedTokenizer.encode` and
203
                :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
204
            inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
205
                Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
206
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
207
                than the model's internal embedding lookup matrix.
208
            attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
209
                Mask to avoid performing attention on padding token indices for the encoder.
210
                Mask values selected in ``[0, 1]``:
211
                ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
212
            head_mask: (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
213
                Mask to nullify selected heads of the self-attention modules for the encoder.
214
                Mask values selected in ``[0, 1]``:
215
                ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
216
            encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`, defaults to :obj:`None`):
217
                Tuple consists of (`last_hidden_state`, `optional`: `hidden_states`, `optional`: `attentions`)
218
                `last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`) is a sequence of hidden-states at the output of the last layer of the encoder.
219
                Used in the cross-attention of the decoder.
220
            decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`, defaults to :obj:`None`):
221
                Provide for sequence to sequence training to the decoder.
222
                Indices can be obtained using :class:`transformers.PretrainedTokenizer`.
223
                See :func:`transformers.PreTrainedTokenizer.encode` and
224
                :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
225
            decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`, defaults to :obj:`None`):
226
                Default behavior: generate a tensor that ignores pad tokens in decoder_input_ids. Causal mask will also be used by default.
227
            decoder_head_mask: (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
228
                Mask to nullify selected heads of the self-attention modules for the decoder.
229
                Mask values selected in ``[0, 1]``:
230
                ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
231
            decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
232
                Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded representation.
233
                This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors
234
                than the model's internal embedding lookup matrix.
235
            labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
236
                Labels for computing the masked language modeling loss for the decoder.
237
                Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
238
                Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
239
                in ``[0, ..., config.vocab_size]``
240
            kwargs: (`optional`) Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:
241
                - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function.
242
                - With a `decoder_` prefix which will be input as `**decoder_kwargs` for the decoder forward function.
243

244
        Examples::
245

246
            >>> from transformers import EncoderDecoderModel, BertTokenizer
247
            >>> import torch
248

249
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
250
            >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained('bert-base-uncased', 'bert-base-uncased') # initialize Bert2Bert
251

252
            >>> # forward
253
            >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
254
            >>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids)
255

256
            >>> # training
257
            >>> loss, outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=input_ids)[:2]
258

259
            >>> # generation
260
            >>> generated = model.generate(input_ids, decoder_start_token_id=model.config.decoder.pad_token_id)
261

262
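
        An additional illustrative sketch (not part of the original example): precomputed encoder outputs can
        be reused, since the forward pass skips the encoder whenever ``encoder_outputs`` is provided::

            >>> encoder_outputs = model.encoder(input_ids=input_ids, return_dict=False)
            >>> outputs = model(encoder_outputs=encoder_outputs, decoder_input_ids=input_ids)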
        """

        kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}

        kwargs_decoder = {
            argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
        }

        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                inputs_embeds=inputs_embeds,
                head_mask=head_mask,
                return_dict=False,
                **kwargs_encoder,
            )

        hidden_states = encoder_outputs[0]

        # Decode
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            inputs_embeds=decoder_inputs_embeds,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=hidden_states,
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            labels=labels,
            return_dict=False,
            **kwargs_decoder,
        )

        return decoder_outputs + encoder_outputs

    def prepare_inputs_for_generation(self, input_ids, past, attention_mask, **kwargs):
        assert past is not None, "past has to be defined for encoder_outputs"

        # first step
        if type(past) is tuple:
            encoder_outputs, _ = past
        else:
            encoder_outputs = (past,)

        decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids)

        return {
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_inputs["attention_mask"],
            "decoder_input_ids": decoder_inputs["input_ids"],
            "encoder_outputs": encoder_outputs,
        }

    def _reorder_cache(self, past, beam_idx):
        # by default, encoder-decoder models do not re-order the past
        # TODO(PVP): might have to be updated, e.g. if GPT2 is to be used as a decoder
        return past