# coding=utf-8
# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Reformer model configuration """


import logging

from .configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)

REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/reformer-crime-and-punishment": "https://cdn.huggingface.co/google/reformer-crime-and-punishment/config.json",
    "google/reformer-enwik8": "https://cdn.huggingface.co/google/reformer-enwik8/config.json",
}
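# Note: these model identifiers can also be passed to `ReformerConfig.from_pretrained(...)`
# (a classmethod inherited from `PretrainedConfig`) to download and load the matching configuration.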


class ReformerConfig(PretrainedConfig):
    r"""
        This is the configuration class to store the configuration of a :class:`~transformers.ReformerModel`.
        It is used to instantiate a Reformer model according to the specified arguments, defining the model
        architecture.

        Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used
        to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`
        for more information.

        Args:
            attention_head_size (:obj:`int`, optional, defaults to 64):
                Dimensionality of the projected key, query and value vectors.
            attn_layers (:obj:`list(str)`, optional, defaults to ["local", "lsh", "local", "lsh", "local", "lsh"]):
                List of attention layer types in ascending order. Each entry is either an
                LSHSelfAttention layer ("lsh") or a LocalSelfAttention layer ("local").
                For more information on LSHSelfAttention layer, see `LSH Self Attention <reformer.html#lsh-self-attention>`__ .
                For more information on LocalSelfAttention layer, see `Local Self Attention <reformer.html#local-sensitive-hashing-self-attention>`__ .
            axial_pos_embds (:obj:`bool`, optional, defaults to True):
                If `True`, use axial position embeddings. For more information on how axial position embeddings work, see `Axial Position Encodings <reformer.html#axial-positional-encodings>`__
            axial_norm_std (:obj:`float`, optional, defaults to 1.0):
                The standard deviation of the normal_initializer for initializing the weight matrices of the axial positional encodings.
            axial_pos_shape (:obj:`list(int)`, optional, defaults to `[64, 64]`):
                The position dims of the axial position encodings.
                During training the product of the position dims has to equal the sequence length
                (see the consistency check sketched below the argument list).
                For more information on how axial position embeddings work, see `Axial Position Encodings <reformer.html#axial-positional-encodings>`__.
            axial_pos_embds_dim (:obj:`list(int)`, optional, defaults to `[64, 192]`):
                The embedding dims of the axial position encodings.
                The sum of the embedding dims has to equal the hidden size.
                For more information on how axial position embeddings work, see `Axial Position Encodings <reformer.html#axial-positional-encodings>`__.
            chunk_size_lm_head (:obj:`int`, optional, defaults to 0):
                The chunk size of the final language model feed forward head layer.
                A chunk size of 0 means that the feed forward layer is not chunked.
                A chunk size of n means that the feed forward layer processes n < sequence_length embeddings at a time.
                For more information on feed forward chunking, see `How does Feed Forward Chunking work? <../glossary.html#feed-forward-chunking>`__ .
            chunk_size_feed_forward (:obj:`int`, optional, defaults to 0):
                The chunk size of all feed forward layers in the residual attention blocks.
                A chunk size of 0 means that the feed forward layer is not chunked.
                A chunk size of n means that the feed forward layer processes n < sequence_length embeddings at a time.
                For more information on feed forward chunking, see `How does Feed Forward Chunking work? <../glossary.html#feed-forward-chunking>`__ .
            eos_token_id (:obj:`int`, optional, defaults to 2):
                The token id for the <EOS> token.
            feed_forward_size (:obj:`int`, optional, defaults to 512):
                Dimensionality of the "feed_forward" (i.e., feed-forward) layer in the residual attention block.
            hash_seed (:obj:`int`, optional, defaults to `None`):
                Seed that can be used to make locality sensitive hashing in LSHSelfAttention deterministic. This should only be set for testing purposes. For evaluation and training purposes `hash_seed` should be left as `None` to ensure fully random rotations in the locality sensitive hashing scheme.
            hidden_act (:obj:`str` or :obj:`function`, optional, defaults to "relu"):
                The non-linear activation function (function or string) in the feed forward layer in the residual attention block.
                If string, "gelu", "relu", "swish", "gelu_new" and "gelu_fast" are supported.
            hidden_dropout_prob (:obj:`float`, optional, defaults to 0.05):
                The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
            hidden_size (:obj:`int`, optional, defaults to 256):
                Dimensionality of the output hidden states of the residual attention blocks.
            initializer_range (:obj:`float`, optional, defaults to 0.02):
                The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
            is_decoder (:obj:`bool`, optional, defaults to False):
                If `is_decoder` is True, a causal mask is used in addition to `attention_mask`.
                When using the Reformer for causal language modeling, `is_decoder` is set to `True`.
            layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):
                The epsilon used by the layer normalization layers.
            local_attn_chunk_length (:obj:`int`, optional, defaults to 64):
                Length of chunk which attends to itself in LocalSelfAttention. Chunking reduces memory complexity from sequence length x sequence length (self attention) to chunk length x chunk length x sequence length / chunk length (chunked self attention).
            local_num_chunks_before (:obj:`int`, optional, defaults to 1):
                Number of previous neighbouring chunks to attend to in LocalSelfAttention layer in addition to itself.
            local_num_chunks_after (:obj:`int`, optional, defaults to 0):
                Number of following neighbouring chunks to attend to in LocalSelfAttention layer in addition to itself.
            local_attention_probs_dropout_prob (:obj:`float`, optional, defaults to 0.05):
                The dropout ratio for the attention probabilities in LocalSelfAttention.
            lsh_attn_chunk_length (:obj:`int`, optional, defaults to 64):
                Length of chunk which attends to itself in LSHSelfAttention. Chunking reduces memory complexity from sequence length x sequence length (self attention) to chunk length x chunk length x sequence length / chunk length (chunked self attention).
            lsh_num_chunks_before (:obj:`int`, optional, defaults to 1):
                Number of previous neighbouring chunks to attend to in LSHSelfAttention layer in addition to itself.
            lsh_num_chunks_after (:obj:`int`, optional, defaults to 0):
                Number of following neighbouring chunks to attend to in LSHSelfAttention layer in addition to itself.
            lsh_attention_probs_dropout_prob (:obj:`float`, optional, defaults to 0.0):
                The dropout ratio for the attention probabilities in LSHSelfAttention.
            max_position_embeddings (:obj:`int`, optional, defaults to 4096):
                The maximum sequence length that this model might ever be used with.
                Typically set this to something large just in case (e.g., 4096 or higher).
            num_attention_heads (:obj:`int`, optional, defaults to 2):
                Number of attention heads for each attention layer in the Transformer encoder.
            num_buckets (:obj:`int` or :obj:`list(int)`, optional, defaults to `None`):
                Number of buckets the key query vectors can be "hashed into" using the locality sensitive hashing scheme. Each query key vector is hashed into a hash in `1, ..., num_buckets`.
                The number of buckets can also be factorized into a list for improved memory complexity. In this case, each query key vector is hashed into a hash in `1-1, 1-2, ..., num_buckets[0]-1, ..., num_buckets[0]-num_buckets[1]` if `num_buckets` is factorized into two factors.
                The number of buckets (or the product of the factors) should approximately equal sequence length / lsh_attn_chunk_length. If `num_buckets` is set to `None`, a good value for `num_buckets` is calculated on the fly.
            num_hashes (:obj:`int`, optional, defaults to 1):
                Number of hashing rounds (e.g. number of random rotations) in the locality sensitive hashing scheme.
                The higher `num_hashes`, the more accurate the `LSHSelfAttention` becomes, but also the more memory and time intensive the hashing becomes.
            pad_token_id (:obj:`int`, optional, defaults to 0):
                The token id for the <PAD> token.
            vocab_size (:obj:`int`, optional, defaults to 320):
                Vocabulary size of the Reformer model. Defines the number of different tokens that
                can be represented by the `inputs_ids` passed to the forward method of :class:`~transformers.ReformerModel`.
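
        As a quick sanity check of the axial position encoding and bucketing constraints described
        above (an illustration only, using the default values of this configuration)::

            >>> from transformers import ReformerConfig
            >>> config = ReformerConfig()
            >>> # the product of the axial position dims covers the default maximum sequence length
            >>> config.axial_pos_shape[0] * config.axial_pos_shape[1] == config.max_position_embeddings
            True
            >>> # the axial embedding dims sum to the hidden size
            >>> sum(config.axial_pos_embds_dim) == config.hidden_size
            True
            >>> # sequence length / lsh_attn_chunk_length gives the order of magnitude for `num_buckets`
            >>> config.max_position_embeddings // config.lsh_attn_chunk_length
            64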

        Example::

            >>> from transformers import ReformerModel, ReformerConfig

            >>> # Initializing a Reformer configuration
            >>> configuration = ReformerConfig()

            >>> # Initializing a Reformer model
            >>> model = ReformerModel(configuration)

            >>> # Accessing the model configuration
            >>> configuration = model.config
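
            >>> # Illustration only: a decoder-style configuration for causal language modeling.
            >>> # The number of hidden layers follows directly from the list of attention layer types.
            >>> lm_configuration = ReformerConfig(is_decoder=True, attn_layers=["local", "lsh", "local", "lsh"])
            >>> lm_configuration.num_hidden_layers
            4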
    """
    model_type = "reformer"

    def __init__(
        self,
        attention_head_size=64,
        attn_layers=["local", "lsh", "local", "lsh", "local", "lsh"],
        axial_norm_std=1.0,
        axial_pos_embds=True,
        axial_pos_shape=[64, 64],
        axial_pos_embds_dim=[64, 192],
        chunk_size_lm_head=0,
        chunk_size_feed_forward=0,
        eos_token_id=2,
        feed_forward_size=512,
        hash_seed=None,
        hidden_act="relu",
        hidden_dropout_prob=0.05,
        hidden_size=256,
        initializer_range=0.02,
        is_decoder=False,
        layer_norm_eps=1e-12,
        local_num_chunks_before=1,
        local_num_chunks_after=0,
        local_attention_probs_dropout_prob=0.05,
        local_attn_chunk_length=64,
        lsh_attn_chunk_length=64,
        lsh_attention_probs_dropout_prob=0.0,
        lsh_num_chunks_before=1,
        lsh_num_chunks_after=0,
        max_position_embeddings=4096,
        num_attention_heads=2,
        num_buckets=None,
        num_hashes=1,
        pad_token_id=0,
        vocab_size=320,
        **kwargs
    ):
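        # special token ids and the decoder flag are handled by the PretrainedConfig base class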
        super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_decoder=is_decoder, **kwargs)

        self.hash_seed = hash_seed
        self.vocab_size = vocab_size
        self.attention_head_size = attention_head_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.num_hashes = num_hashes
        self.num_hidden_layers = len(attn_layers)
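        # `num_buckets` may be given as a list of factors; store it as an immutable tuple in that case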
        self.num_buckets = tuple(num_buckets) if isinstance(num_buckets, list) else num_buckets
        self.lsh_attn_chunk_length = lsh_attn_chunk_length
        self.local_attn_chunk_length = local_attn_chunk_length
        self.lsh_num_chunks_after = lsh_num_chunks_after
        self.lsh_num_chunks_before = lsh_num_chunks_before
        self.local_num_chunks_after = local_num_chunks_after
        self.local_num_chunks_before = local_num_chunks_before
        self.hidden_act = hidden_act
        self.feed_forward_size = feed_forward_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.lsh_attention_probs_dropout_prob = lsh_attention_probs_dropout_prob
        self.local_attention_probs_dropout_prob = local_attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.axial_pos_embds = axial_pos_embds
        self.axial_pos_shape = tuple(axial_pos_shape)
        self.axial_pos_embds_dim = tuple(axial_pos_embds_dim)
        self.axial_norm_std = axial_norm_std
        self.chunk_size_lm_head = chunk_size_lm_head
        self.chunk_size_feed_forward = chunk_size_feed_forward
        self.attn_layers = attn_layers