colossalai / configuration_chatglm.py
"""
This code is copied from https://huggingface.co/THUDM/chatglm-6b/resolve/main/configuration_chatglm.py
"""

""" ChatGLM model configuration """

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)


class ChatGLMConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`~ChatGLMModel`].
    It is used to instantiate a ChatGLM model according to the specified arguments, defining the model
    architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
    the ChatGLM-6B [THUDM/ChatGLM-6B](https://huggingface.co/THUDM/chatglm-6b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used
    to control the model outputs. Read the documentation from [`PretrainedConfig`]
    for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 130528):
            Vocabulary size of the ChatGLM-6B model. Defines the number of different tokens that can be
            represented by the `input_ids` passed when calling [`~ChatGLMModel`] or
            [`~TFChatGLMModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the encoder layers and the pooler layer.
        num_layers (`int`, *optional*, defaults to 28):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        inner_hidden_size (`int`, *optional*, defaults to 16384):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        max_sequence_length (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
            Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
        layernorm_epsilon (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether the model should return the last key/values attentions (not used by all models).
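        bos_token_id (`int`, *optional*, defaults to 130004):
            Id of the beginning-of-sequence token.
        eos_token_id (`int`, *optional*, defaults to 130005):
            Id of the end-of-sequence token.
        mask_token_id (`int`, *optional*, defaults to 130000):
            Id of the `[MASK]` token.
        gmask_token_id (`int`, *optional*, defaults to 130001):
            Id of the `[gMASK]` token.
        pad_token_id (`int`, *optional*, defaults to 3):
            Id of the padding token.
        position_encoding_2d (`bool`, *optional*, defaults to `True`):
            Whether to use the two-dimensional positional encoding scheme of ChatGLM-6B.
        quantization_bit (`int`, *optional*, defaults to 0):
            Number of bits used for weight quantization; 0 means no quantization.
        pre_seq_len (`int`, *optional*):
            Length of the prefix prompt used by the P-tuning v2 prefix encoder; `None` disables it.
        prefix_projection (`bool`, *optional*, defaults to `False`):
            Whether to pass the prefix embeddings through an additional MLP projection (P-tuning v2).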
    Example:

    ```python
    >>> from configuration_chatglm import ChatGLMConfig
    >>> from modeling_chatglm import ChatGLMModel

    >>> # Initializing a ChatGLM-6B THUDM/ChatGLM-6B style configuration
    >>> configuration = ChatGLMConfig()

    >>> # Initializing a model from the THUDM/ChatGLM-6B style configuration
    >>> model = ChatGLMModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "chatglm"

    def __init__(
        self,
        vocab_size=130528,
        hidden_size=4096,
        num_layers=28,
        num_attention_heads=32,
        layernorm_epsilon=1e-5,
        use_cache=True,
        bos_token_id=130004,
        eos_token_id=130005,
        mask_token_id=130000,
        gmask_token_id=130001,
        pad_token_id=3,
        max_sequence_length=2048,
        inner_hidden_size=16384,
        position_encoding_2d=True,
        quantization_bit=0,
        pre_seq_len=None,
        prefix_projection=False,
        **kwargs,
    ):
        self.num_layers = num_layers
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.max_sequence_length = max_sequence_length
        self.layernorm_epsilon = layernorm_epsilon
        self.inner_hidden_size = inner_hidden_size
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.mask_token_id = mask_token_id
        self.gmask_token_id = gmask_token_id
        self.position_encoding_2d = position_encoding_2d
        self.quantization_bit = quantization_bit
        self.pre_seq_len = pre_seq_len
        self.prefix_projection = prefix_projection

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
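
The file ends with the constructor above. As a supplementary sketch (not part of the upstream file): because `ChatGLMConfig` inherits from `PretrainedConfig`, it can be serialized and reloaded with the standard Transformers helpers. The snippet below assumes the file is importable as `configuration_chatglm`, mirroring the docstring example; the directory name is illustrative.

```python
from configuration_chatglm import ChatGLMConfig

# Build a configuration with non-default values; extra kwargs are handled by PretrainedConfig.
config = ChatGLMConfig(num_layers=2, hidden_size=256, inner_hidden_size=1024, num_attention_heads=4)

# PretrainedConfig provides dict round-tripping out of the box.
config_dict = config.to_dict()
restored = ChatGLMConfig.from_dict(config_dict)
assert restored.hidden_size == 256 and restored.num_layers == 2

# save_pretrained / from_pretrained write and read config.json in a directory.
config.save_pretrained("./chatglm-tiny-config")
reloaded = ChatGLMConfig.from_pretrained("./chatglm-tiny-config")
print(reloaded)
```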