intel-extension-for-pytorch

Форк
0
1592 строки · 57.4 Кб
1
# encoding: UTF-8
2
import math
3
import copy
4
import warnings
5

6
import torch
7
import torch.utils.checkpoint
8
import torch.nn.functional as F
9
from torch import nn
10
from torch.nn import CrossEntropyLoss, LayerNorm, MSELoss, BCEWithLogitsLoss
11
from torch.nn.utils import skip_init
12
from typing import Optional, Tuple, Union, List, Callable, Dict, Any
13
from copy import deepcopy
14

15
from transformers.modeling_outputs import (
16
    BaseModelOutputWithPast,
17
    CausalLMOutputWithPast,
18
    SequenceClassifierOutputWithPast,
19
)
20
from transformers.modeling_utils import PreTrainedModel
21
from transformers.utils import logging
22
from transformers.generation.logits_process import LogitsProcessor
23
from transformers.generation.utils import (
24
    LogitsProcessorList,
25
    StoppingCriteriaList,
26
    GenerationConfig,
27
    ModelOutput,
28
)
29

30
from .configuration_chatglm import ChatGLMConfig
31

32
# flags required to enable jit fusion kernels
33
# import sys
34
# if sys.platform != "darwin":
35
#     torch._C._jit_set_profiling_mode(False)
36
#     torch._C._jit_set_profiling_executor(False)
37
#     torch._C._jit_override_can_fuse_on_cpu(True)
38
#     torch._C._jit_override_can_fuse_on_gpu(True)
39

40
logger = logging.get_logger(__name__)
41

42
_CHECKPOINT_FOR_DOC = "THUDM/ChatGLM"
43
_CONFIG_FOR_DOC = "ChatGLMConfig"
44

45
CHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [
46
    "THUDM/chatglm3-6b",
47
    # See all ChatGLM models at https://huggingface.co/models?filter=chatglm
48
]
49

50

51
def default_init(cls, *args, **kwargs):
52
    return cls(*args, **kwargs)
53

54

55
class InvalidScoreLogitsProcessor(LogitsProcessor):
56
    def __call__(
57
        self, input_ids: torch.LongTensor, scores: torch.FloatTensor
58
    ) -> torch.FloatTensor:
59
        if torch.isnan(scores).any() or torch.isinf(scores).any():
60
            scores.zero_()
61
            scores[..., 5] = 5e4
62
        return scores
63

64

65
class PrefixEncoder(torch.nn.Module):
66
    """
67
    The torch.nn model to encode the prefix
68
    Input shape: (batch-size, prefix-length)
69
    Output shape: (batch-size, prefix-length, 2*layers*hidden)
70
    """
71

72
    def __init__(self, config: ChatGLMConfig):
73
        super().__init__()
74
        self.prefix_projection = config.prefix_projection
75
        if self.prefix_projection:
76
            # Use a two-layer MLP to encode the prefix
77
            kv_size = (
78
                config.num_layers
79
                * config.kv_channels
80
                * config.multi_query_group_num
81
                * 2
82
            )
83
            self.embedding = torch.nn.Embedding(config.pre_seq_len, kv_size)
84
            self.trans = torch.nn.Sequential(
85
                torch.nn.Linear(kv_size, config.hidden_size),
86
                torch.nn.Tanh(),
87
                torch.nn.Linear(config.hidden_size, kv_size),
88
            )
89
        else:
90
            self.embedding = torch.nn.Embedding(
91
                config.pre_seq_len,
92
                config.num_layers
93
                * config.kv_channels
94
                * config.multi_query_group_num
95
                * 2,
96
            )
97

98
    def forward(self, prefix: torch.Tensor):
99
        if self.prefix_projection:
100
            prefix_tokens = self.embedding(prefix)
101
            past_key_values = self.trans(prefix_tokens)
102
        else:
103
            past_key_values = self.embedding(prefix)
104
        return past_key_values
105

106

107
def split_tensor_along_last_dim(
108
    tensor: torch.Tensor,
109
    num_partitions: int,
110
    contiguous_split_chunks: bool = False,
111
) -> List[torch.Tensor]:
112
    """Split a tensor along its last dimension.
113
    Arguments:
114
        tensor: input tensor.
115
        num_partitions: number of partitions to split the tensor
116
        contiguous_split_chunks: If True, make each chunk contiguous
117
                                 in memory.
118
    Returns:
119
        A list of Tensors
120
    """
121
    # Get the size and dimension.
122
    last_dim = tensor.dim() - 1
123
    last_dim_size = tensor.size()[last_dim] // num_partitions
124
    # Split.
125
    tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
126
    # Note: torch.split does not create contiguous tensors by default.
127
    if contiguous_split_chunks:
128
        return tuple(chunk.contiguous() for chunk in tensor_list)
129

130
    return tensor_list
131

132

133
class RotaryEmbedding(nn.Module):
134
    def __init__(self, dim, original_impl=False, device=None, dtype=None):
135
        super().__init__()
136
        inv_freq = 1.0 / (
137
            10000 ** (torch.arange(0, dim, 2, device=device).to(dtype=dtype) / dim)
138
        )
139
        self.register_buffer("inv_freq", inv_freq)
140
        self.dim = dim
141
        self.original_impl = original_impl
142

143
    def forward_impl(
144
        self,
145
        seq_len: int,
146
        n_elem: int,
147
        dtype: torch.dtype,
148
        device: torch.device,
149
        base: int = 10000,
150
    ):
151
        """Enhanced Transformer with Rotary Position Embedding.
152
        Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/
153
        transformers/rope/__init__.py. MIT License:
154
        https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license.
155
        """
156
        # $\Theta = {\theta_i = 10000^{\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
157
        theta = 1.0 / (
158
            base
159
            ** (torch.arange(0, n_elem, 2, dtype=torch.float, device=device) / n_elem)
160
        )
161

162
        # Create position indexes `[0, 1, ..., seq_len - 1]`
163
        seq_idx = torch.arange(seq_len, dtype=torch.float, device=device)
164

165
        # Calculate the product of position index and $\theta_i$
166
        idx_theta = torch.outer(seq_idx, theta).float()
167

168
        cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1)
169

170
        # this is to mimic the behaviour of complex32, else we will get different results
171
        if dtype in (torch.float16, torch.bfloat16, torch.int8):
172
            cache = cache.bfloat16() if dtype == torch.bfloat16 else cache.half()
173
        return cache
174

175
    def forward(self, max_seq_len, offset=0):
176
        return self.forward_impl(
177
            max_seq_len,
178
            self.dim,
179
            dtype=self.inv_freq.dtype,
180
            device=self.inv_freq.device,
181
        )
182

183

184
@torch.jit.script
185
def apply_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor:
186
    # x: [sq, b, np, hn]
187
    sq, b, np, hn = x.size(0), x.size(1), x.size(2), x.size(3)
188
    rot_dim = rope_cache.shape[-2] * 2
189
    x, x_pass = x[..., :rot_dim], x[..., rot_dim:]
190
    # truncate to support variable sizes
191
    rope_cache = rope_cache[:sq]
192
    xshaped = x.reshape(sq, -1, np, rot_dim // 2, 2)
193
    rope_cache = rope_cache.view(sq, -1, 1, xshaped.size(3), 2)
194
    x_out2 = torch.stack(
195
        [
196
            xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1],
197
            xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1],
198
        ],
199
        -1,
200
    )
201
    x_out2 = x_out2.flatten(3)
202
    return torch.cat((x_out2, x_pass), dim=-1)
203

204

205
class RMSNorm(torch.nn.Module):
206
    def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs):
207
        super().__init__()
208
        self.weight = torch.nn.Parameter(
209
            torch.empty(normalized_shape, device=device, dtype=dtype)
210
        )
211
        self.eps = eps
212

213
    def forward(self, hidden_states: torch.Tensor):
214
        input_dtype = hidden_states.dtype
215
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
216
        hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
217

218
        return (self.weight * hidden_states).to(input_dtype)
219

220

221
class CoreAttention(torch.nn.Module):
222
    def __init__(self, config: ChatGLMConfig, layer_number):
223
        super(CoreAttention, self).__init__()
224

225
        self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling
226
        self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32
227
        if self.apply_query_key_layer_scaling:
228
            self.attention_softmax_in_fp32 = True
229
        self.layer_number = max(1, layer_number)
230

231
        projection_size = config.kv_channels * config.num_attention_heads
232

233
        # Per attention head and per partition values.
234
        self.hidden_size_per_partition = projection_size
235
        self.hidden_size_per_attention_head = (
236
            projection_size // config.num_attention_heads
237
        )
238
        self.num_attention_heads_per_partition = config.num_attention_heads
239

240
        coeff = None
241
        self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
242
        if self.apply_query_key_layer_scaling:
243
            coeff = self.layer_number
244
            self.norm_factor *= coeff
245
        self.coeff = coeff
246

247
        self.attention_dropout = torch.nn.Dropout(config.attention_dropout)
248

249
    def forward(self, query_layer, key_layer, value_layer, attention_mask):
250
        pytorch_major_version = int(torch.__version__.split(".")[0])
251
        if pytorch_major_version >= 2:
252
            query_layer, key_layer, value_layer = [
253
                k.permute(1, 2, 0, 3) for k in [query_layer, key_layer, value_layer]
254
            ]
255
            key_layer = key_layer.to(query_layer.dtype)
256
            value_layer = value_layer.to(query_layer.dtype)
257
            if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]:
258
                context_layer = torch.nn.functional.scaled_dot_product_attention(
259
                    query_layer, key_layer, value_layer, is_causal=True
260
                )
261
            else:
262
                if attention_mask is not None:
263
                    attention_mask = ~attention_mask
264
                context_layer = torch.nn.functional.scaled_dot_product_attention(
265
                    query_layer, key_layer, value_layer, attention_mask
266
                )
267
            context_layer = context_layer.permute(2, 0, 1, 3)
268
            new_context_layer_shape = context_layer.size()[:-2] + (
269
                self.hidden_size_per_partition,
270
            )
271
            context_layer = context_layer.reshape(*new_context_layer_shape)
272
        else:
273
            # Raw attention scores
274

275
            # [b, np, sq, sk]
276
            output_size = (
277
                query_layer.size(1),
278
                query_layer.size(2),
279
                query_layer.size(0),
280
                key_layer.size(0),
281
            )
282

283
            # [sq, b, np, hn] -> [sq, b * np, hn]
284
            query_layer = query_layer.view(
285
                output_size[2], output_size[0] * output_size[1], -1
286
            )
287
            # [sk, b, np, hn] -> [sk, b * np, hn]
288
            key_layer = key_layer.view(
289
                output_size[3], output_size[0] * output_size[1], -1
290
            )
291

292
            # preallocting input tensor: [b * np, sq, sk]
293
            matmul_input_buffer = torch.empty(
294
                output_size[0] * output_size[1],
295
                output_size[2],
296
                output_size[3],
297
                dtype=query_layer.dtype,
298
                device=query_layer.device,
299
            )
300

301
            # Raw attention scores. [b * np, sq, sk]
302
            matmul_result = torch.baddbmm(
303
                matmul_input_buffer,
304
                query_layer.transpose(0, 1),  # [b * np, sq, hn]
305
                key_layer.transpose(0, 1).transpose(1, 2),  # [b * np, hn, sk]
306
                beta=0.0,
307
                alpha=(1.0 / self.norm_factor),
308
            )
309

310
            # change view to [b, np, sq, sk]
311
            attention_scores = matmul_result.view(*output_size)
312

313
            # ===========================
314
            # Attention probs and dropout
315
            # ===========================
316

317
            # attention scores and attention mask [b, np, sq, sk]
318
            if self.attention_softmax_in_fp32:
319
                attention_scores = attention_scores.float()
320
            if self.coeff is not None:
321
                attention_scores = attention_scores * self.coeff
322
            if (
323
                attention_mask is None
324
                and attention_scores.shape[2] == attention_scores.shape[3]
325
            ):
326
                attention_mask = torch.ones(
327
                    output_size[0],
328
                    1,
329
                    output_size[2],
330
                    output_size[3],
331
                    device=attention_scores.device,
332
                    dtype=torch.bool,
333
                )
334
                attention_mask.tril_()
335
                attention_mask = ~attention_mask
336
            if attention_mask is not None:
337
                attention_scores = attention_scores.masked_fill(
338
                    attention_mask, float("-inf")
339
                )
340
            attention_probs = F.softmax(attention_scores, dim=-1)
341
            attention_probs = attention_probs.type_as(value_layer)
342

343
            # This is actually dropping out entire tokens to attend to, which might
344
            # seem a bit unusual, but is taken from the original Transformer paper.
345
            attention_probs = self.attention_dropout(attention_probs)
346
            # =========================
347
            # Context layer. [sq, b, hp]
348
            # =========================
349

350
            # value_layer -> context layer.
351
            # [sk, b, np, hn] --> [b, np, sq, hn]
352

353
            # context layer shape: [b, np, sq, hn]
354
            output_size = (
355
                value_layer.size(1),
356
                value_layer.size(2),
357
                query_layer.size(0),
358
                value_layer.size(3),
359
            )
360
            # change view [sk, b * np, hn]
361
            value_layer = value_layer.view(
362
                value_layer.size(0), output_size[0] * output_size[1], -1
363
            )
364
            # change view [b * np, sq, sk]
365
            attention_probs = attention_probs.view(
366
                output_size[0] * output_size[1], output_size[2], -1
367
            )
368
            # matmul: [b * np, sq, hn]
369
            context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))
370
            # change view [b, np, sq, hn]
371
            context_layer = context_layer.view(*output_size)
372
            # [b, np, sq, hn] --> [sq, b, np, hn]
373
            context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
374
            # [sq, b, np, hn] --> [sq, b, hp]
375
            new_context_layer_shape = context_layer.size()[:-2] + (
376
                self.hidden_size_per_partition,
377
            )
378
            context_layer = context_layer.view(*new_context_layer_shape)
379

380
        return context_layer
381

382

383
class SelfAttention(torch.nn.Module):
384
    """Parallel self-attention layer abstract class.
385
    Self-attention layer takes input with size [s, b, h]
386
    and returns output of the same size.
387
    """
388

389
    def __init__(self, config: ChatGLMConfig, layer_number, device=None):
390
        super(SelfAttention, self).__init__()
391
        self.layer_number = max(1, layer_number)
392

393
        self.projection_size = config.kv_channels * config.num_attention_heads
394

395
        # Per attention head and per partition values.
396
        self.hidden_size_per_attention_head = (
397
            self.projection_size // config.num_attention_heads
398
        )
399
        self.num_attention_heads_per_partition = config.num_attention_heads
400

401
        self.multi_query_attention = config.multi_query_attention
402
        self.qkv_hidden_size = 3 * self.projection_size
403
        if self.multi_query_attention:
404
            self.num_multi_query_groups_per_partition = config.multi_query_group_num
405
            self.qkv_hidden_size = (
406
                self.projection_size
407
                + 2 * self.hidden_size_per_attention_head * config.multi_query_group_num
408
            )
409
        self.query_key_value = nn.Linear(
410
            config.hidden_size,
411
            self.qkv_hidden_size,
412
            bias=config.add_bias_linear or config.add_qkv_bias,
413
            device=device,
414
            **_config_to_kwargs(config),
415
        )
416

417
        self.core_attention = CoreAttention(config, self.layer_number)
418

419
        # Output.
420
        self.dense = nn.Linear(
421
            self.projection_size,
422
            config.hidden_size,
423
            bias=config.add_bias_linear,
424
            device=device,
425
            **_config_to_kwargs(config),
426
        )
427

428
    def _allocate_memory(
429
        self, inference_max_sequence_len, batch_size, device=None, dtype=None
430
    ):
431
        if self.multi_query_attention:
432
            num_attention_heads = self.num_multi_query_groups_per_partition
433
        else:
434
            num_attention_heads = self.num_attention_heads_per_partition
435
        return torch.empty(
436
            inference_max_sequence_len,
437
            batch_size,
438
            num_attention_heads,
439
            self.hidden_size_per_attention_head,
440
            dtype=dtype,
441
            device=device,
442
        )
443

444
    def forward(
445
        self,
446
        hidden_states,
447
        attention_mask,
448
        rotary_pos_emb,
449
        kv_cache=None,
450
        use_cache=True,
451
    ):
452
        # hidden_states: [sq, b, h]
453

454
        # =================================================
455
        # Pre-allocate memory for key-values for inference.
456
        # =================================================
457
        # =====================
458
        # Query, Key, and Value
459
        # =====================
460

461
        # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]
462
        mixed_x_layer = self.query_key_value(hidden_states)
463

464
        if self.multi_query_attention:
465
            (query_layer, key_layer, value_layer) = mixed_x_layer.split(
466
                [
467
                    self.num_attention_heads_per_partition
468
                    * self.hidden_size_per_attention_head,
469
                    self.num_multi_query_groups_per_partition
470
                    * self.hidden_size_per_attention_head,
471
                    self.num_multi_query_groups_per_partition
472
                    * self.hidden_size_per_attention_head,
473
                ],
474
                dim=-1,
475
            )
476
            query_layer = query_layer.view(
477
                query_layer.size()[:-1]
478
                + (
479
                    self.num_attention_heads_per_partition,
480
                    self.hidden_size_per_attention_head,
481
                )
482
            )
483
            key_layer = key_layer.view(
484
                key_layer.size()[:-1]
485
                + (
486
                    self.num_multi_query_groups_per_partition,
487
                    self.hidden_size_per_attention_head,
488
                )
489
            )
490
            value_layer = value_layer.view(
491
                value_layer.size()[:-1]
492
                + (
493
                    self.num_multi_query_groups_per_partition,
494
                    self.hidden_size_per_attention_head,
495
                )
496
            )
497
        else:
498
            new_tensor_shape = mixed_x_layer.size()[:-1] + (
499
                self.num_attention_heads_per_partition,
500
                3 * self.hidden_size_per_attention_head,
501
            )
502
            mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
503

504
            # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
505
            (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(
506
                mixed_x_layer, 3
507
            )
508

509
        # apply relative positional encoding (rotary embedding)
510
        if rotary_pos_emb is not None:
511
            query_layer = apply_rotary_pos_emb(query_layer, rotary_pos_emb)
512
            key_layer = apply_rotary_pos_emb(key_layer, rotary_pos_emb)
513

514
        # adjust key and value for inference
515
        if kv_cache is not None:
516
            cache_k, cache_v = kv_cache
517
            key_layer = torch.cat((cache_k, key_layer), dim=0)
518
            value_layer = torch.cat((cache_v, value_layer), dim=0)
519
        if use_cache:
520
            kv_cache = (key_layer, value_layer)
521
        else:
522
            kv_cache = None
523

524
        if self.multi_query_attention:
525
            key_layer = key_layer.unsqueeze(-2)
526
            key_layer = key_layer.expand(
527
                -1,
528
                -1,
529
                -1,
530
                self.num_attention_heads_per_partition
531
                // self.num_multi_query_groups_per_partition,
532
                -1,
533
            )
534
            key_layer = key_layer.contiguous().view(
535
                key_layer.size()[:2]
536
                + (
537
                    self.num_attention_heads_per_partition,
538
                    self.hidden_size_per_attention_head,
539
                )
540
            )
541
            value_layer = value_layer.unsqueeze(-2)
542
            value_layer = value_layer.expand(
543
                -1,
544
                -1,
545
                -1,
546
                self.num_attention_heads_per_partition
547
                // self.num_multi_query_groups_per_partition,
548
                -1,
549
            )
550
            value_layer = value_layer.contiguous().view(
551
                value_layer.size()[:2]
552
                + (
553
                    self.num_attention_heads_per_partition,
554
                    self.hidden_size_per_attention_head,
555
                )
556
            )
557

558
        # ==================================
559
        # core attention computation
560
        # ==================================
561

562
        context_layer = self.core_attention(
563
            query_layer, key_layer, value_layer, attention_mask
564
        )
565

566
        # =================
567
        # Output. [sq, b, h]
568
        # =================
569

570
        output = self.dense(context_layer)
571

572
        return output, kv_cache
573

574

575
def _config_to_kwargs(args):
576
    common_kwargs = {
577
        "dtype": args.torch_dtype,
578
    }
579
    return common_kwargs
580

581

582
class MLP(torch.nn.Module):
583
    """MLP.
584
    MLP will take the input with h hidden state, project it to 4*h
585
    hidden dimension, perform nonlinear transformation, and project the
586
    state back into h hidden dimension.
587
    """
588

589
    def __init__(self, config: ChatGLMConfig, device=None):
590
        super(MLP, self).__init__()
591

592
        self.add_bias = config.add_bias_linear
593

594
        # Project to 4h. If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf
595
        self.dense_h_to_4h = nn.Linear(
596
            config.hidden_size,
597
            config.ffn_hidden_size * 2,
598
            bias=self.add_bias,
599
            device=device,
600
            **_config_to_kwargs(config),
601
        )
602

603
        def swiglu(x):
604
            x = torch.chunk(x, 2, dim=-1)
605
            return F.silu(x[0]) * x[1]
606

607
        self.activation_func = swiglu
608

609
        # Project back to h.
610
        self.dense_4h_to_h = nn.Linear(
611
            config.ffn_hidden_size,
612
            config.hidden_size,
613
            bias=self.add_bias,
614
            device=device,
615
            **_config_to_kwargs(config),
616
        )
617

618
    def forward(self, hidden_states):
619
        # [s, b, 4hp]
620
        intermediate_parallel = self.dense_h_to_4h(hidden_states)
621
        intermediate_parallel = self.activation_func(intermediate_parallel)
622
        # [s, b, h]
623
        output = self.dense_4h_to_h(intermediate_parallel)
624
        return output
625

626

627
class GLMBlock(torch.nn.Module):
628
    """A single transformer layer.
629
    Transformer layer takes input with size [s, b, h] and returns an
630
    output of the same size.
631
    """
632

633
    def __init__(self, config: ChatGLMConfig, layer_number, device=None):
634
        super(GLMBlock, self).__init__()
635
        self.layer_number = layer_number
636

637
        self.apply_residual_connection_post_layernorm = (
638
            config.apply_residual_connection_post_layernorm
639
        )
640

641
        self.fp32_residual_connection = config.fp32_residual_connection
642

643
        LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm
644
        # Layernorm on the input data.
645
        self.input_layernorm = LayerNormFunc(
646
            config.hidden_size,
647
            eps=config.layernorm_epsilon,
648
            device=device,
649
            dtype=config.torch_dtype,
650
        )
651

652
        # Self attention.
653
        self.self_attention = SelfAttention(config, layer_number, device=device)
654
        self.hidden_dropout = config.hidden_dropout
655

656
        # Layernorm on the attention output
657
        self.post_attention_layernorm = LayerNormFunc(
658
            config.hidden_size,
659
            eps=config.layernorm_epsilon,
660
            device=device,
661
            dtype=config.torch_dtype,
662
        )
663

664
        # MLP
665
        self.mlp = MLP(config, device=device)
666

667
    def forward(
668
        self,
669
        hidden_states,
670
        attention_mask,
671
        rotary_pos_emb,
672
        kv_cache=None,
673
        use_cache=True,
674
    ):
675
        # hidden_states: [s, b, h]
676

677
        # Layer norm at the beginning of the transformer layer.
678
        layernorm_output = self.input_layernorm(hidden_states)
679
        # Self attention.
680
        attention_output, kv_cache = self.self_attention(
681
            layernorm_output,
682
            attention_mask,
683
            rotary_pos_emb,
684
            kv_cache=kv_cache,
685
            use_cache=use_cache,
686
        )
687

688
        # Residual connection.
689
        if self.apply_residual_connection_post_layernorm:
690
            residual = layernorm_output
691
        else:
692
            residual = hidden_states
693

694
        layernorm_input = torch.nn.functional.dropout(
695
            attention_output, p=self.hidden_dropout, training=self.training
696
        )
697
        layernorm_input = residual + layernorm_input
698

699
        # Layer norm post the self attention.
700
        layernorm_output = self.post_attention_layernorm(layernorm_input)
701

702
        # MLP.
703
        mlp_output = self.mlp(layernorm_output)
704

705
        # Second residual connection.
706
        if self.apply_residual_connection_post_layernorm:
707
            residual = layernorm_output
708
        else:
709
            residual = layernorm_input
710

711
        output = torch.nn.functional.dropout(
712
            mlp_output, p=self.hidden_dropout, training=self.training
713
        )
714
        output = residual + output
715

716
        return output, kv_cache
717

718

719
class GLMTransformer(torch.nn.Module):
720
    """Transformer class."""
721

722
    def __init__(self, config: ChatGLMConfig, device=None):
723
        super(GLMTransformer, self).__init__()
724

725
        self.fp32_residual_connection = config.fp32_residual_connection
726
        self.post_layer_norm = config.post_layer_norm
727

728
        # Number of layers.
729
        self.num_layers = config.num_layers
730

731
        # Transformer layers.
732
        def build_layer(layer_number):
733
            return GLMBlock(config, layer_number, device=device)
734

735
        self.layers = torch.nn.ModuleList(
736
            [build_layer(i + 1) for i in range(self.num_layers)]
737
        )
738

739
        if self.post_layer_norm:
740
            LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm
741
            # Final layer norm before output.
742
            self.final_layernorm = LayerNormFunc(
743
                config.hidden_size,
744
                eps=config.layernorm_epsilon,
745
                device=device,
746
                dtype=config.torch_dtype,
747
            )
748

749
        self.gradient_checkpointing = False
750

751
    def _get_layer(self, layer_number):
752
        return self.layers[layer_number]
753

754
    def forward(
755
        self,
756
        hidden_states,
757
        attention_mask,
758
        rotary_pos_emb,
759
        kv_caches=None,
760
        use_cache: Optional[bool] = True,
761
        output_hidden_states: Optional[bool] = False,
762
    ):
763
        if not kv_caches:
764
            kv_caches = [None for _ in range(self.num_layers)]
765
        presents = () if use_cache else None
766
        if self.gradient_checkpointing and self.training:
767
            if use_cache:
768
                logger.warning_once(
769
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
770
                )
771
                use_cache = False
772

773
        all_self_attentions = None
774
        all_hidden_states = () if output_hidden_states else None
775
        for index in range(self.num_layers):
776
            if output_hidden_states:
777
                all_hidden_states = all_hidden_states + (hidden_states,)
778

779
            layer = self._get_layer(index)
780
            if self.gradient_checkpointing and self.training:
781
                layer_ret = torch.utils.checkpoint.checkpoint(
782
                    layer,
783
                    hidden_states,
784
                    attention_mask,
785
                    rotary_pos_emb,
786
                    kv_caches[index],
787
                    use_cache,
788
                )
789
            else:
790
                layer_ret = layer(
791
                    hidden_states,
792
                    attention_mask,
793
                    rotary_pos_emb,
794
                    kv_cache=kv_caches[index],
795
                    use_cache=use_cache,
796
                )
797
            hidden_states, kv_cache = layer_ret
798
            if use_cache:
799
                presents = presents + (kv_cache,)
800

801
        if output_hidden_states:
802
            all_hidden_states = all_hidden_states + (hidden_states,)
803

804
        # Final layer norm.
805
        if self.post_layer_norm:
806
            hidden_states = self.final_layernorm(hidden_states)
807

808
        return hidden_states, presents, all_hidden_states, all_self_attentions
809

810

811
class ChatGLMPreTrainedModel(PreTrainedModel):
812
    """
813
    An abstract class to handle weights initialization and
814
    a simple interface for downloading and loading pretrained models.
815
    """
816

817
    is_parallelizable = False
818
    supports_gradient_checkpointing = True
819
    config_class = ChatGLMConfig
820
    base_model_prefix = "transformer"
821
    _no_split_modules = ["GLMBlock"]
822

823
    def _init_weights(self, module: nn.Module):
824
        """Initialize the weights."""
825
        return
826

827
    def get_masks(self, input_ids, past_key_values, padding_mask=None):
828
        batch_size, seq_length = input_ids.shape
829
        full_attention_mask = torch.ones(
830
            batch_size, seq_length, seq_length, device=input_ids.device
831
        )
832
        full_attention_mask.tril_()
833
        past_length = 0
834
        if past_key_values:
835
            past_length = past_key_values[0][0].shape[0]
836
        if past_length:
837
            full_attention_mask = torch.cat(
838
                (
839
                    torch.ones(
840
                        batch_size, seq_length, past_length, device=input_ids.device
841
                    ),
842
                    full_attention_mask,
843
                ),
844
                dim=-1,
845
            )
846
        if padding_mask is not None:
847
            full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1)
848
        if not past_length and padding_mask is not None:
849
            full_attention_mask -= padding_mask.unsqueeze(-1) - 1
850
        full_attention_mask = (full_attention_mask < 0.5).bool()
851
        full_attention_mask.unsqueeze_(1)
852
        return full_attention_mask
853

854
    def get_position_ids(self, input_ids, device):
855
        batch_size, seq_length = input_ids.shape
856
        position_ids = (
857
            torch.arange(seq_length, dtype=torch.long, device=device)
858
            .unsqueeze(0)
859
            .repeat(batch_size, 1)
860
        )
861
        return position_ids
862

863
    def _set_gradient_checkpointing(self, module, value=False):
864
        if isinstance(module, GLMTransformer):
865
            module.gradient_checkpointing = value
866

867

868
class Embedding(torch.nn.Module):
869
    """Language model embeddings."""
870

871
    def __init__(self, config: ChatGLMConfig, device=None):
872
        super(Embedding, self).__init__()
873

874
        self.hidden_size = config.hidden_size
875
        # Word embeddings (parallel).
876
        self.word_embeddings = nn.Embedding(
877
            config.padded_vocab_size,
878
            self.hidden_size,
879
            dtype=config.torch_dtype,
880
            device=device,
881
        )
882
        self.fp32_residual_connection = config.fp32_residual_connection
883

884
    def forward(self, input_ids):
885
        # Embeddings.
886
        words_embeddings = self.word_embeddings(input_ids)
887
        embeddings = words_embeddings
888
        # Data format change to avoid explicit tranposes : [b s h] --> [s b h].
889
        embeddings = embeddings.transpose(0, 1).contiguous()
890
        # If the input flag for fp32 residual connection is set, convert for float.
891
        if self.fp32_residual_connection:
892
            embeddings = embeddings.float()
893
        return embeddings
894

895

896
class ChatGLMModel(ChatGLMPreTrainedModel):
897
    def __init__(self, config: ChatGLMConfig, device=None, empty_init=True):
898
        super().__init__(config)
899
        if empty_init:
900
            init_method = skip_init
901
        else:
902
            init_method = default_init
903
        init_kwargs = {}
904
        if device is not None:
905
            init_kwargs["device"] = device
906
        self.embedding = init_method(Embedding, config, **init_kwargs)
907
        self.num_layers = config.num_layers
908
        self.multi_query_group_num = config.multi_query_group_num
909
        self.kv_channels = config.kv_channels
910

911
        # Rotary positional embeddings
912
        self.seq_length = config.seq_length
913
        rotary_dim = (
914
            config.hidden_size // config.num_attention_heads
915
            if config.kv_channels is None
916
            else config.kv_channels
917
        )
918

919
        self.rotary_pos_emb = RotaryEmbedding(
920
            rotary_dim // 2,
921
            original_impl=config.original_rope,
922
            device=device,
923
            dtype=config.torch_dtype,
924
        )
925
        self.encoder = init_method(GLMTransformer, config, **init_kwargs)
926
        self.output_layer = init_method(
927
            nn.Linear,
928
            config.hidden_size,
929
            config.padded_vocab_size,
930
            bias=False,
931
            dtype=config.torch_dtype,
932
            **init_kwargs,
933
        )
934
        self.pre_seq_len = config.pre_seq_len
935
        self.prefix_projection = config.prefix_projection
936
        if self.pre_seq_len is not None:
937
            for param in self.parameters():
938
                param.requires_grad = False
939
            self.prefix_tokens = torch.arange(self.pre_seq_len).long()
940
            self.prefix_encoder = PrefixEncoder(config)
941
            self.dropout = torch.nn.Dropout(0.1)
942

943
    def get_input_embeddings(self):
944
        return self.embedding.word_embeddings
945

946
    def get_prompt(self, batch_size, device, dtype=torch.half):
947
        prefix_tokens = (
948
            self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(device)
949
        )
950
        past_key_values = self.prefix_encoder(prefix_tokens).type(dtype)
951
        past_key_values = past_key_values.view(
952
            batch_size,
953
            self.pre_seq_len,
954
            self.num_layers * 2,
955
            self.multi_query_group_num,
956
            self.kv_channels,
957
        )
958
        # seq_len, b, nh, hidden_size
959
        past_key_values = self.dropout(past_key_values)
960
        past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2)
961
        return past_key_values
962

963
    def forward(
964
        self,
965
        input_ids,
966
        position_ids: Optional[torch.Tensor] = None,
967
        attention_mask: Optional[torch.BoolTensor] = None,
968
        full_attention_mask: Optional[torch.BoolTensor] = None,
969
        past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
970
        inputs_embeds: Optional[torch.Tensor] = None,
971
        use_cache: Optional[bool] = None,
972
        output_hidden_states: Optional[bool] = None,
973
        return_dict: Optional[bool] = None,
974
    ):
975
        output_hidden_states = (
976
            output_hidden_states
977
            if output_hidden_states is not None
978
            else self.config.output_hidden_states
979
        )
980
        use_cache = use_cache if use_cache is not None else self.config.use_cache
981
        return_dict = (
982
            return_dict if return_dict is not None else self.config.use_return_dict
983
        )
984

985
        batch_size, seq_length = input_ids.shape
986

987
        if inputs_embeds is None:
988
            inputs_embeds = self.embedding(input_ids)
989

990
        if self.pre_seq_len is not None:
991
            if past_key_values is None:
992
                past_key_values = self.get_prompt(
993
                    batch_size=batch_size,
994
                    device=input_ids.device,
995
                    dtype=inputs_embeds.dtype,
996
                )
997
            if attention_mask is not None:
998
                attention_mask = torch.cat(
999
                    [
1000
                        attention_mask.new_ones((batch_size, self.pre_seq_len)),
1001
                        attention_mask,
1002
                    ],
1003
                    dim=-1,
1004
                )
1005

1006
        if full_attention_mask is None:
1007
            if (attention_mask is not None and not attention_mask.all()) or (
1008
                past_key_values and seq_length != 1
1009
            ):
1010
                full_attention_mask = self.get_masks(
1011
                    input_ids, past_key_values, padding_mask=attention_mask
1012
                )
1013

1014
        # Rotary positional embeddings
1015
        rotary_pos_emb = self.rotary_pos_emb(self.seq_length)
1016
        if position_ids is not None:
1017
            rotary_pos_emb = rotary_pos_emb[position_ids]
1018
        else:
1019
            rotary_pos_emb = rotary_pos_emb[None, :seq_length]
1020
        rotary_pos_emb = rotary_pos_emb.transpose(0, 1).contiguous()
1021

1022
        # Run encoder.
1023
        hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder(
1024
            inputs_embeds,
1025
            full_attention_mask,
1026
            rotary_pos_emb=rotary_pos_emb,
1027
            kv_caches=past_key_values,
1028
            use_cache=use_cache,
1029
            output_hidden_states=output_hidden_states,
1030
        )
1031

1032
        if not return_dict:
1033
            return tuple(
1034
                v
1035
                for v in [
1036
                    hidden_states,
1037
                    presents,
1038
                    all_hidden_states,
1039
                    all_self_attentions,
1040
                ]
1041
                if v is not None
1042
            )
1043

1044
        return BaseModelOutputWithPast(
1045
            last_hidden_state=hidden_states,
1046
            past_key_values=presents,
1047
            hidden_states=all_hidden_states,
1048
            attentions=all_self_attentions,
1049
        )
1050

1051
    def quantize(self, weight_bit_width: int):
1052
        from .quantization import quantize
1053

1054
        quantize(self.encoder, weight_bit_width)
1055
        return self
1056

1057

1058
class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
1059
    def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):
1060
        super().__init__(config)
1061

1062
        self.max_sequence_length = config.max_length
1063
        self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device)
1064
        self.config = config
1065
        self.quantized = False
1066

1067
        if self.config.quantization_bit:
1068
            self.quantize(self.config.quantization_bit, empty_init=True)
1069

1070
    def _update_model_kwargs_for_generation(
1071
        self,
1072
        outputs: ModelOutput,
1073
        model_kwargs: Dict[str, Any],
1074
        is_encoder_decoder: bool = False,
1075
        standardize_cache_format: bool = False,
1076
    ) -> Dict[str, Any]:
1077
        # update past_key_values
1078
        model_kwargs["past_key_values"] = self._extract_past_from_model_output(
1079
            outputs, standardize_cache_format=standardize_cache_format
1080
        )
1081

1082
        # update attention mask
1083
        if "attention_mask" in model_kwargs:
1084
            attention_mask = model_kwargs["attention_mask"]
1085
            model_kwargs["attention_mask"] = torch.cat(
1086
                [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))],
1087
                dim=-1,
1088
            )
1089

1090
        # update position ids
1091
        if "position_ids" in model_kwargs:
1092
            position_ids = model_kwargs["position_ids"]
1093
            new_position_id = position_ids[..., -1:].clone()
1094
            new_position_id += 1
1095
            model_kwargs["position_ids"] = torch.cat(
1096
                [position_ids, new_position_id], dim=-1
1097
            )
1098

1099
        model_kwargs["is_first_forward"] = False
1100
        return model_kwargs
1101

1102
    def prepare_inputs_for_generation(
1103
        self,
1104
        input_ids: torch.LongTensor,
1105
        past_key_values: Optional[torch.Tensor] = None,
1106
        attention_mask: Optional[torch.Tensor] = None,
1107
        position_ids: Optional[torch.Tensor] = None,
1108
        use_cache: Optional[bool] = None,
1109
        is_first_forward: bool = True,
1110
        **kwargs,
1111
    ) -> dict:
1112
        # only last token for input_ids if past is not None
1113
        if position_ids is None:
1114
            position_ids = self.get_position_ids(input_ids, device=input_ids.device)
1115
        if not is_first_forward:
1116
            if past_key_values is not None:
1117
                position_ids = position_ids[..., -1:]
1118
                input_ids = input_ids[:, -1:]
1119
        return {
1120
            "input_ids": input_ids,
1121
            "past_key_values": past_key_values,
1122
            "position_ids": position_ids,
1123
            "attention_mask": attention_mask,
1124
            "return_last_logit": True,
1125
            "use_cache": use_cache,
1126
        }
1127

1128
    def forward(
1129
        self,
1130
        input_ids: Optional[torch.Tensor] = None,
1131
        position_ids: Optional[torch.Tensor] = None,
1132
        attention_mask: Optional[torch.Tensor] = None,
1133
        past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
1134
        inputs_embeds: Optional[torch.Tensor] = None,
1135
        labels: Optional[torch.Tensor] = None,
1136
        use_cache: Optional[bool] = None,
1137
        output_attentions: Optional[bool] = None,
1138
        output_hidden_states: Optional[bool] = None,
1139
        return_dict: Optional[bool] = None,
1140
        return_last_logit: Optional[bool] = False,
1141
    ):
1142
        use_cache = use_cache if use_cache is not None else self.config.use_cache
1143
        return_dict = (
1144
            return_dict if return_dict is not None else self.config.use_return_dict
1145
        )
1146

1147
        transformer_outputs = self.transformer(
1148
            input_ids=input_ids,
1149
            position_ids=position_ids,
1150
            attention_mask=attention_mask,
1151
            past_key_values=past_key_values,
1152
            inputs_embeds=inputs_embeds,
1153
            use_cache=use_cache,
1154
            output_hidden_states=output_hidden_states,
1155
            return_dict=return_dict,
1156
        )
1157

1158
        hidden_states = transformer_outputs[0]
1159
        if return_last_logit:
1160
            hidden_states = hidden_states[-1:]
1161
        lm_logits = self.transformer.output_layer(hidden_states)
1162
        lm_logits = lm_logits.transpose(0, 1).contiguous()
1163

1164
        loss = None
1165
        if labels is not None:
1166
            lm_logits = lm_logits.to(torch.float32)
1167

1168
            # Shift so that tokens < n predict n
1169
            shift_logits = lm_logits[..., :-1, :].contiguous()
1170
            shift_labels = labels[..., 1:].contiguous()
1171
            # Flatten the tokens
1172
            loss_fct = CrossEntropyLoss(ignore_index=-100)
1173
            loss = loss_fct(
1174
                shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
1175
            )
1176

1177
            lm_logits = lm_logits.to(hidden_states.dtype)
1178
            loss = loss.to(hidden_states.dtype)
1179

1180
        if not return_dict:
1181
            output = (lm_logits,) + transformer_outputs[1:]
1182
            return ((loss,) + output) if loss is not None else output
1183

1184
        return CausalLMOutputWithPast(
1185
            loss=loss,
1186
            logits=lm_logits,
1187
            past_key_values=transformer_outputs.past_key_values,
1188
            hidden_states=transformer_outputs.hidden_states,
1189
            attentions=transformer_outputs.attentions,
1190
        )
1191

1192
    @staticmethod
1193
    def _reorder_cache(
1194
        past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
1195
    ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
1196
        """
1197
        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
1198
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
1199
        beam_idx at every generation step.
1200
        Output shares the same memory storage as `past`.
1201
        """
1202
        return tuple(
1203
            (
1204
                layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)),
1205
                layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)),
1206
            )
1207
            for layer_past in past
1208
        )
1209

1210
    def process_response(self, output, history):
1211
        content = ""
1212
        history = deepcopy(history)
1213
        for response in output.split("<|assistant|>"):
1214
            metadata, content = response.split("\n", maxsplit=1)
1215
            if not metadata.strip():
1216
                content = content.strip()
1217
                history.append(
1218
                    {"role": "assistant", "metadata": metadata, "content": content}
1219
                )
1220
                content = content.replace("[[训练时间]]", "2023年")
1221
            else:
1222
                history.append(
1223
                    {"role": "assistant", "metadata": metadata, "content": content}
1224
                )
1225
                if history[0]["role"] == "system" and "tools" in history[0]:
1226
                    content = "\n".join(content.split("\n")[1:-1])
1227

1228
                    def tool_call(**kwargs):
1229
                        return kwargs
1230

1231
                    parameters = eval(content)
1232
                    content = {"name": metadata.strip(), "parameters": parameters}
1233
                else:
1234
                    content = {"name": metadata.strip(), "content": content}
1235
        return content, history
1236

1237
    @torch.inference_mode()
1238
    def chat(
1239
        self,
1240
        tokenizer,
1241
        query: str,
1242
        history: List[Dict] = None,
1243
        role: str = "user",
1244
        max_length: int = 8192,
1245
        num_beams=1,
1246
        do_sample=True,
1247
        top_p=0.8,
1248
        temperature=0.8,
1249
        logits_processor=None,
1250
        **kwargs,
1251
    ):
1252
        if history is None:
1253
            history = []
1254
        if logits_processor is None:
1255
            logits_processor = LogitsProcessorList()
1256
        logits_processor.append(InvalidScoreLogitsProcessor())
1257
        gen_kwargs = {
1258
            "max_length": max_length,
1259
            "num_beams": num_beams,
1260
            "do_sample": do_sample,
1261
            "top_p": top_p,
1262
            "temperature": temperature,
1263
            "logits_processor": logits_processor,
1264
            **kwargs,
1265
        }
1266
        inputs = tokenizer.build_chat_input(query, history=history, role=role)
1267
        inputs = inputs.to(self.device)
1268
        eos_token_id = [
1269
            tokenizer.eos_token_id,
1270
            tokenizer.get_command("<|user|>"),
1271
            tokenizer.get_command("<|observation|>"),
1272
        ]
1273
        outputs = self.generate(**inputs, **gen_kwargs, eos_token_id=eos_token_id)
1274
        outputs = outputs.tolist()[0][len(inputs["input_ids"][0]) : -1]
1275
        response = tokenizer.decode(outputs)
1276
        history.append({"role": role, "content": query})
1277
        response, history = self.process_response(response, history)
1278
        return response, history
1279

1280
    @torch.inference_mode()
1281
    def stream_chat(
1282
        self,
1283
        tokenizer,
1284
        query: str,
1285
        history: List[Dict] = None,
1286
        role: str = "user",
1287
        past_key_values=None,
1288
        max_length: int = 8192,
1289
        do_sample=True,
1290
        top_p=0.8,
1291
        temperature=0.8,
1292
        logits_processor=None,
1293
        return_past_key_values=False,
1294
        **kwargs,
1295
    ):
1296
        if history is None:
1297
            history = []
1298
        if logits_processor is None:
1299
            logits_processor = LogitsProcessorList()
1300
        logits_processor.append(InvalidScoreLogitsProcessor())
1301
        eos_token_id = [
1302
            tokenizer.eos_token_id,
1303
            tokenizer.get_command("<|user|>"),
1304
            tokenizer.get_command("<|observation|>"),
1305
        ]
1306
        gen_kwargs = {
1307
            "max_length": max_length,
1308
            "do_sample": do_sample,
1309
            "top_p": top_p,
1310
            "temperature": temperature,
1311
            "logits_processor": logits_processor,
1312
            **kwargs,
1313
        }
1314
        if past_key_values is None:
1315
            inputs = tokenizer.build_chat_input(query, history=history, role=role)
1316
        else:
1317
            inputs = tokenizer.build_chat_input(query, role=role)
1318
        inputs = inputs.to(self.device)
1319
        if past_key_values is not None:
1320
            past_length = past_key_values[0][0].shape[0]
1321
            if self.transformer.pre_seq_len is not None:
1322
                past_length -= self.transformer.pre_seq_len
1323
            inputs.position_ids += past_length
1324
            attention_mask = inputs.attention_mask
1325
            attention_mask = torch.cat(
1326
                (attention_mask.new_ones(1, past_length), attention_mask), dim=1
1327
            )
1328
            inputs["attention_mask"] = attention_mask
1329
        history.append({"role": role, "content": query})
1330
        for outputs in self.stream_generate(
1331
            **inputs,
1332
            past_key_values=past_key_values,
1333
            eos_token_id=eos_token_id,
1334
            return_past_key_values=return_past_key_values,
1335
            **gen_kwargs,
1336
        ):
1337
            if return_past_key_values:
1338
                outputs, past_key_values = outputs
1339
            outputs = outputs.tolist()[0][len(inputs["input_ids"][0]) : -1]
1340
            response = tokenizer.decode(outputs)
1341
            if response and response[-1] != "�":
1342
                response, new_history = self.process_response(response, history)
1343
                if return_past_key_values:
1344
                    yield response, new_history, past_key_values
1345
                else:
1346
                    yield response, new_history
1347

1348
    @torch.inference_mode()
1349
    def stream_generate(
1350
        self,
1351
        input_ids,
1352
        generation_config: Optional[GenerationConfig] = None,
1353
        logits_processor: Optional[LogitsProcessorList] = None,
1354
        stopping_criteria: Optional[StoppingCriteriaList] = None,
1355
        prefix_allowed_tokens_fn: Optional[
1356
            Callable[[int, torch.Tensor], List[int]]
1357
        ] = None,
1358
        return_past_key_values=False,
1359
        **kwargs,
1360
    ):
1361
        batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
1362

1363
        if generation_config is None:
1364
            generation_config = self.generation_config
1365
        generation_config = copy.deepcopy(generation_config)
1366
        model_kwargs = generation_config.update(**kwargs)
1367
        model_kwargs["use_cache"] = generation_config.use_cache
1368
        bos_token_id, eos_token_id = (
1369
            generation_config.bos_token_id,
1370
            generation_config.eos_token_id,
1371
        )
1372

1373
        if isinstance(eos_token_id, int):
1374
            eos_token_id = [eos_token_id]
1375
        assert eos_token_id is not None
1376
        eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device)
1377

1378
        has_default_max_length = (
1379
            kwargs.get("max_length") is None
1380
            and generation_config.max_length is not None
1381
        )
1382
        if has_default_max_length and generation_config.max_new_tokens is None:
1383
            warnings.warn(
1384
                f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
1385
                "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
1386
                " recommend using `max_new_tokens` to control the maximum length of the generation.",
1387
                UserWarning,
1388
            )
1389
        elif generation_config.max_new_tokens is not None:
1390
            generation_config.max_length = (
1391
                generation_config.max_new_tokens + input_ids_seq_length
1392
            )
1393
            if not has_default_max_length:
                logger.warning(
                    f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length` (="
                    f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
                    "Please refer to the documentation for more information. "
                    "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
                )

        if input_ids_seq_length >= generation_config.max_length:
            input_ids_string = (
                "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
            )
            logger.warning(
                f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
                f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
                " increasing `max_new_tokens`."
            )

        # 2. Set generation parameters if not already defined
        logits_processor = (
            logits_processor if logits_processor is not None else LogitsProcessorList()
        )
        stopping_criteria = (
            stopping_criteria
            if stopping_criteria is not None
            else StoppingCriteriaList()
        )

        logits_processor = self._get_logits_processor(
            generation_config=generation_config,
            input_ids_seq_length=input_ids_seq_length,
            encoder_input_ids=input_ids,
            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
            logits_processor=logits_processor,
        )

        stopping_criteria = self._get_stopping_criteria(
            generation_config=generation_config, stopping_criteria=stopping_criteria
        )
        logits_warper = self._get_logits_warper(generation_config)

        unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
        scores = None
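        # Decode loop: each iteration runs one forward pass on the freshly
        # prepared inputs, applies the logits processors/warpers, picks the next
        # token (multinomial sample or argmax), appends it to `input_ids`, and
        # yields the updated sequence (optionally with the KV cache).
        # `unfinished_sequences` flips to 0 for a row once it emits an EOS token.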
        while True:
            model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
            # forward pass to get next token
            outputs = self(
                **model_inputs,
                return_dict=True,
                output_attentions=False,
                output_hidden_states=False,
            )

            next_token_logits = outputs.logits[:, -1, :]

            # pre-process distribution
            next_token_scores = logits_processor(input_ids, next_token_logits)
            next_token_scores = logits_warper(input_ids, next_token_scores)

            # sample
            probs = nn.functional.softmax(next_token_scores, dim=-1)
            if generation_config.do_sample:
                next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
            else:
                next_tokens = torch.argmax(probs, dim=-1)
            # update generated ids, model inputs, and length for next step
            input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
            model_kwargs = self._update_model_kwargs_for_generation(
                outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
            )
            unfinished_sequences = unfinished_sequences.mul(
                next_tokens.tile(eos_token_id_tensor.shape[0], 1)
                .ne(eos_token_id_tensor.unsqueeze(1))
                .prod(dim=0)
            )
            if return_past_key_values:
                yield input_ids, outputs.past_key_values
            else:
                yield input_ids
            # stop when each sentence is finished, or if we exceed the maximum length
            if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
                break

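    # NOTE (editorial, illustrative only): `quantize` swaps the transformer
    # encoder for its weight-quantized counterpart via the sibling
    # `quantization` module and records the bit width in the config.
    # A hedged usage sketch, assuming an already loaded `model` (name not
    # defined in this file):
    #
    #     model = model.quantize(4)  # 4-bit weight quantization, in place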
    def quantize(self, bits: int, empty_init=False, device=None, **kwargs):
        if bits == 0:
            return

        from .quantization import quantize

        if self.quantized:
            logger.info("Already quantized.")
            return self

        self.quantized = True

        self.config.quantization_bit = bits

        self.transformer.encoder = quantize(
            self.transformer.encoder,
            bits,
            empty_init=empty_init,
            device=device,
            **kwargs,
        )
        return self


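# NOTE (editorial, illustrative only): the sequence-classification head below
# pools one hidden state per sequence, maps it to `num_labels` logits, and
# selects the loss via the standard Transformers `problem_type` convention
# (regression / single-label / multi-label). A hedged usage sketch; the
# checkpoint path and `tokenizer` are hypothetical, not part of this file:
#
#     model = ChatGLMForSequenceClassification.from_pretrained(
#         "path/to/chatglm-classifier", num_labels=2
#     )
#     out = model(**tokenizer("nice movie", return_tensors="pt"))
#     pred = out.logits.argmax(dim=-1)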
class ChatGLMForSequenceClassification(ChatGLMPreTrainedModel):
    def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device)

        self.classifier_head = nn.Linear(
            config.hidden_size, config.num_labels, bias=True, dtype=torch.half
        )
        if config.classifier_dropout is not None:
            self.dropout = nn.Dropout(config.classifier_dropout)
        else:
            self.dropout = None
        self.config = config

        if self.config.quantization_bit:
            self.quantize(self.config.quantization_bit, empty_init=True)

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        full_attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor, ...], SequenceClassifierOutputWithPast]:
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )

        transformer_outputs = self.transformer(
            input_ids=input_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            full_attention_mask=full_attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = transformer_outputs[0]
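        # Pool by taking the hidden state at the final sequence position
        # (the ChatGLM backbone appears to keep a sequence-first layout, so
        # `hidden_states[-1]` is the last position), then project to logits.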
        pooled_hidden_states = hidden_states[-1]
        if self.dropout is not None:
            pooled_hidden_states = self.dropout(pooled_hidden_states)
        logits = self.classifier_head(pooled_hidden_states)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (
                    labels.dtype == torch.long or labels.dtype == torch.int
                ):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze().float(), labels.squeeze())
                else:
                    loss = loss_fct(logits.float(), labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(
                    logits.view(-1, self.num_labels).float(), labels.view(-1)
                )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits.float(), labels.view(-1, self.num_labels))

        if not return_dict:
            output = (logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
