# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import logging
import math
import os

import numpy as np
import paddle
import ppfleetx.models.language_model.gpt as gpt
from paddle.static import InputSpec
from ppfleetx.core.module.basic_module import BasicModule
from ppfleetx.data.tokenizers import GPTTokenizer
from ppfleetx.distributed.apis import env
from ppfleetx.models.language_model.gpt.dygraph.sequence_parallel_utils import (
    register_sequence_parallel_allreduce_hooks,
)
from ppfleetx.utils.log import logger

# TODO(haohongxiang): to solve the problem of cross-reference
import paddlenlp  # noqa: F401
from paddlenlp.transformers.gpt.tokenizer import GPTChineseTokenizer
from paddlenlp.transformers.segment_parallel_utils import split_inputs_sequence_dim

from .metrics import Accuracy, AccuracyAndF1, Mcc, PearsonAndSpearman
from .utils import process_configs

MODEL_CLASSES = {
    "GPT": (GPTTokenizer, "gpt2"),
    "GPT-cn": (GPTChineseTokenizer, "gpt-cpm-large-cn"),
}


def get_model_size(l, h, v, s):
    P = 0
    # embedding
    P += (v + s) * h
    # attention
    P += (4 * h * h + 4 * h) * l
    # layer_norm of decoder
    P += (2 * (2 * h)) * l
    # FFN Layer
    P += (8 * h * h + 5 * h) * l
    # layer_norm of transformer
    P += 2 * h
    logger.info("Model Size: {:.2f} B".format(P / 1000.0 / 1000.0 / 1000.0))

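# Illustrative check of the estimate above (values assumed, not taken from any config):
# for l=24, h=1024, v=50304, s=1024 the terms are
#   embedding    (v + s) * h      =  52,559,872
#   attention    (4h^2 + 4h) * l  = 100,761,600
#   decoder LN   (2 * 2h) * l     =      98,304
#   FFN          (8h^2 + 5h) * l  = 201,449,472
#   final LN     2h               =       2,048
# for a total of 354,871,296 parameters, which get_model_size logs as "Model Size: 0.35 B".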

def vocab_size_with_padding(vocab_size, div_unit, mp_degree):
    padded_size = vocab_size
    multiple = div_unit * mp_degree
    while (padded_size % multiple) != 0:
        padded_size += 1
    logging.warning(
        " > padded vocab (size: {}) with {} dummy tokens "
        "(new size: {})".format(vocab_size, padded_size - vocab_size, padded_size)
    )
    return padded_size

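# Illustrative example (assumed values): for vocab_size=50257, div_unit=128 and mp_degree=2
# the multiple is 256, so vocab_size_with_padding adds 175 dummy tokens and returns
# 50432 (= 197 * 256), keeping the embedding table evenly divisible across tensor-parallel ranks.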

class LanguageModule(BasicModule):
    def __init__(self, configs):
        self.nranks = paddle.distributed.get_world_size()
        self.data_world_size = env.get_data_world_size()
        super(LanguageModule, self).__init__(configs)

        self.loss_fn = self.get_loss_fn()

    def process_configs(self, configs):
        configs = process_configs(configs)
        return configs

    def forward(self, tokens, ids):
        return self.model(tokens, ids)

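    # Illustrative note on the sequence-parallel branch below (degree and length are assumed,
    # not taken from any config): with a sep parallel group of size 2 and max_seq_len=1024,
    # split_inputs_sequence_dim slices tokens, position_ids and labels along the sequence
    # axis, so each rank in the group processes 512 tokens per sample.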
    def training_step(self, batch):
        tokens, position_ids, labels, loss_mask = batch
        if (
            self.nranks > 1
            and paddle.distributed.fleet.get_hybrid_communicate_group().get_sep_parallel_world_size() > 1
        ):
            tokens = split_inputs_sequence_dim(tokens)
            position_ids = split_inputs_sequence_dim(position_ids)
            labels = split_inputs_sequence_dim(labels)

        loss_mask.stop_gradient = True
        labels.stop_gradient = True
        position_ids.stop_gradient = True

        preds = self(tokens, position_ids)
        loss = self.loss_fn(preds, labels, loss_mask)

        return loss

    def training_step_end(self, log_dict):
        speed = 1.0 / log_dict["train_cost"]
        default_global_tokens_num = self.configs.Global.global_batch_size * self.configs.Data.Train.dataset.max_seq_len

        loss_scale_str = (
            "loss_scale: %.9f," % (log_dict["loss_scale"]) if log_dict.get("loss_scale", None) is not None else ""
        )
        memory_str = (
            ", max_memory_allocated: %.1f MB, max_memory_reserved: %.1f MB, "
            "memory_allocated: %.1f MB, memory_reserved: %.1f MB"
            % (
                log_dict["max_memory_allocated"],
                log_dict["max_memory_reserved"],
                log_dict["memory_allocated"],
                log_dict["memory_reserved"],
            )
            if "max_memory_allocated" in log_dict
            else ""
        )
        logger.info(
            "[train] epoch: [%d/%d], batch: [%d/%d], loss: %.9f, avg_batch_cost: %.5f sec, speed: %.2f step/s, "
            "ips_total: %.0f tokens/s, ips: %.0f tokens/s, ips_per_device: %.0f tokens/s/device, %s learning rate: %.5e, found_inf: %.0f %s"
            % (
                log_dict["epoch"],
                log_dict["total_epoch"],
                log_dict["batch"],
                log_dict["total_step"],
                log_dict["loss"],
                log_dict["train_cost"],
                speed,
                speed * default_global_tokens_num,
                speed * default_global_tokens_num / self.data_world_size,
                speed * default_global_tokens_num / paddle.distributed.get_world_size(),
                loss_scale_str,
                log_dict["lr"],
                log_dict["found_inf"],
                memory_str,
            )
        )

    def validation_step(self, batch):
        tokens, position_ids, labels, loss_mask = batch
        preds = self(tokens, position_ids)
        preds = paddle.cast(preds, dtype="float32")
        loss = self.loss_fn(preds, labels, loss_mask)
        return loss

    def validation_step_end(self, log_dict):
        speed = 1.0 / log_dict["eval_cost"]
        logger.info(
            "[eval] epoch: %d, batch: %d/%d, loss: %.9f, avg_eval_cost: %.5f sec, speed: %.2f step/s"
            % (
                log_dict["epoch"],
                log_dict["batch"],
                log_dict["total_batch"],
                log_dict["loss"],
                log_dict["eval_cost"],
                speed,
            )
        )

    def test_step(self, batch):
        tokens, position_ids, labels, loss_mask = batch
        preds = self(tokens, position_ids)
        preds = paddle.cast(preds, dtype="float32")
        loss = self.loss_fn(preds, labels, loss_mask)
        return loss

    def test_step_end(self, log_dict):
        speed = 1.0 / log_dict["test_cost"]
        logger.info(
            "[test] epoch: %d, batch: %d, loss: %.9f, avg_test_cost: %.5f sec, speed: %.2f step/s"
            % (log_dict["epoch"], log_dict["batch"], log_dict["loss"], log_dict["test_cost"], speed)
        )

    def training_epoch_end(self, log_dict):
        logger.info("[Training] epoch: %d, total time: %.5f sec" % (log_dict["epoch"], log_dict["train_cost"]))


class GPTModule(LanguageModule):
    def __init__(self, configs):
        super(GPTModule, self).__init__(configs)
        if configs.Model.sequence_parallel:
            register_sequence_parallel_allreduce_hooks(
                self, configs.Engine.accumulate_steps, configs.Distributed.fuse_sequence_parallel_allreduce
            )

    def get_model(self):
        model_setting = copy.deepcopy(self.configs.Model)
        if "Compress" in self.configs and "Quantization" in self.configs.Compress:
            quant_setting = copy.deepcopy(self.configs.Compress.Quantization)
            skip_tensor_map = quant_setting.get("skip_tensor_map", {})
            freeze_embedding = quant_setting.get("freeze_embedding", False)
            model_setting["skip_tensor_map"] = skip_tensor_map
            model_setting["freeze_embedding"] = freeze_embedding
        model_setting.pop("module")

        model_name = model_setting.pop("name")
        tokenizer_class, pretrained_name = MODEL_CLASSES[model_name]
        self.tokenizer = tokenizer_class.from_pretrained(pretrained_name)

        model_setting["vocab_size"] = vocab_size_with_padding(
            model_setting.get("vocab_size", self.tokenizer.vocab_size),
            model_setting.pop("vocab_size_divisible_unit", 128),
            self.configs.Distributed.get("mp_degree", 1),
        )

        l = model_setting["num_layers"]
        h = model_setting["hidden_size"]
        v = model_setting["vocab_size"]
        s = self.configs.Data.Train.dataset.max_seq_len
        get_model_size(l, h, v, s)

        if self.nranks == 1:
            model_setting.pop("sequence_parallel")
            model = gpt.GPTForPretraining(gpt.GPTModel(**model_setting))
        else:
            model_setting["num_partitions"] = self.configs.Distributed.mp_degree
            if self.configs.Distributed.pp_degree == 1:
                model_setting.pop("virtual_pp_degree", None)
                model = gpt.GPTForPretrainingHybrid(gpt.GPTModelHybrid(**model_setting))
            else:
                model = gpt.GPTForPretrainingPipe(**model_setting)

        return model

    def get_loss_fn(self):
        if self.nranks == 1:
            loss_fn = gpt.GPTPretrainingCriterion()
        else:
            loss_fn = gpt.GPTPretrainingCriterionHybird(sequence_parallel=self.configs.Model.sequence_parallel)
        return loss_fn

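    # Illustrative note (based on the regrouping below): with pipeline parallelism enabled, a
    # batch (tokens, position_ids, labels, loss_mask) is repacked as
    # [(tokens, position_ids), (labels, loss_mask)], presumably the forward inputs consumed by
    # the first pipeline stage followed by the tensors the loss needs on the last stage.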
    def pretreating_batch(self, batch):
        if self.configs.Distributed.pp_degree > 1:
            tokens, position_ids, labels, loss_mask = batch
            data = [(tokens, position_ids), (labels, loss_mask)]
            return data
        else:
            return batch

    def input_spec(self):
        return [
            InputSpec(shape=[None, None], name="tokens", dtype="int64"),
            InputSpec(shape=[None, None], name="ids", dtype="int64"),
        ]

    def inference_end(self, outputs):
        for k, v in outputs.items():
            for i in range(v.shape[0]):
                out_ids = [int(x) for x in v[i]]
                ret_str = self.tokenizer.decode(out_ids)
                # ret_str = text[i] + ret_str
                print(ret_str)


class GPTFinetuneModule(BasicModule):
    def __init__(self, configs):
        self.nranks = paddle.distributed.get_world_size()
        self.data_world_size = env.get_data_world_size()
        super(GPTFinetuneModule, self).__init__(configs)

        # self.loss_config is initialized by get_model(), which is called in the base class constructor.
        assert self.loss_config is not None
        assert "train" in self.loss_config
        assert "eval" in self.loss_config

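        # Illustrative config shape (assumed, not the only valid layout): a loss section such as
        #     loss:
        #         train: {name: CrossEntropyLoss}
        #         eval: {name: CrossEntropyLoss}
        # makes the lines below construct paddle.nn.loss.CrossEntropyLoss() for both phases;
        # any extra keys are passed through as keyword arguments.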
        train_loss = copy.deepcopy(self.loss_config.train)
        train_loss_cls = train_loss.pop("name")
        self.loss_fn = eval(f"paddle.nn.loss.{train_loss_cls}")(**train_loss)

        eval_loss = copy.deepcopy(self.loss_config.eval)
        eval_loss_cls = eval_loss.pop("name")
        self.eval_loss_fn = eval(f"paddle.nn.loss.{eval_loss_cls}")(**eval_loss)

        # self.metric_config is initialized by get_model(), which is called in the base class constructor.
        assert self.metric_config is not None
        assert "eval" in self.metric_config

        if "train" in self.metric_config:
            train_metric = copy.deepcopy(self.metric_config.train)
            train_metric_cls = train_metric.pop("name")
            self.train_metric = eval(f"{train_metric_cls}")(**train_metric)

        eval_metric = copy.deepcopy(self.metric_config.eval)
        eval_metric_cls = eval_metric.pop("name")
        self.eval_metric = eval(f"{eval_metric_cls}")(**eval_metric)

        self.best_metric = 0.0

    def process_configs(self, configs):
        return configs

    def get_model(self):
        model_setting = copy.deepcopy(self.configs.Model)
        model_setting.pop("module")

        self.metric_config = model_setting.pop("metric", None)
        self.loss_config = model_setting.pop("loss", None)

        pretrained = model_setting.pop("pretrained")
        num_classes = model_setting.pop("num_classes", 2)
        assert pretrained is not None

        model_name = model_setting.pop("name")
        tokenizer_class, pretrained_name = MODEL_CLASSES[model_name]
        self.tokenizer = tokenizer_class.from_pretrained(pretrained_name)

        model_setting["vocab_size"] = vocab_size_with_padding(
            model_setting.get("vocab_size", self.tokenizer.vocab_size),
            model_setting.pop("vocab_size_divisible_unit", 128),
            self.configs.Distributed.get("mp_degree", 1),
        )

        l = model_setting["num_layers"]
        h = model_setting["hidden_size"]
        v = model_setting["vocab_size"]
        num_heads = model_setting["num_attention_heads"]
        s = self.configs.Data.Train.dataset.max_length
        get_model_size(l, h, v, s)

        if self.nranks == 1:
            model = gpt.GPTForSequenceClassification(gpt.GPTModel(**model_setting), num_classes)
        else:
            raise NotImplementedError

        pretrained_path = pretrained + ".pdparams"
        assert os.path.exists(pretrained_path), f"{pretrained_path} does not exist!"
        model_dict = paddle.load(pretrained_path)

        # Note(GuoxiaWang): Guess whether to convert fused vs non-fused parameters.
        # 'q_proj' vs 'qkv_proj'
        def is_fused(model_state):
            for key in model_state:
                if "qkv_proj" in key:
                    return True
            return False

        def split_params(model_state, num_layers):
            for idx in range(num_layers):
                qkv_b = model_state.pop(f"gpt.decoder.layers.{idx}.self_attn.qkv_proj.bias")
                qkv_w = model_state.pop(f"gpt.decoder.layers.{idx}.self_attn.qkv_proj.weight")

                qkv_b = qkv_b.reshape((num_heads, 3, -1))
                qkv_w = qkv_w.reshape((h, num_heads, 3, -1))

                q_w, k_w, v_w = np.split(qkv_w, 3, axis=2)
                q_w = q_w.reshape((h, -1))
                k_w = k_w.reshape((h, -1))
                v_w = v_w.reshape((h, -1))

                q_b, k_b, v_b = np.split(qkv_b, 3, axis=1)
                q_b = q_b.reshape((-1))
                k_b = k_b.reshape((-1))
                v_b = v_b.reshape((-1))

                model_state[f"gpt.decoder.layers.{idx}.self_attn.q_proj.bias"] = q_b
                model_state[f"gpt.decoder.layers.{idx}.self_attn.q_proj.weight"] = q_w

                model_state[f"gpt.decoder.layers.{idx}.self_attn.k_proj.bias"] = k_b
                model_state[f"gpt.decoder.layers.{idx}.self_attn.k_proj.weight"] = k_w

                model_state[f"gpt.decoder.layers.{idx}.self_attn.v_proj.bias"] = v_b
                model_state[f"gpt.decoder.layers.{idx}.self_attn.v_proj.weight"] = v_w

            return model_state

        def fuse_params(model_state, num_layers):
            for idx in range(num_layers):
                q_b = model_state.pop(f"gpt.decoder.layers.{idx}.self_attn.q_proj.bias")
                q_w = model_state.pop(f"gpt.decoder.layers.{idx}.self_attn.q_proj.weight")

                k_b = model_state.pop(f"gpt.decoder.layers.{idx}.self_attn.k_proj.bias")
                k_w = model_state.pop(f"gpt.decoder.layers.{idx}.self_attn.k_proj.weight")

                v_b = model_state.pop(f"gpt.decoder.layers.{idx}.self_attn.v_proj.bias")
                v_w = model_state.pop(f"gpt.decoder.layers.{idx}.self_attn.v_proj.weight")

                q_w = q_w.reshape((h, num_heads, -1))
                k_w = k_w.reshape((h, num_heads, -1))
                v_w = v_w.reshape((h, num_heads, -1))

                qkv_w = np.stack([q_w, k_w, v_w], axis=2)
                qkv_w = qkv_w.reshape((h, -1))

                q_b = q_b.reshape((num_heads, -1))
                k_b = k_b.reshape((num_heads, -1))
                v_b = v_b.reshape((num_heads, -1))
                qkv_b = np.stack([q_b, k_b, v_b], axis=1)
                qkv_b = qkv_b.reshape((-1))

                model_state[f"gpt.decoder.layers.{idx}.self_attn.qkv_proj.weight"] = qkv_w
                model_state[f"gpt.decoder.layers.{idx}.self_attn.qkv_proj.bias"] = qkv_b
            return model_state

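        # Illustrative shapes for the helpers above (assumed h=1024, num_heads=16): a fused
        # qkv_proj.weight of shape (1024, 3072) is reshaped to (1024, 16, 3, 64) and split on
        # axis 2 into three (1024, 1024) matrices; fuse_params performs the exact inverse
        # (stack on axis 2, then flatten back to (1024, 3072)).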
        fused = is_fused(model.state_dict())
        load_fused = is_fused(model_dict)

        if fused is True and load_fused is False:
            model_dict = fuse_params(model_dict, l)
        elif fused is False and load_fused is True:
            model_dict = split_params(model_dict, l)

        for name, param in model.state_dict().items():
            if name in model_dict and param.dtype != model_dict[name].dtype:
                model_dict[name] = model_dict[name].cast(param.dtype)

        model.set_state_dict(model_dict)
        logger.info(f"Loaded pretrained weights from {pretrained_path}")

407
        return model
408

409
    def forward(self, tokens):
410
        return self.model(tokens)
411

412
    def training_step(self, batch):
413
        input_ids, labels = batch
414

415
        input_ids.stop_gradient = True
416
        labels.stop_gradient = True
417

418
        logits = self(input_ids)
419
        loss = self.loss_fn(logits, labels)
420

421
        return loss
422

423
    def training_step_end(self, log_dict):
424
        speed = 1.0 / log_dict["train_cost"]
425
        default_global_tokens_num = self.configs.Global.global_batch_size * self.configs.Data.Train.dataset.max_length
426

427
        logger.info(
428
            "[train] epoch: [%d/%d], step: [%d/%d], learning rate: %.7f, loss: %.9f, avg_batch_cost: %.5f sec, speed: %.2f step/s, "
429
            "ips_total: %.0f tokens/s, ips: %.0f tokens/s"
430
            % (
431
                log_dict["epoch"],
432
                log_dict["total_epoch"],
433
                log_dict["batch"],
434
                log_dict["total_batch"],
435
                log_dict["lr"],
436
                log_dict["loss"],
437
                log_dict["train_cost"],
438
                speed,
439
                speed * default_global_tokens_num,
440
                speed * default_global_tokens_num / self.data_world_size,
441
            )
442
        )
443

444
    def validation_step(self, batch):
445
        input_ids, labels = batch
446

447
        input_ids.stop_gradient = True
448
        labels.stop_gradient = True
449

450
        logits = self(input_ids)
451
        loss = self.eval_loss_fn(logits, labels)
452
        correct = self.eval_metric.compute(logits, labels)
453
        self.eval_metric.update(correct)
454
        return loss
455

456
    def validation_step_end(self, log_dict):
457
        speed = 1.0 / log_dict["eval_cost"]
458
        logger.info(
459
            "[eval] epoch: %d, batch: %d, loss: %.9f, avg_eval_cost: %.5f sec, speed: %.2f step/s"
460
            % (log_dict["epoch"], log_dict["batch"], log_dict["loss"], log_dict["eval_cost"], speed)
461
        )
462

463
    def test_step(self, batch):
464
        tokens, position_ids, labels, loss_mask = batch
465
        preds = self(tokens, position_ids)
466
        preds = paddle.cast(preds, dtype="float32")
467
        loss = self.eval_loss_fn(preds, labels, loss_mask)
468
        return loss
469

470
    def test_step_end(self, log_dict):
471
        speed = 1.0 / log_dict["test_cost"]
472
        logger.info(
473
            "[test] epoch: %d, batch: %d, loss: %.9f, avg_test_cost: %.5f sec, speed: %.2f step/s"
474
            % (log_dict["epoch"], log_dict["batch"], log_dict["loss"], log_dict["test_cost"], speed)
475
        )
476

477
    def training_epoch_end(self, log_dict):
478
        logger.info("[Training] epoch: %d, total time: %.5f sec" % (log_dict["epoch"], log_dict["train_cost"]))
479

480
    def validation_epoch_end(self, log_dict):
481
        res = self.eval_metric.accumulate()
482
        self.eval_metric.reset()
483
        if isinstance(self.eval_metric, AccuracyAndF1):
484
            msg = "acc: %.5f, precision: %.5f, recall: %.5f, f1: %.5f, acc and f1: %.5f" % (
485
                res[0],
486
                res[1],
487
                res[2],
488
                res[3],
489
                res[4],
490
            )
491
            metric = res[4]
492
        elif isinstance(self.eval_metric, Mcc):
493
            msg = "mcc: %.5f" % (res[0])
494
            metric = res[0]
495
        elif isinstance(self.eval_metric, PearsonAndSpearman):
496
            msg = "pearson: %.5f, spearman: %.5f, pearson and spearman: %.5f" % (res[0], res[1], res[2])
497
            metric = res[2]
498
        else:
499
            msg = "acc: %.5f" % (res)
500
            metric = res
501

502
        if metric > self.best_metric:
503
            self.best_metric = metric
504

505
        logger.info(
506
            "[Eval] epoch: %d, total time: %.5f sec, %s, best_metric: %.5f"
507
            % (log_dict["epoch"], log_dict["eval_cost"], msg, self.best_metric)
508
        )
509

510

511
class GPTGenerationModule(BasicModule):
512
    def __init__(self, configs):
513
        self.configs = configs
514
        self.generation_cfgs = configs.Generation
515
        self.nranks = paddle.distributed.get_world_size()
516

517
        super().__init__(configs)
518

519
    def process_configs(self, configs):
520
        configs = process_configs(configs)
521
        return configs
522

523
    def get_model(self):
524
        model_setting = copy.deepcopy(self.configs.Model)
525
        if "Compress" in self.configs and "Quantization" in self.configs.Compress:
526
            quant_setting = copy.deepcopy(self.configs.Compress.Quantization)
527
            skip_tensor_map = quant_setting.get("skip_tensor_map", {})
528
            freeze_embedding = quant_setting.get("freeze_embedding", False)
529
            model_setting["skip_tensor_map"] = skip_tensor_map
530
            model_setting["freeze_embedding"] = freeze_embedding
531
        model_setting.pop("module")
532

533
        model_name = model_setting.pop("name")
534
        tokenizer_class, pretrained_name = MODEL_CLASSES[model_name]
535
        self.tokenizer = tokenizer_class.from_pretrained(pretrained_name)
536

537
        model_setting["vocab_size"] = vocab_size_with_padding(
538
            model_setting.get("vocab_size", self.tokenizer.vocab_size),
539
            model_setting.pop("vocab_size_divisible_unit", 128),
540
            self.configs.Distributed.get("mp_degree", 1),
541
        )
542

543
        if self.nranks == 1:
544
            model = gpt.GPTForGeneration(gpt.GPTModel(**model_setting), self.generation_cfgs)
545
        else:
546
            assert (
                self.nranks == self.configs.Distributed.dp_degree
            ), "Generation currently supports only single-card or data-parallel execution."
            model = gpt.GPTForGenerationHybrid(gpt.GPTModelHybrid(**model_setting), self.generation_cfgs)

        self.generation_cfgs["max_dec_len"] = self.adjust_length_to_model(self.generation_cfgs["max_dec_len"], 512)

        self.generation_cfgs["bos_token_id"] = self.tokenizer.eos_token_id
        self.generation_cfgs["eos_token_id"] = self.tokenizer.eos_token_id
        self.generation_cfgs["pad_token_id"] = self.tokenizer.eos_token_id

        return model

    def adjust_length_to_model(self, length, max_sequence_length):
        if length < 0 or length > max_sequence_length:
            length = max_sequence_length
        return length

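    # Illustrative example (token ids assumed): with pad_id=50256 and
    # inputs={"input_ids": [[1, 2, 3], [4, 5]]}, left_padding below returns
    # {"input_ids": [[1, 2, 3], [50256, 4, 5]]}; attention_mask and position_ids,
    # if present, are padded on the left with 0 instead.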
    def left_padding(self, inputs, pad_id, padding="longest"):
        assert "input_ids" in inputs, "input_ids should be in inputs!"
        max_length = 0
        for ids in inputs["input_ids"]:
            max_length = max(max_length, len(ids))

        def extend_max_length(value, max_length, to_pad_id):
            return [to_pad_id] * (max_length - len(value)) + value

        def extend_field(name, max_length, to_pad_id):
            values = inputs[name]
            res = []
            for value in values:
                res.append(extend_max_length(value, max_length, to_pad_id))
            inputs[name] = res

        extend_field("input_ids", max_length, pad_id)
        if "attention_mask" in inputs:
            extend_field("attention_mask", max_length, 0)
        if "position_ids" in inputs:
            extend_field("position_ids", max_length, 0)

        return inputs

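    # Illustrative usage (prompt text assumed): module.generate("Question: Where is Beijing? Answer:")
    # returns a list of strings, each being the prompt followed by the decoded continuation.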
    def generate(self, input_text):
        return self(input_text)

    def forward(self, input_text):
        input_ids = self.tokenizer.encode(input_text)
        inputs = {"input_ids": [input_ids]}

        inputs = self.left_padding(inputs, self.tokenizer.eos_token_id)
        input_ids = inputs["input_ids"]

        if len(input_ids) == 0:
            input_ids = None
        else:
            # [1, seq_len]
            input_ids = paddle.to_tensor(input_ids, dtype="int64")

        ids, scores = self.model(input_ids=input_ids)

        generated_sequences = []
        for i, generated_ids in enumerate(ids):
            generated_ids = generated_ids.numpy().tolist()
            # Decode text
            text = self.tokenizer.convert_ids_to_string(generated_ids)
            sequence = input_text + text
            generated_sequences.append(sequence)

        return generated_sequences

    def input_spec(self):
        return [InputSpec(shape=[None, None], name="input_ids", dtype="int64")]


class GPTEvalModule(LanguageModule):
    def __init__(self, configs):
        self.eval_cfgs = configs.Offline_Eval

        super().__init__(configs)

        self.post_process_configs()

        self.first_step = True
        self.total_score = 0
        self.score_name = "loss" if not self.eval_cfgs.cloze_eval else "number correct"

    def post_process_configs(self):
        self.configs.pop("Optimizer", None)
        self.configs.pop("Inference", None)

        self.configs.Data.pop("Train", None)
        self.configs.Data.pop("Test", None)
        self.configs.Data.Eval.pop("sampler", None)
        self.configs.Data.Eval.loader.collate_fn = "gpt_collate_fn"
        self.configs.Data.Eval.loader.batch_size = self.eval_cfgs.batch_size
        self.configs.Data.Eval.dataset.input_dir = self.eval_cfgs.eval_path
        self.configs.Data.Eval.dataset.max_seq_len = self.eval_cfgs.max_seq_len

        self.configs.Engine.logging_freq = self.eval_cfgs.logging_freq

        if not self.eval_cfgs.cloze_eval:
            self.configs.Data.Eval.dataset.name = "LM_Eval_Dataset"
            self.configs.Data.Eval.dataset.overlapping_eval = self.eval_cfgs.overlapping_eval
        else:
            self.configs.Data.Eval.dataset.name = "Lambada_Eval_Dataset"

    def get_model(self):
        model_setting = copy.deepcopy(self.configs.Model)
        if "Compress" in self.configs and "Quantization" in self.configs.Compress:
            quant_setting = copy.deepcopy(self.configs.Compress.Quantization)
            skip_tensor_map = quant_setting.get("skip_tensor_map", {})
            freeze_embedding = quant_setting.get("freeze_embedding", False)
            model_setting["skip_tensor_map"] = skip_tensor_map
            model_setting["freeze_embedding"] = freeze_embedding
        model_setting.pop("module")

        model_name = model_setting.pop("name")
        tokenizer_class, pretrained_name = MODEL_CLASSES[model_name]
        self.tokenizer = tokenizer_class.from_pretrained(pretrained_name)

        model_setting["vocab_size"] = vocab_size_with_padding(
            model_setting.get("vocab_size", self.tokenizer.vocab_size),
            model_setting.pop("vocab_size_divisible_unit", 128),
            self.configs.Distributed.get("mp_degree", 1),
        )

        if self.nranks == 1:
            model = gpt.GPTForPretraining(gpt.GPTModel(**model_setting))
        else:
            raise RuntimeError("Only single-card offline eval is supported in GPTModel now.")

        return model

    def forward(self, tokens, ids, mask):
        return self.model(tokens, ids, mask)

    def validation_step(self, batch):
        tokens, loss_mask, attention_mask, position_ids, labels, info = batch

        preds = self(tokens, position_ids, attention_mask)

        if not self.eval_cfgs.cloze_eval:
            if self.first_step:
                # Dataset-level token counts only need to be read from the first batch.
                self.num_original_tokens = info.numpy()[0][0]
                self.num_tokenized_tokens = info.numpy()[0][1]
                self.first_step = False

            masked_lm_loss = paddle.nn.functional.cross_entropy(preds, labels, reduction="none")
            loss = paddle.sum(masked_lm_loss * loss_mask)
            return loss
        else:
            if self.first_step:
                self.num_examples = info.numpy()[0][0]
                self.first_step = False

            outputs = paddle.argmax(preds, -1)
            acc = paddle.cast(outputs == labels, "float32")
            acc = paddle.where(paddle.cast(loss_mask, "bool"), acc, paddle.ones_like(acc))
            acc = paddle.sum(paddle.prod(acc, -1))
            return acc

    def validation_step_end(self, log_dict):
        speed = 1.0 / log_dict["eval_cost"]

        if not self.eval_cfgs.cloze_eval:
            self.total_score += log_dict["loss"] * self.configs.Engine.logging_freq / (self.num_tokenized_tokens - 1)
        else:
            self.total_score += log_dict["loss"] * self.configs.Engine.logging_freq

        logger.info(
            "[eval] epoch: %d, batch: %d, %s: %.9f, speed: %.2f step/s"
            % (log_dict["epoch"], log_dict["batch"], self.score_name, self.total_score, speed)
        )

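    # Illustrative numbers (assumed): an accumulated average loss of 3.0 gives
    # ppl = exp(3.0) ~= 20.09, and with a token ratio of 1.1 the adjusted ppl is
    # exp(3.3) ~= 27.11; the exponent is clamped at 20, which bounds the reported perplexity.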
    def validation_epoch_end(self, log_dict):
        if not self.eval_cfgs.cloze_eval:
            total_loss = float(self.total_score)
            ppl = math.exp(min(20, total_loss))
            token_ratio = (self.num_tokenized_tokens - 1) / (self.num_original_tokens - 1)
            adjusted_ppl = math.exp(min(20, total_loss * token_ratio))
            string = " validation results on {} | ".format(self.eval_cfgs.eval_path)
            string += "avg loss: {:.4E} | ".format(total_loss)
            string += "ppl: {:.4E} | ".format(ppl)
            string += "adjusted ppl: {:.4E} | ".format(adjusted_ppl)
            string += "token ratio: {} |".format(token_ratio)
        else:
            num_correct = float(self.total_score)
            acc = float(num_correct / self.num_examples)
            string = " validation results on {} | ".format(self.eval_cfgs.eval_path)
            string += "number correct: {:.4E} | ".format(num_correct)
            string += "total examples: {:.4E} | ".format(self.num_examples)
            string += "avg accuracy: {:.4E}".format(acc)

        logger.info(string)

    def input_spec(self):
        return [
            InputSpec(shape=[None, None], name="tokens", dtype="int64"),
            InputSpec(shape=[None, None], name="ids", dtype="int64"),
        ]
