paddlenlp / tensor_fusion_helper.py
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import OrderedDict

import numpy as np
import paddle
from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_storage import (
    GradStorage,
    ParamStorage,
)
from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import Type
from paddle.framework import core

# Required byte alignment of the fused buffers per device type.
alignment = {
    "gpu": 256,
}
# Size in bytes of a single element for each supported dtype.
align = {
    Type.fp16.value: 2,
    Type.fp32.value: 4,
}
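
# Illustrative note (not in the original file): for the padding arithmetic in
# flatten_dense_tensors below, an fp16 parameter of shape [1000] occupies
# 1000 * 2 = 2000 bytes; padding it to the 256-byte GPU alignment adds 48
# bytes, i.e. align_ = 48 // 2 = 24 extra fp16 elements, so the fused buffer
# reserves 1000 + 24 = 1024 slots for that parameter.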


def assign_group_by_size(parameters, group_size=256 * 1024 * 1024):
    # Bucket the dense parameters into groups of roughly ``group_size`` bytes
    # using the framework helper, then collect the parameters of each group.
    is_sparse_gradient = [False] * len(parameters)

    group_indices = core.eager_assign_group_by_size(parameters, is_sparse_gradient, [group_size, group_size])

    var_groups = OrderedDict()
    for group_idx, indices in enumerate(group_indices):
        for index in indices:
            var_groups.setdefault(group_idx, []).append(parameters[index])
    return var_groups


def flatten_dense_tensors(parameters):
    # Compute, for every parameter, the padding (in elements) needed to keep it
    # 256-byte aligned inside the fused buffer, and the total buffer size.
    _buffer_size = 0
    _param2align = {}
    dtype = parameters[0].dtype

    for param in parameters:
        assert param.trainable, "param must be trainable..."
        size = np.prod(param.shape) * align[dtype]
        remaining = size % alignment["gpu"]
        ali = 0 if remaining == 0 else alignment["gpu"] - remaining
        align_ = ali // align[dtype]
        _buffer_size += np.prod(param.shape) + align_
        _param2align[param.name] = align_

    param_storage = ParamStorage(size=_buffer_size, dtype=dtype, device="gpu")

    param_storage.add_rank_params(parameters, _param2align)

    # Process gradients: build a matching fused gradient buffer
    # (``parm2align`` is the keyword spelling expected by GradStorage).
    grad_storage = GradStorage(size=_buffer_size, dtype=dtype, device="gpu", destination="0", parm2align=_param2align)

    for param in parameters:
        grad_storage.add_grad(param, _param2align[param.name])

    # Attach the fused gradient buffer as the gradient of the fused parameter
    # buffer (param_storage --> grad_storage).
    param_storage.buffer._copy_gradient_from(grad_storage.buffer)
    param_storage.buffer.stop_gradient = False
    return param_storage, grad_storage


def obtain_storage(parameters):
    if len(parameters) < 1:
        return []

    var_groups = assign_group_by_size(parameters)
    storage = []
    for group_params in var_groups.values():
        param_storage, grad_storage = flatten_dense_tensors(group_params)
        # Only the fused parameter buffer is kept; its gradient already aliases
        # the fused gradient buffer set up in flatten_dense_tensors.
        storage.append(param_storage.buffer)
    return storage


def fused_parameters(parameters, use_sharding=False):
    # Split parameters into those that receive weight decay and those that do
    # not (biases and norm layers), then fuse each group into contiguous
    # buffers unless sharding is enabled, in which case fusion is skipped.
    decay_params = []
    other_params = []

    for param in parameters:
        if not any(nd in param.name for nd in ["bias", "norm", "b_0"]):
            decay_params.append(param)
        else:
            other_params.append(param)

    decay_fused = decay_params if use_sharding else obtain_storage(decay_params)
    other_fused = other_params if use_sharding else obtain_storage(other_params)
    all_fused = decay_fused + other_fused

    return decay_fused, all_fused


def all_reduce_parameters(params, group):
    # Average gradients across the ranks of ``group``: pre-scale each gradient
    # by 1 / nranks in place, then sum with all_reduce.
    if group.nranks < 2:
        return

    div_factor = 1.0 / group.nranks
    with paddle.framework.no_grad():
        for p in params:
            grad = p.grad.scale_(div_factor)
            paddle.distributed.all_reduce(grad, group=group)
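

# Illustrative usage sketch (not part of the original module). It assumes a
# GPU build of Paddle, since the fused buffers are allocated on "gpu"; the
# model and hyperparameters below are placeholders chosen only for the example.
if __name__ == "__main__":
    paddle.set_device("gpu")

    model = paddle.nn.Sequential(
        paddle.nn.Linear(64, 64),
        paddle.nn.LayerNorm(64),
    )

    # Fuse the dense parameters into contiguous buffers; parameters whose names
    # contain "bias", "norm" or "b_0" are kept out of weight decay.
    decay_fused, all_fused = fused_parameters(model.parameters())
    decay_names = [buffer.name for buffer in decay_fused]

    # The fused buffers can be handed to the optimizer directly; weight decay
    # is applied only to the buffers built from the decay group.
    optimizer = paddle.optimizer.AdamW(
        learning_rate=1e-4,
        parameters=all_fused,
        weight_decay=0.01,
        apply_decay_param_fun=lambda name: name in decay_names,
    )

    # After a backward pass, each fused buffer's .grad aliases the fused
    # gradient storage; under tensor parallelism, all_reduce_parameters(
    # all_fused, mp_group) would average those gradients across the group.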