test_moe_zero_optim.py 
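
"""Parity test for the LowLevelZero plugin: a MoE model trained with expert
parallelism (EP) must stay in sync with an identical model whose experts are
not sharded."""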

import pytest
import torch

import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import LowLevelZeroPlugin
from colossalai.moe.manager import MOE_MANAGER
from colossalai.tensor.moe_tensor.api import is_moe_tensor
from colossalai.testing import rerun_if_address_is_in_use, spawn
from colossalai.testing.random import seed_all
from tests.test_moe.moe_utils import MoeModel, delete_moe_info, loose_close, run_fwd_bwd, sync_local_from_ep


def run_zero_test(local_rank, stage=1):
    criterion = torch.nn.CrossEntropyLoss()
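
    # Expert-parallel (EP) MoE model: experts are sharded across ranks, and the
    # whole model is wrapped by the LowLevelZero booster.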
    MOE_MANAGER.__init__()
    MOE_MANAGER.setup(parallel="EP")
    moe_model = MoeModel().bfloat16()
    moe_optimizer = torch.optim.Adam(moe_model.parameters(), lr=1.0)
    moe_plugin = LowLevelZeroPlugin(stage=stage, precision="bf16")
    moe_booster = Booster(plugin=moe_plugin)
    moe_model, moe_optimizer, _, _, _ = moe_booster.boost(moe_model, moe_optimizer)
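
    # Reference model without expert parallelism: drop the MoE metadata, then
    # copy weights from the EP model so both start from identical parameters.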
    MOE_MANAGER.__init__()
    MOE_MANAGER.setup(parallel=None)
    zero_model = MoeModel().bfloat16()
    delete_moe_info(zero_model)
    sync_local_from_ep(zero_model, moe_model)
    zero_optimizer = torch.optim.Adam(zero_model.parameters(), lr=1.0)
    zero_plugin = LowLevelZeroPlugin(stage=stage, precision="bf16")
    zero_booster = Booster(plugin=zero_plugin)
    zero_model, zero_optimizer, _, _, _ = zero_booster.boost(zero_model, zero_optimizer)
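
    # Sanity check: non-expert parameters must match exactly before training
    # (expert weights are sharded under EP, so they are skipped here).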
    for (moe_name, moe_param), (zero_name, zero_param) in zip(
        moe_model.named_parameters(), zero_model.named_parameters()
    ):
        if ".experts." in moe_name:
            continue
        assert moe_name == zero_name
        assert torch.allclose(
            moe_param.data, zero_param.data
        ), f"{moe_name}\nmoe_param {moe_param.data}\nzero_param {zero_param.data}"
    for _ in range(1):
        data = torch.randn(2, 4).bfloat16().cuda()
        label = torch.randint(0, 4, (2,)).cuda()

        moe_out = run_fwd_bwd(moe_model, data, label, criterion, moe_optimizer)
        zero_out = run_fwd_bwd(zero_model, data, label, criterion, zero_optimizer)
        assert torch.allclose(zero_out, moe_out)
        moe_optimizer.step()
        zero_optimizer.step()
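
        # Expert tensors are sharded along dim 0 under EP, so compare this
        # rank's shard against the corresponding slice of the full parameter.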
        for (moe_name, moe_param), (zero_name, zero_param) in zip(
            moe_model.named_parameters(), zero_model.named_parameters()
        ):
            assert moe_name == zero_name
            if is_moe_tensor(moe_param):
                param_size = moe_param.shape[0]
                zero_param = zero_param[local_rank * param_size : (local_rank + 1) * param_size]
            loose_close(moe_param.data, zero_param.data, dtype=moe_param.dtype)

        moe_optimizer.zero_grad()
        zero_optimizer.zero_grad()


def run_dist(rank, world_size, port, stage):
    colossalai.launch(config=dict(), rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    seed_all(42 + rank)
    run_zero_test(rank, stage=stage)


@pytest.mark.dist
@pytest.mark.parametrize("world_size", [2])
@pytest.mark.parametrize("stage", [1, 2])
@rerun_if_address_is_in_use()
def test_moe_zero_optim(world_size, stage):
    spawn(run_dist, world_size, stage=stage)


if __name__ == "__main__":
    test_moe_zero_optim(world_size=2, stage=1)