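"""Compare MoE training under expert parallelism (EP) against a plain local
model with the LowLevelZero booster: parameters must match before training and
stay (loosely) in sync after an optimizer step, for ZeRO stages 1 and 2.
"""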
import pytest
import torch

import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import LowLevelZeroPlugin
from colossalai.moe.manager import MOE_MANAGER
from colossalai.tensor.moe_tensor.api import is_moe_tensor
from colossalai.testing import rerun_if_address_is_in_use, spawn
from colossalai.testing.random import seed_all
from tests.test_moe.moe_utils import MoeModel, delete_moe_info, loose_close, run_fwd_bwd, sync_local_from_ep


def run_zero_test(local_rank, stage=1):
    criterion = torch.nn.CrossEntropyLoss()
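
    # Build the MoE model under expert parallelism (EP): MOE_MANAGER is a
    # module-level singleton, so it is re-initialized for a clean context
    # before the experts are sharded across ranks. LowLevelZeroPlugin wraps
    # the optimizer with the requested ZeRO stage in bf16.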
    MOE_MANAGER.__init__()
    MOE_MANAGER.setup(parallel="EP")
    moe_model = MoeModel().bfloat16()
    moe_optimizer = torch.optim.Adam(moe_model.parameters(), lr=1.0)
    moe_plugin = LowLevelZeroPlugin(stage=stage, precision="bf16")
    moe_booster = Booster(plugin=moe_plugin)
    moe_model, moe_optimizer, _, _, _ = moe_booster.boost(moe_model, moe_optimizer)
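
    # Build a non-parallel reference model with identical weights. Inferred
    # from the helper names: delete_moe_info strips the MoE metadata so ZeRO
    # treats every parameter as a plain tensor, and sync_local_from_ep copies
    # the EP-sharded expert weights into the local model.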
    MOE_MANAGER.__init__()
    MOE_MANAGER.setup(parallel=None)
    zero_model = MoeModel().bfloat16()
    delete_moe_info(zero_model)
    sync_local_from_ep(zero_model, moe_model)
    zero_optimizer = torch.optim.Adam(zero_model.parameters(), lr=1.0)
    zero_plugin = LowLevelZeroPlugin(stage=stage, precision="bf16")
    zero_booster = Booster(plugin=zero_plugin)
    zero_model, zero_optimizer, _, _, _ = zero_booster.boost(zero_model, zero_optimizer)
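
    # Before training, every non-expert parameter must match exactly; expert
    # parameters are EP-sharded and are checked separately after the step.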
34
35for (moe_name, moe_param), (zero_name, zero_param) in zip(
36moe_model.named_parameters(), zero_model.named_parameters()
37):
38if ".experts." in moe_name:
39continue
40assert moe_name == zero_name
41assert torch.allclose(
42moe_param.data, zero_param.data
43), f"{moe_name}\ntorch_param {moe_param.data}\nzero_param {zero_param.data}"
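
    # One training step: run forward/backward through both models on the same
    # batch (each rank draws its own data, since seeds differ per rank),
    # compare the losses, then apply the optimizer updates.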
    for _ in range(1):
        data = torch.randn(2, 4).bfloat16().cuda()
        label = torch.randint(0, 4, (2,)).cuda()

        moe_out = run_fwd_bwd(moe_model, data, label, criterion, moe_optimizer)
        zero_out = run_fwd_bwd(zero_model, data, label, criterion, zero_optimizer)
        assert torch.allclose(zero_out, moe_out)
        moe_optimizer.step()
        zero_optimizer.step()
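
        # After the step, all parameters must still agree within bf16
        # tolerance. An EP-sharded expert tensor holds this rank's contiguous
        # slice of experts along dim 0, so compare it against the matching
        # slice of the unsharded reference parameter.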
        for (moe_name, moe_param), (zero_name, zero_param) in zip(
            moe_model.named_parameters(), zero_model.named_parameters()
        ):
            assert moe_name == zero_name
            if is_moe_tensor(moe_param):
                param_size = moe_param.shape[0]
                zero_param = zero_param[local_rank * param_size : (local_rank + 1) * param_size]
            loose_close(moe_param.data, zero_param.data, dtype=moe_param.dtype)

        moe_optimizer.zero_grad()
        zero_optimizer.zero_grad()
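

# Per-process worker: initialize the distributed backend, give each rank its
# own seed, and run the comparison on this rank.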
def run_dist(rank, world_size, port, stage):
    colossalai.launch(config=dict(), rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    seed_all(42 + rank)
    run_zero_test(rank, stage=stage)
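

# Spawn a 2-process run for ZeRO stage 1 and stage 2;
# rerun_if_address_is_in_use retries the test if the rendezvous port is still
# occupied.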
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [2])
@pytest.mark.parametrize("stage", [1, 2])
@rerun_if_address_is_in_use()
def test_moe_zero_optim(world_size, stage):
    spawn(run_dist, world_size, stage=stage)


if __name__ == "__main__":
    test_moe_zero_optim(world_size=2, stage=1)