import pytest
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing import assert_close

import colossalai
from colossalai.accelerator import get_accelerator
from colossalai.legacy.amp import convert_to_apex_amp
from colossalai.nn.optimizer import HybridAdam
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
from colossalai.utils import set_seed
from colossalai.zero import GeminiDDP, GeminiOptimizer
from colossalai.zero.gemini.chunk import search_chunk_configuration
from tests.kit.model_zoo import model_zoo, run_fwd_bwd

PLACEMENT_CONFIGS = [
    {"placement_policy": "static", "shard_param_frac": 0.0},  # zero2: params fully replicated
    {"placement_policy": "static", "shard_param_frac": 1.0},  # zero3: params fully sharded
    {"placement_policy": "static", "shard_param_frac": 0.5},  # zero3-half: half of the params sharded
    {"placement_policy": "auto"},  # let Gemini place chunks automatically
]


def check_grad(model: GeminiDDP, torch_model: torch.nn.Module):
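    """Compare Gemini's gradients against the apex-AMP/DDP reference model's.

    Gemini keeps gradients in chunks (reusing the fp16 parameter chunks when
    ``reuse_fp16_chunk`` is set), so the relevant chunks are gathered first to
    make the gradient values addressable on this rank.
    """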
    chunk_manager = model.chunk_manager
    param_list = list(model.parameters())
    chunk_list = chunk_manager.get_chunks(param_list)
    if not model.reuse_fp16_chunk:
        # gradients live in dedicated grad chunks rather than in the param chunks
        chunk_list = [chunk.grad_chunk for chunk in chunk_list]
    for chunk in chunk_list:
        chunk_manager.access_chunk(chunk)

    for p0, p1 in zip(model.parameters(), torch_model.parameters()):
        assert_close(p0, p1.grad, rtol=1e-3, atol=5e-5)


@parameterize("placement_config", PLACEMENT_CONFIGS)
@parameterize("keep_gather", [False, True])
@parameterize("model_name", ["transformers_gpt_lm"])
@parameterize("use_grad_checkpoint", [False, True])
@parameterize("master_weights", [False, True])
def exam_gpt_fwd_bwd(
    placement_config,
    keep_gather,
    model_name: str,
    use_grad_checkpoint: bool = False,
    master_weights: bool = True,
):
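    """Run one forward/backward step with GeminiDDP and with an apex AMP DDP
    reference model, then check that the losses and gradients of the two match."""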
    init_device = get_accelerator().get_current_device()
    model_builder, data_gen_fn, output_transform_fn, loss_fn, *_ = next(
        iter(model_zoo.get_sub_registry(model_name).values())
    )

    # build the Gemini model and a plain CUDA reference model with identical weights
    set_seed(42)
    model = model_builder()

    set_seed(42)
    torch_model = model_builder().cuda()
    for torch_p, p in zip(torch_model.parameters(), model.parameters()):
        torch_p.data.copy_(p.data)

    if use_grad_checkpoint:
        model.gradient_checkpointing_enable()
        torch_model.gradient_checkpointing_enable()

    # search a chunk configuration, then pin the chunk size and gathering
    # behavior so every parameterization exercises the same chunk layout
    world_size = torch.distributed.get_world_size()
    config_dict, *_ = search_chunk_configuration(model, search_range_m=1, search_interval=100)
    config_dict[world_size]["chunk_size"] = 5000
    config_dict[world_size]["keep_gathered"] = keep_gather
    model = GeminiDDP(
        model, config_dict, init_device, pin_memory=True, **placement_config, master_weights=master_weights
    )
    optimizer = HybridAdam(model.parameters(), lr=1e-3)
    zero_optim = GeminiOptimizer(optimizer, model, initial_scale=1)

    # mirror Gemini's fp16 training on the reference model with apex AMP O2;
    # a fixed loss scale of 1 (matching initial_scale above) keeps losses comparable
    rank = dist.get_rank()
    amp_config = dict(opt_level="O2", keep_batchnorm_fp32=False, loss_scale=1, master_weights=master_weights)
    torch_optim = torch.optim.Adam(torch_model.parameters(), lr=1e-3)
    torch_model, torch_optim = convert_to_apex_amp(torch_model, torch_optim, amp_config)
    torch_model = DDP(torch_model, device_ids=[rank])

    # seed per rank so each rank draws different data, which exercises gradient reduction
    set_seed(rank)

    data = data_gen_fn()
    data = {k: v.cuda() if isinstance(v, torch.Tensor) else v for k, v in data.items()}

    torch_optim.zero_grad()
    zero_optim.zero_grad()

    # reset the seed before each run so both models draw identical dropout masks
    # (the same effect that torch_model.eval() would have)
    set_seed(42)
    torch_loss = run_fwd_bwd(torch_model, data, output_transform_fn, loss_fn, optimizer=torch_optim)
    set_seed(42)
    loss = run_fwd_bwd(model, data, output_transform_fn, loss_fn, optimizer=zero_optim)

    assert_close(torch_loss.float(), loss.float())

    check_grad(model, torch_model)


def run_dist(rank, world_size, port):
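    """Per-process entry point: launch the ColossalAI distributed context, then run the exam."""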
    config = {}
    colossalai.launch(config=config, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    exam_gpt_fwd_bwd()


@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 4])
@rerun_if_address_is_in_use()
def test_gpt(world_size):
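    """Spawn ``world_size`` processes; rerun if the rendezvous address is already in use."""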
    spawn(run_dist, world_size)


if __name__ == "__main__":
    test_gpt(1)