import pytest
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing import assert_close

import colossalai
from colossalai.legacy.amp import convert_to_apex_amp
from colossalai.nn.optimizer import HybridAdam
from colossalai.testing import DummyDataloader, parameterize, rerun_if_address_is_in_use, spawn
from colossalai.utils import set_seed
from colossalai.zero import GeminiDDP, GeminiOptimizer
from colossalai.zero.gemini.chunk import search_chunk_configuration
from tests.kit.model_zoo import model_zoo, run_fwd_bwd

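# Three static ZeRO-2 style configs (parameters never sharded; optimizer states
# offloaded to CPU at 0%, 100%, and 50%) plus the adaptive "auto" placement policy.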
PLACEMENT_CONFIGS = [
    {
        "placement_policy": "static",
        "shard_param_frac": 0.0,
        "offload_optim_frac": 0.0,
        "offload_param_frac": 0.0,
    },  # zero2
    {
        "placement_policy": "static",
        "shard_param_frac": 0.0,
        "offload_optim_frac": 1.0,
        "offload_param_frac": 0.0,
    },  # zero2-offload
    {
        "placement_policy": "static",
        "shard_param_frac": 0.0,
        "offload_optim_frac": 0.5,
        "offload_param_frac": 0.0,
    },  # zero2-offload-half
    {"placement_policy": "auto"},
]


def check_param(model: GeminiDDP, torch_model: torch.nn.Module):
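    """Check that every parameter of the Gemini model matches the DDP reference."""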
    zero_dict = model.state_dict(only_rank_0=False)
    torch_dict = torch_model.state_dict()

    for key, value in torch_dict.items():
        # DDP prefixes parameter names with 'module.'; strip it to match the Gemini state dict
        key = key[7:]
        assert key in zero_dict, "{} not in ZeRO dictionary.".format(key)
        temp_zero_value = zero_dict[key].to(device=value.device, dtype=value.dtype)
        # debug_print([0], "max range: ", key, torch.max(torch.abs(value - temp_zero_value)))
        assert_close(value, temp_zero_value, rtol=1e-3, atol=4e-3)


@parameterize("placement_config", PLACEMENT_CONFIGS)
@parameterize("model_name", ["transformers_gpt_lm"])
@parameterize("master_weights", [True, False])
def exam_grad_clipping(placement_config, model_name: str, master_weights: bool):
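    """Run a few training steps on an apex AMP (O2) DDP baseline and a GeminiDDP
    model, clipping gradients to the same max norm on both, and verify that the
    parameters stay in sync when master weights are enabled.
    """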
    set_seed(1912)
    model_builder, data_gen_fn, output_transform_fn, loss_fn, *_ = next(
        iter(model_zoo.get_sub_registry(model_name).values())
    )

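    # Reference model: fp16 training via apex AMP O2 with a static loss scale, wrapped in DDP.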
    torch_model = model_builder().cuda()
    amp_config = dict(opt_level="O2", keep_batchnorm_fp32=False, loss_scale=32)
    torch_optim = torch.optim.Adam(torch_model.parameters(), lr=1e-3)
    torch_model, torch_optim = convert_to_apex_amp(torch_model, torch_optim, amp_config)
    torch_model = DDP(torch_model, device_ids=[dist.get_rank()])

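    # Second copy of the model for Gemini, initialized with the baseline's weights
    # so the two runs start from an identical state.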
    model = model_builder()

    for torch_p, p in zip(torch_model.parameters(), model.parameters()):
        p.data.copy_(torch_p.data)

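    # Search a chunk configuration, then override it with a small fixed chunk size,
    # presumably to force parameters to spread across multiple chunks in this test.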
    world_size = torch.distributed.get_world_size()
    config_dict, *_ = search_chunk_configuration(model, search_range_m=1, search_interval=100)
    config_dict[world_size]["chunk_size"] = 5000
    config_dict[world_size]["keep_gathered"] = False
    if placement_config["placement_policy"] != "cuda":
        init_device = torch.device("cpu")
    else:
        init_device = None

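    # Wrap the second model with GeminiDDP, applying the chunk config and the
    # parameterized placement policy under test.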
    model = GeminiDDP(
        model,
        chunk_config_dict=config_dict,
        chunk_init_device=init_device,
        pin_memory=True,
        master_weights=master_weights,
        **placement_config,
    )

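    # GeminiOptimizer applies loss scaling and clips gradients to max_norm=1.0,
    # mirroring the explicit clip_grad_norm_ call on the baseline inside the loop below.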
    optimizer = HybridAdam(model.parameters(), lr=1e-3)
    zero_optim = GeminiOptimizer(optimizer, model, initial_scale=32, max_norm=1.0)

    model.train()
    torch_model.train()

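    # Seed differently per rank so each process draws different batches,
    # which exercises gradient synchronization across ranks.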
    set_seed(dist.get_rank() * 3 + 128)
    train_dataloader = DummyDataloader(data_gen_fn)
    for i, data in enumerate(train_dataloader):
        if i > 2:
            break
        data = {k: v.cuda() if isinstance(v, torch.Tensor) else v for k, v in data.items()}

        zero_optim.zero_grad()
        torch_optim.zero_grad()

        run_fwd_bwd(torch_model, data, output_transform_fn, loss_fn, optimizer=torch_optim)
        run_fwd_bwd(model, data, output_transform_fn, loss_fn, optimizer=zero_optim)

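        # Clip the baseline's fp32 master gradients to the same max norm (1.0) used by Gemini.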
        import apex.amp as apex_amp

        torch.nn.utils.clip_grad_norm_(apex_amp.master_params(torch_optim), 1.0)
        torch_optim.step()
        zero_optim.step()

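        # Compare parameters only when fp32 master weights are kept; pure fp16
        # updates presumably drift too much for the tight tolerances in check_param.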
        if master_weights:
            check_param(model, torch_model)


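# Per-process entry point: initialize the distributed environment on this rank,
# then run the parameterized test body.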
def run_dist(rank, world_size, port):
    config = {}
    colossalai.launch(config=config, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    exam_grad_clipping()


@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
@rerun_if_address_is_in_use()
def test_grad_clip(world_size):
    spawn(run_dist, world_size)


if __name__ == "__main__":
    test_grad_clip(2)