# GPT-2 pretraining setup
# Have 4 experts per layer (every 2 layers by default)
# So with 12 layers total:
# 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
# Experts would be in layers:
# parallelism settings (you will want to change these based on your cluster setup, ideally scheduling pipeline stages
# across the node boundaries)
"pipe_parallel_size": 1,
"model_parallel_size": 1,
"moe_expert_parallel_size": 1,
"num_attention_heads": 12,
"max_position_embeddings": 2048,
"no_weight_tying": true,
"gpt_j_residual": false,
"output_layer_parallelism": "column",
# these should provide some speedup but take a while to build, set to true if desired
"scaled_upper_triang_masked_softmax_fusion": false,
"bias_gelu_fusion": false,
"init_method": "small_init",
"output_layer_init_method": "wang_init",
# for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training
"zero_optimization": {
"allgather_partitions": True,
53
"allgather_bucket_size": 500000000,
55
"reduce_scatter": True,
56
"reduce_bucket_size": 500000000,
57
"contiguous_gradients": True,
# batch / data settings
"train_micro_batch_size_per_gpu": 4,
# activation checkpointing
"checkpoint_activations": true,
"checkpoint_num_layers": 1,
"partition_activations": true,
"synchronize_each_layer": true,
"gradient_clipping": 1.0,
"hidden_dropout": 0.0,
"attention_dropout": 0.0,
"loss_scale_window": 1000,
# misc. training settings
"train_iters": 320000,
"lr_decay_iters": 320000,
"distributed_backend": "nccl",
"lr_decay_style": "cosine",
"checkpoint_factor": 10000,
"eval_interval": 1000,
"steps_per_print": 10,
"keep_last_n_checkpoints": 4,
"wall_clock_breakdown": true,
"hostfile": "/mock_path"