# GPT-2 pretraining setup

# parallelism settings (you will want to change these based on your cluster setup, ideally scheduling pipeline stages
# across the node boundaries)
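# any GPUs left over after pipeline and model (tensor) parallelism are used for data parallelism:
# data_parallel_size = world_size / (pipe_parallel_size * model_parallel_size)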
"pipe_parallel_size": 1,
"model_parallel_size": 1,
"num_attention_heads": 32,
"max_position_embeddings": 2048,
"no_weight_tying": true,
"gpt_j_residual": false,
"output_layer_parallelism": "column",
# these should provide some speedup but take a while to build, set to true if desired
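# scaled_upper_triang_masked_softmax_fusion fuses the causally-masked softmax in attention,
# bias_gelu_fusion fuses the bias add + GeLU in the MLP, and layernorm_fusion uses a fused
# layernorm kernel; the build mentioned above happens the first time they are enabled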
"scaled_upper_triang_masked_softmax_fusion": false,
"bias_gelu_fusion": false,
"layernorm_fusion": false,
"init_method": "small_init",
"output_layer_init_method": "wang_init",
# for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training
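# the *_bucket_size values are element counts (5e8 elements here); larger buckets mean fewer,
# larger collective calls at the cost of more temporary memory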
"zero_optimization": {
  "allgather_partitions": True,
  "allgather_bucket_size": 500000000,
  "reduce_scatter": True,
  "reduce_bucket_size": 500000000,
  "contiguous_gradients": True,
},
# batch / data settings
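# effective global batch size = train_micro_batch_size_per_gpu * gradient_accumulation_steps * data_parallel_size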
"train_micro_batch_size_per_gpu": 4,
# activation checkpointing
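# recompute activations during the backward pass instead of storing them; checkpoint_num_layers
# sets how many layers go into each checkpointed block, and partition_activations additionally
# splits the saved activations across model-parallel ranks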
"checkpoint_activations": true,
"checkpoint_num_layers": 1,
"partition_activations": true,
"synchronize_each_layer": true,
"gradient_clipping": 1.0,
"attention_dropout": 0,
"loss_scale_window": 1000,
# misc. training settings
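# the cosine schedule decays the learning rate over the full lr_decay_iters = train_iters run;
# checkpoint_factor saves a checkpoint every 10000 iterations (only the 4 most recent are kept),
# and wall_clock_breakdown makes DeepSpeed log per-step timing of forward / backward / optimizer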
"train_iters": 320000,
"lr_decay_iters": 320000,
"distributed_backend": "nccl",
"lr_decay_style": "cosine",
"checkpoint_factor": 10000,
"eval_interval": 1000,
"steps_per_print": 10,
"keep_last_n_checkpoints": 4,
"wall_clock_breakdown": true,