gpt-neox

125M-moe.yml 
# GPT-2 pretraining setup
{
   # Have 4 experts per MoE layer (an MoE layer is inserted every 2 layers by default)
   # So with 12 layers total:
   # 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
   # Experts would be in layers:
   # 0, 2, 4, 6, 8, 10
   "num_experts": 4,
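   # Note: the every-2-layers placement described above is the NeoX default; as far as
   # I know it is controlled by an expert-interval setting (e.g. "expert_interval": 2),
   # which this file leaves at its default value.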

   # parallelism settings (you will want to change these based on your cluster setup, ideally scheduling pipeline stages
   # across the node boundaries)
   "pipe_parallel_size": 1,
   "model_parallel_size": 1,
   "moe_expert_parallel_size": 1,
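   # Sizing note (my own arithmetic, not part of the original config): data parallelism
   # is derived as world_size / (pipe_parallel_size * model_parallel_size), so e.g. a
   # single 8-GPU node with the 1/1 values above runs 8-way data parallel, and the 4
   # experts per MoE layer are split across moe_expert_parallel_size ranks (all on one rank here).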

   # model settings
   "num_layers": 12,
   "hidden_size": 768,
   "num_attention_heads": 12,
   "seq_length": 2048,
   "max_position_embeddings": 2048,
   "norm": "layernorm",
   "pos_emb": "rotary",
   "no_weight_tying": true,
   "gpt_j_residual": false,
   "output_layer_parallelism": "column",
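   # Rough size check (my arithmetic, not from the original file): each dense layer has
   # ~12 * hidden_size^2 = 12 * 768^2 ≈ 7.1M params, so 12 layers ≈ 85M, plus ~50k x 768
   # ≈ 39M token embeddings -- the GPT-3-Small "125M" shape. Each MoE layer then adds
   # extra expert FFN copies (~4.7M per additional expert) on top of the dense count.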

   # these should provide some speedup but take a while to build, set to true if desired
   "scaled_upper_triang_masked_softmax_fusion": false,
   "bias_gelu_fusion": false,
   "rope_fusion": false,

   # init methods
   "init_method": "small_init",
   "output_layer_init_method": "wang_init",
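   # As I understand these NeoX init options (worth verifying against your checkout):
   # "small_init" uses std ≈ sqrt(2 / (5 * hidden_size)) for weight matrices, and
   # "wang_init" scales output projections by ≈ 2 / (num_layers * sqrt(hidden_size)).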

   # optimizer settings
   "optimizer": {
     "type": "Adam",
     "params": {
       "lr": 0.0006,
       "betas": [0.9, 0.95],
       "eps": 1.0e-8,
     }
   },
   "min_lr": 0.00006,
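   # The cosine schedule configured below decays from the 6.0e-4 peak down to min_lr,
   # i.e. to 10% of the peak learning rate (6.0e-5), over lr_decay_iters.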

   # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training
   "zero_optimization": {
     "stage": 1,
     "allgather_partitions": true,
     "allgather_bucket_size": 500000000,
     "overlap_comm": true,
     "reduce_scatter": true,
     "reduce_bucket_size": 500000000,
     "contiguous_gradients": true,
   },
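   # ZeRO stage 1 partitions only the optimizer states across data-parallel ranks;
   # gradients and fp16 weights stay replicated. The 500000000-element bucket sizes are,
   # to my understanding, just communication chunk sizes and do not change the math.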

   # batch / data settings
   "train_micro_batch_size_per_gpu": 4,
   "data_impl": "mmap",
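   # Effective batch size note (assumption: gradient accumulation is left at its default
   # of 1): global batch = train_micro_batch_size_per_gpu * grad_accum_steps *
   # data_parallel_size, e.g. 4 * 1 * 8 = 32 sequences per step on an 8-GPU data-parallel run.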

   # activation checkpointing
   "checkpoint_activations": true,
   "checkpoint_num_layers": 1,
   "partition_activations": true,
   "synchronize_each_layer": true,
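   # With checkpoint_num_layers: 1 every transformer layer is a recompute boundary, trading
   # extra forward compute for lower activation memory; partition_activations additionally
   # splits saved activations across model-parallel ranks (effectively a no-op with model_parallel_size: 1).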

   # regularization
   "gradient_clipping": 1.0,
   "weight_decay": 0.1,
   "hidden_dropout": 0.0,
   "attention_dropout": 0.0,

   # precision settings
   "fp16": {
     "enabled": true,
     "loss_scale": 0,
     "loss_scale_window": 1000,
     "hysteresis": 2,
     "min_loss_scale": 1
   },
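   # "loss_scale": 0 selects DeepSpeed's dynamic loss scaling: the scale is raised after
   # loss_scale_window (1000) overflow-free steps, backed off on overflow with the given
   # hysteresis, and never drops below min_loss_scale.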

   # misc. training settings
   "train_iters": 320000,
   "lr_decay_iters": 320000,
   "distributed_backend": "nccl",
   "lr_decay_style": "cosine",
   "warmup": 0.01,
   "checkpoint_factor": 10000,
   "eval_interval": 1000,
   "eval_iters": 10,
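   # Schedule arithmetic (my reading of the fractions, worth double-checking): warmup 0.01
   # of the 320000-iteration schedule ≈ 3200 warmup steps; a checkpoint is written every
   # 10000 iterations and evaluation runs every 1000 iterations for 10 batches.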

   # logging
   "log_interval": 10,
   "steps_per_print": 10,
   "keep_last_n_checkpoints": 4,
   "wall_clock_breakdown": true,

   # networking
   "hostfile": "/mock_path"
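   # "/mock_path" is a placeholder; a DeepSpeed hostfile typically lists one
   # "hostname slots=<gpus>" entry per node.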
}
