{
2"pipe_parallel_size": 1,3"model_parallel_size": 1,4"make_vocab_size_divisible_by": 1,5

  # model settings
  "num_layers": 48,
  "hidden_size": 8192,
  "num_attention_heads": 64,
  "num_kv_heads": 8,
  # Codellama was uptrained on 16k token sequence lengths
  # with rotary_emb_base adjusted to 1_000_000.
  "seq_length": 16384,
  "max_position_embeddings": 16384,
  "pos_emb": "rotary",
  "rotary_pct": 1,
  "rotary_emb_base": 1000000,
  "no_weight_tying": true,
  "gpt_j_residual": false,
  "output_layer_parallelism": "column",
  "norm": "rmsnorm",
  "rms_norm_epsilon": 1.0e-5,
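  # 64 query heads sharing 8 KV heads is grouped-query attention
  # (8 query heads per KV head); head_dim = 8192 / 64 = 128.
  # Raising the rotary base from the usual 10_000 lowers the RoPE
  # frequencies, so the longest wavelengths span the full 16k positions.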
24"attention_config": [[["flash"], 48]],25
26"scaled_upper_triang_masked_softmax_fusion": true,27"bias_gelu_fusion": false,28"use_bias_in_norms": false,29"use_bias_in_attn_linear": false,30"mlp_type": "llama",31"activation": "silu",32}
}