llm-adapters/export_state_dict_checkpoint.py
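# Export a LoRA-fine-tuned LLaMA model back to the checkpoint format of
# Meta's original release: the merged weights go to ./ckpt/consolidated.00.pth
# and the architecture hyperparameters to ./ckpt/params.json, so the result
# can be loaded by the original (non-HuggingFace) llama inference code.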
import json
import os

import torch
from peft import PeftModel
from transformers import LlamaForCausalLM, LlamaTokenizer
BASE_MODEL = os.environ.get("BASE_MODEL", None)
assert (
    BASE_MODEL
), "Please specify a value for BASE_MODEL environment variable, e.g. `export BASE_MODEL=decapoda-research/llama-7b-hf`"  # noqa: E501
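# Note: the tokenizer is loaded but never written out; this only verifies
# that BASE_MODEL points at a complete HF LLaMA checkpoint.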
tokenizer = LlamaTokenizer.from_pretrained(BASE_MODEL)
base_model = LlamaForCausalLM.from_pretrained(
    BASE_MODEL,
    load_in_8bit=False,
    torch_dtype=torch.float16,
    device_map={"": "cpu"},
)

lora_model = PeftModel.from_pretrained(
    base_model,
    "tloen/alpaca-lora-7b",
    device_map={"": "cpu"},
    torch_dtype=torch.float16,
)
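# tloen/alpaca-lora-7b trains LoRA adapters only on the q_proj and v_proj
# attention projections, so those are the only weights that need merging.
# With the early peft releases this script was written against, setting
# merge_weights and then leaving training mode (train(False) below) folds
# the scaled LoRA update (lora_alpha / r * B @ A) into the base weights.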
# merge weights
for layer in lora_model.base_model.model.model.layers:
    layer.self_attn.q_proj.merge_weights = True
    layer.self_attn.v_proj.merge_weights = True

lora_model.train(False)
lora_model_sd = lora_model.state_dict()
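# Architecture hyperparameters of LLaMA-7B, matching the params.json shipped
# with Meta's original 7B release (which likewise sets vocab_size to -1).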
params = {
    "dim": 4096,
    "multiple_of": 256,
    "n_heads": 32,
    "n_layers": 32,
    "norm_eps": 1e-06,
    "vocab_size": -1,
}
n_layers = params["n_layers"]
n_heads = params["n_heads"]
dim = params["dim"]
dims_per_head = dim // n_heads
base = 10000.0
inv_freq = 1.0 / (
    base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head)
)
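# inv_freq recomputes the rotary-embedding frequency table (1 / base^(2i/d));
# the script carries it along but never actually uses it below.

# HuggingFace's LLaMA conversion permutes the rows of the q/k projection
# matrices to fit its half-split rotary implementation. unpermute() undoes
# that so the exported tensors match Meta's original interleaved layout;
# permute() is the inverse mapping and is unused here, kept for reference.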
def permute(w):
    return (
        w.view(n_heads, dim // n_heads // 2, 2, dim)
        .transpose(1, 2)
        .reshape(dim, dim)
    )


def unpermute(w):
    return (
        w.view(n_heads, 2, dim // n_heads // 2, dim)
        .transpose(1, 2)
        .reshape(dim, dim)
    )
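# Map HF parameter names to the names Meta's checkpoint format expects, e.g.
# "model.layers.0.self_attn.q_proj.weight" -> "layers.0.attention.wq.weight".
# Returning None marks keys with no counterpart in the original format:
# rotary inv_freq buffers and the (already merged) LoRA adapter tensors.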
def translate_state_dict_key(k):  # noqa: C901
    k = k.replace("base_model.model.", "")
    if k == "model.embed_tokens.weight":
        return "tok_embeddings.weight"
    elif k == "model.norm.weight":
        return "norm.weight"
    elif k == "lm_head.weight":
        return "output.weight"
    elif k.startswith("model.layers."):
        layer = k.split(".")[2]
        if k.endswith(".self_attn.q_proj.weight"):
            return f"layers.{layer}.attention.wq.weight"
        elif k.endswith(".self_attn.k_proj.weight"):
            return f"layers.{layer}.attention.wk.weight"
        elif k.endswith(".self_attn.v_proj.weight"):
            return f"layers.{layer}.attention.wv.weight"
        elif k.endswith(".self_attn.o_proj.weight"):
            return f"layers.{layer}.attention.wo.weight"
        elif k.endswith(".mlp.gate_proj.weight"):
            return f"layers.{layer}.feed_forward.w1.weight"
        elif k.endswith(".mlp.down_proj.weight"):
            return f"layers.{layer}.feed_forward.w2.weight"
        elif k.endswith(".mlp.up_proj.weight"):
            return f"layers.{layer}.feed_forward.w3.weight"
        elif k.endswith(".input_layernorm.weight"):
            return f"layers.{layer}.attention_norm.weight"
        elif k.endswith(".post_attention_layernorm.weight"):
            return f"layers.{layer}.ffn_norm.weight"
        elif k.endswith("rotary_emb.inv_freq") or "lora" in k:
            return None
        else:
            print(layer, k)
            raise NotImplementedError
    else:
        print(k)
        raise NotImplementedError
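# Rename every tensor, un-permuting the q/k projections on the way, and drop
# the keys translate_state_dict_key() mapped to None.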
new_state_dict = {}
for k, v in lora_model_sd.items():
    new_k = translate_state_dict_key(k)
    if new_k is not None:
        if "wq" in new_k or "wk" in new_k:
            new_state_dict[new_k] = unpermute(v)
        else:
            new_state_dict[new_k] = v
os.makedirs("./ckpt", exist_ok=True)

torch.save(new_state_dict, "./ckpt/consolidated.00.pth")

with open("./ckpt/params.json", "w") as f:
    json.dump(params, f)
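# Usage sketch (model IDs as in the script above):
#   export BASE_MODEL=decapoda-research/llama-7b-hf
#   python export_state_dict_checkpoint.py
# This writes ./ckpt/consolidated.00.pth and ./ckpt/params.json in the
# single-shard layout of the original 7B release.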