import argparse
import os

import torch
from transformers import AutoModel, AutoTokenizer

ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", help="Path to MiniCPM-V model")
args = ap.parse_args()
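
# load the full MiniCPM-V checkpoint; trust_remote_code is needed for its custom model class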
model = AutoModel.from_pretrained(args.model, trust_remote_code=True, local_files_only=True)
checkpoint = model.state_dict()
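
# collect the multimodal projector ("resampler") tensor names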
mm_tensors = [k for k, v in checkpoint.items() if k.startswith("resampler")]
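
# store those tensors as float32 in a standalone projector checkpoint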
projector = {name: checkpoint[name].float() for name in mm_tensors}
torch.save(projector, f"{args.model}/minicpmv.projector")
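
# the vision tower ("vpm") tensors become their own CLIP-style checkpoint, prefix stripped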
clip_tensors = [k for k, v in checkpoint.items() if k.startswith("vpm")]
if len(clip_tensors) > 0:
    clip = {name.replace("vpm.", ""): checkpoint[name].float() for name in clip_tensors}
    torch.save(clip, f"{args.model}/minicpmv.clip")
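
# blank out added_tokens.json (if present) so the extracted LLM converts cleanly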
if os.path.exists(f"{args.model}/added_tokens.json"):
    with open(f"{args.model}/added_tokens.json", "w") as f:
        f.write("{}\n")  # overwrite with an empty JSON object
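
# point the saved config's auto_map at the MiniCPM classes so the split-out LLM loads on its own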
config = model.llm.config
config.auto_map = {
    "AutoConfig": "configuration_minicpm.MiniCPMConfig",
    "AutoModel": "modeling_minicpm.MiniCPMModel",
    "AutoModelForCausalLM": "modeling_minicpm.MiniCPMForCausalLM",
    "AutoModelForSeq2SeqLM": "modeling_minicpm.MiniCPMForCausalLM",
    "AutoModelForSequenceClassification": "modeling_minicpm.MiniCPMForSequenceClassification"
}
model.llm.save_pretrained(f"{args.model}/model")
tok = AutoTokenizer.from_pretrained(args.model, trust_remote_code=True)
tok.save_pretrained(f"{args.model}/model")

print(f"Now you can convert {args.model} to a regular LLaMA GGUF file.")
print(f"Also, use {args.model}/minicpmv.projector to prepare a minicpmv-encoder.gguf file.")