import argparse
import glob
import os
import torch

ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True, help="Path to LLaVA v1.5 model")
args = ap.parse_args()

# find the checkpoint shard that includes the multimodal projector weights
# (assumed to sort last among the pytorch_model*.bin shards)
path = sorted(glob.glob(f"{args.model}/pytorch_model*.bin"))[-1]
checkpoint = torch.load(path, map_location="cpu")  # map to CPU; no GPU is needed for the surgery

# get a list of mm tensor names
mm_tensors = [k for k, v in checkpoint.items() if k.startswith("model.mm_projector")]
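# (in LLaVA v1.5 checkpoints the projector is a small MLP, so these are typically
# names such as "model.mm_projector.0.weight" and "model.mm_projector.2.bias")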

# store these tensors in a new dictionary and torch.save them
projector = {name: checkpoint[name].float() for name in mm_tensors}
torch.save(projector, f"{args.model}/llava.projector")
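
# Optional sanity check: reload the file and inspect what was captured, e.g.
#   proj = torch.load(f"{args.model}/llava.projector", map_location="cpu")
#   print(sorted(proj.keys()))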

# BakLLaVA models also ship their CLIP vision tower tensors in the same checkpoint
clip_tensors = [k for k, v in checkpoint.items() if k.startswith("model.vision_tower")]
if len(clip_tensors) > 0:
    # strip the nested "vision_tower.vision_tower." wrapper from the tensor names
    clip = {name.replace("vision_tower.vision_tower.", ""): checkpoint[name].float() for name in clip_tensors}
    torch.save(clip, f"{args.model}/llava.clip")
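    # note: the rewritten keys keep their leading "model." prefix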

    # added tokens have to be removed so that Mistral-based models can be converted
    if os.path.exists(f"{args.model}/added_tokens.json"):
        with open(f"{args.model}/added_tokens.json", "w") as f:
            f.write("{}")  # overwrite with an empty JSON object

print(f"Now you can convert {args.model} to a regular LLaMA GGUF file.")
print(f"Also, use {args.model}/llava.projector to prepare a llava-encoder.gguf file.")