"""
Usage:
python3 -m fastchat.serve.cli --model-name ~/model_weights/llama-7b
"""
import argparse

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

from pipeline.serve.conversation import conv_templates, SeparatorStyle
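

# Streaming generation adapted from FastChat: the prompt is encoded once to build
# the key/value cache, then each step feeds only the newly sampled token back in,
# decoding and yielding the partial output every `stream_interval` steps.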
@torch.inference_mode()
def generate_stream(tokenizer, model, params, device, context_len=2048, stream_interval=2):
    """Adapted from fastchat/serve/model_worker.py::generate_stream"""

    prompt = params["prompt"]
    l_prompt = len(prompt)
    temperature = float(params.get("temperature", 1.0))
    max_new_tokens = int(params.get("max_new_tokens", 256))
    stop_str = params.get("stop", None)

    input_ids = tokenizer(prompt).input_ids
    output_ids = list(input_ids)

    # Keep only as much prompt as fits alongside the requested new tokens.
    max_src_len = context_len - max_new_tokens - 8
    input_ids = input_ids[-max_src_len:]

    for i in range(max_new_tokens):
        if i == 0:
            # First step: run the whole (truncated) prompt and build the KV cache.
            out = model(torch.as_tensor([input_ids], device=device), use_cache=True)
            logits = out.logits
            past_key_values = out.past_key_values
        else:
            # Later steps: feed only the newly sampled token and reuse the cache.
            attention_mask = torch.ones(1, past_key_values[0][0].shape[-2] + 1, device=device)
            out = model(
                input_ids=torch.as_tensor([[token]], device=device),
                use_cache=True,
                attention_mask=attention_mask,
                past_key_values=past_key_values,
            )
            logits = out.logits
            past_key_values = out.past_key_values

        last_token_logits = logits[0][-1]
        if temperature < 1e-4:
            # Near-zero temperature: decode greedily.
            token = int(torch.argmax(last_token_logits))
        else:
            probs = torch.softmax(last_token_logits / temperature, dim=-1)
            token = int(torch.multinomial(probs, num_samples=1))

        output_ids.append(token)

        if token == tokenizer.eos_token_id:
            stopped = True
        else:
            stopped = False

        if i % stream_interval == 0 or i == max_new_tokens - 1 or stopped:
            output = tokenizer.decode(output_ids, skip_special_tokens=True)
            pos = output.rfind(stop_str, l_prompt)
            if pos != -1:
                # Truncate at the stop string and end generation.
                output = output[:pos]
                stopped = True
            yield output

        if stopped:
            break
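

# Interactive chat loop: load the tokenizer and model according to the CLI flags,
# maintain the conversation template turn by turn, and print the assistant reply
# incrementally as generate_stream yields progressively longer partial outputs.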
def main(args):
    model_name = args.model_name
    num_gpus = args.num_gpus

    if args.device == "cuda":
        kwargs = {"torch_dtype": torch.float16}
        if num_gpus == "auto":
            kwargs["device_map"] = "auto"
        else:
            num_gpus = int(num_gpus)
            if num_gpus != 1:
                # Shard the model across the requested GPUs.
                kwargs.update({
                    "device_map": "auto",
                    "max_memory": {i: "13GiB" for i in range(num_gpus)},
                })
    elif args.device == "cpu":
        kwargs = {}
    else:
        raise ValueError(f"Invalid device: {args.device}")

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name, low_cpu_mem_usage=True, **kwargs)

    if args.device == "cuda" and num_gpus == 1:
        model.cuda()

    conv = conv_templates[args.conv_template].copy()
    while True:
        inp = input(f"{conv.roles[0]}: ")
        if not inp:
            break

        conv.append_message(conv.roles[0], inp)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

        params = {
            "prompt": prompt,
            "temperature": args.temperature,
            "max_new_tokens": args.max_new_tokens,
            "stop": conv.sep if conv.sep_style == SeparatorStyle.SINGLE else conv.sep2,
        }

        print(f"{conv.roles[1]}: ", end="", flush=True)
        pre = 0
        for outputs in generate_stream(tokenizer, model, params, args.device):
            outputs = outputs[len(prompt) + 1 :].strip()
            outputs = outputs.split(" ")
            now = len(outputs)
            if now - 1 > pre:
                # Print only the words that have stopped changing between updates.
                print(" ".join(outputs[pre : now - 1]), end=" ", flush=True)
                pre = now - 1
        print(" ".join(outputs[pre:]), flush=True)

        conv.messages[-1][-1] = " ".join(outputs)

        if args.debug:
            print("\n", {"prompt": prompt, "outputs": outputs}, "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-name", type=str, default="facebook/opt-350m")
    parser.add_argument("--num-gpus", type=str, default="1")
    parser.add_argument("--device", type=str, choices=["cuda", "cpu"], default="cuda")
    parser.add_argument("--conv-template", type=str, default="v1")
    parser.add_argument("--temperature", type=float, default=0.7)
    parser.add_argument("--max-new-tokens", type=int, default=512)
    parser.add_argument("--debug", action="store_true")
    args = parser.parse_args()
    main(args)
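
# Example invocation sharding a checkpoint across two GPUs (a sketch; the exact
# module path depends on where this script lives, e.g. pipeline/serve/cli.py,
# and the checkpoint location is illustrative):
#   python3 -m pipeline.serve.cli --model-name ~/model_weights/llama-7b --num-gpus 2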