"""
Usage:
python3 -m fastchat.serve.cli --model ~/model_weights/llama-7b
"""
import argparse

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

from pipeline.serve.conversation import conv_templates, SeparatorStyle


@torch.inference_mode()
def generate_stream(tokenizer, model, params, device, context_len=2048, stream_interval=2):
    """Adapted from fastchat/serve/model_worker.py::generate_stream"""

    prompt = params["prompt"]
    l_prompt = len(prompt)
    temperature = float(params.get("temperature", 1.0))
    max_new_tokens = int(params.get("max_new_tokens", 256))
    stop_str = params.get("stop", None)

    input_ids = tokenizer(prompt).input_ids
    output_ids = list(input_ids)

    # Truncate the prompt so that prompt plus generated tokens fit in the context window.
    max_src_len = context_len - max_new_tokens - 8
    input_ids = input_ids[-max_src_len:]

    for i in range(max_new_tokens):
        if i == 0:
            # First step: run the full prompt and cache the key/value states.
            out = model(torch.as_tensor([input_ids], device=device), use_cache=True)
            logits = out.logits
            past_key_values = out.past_key_values
        else:
            # Later steps: feed only the last sampled token and reuse the cache.
            attention_mask = torch.ones(1, past_key_values[0][0].shape[-2] + 1, device=device)
            out = model(
                input_ids=torch.as_tensor([[token]], device=device),
                use_cache=True,
                attention_mask=attention_mask,
                past_key_values=past_key_values,
            )
            logits = out.logits
            past_key_values = out.past_key_values

        last_token_logits = logits[0][-1]
        if temperature < 1e-4:
            # Near-zero temperature: decode greedily.
            token = int(torch.argmax(last_token_logits))
        else:
            probs = torch.softmax(last_token_logits / temperature, dim=-1)
            token = int(torch.multinomial(probs, num_samples=1))

        output_ids.append(token)

        stopped = token == tokenizer.eos_token_id

        if i % stream_interval == 0 or i == max_new_tokens - 1 or stopped:
            output = tokenizer.decode(output_ids, skip_special_tokens=True)
            if stop_str:
                # Cut the output at the stop string if it appears after the prompt.
                pos = output.rfind(stop_str, l_prompt)
                if pos != -1:
                    output = output[:pos]
                    stopped = True
            yield output

        if stopped:
            break

    del past_key_values

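# A minimal sketch of driving generate_stream() directly, outside the CLI loop in
# main() below (illustrative only; assumes `model` and `tokenizer` are already
# loaded, e.g. via AutoModelForCausalLM/AutoTokenizer, and the model is on "cuda"):
#
#   params = {"prompt": "Hello!", "temperature": 0.7, "max_new_tokens": 32, "stop": "\n"}
#   for partial in generate_stream(tokenizer, model, params, device="cuda"):
#       print(partial)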

def main(args):
    model_name = args.model_name
    num_gpus = args.num_gpus

    # Model
    if args.device == "cuda":
        kwargs = {"torch_dtype": torch.float16}
        if num_gpus == "auto":
            kwargs["device_map"] = "auto"
        else:
            num_gpus = int(num_gpus)
            if num_gpus != 1:
                # Shard the weights across the GPUs, capping each device at 13 GiB.
                kwargs.update(
                    {
                        "device_map": "auto",
                        "max_memory": {i: "13GiB" for i in range(num_gpus)},
                    }
                )
    elif args.device == "cpu":
        kwargs = {}
    else:
        raise ValueError(f"Invalid device: {args.device}")

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name, low_cpu_mem_usage=True, **kwargs)

    if args.device == "cuda" and num_gpus == 1:
        model.cuda()

    # Chat
    conv = conv_templates[args.conv_template].copy()
    while True:
        try:
            inp = input(f"{conv.roles[0]}: ")
        except EOFError:
            inp = ""
        if not inp:
            print("exit...")
            break

        conv.append_message(conv.roles[0], inp)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

        params = {
            "model": model_name,
            "prompt": prompt,
            "temperature": args.temperature,
            "max_new_tokens": args.max_new_tokens,
            "stop": conv.sep if conv.sep_style == SeparatorStyle.SINGLE else conv.sep2,
        }

        print(f"{conv.roles[1]}: ", end="", flush=True)
        pre = 0
        # Stream the reply, printing complete words as they become available.
        for outputs in generate_stream(tokenizer, model, params, args.device):
            outputs = outputs[len(prompt) + 1 :].strip()
            outputs = outputs.split(" ")
            now = len(outputs)
            if now - 1 > pre:
                print(" ".join(outputs[pre : now - 1]), end=" ", flush=True)
                pre = now - 1
        print(" ".join(outputs[pre:]), flush=True)

        conv.messages[-1][-1] = " ".join(outputs)

        if args.debug:
            print("\n", {"prompt": prompt, "outputs": outputs}, "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-name", type=str, default="facebook/opt-350m")
    parser.add_argument("--num-gpus", type=str, default="1")
    parser.add_argument("--device", type=str, choices=["cuda", "cpu"], default="cuda")
    parser.add_argument("--conv-template", type=str, default="v1")
    parser.add_argument("--temperature", type=float, default=0.7)
    parser.add_argument("--max-new-tokens", type=int, default=512)
    parser.add_argument("--debug", action="store_true")
    args = parser.parse_args()
    main(args)
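# Example invocation (the module path is illustrative; any causal LM loadable by
# AutoModelForCausalLM should work with the flags defined above):
#   python3 -m pipeline.serve.cli --model-name facebook/opt-350m --device cpu --max-new-tokens 64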