llava-cli.cpp
#include "arg.h"
2
#include "base64.hpp"
3
#include "log.h"
4
#include "common.h"
5
#include "sampling.h"
6
#include "clip.h"
7
#include "llava.h"
8
#include "llama.h"
9
#include "ggml.h"
10

11
#include <cstdio>
12
#include <cstdlib>
13
#include <cstring>
14
#include <vector>
15

16
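// decode `tokens` in chunks of at most n_batch, advancing *n_past by the number of tokens evaluated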
static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past) {
    int N = (int) tokens.size();
    for (int i = 0; i < N; i += n_batch) {
        int n_eval = (int) tokens.size() - i;
        if (n_eval > n_batch) {
            n_eval = n_batch;
        }
        if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) {
            LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
            return false;
        }
        *n_past += n_eval;
    }
    return true;
}

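// decode a single token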
static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
    std::vector<llama_token> tokens;
    tokens.push_back(id);
    return eval_tokens(ctx_llama, tokens, 1, n_past);
}

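// tokenize and decode a string, optionally prepending BOS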
static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
    std::string              str2     = str;
    std::vector<llama_token> embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos, true);
    return eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
}

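// sample the next token, accept it, feed it back into the context, and return its text piece ("</s>" for end-of-generation tokens)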
static const char * sample(struct gpt_sampler * smpl,
                           struct llama_context * ctx_llama,
                           int * n_past) {
    const llama_token id = gpt_sampler_sample(smpl, ctx_llama, -1);
    gpt_sampler_accept(smpl, id, true);
    static std::string ret;
    if (llama_token_is_eog(llama_get_model(ctx_llama), id)) {
        ret = "</s>";
    } else {
        ret = llama_token_to_piece(ctx_llama, id);
    }
    eval_id(ctx_llama, id, n_past);
    return ret.c_str();
}

static const char* IMG_BASE64_TAG_BEGIN = "<img src=\"data:image/jpeg;base64,";
static const char* IMG_BASE64_TAG_END = "\">";

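// locate the base64 image tag in the prompt; an output is std::string::npos if the corresponding marker is missing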
static void find_image_tag_in_prompt(const std::string& prompt, size_t& begin_out, size_t& end_out) {
    begin_out = prompt.find(IMG_BASE64_TAG_BEGIN);
    end_out = prompt.find(IMG_BASE64_TAG_END, (begin_out == std::string::npos) ? 0UL : begin_out);
}

static bool prompt_contains_image(const std::string& prompt) {
    size_t begin, end;
    find_image_tag_in_prompt(prompt, begin, end);
    return (begin != std::string::npos);
}

// decodes the base64 image embedded in the prompt and builds an image embedding from it
static llava_image_embed * llava_image_embed_make_with_prompt_base64(struct clip_ctx * ctx_clip, int n_threads, const std::string& prompt) {
    size_t img_base64_str_start, img_base64_str_end;
    find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end);
    if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) {
        LOG_ERR("%s: invalid base64 image tag. must be %s<base64 byte string>%s\n", __func__, IMG_BASE64_TAG_BEGIN, IMG_BASE64_TAG_END);
        return NULL;
    }

    auto base64_bytes_start = img_base64_str_start + strlen(IMG_BASE64_TAG_BEGIN);
    auto base64_bytes_count = img_base64_str_end - base64_bytes_start;
    auto base64_str = prompt.substr(base64_bytes_start, base64_bytes_count);

    auto required_bytes = base64::required_encode_size(base64_str.size());
    auto img_bytes = std::vector<unsigned char>(required_bytes);
    base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin());

    auto embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, img_bytes.data(), img_bytes.size());
    if (!embed) {
        LOG_ERR("%s: could not load image from base64 string.\n", __func__);
        return NULL;
    }

    return embed;
}

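// replaces the base64 image tag in the prompt with `replacement`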
static std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") {
    size_t begin, end;
    find_image_tag_in_prompt(prompt, begin, end);
    if (begin == std::string::npos || end == std::string::npos) {
        return prompt;
    }
    auto pre = prompt.substr(0, begin);
    auto post = prompt.substr(end + strlen(IMG_BASE64_TAG_END));
    return pre + replacement + post;
}

struct llava_context {
    struct clip_ctx * ctx_clip = NULL;
    struct llama_context * ctx_llama = NULL;
    struct llama_model * model = NULL;
};

static void print_usage(int, char ** argv) {
    LOG("\n example usage:\n");
    LOG("\n     %s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> --image <path/to/another/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
    LOG("\n note: a lower temperature value like 0.1 is recommended for better quality.\n");
}

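// load the image either from the base64 tag embedded in the prompt or from the file path given on the command line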
static struct llava_image_embed * load_image(llava_context * ctx_llava, gpt_params * params, const std::string & fname) {
    // load and preprocess the image
    llava_image_embed * embed = NULL;
    auto prompt = params->prompt;
    if (prompt_contains_image(prompt)) {
        if (!params->image.empty()) {
            LOG_INF("using base64 encoded image instead of command line image path\n");
        }
        embed = llava_image_embed_make_with_prompt_base64(ctx_llava->ctx_clip, params->cpuparams.n_threads, prompt);
        if (!embed) {
            LOG_ERR("%s: can't load image from prompt\n", __func__);
            return NULL;
        }
        params->prompt = remove_image_from_prompt(prompt);
    } else {
        embed = llava_image_embed_make_with_filename(ctx_llava->ctx_clip, params->cpuparams.n_threads, fname.c_str());
        if (!embed) {
            fprintf(stderr, "%s: is %s really an image file?\n", __func__, fname.c_str());
            return NULL;
        }
    }

    return embed;
}

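// evaluate the system prompt, the image embedding and the user prompt, then sample the response token by token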
static void process_prompt(struct llava_context * ctx_llava, struct llava_image_embed * image_embed, gpt_params * params, const std::string & prompt) {
    int n_past = 0;

    const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict;

    std::string system_prompt, user_prompt;
    size_t image_pos = prompt.find("<image>");
    if (image_pos != std::string::npos) {
        // new templating mode: Provide the full prompt including system message and use <image> as a placeholder for the image
        system_prompt = prompt.substr(0, image_pos);
        user_prompt = prompt.substr(image_pos + std::string("<image>").length());
        LOG_INF("system_prompt: %s\n", system_prompt.c_str());
        if (params->verbose_prompt) {
            auto tmp = ::llama_tokenize(ctx_llava->ctx_llama, system_prompt, true, true);
            for (int i = 0; i < (int) tmp.size(); i++) {
                LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
            }
        }
        LOG_INF("user_prompt: %s\n", user_prompt.c_str());
        if (params->verbose_prompt) {
            auto tmp = ::llama_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
            for (int i = 0; i < (int) tmp.size(); i++) {
                LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
            }
        }
    } else {
        // llava-1.5 native mode
        system_prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER:";
        user_prompt = prompt + "\nASSISTANT:";
        if (params->verbose_prompt) {
            auto tmp = ::llama_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
            for (int i = 0; i < (int) tmp.size(); i++) {
                LOG_INF("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
            }
        }
    }

    eval_string(ctx_llava->ctx_llama, system_prompt.c_str(), params->n_batch, &n_past, true);
    llava_eval_image_embed(ctx_llava->ctx_llama, image_embed, params->n_batch, &n_past);
    eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false);

    // generate the response

    LOG("\n");

    struct gpt_sampler * smpl = gpt_sampler_init(ctx_llava->model, params->sparams);
    if (!smpl) {
        LOG_ERR("%s: failed to initialize sampling subsystem\n", __func__);
        exit(1);
    }

    std::string response = "";
    for (int i = 0; i < max_tgt_len; i++) {
        const char * tmp = sample(smpl, ctx_llava->ctx_llama, &n_past);
        response += tmp;
        if (strcmp(tmp, "</s>") == 0) break;
        if (strstr(tmp, "###")) break; // Yi-VL behavior
        LOG("%s", tmp);
        if (strstr(response.c_str(), "<|im_end|>")) break; // Yi-34B llava-1.6 - for some reason those decode not as the correct token (tokenizer works)
        if (strstr(response.c_str(), "<|im_start|>")) break; // Yi-34B llava-1.6
        if (strstr(response.c_str(), "USER:")) break; // mistral llava-1.6

        fflush(stdout);
    }

    gpt_sampler_free(smpl);
    LOG("\n");
}

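// initialize the llama backend and load the language model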
static struct llama_model * llava_init(gpt_params * params) {
    llama_backend_init();
    llama_numa_init(params->numa);

    llama_model_params model_params = llama_model_params_from_gpt_params(*params);

    llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
    if (model == NULL) {
        LOG_ERR("%s: unable to load model\n", __func__);
        return NULL;
    }
    return model;
}

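// load the CLIP model and create a llama_context large enough to hold the image embeddings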
static struct llava_context * llava_init_context(gpt_params * params, llama_model * model) {
    const char * clip_path = params->mmproj.c_str();

    auto prompt = params->prompt;
    if (prompt.empty()) {
        prompt = "describe the image in detail.";
    }

    auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1);

    llama_context_params ctx_params = llama_context_params_from_gpt_params(*params);
    ctx_params.n_ctx           = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings

    llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params);

    if (ctx_llama == NULL) {
        LOG_ERR("%s: failed to create the llama_context\n", __func__);
        return NULL;
    }

    auto * ctx_llava = (struct llava_context *)malloc(sizeof(llava_context));

    ctx_llava->ctx_llama = ctx_llama;
    ctx_llava->ctx_clip = ctx_clip;
    ctx_llava->model = model;
    return ctx_llava;
}

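// free the clip context, llama context and model, and shut down the backend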
static void llava_free(struct llava_context * ctx_llava) {
    if (ctx_llava->ctx_clip) {
        clip_free(ctx_llava->ctx_clip);
        ctx_llava->ctx_clip = NULL;
    }

    llama_free(ctx_llava->ctx_llama);
    llama_free_model(ctx_llava->model);
    llama_backend_free();
}

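// parse arguments, load the model, then run the prompt once per image (or once for a base64 image embedded in the prompt)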
int main(int argc, char ** argv) {
    ggml_time_init();

    gpt_params params;

    if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, print_usage)) {
        return 1;
    }

    gpt_init();

    if (params.mmproj.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) {
        print_usage(argc, argv);
        return 1;
    }

    auto * model = llava_init(&params);
    if (model == NULL) {
        fprintf(stderr, "%s: error: failed to init llava model\n", __func__);
        return 1;
    }

    if (prompt_contains_image(params.prompt)) {
        auto * ctx_llava = llava_init_context(&params, model);

        auto * image_embed = load_image(ctx_llava, &params, "");

        // process the prompt
        process_prompt(ctx_llava, image_embed, &params, params.prompt);

        llama_perf_context_print(ctx_llava->ctx_llama);
        llava_image_embed_free(image_embed);
        ctx_llava->model = NULL;
        llava_free(ctx_llava);
    } else {
        for (auto & image : params.image) {
            auto * ctx_llava = llava_init_context(&params, model);

            auto * image_embed = load_image(ctx_llava, &params, image);
            if (!image_embed) {
                LOG_ERR("%s: failed to load image %s. Terminating\n\n", __func__, image.c_str());
                return 1;
            }

            // process the prompt
            process_prompt(ctx_llava, image_embed, &params, params.prompt);

            llama_perf_context_print(ctx_llava->ctx_llama);
            llava_image_embed_free(image_embed);
            ctx_llava->model = NULL;
            llava_free(ctx_llava);
        }
    }

    llama_free_model(model);

    return 0;
}