// minicpmv-cli.cpp
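// Command-line demo for running MiniCPM-V vision-language models through
// llama.cpp's llava code. For each --image argument it loads the CLIP/mmproj
// model, embeds the image, evaluates the image tokens, and then answers
// either the -p prompt or interactive <user> input.
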
#include "arg.h"
#include "log.h"
#include "common.h"
#include "sampling.h"
#include "clip.h"
#include "llava.h"
#include "llama.h"
#include "ggml.h"

#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>
#include <iostream> // TODO: remove me

struct llava_context {
    struct clip_ctx * ctx_clip = NULL;
    struct llama_context * ctx_llama = NULL;
    struct llama_model * model = NULL;
};

static void show_additional_info(int /*argc*/, char ** argv) {
    LOG("\nexample usage:\n\n%s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> --image <path/to/another/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
    LOG("\nnote: a lower temperature value like 0.1 is recommended for better quality.\n");
}

static struct llama_model * llava_init(gpt_params * params) {
    llama_backend_init();
    llama_numa_init(params->numa);

    llama_model_params model_params = llama_model_params_from_gpt_params(*params);

    llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
    if (model == NULL) {
        LOG_ERR("%s: unable to load model\n", __func__);
        return NULL;
    }
    return model;
}

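// Creates the llama_context for the loaded model. n_ctx is clamped up to a
// minimum of 2048 because, as the warning below notes, image processing
// needs at least that much context.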
static struct llava_context * llava_init_context(gpt_params * params, llama_model * model) {
    auto prompt = params->prompt;
    if (prompt.empty()) {
        prompt = "describe the image in detail.";
    }
    // NOTE: prompt is computed above but not used in this function

    llama_context_params ctx_params = llama_context_params_from_gpt_params(*params);
    if (params->n_ctx < 2048) {
        LOG_WRN("%s: Image processing requires at least 2048 context, setting context to 2048\n", __func__);
        ctx_params.n_ctx = 2048;
    } else {
        ctx_params.n_ctx = params->n_ctx;
    }

    llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params);

    if (ctx_llama == NULL) {
        LOG_ERR("%s: failed to create the llama_context\n", __func__);
        return NULL;
    }

    auto * ctx_llava = (struct llava_context *)malloc(sizeof(llava_context));

    ctx_llava->ctx_llama = ctx_llama;
    ctx_llava->model = model;
    return ctx_llava;
}

static void llava_free(struct llava_context * ctx_llava) {
    if (ctx_llava->ctx_clip) {
        clip_free(ctx_llava->ctx_clip);
        ctx_llava->ctx_clip = NULL;
    }

    llama_free(ctx_llava->ctx_llama);
    llama_free_model(ctx_llava->model);
    llama_backend_free();
}

static struct clip_ctx * clip_init_context(gpt_params * params) {
    const char * clip_path = params->mmproj.c_str();

    auto prompt = params->prompt;
    if (prompt.empty()) {
        prompt = "describe the image in detail.";
    }
    // NOTE: prompt is computed above but not used in this function
    auto * ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1);
    return ctx_clip;
}

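// Decodes a token sequence in chunks of at most n_batch tokens, advancing
// *n_past by the number of tokens evaluated. Returns false if llama_decode
// fails.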
static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past) {
    int N = (int) tokens.size();
    for (int i = 0; i < N; i += n_batch) {
        int n_eval = (int) tokens.size() - i;
        if (n_eval > n_batch) {
            n_eval = n_batch;
        }
        if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) {
            LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
            return false;
        }
        *n_past += n_eval;
    }
    return true;
}

static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
    std::vector<llama_token> tokens;
    tokens.push_back(id);
    return eval_tokens(ctx_llama, tokens, 1, n_past);
}

static bool eval_string(struct llama_context * ctx_llama, const char * str, int n_batch, int * n_past, bool add_bos) {
    std::string              str2     = str;
    std::vector<llama_token> embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos, true);
    return eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
}

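// Evaluates a single slice of a (possibly multi-slice) image embedding.
// Each slice is clip_n_patches() x clip_n_mmproj_embd() floats; idx selects
// which slice of embeds->embed to copy into a temporary one-slice
// llava_image_embed before evaluating it.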
static void process_eval_image_embed(struct llava_context * ctx_llava, const struct llava_image_embed * embeds, int n_batch, int * n_past, int idx) {
    float * image_embed = (float *)malloc(clip_embd_nbytes(ctx_llava->ctx_clip));
    std::memcpy(image_embed, embeds->embed + idx * clip_n_patches(ctx_llava->ctx_clip) * clip_n_mmproj_embd(ctx_llava->ctx_clip), clip_embd_nbytes(ctx_llava->ctx_clip));

    auto * slice_embed = (llava_image_embed *)malloc(sizeof(llava_image_embed));
    slice_embed->embed = image_embed;
    slice_embed->n_image_pos = clip_n_patches(ctx_llava->ctx_clip);
    llava_eval_image_embed(ctx_llava->ctx_llama, slice_embed, n_batch, n_past);
    llava_image_embed_free(slice_embed);
}

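// Emits the image part of the prompt in the MiniCPM-V layout: the first
// embedding wrapped in <image>...</image>, then, if the image was split into
// slices, the remaining embeddings as a <slice>...</slice> grid whose rows
// are separated by newlines. clip_is_minicpmv() reports the projector
// version, which selects the chat template: 2 uses Llama-3-style role
// headers, 3 uses ChatML-style <|im_start|> markers.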
static void process_image(struct llava_context * ctx_llava, struct llava_image_embed * embeds, gpt_params * params, int & n_past) {
    std::string system_prompt;
    int idx = 0;
    int num_image_embeds = embeds->n_image_pos / clip_n_patches(ctx_llava->ctx_clip);
    int has_minicpmv_projector = clip_is_minicpmv(ctx_llava->ctx_clip);
    if (has_minicpmv_projector == 2) {
        system_prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n";
    } else if (has_minicpmv_projector == 3) {
        system_prompt = "<|im_start|>user\n";
    }
    LOG_INF("%s: image token past: %d\n", __func__, n_past);
    eval_string(ctx_llava->ctx_llama, (system_prompt + "<image>").c_str(), params->n_batch, &n_past, false);
    process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++);
    eval_string(ctx_llava->ctx_llama, std::string("</image>").c_str(), params->n_batch, &n_past, false);
    if (num_image_embeds > 1) {
        size_t num_image_embeds_col = clip_uhd_num_image_embeds_col(ctx_llava->ctx_clip);
        eval_string(ctx_llava->ctx_llama, std::string("<slice>").c_str(), params->n_batch, &n_past, false);
        for (size_t i = 0; i < (num_image_embeds-1)/num_image_embeds_col; ++i) {
            for (size_t j = 0; j < num_image_embeds_col; ++j) {
                eval_string(ctx_llava->ctx_llama, std::string("<image>").c_str(), params->n_batch, &n_past, false);
                process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++);
                eval_string(ctx_llava->ctx_llama, std::string("</image>").c_str(), params->n_batch, &n_past, false);
                if (j == num_image_embeds_col - 1) {
                    eval_string(ctx_llava->ctx_llama, std::string("\n").c_str(), params->n_batch, &n_past, false);
                }
            }
        }
        eval_string(ctx_llava->ctx_llama, std::string("</slice>").c_str(), params->n_batch, &n_past, false);
    }
    LOG_INF("%s: image token past: %d\n", __func__, n_past);
}

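// Samples one token, accepts it into the sampler state, and immediately
// evaluates it so generation can continue. Returns "</s>" when an
// end-of-generation token is sampled. Note the static buffer: the returned
// pointer is only valid until the next call, and this is not thread-safe.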
static const char * sample(struct gpt_sampler * smpl,
                           struct llama_context * ctx_llama,
                           int * n_past) {
    const llama_token id = gpt_sampler_sample(smpl, ctx_llama, -1);
    gpt_sampler_accept(smpl, id, true);
    static std::string ret;
    if (llama_token_is_eog(llama_get_model(ctx_llama), id)) {
        ret = "</s>";
    } else {
        ret = llama_token_to_piece(ctx_llama, id);
    }
    eval_id(ctx_llama, id, n_past);
    return ret.c_str();
}

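// Per-image setup: loads the CLIP model, embeds the image file, loads the
// language model, creates the llama context, and evaluates the image tokens,
// updating n_past. Timing for model init and image processing is logged.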
static struct llava_context * minicpmv_init(gpt_params * params, const std::string & fname, int & n_past) {
    auto * ctx_clip = clip_init_context(params);
    auto * embeds = llava_image_embed_make_with_filename(ctx_clip, params->cpuparams.n_threads, fname.c_str());
    if (!embeds) {
        LOG_ERR("failed to load image %s. Terminating\n\n", fname.c_str());
        return NULL;
    }

    // process the prompt
    if (params->prompt.empty() && !params->interactive) {
        LOG_ERR("prompt should be given or interactive mode should be on\n");
        return NULL;
    }

    auto * model = llava_init(params);
    if (model == NULL) {
        fprintf(stderr, "%s: error: failed to init minicpmv model\n", __func__);
        return NULL;
    }
    const int64_t t_llava_init_start_us = ggml_time_us();
    auto * ctx_llava = llava_init_context(params, model);
    ctx_llava->ctx_clip = ctx_clip;
    const int64_t t_llava_init_end_us = ggml_time_us();
    float t_llava_init_ms = (t_llava_init_end_us - t_llava_init_start_us) / 1000.0;
    LOG_INF("%s: llava init in %8.2f ms.\n", __func__, t_llava_init_ms);

    const int64_t t_process_image_start_us = ggml_time_us();
    process_image(ctx_llava, embeds, params, n_past);
    const int64_t t_process_image_end_us = ggml_time_us();
    float t_process_image_ms = (t_process_image_end_us - t_process_image_start_us) / 1000.0;
    LOG_INF("%s: llama process image in %8.2f ms.\n", __func__, t_process_image_ms);

    llava_image_embed_free(embeds);
    return ctx_llava;
}

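// Prepares a chat turn: prepends the user-role header of the detected chat
// template to the prompt (skipped on the first turn, where process_image has
// already opened the user message), evaluates it together with the
// assistant-role header, and returns a freshly initialized sampler for the
// response. Note: despite the name, this does not load the model.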
static struct gpt_sampler * llama_init(struct llava_context * ctx_llava, gpt_params * params, const std::string & prompt, int & n_past, bool is_first = false) {
    std::string user_prompt = prompt;
    int has_minicpmv_projector = clip_is_minicpmv(ctx_llava->ctx_clip);
    if (!is_first) {
        if (has_minicpmv_projector == 2) {
            user_prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n" + prompt;
        } else if (has_minicpmv_projector == 3) {
            user_prompt = "<|im_start|>user\n" + prompt;
        }
    }

    eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false);
    if (has_minicpmv_projector == 2) {
        eval_string(ctx_llava->ctx_llama, "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", params->n_batch, &n_past, false);
    } else if (has_minicpmv_projector == 3) {
        eval_string(ctx_llava->ctx_llama, "<|im_end|><|im_start|>assistant\n", params->n_batch, &n_past, false);
    }

    // generate the response

    LOG_INF("\n");

    struct gpt_sampler * smpl = gpt_sampler_init(ctx_llava->model, params->sparams);
    return smpl;
}

static const char * llama_loop(struct llava_context * ctx_llava, struct gpt_sampler * smpl, int & n_past) {
    const char * tmp = sample(smpl, ctx_llava->ctx_llama, &n_past);
    return tmp;
}

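// Entry point: parses arguments (--mmproj and at least one --image are
// required), then for each image either answers the -p prompt once or enters
// an interactive <user>/<assistant> loop. Generation stops at the token
// limit, on end of generation, on "###" (Yi-VL), or once the response
// contains "<user>" (MiniCPM-V).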
int main(int argc, char ** argv) {
    ggml_time_init();

    gpt_params params;

    if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, show_additional_info)) {
        return 1;
    }

    gpt_init();

    if (params.mmproj.empty() || params.image.empty()) {
        show_additional_info(argc, argv);
        return 1;
    }

    for (auto & image : params.image) {
        int n_past = 0;
        auto * ctx_llava = minicpmv_init(&params, image, n_past);
        if (!ctx_llava) {
            // image or model loading failed
            return 1;
        }

        if (!params.prompt.empty()) {
            LOG("<user>%s\n", params.prompt.c_str());
            LOG("<assistant>");
            auto * smpl = llama_init(ctx_llava, &params, params.prompt, n_past, true);
            const int max_tgt_len = params.n_predict < 0 ? 256 : params.n_predict;
            std::string response;
            bool have_tmp = false;
            for (int i = 0; i < max_tgt_len; i++) {
                const auto * tmp = llama_loop(ctx_llava, smpl, n_past);
                response += tmp;
                if (strcmp(tmp, "</s>") == 0) {
                    // ignore an end-of-generation token before any visible output
                    if (!have_tmp) {
                        continue;
                    }
                    break;
                }
                if (strstr(tmp, "###")) break; // Yi-VL behavior
                have_tmp = true;
                printf("%s", tmp);
                if (strstr(response.c_str(), "<user>")) break; // minicpm-v

                fflush(stdout);
            }
            gpt_sampler_free(smpl);
        } else {
            while (true) {
                LOG("<user>");
                std::string prompt;
                if (!std::getline(std::cin, prompt)) {
                    break; // stop on EOF
                }
                LOG("<assistant>");
                auto * smpl = llama_init(ctx_llava, &params, prompt, n_past, true);
                const int max_tgt_len = params.n_predict < 0 ? 256 : params.n_predict;
                std::string response;
                for (int i = 0; i < max_tgt_len; i++) {
                    const auto * tmp = llama_loop(ctx_llava, smpl, n_past);
                    response += tmp;
                    if (strcmp(tmp, "</s>") == 0) break;
                    if (strstr(tmp, "###")) break; // Yi-VL behavior
                    printf("%s", tmp); // mistral llava-1.6
                    if (strstr(response.c_str(), "<user>")) break; // minicpm-v
                    fflush(stdout);
                }
                gpt_sampler_free(smpl);
            }
        }
        printf("\n");
        llama_perf_context_print(ctx_llava->ctx_llama);

        // clearing the model pointer first means llava_free() will not free
        // the model itself
        ctx_llava->model = NULL;
        llava_free(ctx_llava);
    }

    return 0;
}