struct llava_context {
    struct clip_ctx * ctx_clip = NULL;
    struct llama_context * ctx_llama = NULL;
    struct llama_model * model = NULL;
};

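// Print example usage; also passed to gpt_params_parse as the usage callback.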
static void show_additional_info(int /*argc*/, char ** argv) {
    LOG("\nexample usage:\n\n%s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> --image <path/to/another/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
    LOG("\nnote: a lower temperature value like 0.1 is recommended for better quality.\n");
}

static struct llama_model * llava_init(gpt_params * params) {
    llama_backend_init();
    llama_numa_init(params->numa);

    llama_model_params model_params = llama_model_params_from_gpt_params(*params);

    llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
    if (model == NULL) {
        LOG_ERR("%s: unable to load model\n", __func__);
        return NULL;
    }
    return model;
}

static struct llava_context * llava_init_context(gpt_params * params, llama_model * model) {
    auto prompt = params->prompt;
    if (prompt.empty()) {
        prompt = "describe the image in detail.";
    }

    llama_context_params ctx_params = llama_context_params_from_gpt_params(*params);
    if (params->n_ctx < 2048) {
        LOG_WRN("%s: Image processing requires at least 2048 context, setting context to 2048\n", __func__);
        ctx_params.n_ctx = 2048;
    } else {
        ctx_params.n_ctx = params->n_ctx;
    }

    llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params);

    if (ctx_llama == NULL) {
        LOG_ERR("%s: failed to create the llama_context\n", __func__);
        return NULL;
    }

    auto * ctx_llava = (struct llava_context *)malloc(sizeof(llava_context));

    ctx_llava->ctx_llama = ctx_llama;
    ctx_llava->model = model;
    return ctx_llava;
}

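// Release everything owned by a llava_context: the clip context, the llama
// context, the model, and the backend.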
static void llava_free(struct llava_context * ctx_llava) {
    if (ctx_llava->ctx_clip) {
        clip_free(ctx_llava->ctx_clip);
        ctx_llava->ctx_clip = NULL;
    }

    llama_free(ctx_llava->ctx_llama);
    llama_free_model(ctx_llava->model);
    llama_backend_free();
    free(ctx_llava); // the struct itself was malloc'd in llava_init_context
}

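// Load the CLIP/mmproj model used to encode images.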
static struct clip_ctx * clip_init_context(gpt_params * params) {
    const char * clip_path = params->mmproj.c_str();

    auto prompt = params->prompt;
    if (prompt.empty()) {
        prompt = "describe the image in detail.";
    }
    auto * ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1);
    return ctx_clip;
}

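// Decode a token list in chunks of at most n_batch tokens, advancing *n_past.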
static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past) {
    int N = (int) tokens.size();
    for (int i = 0; i < N; i += n_batch) {
        int n_eval = (int) tokens.size() - i;
        if (n_eval > n_batch) {
            n_eval = n_batch;
        }
        if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) {
            LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
            return false;
        }
        *n_past += n_eval;
    }
    return true;
}

static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
    std::vector<llama_token> tokens;
    tokens.push_back(id);
    return eval_tokens(ctx_llama, tokens, 1, n_past);
}

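// Tokenize a string and decode it.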
static bool eval_string(struct llama_context * ctx_llama, const char * str, int n_batch, int * n_past, bool add_bos) {
    std::string str2 = str;
    std::vector<llama_token> embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos, true);
    return eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
}

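// Copy the idx-th slice out of the combined image-embedding buffer and feed it
// to the model as a standalone llava_image_embed.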
static void process_eval_image_embed(struct llava_context * ctx_llava, const struct llava_image_embed * embeds, int n_batch, int * n_past, int idx) {
    float * image_embed = (float *)malloc(clip_embd_nbytes(ctx_llava->ctx_clip));
    std::memcpy(image_embed, embeds->embed + idx * clip_n_patches(ctx_llava->ctx_clip) * clip_n_mmproj_embd(ctx_llava->ctx_clip), clip_embd_nbytes(ctx_llava->ctx_clip));

    auto * slice_embed = (llava_image_embed *)malloc(sizeof(llava_image_embed));
    slice_embed->embed = image_embed;
    slice_embed->n_image_pos = clip_n_patches(ctx_llava->ctx_clip);
    llava_eval_image_embed(ctx_llava->ctx_llama, slice_embed, n_batch, n_past);
    llava_image_embed_free(slice_embed);
}

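// Feed the image embeddings to the model wrapped in MiniCPM-V markup: the
// overview image in <image>...</image>, then, for sliced high-resolution
// inputs, the slice grid inside <slice>...</slice>, row by row.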
static void process_image(struct llava_context * ctx_llava, struct llava_image_embed * embeds, gpt_params * params, int & n_past) {
    std::string system_prompt;
    int idx = 0;
    int num_image_embeds = embeds->n_image_pos / clip_n_patches(ctx_llava->ctx_clip);
    int has_minicpmv_projector = clip_is_minicpmv(ctx_llava->ctx_clip);
    if (has_minicpmv_projector == 2) {
        system_prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n";
    } else if (has_minicpmv_projector == 3) {
        system_prompt = "<|im_start|>user\n";
    }
    LOG_INF("%s: image token past: %d\n", __func__, n_past);
    // the first embed is the overview image
    eval_string(ctx_llava->ctx_llama, (system_prompt + "<image>").c_str(), params->n_batch, &n_past, false);
    process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++);
    eval_string(ctx_llava->ctx_llama, std::string("</image>").c_str(), params->n_batch, &n_past, false);
    if (num_image_embeds > 1) {
        // the remaining embeds are high-resolution slices, emitted row by row
        size_t num_image_embeds_col = clip_uhd_num_image_embeds_col(ctx_llava->ctx_clip);
        eval_string(ctx_llava->ctx_llama, std::string("<slice>").c_str(), params->n_batch, &n_past, false);
        for (size_t i = 0; i < (num_image_embeds - 1) / num_image_embeds_col; ++i) {
            for (size_t j = 0; j < num_image_embeds_col; ++j) {
                eval_string(ctx_llava->ctx_llama, std::string("<image>").c_str(), params->n_batch, &n_past, false);
                process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++);
                eval_string(ctx_llava->ctx_llama, std::string("</image>").c_str(), params->n_batch, &n_past, false);
                if (j == num_image_embeds_col - 1) {
                    eval_string(ctx_llava->ctx_llama, std::string("\n").c_str(), params->n_batch, &n_past, false);
                }
            }
        }
        eval_string(ctx_llava->ctx_llama, std::string("</slice>").c_str(), params->n_batch, &n_past, false);
    }
    LOG_INF("%s: image token past: %d\n", __func__, n_past);
}

static const char * sample(struct gpt_sampler * smpl,
                           struct llama_context * ctx_llama,
                           int * n_past) {
    const llama_token id = gpt_sampler_sample(smpl, ctx_llama, -1);
    gpt_sampler_accept(smpl, id, true);
    static std::string ret;
    if (llama_token_is_eog(llama_get_model(ctx_llama), id)) {
        ret = "</s>";
    } else {
        ret = llama_token_to_piece(ctx_llama, id);
    }
    eval_id(ctx_llama, id, n_past);
    return ret.c_str();
}

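// Full pipeline setup for one image: load the CLIP model, embed the image,
// load the language model, and evaluate the image tokens. Logs timings for
// the init and image-processing phases.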
static struct llava_context * minicpmv_init(gpt_params * params, const std::string & fname, int & n_past) {
    auto * ctx_clip = clip_init_context(params);
    auto * embeds = llava_image_embed_make_with_filename(ctx_clip, params->cpuparams.n_threads, fname.c_str());
    if (!embeds) {
        LOG_ERR("failed to load image %s. Terminating\n\n", fname.c_str());
        return NULL;
    }

    if (params->prompt.empty() && params->interactive == false) {
        LOG_ERR("prompt should be given or interactive mode should be on");
        return NULL;
    }

    auto * model = llava_init(params);
    if (model == NULL) {
        fprintf(stderr, "%s: error: failed to init minicpmv model\n", __func__);
        return NULL;
    }

    const int64_t t_llava_init_start_us = ggml_time_us();
    auto * ctx_llava = llava_init_context(params, model);
    ctx_llava->ctx_clip = ctx_clip;
    const int64_t t_llava_init_end_us = ggml_time_us();
    float t_llava_init_ms = (t_llava_init_end_us - t_llava_init_start_us) / 1000.0;
    LOG_INF("%s: llava init in %8.2f ms.\n", __func__, t_llava_init_ms);

    const int64_t t_process_image_start_us = ggml_time_us();
    process_image(ctx_llava, embeds, params, n_past);
    const int64_t t_process_image_end_us = ggml_time_us();
    float t_process_image_ms = (t_process_image_end_us - t_process_image_start_us) / 1000.0;
    LOG_INF("%s: llama process image in %8.2f ms.\n", __func__, t_process_image_ms);

    llava_image_embed_free(embeds);
    return ctx_llava;
}

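// Evaluate the user prompt, wrapped in the chat template matching the
// projector version (2 uses Llama-3 style tags, 3 uses ChatML tags), and
// create a sampler for the response.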
static struct gpt_sampler * llama_init(struct llava_context * ctx_llava, gpt_params * params, const std::string & prompt, int & n_past, bool is_first = false) {
    std::string user_prompt = prompt;
    int has_minicpmv_projector = clip_is_minicpmv(ctx_llava->ctx_clip);
    if (!is_first) {
        // on the first turn the user-role header was already emitted by process_image
        if (has_minicpmv_projector == 2) {
            user_prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n" + prompt;
        } else if (has_minicpmv_projector == 3) {
            user_prompt = "<|im_start|>user\n" + prompt;
        }
    }

    eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false);
    if (has_minicpmv_projector == 2) {
        eval_string(ctx_llava->ctx_llama, "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", params->n_batch, &n_past, false);
    } else if (has_minicpmv_projector == 3) {
        eval_string(ctx_llava->ctx_llama, "<|im_end|><|im_start|>assistant\n", params->n_batch, &n_past, false);
    }

    // generate the response
    struct gpt_sampler * smpl = gpt_sampler_init(ctx_llava->model, params->sparams);
    return smpl;
}

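// Produce the next response token.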
static const char * llama_loop(struct llava_context * ctx_llava, struct gpt_sampler * smpl, int & n_past) {
    const char * tmp = sample(smpl, ctx_llava->ctx_llama, &n_past);
    return tmp;
}

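// Entry point: parse arguments, then run single-prompt or interactive chat
// over each --image argument.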
int main(int argc, char ** argv) {
    ggml_time_init();

    gpt_params params;

    if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, show_additional_info)) {
        return 1;
    }

    if (params.mmproj.empty() || params.image.empty()) {
        show_additional_info(argc, argv);
        return 1;
    }

    for (auto & image : params.image) {
        int n_past = 0;
        auto * ctx_llava = minicpmv_init(&params, image, n_past);

        if (!params.prompt.empty()) {
            LOG("<user>%s\n", params.prompt.c_str());
            LOG("<assistant>");
            auto * smpl = llama_init(ctx_llava, &params, params.prompt, n_past, true);
            const int max_tgt_len = params.n_predict < 0 ? 256 : params.n_predict;
            std::string response;
            bool have_tmp = false;
            for (int i = 0; i < max_tgt_len; i++) {
                const auto * tmp = llama_loop(ctx_llava, smpl, n_past);
                response += tmp;
                if (strcmp(tmp, "</s>") == 0) {
                    if (!have_tmp) {
                        continue; // ignore an end-of-sequence token before any output
                    }
                    break;
                }
                if (strstr(tmp, "###")) break; // Yi-VL behavior
                have_tmp = true;
                printf("%s", tmp);
                if (strstr(response.c_str(), "<user>")) break; // minicpm-v behavior
                fflush(stdout);
            }
            gpt_sampler_free(smpl);
        } else {
            while (true) {
                LOG("<user>");
                std::string prompt;
                std::getline(std::cin, prompt);
                LOG("<assistant>");
                auto * smpl = llama_init(ctx_llava, &params, prompt, n_past, true);
                const int max_tgt_len = params.n_predict < 0 ? 256 : params.n_predict;
                std::string response;
                for (int i = 0; i < max_tgt_len; i++) {
                    const auto * tmp = llama_loop(ctx_llava, smpl, n_past);
                    response += tmp;
                    if (strcmp(tmp, "</s>") == 0) break;
                    if (strstr(tmp, "###")) break; // Yi-VL behavior
                    printf("%s", tmp);
                    if (strstr(response.c_str(), "<user>")) break; // minicpm-v behavior
                    fflush(stdout);
                }
                gpt_sampler_free(smpl);
            }
        }
        printf("\n");
        llama_perf_context_print(ctx_llava->ctx_llama);

        ctx_llava->model = NULL;
        llava_free(ctx_llava);
    }

    return 0;
}