// NOTE: This is modified from clip.cpp only for LLaVA,
// so there might be still unnecessary artifacts hanging around
// I'll gradually clean and extend it
// Note: Even when using identical normalized image inputs (see normalize_image_u8_to_f32()) we have a significant difference in resulting embeddings compared to pytorch
#include "ggml-backend.h"
#include "ggml-metal.h"
#include "ggml-vulkan.h"

#define STB_IMAGE_IMPLEMENTATION

#define LOG_INF(...) do { fprintf(stdout, __VA_ARGS__); } while (0)
#define LOG_WRN(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
#define LOG_ERR(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
#define LOG_DBG(...) do { fprintf(stderr, __VA_ARGS__); } while (0)

//#define CLIP_DEBUG_FUNCTIONS

    std::vector<uint8_t> buf;

// RGB float32 image (NHWC)
// Memory layout: RGBRGBRGB...
struct clip_image_f32 {
    std::vector<float> buf;

static std::string format(const char * fmt, ...) {
    int size = vsnprintf(NULL, 0, fmt, ap);
    GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
    std::vector<char> buf(size + 1);
    int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
    GGML_ASSERT(size2 == size);
    return std::string(buf.data(), size); // use size, not buf.size(): buf also holds the terminating NUL
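
// format() is the classic two-pass vsnprintf idiom: the first call (with a
// NULL buffer) only measures the required length, the second writes into a
// buffer sized from it (ap/ap2 are the va_list copies prepared above).
// Example, using the tensor-name templates defined below:
//   format(TN_ATTN_K, "v", 0, "weight")  ->  "v.blk.0.attn_k.weight"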
#define KEY_FTYPE "general.file_type"
#define KEY_NAME "general.name"
#define KEY_DESCRIPTION "general.description"
#define KEY_HAS_TEXT_ENC "clip.has_text_encoder"
#define KEY_HAS_VIS_ENC "clip.has_vision_encoder"
#define KEY_HAS_LLAVA_PROJ "clip.has_llava_projector"
#define KEY_HAS_MINICPMV_PROJ "clip.has_minicpmv_projector"
#define KEY_MINICPMV_VERSION "clip.minicpmv_version"
#define KEY_USE_GELU "clip.use_gelu"
#define KEY_N_EMBD "clip.%s.embedding_length"
#define KEY_N_FF "clip.%s.feed_forward_length"
#define KEY_N_BLOCK "clip.%s.block_count"
#define KEY_N_HEAD "clip.%s.attention.head_count"
#define KEY_LAYER_NORM_EPS "clip.%s.attention.layer_norm_epsilon"
#define KEY_PROJ_DIM "clip.%s.projection_dim"
#define KEY_TOKENS "tokenizer.ggml.tokens"
#define KEY_N_POSITIONS "clip.text.context_length"
#define KEY_IMAGE_SIZE "clip.vision.image_size"
#define KEY_PATCH_SIZE "clip.vision.patch_size"
#define KEY_IMAGE_MEAN "clip.vision.image_mean"
#define KEY_IMAGE_STD "clip.vision.image_std"
#define KEY_PROJ_TYPE "clip.projector_type"

#define KEY_MM_PATCH_MERGE_TYPE "clip.vision.mm_patch_merge_type"
#define KEY_IMAGE_GRID_PINPOINTS "clip.vision.image_grid_pinpoints"
#define KEY_IMAGE_CROP_RESOLUTION "clip.vision.image_crop_resolution"

// tensor name constants
#define TN_TOKEN_EMBD "%s.token_embd.weight"
#define TN_POS_EMBD "%s.position_embd.weight"
#define TN_CLASS_EMBD "v.class_embd"
#define TN_PATCH_EMBD "v.patch_embd.weight"
#define TN_PATCH_BIAS "v.patch_embd.bias"
#define TN_ATTN_K "%s.blk.%d.attn_k.%s"
#define TN_ATTN_Q "%s.blk.%d.attn_q.%s"
#define TN_ATTN_V "%s.blk.%d.attn_v.%s"
#define TN_ATTN_OUTPUT "%s.blk.%d.attn_out.%s"
#define TN_FFN_DOWN "%s.blk.%d.ffn_down.%s"
#define TN_FFN_UP "%s.blk.%d.ffn_up.%s"
#define TN_LN_1 "%s.blk.%d.ln1.%s"
#define TN_LN_2 "%s.blk.%d.ln2.%s"
#define TN_LN_PRE "%s.pre_ln.%s"
#define TN_LN_POST "%s.post_ln.%s"
#define TN_TEXT_PROJ "text_projection.weight"
#define TN_VIS_PROJ "visual_projection.weight"
#define TN_LLAVA_PROJ "mm.%d.%s"
#define TN_MVLM_PROJ_MLP "mm.model.mlp.%d.%s"
#define TN_MVLM_PROJ_BLOCK "mm.model.mb_block.%d.block.%d.%s"
#define TN_MVLM_PROJ_PEG "mm.model.peg.%d.%s"
#define TN_IMAGE_NEWLINE "model.image_newline"

#define TN_MINICPMV_POS_EMBD_K "resampler.pos_embed_k"
#define TN_MINICPMV_QUERY "resampler.query"
#define TN_MINICPMV_PROJ "resampler.proj.weight"
#define TN_MINICPMV_KV_PROJ "resampler.kv.weight"
#define TN_MINICPMV_ATTN "resampler.attn.%s.%s"
#define TN_MINICPMV_LN "resampler.ln_%s.%s"
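
// the "%s"/"%d" placeholders in the keys and tensor names above are filled
// in with format(), e.g.
//   format(KEY_N_EMBD, "vision")                 -> "clip.vision.embedding_length"
//   format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight") -> "mm.model.mb_block.1.block.0.0.weight"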
    PROJECTOR_TYPE_MLP_NORM,
    PROJECTOR_TYPE_LDPV2,
    PROJECTOR_TYPE_RESAMPLER,
    PROJECTOR_TYPE_UNKNOWN,

static std::map<projector_type, std::string> PROJECTOR_TYPE_NAMES = {
    { PROJECTOR_TYPE_MLP, "mlp" },
    { PROJECTOR_TYPE_LDP, "ldp" },
    { PROJECTOR_TYPE_LDPV2, "ldpv2"},
    { PROJECTOR_TYPE_RESAMPLER, "resampler"},

// utilities to get data from a gguf file
static int get_key_idx(const gguf_context * ctx, const char * key) {
    int i = gguf_find_key(ctx, key);
        LOG_ERR("key %s not found in file\n", key);
        throw std::runtime_error(format("Missing required key: %s", key));

static uint32_t get_u32(const gguf_context * ctx, const std::string & key) {
    const int i = get_key_idx(ctx, key.c_str());
    return gguf_get_val_u32(ctx, i);

static float get_f32(const gguf_context * ctx, const std::string & key) {
    const int i = get_key_idx(ctx, key.c_str());
    return gguf_get_val_f32(ctx, i);

static struct ggml_tensor * get_tensor(struct ggml_context * ctx, const std::string & name) {
    struct ggml_tensor * cur = ggml_get_tensor(ctx, name.c_str());
        throw std::runtime_error(format("%s: unable to find tensor %s\n", __func__, name.c_str()));

static std::string get_ftype(int ftype) {
    return ggml_type_name(static_cast<ggml_type>(ftype));

static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) {
        case GGUF_TYPE_UINT8:   return std::to_string(((const uint8_t  *)data)[i]);
        case GGUF_TYPE_INT8:    return std::to_string(((const int8_t   *)data)[i]);
        case GGUF_TYPE_UINT16:  return std::to_string(((const uint16_t *)data)[i]);
        case GGUF_TYPE_INT16:   return std::to_string(((const int16_t  *)data)[i]);
        case GGUF_TYPE_UINT32:  return std::to_string(((const uint32_t *)data)[i]);
        case GGUF_TYPE_INT32:   return std::to_string(((const int32_t  *)data)[i]);
        case GGUF_TYPE_UINT64:  return std::to_string(((const uint64_t *)data)[i]);
        case GGUF_TYPE_INT64:   return std::to_string(((const int64_t  *)data)[i]);
        case GGUF_TYPE_FLOAT32: return std::to_string(((const float    *)data)[i]);
        case GGUF_TYPE_FLOAT64: return std::to_string(((const double   *)data)[i]);
        case GGUF_TYPE_BOOL:    return ((const bool *)data)[i] ? "true" : "false";
        default:                return format("unknown type %d", type);

static void replace_all(std::string & s, const std::string & search, const std::string & replace) {
    if (search.empty()) {
    builder.reserve(s.length());
    while ((pos = s.find(search, last_pos)) != std::string::npos) {
        builder.append(s, last_pos, pos - last_pos);
        builder.append(replace);
        last_pos = pos + search.length();
    builder.append(s, last_pos, std::string::npos);
    s = std::move(builder);

static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
    const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
        case GGUF_TYPE_STRING:
            return gguf_get_val_str(ctx_gguf, i);
        case GGUF_TYPE_ARRAY:
                const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
                int arr_n = gguf_get_arr_n(ctx_gguf, i);
                const void * data = gguf_get_arr_data(ctx_gguf, i);
                std::stringstream ss;
                for (int j = 0; j < arr_n; j++) {
                    if (arr_type == GGUF_TYPE_STRING) {
                        std::string val = gguf_get_arr_str(ctx_gguf, i, j);
                        replace_all(val, "\\", "\\\\");
                        replace_all(val, "\"", "\\\"");
                        ss << '"' << val << '"';
                    } else if (arr_type == GGUF_TYPE_ARRAY) {
                        ss << gguf_data_to_str(arr_type, data, j);
            return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0);

static void print_tensor_info(const ggml_tensor * tensor, const char * prefix = "") {
    size_t tensor_size = ggml_nbytes(tensor);
    LOG_INF("%s: n_dims = %d, name = %s, tensor_size=%zu, shape:[%" PRId64 ", %" PRId64 ", %" PRId64 ", %" PRId64 "], type = %s\n",
            prefix, ggml_n_dims(tensor), tensor->name, tensor_size,
            tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], ggml_type_name(tensor->type));

static projector_type clip_projector_type_from_string(const std::string & name) {
    for (const auto & kv : PROJECTOR_TYPE_NAMES) { // NOLINT
        if (kv.second == name) {
    return PROJECTOR_TYPE_UNKNOWN;
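
// reverse lookup into PROJECTOR_TYPE_NAMES; used when reading KEY_PROJ_TYPE
// from the gguf header, e.g. clip_projector_type_from_string("ldpv2")
// returns PROJECTOR_TYPE_LDPV2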
#ifdef CLIP_DEBUG_FUNCTIONS
static void clip_image_write_image_to_ppm(const clip_image_u8& img, const std::string& filename) {
    std::ofstream file(filename, std::ios::binary);
    if (!file.is_open()) {
        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());

    // PPM header: P6 format, width, height, and max color value
    file << "P6\n" << img.nx << " " << img.ny << "\n255\n";

    for (size_t i = 0; i < img.buf.size(); i += 3) {
        // PPM expects binary data in RGB format, which matches our image buffer
        file.write(reinterpret_cast<const char*>(&img.buf[i]), 3);

static void clip_image_save_to_bmp(const clip_image_u8& img, const std::string& filename) {
    std::ofstream file(filename, std::ios::binary);
    if (!file.is_open()) {
        LOG_ERR("Failed to open file for writing: %s\n", filename.c_str());

    int fileSize = 54 + 3 * img.nx * img.ny; // File header + info header + pixel data
    int bytesPerPixel = 3;
    int widthInBytes = img.nx * bytesPerPixel;
    int paddingAmount = (4 - (widthInBytes % 4)) % 4;
    int stride = widthInBytes + paddingAmount;

    // Bitmap file header
    unsigned char fileHeader[14] = {
        'B','M',     // Signature
        0,0,0,0,     // Image file size in bytes
        54,0,0,0     // Start of pixel array

    fileSize = 54 + (stride * img.ny);
    fileHeader[2] = (unsigned char)(fileSize);
    fileHeader[3] = (unsigned char)(fileSize >> 8);
    fileHeader[4] = (unsigned char)(fileSize >> 16);
    fileHeader[5] = (unsigned char)(fileSize >> 24);

    // Bitmap information header (BITMAPINFOHEADER)
    unsigned char infoHeader[40] = {
        40,0,0,0, // Size of this header (40 bytes)
        0,0,0,0,  // Image width
        0,0,0,0,  // Image height
        1,0,      // Number of color planes
        24,0,     // Bits per pixel
        0,0,0,0,  // No compression
        0,0,0,0,  // Image size (can be 0 for no compression)
        0,0,0,0,  // X pixels per meter (not specified)
        0,0,0,0,  // Y pixels per meter (not specified)
        0,0,0,0,  // Total colors (color table not used)
        0,0,0,0   // Important colors (all are important)

    // Width and height in the information header
    infoHeader[4] = (unsigned char)(img.nx);
    infoHeader[5] = (unsigned char)(img.nx >> 8);
    infoHeader[6] = (unsigned char)(img.nx >> 16);
    infoHeader[7] = (unsigned char)(img.nx >> 24);
    infoHeader[8] = (unsigned char)(img.ny);
    infoHeader[9] = (unsigned char)(img.ny >> 8);
    infoHeader[10] = (unsigned char)(img.ny >> 16);
    infoHeader[11] = (unsigned char)(img.ny >> 24);

    // Write file headers
    file.write(reinterpret_cast<char*>(fileHeader), sizeof(fileHeader));
    file.write(reinterpret_cast<char*>(infoHeader), sizeof(infoHeader));

    std::vector<unsigned char> padding(3, 0); // Max padding size to be added to each row
    for (int y = img.ny - 1; y >= 0; --y) { // BMP files are stored bottom-to-top
        for (int x = 0; x < img.nx; ++x) {
            size_t pixelIndex = (y * img.nx + x) * 3;
            unsigned char pixel[3] = {
                img.buf[pixelIndex + 2], // BMP stores pixels in BGR format
                img.buf[pixelIndex + 1],
            file.write(reinterpret_cast<char*>(pixel), 3);
        // Write padding for the row
        file.write(reinterpret_cast<char*>(padding.data()), paddingAmount);
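
// (clip_image_save_to_bmp note) the shift-and-store sequences above write the
// 32-bit size/width/height header fields byte-by-byte in little-endian order,
// as the BMP format requires, regardless of host endianness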
// debug function to convert f32 to u8
static void clip_image_convert_f32_to_u8(const clip_image_f32& src, clip_image_u8& dst) {
    dst.buf.resize(3 * src.nx * src.ny);
    for (size_t i = 0; i < src.buf.size(); ++i) {
        dst.buf[i] = static_cast<uint8_t>(std::min(std::max(int(src.buf[i] * 255.0f), 0), 255));

    int32_t n_intermediate;
    int32_t projection_dim;

    char mm_patch_merge_type[32] = "flat"; // spatial_unpad or flat (default)

    int32_t image_grid_pinpoints[32];
    int32_t image_crop_resolution;

    struct ggml_tensor * k_w;
    struct ggml_tensor * k_b;
    struct ggml_tensor * q_w;
    struct ggml_tensor * q_b;
    struct ggml_tensor * v_w;
    struct ggml_tensor * v_b;

    struct ggml_tensor * o_w;
    struct ggml_tensor * o_b;

    struct ggml_tensor * ln_1_w;
    struct ggml_tensor * ln_1_b;

    struct ggml_tensor * ff_i_w;
    struct ggml_tensor * ff_i_b;

    struct ggml_tensor * ff_o_w;
    struct ggml_tensor * ff_o_b;

    struct ggml_tensor * ln_2_w;
    struct ggml_tensor * ln_2_b;

struct clip_vision_model {
    struct clip_hparams hparams;

    struct ggml_tensor * class_embedding;
    struct ggml_tensor * patch_embeddings;
    struct ggml_tensor * patch_bias;
    struct ggml_tensor * position_embeddings;

    struct ggml_tensor * pre_ln_w;
    struct ggml_tensor * pre_ln_b;

    std::vector<clip_layer> layers;

    struct ggml_tensor * post_ln_w;
    struct ggml_tensor * post_ln_b;

    struct ggml_tensor * projection;

    struct ggml_tensor * mm_0_w = NULL;
    struct ggml_tensor * mm_0_b = NULL;
    struct ggml_tensor * mm_2_w = NULL;
    struct ggml_tensor * mm_2_b = NULL;

    struct ggml_tensor * image_newline = NULL;

    // Yi type models with mlp+normalization projection
    struct ggml_tensor * mm_1_w = NULL; // Yi type models have 0, 1, 3, 4
    struct ggml_tensor * mm_1_b = NULL;
    struct ggml_tensor * mm_3_w = NULL;
    struct ggml_tensor * mm_3_b = NULL;
    struct ggml_tensor * mm_4_w = NULL;
    struct ggml_tensor * mm_4_b = NULL;

    // MobileVLM projection
    struct ggml_tensor * mm_model_mlp_1_w;
    struct ggml_tensor * mm_model_mlp_1_b;
    struct ggml_tensor * mm_model_mlp_3_w;
    struct ggml_tensor * mm_model_mlp_3_b;
    struct ggml_tensor * mm_model_block_1_block_0_0_w;
    struct ggml_tensor * mm_model_block_1_block_0_1_w;
    struct ggml_tensor * mm_model_block_1_block_0_1_b;
    struct ggml_tensor * mm_model_block_1_block_1_fc1_w;
    struct ggml_tensor * mm_model_block_1_block_1_fc1_b;
    struct ggml_tensor * mm_model_block_1_block_1_fc2_w;
    struct ggml_tensor * mm_model_block_1_block_1_fc2_b;
    struct ggml_tensor * mm_model_block_1_block_2_0_w;
    struct ggml_tensor * mm_model_block_1_block_2_1_w;
    struct ggml_tensor * mm_model_block_1_block_2_1_b;
    struct ggml_tensor * mm_model_block_2_block_0_0_w;
    struct ggml_tensor * mm_model_block_2_block_0_1_w;
    struct ggml_tensor * mm_model_block_2_block_0_1_b;
    struct ggml_tensor * mm_model_block_2_block_1_fc1_w;
    struct ggml_tensor * mm_model_block_2_block_1_fc1_b;
    struct ggml_tensor * mm_model_block_2_block_1_fc2_w;
    struct ggml_tensor * mm_model_block_2_block_1_fc2_b;
    struct ggml_tensor * mm_model_block_2_block_2_0_w;
    struct ggml_tensor * mm_model_block_2_block_2_1_w;
    struct ggml_tensor * mm_model_block_2_block_2_1_b;

    // MobileVLM_V2 projection
    struct ggml_tensor * mm_model_mlp_0_w;
    struct ggml_tensor * mm_model_mlp_0_b;
    struct ggml_tensor * mm_model_mlp_2_w;
    struct ggml_tensor * mm_model_mlp_2_b;
    struct ggml_tensor * mm_model_peg_0_w;
    struct ggml_tensor * mm_model_peg_0_b;

    // MINICPMV projection
    struct ggml_tensor * mm_model_pos_embed_k;
    struct ggml_tensor * mm_model_query;
    struct ggml_tensor * mm_model_proj;
    struct ggml_tensor * mm_model_kv_proj;
    struct ggml_tensor * mm_model_attn_q_w;
    struct ggml_tensor * mm_model_attn_q_b;
    struct ggml_tensor * mm_model_attn_k_w;
    struct ggml_tensor * mm_model_attn_k_b;
    struct ggml_tensor * mm_model_attn_v_w;
    struct ggml_tensor * mm_model_attn_v_b;
    struct ggml_tensor * mm_model_attn_o_w;
    struct ggml_tensor * mm_model_attn_o_b;
    struct ggml_tensor * mm_model_ln_q_w;
    struct ggml_tensor * mm_model_ln_q_b;
    struct ggml_tensor * mm_model_ln_kv_w;
    struct ggml_tensor * mm_model_ln_kv_b;
    struct ggml_tensor * mm_model_ln_post_w;
    struct ggml_tensor * mm_model_ln_post_b;

    bool has_text_encoder = false;
    bool has_vision_encoder = false;
    bool has_llava_projector = false;
    bool has_minicpmv_projector = false;
    int minicpmv_version = 2;

    struct clip_vision_model vision_model;
    projector_type proj_type = PROJECTOR_TYPE_MLP;

    bool use_gelu = false;

    bool has_class_embedding = true;
    bool has_pre_norm = true;
    bool has_post_norm = false;
    bool has_patch_bias = false;

    struct gguf_context * ctx_gguf;
    struct ggml_context * ctx_data;

    std::vector<uint8_t> buf_compute_meta;

    // memory buffers to evaluate the model
    ggml_backend_buffer_t params_buffer = NULL;

    ggml_backend_t backend = NULL;
    ggml_gallocr_t compute_alloc = NULL;

    struct clip_image_size * load_image_size;

static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch * imgs, struct clip_image_size * load_image_size, bool is_inf = false) {
    if (!ctx->has_vision_encoder) {
        LOG_ERR("This gguf file seems to have no vision encoder\n");

    const auto & model = ctx->vision_model;
    const auto & hparams = model.hparams;

    const int image_size = hparams.image_size;
    int image_size_width  = image_size;
    int image_size_height = image_size;
    if (ctx->has_minicpmv_projector) {
        if (load_image_size == nullptr) {
            load_image_size = clip_image_size_init();
        LOG_DBG("%s: %d %d\n", __func__, load_image_size->width, load_image_size->height);
        image_size_width  = load_image_size->width;
        image_size_height = load_image_size->height;
        image_size_width  = imgs->data->nx;
        image_size_height = imgs->data->ny;

    const int patch_size    = hparams.patch_size;
    const int num_patches   = ((image_size_width / patch_size) * (image_size_height / patch_size));
    const int num_positions = num_patches + (ctx->has_class_embedding ? 1 : 0);
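    // e.g. for a 336x336 LLaVA-1.5 input with patch_size 14 this gives
    // 24*24 = 576 patches, i.e. 577 positions once the class token is added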
    const int hidden_size = hparams.hidden_size;
    const int n_head      = hparams.n_head;
    const int d_head      = hidden_size / n_head;
    int n_layer           = hparams.n_layer;
    const float eps       = hparams.eps;

    const int batch_size = imgs->size;

    if (ctx->has_llava_projector || ctx->has_minicpmv_projector) {
        GGML_ASSERT(batch_size == 1);

    struct ggml_init_params params = {
        /*.mem_size   =*/ ctx->buf_compute_meta.size(),
        /*.mem_buffer =*/ ctx->buf_compute_meta.data(),
        /*.no_alloc   =*/ true,

    struct ggml_context * ctx0 = ggml_init(params);
    struct ggml_cgraph * gf = ggml_new_graph(ctx0);

    struct ggml_tensor * inp_raw = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, image_size_width, image_size_height, 3, batch_size);
    ggml_set_name(inp_raw, "inp_raw");
    ggml_set_input(inp_raw);

    struct ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings, inp_raw, patch_size, patch_size, 0, 0, 1, 1);

    inp = ggml_reshape_3d(ctx0, inp, num_patches, hidden_size, batch_size);
    inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 0, 2, 3));
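    // after conv2d + reshape + permute, inp has ne = [hidden_size,
    // num_patches, batch_size]: one hidden_size-dim embedding per patch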
    if (ctx->has_patch_bias) {
        // inp = ggml_add(ctx0, inp, ggml_repeat(ctx0, model.patch_bias, inp));
        inp = ggml_add(ctx0, inp, model.patch_bias);

    struct ggml_tensor * embeddings = inp;
    struct ggml_tensor * pos_embed = nullptr;

    if (ctx->has_llava_projector) {
        // concat class_embeddings and patch_embeddings
        if (ctx->has_class_embedding) {
            embeddings = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size);
            ggml_set_name(embeddings, "embeddings");
            ggml_set_input(embeddings);
            embeddings = ggml_acc(ctx0, embeddings, model.class_embedding,
                    embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], 0);
            embeddings = ggml_acc(ctx0, embeddings, inp,
                    embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], model.class_embedding->nb[1]);

    struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions);
    ggml_set_name(positions, "positions");
    ggml_set_input(positions);

        ggml_add(ctx0, embeddings, ggml_get_rows(ctx0, model.position_embeddings, positions));

    if (ctx->has_minicpmv_projector) {
        int pos_w = image_size_width/patch_size;
        int pos_h = image_size_height/patch_size;
        if (ctx->minicpmv_version == 2) {
            pos_embed = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 4096, pos_w * pos_h, 1);
        else if (ctx->minicpmv_version == 3) {
            pos_embed = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 3584, pos_w * pos_h, 1);
        ggml_set_name(pos_embed, "pos_embed");
        ggml_set_input(pos_embed);

    if (ctx->has_pre_norm) {
        embeddings = ggml_norm(ctx0, embeddings, eps);
        ggml_set_name(embeddings, "pre_ln");

        embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.pre_ln_w), model.pre_ln_b);

    if (ctx->has_minicpmv_projector) {

    for (int il = 0; il < n_layer - 1; il++) {
        struct ggml_tensor * cur = embeddings; // embeddings = residual, cur = hidden_states

        //const size_t nb_q_w = model.layers[il].q_w->nb[0];

        cur = ggml_norm(ctx0, cur, eps);

        cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_1_w),
                       model.layers[il].ln_1_b);

        struct ggml_tensor * Q =
            ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].q_w, cur), model.layers[il].q_b);

        Q = ggml_scale_inplace(ctx0, Q, 1.0f / sqrt((float)d_head));
        Q = ggml_reshape_4d(ctx0, Q, d_head, n_head, num_positions, batch_size);
        Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3));
        Q = ggml_reshape_3d(ctx0, Q, d_head, num_positions, n_head * batch_size);
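        // the reshape/permute/reshape sequence splits Q into attention heads:
        // [hidden_size, pos] -> [d_head, n_head, pos] -> [d_head, pos, n_head],
        // flattened to n_head * batch_size independent [d_head, pos] matrices
        // so each head becomes one batched mat-mul; K and V get the same
        // treatment below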
        struct ggml_tensor * K =
            ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].k_w, cur), model.layers[il].k_b);

        K = ggml_reshape_4d(ctx0, K, d_head, n_head, num_positions, batch_size);
        K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3));
        K = ggml_reshape_3d(ctx0, K, d_head, num_positions, n_head * batch_size);

        struct ggml_tensor * V =
            ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].v_w, cur), model.layers[il].v_b);

        V = ggml_reshape_4d(ctx0, V, d_head, n_head, num_positions, batch_size);
        V = ggml_cont(ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3));
        V = ggml_reshape_3d(ctx0, V, num_positions, d_head, n_head * batch_size);

        struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
        KQ = ggml_soft_max_inplace(ctx0, KQ);
        struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ);
        KQV = ggml_reshape_4d(ctx0, KQV, d_head, num_positions, n_head, batch_size);
        KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3);

        cur = ggml_cont_3d(ctx0, KQV, hidden_size, num_positions, batch_size);
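        // KQ is the per-head [num_positions, num_positions] attention matrix
        // (Q was pre-scaled by 1/sqrt(d_head) above, so no extra scaling is
        // needed here); cont_3d merges the heads back into a single
        // hidden_size-wide activation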
        cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].o_w, cur), model.layers[il].o_b);

        // re-add the layer input, i.e. the residual connection
        cur = ggml_add(ctx0, cur, embeddings);

        embeddings = cur; // embeddings = residual, cur = hidden_states

        cur = ggml_norm(ctx0, cur, eps);

        cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_2_w), model.layers[il].ln_2_b);

        cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur);
        cur = ggml_add(ctx0, cur, model.layers[il].ff_i_b);

            cur = ggml_gelu_inplace(ctx0, cur);
            cur = ggml_gelu_quick_inplace(ctx0, cur);

        cur = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur);
        cur = ggml_add(ctx0, cur, model.layers[il].ff_o_b);

        cur = ggml_add(ctx0, embeddings, cur);

    if (ctx->has_post_norm) {
        embeddings = ggml_norm(ctx0, embeddings, eps);
        ggml_set_name(embeddings, "post_ln");

        embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.post_ln_w), model.post_ln_b);

    if (ctx->has_llava_projector) {
        embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]);

        struct ggml_tensor * patches = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_patches);
        ggml_set_name(patches, "patches");
        ggml_set_input(patches);

        // shape [1, 576, 1024]
        // ne is whcn, ne = [1024, 576, 1, 1]
        embeddings = ggml_get_rows(ctx0, embeddings, patches);

        // print_tensor_info(embeddings, "embeddings");

        if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
            embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
            embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);

            embeddings = ggml_gelu(ctx0, embeddings);
            embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
            embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);

        else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
            embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
            embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
            // ggml_tensor_printf(embeddings, "mm_0_w",0,true,false);

            embeddings = ggml_norm(ctx0, embeddings, eps);
            embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_1_w),

            embeddings = ggml_gelu(ctx0, embeddings);

            // Second linear layer
            embeddings = ggml_mul_mat(ctx0, model.mm_3_w, embeddings);
            embeddings = ggml_add(ctx0, embeddings, model.mm_3_b);

            embeddings = ggml_norm(ctx0, embeddings, eps);
            embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_4_w),

        else if (ctx->proj_type == PROJECTOR_TYPE_LDP) {
            // MobileVLM projector
            struct ggml_tensor * mlp_1 = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, embeddings);
            mlp_1 = ggml_add(ctx0, mlp_1, model.mm_model_mlp_1_b);
            mlp_1 = ggml_gelu(ctx0, mlp_1);
            struct ggml_tensor * mlp_3 = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, mlp_1);
            mlp_3 = ggml_add(ctx0, mlp_3, model.mm_model_mlp_3_b);
            // mlp_3 shape = [1, 576, 2048], ne = [2048, 576, 1, 1]

            struct ggml_tensor * block_1 = nullptr;

                // transpose from [1, 576, 2048] --> [1, 2048, 576] --> [1, 2048, 24, 24]
                mlp_3 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_3, 1, 0, 2, 3));
                mlp_3 = ggml_reshape_4d(ctx0, mlp_3, n_patch, n_patch, mlp_3->ne[1], mlp_3->ne[2]);
                // stride = 1, padding = 1, bias is nullptr
                block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, 1, 1, 1, 1, 1, 1);

                // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
                block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
                // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
                block_1 = ggml_norm(ctx0, block_1, eps);
                block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_0_1_w), model.mm_model_block_1_block_0_1_b);
                block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));

                // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]

                struct ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);

                block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
                // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]

                block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
                block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc1_w, block_1);
                block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc1_b);
                block_1 = ggml_relu(ctx0, block_1);
                block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc2_w, block_1);
                block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc2_b);
                block_1 = ggml_hardsigmoid(ctx0, block_1);
                // block_1_hw shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1], block_1 shape = [1, 2048], ne = [2048, 1, 1, 1]
                block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
                block_1 = ggml_mul(ctx0, block_1_hw, block_1);

                int w = block_1->ne[0], h = block_1->ne[1];
                block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
                block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));

                // block_1 shape = [1, 24*24, 2048], ne = [24*24, 2048, 1]
                block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_2_0_w, block_1);
                block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);

                // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1]
                block_1 = ggml_norm(ctx0, block_1, eps);
                block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_2_1_w), model.mm_model_block_1_block_2_1_b);
                block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
                // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]

                block_1 = ggml_add(ctx0, mlp_3, block_1);

                block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_2_block_0_0_w, block_1, 2, 2, 1, 1, 1, 1);

                // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]

                block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3));
                // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
                block_1 = ggml_norm(ctx0, block_1, eps);
                block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_0_1_w), model.mm_model_block_2_block_0_1_b);
                block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3));
                // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]

                struct ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1);

                // not sure the parameters are right for globalAvgPooling
                block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0);
                // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]

                block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]);
                block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc1_w, block_1);
                block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc1_b);
                block_1 = ggml_relu(ctx0, block_1);
                block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc2_w, block_1);
                block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc2_b);
                block_1 = ggml_hardsigmoid(ctx0, block_1);

                // block_1_hw shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1], block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1]
                block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]);
                block_1 = ggml_mul(ctx0, block_1_hw, block_1);

                int w = block_1->ne[0], h = block_1->ne[1];
                block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]);
                block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3));
                // block_1 shape = [1, 12*12, 2048], ne = [12*12, 2048, 1]
                block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_2_0_w, block_1);
                block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]);

                // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1]
                block_1 = ggml_norm(ctx0, block_1, eps);
                block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_2_1_w), model.mm_model_block_2_block_2_1_b);
                block_1 = ggml_reshape_3d(ctx0, block_1, block_1->ne[0], block_1->ne[1] * block_1->ne[2], block_1->ne[3]);
                // block_1 shape = [1, 144, 2048], ne = [2048, 144, 1]

            embeddings = block_1;
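            // net effect of the LDP projector: the 576 patch embeddings are
            // downsampled to 144 tokens (24x24 -> 12x12 via the stride-2
            // depthwise conv) in the 2048-dim language model space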
        else if (ctx->proj_type == PROJECTOR_TYPE_LDPV2)

            struct ggml_tensor * mlp_0 = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
            mlp_0 = ggml_add(ctx0, mlp_0, model.mm_model_mlp_0_b);
            mlp_0 = ggml_gelu(ctx0, mlp_0);
            struct ggml_tensor * mlp_2 = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, mlp_0);
            mlp_2 = ggml_add(ctx0, mlp_2, model.mm_model_mlp_2_b);
            // mlp_2 ne = [2048, 576, 1, 1]
            // AVG Pool Layer 2*2, strides = 2
            mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 0, 2, 3));
            // mlp_2 ne = [576, 2048, 1, 1]
            mlp_2 = ggml_reshape_4d(ctx0, mlp_2, n_patch, n_patch, mlp_2->ne[1], mlp_2->ne[2]);
            // mlp_2 ne = [24, 24, 2048, 1]
            mlp_2 = ggml_pool_2d(ctx0, mlp_2, GGML_OP_POOL_AVG, 2, 2, 2, 2, 0, 0);
            // weight ne = [3, 3, 2048, 1]
            struct ggml_tensor * peg_0 = ggml_conv_depthwise_2d(ctx0, model.mm_model_peg_0_w, mlp_2, 1, 1, 1, 1, 1, 1);
            peg_0 = ggml_cont(ctx0, ggml_permute(ctx0, peg_0, 1, 2, 0, 3));
            peg_0 = ggml_add(ctx0, peg_0, model.mm_model_peg_0_b);
            mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 2, 0, 3));
            peg_0 = ggml_add(ctx0, peg_0, mlp_2);
            peg_0 = ggml_reshape_3d(ctx0, peg_0, peg_0->ne[0], peg_0->ne[1] * peg_0->ne[2], peg_0->ne[3]);
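            // LDPv2 reaches the same 144-token output as LDP above, but more
            // cheaply: a 2x2 average pool (24x24 -> 12x12) followed by a
            // depthwise "PEG" positional-encoding conv and a residual add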
            GGML_ABORT("fatal error");

    // minicpmv projector
    else if (ctx->has_minicpmv_projector)

        if (ctx->proj_type == PROJECTOR_TYPE_RESAMPLER) {
            struct ggml_tensor * q = model.mm_model_query;

            q = ggml_norm(ctx0, q, eps);
            q = ggml_add(ctx0, ggml_mul(ctx0, q, model.mm_model_ln_q_w), model.mm_model_ln_q_b);

            struct ggml_tensor * v = ggml_mul_mat(ctx0, model.mm_model_kv_proj, embeddings);

            v = ggml_norm(ctx0, v, eps);
            v = ggml_add(ctx0, ggml_mul(ctx0, v, model.mm_model_ln_kv_w), model.mm_model_ln_kv_b);

            struct ggml_tensor * k;

            // q = ggml_add(ctx0, q, model.mm_model_pos_embed);
            k = ggml_add(ctx0, v, pos_embed);

            int hidden_size = 4096;
            const int d_head = 128;
            int n_head = hidden_size/d_head;

            if (ctx->minicpmv_version == 2) {
                n_head = hidden_size/d_head;
            else if (ctx->minicpmv_version == 3) {
                n_head = hidden_size/d_head;

            struct ggml_tensor * Q = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_q_w, q), model.mm_model_attn_q_b);
            Q = ggml_scale_inplace(ctx0, Q, 1.0f / sqrt((float)d_head));
            struct ggml_tensor * K = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_k_w, k), model.mm_model_attn_k_b);
            struct ggml_tensor * V = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_v_w, v), model.mm_model_attn_v_b);

            Q = ggml_reshape_4d(ctx0, Q, d_head, n_head, num_query, batch_size);
            Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3));
            Q = ggml_reshape_3d(ctx0, Q, d_head, num_query, n_head * batch_size);
            K = ggml_reshape_4d(ctx0, K, d_head, n_head, num_positions, batch_size);
            K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3));
            K = ggml_reshape_3d(ctx0, K, d_head, num_positions, n_head * batch_size);
            V = ggml_reshape_4d(ctx0, V, d_head, n_head, num_positions, batch_size);
            V = ggml_cont(ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3));
            V = ggml_reshape_3d(ctx0, V, num_positions, d_head, n_head * batch_size);
            struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
            KQ = ggml_soft_max_inplace(ctx0, KQ);
            struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ);
            KQV = ggml_reshape_4d(ctx0, KQV, d_head, num_query, n_head, batch_size);
            KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
            KQV = ggml_cont_3d(ctx0, KQV, hidden_size, num_query, batch_size);

            embeddings = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_o_w, KQV), model.mm_model_attn_o_b);

            embeddings = ggml_norm(ctx0, embeddings, eps);
            embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_model_ln_post_w), model.mm_model_ln_post_b);

            embeddings = ggml_mul_mat(ctx0, model.mm_model_proj, embeddings);
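            // the resampler is a single cross-attention block: a fixed set of
            // learned query vectors attends over the position-encoded image
            // features, compressing them to num_query output embeddings in
            // the language model's hidden size (4096 for MiniCPM-V version 2,
            // 3584 for version 3, per the branches above)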
    ggml_build_forward_expand(gf, embeddings);

// read and create ggml_context containing the tensors and their data
struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
    struct ggml_context * meta = NULL;

    struct gguf_init_params params = {
        /*.no_alloc = */ true,

    struct gguf_context * ctx = gguf_init_from_file(fname, params);
        throw std::runtime_error(format("%s: failed to load CLIP model from %s. Does this file exist?\n", __func__, fname));

    if (verbosity >= 1) {
        const int n_tensors = gguf_get_n_tensors(ctx);
        const int n_kv = gguf_get_n_kv(ctx);
        const int ftype = get_u32(ctx, KEY_FTYPE);
        const std::string ftype_str = get_ftype(ftype);
        const int idx_desc = get_key_idx(ctx, KEY_DESCRIPTION);
        const std::string description = gguf_get_val_str(ctx, idx_desc);
        const int idx_name = gguf_find_key(ctx, KEY_NAME);
        if (idx_name != -1) { // make name optional temporarily as some of the uploaded models are missing it due to a bug
            const std::string name = gguf_get_val_str(ctx, idx_name);
            LOG_INF("%s: model name: %s\n", __func__, name.c_str());
        LOG_INF("%s: description: %s\n", __func__, description.c_str());
        LOG_INF("%s: GGUF version: %d\n", __func__, gguf_get_version(ctx));
        LOG_INF("%s: alignment: %zu\n", __func__, gguf_get_alignment(ctx));
        LOG_INF("%s: n_tensors: %d\n", __func__, n_tensors);
        LOG_INF("%s: n_kv: %d\n", __func__, n_kv);
        LOG_INF("%s: ftype: %s\n", __func__, ftype_str.c_str());

    const int n_tensors = gguf_get_n_tensors(ctx);

    const int n_kv = gguf_get_n_kv(ctx);
    LOG_INF("%s: loaded meta data with %d key-value pairs and %d tensors from %s\n",
        __func__, n_kv, n_tensors, fname);

    std::map<enum ggml_type, uint32_t> n_type;

    for (int i = 0; i < n_tensors; i++) {
        enum ggml_type type = gguf_get_tensor_type(ctx, i);

    LOG_INF("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
    for (int i = 0; i < n_kv; i++) {
        const char * name = gguf_get_key(ctx, i);
        const enum gguf_type type = gguf_get_kv_type(ctx, i);
        const std::string type_name =
            type == GGUF_TYPE_ARRAY
            ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(ctx, i)), gguf_get_arr_n(ctx, i))
            : gguf_type_name(type);

        std::string value = gguf_kv_to_str(ctx, i);
        const size_t MAX_VALUE_LEN = 40;
        if (value.size() > MAX_VALUE_LEN) {
            value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
        replace_all(value, "\n", "\\n");

        LOG_INF("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());

    // print type counts
    for (auto & kv : n_type) {
        if (kv.second == 0) {
        LOG_INF("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second);

    size_t model_size = 0;

    for (int i = 0; i < n_tensors; ++i) {
        const char * name = gguf_get_tensor_name(ctx, i);
        const size_t offset = gguf_get_tensor_offset(ctx, i);
        enum ggml_type type = gguf_get_tensor_type(ctx, i);
        struct ggml_tensor * cur = ggml_get_tensor(meta, name);
        size_t tensor_size = ggml_nbytes(cur);
        model_size += tensor_size;
        if (verbosity >= 3) {
            LOG_INF("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRIu64 ", %" PRIu64 ", %" PRIu64 ", %" PRIu64 "], type = %s\n",
                __func__, i, ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_type_name(type));

    clip_ctx * new_clip = new clip_ctx{};

    // update projector type

        int idx = gguf_find_key(ctx, KEY_PROJ_TYPE);

            const std::string proj_type = gguf_get_val_str(ctx, idx);
            new_clip->proj_type = clip_projector_type_from_string(proj_type);

            new_clip->proj_type = PROJECTOR_TYPE_MLP;

        if (new_clip->proj_type == PROJECTOR_TYPE_MLP) {
            if (gguf_find_tensor(ctx, format(TN_LLAVA_PROJ, 3, "weight").c_str()) != -1) {
                new_clip->proj_type = PROJECTOR_TYPE_MLP_NORM;

    new_clip->backend = ggml_backend_cuda_init(0);
    LOG_INF("%s: CLIP using CUDA backend\n", __func__);

#ifdef GGML_USE_METAL
    new_clip->backend = ggml_backend_metal_init();
    LOG_INF("%s: CLIP using Metal backend\n", __func__);

    new_clip->backend = ggml_backend_cann_init(0);
    LOG_INF("%s: CLIP using CANN backend\n", __func__);

#ifdef GGML_USE_VULKAN
    new_clip->backend = ggml_backend_vk_init(0);
    LOG_INF("%s: CLIP using Vulkan backend\n", __func__);

    if (!new_clip->backend) {
        new_clip->backend = ggml_backend_cpu_init();
        LOG_INF("%s: CLIP using CPU backend\n", __func__);

    // model size and capabilities

        int idx = get_key_idx(ctx, KEY_HAS_TEXT_ENC);
        new_clip->has_text_encoder = gguf_get_val_bool(ctx, idx);

        idx = get_key_idx(ctx, KEY_HAS_VIS_ENC);
        new_clip->has_vision_encoder = gguf_get_val_bool(ctx, idx);

        idx = gguf_find_key(ctx, KEY_HAS_LLAVA_PROJ);
            new_clip->has_llava_projector = gguf_get_val_bool(ctx, idx);

        idx = gguf_find_key(ctx, KEY_HAS_MINICPMV_PROJ);
            new_clip->has_minicpmv_projector = gguf_get_val_bool(ctx, idx);

        idx = gguf_find_key(ctx, KEY_MINICPMV_VERSION);
            new_clip->minicpmv_version = gguf_get_val_i32(ctx, idx);

        // GGML_ASSERT(new_clip->has_llava_projector); // see monatis/clip.cpp for image and/or text encoding for semantic search

        GGML_ASSERT(new_clip->has_vision_encoder);
        GGML_ASSERT(!new_clip->has_text_encoder);

        idx = get_key_idx(ctx, KEY_USE_GELU);
        new_clip->use_gelu = gguf_get_val_bool(ctx, idx);

        if (verbosity >= 1) {
            LOG_INF("%s: text_encoder: %d\n", __func__, new_clip->has_text_encoder);
            LOG_INF("%s: vision_encoder: %d\n", __func__, new_clip->has_vision_encoder);
            LOG_INF("%s: llava_projector: %d\n", __func__, new_clip->has_llava_projector);
            LOG_INF("%s: minicpmv_projector: %d\n", __func__, new_clip->has_minicpmv_projector);
            LOG_INF("%s: model size: %.2f MB\n", __func__, model_size / 1024.0 / 1024.0);
            LOG_INF("%s: metadata size: %.2f MB\n", __func__, ggml_get_mem_size(meta) / 1024.0 / 1024.0);

    LOG_INF("%s: params backend buffer size = % 6.2f MB (%i tensors)\n", __func__, model_size / (1024.0 * 1024.0), n_tensors);

    std::vector<uint8_t> read_buf;
    struct ggml_init_params params = {
        /*.mem_size   =*/ (n_tensors + 1) * ggml_tensor_overhead(),
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,

    new_clip->ctx_data = ggml_init(params);
    if (!new_clip->ctx_data) {
        LOG_ERR("%s: ggml_init() failed\n", __func__);
        clip_free(new_clip);

    auto fin = std::ifstream(fname, std::ios::binary);
        LOG_ERR("cannot open model file for loading tensors\n");
        clip_free(new_clip);

    // add tensors to context
    for (int i = 0; i < n_tensors; ++i) {
        const char * name = gguf_get_tensor_name(ctx, i);
        struct ggml_tensor * t = ggml_get_tensor(meta, name);
        struct ggml_tensor * cur = ggml_dup_tensor(new_clip->ctx_data, t);
        ggml_set_name(cur, name);

    // alloc memory and offload data
    new_clip->params_buffer = ggml_backend_alloc_ctx_tensors(new_clip->ctx_data, new_clip->backend);
    for (int i = 0; i < n_tensors; ++i) {
        const char * name = gguf_get_tensor_name(ctx, i);
        struct ggml_tensor * cur = ggml_get_tensor(new_clip->ctx_data, name);
        const size_t offset = gguf_get_data_offset(ctx) + gguf_get_tensor_offset(ctx, i);
        fin.seekg(offset, std::ios::beg);
            LOG_ERR("%s: failed to seek for tensor %s\n", __func__, name);
            clip_free(new_clip);

        int num_bytes = ggml_nbytes(cur);
        if (ggml_backend_buffer_is_host(new_clip->params_buffer)) {
            // for the CPU and Metal backend, we can read directly into the tensor
            fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
            // read into a temporary buffer first, then copy to device memory
            read_buf.resize(num_bytes);
            fin.read(reinterpret_cast<char *>(read_buf.data()), num_bytes);
            ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes);

    if (new_clip->has_vision_encoder) {
        // load vision model
        auto & vision_model = new_clip->vision_model;
        auto & hparams = vision_model.hparams;
        hparams.hidden_size = get_u32(ctx, format(KEY_N_EMBD, "vision"));
        hparams.n_head = get_u32(ctx, format(KEY_N_HEAD, "vision"));
        hparams.n_intermediate = get_u32(ctx, format(KEY_N_FF, "vision"));
        hparams.n_layer = get_u32(ctx, format(KEY_N_BLOCK, "vision"));
        hparams.image_size = get_u32(ctx, KEY_IMAGE_SIZE);
        hparams.patch_size = get_u32(ctx, KEY_PATCH_SIZE);
        hparams.projection_dim = get_u32(ctx, format(KEY_PROJ_DIM, "vision"));
        hparams.eps = get_f32(ctx, format(KEY_LAYER_NORM_EPS, "vision"));

            int idx = get_key_idx(ctx, KEY_IMAGE_GRID_PINPOINTS);
            int n = gguf_get_arr_n(ctx, idx);
            const int32_t * pinpoints = (const int32_t *)gguf_get_arr_data(ctx, idx);
            for (int i = 0; i < 32 && i < n && pinpoints[i] != 0; ++i) {
                hparams.image_grid_pinpoints[i] = pinpoints[i];
            hparams.image_grid_pinpoints[n] = 0;
        } catch (std::runtime_error & /*e*/) {
            hparams.image_grid_pinpoints[0] = 0;

            int idx = get_key_idx(ctx, KEY_MM_PATCH_MERGE_TYPE);
            strcpy(hparams.mm_patch_merge_type, gguf_get_val_str(ctx, idx));
        } catch (std::runtime_error & /*e*/) {
            strcpy(hparams.mm_patch_merge_type, "flat");

            hparams.image_crop_resolution = get_u32(ctx, KEY_IMAGE_CROP_RESOLUTION); // llava-1.6
        } catch(const std::exception& /*e*/) {
            hparams.image_crop_resolution = hparams.image_size;

        int idx_mean = get_key_idx(ctx, KEY_IMAGE_MEAN);
        int idx_std = get_key_idx(ctx, KEY_IMAGE_STD);

        const float * mean_data = (const float *)gguf_get_arr_data(ctx, idx_mean);
        const float * std_data = (const float *)gguf_get_arr_data(ctx, idx_std);

        for (int i = 0; i < 3; ++i) {
            new_clip->image_mean[i] = mean_data[i];
            new_clip->image_std[i] = std_data[i];

        if (verbosity >= 2) {
            LOG_INF("\n%s: vision model hparams\n", __func__);
            LOG_INF("image_size %d\n", hparams.image_size);
            LOG_INF("patch_size %d\n", hparams.patch_size);
            LOG_INF("v_hidden_size %d\n", hparams.hidden_size);
            LOG_INF("v_n_intermediate %d\n", hparams.n_intermediate);
            LOG_INF("v_projection_dim %d\n", hparams.projection_dim);
            LOG_INF("v_n_head %d\n", hparams.n_head);
            LOG_INF("v_n_layer %d\n", hparams.n_layer);
            LOG_INF("v_eps %f\n", hparams.eps);
            LOG_INF("v_image_mean %f %f %f\n", new_clip->image_mean[0], new_clip->image_mean[1], new_clip->image_mean[2]);
            LOG_INF("v_image_std %f %f %f\n", new_clip->image_std[0], new_clip->image_std[1], new_clip->image_std[2]);
            LOG_INF("v_image_grid_pinpoints: ");
            for (int i = 0; i < 32 && (hparams.image_grid_pinpoints[i] != 0); ++i) {
                LOG_INF("%d ", hparams.image_grid_pinpoints[i]);
            LOG_INF("v_mm_patch_merge_type: %s\n", hparams.mm_patch_merge_type);

            vision_model.class_embedding = get_tensor(new_clip->ctx_data, TN_CLASS_EMBD);
            new_clip->has_class_embedding = true;
        } catch (const std::exception& /*e*/) {
            new_clip->has_class_embedding = false;

            vision_model.pre_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "weight"));
            vision_model.pre_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "bias"));
            new_clip->has_pre_norm = true;
        } catch (std::exception & /*e*/) {
            new_clip->has_pre_norm = false;

            vision_model.post_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "weight"));
            vision_model.post_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "bias"));
            new_clip->has_post_norm = true;
        } catch (std::exception & /*e*/) {
            new_clip->has_post_norm = false;

            vision_model.patch_bias = get_tensor(new_clip->ctx_data, TN_PATCH_BIAS);
            new_clip->has_patch_bias = true;
        } catch (std::exception & /*e*/) {
            new_clip->has_patch_bias = false;

            vision_model.patch_embeddings = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD);
            vision_model.position_embeddings = get_tensor(new_clip->ctx_data, format(TN_POS_EMBD, "v"));
        } catch(const std::exception& /*e*/) {
            LOG_ERR("%s: failed to load vision model tensors\n", __func__);

        if (new_clip->proj_type == PROJECTOR_TYPE_MLP || new_clip->proj_type == PROJECTOR_TYPE_MLP_NORM) {
            vision_model.mm_0_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "weight"));
            vision_model.mm_0_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "bias"));

                vision_model.mm_1_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 1, "weight"));
                vision_model.mm_1_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 1, "bias"));
            } catch (std::runtime_error & /*e*/) { }

                // missing in Yi-type llava
                vision_model.mm_2_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "weight"));
                vision_model.mm_2_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "bias"));
            } catch (std::runtime_error & /*e*/) { }

                vision_model.mm_3_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 3, "weight"));
                vision_model.mm_3_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 3, "bias"));
            } catch (std::runtime_error & /*e*/) { }

                vision_model.mm_4_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 4, "weight"));
                vision_model.mm_4_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 4, "bias"));
            } catch (std::runtime_error & /*e*/) { }

                vision_model.image_newline = get_tensor(new_clip->ctx_data, TN_IMAGE_NEWLINE);
                // LOG_INF("%s: image_newline tensor (llava-1.6) found\n", __func__);
            } catch (std::runtime_error & /*e*/) { }
        } else if (new_clip->proj_type == PROJECTOR_TYPE_LDP) {
            // MobileVLM projection
            vision_model.mm_model_mlp_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 1, "weight"));
            vision_model.mm_model_mlp_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 1, "bias"));
            vision_model.mm_model_mlp_3_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 3, "weight"));
            vision_model.mm_model_mlp_3_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 3, "bias"));
            vision_model.mm_model_block_1_block_0_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight"));
            vision_model.mm_model_block_1_block_0_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.weight"));
            vision_model.mm_model_block_1_block_0_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.bias"));
            vision_model.mm_model_block_1_block_1_fc1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.weight"));
            vision_model.mm_model_block_1_block_1_fc1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.bias"));
            vision_model.mm_model_block_1_block_1_fc2_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.weight"));
            vision_model.mm_model_block_1_block_1_fc2_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.bias"));
            vision_model.mm_model_block_1_block_2_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 2, "0.weight"));
            vision_model.mm_model_block_1_block_2_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.weight"));
            vision_model.mm_model_block_1_block_2_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.bias"));
            vision_model.mm_model_block_2_block_0_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 0, "0.weight"));
            vision_model.mm_model_block_2_block_0_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.weight"));
            vision_model.mm_model_block_2_block_0_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.bias"));
            vision_model.mm_model_block_2_block_1_fc1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.weight"));
            vision_model.mm_model_block_2_block_1_fc1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.bias"));
            vision_model.mm_model_block_2_block_1_fc2_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.weight"));
            vision_model.mm_model_block_2_block_1_fc2_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.bias"));
            vision_model.mm_model_block_2_block_2_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight"));
            vision_model.mm_model_block_2_block_2_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight"));
            vision_model.mm_model_block_2_block_2_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias"));

        else if (new_clip->proj_type == PROJECTOR_TYPE_LDPV2)

            // MobileVLM_V2 projection
            vision_model.mm_model_mlp_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 0, "weight"));
            vision_model.mm_model_mlp_0_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 0, "bias"));
            vision_model.mm_model_mlp_2_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 2, "weight"));
            vision_model.mm_model_mlp_2_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 2, "bias"));
            vision_model.mm_model_peg_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_PEG, 0, "weight"));
            vision_model.mm_model_peg_0_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_PEG, 0, "bias"));

        else if (new_clip->proj_type == PROJECTOR_TYPE_RESAMPLER) {
            // vision_model.mm_model_pos_embed = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD);
            vision_model.mm_model_pos_embed_k = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD_K);
            vision_model.mm_model_query = get_tensor(new_clip->ctx_data, TN_MINICPMV_QUERY);
            vision_model.mm_model_proj = get_tensor(new_clip->ctx_data, TN_MINICPMV_PROJ);
            vision_model.mm_model_kv_proj = get_tensor(new_clip->ctx_data, TN_MINICPMV_KV_PROJ);
            vision_model.mm_model_attn_q_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "q", "weight"));
            vision_model.mm_model_attn_k_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "k", "weight"));
            vision_model.mm_model_attn_v_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "v", "weight"));
            vision_model.mm_model_attn_q_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "q", "bias"));
            vision_model.mm_model_attn_k_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "k", "bias"));
            vision_model.mm_model_attn_v_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "v", "bias"));
            vision_model.mm_model_attn_o_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "out", "weight"));
            vision_model.mm_model_attn_o_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "out", "bias"));
            vision_model.mm_model_ln_q_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "q", "weight"));
            vision_model.mm_model_ln_q_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "q", "bias"));
            vision_model.mm_model_ln_kv_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "kv", "weight"));
            vision_model.mm_model_ln_kv_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "kv", "bias"));
            vision_model.mm_model_ln_post_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "post", "weight"));
            vision_model.mm_model_ln_post_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "post", "bias"));

            std::string proj_type = PROJECTOR_TYPE_NAMES[new_clip->proj_type];
            throw std::runtime_error(format("%s: projector type %s is not supported currently\n", __func__, proj_type.c_str()));

        vision_model.layers.resize(hparams.n_layer);

        for (int il = 0; il < hparams.n_layer; ++il) {
            auto & layer = vision_model.layers[il];
            layer.k_w = get_tensor(new_clip->ctx_data, format(TN_ATTN_K, "v", il, "weight"));
            layer.q_w = get_tensor(new_clip->ctx_data, format(TN_ATTN_Q, "v", il, "weight"));
            layer.v_w = get_tensor(new_clip->ctx_data, format(TN_ATTN_V, "v", il, "weight"));
            layer.o_w = get_tensor(new_clip->ctx_data, format(TN_ATTN_OUTPUT, "v", il, "weight"));
            layer.ln_1_w = get_tensor(new_clip->ctx_data, format(TN_LN_1, "v", il, "weight"));
            layer.ln_2_w = get_tensor(new_clip->ctx_data, format(TN_LN_2, "v", il, "weight"));
            layer.ff_i_w = get_tensor(new_clip->ctx_data, format(TN_FFN_DOWN, "v", il, "weight"));
            layer.ff_o_w = get_tensor(new_clip->ctx_data, format(TN_FFN_UP, "v", il, "weight"));
            layer.k_b = get_tensor(new_clip->ctx_data, format(TN_ATTN_K, "v", il, "bias"));
            layer.q_b = get_tensor(new_clip->ctx_data, format(TN_ATTN_Q, "v", il, "bias"));
            layer.v_b = get_tensor(new_clip->ctx_data, format(TN_ATTN_V, "v", il, "bias"));
            layer.o_b = get_tensor(new_clip->ctx_data, format(TN_ATTN_OUTPUT, "v", il, "bias"));
            layer.ln_1_b = get_tensor(new_clip->ctx_data, format(TN_LN_1, "v", il, "bias"));
            layer.ln_2_b = get_tensor(new_clip->ctx_data, format(TN_LN_2, "v", il, "bias"));
            layer.ff_i_b = get_tensor(new_clip->ctx_data, format(TN_FFN_DOWN, "v", il, "bias"));
            layer.ff_o_b = get_tensor(new_clip->ctx_data, format(TN_FFN_UP, "v", il, "bias"));

    new_clip->ctx_gguf = ctx;

    // measure mem requirement and allocate

        new_clip->buf_compute_meta.resize(GGML_DEFAULT_GRAPH_SIZE * ggml_tensor_overhead() + ggml_graph_overhead());
        new_clip->compute_alloc = ggml_gallocr_new(ggml_backend_get_default_buffer_type(new_clip->backend));
        clip_image_f32_batch batch;
        ggml_cgraph * gf = clip_image_build_graph(new_clip, &batch, nullptr, false);
        ggml_gallocr_reserve(new_clip->compute_alloc, gf);
        size_t compute_memory_buffer_size = ggml_gallocr_get_buffer_size(new_clip->compute_alloc, 0);
        LOG_INF("%s: compute allocated memory: %.2f MB\n", __func__, compute_memory_buffer_size /1024.0/1024.0);
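        // the graph is built once here (with no_alloc contexts) purely so
        // ggml_gallocr_reserve() can measure a worst-case compute buffer;
        // the graph is rebuilt again for each actual encode call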
void clip_add_load_image_size(struct clip_ctx * ctx_clip, struct clip_image_size * load_image_size) {
    ctx_clip->load_image_size = load_image_size;
}

struct clip_image_size * clip_image_size_init() {
    struct clip_image_size * load_image_size = new struct clip_image_size();
    load_image_size->width = 448;
    load_image_size->height = 448;
    return load_image_size;
}

struct clip_image_u8 * clip_image_u8_init() {
    return new clip_image_u8();
}

struct clip_image_f32 * clip_image_f32_init() {
    return new clip_image_f32();
}

void clip_image_u8_free(struct clip_image_u8 * img) { delete img; }
void clip_image_f32_free(struct clip_image_f32 * img) { delete img; }

void clip_image_u8_batch_free(struct clip_image_u8_batch * batch) {
    if (batch->size > 0) {
        delete[] batch->data;
        batch->size = 0;
    }
}

void clip_image_f32_batch_free(struct clip_image_f32_batch * batch) {
    if (batch->size > 0) {
        delete[] batch->data;
        batch->size = 0;
    }
}
static void build_clip_img_from_data(const stbi_uc * data, int nx, int ny, clip_image_u8 * img) {
    img->nx = nx;
    img->ny = ny;
    img->buf.resize(3 * nx * ny);
    memcpy(img->buf.data(), data, img->buf.size());
}
bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) {
    int nx, ny, nc;
    auto * data = stbi_load(fname, &nx, &ny, &nc, 3);
    if (!data) {
        LOG_ERR("%s: failed to load image '%s'\n", __func__, fname);
        return false;
    }
    build_clip_img_from_data(data, nx, ny, img);
    stbi_image_free(data);
    return true;
}
bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img) {
    int nx, ny, nc;
    auto * data = stbi_load_from_memory(bytes, bytes_length, &nx, &ny, &nc, 3);
    if (!data) {
        LOG_ERR("%s: failed to decode image bytes\n", __func__);
        return false;
    }
    build_clip_img_from_data(data, nx, ny, img);
    stbi_image_free(data);
    return true;
}
// Linear interpolation between two points
inline float clip_lerp(float s, float e, float t) {
    return s + (e - s) * t;
}
// Bilinear resize function
static void bilinear_resize(const clip_image_u8& src, clip_image_u8& dst, int target_width, int target_height) {
    dst.nx = target_width;
    dst.ny = target_height;
    dst.buf.resize(3 * target_width * target_height);

    float x_ratio = static_cast<float>(src.nx - 1) / target_width;
    float y_ratio = static_cast<float>(src.ny - 1) / target_height;

    for (int y = 0; y < target_height; y++) {
        for (int x = 0; x < target_width; x++) {
            float px = x_ratio * x;
            float py = y_ratio * y;
            int x_floor = static_cast<int>(px);
            int y_floor = static_cast<int>(py);
            float x_lerp = px - x_floor;
            float y_lerp = py - y_floor;

            for (int c = 0; c < 3; c++) {
                float top = clip_lerp(
                    static_cast<float>(src.buf[3 * (y_floor * src.nx + x_floor) + c]),
                    static_cast<float>(src.buf[3 * (y_floor * src.nx + (x_floor + 1)) + c]),
                    x_lerp
                );
                float bottom = clip_lerp(
                    static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + x_floor) + c]),
                    static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + (x_floor + 1)) + c]),
                    x_lerp
                );
                dst.buf[3 * (y * target_width + x) + c] = static_cast<uint8_t>(clip_lerp(top, bottom, y_lerp));
            }
        }
    }
}
// Normalize image to float32 - careful with pytorch .to(model.device, dtype=torch.float16) - this sometimes reduces precision (32>16>32), sometimes not
static void normalize_image_u8_to_f32(const clip_image_u8* src, clip_image_f32* dst, const float mean[3], const float std[3]) {
    dst->nx = src->nx;
    dst->ny = src->ny;
    dst->buf.resize(src->buf.size());

    for (size_t i = 0; i < src->buf.size(); ++i) {
        int c = i % 3; // rgb
        dst->buf[i] = (static_cast<float>(src->buf[i]) / 255.0f - mean[c]) / std[c];
    }
}
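// Worked example (added comment, not in the original source): with the
// OpenAI CLIP statistics mean[0] = 0.48145466 and std[0] = 0.26862954, a
// fully saturated red channel value of 255 maps to
//   (255/255 - 0.48145466) / 0.26862954 ≈ 1.9303
// and a value of 0 maps to ≈ -1.7922, so the normalized values roughly span
// the interval [-1.8, 1.9].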
inline int clip(int x, int lower, int upper) {
    return std::max(lower, std::min(x, upper));
}
static bool bicubic_resize(const clip_image_u8 &img, clip_image_u8 &dst, int target_width, int target_height) {
    const int nx = img.nx;
    const int ny = img.ny;

    dst.nx = target_width;
    dst.ny = target_height;
    dst.buf.resize(3 * target_width * target_height);

    float Cc;
    float C[5];
    float d0, d2, d3, a0, a1, a2, a3;
    int i, j, k, jj;
    int x, y;
    float dx, dy;
    float tx, ty;

    tx = (float)nx / (float)target_width;
    ty = (float)ny / (float)target_height;

    // Bicubic interpolation; adapted from ViT.cpp, inspired from :
    //  -> https://github.com/yglukhov/bicubic-interpolation-image-processing/blob/master/libimage.c#L36
    //  -> https://en.wikipedia.org/wiki/Bicubic_interpolation

    for (i = 0; i < target_height; i++) {
        for (j = 0; j < target_width; j++) {
            x = (int)(tx * j);
            y = (int)(ty * i);

            dx = tx * j - x;
            dy = ty * i - y;

            for (k = 0; k < 3; k++) {
                // horizontal pass: fit a cubic through four rows of neighbors
                for (jj = 0; jj <= 3; jj++) {
                    d0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x - 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                    d2 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                    d3 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 2, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
                    a0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];

                    a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
                    a2 =  1.0 / 2 * d0 + 1.0 / 2 * d2;
                    a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;

                    C[jj] = a0 + a1 * dx + a2 * dx * dx + a3 * dx * dx * dx;
                }

                // vertical pass: same cubic fit across the four row results
                d0 = C[0] - C[1];
                d2 = C[2] - C[1];
                d3 = C[3] - C[1];
                a0 = C[1];
                a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3;
                a2 =  1.0 / 2 * d0 + 1.0 / 2 * d2;
                a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3;
                Cc = a0 + a1 * dy + a2 * dy * dy + a3 * dy * dy * dy;

                const uint8_t Cc2 = std::min(std::max(std::round(Cc), 0.0f), 255.0f);
                dst.buf[(i * target_width + j) * 3 + k] = Cc2;
            }
        }
    }

    return true;
}
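// Note on the coefficients above (added comment): both passes fit a cubic
// a0 + a1*t + a2*t^2 + a3*t^3 through four neighboring samples, with the
// finite differences d0, d2, d3 taken around the center sample — the classic
// 4-tap bicubic kernel evaluated at the fractional offsets dx and dy.
// Out-of-range taps at the image border are clamped via clip().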
// llava-1.6 type of resize_and_pad (black)
static void resize_and_pad_image(const clip_image_u8& image, clip_image_u8 &image_output, const std::pair<int, int>& target_resolution) {
    int target_width  = target_resolution.first;
    int target_height = target_resolution.second;

    float scale_w = static_cast<float>(target_width) / image.nx;
    float scale_h = static_cast<float>(target_height) / image.ny;

    int new_width, new_height;

    if (scale_w < scale_h) {
        new_width  = target_width;
        new_height = std::min(static_cast<int>(std::ceil(image.ny * scale_w)), target_height);
    } else {
        new_height = target_height;
        new_width  = std::min(static_cast<int>(std::ceil(image.nx * scale_h)), target_width);
    }

    clip_image_u8 resized_image;
    // bilinear_resize(image, resized_image, new_width, new_height);
    bicubic_resize(image, resized_image, new_width, new_height);

    clip_image_u8 padded_image;
    padded_image.nx = target_width;
    padded_image.ny = target_height;
    padded_image.buf.resize(3 * target_width * target_height, 0); // Initialize with black

    // Calculate padding offsets
    int pad_x = (target_width  - new_width)  / 2;
    int pad_y = (target_height - new_height) / 2;

    // Copy the resized image into the center of the padded buffer
    for (int y = 0; y < new_height; ++y) {
        for (int x = 0; x < new_width; ++x) {
            for (int c = 0; c < 3; ++c) {
                padded_image.buf[3 * ((y + pad_y) * target_width + (x + pad_x)) + c] = resized_image.buf[3 * (y * new_width + x) + c];
            }
        }
    }
    image_output = std::move(padded_image);
}
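// Worked example (added comment, not in the original source): a 640x480 input
// targeted at 672x672 gives scale_w = 1.05 and scale_h = 1.40; the smaller
// scale wins, so the image is resized to 672x504 and centered with
// pad_y = (672 - 504) / 2 = 84 rows of black padding above and below.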
/**
 * Selects the best resolution from a list of possible resolutions based on the original size.
 *
 * @param original_size The original size of the image in the format (width, height).
 * @param possible_resolutions A list of possible resolutions in the format [(width1, height1), (width2, height2), ...].
 * @return The best fit resolution in the format (width, height).
 */
static std::pair<int, int> select_best_resolution(const std::pair<int, int> & original_size, const std::vector<std::pair<int, int>> & possible_resolutions) {
    int original_width  = original_size.first;
    int original_height = original_size.second;
    std::pair<int, int> best_fit;
    int max_effective_resolution = 0;
    int min_wasted_resolution = std::numeric_limits<int>::max();

    for (const auto& resolution : possible_resolutions) {
        int width  = resolution.first;
        int height = resolution.second;
        float scale = std::min(static_cast<float>(width) / original_width, static_cast<float>(height) / original_height);
        int downscaled_width  = static_cast<int>(original_width * scale);
        int downscaled_height = static_cast<int>(original_height * scale);
        int effective_resolution = std::min(downscaled_width * downscaled_height, original_width * original_height);
        int wasted_resolution = (width * height) - effective_resolution;
        // LOG_INF("resolution: %d %d, scale: %f, downscaled: %d %d, effective: %d, wasted: %d\n", width, height, scale, downscaled_width, downscaled_height, effective_resolution, wasted_resolution);
        if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_resolution < min_wasted_resolution)) {
            max_effective_resolution = effective_resolution;
            min_wasted_resolution = wasted_resolution;
            best_fit = resolution;
        }
    }

    return best_fit;
}
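// Worked example (added comment, not in the original source): for an 800x600
// input and candidates {672x672, 1344x336}: 672x672 scales the image by 0.84
// to 672x504 (effective 338688 px, wasted 112896), while 1344x336 scales by
// 0.56 to 448x336 (effective 150528, wasted 301056), so 672x672 is selected.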
static std::vector<clip_image_u8*> divide_to_patches_u8(const clip_image_u8 & image, int patch_size) {
    std::vector<clip_image_u8*> patches;
    int width  = image.nx;
    int height = image.ny;
    for (int i = 0; i < height; i += patch_size) {
        for (int j = 0; j < width; j += patch_size) {
            clip_image_u8 *patch = clip_image_u8_init();
            patch->nx = std::min(patch_size, width - j);
            patch->ny = std::min(patch_size, height - i);
            patch->buf.resize(3 * patch->nx * patch->ny);
            for (int y = 0; y < patch->ny; ++y) {
                for (int x = 0; x < patch->nx; ++x) {
                    for (int c = 0; c < 3; ++c) {
                        patch->buf[3 * (y * patch->nx + x) + c] = image.buf[3 * ((i + y) * width + (j + x)) + c];
                    }
                }
            }
            patches.push_back(patch);
        }
    }
    return patches;
}
static int ensure_divide(int length, int patch_size) {
    return std::max(static_cast<int>(std::round(static_cast<float>(length) / patch_size) * patch_size), patch_size);
}
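// Examples (added comment, not in the original source): ensure_divide(500, 14)
// rounds 500/14 ≈ 35.7 to 36 and returns 504; ensure_divide(5, 14) would round
// down to 0, so the result is clamped to at least one patch (14).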
static std::pair<int, int> uhd_find_best_resize(std::pair<int, int> original_size, int scale_resolution, int patch_size, bool allow_upscale = false) {
    int width  = original_size.first;
    int height = original_size.second;
    if ((width * height > scale_resolution * scale_resolution) || allow_upscale) {
        float r = static_cast<float>(width) / height;
        height = static_cast<int>(scale_resolution / std::sqrt(r));
        width  = static_cast<int>(height * r);
    }
    int best_width  = ensure_divide(width, patch_size);
    int best_height = ensure_divide(height, patch_size);
    return std::make_pair(best_width, best_height);
}
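// Worked example (added comment, not in the original source): a 1200x800 image
// with scale_resolution 448 and patch_size 14 exceeds 448*448 px, so it is
// rescaled at constant aspect ratio r = 1.5 to about 547x365
// (448/sqrt(1.5) ≈ 365), then snapped to patch multiples by ensure_divide,
// giving 546x364.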
static std::pair<int, int> uhd_get_refine_size(std::pair<int, int> original_size, std::pair<int, int> grid, int scale_resolution, int patch_size, bool allow_upscale = false) {
    int width, height;
    std::tie(width, height) = original_size;
    int grid_x, grid_y;
    std::tie(grid_x, grid_y) = grid;

    int refine_width  = ensure_divide(width, grid_x);
    int refine_height = ensure_divide(height, grid_y);

    int grid_width  = refine_width / grid_x;
    int grid_height = refine_height / grid_y;

    // auto best_grid_size = find_best_resize(std::make_tuple(grid_width, grid_height), scale_resolution, patch_size, allow_upscale); (old line)
    auto best_grid_size = uhd_find_best_resize(std::make_pair(grid_width, grid_height), scale_resolution, patch_size, allow_upscale); // (new line) => fixes the conversion from make_tuple to make_pair
    int best_grid_width, best_grid_height;
    std::tie(best_grid_width, best_grid_height) = best_grid_size;

    // std::pair<int, int> refine_size = std::make_tuple(best_grid_width * grid_x, best_grid_height * grid_y); (old line)
    std::pair<int, int> refine_size = std::make_pair(best_grid_width * grid_x, best_grid_height * grid_y); // (new line)
    return refine_size;
}
static std::pair<int, int> uhd_best_grid(const int max_slice_nums, const int multiple, const float log_ratio) {
    std::vector<int> candidate_split_grids_nums;
    for (int i : {multiple - 1, multiple, multiple + 1}) {
        if (i == 1 || i > max_slice_nums) {
            continue;
        }
        candidate_split_grids_nums.push_back(i);
    }

    std::vector<std::pair<int, int>> candidate_grids;
    for (int split_grids_nums : candidate_split_grids_nums) {
        int m = 1;
        while (m <= split_grids_nums) {
            if (split_grids_nums % m == 0) {
                candidate_grids.emplace_back(m, split_grids_nums / m);
            }
            ++m;
        }
    }

    std::pair<int, int> best_grid{1, 1};
    float min_error = std::numeric_limits<float>::infinity();
    for (const auto& grid : candidate_grids) {
        float error = std::abs(log_ratio - std::log(1.0 * grid.first / grid.second));
        if (error < min_error) {
            best_grid = grid;
            min_error = error;
        }
    }
    return best_grid;
}
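// Worked example (added comment, not in the original source): with
// multiple = 4 and max_slice_nums = 9 the candidate slice counts are
// {3, 4, 5}; factorizing them yields the grids (1,3),(3,1),(1,4),(2,2),
// (4,1),(1,5),(5,1), and the grid whose log aspect ratio log(cols/rows) is
// closest to the image's log_ratio wins — e.g. (2,2) for a 4:3 image
// (log_ratio ≈ 0.288).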
// inspired from LLaVA-UHD:
//    -> https://arxiv.org/pdf/2403.11703
//    -> https://github.com/thunlp/LLaVA-UHD
//    -> https://github.com/thunlp/LLaVA-UHD/blob/302301bc2175f7e717fb8548516188e89f649753/llava_uhd/train/llava-uhd/slice_logic.py#L118
static std::vector<std::vector<clip_image_u8 *>> uhd_slice_image(const clip_image_u8 * img, const int max_slice_nums=9, const int scale_resolution=448, const int patch_size=14) {
    const std::pair<int, int> original_size={img->nx,img->ny};
    const int original_width  = img->nx;
    const int original_height = img->ny;
    const float log_ratio = log(1.0*original_width/original_height);
    const float ratio = 1.0 * original_width * original_height/ (scale_resolution * scale_resolution);
    const int multiple = fmin(ceil(ratio), max_slice_nums);

    std::vector<std::vector<clip_image_u8 *>> images;
    LOG_INF("%s: multiple %d\n", __func__, multiple);
    images.push_back(std::vector<clip_image_u8 *>());

    if (multiple <= 1) {
        auto best_size = uhd_find_best_resize(original_size, scale_resolution, patch_size, true);
        clip_image_u8 * source_image = clip_image_u8_init();
        bicubic_resize(*img, *source_image, best_size.first, best_size.second);
        // source_image = image.resize(best_size, Image.Resampling.BICUBIC)
        images[images.size()-1].push_back(source_image);
    }
    else if (multiple > 1) {
        auto best_size = uhd_find_best_resize(original_size, scale_resolution, patch_size);
        clip_image_u8 * source_image = clip_image_u8_init();
        bicubic_resize(*img, *source_image, best_size.first, best_size.second);
        // source_image = image.copy().resize(best_resize, Image.Resampling.BICUBIC)
        LOG_INF("%s: image_size: %d %d; source_image size: %d %d\n", __func__, img->nx, img->ny, best_size.first, best_size.second);
        images[images.size()-1].push_back(source_image);

        std::pair<int, int> best_grid = uhd_best_grid(max_slice_nums, multiple, log_ratio);
        LOG_INF("%s: image_size: %d %d; best_grid: %d %d\n", __func__, img->nx, img->ny, best_grid.first, best_grid.second);

        auto refine_size = uhd_get_refine_size(original_size, best_grid, scale_resolution, patch_size, true);
        clip_image_u8 * refine_image = clip_image_u8_init();
        bicubic_resize(*img, *refine_image, refine_size.first, refine_size.second);

        LOG_INF("%s: refine_image_size: %d %d; refine_size: %d %d\n", __func__, refine_image->nx, refine_image->ny, refine_size.first, refine_size.second);

        // split_to_patches
        int width  = refine_image->nx;
        int height = refine_image->ny;
        int grid_x = int(width  / best_grid.first);
        int grid_y = int(height / best_grid.second);
        for (int patches_i = 0, ic = 0; patches_i < height && ic < best_grid.second; patches_i += grid_y, ic += 1){
            images.push_back(std::vector<clip_image_u8 *>());
            for(int patches_j = 0, jc = 0; patches_j < width && jc < best_grid.first; patches_j += grid_x, jc += 1){
                clip_image_u8 * patch = clip_image_u8_init();
                patch->nx = grid_x;
                patch->ny = grid_y;
                patch->buf.resize(3 * patch->nx * patch->ny);
                for (int y = patches_i; y < patches_i + grid_y; ++y) {
                    for (int x = patches_j; x < patches_j + grid_x; ++x) {
                        const int i = 3 * (y * refine_image->nx + x);
                        const int j = 3 * ((y-patches_i) * patch->nx + (x-patches_j));
                        patch->buf[j]   = refine_image->buf[i];
                        patch->buf[j+1] = refine_image->buf[i+1];
                        patch->buf[j+2] = refine_image->buf[i+2];
                    }
                }
                images[images.size()-1].push_back(patch);
            }
        }
        clip_image_u8_free(refine_image);
    }
    return images;
}
int clip_uhd_num_image_embeds_col(struct clip_ctx * ctx_clip) {
    const int max_slice_nums=9;
    const int scale_resolution=448;
    const int original_width  = ctx_clip->load_image_size->width;
    const int original_height = ctx_clip->load_image_size->height;
    const float log_ratio = log(1.0*original_width/original_height);
    const float ratio = 1.0 * original_width * original_height/ (scale_resolution * scale_resolution);
    const int multiple = fmin(ceil(ratio), max_slice_nums);
    std::pair<int, int> best_grid = uhd_best_grid(max_slice_nums, multiple, log_ratio);
    return best_grid.first;
}
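// Worked example (added comment, not in the original source): an 800x600 input
// gives ratio = 480000 / (448*448) ≈ 2.39, so multiple = 3; among the grids
// for slice counts {2, 3, 4}, the best match for log(800/600) ≈ 0.288 is
// (2,2), so this returns 2 image-embedding columns.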
// returns the normalized float tensor for llava-1.5, for spatial_unpad with anyres processing for llava-1.6 it returns the normalized image patch tensors as a vector
// res_imgs memory is being allocated here, previous allocations will be freed if found
bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, clip_image_f32_batch * res_imgs) {

    if(clip_is_minicpmv(ctx)){
        int max_slice_nums = 9;
        std::vector<std::vector<clip_image_u8 *>> imgs = uhd_slice_image(img, max_slice_nums);
        res_imgs->size = 0;
        for (size_t i = 0; i < imgs.size(); ++i){
            res_imgs->size += imgs[i].size();
        }
        res_imgs->data = new clip_image_f32[res_imgs->size];
        int idx = 0;
        for (size_t i = 0; i < imgs.size(); ++i) {
            for (size_t j = 0; j < imgs[i].size(); ++j) {
                LOG_DBG("%s: %d %d\n", __func__,imgs[i][j]->nx,imgs[i][j]->ny);
                clip_image_f32 * res = clip_image_f32_init();
                normalize_image_u8_to_f32(imgs[i][j], res, ctx->image_mean, ctx->image_std);
                res_imgs->data[idx++] = *res;
                clip_image_f32_free(res);
            }
        }
        return true;
    }
    bool pad_to_square = true;
    if (!ctx->has_vision_encoder) {
        LOG_ERR("This gguf file seems to have no vision encoder\n");
        return false;
    }

    auto & params = ctx->vision_model.hparams;
    // The model config actually contains all we need to decide on how to preprocess, here we automatically switch to the new llava-1.6 preprocessing
    if (strcmp(params.mm_patch_merge_type, "spatial_unpad") == 0) {
        pad_to_square = false;
    }
    // free the previous res_imgs if any set
    if (res_imgs->size > 0) {
        clip_image_f32_batch_free(res_imgs);
    }
    res_imgs->data = nullptr;
    res_imgs->size = 0;

    // the logic below is to pad the shorter side to the longer side with a background color: rgb(122, 116, 104)
    // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156

    clip_image_u8 * temp = clip_image_u8_init(); // we will keep the input image data here temporarily
    if (pad_to_square && img->nx != img->ny) {
        int longer_side = std::max(img->nx, img->ny);
        temp->nx = longer_side;
        temp->ny = longer_side;
        temp->buf.resize(3 * longer_side * longer_side);
        const uint8_t bc[3] = {122, 116, 104}; // background color in RGB from LLaVA (this is the mean rgb color * 255)

        // fill with background color
        for (size_t i = 0; i < temp->buf.size(); i++) {
            temp->buf[i] = bc[i % 3];
        }

        // copy from the input image
        for (int y = 0; y < img->ny; y++) {
            for (int x = 0; x < img->nx; x++) {
                const int i = 3 * (y * img->nx + x);
                const int j = 3 * (y * temp->nx + x);
                temp->buf[j]   = img->buf[i];
                temp->buf[j+1] = img->buf[i+1];
                temp->buf[j+2] = img->buf[i+2];
            }
        }
    } else {
        if (params.image_grid_pinpoints[0] != 0) {
            // "spatial_unpad" with "anyres" processing for llava-1.6
            std::vector<std::pair<int, int>> possible_resolutions;
            for (int i = 0; i < 32 && params.image_grid_pinpoints[i] != 0; i+=2) {
                possible_resolutions.push_back({params.image_grid_pinpoints[i], params.image_grid_pinpoints[i+1]});
            }
            std::pair<int, int> best_resolution = select_best_resolution({img->nx, img->ny}, possible_resolutions);
            // clip_image_save_to_bmp(*img, "input.bmp");
            resize_and_pad_image(*img, *temp, best_resolution); // we do not pad with mean-bg color anymore in llava-1.6
            // clip_image_save_to_bmp(*temp, "resized.bmp");
            // visually verify normalized image:
            // normalize_image_u8_to_f32(*temp, *res, ctx->image_mean, ctx->image_std);
            // clip_image_u8 * temp2 = clip_image_u8_init();
            // clip_image_convert_f32_to_u8(*res, *temp2);
            // clip_image_save_to_bmp(*temp2, "resized_normalized_f32.bmp");
            // clip_image_u8_free(temp2);

            std::vector<clip_image_u8 *> patches = divide_to_patches_u8(*temp, params.image_size); // prepare spatial sorted main patches of image_size each (336 in llava-1.6)

            clip_image_u8 *image_original_resize = clip_image_u8_init();
            // bilinear_resize(*img, *image_original_resize, params.image_size, params.image_size); // in python this is "shortest_edge", but all CLIP are square
            bicubic_resize(*img, *image_original_resize, params.image_size, params.image_size); // in python this is "shortest_edge", but all CLIP are square
            patches.insert(patches.begin(), image_original_resize);

            // clip_image_f32_batch_init(patches.size());
            res_imgs->size = patches.size();
            res_imgs->data = new clip_image_f32[res_imgs->size];
            int num = 0;
            for (auto& patch : patches) {
                normalize_image_u8_to_f32(patch, &res_imgs->data[num], ctx->image_mean, ctx->image_std);
                num++;
            }

            for (size_t i = 0; i < patches.size(); i++) {
                // LOG_DBG("patch %d: %d %d\n", i, patches[i]->nx, patches[i]->ny);
                clip_image_u8_free(patches[i]);
            }

            clip_image_u8_free(temp);

            return true;
        } else {
            temp->nx = img->nx;
            temp->ny = img->ny;
            temp->buf.resize(img->buf.size());
            memcpy(temp->buf.data(), img->buf.data(), temp->buf.size());
        }
    }
    const int nx = temp->nx;
    const int ny = temp->ny;
    // clip_image_save_to_bmp(*temp, "resized_vanilla.bmp");

    const int nx2 = ctx->vision_model.hparams.image_size;
    const int ny2 = ctx->vision_model.hparams.image_size;
    clip_image_f32 * res = clip_image_f32_init();
    res->nx = nx2;
    res->ny = ny2;
    res->buf.resize(3 * nx2 * ny2);

    const float scale = std::max(nx, ny) / (float)ctx->vision_model.hparams.image_size;

    const int nx3 = int(nx / scale + 0.5f);
    const int ny3 = int(ny / scale + 0.5f);

    const auto & m3 = ctx->image_mean; // {0.48145466f, 0.4578275f, 0.40821073f};
    const auto & s3 = ctx->image_std;  // {0.26862954f, 0.26130258f, 0.27577711f};

    for (int y = 0; y < ny3; y++) {
        for (int x = 0; x < nx3; x++) {
            for (int c = 0; c < 3; c++) {
                // linear interpolation
                const float sx = (x + 0.5f) * scale - 0.5f;
                const float sy = (y + 0.5f) * scale - 0.5f;

                const int x0 = std::max(0, (int)std::floor(sx));
                const int y0 = std::max(0, (int)std::floor(sy));

                const int x1 = std::min(x0 + 1, nx - 1);
                const int y1 = std::min(y0 + 1, ny - 1);

                const float dx = sx - x0;
                const float dy = sy - y0;

                const int j00 = 3 * (y0 * nx + x0) + c;
                const int j01 = 3 * (y0 * nx + x1) + c;
                const int j10 = 3 * (y1 * nx + x0) + c;
                const int j11 = 3 * (y1 * nx + x1) + c;

                const float v00 = temp->buf[j00];
                const float v01 = temp->buf[j01];
                const float v10 = temp->buf[j10];
                const float v11 = temp->buf[j11];

                const float v0 = v00 * (1.0f - dx) + v01 * dx;
                const float v1 = v10 * (1.0f - dx) + v11 * dx;

                const float v = v0 * (1.0f - dy) + v1 * dy;

                const uint8_t v2 = std::min(std::max(std::round(v), 0.0f), 255.0f);

                const int i = 3 * (y * nx3 + x) + c;

                res->buf[i] = ((float(v2) / 255.0f) - m3[c]) / s3[c];
            }
        }
    }
    clip_image_u8_free(temp);

    // clip_image_u8 * temp2 = clip_image_u8_init();
    // clip_image_convert_f32_to_u8(*res, *temp2);
    // clip_image_save_to_bmp(*temp2, "resized_normalized_f32_vanilla.bmp");
    // clip_image_u8_free(temp2);

    // res_imgs.push_back(res);
    res_imgs->size = 1;
    res_imgs->data = new clip_image_f32[res_imgs->size];
    res_imgs->data[0] = *res;
    clip_image_f32_free(res);

    return true;
}
ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) {
    return ctx->vision_model.image_newline;
}

void clip_free(clip_ctx * ctx) {
    ggml_free(ctx->ctx_data);
    gguf_free(ctx->ctx_gguf);

    ggml_backend_buffer_free(ctx->params_buffer);
    ggml_backend_free(ctx->backend);
    ggml_gallocr_free(ctx->compute_alloc);
    delete ctx;
}
size_t clip_embd_nbytes(const struct clip_ctx * ctx) {
    return clip_n_patches(ctx) * clip_n_mmproj_embd(ctx) * sizeof(float);
}
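// Worked example (added comment, not in the original source): for a
// llava-1.5-style model with image_size 336, patch_size 14 and a 4096-dim
// projector output this is (336/14)^2 * 4096 * 4 bytes = 576 * 4096 * 4
// ≈ 9.4 MB per encoded image.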
int32_t clip_image_size(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.image_size;
}

int32_t clip_patch_size(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.patch_size;
}

int32_t clip_hidden_size(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.hidden_size;
}

const char * clip_patch_merge_type(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.mm_patch_merge_type;
}

const int32_t * clip_image_grid(const struct clip_ctx * ctx) {
    return ctx->vision_model.hparams.image_grid_pinpoints;
}
int clip_n_patches(const struct clip_ctx * ctx) {
    const auto & params = ctx->vision_model.hparams;

    int n_patches = (params.image_size / params.patch_size) * (params.image_size / params.patch_size);

    if (ctx->proj_type == PROJECTOR_TYPE_LDP || ctx->proj_type == PROJECTOR_TYPE_LDPV2) {
        n_patches /= 4;
    } else if (ctx->proj_type == PROJECTOR_TYPE_RESAMPLER) {
        if (ctx->minicpmv_version == 2) {
            n_patches = 96;
        }
        else if (ctx->minicpmv_version == 3) {
            n_patches = 64;
        }
    }

    return n_patches;
}
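// Note (added comment, not in the original source): the resampler projector
// compresses the vision tokens down to a fixed number of learned queries,
// which is why the patch count above is a constant (96 or 64, matching the
// MiniCPM-V query counts) rather than a function of the image size; the LDP
// projectors halve each spatial dimension, hence the /4.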
static std::vector<std::vector<std::vector<float>>> get_1d_sincos_pos_embed_from_grid_new(int embed_dim, const std::vector<std::vector<float>> & pos) {
    assert(embed_dim % 2 == 0);
    int H = pos.size();
    int W = pos[0].size();

    std::vector<float> omega(embed_dim / 2);
    for (int i = 0; i < embed_dim / 2; ++i) {
        omega[i] = 1.0 / pow(10000.0, static_cast<float>(i) / (embed_dim / 2));
    }

    std::vector<std::vector<std::vector<float>>> emb(H, std::vector<std::vector<float>>(W, std::vector<float>(embed_dim)));
    for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
            for (int d = 0; d < embed_dim / 2; ++d) {
                float out_value = pos[h][w] * omega[d];
                emb[h][w][d] = sin(out_value);
                emb[h][w][d + embed_dim / 2] = cos(out_value);
            }
        }
    }
    return emb;
}
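// Formula (added comment, not in the original source): this is the standard
// transformer sin/cos embedding, omega_i = 1 / 10000^(i / (D/2)) for
// i in [0, D/2), with emb[.., i] = sin(pos * omega_i) and
// emb[.., i + D/2] = cos(pos * omega_i) — all sines in the first half of the
// vector and all cosines in the second.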
static std::vector<std::vector<std::vector<float>>> get_2d_sincos_pos_embed_from_grid(int embed_dim, const std::vector<std::vector<std::vector<float>>> & grid) {
    assert(embed_dim % 2 == 0);
    std::vector<std::vector<std::vector<float>>> emb_h = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[0]); // (H, W, D/2)
    std::vector<std::vector<std::vector<float>>> emb_w = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[1]); // (H, W, D/2)

    int H = emb_h.size();
    int W = emb_h[0].size();
    std::vector<std::vector<std::vector<float>>> emb(H, std::vector<std::vector<float>>(W, std::vector<float>(embed_dim)));

    for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
            for (int d = 0; d < embed_dim / 2; ++d) {
                emb[h][w][d] = emb_h[h][w][d];
                emb[h][w][d + embed_dim / 2] = emb_w[h][w][d];
            }
        }
    }
    return emb;
}
static std::vector<std::vector<float>> get_2d_sincos_pos_embed(int embed_dim, const std::pair<int, int> image_size) {
    int grid_h_size = image_size.first;
    int grid_w_size = image_size.second;

    std::vector<float> grid_h(grid_h_size);
    std::vector<float> grid_w(grid_w_size);

    for (int i = 0; i < grid_h_size; ++i) {
        grid_h[i] = static_cast<float>(i);
    }
    for (int i = 0; i < grid_w_size; ++i) {
        grid_w[i] = static_cast<float>(i);
    }

    std::vector<std::vector<float>> grid(grid_h_size, std::vector<float>(grid_w_size));
    for (int h = 0; h < grid_h_size; ++h) {
        for (int w = 0; w < grid_w_size; ++w) {
            grid[h][w] = grid_w[w];
        }
    }
    std::vector<std::vector<std::vector<float>>> grid_2d = {grid, grid};
    for (int h = 0; h < grid_h_size; ++h) {
        for (int w = 0; w < grid_w_size; ++w) {
            grid_2d[0][h][w] = grid_h[h];
            grid_2d[1][h][w] = grid_w[w];
        }
    }

    std::vector<std::vector<std::vector<float>>> pos_embed_3d = get_2d_sincos_pos_embed_from_grid(embed_dim, grid_2d);

    int H = image_size.first;
    int W = image_size.second;
    std::vector<std::vector<float>> pos_embed_2d(H * W, std::vector<float>(embed_dim));
    for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
            pos_embed_2d[w * H + h] = pos_embed_3d[h][w];
        }
    }

    return pos_embed_2d;
}
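// Note (added comment, not in the original source): the final flattening uses
// index w * H + h, i.e. column-major order, so the (h, w) grid is walked down
// each column first; the consumer in clip_image_batch_encode below indexes
// the returned table linearly in that same order when filling "pos_embed".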
bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec) {
    if (!ctx->has_vision_encoder) {
        LOG_ERR("This gguf file seems to have no vision encoder\n");
        return false;
    }

    clip_image_f32_batch imgs{};
    imgs.size = 1;
    imgs.data = img;
    return clip_image_batch_encode(ctx, n_threads, &imgs, vec);
}
bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_image_f32_batch * imgs, float * vec) {
    if (!ctx->has_vision_encoder) {
        LOG_ERR("This gguf file seems to have no vision encoder\n");
        return false;
    }

    int batch_size = imgs->size;
    if (ctx->has_llava_projector) {
        GGML_ASSERT(batch_size == 1); // TODO: support multiple images
    }
    if (ctx->has_minicpmv_projector) {
        GGML_ASSERT(batch_size == 1);
    }

    // build the inference graph
    ggml_cgraph * gf = clip_image_build_graph(ctx, imgs, ctx->load_image_size, true);
    ggml_gallocr_alloc_graph(ctx->compute_alloc, gf);

    // set inputs
    const auto & model = ctx->vision_model;
    const auto & hparams = model.hparams;

    const int image_size = hparams.image_size;
    int image_size_width  = image_size;
    int image_size_height = image_size;
    if (ctx->has_minicpmv_projector) {
        image_size_width  = imgs->data[0].nx;
        image_size_height = imgs->data[0].ny;
    }
    const int patch_size = hparams.patch_size;
    const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size));
    const int num_positions = num_patches + (ctx->has_class_embedding ? 1 : 0);
    if(ctx->load_image_size==nullptr){
        ctx->load_image_size= clip_image_size_init();
    }
    const int pos_w = ctx->load_image_size->width/patch_size;
    const int pos_h = ctx->load_image_size->height/patch_size;

    {
        struct ggml_tensor * inp_raw = ggml_graph_get_tensor(gf, "inp_raw");
        float * data = (float *)malloc(ggml_nbytes(inp_raw));

        for (size_t i = 0; i < imgs->size; i++) {
            const int nx = imgs->data[i].nx;
            const int ny = imgs->data[i].ny;
            if (!ctx->has_minicpmv_projector) {
                GGML_ASSERT(nx == image_size && ny == image_size);
            }

            const int n = nx * ny;

            // convert the interleaved RGBRGB... (HWC) buffer into the planar
            // CHW layout expected by the patch embedding
            for (int b = 0; b < batch_size; b++) {
                for (int k = 0; k < 3; k++) {
                    for (int y = 0; y < ny; y++) {
                        for (int x = 0; x < nx; x++) {
                            data[(b * 3 * n) + k * n + y * nx + x] = imgs->data[b].buf[3 * (y * nx + x) + k];
                        }
                    }
                }
            }
        }
        ggml_backend_tensor_set(inp_raw, data, 0, ggml_nbytes(inp_raw));
        free(data);
    }
    if (ctx->has_minicpmv_projector) {
        {
            // inspired from siglip:
            //    -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit
            //    -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit/blob/d66538faeba44480d0bfaa42145eef26f9423199/modeling_siglip.py#L316
            struct ggml_tensor * positions = ggml_graph_get_tensor(gf, "positions");
            int* positions_data = (int*)malloc(ggml_nbytes(positions));
            // each axis is bucketed into 70 bins, so the patch at (i, j) gets
            // the fractional position id bucket_h * 70 + bucket_w
            int bucket_coords_h[70];
            int bucket_coords_w[70];
            for (int i = 0; i < pos_h; i++){
                bucket_coords_h[i] = std::floor(70.0*i/pos_h);
            }
            for (int i = 0; i < pos_w; i++){
                bucket_coords_w[i] = std::floor(70.0*i/pos_w);
            }
            for (int i = 0, id = 0; i < pos_h; i++){
                for (int j = 0; j < pos_w; j++){
                    positions_data[id++] = bucket_coords_h[i]*70 + bucket_coords_w[j];
                }
            }
            ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions));
            free(positions_data);
        }

        {
            // inspired from resampler of Qwen-VL:
            //    -> https://huggingface.co/Qwen/Qwen-VL/tree/main
            //    -> https://huggingface.co/Qwen/Qwen-VL/blob/0547ed36a86561e2e42fecec8fd0c4f6953e33c4/visual.py#L23
            struct ggml_tensor * pos_embed = ggml_graph_get_tensor(gf, "pos_embed");
            int embed_dim = 4096;
            if (ctx->minicpmv_version == 2) {
                embed_dim = 4096;
            }
            else if (ctx->minicpmv_version == 3) {
                embed_dim = 3584;
            }
            auto pos_embed_t = get_2d_sincos_pos_embed(embed_dim, std::make_pair(pos_w, pos_h));

            float * pos_embed_data = (float *)malloc(ggml_nbytes(pos_embed));
            for(int i=0;i<pos_w * pos_h;++i){
                for(int j=0;j<embed_dim;++j){
                    pos_embed_data[i*embed_dim+j]=pos_embed_t[i][j];
                }
            }

            ggml_backend_tensor_set(pos_embed, pos_embed_data, 0, ggml_nbytes(pos_embed));
            free(pos_embed_data);
        }
    }
    else {
        if (ctx->has_class_embedding) {
            struct ggml_tensor * embeddings = ggml_graph_get_tensor(gf, "embeddings");

            void* zero_mem = malloc(ggml_nbytes(embeddings));
            memset(zero_mem, 0, ggml_nbytes(embeddings));
            ggml_backend_tensor_set(embeddings, zero_mem, 0, ggml_nbytes(embeddings));
            free(zero_mem);
        }

        {
            struct ggml_tensor * positions = ggml_graph_get_tensor(gf, "positions");

            int* positions_data = (int*)malloc(ggml_nbytes(positions));
            for (int i = 0; i < num_positions; i++) {
                positions_data[i] = i;
            }
            ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions));
            free(positions_data);
        }

        {
            struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches");
            int* patches_data = (int*)malloc(ggml_nbytes(patches));
            for (int i = 0; i < num_patches; i++) {
                patches_data[i] = i + 1;
            }
            ggml_backend_tensor_set(patches, patches_data, 0, ggml_nbytes(patches));
            free(patches_data);
        }
    }
    if (ggml_backend_is_cpu(ctx->backend)) {
        ggml_backend_cpu_set_n_threads(ctx->backend, n_threads);
    }

#ifdef GGML_USE_METAL
    if (ggml_backend_is_metal(ctx->backend)) {
        ggml_backend_metal_set_n_cb(ctx->backend, n_threads);
    }
#endif

    ggml_backend_graph_compute(ctx->backend, gf);

    // the last node is the embedding tensor
    struct ggml_tensor * embeddings = ggml_graph_node(gf, -1);

    // copy the embeddings to the location passed by the user
    ggml_backend_tensor_get(embeddings, vec, 0, ggml_nbytes(embeddings));

    return true;
}
bool clip_model_quantize(const char * fname_inp, const char * fname_out, const int itype) {
    ggml_type type = GGML_TYPE_Q4_1;

    assert(itype < GGML_TYPE_COUNT);
    type = static_cast<ggml_type>(itype);

    auto * ctx_clip = clip_model_load(fname_inp, 2);

    const auto & ctx_src = ctx_clip->ctx_gguf;
    const auto & ctx_data = ctx_clip->ctx_data;

    auto * ctx_out = gguf_init_empty();
    gguf_set_kv(ctx_out, ctx_src);
    gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
    gguf_set_val_u32(ctx_out, "general.file_type", itype);

    auto fout = std::ofstream(fname_out, std::ios::binary);

    const int n_tensors = gguf_get_n_tensors(ctx_src);

    for (int i = 0; i < n_tensors; ++i) {
        const char * name = gguf_get_tensor_name(ctx_src, i);
        struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name);
        gguf_add_tensor(ctx_out, cur);
    }

    // write zeros as a placeholder for the metadata, which is rewritten at the end
    const size_t meta_size = gguf_get_meta_size(ctx_out);
    for (size_t i = 0; i < meta_size; ++i) {
        fout.put(0);
    }

    // regexes of tensor names to be quantized
    const std::vector<std::string> k_names = {
        ".*weight",
    };

    std::vector<uint8_t> work(512);
    std::vector<float> conv_buf(512);
    size_t total_size_org = 0;
    size_t total_size_new = 0;
    for (int i = 0; i < n_tensors; ++i) {
        const std::string name = gguf_get_tensor_name(ctx_src, i);
        struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name.c_str());

        enum ggml_type new_type;
        void * new_data;
        size_t new_size;

        bool quantize = false;
        for (const auto & s : k_names) {
            if (std::regex_match(name, std::regex(s))) {
                quantize = true;
                break;
            }
        }

        // quantize only 2D tensors
        quantize &= (ggml_n_dims(cur) == 2);

        if (quantize) {
            new_type = type;
            if (new_type >= GGML_TYPE_Q2_K && name.find("embd") != std::string::npos) {
                new_type = GGML_TYPE_Q8_0; // ggml_get_rows needs non K type
                // LOG_ERR("%s: quantizing %s to %s\n", __func__, name.c_str(), ggml_type_name(new_type));
            }
            const size_t n_elms = ggml_nelements(cur);
            float * f32_data;

            switch (cur->type) {
            case GGML_TYPE_F32:
                f32_data = (float *)cur->data;
                break;
            case GGML_TYPE_F16:
                if (conv_buf.size() < n_elms) {
                    conv_buf.resize(n_elms);
                }
                for (size_t j = 0; j < n_elms; ++j) {
                    conv_buf[j] = ggml_fp16_to_fp32(((ggml_fp16_t *)cur->data)[j]);
                }
                f32_data = (float *)conv_buf.data();
                break;
            default:
                LOG_ERR("Please use an input file in f32 or f16\n");
                gguf_free(ctx_out);
                return false;
            }

            if (work.size() < n_elms * 4) {
                work.resize(n_elms * 4);
            }
            new_data = work.data();

            new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, n_elms/cur->ne[0], cur->ne[0], nullptr);
        } else {
            new_type = cur->type;
            new_data = cur->data;
            new_size = ggml_nbytes(cur);
        }
        const size_t orig_size = ggml_nbytes(cur);
        total_size_org += orig_size;
        total_size_new += new_size;
        gguf_set_tensor_type(ctx_out, name.c_str(), new_type);
        gguf_set_tensor_data(ctx_out, name.c_str(), new_data, new_size);
        fout.write((const char *)new_data, new_size);
        size_t pad = GGML_PAD(new_size, gguf_get_alignment(ctx_out)) - new_size;
        for (size_t j = 0; j < pad; ++j) {
            fout.put(0);
        }

        LOG_INF("%s: n_dims = %d | quantize=%d | size = %f MB -> %f MB\n", name.c_str(), ggml_n_dims(cur), quantize,
               orig_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
    }
    // go back to beginning of file and write the updated metadata
    fout.seekp(0, std::ios::beg);
    std::vector<uint8_t> meta(meta_size);
    gguf_get_meta_data(ctx_out, meta.data());
    fout.write((const char *)meta.data(), meta_size);

    fout.close();

    gguf_free(ctx_out);
    clip_free(ctx_clip);

    LOG_INF("%s: original size  = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0);
    LOG_INF("%s: quantized size = %8.2f MB\n", __func__, total_size_new / 1024.0 / 1024.0);

    return true;
}
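// Usage note (added comment, not in the original source): itype is a raw
// ggml_type value; assuming the standard ggml enum (GGML_TYPE_Q4_0 == 2,
// GGML_TYPE_Q4_1 == 3, GGML_TYPE_Q8_0 == 8), a call such as
//   clip_model_quantize("mmproj-f16.gguf", "mmproj-q4_0.gguf", 2);
// quantizes all matching 2D weight tensors to Q4_0 and copies the rest as-is.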
int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
    if (ctx->proj_type == PROJECTOR_TYPE_LDP) {
        return ctx->vision_model.mm_model_block_1_block_2_1_b->ne[0];
    }
    if (ctx->proj_type == PROJECTOR_TYPE_LDPV2) {
        return ctx->vision_model.mm_model_peg_0_b->ne[0];
    }
    if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
        return ctx->vision_model.mm_2_b->ne[0];
    }
    if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
        return ctx->vision_model.mm_3_b->ne[0];
    }
    if (ctx->proj_type == PROJECTOR_TYPE_RESAMPLER) {
        if (ctx->minicpmv_version == 2) {
            return 4096;
        }
        else if (ctx->minicpmv_version == 3) {
            return 3584;
        }
    }

    std::string proj_type = PROJECTOR_TYPE_NAMES[ctx->proj_type];
    throw std::runtime_error(format("%s: don't support projector with: %s currently\n", __func__, proj_type.c_str()));
}
int clip_is_minicpmv(const struct clip_ctx * ctx) {
    if (ctx->has_minicpmv_projector) {
        return ctx->minicpmv_version;
    }
    return 0;
}