llama / export-lora.cpp · 421 lines · 15.8 KB
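
// export-lora: merges one or more LoRA adapters into a base model and writes
// the result as a single standalone GGUF file (output is F16; see print_usage()).
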
#include "arg.h"
#include "common.h"
#include "ggml.h"
#include "ggml-alloc.h"

#include <map>
#include <memory>    // std::unique_ptr (used below; may also be pulled in via common.h)
#include <stdexcept> // std::runtime_error
#include <vector>
#include <string>
#include <thread>
#include <fstream>

static bool g_verbose = false;
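
// One entry per output tensor: `in` is the source tensor, `out` the slot in the
// output context; is_copy marks base tensors that are passed through unchanged
// because no adapter provides a lora_a/lora_b pair for them.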
struct tensor_transformation {
    struct ggml_tensor * in;
    struct ggml_tensor * out;
    bool is_copy;
};

static std::string get_kv_str(struct gguf_context * ctx_gguf, const std::string & key){
    int id = gguf_find_key(ctx_gguf, key.c_str());
    return id < 0 ? "" : std::string(gguf_get_val_str(ctx_gguf, id));
}

static float get_kv_f32(struct gguf_context * ctx_gguf, const std::string & key) {
    int id = gguf_find_key(ctx_gguf, key.c_str());
    return id < 0 ? 0.0f : gguf_get_val_f32(ctx_gguf, id);
}

static void zeros(std::ofstream & file, size_t n) {
    char zero = 0;
    for (size_t i = 0; i < n; ++i) {
        file.write(&zero, 1);
    }
}

static std::string ggml_ne_string(const ggml_tensor * t) {
    std::string str;
    for (int i = 0; i < GGML_MAX_DIMS; ++i) {
        str += std::to_string(t->ne[i]);
        if (i + 1 < GGML_MAX_DIMS) {
            str += ", ";
        }
    }
    return str;
}

static struct gguf_context * load_gguf(std::string & fname, struct ggml_context ** ctx_ggml) {
    struct gguf_init_params params = {
        /*.no_alloc = */ true,
        /*.ctx      = */ ctx_ggml,
    };
    struct gguf_context * ctx_gguf = gguf_init_from_file(fname.c_str(), params);
    if (!ctx_gguf) {
        throw std::runtime_error("failed to load input GGUF from " + fname);
    }
    return ctx_gguf;
}
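
// A GGUF input file (base model or adapter). Only the metadata is loaded up
// front (no_alloc = true); tensor data is read on demand from the underlying
// stream via read_tensor_data().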
struct file_input {
    struct ggml_context * ctx_meta = nullptr;
    struct gguf_context * ctx_gguf = nullptr;
    std::ifstream f_in;
    std::map<std::string, ggml_tensor *> tensors;
    float alpha;
    float scale;

    file_input(std::string & fname, float scale): f_in(fname, std::ios::binary), scale(scale) {
        if (!f_in.is_open()) {
            throw std::runtime_error("failed to open input gguf from " + fname);
        }

        ctx_gguf = load_gguf(fname, &ctx_meta);
        alpha = get_kv_f32(ctx_gguf, "adapter.lora.alpha");
        printf("%s: loaded gguf from %s\n", __func__, fname.c_str());

        for (ggml_tensor * cur = ggml_get_first_tensor(ctx_meta); cur; cur = ggml_get_next_tensor(ctx_meta, cur)) {
            std::string name(cur->name);
            tensors[name] = cur;
            if (g_verbose) {
                printf("%s: %s\n", __func__, cur->name);
            }
        }
    }

    ggml_tensor * get_tensor(std::string name) {
        if (tensors.find(name) == tensors.end()) {
            return nullptr;
        }
        return tensors[name];
    }

    void read_tensor_data(std::string name, std::vector<uint8_t> & buf) {
        if (tensors.find(name) == tensors.end()) {
            throw std::runtime_error("cannot find tensor with name: " + name);
        }
        auto len = ggml_nbytes(tensors[name]);
        if (buf.size() < len) {
            buf.resize(len);
        }
        auto i_tensor_in = gguf_find_tensor(ctx_gguf, name.c_str()); // idx of tensor in the input file
        auto offset = gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, i_tensor_in);
        f_in.seekg(offset);
        f_in.read((char *) buf.data(), len);
    }

    ~file_input() {
        gguf_free(ctx_gguf);
        ggml_free(ctx_meta);
    }
};
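
// Holds the base model, the adapters to merge, the CPU backend used to compute
// the merged tensors, and the output GGUF contexts/stream.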
struct lora_merge_ctx {
    // input base model + adapters
    file_input base_model;
    std::vector<std::unique_ptr<file_input>> adapters;

    // for computing merged tensors
    int n_threads;
    ggml_backend_t backend = nullptr;
    ggml_gallocr_t allocr = nullptr;
    std::vector<uint8_t> read_buf;

    // output file
    struct gguf_context * ctx_out;
    struct ggml_context * ctx_out_ggml;
    std::ofstream fout;

    lora_merge_ctx(
            std::string & base_fname,
            std::vector<llama_lora_adapter_info> & lora_files,
            std::string & outfile,
            int n_threads) : base_model(base_fname, 0), n_threads(n_threads), fout(outfile, std::ios::binary) {
        fout.exceptions(std::ofstream::failbit); // fail fast on write errors

        if (gguf_find_key(base_model.ctx_gguf, LLM_KV_SPLIT_COUNT) >= 0) {
            throw std::runtime_error("split model is not yet supported");
        }

        for (auto & lora_inp : lora_files) {
            auto fname = lora_inp.path;
            auto scale = lora_inp.scale;
            std::unique_ptr<file_input> adapter(new file_input(fname, scale));
            check_metadata_lora(adapter.get());
            adapters.push_back(std::move(adapter));
        }

        ctx_out = gguf_init_empty();
        struct ggml_init_params params = {
            /*.mem_size   =*/ gguf_get_n_tensors(base_model.ctx_gguf)*ggml_tensor_overhead(),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
        ctx_out_ggml = ggml_init(params);
        backend = ggml_backend_cpu_init();
        allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(backend));
    }
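
    // validate that the file is a LoRA adapter built for the same model
    // architecture as the base model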
    void check_metadata_lora(file_input * adapter) {
        auto general_type = get_kv_str(adapter->ctx_gguf, "general.type");
        if (general_type != "adapter") {
            throw std::runtime_error("expect general.type to be 'adapter', but got: " + general_type);
        }

        auto adapter_type = get_kv_str(adapter->ctx_gguf, "adapter.type");
        if (adapter_type != "lora") {
            throw std::runtime_error("expect adapter.type to be 'lora', but got: " + adapter_type);
        }

        auto general_arch_base = get_kv_str(base_model.ctx_gguf, "general.architecture");
        auto general_arch_lora = get_kv_str(adapter->ctx_gguf,   "general.architecture");
        if (general_arch_base != general_arch_lora) {
            throw std::runtime_error("model arch and LoRA arch mismatch");
        }
    }
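
    // output type: F32 base tensors stay F32, everything else becomes F16
    // (matches the general.file_type forced in run_merge())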
    ggml_type get_out_tensor_type(struct ggml_tensor * t) {
        if (t->type == GGML_TYPE_F32) {
            return GGML_TYPE_F32;
        } else {
            return GGML_TYPE_F16;
        }
    }
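
    // top-level driver: register all output tensors, reserve space for the
    // metadata, copy or merge each tensor, then patch the final metadata in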
    void run_merge() {
        // prepare metadata
        gguf_set_kv(ctx_out, base_model.ctx_gguf);
        // output is forced to f16 for now
        gguf_set_val_u32(ctx_out, "general.file_type", LLAMA_FTYPE_MOSTLY_F16);

        // check if all lora adapters have the same tensors
        // TODO: remove this when we can support merging subset of adapters. Ref: https://github.com/ggerganov/llama.cpp/pull/8607#discussion_r1686027777
        static const char * err_no_subset_adapter = "Input adapters do not have the same list of tensors. This is not yet supported. Please merge the adapters one by one instead of merging them all at once.";
        if (adapters.size() > 1) {
            for (size_t i = 1; i < adapters.size(); ++i) {
                if (adapters[0]->tensors.size() != adapters[i]->tensors.size()) {
                    throw std::runtime_error(err_no_subset_adapter);
                }
                for (auto & it : adapters[i]->tensors) {
                    if (adapters[0]->get_tensor(it.first) == nullptr) {
                        throw std::runtime_error(err_no_subset_adapter);
                    }
                }
            }
        }

        // map each base tensor to an output tensor (same shape as the base, but possibly a different type)
        std::vector<tensor_transformation> trans;
        for (auto & it : base_model.tensors) {
            bool t_a = true;
            bool t_b = true;
            for (auto & adapter : adapters) {
                t_a &= nullptr != adapter->get_tensor(it.first + ".lora_a");
                t_b &= nullptr != adapter->get_tensor(it.first + ".lora_b");
            }
            auto base_tensor = it.second;
            if (!t_a && !t_b) {
                // only copy
                struct ggml_tensor * cpy_tensor = ggml_dup_tensor(ctx_out_ggml, base_tensor);
                ggml_set_name(cpy_tensor, base_tensor->name);
                trans.push_back({
                    cpy_tensor,
                    cpy_tensor,
                    true,
                });
                gguf_add_tensor(ctx_out, cpy_tensor);
            } else if (t_a && t_b) {
                // need merging
                struct ggml_tensor * out_tensor = ggml_new_tensor(
                    ctx_out_ggml, get_out_tensor_type(base_tensor), GGML_MAX_DIMS, base_tensor->ne);
                ggml_set_name(out_tensor, base_tensor->name);
                trans.push_back({
                    base_tensor,
                    out_tensor,
                    false,
                });
                gguf_add_tensor(ctx_out, out_tensor);
            } else {
                throw std::runtime_error("tensor " + it.first + " missing either lora_a or lora_b");
            }
        }
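
        // NOTE: tensor offsets in the GGUF metadata are only final once every
        // tensor has been registered above, and gguf_get_meta_size() reports
        // how much room the serialized metadata needs; reserve that space now
        // and write the real metadata after all tensor data (see below)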
        // placeholder for the metadata
        {
            size_t meta_size = gguf_get_meta_size(ctx_out);
            zeros(fout, meta_size);
        }

        // process base model tensors
        size_t n_merged = 0;
        for (auto & it : trans) {
            if (!it.is_copy) {
                merge_tensor(it.in, it.out);
                n_merged++;
            } else {
                copy_tensor(it.in);
            }
        }

        // write output metadata
        {
            std::vector<uint8_t> data(gguf_get_meta_size(ctx_out));
            gguf_get_meta_data(ctx_out, data.data());
            fout.seekp(0);
            fout.write((const char *)data.data(), data.size());
        }

        printf("%s : merged %zu tensors with lora adapters\n", __func__, n_merged);
        printf("%s : wrote %zu tensors to output file\n", __func__, trans.size());
    }

    void copy_tensor(struct ggml_tensor * base) {
        printf("%s :  %s [%s]\n", __func__, base->name, ggml_ne_string(base).c_str());
        size_t len = ggml_nbytes(base);
        base_model.read_tensor_data(base->name, read_buf);
        fout.write((char *) read_buf.data(), len);
        zeros(fout, GGML_PAD(len, GGUF_DEFAULT_ALIGNMENT) - len);
    }
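
    // merge `base` with the lora_a/lora_b pair from every adapter and write
    // the result (padded to GGUF_DEFAULT_ALIGNMENT) to the output file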
    void merge_tensor(struct ggml_tensor * base, struct ggml_tensor * out) {
        std::string name_base(base->name);
        std::string name_lora_a = name_base + ".lora_a";
        std::string name_lora_b = name_base + ".lora_b";

        printf("%s : %s [%s]\n", __func__, base->name, ggml_ne_string(base).c_str());

        // context for input tensors
        std::vector<struct ggml_tensor *> inp_a(adapters.size());
        std::vector<struct ggml_tensor *> inp_b(adapters.size());
        struct ggml_init_params params {
            /*.mem_size   =*/ ggml_tensor_overhead()*(2+adapters.size()*2),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
        struct ggml_context * ctx = ggml_init(params);

        // alloc tensors
        struct ggml_tensor * inp_base = ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, base->ne);
        for (size_t i = 0; i < adapters.size(); ++i) {
            auto t_a = adapters[i]->get_tensor(name_lora_a);
            auto t_b = adapters[i]->get_tensor(name_lora_b);
            // TODO: add support for quantized lora
            if (ggml_is_quantized(t_a->type) || ggml_is_quantized(t_b->type)) {
                throw std::runtime_error("quantized LoRA adapters are not supported, please retry with f16 or f32");
            }
            inp_a[i] = ggml_dup_tensor(ctx, t_a);
            inp_b[i] = ggml_dup_tensor(ctx, t_b);
        }
        ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx, backend);

        // load base tensor to backend buffer
        base_model.read_tensor_data(name_base, read_buf);
        if (base->type != GGML_TYPE_F32) {
            // dequantize the base tensor to f32 first
            printf("%s :   + dequantize base tensor from %s to F32\n", __func__, ggml_type_name(base->type));
            auto nels = ggml_nelements(inp_base);
            ggml_type_traits_t qtype = ggml_internal_get_type_traits(base->type);
            std::vector<uint8_t> dequant_buf(nels * sizeof(float));
            qtype.to_float(read_buf.data(), (float *) dequant_buf.data(), nels);
            ggml_backend_tensor_set(inp_base, dequant_buf.data(), 0, dequant_buf.size());
        } else {
            ggml_backend_tensor_set(inp_base, read_buf.data(), 0, ggml_nbytes(inp_base));
        }

        // load lora tensors to backend buffer
        for (size_t i = 0; i < adapters.size(); ++i) {
            adapters[i]->read_tensor_data(name_lora_a, read_buf);
            ggml_backend_tensor_set(inp_a[i], read_buf.data(), 0, ggml_nbytes(inp_a[i]));
            adapters[i]->read_tensor_data(name_lora_b, read_buf);
            ggml_backend_tensor_set(inp_b[i], read_buf.data(), 0, ggml_nbytes(inp_b[i]));
        }
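
        // The merged weight follows the usual LoRA update, computed in f32:
        //
        //     W_merged = W_base + sum_i scale_i * (B_i * A_i)
        //
        // where scale_i = user_scale * alpha / rank when the adapter stores a
        // non-zero alpha (the common convention), and plain user_scale
        // otherwise. lora_a is transposed so that ggml_mul_mat produces a
        // delta with the same shape as the base tensor.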
        // build graph
        struct ggml_cgraph * gf;
        {
            static size_t buf_size = ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead();
            static std::vector<uint8_t> buf(buf_size);
            struct ggml_init_params params0 = {
                /*.mem_size   =*/ buf_size,
                /*.mem_buffer =*/ buf.data(),
                /*.no_alloc   =*/ true,
            };
            struct ggml_context * ctx0 = ggml_init(params0);
            gf = ggml_new_graph(ctx0);
            struct ggml_tensor * cur = inp_base;
            for (size_t i = 0; i < adapters.size(); ++i) {
                struct ggml_tensor * a_T = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_cast(ctx0, inp_a[i], GGML_TYPE_F32)));
                struct ggml_tensor * delta = ggml_mul_mat(ctx0, a_T, ggml_cast(ctx0, inp_b[i], GGML_TYPE_F32));
                // scale
                const float alpha = adapters[i]->alpha;
                const float rank  = (float) inp_b[i]->ne[0];
                const float scale = alpha ? adapters[i]->scale * alpha / rank : adapters[i]->scale;
                delta = ggml_scale(ctx0, delta, scale);
                cur = ggml_add(ctx0, delta, cur);
                printf("%s :   + merging from adapter[%zu] type=%s\n", __func__, i, ggml_type_name(inp_a[i]->type));
                printf("%s :     input_scale=%f calculated_scale=%f rank=%d\n", __func__, adapters[i]->scale, scale, (int) inp_b[i]->ne[0]);
            }
            cur = ggml_cast(ctx0, cur, out->type);
            printf("%s :   + output type is %s\n", __func__, ggml_type_name(out->type));
            ggml_build_forward_expand(gf, cur);
            ggml_free(ctx0);
        }

        // compute
        {
            ggml_gallocr_alloc_graph(allocr, gf);
            ggml_backend_cpu_set_n_threads(backend, n_threads);
            ggml_backend_graph_compute(backend, gf);
        }

        // write data to output file
        {
            auto * result = ggml_graph_node(gf, -1);
            size_t len = ggml_nbytes(result);
            if (read_buf.size() < len) {
                read_buf.resize(len);
            }
            ggml_backend_tensor_get(result, read_buf.data(), 0, len);
            fout.write((char *) read_buf.data(), len);
            zeros(fout, GGML_PAD(len, GGUF_DEFAULT_ALIGNMENT) - len);
        }

        ggml_free(ctx);
        ggml_backend_buffer_free(buffer);
    }

    ~lora_merge_ctx() {
        ggml_gallocr_free(allocr);
        ggml_backend_free(backend);
        gguf_free(ctx_out);
        ggml_free(ctx_out_ggml);
    }
};

static void print_usage(int, char ** argv) {
    printf("\nexample usage:\n");
    printf("\n  %s -m base-model.gguf --lora lora-file.gguf -o merged-model-f16.gguf\n", argv[0]);
    printf("\nNOTE: output model is F16\n");
    printf("\n");
}

int main(int argc, char ** argv) {
    gpt_params params;

    if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_EXPORT_LORA, print_usage)) {
        return 1;
    }

    g_verbose = (params.verbosity > 1);
    try {
        lora_merge_ctx ctx(params.model, params.lora_adapters, params.lora_outfile, params.cpuparams.n_threads);
        ctx.run_merge();
    } catch (const std::exception & err) {
        fprintf(stderr, "%s\n", err.what());
        exit(EXIT_FAILURE);
    }

    printf("done, output file is %s\n", params.lora_outfile.c_str());

    return 0;
}