#include "arg.h"
#include "common.h"
#include "ggml.h"
#include "ggml-alloc.h"

#include <map>
#include <vector>
#include <string>
#include <thread>
#include <fstream>
#include <memory>    // std::unique_ptr
#include <stdexcept> // std::runtime_error

static bool g_verbose = false;

// GGUF key carried by split (sharded) models; assumed to be "split.count",
// the same key the gguf-split example uses. Needed so the merge can refuse
// split base models early.
static const char * const LLM_KV_SPLIT_COUNT = "split.count";

// one entry per output tensor: either a plain copy of the base tensor
// (in == out, is_copy == true), or a base tensor that must be merged with
// its LoRA deltas into a freshly created output tensor
struct tensor_transformation {
    struct ggml_tensor * in;
    struct ggml_tensor * out;
    bool is_copy;
};

static std::string get_kv_str(struct gguf_context * ctx_gguf, const std::string & key){
    int id = gguf_find_key(ctx_gguf, key.c_str());
    return id < 0 ? "" : std::string(gguf_get_val_str(ctx_gguf, id));
}

static float get_kv_f32(struct gguf_context * ctx_gguf, const std::string & key) {
    int id = gguf_find_key(ctx_gguf, key.c_str());
    return id < 0 ? 0.0f : gguf_get_val_f32(ctx_gguf, id);
}

static void zeros(std::ofstream & file, size_t n) {
    char zero = 0;
    for (size_t i = 0; i < n; ++i) {
        file.write(&zero, 1);
    }
}

static std::string ggml_ne_string(const ggml_tensor * t) {
    std::string str;
    for (int i = 0; i < GGML_MAX_DIMS; ++i) {
        str += std::to_string(t->ne[i]);
        if (i + 1 < GGML_MAX_DIMS) {
            str += ", ";
        }
    }
    return str;
}
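// e.g. a 4096 x 4096 matrix prints as "4096, 4096, 1, 1", since ggml always
// carries GGML_MAX_DIMS (= 4) dimensions and pads the trailing ones with 1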

static struct gguf_context * load_gguf(std::string & fname, struct ggml_context ** ctx_ggml) {
    struct gguf_init_params params = {
        /*.no_alloc = */ true,
        /*.ctx      = */ ctx_ggml,
    };
    struct gguf_context * ctx_gguf = gguf_init_from_file(fname.c_str(), params);
    if (!ctx_gguf) {
        throw std::runtime_error("failed to load input GGUF from " + fname);
    }
    return ctx_gguf;
}

struct file_input {
    struct ggml_context * ctx_meta = nullptr;
    struct gguf_context * ctx_gguf = nullptr;
    std::ifstream f_in;
    std::map<std::string, ggml_tensor *> tensors;
    float alpha;
    float scale;

    file_input(std::string & fname, float scale): f_in(fname, std::ios::binary), scale(scale) {
        if (!f_in.is_open()) {
            throw std::runtime_error("failed to open input gguf from " + fname);
        }

        ctx_gguf = load_gguf(fname, &ctx_meta);
        alpha = get_kv_f32(ctx_gguf, "adapter.lora.alpha");
        printf("%s: loaded gguf from %s\n", __func__, fname.c_str());

        for (ggml_tensor * cur = ggml_get_first_tensor(ctx_meta); cur; cur = ggml_get_next_tensor(ctx_meta, cur)) {
            std::string name(cur->name);
            tensors[name] = cur;
            if (g_verbose) {
                printf("%s: %s\n", __func__, cur->name);
            }
        }
    }

    ggml_tensor * get_tensor(std::string name) {
        if (tensors.find(name) == tensors.end()) {
            return nullptr;
        }
        return tensors[name];
    }

    void read_tensor_data(std::string name, std::vector<uint8_t> & buf) {
        if (tensors.find(name) == tensors.end()) {
            throw std::runtime_error("cannot find tensor with name: " + name);
        }
        auto len = ggml_nbytes(tensors[name]);
        if (buf.size() < len) {
            buf.resize(len);
        }
        auto i_tensor_in = gguf_find_tensor(ctx_gguf, name.c_str()); // idx of tensor in the input file
        auto offset = gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, i_tensor_in);
        f_in.seekg(offset);
        f_in.read((char *) buf.data(), len);
    }

    ~file_input() {
        gguf_free(ctx_gguf);
        ggml_free(ctx_meta);
    }
};
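
// Usage sketch (hypothetical file name): list the tensors of an adapter GGUF.
// LoRA adapters store two tensors per target weight, named after the base
// tensor with ".lora_a" / ".lora_b" suffixes:
//
//   std::string fname = "adapter.gguf";
//   file_input adapter(fname, /*scale =*/ 1.0f);
//   for (auto & it : adapter.tensors) {
//       printf("%s [%s]\n", it.first.c_str(), ggml_ne_string(it.second).c_str());
//   }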

struct lora_merge_ctx {
    // input base model + adapters
    file_input base_model;
    std::vector<std::unique_ptr<file_input>> adapters;

    // for computing merged tensor
    int n_threads;
    ggml_backend_t backend = nullptr;
    ggml_gallocr_t allocr = nullptr;
    std::vector<uint8_t> read_buf;

    // output file
    struct gguf_context * ctx_out;
    struct ggml_context * ctx_out_ggml;
    std::ofstream fout;

    lora_merge_ctx(
            std::string & base_fname,
            std::vector<llama_lora_adapter_info> & lora_files,
            std::string & outfile,
            int n_threads) : base_model(base_fname, 0), n_threads(n_threads), fout(outfile, std::ios::binary) {
        fout.exceptions(std::ofstream::failbit); // fail fast on write errors

        if (gguf_find_key(base_model.ctx_gguf, LLM_KV_SPLIT_COUNT) >= 0) {
            throw std::runtime_error("split model is not yet supported");
        }

        for (auto & lora_inp : lora_files) {
            auto fname = lora_inp.path;
            auto scale = lora_inp.scale;
            std::unique_ptr<file_input> adapter(new file_input(fname, scale));
            check_metadata_lora(adapter.get());
            adapters.push_back(std::move(adapter));
        }

        ctx_out = gguf_init_empty();
        struct ggml_init_params params = {
            /*.mem_size   =*/ gguf_get_n_tensors(base_model.ctx_gguf)*ggml_tensor_overhead(),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
        ctx_out_ggml = ggml_init(params);
        backend = ggml_backend_cpu_init();
        allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(backend));
    }
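    // note: with .no_alloc = true above, ctx_out_ggml never allocates tensor
    // data, it only stores tensor "headers" - hence mem_size is just
    // n_tensors * ggml_tensor_overhead(); the actual data is streamed through
    // read_buf and written straight to fout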

    void check_metadata_lora(file_input * adapter) {
        auto general_type = get_kv_str(adapter->ctx_gguf, "general.type");
        if (general_type != "adapter") {
            throw std::runtime_error("expected general.type to be 'adapter', but got: " + general_type);
        }

        auto adapter_type = get_kv_str(adapter->ctx_gguf, "adapter.type");
        if (adapter_type != "lora") {
            throw std::runtime_error("expected adapter.type to be 'lora', but got: " + adapter_type);
        }

        auto general_arch_base = get_kv_str(base_model.ctx_gguf, "general.architecture");
        auto general_arch_lora = get_kv_str(adapter->ctx_gguf, "general.architecture");
        if (general_arch_base != general_arch_lora) {
            throw std::runtime_error("model arch and LoRA arch mismatch");
        }
    }

    ggml_type get_out_tensor_type(struct ggml_tensor * t) {
        if (t->type == GGML_TYPE_F32) {
            return GGML_TYPE_F32;
        } else {
            return GGML_TYPE_F16;
        }
    }

    void run_merge() {
        // prepare metadata
        gguf_set_kv(ctx_out, base_model.ctx_gguf);
        // output is forced to f16 for now
        gguf_set_val_u32(ctx_out, "general.file_type", LLAMA_FTYPE_MOSTLY_F16);

        // check if all lora adapters have the same tensors
        // TODO: remove this when we can support merging subset of adapters. Ref: https://github.com/ggerganov/llama.cpp/pull/8607#discussion_r1686027777
        static const char * err_no_subset_adapter = "Input adapters do not have the same list of tensors. This is not yet supported. Please merge the adapters one by one instead of merging them all at once.";
        if (adapters.size() > 1) {
            for (size_t i = 1; i < adapters.size(); ++i) {
                if (adapters[0]->tensors.size() != adapters[i]->tensors.size()) {
                    throw std::runtime_error(err_no_subset_adapter);
                }
                for (auto & it : adapters[i]->tensors) {
                    if (adapters[0]->get_tensor(it.first) == nullptr) {
                        throw std::runtime_error(err_no_subset_adapter);
                    }
                }
            }
        }

        // map each base tensor to an out tensor (same shape as the base, but possibly different type)
        std::vector<tensor_transformation> trans;
        for (auto & it : base_model.tensors) {
            bool t_a = true;
            bool t_b = true;
            for (auto & adapter : adapters) {
                t_a &= nullptr != adapter->get_tensor(it.first + ".lora_a");
                t_b &= nullptr != adapter->get_tensor(it.first + ".lora_b");
            }
            auto base_tensor = it.second;
            if (!t_a && !t_b) {
                // only copy
                struct ggml_tensor * cpy_tensor = ggml_dup_tensor(ctx_out_ggml, base_tensor);
                ggml_set_name(cpy_tensor, base_tensor->name);
                trans.push_back({
                    cpy_tensor,
                    cpy_tensor,
                    true,
                });
                gguf_add_tensor(ctx_out, cpy_tensor);
            } else if (t_a && t_b) {
                // need merging
                struct ggml_tensor * out_tensor = ggml_new_tensor(
                    ctx_out_ggml, get_out_tensor_type(base_tensor), GGML_MAX_DIMS, base_tensor->ne);
                ggml_set_name(out_tensor, base_tensor->name);
                trans.push_back({
                    base_tensor,
                    out_tensor,
                    false,
                });
                gguf_add_tensor(ctx_out, out_tensor);
            } else {
                throw std::runtime_error("tensor " + it.first + " is missing either lora_a or lora_b");
            }
        }

        // placeholder for the metadata
        {
            size_t meta_size = gguf_get_meta_size(ctx_out);
            zeros(fout, meta_size);
        }
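        // the file is written in two passes: tensor data is appended after
        // this zero-filled region first, then the finalized metadata is
        // written back over the placeholder at the end of run_merge();
        // each tensor's data is padded to GGUF_DEFAULT_ALIGNMENT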

        // process base model tensors
        size_t n_merged = 0;
        for (auto & it : trans) {
            if (!it.is_copy) {
                merge_tensor(it.in, it.out);
                n_merged++;
            } else {
                copy_tensor(it.in);
            }
        }

        // write output metadata
        {
            std::vector<uint8_t> data(gguf_get_meta_size(ctx_out));
            gguf_get_meta_data(ctx_out, data.data());
            fout.seekp(0);
            fout.write((const char *) data.data(), data.size());
        }

        printf("%s : merged %zu tensors with lora adapters\n", __func__, n_merged);
        printf("%s : wrote %zu tensors to output file\n", __func__, trans.size());
    }

    void copy_tensor(struct ggml_tensor * base) {
        printf("%s : %s [%s]\n", __func__, base->name, ggml_ne_string(base).c_str());
        size_t len = ggml_nbytes(base);
        base_model.read_tensor_data(base->name, read_buf);
        fout.write((char *) read_buf.data(), len);
        zeros(fout, GGML_PAD(len, GGUF_DEFAULT_ALIGNMENT) - len);
    }

    void merge_tensor(struct ggml_tensor * base, struct ggml_tensor * out) {
        std::string name_base(base->name);
        std::string name_lora_a = name_base + ".lora_a";
        std::string name_lora_b = name_base + ".lora_b";

        printf("%s : %s [%s]\n", __func__, base->name, ggml_ne_string(base).c_str());

        // context for input tensors
        std::vector<struct ggml_tensor *> inp_a(adapters.size());
        std::vector<struct ggml_tensor *> inp_b(adapters.size());
        struct ggml_init_params params {
            /*.mem_size   =*/ ggml_tensor_overhead()*(2+adapters.size()*2),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
        struct ggml_context * ctx = ggml_init(params);

        // alloc tensors
        struct ggml_tensor * inp_base = ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, base->ne);
        for (size_t i = 0; i < adapters.size(); ++i) {
            auto t_a = adapters[i]->get_tensor(name_lora_a);
            auto t_b = adapters[i]->get_tensor(name_lora_b);
            // TODO: add support for quantized lora
            if (ggml_is_quantized(t_a->type) || ggml_is_quantized(t_b->type)) {
                throw std::runtime_error("quantized LoRA adapters are not supported, please retry with f16 or f32");
            }
            inp_a[i] = ggml_dup_tensor(ctx, t_a);
            inp_b[i] = ggml_dup_tensor(ctx, t_b);
        }
        ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx, backend);

        // load base tensor to backend buffer
        base_model.read_tensor_data(name_base, read_buf);
        if (base->type != GGML_TYPE_F32) {
            // optionally dequantize it
            printf("%s : + dequantize base tensor from %s to F32\n", __func__, ggml_type_name(base->type));
            auto nels = ggml_nelements(inp_base);
            ggml_type_traits_t qtype = ggml_internal_get_type_traits(base->type);
            std::vector<uint8_t> dequant_buf(nels * sizeof(float));
            qtype.to_float(read_buf.data(), (float *) dequant_buf.data(), nels);
            ggml_backend_tensor_set(inp_base, dequant_buf.data(), 0, dequant_buf.size());
        } else {
            ggml_backend_tensor_set(inp_base, read_buf.data(), 0, ggml_nbytes(inp_base));
        }
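        // (to_float writes one f32 per element, hence the nels * sizeof(float)
        // buffer above; inp_base was created as GGML_TYPE_F32, so the graph
        // below always operates on f32 data regardless of the base file type)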

        // load lora tensors to backend buffer
        for (size_t i = 0; i < adapters.size(); ++i) {
            adapters[i]->read_tensor_data(name_lora_a, read_buf);
            ggml_backend_tensor_set(inp_a[i], read_buf.data(), 0, ggml_nbytes(inp_a[i]));
            adapters[i]->read_tensor_data(name_lora_b, read_buf);
            ggml_backend_tensor_set(inp_b[i], read_buf.data(), 0, ggml_nbytes(inp_b[i]));
        }

        // build graph
        struct ggml_cgraph * gf;
        {
            static size_t buf_size = ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead();
            static std::vector<uint8_t> buf(buf_size);
            struct ggml_init_params params0 = {
                /*.mem_size   =*/ buf_size,
                /*.mem_buffer =*/ buf.data(),
                /*.no_alloc   =*/ true,
            };
            struct ggml_context * ctx0 = ggml_init(params0);
            gf = ggml_new_graph(ctx0);
            struct ggml_tensor * cur = inp_base;
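            // LoRA recap: each adapter stores a low-rank decomposition of the
            // weight delta, so the merged weight is W' = W + s * (B A). The
            // effective scale s is user_scale * alpha / rank when the adapter
            // carries a non-zero alpha, and plain user_scale otherwise - see
            // `scale` below.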
            for (size_t i = 0; i < adapters.size(); ++i) {
                struct ggml_tensor * a_T = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_cast(ctx0, inp_a[i], GGML_TYPE_F32)));
                struct ggml_tensor * delta = ggml_mul_mat(ctx0, a_T, ggml_cast(ctx0, inp_b[i], GGML_TYPE_F32));
                // scale
                const float alpha = adapters[i]->alpha;
                const float rank  = (float) inp_b[i]->ne[0];
                const float scale = alpha ? adapters[i]->scale * alpha / rank : adapters[i]->scale;
                delta = ggml_scale(ctx0, delta, scale);
                cur = ggml_add(ctx0, delta, cur);
                printf("%s : + merging from adapter[%zu] type=%s\n", __func__, i, ggml_type_name(inp_a[i]->type));
                printf("%s : input_scale=%f calculated_scale=%f rank=%d\n", __func__, adapters[i]->scale, scale, (int) inp_b[i]->ne[0]);
            }
            cur = ggml_cast(ctx0, cur, out->type);
            printf("%s : + output type is %s\n", __func__, ggml_type_name(out->type));
            ggml_build_forward_expand(gf, cur);
            ggml_free(ctx0);
        }

        // compute
        {
            ggml_gallocr_alloc_graph(allocr, gf);
            ggml_backend_cpu_set_n_threads(backend, n_threads);
            ggml_backend_graph_compute(backend, gf);
        }

        // write data to output file
        {
            auto * result = ggml_graph_node(gf, -1);
            size_t len = ggml_nbytes(result);
            if (read_buf.size() < len) {
                read_buf.resize(len);
            }
            ggml_backend_tensor_get(result, read_buf.data(), 0, len);
            fout.write((char *) read_buf.data(), len);
            zeros(fout, GGML_PAD(len, GGUF_DEFAULT_ALIGNMENT) - len);
        }

        ggml_free(ctx);
        ggml_backend_buffer_free(buffer);
    }

    ~lora_merge_ctx() {
        ggml_gallocr_free(allocr);
        ggml_backend_free(backend);
        gguf_free(ctx_out);
        ggml_free(ctx_out_ggml);
    }
};

static void print_usage(int, char ** argv) {
    printf("\nexample usage:\n");
    printf("\n  %s -m base-model.gguf --lora lora-file.gguf -o merged-model-f16.gguf\n", argv[0]);
    printf("\nNOTE: output model is F16\n");
    printf("\n");
}
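// note: multiple adapters can be merged in one pass by repeating --lora;
// per-adapter weights are assumed to be settable via the common-args option
// --lora-scaled FNAME SCALE, which populates lora_adapters[i].scale above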

int main(int argc, char ** argv) {
    gpt_params params;

    if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_EXPORT_LORA, print_usage)) {
        return 1;
    }

    g_verbose = (params.verbosity > 1);
    try {
        lora_merge_ctx ctx(params.model, params.lora_adapters, params.lora_outfile, params.cpuparams.n_threads);
        ctx.run_merge();
    } catch (const std::exception & err) {
        fprintf(stderr, "%s\n", err.what());
        exit(EXIT_FAILURE);
    }

    printf("done, output file is %s\n", params.lora_outfile.c_str());

    return 0;
}