infill.cpp
#include "arg.h"
#include "common.h"
#include "console.h"
#include "sampling.h"
#include "log.h"
#include "llama.h"

#include <cassert>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined (_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <signal.h>
#endif

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

static llama_context           ** g_ctx;
static llama_model             ** g_model;
static gpt_sampler             ** g_smpl;
static gpt_params               * g_params;
static std::vector<llama_token> * g_input_tokens;
static std::ostringstream       * g_output_ss;
static std::vector<llama_token> * g_output_tokens;

static bool is_interacting = false;

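// write_logfile: dump the run (params, model description, input tokens, generated
// output) as a timestamped YAML file into params.logdir, if one was specified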
static void write_logfile(
    const llama_context * ctx, const gpt_params & params, const llama_model * model,
    const std::vector<llama_token> & input_tokens, const std::string & output,
    const std::vector<llama_token> & output_tokens
) {
    if (params.logdir.empty()) {
        return;
    }

    const std::string timestamp = string_get_sortable_timestamp();

    const bool success = fs_create_directory_with_parents(params.logdir);
    if (!success) {
        LOG_ERR("%s: warning: failed to create logdir %s, cannot write logfile\n",
                __func__, params.logdir.c_str());
        return;
    }

    const std::string logfile_path = params.logdir + timestamp + ".yml";
    FILE * logfile = fopen(logfile_path.c_str(), "w");

    if (logfile == NULL) {
        LOG_ERR("%s: failed to open logfile %s\n", __func__, logfile_path.c_str());
        return;
    }

    fprintf(logfile, "binary: infill\n");
    char model_desc[128];
    llama_model_desc(model, model_desc, sizeof(model_desc));
    yaml_dump_non_result_info(logfile, params, ctx, timestamp, input_tokens, model_desc);

    fprintf(logfile, "\n");
    fprintf(logfile, "######################\n");
    fprintf(logfile, "# Generation Results #\n");
    fprintf(logfile, "######################\n");
    fprintf(logfile, "\n");

    yaml_dump_string_multiline(logfile, "output", output.c_str());
    yaml_dump_vector_int(logfile, "output_tokens", output_tokens);

    llama_perf_dump_yaml(logfile, ctx);
    fclose(logfile);
}

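// SIGINT handling: the first Ctrl+C switches to interactive mode; a second one,
// while already interacting, prints the stats, writes the logfile and exits with 130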
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
static void sigint_handler(int signo) {
    if (signo == SIGINT) {
        if (!is_interacting) {
            is_interacting = true;
        } else {
            console::cleanup();
            LOG("\n");
            gpt_perf_print(*g_ctx, *g_smpl);
            write_logfile(*g_ctx, *g_params, *g_model, *g_input_tokens, g_output_ss->str(), *g_output_tokens);

            // make sure all logs are flushed
            LOG("Interrupted by user\n");
            gpt_log_pause(gpt_log_main());

            _exit(130);
        }
    }
}
#endif

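// entry point: parse the arguments, load the model, build the infill prompt from
// params.input_prefix / params.input_suffix, then run the generation loop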
int main(int argc, char ** argv) {
    gpt_params params;
    g_params = &params;

    if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_INFILL)) {
        return 1;
    }

    gpt_init();

    auto & sparams = params.sparams;

    console::init(params.simple_io, params.use_color);
    atexit([]() { console::cleanup(); });

    if (params.logits_all) {
        LOG_ERR("\n************\n");
        LOG_ERR("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__);
        LOG_ERR("************\n\n");

        return 0;
    }

    if (params.embedding) {
        LOG_ERR("\n************\n");
        LOG_ERR("%s: please use the 'embedding' tool for embedding calculations\n", __func__);
        LOG_ERR("************\n\n");

        return 0;
    }

    if (params.n_ctx != 0 && params.n_ctx < 8) {
        LOG_WRN("%s: minimum context size is 8, using minimum size.\n", __func__);
        params.n_ctx = 8;
    }

    if (!params.interactive_first && (params.input_prefix.empty() && params.input_suffix.empty())) {
        LOG_ERR("\n************\n");
        LOG_ERR("%s: please use '--interactive_first' or specify '--in_prefix' and/or '--in_suffix'\n", __func__);
        LOG_ERR("************\n\n");

        return 0;
    }

    if (params.rope_freq_base != 0.0) {
        LOG_WRN("%s: changing RoPE frequency base to %g.\n", __func__, params.rope_freq_base);
    }

    if (params.rope_freq_scale != 0.0) {
        LOG_WRN("%s: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale);
    }

    LOG_INF("%s: llama backend init\n", __func__);
    llama_backend_init();
    llama_numa_init(params.numa);

    llama_model * model = nullptr;
    llama_context * ctx = nullptr;
    gpt_sampler  * smpl = nullptr;

    g_model = &model;
    g_ctx = &ctx;
    g_smpl = &smpl;

    // load the model and apply lora adapter, if any
    LOG_INF("%s: load the model and apply lora adapter, if any\n", __func__);
    llama_init_result llama_init = llama_init_from_gpt_params(params);

    model = llama_init.model;
    ctx = llama_init.context;

    if (model == NULL) {
        LOG_ERR("%s: unable to load model\n", __func__);
        return 1;
    }

    const int n_ctx_train = llama_n_ctx_train(model);
    const int n_ctx = llama_n_ctx(ctx);
    LOG_DBG("n_ctx: %d\n", n_ctx);

    if (n_ctx > n_ctx_train) {
        LOG_WRN("%s: model was trained on only %d context tokens (%d specified)\n", __func__, n_ctx_train, n_ctx);
    }

    // print system information
    {
        LOG_INF("\n");
        LOG_INF("%s\n", gpt_params_get_system_info(params).c_str());
    }
    const bool add_bos = llama_add_bos_token(model);
    GGML_ASSERT(!llama_add_eos_token(model));

    std::vector<llama_token> embd_inp;
    std::vector<llama_token> embd_end;
    std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
    std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);

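    // build the fill-in-the-middle (FIM) prompt: <PRE> prefix <SUF> suffix <MID>,
    // or <SUF> suffix <PRE> prefix <MID> when params.spm_infill is set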
    GGML_ASSERT(llama_token_prefix(model) >= 0);
    GGML_ASSERT(llama_token_suffix(model) >= 0);

    inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
    inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));

    embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
    embd_end = params.spm_infill ? inp_pfx : inp_sfx;
    if (add_bos) {
        embd_inp.insert(embd_inp.begin(), llama_token_bos(model));
    }
    embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());

    const llama_token middle_token = llama_token_middle(model);
    if (middle_token >= 0) {
        embd_inp.push_back(middle_token);
    }

    LOG_DBG("add_bos: %d\n", add_bos);
    LOG_DBG("prefix: \"%s\"\n", params.input_prefix.c_str());
    LOG_DBG("suffix: \"%s\"\n", params.input_suffix.c_str());
    LOG_DBG("tokens: %s\n", string_from(ctx, embd_inp).c_str());

    // Should not run without any tokens
    if (embd_inp.empty()) {
        embd_inp.push_back(llama_token_bos(model));
        LOG_WRN("embd_inp was considered empty and bos was added: %s\n", string_from(ctx, embd_inp).c_str());
    }

    if ((int) embd_inp.size() > n_ctx - 4) {
        LOG_ERR("%s: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4);
        return 1;
    }

    // number of tokens to keep when resetting context
    if (params.n_keep < 0 || params.n_keep > (int) embd_inp.size()) {
        params.n_keep = (int)embd_inp.size();
    }

    LOG_INF("inp_pfx: %s\n", string_from(ctx, inp_pfx).c_str());
    LOG_INF("inp_sfx: %s\n", string_from(ctx, inp_sfx).c_str());

    // enable interactive mode if interactive start is specified
    if (params.interactive_first) {
        params.interactive = true;
    }

    if (params.verbose_prompt) {
        LOG_INF("\n");
        LOG_INF("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        LOG_INF("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            LOG_INF("%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str());
        }

        if (params.n_keep > 0) {
            LOG_INF("%s: static prompt based on n_keep: '", __func__);
            for (int i = 0; i < params.n_keep; i++) {
                LOG("%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
            }
            LOG("'\n");
        }
        LOG_INF("\n");
    }

    if (params.interactive) {
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
        struct sigaction sigint_action;
        sigint_action.sa_handler = sigint_handler;
        sigemptyset (&sigint_action.sa_mask);
        sigint_action.sa_flags = 0;
        sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
        auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
            return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
        };
        SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif

        LOG_INF("%s: interactive mode on.\n", __func__);

        if (params.input_prefix_bos) {
            LOG_INF("Input prefix with BOS\n");
        }

        if (!params.input_prefix.empty()) {
            LOG_INF("Input prefix: '%s'\n", params.input_prefix.c_str());
        }

        if (!params.input_suffix.empty()) {
            LOG_INF("Input suffix: '%s'\n", params.input_suffix.c_str());
        }
    }
    smpl = gpt_sampler_init(model, sparams);

    LOG_INF("sampler seed: %u\n",     gpt_sampler_get_seed(smpl));
    LOG_INF("sampler params: \n%s\n", sparams.print().c_str());
    LOG_INF("sampler chain: %s\n",    gpt_sampler_print(smpl).c_str());

    LOG_INF("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);

    LOG("\n");
    LOG("\n#####  Infill mode  #####\n\n");
    if (params.interactive) {
        const char *control_message;
        if (params.multiline_input) {
            control_message = " - To return control to LLaMA, end your input with '\\'.\n"
                              " - To return control without starting a new line, end your input with '/'.\n";
        } else {
            control_message = " - Press Return to return control to LLaMA.\n"
                              " - To return control without starting a new line, end your input with '/'.\n"
                              " - If you want to submit another line, end your input with '\\'.\n";
        }
        LOG("== Running in interactive mode. ==\n");
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
        LOG(       " - Press Ctrl+C to interject at any time.\n");
#endif
        LOG(       "%s\n", control_message);

        is_interacting = params.interactive_first;
    }

    bool input_echo = true;

    int n_past     = 0;
    int n_remain   = params.n_predict;
    int n_consumed = 0;

    std::vector<int>   input_tokens;  g_input_tokens  = &input_tokens;
    std::vector<int>   output_tokens; g_output_tokens = &output_tokens;
    std::ostringstream output_ss;     g_output_ss     = &output_ss;

    // the first thing we will do is to output the prompt, so set color accordingly
    console::set_display(console::prompt);

    std::vector<llama_token> embd;

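    // main loop: decode any pending tokens in embd, then either sample the next token
    // (once the prompt is fully consumed) or batch up remaining prompt/user tokens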
    while (n_remain != 0 || params.interactive) {
        // predict
        if (!embd.empty()) {
            // Note: n_ctx - 4 here is to match the logic for commandline prompt handling via
            // --prompt or --file which uses the same value.
            int max_embd_size = n_ctx - 4;

            // Ensure the input doesn't exceed the context size by truncating embd if necessary.
            if ((int) embd.size() > max_embd_size) {
                const int skipped_tokens = (int) embd.size() - max_embd_size;
                embd.resize(max_embd_size);

                console::set_display(console::error);
                LOG_WRN("<<input too long: skipped %d token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
                console::set_display(console::reset);
            }

            // infinite text generation via context swapping
            // if we run out of context:
            // - take the n_keep first tokens from the original prompt (via n_past)
            // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches
            if (n_past + (int) embd.size() > n_ctx) {
                if (params.n_predict == -2) {
                    LOG_DBG("\n\n%s: context full and n_predict == %d => stopping\n", __func__, params.n_predict);
                    break;
                }

                const int n_left    = n_past - params.n_keep - 1;
                const int n_discard = n_left/2;

                LOG_DBG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n",
                    n_past, n_left, n_ctx, params.n_keep, n_discard);

                llama_kv_cache_seq_rm (ctx, 0, params.n_keep + 1            , params.n_keep + n_discard + 1);
                llama_kv_cache_seq_add(ctx, 0, params.n_keep + 1 + n_discard, n_past, -n_discard);

                n_past -= n_discard;

                LOG_DBG("after swap: n_past = %d\n", n_past);

                LOG_DBG("embd: %s\n", string_from(ctx, embd).c_str());
            }

            // evaluate tokens in batches
            // embd is typically prepared beforehand to fit within a batch, but not always
            for (int i = 0; i < (int) embd.size(); i += params.n_batch) {
                int n_eval = (int) embd.size() - i;
                if (n_eval > params.n_batch) {
                    n_eval = params.n_batch;
                }

                LOG_DBG("eval: %s\n", string_from(ctx, embd).c_str());

                if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval, n_past, 0))) {
                    LOG_ERR("%s : failed to eval\n", __func__);
                    return 1;
                }

                n_past += n_eval;

                LOG_DBG("n_past = %d\n", n_past);
            }
        }

        embd.clear();

        if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
            const llama_token id = gpt_sampler_sample(smpl, ctx, -1);

            gpt_sampler_accept(smpl, id, true);

            // LOG_DBG("last: %s\n", string_from(ctx, smpl->prev.to_vector()).c_str());

            embd.push_back(id);

            // echo this to console
            input_echo = true;

            // decrement remaining sampling budget
            --n_remain;

            LOG_DBG("n_remain: %d\n", n_remain);
        } else {
            // some user input remains from prompt or interaction, forward it to processing
            LOG_DBG("embd_inp.size(): %d, n_consumed: %d\n", (int) embd_inp.size(), n_consumed);
            while ((int) embd_inp.size() > n_consumed) {
                embd.push_back(embd_inp[n_consumed]);

                // push the prompt in the sampling context in order to apply repetition penalties later
                // for the prompt, we don't apply grammar rules
                gpt_sampler_accept(smpl, embd_inp[n_consumed], false);

                ++n_consumed;
                if ((int) embd.size() >= params.n_batch) {
                    break;
                }
            }
        }

        // display text
        if (input_echo) {
            for (auto id : embd) {
                const std::string token_str = llama_token_to_piece(ctx, id);
                LOG("%s", token_str.c_str());

                if (embd.size() > 1) {
                    input_tokens.push_back(id);
                } else {
                    output_tokens.push_back(id);
                    output_ss << token_str;
                }
            }
        }
        // reset color to default if there is no pending user input
        if (input_echo && (int) embd_inp.size() == n_consumed) {
            console::set_display(console::reset);
        }

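        // interactive infill: on an EOT token (or a user interrupt) read a new prefix
        // and suffix from stdin, rebuild the FIM prompt and restart decoding from n_past = 0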
        // if not currently processing queued inputs
        if ((int) embd_inp.size() <= n_consumed) {
            // deal with eot token in infill mode
            if ((gpt_sampler_last(smpl) == llama_token_eot(model) || is_interacting) && params.interactive) {
                if (is_interacting && !params.interactive_first) {
                    // print an eot token
                    LOG("%s", llama_token_to_piece(ctx, llama_token_eot(model)).c_str());
                }
                LOG("\n");
                console::set_display(console::user_input);
                std::string buffer;
                std::string line;
                bool another_line = true;
                // set a new prefix via stdin
                do {
                    another_line = console::readline(line, params.multiline_input);
                    buffer += line;
                } while (another_line);
                // check if we got an empty line, if so we use the old input
                if (!buffer.empty() && !(buffer.length() == 1 && buffer[0] == '\n')) {
                    params.input_prefix = buffer;
                }
                buffer.clear();
                // set a new suffix via stdin
                do {
                    another_line = console::readline(line, params.multiline_input);
                    buffer += line;
                } while (another_line);
                // check if we got an empty line
                if (!buffer.empty() && !(buffer.length() == 1 && buffer[0] == '\n')) {
                    params.input_suffix = buffer;
                }
                buffer.clear();
                // done taking input, reset color
                console::set_display(console::reset);

                if (params.escape) {
                    // process escape sequences; for the initial prompt this is done in common.cpp
                    // when the params are loaded, but in interactive mode we need to do it here
                    string_process_escapes(params.input_prefix);
                    string_process_escapes(params.input_suffix);
                }

                // tokenize new prefix and suffix
                std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
                std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);

                inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
                inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));

                embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
                embd_end = params.spm_infill ? inp_pfx : inp_sfx;
                if (add_bos) {
                    embd_inp.insert(embd_inp.begin(), llama_token_bos(model));
                }
                embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());

                if (middle_token >= 0) {
                    embd_inp.push_back(middle_token);
                }

                embd.clear();
                n_remain = params.n_predict;
                n_past = 0;
                n_consumed = 0;
                is_interacting = false;
            }
            // deal with end of generation tokens in interactive mode
            else if (llama_token_is_eog(model, gpt_sampler_last(smpl))) {
                LOG_DBG("found EOS token\n");

                if (params.interactive) {
                    is_interacting = true;
                    LOG("\n");
                    console::set_display(console::user_input);
                }
            }

            if (n_past > 0 && is_interacting && !params.interactive) {
                LOG_DBG("waiting for user input\n");

                if (params.input_prefix_bos) {
                    LOG_DBG("adding input prefix BOS token\n");
                    embd_inp.push_back(llama_token_bos(model));
                }

                std::string buffer;
                if (!params.input_prefix.empty()) {
                    LOG_DBG("appending input prefix: '%s'\n", params.input_prefix.c_str());
                    buffer += params.input_prefix;
                    LOG("%s", buffer.c_str());
                }

                std::string line;
                bool another_line = true;
                do {
                    another_line = console::readline(line, params.multiline_input);
                    buffer += line;
                } while (another_line);

                // done taking input, reset color
                console::set_display(console::reset);

                // Add tokens to embd only if the input buffer is non-empty
                // Entering an empty line lets the user pass control back
                if (buffer.length() > 1) {
                    // append input suffix if any
                    if (!params.input_suffix.empty()) {
                        LOG_DBG("appending input suffix: '%s'\n", params.input_suffix.c_str());
                        buffer += params.input_suffix;
                        LOG("%s", params.input_suffix.c_str());
                    }

                    LOG_DBG("buffer: '%s'\n", buffer.c_str());

                    const size_t original_size = embd_inp.size();

                    const auto line_inp = ::llama_tokenize(ctx, buffer, false);
                    LOG_DBG("input tokens: %s\n", string_from(ctx, line_inp).c_str());

                    embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());

                    for (size_t i = original_size; i < embd_inp.size(); ++i) {
                        const llama_token token = embd_inp[i];
                        output_tokens.push_back(token);
                        output_ss << llama_token_to_piece(ctx, token);
                    }

                    n_remain -= line_inp.size();
                    LOG_DBG("n_remain: %d\n", n_remain);
                } else {
                    LOG_DBG("empty line, passing control back\n");
                }

                input_echo = false; // do not echo this again
            }

            if (n_past > 0) {
                if (is_interacting) {
                    gpt_sampler_reset(smpl);
                }
                is_interacting = false;
            }
        }

        // end of generation
        if (!embd.empty() && llama_token_is_eog(model, embd.back()) && !params.interactive) {
            break;
        }

        // In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
        // We skip this logic when n_predict == -1 (infinite) or -2 (stop at context size).
        if (params.interactive && n_remain <= 0 && params.n_predict >= 0) {
            n_remain = params.n_predict;
            is_interacting = true;
        }
    }
    if (!params.interactive && n_remain <= 0) {
        LOG("%s", llama_token_to_piece(ctx, llama_token_eot(model)).c_str());
    }

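    // print the final timings, write the optional YAML logfile and free all resources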
    LOG("\n");
    gpt_perf_print(ctx, smpl);
    write_logfile(ctx, params, model, input_tokens, output_ss.str(), output_tokens);

    llama_free(ctx);
    llama_free_model(model);

    gpt_sampler_free(smpl);
    llama_backend_free();

    return 0;
}
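
// Example invocation (a sketch, not taken from this file: the binary name and the
// exact flags depend on the llama.cpp build; a FIM-capable model is assumed):
//
//   ./llama-infill -m models/codellama-7b.Q4_K_M.gguf \
//       --in-prefix "def helloworld():\n    print(\"hell" \
//       --in-suffix "\n    print(\"goodbye world\")\n"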