#include "arg.h"
#include "common.h"
#include "log.h"
#include "ngram-cache.h"
#include "llama.h"
#include "ggml.h"

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <fstream>
#include <string>
#include <vector>

int main(int argc, char ** argv){
    gpt_params params;

    if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_LOOKUP)) {
        return 1;
    }

    const int n_draft = params.n_draft;

    // init llama.cpp
    llama_backend_init();
    llama_numa_init(params.numa);

    // load the model and create a context
    llama_init_result llama_init = llama_init_from_gpt_params(params);

    llama_model   * model = llama_init.model;
    llama_context * ctx   = llama_init.context;

    // tokenize the prompt
    std::vector<llama_token> inp;
    inp = ::llama_tokenize(ctx, params.prompt, true, true);
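    // note: the two boolean arguments are the add_special/parse_special flags of common's
    //       tokenize helper, i.e. add BOS/EOS as configured by the model and parse special
    //       tokens appearing in the prompt text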

    llama_ngram_cache ngram_cache_context;
    llama_ngram_cache ngram_cache_dynamic;
    llama_ngram_cache ngram_cache_static;

    int64_t t_draft_flat_us = 0;
    int64_t t_draft_us      = 0;
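
    // three cache tiers are consulted when drafting:
    //   - context: built from the tokens of the current chunk, cleared after each chunk
    //   - dynamic: accumulated across chunks via llama_ngram_cache_merge
    //   - static:  loaded from disk once and never updated here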
    {
        const int64_t t_start_draft_us = ggml_time_us();

        if (!params.lookup_cache_static.empty()) {
            try {
                ngram_cache_static = llama_ngram_cache_load(params.lookup_cache_static);
            } catch (std::ifstream::failure const &) {
                LOG_ERR("failed to open static lookup cache: %s", params.lookup_cache_static.c_str());
                exit(1);
            }
        }

        if (!params.lookup_cache_dynamic.empty()) {
            try {
                ngram_cache_dynamic = llama_ngram_cache_load(params.lookup_cache_dynamic);
            } catch (std::ifstream::failure const &) {} // if the file does not exist it will simply be created at the end of the program
        }

        t_draft_flat_us += ggml_time_us() - t_start_draft_us;
    }

    const int n_input = inp.size();
    const int n_ctx   = llama_n_ctx(ctx);

    int n_drafted = 0;
    int n_accept  = 0;

    const int64_t t_start_ms = ggml_time_ms();
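
    // Note: the loop below never calls llama_decode; the "generated" tokens are taken verbatim
    // from the input, so only the n-gram drafting logic and its accept rate are measured.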
    // Iterate over input tokens in chunks of size n_ctx.
    // Each chunk is treated as if it were a sequential generation, but with pre-determined tokens to ensure reproducibility.
    for (int i_start = 0; i_start + n_ctx < n_input; i_start += n_ctx) {
        const std::vector<llama_token> inp_slice(inp.begin() + i_start, inp.begin() + i_start + n_ctx);
        std::vector<llama_token> pseudo_output;
        pseudo_output.push_back(inp_slice[0]);
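
        // pseudo_output holds the tokens "generated" so far; it is seeded with the first token
        // of the chunk and then grown by replaying the chunk's ground-truth tokens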

        while ((int) pseudo_output.size() < n_ctx) {
            // Simulate drafting and decoding from draft:
            std::vector<llama_token> draft;
            draft.push_back(pseudo_output.back());
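
            // draft[0] is the last accepted token; llama_ngram_cache_draft appends up to
            // n_draft continuation candidates looked up from the three caches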
            {
                const int64_t t_start_draft_us = ggml_time_us();
                llama_ngram_cache_draft(pseudo_output, draft, n_draft, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, ngram_cache_context, ngram_cache_dynamic, ngram_cache_static);
                t_draft_us += ggml_time_us() - t_start_draft_us;
            }
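
            // the seed token was already accepted, so it does not count as drafted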
            n_drafted += draft.size() - 1;

            for (size_t j = 1; j < draft.size() && (int) pseudo_output.size() < n_ctx; ++j) {
                const llama_token ground_truth = inp_slice[pseudo_output.size()];
                const llama_token drafted      = draft[j];

                // stop at the first drafted token that does not match the ground truth
                if (ground_truth != drafted) {
                    break;
                }

                ++n_accept;
                pseudo_output.push_back(ground_truth);
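
                // record the accepted token in the context cache, as would happen during real decoding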
                {
                    const int64_t t_start_draft_us = ggml_time_us();
                    llama_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, pseudo_output, 1, false);
                    t_draft_us += ggml_time_us() - t_start_draft_us;
                }
            }

            // After each simulated batch decoding, simulate the sampling of a single token:
            if ((int) pseudo_output.size() < n_ctx) {
                pseudo_output.push_back(inp_slice[pseudo_output.size()]);
                {
                    const int64_t t_start_draft_us = ggml_time_us();
                    llama_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, pseudo_output, 1, false);
                    t_draft_us += ggml_time_us() - t_start_draft_us;
                }
            }

            draft.erase(draft.begin()); // drop the seed token, mirroring the real lookup decoding loop
        }
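
        // print progress and a linearly extrapolated ETA roughly every 100k input tokens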
        if (i_start > 0 && i_start / 100000 != (i_start - n_ctx) / 100000) {
            const int64_t t_now_ms = ggml_time_ms();
            const int64_t eta_ms   = (n_input - i_start) * (t_now_ms - t_start_ms) / i_start;
            const int64_t eta_min  = eta_ms / (60*1000);
            const int64_t eta_s    = (eta_ms - 60*1000*eta_min) / 1000;

            LOG_INF("lookup-stats: %d/%d done, ETA: %02" PRId64 ":%02" PRId64 "\n", i_start, n_input, eta_min, eta_s);
        }

        // After each chunk, update the dynamic ngram cache with the context ngram cache:
        llama_ngram_cache_merge(ngram_cache_dynamic, ngram_cache_context);
        ngram_cache_context.clear();
    }
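
    // t_draft_flat is the one-time cost of loading the caches from disk; t_draft is the
    // cumulative time spent drafting and updating the caches. n_predict counts only whole
    // chunks, so the trailing n_input % n_ctx tokens are excluded.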

    LOG_INF("n_draft      = %d\n", n_draft);
    LOG_INF("n_predict    = %d\n", n_input - n_input % n_ctx);
    LOG_INF("n_drafted    = %d\n", n_drafted);
    LOG_INF("t_draft_flat = %.2f ms\n", t_draft_flat_us*1e-3);
    LOG_INF("t_draft      = %.2f ms, %.2f us per token, %.2f tokens per second\n",
            t_draft_us*1e-3, 1.0f*t_draft_us/n_drafted, n_drafted/(1e-6*t_draft_us));
    LOG_INF("n_accept     = %d\n", n_accept);
    LOG_INF("accept       = %.3f%%\n", 100.0f * n_accept / n_drafted);

    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

    return 0;
}