#include <algorithm>
#include <array>
#include <cassert>
#include <chrono>
#include <cinttypes>
#include <clocale>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <cstdlib>
#include <iterator>
#include <map>
#include <numeric>
#include <regex>
#include <sstream>
#include <string>
#include <vector>
#include <thread>

#include "ggml.h"
#include "llama.h"
#include "common.h"
#include "ggml-cuda.h"
#include "ggml-sycl.h"

#ifdef GGML_USE_CANN
#include "ggml-cann.h"
#endif

#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#   define NOMINMAX
#endif
#include <windows.h>
#endif

// utils
static uint64_t get_time_ns() {
    using clock = std::chrono::high_resolution_clock;
    return std::chrono::nanoseconds(clock::now().time_since_epoch()).count();
}

template<class T>
static std::string join(const std::vector<T> & values, const std::string & delim) {
    std::ostringstream str;
    for (size_t i = 0; i < values.size(); i++) {
        str << values[i];
        if (i < values.size() - 1) {
            str << delim;
        }
    }
    return str.str();
}

template<typename T, typename F>
static std::vector<std::string> transform_to_str(const std::vector<T> & values, F f) {
    std::vector<std::string> str_values;
    std::transform(values.begin(), values.end(), std::back_inserter(str_values), f);
    return str_values;
}

template<typename T>
static T avg(const std::vector<T> & v) {
    if (v.empty()) {
        return 0;
    }
    T sum = std::accumulate(v.begin(), v.end(), T(0));
    return sum / (T)v.size();
}

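// sample standard deviation, computed from the sum of squares:
//   var = sq_sum/(n-1) - mean^2 * n/(n-1)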
template<typename T>
static T stdev(const std::vector<T> & v) {
    if (v.size() <= 1) {
        return 0;
    }
    T mean = avg(v);
    T sq_sum = std::inner_product(v.begin(), v.end(), v.begin(), T(0));
    T stdev = std::sqrt(sq_sum / (T)(v.size() - 1) - mean * mean * (T)v.size() / (T)(v.size() - 1));
    return stdev;
}

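// best-effort CPU model name: parsed from /proc/cpuinfo on Linux,
// read from the registry on Windows, empty string elsewhere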
static std::string get_cpu_info() {
    std::string id;
#ifdef __linux__
    FILE * f = fopen("/proc/cpuinfo", "r");
    if (f) {
        char buf[1024];
        while (fgets(buf, sizeof(buf), f)) {
            if (strncmp(buf, "model name", 10) == 0) {
                char * p = strchr(buf, ':');
                if (p) {
                    p++;
                    while (std::isspace(*p)) {
                        p++;
                    }
                    while (std::isspace(p[strlen(p) - 1])) {
                        p[strlen(p) - 1] = '\0';
                    }
                    id = p;
                    break;
                }
            }
        }
        fclose(f);
    }
#elif defined(_WIN32)
    HKEY hKey;
    if (RegOpenKeyEx(HKEY_LOCAL_MACHINE,
                    TEXT("HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0"),
                    0,
                    KEY_READ,
                    &hKey) != ERROR_SUCCESS) {
        // failed to open the registry key
        return "";
    }
    char cpu_brand[256];
    DWORD cpu_brand_size = sizeof(cpu_brand);
    // RegQueryValueExA takes a narrow string, so the value name must not be wrapped in TEXT()
    if (RegQueryValueExA(hKey,
                        "ProcessorNameString",
                        NULL,
                        NULL,
                        (LPBYTE)cpu_brand,
                        &cpu_brand_size) == ERROR_SUCCESS) {
        id.assign(cpu_brand, cpu_brand_size);
        if (id.find('\0') != std::string::npos) {
            id.resize(id.find('\0'));
        }
    }
    RegCloseKey(hKey);
#endif
    // TODO: other platforms
    return id;
}

static std::string get_gpu_info() {
    std::string id;
#ifdef GGML_USE_CUDA
    int count = ggml_backend_cuda_get_device_count();
    for (int i = 0; i < count; i++) {
        char buf[128];
        ggml_backend_cuda_get_device_description(i, buf, sizeof(buf));
        id += buf;
        if (i < count - 1) {
            id += "/";
        }
    }
#endif
#ifdef GGML_USE_SYCL
    int count = ggml_backend_sycl_get_device_count();
    for (int i = 0; i < count; i++) {
        char buf[128];
        ggml_sycl_get_device_description(i, buf, sizeof(buf));
        id += buf;
        if (i < count - 1) {
            id += "/";
        }
    }
#endif
#ifdef GGML_USE_CANN
    uint32_t count = ggml_backend_cann_get_device_count();
    for (uint32_t i = 0; i < count; i++) {
        char buf[128];
        ggml_backend_cann_get_device_description(i, buf, sizeof(buf));
        id += buf;
        if (i < count - 1) {
            id += "/";
        }
    }
#endif
    // TODO: other backends
    return id;
}

// command line params
enum output_formats {NONE, CSV, JSON, JSONL, MARKDOWN, SQL};

static const char * output_format_str(output_formats format) {
    switch (format) {
        case NONE:     return "none";
        case CSV:      return "csv";
        case JSON:     return "json";
        case JSONL:    return "jsonl";
        case MARKDOWN: return "md";
        case SQL:      return "sql";
        default: GGML_ABORT("invalid output format");
    }
}

static bool output_format_from_str(const std::string & s, output_formats & format) {
    if (s == "none") {
        format = NONE;
    } else if (s == "csv") {
        format = CSV;
    } else if (s == "json") {
        format = JSON;
    } else if (s == "jsonl") {
        format = JSONL;
    } else if (s == "md") {
        format = MARKDOWN;
    } else if (s == "sql") {
        format = SQL;
    } else {
        return false;
    }
    return true;
}

static const char * split_mode_str(llama_split_mode mode) {
    switch (mode) {
        case LLAMA_SPLIT_MODE_NONE:  return "none";
        case LLAMA_SPLIT_MODE_LAYER: return "layer";
        case LLAMA_SPLIT_MODE_ROW:   return "row";
        default: GGML_ABORT("invalid split mode");
    }
}

static std::string pair_str(const std::pair<int, int> & p) {
    static char buf[32];
    snprintf(buf, sizeof(buf), "%d,%d", p.first, p.second);
    return buf;
}

struct cmd_params {
    std::vector<std::string> model;
    std::vector<int> n_prompt;
    std::vector<int> n_gen;
    std::vector<std::pair<int, int>> n_pg;
    std::vector<int> n_batch;
    std::vector<int> n_ubatch;
    std::vector<ggml_type> type_k;
    std::vector<ggml_type> type_v;
    std::vector<int> n_threads;
    std::vector<std::string> cpu_mask;
    std::vector<bool> cpu_strict;
    std::vector<int> poll;
    std::vector<int> n_gpu_layers;
    std::vector<std::string> rpc_servers;
    std::vector<llama_split_mode> split_mode;
    std::vector<int> main_gpu;
    std::vector<bool> no_kv_offload;
    std::vector<bool> flash_attn;
    std::vector<std::vector<float>> tensor_split;
    std::vector<bool> use_mmap;
    std::vector<bool> embeddings;
    ggml_numa_strategy numa;
    int reps;
    ggml_sched_priority prio;
    int delay;
    bool verbose;
    bool progress;
    output_formats output_format;
    output_formats output_format_stderr;
};

static const cmd_params cmd_params_defaults = {
    /* model                */ {"models/7B/ggml-model-q4_0.gguf"},
    /* n_prompt             */ {512},
    /* n_gen                */ {128},
    /* n_pg                 */ {},
    /* n_batch              */ {2048},
    /* n_ubatch             */ {512},
    /* type_k               */ {GGML_TYPE_F16},
    /* type_v               */ {GGML_TYPE_F16},
    /* n_threads            */ {cpu_get_num_math()},
    /* cpu_mask             */ {"0x0"},
    /* cpu_strict           */ {false},
    /* poll                 */ {50},
    /* n_gpu_layers         */ {99},
    /* rpc_servers          */ {""},
    /* split_mode           */ {LLAMA_SPLIT_MODE_LAYER},
    /* main_gpu             */ {0},
    /* no_kv_offload        */ {false},
    /* flash_attn           */ {false},
    /* tensor_split         */ {std::vector<float>(llama_max_devices(), 0.0f)},
    /* use_mmap             */ {true},
    /* embeddings           */ {false},
    /* numa                 */ GGML_NUMA_STRATEGY_DISABLED,
    /* reps                 */ 5,
    /* prio                 */ GGML_SCHED_PRIO_NORMAL,
    /* delay                */ 0,
    /* verbose              */ false,
    /* progress             */ false,
    /* output_format        */ MARKDOWN,
    /* output_format_stderr */ NONE,
};

static void print_usage(int /* argc */, char ** argv) {
    printf("usage: %s [options]\n", argv[0]);
    printf("\n");
    printf("options:\n");
    printf("  -h, --help\n");
    printf("  -m, --model <filename>            (default: %s)\n", join(cmd_params_defaults.model, ",").c_str());
    printf("  -p, --n-prompt <n>                (default: %s)\n", join(cmd_params_defaults.n_prompt, ",").c_str());
    printf("  -n, --n-gen <n>                   (default: %s)\n", join(cmd_params_defaults.n_gen, ",").c_str());
    printf("  -pg <pp,tg>                       (default: %s)\n", join(transform_to_str(cmd_params_defaults.n_pg, pair_str), ",").c_str());
    printf("  -b, --batch-size <n>              (default: %s)\n", join(cmd_params_defaults.n_batch, ",").c_str());
    printf("  -ub, --ubatch-size <n>            (default: %s)\n", join(cmd_params_defaults.n_ubatch, ",").c_str());
    printf("  -ctk, --cache-type-k <t>          (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_k, ggml_type_name), ",").c_str());
    printf("  -ctv, --cache-type-v <t>          (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_v, ggml_type_name), ",").c_str());
    printf("  -t, --threads <n>                 (default: %s)\n", join(cmd_params_defaults.n_threads, ",").c_str());
    printf("  -C, --cpu-mask <hex,hex>          (default: %s)\n", join(cmd_params_defaults.cpu_mask, ",").c_str());
    printf("  --cpu-strict <0|1>                (default: %s)\n", join(cmd_params_defaults.cpu_strict, ",").c_str());
    printf("  --poll <0...100>                  (default: %s)\n", join(cmd_params_defaults.poll, ",").c_str());
    printf("  -ngl, --n-gpu-layers <n>          (default: %s)\n", join(cmd_params_defaults.n_gpu_layers, ",").c_str());
#ifdef GGML_USE_RPC
    printf("  -rpc, --rpc <rpc_servers>         (default: %s)\n", join(cmd_params_defaults.rpc_servers, ",").c_str());
#endif
    printf("  -sm, --split-mode <none|layer|row> (default: %s)\n", join(transform_to_str(cmd_params_defaults.split_mode, split_mode_str), ",").c_str());
    printf("  -mg, --main-gpu <i>               (default: %s)\n", join(cmd_params_defaults.main_gpu, ",").c_str());
    printf("  -nkvo, --no-kv-offload <0|1>      (default: %s)\n", join(cmd_params_defaults.no_kv_offload, ",").c_str());
    printf("  -fa, --flash-attn <0|1>           (default: %s)\n", join(cmd_params_defaults.flash_attn, ",").c_str());
    printf("  -mmp, --mmap <0|1>                (default: %s)\n", join(cmd_params_defaults.use_mmap, ",").c_str());
    printf("  --numa <distribute|isolate|numactl> (default: disabled)\n");
    printf("  -embd, --embeddings <0|1>         (default: %s)\n", join(cmd_params_defaults.embeddings, ",").c_str());
    printf("  -ts, --tensor-split <ts0/ts1/..>  (default: 0)\n");
    printf("  -r, --repetitions <n>             (default: %d)\n", cmd_params_defaults.reps);
    printf("  --prio <0|1|2|3>                  (default: %d)\n", cmd_params_defaults.prio);
    printf("  --delay <0...N> (seconds)         (default: %d)\n", cmd_params_defaults.delay);
    printf("  -o, --output <csv|json|jsonl|md|sql> (default: %s)\n", output_format_str(cmd_params_defaults.output_format));
    printf("  -oe, --output-err <csv|json|jsonl|md|sql> (default: %s)\n", output_format_str(cmd_params_defaults.output_format_stderr));
    printf("  -v, --verbose                     (default: %s)\n", cmd_params_defaults.verbose ? "1" : "0");
    printf("  --progress                        (default: %s)\n", cmd_params_defaults.progress ? "1" : "0");
    printf("\n");
    printf("Multiple values can be given for each parameter by separating them with ',' or by specifying the parameter multiple times.\n");
}

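// example invocations (the model path is just the built-in default):
//   llama-bench -m models/7B/ggml-model-q4_0.gguf -p 512,1024 -n 128 -r 5 -o md
//   llama-bench -m models/7B/ggml-model-q4_0.gguf -pg 512,128 -o json
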
static ggml_type ggml_type_from_name(const std::string & s) {
    if (s == "f16") {
        return GGML_TYPE_F16;
    }
    if (s == "q8_0") {
        return GGML_TYPE_Q8_0;
    }
    if (s == "q4_0") {
        return GGML_TYPE_Q4_0;
    }
    if (s == "q4_1") {
        return GGML_TYPE_Q4_1;
    }
    if (s == "q5_0") {
        return GGML_TYPE_Q5_0;
    }
    if (s == "q5_1") {
        return GGML_TYPE_Q5_1;
    }
    if (s == "iq4_nl") {
        return GGML_TYPE_IQ4_NL;
    }

    return GGML_TYPE_COUNT;
}

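// parses the command line into cmd_params; comma-separated values accumulate
// into the per-parameter vectors, and defaults are filled in at the end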
static cmd_params parse_cmd_params(int argc, char ** argv) {
    cmd_params params;
    std::string arg;
    bool invalid_param = false;
    const std::string arg_prefix = "--";
    const char split_delim = ',';

    params.verbose = cmd_params_defaults.verbose;
    params.output_format = cmd_params_defaults.output_format;
    params.output_format_stderr = cmd_params_defaults.output_format_stderr;
    params.reps = cmd_params_defaults.reps;
    params.numa = cmd_params_defaults.numa;
    params.prio = cmd_params_defaults.prio;
    params.delay = cmd_params_defaults.delay;
    params.progress = cmd_params_defaults.progress;

    for (int i = 1; i < argc; i++) {
        arg = argv[i];
        if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
            std::replace(arg.begin(), arg.end(), '_', '-');
        }

        if (arg == "-h" || arg == "--help") {
            print_usage(argc, argv);
            exit(0);
        } else if (arg == "-m" || arg == "--model") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            auto p = string_split<std::string>(argv[i], split_delim);
            params.model.insert(params.model.end(), p.begin(), p.end());
        } else if (arg == "-p" || arg == "--n-prompt") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            auto p = string_split<int>(argv[i], split_delim);
            params.n_prompt.insert(params.n_prompt.end(), p.begin(), p.end());
        } else if (arg == "-n" || arg == "--n-gen") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            auto p = string_split<int>(argv[i], split_delim);
            params.n_gen.insert(params.n_gen.end(), p.begin(), p.end());
        } else if (arg == "-pg") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            auto p = string_split<std::string>(argv[i], ',');
            if (p.size() != 2) {
                invalid_param = true;
                break;
            }
            params.n_pg.push_back({std::stoi(p[0]), std::stoi(p[1])});
        } else if (arg == "-b" || arg == "--batch-size") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            auto p = string_split<int>(argv[i], split_delim);
            params.n_batch.insert(params.n_batch.end(), p.begin(), p.end());
        } else if (arg == "-ub" || arg == "--ubatch-size") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            auto p = string_split<int>(argv[i], split_delim);
            params.n_ubatch.insert(params.n_ubatch.end(), p.begin(), p.end());
        } else if (arg == "-ctk" || arg == "--cache-type-k") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            auto p = string_split<std::string>(argv[i], split_delim);
            std::vector<ggml_type> types;
            for (const auto & t : p) {
                ggml_type gt = ggml_type_from_name(t);
                if (gt == GGML_TYPE_COUNT) {
                    invalid_param = true;
                    break;
                }
                types.push_back(gt);
            }
            if (invalid_param) {
                break;
            }
            params.type_k.insert(params.type_k.end(), types.begin(), types.end());
        } else if (arg == "-ctv" || arg == "--cache-type-v") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            auto p = string_split<std::string>(argv[i], split_delim);
            std::vector<ggml_type> types;
            for (const auto & t : p) {
                ggml_type gt = ggml_type_from_name(t);
                if (gt == GGML_TYPE_COUNT) {
                    invalid_param = true;
                    break;
                }
                types.push_back(gt);
            }
            if (invalid_param) {
                break;
            }
            params.type_v.insert(params.type_v.end(), types.begin(), types.end());
        } else if (arg == "-t" || arg == "--threads") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            auto p = string_split<int>(argv[i], split_delim);
            params.n_threads.insert(params.n_threads.end(), p.begin(), p.end());
        } else if (arg == "-C" || arg == "--cpu-mask") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            auto p = string_split<std::string>(argv[i], split_delim);
            params.cpu_mask.insert(params.cpu_mask.end(), p.begin(), p.end());
        } else if (arg == "--cpu-strict") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            auto p = string_split<bool>(argv[i], split_delim);
            params.cpu_strict.insert(params.cpu_strict.end(), p.begin(), p.end());
        } else if (arg == "--poll") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            auto p = string_split<int>(argv[i], split_delim);
            params.poll.insert(params.poll.end(), p.begin(), p.end());
        } else if (arg == "-ngl" || arg == "--n-gpu-layers") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            auto p = string_split<int>(argv[i], split_delim);
            params.n_gpu_layers.insert(params.n_gpu_layers.end(), p.begin(), p.end());
#ifdef GGML_USE_RPC
        } else if (arg == "-rpc" || arg == "--rpc") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.rpc_servers.push_back(argv[i]);
#endif
        } else if (arg == "-sm" || arg == "--split-mode") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            auto p = string_split<std::string>(argv[i], split_delim);
            std::vector<llama_split_mode> modes;
            for (const auto & m : p) {
                llama_split_mode mode;
                if (m == "none") {
                    mode = LLAMA_SPLIT_MODE_NONE;
                } else if (m == "layer") {
                    mode = LLAMA_SPLIT_MODE_LAYER;
                } else if (m == "row") {
                    mode = LLAMA_SPLIT_MODE_ROW;
                } else {
                    invalid_param = true;
                    break;
                }
                modes.push_back(mode);
            }
            if (invalid_param) {
                break;
            }
            params.split_mode.insert(params.split_mode.end(), modes.begin(), modes.end());
        } else if (arg == "-mg" || arg == "--main-gpu") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.main_gpu = string_split<int>(argv[i], split_delim);
        } else if (arg == "-nkvo" || arg == "--no-kv-offload") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            auto p = string_split<bool>(argv[i], split_delim);
            params.no_kv_offload.insert(params.no_kv_offload.end(), p.begin(), p.end());
        } else if (arg == "--numa") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            } else {
                std::string value(argv[i]);
                /**/ if (value == "distribute" || value == "") { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
                else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
                else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
                else { invalid_param = true; break; }
            }
        } else if (arg == "-fa" || arg == "--flash-attn") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            auto p = string_split<bool>(argv[i], split_delim);
            params.flash_attn.insert(params.flash_attn.end(), p.begin(), p.end());
        } else if (arg == "-mmp" || arg == "--mmap") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            auto p = string_split<bool>(argv[i], split_delim);
            params.use_mmap.insert(params.use_mmap.end(), p.begin(), p.end());
        } else if (arg == "-embd" || arg == "--embeddings") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            auto p = string_split<bool>(argv[i], split_delim);
            params.embeddings.insert(params.embeddings.end(), p.begin(), p.end());
        } else if (arg == "-ts" || arg == "--tensor-split") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            for (auto ts : string_split<std::string>(argv[i], split_delim)) {
                // split string by ; and /
                const std::regex regex{R"([;/]+)"};
                std::sregex_token_iterator it{ts.begin(), ts.end(), regex, -1};
                std::vector<std::string> split_arg{it, {}};
                GGML_ASSERT(split_arg.size() <= llama_max_devices());

                std::vector<float> tensor_split(llama_max_devices());
                for (size_t i = 0; i < llama_max_devices(); ++i) {
                    if (i < split_arg.size()) {
                        tensor_split[i] = std::stof(split_arg[i]);
                    } else {
                        tensor_split[i] = 0.0f;
                    }
                }
                params.tensor_split.push_back(tensor_split);
            }
        } else if (arg == "-r" || arg == "--repetitions") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.reps = std::stoi(argv[i]);
        } else if (arg == "--prio") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.prio = (enum ggml_sched_priority) std::stoi(argv[i]);
        } else if (arg == "--delay") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.delay = std::stoi(argv[i]);
        } else if (arg == "-o" || arg == "--output") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            invalid_param = !output_format_from_str(argv[i], params.output_format);
        } else if (arg == "-oe" || arg == "--output-err") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            invalid_param = !output_format_from_str(argv[i], params.output_format_stderr);
        } else if (arg == "-v" || arg == "--verbose") {
            params.verbose = true;
        } else if (arg == "--progress") {
            params.progress = true;
        } else {
            invalid_param = true;
            break;
        }
    }
    if (invalid_param) {
        fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
        print_usage(argc, argv);
        exit(1);
    }

    // set defaults
    if (params.model.empty())        { params.model = cmd_params_defaults.model; }
    if (params.n_prompt.empty())     { params.n_prompt = cmd_params_defaults.n_prompt; }
    if (params.n_gen.empty())        { params.n_gen = cmd_params_defaults.n_gen; }
    if (params.n_pg.empty())         { params.n_pg = cmd_params_defaults.n_pg; }
    if (params.n_batch.empty())      { params.n_batch = cmd_params_defaults.n_batch; }
    if (params.n_ubatch.empty())     { params.n_ubatch = cmd_params_defaults.n_ubatch; }
    if (params.type_k.empty())       { params.type_k = cmd_params_defaults.type_k; }
    if (params.type_v.empty())       { params.type_v = cmd_params_defaults.type_v; }
    if (params.n_gpu_layers.empty()) { params.n_gpu_layers = cmd_params_defaults.n_gpu_layers; }
    if (params.rpc_servers.empty())  { params.rpc_servers = cmd_params_defaults.rpc_servers; }
    if (params.split_mode.empty())   { params.split_mode = cmd_params_defaults.split_mode; }
    if (params.main_gpu.empty())     { params.main_gpu = cmd_params_defaults.main_gpu; }
    if (params.no_kv_offload.empty()){ params.no_kv_offload = cmd_params_defaults.no_kv_offload; }
    if (params.flash_attn.empty())   { params.flash_attn = cmd_params_defaults.flash_attn; }
    if (params.tensor_split.empty()) { params.tensor_split = cmd_params_defaults.tensor_split; }
    if (params.use_mmap.empty())     { params.use_mmap = cmd_params_defaults.use_mmap; }
    if (params.embeddings.empty())   { params.embeddings = cmd_params_defaults.embeddings; }
    if (params.n_threads.empty())    { params.n_threads = cmd_params_defaults.n_threads; }
    if (params.cpu_mask.empty())     { params.cpu_mask = cmd_params_defaults.cpu_mask; }
    if (params.cpu_strict.empty())   { params.cpu_strict = cmd_params_defaults.cpu_strict; }
    if (params.poll.empty())         { params.poll = cmd_params_defaults.poll; }

    return params;
}

struct cmd_params_instance {
    std::string model;
    int n_prompt;
    int n_gen;
    int n_batch;
    int n_ubatch;
    ggml_type type_k;
    ggml_type type_v;
    int n_threads;
    std::string cpu_mask;
    bool cpu_strict;
    int poll;
    int n_gpu_layers;
    std::string rpc_servers;
    llama_split_mode split_mode;
    int main_gpu;
    bool no_kv_offload;
    bool flash_attn;
    std::vector<float> tensor_split;
    bool use_mmap;
    bool embeddings;

    llama_model_params to_llama_mparams() const {
        llama_model_params mparams = llama_model_default_params();

        mparams.n_gpu_layers = n_gpu_layers;
        if (!rpc_servers.empty()) {
            mparams.rpc_servers = rpc_servers.c_str();
        }
        mparams.split_mode = split_mode;
        mparams.main_gpu = main_gpu;
        mparams.tensor_split = tensor_split.data();
        mparams.use_mmap = use_mmap;

        return mparams;
    }

    bool equal_mparams(const cmd_params_instance & other) const {
        return model == other.model &&
               n_gpu_layers == other.n_gpu_layers &&
               rpc_servers == other.rpc_servers &&
               split_mode == other.split_mode &&
               main_gpu == other.main_gpu &&
               use_mmap == other.use_mmap &&
               tensor_split == other.tensor_split;
    }

    llama_context_params to_llama_cparams() const {
        llama_context_params cparams = llama_context_default_params();

        cparams.n_ctx = n_prompt + n_gen;
        cparams.n_batch = n_batch;
        cparams.n_ubatch = n_ubatch;
        cparams.type_k = type_k;
        cparams.type_v = type_v;
        cparams.offload_kqv = !no_kv_offload;
        cparams.flash_attn = flash_attn;
        cparams.embeddings = embeddings;

        return cparams;
    }
};

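// expands the per-parameter value lists into the full cross product of
// benchmark configurations, one cmd_params_instance per combination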
static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_params & params) {
    std::vector<cmd_params_instance> instances;

    // this ordering minimizes the number of times that each model needs to be reloaded
    for (const auto & m : params.model)
    for (const auto & nl : params.n_gpu_layers)
    for (const auto & rpc : params.rpc_servers)
    for (const auto & sm : params.split_mode)
    for (const auto & mg : params.main_gpu)
    for (const auto & ts : params.tensor_split)
    for (const auto & mmp : params.use_mmap)
    for (const auto & embd : params.embeddings)
    for (const auto & nb : params.n_batch)
    for (const auto & nub : params.n_ubatch)
    for (const auto & tk : params.type_k)
    for (const auto & tv : params.type_v)
    for (const auto & nkvo : params.no_kv_offload)
    for (const auto & fa : params.flash_attn)
    for (const auto & nt : params.n_threads)
    for (const auto & cm : params.cpu_mask)
    for (const auto & cs : params.cpu_strict)
    for (const auto & pl : params.poll) {
        for (const auto & n_prompt : params.n_prompt) {
            if (n_prompt == 0) {
                continue;
            }
            cmd_params_instance instance = {
                /* .model        = */ m,
                /* .n_prompt     = */ n_prompt,
                /* .n_gen        = */ 0,
                /* .n_batch      = */ nb,
                /* .n_ubatch     = */ nub,
                /* .type_k       = */ tk,
                /* .type_v       = */ tv,
                /* .n_threads    = */ nt,
                /* .cpu_mask     = */ cm,
                /* .cpu_strict   = */ cs,
                /* .poll         = */ pl,
                /* .n_gpu_layers = */ nl,
                /* .rpc_servers  = */ rpc,
                /* .split_mode   = */ sm,
                /* .main_gpu     = */ mg,
                /* .no_kv_offload= */ nkvo,
                /* .flash_attn   = */ fa,
                /* .tensor_split = */ ts,
                /* .use_mmap     = */ mmp,
                /* .embeddings   = */ embd,
            };
            instances.push_back(instance);
        }

        for (const auto & n_gen : params.n_gen) {
            if (n_gen == 0) {
                continue;
            }
            cmd_params_instance instance = {
                /* .model        = */ m,
                /* .n_prompt     = */ 0,
                /* .n_gen        = */ n_gen,
                /* .n_batch      = */ nb,
                /* .n_ubatch     = */ nub,
                /* .type_k       = */ tk,
                /* .type_v       = */ tv,
                /* .n_threads    = */ nt,
                /* .cpu_mask     = */ cm,
                /* .cpu_strict   = */ cs,
                /* .poll         = */ pl,
                /* .n_gpu_layers = */ nl,
                /* .rpc_servers  = */ rpc,
                /* .split_mode   = */ sm,
                /* .main_gpu     = */ mg,
                /* .no_kv_offload= */ nkvo,
                /* .flash_attn   = */ fa,
                /* .tensor_split = */ ts,
                /* .use_mmap     = */ mmp,
                /* .embeddings   = */ embd,
            };
            instances.push_back(instance);
        }

        for (const auto & n_pg : params.n_pg) {
            if (n_pg.first == 0 && n_pg.second == 0) {
                continue;
            }
            cmd_params_instance instance = {
                /* .model        = */ m,
                /* .n_prompt     = */ n_pg.first,
                /* .n_gen        = */ n_pg.second,
                /* .n_batch      = */ nb,
                /* .n_ubatch     = */ nub,
                /* .type_k       = */ tk,
                /* .type_v       = */ tv,
                /* .n_threads    = */ nt,
                /* .cpu_mask     = */ cm,
                /* .cpu_strict   = */ cs,
                /* .poll         = */ pl,
                /* .n_gpu_layers = */ nl,
                /* .rpc_servers  = */ rpc,
                /* .split_mode   = */ sm,
                /* .main_gpu     = */ mg,
                /* .no_kv_offload= */ nkvo,
                /* .flash_attn   = */ fa,
                /* .tensor_split = */ ts,
                /* .use_mmap     = */ mmp,
                /* .embeddings   = */ embd,
            };
            instances.push_back(instance);
        }
    }

    return instances;
}

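// a single benchmark result: the configuration it was run with, plus one
// wall-clock sample per repetition in samples_ns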
struct test {
    static const std::string build_commit;
    static const int build_number;
    static const bool cuda;
    static const bool vulkan;
    static const bool kompute;
    static const bool metal;
    static const bool sycl;
    static const bool gpu_blas;
    static const bool blas;
    static const std::string cpu_info;
    static const std::string gpu_info;
    std::string model_filename;
    std::string model_type;
    uint64_t model_size;
    uint64_t model_n_params;
    int n_batch;
    int n_ubatch;
    int n_threads;
    std::string cpu_mask;
    bool cpu_strict;
    int poll;
    bool has_rpc;
    ggml_type type_k;
    ggml_type type_v;
    int n_gpu_layers;
    llama_split_mode split_mode;
    int main_gpu;
    bool no_kv_offload;
    bool flash_attn;
    std::vector<float> tensor_split;
    bool use_mmap;
    bool embeddings;
    int n_prompt;
    int n_gen;
    std::string test_time;
    std::vector<uint64_t> samples_ns;

    test(const cmd_params_instance & inst, const llama_model * lmodel, const llama_context * ctx) {
        model_filename = inst.model;
        char buf[128];
        llama_model_desc(lmodel, buf, sizeof(buf));
        model_type = buf;
        model_size = llama_model_size(lmodel);
        model_n_params = llama_model_n_params(lmodel);
        n_batch = inst.n_batch;
        n_ubatch = inst.n_ubatch;
        n_threads = inst.n_threads;
        cpu_mask = inst.cpu_mask;
        cpu_strict = inst.cpu_strict;
        poll = inst.poll;
        has_rpc = !inst.rpc_servers.empty();
        type_k = inst.type_k;
        type_v = inst.type_v;
        n_gpu_layers = inst.n_gpu_layers;
        split_mode = inst.split_mode;
        main_gpu = inst.main_gpu;
        no_kv_offload = inst.no_kv_offload;
        flash_attn = inst.flash_attn;
        tensor_split = inst.tensor_split;
        use_mmap = inst.use_mmap;
        embeddings = inst.embeddings;
        n_prompt = inst.n_prompt;
        n_gen = inst.n_gen;
        // RFC 3339 date-time format
        time_t t = time(NULL);
        std::strftime(buf, sizeof(buf), "%FT%TZ", gmtime(&t));
        test_time = buf;

        (void) ctx;
    }

    uint64_t avg_ns() const {
        return ::avg(samples_ns);
    }

    uint64_t stdev_ns() const {
        return ::stdev(samples_ns);
    }

    std::vector<double> get_ts() const {
        int n_tokens = n_prompt + n_gen;
        std::vector<double> ts;
        std::transform(samples_ns.begin(), samples_ns.end(), std::back_inserter(ts), [n_tokens](uint64_t t) { return 1e9 * n_tokens / t; });
        return ts;
    }

    double avg_ts() const {
        return ::avg(get_ts());
    }

    double stdev_ts() const {
        return ::stdev(get_ts());
    }

    static std::string get_backend() {
        if (cuda) {
            return GGML_CUDA_NAME;
        }
        if (vulkan) {
            return "Vulkan";
        }
        if (kompute) {
            return "Kompute";
        }
        if (metal) {
            return "Metal";
        }
        if (sycl) {
            return GGML_SYCL_NAME;
        }
        if (gpu_blas) {
            return "GPU BLAS";
        }
        if (blas) {
            return "BLAS";
        }

        return "CPU";
    }

    static const std::vector<std::string> & get_fields() {
        static const std::vector<std::string> fields = {
            "build_commit", "build_number",
            "cuda", "vulkan", "kompute", "metal", "sycl", "rpc", "gpu_blas", "blas",
            "cpu_info", "gpu_info",
            "model_filename", "model_type", "model_size", "model_n_params",
            "n_batch", "n_ubatch",
            "n_threads", "cpu_mask", "cpu_strict", "poll",
            "type_k", "type_v",
            "n_gpu_layers", "split_mode",
            "main_gpu", "no_kv_offload", "flash_attn",
            "tensor_split", "use_mmap", "embeddings",
            "n_prompt", "n_gen", "test_time",
            "avg_ns", "stddev_ns",
            "avg_ts", "stddev_ts",
        };
        return fields;
    }

    enum field_type {STRING, BOOL, INT, FLOAT};

    static field_type get_field_type(const std::string & field) {
        if (field == "build_number" || field == "n_batch" || field == "n_ubatch" ||
            field == "n_threads" || field == "poll" ||
            field == "model_size" || field == "model_n_params" ||
            field == "n_gpu_layers" || field == "main_gpu" ||
            field == "n_prompt" || field == "n_gen" ||
            field == "avg_ns" || field == "stddev_ns") {
            return INT;
        }
        if (field == "cuda" || field == "vulkan" || field == "kompute" || field == "metal" ||
            field == "gpu_blas" || field == "blas" || field == "sycl" || field == "f16_kv" || field == "no_kv_offload" ||
            field == "cpu_strict" ||
            field == "flash_attn" || field == "use_mmap" || field == "embeddings") {
            return BOOL;
        }
        if (field == "avg_ts" || field == "stddev_ts") {
            return FLOAT;
        }
        return STRING;
    }

    std::vector<std::string> get_values() const {
        std::string tensor_split_str;
        int max_nonzero = 0;
        for (size_t i = 0; i < llama_max_devices(); i++) {
            if (tensor_split[i] > 0) {
                max_nonzero = i;
            }
        }
        for (int i = 0; i <= max_nonzero; i++) {
            char buf[32];
            snprintf(buf, sizeof(buf), "%.2f", tensor_split[i]);
            tensor_split_str += buf;
            if (i < max_nonzero) {
                tensor_split_str += "/";
            }
        }
        std::vector<std::string> values = {
            build_commit, std::to_string(build_number),
            // note: the order must match get_fields(); kompute belongs in the third slot
            std::to_string(cuda), std::to_string(vulkan), std::to_string(kompute),
            std::to_string(metal), std::to_string(sycl), std::to_string(has_rpc), std::to_string(gpu_blas), std::to_string(blas),
            cpu_info, gpu_info,
            model_filename, model_type, std::to_string(model_size), std::to_string(model_n_params),
            std::to_string(n_batch), std::to_string(n_ubatch),
            std::to_string(n_threads), cpu_mask, std::to_string(cpu_strict), std::to_string(poll),
            ggml_type_name(type_k), ggml_type_name(type_v),
            std::to_string(n_gpu_layers), split_mode_str(split_mode),
            std::to_string(main_gpu), std::to_string(no_kv_offload), std::to_string(flash_attn),
            tensor_split_str, std::to_string(use_mmap), std::to_string(embeddings),
            std::to_string(n_prompt), std::to_string(n_gen), test_time,
            std::to_string(avg_ns()), std::to_string(stdev_ns()),
            std::to_string(avg_ts()), std::to_string(stdev_ts())
        };
        return values;
    }

    std::map<std::string, std::string> get_map() const {
        std::map<std::string, std::string> map;
        auto fields = get_fields();
        auto values = get_values();
        std::transform(fields.begin(), fields.end(), values.begin(),
                       std::inserter(map, map.end()), std::make_pair<const std::string &, const std::string &>);
        return map;
    }
};

const std::string test::build_commit = LLAMA_COMMIT;
const int         test::build_number = LLAMA_BUILD_NUMBER;
const bool        test::cuda         = !!ggml_cpu_has_cuda();
const bool        test::vulkan       = !!ggml_cpu_has_vulkan();
const bool        test::kompute      = !!ggml_cpu_has_kompute();
const bool        test::metal        = !!ggml_cpu_has_metal();
const bool        test::gpu_blas     = !!ggml_cpu_has_gpublas();
const bool        test::blas         = !!ggml_cpu_has_blas();
const bool        test::sycl         = !!ggml_cpu_has_sycl();
const std::string test::cpu_info     = get_cpu_info();
const std::string test::gpu_info     = get_gpu_info();

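// output formatters: each printer writes one header, one row per test, and an
// optional footer to fout, which the caller sets before print_header()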
struct printer {
    virtual ~printer() {}

    FILE * fout;
    virtual void print_header(const cmd_params & params) { (void) params; }
    virtual void print_test(const test & t) = 0;
    virtual void print_footer() { }
};

struct csv_printer : public printer {
    static std::string escape_csv(const std::string & field) {
        std::string escaped = "\"";
        for (auto c : field) {
            if (c == '"') {
                escaped += "\"";
            }
            escaped += c;
        }
        escaped += "\"";
        return escaped;
    }

    void print_header(const cmd_params & params) override {
        std::vector<std::string> fields = test::get_fields();
        fprintf(fout, "%s\n", join(fields, ",").c_str());
        (void) params;
    }

    void print_test(const test & t) override {
        std::vector<std::string> values = t.get_values();
        std::transform(values.begin(), values.end(), values.begin(), escape_csv);
        fprintf(fout, "%s\n", join(values, ",").c_str());
    }
};

static std::string escape_json(const std::string & value) {
    std::string escaped;
    for (auto c : value) {
        if (c == '"') {
            escaped += "\\\"";
        } else if (c == '\\') {
            escaped += "\\\\";
        } else if (c <= 0x1f) {
            char buf[8];
            snprintf(buf, sizeof(buf), "\\u%04x", c);
            escaped += buf;
        } else {
            escaped += c;
        }
    }
    return escaped;
}

static std::string format_json_value(const std::string & field, const std::string & value) {
    switch (test::get_field_type(field)) {
        case test::STRING:
            return "\"" + escape_json(value) + "\"";
        case test::BOOL:
            return value == "0" ? "false" : "true";
        default:
            return value;
    }
}

struct json_printer : public printer {
    bool first = true;

    void print_header(const cmd_params & params) override {
        fprintf(fout, "[\n");
        (void) params;
    }

    void print_fields(const std::vector<std::string> & fields, const std::vector<std::string> & values) {
        assert(fields.size() == values.size());
        for (size_t i = 0; i < fields.size(); i++) {
            fprintf(fout, "    \"%s\": %s,\n", fields.at(i).c_str(), format_json_value(fields.at(i), values.at(i)).c_str());
        }
    }

    void print_test(const test & t) override {
        if (first) {
            first = false;
        } else {
            fprintf(fout, ",\n");
        }
        fprintf(fout, "  {\n");
        print_fields(test::get_fields(), t.get_values());
        fprintf(fout, "    \"samples_ns\": [ %s ],\n", join(t.samples_ns, ", ").c_str());
        fprintf(fout, "    \"samples_ts\": [ %s ]\n", join(t.get_ts(), ", ").c_str());
        fprintf(fout, "  }");
        fflush(fout);
    }

    void print_footer() override {
        fprintf(fout, "\n]\n");
    }
};

struct jsonl_printer : public printer {
    void print_fields(const std::vector<std::string> & fields, const std::vector<std::string> & values) {
        assert(fields.size() == values.size());
        for (size_t i = 0; i < fields.size(); i++) {
            fprintf(fout, "\"%s\": %s, ", fields.at(i).c_str(), format_json_value(fields.at(i), values.at(i)).c_str());
        }
    }

    void print_test(const test & t) override {
        fprintf(fout, "{");
        print_fields(test::get_fields(), t.get_values());
        fprintf(fout, "\"samples_ns\": [ %s ],", join(t.samples_ns, ", ").c_str());
        fprintf(fout, "\"samples_ts\": [ %s ]", join(t.get_ts(), ", ").c_str());
        fprintf(fout, "}\n");
        fflush(fout);
    }
};

struct markdown_printer : public printer {
    std::vector<std::string> fields;

    static int get_field_width(const std::string & field) {
        if (field == "model") {
            return -30;
        }
        if (field == "t/s") {
            return 20;
        }
        if (field == "size" || field == "params") {
            return 10;
        }
        if (field == "n_gpu_layers") {
            return 3;
        }
        if (field == "n_threads") {
            return 7;
        }
        if (field == "n_batch") {
            return 7;
        }
        if (field == "n_ubatch") {
            return 8;
        }
        if (field == "type_k" || field == "type_v") {
            return 6;
        }
        if (field == "split_mode") {
            return 5;
        }
        if (field == "flash_attn") {
            return 2;
        }
        if (field == "use_mmap") {
            return 4;
        }
        if (field == "test") {
            return 13;
        }

        int width = std::max((int)field.length(), 10);

        if (test::get_field_type(field) == test::STRING) {
            return -width;
        }
        return width;
    }

    static std::string get_field_display_name(const std::string & field) {
        if (field == "n_gpu_layers") {
            return "ngl";
        }
        if (field == "split_mode") {
            return "sm";
        }
        if (field == "n_threads") {
            return "threads";
        }
        if (field == "no_kv_offload") {
            return "nkvo";
        }
        if (field == "flash_attn") {
            return "fa";
        }
        if (field == "use_mmap") {
            return "mmap";
        }
        if (field == "embeddings") {
            return "embd";
        }
        if (field == "tensor_split") {
            return "ts";
        }
        return field;
    }

    void print_header(const cmd_params & params) override {
        // select fields to print
        fields.emplace_back("model");
        fields.emplace_back("size");
        fields.emplace_back("params");
        fields.emplace_back("backend");
        bool is_cpu_backend = test::get_backend() == "CPU" || test::get_backend() == "BLAS";
        if (!is_cpu_backend) {
            fields.emplace_back("n_gpu_layers");
        }
        if (params.n_threads.size() > 1 || params.n_threads != cmd_params_defaults.n_threads || is_cpu_backend) {
            fields.emplace_back("n_threads");
        }
        if (params.cpu_mask.size() > 1 || params.cpu_mask != cmd_params_defaults.cpu_mask) {
            fields.emplace_back("cpu_mask");
        }
        if (params.cpu_strict.size() > 1 || params.cpu_strict != cmd_params_defaults.cpu_strict) {
            fields.emplace_back("cpu_strict");
        }
        if (params.poll.size() > 1 || params.poll != cmd_params_defaults.poll) {
            fields.emplace_back("poll");
        }
        if (params.n_batch.size() > 1 || params.n_batch != cmd_params_defaults.n_batch) {
            fields.emplace_back("n_batch");
        }
        if (params.n_ubatch.size() > 1 || params.n_ubatch != cmd_params_defaults.n_ubatch) {
            fields.emplace_back("n_ubatch");
        }
        if (params.type_k.size() > 1 || params.type_k != cmd_params_defaults.type_k) {
            fields.emplace_back("type_k");
        }
        if (params.type_v.size() > 1 || params.type_v != cmd_params_defaults.type_v) {
            fields.emplace_back("type_v");
        }
        if (params.main_gpu.size() > 1 || params.main_gpu != cmd_params_defaults.main_gpu) {
            fields.emplace_back("main_gpu");
        }
        if (params.split_mode.size() > 1 || params.split_mode != cmd_params_defaults.split_mode) {
            fields.emplace_back("split_mode");
        }
        if (params.no_kv_offload.size() > 1 || params.no_kv_offload != cmd_params_defaults.no_kv_offload) {
            fields.emplace_back("no_kv_offload");
        }
        if (params.flash_attn.size() > 1 || params.flash_attn != cmd_params_defaults.flash_attn) {
            fields.emplace_back("flash_attn");
        }
        if (params.tensor_split.size() > 1 || params.tensor_split != cmd_params_defaults.tensor_split) {
            fields.emplace_back("tensor_split");
        }
        if (params.use_mmap.size() > 1 || params.use_mmap != cmd_params_defaults.use_mmap) {
            fields.emplace_back("use_mmap");
        }
        if (params.embeddings.size() > 1 || params.embeddings != cmd_params_defaults.embeddings) {
            fields.emplace_back("embeddings");
        }
        fields.emplace_back("test");
        fields.emplace_back("t/s");

        fprintf(fout, "|");
        for (const auto & field : fields) {
            fprintf(fout, " %*s |", get_field_width(field), get_field_display_name(field).c_str());
        }
        fprintf(fout, "\n");
        fprintf(fout, "|");
        for (const auto & field : fields) {
            int width = get_field_width(field);
            fprintf(fout, " %s%s |", std::string(std::abs(width) - 1, '-').c_str(), width > 0 ? ":" : "-");
        }
        fprintf(fout, "\n");
    }

    void print_test(const test & t) override {
        std::map<std::string, std::string> vmap = t.get_map();

        fprintf(fout, "|");
        for (const auto & field : fields) {
            std::string value;
            char buf[128];
            if (field == "model") {
                value = t.model_type;
            } else if (field == "size") {
                if (t.model_size < 1024*1024*1024) {
                    snprintf(buf, sizeof(buf), "%.2f MiB", t.model_size / 1024.0 / 1024.0);
                } else {
                    snprintf(buf, sizeof(buf), "%.2f GiB", t.model_size / 1024.0 / 1024.0 / 1024.0);
                }
                value = buf;
            } else if (field == "params") {
                if (t.model_n_params < 1000*1000*1000) {
                    snprintf(buf, sizeof(buf), "%.2f M", t.model_n_params / 1e6);
                } else {
                    snprintf(buf, sizeof(buf), "%.2f B", t.model_n_params / 1e9);
                }
                value = buf;
            } else if (field == "backend") {
                value = test::get_backend();
                if (t.has_rpc) {
                    value += "+RPC";
                }
            } else if (field == "test") {
                if (t.n_prompt > 0 && t.n_gen == 0) {
                    snprintf(buf, sizeof(buf), "pp%d", t.n_prompt);
                } else if (t.n_gen > 0 && t.n_prompt == 0) {
                    snprintf(buf, sizeof(buf), "tg%d", t.n_gen);
                } else {
                    snprintf(buf, sizeof(buf), "pp%d+tg%d", t.n_prompt, t.n_gen);
                }
                value = buf;
            } else if (field == "t/s") {
                snprintf(buf, sizeof(buf), "%.2f ± %.2f", t.avg_ts(), t.stdev_ts());
                value = buf;
            } else if (vmap.find(field) != vmap.end()) {
                value = vmap.at(field);
            } else {
                assert(false);
                exit(1);
            }

            int width = get_field_width(field);
            if (field == "t/s") {
                // HACK: the utf-8 character is 2 bytes
                width += 1;
            }
            fprintf(fout, " %*s |", width, value.c_str());
        }
        fprintf(fout, "\n");
    }

    void print_footer() override {
        fprintf(fout, "\nbuild: %s (%d)\n", test::build_commit.c_str(), test::build_number);
    }
};

struct sql_printer : public printer {
    static std::string get_sql_field_type(const std::string & field) {
        switch (test::get_field_type(field)) {
            case test::STRING:
                return "TEXT";
            case test::BOOL:
            case test::INT:
                return "INTEGER";
            case test::FLOAT:
                return "REAL";
            default:
                assert(false);
                exit(1);
        }
    }

    void print_header(const cmd_params & params) override {
        std::vector<std::string> fields = test::get_fields();
        fprintf(fout, "CREATE TABLE IF NOT EXISTS test (\n");
        for (size_t i = 0; i < fields.size(); i++) {
            fprintf(fout, "  %s %s%s\n", fields.at(i).c_str(), get_sql_field_type(fields.at(i)).c_str(), i < fields.size() - 1 ? "," : "");
        }
        fprintf(fout, ");\n");
        fprintf(fout, "\n");
        (void) params;
    }

    void print_test(const test & t) override {
        fprintf(fout, "INSERT INTO test (%s) ", join(test::get_fields(), ", ").c_str());
        fprintf(fout, "VALUES (");
        std::vector<std::string> values = t.get_values();
        for (size_t i = 0; i < values.size(); i++) {
            fprintf(fout, "'%s'%s", values.at(i).c_str(), i < values.size() - 1 ? ", " : "");
        }
        fprintf(fout, ");\n");
    }
};

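// processes n_prompt random tokens in chunks of up to n_batch to measure
// prompt-processing speed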
static void test_prompt(llama_context * ctx, int n_prompt, int n_past, int n_batch, int n_threads) {
    llama_set_n_threads(ctx, n_threads, n_threads);

    const llama_model * model = llama_get_model(ctx);
    const int32_t n_vocab = llama_n_vocab(model);

    std::vector<llama_token> tokens(n_batch);

    int n_processed = 0;

    while (n_processed < n_prompt) {
        int n_tokens = std::min(n_prompt - n_processed, n_batch);
        tokens[0] = n_processed == 0 && llama_add_bos_token(model) ? llama_token_bos(model) : std::rand() % n_vocab;
        for (int i = 1; i < n_tokens; i++) {
            tokens[i] = std::rand() % n_vocab;
        }
        llama_decode(ctx, llama_batch_get_one(tokens.data(), n_tokens, n_past + n_processed, 0));
        n_processed += n_tokens;
    }

    llama_synchronize(ctx);
}

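// decodes n_gen tokens one at a time, synchronizing after each, to measure
// token-generation speed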
static void test_gen(llama_context * ctx, int n_gen, int n_past, int n_threads) {
    llama_set_n_threads(ctx, n_threads, n_threads);

    const llama_model * model = llama_get_model(ctx);
    const int32_t n_vocab = llama_n_vocab(model);

    llama_token token = llama_add_bos_token(model) ? llama_token_bos(model) : std::rand() % n_vocab;

    for (int i = 0; i < n_gen; i++) {
        llama_decode(ctx, llama_batch_get_one(&token, 1, n_past + i, 0));
        llama_synchronize(ctx);
        token = std::rand() % n_vocab;
    }
}

static void llama_null_log_callback(enum ggml_log_level level, const char * text, void * user_data) {
    (void) level;
    (void) text;
    (void) user_data;
}

static std::unique_ptr<printer> create_printer(output_formats format) {
    switch (format) {
        case NONE:
            return nullptr;
        case CSV:
            return std::unique_ptr<printer>(new csv_printer());
        case JSON:
            return std::unique_ptr<printer>(new json_printer());
        case JSONL:
            return std::unique_ptr<printer>(new jsonl_printer());
        case MARKDOWN:
            return std::unique_ptr<printer>(new markdown_printer());
        case SQL:
            return std::unique_ptr<printer>(new sql_printer());
    }
    GGML_ABORT("fatal error");
}

int main(int argc, char ** argv) {
    // try to set locale for unicode characters in markdown
    setlocale(LC_CTYPE, ".UTF-8");

#if !defined(NDEBUG)
    fprintf(stderr, "warning: asserts enabled, performance may be affected\n");
#endif

#if (defined(_MSC_VER) && defined(_DEBUG)) || (!defined(_MSC_VER) && !defined(__OPTIMIZE__))
    fprintf(stderr, "warning: debug build, performance may be affected\n");
#endif

#if defined(__SANITIZE_ADDRESS__) || defined(__SANITIZE_THREAD__)
    fprintf(stderr, "warning: sanitizer enabled, performance may be affected\n");
#endif

    cmd_params params = parse_cmd_params(argc, argv);

    // initialize llama.cpp
    if (!params.verbose) {
        llama_log_set(llama_null_log_callback, NULL);
    }
    llama_backend_init();
    llama_numa_init(params.numa);

    set_process_priority(params.prio);

    // initialize printer
    std::unique_ptr<printer> p     = create_printer(params.output_format);
    std::unique_ptr<printer> p_err = create_printer(params.output_format_stderr);

    if (p) {
        p->fout = stdout;
        p->print_header(params);
    }

    if (p_err) {
        p_err->fout = stderr;
        p_err->print_header(params);
    }

    std::vector<cmd_params_instance> params_instances = get_cmd_params_instances(params);

    llama_model * lmodel = nullptr;
    const cmd_params_instance * prev_inst = nullptr;

    int params_idx = 0;
    auto params_count = params_instances.size();
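    // for each instance: (re)load the model if needed, run a warmup pass, then
    // time params.reps repetitions and hand the result to the printers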
    for (const auto & inst : params_instances) {
        params_idx++;
        if (params.progress) {
            fprintf(stderr, "llama-bench: benchmark %d/%zu: starting\n", params_idx, params_count);
        }
        // keep the same model between tests when possible
        if (!lmodel || !prev_inst || !inst.equal_mparams(*prev_inst)) {
            if (lmodel) {
                llama_free_model(lmodel);
            }

            lmodel = llama_load_model_from_file(inst.model.c_str(), inst.to_llama_mparams());
            if (lmodel == NULL) {
                fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, inst.model.c_str());
                return 1;
            }
            prev_inst = &inst;
        }

        llama_context * ctx = llama_new_context_with_model(lmodel, inst.to_llama_cparams());
        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to create context with model '%s'\n", __func__, inst.model.c_str());
            llama_free_model(lmodel);
            return 1;
        }

        test t(inst, lmodel, ctx);

        llama_kv_cache_clear(ctx);

        // cool off before the test
        if (params.delay) {
            std::this_thread::sleep_for(std::chrono::seconds(params.delay));
        }

        struct ggml_threadpool_params tpp = ggml_threadpool_params_default(t.n_threads);
        if (!parse_cpu_mask(t.cpu_mask, tpp.cpumask)) {
            fprintf(stderr, "%s: failed to parse cpu-mask: %s\n", __func__, t.cpu_mask.c_str());
            exit(1);
        }
        tpp.strict_cpu = t.cpu_strict;
        tpp.poll       = t.poll;
        tpp.prio       = params.prio;

        struct ggml_threadpool * threadpool = ggml_threadpool_new(&tpp);
        if (!threadpool) {
            fprintf(stderr, "%s: threadpool create failed : n_threads %d\n", __func__, tpp.n_threads);
            exit(1);
        }

        llama_attach_threadpool(ctx, threadpool, NULL);

        // warmup run
        if (t.n_prompt > 0) {
            if (params.progress) {
                fprintf(stderr, "llama-bench: benchmark %d/%zu: warmup prompt run\n", params_idx, params_count);
            }
            //test_prompt(ctx, std::min(t.n_batch, std::min(t.n_prompt, 32)), 0, t.n_batch, t.n_threads);
            test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
        }
        if (t.n_gen > 0) {
            if (params.progress) {
                fprintf(stderr, "llama-bench: benchmark %d/%zu: warmup generation run\n", params_idx, params_count);
            }
            test_gen(ctx, 1, 0, t.n_threads);
        }

        for (int i = 0; i < params.reps; i++) {
            llama_kv_cache_clear(ctx);

            uint64_t t_start = get_time_ns();

            if (t.n_prompt > 0) {
                if (params.progress) {
                    fprintf(stderr, "llama-bench: benchmark %d/%zu: prompt run %d/%d\n", params_idx, params_count, i + 1, params.reps);
                }
                test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
            }
            if (t.n_gen > 0) {
                if (params.progress) {
                    fprintf(stderr, "llama-bench: benchmark %d/%zu: generation run %d/%d\n", params_idx, params_count, i + 1, params.reps);
                }
                test_gen(ctx, t.n_gen, t.n_prompt, t.n_threads);
            }

            uint64_t t_ns = get_time_ns() - t_start;
            t.samples_ns.push_back(t_ns);
        }

        if (p) {
            p->print_test(t);
            fflush(p->fout);
        }

        if (p_err) {
            p_err->print_test(t);
            fflush(p_err->fout);
        }

        llama_perf_context_print(ctx);

        llama_free(ctx);

        ggml_threadpool_free(threadpool);
    }

    llama_free_model(lmodel);

    if (p) {
        p->print_footer();
    }

    if (p_err) {
        p_err->print_footer();
    }

    llama_backend_free();

    return 0;
}