import Foundation
import llama

let arguments = CommandLine.arguments

// Check that we have at least one argument (the model path)
guard arguments.count > 1 else {
    print("Usage: swift MODEL_PATH [PROMPT] [PARALLEL]")
    exit(1)
}

let modelPath: String = arguments[1]
let prompt: String = arguments.count > 2 ? arguments[2] : "Hello my name is"
let n_parallel: Int = arguments.count > 3 ? Int(arguments[3]) ?? 1 : 1

// total length of the sequences including the prompt
let n_len: Int = 32

// init LLM
llama_backend_init()
defer {
    llama_backend_free()
}

let model_params = llama_model_default_params()
guard let model = llama_load_model_from_file(modelPath.cString(using: .utf8), model_params) else {
    print("Failed to load model")
    exit(1)
}
defer {
    llama_free_model(model)
}

var tokens = tokenize(text: prompt, add_bos: true)
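
// required KV cache cells: the prompt is stored once and shared across
// sequences (see llama_kv_cache_seq_cp below), plus each of the n_parallel
// sequences generates up to (n_len - prompt length) new tokens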
let n_kv_req = UInt32(tokens.count) + UInt32((n_len - Int(tokens.count)) * n_parallel)

var context_params = llama_context_default_params()
context_params.n_ctx = n_kv_req
context_params.n_batch = UInt32(max(n_len, n_parallel))
context_params.n_threads = 8
context_params.n_threads_batch = 8

let context = llama_new_context_with_model(model, context_params)
guard context != nil else {
    print("Failed to initialize context")
    exit(1)
}
defer {
    llama_free(context)
}

let sparams = llama_sampler_chain_default_params()

let smpl = llama_sampler_chain_init(sparams)
guard smpl != nil else {
    print("Failed to initialize sampling")
    exit(1)
}
defer {
    llama_sampler_free(smpl)
}
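
// build the sampler chain: top-k and top-p narrow the candidate set,
// temperature rescales the remaining probabilities, and dist draws the final
// token using a fixed seed (1234) so runs are reproducible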
llama_sampler_chain_add(smpl, llama_sampler_init_top_k(40))
llama_sampler_chain_add(smpl, llama_sampler_init_top_p(0.9, 1))
llama_sampler_chain_add(smpl, llama_sampler_init_temp(0.4))
llama_sampler_chain_add(smpl, llama_sampler_init_dist(1234))

let n_ctx = llama_n_ctx(context)

print("\nn_len = \(n_len), n_ctx = \(n_ctx), n_batch = \(context_params.n_batch), n_parallel = \(n_parallel), n_kv_req = \(n_kv_req)\n")

if n_kv_req > n_ctx {
    print("error: n_kv_req (\(n_kv_req)) > n_ctx, the required KV cache size is not big enough")
    exit(1)
}
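
// echo the tokenized prompt back as text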
var buffer: [CChar] = []
for id: llama_token in tokens {
    print(token_to_piece(token: id, buffer: &buffer) ?? "", terminator: "")
}

print("\n")
var batch = llama_batch_init(max(Int32(tokens.count), Int32(n_parallel)), 0, 1)
defer {
    llama_batch_free(batch)
}

// evaluate the initial prompt
batch.n_tokens = Int32(tokens.count)

for (i, token) in tokens.enumerated() {
    batch.token[i] = token
    batch.pos[i] = Int32(i)
    batch.n_seq_id[i] = 1
    // batch.seq_id[i] is an optional C pointer to this token's sequence ids;
    // all prompt tokens belong to sequence 0
    if let seq_id = batch.seq_id[i] {
        seq_id[0] = 0
    }
    batch.logits[i] = 0
}

// llama_decode will output logits only for the last token of the prompt
batch.logits[Int(batch.n_tokens) - 1] = 1

if llama_decode(context, batch) != 0 {
    print("llama_decode() failed")
    exit(1)
}
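
// the prompt was decoded once into sequence 0; copy its KV cache entries to
// the other sequences so every stream shares the same prefix without
// re-evaluating it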
for i in 1 ..< n_parallel {
    llama_kv_cache_seq_cp(context, 0, Int32(i), 0, batch.n_tokens)
}

if n_parallel > 1 {
    print("generating \(n_parallel) sequences ...\n")
}
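
// per-stream accumulated text, per-stream UTF-8 carry buffers, and the index
// of each stream's latest token in the current batch (-1 once finished)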
var streams: [String] = .init(repeating: "", count: n_parallel)
var streamBuffers: [[CChar]] = .init(repeating: [], count: n_parallel)
var i_batch = [Int32](repeating: batch.n_tokens - 1, count: n_parallel)

var n_cur = batch.n_tokens
var n_decode = 0

let t_main_start = ggml_time_us()

while n_cur <= n_len {
    // prepare the next batch
    batch.n_tokens = 0

    // sample the next token for each parallel sequence / stream
    for i in 0 ..< n_parallel {
        if i_batch[i] < 0 {
            // the stream has already finished
            continue
        }
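
        // sample from the logits this stream produced in the previous decode;
        // i_batch[i] is the row of that token in the batch output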
        let new_token_id = llama_sampler_sample(smpl, context, i_batch[i])

        // is it an end of stream? -> mark the stream as finished
        if llama_token_is_eog(model, new_token_id) || n_cur == n_len {
            i_batch[i] = -1
            if n_parallel > 1 {
                print("stream \(i) finished at n_cur = \(n_cur)")
            }

            continue
        }

        let nextStringPiece = token_to_piece(token: new_token_id, buffer: &streamBuffers[i]) ?? ""

        // if there is only one stream, we print immediately to stdout
        if n_parallel == 1 {
            print(nextStringPiece, terminator: "")
        }
        streams[i] += nextStringPiece

        // push this new token for next evaluation
        batch.token[Int(batch.n_tokens)] = new_token_id
        batch.pos[Int(batch.n_tokens)] = n_cur
        batch.n_seq_id[Int(batch.n_tokens)] = 1
        if let seq_id = batch.seq_id[Int(batch.n_tokens)] {
            seq_id[0] = Int32(i)
        }
        batch.logits[Int(batch.n_tokens)] = 1

        i_batch[i] = batch.n_tokens

        batch.n_tokens += 1

        n_decode += 1
    }

    // all streams are finished
    if batch.n_tokens == 0 {
        break
    }

    n_cur += 1

    // evaluate the current batch with the transformer model
    if llama_decode(context, batch) != 0 {
        print("llama_decode() failed")
        exit(1)
    }
}

if n_parallel > 1 {
    print("\n")
    for (i, stream) in streams.enumerated() {
        print("sequence \(i):\n\n\(prompt)\(stream)\n")
    }
}

let t_main_end = ggml_time_us()

print("decoded \(n_decode) tokens in \(String(format: "%.2f", Double(t_main_end - t_main_start) / 1_000_000.0)) s, speed: \(String(format: "%.2f", Double(n_decode) / (Double(t_main_end - t_main_start) / 1_000_000.0))) t/s\n\n")
llama_perf_sampler_print(smpl)
llama_perf_context_print(context)
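
// tokenize `text` with the model's vocabulary; allocates a worst-case buffer
// (one token per UTF-8 byte, plus one slot for BOS) and copies the result
// into a Swift array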
private func tokenize(text: String, add_bos: Bool) -> [llama_token] {
    let utf8Count = text.utf8.count
    let n_tokens = utf8Count + (add_bos ? 1 : 0)
    let tokens = UnsafeMutablePointer<llama_token>.allocate(capacity: n_tokens)
    let tokenCount = llama_tokenize(model, text, Int32(utf8Count), tokens, Int32(n_tokens), add_bos, /*special tokens*/ false)
    var swiftTokens: [llama_token] = []
    for i in 0 ..< tokenCount {
        swiftTokens.append(tokens[Int(i)])
    }
    tokens.deallocate()
    return swiftTokens
}
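
// convert one token to its text piece; a token may end mid-UTF-8-character,
// so incomplete bytes are carried in `buffer` across calls until they decode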
private func token_to_piece(token: llama_token, buffer: inout [CChar]) -> String? {
    var result = [CChar](repeating: 0, count: 8)
    // a negative return value means the piece did not fit; its magnitude is the required size
    let nTokens = llama_token_to_piece(model, token, &result, Int32(result.count), 0, false)
    if nTokens < 0 {
        let actualTokensCount = -Int(nTokens)
        result = .init(repeating: 0, count: actualTokensCount)
        let check = llama_token_to_piece(
            model,
            token,
            &result,
            Int32(result.count),
            0,
            false
        )
        assert(check == actualTokensCount)
    } else {
        result.removeLast(result.count - Int(nTokens))
    }
    if buffer.isEmpty, let utfString = String(cString: result + [0], encoding: .utf8) {
        return utfString
    } else {
        buffer.append(contentsOf: result)
        let data = Data(buffer.map { UInt8(bitPattern: $0) })
        if buffer.count >= 4 { // 4 bytes is the maximum length of a UTF-8 character, so reset the buffer
            buffer = []
        }
        guard let bufferString = String(data: data, encoding: .utf8) else {
            return nil
        }
        buffer = []
        return bufferString
    }
}