text-generation-inference

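"""Tests for CausalLM / CausalLMBatch using gpt2: batch construction from protobuf
requests, single-step token generation, request filtering and batch concatenation."""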
import pytest
import torch

from copy import copy
from transformers import AutoTokenizer

from text_generation_server.pb import generate_pb2
from text_generation_server.models.causal_lm import CausalLM, CausalLMBatch


@pytest.fixture(scope="session")
def default_causal_lm():
    return CausalLM("gpt2")


@pytest.fixture(scope="session")
def gpt2_tokenizer():
    tokenizer = AutoTokenizer.from_pretrained("gpt2", padding_side="left")
    # GPT-2 has no dedicated pad token; reuse the end-of-text token id (50256) for padding.
    tokenizer.pad_token_id = 50256
    return tokenizer


@pytest.fixture
def default_pb_request(default_pb_parameters, default_pb_stop_parameters):
    return generate_pb2.Request(
        id=0,
        inputs="Test",
        prefill_logprobs=True,
        truncate=100,
        parameters=default_pb_parameters,
        stopping_parameters=default_pb_stop_parameters,
    )


@pytest.fixture
def default_pb_batch(default_pb_request):
    return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1)


@pytest.fixture
def default_causal_lm_batch(default_pb_batch, gpt2_tokenizer):
    return CausalLMBatch.from_pb(
        default_pb_batch, gpt2_tokenizer, torch.float32, torch.device("cpu")
    )


@pytest.fixture
def default_multi_requests_causal_lm_batch(default_pb_request, gpt2_tokenizer):
    req_0 = copy(default_pb_request)
    req_0.id = 1
    # The second request stops after 5 new tokens, so the two requests finish at different steps.
    req_1 = default_pb_request
    req_1.id = 2
    req_1.stopping_parameters.max_new_tokens = 5

    batch_pb = generate_pb2.Batch(id=1, requests=[req_0, req_1], size=2)
    return CausalLMBatch.from_pb(
        batch_pb, gpt2_tokenizer, torch.float32, torch.device("cpu")
    )


def test_batch_from_pb(default_pb_batch, default_causal_lm_batch):
    batch = default_causal_lm_batch

    assert batch.batch_id == default_pb_batch.id
    assert batch.requests == default_pb_batch.requests

    # "Test" encodes to a single gpt2 token (id 14402).
    assert len(batch.input_ids) == default_pb_batch.size
    assert batch.input_ids[0][-1] == 14402
    assert torch.all(batch.input_ids[0][:-1] == 50256)

    # Only the real token is attended to; the remaining positions are reserved for future tokens.
    assert batch.attention_mask[0, 0] == 1
    assert torch.all(batch.attention_mask[0, 1:] == 0)

    assert batch.past_key_values is None

    assert all(
        [
            torch.equal(input_ids, all_input_ids[:, 0])
            for input_ids, all_input_ids in zip(batch.input_ids, batch.all_input_ids)
        ]
    )

    assert batch.input_lengths == [1]

    assert len(batch) == default_pb_batch.size
    assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch)

    assert batch.max_input_length == batch.input_lengths[0]


def test_batch_concatenate_no_prefill(default_causal_lm_batch):
    # Batches that have not gone through prefill yet cannot be concatenated.
    with pytest.raises(ValueError):
        CausalLMBatch.concatenate([default_causal_lm_batch, default_causal_lm_batch])


def test_causal_lm_batch_type(default_causal_lm):
    assert default_causal_lm.batch_type == CausalLMBatch


def test_causal_lm_generate_token(default_causal_lm, default_causal_lm_batch):
    sequence_length = len(default_causal_lm_batch.all_input_ids[0])
    generations, next_batch, _ = default_causal_lm.generate_token(
        default_causal_lm_batch
    )

    assert len(generations) == len(next_batch)
    assert isinstance(next_batch, CausalLMBatch)

    assert len(next_batch.all_input_ids) == len(next_batch)
    assert len(next_batch.all_input_ids[0]) == sequence_length + 1
    assert len(next_batch.attention_mask[0]) == 11
    assert next_batch.all_input_ids[0][-1] == 13
    assert next_batch.all_input_ids[0][-2] == 14402
    assert torch.all(next_batch.all_input_ids[0][:-2] == 50256)

    assert torch.all(next_batch.attention_mask[0][0:2] == 1)
    assert torch.all(next_batch.attention_mask[0][2:] == 0)

    assert next_batch.input_ids.shape == (len(next_batch), 1)
    assert next_batch.input_ids[0, 0] == 13

    assert next_batch.input_lengths == [2]
    assert next_batch.max_input_length == next_batch.input_lengths[0]

    # gpt2 KV cache entries have shape (batch, 12 heads, seq_len, 64 head_dim) per layer.
    assert next_batch.past_key_values is not None
    assert all(
        [p[0].shape == (1, 12, sequence_length, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[1].shape == (1, 12, sequence_length, 64) for p in next_batch.past_key_values]
    )
    assert all([generation.generated_text is None for generation in generations])
    assert all([len(generation.prefill_tokens) == 1 for generation in generations])
    assert all(
        [
            token_id.item() == 13
            for generation in generations
            for token_id in generation.tokens.token_ids
        ]
    )
    assert all(
        [
            token_text == "."
            for generation in generations
            for token_text in generation.tokens.texts
        ]
    )
    assert generations[0].request_id == 0


def test_causal_lm_generate_token_completion(
    default_causal_lm, default_causal_lm_batch
):
    next_batch = default_causal_lm_batch
    for _ in range(default_causal_lm_batch.stopping_criterias[0].max_new_tokens - 1):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == ".java:784) at net.minecraft."
    assert generations[0].request_id == default_causal_lm_batch.requests[0].id
    assert (
        generations[0].generated_text.generated_tokens
        == default_causal_lm_batch.stopping_criterias[0].max_new_tokens
    )


def test_causal_lm_generate_token_completion_multi(
    default_causal_lm, default_multi_requests_causal_lm_batch
):
    next_batch = default_multi_requests_causal_lm_batch

    for _ in range(
        default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 1
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert generations[1].generated_text.text == ".java:784)"
    assert (
        generations[1].request_id
        == default_multi_requests_causal_lm_batch.requests[1].id
    )
    assert (
        generations[1].generated_text.generated_tokens
        == default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
    )
    # Copy stopping_criterias before filtering
    stopping_criterias = (
        default_multi_requests_causal_lm_batch.stopping_criterias.copy()
    )

    next_batch = next_batch.filter([next_batch.requests[0].id])

    for _ in range(
        stopping_criterias[0].max_new_tokens - stopping_criterias[1].max_new_tokens - 1
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == ".java:784) at net.minecraft."
    assert (
        generations[0].request_id
        == default_multi_requests_causal_lm_batch.requests[0].id
    )
    assert (
        generations[0].generated_text.generated_tokens
        == default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens
    )


def test_batch_concatenate(
    default_causal_lm, default_causal_lm_batch, default_multi_requests_causal_lm_batch
):
    next_batch_0 = default_causal_lm_batch
    _, next_batch_0, _ = default_causal_lm.generate_token(next_batch_0)
    _, next_batch_0, _ = default_causal_lm.generate_token(next_batch_0)

    next_batch_1 = default_multi_requests_causal_lm_batch
    _, next_batch_1, _ = default_causal_lm.generate_token(next_batch_1)

    # Clone past_key_values before concatenating to compare after,
    # because they are removed from the concatenated batches
    next_batch_0_past_key_values = [
        (k.clone(), v.clone()) for (k, v) in next_batch_0.past_key_values
    ]
    next_batch_1_past_key_values = [
        (k.clone(), v.clone()) for (k, v) in next_batch_1.past_key_values
    ]

    next_batch = CausalLMBatch.concatenate([next_batch_0, next_batch_1])

    assert torch.equal(next_batch.all_input_ids[0], next_batch_0.all_input_ids[0])
    assert torch.equal(next_batch.all_input_ids[1], next_batch_1.all_input_ids[0])
    assert torch.equal(next_batch.all_input_ids[2], next_batch_1.all_input_ids[1])

    assert torch.all(
        next_batch.attention_mask[0, : -next_batch.padding_right_offset] == 1
    )
    assert torch.all(
        next_batch.attention_mask[1:, 1 : -next_batch.padding_right_offset] == 1
    )
    assert torch.all(next_batch.attention_mask[1:, 3:] == 0)

    assert next_batch.batch_id == 0
    assert next_batch.input_ids[0, 0] == 12355
    assert torch.all(next_batch.input_ids[1:] == 13)

    assert next_batch.input_lengths == [3, 2, 2]
    assert next_batch.max_input_length == 3

    assert next_batch.requests[0] == next_batch_0.requests[0]
    assert next_batch.requests[1:] == next_batch_1.requests

    assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0]
    assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers

    assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0]
    assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias

    assert next_batch.past_key_values is not None
    assert all([p[0].shape == (3, 12, 2, 64) for p in next_batch.past_key_values])
    assert all([p[1].shape == (3, 12, 2, 64) for p in next_batch.past_key_values])

    for i, past in enumerate(next_batch.past_key_values):
        assert torch.equal(next_batch_0_past_key_values[i][0][0, :, -2:], past[0][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][0][:, :, -1:], past[0][1:, :, -1:, :]
        )

        assert torch.equal(next_batch_0_past_key_values[i][1][0, :, -2:], past[1][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][1][:, :, -1:], past[1][1:, :, -1:, :]
        )

    for _ in range(
        default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 2
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 3
    assert generations[2].generated_text.text == ".java:784)"
    assert (
        generations[2].request_id
        == default_multi_requests_causal_lm_batch.requests[1].id
    )
    assert (
        generations[2].generated_text.generated_tokens
        == default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
    )

    next_batch = next_batch.filter(
        [next_batch.requests[0].id, next_batch.requests[1].id]
    )

    for _ in range(
        default_causal_lm_batch.stopping_criterias[0].max_new_tokens
        - default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
        - 2
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert generations[0].generated_text.text == ".java:784) at net.minecraft."
    assert generations[0].request_id == default_causal_lm_batch.requests[0].id
    assert (
        generations[0].generated_text.generated_tokens
        == default_causal_lm_batch.stopping_criterias[0].max_new_tokens
    )

    next_batch = next_batch.filter([next_batch.requests[1].id])

    for _ in range(
        default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens
        - default_causal_lm_batch.stopping_criterias[0].max_new_tokens
        - default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
        - 4
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == ".java:784) at net.minecraft."
    assert (
        generations[0].request_id
        == default_multi_requests_causal_lm_batch.requests[0].id
    )
    assert (
        generations[0].generated_text.generated_tokens
        == default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens
    )