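# Tests for text_generation_server's Seq2SeqLM / Seq2SeqLMBatch, run on CPU
# against the small encoder-decoder checkpoint "bigscience/mt0-small".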
import pytest
import torch

from copy import copy

from transformers import AutoTokenizer

from text_generation_server.pb import generate_pb2
from text_generation_server.models.seq2seq_lm import Seq2SeqLM, Seq2SeqLMBatch


@pytest.fixture(scope="session")
def mt0_small_tokenizer():
    tokenizer = AutoTokenizer.from_pretrained(
        "bigscience/mt0-small", padding_side="left"
    )
    # The mt0 tokenizer defines no BOS token; id 0 (the T5-family pad /
    # decoder-start token) is used so the batch code can seed decoder inputs.
    tokenizer.bos_token_id = 0
    return tokenizer


@pytest.fixture(scope="session")
def default_seq2seq_lm():
    return Seq2SeqLM("bigscience/mt0-small")


@pytest.fixture
def default_pb_request(default_pb_parameters, default_pb_stop_parameters):
    return generate_pb2.Request(
        id=0,
        inputs="Test",
        prefill_logprobs=True,
        truncate=100,
        parameters=default_pb_parameters,
        stopping_parameters=default_pb_stop_parameters,
    )


@pytest.fixture
def default_pb_batch(default_pb_request):
    return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1)


@pytest.fixture
def default_seq2seq_lm_batch(default_pb_batch, mt0_small_tokenizer):
    return Seq2SeqLMBatch.from_pb(
        default_pb_batch, mt0_small_tokenizer, torch.float32, torch.device("cpu")
    )


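# Two-request batch: req_0 is an unmodified copy of the default request,
# while req_1 reuses the original object with generation capped at
# max_new_tokens=5, so the two requests finish at different steps.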
@pytest.fixture
def default_multi_requests_seq2seq_lm_batch(default_pb_request, mt0_small_tokenizer):
    req_0 = copy(default_pb_request)
    req_0.id = 1
    req_1 = default_pb_request
    req_1.id = 2
    req_1.stopping_parameters.max_new_tokens = 5

    batch_pb = generate_pb2.Batch(id=0, requests=[req_0, req_1], size=2)
    return Seq2SeqLMBatch.from_pb(
        batch_pb, mt0_small_tokenizer, torch.float32, torch.device("cpu")
    )


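# from_pb should left-pad the tokenized "Test" input (ids 4268, 1) and leave
# decoder state, encoder output and the KV cache unallocated until the first
# generate_token call.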
def test_batch_from_pb(default_pb_batch, default_seq2seq_lm_batch):
    batch = default_seq2seq_lm_batch
    sequence_length = len(default_seq2seq_lm_batch.input_ids[0])

    assert batch.batch_id == default_pb_batch.id
    assert batch.requests == default_pb_batch.requests

    assert batch.input_ids.shape == (default_pb_batch.size, sequence_length)
    assert batch.input_ids[0][-2] == 4268
    assert batch.input_ids[0][-1] == 1
    assert torch.all(batch.input_ids[0][:-2] == 0)

    assert torch.all(batch.attention_mask[0][-2:] == 1)
    assert torch.all(batch.attention_mask[0][:-2] == 0)

    assert len(batch.decoder_input_ids) == default_pb_batch.size
    assert batch.decoder_attention_mask is None
    assert batch.encoder_last_hidden_state is None

    assert batch.past_key_values is None

    assert batch.input_lengths == [2]
    assert batch.decoder_input_lengths == [1]

    assert len(batch) == default_pb_batch.size
    assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch)

    assert batch.max_input_length == batch.input_lengths[0]
    assert batch.max_decoder_input_length == batch.decoder_input_lengths[0]


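# Batches that have not been through prefill carry no encoder output yet;
# concatenate refuses to merge them and raises a ValueError.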
def test_batch_concatenate_no_prefill(default_seq2seq_lm_batch):
    with pytest.raises(ValueError):
        Seq2SeqLMBatch.concatenate([default_seq2seq_lm_batch, default_seq2seq_lm_batch])


def test_seq2seq_lm_batch_type(default_seq2seq_lm):
    assert default_seq2seq_lm.batch_type == Seq2SeqLMBatch


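# A first generate_token call performs the prefill step: it runs the encoder,
# emits one decoder token per request and populates the past key/value cache.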
def test_seq2seq_lm_generate_token(default_seq2seq_lm, default_seq2seq_lm_batch):
    sequence_length = len(default_seq2seq_lm_batch.input_ids[0])
    generations, next_batch, _ = default_seq2seq_lm.generate_token(
        default_seq2seq_lm_batch
    )

    assert len(generations) == len(next_batch)
    assert isinstance(next_batch, Seq2SeqLMBatch)

    assert next_batch.input_ids is None
    assert torch.equal(
        next_batch.attention_mask, default_seq2seq_lm_batch.attention_mask
    )
    assert next_batch.input_lengths == default_seq2seq_lm_batch.input_lengths
    assert next_batch.max_input_length == default_seq2seq_lm_batch.max_input_length
    assert (
        next_batch.next_token_choosers == default_seq2seq_lm_batch.next_token_choosers
    )
    assert next_batch.stopping_criterias == default_seq2seq_lm_batch.stopping_criterias

    assert len(next_batch.decoder_input_ids) == len(next_batch)
    assert next_batch.all_decoder_input_ids[0][0] == 0
    assert next_batch.all_decoder_input_ids[0][1] == 259
    assert next_batch.decoder_attention_mask is None
    assert next_batch.encoder_last_hidden_state.shape == (1, sequence_length, 512)

    assert next_batch.decoder_input_lengths == [2]
    assert next_batch.max_decoder_input_length == 2

    # Cache shapes follow mt0-small's geometry (6 heads, 64-dim heads): per
    # layer, p[0]/p[1] hold decoder self-attention k/v for the one generated
    # position, p[2]/p[3] hold cross-attention k/v over the encoder sequence.
    assert next_batch.past_key_values is not None
    assert all(
        [p[0].shape == (len(next_batch), 6, 1, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[1].shape == (len(next_batch), 6, 1, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [
            p[2].shape == (len(next_batch), 6, sequence_length, 64)
            for p in next_batch.past_key_values
        ]
    )
    assert all(
        [
            p[3].shape == (len(next_batch), 6, sequence_length, 64)
            for p in next_batch.past_key_values
        ]
    )
    assert all([generation.generated_text is None for generation in generations])
    assert all([len(generation.prefill_tokens) == 1 for generation in generations])
    assert all(
        [
            token_id.item() == 259
            for generation in generations
            for token_id in generation.tokens.token_ids
        ]
    )
    assert all(
        [
            token_text == " "
            for generation in generations
            for token_text in generation.tokens.texts
        ]
    )
    assert generations[0].request_id == 0


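# Drive the single-request batch to completion: once every request has
# finished, generate_token returns None in place of a next batch.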
def test_seq2seq_lm_generate_token_completion(
    default_seq2seq_lm, default_seq2seq_lm_batch
):
    next_batch = default_seq2seq_lm_batch
    for _ in range(6):
        generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == "a few weeks"
    assert generations[0].request_id == default_seq2seq_lm_batch.requests[0].id
    assert generations[0].generated_text.generated_tokens == 7


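# Requests with different stopping criteria finish at different steps: the
# finished request is reported as soon as it completes and the batch is then
# filtered down to the ids that are still running.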
def test_seq2seq_lm_generate_token_completion_multi(
    default_seq2seq_lm, default_multi_requests_seq2seq_lm_batch
):
    next_batch = default_multi_requests_seq2seq_lm_batch

    for _ in range(4):
        generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert generations[1].generated_text.text == "a few "
    assert (
        generations[1].request_id
        == default_multi_requests_seq2seq_lm_batch.requests[1].id
    )
    assert generations[1].generated_text.generated_tokens == 5

    next_batch = next_batch.filter([next_batch.requests[0].id])

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == "a few weeks"
    assert (
        generations[0].request_id
        == default_multi_requests_seq2seq_lm_batch.requests[0].id
    )
    assert generations[0].generated_text.generated_tokens == 7


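# Concatenate a batch that has done two decode steps with one that has done a
# single step: decoder ids, masks, encoder output and KV caches must be
# right-aligned across the different decoder lengths.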
def test_batch_concatenate(
    default_seq2seq_lm,
    default_seq2seq_lm_batch,
    default_multi_requests_seq2seq_lm_batch,
):
    next_batch_0 = default_seq2seq_lm_batch
    _, next_batch_0, _ = default_seq2seq_lm.generate_token(next_batch_0)
    _, next_batch_0, _ = default_seq2seq_lm.generate_token(next_batch_0)

    next_batch_1 = default_multi_requests_seq2seq_lm_batch
    _, next_batch_1, _ = default_seq2seq_lm.generate_token(next_batch_1)

    # Copy the encoder hidden state because it is removed from the
    # concatenated batches
    next_batch_0_encoder_last_hidden_state = next_batch_0.encoder_last_hidden_state
    next_batch_1_encoder_last_hidden_state = next_batch_1.encoder_last_hidden_state

    # Clone past_key_values before concatenating to compare after,
    # because they are removed from the concatenated batches
    next_batch_0_past_key_values = [
        [t.clone() for t in layer] for layer in next_batch_0.past_key_values
    ]
    next_batch_1_past_key_values = [
        [t.clone() for t in layer] for layer in next_batch_1.past_key_values
    ]

    next_batch = Seq2SeqLMBatch.concatenate([next_batch_0, next_batch_1])

    assert next_batch.batch_id == 0

    assert torch.equal(
        next_batch.decoder_input_ids[0], next_batch_0.decoder_input_ids[0]
    )
    assert next_batch.all_decoder_input_ids[1][0] == 0
    assert next_batch.all_decoder_input_ids[2][0] == 0
    assert torch.equal(
        next_batch.decoder_input_ids[1:, -2:], next_batch_1.decoder_input_ids
    )

    assert torch.all(next_batch.decoder_attention_mask[0, :3] == 1)
    assert torch.all(next_batch.decoder_attention_mask[0, 3:] == 0)
    assert torch.all(next_batch.decoder_attention_mask[1:, 0] == 0)
    assert torch.all(next_batch.decoder_attention_mask[1:, 1:3] == 1)

    assert torch.equal(
        next_batch.encoder_last_hidden_state[0],
        next_batch_0_encoder_last_hidden_state[0, -2:],
    )
    assert torch.equal(
        next_batch.encoder_last_hidden_state[1:],
        next_batch_1_encoder_last_hidden_state[:, -2:],
    )

    assert next_batch.input_lengths == [2, 2, 2]
    assert next_batch.decoder_input_lengths == [3, 2, 2]
    assert next_batch.max_input_length == 2
    assert next_batch.max_decoder_input_length == 3

    assert next_batch.requests[0] == next_batch_0.requests[0]
    assert next_batch.requests[1:] == next_batch_1.requests

    assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0]
    assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers

    assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0]
    assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias

    assert next_batch.past_key_values is not None
    assert all(
        [p[0].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[1].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[2].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[3].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
    )

    # The merged self-attention caches are right-aligned: batch 0 contributed
    # two decoder steps, batch 1 only one; cross-attention caches cover the
    # shared two-position encoder sequence.
    for i, past in enumerate(next_batch.past_key_values):
        assert torch.equal(next_batch_0_past_key_values[i][0][0, :, -2:, :], past[0][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][0][:, :, -1:, :], past[0][1:, :, -1:, :]
        )

        assert torch.equal(next_batch_0_past_key_values[i][1][0, :, -2:, :], past[1][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][1][:, :, -1:, :], past[1][1:, :, -1:, :]
        )

        assert torch.equal(next_batch_0_past_key_values[i][2][0, :, -2:, :], past[2][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][2][:, :, -2:, :], past[2][1:]
        )

        assert torch.equal(next_batch_0_past_key_values[i][3][0, :, -2:, :], past[3][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][3][:, :, -2:, :], past[3][1:]
        )

    for _ in range(3):
        generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 3
    assert generations[2].generated_text.text == "a few "
    assert (
        generations[2].request_id
        == default_multi_requests_seq2seq_lm_batch.requests[1].id
    )
    assert generations[2].generated_text.generated_tokens == 5

    next_batch = next_batch.filter(
        [next_batch.requests[0].id, next_batch.requests[1].id]
    )

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert generations[0].generated_text.text == "a few weeks"
    assert generations[0].request_id == default_seq2seq_lm_batch.requests[0].id
    assert generations[0].generated_text.generated_tokens == 7

    next_batch = next_batch.filter([next_batch.requests[1].id])

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == "a few weeks"
    assert (
        generations[0].request_id
        == default_multi_requests_seq2seq_lm_batch.requests[0].id
    )
    assert generations[0].generated_text.generated_tokens == 7