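# Tests for text-generation-inference's sharded BLOOM model (bigscience/bloom-560m):
# batch construction from protobuf, prefill/decode, filter, and concatenate.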
import pytest
import torch

from copy import copy
from transformers import AutoTokenizer

from text_generation_server.pb import generate_pb2
from text_generation_server.models.causal_lm import CausalLMBatch
from text_generation_server.utils import weight_hub_files, download_weights
from text_generation_server.models.bloom import BloomCausalLMBatch, BLOOMSharded
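
# NOTE: `default_pb_parameters` and `default_pb_stop_parameters` used below are
# pytest fixtures assumed to come from the shared conftest; they are not defined here.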


@pytest.fixture(scope="session")
def default_bloom():
    model_id = "bigscience/bloom-560m"
    revision = "main"
    filenames = weight_hub_files(model_id, revision, ".safetensors")
    download_weights(filenames, model_id, revision)
    return BLOOMSharded(model_id)


@pytest.fixture(scope="session")
def bloom_560m_tokenizer():
    return AutoTokenizer.from_pretrained("bigscience/bloom-560m", padding_side="left")


@pytest.fixture
def default_pb_request(default_pb_parameters, default_pb_stop_parameters):
    return generate_pb2.Request(
        id=0,
        inputs="Test",
        prefill_logprobs=True,
        truncate=100,
        parameters=default_pb_parameters,
        stopping_parameters=default_pb_stop_parameters,
    )


@pytest.fixture
def default_pb_batch(default_pb_request):
    return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1)


@pytest.fixture
def default_bloom_batch(default_pb_batch, bloom_560m_tokenizer):
    return BloomCausalLMBatch.from_pb(
        default_pb_batch, bloom_560m_tokenizer, torch.float32, torch.device("cpu")
    )


@pytest.fixture
def default_multi_requests_bloom_batch(default_pb_request, bloom_560m_tokenizer):
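    # req_0 is an independent copy of the fixture request; req_1 aliases the
    # original and mutates it in place with a shorter stop (5 new tokens).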
    req_0 = copy(default_pb_request)
    req_0.id = 1
    req_1 = default_pb_request
    req_1.id = 2
    req_1.stopping_parameters.max_new_tokens = 5

    batch_pb = generate_pb2.Batch(id=0, requests=[req_0, req_1], size=2)
    return BloomCausalLMBatch.from_pb(
        batch_pb, bloom_560m_tokenizer, torch.float32, torch.device("cpu")
    )
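

# Building a batch from the protobuf request: "Test" tokenizes to BLOOM id 10264,
# 3 is BLOOM's pad token id, and past_key_values must be unset before prefill.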
def test_batch_from_pb(default_pb_batch, default_bloom_batch):
    batch = default_bloom_batch

    assert batch.batch_id == default_pb_batch.id
    assert batch.requests == default_pb_batch.requests

    assert len(batch.input_ids) == default_pb_batch.size
    assert batch.input_ids[0][-1] == 10264
    assert torch.all(batch.input_ids[0][:-1] == 3)

    assert batch.attention_mask[0][0] == 1
    assert torch.all(batch.attention_mask[0][1:] == 0)

    assert batch.past_key_values is None

    assert all(
        [
            torch.equal(input_ids, all_input_ids[:, 0])
            for input_ids, all_input_ids in zip(batch.input_ids, batch.all_input_ids)
        ]
    )

    assert batch.input_lengths == [1]

    assert len(batch) == default_pb_batch.size
    assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch)

    assert batch.max_input_length == batch.input_lengths[0]
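

# Concatenation needs past_key_values from a prefill step, so joining two fresh
# (un-prefilled) batches is expected to raise a ValueError.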
def test_batch_concatenate_no_prefill(default_bloom_batch):
    with pytest.raises(ValueError):
        BloomCausalLMBatch.concatenate([default_bloom_batch, default_bloom_batch])


def test_causal_lm_batch_type(default_bloom):
    assert default_bloom.batch_type == BloomCausalLMBatch
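

# A first generate_token call runs prefill: each sequence gains exactly one token,
# and the attention mask appears pre-allocated to input length + max_new_tokens
# (1 + 10 = 11 here).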
def test_causal_lm_generate_token(default_bloom, default_bloom_batch):
    sequence_length = len(default_bloom_batch.all_input_ids[0])
    generations, next_batch, _ = default_bloom.generate_token(default_bloom_batch)

    assert len(generations) == len(default_bloom_batch)
    assert isinstance(next_batch, CausalLMBatch)
    assert not next_batch.keys_head_dim_last

    assert len(next_batch.all_input_ids) == len(next_batch)
    assert len(next_batch.all_input_ids[0]) == sequence_length + 1
    assert len(next_batch.attention_mask[0]) == 11
    assert torch.all(next_batch.all_input_ids[0][-2:] == 10264)
    assert torch.all(next_batch.all_input_ids[0][:-2] == 3)

    assert torch.all(next_batch.attention_mask[0][:2] == 1)
    assert torch.all(next_batch.attention_mask[0][2:] == 0)

    assert next_batch.input_ids.shape == (len(next_batch), 1)
    assert next_batch.input_ids[0, 0] == 10264

    assert next_batch.input_lengths == [2]
    assert next_batch.max_input_length == next_batch.input_lengths[0]

    assert next_batch.past_key_values is not None
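    # BLOOM caches keys as (num_heads, head_dim, seq_len) and values as
    # (num_heads, seq_len, head_dim), which is why keys_head_dim_last is False
    # above; bloom-560m uses 16 heads with head_dim 64.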
    assert all(
        [p[0].shape == (16, 64, sequence_length) for p in next_batch.past_key_values]
    )
    assert all(
        [p[1].shape == (16, sequence_length, 64) for p in next_batch.past_key_values]
    )
    assert all([generation.generated_text is None for generation in generations])
    assert all([len(generation.prefill_tokens) == 1 for generation in generations])
    assert all(
        [
            token_id.item() == 10264
            for generation in generations
            for token_id in generation.tokens.token_ids
        ]
    )
    assert all(
        [
            token_text == "Test"
            for generation in generations
            for token_text in generation.tokens.texts
        ]
    )
    assert generations[0].request_id == 0
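

# Decoding for max_new_tokens - 1 further steps and then once more should exhaust
# the stopping criterion: generate_token returns None instead of a next batch,
# and the expected text implies the default criterion allows 10 new tokens.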
def test_causal_lm_generate_token_completion(default_bloom, default_bloom_batch):
    next_batch = default_bloom_batch
    for _ in range(default_bloom_batch.stopping_criterias[0].max_new_tokens - 1):
        generations, next_batch, _ = default_bloom.generate_token(next_batch)
        assert len(generations) == len(default_bloom_batch)

    generations, next_batch, _ = default_bloom.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert (
        generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
    )
    assert generations[0].request_id == default_bloom_batch.requests[0].id
    assert (
        generations[0].generated_text.generated_tokens
        == default_bloom_batch.stopping_criterias[0].max_new_tokens
    )


def test_causal_lm_generate_token_completion_multi(
    default_bloom, default_multi_requests_bloom_batch
):
    next_batch = default_multi_requests_bloom_batch

    for _ in range(
        default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens - 1
    ):
        generations, next_batch, _ = default_bloom.generate_token(next_batch)
        assert len(generations) == len(default_multi_requests_bloom_batch)

    generations, next_batch, _ = default_bloom.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert generations[1].generated_text.text == "TestTestTestTestTest"
    assert (
        generations[1].request_id == default_multi_requests_bloom_batch.requests[1].id
    )
    assert (
        generations[1].generated_text.generated_tokens
        == default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
    )
    # Copy stopping_criterias before filtering
    stopping_criterias = default_multi_requests_bloom_batch.stopping_criterias.copy()
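
    # Keep only the first request (the longer stopping criterion); the second
    # request finished above and is dropped from the running batch.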
    next_batch = next_batch.filter([next_batch.requests[0].id])

    for _ in range(
        stopping_criterias[0].max_new_tokens - stopping_criterias[1].max_new_tokens - 1
    ):
        generations, next_batch, _ = default_bloom.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_bloom.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert (
        generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
    )
    assert (
        generations[0].request_id == default_multi_requests_bloom_batch.requests[0].id
    )
    assert (
        generations[0].generated_text.generated_tokens
        == default_multi_requests_bloom_batch.stopping_criterias[0].max_new_tokens
    )
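

# Concatenate a batch that has decoded twice with one that has decoded once, then
# drain the requests in completion order, filtering the batch after each finishes.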
def test_batch_concatenate(
    default_bloom, default_bloom_batch, default_multi_requests_bloom_batch
):
    next_batch_0 = default_bloom_batch
    _, next_batch_0, _ = default_bloom.generate_token(next_batch_0)
    _, next_batch_0, _ = default_bloom.generate_token(next_batch_0)

    next_batch_1 = default_multi_requests_bloom_batch
    _, next_batch_1, _ = default_bloom.generate_token(next_batch_1)

    # Clone past_key_values before concatenating to compare after,
    # because they are removed from the concatenated batches
    next_batch_0_past_key_values = [
        (k.clone(), v.clone()) for (k, v) in next_batch_0.past_key_values
    ]
    next_batch_1_past_key_values = [
        (k.clone(), v.clone()) for (k, v) in next_batch_1.past_key_values
    ]

    next_batch = BloomCausalLMBatch.concatenate([next_batch_0, next_batch_1])

    assert torch.equal(next_batch.all_input_ids[0], next_batch_0.all_input_ids[0])
    assert torch.equal(next_batch.all_input_ids[1], next_batch_1.all_input_ids[0])
    assert torch.equal(next_batch.all_input_ids[2], next_batch_1.all_input_ids[1])

    assert torch.all(
        next_batch.attention_mask[0, : -next_batch.padding_right_offset] == 1
    )
    assert torch.all(
        next_batch.attention_mask[1:, 1 : -next_batch.padding_right_offset] == 1
    )
    assert torch.all(next_batch.attention_mask[1:, 3:] == 0)

    assert next_batch.batch_id == 0
    assert torch.all(next_batch.input_ids == 10264)

    assert next_batch.input_lengths == [3, 2, 2]
    assert next_batch.max_input_length == 3

    assert next_batch.requests[0] == next_batch_0.requests[0]
    assert next_batch.requests[1:] == next_batch_1.requests

    assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0]
    assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers

    assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0]
    assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias

    assert next_batch.past_key_values is not None
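    # Concatenation pads each layer's cache to the longest past length: batch 0
    # contributed 2 past tokens, batch 1 only 1, giving batch size 3 padded to 2.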
    assert all([p[0].shape == (3, 16, 64, 2) for p in next_batch.past_key_values])
    assert all([p[1].shape == (3, 16, 2, 64) for p in next_batch.past_key_values])

    for i, past in enumerate(next_batch.past_key_values):
        assert torch.equal(next_batch_0_past_key_values[i][0][:, :, -2:], past[0][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][0][:, :, -1:],
            past[0][1:, :, :, -1].reshape(-1, 64, 1),
        )

        assert torch.equal(next_batch_0_past_key_values[i][1][:, -2:, :], past[1][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][1][:, -1:, :],
            past[1][1:, :, -1, :].reshape(-1, 1, 64),
        )

    for _ in range(
        default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens - 2
    ):
        generations, next_batch, _ = default_bloom.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_bloom.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 3
    assert generations[2].generated_text.text == "TestTestTestTestTest"
    assert (
        generations[2].request_id == default_multi_requests_bloom_batch.requests[1].id
    )
    assert (
        generations[2].generated_text.generated_tokens
        == default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
    )

    next_batch = next_batch.filter(
        [next_batch.requests[0].id, next_batch.requests[1].id]
    )

    for _ in range(
        default_bloom_batch.stopping_criterias[0].max_new_tokens
        - default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
        - 2
    ):
        generations, next_batch, _ = default_bloom.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_bloom.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert (
        generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
    )
    assert generations[0].request_id == default_bloom_batch.requests[0].id
    assert (
        generations[0].generated_text.generated_tokens
        == default_bloom_batch.stopping_criterias[0].max_new_tokens
    )

    next_batch = next_batch.filter([next_batch.requests[1].id])

    for _ in range(
        default_multi_requests_bloom_batch.stopping_criterias[0].max_new_tokens
        - default_bloom_batch.stopping_criterias[0].max_new_tokens
        - default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
        - 4
    ):
        generations, next_batch, _ = default_bloom.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_bloom.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert (
        generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
    )
    assert (
        generations[0].request_id == default_multi_requests_bloom_batch.requests[0].id
    )
    assert (
        generations[0].generated_text.generated_tokens
        == default_multi_requests_bloom_batch.stopping_criterias[0].max_new_tokens
    )