import pytest
import torch

from copy import copy
from transformers import AutoTokenizer

from text_generation_server.pb import generate_pb2
from text_generation_server.models.causal_lm import CausalLM, CausalLMBatch


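# Fixtures: a session-scoped GPT-2 CausalLM and tokenizer, plus protobuf
# request/batch objects converted into CausalLMBatch instances on CPU in
# float32. `default_pb_parameters` and `default_pb_stop_parameters` are
# provided by the test suite's shared fixtures (they are not defined here).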
@pytest.fixture(scope="session")
def default_causal_lm():
    return CausalLM("gpt2")


@pytest.fixture(scope="session")
def gpt2_tokenizer():
    tokenizer = AutoTokenizer.from_pretrained("gpt2", padding_side="left")
    tokenizer.pad_token_id = 50256
    return tokenizer


@pytest.fixture
def default_pb_request(default_pb_parameters, default_pb_stop_parameters):
    return generate_pb2.Request(
        id=0,
        inputs="Test",
        prefill_logprobs=True,
        truncate=100,
        parameters=default_pb_parameters,
        stopping_parameters=default_pb_stop_parameters,
    )


@pytest.fixture
def default_pb_batch(default_pb_request):
    return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1)


@pytest.fixture
def default_causal_lm_batch(default_pb_batch, gpt2_tokenizer):
    return CausalLMBatch.from_pb(
        default_pb_batch, gpt2_tokenizer, torch.float32, torch.device("cpu")
    )


@pytest.fixture
def default_multi_requests_causal_lm_batch(default_pb_request, gpt2_tokenizer):
    req_0 = copy(default_pb_request)
    req_0.id = 1
    req_1 = default_pb_request
    req_1.id = 2
    req_1.stopping_parameters.max_new_tokens = 5

    batch_pb = generate_pb2.Batch(id=1, requests=[req_0, req_1], size=2)
    return CausalLMBatch.from_pb(
        batch_pb, gpt2_tokenizer, torch.float32, torch.device("cpu")
    )


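# Converting the protobuf batch: the single "Test" prompt becomes one
# left-padded sequence (pad token 50256), there are no past_key_values yet,
# and lengths, next-token choosers and stopping criteria match the batch size.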
def test_batch_from_pb(default_pb_batch, default_causal_lm_batch):
    batch = default_causal_lm_batch

    assert batch.batch_id == default_pb_batch.id
    assert batch.requests == default_pb_batch.requests

    assert len(batch.input_ids) == default_pb_batch.size
    assert batch.input_ids[0][-1] == 14402
    assert torch.all(batch.input_ids[0][:-1] == 50256)

    assert batch.attention_mask[0, 0] == 1
    assert torch.all(batch.attention_mask[0, 1:] == 0)

    assert batch.past_key_values is None

    assert all(
        [
            torch.equal(input_ids, all_input_ids[:, 0])
            for input_ids, all_input_ids in zip(batch.input_ids, batch.all_input_ids)
        ]
    )

    assert batch.input_lengths == [1]

    assert len(batch) == default_pb_batch.size
    assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch)

    assert batch.max_input_length == batch.input_lengths[0]


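# Concatenation needs batches that have already gone through prefill (i.e.
# have past_key_values); concatenating fresh batches must raise ValueError.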
def test_batch_concatenate_no_prefill(default_causal_lm_batch):
    with pytest.raises(ValueError):
        CausalLMBatch.concatenate([default_causal_lm_batch, default_causal_lm_batch])


def test_causal_lm_batch_type(default_causal_lm):
    assert default_causal_lm.batch_type == CausalLMBatch


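# One prefill step: each sequence grows by a single token ("." == id 13),
# the attention mask and input lengths advance by one, and past_key_values
# take the GPT-2 shape (batch, 12 heads, sequence_length, 64).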
def test_causal_lm_generate_token(default_causal_lm, default_causal_lm_batch):
    sequence_length = len(default_causal_lm_batch.all_input_ids[0])
    generations, next_batch, _ = default_causal_lm.generate_token(
        default_causal_lm_batch
    )

    assert len(generations) == len(next_batch)
    assert isinstance(next_batch, CausalLMBatch)

    assert len(next_batch.all_input_ids) == len(next_batch)
    assert len(next_batch.all_input_ids[0]) == sequence_length + 1
    assert len(next_batch.attention_mask[0]) == 11
    assert next_batch.all_input_ids[0][-1] == 13
    assert next_batch.all_input_ids[0][-2] == 14402
    assert torch.all(next_batch.all_input_ids[0][:-2] == 50256)

    assert torch.all(next_batch.attention_mask[0][0:2] == 1)
    assert torch.all(next_batch.attention_mask[0][2:] == 0)

    assert next_batch.input_ids.shape == (len(next_batch), 1)
    assert next_batch.input_ids[0, 0] == 13

    assert next_batch.input_lengths == [2]
    assert next_batch.max_input_length == next_batch.input_lengths[0]

    assert next_batch.past_key_values is not None
    assert all(
        [p[0].shape == (1, 12, sequence_length, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[1].shape == (1, 12, sequence_length, 64) for p in next_batch.past_key_values]
    )
    assert all([generation.generated_text is None for generation in generations])
    assert all([len(generation.prefill_tokens) == 1 for generation in generations])
    assert all(
        [
            token_id.item() == 13
            for generation in generations
            for token_id in generation.tokens.token_ids
        ]
    )
    assert all(
        [
            token_text == "."
            for generation in generations
            for token_text in generation.tokens.texts
        ]
    )
    assert generations[0].request_id == 0


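# Decode until the stopping criterion (max_new_tokens) is reached; the last
# call returns the finished generation and no next batch.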
def test_causal_lm_generate_token_completion(
    default_causal_lm, default_causal_lm_batch
):
    next_batch = default_causal_lm_batch
    for _ in range(default_causal_lm_batch.stopping_criterias[0].max_new_tokens - 1):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == ".java:784) at net.minecraft."
    assert generations[0].request_id == default_causal_lm_batch.requests[0].id
    assert (
        generations[0].generated_text.generated_tokens
        == default_causal_lm_batch.stopping_criterias[0].max_new_tokens
    )


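# Two requests with different max_new_tokens: the shorter request finishes
# first and is filtered out, then the remaining request runs to completion.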
def test_causal_lm_generate_token_completion_multi(
    default_causal_lm, default_multi_requests_causal_lm_batch
):
    next_batch = default_multi_requests_causal_lm_batch

    for i in range(
        default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 1
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert generations[1].generated_text.text == ".java:784)"
    assert (
        generations[1].request_id
        == default_multi_requests_causal_lm_batch.requests[1].id
    )
    assert (
        generations[1].generated_text.generated_tokens
        == default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
    )
    # Copy stopping_criterias before filtering
    stopping_criterias = (
        default_multi_requests_causal_lm_batch.stopping_criterias.copy()
    )

    next_batch = next_batch.filter([next_batch.requests[0].id])

    for _ in range(
        stopping_criterias[0].max_new_tokens - stopping_criterias[1].max_new_tokens - 1
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == ".java:784) at net.minecraft."
    assert (
        generations[0].request_id
        == default_multi_requests_causal_lm_batch.requests[0].id
    )
    assert (
        generations[0].generated_text.generated_tokens
        == default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens
    )


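# Concatenate two batches after prefill: input ids, attention masks, requests,
# criteria and past_key_values must all line up in the merged batch, and the
# requests then finish and are filtered out one by one.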
def test_batch_concatenate(
    default_causal_lm, default_causal_lm_batch, default_multi_requests_causal_lm_batch
):
    next_batch_0 = default_causal_lm_batch
    _, next_batch_0, _ = default_causal_lm.generate_token(next_batch_0)
    _, next_batch_0, _ = default_causal_lm.generate_token(next_batch_0)

    next_batch_1 = default_multi_requests_causal_lm_batch
    _, next_batch_1, _ = default_causal_lm.generate_token(next_batch_1)

    # Clone past_key_values before concatenating to compare after,
    # because they are removed from the concatenated batches
    next_batch_0_past_key_values = [
        (k.clone(), v.clone()) for (k, v) in next_batch_0.past_key_values
    ]
    next_batch_1_past_key_values = [
        (k.clone(), v.clone()) for (k, v) in next_batch_1.past_key_values
    ]

    next_batch = CausalLMBatch.concatenate([next_batch_0, next_batch_1])

    assert torch.equal(next_batch.all_input_ids[0], next_batch_0.all_input_ids[0])
    assert torch.equal(next_batch.all_input_ids[1], next_batch_1.all_input_ids[0])
    assert torch.equal(next_batch.all_input_ids[2], next_batch_1.all_input_ids[1])

    assert torch.all(
        next_batch.attention_mask[0, : -next_batch.padding_right_offset] == 1
    )
    assert torch.all(
        next_batch.attention_mask[1:, 1 : -next_batch.padding_right_offset] == 1
    )
    assert torch.all(next_batch.attention_mask[1:, 3:] == 0)

    assert next_batch.batch_id == 0
    assert next_batch.input_ids[0, 0] == 12355
    assert torch.all(next_batch.input_ids[1:] == 13)

    assert next_batch.input_lengths == [3, 2, 2]
    assert next_batch.max_input_length == 3

    assert next_batch.requests[0] == next_batch_0.requests[0]
    assert next_batch.requests[1:] == next_batch_1.requests

    assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0]
    assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers

    assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0]
    assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias

    assert next_batch.past_key_values is not None
    assert all([p[0].shape == (3, 12, 2, 64) for p in next_batch.past_key_values])
    assert all([p[1].shape == (3, 12, 2, 64) for p in next_batch.past_key_values])

    for i, past in enumerate(next_batch.past_key_values):
        assert torch.equal(next_batch_0_past_key_values[i][0][0, :, -2:], past[0][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][0][:, :, -1:], past[0][1:, :, -1:, :]
        )

        assert torch.equal(next_batch_0_past_key_values[i][1][0, :, -2:], past[1][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][1][:, :, -1:], past[1][1:, :, -1:, :]
        )

    for _ in range(
        default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 2
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 3
    assert generations[2].generated_text.text == ".java:784)"
    assert (
        generations[2].request_id
        == default_multi_requests_causal_lm_batch.requests[1].id
    )
    assert (
        generations[2].generated_text.generated_tokens
        == default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
    )

    next_batch = next_batch.filter(
        [next_batch.requests[0].id, next_batch.requests[1].id]
    )

    for _ in range(
        default_causal_lm_batch.stopping_criterias[0].max_new_tokens
        - default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
        - 2
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert generations[0].generated_text.text == ".java:784) at net.minecraft."
    assert generations[0].request_id == default_causal_lm_batch.requests[0].id
    assert (
        generations[0].generated_text.generated_tokens
        == default_causal_lm_batch.stopping_criterias[0].max_new_tokens
    )

    next_batch = next_batch.filter([next_batch.requests[1].id])

    for _ in range(
        default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens
        - default_causal_lm_batch.stopping_criterias[0].max_new_tokens
        - default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
        - 4
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == ".java:784) at net.minecraft."
    assert (
        generations[0].request_id
        == default_multi_requests_causal_lm_batch.requests[0].id
    )
    assert (
        generations[0].generated_text.generated_tokens
        == default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens
    )