import pytest
import torch

from copy import copy

from transformers import AutoTokenizer

from text_generation_server.pb import generate_pb2
from text_generation_server.models.seq2seq_lm import Seq2SeqLM, Seq2SeqLMBatch

@pytest.fixture(scope="session")
def mt0_small_tokenizer():
    tokenizer = AutoTokenizer.from_pretrained(
        "bigscience/mt0-small", padding_side="left"
    )
    tokenizer.bos_token_id = 0
    return tokenizer

@pytest.fixture(scope="session")
def default_seq2seq_lm():
    return Seq2SeqLM("bigscience/mt0-small")

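# NOTE: default_pb_parameters and default_pb_stop_parameters are assumed to be
# shared fixtures defined elsewhere in the test suite (typically conftest.py);
# they supply the sampling parameters and stopping criteria used below.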
@pytest.fixture
def default_pb_request(default_pb_parameters, default_pb_stop_parameters):
    return generate_pb2.Request(
        id=0,
        inputs="Test",
        prefill_logprobs=True,
        truncate=100,
        parameters=default_pb_parameters,
        stopping_parameters=default_pb_stop_parameters,
    )

@pytest.fixture
def default_pb_batch(default_pb_request):
    return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1)

@pytest.fixture
def default_seq2seq_lm_batch(default_pb_batch, mt0_small_tokenizer):
    return Seq2SeqLMBatch.from_pb(
        default_pb_batch, mt0_small_tokenizer, torch.float32, torch.device("cpu")
    )

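# Builds a two-request batch from copies of the default request. Request id 2
# is capped at max_new_tokens=5 so that it finishes before request id 1,
# letting the tests below exercise filtering of completed requests.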
@pytest.fixture
def default_multi_requests_seq2seq_lm_batch(default_pb_request, mt0_small_tokenizer):
    req_0 = copy(default_pb_request)
    req_0.id = 1
    req_1 = default_pb_request
    req_1.id = 2
    req_1.stopping_parameters.max_new_tokens = 5

    batch_pb = generate_pb2.Batch(id=0, requests=[req_0, req_1], size=2)
    return Seq2SeqLMBatch.from_pb(
        batch_pb, mt0_small_tokenizer, torch.float32, torch.device("cpu")
    )

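# The tokenizer left-pads, so the encoded prompt ends with the token for
# "Test" followed by EOS (id 1), with padding (id 0) in front; 4268 is assumed
# to be the mt0-small token id for "Test".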
def test_batch_from_pb(default_pb_batch, default_seq2seq_lm_batch):
    batch = default_seq2seq_lm_batch
    sequence_length = len(default_seq2seq_lm_batch.input_ids[0])

    assert batch.batch_id == default_pb_batch.id
    assert batch.requests == default_pb_batch.requests

    assert batch.input_ids.shape == (default_pb_batch.size, sequence_length)
    assert batch.input_ids[0][-2] == 4268
    assert batch.input_ids[0][-1] == 1
    assert torch.all(batch.input_ids[0][:-2] == 0)

    assert torch.all(batch.attention_mask[0][-2:] == 1)
    assert torch.all(batch.attention_mask[0][:-2] == 0)

    assert len(batch.decoder_input_ids) == default_pb_batch.size
    assert batch.decoder_attention_mask is None
    assert batch.encoder_last_hidden_state is None

    assert batch.past_key_values is None

    assert batch.input_lengths == [2]
    assert batch.decoder_input_lengths == [1]

    assert len(batch) == default_pb_batch.size
    assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch)

    assert batch.max_input_length == batch.input_lengths[0]
    assert batch.max_decoder_input_length == batch.decoder_input_lengths[0]

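# Batches must go through a prefill pass before they can be concatenated;
# concatenating freshly created batches is expected to raise a ValueError.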
def test_batch_concatenate_no_prefill(default_seq2seq_lm_batch):
    with pytest.raises(ValueError):
        Seq2SeqLMBatch.concatenate([default_seq2seq_lm_batch, default_seq2seq_lm_batch])

def test_seq2seq_lm_batch_type(default_seq2seq_lm):
    assert default_seq2seq_lm.batch_type == Seq2SeqLMBatch

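# Each layer's past_key_values entry is expected to hold four tensors
# (self-attention key/value, then cross-attention key/value); the 6 heads and
# 64-dim head size asserted below are assumed from the mt0-small config.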
def test_seq2seq_lm_generate_token(default_seq2seq_lm, default_seq2seq_lm_batch):
    sequence_length = len(default_seq2seq_lm_batch.input_ids[0])
    generations, next_batch, _ = default_seq2seq_lm.generate_token(
        default_seq2seq_lm_batch
    )

    assert len(generations) == len(next_batch)
    assert isinstance(next_batch, Seq2SeqLMBatch)

    assert next_batch.input_ids is None
    assert torch.equal(
        next_batch.attention_mask, default_seq2seq_lm_batch.attention_mask
    )
    assert next_batch.input_lengths == default_seq2seq_lm_batch.input_lengths
    assert next_batch.max_input_length == default_seq2seq_lm_batch.max_input_length
    assert (
        next_batch.next_token_choosers == default_seq2seq_lm_batch.next_token_choosers
    )
    assert next_batch.stopping_criterias == default_seq2seq_lm_batch.stopping_criterias

    assert len(next_batch.decoder_input_ids) == len(next_batch)
    assert next_batch.all_decoder_input_ids[0][0] == 0
    assert next_batch.all_decoder_input_ids[0][1] == 259
    assert next_batch.decoder_attention_mask is None
    assert next_batch.encoder_last_hidden_state.shape == (1, sequence_length, 512)

    assert next_batch.decoder_input_lengths == [2]
    assert next_batch.max_decoder_input_length == 2

    assert next_batch.past_key_values is not None
    assert all(
        [p[0].shape == (len(next_batch), 6, 1, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[1].shape == (len(next_batch), 6, 1, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [
            p[2].shape == (len(next_batch), 6, sequence_length, 64)
            for p in next_batch.past_key_values
        ]
    )
    assert all(
        [
            p[3].shape == (len(next_batch), 6, sequence_length, 64)
            for p in next_batch.past_key_values
        ]
    )
    assert all([generation.generated_text is None for generation in generations])
    assert all([len(generation.prefill_tokens) == 1 for generation in generations])
    assert all(
        [
            token_id.item() == 259
            for generation in generations
            for token_id in generation.tokens.token_ids
        ]
    )
    assert all(
        [
            token_text == " "
            for generation in generations
            for token_text in generation.tokens.texts
        ]
    )
    assert generations[0].request_id == 0

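# The default request is expected to complete as "a few weeks" after 7
# decoding steps; generate_token then returns None in place of a next batch.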
def test_seq2seq_lm_generate_token_completion(
    default_seq2seq_lm, default_seq2seq_lm_batch
):
    next_batch = default_seq2seq_lm_batch
    for _ in range(6):
        generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == "a few weeks"
    assert generations[0].request_id == default_seq2seq_lm_batch.requests[0].id
    assert generations[0].generated_text.generated_tokens == 7

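# Request id 2 is capped at 5 new tokens, so it finishes first; the batch is
# then filtered down to the remaining request, which completes two steps later.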
def test_seq2seq_lm_generate_token_completion_multi(
    default_seq2seq_lm, default_multi_requests_seq2seq_lm_batch
):
    next_batch = default_multi_requests_seq2seq_lm_batch

    for _ in range(4):
        generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert generations[1].generated_text.text == "a few "
    assert (
        generations[1].request_id
        == default_multi_requests_seq2seq_lm_batch.requests[1].id
    )
    assert generations[1].generated_text.generated_tokens == 5

    next_batch = next_batch.filter([next_batch.requests[0].id])

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == "a few weeks"
    assert (
        generations[0].request_id
        == default_multi_requests_seq2seq_lm_batch.requests[0].id
    )
    assert generations[0].generated_text.generated_tokens == 7

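# Concatenates a batch that has decoded two tokens with a two-request batch
# that has decoded one, then checks that decoder inputs, attention masks,
# encoder states, and per-layer KV caches are re-padded and aligned correctly.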
def test_batch_concatenate(
    default_seq2seq_lm,
    default_seq2seq_lm_batch,
    default_multi_requests_seq2seq_lm_batch,
):
    next_batch_0 = default_seq2seq_lm_batch
    _, next_batch_0, _ = default_seq2seq_lm.generate_token(next_batch_0)
    _, next_batch_0, _ = default_seq2seq_lm.generate_token(next_batch_0)

    next_batch_1 = default_multi_requests_seq2seq_lm_batch
    _, next_batch_1, _ = default_seq2seq_lm.generate_token(next_batch_1)

    # Copy the encoder hidden state because it is removed from the concatenated batches
    next_batch_0_encoder_last_hidden_state = next_batch_0.encoder_last_hidden_state
    next_batch_1_encoder_last_hidden_state = next_batch_1.encoder_last_hidden_state

    # Clone past_key_values before concatenating to compare after,
    # because they are removed from the concatenated batches
    next_batch_0_past_key_values = [
        [t.clone() for t in layer] for layer in next_batch_0.past_key_values
    ]
    next_batch_1_past_key_values = [
        [t.clone() for t in layer] for layer in next_batch_1.past_key_values
    ]

    next_batch = Seq2SeqLMBatch.concatenate([next_batch_0, next_batch_1])

    assert next_batch.batch_id == 0

    assert torch.equal(
        next_batch.decoder_input_ids[0], next_batch_0.decoder_input_ids[0]
    )
    assert next_batch.all_decoder_input_ids[1][0] == 0
    assert next_batch.all_decoder_input_ids[2][0] == 0
    assert torch.equal(
        next_batch.decoder_input_ids[1:, -2:], next_batch_1.decoder_input_ids
    )

    assert torch.all(next_batch.decoder_attention_mask[0, :3] == 1)
    assert torch.all(next_batch.decoder_attention_mask[0, 3:] == 0)
    assert torch.all(next_batch.decoder_attention_mask[1:, 0] == 0)
    assert torch.all(next_batch.decoder_attention_mask[1:, 1:3] == 1)

    assert torch.equal(
        next_batch.encoder_last_hidden_state[0],
        next_batch_0_encoder_last_hidden_state[0, -2:],
    )
    assert torch.equal(
        next_batch.encoder_last_hidden_state[1:],
        next_batch_1_encoder_last_hidden_state[:, -2:],
    )

    assert next_batch.input_lengths == [2, 2, 2]
    assert next_batch.decoder_input_lengths == [3, 2, 2]
    assert next_batch.max_input_length == 2
    assert next_batch.max_decoder_input_length == 3

    assert next_batch.requests[0] == next_batch_0.requests[0]
    assert next_batch.requests[1:] == next_batch_1.requests

    assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0]
    assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers

    assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0]
    assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias

    assert next_batch.past_key_values is not None
    assert all(
        [p[0].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[1].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[2].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[3].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
    )

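    # Each source batch should occupy a right-aligned slice of the merged caches:
    # batch 0 contributes its full two-step self-attention cache, while batch 1
    # (one decoding step so far) fills only the last position of its rows.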
    for i, past in enumerate(next_batch.past_key_values):
        assert torch.equal(next_batch_0_past_key_values[i][0][0, :, -2:, :], past[0][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][0][:, :, -1:, :], past[0][1:, :, -1:, :]
        )

        assert torch.equal(next_batch_0_past_key_values[i][1][0, :, -2:, :], past[1][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][1][:, :, -1:, :], past[1][1:, :, -1:, :]
        )

        assert torch.equal(next_batch_0_past_key_values[i][2][0, :, -2:, :], past[2][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][2][:, :, -2:, :], past[2][1:]
        )

        assert torch.equal(next_batch_0_past_key_values[i][3][0, :, -2:, :], past[3][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][3][:, :, -2:, :], past[3][1:]
        )

    for _ in range(3):
        generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 3
    assert generations[2].generated_text.text == "a few "
    assert (
        generations[2].request_id
        == default_multi_requests_seq2seq_lm_batch.requests[1].id
    )
    assert generations[2].generated_text.generated_tokens == 5

    next_batch = next_batch.filter(
        [next_batch.requests[0].id, next_batch.requests[1].id]
    )

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert generations[0].generated_text.text == "a few weeks"
    assert generations[0].request_id == default_seq2seq_lm_batch.requests[0].id
    assert generations[0].generated_text.generated_tokens == 7

    next_batch = next_batch.filter([next_batch.requests[1].id])

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == "a few weeks"
    assert (
        generations[0].request_id
        == default_multi_requests_seq2seq_lm_batch.requests[0].id
    )
    assert generations[0].generated_text.generated_tokens == 7