# coding=utf-8
# Copyright 2022, The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch PLBART model. """


import copy
import tempfile
import unittest

from transformers import PLBartConfig, is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    require_torch_fp16,
    slow,
    torch_device,
)
from transformers.utils import cached_property

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        AutoTokenizer,
        PLBartForCausalLM,
        PLBartForConditionalGeneration,
        PLBartForSequenceClassification,
        PLBartModel,
    )
    from transformers.models.plbart.modeling_plbart import PLBartDecoder, PLBartEncoder


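# Helper that assembles the full seq2seq input dict for the tests, filling in default attention and
# head masks when they are not provided explicitly.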
def prepare_plbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


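# Builds a tiny random PLBart configuration and matching encoder/decoder inputs for the common model
# tests; sizes are kept deliberately small so the tests run quickly.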
class PLBartModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=100,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids = input_ids.clamp(3)
        input_ids[:, -1] = self.eos_token_id  # Eos Token

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()
        inputs_dict = prepare_plbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return PLBartConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

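    # Verifies that decoding with cached past_key_values matches a full forward pass over the
    # concatenated sequence, compared on a random slice of the hidden dimension.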
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = PLBartModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_with_past_key_values = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values
        )
        output_from_past = output_with_past_key_values["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

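    # Saves the encoder and decoder sub-modules separately, reloads them as standalone PLBartEncoder /
    # PLBartDecoder models, and checks that their outputs match those of the full encoder-decoder model.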
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = PLBartModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = PLBartEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = PLBartDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)


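# Runs the shared ModelTesterMixin / GenerationTesterMixin / PipelineTesterMixin suites against the
# PLBart model classes, plus a few PLBart-specific checks below.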
@require_torch
class PLBartModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (PLBartModel, PLBartForConditionalGeneration, PLBartForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (PLBartForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": PLBartForConditionalGeneration,
            "feature-extraction": PLBartModel,
            "summarization": PLBartForConditionalGeneration,
            "text-classification": PLBartForSequenceClassification,
            "text-generation": PLBartForCausalLM,
            "text2text-generation": PLBartForConditionalGeneration,
            "translation": PLBartForConditionalGeneration,
            "zero-shot": PLBartForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False  # Fix me Michael
    test_pruning = False
    test_missing_keys = False

    # TODO: Fix the failed tests
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `PLBartConfig` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = PLBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PLBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    # PLBartForSequenceClassification does not support inputs_embeds
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (PLBartModel, PLBartForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    @require_torch_fp16
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = PLBartForConditionalGeneration(config).eval().to(torch_device)
        model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)

    @unittest.skip("Failing since #26752")
    def test_sample_generate(self):
        pass


def assert_tensors_close(a, b, atol=1e-12, prefix=""):
    """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
    if a is None and b is None:
        return True
    try:
        if torch.allclose(a, b, atol=atol):
            return True
        raise
    except Exception:
        pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
        if a.numel() > 100:
            msg = f"tensor values are {pct_different:.1%} percent different."
        else:
            msg = f"{a} != {b}"
        if prefix:
            msg = prefix + ": " + msg
        raise AssertionError(msg)


def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


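# The integration tests below load real uclanlp/plbart-* checkpoints from the Hub; subclasses only set
# `checkpoint_name` and their reference inputs/outputs.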
@require_torch
@require_sentencepiece
@require_tokenizers
class AbstractSeq2SeqIntegrationTest(unittest.TestCase):
    maxDiff = 1000  # longer string compare tracebacks
    checkpoint_name = None

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.checkpoint_name, use_fast=False)
        return cls

    @cached_property
    def model(self):
        """Only load the model if needed."""
        model = PLBartForConditionalGeneration.from_pretrained(self.checkpoint_name).to(torch_device)
        if "cuda" in torch_device:
            model = model.half()
        return model


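# Exercises the Java -> C# translation checkpoint. A minimal sketch of what the generation tests below
# do, using the same API calls found in this class:
#
#     tokenizer = AutoTokenizer.from_pretrained("uclanlp/plbart-java-cs", use_fast=False)
#     model = PLBartForConditionalGeneration.from_pretrained("uclanlp/plbart-java-cs")
#     batch = tokenizer(src_text, return_tensors="pt", padding=True)
#     decoded = tokenizer.batch_decode(model.generate(**batch), skip_special_tokens=True)
#
# and the decoded strings are compared against the hard-coded `tgt_text` references.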
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartJavaCsIntegrationTest(AbstractSeq2SeqIntegrationTest):
    checkpoint_name = "uclanlp/plbart-java-cs"
    src_text = [
        "public int maximum(int a, int b, int c){return Math.max(a, Math.max(b, c));}",
        "public int product(int a, int b, int c){return a*b*c;}",
    ]
    tgt_text = [
        "public int maximum(int a, int b, int c){return Math.Max(",
        "public int Product(int a, int b, int c){return a * b *",
    ]

    @slow
    def test_java_cs_generate_one(self):
        batch = self.tokenizer(
            ["public int maximum(int a, int b, int c){return Math.max(a, Math.max(b, c));}"], return_tensors="pt"
        )
        batch = batch.to(torch_device)
        translated_tokens = self.model.generate(**batch)
        decoded = self.tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        self.assertEqual(self.tgt_text[0], decoded[0])
        # self.assertEqual(self.tgt_text[1], decoded[1])

    @slow
    def test_java_cs_generate_batch(self):
        batch = self.tokenizer(self.src_text, return_tensors="pt", padding=True, truncation=True)
        batch = batch.to(torch_device)
        translated_tokens = self.model.generate(**batch)
        decoded = self.tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert self.tgt_text == decoded

    def test_plbart_java_cs_config(self):
        plbart_models = ["uclanlp/plbart-java-cs"]
        expected = {"scale_embedding": True}
        for name in plbart_models:
            config = PLBartConfig.from_pretrained(name)
            for k, v in expected.items():
                try:
                    self.assertEqual(v, getattr(config, k))
                except AssertionError as e:
                    e.args += (name, k)
                    raise

    def test_plbart_fast_forward(self):
        config = PLBartConfig(
            vocab_size=99,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            add_final_layer_norm=True,
        )
        lm_model = PLBartForConditionalGeneration(config).to(torch_device)
        context = torch.tensor(
            [[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], device=torch_device, dtype=torch.long
        )
        summary = torch.tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], device=torch_device, dtype=torch.long)
        result = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(result.logits.shape, expected_shape)


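# Exercises the base denoising checkpoint: generation with the `en_XX` language code as the decoder
# start token, and <mask> in-filling, both compared against hard-coded reference strings.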
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartBaseIntegrationTest(AbstractSeq2SeqIntegrationTest):
    checkpoint_name = "uclanlp/plbart-base"
    src_text = ["Is 0 the first Fibonacci number ?", "Find the sum of all prime numbers ."]
    tgt_text = ["0 the first Fibonacci number?", "the sum of all prime numbers.......... the the"]

    def test_base_generate(self):
        inputs = self.tokenizer([self.src_text[0]], return_tensors="pt").to(torch_device)
        src_lan = self.tokenizer._convert_lang_code_special_format("en_XX")
        translated_tokens = self.model.generate(
            input_ids=inputs["input_ids"].to(torch_device),
            decoder_start_token_id=self.tokenizer.lang_code_to_id[src_lan],
        )
        decoded = self.tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        self.assertEqual(self.tgt_text[0], decoded[0])

    @slow
    def test_fill_mask(self):
        inputs = self.tokenizer(["Is 0 the <mask> Fibonacci <mask> ?"], return_tensors="pt").to(torch_device)
        src_lan = self.tokenizer._convert_lang_code_special_format("en_XX")
        outputs = self.model.generate(
            inputs["input_ids"], decoder_start_token_id=self.tokenizer.lang_code_to_id[src_lan], num_beams=1
        )
        prediction: str = self.tokenizer.batch_decode(
            outputs, clean_up_tokenization_spaces=True, skip_special_tokens=True
        )[0]
        self.assertEqual(prediction, "0 0 the 0 the 0 the 0 the 0 the 0 the 0 the 0 the")


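# Tester for PLBartDecoder / PLBartForCausalLM used standalone (decoder-only), mirroring
# PLBartModelTester above but without an encoder.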
class PLBartStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=2,
        encoder_attention_heads=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        is_encoder_decoder=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.is_encoder_decoder = is_encoder_decoder

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = PLBartConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            encoder_attention_heads=self.encoder_attention_heads,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
            is_encoder_decoder=self.is_encoder_decoder,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        config.use_cache = True
        model = PLBartDecoder(config=config).to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_decoder_model_attention_mask_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        model = PLBartDecoder(config=config).to(torch_device).eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        half_seq_length = input_ids.shape[-1] // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=attn_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, attention_mask, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


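# Runs the common model and generation test suites against the standalone decoder classes.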
@require_torch
class PLBartStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    all_model_classes = (PLBartDecoder, PLBartForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (PLBartForCausalLM,) if is_torch_available() else ()
    test_pruning = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = PLBartStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=PLBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_decoder_model_attn_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)

    def test_retain_grad_hidden_states_attentions(self):
        # decoder cannot keep gradients
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass