# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import tempfile
import unittest

import numpy as np

from transformers import LxmertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.lxmert.modeling_tf_lxmert import TFLxmertForPreTraining, TFLxmertModel


class TFLxmertModelTester:
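    """Builds a small random LxmertConfig plus matching text/visual inputs for the TF LXMERT tests."""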
    def __init__(
        self,
        parent,
        vocab_size=300,
        hidden_size=28,
        num_attention_heads=2,
        num_labels=2,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        num_qa_labels=30,
        num_object_labels=16,
        num_attr_labels=4,
        num_visual_features=10,
        l_layers=2,
        x_layers=1,
        r_layers=1,
        visual_feat_dim=128,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        seq_length=20,
        batch_size=8,
        is_training=True,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        use_token_type_ids=True,
        use_lang_mask=True,
        output_attentions=False,
        output_hidden_states=False,
        scope=None,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.num_labels = num_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pad_token_id = pad_token_id
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.seq_length = seq_length
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_lang_mask = use_lang_mask
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_visual_features = num_visual_features
        self.use_token_type_ids = use_token_type_ids
        self.output_attentions = output_attentions
        self.output_hidden_states = output_hidden_states
        self.scope = scope
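        # LXMERT stacks three encoders; layer counts are keyed by stack name,
        # with the r_layers visual stack reported under "vision"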
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
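    # Builds the config and a full set of random inputs; each obj_labels entry
    # is a (label, mask) tensor pair, one per enabled visual pretraining loss.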
    def prepare_config_and_inputs(self):
        output_attentions = self.output_attentions
        input_ids = ids_tensor([self.batch_size, self.seq_length], vocab_size=self.vocab_size)
        visual_feats = tf.random.uniform((self.batch_size, self.num_visual_features, self.visual_feat_dim))
        bounding_boxes = tf.random.uniform((self.batch_size, self.num_visual_features, 4))

        input_mask = None
        if self.use_lang_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        obj_labels = None
        if self.task_obj_predict:
            obj_labels = {}
        if self.visual_attr_loss and self.task_obj_predict:
            obj_labels["attr"] = (
                ids_tensor([self.batch_size, self.num_visual_features], self.num_attr_labels),
                ids_tensor([self.batch_size, self.num_visual_features], self.num_attr_labels),
            )
        if self.visual_feat_loss and self.task_obj_predict:
            obj_labels["feat"] = (
                ids_tensor(
                    [self.batch_size, self.num_visual_features, self.visual_feat_dim], self.num_visual_features
                ),
                ids_tensor([self.batch_size, self.num_visual_features], self.num_visual_features),
            )
        if self.visual_obj_loss and self.task_obj_predict:
            obj_labels["obj"] = (
                ids_tensor([self.batch_size, self.num_visual_features], self.num_object_labels),
                ids_tensor([self.batch_size, self.num_visual_features], self.num_object_labels),
            )
        ans = None
        if self.task_qa:
            ans = ids_tensor([self.batch_size], self.num_qa_labels)
        masked_lm_labels = None
        if self.task_mask_lm:
            masked_lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        matched_label = None
        if self.task_matched:
            matched_label = ids_tensor([self.batch_size], self.num_labels)

        config = LxmertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_attention_heads=self.num_attention_heads,
            num_labels=self.num_labels,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            layer_norm_eps=self.layer_norm_eps,
            pad_token_id=self.pad_token_id,
            num_qa_labels=self.num_qa_labels,
            num_object_labels=self.num_object_labels,
            num_attr_labels=self.num_attr_labels,
            l_layers=self.l_layers,
            x_layers=self.x_layers,
            r_layers=self.r_layers,
            visual_feat_dim=self.visual_feat_dim,
            visual_pos_dim=self.visual_pos_dim,
            visual_loss_normalizer=self.visual_loss_normalizer,
            task_matched=self.task_matched,
            task_mask_lm=self.task_mask_lm,
            task_obj_predict=self.task_obj_predict,
            task_qa=self.task_qa,
            visual_obj_loss=self.visual_obj_loss,
            visual_attr_loss=self.visual_attr_loss,
            visual_feat_loss=self.visual_feat_loss,
            output_attentions=self.output_attentions,
            output_hidden_states=self.output_hidden_states,
        )

        return (
            config,
            input_ids,
            visual_feats,
            bounding_boxes,
            token_type_ids,
            input_mask,
            obj_labels,
            masked_lm_labels,
            matched_label,
            ans,
            output_attentions,
        )

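    # Runs the base model with attentions toggled both ways and with tuple and
    # dict returns, then checks the language, vision, and pooled output shapes.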
    def create_and_check_lxmert_model(
        self,
        config,
        input_ids,
        visual_feats,
        bounding_boxes,
        token_type_ids,
        input_mask,
        obj_labels,
        masked_lm_labels,
        matched_label,
        ans,
        output_attentions,
    ):
        model = TFLxmertModel(config=config)
        result = model(
            input_ids,
            visual_feats,
            bounding_boxes,
            token_type_ids=token_type_ids,
            attention_mask=input_mask,
            output_attentions=output_attentions,
        )
        result = model(
            input_ids,
            visual_feats,
            bounding_boxes,
            token_type_ids=token_type_ids,
            attention_mask=input_mask,
            output_attentions=not output_attentions,
        )
        result = model(input_ids, visual_feats, bounding_boxes, return_dict=False)
        result = model(input_ids, visual_feats, bounding_boxes, return_dict=True)

        self.parent.assertEqual(result.language_output.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(
            result.vision_output.shape, (self.batch_size, self.num_visual_features, self.hidden_size)
        )
        self.parent.assertEqual(result.pooled_output.shape, (self.batch_size, self.hidden_size))

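    # Repacks the tuple from prepare_config_and_inputs into the kwargs dict the
    # shared mixin tests expect; unless the caller asks for obj_labels,
    # task_obj_predict is turned off so the model does not expect them.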
    def prepare_config_and_inputs_for_common(self, return_obj_labels=False):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            visual_feats,
            bounding_boxes,
            token_type_ids,
            input_mask,
            obj_labels,
            masked_lm_labels,
            matched_label,
            ans,
            output_attentions,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "visual_feats": visual_feats,
            "visual_pos": bounding_boxes,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }

        if return_obj_labels:
            inputs_dict["obj_labels"] = obj_labels
        else:
            config.task_obj_predict = False

        return config, inputs_dict

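    # Exercises TFLxmertForPreTraining with every label type passed singly and
    # all together, and checks the masked-LM prediction logits shape.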
    def create_and_check_lxmert_for_pretraining(
        self,
        config,
        input_ids,
        visual_feats,
        bounding_boxes,
        token_type_ids,
        input_mask,
        obj_labels,
        masked_lm_labels,
        matched_label,
        ans,
        output_attentions,
    ):
        model = TFLxmertForPreTraining(config=config)
        result = model(
            input_ids,
            visual_feats,
            bounding_boxes,
            token_type_ids=token_type_ids,
            attention_mask=input_mask,
            masked_lm_labels=masked_lm_labels,
            obj_labels=obj_labels,
            matched_label=matched_label,
            ans=ans,
            output_attentions=output_attentions,
        )
        result = model(
            input_ids,
            visual_feats,
            bounding_boxes,
            token_type_ids=token_type_ids,
            attention_mask=input_mask,
            masked_lm_labels=masked_lm_labels,
            output_attentions=not output_attentions,
            return_dict=False,
        )
        result = model(
            input_ids,
            visual_feats,
            bounding_boxes,
            token_type_ids=token_type_ids,
            attention_mask=input_mask,
            masked_lm_labels=masked_lm_labels,
        )
        result = model(
            input_ids,
            visual_feats,
            bounding_boxes,
            token_type_ids=token_type_ids,
            attention_mask=input_mask,
            obj_labels=obj_labels,
        )
        result = model(
            input_ids,
            visual_feats,
            bounding_boxes,
            token_type_ids=token_type_ids,
            attention_mask=input_mask,
            matched_label=matched_label,
        )
        result = model(
            input_ids,
            visual_feats,
            bounding_boxes,
            token_type_ids=token_type_ids,
            attention_mask=input_mask,
            ans=ans,
        )
        result = model(
            input_ids,
            visual_feats,
            bounding_boxes,
            token_type_ids=token_type_ids,
            attention_mask=input_mask,
            masked_lm_labels=masked_lm_labels,
            obj_labels=obj_labels,
            matched_label=matched_label,
            ans=ans,
            output_attentions=not output_attentions,
        )

        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))


@require_tf
class TFLxmertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLxmertModel, TFLxmertForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFLxmertModel} if is_tf_available() else {}
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLxmertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LxmertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_lxmert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lxmert_model(*config_and_inputs)

    def test_lxmert_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lxmert_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["unc-nlp/lxmert-base-uncased"]:
            model = TFLxmertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

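    # LXMERT returns three attention streams (language, vision, cross-encoder)
    # as the last three outputs, so this overrides the common attention test to
    # check each stream's layer count and per-layer shape.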
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        encoder_seq_length = (
            self.model_tester.encoder_seq_length
            if hasattr(self.model_tester, "encoder_seq_length")
            else self.model_tester.seq_length
        )
        encoder_key_length = (
            self.model_tester.key_length if hasattr(self.model_tester, "key_length") else encoder_seq_length
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            language_attentions, vision_attentions, cross_encoder_attentions = (outputs[-3], outputs[-2], outputs[-1])

            self.assertEqual(model.config.output_hidden_states, False)

            self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"])
            self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"])
            self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"])

            attentions = [language_attentions, vision_attentions, cross_encoder_attentions]
            attention_shapes = [
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                [
                    self.model_tester.num_attention_heads,
                    self.model_tester.num_visual_features,
                    self.model_tester.num_visual_features,
                ],
                [self.model_tester.num_attention_heads, encoder_key_length, self.model_tester.num_visual_features],
            ]

            for attention, attention_shape in zip(attentions, attention_shapes):
                self.assertListEqual(list(attention[0].shape[-3:]), attention_shape)
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            # 2 hidden states were added
            self.assertEqual(out_len + 2, len(outputs))
            language_attentions, vision_attentions, cross_encoder_attentions = (outputs[-3], outputs[-2], outputs[-1])
            self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"])
            self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"])
            self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"])

            attentions = [language_attentions, vision_attentions, cross_encoder_attentions]
            attention_shapes = [
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                [
                    self.model_tester.num_attention_heads,
                    self.model_tester.num_visual_features,
                    self.model_tester.num_visual_features,
                ],
                [self.model_tester.num_attention_heads, encoder_key_length, self.model_tester.num_visual_features],
            ]

            for attention, attention_shape in zip(attentions, attention_shapes):
                self.assertListEqual(list(attention[0].shape[-3:]), attention_shape)

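    # Hidden states come out as separate language and vision tuples; each holds
    # one entry per layer plus the embedding output, hence the +1 below.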
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_hidden_states_output(config, inputs_dict, model_class):
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            language_hidden_states, vision_hidden_states = outputs[-2], outputs[-1]

            self.assertEqual(len(language_hidden_states), self.model_tester.num_hidden_layers["language"] + 1)
            self.assertEqual(len(vision_hidden_states), self.model_tester.num_hidden_layers["vision"] + 1)

            seq_length = self.model_tester.seq_length
            num_visual_features = self.model_tester.num_visual_features

            self.assertListEqual(
                list(language_hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )
            self.assertListEqual(
                list(vision_hidden_states[0].shape[-2:]),
                [num_visual_features, self.model_tester.hidden_size],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(config, inputs_dict, model_class)

            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(config, inputs_dict, model_class)

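    # Converts a dict of TF inputs to their PyTorch equivalents for the TF/PT
    # equivalence checks: float tensors become float32, everything else long.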
    def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict):
        import torch

        pt_inputs_dict = {}
        for key, value in tf_inputs_dict.items():
            if isinstance(value, dict):
                pt_inputs_dict[key] = self.prepare_pt_inputs_from_tf_inputs(value)
            elif isinstance(value, (list, tuple)):
                # convert each tensor in a tuple (e.g. a (label, mask) pair) element-wise
                pt_inputs_dict[key] = tuple(
                    torch.from_numpy(iter_value.numpy()).to(
                        torch.float32 if iter_value.dtype.is_floating else torch.long
                    )
                    for iter_value in value
                )
            elif isinstance(value, bool):
                pt_inputs_dict[key] = value
            elif key == "input_values":
                pt_inputs_dict[key] = torch.from_numpy(value.numpy()).to(torch.float32)
            elif key == "pixel_values":
                pt_inputs_dict[key] = torch.from_numpy(value.numpy()).to(torch.float32)
            elif key == "input_features":
                pt_inputs_dict[key] = torch.from_numpy(value.numpy()).to(torch.float32)
            # other general float inputs
            elif tf_inputs_dict[key].dtype.is_floating:
                pt_inputs_dict[key] = torch.from_numpy(value.numpy()).to(torch.float32)
            else:
                pt_inputs_dict[key] = torch.from_numpy(value.numpy()).to(torch.long)

        return pt_inputs_dict

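    # Round-trips each model class through save_pretrained/from_pretrained in a
    # temp dir and asserts the outputs are unchanged; the pretraining model
    # additionally needs obj_labels in its inputs.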
    def test_save_load(self):
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common(
                return_obj_labels="PreTraining" in model_class.__name__
            )

            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(self._prepare_for_class(inputs_dict, model_class))

                self.assert_outputs_same(after_outputs, outputs)


@require_tf
class TFLxmertModelIntegrationTest(unittest.TestCase):
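    # Feeds the reference checkpoint a fixed prompt plus seeded random visual
    # features and compares the first hidden states against a precomputed slice.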
    @slow
    def test_inference_masked_lm(self):
        model = TFLxmertModel.from_pretrained("unc-nlp/lxmert-base-uncased")
        input_ids = tf.constant([[101, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 102]])

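        # seeded draws keep visual_feats and visual_pos deterministic across
        # runs, so they match the values behind expected_slice below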
        num_visual_features = 10
        np.random.seed(0)
        visual_feats = np.random.rand(1, num_visual_features, model.config.visual_feat_dim)
        np.random.seed(0)
        visual_pos = np.random.rand(1, num_visual_features, 4)
        visual_feats = tf.convert_to_tensor(visual_feats, dtype=tf.float32)
        visual_pos = tf.convert_to_tensor(visual_pos, dtype=tf.float32)
        output = model(input_ids, visual_feats=visual_feats, visual_pos=visual_pos)[0]
        expected_shape = [1, 11, 768]
        self.assertEqual(expected_shape, output.shape)
        expected_slice = tf.constant(
            [
                [
                    [0.24170142, -0.98075, 0.14797261],
                    [1.2540525, -0.83198136, 0.5112344],
                    [1.4070463, -1.1051831, 0.6990401],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)