transformers
232 lines · 8.5 KB
1# coding=utf-8
2# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3#
4# Licensed under the Apache License, Version 2.0 (the "License");
5# you may not use this file except in compliance with the License.
6# You may obtain a copy of the License at
7#
8# http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS,
12# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15""" Testing suite for the PyTorch ViTMSN model. """
16
17
18import unittest
19
20from transformers import ViTMSNConfig
21from transformers.testing_utils import require_torch, require_vision, slow, torch_device
22from transformers.utils import cached_property, is_torch_available, is_vision_available
23
24from ...test_configuration_common import ConfigTester
25from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
26from ...test_pipeline_mixin import PipelineTesterMixin
27
28
# Torch-dependent imports are guarded so this module can be collected even when
# PyTorch is not installed; the @require_torch tests below are skipped in that case.
if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMSNForImageClassification, ViTMSNModel
    from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
35
36
# Vision-dependent imports (PIL, image processor) are guarded the same way; only
# the @require_vision integration test below needs them.
if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor
41
42
class ViTMSNModelTester:
    """Builds a small randomly-initialized ViTMSN config plus random inputs and runs
    shape checks against the model outputs.

    `parent` is the `unittest.TestCase` driving the checks; all other arguments are
    hyperparameters of the tiny test model (kept small so the tests stay fast).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None when use_labels is False."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a ViTMSNConfig from the tester's hyperparameters."""
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check that the base model produces hidden states of the expected shape."""
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check classification-head logits shape, for both RGB and greyscale inputs."""
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        # NOTE: removed leftover debug prints here — they were missing the f-string
        # prefix and printed the literal "{pixel_values.shape}" placeholders.
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for ModelTesterMixin: returns (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
144
145
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViTMSN does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"image-feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        # Each model class must expose patch embeddings as input embeddings and
        # either no output embeddings or a linear head.
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            output_embeddings = model.get_output_embeddings()
            self.assertTrue(output_embeddings is None or isinstance(output_embeddings, nn.Linear))

    def test_model(self):
        self.model_tester.create_and_check_model(*self.model_tester.prepare_config_and_inputs())

    def test_for_image_classification(self):
        self.model_tester.create_and_check_for_image_classification(*self.model_tester.prepare_config_and_inputs())

    @slow
    def test_model_from_pretrained(self):
        # Only the first checkpoint is loaded to keep the slow test cheap.
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
198
199
200# We will verify our results on an image of cute cats
def prepare_img():
    """Load the COCO cats fixture image used by the integration test below."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
204
205
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    """Slow integration test that runs the real facebook/vit-msn-small checkpoint."""

    @cached_property
    def default_image_processor(self):
        # Guarded so attribute access is safe even when vision deps are missing.
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        # NOTE(review): the seed is set *before* from_pretrained — presumably because
        # the MSN checkpoint ships without a classification head, so the head weights
        # are randomly initialized and the hard-coded expected logits below depend on
        # this seed. Do not reorder these two lines; confirm against the checkpoint.
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.5588, 0.6853, -0.5929]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
233