# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the TensorFlow SAM model."""


from __future__ import annotations

import inspect
import unittest

import numpy as np
import requests

from transformers import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import SamProcessor, TFSamModel
    from transformers.modeling_tf_utils import keras

if is_vision_available():
    from PIL import Image

class TFSamPromptEncoderTester:
    def __init__(
        self,
        hidden_size=32,
        input_image_size=24,
        patch_size=2,
        mask_input_channels=4,
        num_point_embeddings=4,
        hidden_act="gelu",
        batch_size=2,  # needed by prepare_config_and_inputs
    ):
        self.hidden_size = hidden_size
        self.input_image_size = input_image_size
        self.patch_size = patch_size
        self.mask_input_channels = mask_input_channels
        self.num_point_embeddings = num_point_embeddings
        self.hidden_act = hidden_act
        self.batch_size = batch_size

    def get_config(self):
        return SamPromptEncoderConfig(
            image_size=self.input_image_size,
            patch_size=self.patch_size,
            mask_input_channels=self.mask_input_channels,
            hidden_size=self.hidden_size,
            num_point_embeddings=self.num_point_embeddings,
            hidden_act=self.hidden_act,
        )

    def prepare_config_and_inputs(self):
        dummy_points = floats_tensor([self.batch_size, 3, 2])
        config = self.get_config()

        return config, dummy_points


class TFSamMaskDecoderTester:
    def __init__(
        self,
        hidden_size=32,
        hidden_act="relu",
        mlp_dim=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        attention_downsample_rate=2,
        num_multimask_outputs=3,
        iou_head_depth=3,
        iou_head_hidden_dim=32,
        layer_norm_eps=1e-6,
        batch_size=2,  # needed by prepare_config_and_inputs
    ):
        self.hidden_size = hidden_size
        self.hidden_act = hidden_act
        self.mlp_dim = mlp_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.attention_downsample_rate = attention_downsample_rate
        self.num_multimask_outputs = num_multimask_outputs
        self.iou_head_depth = iou_head_depth
        self.iou_head_hidden_dim = iou_head_hidden_dim
        self.layer_norm_eps = layer_norm_eps
        self.batch_size = batch_size

    def get_config(self):
        return SamMaskDecoderConfig(
            hidden_size=self.hidden_size,
            hidden_act=self.hidden_act,
            mlp_dim=self.mlp_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            attention_downsample_rate=self.attention_downsample_rate,
            num_multimask_outputs=self.num_multimask_outputs,
            iou_head_depth=self.iou_head_depth,
            iou_head_hidden_dim=self.iou_head_hidden_dim,
            layer_norm_eps=self.layer_norm_eps,
        )

    def prepare_config_and_inputs(self):
        config = self.get_config()

        dummy_inputs = {
            "image_embedding": floats_tensor([self.batch_size, self.hidden_size]),
        }

        return config, dummy_inputs


class TFSamModelTester:
    def __init__(
        self,
        parent,
        hidden_size=36,
        intermediate_size=72,
        projection_dim=62,
        output_channels=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_channels=3,
        image_size=24,
        patch_size=2,
        hidden_act="gelu",
        layer_norm_eps=1e-06,
        dropout=0.0,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        qkv_bias=True,
        mlp_ratio=4.0,
        use_abs_pos=True,
        use_rel_pos=True,
        rel_pos_zero_init=False,
        window_size=14,
        global_attn_indexes=[2, 5, 8, 11],
        num_pos_feats=16,
        mlp_dim=None,
        batch_size=2,
    ):
        self.parent = parent
        self.image_size = image_size
        self.patch_size = patch_size
        self.output_channels = output_channels
        self.num_channels = num_channels
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.mlp_ratio = mlp_ratio
        self.use_abs_pos = use_abs_pos
        self.use_rel_pos = use_rel_pos
        self.rel_pos_zero_init = rel_pos_zero_init
        self.window_size = window_size
        self.global_attn_indexes = global_attn_indexes
        self.num_pos_feats = num_pos_feats
        self.mlp_dim = mlp_dim
        self.batch_size = batch_size

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
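        # With the defaults above: num_patches = (24 // 2) ** 2 = 144, so seq_length = 145.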

        self.prompt_encoder_tester = TFSamPromptEncoderTester()
        self.mask_decoder_tester = TFSamMaskDecoderTester()

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        vision_config = SamVisionConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            initializer_range=self.initializer_range,
            initializer_factor=self.initializer_factor,
            output_channels=self.output_channels,
            qkv_bias=self.qkv_bias,
            mlp_ratio=self.mlp_ratio,
            use_abs_pos=self.use_abs_pos,
            use_rel_pos=self.use_rel_pos,
            rel_pos_zero_init=self.rel_pos_zero_init,
            window_size=self.window_size,
            global_attn_indexes=self.global_attn_indexes,
            num_pos_feats=self.num_pos_feats,
            mlp_dim=self.mlp_dim,
        )

        prompt_encoder_config = self.prompt_encoder_tester.get_config()

        mask_decoder_config = self.mask_decoder_tester.get_config()

        return SamConfig(
            vision_config=vision_config,
            prompt_encoder_config=prompt_encoder_config,
            mask_decoder_config=mask_decoder_config,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TFSamModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.iou_scores.shape, (self.batch_size, 1, 3))
        self.parent.assertEqual(result.pred_masks.shape[:3], (self.batch_size, 1, 3))

    def create_and_check_get_image_features(self, config, pixel_values):
        model = TFSamModel(config=config)
        result = model.get_image_embeddings(pixel_values)
        self.parent.assertEqual(result[0].shape, (self.output_channels, 12, 12))
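        # 12 here is the patch grid side: image_size // patch_size = 24 // 2 = 12.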

    def create_and_check_get_image_hidden_states(self, config, pixel_values):
        model = TFSamModel(config=config)
        result = model.vision_encoder(
            pixel_values,
            output_hidden_states=True,
            return_dict=True,
        )

        # after computing the convolutional features
        expected_hidden_states_shape = (self.batch_size, 12, 12, 36)
        self.parent.assertEqual(len(result[1]), self.num_hidden_layers + 1)
        self.parent.assertEqual(result[1][0].shape, expected_hidden_states_shape)

        result = model.vision_encoder(
            pixel_values,
            output_hidden_states=True,
            return_dict=False,
        )

        # after computing the convolutional features
        expected_hidden_states_shape = (self.batch_size, 12, 12, 36)
        self.parent.assertEqual(len(result[1]), self.num_hidden_layers + 1)
        self.parent.assertEqual(result[1][0].shape, expected_hidden_states_shape)
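        # The hidden states are channels-last: (batch_size, 12, 12, hidden_size=36),
        # i.e. the 12x12 patch grid with one hidden vector per patch.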

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFSamModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as SAM's vision encoder does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (TFSamModel,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFSamModel, "mask-generation": TFSamModel} if is_tf_available() else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    # TODO: Fix me @Arthur: `run_batch_test` in `tests/test_pipeline_mixin.py` not working
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = TFSamModelTester(self)
        self.vision_config_tester = ConfigTester(self, config_class=SamVisionConfig, has_text_modality=False)
        self.prompt_encoder_config_tester = ConfigTester(
            self,
            config_class=SamPromptEncoderConfig,
            has_text_modality=False,
            num_attention_heads=12,
            num_hidden_layers=2,
        )
        self.mask_decoder_config_tester = ConfigTester(
            self, config_class=SamMaskDecoderConfig, has_text_modality=False
        )

    def test_config(self):
        self.vision_config_tester.run_common_tests()
        self.prompt_encoder_config_tester.run_common_tests()
        self.mask_decoder_config_tester.run_common_tests()

    @unittest.skip(reason="SAM's vision encoder does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_get_image_features(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_get_image_features(*config_and_inputs)

    def test_image_hidden_states(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_get_image_hidden_states(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        expected_vision_attention_shape = (
            self.model_tester.batch_size * self.model_tester.num_attention_heads,
            196,
            196,
        )
        expected_mask_decoder_attention_shape = (self.model_tester.batch_size, 1, 144, 32)
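        # Assumption on the magic numbers: 196 = window_size**2 = 14**2 (the 12x12 token grid
        # is padded up to the 14x14 attention window in SAM's windowed attention), and
        # 144 = 12 * 12 image-embedding tokens seen by the mask decoder.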

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            vision_attentions = outputs.vision_attentions
            self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers)

            mask_decoder_attentions = outputs.mask_decoder_attentions
            self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            vision_attentions = outputs.vision_attentions
            self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers)

            mask_decoder_attentions = outputs.mask_decoder_attentions
            self.assertEqual(len(mask_decoder_attentions), self.model_tester.mask_decoder_tester.num_hidden_layers)

            self.assertListEqual(
                list(vision_attentions[0].shape[-4:]),
                list(expected_vision_attention_shape),
            )

            self.assertListEqual(
                list(mask_decoder_attentions[0].shape[-4:]),
                list(expected_mask_decoder_attention_shape),
            )

    @unittest.skip(reason="Hidden_states is tested in create_and_check_model tests")
    def test_hidden_states_output(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFSamModel.from_pretrained("facebook/sam-vit-base")  # sam-vit-huge blows out our memory
        self.assertIsNotNone(model)

    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-4, name="outputs", attributes=None):
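        # Assumption: this override exists only to relax the PT/TF agreement tolerance to
        # 5e-4 (the base default is tighter); everything else defers to the parent check.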
        super().check_pt_tf_outputs(
            tf_outputs=tf_outputs,
            pt_outputs=pt_outputs,
            model_class=model_class,
            tol=tol,
            name=name,
            attributes=attributes,
        )


def prepare_image():
    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    return raw_image


def prepare_dog_img():
    img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/dog-sam.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    return raw_image
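

# Usage sketch (an assumption, mirroring the TF SAM doc examples rather than anything in
# this test file): how the image helpers above feed SamProcessor/TFSamModel end to end,
# and how the low-resolution mask logits would be upscaled back to the original image size:
#
#     model = TFSamModel.from_pretrained("facebook/sam-vit-base")
#     processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#     raw_image = prepare_image()
#     inputs = processor(images=raw_image, input_points=[[[400, 650]]], return_tensors="tf")
#     outputs = model(**inputs)
#     masks = processor.image_processor.post_process_masks(
#         outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"], return_tensors="tf"
#     )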


@require_tf
@slow
class TFSamModelIntegrationTest(unittest.TestCase):
    def test_inference_mask_generation_no_point(self):
        model = TFSamModel.from_pretrained("facebook/sam-vit-base")
        processor = SamProcessor.from_pretrained("facebook/sam-vit-base")

        raw_image = prepare_image()
        inputs = processor(images=raw_image, return_tensors="tf")

        outputs = model(**inputs)
        scores = tf.squeeze(outputs.iou_scores)
        masks = outputs.pred_masks[0, 0, 0, 0, :3]
        self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.4515), atol=2e-4))
        self.assertTrue(np.allclose(masks.numpy(), np.array([-4.1807, -3.4949, -3.4483]), atol=1e-2))
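        # Note: pred_masks is 5-D — (batch, point_batch, num_masks, height, width) — so the
        # [0, 0, 0, 0, :3] slice above reads the first three logits of the first row of the
        # first candidate mask; the same convention is used in the tests below.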

    def test_inference_mask_generation_one_point_one_bb(self):
        model = TFSamModel.from_pretrained("facebook/sam-vit-base")
        processor = SamProcessor.from_pretrained("facebook/sam-vit-base")

        raw_image = prepare_image()
        input_boxes = [[[650, 900, 1000, 1250]]]
        input_points = [[[820, 1080]]]

        inputs = processor(images=raw_image, input_boxes=input_boxes, input_points=input_points, return_tensors="tf")

        outputs = model(**inputs)
        scores = tf.squeeze(outputs.iou_scores)
        masks = outputs.pred_masks[0, 0, 0, 0, :3]

        self.assertTrue(np.allclose(scores[-1], np.array(0.9566), atol=2e-4))
        self.assertTrue(np.allclose(masks.numpy(), np.array([-12.7657, -12.3683, -12.5985]), atol=2e-2))

    def test_inference_mask_generation_batched_points_batched_images(self):
        model = TFSamModel.from_pretrained("facebook/sam-vit-base")
        processor = SamProcessor.from_pretrained("facebook/sam-vit-base")

        raw_image = prepare_image()
        input_points = [
            [[[820, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]],
            [[[510, 1080]], [[820, 1080]], [[820, 1080]], [[820, 1080]]],
        ]

        inputs = processor(images=[raw_image, raw_image], input_points=input_points, return_tensors="tf")

        outputs = model(**inputs)
        scores = tf.squeeze(outputs.iou_scores)
        masks = outputs.pred_masks[0, 0, 0, 0, :3]

        EXPECTED_SCORES = np.array(
            [
                [
                    [0.6765, 0.9379, 0.8803],
                    [0.6765, 0.9379, 0.8803],
                    [0.6765, 0.9379, 0.8803],
                    [0.6765, 0.9379, 0.8803],
                ],
                [
                    [0.3317, 0.7264, 0.7646],
                    [0.6765, 0.9379, 0.8803],
                    [0.6765, 0.9379, 0.8803],
                    [0.6765, 0.9379, 0.8803],
                ],
            ]
        )
        EXPECTED_MASKS = np.array([-2.8552, -2.7990, -2.9612])
        self.assertTrue(np.allclose(scores.numpy(), EXPECTED_SCORES, atol=1e-3))
        self.assertTrue(np.allclose(masks.numpy(), EXPECTED_MASKS, atol=3e-2))

    def test_inference_mask_generation_one_point_one_bb_zero(self):
        model = TFSamModel.from_pretrained("facebook/sam-vit-base")
        processor = SamProcessor.from_pretrained("facebook/sam-vit-base")

        raw_image = prepare_image()
        input_boxes = [[[620, 900, 1000, 1255]]]
        input_points = [[[820, 1080]]]
        labels = [[0]]

        inputs = processor(
            images=raw_image,
            input_boxes=input_boxes,
            input_points=input_points,
            input_labels=labels,
            return_tensors="tf",
        )

        outputs = model(**inputs)
        scores = tf.squeeze(outputs.iou_scores)
        self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.7894), atol=1e-4))

    def test_inference_mask_generation_one_point(self):
        model = TFSamModel.from_pretrained("facebook/sam-vit-base")
        processor = SamProcessor.from_pretrained("facebook/sam-vit-base")

        raw_image = prepare_image()

        input_points = [[[400, 650]]]
        input_labels = [[1]]

        inputs = processor(images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="tf")

        outputs = model(**inputs)
        scores = tf.squeeze(outputs.iou_scores)

        self.assertTrue(np.allclose(scores[-1], np.array(0.9675), atol=1e-4))

        # With no label
        input_points = [[[400, 650]]]

        inputs = processor(images=raw_image, input_points=input_points, return_tensors="tf")

        outputs = model(**inputs)
        scores = tf.squeeze(outputs.iou_scores)

        self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.9675), atol=1e-4))

    def test_inference_mask_generation_two_points(self):
        model = TFSamModel.from_pretrained("facebook/sam-vit-base")
        processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
        raw_image = prepare_image()

        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="tf")

        outputs = model(**inputs)
        scores = tf.squeeze(outputs.iou_scores)

        self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.9762), atol=1e-4))

        # no labels
        inputs = processor(images=raw_image, input_points=input_points, return_tensors="tf")

        outputs = model(**inputs)
        scores = tf.squeeze(outputs.iou_scores)

        self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.9762), atol=1e-4))

    def test_inference_mask_generation_two_points_batched(self):
        model = TFSamModel.from_pretrained("facebook/sam-vit-base")
        processor = SamProcessor.from_pretrained("facebook/sam-vit-base")

        raw_image = prepare_image()

        input_points = [[[400, 650], [800, 650]], [[400, 650]]]
        input_labels = [[1, 1], [1]]

        inputs = processor(
            images=[raw_image, raw_image], input_points=input_points, input_labels=input_labels, return_tensors="tf"
        )

        outputs = model(**inputs)
        scores = tf.squeeze(outputs.iou_scores)

        self.assertTrue(np.allclose(scores[0][-1].numpy(), np.array(0.9762), atol=1e-4))
        self.assertTrue(np.allclose(scores[1][-1], np.array(0.9637), atol=1e-4))

    def test_inference_mask_generation_one_box(self):
        model = TFSamModel.from_pretrained("facebook/sam-vit-base")
        processor = SamProcessor.from_pretrained("facebook/sam-vit-base")

        raw_image = prepare_image()

        input_boxes = [[[75, 275, 1725, 850]]]

        inputs = processor(images=raw_image, input_boxes=input_boxes, return_tensors="tf")

        outputs = model(**inputs)
        scores = tf.squeeze(outputs.iou_scores)

        self.assertTrue(np.allclose(scores[-1].numpy(), np.array(0.7937), atol=1e-4))

    def test_inference_mask_generation_batched_image_one_point(self):
        model = TFSamModel.from_pretrained("facebook/sam-vit-base")
        processor = SamProcessor.from_pretrained("facebook/sam-vit-base")

        raw_image = prepare_image()
        raw_dog_image = prepare_dog_img()

        input_points = [[[820, 1080]], [[220, 470]]]

        inputs = processor(images=[raw_image, raw_dog_image], input_points=input_points, return_tensors="tf")

        outputs = model(**inputs)
        scores_batched = tf.squeeze(outputs.iou_scores)

        input_points = [[[220, 470]]]

        inputs = processor(images=raw_dog_image, input_points=input_points, return_tensors="tf")

        outputs = model(**inputs)
        scores_single = tf.squeeze(outputs.iou_scores)
        self.assertTrue(np.allclose(scores_batched[1, :].numpy(), scores_single.numpy(), atol=1e-4))

    def test_inference_mask_generation_two_points_point_batch(self):
        model = TFSamModel.from_pretrained("facebook/sam-vit-base")
        processor = SamProcessor.from_pretrained("facebook/sam-vit-base")

        raw_image = prepare_image()

        input_points = tf.convert_to_tensor([[[400, 650]], [[220, 470]]])  # fmt: skip

        input_points = tf.expand_dims(input_points, 0)

        inputs = processor(raw_image, input_points=input_points, return_tensors="tf")

        outputs = model(**inputs)

        iou_scores = outputs.iou_scores
        self.assertTrue(iou_scores.shape == (1, 2, 3))
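        # i.e. (1 image, 2 point batches, 3 candidate masks per prompt)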
        self.assertTrue(
            np.allclose(
                iou_scores.numpy(),
                np.array([[[0.9105, 0.9825, 0.9675], [0.7646, 0.7943, 0.7774]]]),
                atol=1e-4,
                rtol=1e-4,
            )
        )

    def test_inference_mask_generation_three_boxes_point_batch(self):
        model = TFSamModel.from_pretrained("facebook/sam-vit-base")
        processor = SamProcessor.from_pretrained("facebook/sam-vit-base")

        raw_image = prepare_image()

        # fmt: off
        input_boxes = tf.convert_to_tensor([[[620, 900, 1000, 1255]], [[75, 275, 1725, 850]],  [[75, 275, 1725, 850]]])
        EXPECTED_IOU = np.array([[[0.9773, 0.9881, 0.9522],
         [0.5996, 0.7661, 0.7937],
         [0.5996, 0.7661, 0.7937]]])
        # fmt: on
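        # The last two rows of EXPECTED_IOU match because the same box is passed twice above.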
        input_boxes = tf.expand_dims(input_boxes, 0)

        inputs = processor(raw_image, input_boxes=input_boxes, return_tensors="tf")

        outputs = model(**inputs)

        iou_scores = outputs.iou_scores
        self.assertTrue(iou_scores.shape == (1, 3, 3))
        self.assertTrue(np.allclose(iou_scores.numpy(), EXPECTED_IOU, atol=1e-4, rtol=1e-4))