# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import json
import pathlib
import unittest

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import AnnotationFormatTestMixin, ImageProcessingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import YolosImageProcessor


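# Helper that mirrors YolosImageProcessor's constructor arguments and computes
# the output shapes the processor is expected to produce for a given input.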
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        This function computes the expected height and width when providing images to YolosImageProcessor,
        assuming do_resize is set to True. The shortest_edge/longest_edge rule from the size dict is
        applied first, then each dimension is floored to a multiple of 16.
        """
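        # Worked example with this tester's defaults (an assumed 480x640 HxW input):
        # the shortest edge is scaled to 18, giving 18x24 (640 / 480 * 18 = 24, well
        # under longest_edge=1333, so the longest-edge cap does not kick in), and
        # flooring both sides to a multiple of 16 yields an expected size of 16x16.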
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                width, height = image.size
            else:
                height, width = image.shape[1], image.shape[2]

            size = self.size["shortest_edge"]
            max_size = self.size.get("longest_edge", None)
            if max_size is not None:
                min_original_size = float(min((height, width)))
                max_original_size = float(max((height, width)))
                if max_original_size / min_original_size * size > max_size:
                    size = int(round(max_size * min_original_size / max_original_size))

            if width < height and width != size:
                height = int(size * height / width)
                width = size
            elif height < width and height != size:
                width = int(size * width / height)
                height = size
            # YOLOS-specific: floor both dimensions to a multiple of 16
            width_mod = width % 16
            height_mod = height % 16
            expected_width = width - width_mod
            expected_height = height - height_mod

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            # for a batch, the expected size is the per-dimension max (images are padded up to it)
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def expected_output_image_shape(self, images):
        height, width = self.get_expected_values(images, batched=True)
        return self.num_channels, height, width

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


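# Combines the shared image-processing test mixins with YOLOS-specific checks
# (mod-16 output sizes, COCO detection and panoptic annotation handling).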
@require_torch
@require_vision
class YolosImageProcessingTest(AnnotationFormatTestMixin, ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        # a legacy int `size` plus `max_size` is normalized into the size dict,
        # and `pad_and_return_pixel_mask` maps onto `do_pad`
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_equivalence_padding(self):
        # Initialize image processors
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    def test_resize_max_size_respected(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)

        # create a torch tensor as image
        image = torch.randint(0, 256, (3, 100, 1500), dtype=torch.uint8)
        processed_image = image_processor(
            image, size={"longest_edge": 1333, "shortest_edge": 800}, do_pad=False, return_tensors="pt"
        )["pixel_values"]

        # 100x1500 is so wide that scaling the shortest edge to 800 would blow past
        # longest_edge=1333, so the longest-edge cap wins and both sides stay bounded
        self.assertTrue(processed_image.shape[-1] <= 1333)
        self.assertTrue(processed_image.shape[-2] <= 800)

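    # The @slow tests below exercise real COCO fixture files (and, for the detection
    # test, the hustvl/yolos-small checkpoint), so they only run when RUN_SLOW is set.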
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1056])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5832.7256, 11144.6689, 484763.2500, 829269.8125, 146579.4531, 164177.6250])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1056])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1056])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([146591.5000, 163974.2500, 480092.2500, 11187.0000, 5824.5000, 7562.5000])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 815161
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1056])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    # Output size is slightly different from DETR's, as YOLOS floors each dimension to a multiple of 16
    @slow
    def test_batched_coco_detection_annotations(self):
        image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800))

        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        annotations_0 = {"image_id": 39769, "annotations": target}
        annotations_1 = {"image_id": 39769, "annotations": target}

        # Adjust the bounding boxes for the resized image
        w_0, h_0 = image_0.size
        w_1, h_1 = image_1.size
        for i in range(len(annotations_1["annotations"])):
            coords = annotations_1["annotations"][i]["bbox"]
            new_bbox = [
                coords[0] * w_1 / w_0,
                coords[1] * h_1 / h_0,
                coords[2] * w_1 / w_0,
                coords[3] * h_1 / h_0,
            ]
            annotations_1["annotations"][i]["bbox"] = new_bbox

        images = [image_0, image_1]
        annotations = [annotations_0, annotations_1]

        image_processing = YolosImageProcessor()
        encoding = image_processing(
            images=images,
            annotations=annotations,
            return_segmentation_masks=True,
            return_tensors="pt",  # do_convert_annotations=True
        )

        # Check the pixel values have been padded
        postprocessed_height, postprocessed_width = 800, 1056
        expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        # Check the bounding boxes have been adjusted for padded images
        self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
        self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
        expected_boxes_0 = torch.tensor(
            [
                [0.6879, 0.4609, 0.0755, 0.3691],
                [0.2118, 0.3359, 0.2601, 0.1566],
                [0.5011, 0.5000, 0.9979, 1.0000],
                [0.5010, 0.5020, 0.9979, 0.9959],
                [0.3284, 0.5944, 0.5884, 0.8112],
                [0.8394, 0.5445, 0.3213, 0.9110],
            ]
        )
        expected_boxes_1 = torch.tensor(
            [
                [0.4169, 0.2765, 0.0458, 0.2215],
                [0.1284, 0.2016, 0.1576, 0.0940],
                [0.3792, 0.4933, 0.7559, 0.9865],
                [0.3794, 0.5002, 0.7563, 0.9955],
                [0.1990, 0.5456, 0.3566, 0.8646],
                [0.5845, 0.4115, 0.3462, 0.7161],
            ]
        )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3))
        self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3))

        # Check the masks have also been padded
        self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1056]))
        self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1056]))

        # Check that with do_convert_annotations=False the annotations are not converted to the
        # centre_x, centre_y, width, height format and are not normalized to the range [0, 1]
        encoding = image_processing(
            images=images,
            annotations=annotations,
            return_segmentation_masks=True,
            do_convert_annotations=False,
            return_tensors="pt",
        )
        self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
        self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
        # Convert to absolute coordinates
        unnormalized_boxes_0 = torch.vstack(
            [
                expected_boxes_0[:, 0] * postprocessed_width,
                expected_boxes_0[:, 1] * postprocessed_height,
                expected_boxes_0[:, 2] * postprocessed_width,
                expected_boxes_0[:, 3] * postprocessed_height,
            ]
        ).T
        unnormalized_boxes_1 = torch.vstack(
            [
                expected_boxes_1[:, 0] * postprocessed_width,
                expected_boxes_1[:, 1] * postprocessed_height,
                expected_boxes_1[:, 2] * postprocessed_width,
                expected_boxes_1[:, 3] * postprocessed_height,
            ]
        ).T
        # Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max
        expected_boxes_0 = torch.vstack(
            [
                unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2,
                unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2,
                unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2,
                unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2,
            ]
        ).T
        expected_boxes_1 = torch.vstack(
            [
                unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2,
                unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2,
                unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2,
                unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
            ]
        ).T
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1))
        self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1))

    # Output size is slightly different from DETR's, as YOLOS floors each dimension to a multiple of 16
    def test_batched_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800))

        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        annotation_0 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        annotation_1 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        # Adjust the bounding boxes for the resized image
        w_0, h_0 = image_0.size
        w_1, h_1 = image_1.size
        for i in range(len(annotation_1["segments_info"])):
            coords = annotation_1["segments_info"][i]["bbox"]
            new_bbox = [
                coords[0] * w_1 / w_0,
                coords[1] * h_1 / h_0,
                coords[2] * w_1 / w_0,
                coords[3] * h_1 / h_0,
            ]
            annotation_1["segments_info"][i]["bbox"] = new_bbox

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        images = [image_0, image_1]
        annotations = [annotation_0, annotation_1]

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(
            images=images,
            annotations=annotations,
            masks_path=masks_path,
            return_tensors="pt",
            return_segmentation_masks=True,
        )

        # Check the pixel values have been padded
        postprocessed_height, postprocessed_width = 800, 1056
        expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        # Check the bounding boxes have been adjusted for padded images
        self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
        self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
        expected_boxes_0 = torch.tensor(
            [
                [0.2625, 0.5437, 0.4688, 0.8625],
                [0.7719, 0.4104, 0.4531, 0.7125],
                [0.5000, 0.4927, 0.9969, 0.9854],
                [0.1688, 0.2000, 0.2063, 0.0917],
                [0.5492, 0.2760, 0.0578, 0.2187],
                [0.4992, 0.4990, 0.9984, 0.9979],
            ]
        )
        expected_boxes_1 = torch.tensor(
            [
                [0.1591, 0.3262, 0.2841, 0.5175],
                [0.4678, 0.2463, 0.2746, 0.4275],
                [0.3030, 0.2956, 0.6042, 0.5913],
                [0.1023, 0.1200, 0.1250, 0.0550],
                [0.3329, 0.1656, 0.0350, 0.1312],
                [0.3026, 0.2994, 0.6051, 0.5987],
            ]
        )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3))
        self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3))

        # Check the masks have also been padded
        self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1056]))
        self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1056]))

        # Check that with do_convert_annotations=False the annotations are not converted to the
        # centre_x, centre_y, width, height format and are not normalized to the range [0, 1]
        encoding = image_processing(
            images=images,
            annotations=annotations,
            masks_path=masks_path,
            return_segmentation_masks=True,
            do_convert_annotations=False,
            return_tensors="pt",
        )
        self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
        self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
        # Convert to absolute coordinates
        unnormalized_boxes_0 = torch.vstack(
            [
                expected_boxes_0[:, 0] * postprocessed_width,
                expected_boxes_0[:, 1] * postprocessed_height,
                expected_boxes_0[:, 2] * postprocessed_width,
                expected_boxes_0[:, 3] * postprocessed_height,
            ]
        ).T
        unnormalized_boxes_1 = torch.vstack(
            [
                expected_boxes_1[:, 0] * postprocessed_width,
                expected_boxes_1[:, 1] * postprocessed_height,
                expected_boxes_1[:, 2] * postprocessed_width,
                expected_boxes_1[:, 3] * postprocessed_height,
            ]
        ).T
        # Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max
        expected_boxes_0 = torch.vstack(
            [
                unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2,
                unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2,
                unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2,
                unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2,
            ]
        ).T
        expected_boxes_1 = torch.vstack(
            [
                unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2,
                unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2,
                unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2,
                unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
            ]
        ).T
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1))
        self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1))
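
# To run just this module (path assumed from the transformers repo layout):
#   RUN_SLOW=1 python -m pytest tests/models/yolos/test_image_processing_yolos.py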
