# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import json
import pathlib
import unittest

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import AnnotationFormatTestMixin, ImageProcessingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import YolosImageProcessor


class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        This function computes the expected height and width when providing images to YolosImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                width, height = image.size
            else:
                height, width = image.shape[1], image.shape[2]

            size = self.size["shortest_edge"]
            max_size = self.size.get("longest_edge", None)
            if max_size is not None:
                min_original_size = float(min((height, width)))
                max_original_size = float(max((height, width)))
                if max_original_size / min_original_size * size > max_size:
                    size = int(round(max_size * min_original_size / max_original_size))

            if width < height and width != size:
                height = int(size * height / width)
                width = size
            elif height < width and height != size:
                width = int(size * width / height)
                height = size
            width_mod = width % 16
            height_mod = height % 16
            expected_width = width - width_mod
            expected_height = height - height_mod

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def expected_output_image_shape(self, images):
        height, width = self.get_expected_values(images, batched=True)
        return self.num_channels, height, width

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


@require_torch
@require_vision
class YolosImageProcessingTest(AnnotationFormatTestMixin, ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    def test_resize_max_size_respected(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)

        # create torch tensors as image
        image = torch.randint(0, 256, (3, 100, 1500), dtype=torch.uint8)
        processed_image = image_processor(
            image, size={"longest_edge": 1333, "shortest_edge": 800}, do_pad=False, return_tensors="pt"
        )["pixel_values"]

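        # The target for the shortest edge is capped so that the resized longest edge stays
        # within `longest_edge`, and the final floor to a multiple of 16 can only shrink the
        # dimensions further, so both limits below should hold.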
        self.assertTrue(processed_image.shape[-1] <= 1333)
        self.assertTrue(processed_image.shape[-2] <= 800)

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1056])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5832.7256, 11144.6689, 484763.2500, 829269.8125, 146579.4531, 164177.6250])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1056])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1056])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([146591.5000, 163974.2500, 480092.2500, 11187.0000, 5824.5000, 7562.5000])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 815161
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1056])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    # Output size is slightly different from DETR as YOLOS floors the size to a multiple of 16
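    # For example, the 480x640 test image with shortest_edge=800 and longest_edge=1333 resizes
    # to 800x1066 under the DETR rule; flooring each dimension to a multiple of 16 gives the
    # 800x1056 expected below.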
    @slow
    def test_batched_coco_detection_annotations(self):
        image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800))

        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        annotations_0 = {"image_id": 39769, "annotations": target}
        annotations_1 = {"image_id": 39769, "annotations": target}

        # Adjust the bounding boxes for the resized image
        w_0, h_0 = image_0.size
        w_1, h_1 = image_1.size
        for i in range(len(annotations_1["annotations"])):
            coords = annotations_1["annotations"][i]["bbox"]
            new_bbox = [
                coords[0] * w_1 / w_0,
                coords[1] * h_1 / h_0,
                coords[2] * w_1 / w_0,
                coords[3] * h_1 / h_0,
            ]
            annotations_1["annotations"][i]["bbox"] = new_bbox

        images = [image_0, image_1]
        annotations = [annotations_0, annotations_1]

        image_processing = YolosImageProcessor()
        encoding = image_processing(
            images=images,
            annotations=annotations,
            return_segmentation_masks=True,
            return_tensors="pt",  # do_convert_annotations=True
        )

        # Check the pixel values have been padded
        postprocessed_height, postprocessed_width = 800, 1056
        expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        # Check the bounding boxes have been adjusted for padded images
        self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
        self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
        expected_boxes_0 = torch.tensor(
            [
                [0.6879, 0.4609, 0.0755, 0.3691],
                [0.2118, 0.3359, 0.2601, 0.1566],
                [0.5011, 0.5000, 0.9979, 1.0000],
                [0.5010, 0.5020, 0.9979, 0.9959],
                [0.3284, 0.5944, 0.5884, 0.8112],
                [0.8394, 0.5445, 0.3213, 0.9110],
            ]
        )
        expected_boxes_1 = torch.tensor(
            [
                [0.4169, 0.2765, 0.0458, 0.2215],
                [0.1284, 0.2016, 0.1576, 0.0940],
                [0.3792, 0.4933, 0.7559, 0.9865],
                [0.3794, 0.5002, 0.7563, 0.9955],
                [0.1990, 0.5456, 0.3566, 0.8646],
                [0.5845, 0.4115, 0.3462, 0.7161],
            ]
        )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3))
        self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3))

        # Check the masks have also been padded
        self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1056]))
        self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1056]))

        # Check that if do_convert_annotations=False, the annotations are not converted to the
        # centre_x, centre_y, width, height format and are not rescaled to the range [0, 1]
        encoding = image_processing(
            images=images,
            annotations=annotations,
            return_segmentation_masks=True,
            do_convert_annotations=False,
            return_tensors="pt",
        )
        self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
        self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
        # Convert to absolute coordinates
        unnormalized_boxes_0 = torch.vstack(
            [
                expected_boxes_0[:, 0] * postprocessed_width,
                expected_boxes_0[:, 1] * postprocessed_height,
                expected_boxes_0[:, 2] * postprocessed_width,
                expected_boxes_0[:, 3] * postprocessed_height,
            ]
        ).T
        unnormalized_boxes_1 = torch.vstack(
            [
                expected_boxes_1[:, 0] * postprocessed_width,
                expected_boxes_1[:, 1] * postprocessed_height,
                expected_boxes_1[:, 2] * postprocessed_width,
                expected_boxes_1[:, 3] * postprocessed_height,
            ]
        ).T
        # Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max
        expected_boxes_0 = torch.vstack(
            [
                unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2,
                unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2,
                unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2,
                unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2,
            ]
        ).T
        expected_boxes_1 = torch.vstack(
            [
                unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2,
                unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2,
                unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2,
                unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
            ]
        ).T
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1))
        self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1))

    # Output size is slightly different from DETR as YOLOS floors the size to a multiple of 16
    def test_batched_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800))

        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        annotation_0 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        annotation_1 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        w_0, h_0 = image_0.size
        w_1, h_1 = image_1.size
        for i in range(len(annotation_1["segments_info"])):
            coords = annotation_1["segments_info"][i]["bbox"]
            new_bbox = [
                coords[0] * w_1 / w_0,
                coords[1] * h_1 / h_0,
                coords[2] * w_1 / w_0,
                coords[3] * h_1 / h_0,
            ]
            annotation_1["segments_info"][i]["bbox"] = new_bbox

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        images = [image_0, image_1]
        annotations = [annotation_0, annotation_1]

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(
            images=images,
            annotations=annotations,
            masks_path=masks_path,
            return_tensors="pt",
            return_segmentation_masks=True,
        )

        # Check the pixel values have been padded
        postprocessed_height, postprocessed_width = 800, 1056
        expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        # Check the bounding boxes have been adjusted for padded images
        self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
        self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
        expected_boxes_0 = torch.tensor(
            [
                [0.2625, 0.5437, 0.4688, 0.8625],
                [0.7719, 0.4104, 0.4531, 0.7125],
                [0.5000, 0.4927, 0.9969, 0.9854],
                [0.1688, 0.2000, 0.2063, 0.0917],
                [0.5492, 0.2760, 0.0578, 0.2187],
                [0.4992, 0.4990, 0.9984, 0.9979],
            ]
        )
        expected_boxes_1 = torch.tensor(
            [
                [0.1591, 0.3262, 0.2841, 0.5175],
                [0.4678, 0.2463, 0.2746, 0.4275],
                [0.3030, 0.2956, 0.6042, 0.5913],
                [0.1023, 0.1200, 0.1250, 0.0550],
                [0.3329, 0.1656, 0.0350, 0.1312],
                [0.3026, 0.2994, 0.6051, 0.5987],
            ]
        )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3))
        self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3))

        # Check the masks have also been padded
        self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1056]))
        self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1056]))

        # Check that if do_convert_annotations=False, the annotations are not converted to the
        # centre_x, centre_y, width, height format and are not rescaled to the range [0, 1]
        encoding = image_processing(
            images=images,
            annotations=annotations,
            masks_path=masks_path,
            return_segmentation_masks=True,
            do_convert_annotations=False,
            return_tensors="pt",
        )
        self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
        self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
        # Convert to absolute coordinates
        unnormalized_boxes_0 = torch.vstack(
            [
                expected_boxes_0[:, 0] * postprocessed_width,
                expected_boxes_0[:, 1] * postprocessed_height,
                expected_boxes_0[:, 2] * postprocessed_width,
                expected_boxes_0[:, 3] * postprocessed_height,
            ]
        ).T
        unnormalized_boxes_1 = torch.vstack(
            [
                expected_boxes_1[:, 0] * postprocessed_width,
                expected_boxes_1[:, 1] * postprocessed_height,
                expected_boxes_1[:, 2] * postprocessed_width,
                expected_boxes_1[:, 3] * postprocessed_height,
            ]
        ).T
        # Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max
        expected_boxes_0 = torch.vstack(
            [
                unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2,
                unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2,
                unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2,
                unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2,
            ]
        ).T
        expected_boxes_1 = torch.vstack(
            [
                unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2,
                unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2,
                unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2,
                unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
            ]
        ).T
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1))
        self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1))