demo-ml-pennfudanped

coco_for_mask_r_cnn_dataset.py 
82 lines · 3.0 KB
import torch
from PIL import Image
import numpy as np
from pycocotools.coco import COCO


class CocoForMaskRCNNDataset(torch.utils.data.Dataset):

    def __init__(self, images_root, annotations, transforms=None, entry_type=None):
        self.imDir = images_root
        self.annFile = annotations
        # Category names to keep; None (or an empty list) selects all categories.
        self.entry_type = entry_type if entry_type is not None else []

        self.transforms = transforms

        self.coco = COCO(self.annFile)
        self.catIds = self.coco.getCatIds(catNms=self.entry_type)
        self.imgIds = self.coco.getImgIds(catIds=self.catIds)

    def __getitem__(self, idx):
        img = self.coco.loadImgs(self.imgIds[idx])[0]
        im_path = f'{self.imDir}/{img["file_name"]}'
        image = Image.open(im_path).convert("RGB")

        annIds = self.coco.getAnnIds(imgIds=img['id'], catIds=self.catIds, iscrowd=None)
        anns = self.coco.loadAnns(annIds)
        # Each annotation carries: segmentation, bbox, id, category_id, image_id, iscrowd, area.

        # Rasterize each annotation's segmentation (polygon or RLE) into a binary mask.
        masks = []
        for ann in anns:
            masks.append(self.coco.annToMask(ann))

        torch_masks = torch.as_tensor(np.array(masks, dtype=np.uint8), dtype=torch.uint8)

        num_objs = len(anns)
        # Single foreground class: every instance is labelled 1 (0 is reserved for background).
        labels = torch.ones((num_objs,), dtype=torch.int64)
        boxes = []
        area = []
        for ann in anns:
            # COCO stores "bbox" as [x, y, width, height]; convert to [x0, y0, x1, y1].
            boxes.append([
                ann['bbox'][0],
                ann['bbox'][1],
                ann['bbox'][0] + ann['bbox'][2],
                ann['bbox'][1] + ann['bbox'][3],
            ])
            area.append(ann['area'])
        image_id = torch.tensor([self.imgIds[idx]])
        iscrowd = torch.zeros((num_objs,), dtype=torch.int64)

        target = {}

        # boxes (FloatTensor[N, 4]): coordinates of the N bounding boxes in [x0, y0, x1, y1] format, ranging from 0 to W and 0 to H
        target["boxes"] = torch.as_tensor(boxes, dtype=torch.float32)

        # labels (Int64Tensor[N]): the label for each bounding box; 0 always represents the background class
        target["labels"] = labels

        # (optionally) masks (UInt8Tensor[N, H, W]): the segmentation mask for each object
        target["masks"] = torch_masks

        # image_id (Int64Tensor[1]): an image identifier; it should be unique across all images in the dataset and is used during evaluation
        target["image_id"] = image_id

        # area (Tensor[N]): the area of each bounding box; used during evaluation with the COCO metric to separate scores for small, medium and large boxes
        target["area"] = torch.as_tensor(area, dtype=torch.float32)

        # iscrowd (UInt8Tensor[N]): instances with iscrowd=True are ignored during evaluation
        target["iscrowd"] = iscrowd

        if self.transforms is not None:
            image, target = self.transforms(image, target)

        return image, target

    def get_categories(self):
        return self.coco.cats

    def __len__(self):
        return len(self.imgIds)
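
A minimal usage sketch (not part of the original file): the data paths, the "person" category name, and the helper functions below are placeholder assumptions. It illustrates how the (image, target) pairs returned by __getitem__ are typically batched for torchvision detection models: the targets are variable-size dicts, so the DataLoader needs a collate function that keeps each batch as a tuple, and a transform converts the PIL image to a tensor.

import torchvision.transforms.functional as TF
from torch.utils.data import DataLoader


def to_tensor_transform(image, target):
    # Matches the (image, target) signature that CocoForMaskRCNNDataset expects.
    return TF.to_tensor(image), target


def collate_fn(batch):
    # Detection targets vary in size per image, so keep batch elements as tuples
    # instead of stacking them into a single tensor.
    return tuple(zip(*batch))


dataset = CocoForMaskRCNNDataset(
    images_root="data/images",            # hypothetical image directory
    annotations="data/annotations.json",  # hypothetical COCO-format annotation file
    transforms=to_tensor_transform,
    entry_type=["person"],                # keep only the "person" category
)

loader = DataLoader(dataset, batch_size=2, shuffle=True, collate_fn=collate_fn)

images, targets = next(iter(loader))
print(len(images), targets[0]["boxes"].shape, targets[0]["masks"].shape)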
