demo-ml-pennfudanped

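"""COCO-style evaluation helpers (instance segmentation and bounding boxes)
for a torchvision detection model, e.g. Mask R-CNN on the PennFudanPed
pedestrian dataset."""
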
import contextlib
import io

import numpy as np
import torch
import pycocotools.mask as mask_util
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval


def prepare_results_segm(segmPredicted, target, labels, scores, masks):
    # Convert each predicted binary mask to a COCO result dict with an RLE
    # segmentation and append it to the shared segmPredicted list.
    cocovalPrediction = []
    rles = [
        mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
        for mask in masks
    ]
    for k, rle in enumerate(rles):
        # pycocotools returns RLE counts as bytes; decode so the result is JSON-serializable.
        rle["counts"] = rle["counts"].decode("utf-8")
        cocovalPrediction.append({
            "image_id": target['image_id'].item(),
            "category_id": labels[k],
            "segmentation": rle,
            "score": scores[k],
        })
    segmPredicted.extend(cocovalPrediction)


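def _demo_rle_roundtrip():
    # Hypothetical helper, not part of the original evaluation flow: a minimal
    # sanity check of the pycocotools RLE encode/decode round trip used above.
    toy = np.zeros((4, 4), dtype=np.uint8)
    toy[1:3, 1:3] = 1
    rle = mask_util.encode(np.asfortranarray(toy))  # a single 2-D mask yields a single RLE dict
    assert (mask_util.decode(rle) == toy).all()

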
def prepare_results_bbox(bboxPredicted, target, labels, scores, boxes):
    # COCO expects boxes as [x, y, width, height]; torchvision predicts [x1, y1, x2, y2].
    def convert_to_xywh(boxes):
        xmin, ymin, xmax, ymax = boxes.unbind(1)
        return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)

    cocovalPrediction = []
    for k, box in enumerate(convert_to_xywh(boxes).tolist()):
        cocovalPrediction.append({
            "image_id": target['image_id'].item(),
            "category_id": labels[k],
            "bbox": box,
            "score": scores[k],
        })
    bboxPredicted.extend(cocovalPrediction)


# def prepare_results_keypoints(coco, target, labels, scores, keypoints):
#     cocovalPrediction = []
#     for k, keypoint in enumerate(keypoints.flatten(start_dim=1).tolist()):
#         cocovalPrediction.append( {
#             "image_id": target['image_id'].item(),
#             "category_id": labels[k],
#             "keypoints": keypoint,
#             "score": scores[k],
#         })
#     return COCO.loadRes(coco, cocovalPrediction)


def accumulate_metrics(segmPredicted, bboxPredicted, targets, predictions):
    # Collect COCO-format results for a batch of predictions, binarizing the
    # soft masks at a 0.5 threshold before RLE encoding.
    for target, prediction in zip(targets, predictions):
        masks = prediction["masks"].cpu()
        masks = masks > 0.5
        scores = prediction["scores"].tolist()
        labels = prediction["labels"].tolist()
        boxes = prediction["boxes"]

        prepare_results_segm(segmPredicted, target, labels, scores, masks)
        prepare_results_bbox(bboxPredicted, target, labels, scores, boxes)
        # if "keypoints" in prediction:
        #     keypoints = prediction["keypoints"]
        #     keypointsPredictedCoco = prepare_results_keypoints(data_loader_test.dataset.coco, target, labels, scores, keypoints)


# Indices into COCOeval.stats after summarize(); with the default
# params.maxDets = [1, 10, 100], the recall slots correspond to AR@1, AR@10,
# AR@100 and the per-size AR@100 entries.
coco_metric_names = {
    'mAP': 0,
    'mAP_50': 1,
    'mAP_75': 2,
    'mAP_s': 3,
    'mAP_m': 4,
    'mAP_l': 5,
    'AR@1': 6,
    'AR@10': 7,
    'AR@100': 8,
    'AR_s@100': 9,
    'AR_m@100': 10,
    'AR_l@100': 11
}


def compute_metrics(coco, segmPredicted, bboxPredicted):
    # Run COCO evaluation for both result types, silencing pycocotools'
    # verbose printing, and return the stats as a nested dict.
    redirect_string = io.StringIO()
    with contextlib.redirect_stdout(redirect_string):
        segmPredictedCoco = coco.loadRes(segmPredicted)
        bboxPredictedCoco = coco.loadRes(bboxPredicted)

        metrics = {}

        for t, data in zip(('segm', 'bbox'), (segmPredictedCoco, bboxPredictedCoco)):
            cocoEval = COCOeval(coco, data, t)
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()

            # Sample summarize() output:
            # Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=200 ] = 0.507
            # Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=200 ] = 0.699
            # Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=200 ] = 0.575
            # Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=200 ] = 0.586
            # Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=200 ] = 0.519
            # Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=200 ] = 0.501
            # Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=200 ] = 0.598
            # Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=200 ] = 0.640
            # Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=200 ] = 0.566
            # Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=200 ] = 0.564

            metrics[t] = {name: cocoEval.stats[idx] for name, idx in coco_metric_names.items()}

        return metrics
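

# A minimal usage sketch showing how the helpers above fit together. Assumptions
# (not in the original file): `model` is a torchvision detection model returning
# dicts with "boxes"/"labels"/"scores"/"masks", `data_loader` yields lists of
# images and per-image target dicts (torchvision detection convention), and
# `coco_gt` is a pycocotools COCO object for the ground-truth annotations.
@torch.no_grad()
def evaluate(model, data_loader, device, coco_gt):
    model.eval()
    segmPredicted, bboxPredicted = [], []
    for images, targets in data_loader:
        images = [img.to(device) for img in images]
        predictions = model(images)
        accumulate_metrics(segmPredicted, bboxPredicted, targets, predictions)
    return compute_metrics(coco_gt, segmPredicted, bboxPredicted)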
