demo-ml-pennfudanped
/
metrics.py
117 lines · 4.2 KB
1import numpy as np2import torch3import pycocotools.mask as mask_util4from pycocotools.cocoeval import COCOeval5from pycocotools.coco import COCO6import contextlib7import io8
9
10def prepare_results_segm(segmPredicted, target, labels, scores, masks):11cocovalPrediction = []12rles = [13mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0] for mask in masks14]15for k, rle in enumerate(rles):16rle["counts"] = rle["counts"].decode("utf-8")17cocovalPrediction.append( {18"image_id": target['image_id'].item(),19"category_id": labels[k],20"segmentation": rle,21"score": scores[k],22})23segmPredicted.extend(cocovalPrediction)24
25
26def prepare_results_bbox(bboxPredicted, target, labels, scores, boxes):27def convert_to_xywh(boxes):28xmin, ymin, xmax, ymax = boxes.unbind(1)29return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)30
31cocovalPrediction = []32for k, box in enumerate(convert_to_xywh(boxes).tolist()):33cocovalPrediction.append( {34"image_id": target['image_id'].item(),35"category_id": labels[k],36"bbox": box,37"score": scores[k],38})39bboxPredicted.extend(cocovalPrediction)40
41
42# def prepare_results_keypoints(coco, target, labels, scores, keypoints):
43# cocovalPrediction = []
44# for k, keypoint in enumerate(keypoints.flatten(start_dim=1).tolist()):
45# cocovalPrediction.append( {
46# "image_id": target['image_id'].item(),
47# "category_id": labels[k],
48# "keypoints": keypoint,
49# "score": scores[k],
50# })
51# return COCO.loadRes(coco, cocovalPrediction)
52
53
54def accumulate_metrics(segmPredicted, bboxPredicted, targets, predictions):55for target, prediction in zip(targets, predictions):56masks = prediction["masks"].cpu()57masks = masks > 0.558scores = prediction["scores"].tolist()59labels = prediction["labels"].tolist()60boxes = prediction["boxes"]61
62prepare_results_segm(segmPredicted, target, labels, scores, masks)63prepare_results_bbox(bboxPredicted, target, labels, scores, boxes)64# if "keypoints" in prediction:65# keypoints = prediction["keypoints"]66# keypointsPredictedCoco = prepare_results_keypoints(data_loader_test.dataset.coco, target, labels, scores, keypoints)67
68
69coco_metric_names = {70'mAP': 0,71'mAP_50': 1,72'mAP_75': 2,73'mAP_s': 3,74'mAP_m': 4,75'mAP_l': 5,76'AR@100': 6,77'AR@300': 7,78'AR@1000': 8,79'AR_s@1000': 9,80'AR_m@1000': 10,81'AR_l@1000': 1182}
83
84
85def compute_metrics(coco, segmPredicted, bboxPredicted):86redirect_string = io.StringIO()87with contextlib.redirect_stdout(redirect_string):88segmPredictedCoco = COCO.loadRes(coco, segmPredicted)89bboxPredictedCoco = COCO.loadRes(coco, bboxPredicted)90
91metrics = {}92
93for t, data in zip(('segm','bbox'), (segmPredictedCoco, bboxPredictedCoco)):94
95cocoEval = COCOeval(coco, data, t)96cocoEval.evaluate()97cocoEval.accumulate()98cocoEval.summarize()99
100# Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=200 ] = 0.507101# Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=200 ] = 0.699102# Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=200 ] = 0.575103# Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=200 ] = 0.586104# Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=200 ] = 0.519105# Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=200 ] = 0.501106# Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=200 ] = 0.598107# Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=200 ] = 0.640108# Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=200 ] = 0.566109# Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=200 ] = 0.564110
111m = {}112for item in coco_metric_names.keys():113m[item] = cocoEval.stats[coco_metric_names[item]]114
115metrics[t] = m116
117return metrics