pytorch-image-models
177 lines · 6.2 KB
""" PyTorch selectable adaptive pooling
Adaptive pooling with the ability to select the type of pooling from:
    * 'avg' - Average pooling
    * 'max' - Max pooling
    * 'avgmax' - Sum of average and max pooling re-scaled by 0.5
    * 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles feature dim

Both a functional and a nn.Module version of the pooling is provided.

Hacked together by / Copyright 2020 Ross Wightman
"""
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F

from .format import get_spatial_dim, get_channel_dim

# Output-size argument accepted by the pooling APIs below:
# a single int or an explicit (H, W) pair.
_int_tuple_2_t = Union[int, Tuple[int, int]]
22
def adaptive_pool_feat_mult(pool_type='avg'):
    """Return the factor by which pooling multiplies the feature dim.

    Pool types ending in 'catavgmax' concatenate the avg and max results
    along the channel dim (doubling features); all others leave it unchanged.
    """
    return 2 if pool_type.endswith('catavgmax') else 1
29
def adaptive_avgmax_pool2d(x, output_size: _int_tuple_2_t = 1):
    """Pool ``x`` with both adaptive avg and max pooling and average the two results."""
    avg_pooled = F.adaptive_avg_pool2d(x, output_size)
    max_pooled = F.adaptive_max_pool2d(x, output_size)
    return 0.5 * (avg_pooled + max_pooled)
35
def adaptive_catavgmax_pool2d(x, output_size: _int_tuple_2_t = 1):
    """Concatenate adaptive avg- and max-pooled ``x`` along the channel dim (dim 1)."""
    pooled = (
        F.adaptive_avg_pool2d(x, output_size),
        F.adaptive_max_pool2d(x, output_size),
    )
    return torch.cat(pooled, 1)
41
def select_adaptive_pool2d(x, pool_type='avg', output_size: _int_tuple_2_t = 1):
    """Selectable global pooling function with dynamic input kernel size.

    Dispatches to avg / avgmax / catavgmax / max pooling by name; asserts on
    any other pool_type.
    """
    if pool_type == 'avg':
        return F.adaptive_avg_pool2d(x, output_size)
    if pool_type == 'avgmax':
        return adaptive_avgmax_pool2d(x, output_size)
    if pool_type == 'catavgmax':
        return adaptive_catavgmax_pool2d(x, output_size)
    if pool_type == 'max':
        return F.adaptive_max_pool2d(x, output_size)
    assert False, 'Invalid pool type: %s' % pool_type
57
class FastAdaptiveAvgPool(nn.Module):
    """Fast global average pool: a plain mean over the spatial dims.

    Equivalent to adaptive avg pooling with output_size == 1, implemented as
    a direct reduction.

    Args:
        flatten: If True, drop the reduced spatial dims from the output.
        input_fmt: Input layout, 'NCHW' or 'NHWC'; which dims are spatial is
            resolved via get_spatial_dim.
    """
    def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'):
        # FIX: the annotation was `input_fmt: F = 'NCHW'`, annotating with the
        # torch.nn.functional module; sibling classes correctly use `str`.
        super(FastAdaptiveAvgPool, self).__init__()
        self.flatten = flatten
        self.dim = get_spatial_dim(input_fmt)

    def forward(self, x):
        return x.mean(self.dim, keepdim=not self.flatten)
67
class FastAdaptiveMaxPool(nn.Module):
    """Fast global max pool: an amax reduction over the spatial dims.

    Equivalent to adaptive max pooling with output_size == 1.
    """

    def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'):
        super().__init__()
        self.flatten = flatten
        self.dim = get_spatial_dim(input_fmt)

    def forward(self, x):
        keep_dims = not self.flatten
        return x.amax(self.dim, keepdim=keep_dims)
77
class FastAdaptiveAvgMaxPool(nn.Module):
    """Fast global avg+max pool: mean and amax over spatial dims, averaged 50/50.

    Equivalent to the 'avgmax' adaptive pooling with output_size == 1.
    """

    def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'):
        super().__init__()
        self.flatten = flatten
        self.dim = get_spatial_dim(input_fmt)

    def forward(self, x):
        keep_dims = not self.flatten
        avg_part = x.mean(self.dim, keepdim=keep_dims)
        max_part = x.amax(self.dim, keepdim=keep_dims)
        return 0.5 * avg_part + 0.5 * max_part
89
class FastAdaptiveCatAvgMaxPool(nn.Module):
    """Fast 'catavgmax' pool: mean and amax over spatial dims, concatenated.

    Equivalent to the 'catavgmax' adaptive pooling with output_size == 1;
    the output feature dim is doubled.
    """

    def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'):
        super().__init__()
        self.flatten = flatten
        self.dim_reduce = get_spatial_dim(input_fmt)
        # Once flattened, features live on dim 1 regardless of input format.
        self.dim_cat = 1 if flatten else get_channel_dim(input_fmt)

    def forward(self, x):
        keep_dims = not self.flatten
        parts = (
            x.mean(self.dim_reduce, keepdim=keep_dims),
            x.amax(self.dim_reduce, keepdim=keep_dims),
        )
        return torch.cat(parts, self.dim_cat)
105
class AdaptiveAvgMaxPool2d(nn.Module):
    """Module wrapper for adaptive_avgmax_pool2d (0.5 * (avg + max) pooling)."""

    def __init__(self, output_size: _int_tuple_2_t = 1):
        super().__init__()
        self.output_size = output_size

    def forward(self, x):
        return adaptive_avgmax_pool2d(x, self.output_size)
114
class AdaptiveCatAvgMaxPool2d(nn.Module):
    """Module wrapper for adaptive_catavgmax_pool2d (channel-concat avg + max)."""

    def __init__(self, output_size: _int_tuple_2_t = 1):
        super().__init__()
        self.output_size = output_size

    def forward(self, x):
        return adaptive_catavgmax_pool2d(x, self.output_size)
123
class SelectAdaptivePool2d(nn.Module):
    """Selectable global pooling layer with dynamic input kernel size.

    A falsy pool_type yields a pass-through layer.  Types starting with
    'fast' (and any non-NCHW input format) use the direct-reduction pools and
    require output_size == 1; everything else wraps the standard adaptive
    pooling modules.
    """

    def __init__(
            self,
            output_size: _int_tuple_2_t = 1,
            pool_type: str = 'fast',
            flatten: bool = False,
            input_fmt: str = 'NCHW',
    ):
        super().__init__()
        assert input_fmt in ('NCHW', 'NHWC')
        self.pool_type = pool_type or ''  # convert other falsy values to empty string for consistent TS typing
        maybe_flatten = nn.Flatten(1) if flatten else nn.Identity()
        if not pool_type:
            self.pool = nn.Identity()  # pass through
            self.flatten = maybe_flatten
        elif pool_type.startswith('fast') or input_fmt != 'NCHW':
            assert output_size == 1, 'Fast pooling and non NCHW input formats require output_size == 1.'
            # Fast pools handle flattening internally.
            self.pool = self._fast_pool(pool_type, flatten, input_fmt)
            self.flatten = nn.Identity()
        else:
            assert input_fmt == 'NCHW'
            self.pool = self._standard_pool(pool_type, output_size)
            self.flatten = maybe_flatten

    @staticmethod
    def _fast_pool(pool_type, flatten, input_fmt):
        # Suffix checks must go longest-first: 'catavgmax' also ends in
        # 'avgmax' and 'max'.
        if pool_type.endswith('catavgmax'):
            return FastAdaptiveCatAvgMaxPool(flatten, input_fmt=input_fmt)
        if pool_type.endswith('avgmax'):
            return FastAdaptiveAvgMaxPool(flatten, input_fmt=input_fmt)
        if pool_type.endswith('max'):
            return FastAdaptiveMaxPool(flatten, input_fmt=input_fmt)
        return FastAdaptiveAvgPool(flatten, input_fmt=input_fmt)

    @staticmethod
    def _standard_pool(pool_type, output_size):
        # Anything other than the named types falls back to plain avg pooling.
        if pool_type == 'avgmax':
            return AdaptiveAvgMaxPool2d(output_size)
        if pool_type == 'catavgmax':
            return AdaptiveCatAvgMaxPool2d(output_size)
        if pool_type == 'max':
            return nn.AdaptiveMaxPool2d(output_size)
        return nn.AdaptiveAvgPool2d(output_size)

    def is_identity(self):
        return not self.pool_type

    def forward(self, x):
        return self.flatten(self.pool(x))

    def feat_mult(self):
        return adaptive_pool_feat_mult(self.pool_type)

    def __repr__(self):
        return f'{self.__class__.__name__}(pool_type={self.pool_type}, flatten={self.flatten})'
179