1""" Split Attention Conv2d (for ResNeSt Models)
2
3Paper: `ResNeSt: Split-Attention Networks` - /https://arxiv.org/abs/2004.08955
4
5Adapted from original PyTorch impl at https://github.com/zhanghang1989/ResNeSt
6
7Modified for torchscript compat, performance, and consistency with timm by Ross Wightman
8"""
import torch
import torch.nn.functional as F
from torch import nn

from .helpers import make_divisible

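# Overview (annotation, not part of the original file): SplitAttn expands the
# input to radix * out_channels features with a grouped conv, splits them into
# `radix` groups U_r, and recombines the groups as a weighted sum
#
#     out_c = sum_r softmax_r(a_{r,c}) * U_{r,c}
#
# where the logits a come from two 1x1 convs over the pooled sum of the splits,
# and the softmax over r is implemented by RadixSoftmax below.

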
class RadixSoftmax(nn.Module):
    def __init__(self, radix, cardinality):
        super(RadixSoftmax, self).__init__()
        self.radix = radix
        self.cardinality = cardinality

    def forward(self, x):
        batch = x.size(0)
        if self.radix > 1:
            x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)
            x = F.softmax(x, dim=1)
            x = x.reshape(batch, -1)
        else:
            x = torch.sigmoid(x)
        return x


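# Shape sketch for RadixSoftmax (illustrative, not from the original file):
# with radix=2, cardinality=1 and 64 channels per split, a (B, 128) logit
# tensor is viewed as (B, 1, 2, 64), transposed to (B, 2, 1, 64), and the
# softmax over dim=1 makes each channel's two radix weights sum to 1:
#
#   rs = RadixSoftmax(radix=2, cardinality=1)
#   w = rs(torch.randn(4, 128))        # -> (4, 128), radix-major layout,
#   assert torch.allclose(w[:, :64] + w[:, 64:], torch.ones(4, 64))

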
class SplitAttn(nn.Module):
    """Split-Attention (aka Splat)
    """
    def __init__(self, in_channels, out_channels=None, kernel_size=3, stride=1, padding=None,
                 dilation=1, groups=1, bias=False, radix=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8,
                 act_layer=nn.ReLU, norm_layer=None, drop_layer=None, **kwargs):
        super(SplitAttn, self).__init__()
        out_channels = out_channels or in_channels
        self.radix = radix
        mid_chs = out_channels * radix
        if rd_channels is None:
            attn_chs = make_divisible(in_channels * radix * rd_ratio, min_value=32, divisor=rd_divisor)
        else:
            attn_chs = rd_channels * radix
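        # Worked example (hypothetical sizes, not from the original file):
        # in_channels=64 with the defaults radix=2, rd_ratio=0.25 gives
        # attn_chs = make_divisible(64 * 2 * 0.25, min_value=32, divisor=8) = 32.
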
        padding = kernel_size // 2 if padding is None else padding
        self.conv = nn.Conv2d(
            in_channels, mid_chs, kernel_size, stride, padding, dilation,
            groups=groups * radix, bias=bias, **kwargs)
        self.bn0 = norm_layer(mid_chs) if norm_layer else nn.Identity()
        self.drop = drop_layer() if drop_layer is not None else nn.Identity()
        self.act0 = act_layer(inplace=True)
        self.fc1 = nn.Conv2d(out_channels, attn_chs, 1, groups=groups)
        self.bn1 = norm_layer(attn_chs) if norm_layer else nn.Identity()
        self.act1 = act_layer(inplace=True)
        self.fc2 = nn.Conv2d(attn_chs, mid_chs, 1, groups=groups)
        self.rsoftmax = RadixSoftmax(radix, groups)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn0(x)
        x = self.drop(x)
        x = self.act0(x)

        B, RC, H, W = x.shape
        if self.radix > 1:
            # split channels into radix groups and sum them for the attention pathway
            x = x.reshape((B, self.radix, RC // self.radix, H, W))
            x_gap = x.sum(dim=1)
        else:
            x_gap = x
        # global average pool, then squeeze-and-excite style 1x1 convs
        x_gap = x_gap.mean((2, 3), keepdim=True)
        x_gap = self.fc1(x_gap)
        x_gap = self.bn1(x_gap)
        x_gap = self.act1(x_gap)
        x_attn = self.fc2(x_gap)

        x_attn = self.rsoftmax(x_attn).view(B, -1, 1, 1)
        if self.radix > 1:
            # weighted sum over the radix splits using the attention weights
            out = (x * x_attn.reshape((B, self.radix, RC // self.radix, 1, 1))).sum(dim=1)
        else:
            out = x * x_attn
        return out.contiguous()
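
if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module). The relative
    # import above means this file must be run in its package context,
    # e.g. `python -m timm.models.layers.split_attn` (path varies by timm version).
    attn = SplitAttn(64, 64, radix=2, groups=1, norm_layer=nn.BatchNorm2d)
    x = torch.randn(2, 64, 32, 32)
    print(attn(x).shape)  # expected: torch.Size([2, 64, 32, 32])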