pytorch-image-models
1""" Padding Helpers
2
3Hacked together by / Copyright 2020 Ross Wightman
4"""
5import math6from typing import List, Tuple7
8import torch9import torch.nn.functional as F10


# Calculate symmetric padding for a convolution
def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int:
    padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
    return padding
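
# Illustrative sanity checks (not in the upstream file; values follow from the
# formula above): a 3x3 conv at stride 1 keeps spatial size with pad 1, and
# dilation scales the effective kernel extent.
assert get_padding(3) == 1
assert get_padding(3, dilation=2) == 2
assert get_padding(5, stride=2) == 2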


# Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution
def get_same_padding(x: int, kernel_size: int, stride: int, dilation: int):
    if isinstance(x, torch.Tensor):
        return torch.clamp(((x / stride).ceil() - 1) * stride + (kernel_size - 1) * dilation + 1 - x, min=0)
    else:
        return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0)
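
# Illustrative sanity checks (not in the upstream file): the total 'SAME'
# padding for a 224-wide input with a 3x3 kernel is 1 at stride 2 (hence
# asymmetric) and 2 at stride 1 (splits evenly).
assert get_same_padding(224, 3, 2, 1) == 1
assert get_same_padding(224, 3, 1, 1) == 2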


# Can SAME padding for given args be done statically?
def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_):
    return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0
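
# Illustrative sanity checks (not in the upstream file): static 'SAME' padding
# needs stride 1 and an even total pad; strides > 1 and even kernels must fall
# back to dynamic padding.
assert is_static_pad(3)
assert not is_static_pad(3, stride=2)
assert not is_static_pad(2)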


# Compute [left, right, top, bottom] 'SAME' pad amounts (F.pad ordering) from sizes alone
def pad_same_arg(
        input_size: List[int],
        kernel_size: List[int],
        stride: List[int],
        dilation: List[int] = (1, 1),
) -> List[int]:
    ih, iw = input_size
    kh, kw = kernel_size
    pad_h = get_same_padding(ih, kh, stride[0], dilation[0])
    pad_w = get_same_padding(iw, kw, stride[1], dilation[1])
    return [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2]
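
# Illustrative sanity check (not in the upstream file): a 3x3/stride-2 conv on
# a 224x224 input needs one extra column on the right and one row on the bottom.
assert pad_same_arg([224, 224], [3, 3], [2, 2]) == [0, 1, 0, 1]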


# Dynamically pad input x with 'SAME' padding for conv with specified args
def pad_same(
        x,
        kernel_size: List[int],
        stride: List[int],
        dilation: List[int] = (1, 1),
        value: float = 0,
):
    ih, iw = x.size()[-2:]
    pad_h = get_same_padding(ih, kernel_size[0], stride[0], dilation[0])
    pad_w = get_same_padding(iw, kernel_size[1], stride[1], dilation[1])
    x = F.pad(x, (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2), value=value)
    return x
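
# Illustrative sanity check (not in the upstream file): padding a 224x224 map
# for a 3x3/stride-2 conv adds one row and one column overall.
_t = pad_same(torch.zeros(1, 1, 224, 224), kernel_size=[3, 3], stride=[2, 2])
assert _t.shape == (1, 1, 225, 225)
del _t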


# Resolve a padding arg (int or string) to a concrete value plus a dynamic-padding flag
def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]:
    dynamic = False
    if isinstance(padding, str):
        # for any string padding, the padding will be calculated for you, one of three ways
        padding = padding.lower()
        if padding == 'same':
            # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
            if is_static_pad(kernel_size, **kwargs):
                # static case, no extra overhead
                padding = get_padding(kernel_size, **kwargs)
            else:
                # dynamic 'SAME' padding, has runtime/GPU memory overhead
                padding = 0
                dynamic = True
        elif padding == 'valid':
            # 'VALID' padding, same as padding=0
            padding = 0
        else:
            # Default to PyTorch style 'same'-ish symmetric padding
            padding = get_padding(kernel_size, **kwargs)
    return padding, dynamic
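
# Illustrative sanity checks (not in the upstream file): 'same' resolves
# statically when possible, otherwise defers to runtime padding; non-string
# values pass through unchanged.
assert get_padding_value('same', 3) == (1, False)
assert get_padding_value('same', 3, stride=2) == (0, True)
assert get_padding_value('valid', 3) == (0, False)
assert get_padding_value(2, 3) == (2, False)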