# Ranger deep learning optimizer - RAdam + Lookahead + Gradient Centralization, combined into one optimizer.

# https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer
# and/or
# https://github.com/lessw2020/Best-Deep-Learning-Optimizers

# Ranger has now been used to capture 12 records on the FastAI leaderboard.

# This version = 20.4.11

# Credits:
# Gradient Centralization --> https://arxiv.org/abs/2004.01461v2 (a new optimization technique for DNNs), github: https://github.com/Yonghongwei/Gradient-Centralization
# RAdam --> https://github.com/LiyuanLucasLiu/RAdam
# Lookahead --> rewritten by lessw2020, but big thanks to GitHub @LonePatient and @RWightman for ideas from their code.
# Lookahead paper --> M. Zhang, G. Hinton https://arxiv.org/abs/1907.08610

# summary of changes:
# 4/11/20 - added the gradient centralization option and set a new testing benchmark for accuracy with it; toggle with the use_gc flag at init.
# Full code integration with all updates at the param level instead of the group level; moves slow weights into the state dict (from generic weights);
# supports per-group learning rates (thanks @SHolderbach); fixes sporadic load-from-saved-model issues.
# changes 8/31/19 - fix references to *self*.N_sma_threshold;
# changed eps to 1e-5 as a better default than 1e-8.
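
# Typical usage (a minimal sketch, not from the upstream file; `model`, `loader`
# and `criterion` stand in for whatever module, dataloader and loss you already use):
#
#   optimizer = Ranger(model.parameters(), lr=1e-3, k=6, use_gc=True)
#   for inputs, targets in loader:
#       optimizer.zero_grad()
#       loss = criterion(model(inputs), targets)
#       loss.backward()
#       optimizer.step()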

import math
import torch
from torch.optim.optimizer import Optimizer


class Ranger(Optimizer):

    def __init__(self, params, lr=1e-3,  # lr
                 alpha=0.5, k=6, N_sma_threshhold=5,  # Ranger options
                 betas=(.95, 0.999), eps=1e-5, weight_decay=0,  # Adam options
                 use_gc=True, gc_conv_only=False
                 # Gradient centralization on or off, applied to conv layers only or conv + fc layers
                 ):
        # parameter checks
        if not 0.0 <= alpha <= 1.0:
            raise ValueError(f'Invalid slow update rate: {alpha}')
        if not 1 <= k:
            raise ValueError(f'Invalid lookahead steps: {k}')
        if not lr > 0:
            raise ValueError(f'Invalid Learning Rate: {lr}')
        if not eps > 0:
            raise ValueError(f'Invalid eps: {eps}')
        # parameter comments:
        # beta1 (momentum) of .95 seems to work better than .90...
        # N_sma_threshold of 5 seems better in testing than 4.
        # In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you.
        # prep defaults and init torch.optim base
        defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas, N_sma_threshhold=N_sma_threshhold,
                        eps=eps, weight_decay=weight_decay)
        super().__init__(params, defaults)

        # adjustable threshold
        self.N_sma_threshhold = N_sma_threshhold

        # look ahead params
        self.alpha = alpha
        self.k = k

        # radam buffer for state
        self.radam_buffer = [[None, None, None] for ind in range(10)]
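        # Each slot caches [step, N_sma, step_size] keyed by step % 10, so every
        # parameter sharing the same step count reuses the rectification terms
        # instead of recomputing them per tensor.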

        # gc on or off
        self.use_gc = use_gc

        # level of gradient centralization
        self.gc_gradient_threshold = 3 if gc_conv_only else 1
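        # A gradient is centralized only when grad.dim() exceeds this threshold:
        # with gc_conv_only=True only conv kernels (4-D/5-D) qualify, while the
        # default of 1 also catches 2-D fully-connected weights; 1-D bias
        # vectors are never centralized.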

    def __setstate__(self, state):
        super(Ranger, self).__setstate__(state)

    def step(self, closure=None):
        loss = None
        # honor the standard torch.optim closure contract (the original left it unused)
        if closure is not None:
            loss = closure()

        # Evaluate averages and grad, update param tensors
        for group in self.param_groups:

            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()

                if grad.is_sparse:
                    raise RuntimeError('Ranger optimizer does not support sparse gradients')

                p_data_fp32 = p.data.float()

                state = self.state[p]  # get state dict for this param

                if len(state) == 0:  # if first time to run...init dictionary with our desired entries
                    # if self.first_run_check == 0:
                    #     self.first_run_check = 1
                    #     print("Initializing slow buffer...should not see this at load from saved model!")
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)

                    # look ahead weight storage now in state dict
                    state['slow_buffer'] = torch.empty_like(p.data)
                    state['slow_buffer'].copy_(p.data)

                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                # begin computations
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # GC operation for Conv layers and FC layers
                # (fix: the use_gc toggle was stored at init but never consulted here)
                if self.use_gc and grad.dim() > self.gc_gradient_threshold:
                    grad.add_(-grad.mean(dim=tuple(range(1, grad.dim())), keepdim=True))
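                # Centralization subtracts from each output slice (dim 0) its
                # mean over all remaining dims, per the GC paper in the header.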

                state['step'] += 1

                # compute variance mov avg (keyword form required by newer PyTorch)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                # compute mean moving avg
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                buffered = self.radam_buffer[int(state['step'] % 10)]

                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    if N_sma > self.N_sma_threshhold:
                        step_size = math.sqrt(
                            (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (
                                    N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        step_size = 1.0 / (1 - beta1 ** state['step'])
                    buffered[2] = step_size
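                # N_sma approximates the SMA length from the RAdam paper: while it
                # is at or below the threshold the adaptive-lr variance is deemed
                # untrustworthy and a plain momentum step is taken; above it, the
                # rectified Adam-style step_size applies.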

                if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])

                # apply lr
                if N_sma > self.N_sma_threshhold:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr'])
                else:
                    p_data_fp32.add_(exp_avg, alpha=-step_size * group['lr'])

                p.data.copy_(p_data_fp32)

                # integrated look ahead...
                # we do it at the param level instead of group level
                if state['step'] % group['k'] == 0:
                    slow_p = state['slow_buffer']  # get access to slow param tensor
                    slow_p.add_(p.data - slow_p, alpha=self.alpha)  # (fast weights - slow weights) * alpha
                    p.data.copy_(slow_p)  # copy interpolated weights to RAdam param tensor
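                # E.g. with the defaults k=6, alpha=0.5: every 6th step the slow
                # weights move halfway toward the fast (RAdam) weights, and the
                # fast weights restart from that interpolated point (Lookahead).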

        return loss
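

if __name__ == '__main__':
    # Minimal smoke test (an illustrative sketch, not part of the upstream file):
    # fit a tiny linear regression for a few steps to confirm that the update,
    # gradient centralization and the k-step lookahead sync all run end to end.
    torch.manual_seed(0)
    model = torch.nn.Linear(4, 1)
    optimizer = Ranger(model.parameters(), lr=1e-2, k=3)
    x, y = torch.randn(64, 4), torch.randn(64, 1)
    for _ in range(12):
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(x), y)
        loss.backward()
        optimizer.step()
    print(f'loss after 12 steps: {loss.item():.4f}')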