"""PyTorch optimization for BERT model.""" |
|
|
|
import logging |
|
import math |
|
|
|
import torch |
|
from torch.optim import Optimizer |
|
from torch.optim.lr_scheduler import LambdaLR |
|
|
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
class ConstantLRSchedule(LambdaLR): |
|
""" Constant learning rate schedule. |
|
""" |
|
def __init__(self, optimizer, last_epoch=-1): |
|
super(ConstantLRSchedule, self).__init__( |
|
optimizer, lambda _: 1.0, last_epoch=last_epoch) |
|
|
|
|
|
class WarmupConstantSchedule(LambdaLR):
    """ Linear warmup and then constant.
        Linearly increases the learning rate multiplier from 0 to 1 over `warmup_steps`
        training steps, then keeps it equal to 1. after `warmup_steps`.
    """
    def __init__(self, optimizer, warmup_steps, last_epoch=-1):
        self.warmup_steps = warmup_steps
        super(WarmupConstantSchedule, self).__init__(
            optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        if step < self.warmup_steps:
            return float(step) / float(max(1.0, self.warmup_steps))
        return 1.

class WarmupLinearSchedule(LambdaLR):
    """ Linear warmup and then linear decay.
        Linearly increases the learning rate multiplier from 0 to 1 over `warmup_steps`
        training steps, then linearly decreases it from 1. to 0. over the remaining
        `t_total - warmup_steps` steps.
    """
    def __init__(self, optimizer, warmup_steps, t_total, last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        super(WarmupLinearSchedule, self).__init__(
            optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        if step < self.warmup_steps:
            return float(step) / float(max(1, self.warmup_steps))
        return max(0.0, float(self.t_total - step) / float(
            max(1.0, self.t_total - self.warmup_steps)))
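

# The schedules above are driven once per optimization step. A minimal usage sketch
# (illustrative only, not part of this module's API): the model, dataloader, and
# hyperparameter values below are placeholder assumptions.
def _example_warmup_linear_usage(model, train_dataloader, num_epochs=3, lr=2e-5,
                                 warmup_proportion=0.1):
    t_total = len(train_dataloader) * num_epochs  # total number of optimization steps
    optimizer = AdamW(model.parameters(), lr=lr, weight_decay=0.01)
    scheduler = WarmupLinearSchedule(optimizer,
                                     warmup_steps=int(warmup_proportion * t_total),
                                     t_total=t_total)
    for _ in range(num_epochs):
        for batch in train_dataloader:
            loss = model(**batch)  # assumes the model returns a scalar loss
            loss.backward()
            optimizer.step()
            scheduler.step()  # advance the per-step schedule after the optimizer update
            optimizer.zero_grad()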
class WarmupCosineSchedule(LambdaLR):
    """ Linear warmup and then cosine decay.
        Linearly increases the learning rate multiplier from 0 to 1 over `warmup_steps`
        training steps, then decreases it from 1. to 0. over the remaining
        `t_total - warmup_steps` steps following a cosine curve. With the default
        `cycles=0.5` this is half a cosine period; other values trace that fraction
        of a full cosine period after warmup.
    """
    def __init__(self, optimizer, warmup_steps, t_total, cycles=.5, last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        self.cycles = cycles
        super(WarmupCosineSchedule, self).__init__(
            optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        if step < self.warmup_steps:
            return float(step) / float(max(1.0, self.warmup_steps))
        progress = float(step - self.warmup_steps) / float(
            max(1, self.t_total - self.warmup_steps))
        return max(0.0, 0.5 * (1. + math.cos(math.pi * float(self.cycles) * 2.0 * progress)))
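

# A small worked check of the half-cosine decay above (illustrative only; the concrete
# numbers assume warmup_steps=100 and t_total=1000): the multiplier is 1.0 at the end
# of warmup, 0.5 halfway through the decay, and 0.0 at t_total.
def _example_cosine_schedule_values(optimizer):
    scheduler = WarmupCosineSchedule(optimizer, warmup_steps=100, t_total=1000)
    # approximately [0.0, 0.5, 1.0, 0.5, 0.0]
    return [scheduler.lr_lambda(step) for step in (0, 50, 100, 550, 1000)]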
class WarmupCosineWithHardRestartsSchedule(LambdaLR):
    """ Linear warmup and then cosine cycles with hard restarts.
        Linearly increases the learning rate multiplier from 0 to 1 over `warmup_steps`
        training steps. After warmup, the multiplier follows `cycles` (default=1.) cosine
        decays from 1. to 0., each decay restarting from 1. (hard restart).
    """
    def __init__(self, optimizer, warmup_steps, t_total, cycles=1., last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        self.cycles = cycles
        super(WarmupCosineWithHardRestartsSchedule, self).__init__(
            optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        if step < self.warmup_steps:
            return float(step) / float(max(1, self.warmup_steps))
        progress = float(step - self.warmup_steps) / float(
            max(1, self.t_total - self.warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1. + math.cos(
            math.pi * ((float(self.cycles) * progress) % 1.0))))
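

# Companion sketch for the hard-restart variant (illustrative only; assumes
# warmup_steps=100, t_total=1000, cycles=2.): each cycle decays from 1.0 towards 0.0,
# then the multiplier jumps back to 1.0 at the start of the next cycle.
def _example_hard_restart_schedule_values(optimizer):
    scheduler = WarmupCosineWithHardRestartsSchedule(optimizer, warmup_steps=100,
                                                     t_total=1000, cycles=2.)
    # approximately [1.0, 0.0, 1.0, 0.0]
    return [scheduler.lr_lambda(step) for step in (100, 549, 550, 1000)]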
class AdamW(Optimizer):
    """ Implements the Adam algorithm with the weight decay fix (decoupled weight decay).

    Parameters:
        lr (float): learning rate. Default 1e-3.
        betas (tuple of 2 floats): Adam's beta parameters (b1, b2). Default: (0.9, 0.999)
        eps (float): Adam's epsilon. Default: 1e-6
        weight_decay (float): Weight decay. Default: 0.0
        correct_bias (bool): can be set to False to avoid correcting bias in Adam
            (e.g. like in the BERT TF repository). Default: True.
    """
    def __init__(self,
                 params,
                 lr=1e-3,
                 betas=(0.9, 0.999),
                 eps=1e-6,
                 weight_decay=0.0,
                 correct_bias=True):
        if lr < 0.0:
            raise ValueError(f"Invalid learning rate: {lr} - should be >= 0.0")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter: {betas[0]} - should be in [0.0, 1.0)")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter: {betas[1]} - should be in [0.0, 1.0)")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps} - should be >= 0.0")
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
                        correct_bias=correct_bias)
        super(AdamW, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, '
                                       'please consider SparseAdam instead')

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # Decay the first and second moment running average coefficients
                # (in-place to avoid extra allocations).
                exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
                denom = exp_avg_sq.sqrt().add_(group['eps'])

                step_size = group['lr']
                if group['correct_bias']:  # no bias correction when mimicking the BERT TF optimizer
                    bias_correction1 = 1.0 - beta1 ** state['step']
                    bias_correction2 = 1.0 - beta2 ** state['step']
                    step_size = step_size * math.sqrt(bias_correction2) / bias_correction1

                p.data.addcdiv_(exp_avg, denom, value=-step_size)

                # Decoupled weight decay: decay the weights directly instead of adding
                # an L2 penalty to the loss, so the decay does not interact with the
                # Adam moment estimates (m/v). This is the "weight decay fix".
                if group['weight_decay'] > 0.0:
                    p.data.add_(p.data, alpha=-group['lr'] * group['weight_decay'])

        return loss
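

# Illustrative sketch of a common way to construct this optimizer for BERT fine-tuning
# (an assumption about typical usage, not something this module enforces): weight decay
# is usually disabled for biases and LayerNorm weights via per-parameter-group options.
# The parameter-name patterns below follow BERT-style models.
def _example_adamw_grouped_parameters(model, lr=2e-5, weight_decay=0.01):
    no_decay = ['bias', 'LayerNorm.weight']
    grouped_parameters = [
        {'params': [p for n, p in model.named_parameters()
                    if not any(nd in n for nd in no_decay)],
         'weight_decay': weight_decay},
        {'params': [p for n, p in model.named_parameters()
                    if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0},
    ]
    # correct_bias=False mirrors the original BERT TensorFlow optimizer
    return AdamW(grouped_parameters, lr=lr, correct_bias=False)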