import math
from typing import Callable, Iterable, Optional, Tuple
import torch
from torch.optim import Optimizer
class AdamW(Optimizer):
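    """Implements AdamW, the Adam optimizer with decoupled weight decay
    (Loshchilov & Hutter, "Decoupled Weight Decay Regularization", 2019).
    Weight decay is applied directly to the parameters rather than added
    to the gradient, keeping it independent of the adaptive learning-rate
    scaling."""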
def __init__(
self,
params: Iterable[torch.nn.parameter.Parameter],
lr: float = 1e-3,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-6,
weight_decay: float = 0.0,
correct_bias: bool = True,
):
if lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(
                "Invalid beta parameter: {} - should be in [0.0, 1.0)".format(betas[0])
            )
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(
                "Invalid beta parameter: {} - should be in [0.0, 1.0)".format(betas[1])
            )
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
defaults = dict(
lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias
)
super().__init__(params, defaults)
    def step(self, closure: Optional[Callable] = None):
        """Performs a single optimization step.

        Arguments:
            closure: a callable that re-evaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():  # the closure re-runs the forward pass, so gradients must be enabled
                loss = closure()
        for group in self.param_groups:
            # Read the hyperparameters for this parameter group once.
            alpha = group["lr"]
            beta1, beta2 = group["betas"]
            eps = group["eps"]
            weight_decay = group["weight_decay"]
            correct_bias = group["correct_bias"]
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
"Adam does not support sparse gradients, please consider SparseAdam instead"
)
state = self.state[p]
                # Lazy state initialization: running averages of the first and
                # second moments of the gradient.
                if len(state) == 0:
                    state["step"] = 0
                    state["exp_avg"] = torch.zeros_like(p.data)  # m_t
                    state["exp_avg_sq"] = torch.zeros_like(p.data)  # v_t
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
state["step"] += 1
t = state["step"]
                # Decoupled weight decay (AdamW): shrink the weights directly,
                # scaled by the learning rate but independent of the gradient.
                if weight_decay != 0:
                    p.data.add_(p.data, alpha=-alpha * weight_decay)
# Update biased first moment estimate
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
# Update biased second raw moment estimate
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                denom = exp_avg_sq.sqrt().add_(eps)
                if correct_bias:
                    # Fold the bias corrections for m_t and v_t into the step
                    # size (Kingma & Ba, Algorithm 1), avoiding extra tensor ops.
                    bias_correction1 = 1 - beta1 ** t
                    bias_correction2 = 1 - beta2 ** t
                    step_size = alpha * math.sqrt(bias_correction2) / bias_correction1
                else:
                    step_size = alpha
                # Parameter update: p <- p - step_size * m_t / (sqrt(v_t) + eps)
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
return loss
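
# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): fits a tiny linear model with the
# AdamW class above. The model, data, and hyperparameter values here are
# arbitrary assumptions for demonstration, not part of the optimizer itself.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    torch.manual_seed(0)
    model = torch.nn.Linear(4, 1)
    optimizer = AdamW(model.parameters(), lr=1e-3, weight_decay=0.01)

    # Synthetic regression data: y = x @ w_true + noise
    x = torch.randn(64, 4)
    y = x @ torch.tensor([[1.0], [-2.0], [0.5], [3.0]]) + 0.01 * torch.randn(64, 1)

    for _ in range(100):
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(x), y)
        loss.backward()
        optimizer.step()
    print(f"final loss: {loss.item():.4f}")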