import math

import torch
import torch.optim as optim


class SharedAdam(optim.Adam):
    """Implements the Adam algorithm with shared state.

    Code for this Adam optimizer is from https://github.com/ikostrikov/pytorch-a3c.
    It is a small change to the default Adam optimizer that shares the momentum
    buffers between processes.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0):
        super(SharedAdam, self).__init__(params, lr, betas, eps, weight_decay)

        # Eagerly create the per-parameter state tensors so they can be moved
        # to shared memory before worker processes are started.
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['step'] = torch.zeros(1)
                state['exp_avg'] = torch.zeros_like(p.data)
                state['exp_avg_sq'] = torch.zeros_like(p.data)

    def share_memory(self):
        # Move the optimizer state into shared memory so that all worker
        # processes update the same step counter and moment estimates.
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['step'].share_memory_()
                state['exp_avg'].share_memory_()
                state['exp_avg_sq'].share_memory_()

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                state = self.state[p]

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                if group['weight_decay'] != 0:
                    grad = grad.add(p.data, alpha=group['weight_decay'])

                # Decay the first and second moment running average coefficients.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

                denom = exp_avg_sq.sqrt().add_(group['eps'])

                step = state['step'].item()
                bias_correction1 = 1 - beta1 ** step
                bias_correction2 = 1 - beta2 ** step
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

                p.data.addcdiv_(exp_avg, denom, value=-step_size)

        return loss
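

# Minimal usage sketch (not part of the original pytorch-a3c code): it assumes
# a toy nn.Linear model and a trivial worker that fits random data, and shows
# the usual A3C-style pattern of sharing one model and one SharedAdam instance
# across torch.multiprocessing workers via share_memory().
def _example_worker(shared_model, optimizer, steps=10):
    # Each worker drives the shared parameters through the shared optimizer
    # state; the random data and the gradients stay local to this worker.
    for _ in range(steps):
        x = torch.randn(8, 4)
        target = torch.randn(8, 2)
        loss = ((shared_model(x) - target) ** 2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()


if __name__ == '__main__':
    import torch.nn as nn
    import torch.multiprocessing as mp

    shared_model = nn.Linear(4, 2)
    shared_model.share_memory()

    optimizer = SharedAdam(shared_model.parameters(), lr=1e-3)
    optimizer.share_memory()

    processes = [mp.Process(target=_example_worker, args=(shared_model, optimizer))
                 for _ in range(2)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()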