hypergrad.py
'''
This code is based on the paper 'On the Iteration Complexity of Hypergradient Computation'.
Source: https://github.com/prolearner/hypertorch/blob/master/hypergrad/hypergradients.py
Original Author: Riccardo Grazzi
'''
from itertools import repeat
from typing import Callable, List

import torch
from torch import Tensor
from torch.autograd import grad as torch_grad
class DifferentiableOptimizer:
    def __init__(self, loss_f, dim_mult, data_or_iter=None):
        """
        Args:
            loss_f: callable with signature (params, hparams, [data optional]) -> loss tensor
            data_or_iter: (x, y) or iterator over the data needed for loss_f
        """
        self.data_iterator = None
        if data_or_iter:
            self.data_iterator = data_or_iter if hasattr(data_or_iter, '__next__') else repeat(data_or_iter)
        self.loss_f = loss_f
        self.dim_mult = dim_mult
        self.curr_loss = None

    def get_opt_params(self, params):
        # Optimizer state: the parameters themselves plus (dim_mult - 1) zero-initialized
        # auxiliary tensors per parameter.
        opt_params = [p for p in params]
        opt_params.extend([torch.zeros_like(p) for p in params for _ in range(self.dim_mult - 1)])
        return opt_params

    def step(self, params, hparams, create_graph):
        raise NotImplementedError

    def __call__(self, params, hparams, create_graph=True):
        with torch.enable_grad():
            return self.step(params, hparams, create_graph)

    def get_loss(self, params, hparams):
        if self.data_iterator:
            data = next(self.data_iterator)
            self.curr_loss = self.loss_f(params, hparams, data)
        else:
            self.curr_loss = self.loss_f(params, hparams)
        return self.curr_loss
class GradientDescent(DifferentiableOptimizer):
    def __init__(self, loss_f, step_size, data_or_iter=None):
        super(GradientDescent, self).__init__(loss_f, dim_mult=1, data_or_iter=data_or_iter)
        self.step_size_f = step_size if callable(step_size) else lambda x: step_size

    def step(self, params, hparams, create_graph):
        loss = self.get_loss(params, hparams)
        sz = self.step_size_f(hparams)
        return gd_step(params, loss, sz, create_graph=create_graph)
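# A single differentiable gradient-descent update: the returned tensors remain in the
# autograd graph, so (with create_graph=True) the update itself can be differentiated
# with respect to both params and hparams. A GradientDescent instance can therefore be
# used directly as the fp_map passed to fixed_point below.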
def gd_step(params, loss, step_size, create_graph=True):
    grads = torch.autograd.grad(loss, params, create_graph=create_graph)
    return [w - step_size * g for w, g in zip(params, grads)]
def grad_unused_zero(output, inputs, grad_outputs=None, retain_graph=False, create_graph=False):
    # Like torch.autograd.grad, but returns zeros (instead of None) for inputs
    # that the output does not depend on.
    grads = torch.autograd.grad(output, inputs, grad_outputs=grad_outputs, allow_unused=True,
                                retain_graph=retain_graph, create_graph=create_graph)

    def grad_or_zeros(grad, var):
        return torch.zeros_like(var) if grad is None else grad

    return tuple(grad_or_zeros(g, v) for g, v in zip(grads, inputs))
def get_outer_gradients(outer_loss, params, hparams, retain_graph=True):
    grad_outer_w = grad_unused_zero(outer_loss, params, retain_graph=retain_graph)
    grad_outer_hparams = grad_unused_zero(outer_loss, hparams, retain_graph=retain_graph)
    return grad_outer_w, grad_outer_hparams
def update_tensor_grads(hparams, grads):
    # Accumulate the computed hypergradients into .grad, mirroring loss.backward().
    for l, g in zip(hparams, grads):
        if l.grad is None:
            l.grad = torch.zeros_like(l)
        if g is not None:
            l.grad += g
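# fixed_point below estimates the hypergradient with repeated vector-Jacobian products
# against the fixed point map Phi = fp_map: starting from v_0 = 0 it iterates
#     v_{k+1} = (d Phi / d params)^T v_k + grad_outer_w
# and finally returns
#     hypergrad = grad_outer_hparams + (d Phi / d hparams)^T v_K,
# which approximates the gradient of the outer loss at a fixed point of Phi.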
def fixed_point(params: List[Tensor],
                hparams: List[Tensor],
                K: int,
                fp_map: Callable[[List[Tensor], List[Tensor]], List[Tensor]],
                outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
                tol=1e-10,
                set_grad=True,
                stochastic=False) -> List[Tensor]:
    """
    Computes the hypergradient by applying K steps of the fixed point method (it can end earlier when tol is reached).

    Args:
        params: the output of the inner solver procedure.
        hparams: the outer variables (or hyperparameters); each element needs requires_grad=True
        K: the maximum number of fixed point iterations
        fp_map: the fixed point map which defines the inner problem
        outer_loss: computes the outer objective taking parameters and hyperparameters as inputs
        tol: end the method earlier when the norm of the difference between two iterates is less than tol
        set_grad: if True, set t.grad to the hypergradient for every t in hparams
        stochastic: set this to True when fp_map is not a deterministic function of its inputs

    Returns:
        the list of hypergradients for each element in hparams
    """
    params = [w.detach().requires_grad_(True) for w in params]
    o_loss = outer_loss(params, hparams)
    grad_outer_w, grad_outer_hparams = get_outer_gradients(o_loss, params, hparams)

    if not stochastic:
        w_mapped = fp_map(params, hparams)

    vs = [torch.zeros_like(w) for w in params]
    vs_vec = cat_list_to_tensor(vs)
    for k in range(K):
        vs_prev_vec = vs_vec

        if stochastic:
            w_mapped = fp_map(params, hparams)
            vs = torch_grad(w_mapped, params, grad_outputs=vs, retain_graph=False)
        else:
            vs = torch_grad(w_mapped, params, grad_outputs=vs, retain_graph=True)

        vs = [v + gow for v, gow in zip(vs, grad_outer_w)]
        vs_vec = cat_list_to_tensor(vs)
        if float(torch.norm(vs_vec - vs_prev_vec)) < tol:
            break

    if stochastic:
        w_mapped = fp_map(params, hparams)

    grads = torch_grad(w_mapped, hparams, grad_outputs=vs, allow_unused=True)
    grads = [g + v if g is not None else v for g, v in zip(grads, grad_outer_hparams)]

    if set_grad:
        update_tensor_grads(hparams, grads)
    return grads
def cat_list_to_tensor(list_tx):
    return torch.cat([xx.reshape([-1]) for xx in list_tx])
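

# Minimal, illustrative usage sketch: a toy ridge-regression-style bilevel problem where
# the hyperparameter is an L2 regularization weight. All names below (x, y, x_val, y_val,
# inner_loss, val_loss, lam, w) are placeholders assumed for this sketch.
if __name__ == '__main__':
    torch.manual_seed(0)
    x, y = torch.randn(32, 5), torch.randn(32, 1)          # "training" data for the inner problem
    x_val, y_val = torch.randn(32, 5), torch.randn(32, 1)  # "validation" data for the outer objective

    def inner_loss(params, hparams):
        w, lam = params[0], hparams[0]
        return ((x @ w - y) ** 2).mean() + lam * (w ** 2).sum()

    def val_loss(params, hparams):
        w = params[0]
        return ((x_val @ w - y_val) ** 2).mean()

    hparams = [torch.tensor(0.1, requires_grad=True)]
    params = [torch.zeros(5, 1, requires_grad=True)]

    inner_opt = GradientDescent(inner_loss, step_size=0.05)

    # Approximately solve the inner problem; no graph needs to be kept across steps here.
    for _ in range(200):
        params = inner_opt(params, hparams, create_graph=False)
        params = [p.detach().requires_grad_(True) for p in params]

    # One differentiable inner step serves as the fixed point map.
    hypergrads = fixed_point(params, hparams, K=50,
                             fp_map=lambda p, h: inner_opt(p, h, create_graph=True),
                             outer_loss=val_loss)
    print('hypergradient w.r.t. the regularization weight:', hypergrads[0].item())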