-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathfabolas.py
More file actions
98 lines (80 loc) · 3.22 KB
/
fabolas.py
File metadata and controls
98 lines (80 loc) · 3.22 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
from hpo import HPO
import math
import numpy as np
import random
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern
class FabolasHPO(HPO):
    """FABOLAS-style Bayesian hyperparameter optimization.

    Evaluates random configurations on increasing fractions of the resource
    budget, then fits two Gaussian processes — one modeling the monitored
    metric (loss) and one modeling the resource fraction used (a cost
    proxy) — and proposes new configurations by maximizing an acquisition
    function over random candidates.

    NOTE(review): `optimize` and `_acquisition_function` are still stubs in
    this version; the main loop has no stopping criterion.
    """

    def __init__(
        self,
        domains,
        max_resources,
        resource_type='epochs',
        metric_to_monitor="loss",
        monitor_mode=min,
        n_epochs=20,
        n_configs=400,
        n_hp=50,
        s=None,
    ):
        """Configure the optimizer.

        Args:
            domains: mapping of hyperparameter name -> domain; tuple domains
                are treated as numeric (low, high) ranges (see _config2vec).
            max_resources: full resource budget per training run.
            resource_type: unit of `max_resources` (e.g. 'epochs').
            metric_to_monitor: key read from `evaluate_model` results.
            monitor_mode: the builtin `min` or `max` callable (NOT the
                strings 'min'/'max'), giving the optimization direction.
            n_epochs: number of optimization epochs (currently unused by
                the visible code — presumably for the main loop; TODO confirm).
            n_configs: number of random configurations for the initial design.
            n_hp: number of random candidates scored per acquisition round.
            s: resource fractions to evaluate each initial config at;
                defaults to the geometric grid 2^-9 .. 2^0.
        """
        # The check compares against the builtins, so the message must say
        # so (the original message misleadingly asked for strings).
        assert monitor_mode in (min, max), \
            "monitor_mode must be the builtin min or max"
        super().__init__(
            resource_type=resource_type,
            max_resources=max_resources,
            domains=domains,
        )
        self.n_epochs = n_epochs
        self.n_configs = n_configs
        # BUG FIX: n_hp was accepted but never stored, yet
        # _optimize_acquisition reads self.n_hp (AttributeError before).
        self.n_hp = n_hp
        # BUG FIX: the default was a mutable argument (s=[]); copy any
        # caller-supplied sequence and build the default grid otherwise.
        self.s = list(s) if s else [2 ** i for i in range(-9, 1)]
        self.metric_to_monitor = metric_to_monitor
        self.monitor_mode = monitor_mode
        # Sentinel "worst possible" metric value oriented by the monitoring
        # direction (+inf when minimizing, -inf when maximizing).
        self.inf = float('inf') if self.monitor_mode == min else float('-inf')
        self.kernel = Matern(nu=5 / 2)
        self.gp_loss = GaussianProcessRegressor(kernel=self.kernel, normalize_y=True)
        self.gp_cost = GaussianProcessRegressor(kernel=self.kernel, normalize_y=True)

    def optimize(self):
        """Run the optimization: initial design, then GP-guided search.

        Builds the initial (config vector, loss, fraction) dataset by
        training every random config at every fraction in `self.s`, then
        repeatedly refits both GPs and proposes a new configuration.
        """
        configs = self._get_random_configs(self.n_configs)
        models = [self.build_model(config) for config in configs]
        config_loss_cost = []
        for i, config in enumerate(configs):
            model = models[i]
            for s_i in self.s:
                # Maybe reset the model? (try to not warm up too much)
                _ = self.allocate_resources(
                    model=model,
                    config=config,
                    resources=s_i * self.max_resources,
                )
                results = self.evaluate_model(model=model)
                # BUG FIX: honor the configured metric instead of the
                # hard-coded "loss" key. The default metric_to_monitor is
                # "loss", so default callers see identical behavior.
                loss = results[self.metric_to_monitor]
                # Tuple of (config vector, loss, s_i) added to the list
                config_loss_cost.append((self._config2vec(config), loss, s_i))
        while True:
            self.gp_loss.fit([x[0] for x in config_loss_cost],
                             [x[1] for x in config_loss_cost])
            self.gp_cost.fit([x[0] for x in config_loss_cost],
                             [x[2] for x in config_loss_cost])
            best_config = self._optimize_acquisition()
            # TODO: evaluate best_config, append the observation to
            # config_loss_cost, and add a stopping criterion — this loop
            # currently never terminates and discards best_config.
            pass

    def _config2vec(self, config):
        """Convert a config dict to a normalized numpy vector.

        Only tuple-valued domains (numeric (low, high) ranges) contribute a
        component; each value is min-max scaled to [0, 1]. Non-tuple
        domains (e.g. categorical) are silently skipped — presumably
        handled elsewhere; TODO confirm.
        """
        vec = []
        for param, domain in self.domains.items():
            if isinstance(domain, tuple):
                val = config[param]
                # Normalize to [0, 1] within the declared range.
                vec.append((val - domain[0]) / (domain[1] - domain[0]))
        return np.array(vec)

    def _optimize_acquisition(self):
        """Return the random candidate config with the best acquisition value.

        Samples `self.n_hp` random configurations (each paired with a random
        resource fraction) and keeps the one maximizing the acquisition.
        """
        best_config = None
        # BUG FIX: the start value must be -inf unconditionally because the
        # acquisition is *maximized* below. The original used -self.inf,
        # which is +inf in max mode, so no candidate could ever win and
        # None was always returned.
        best_acquisition_value = -math.inf
        for _ in range(self.n_hp):
            # Sample a random configuration and a random resource fraction.
            config = self._get_random_config()
            vec = self._config2vec(config)
            s_i = random.choice(self.s)
            result = self._acquisition_function(vec, s_i)
            if result > best_acquisition_value:
                best_acquisition_value = result
                best_config = config
        return best_config

    def _acquisition_function(self, vec, s_i):
        """Score a (config vector, resource fraction) candidate.

        BUG FIX: the original signature took only `vec`, but the call site
        in _optimize_acquisition passes (vec, s_i) — a guaranteed
        TypeError. Still a stub: returns None until implemented.
        """
        pass