import numpy as np
import random
import time
import os
import math
import json
import argparse
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import wandb
from main_code.utils.torch_objects import Tensor, LongTensor, device
from main_code.utils.utils import AverageMeter
from main_code.utils.data.data_loader import TSP_DATA_LOADER__RANDOM
from main_code.utils.logging.logging import Get_Logger
from main_code.environment.environment import GroupEnvironment
from main_code.utils.config.config import get_config
from main_code.testing.tsp_tester import TSPTester
from main_code.agents.policy_agent import PolicyAgent
from main_code.training.curriculum_scheduler import (
CurriculumScheduler,
StochasticCurriculumScheduler,
)
from main_code.nets.pomo import PomoNetwork


def train(
config,
invariance_weight=0.0,
use_curriculum_lr=False,
curriculum_stochastic=0,
curriculum_start=10,
curriculum_step_epoch=1,
curriculum_step_size=1,
curriculum_stddev=1.0,
save_dir="./logs",
save_folder_name="train",
num_workers=2,
):
# define wandb metrics
wandb.define_metric("epoch")
wandb.define_metric("train/*", step_metric="epoch")
wandb.define_metric("valid/*", summary="min", step_metric="epoch")
# Make Log File
logger, result_folder_path = Get_Logger(save_dir, save_folder_name)
# Objects to Use
actor = PomoNetwork(config).to(device)
actor.optimizer = optim.Adam(
actor.parameters(),
lr=config.ACTOR_LEARNING_RATE,
weight_decay=config.ACTOR_WEIGHT_DECAY,
)
actor.lr_stepper = lr_scheduler.StepLR(
actor.optimizer, step_size=config.LR_DECAY_EPOCH, gamma=config.LR_DECAY_GAMMA
)
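    # Curriculum setup: training starts on instances of curriculum_start nodes
    # and grows towards the full config.TSP_SIZE, either in fixed steps every
    # curriculum_step_epoch epochs or stochastically (sizes drawn around the
    # schedule with spread curriculum_stddev); without curriculum learning the
    # start size is simply set to the target size below.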
    if not use_curriculum_lr:
        curriculum_start = config.TSP_SIZE
if curriculum_stochastic:
curriculum_scheduler = StochasticCurriculumScheduler(
curriculum_start, config.TSP_SIZE, curriculum_stddev
)
else:
curriculum_scheduler = CurriculumScheduler(
curriculum_start,
config.TSP_SIZE,
curriculum_step_epoch,
curriculum_step_size,
)
# GO
timer_start = time.time()
best_valid_avg_len = dict()
best_valid_avg_error = dict()
for epoch in range(1, config.TOTAL_EPOCH + 1):
log_package = {"epoch": epoch, "timer_start": timer_start, "logger": logger}
# TRAIN
#######################################################
train_avg_len, actor_loss = train_one_epoch(
config, actor, curriculum_scheduler, invariance_weight, **log_package
)
# EVAL
#######################################################
improvement = True
sizes, valid_results = validate(config, actor, num_workers)
log_data = {
"epoch": epoch,
"train/avg_length": train_avg_len,
"train/actor_loss": actor_loss,
}
log_strings = []
for size, valid_result in zip(sizes, valid_results):
# in training distribution validation
valid_avg_error = valid_result.avg_approx_error
valid_avg_len = valid_result.avg_length
# out of training distribution validation
# track the best validation performance
# only save checkpoint if performance on all validation sets increased
if epoch == 1:
best_valid_avg_len[size] = valid_avg_len
best_valid_avg_error[size] = valid_avg_error
elif valid_avg_len < best_valid_avg_len[size]:
best_valid_avg_len[size] = valid_avg_len
best_valid_avg_error[size] = valid_avg_error
else:
improvement = False
valid_logs = {
f"valid/avg_length_{size}": valid_avg_len,
f"valid/avg_error_{size}": valid_avg_error,
f"valid/best_avg_length_{size}": best_valid_avg_len[size],
f"valid/best_avg_error_{size}": best_valid_avg_error[size],
}
log_data.update(valid_logs)
# update string for graphical logging
log_str = " <<< EVAL after Epoch:{:03d} for size {} >>> Avg.dist:{:5f} Avg.error:{:5f}%".format(
epoch, size, valid_avg_len, valid_avg_error
)
log_strings.append(log_str)
wandb.log(log_data)
fill_str = (
"--------------------------------------------------------------------------"
)
logger.info(fill_str)
for log_str in log_strings:
logger.info(log_str)
logger.info(fill_str)
# only save checkpoint if the performance has improved --> the last checkpoint is always the best
if improvement:
checkpoint_folder_path = "{}/CheckPoint_ep{:05d}".format(
result_folder_path, epoch
)
os.mkdir(checkpoint_folder_path)
model_save_path = "{}/ACTOR_state_dic.pt".format(checkpoint_folder_path)
torch.save(actor.state_dict(), model_save_path)
optimizer_save_path = "{}/OPTIM_state_dic.pt".format(checkpoint_folder_path)
torch.save(actor.optimizer.state_dict(), optimizer_save_path)
lr_stepper_save_path = "{}/LRSTEP_state_dic.pt".format(
checkpoint_folder_path
)
torch.save(actor.lr_stepper.state_dict(), lr_stepper_save_path)
# save config
with open(f"{checkpoint_folder_path}/config.json", "w") as f:
json.dump(config._items, f, indent=2)


def train_one_epoch(
config,
actor_group,
curriculum_scheduler,
invariance_weight,
epoch,
timer_start,
logger,
):
actor_group.train()
distance_AM = AverageMeter()
actor_loss_AM = AverageMeter()
tsp_size = curriculum_scheduler(epoch)
train_loader = TSP_DATA_LOADER__RANDOM(
num_samples=config.TRAIN_DATASET_SIZE,
num_nodes=tsp_size,
batch_size=config.TRAIN_BATCH_SIZE,
)
episode = 0
for data in train_loader:
        # data.shape = (batch_s, tsp_size, 2)
batch_s = data.size(0)
episode = episode + batch_s
# Actor Group Move
###############################################
env = GroupEnvironment(data, tsp_size)
group_s = tsp_size
group_state, reward, done = env.reset(group_size=group_s)
actor_group.reset(group_state)
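        # POMO multi-start: each of the group_s == tsp_size trajectories is
        # forced to begin at a distinct node, so a single instance yields
        # tsp_size diverse rollouts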
# First Move is given
first_action = LongTensor(np.arange(group_s))[None, :].expand(batch_s, group_s)
group_state, reward, done = env.step(first_action)
group_prob_list = Tensor(np.zeros((batch_s, group_s, 0)))
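        # accumulates the probability assigned to each chosen action, one slice
        # per decoding step; final shape is (batch, group, tsp_size - 1)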
while not done:
# actor_group.update(group_state)
action_probs = actor_group.get_action_probabilities(group_state)
# shape = (batch, group, TSP_SIZE)
action = (
action_probs.reshape(batch_s * group_s, -1)
.multinomial(1)
.squeeze(dim=1)
.reshape(batch_s, group_s)
)
# shape = (batch, group)
group_state, reward, done = env.step(action)
batch_idx_mat = torch.arange(batch_s)[:, None].expand(batch_s, group_s)
group_idx_mat = torch.arange(group_s)[None, :].expand(batch_s, group_s)
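            # advanced indexing picks, for every (batch, group) pair, the
            # probability of the action that was actually sampled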
chosen_action_prob = action_probs[
batch_idx_mat, group_idx_mat, action
].reshape(batch_s, group_s)
# shape = (batch, group)
group_prob_list = torch.cat(
(group_prob_list, chosen_action_prob[:, :, None]), dim=2
)
# LEARNING - Actor
###############################################
group_reward = reward
group_log_prob = group_prob_list.log().sum(dim=2)
# shape = (batch, group)
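        # REINFORCE with a shared baseline: the mean reward over the group is
        # the baseline for every rollout of the same instance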
group_advantage = group_reward - group_reward.mean(dim=1, keepdim=True)
        # gradient ascent: therefore multiply by minus one
group_loss = -group_advantage * group_log_prob
rl_loss = group_loss.mean()
# calculate invariance loss
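        # the gap between the best tour in the group and each tour measures how
        # inconsistently the policy handles symmetric rollouts of one instance;
        # weighting the log-probabilities by its mean penalizes that spread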
# group_variance = group_reward.max(dim=1, keepdim=True)[0] - group_reward
group_variance = (group_reward.max(dim=1, keepdim=True)[0] - group_reward).mean(
dim=1, keepdim=True
)
        # shape = (batch, 1)
invariance_loss = (group_variance * group_log_prob).sum(dim=1).mean()
        # scalar
loss = rl_loss + invariance_weight * invariance_loss
actor_group.optimizer.zero_grad()
loss.backward()
actor_group.optimizer.step()
# RECORDING
###############################################
max_reward, _ = group_reward.max(dim=1)
distance_AM.push(-max_reward) # reward was given as negative dist
actor_loss_AM.push(group_loss.detach().reshape(-1))
# LOGGING
###############################################
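        # log roughly 20 times per epoch, aligned to batch boundaries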
log_episode = (
math.ceil(config.TRAIN_DATASET_SIZE / (20 * config.TRAIN_BATCH_SIZE))
* config.TRAIN_BATCH_SIZE
)
if episode % log_episode == 0 or episode == config.TRAIN_DATASET_SIZE:
timestr = time.strftime("%H:%M:%S", time.gmtime(time.time() - timer_start))
actor_loss_result = actor_loss_AM.result()
avg_tour_len = distance_AM.result()
log_str = "Ep:{:03d}-{:07d}({:5.1f}%) Size:{} T:{:s} ALoss:{:+5f} CLoss:{:5f} Avg.dist:{:5f}".format(
epoch,
episode,
episode / config.TRAIN_DATASET_SIZE * 100,
tsp_size,
timestr,
actor_loss_result,
                0,  # CLoss placeholder: there is no critic loss in this setup
avg_tour_len,
)
logger.info(log_str)
    # LR STEP, after each epoch
    actor_group.lr_stepper.step()
    # read the meters directly so the return values are defined even if the
    # final batch did not hit the logging condition above
    return distance_AM.result(), actor_loss_AM.result()


def validate(config, actor_group, num_workers=2):
agent = PolicyAgent(actor_group)
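    # presumably wraps the raw network so the tester can query it through the
    # same agent interface used elsewhere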
valid_results = []
sizes = []
    # iterate over all given validation sets
    for valid_path in config.valid_paths:
        # file names are expected to end in ..._<num_nodes>_<num_samples>
        num_samples = int(valid_path.split("_")[-1])
        num_nodes = int(valid_path.split("_")[-2])
tester = TSPTester(
num_trajectories=num_nodes,
num_nodes=num_nodes,
num_samples=num_samples,
sampling_steps=1,
use_pomo_aug=False,
test_set_path=valid_path,
test_batch_size=config.TEST_BATCH_SIZE,
num_workers=num_workers,
)
# run test
test_result = tester.test(agent)
sizes.append(num_nodes)
valid_results.append(test_result)
return sizes, valid_results


def log_results():
pass


if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config_path", type=str, default="./configs/tsp20.json")
    # curriculum learning
parser.add_argument("--use_curriculum_lr", type=int, default=0)
parser.add_argument("--curriculum_start", type=int, default=5)
parser.add_argument("--curriculum_step_epoch", type=int, default=1)
parser.add_argument("--curriculum_step_size", type=int, default=1)
parser.add_argument("--curriculum_stochastic", type=int, default=0)
parser.add_argument("--curriculum_stddev", type=float, default=1)
# invariance loss
parser.add_argument("--use_invariance_loss", type=int, default=0)
parser.add_argument("--invariance_weight", type=float, default=0.0)
parser.add_argument("--num_workers", type=int, default=2)
parser.add_argument("--save_dir", type=str, default="./results/train")
parser.add_argument("--save_folder_name", type=str, default="train")
parser.add_argument("--wandb_mode", type=str, default="disabled")
opts = parser.parse_known_args()[0]
# set seeds
np.random.seed(37)
random.seed(37)
torch.manual_seed(37)
torch.cuda.manual_seed(1)
torch.cuda.manual_seed_all(1)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# torch.use_deterministic_algorithms(True)
# get config
config = get_config(opts.config_path)
wandb.init(
config=config,
mode=opts.wandb_mode,
# group=config.experiment_name,
job_type="training",
)
config = wandb.config
train(
config,
invariance_weight=opts.invariance_weight,
use_curriculum_lr=opts.use_curriculum_lr,
curriculum_stochastic=opts.curriculum_stochastic,
curriculum_start=opts.curriculum_start,
curriculum_step_epoch=opts.curriculum_step_epoch,
curriculum_step_size=opts.curriculum_step_size,
curriculum_stddev=opts.curriculum_stddev,
save_dir=opts.save_dir,
save_folder_name=opts.save_folder_name,
num_workers=opts.num_workers,
)