eval_model.py
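"""Evaluate a trained SCRIMP model on a fixed set of MAPF cases.

For each case (agent count / world size / obstacle probability) the script runs
NUM_TIMES episodes, records episode length, goals reached, collisions, and
success rate, and optionally logs the aggregated results to wandb.
"""
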
import os
import json
import argparse

import numpy as np
import torch
import wandb

from alg_parameters import SetupParameters, RecordingParameters, EnvParameters
from episodic_buffer import EpisodicBuffer
from mapf_gym import MAPFEnv
from model import Model
from util import (reset_env, make_gif, set_global_seeds, get_torch_device,
                  wandb_eval_log)
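
# Number of evaluation episodes to run for each case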
NUM_TIMES = 100
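# Each case is [number of agents, world size, obstacle probability],
# unpacked below as num_agent, world_size, obstacle_prob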
CASE = [
    [10, 15, 0.0], [10, 15, 0.15], [10, 15, 0.3],
    [20, 15, 0.0], [20, 15, 0.15], [20, 15, 0.3],
    [30, 15, 0.0], [30, 15, 0.15], [30, 15, 0.3],
    [40, 15, 0.0], [40, 15, 0.15], [40, 15, 0.3],
]
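
# Fix all random seeds so evaluation runs are reproducible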
set_global_seeds(SetupParameters.SEED)


def one_step(env0, actions, model0, pre_value, input_state, ps, episode_perf,
             message, block, episodic_buffer0):
    """Run one step of the environment"""
    obs, vector, reward, done, _, on_goal, _, _, _, _, _, max_on_goal, \
        num_collide, _, modify_actions \
        = env0.joint_step(actions, episode_perf['episode_len'], model0, pre_value,
                          input_state, ps, no_reward=False, message=message,
                          block=block, episodic_buffer=episodic_buffer0)

    vector[:, :, -1] = modify_actions
    episode_perf['episode_len'] += 1
    episode_perf['collide'] += num_collide
    return reward, obs, vector, done, episode_perf, max_on_goal, on_goal


def eval_episode(env, model, device, episodic_buffer0, num_agent, save_gif):
    """Evaluate one episode of the trained model"""
    episode_frames = []

    # Reset environment
    done, _, obs, vector, _ = reset_env(env, num_agent)
    message = Model.init_message(num_agent, device)
    hidden_state = Model.init_hidden_state(num_agent, device)

    # Reset buffer
    episodic_buffer0.reset(2e6, num_agent)
    new_xy = env.get_positions()
    episodic_buffer0.batch_add(new_xy)

    episode_perf = {'episode_len': 0, 'max_goals': 0, 'collide': 0, 'success_rate': 0}

    # Run episode
    while not done:
        if save_gif:
            episode_frames.append(env._render())

        # Predict
        actions, hidden_state, v_all, ps, message, block \
            = model.final_evaluate(obs, vector, hidden_state, message, num_agent)

        # Move
        rewards, obs, vector, done, episode_perf, max_on_goals, on_goal \
            = one_step(env, actions, model, v_all, hidden_state, ps,
                       episode_perf, message, block, episodic_buffer0)

        # Compute intrinsic rewards
        new_xy = env.get_positions()
        processed_rewards, _, intrinsic_reward, min_dist \
            = episodic_buffer0.if_reward(new_xy, rewards, done, on_goal)
        vector[:, :, 3] = rewards
        vector[:, :, 4] = intrinsic_reward
        vector[:, :, 5] = min_dist

    # Compute episode performance
    if episode_perf['episode_len'] < EnvParameters.EPISODE_LEN - 1:
        episode_perf['success_rate'] = 1
    episode_perf['max_goals'] = max_on_goals
    episode_perf['collide'] = (episode_perf['collide'] / num_agent
                               / (episode_perf['episode_len'] + 1))

    # Save GIF
    if save_gif:
        if not os.path.exists(RecordingParameters.GIFS_PATH):
            os.makedirs(RecordingParameters.GIFS_PATH)
        episode_frames.append(env._render())
        images = np.array(episode_frames)
        image_name = f'agent_{num_agent}_grid_{env.SIZE}_obs_{env.PROB}.gif'
        make_gif(images, os.path.join(RecordingParameters.GIFS_PATH, image_name))

    return episode_perf


def eval_model(model_save=None, expt_name='SCRIMP_Eval', use_wandb=True,
               device=torch.device('cpu')):
    """Evaluate the trained model"""
    # Get the trained model
    model_save = model_save or os.path.join('final', RecordingParameters.MODEL_SAVE)
    if not os.path.exists(model_save):
        raise FileNotFoundError(f"'{model_save}' does not exist!")
    model_dict = torch.load(model_save, map_location=device)
    model = Model(0, device)
    model.network.load_state_dict(model_dict['model'])
    print(f'Loaded the trained model. ({model_save})\n')

    # Recording
    eval_data = []
    if use_wandb:
        wandb_id = wandb.util.generate_id()
        wandb.init(project='MAPF_evaluation',
                   name=expt_name,
                   # entity=RecordingParameters.ENTITY,
                   notes=f'Training state: {json.dumps(model_dict["training_state"])}',
                   config=model_dict['all_configs'],
                   id=wandb_id,
                   resume='allow')
        print(f'Launched wandb. (ID: {wandb_id})\n')

    # Start evaluation for each experiment case
    print('Start evaluation.\n')
    print('-' * 70)
    for n, eval_params in enumerate(CASE):
        print(f'[Case: {n+1}/{len(CASE)}]')
        save_gif = True  # Save one GIF for each case
        num_agent, world_size, obstacle_prob = eval_params
        env = MAPFEnv(num_agent, world_size, obstacle_prob, mode='eval')
        episodic_buffer = EpisodicBuffer(total_step=2e6, num_agent=num_agent)
        all_perf = {'episode_len': [], 'max_goals': [],
                    'collide': [], 'success_rate': []}
        print(f'Agent: {num_agent} World: {world_size} Obstacle: {obstacle_prob}')

        # Evaluation loop
        for j in range(NUM_TIMES):
            # Evaluation
            episode_perf = eval_episode(env, model, device, episodic_buffer,
                                        num_agent, save_gif)

            # Record metrics of one episode
            for metric in episode_perf.keys():
                if metric == 'episode_len':
                    # Record episode_len only for successful episodes
                    if episode_perf['success_rate'] == 1:
                        all_perf[metric].append(episode_perf[metric])
                else:
                    all_perf[metric].append(episode_perf[metric])
            # Record GIF only for the first episode
            save_gif = False

            if (j+1) % 20 == 0:
                print(f'Finished {j+1}/{NUM_TIMES} episodes.')
        print(f'Finished all {NUM_TIMES} episodes.')

        # Compute mean metrics
        perf_mean, perf_std = {}, {}
        for i in all_perf.keys():  # for all episodes
            perf_mean[i] = np.nanmean(all_perf[i])
            if i != 'success_rate':
                perf_std[i] = np.nanstd(all_perf[i])

        # Log results
        eval_data.append({'eval_params': eval_params,
                          'perf_mean': perf_mean,
                          'perf_std': perf_std})
        mean_log = f"EL: {perf_mean['episode_len']:.3f} ({perf_std['episode_len']:.3f}) " \
                   f"MR: {perf_mean['max_goals']:.3f} ({perf_std['max_goals']:.3f}) " \
                   f"CO: {perf_mean['collide']:.3f} ({perf_std['collide']:.3f}) " \
                   f"SR: {perf_mean['success_rate']:.3f}"
        print(mean_log)
        print('-' * 70)

    # Write results to wandb
    if use_wandb:
        wandb_eval_log(eval_data, model_dict['all_configs'])
        wandb.finish()

    print('Completed evaluation.')


if __name__ == "__main__":
    # Create the parser
    parser = argparse.ArgumentParser(description='Evaluate a trained model.')
    # Model path argument
    parser.add_argument('model_path', type=str, nargs='?', default='final',
                        help='directory of the trained model, defaults to \'./final\'')
    # GPU argument
    parser.add_argument('-g', '--gpu', action='store_true', help='use GPU if specified')
    # Wandb argument
    parser.add_argument('--off-wandb', action='store_true', help='turn off wandb')
    # Expt name argument
    parser.add_argument('-n', '--expt-name', type=str, default='SCRIMP_Eval',
                        help='name of the experiment, defaults to \'SCRIMP_Eval\'')
    # Parse the arguments
    args = parser.parse_args()

    # Check if the provided path is a directory
    if not os.path.isdir(args.model_path):
        raise ValueError('The provided model path is not a directory!')
    model_save = os.path.join(args.model_path, RecordingParameters.MODEL_SAVE)
    device = get_torch_device(use_gpu=args.gpu)
    use_wandb = not args.off_wandb

    eval_model(model_save, args.expt_name, use_wandb, device)
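
# Example invocations (assuming a trained checkpoint saved under ./final,
# the default model directory used above):
#   python eval_model.py                       # CPU, wandb enabled, defaults
#   python eval_model.py ./final -g            # evaluate on GPU
#   python eval_model.py --off-wandb -n my_run # no wandb, custom experiment name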