# Copyright (c) 2018-present, Anurag Tiwari.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Actor to generate trajectories."""
import os
import time

import deepmind_lab
import ray
import torch

import utils
from model import model_A3C

# dict_values is not indexable in Python 3, so materialise the action dict's
# values into a list before indexing by sampled action id.
ACTION_LIST = list(utils.getactions().values())
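
# DeepMind Lab expects each action as a length-7 array of numpy intc values
# (look left/right, look down/up, strafe, move back/forward, fire, jump,
# crouch). The entry below is illustrative only; the real mapping lives in
# utils.getactions():
#
#   'forward': [0, 0, 0, 1, 0, 0, 0]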


class trajectory(object):
    """Container for one rollout's worth of transition data."""

    def __init__(self):
        self.states = []
        self.actions = []
        self.rewards = []
        self.pi_at_st = []    # behaviour-policy probability of each chosen action
        self.actor_id = None
        self.lstm_hin = None  # LSTM hidden state at the start of the rollout
        self.lstm_cin = None  # LSTM cell state at the start of the rollout
        self.terminal = False

    def append(self, state, action, reward, pi, step):
        # `step` is accepted from callers but not stored.
        self.states += [state]
        self.actions += [action]
        self.rewards += [reward]
        self.pi_at_st += [pi]

    def length(self):
        return len(self.rewards)
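

# A minimal helper sketch (not part of the original pipeline) showing one way
# a consumer could turn a rollout's rewards into discounted returns. `gamma`
# is an assumed hyperparameter, and `None` rewards (from the restart path in
# Actor.run_train below) are treated as zero.
def discounted_returns(rollout, gamma=0.99):
    """Return the discounted return G_t for every step of `rollout`."""
    returns, g = [], 0.0
    for r in reversed(rollout.rewards):
        g = (r if r is not None else 0.0) + gamma * g
        returns.append(g)
    returns.reverse()
    return returns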


@ray.remote(num_gpus=1)
class Actor(object):
    """Simple actor for DeepMind Lab."""

    def __init__(self, idx, length, level, config, ps, savemodel_path,
                 loadmodel_path, test):
        # Run the actor on a fractional GPU (assumes a 4-GPU machine); see
        # https://github.com/ray-project/ray/issues/402#issuecomment-363590303
        os.environ["CUDA_VISIBLE_DEVICES"] = str(ray.get_gpu_ids()[0] % 4)
        print("Initialize Actor environment gpu id: ",
              os.environ["CUDA_VISIBLE_DEVICES"])
        self.id = idx
        self.steps = 0
        self.parameterserver = ps
        self.length = length
        self.env = deepmind_lab.Lab(level, ['RGB_INTERLEAVED'], config=config)
        self.env.reset()
        self.model = model_A3C(isActor=True).cuda()
        self.lstm_init = torch.zeros(1, 256).cuda()  # TODO: remove hardcoded LSTM size
        self.cin = self.lstm_init
        self.hin = self.lstm_init
        self.rewards = 0
        self.savepath = savemodel_path
        self.loadpath = loadmodel_path
        self.test = test
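
    # The `config` dict passed to deepmind_lab.Lab above takes string values
    # per the DeepMind Lab Python API, e.g. {'fps': '60', 'width': '96',
    # 'height': '72'} (illustrative numbers; callers supply the real config).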

    def run_train(self):
        """Run the env for up to `self.length` steps and return a trajectory rollout."""
        # Sync the latest weights from the parameter server before acting.
        weights = ray.get(self.parameterserver.pull.remote())
        self.model.load_state_dict(weights)
        rollout = trajectory()
        rollout.actor_id = self.id
        totalreward = 0
        self.steps += 1
        # Record the LSTM state at the start of the rollout so the learner
        # can replay the trajectory from the same recurrent state.
        rollout.lstm_hin = self.hin.tolist()
        rollout.lstm_cin = self.cin.tolist()
        obs, action, reward, pi = None, None, None, None
        for _ in range(self.length):
            if not self.env.is_running():
                print('Environment stopped. Restarting...')
                self.rewards = 0
                self.env.reset()
                self.steps = 0
                self.cin = self.lstm_init
                self.hin = self.lstm_init
                obs = self.env.observations()
                rollout.append(obs['RGB_INTERLEAVED'], action, reward, pi, self.steps)
                rollout.terminal = True
                break
            obs = self.env.observations()
            img_tensor = utils.createbatch([obs['RGB_INTERLEAVED']])
            prob, (self.hin, self.cin) = self.model(img_tensor, self.cin, self.hin)
            # Sample an action from the policy distribution.
            action_idx = prob.multinomial(1)[0].tolist()[0]
            pi = prob[0][action_idx].tolist()
            action = ACTION_LIST[action_idx]
            reward = self.env.step(action, num_steps=4)  # action repeat of 4
            totalreward += reward
            action = action_idx  # store the action index, not the raw action array
            rollout.append(obs['RGB_INTERLEAVED'], action, reward, pi, self.steps)
        self.rewards += totalreward
        return rollout
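
    # Sketch of how a learner might use the stored behaviour-policy
    # probabilities for off-policy correction (hypothetical name: `target_pi[t]`
    # is the learner policy's probability of the action stored at step t):
    #
    #   rho_t = target_pi[t] / rollout.pi_at_st[t]   # importance weight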

    def run_test(self):
        """Run one full episode with a greedy policy and report the total reward."""
        time.sleep(30)
        if not self.test:
            # Snapshot the latest learner weights to disk before evaluating.
            weights = ray.get(self.parameterserver.pull.remote())
            torch.save(weights, self.savepath)
        else:
            weights = torch.load(self.loadpath,
                                 map_location=lambda storage, loc: storage)
        self.model.load_state_dict(weights)
        totalreward = 0
        self.steps += 1
        self.env.reset()
        self.cin = self.lstm_init
        self.hin = self.lstm_init
        while self.env.is_running():
            obs = self.env.observations()
            img_tensor = utils.createbatch([obs['RGB_INTERLEAVED']])
            prob, (self.hin, self.cin) = self.model(img_tensor, self.cin, self.hin)
            # Greedy action selection, unlike the sampling used in run_train.
            action_idx = prob.max(1)[1].tolist()[0]
            action = ACTION_LIST[action_idx]
            reward = self.env.step(action, num_steps=4)  # action repeat of 4
            totalreward += reward
        print("TEST ACTOR: Test finished. Total reward for actor_id {}: {}".format(
            self.id, totalreward))

    def get_id(self):
        return self.id

    def get_reward(self):
        return self.rewards  # the attribute set in __init__ is `rewards`

    def get_steps(self):
        return self.steps
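

# Usage sketch (assumes ray.init() has been called and that a ParameterServer
# Ray actor exposing pull.remote() exists elsewhere in this project; the level
# name, config values, and rollout length below are illustrative):
#
#   ps = ParameterServer.remote()
#   actors = [Actor.remote(i, 100, 'nav_maze_static_01',
#                          {'width': '96', 'height': '72'},
#                          ps, 'model.pt', 'model.pt', test=False)
#             for i in range(4)]
#   pending = {actor.run_train.remote(): actor for actor in actors}
#   while True:
#       [ready], _ = ray.wait(list(pending))
#       rollout, actor = ray.get(ready), pending.pop(ready)
#       # ...hand `rollout` to the learner, then re-launch the actor...
#       pending[actor.run_train.remote()] = actor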