run_pg.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 22:25:59 2019
@author: clytie
"""
import logging
import random
from collections import deque

import numpy as np
from tqdm import tqdm

from algorithms.pg import PolicyGradient
from env.envs import GymEnv
def collect_complete_traj(pg, env):
    """Roll out one full episode and return per-step data plus the episode reward.

    The observation fed to the policy is a stack of the four most recent
    84x84 grayscale frames, assembled into an (84, 84, 4) uint8 array.
    """
    states, actions, rewards, dones = [], [], [], []
    state = env.reset()
    # Seed the frame stack by repeating the initial frame four times.
    frames = deque([state] * 4, maxlen=4)
    st = np.zeros((84, 84, 4), dtype=np.uint8)
    total_reward = 0
    while True:
        # Write the current frame stack into the (84, 84, 4) state array.
        for idx, each in enumerate(frames):
            st[:, :, idx:idx + 1] = each
        states.append(st.copy())  # copy: st is overwritten in place each step
        action = pg.get_action(np.asarray([st]))[0]
        state, reward, done, _ = env.step(action)
        frames.append(state)  # deque(maxlen=4) drops the oldest frame
        actions.append(action)
        rewards.append(reward)
        dones.append(done)
        total_reward += reward
        if done:
            # Append the terminal state too, so len(states) == len(actions) + 1.
            for idx, each in enumerate(frames):
                st[:, :, idx:idx + 1] = each
            states.append(st.copy())
            break
    return states, actions, rewards, dones, total_reward
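

# A minimal sketch of return computation, under the assumption that
# PolicyGradient.update consumes raw per-step rewards and discounts them
# internally. If update instead expected discounted returns, each r_traj
# could be converted with a helper like this (gamma is a hypothetical
# discount-factor parameter, not part of the original script):
def discounted_returns(rewards, gamma=0.99):
    """Compute the reward-to-go G_t = r_t + gamma * G_{t+1} for one trajectory."""
    returns, g = [], 0.0
    for r in reversed(rewards):
        g = r + gamma * g
        returns.append(g)
    return returns[::-1]  # restore chronological order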
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s|%(levelname)s|%(message)s')

    save_model_freq = 100
    num_traj = 20  # trajectories collected per policy update
    action_space = 4
    state_space = (84, 84, 4)
    num_env = num_traj // 5

    envs = [GymEnv("BreakoutNoFrameskip-v4",
                   random.randint(0, 100), random.randint(0, 100))
            for _ in range(num_env)]
    pg = PolicyGradient(action_space, state_space, save_path="./pg_log")

    total_reward = deque([], maxlen=500)  # running window of episode rewards
    nth_trajectory = 0
    try:
        while True:
            nth_trajectory += 1
            s_batch, a_batch, r_batch = [], [], []
            # Collect a batch of complete episodes from randomly chosen envs.
            for _ in tqdm(range(num_traj)):
                s_traj, a_traj, r_traj, _, reward = collect_complete_traj(
                    pg, envs[random.randint(0, num_env - 1)])
                s_batch.append(s_traj)
                a_batch.append(a_traj)
                r_batch.append(r_traj)
                total_reward.append(reward)

            mean_reward = np.mean(total_reward)
            logging.info(
                f'>>>>{np.round(mean_reward, 5)}, nth_trajectory: {nth_trajectory}')

            pg.update(s_batch, a_batch, r_batch)
            pg.sw.add_scalar(
                'epreward_mean',
                mean_reward,
                global_step=nth_trajectory)
            if nth_trajectory % save_model_freq == 0:
                pg.save_model()
    finally:
        # The training loop only ends on interruption or error;
        # close the environments either way.
        for env in envs:
            env.close()
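

# Usage (assumption: run from the repository root so that the `algorithms`
# and `env` packages resolve):
#
#   python run_pg.py
#
# Checkpoints are written by pg.save_model() every `save_model_freq` policy
# updates, and the mean-episode-reward scalar is logged through pg.sw
# (presumably a TensorBoard SummaryWriter) under ./pg_log.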