# main.py (forked from ShangtongZhang/DeepRL)
import logging

# gym, numpy, and torch are used directly in this file; import them
# explicitly rather than relying on the star imports below.
import gym
import numpy as np
import torch

from async_agent import *
from dqn_agent import *
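

# DQN on CartPole with a small fully connected net. The 8-unit input
# layer matches history_length (2) stacked copies of CartPole's
# 4-dimensional state; epsilon is annealed from 1.0 to 0.1 over the
# first 10k steps.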
def dqn_cart_pole():
    config = dict()
    config['task_fn'] = lambda: CartPole()
    config['optimizer_fn'] = lambda params: torch.optim.RMSprop(params, 0.001)
    config['network_fn'] = lambda optimizer_fn: FCNet([8, 50, 200, 2], optimizer_fn)
    # config['network_fn'] = lambda optimizer_fn: DuelingFCNet([8, 50, 200, 2], optimizer_fn)
    config['policy_fn'] = lambda: GreedyPolicy(epsilon=1.0, final_step=10000, min_epsilon=0.1)
    config['replay_fn'] = lambda: Replay(memory_size=10000, batch_size=10)
    config['discount'] = 0.99
    config['target_network_update_freq'] = 200
    config['step_limit'] = 0
    config['explore_steps'] = 1000
    config['logger'] = gym.logger
    config['history_length'] = 2
    config['test_interval'] = 100
    config['test_repetitions'] = 50
    # config['double_q'] = True
    config['double_q'] = False
    agent = DQNAgent(**config)
    agent.run()
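

# Asynchronous one-step Q-learning on CartPole with 16 parallel
# workers, in the style of Mnih et al. (2016), "Asynchronous Methods
# for Deep Reinforcement Learning". Swap the 'bootstrap' entry below
# for the n-step Q-learning or one-step Sarsa variants.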
def async_cart_pole():
    config = dict()
    config['task_fn'] = lambda: CartPole()
    config['optimizer_fn'] = lambda params: torch.optim.Adam(params, 0.001)
    config['network_fn'] = lambda: FCNet([4, 50, 200, 2])
    config['policy_fn'] = lambda: GreedyPolicy(epsilon=0.5, final_step=5000, min_epsilon=0.1)
    config['bootstrap'] = OneStepQLearning
    # config['bootstrap'] = NStepQLearning
    # config['bootstrap'] = OneStepSarsa
    config['discount'] = 0.99
    config['target_network_update_freq'] = 200
    config['step_limit'] = 0
    config['n_workers'] = 16
    config['update_interval'] = 6
    config['test_interval'] = 4000
    config['test_repetitions'] = 50
    config['history_length'] = 1
    config['logger'] = gym.logger
    agent = AsyncAgent(**config)
    agent.run()
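

# A3C on CartPole: an actor-critic net (presumably a shared hidden
# layer with separate policy and value heads), with actions drawn from
# the policy head via SamplePolicy rather than epsilon-greedy.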
def a3c_cart_pole():
    update_interval = 6
    config = dict()
    config['task_fn'] = lambda: CartPole()
    config['optimizer_fn'] = lambda params: torch.optim.Adam(params, 0.001)
    config['network_fn'] = lambda: ActorCriticFCNet([4, 200, 2])
    config['policy_fn'] = SamplePolicy
    config['bootstrap'] = AdvantageActorCritic
    config['discount'] = 0.99
    config['target_network_update_freq'] = 200
    config['step_limit'] = 0
    config['n_workers'] = 16
    config['update_interval'] = update_interval
    config['history_length'] = 1
    config['test_interval'] = 4000
    config['test_repetitions'] = 50
    config['logger'] = gym.logger
    agent = AsyncAgent(**config)
    agent.run()
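

# DQN on raw Atari pixels. The optimizer settings, 1M-transition
# replay, batch size 32, 10k-step target-network sync, and the
# epsilon schedule (1.0 -> 0.1 over 1M steps) follow the Nature DQN
# hyperparameters (Mnih et al., 2015); frames are stored as uint8 to
# keep the replay buffer's memory footprint manageable.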
def dqn_pixel_atari(name):
    config = dict()
    history_length = 4
    n_actions = 6
    config['task_fn'] = lambda: PixelAtari(name, no_op=30, frame_skip=4, normalized_state=False)
    config['optimizer_fn'] = lambda params: torch.optim.RMSprop(params, lr=0.00025, alpha=0.95, eps=0.01)
    config['network_fn'] = lambda optimizer_fn: NatureConvNet(history_length, n_actions, optimizer_fn)
    # config['network_fn'] = lambda optimizer_fn: DuelingNatureConvNet(history_length, n_actions, optimizer_fn)
    config['policy_fn'] = lambda: GreedyPolicy(epsilon=1.0, final_step=1000000, min_epsilon=0.1)
    config['replay_fn'] = lambda: Replay(memory_size=1000000, batch_size=32, dtype=np.uint8)
    config['discount'] = 0.99
    config['target_network_update_freq'] = 10000
    config['step_limit'] = 0
    config['explore_steps'] = 50000
    config['logger'] = gym.logger
    config['history_length'] = history_length
    config['test_interval'] = 10
    config['test_repetitions'] = 1
    # config['double_q'] = True
    config['double_q'] = False
    agent = DQNAgent(**config)
    # The tag labels saved artifacts; use 'dueling_' only when the
    # DuelingNatureConvNet option above is enabled.
    agent.tag = ''
    agent.run()
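

# Asynchronous one-step Sarsa on Atari with downscaled 42x42 frames
# and the smaller OpenAI-style conv net. StochasticGreedyPolicy
# appears to assign each worker one of three epsilon-annealing
# schedules, sampled with the given probs, mirroring the per-thread
# exploration rates used in the async paper.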
def async_pixel_atari(name):
    config = dict()
    history_length = 1
    n_actions = 6
    config['task_fn'] = lambda: PixelAtari(name, no_op=30, frame_skip=4, frame_size=42)
    config['optimizer_fn'] = lambda params: torch.optim.Adam(params, lr=0.0001)
    config['network_fn'] = lambda: OpenAIConvNet(history_length, n_actions)
    config['policy_fn'] = lambda: StochasticGreedyPolicy(epsilons=[0.5, 0.5, 0.5],
                                                         final_step=2000000,
                                                         min_epsilons=[0.1, 0.01, 0.2],
                                                         probs=[0.4, 0.3, 0.3])
    # config['bootstrap'] = OneStepQLearning
    # config['bootstrap'] = NStepQLearning
    config['bootstrap'] = OneStepSarsa
    config['discount'] = 0.99
    config['target_network_update_freq'] = 10000
    config['step_limit'] = 10000
    config['n_workers'] = 16
    config['update_interval'] = 20
    config['test_interval'] = 50000
    config['test_repetitions'] = 1
    config['history_length'] = history_length
    config['logger'] = gym.logger
    agent = AsyncAgent(**config)
    agent.run()
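

# A3C on Atari with the feed-forward variant of the conv net
# (LSTM=False). target_network_update_freq is 0 because A3C
# bootstraps from the online network and uses no separate target copy.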
def a3c_pixel_atari(name):
    config = dict()
    history_length = 1
    n_actions = 6
    config['task_fn'] = lambda: PixelAtari(name, no_op=30, frame_skip=4, frame_size=42)
    config['optimizer_fn'] = lambda params: torch.optim.Adam(params, lr=0.0001)
    config['network_fn'] = lambda: OpenAIActorCriticConvNet(history_length, n_actions, LSTM=False)
    config['policy_fn'] = SamplePolicy
    config['bootstrap'] = AdvantageActorCritic
    config['discount'] = 0.99
    config['target_network_update_freq'] = 0
    config['step_limit'] = 10000
    config['n_workers'] = 16
    config['update_interval'] = 20
    config['test_interval'] = 50000
    config['test_repetitions'] = 1
    config['history_length'] = history_length
    config['logger'] = gym.logger
    agent = AsyncAgent(**config)
    agent.tag = ''
    agent.run()
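

# Uncomment exactly one entry point below. The *NoFrameskip-v3 IDs are
# the gym Atari environments of this repo's era; frame skipping is
# applied by the PixelAtari wrapper (frame_skip=4), not by gym. Note
# that n_actions is hardcoded to 6 in the Atari functions, which
# matches Pong's gym action space but may not match Breakout's.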
if __name__ == '__main__':
    # gym.logger.setLevel(logging.DEBUG)
    gym.logger.setLevel(logging.INFO)
    # dqn_cart_pole()
    # async_cart_pole()
    # a3c_cart_pole()
    # dqn_pixel_atari('PongNoFrameskip-v3')
    async_pixel_atari('PongNoFrameskip-v3')
    # a3c_pixel_atari('PongNoFrameskip-v3')
    # dqn_pixel_atari('BreakoutNoFrameskip-v3')
    # async_pixel_atari('BreakoutNoFrameskip-v3')
    # a3c_pixel_atari('BreakoutNoFrameskip-v3')