-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathagent.py
More file actions
146 lines (128 loc) · 5.56 KB
/
agent.py
File metadata and controls
146 lines (128 loc) · 5.56 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
# For implementing neural network for DQN
from collections import deque # Stores agent's memory
import numpy as np
from math import cos
import random
import torch
from ai_snake_game import SnakeGameAI, Point, Direction
from model import Linear_Net, NeuralNetTrainer, QTrainer
from plotter import plot
MAX_MEMORY = 100000
BATCH_SIZE = 1000
LEARN_RATE = 0.001
BLOCK_SIZE = 20
class Agent:
    """Deep Q-Learning agent for the snake game.

    Owns the replay memory, the policy network and its trainer, and exposes
    helpers to encode the game into an 11-flag state vector, store
    transitions, train, and pick epsilon-greedy actions.
    """

    def __init__(self):
        self.n_games = 0
        self.epsilon = 0  # Exploration randomness; recomputed in get_action().
        self.gamma = 0.9  # Discount rate for future rewards; must be < 1.
        # Replay buffer: deque drops the oldest transition once MAX_MEMORY is reached.
        self.memory = deque(maxlen=MAX_MEMORY)
        # 11 inputs: one per flag in the state vector built by get_state().
        # 3 outputs: one per action slot in the one-hot [straight, right, left] list.
        self.model = Linear_Net(input_size=11, hidden_size=256, output_size=3)
        self.trainer = QTrainer(self.model, LEARN_RATE, self.gamma)

    def get_state(self, game):
        """Encode *game* into an 11-element 0/1 int array.

        Layout: [danger forward, danger right, danger left,
                 moving left/right/up/down (one-hot),
                 food left/right/up/down relative to the head].
        """
        # Exactly one of these direction flags is True.
        current_dir_left = game.direction == Direction.LEFT
        current_dir_right = game.direction == Direction.RIGHT
        current_dir_up = game.direction == Direction.UP
        current_dir_down = game.direction == Direction.DOWN

        head = game.snake[0]
        # Probe the four cells adjacent to the head, one grid block away.
        # (Was a hard-coded 20; BLOCK_SIZE is the module constant for it.)
        left_pt = Point(head.x - BLOCK_SIZE, head.y)
        right_pt = Point(head.x + BLOCK_SIZE, head.y)
        up_pt = Point(head.x, head.y - BLOCK_SIZE)
        down_pt = Point(head.x, head.y + BLOCK_SIZE)

        # Danger flags are expressed relative to the current heading:
        # e.g. when moving left, "right of the snake" is the cell above it.
        danger_forward = ((current_dir_left and game.is_collision(left_pt))
                          or (current_dir_right and game.is_collision(right_pt))
                          or (current_dir_up and game.is_collision(up_pt))
                          or (current_dir_down and game.is_collision(down_pt)))
        danger_right = ((current_dir_left and game.is_collision(up_pt))
                        or (current_dir_right and game.is_collision(down_pt))
                        or (current_dir_up and game.is_collision(right_pt))
                        or (current_dir_down and game.is_collision(left_pt)))
        danger_left = ((current_dir_left and game.is_collision(down_pt))
                       or (current_dir_right and game.is_collision(up_pt))
                       or (current_dir_up and game.is_collision(left_pt))
                       or (current_dir_down and game.is_collision(right_pt)))

        state = [
            danger_forward,
            danger_right,
            danger_left,
            current_dir_left,
            current_dir_right,
            current_dir_up,
            current_dir_down,
            game.food.x < game.head.x,  # food left
            game.food.x > game.head.x,  # food right
            game.food.y < game.head.y,  # food up
            game.food.y > game.head.y,  # food down
        ]
        return np.array(state, dtype=int)

    def remember(self, state, action, reward, next_state, done):
        """Store one transition in the replay buffer (oldest is dropped when full)."""
        self.memory.append((state, action, reward, next_state, done))

    def train_short_memory(self, state, action, reward, next_state, done):
        """Run one training step on the single most recent transition."""
        self.trainer.train_step(state, action, reward, next_state, done)

    def train_long_memory(self):
        """Experience replay: train on a random batch sampled from memory.

        Falls back to the whole buffer while it holds fewer than BATCH_SIZE
        transitions.
        """
        if len(self.memory) > BATCH_SIZE:
            mini_sample = random.sample(self.memory, BATCH_SIZE)  # list of tuples
        else:
            mini_sample = self.memory
        # Unzip the transitions into parallel tuples for a batched train step.
        states, actions, rewards, next_states, dones = zip(*mini_sample)
        self.trainer.train_step(states, actions, rewards, next_states, dones)

    def get_action(self, state):
        """Return a one-hot action [straight, right, left] chosen epsilon-greedily.

        Exploration decays linearly with games played; after 80 games the
        agent is effectively greedy (epsilon <= 0 never beats randint(0, 200)).
        """
        self.epsilon = 80 - self.n_games
        decided_action = [0, 0, 0]
        if random.randint(0, 200) < self.epsilon:  # Explore: random move.
            random_action_index = random.randint(0, 2)
            decided_action[random_action_index] = 1
        else:  # Exploit: take the network's highest-scoring action.
            state0 = torch.tensor(state, dtype=torch.float)
            prediction = self.model(state0)
            action_index = torch.argmax(prediction).item()
            decided_action[action_index] = 1
        return decided_action
def train():
    """Train the agent forever, saving the model on new records and plotting scores."""
    plot_scores = []
    plot_mean_scores = []
    total_score = 0
    highest_record = 0
    agent = Agent()
    game = SnakeGameAI()

    while True:
        # One environment step: observe, act, observe again.
        old_state = agent.get_state(game)
        decided_action = agent.get_action(old_state)
        reward, done, score = game.take_action(decided_action)
        new_state = agent.get_state(game)

        # Learn from this single transition, then store it for replay.
        agent.train_short_memory(old_state, decided_action, reward, new_state, done)
        agent.remember(old_state, decided_action, reward, new_state, done)

        if not done:
            continue

        # Episode over: reset, replay past experience, record stats.
        game.reset_game()
        agent.n_games += 1
        agent.train_long_memory()

        if score > highest_record:
            highest_record = score
            agent.model.save()

        print(f"Game count: {agent.n_games}. Score: {score}\nHighest record: {highest_record}")
        plot_scores.append(score)
        total_score += score
        mean_score = total_score / agent.n_games
        plot_mean_scores.append(mean_score)
        plot(plot_scores, plot_mean_scores)
# Start training only when run as a script, not when imported as a module.
if __name__ == "__main__":
    train()