-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathDQN_model.py
More file actions
22 lines (19 loc) · 858 Bytes
/
DQN_model.py
File metadata and controls
22 lines (19 loc) · 858 Bytes
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
import torch
import torch.nn as nn
from config import FRAME_STACK
#source https://www.researchgate.net/figure/A-standard-DQN-architecture-with-convolutional-layers-allows-comparisons-between-buffer_fig4_321962850
class DQN(nn.Module):
    """Convolutional Q-network in the classic Atari DQN layout
    (three conv layers, then two fully connected layers) producing
    one Q-value per discrete action.

    Args:
        action_dim: number of discrete actions (width of the output layer).
        in_channels: number of stacked input frames; defaults to the
            project-wide FRAME_STACK setting, preserving the original
            behavior.
        input_size: height/width of the (square) input frames. The
            original code hard-coded the 84x84 Atari resolution via a
            64 * 7 * 7 flatten size; it is now derived from input_size,
            with the default reproducing the original exactly.
    """

    def __init__(self, action_dim, in_channels=FRAME_STACK, input_size=84):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        # Spatial size after each valid (unpadded) conv:
        # floor((size - kernel) / stride) + 1.
        # For input_size=84: 84 -> 20 -> 9 -> 7, matching the original
        # hard-coded 64 * 7 * 7 flatten size.
        size = input_size
        for kernel, stride in ((8, 4), (4, 2), (3, 1)):
            size = (size - kernel) // stride + 1
        self.fc1 = nn.Linear(64 * size * size, 512)
        self.fc2 = nn.Linear(512, action_dim)

    def forward(self, x):
        """Map a batch of frame stacks (N, C, H, W) to Q-values (N, action_dim)."""
        x = torch.relu(self.conv1(x))
        x = torch.relu(self.conv2(x))
        x = torch.relu(self.conv3(x))
        x = torch.flatten(x, start_dim=1)  # (N, 64 * size * size)
        x = torch.relu(self.fc1(x))
        return self.fc2(x)