-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsaideepm_koppula2_srijakod_project_final_deploy.py
More file actions
146 lines (123 loc) · 5.43 KB
/
saideepm_koppula2_srijakod_project_final_deploy.py
File metadata and controls
146 lines (123 loc) · 5.43 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
import streamlit as st
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
# Function to preprocess the uploaded image
def transform_image(file_upload, size=(64, 64)):
    """Preprocess an uploaded image file for the CNN.

    Args:
        file_upload: A file-like object (e.g. Streamlit UploadedFile) that
            PIL can open.
        size: Target (width, height) to resize to; defaults to the 64x64
            the model was trained on.

    Returns:
        A float numpy array of shape (height, width, 3) with values in [0, 1].
    """
    with Image.open(file_upload) as img:
        # Force 3 RGB channels: the uploader accepts PNGs, which may be
        # RGBA/grayscale/palette and would otherwise break the model's
        # Conv2d(3, ...) input and the HWC->CHW transpose downstream.
        img = img.convert("RGB").resize(size)
        # Normalize pixel values to be between 0 and 1
        return np.array(img) / 255.0
class VGG(nn.Module):
    """VGG-13-style CNN for 43-class traffic-sign classification.

    Five stages of (Conv3x3 -> ReLU -> BatchNorm) pairs, with a 2x2 max-pool
    between stages (no pool after the final stage), then adaptive average
    pooling to 2x2 and a three-layer fully-connected classifier.
    """

    def __init__(self, num_classes=43):
        super(VGG, self).__init__()
        # Stage plan: channel width for each conv triple; "M" marks a 2x2
        # max-pool. Expanding this list yields exactly the same module
        # sequence (and state_dict keys) as writing the layers out longhand.
        plan = [64, 64, "M", 128, 128, "M", 256, 256, "M", 512, 512, "M", 512, 512]
        layers = []
        in_channels = 3
        for step in plan:
            if step == "M":
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.extend([
                    nn.Conv2d(in_channels, step, kernel_size=3, stride=1, padding=1),
                    nn.ReLU(inplace=True),
                    nn.BatchNorm2d(step),
                ])
                in_channels = step
        self.features = nn.Sequential(*layers)
        # Pool to a fixed 2x2 map so the classifier input size does not
        # depend on the spatial size of the input image.
        self.avgpool = nn.AdaptiveAvgPool2d((2, 2))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 2 * 2, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        """Return raw class logits of shape (batch, num_classes)."""
        feature_maps = self.features(x)
        pooled = self.avgpool(feature_maps)
        flattened = torch.flatten(pooled, 1)
        return self.classifier(flattened)
# Creating a VGG model instance and loading the trained weights.
# st.cache_resource keeps the loaded model alive across Streamlit reruns,
# so the weights are read from disk once instead of on every interaction.
@st.cache_resource
def _load_model(weights_path='best_vgg13_opt_model.pth'):
    """Build the 43-class VGG and load trained weights for CPU inference."""
    model = VGG(num_classes=43)
    model.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu')))
    model.eval()  # inference mode: disables dropout, uses BN running stats
    return model

vgg13_opt_model = _load_model()

# Index -> human-readable sign name for the model's 43 output classes.
# NOTE(review): the ordering differs from the canonical GTSRB class order —
# presumably it matches the label encoding used at training time; verify
# against the training script before changing.
class_names = ['Speed limit (20km/h)', 'Speed limit (30km/h)', 'No passing for vehicles over 3.5 metric tons', 'Right-of-way at the next intersection',
               'Priority road', 'Yield', 'Stop', 'No vehicles', 'Vehicles over 3.5 metric tons prohibited', 'No entry', 'General caution',
               'Dangerous curve to the left', 'Speed limit (50km/h)', 'Dangerous curve to the right', 'Double curve', 'Bumpy road', 'Slippery road',
               'Road narrows on the right', 'Road work', 'Traffic signals', 'Pedestrians', 'Children crossing', 'Bicycles crossing',
               'Speed limit (60km/h)', 'Beware of ice/snow', 'Wild animals crossing', 'End of all speed and passing limits', 'Turn right ahead',
               'Turn left ahead', 'Ahead only', 'Go straight or right', 'Go straight or left', 'Keep right', 'Keep left', 'Speed limit (70km/h)',
               'Roundabout mandatory', 'End of no passing', 'End of no passing by vehicles over 3.5 metric tons', 'Speed limit (80km/h)',
               'End of speed limit (80km/h)', 'Speed limit (100km/h)', 'Speed limit (120km/h)', 'No passing']
# Inject page-wide CSS: a centered green banner style (.title) and a
# centered green "pill" style (.result-container / .result-text) used for
# the recognition result rendered further below.
st.markdown(
"""
<style>
.title {
text-align: center;
color: white;
padding: 20px;
background-color: #4CAF50;
border-radius: 10px;
}
.result-container {
text-align: center;
margin-top: 20px;
}
.result-text {
font-size: 24px;
color: white;
background-color: #4CAF50;
padding: 10px 20px;
border-radius: 5px;
}
</style>
""", unsafe_allow_html=True)
# Streamlit web interface: page headings, image upload, and on-demand
# traffic-sign recognition.
st.markdown("<h2 style='text-align: center; color: black;'>Advanced Driver Assistance System(ADAS)</h2>", unsafe_allow_html=True)
st.markdown("<h3 style='text-align: center; color: black;'>Traffic Sign Board Recognition</h3>", unsafe_allow_html=True)

file_upload = st.file_uploader("upload an image", type=["jpg", "jpeg", "png"])

# Show a preview of the uploaded image and run inference only when the
# user explicitly clicks the button.
if file_upload:
    st.image(file_upload, width=200)
    if st.button('Click To Recognize'):
        # Preprocess: resize/scale to [0, 1], then HWC -> CHW and add a
        # batch dimension for the model.
        pixels = transform_image(file_upload)
        batch = torch.tensor(pixels.transpose((2, 0, 1)), dtype=torch.float32).unsqueeze(0)
        # Inference without gradient tracking.
        with torch.no_grad():
            logits = vgg13_opt_model(batch)
            scores = F.softmax(logits, dim=1)
            top_idx = scores.argmax(dim=1).item()
        sign_name = class_names[top_idx]
        st.markdown(f"<div class='result-container'><p class='result-text'>Recognized Traffic Sign: {sign_name}</p></div>", unsafe_allow_html=True)