# -*- coding: utf-8 -*-
"""Chatbot.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1nDwH5Gtaiw2CKH2ny8Eook4pVstBXhOA

# **Importing NLP Libraries**
---
"""
import nltk
nltk.download('punkt')
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()
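# Lancaster is an aggressive stemmer; for example (illustrative),
# stemmer.stem("running") -> "run" and stemmer.stem("maximum") -> "maxim"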
"""# **Importing Tensorflow Libraries**"""
# Libraries needed for Tensorflow processing
# (note: tflearn requires TensorFlow 1.x; tf.reset_default_graph() below also assumes 1.x)
import tensorflow as tf
import numpy as np
import tflearn
import random
import json
from google.colab import files
files.upload()
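# the uploaded file is expected to be intents.json; its assumed layout follows
# the common tag/patterns/responses pattern, roughly:
# {"intents": [
#     {"tag": "greeting",
#      "patterns": ["Hi", "How are you?"],
#      "responses": ["Hello!", "Hi there, how can I help?"]}
# ]}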
# parse the uploaded intents file into a dict
with open('intents.json') as json_data:
    intents = json.load(json_data)
words = []
classes = []
documents = []
ignore = ['?']
# loop through each sentence in the intent's patterns
for intent in intents['intents']:
    for pattern in intent['patterns']:
        # tokenize each and every word in the sentence
        w = nltk.word_tokenize(pattern)
        # add word to the words list
        words.extend(w)
        # add word(s) to documents
        documents.append((w, intent['tag']))
        # add tags to our classes list
        if intent['tag'] not in classes:
            classes.append(intent['tag'])
# Perform stemming and lower each word as well as remove duplicates
words = [stemmer.stem(w.lower()) for w in words if w not in ignore]
words = sorted(list(set(words)))
# remove duplicate classes
classes = sorted(list(set(classes)))
print(len(documents), "documents")
print(len(classes), "classes", classes)
print(len(words), "unique stemmed words", words)
# create training data
training = []
output = []
# create an empty array for output
output_empty = [0] * len(classes)
# create training set, bag of words for each sentence
for doc in documents:
    # initialize bag of words
    bag = []
    # list of tokenized words for the pattern
    pattern_words = doc[0]
    # stem each word
    pattern_words = [stemmer.stem(word.lower()) for word in pattern_words]
    # create bag of words array
    for w in words:
        bag.append(1 if w in pattern_words else 0)
    # output is '1' for the current tag and '0' for all other tags
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1
    training.append([bag, output_row])
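# Worked example (illustrative): if words were ['hello', 'how', 'you'] and the
# pattern tokens stemmed to ['hello', 'you'], the bag would be [1, 0, 1].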
# shuffle features and turn them into an np.array
# (dtype=object because bag and output_row have different lengths)
random.shuffle(training)
training = np.array(training, dtype=object)
# creating training lists
train_x = list(training[:,0])
train_y = list(training[:,1])
# resetting underlying graph data
tf.reset_default_graph()
# Building neural network
net = tflearn.input_data(shape=[None, len(train_x[0])])
net = tflearn.fully_connected(net, 10)
net = tflearn.fully_connected(net, 10)
net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
net = tflearn.regression(net)
# Defining model and setting up tensorboard
model = tflearn.DNN(net, tensorboard_dir='tflearn_logs')
# Start training
model.fit(train_x, train_y, n_epoch=1000, batch_size=8, show_metric=True)
model.save('model.tflearn')
import pickle
pickle.dump({'words': words, 'classes': classes, 'train_x': train_x, 'train_y': train_y},
            open("training_data", "wb"))
# restoring all the data structures
data = pickle.load(open("training_data", "rb"))
words = data['words']
classes = data['classes']
train_x = data['train_x']
train_y = data['train_y']
with open('intents.json') as json_data:
    intents = json.load(json_data)
# load the saved model
model.load('./model.tflearn')
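# note: model.load restores weights into the existing graph, so in a fresh
# session the same network architecture must be rebuilt before calling it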
def clean_up_sentence(sentence):
    # tokenizing the pattern
    sentence_words = nltk.word_tokenize(sentence)
    # stemming each word
    sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]
    return sentence_words
# returning bag of words array: 0 or 1 for each word in the bag that exists in the sentence
def bow(sentence, words, show_details=False):
    # tokenizing the pattern
    sentence_words = clean_up_sentence(sentence)
    # generating bag of words
    bag = [0] * len(words)
    for s in sentence_words:
        for i, w in enumerate(words):
            if w == s:
                bag[i] = 1
                if show_details:
                    print("found in bag: %s" % w)
    return np.array(bag)
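# usage sketch (illustrative): bow("hello there", words) returns a binary
# numpy array with one slot per known stemmed word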
ERROR_THRESHOLD = 0.30

def classify(sentence):
    # generate probabilities from the model
    results = model.predict([bow(sentence, words)])[0]
    # filter out predictions below a threshold
    results = [[i, r] for i, r in enumerate(results) if r > ERROR_THRESHOLD]
    # sort by strength of probability
    results.sort(key=lambda x: x[1], reverse=True)
    return_list = []
    for r in results:
        return_list.append((classes[r[0]], r[1]))
    # return the list of (intent, probability) tuples
    return return_list
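# e.g. classify("Hi there") might return [('greeting', 0.97)] (values illustrative)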
def response(sentence, userID='123', show_details=False):
    results = classify(sentence)
    # if we have a classification then find the matching intent tag
    if results:
        # loop as long as there are matches to process
        while results:
            for i in intents['intents']:
                # find a tag matching the first result
                if i['tag'] == results[0][0]:
                    # print a random response from the matching intent
                    return print(random.choice(i['responses']))
            results.pop(0)
# quick sanity checks; the last two use informal/misspelled phrasings to
# probe how forgiving the bag-of-words matching is
classify('What is your name?')
response('What is your name?')
response('Whats your name')
response('What your name')
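# A minimal interactive loop built on response() (a sketch; left commented
# out so the script still runs unattended in Colab):
# while True:
#     msg = input('You: ')
#     if msg.lower() in ('quit', 'exit'):
#         break
#     response(msg)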