-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.py
More file actions
213 lines (172 loc) · 7.96 KB
/
app.py
File metadata and controls
213 lines (172 loc) · 7.96 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
import os
from moviepy.editor import VideoFileClip, concatenate_videoclips
from flask import Flask, request, send_file, render_template, redirect
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import nltk
from openai import OpenAI
from werkzeug.utils import secure_filename
from google.cloud import speech, storage
import io
from flask_dropzone import Dropzone
app = Flask(__name__)

# Dropzone configuration: cap uploads at 100 MB. The commented-out options
# record alternative upload flows tried during development.
app.config.update(
    # DROPZONE_REDIRECT_VIEW='process'
    DROPZONE_MAX_FILE_SIZE=100,
    # DROPZONE_IN_FORM=True,
    # DROPZONE_UPLOAD_ON_CLICK=True,
    # DROPZONE_UPLOAD_ACTION='process',  # URL or endpoint
    # DROPZONE_UPLOAD_BTN_ID='submit'
)
dropzone = Dropzone(app)

# SECURITY FIX: never commit API keys to source control. The key previously
# hard-coded here must be revoked; supply a fresh one via the environment.
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

# Uploaded videos are stored here, relative to the application directory.
app.config["UPLOAD_FOLDER"] = "uploads/"

# Directory holding one sign-language clip per word/letter.
# NOTE(review): machine-specific absolute path — should come from config or
# an environment variable before this runs anywhere but the author's machine.
VIDEO_DIR = r"C:\Users\siria\OneDrive\Documents\code\VideoToSignApp\assets\Videos"
def find_video(word, extensions=('.mp4', '.mkv')):
    """Return the filesystem path of the clip for *word*, or None.

    Looks in VIDEO_DIR for ``word + ext`` for each extension in
    *extensions*, in order, returning the first path that exists.

    Args:
        word: Word or single character to look up. Matching is exact, so
            case matters on case-sensitive filesystems.
        extensions: File extensions to try, in priority order. Defaults to
            the two formats present in the clip library.

    Returns:
        The normalized path string, or ``None`` when no clip exists.
    """
    for extension in extensions:
        video_path = os.path.normpath(os.path.join(VIDEO_DIR, word + extension))
        print("This is video path 1:", video_path)
        if os.path.exists(video_path):
            return video_path
    return None
def map_sentence_to_videos(lst):
    """Collect VideoFileClip objects for (at most) the first ten words of *lst*.

    Each word is mapped to its whole-word clip when one exists; otherwise the
    word is fingerspelled by looking up one clip per character. Characters
    without a clip are skipped with a diagnostic message.

    Returns the (possibly empty) list of clips.
    """
    video_clips = []
    for word in lst[:10]:
        video_path = find_video(word)
        print("This is video path 2:", video_path)
        if video_path:
            video_clips.append(VideoFileClip(video_path))
            continue
        # No whole-word clip: fall back to fingerspelling the word.
        for char in word:
            char_video_path = find_video(char)
            print("This is video path 3:", char_video_path)
            if char_video_path:
                video_clips.append(VideoFileClip(char_video_path))
            else:
                print(f"No video found for the character: {char}")
    print("This is videoClips123: -", video_clips)
    return video_clips
def text_to_sl(text):
    """Translate an English sentence into a concatenated sign-language video.

    Pipeline: lowercase -> tokenize -> POS-tag -> infer tense -> remove
    stopwords -> lemmatize -> map ``I`` to ``Me`` -> prepend a tense marker
    word -> look up a clip per word (or per character as fallback) ->
    concatenate and write ``static/output_video.mp4``.

    Args:
        text: The sentence to translate.

    Returns:
        The URL path ``"/static/output_video.mp4"`` on success, or an
        ``(error_message, 400)`` tuple when no clips could be found.
    """
    # BUG FIX: the original called text.lower() and discarded the result
    # (strings are immutable), so clip lookups against lowercase filenames
    # could miss. NOTE(review): assumes clip filenames are lowercase —
    # confirm against the contents of VIDEO_DIR.
    text = text.lower()

    words = word_tokenize(text)
    # Restore the first-person pronoun's capitalization: the stopword set
    # below contains 'i', and lowering would otherwise delete the pronoun
    # before it can be mapped to the sign "Me".
    words = ['I' if w == 'i' else w for w in words]
    tagged = nltk.pos_tag(words)

    # Rough tense detection from Penn Treebank POS tags
    # (MD = modal => future; VBD/VBN => past; VBG => present continuous).
    tense = {}
    tense["future"] = len([word for word in tagged if word[1] == "MD"])
    tense["present"] = len([word for word in tagged if word[1] in ["VBP", "VBZ", "VBG"]])
    tense["past"] = len([word for word in tagged if word[1] in ["VBD", "VBN"]])
    tense["present_continuous"] = len([word for word in tagged if word[1] in ["VBG"]])

    # Stopwords to remove (fixed snapshot of NLTK's English stopword list).
    stop_words = set(["mightn't", 're', 'wasn', 'wouldn', 'be', 'has', 'that', 'does', 'shouldn', 'do', "you've",'off', 'for', "didn't", 'm', 'ain', 'haven', "weren't", 'are', "she's", "wasn't", 'its', "haven't", "wouldn't", 'don', 'weren', 's', "you'd", "don't", 'doesn', "hadn't", 'is', 'was', "that'll", "should've", 'a', 'then', 'the', 'mustn', 'i', 'nor', 'as', "it's", "needn't", 'd', 'am', 'have', 'hasn', 'o', "aren't", "you'll", "couldn't", "you're", "mustn't", 'didn', "doesn't", 'll', 'an', 'hadn', 'whom', 'y', "hasn't", 'itself', 'couldn', 'needn', "shan't", 'isn', 'been', 'such', 'shan', "shouldn't", 'aren', 'being', 'were', 'did', 'ma', 't', 'having', 'mightn', 've', "isn't", "won't"])

    # Remove stopwords and lemmatize, choosing the lemma POS from the tag.
    lr = WordNetLemmatizer()
    filtered_text = []
    for w, p in zip(words, tagged):
        if w not in stop_words:
            if p[1] in ('VBG', 'VBD', 'VBZ', 'VBN', 'NN'):
                filtered_text.append(lr.lemmatize(w, pos='v'))
            elif p[1] in ('JJ', 'JJR', 'JJS', 'RBR', 'RBS'):
                filtered_text.append(lr.lemmatize(w, pos='a'))
            else:
                filtered_text.append(lr.lemmatize(w))

    # Sign language uses "Me" for the first-person pronoun.
    words = ['Me' if w == 'I' else w for w in filtered_text]

    # Prepend an explicit tense marker word, since sign language does not
    # inflect verbs for tense.
    probable_tense = max(tense, key=tense.get)
    if probable_tense == "past" and tense["past"] >= 1:
        words = ["Before"] + words
    elif probable_tense == "future" and tense["future"] >= 1:
        if "Will" not in words:
            words = ["Will"] + words
    elif probable_tense == "present" and tense["present_continuous"] >= 1:
        words = ["Now"] + words

    video_clips = map_sentence_to_videos(words)
    print(video_clips)
    if video_clips:
        final_clip = concatenate_videoclips(video_clips, method='compose')
        output_path = os.path.join(
            os.path.abspath(os.path.dirname(__file__)), "static", "output_video.mp4"
        )
        final_clip.write_videofile(output_path)
        return "/static/output_video.mp4"
    return "Clips not found for the words or chars", 400
@app.route('/', methods=["GET", "POST"])
def index():
    """Serve the landing page."""
    return render_template('index.html')
@app.route('/process', methods=["GET", "POST"])
def process():
    """Accept an uploaded video, transcribe its audio, and render the
    sign-language translation.

    POST: save the upload, extract its audio track with moviepy, push the
    audio to Google Cloud Storage, run long-running speech recognition on
    it, then feed the transcript to text_to_sl() and render the result.
    GET (or a fall-through POST): just render the page.
    """
    if request.method != "POST":
        return render_template("index.html")

    print("File received")  # typo fix: was "recieved"
    if "file" not in request.files:
        # BUG FIX: redirect() takes a URL, not a template name; send the
        # user back to the landing page.
        return redirect("/")
    file = request.files["file"]
    if file.filename == "":
        return redirect("/")
    print(file.filename)
    if not file:
        return render_template("index.html")

    destination = os.path.join(
        os.path.abspath(os.path.dirname(__file__)),
        app.config["UPLOAD_FOLDER"],
        secure_filename(file.filename),
    )
    file.save(destination)

    # Extract the audio track from the uploaded video.
    video = VideoFileClip(destination)
    video.audio.write_audiofile("output_audio.mp3")

    # BUG FIX: credentials must be in the environment *before* the Speech
    # client is constructed, or it may be built without them.
    # NOTE(review): machine-specific path and project id — move to
    # deployment configuration.
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "C:/Users/siria/AppData/Roaming/gcloud/application_default_credentials.json"
    os.environ["GCLOUD_PROJECT"] = "my-new-project-310201"
    # Renamed from `client` to avoid shadowing the module-level OpenAI client.
    speech_client = speech.SpeechClient()

    # Upload the audio to GCS so long_running_recognize can read it by URI.
    storage_client = storage.Client()
    bucket = storage_client.get_bucket("myaudio_files")  # your bucket name
    blob = bucket.blob('audios/FileConvert')
    blob.upload_from_filename("output_audio.mp3")
    # Build the gs:// URI directly rather than slicing the generation
    # suffix off blob.id.
    link = f"gs://{bucket.name}/{blob.name}"
    print(blob.public_url)

    audio = speech.RecognitionAudio(uri=link)
    config = speech.RecognitionConfig(
        encoding=speech.RecognitionConfig.AudioEncoding.ENCODING_UNSPECIFIED,
        sample_rate_hertz=16000,
        language_code="en-US",
    )
    operation = speech_client.long_running_recognize(config=config, audio=audio)
    print("Waiting for operation to complete...")
    response = operation.result(timeout=90)

    # Each result covers a consecutive portion of the audio; join the
    # most-likely alternative of each into one transcript.
    transcript = "".join(
        result.alternatives[0].transcript for result in response.results
    )
    print(transcript)

    outVid = text_to_sl(transcript)
    print("This is 1:- ", outVid)
    return render_template("index.html", output=outVid, transcript=transcript)
@app.route('/animation_view', methods=['POST'])
def animation_view():
    """Translate a typed sentence (form field 'sentence') into a sign video.

    The route only accepts POST, so Flask rejects other methods with 405
    before this view runs; no explicit method check is needed.
    """
    text = request.form['sentence']
    rendered = text_to_sl(text)
    print("This is 2:- ", rendered)
    return render_template('index.html', output=rendered, transcript=text)
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug interactive debugger
    # (arbitrary code execution) and auto-reload — must be disabled for any
    # non-local deployment.
    app.run(debug=True)