Commit 8196c87
Fixed merge bug
1 parent b6912b0 commit 8196c87

7 files changed: +331, -456 lines

ai/chat-part1.js: 29 additions & 90 deletions
@@ -9,30 +9,19 @@ document.addEventListener("DOMContentLoaded", () => {
   const voiceToggleBtn = document.getElementById("voice-toggle");
   const modelSelect = document.getElementById("model-select");

-  // Initialize current session from storage (or create a new one if none exists)
-  let currentSession = Storage.getCurrentSession();
-  if (!currentSession) {
-    currentSession = Storage.createSession("New Chat");
-    localStorage.setItem("currentSessionId", currentSession.id);
-  }
-
   const synth = window.speechSynthesis;
   let voices = [];
   let selectedVoice = null;
   let isSpeaking = false;
   let autoSpeakEnabled = localStorage.getItem("autoSpeakEnabled") === "true";
   let currentlySpeakingMessage = null;
-
-  // Combined variable declarations from both branches
   let activeUtterance = null;
   let recognition = null;
   let isListening = false;
   let voiceInputBtn = null;
-  let slideshowInterval = null;

-  // Voice Chat Modal Elements (from develop branch)
-  const voiceChatModal =
-    document.getElementById("voice-chat-modal") || createVoiceChatModal();
+  // Voice Chat Modal Elements
+  const voiceChatModal = document.getElementById("voice-chat-modal") || createVoiceChatModal();
   const voiceChatBtn = document.getElementById("open-voice-chat-modal");
   const voiceChatClose = document.getElementById("voice-chat-modal-close");
   const voiceChatListen = document.getElementById("voice-chat-listen");
@@ -77,30 +66,8 @@ document.addEventListener("DOMContentLoaded", () => {
       voices = synth.getVoices();
       if (voices.length > 0) {
         voicesLoaded = true;
-        // First try to restore a previously selected voice
         const savedVoiceIndex = localStorage.getItem("selectedVoiceIndex");
-        if (savedVoiceIndex && voices[savedVoiceIndex]) {
-          selectedVoice = voices[savedVoiceIndex];
-        } else {
-          // Otherwise, use a list of preferred voices
-          const preferredVoices = [
-            "Google UK English Female",
-            "Microsoft Zira",
-            "Samantha",
-            "Victoria"
-          ];
-          for (const name of preferredVoices) {
-            const voice = voices.find((v) => v.name === name);
-            if (voice) {
-              selectedVoice = voice;
-              break;
-            }
-          }
-          if (!selectedVoice) {
-            selectedVoice = voices.find((v) => v.name.toLowerCase().includes("female")) || voices[0];
-          }
-        }
-        console.log("Selected voice:", selectedVoice ? selectedVoice.name : "None");
+        selectedVoice = savedVoiceIndex && voices[savedVoiceIndex] ? voices[savedVoiceIndex] : voices.find(v => v.name.toLowerCase().includes("female")) || voices[0];
         resolve(selectedVoice);
       }
     }
@@ -111,10 +78,9 @@ document.addEventListener("DOMContentLoaded", () => {
     });
   }

-  loadVoices().then(() => {
-    updateVoiceToggleUI();
-  });
+  loadVoices().then(() => updateVoiceToggleUI());

+  // Toggle auto-speak
   function toggleAutoSpeak() {
     autoSpeakEnabled = !autoSpeakEnabled;
     localStorage.setItem("autoSpeakEnabled", autoSpeakEnabled.toString());
@@ -124,41 +90,24 @@ document.addEventListener("DOMContentLoaded", () => {

   function updateVoiceToggleUI() {
     if (voiceToggleBtn) {
-      voiceToggleBtn.innerHTML = autoSpeakEnabled
-        ? '<i class="fas fa-volume-up"></i> Voice On'
-        : '<i class="fas fa-volume-mute"></i> Voice Off';
+      voiceToggleBtn.innerHTML = autoSpeakEnabled ? '<i class="fas fa-volume-up"></i> Voice On' : '<i class="fas fa-volume-mute"></i> Voice Off';
       voiceToggleBtn.style.backgroundColor = autoSpeakEnabled ? "#4CAF50" : "";
     }
   }

+  // Speak message with completion callback
   function speakMessage(text, onEnd = null) {
     if (!synth || !window.SpeechSynthesisUtterance) {
       showToast("Speech synthesis not supported");
       return;
     }
-    if (isSpeaking) {
-      synth.cancel();
-    }
-
-    let cleanText = text
-      .replace(/```[\s\S]*?```/g, "code block omitted.")
-      .replace(/`[\s\S]*?`/g, "inline code omitted.")
-      .replace(/https?:\/\/[^\s]+/g, "URL link.");
+    if (isSpeaking) synth.cancel();

+    const cleanText = text.replace(/```[\s\S]*?```/g, "code block omitted.").replace(/`[\s\S]*?`/g, "inline code omitted.");
     const utterance = new SpeechSynthesisUtterance(cleanText);
     activeUtterance = utterance;
-    if (selectedVoice) {
-      utterance.voice = selectedVoice;
-    } else {
-      loadVoices().then((voice) => {
-        if (voice) {
-          utterance.voice = voice;
-          synth.speak(utterance);
-        }
-      });
-      return;
-    }
-    utterance.rate = 1.0;
+    if (selectedVoice) utterance.voice = selectedVoice;
+    utterance.rate = 0.9;
     utterance.pitch = 1.0;
     utterance.volume = 1.0;

@@ -206,6 +155,7 @@ document.addEventListener("DOMContentLoaded", () => {
     }
   }

+  // Initialize speech recognition
   function initSpeechRecognition() {
     if ("webkitSpeechRecognition" in window) {
       recognition = new webkitSpeechRecognition();
@@ -268,43 +218,36 @@ document.addEventListener("DOMContentLoaded", () => {

   // Send voice chat message to API and handle response
   function sendVoiceChatMessage(message) {
-    // Use the global currentSession, but refresh it from Storage if needed
-    const session = Storage.getCurrentSession();
-    session.messages.push({ role: "user", content: message });
-    Storage.updateSessionMessages(session.id, session.messages);
+    const currentSession = Storage.getCurrentSession();
+    currentSession.messages.push({ role: "user", content: message });
+    Storage.updateSessionMessages(currentSession.id, currentSession.messages);
     window.addNewMessage({ role: "user", content: message }); // Display in chat
     statusText.textContent = "Waiting for AI response...";

     const messages = [
       { role: "system", content: "You are a helpful AI assistant. Respond concisely." },
-      ...session.messages.slice(-10).map((msg) => ({
-        role: msg.role === "ai" ? "assistant" : "user",
-        content: msg.content
-      }))
+      ...currentSession.messages.slice(-10).map(msg => ({ role: msg.role === "ai" ? "assistant" : "user", content: msg.content }))
     ];

-    const safeParam = window._pollinationsAPIConfig
-      ? `safe=${window._pollinationsAPIConfig.safe}`
-      : "safe=false";
+    const safeParam = window._pollinationsAPIConfig ? `safe=${window._pollinationsAPIConfig.safe}` : "safe=false";
     fetch(`https://text.pollinations.ai/openai?${safeParam}`, {
       method: "POST",
       headers: { "Content-Type": "application/json" },
       body: JSON.stringify({ messages, model: modelSelect.value || "unity", stream: false })
     })
-      .then((res) => {
+      .then(res => {
         if (!res.ok) throw new Error(`Pollinations error: ${res.status}`);
         return res.json();
       })
-      .then((data) => {
+      .then(data => {
         let aiContent = data.choices?.[0]?.message?.content || "Error: No response";

         // Check for image generation request
         const lastUserMsg = message.toLowerCase();
-        const isImageRequest =
-          lastUserMsg.includes("image") ||
-          lastUserMsg.includes("picture") ||
-          lastUserMsg.includes("show me") ||
-          lastUserMsg.includes("generate an image");
+        const isImageRequest = lastUserMsg.includes("image") ||
+          lastUserMsg.includes("picture") ||
+          lastUserMsg.includes("show me") ||
+          lastUserMsg.includes("generate an image");
         if (isImageRequest && !aiContent.includes("https://image.pollinations.ai")) {
           let imagePrompt = lastUserMsg.replace(/show me|generate|image of|picture of|image|picture/gi, "").trim();
           if (imagePrompt.length < 5 && aiContent.toLowerCase().includes("image")) {
@@ -313,22 +256,20 @@ document.addEventListener("DOMContentLoaded", () => {
           if (imagePrompt.length > 100) imagePrompt = imagePrompt.substring(0, 100);
           imagePrompt += ", photographic";
           const seed = Math.floor(Math.random() * 1000000);
-          const imageUrl = `https://image.pollinations.ai/prompt/${encodeURIComponent(
-            imagePrompt
-          )}?width=512&height=512&seed=${seed}&${safeParam}&nolog=true`;
+          const imageUrl = `https://image.pollinations.ai/prompt/${encodeURIComponent(imagePrompt)}?width=512&height=512&seed=${seed}&${safeParam}&nolog=true`;
           aiContent += `\n\n**Generated Image:**\n${imageUrl}`;
         }

-        session.messages.push({ role: "ai", content: aiContent });
-        Storage.updateSessionMessages(session.id, session.messages);
+        currentSession.messages.push({ role: "ai", content: aiContent });
+        Storage.updateSessionMessages(currentSession.id, currentSession.messages);
         window.addNewMessage({ role: "ai", content: aiContent }); // Display in chat
         voiceChatTranscript.value = aiContent;
         statusText.textContent = "Speaking response...";
         speakMessage(aiContent, () => {
           statusText.textContent = "Press 'Listen' to start";
         });
       })
-      .catch((err) => {
+      .catch(err => {
         showToast("Failed to get AI response");
         statusText.textContent = "Error: Try again";
       });
@@ -387,7 +328,7 @@ document.addEventListener("DOMContentLoaded", () => {
     toast.textContent = message;
     toast.style.opacity = "1";
     clearTimeout(toast.timeout);
-    toast.timeout = setTimeout(() => (toast.style.opacity = "0"), duration);
+    toast.timeout = setTimeout(() => toast.style.opacity = "0", duration);
   }

   window._chatInternals = {
@@ -397,7 +338,6 @@ document.addEventListener("DOMContentLoaded", () => {
     clearChatBtn,
     voiceToggleBtn,
     modelSelect,
-    currentSession,
     synth,
     voices,
     selectedVoice,
@@ -407,12 +347,11 @@ document.addEventListener("DOMContentLoaded", () => {
     recognition,
     isListening,
     voiceInputBtn,
-    slideshowInterval,
     toggleAutoSpeak,
     updateVoiceToggleUI,
     speakMessage,
     stopSpeaking,
     initSpeechRecognition,
     showToast
   };
-});
+});
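
Note on the fix: the substantive change in this file is the session handling. The old code captured a module-level currentSession once at DOMContentLoaded (and exposed it via window._chatInternals), so a session switched elsewhere could be written to under a stale id; the new code re-reads the current session from Storage each time sendVoiceChatMessage runs. Below is a minimal sketch of that pattern, assuming a localStorage-backed Storage helper with the method names used in the diff (getCurrentSession, createSession, updateSessionMessages); the helper bodies are illustrative guesses, not this repository's actual implementation.

// Sketch only: a localStorage-backed session store matching the method
// names used in the diff above. The real Storage implementation in this
// repository may differ; only the calling pattern is taken from the commit.
const Storage = {
  getCurrentSession() {
    const id = localStorage.getItem("currentSessionId");
    const sessions = JSON.parse(localStorage.getItem("sessions") || "{}");
    return id && sessions[id] ? sessions[id] : null;
  },
  createSession(title) {
    const session = { id: Date.now().toString(36), title, messages: [] };
    const sessions = JSON.parse(localStorage.getItem("sessions") || "{}");
    sessions[session.id] = session;
    localStorage.setItem("sessions", JSON.stringify(sessions));
    localStorage.setItem("currentSessionId", session.id);
    return session;
  },
  updateSessionMessages(id, messages) {
    const sessions = JSON.parse(localStorage.getItem("sessions") || "{}");
    if (sessions[id]) {
      sessions[id].messages = messages;
      localStorage.setItem("sessions", JSON.stringify(sessions));
    }
  }
};

// The pattern the fix adopts: resolve the session at call time instead of
// holding one reference from page load, so writes always target the id
// that is current right now.
function appendToCurrentSession(role, content) {
  const session = Storage.getCurrentSession() || Storage.createSession("New Chat");
  session.messages.push({ role, content });
  Storage.updateSessionMessages(session.id, session.messages);
}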
