Skip to content

Commit 9658435

Browse files
committed
fix(tts): improve playback stability (canplaythrough/loadedmetadata, longer start grace, cache-bust TTS fetch) and avoid early skips; fix(chat): add random seed to all chat calls to avoid identical outputs with same history
1 parent 009d074 commit 9658435

1 file changed

Lines changed: 11 additions & 3 deletions

File tree

src/main.js

Lines changed: 11 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -909,6 +909,8 @@ async function fetchTtsAudioUrl(text, voice) {
909909
if (a.safeFalse) u.searchParams.set('safe', 'false');
910910
if (a.system) u.searchParams.set('system', 'Speak exactly the provided text verbatim. Do not add, rephrase, or omit any words. Read only the content after the line break.');
911911
if (ref) u.searchParams.set('referrer', ref);
912+
// cache-buster to avoid any gateway caches returning truncated audio
913+
u.searchParams.set('cb', String(Date.now()) + Math.random().toString(36).slice(2));
912914
try {
913915
const resp = await fetch(u.toString(), { method: 'GET' });
914916
if (!resp.ok) throw new Error(`TTS HTTP ${resp.status}`);
@@ -1024,6 +1026,7 @@ function tryStartPlayback(job) {
10241026
const audio = new Audio(url);
10251027
audio.preload = 'auto';
10261028
audio.currentTime = 0;
1029+
try { audio.load(); } catch {}
10271030
job.audio = audio;
10281031
let started = false;
10291032
let watchdog = null;
@@ -1033,8 +1036,10 @@ function tryStartPlayback(job) {
10331036
if (job.cancelled) return;
10341037
audio.play().catch(() => {});
10351038
};
1039+
audio.addEventListener('loadedmetadata', startPlay, { once: true });
10361040
audio.addEventListener('canplay', startPlay, { once: true });
1037-
watchdog = setTimeout(startPlay, 800);
1041+
audio.addEventListener('canplaythrough', startPlay, { once: true });
1042+
watchdog = setTimeout(startPlay, 1500);
10381043

10391044
audio.addEventListener('playing', () => {
10401045
if (job.cancelled) return;
@@ -1052,11 +1057,12 @@ function tryStartPlayback(job) {
10521057

10531058
const stallTimer = setTimeout(() => {
10541059
if (!started) {
1060+
// Give slower decoders more time; mark as error only after generous grace
10551061
setTtsChunkState(job, index, 'error');
10561062
job.playIndex += 1;
10571063
tryStartPlayback(job);
10581064
}
1059-
}, 2000);
1065+
}, 7000);
10601066

10611067
audio.addEventListener('ended', () => {
10621068
if (job.cancelled) return;
@@ -1187,6 +1193,7 @@ async function handleChatResponse(initialResponse, model, endpoint) {
11871193
messages: state.conversation,
11881194
...(shouldIncludeTools(model, endpoint) ? { tools: [IMAGE_TOOL], tool_choice: 'auto' } : {}),
11891195
response_format: { type: 'json_object' },
1196+
seed: generateSeed(),
11901197
},
11911198
client,
11921199
);
@@ -1243,7 +1250,7 @@ async function handleChatResponse(initialResponse, model, endpoint) {
12431250
// Secondary salvage: retry once without JSON response_format for long-form text
12441251
try {
12451252
const salvageMessages = state.conversation.slice(0, -1); // drop the empty assistant turn
1246-
const retryResp = await chat({ model: model.id, endpoint, messages: salvageMessages }, client);
1253+
const retryResp = await chat({ model: model.id, endpoint, messages: salvageMessages, seed: generateSeed() }, client);
12471254
const retryMsg = retryResp?.choices?.[0]?.message;
12481255
const retryContent = normalizeContent(retryMsg?.content);
12491256
if (retryContent && retryContent.trim()) {
@@ -1699,6 +1706,7 @@ async function requestChatCompletion(model, endpoints) {
16991706
messages: state.conversation,
17001707
...(shouldIncludeTools(model, endpoint) ? { tools: [IMAGE_TOOL], tool_choice: 'auto' } : {}),
17011708
response_format: { type: 'json_object' },
1709+
seed: generateSeed(),
17021710
},
17031711
client,
17041712
);

0 commit comments

Comments (0)