forked from Jackywine/Bella
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcore.js
More file actions
286 lines (240 loc) · 11.1 KB
/
core.js
File metadata and controls
286 lines (240 loc) · 11.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
// core.js - Bella's Brain (v3)
// Bella's core AI logic, supporting a hybrid architecture of local models and cloud APIs
import { pipeline, env, AutoTokenizer, AutoModelForSpeechSeq2Seq } from './vendor/transformers.js';
import CloudAPIService from './cloudAPI.js';
// Local model configuration: run transformers.js fully offline against ./models/.
env.allowLocalModels = true; // permit loading model files from local paths
env.useBrowserCache = false; // skip the browser Cache API — model files are re-read on every load; NOTE(review): confirm this is intentional (it trades startup time for always-fresh files)
env.allowRemoteModels = false; // never fetch models from the Hugging Face hub; a missing local model fails instead of falling back to a download
env.backends.onnx.logLevel = 'verbose'; // verbose ONNX runtime logging — presumably for debugging; consider lowering for production builds
env.localModelPath = './models/'; // root directory that local model ids are resolved against
// BellaAI — Bella's core "brain".
//
// Hybrid AI engine: answers through a cloud chat API when one is enabled and
// configured, otherwise through a local text2text LLM loaded via
// transformers.js. Also exposes optional speech recognition (ASR) and speech
// synthesis (TTS — currently disabled) helpers.
//
// Obtain it through the async singleton factory `BellaAI.getInstance()`; do
// not `new` the class directly, because model loading happens in init().
class BellaAI {
  static instance = null;
  // Cached initialization promise so concurrent getInstance() calls share a
  // single init. Previously `this.instance` was assigned *before* awaiting
  // init(), so a second caller could receive a half-initialized instance, and
  // two near-simultaneous first calls could each create and init an instance.
  static #initPromise = null;

  /**
   * Returns the shared, fully initialized BellaAI instance.
   * Safe to call concurrently — every caller awaits the same initialization.
   * @returns {Promise<BellaAI>}
   */
  static async getInstance() {
    if (this.instance !== null) {
      return this.instance;
    }
    if (this.#initPromise === null) {
      this.#initPromise = (async () => {
        const instance = new BellaAI();
        await instance.init();
        // Publish only after init() completes so no caller ever observes a
        // partially initialized singleton.
        this.instance = instance;
        return instance;
      })().catch((err) => {
        // Clear the cached promise so the next call can retry instead of
        // permanently caching the failure.
        this.#initPromise = null;
        throw err;
      });
    }
    return this.#initPromise;
  }

  constructor() {
    this.cloudAPI = new CloudAPIService();
    this.useCloudAPI = false; // Default to using local model
    this.currentMode = 'casual'; // Chat modes: casual, assistant, creative
  }

  /**
   * Loads the local models. LLM load failure is logged but non-fatal (chat
   * then answers with getErrorResponse/backup text); ASR load failure sets
   * this.asr = null and disables voice recognition. TTS loading is currently
   * disabled, so this.tts stays undefined and speak() always throws.
   */
  async init() {
    console.log('Initializing Bella\'s core AI...');
    // Priority loading of LLM model (chat functionality)
    try {
      console.log('Loading LLM model...');
      this.llm = await pipeline('text2text-generation', 'Xenova/LaMini-Flan-T5-77M');
      console.log('LLM model loaded successfully.');
    } catch (error) {
      console.error('Failed to load LLM model:', error);
      // LLM loading failure doesn't block initialization
    }
    // Attempt to load ASR model (voice recognition)
    try {
      console.log('Loading ASR model...');
      // NOTE(review): 'Xenova/whisper-asr' is an unusual model id (published
      // ids are typically e.g. 'Xenova/whisper-tiny') — confirm it exists
      // under env.localModelPath, otherwise this always lands in the catch.
      const modelPath = 'Xenova/whisper-asr';
      const tokenizer = await AutoTokenizer.from_pretrained(modelPath);
      const model = await AutoModelForSpeechSeq2Seq.from_pretrained(modelPath);
      this.asr = await pipeline('automatic-speech-recognition', model, { tokenizer });
      console.log('ASR model loaded successfully.');
    } catch (error) {
      console.warn('ASR model failed to load, voice recognition will be disabled:', error);
      // ASR loading failure doesn't affect chat functionality
      this.asr = null;
    }
    // TTS model temporarily disabled
    // try {
    //   console.log('Loading TTS model...');
    //   this.tts = await pipeline('text-to-speech', 'Xenova/speecht5_tts', { quantized: false });
    //   console.log('TTS model loaded successfully.');
    // } catch (error) {
    //   console.warn('TTS model failed to load, voice synthesis will be disabled:', error);
    //   this.tts = null;
    // }
    console.log('Bella\'s core AI initialized successfully.');
  }

  /**
   * Produces a response to `prompt`. Prefers the cloud API when enabled and
   * configured; otherwise (or on cloud failure) falls back to the local LLM.
   * Never rejects — on total failure it returns a canned error response.
   * @param {string} prompt - The user's message.
   * @returns {Promise<string>}
   */
  async think(prompt) {
    try {
      // If cloud API is enabled and configured, use it as priority
      if (this.useCloudAPI && this.cloudAPI.isConfigured()) {
        return await this.thinkWithCloudAPI(prompt);
      }
      // Otherwise use local model
      return await this.thinkWithLocalModel(prompt);
    } catch (error) {
      console.error('Error during thinking process:', error);
      // If cloud API fails, try falling back to local model
      if (this.useCloudAPI) {
        console.log('Cloud API failed, falling back to local model...');
        try {
          return await this.thinkWithLocalModel(prompt);
        } catch (localError) {
          console.error('Local model also failed:', localError);
        }
      }
      return this.getErrorResponse();
    }
  }

  /**
   * Thinks via the configured cloud chat API.
   * @param {string} prompt
   * @returns {Promise<string>}
   */
  async thinkWithCloudAPI(prompt) {
    const enhancedPrompt = this.enhancePromptForMode(prompt);
    return await this.cloudAPI.chat(enhancedPrompt);
  }

  /**
   * Thinks via the local LLM with tuned sampling parameters, then cleans the
   * generated text (strips the echoed prompt and "Bella's ... response:"
   * prefixes). Returns a random backup line if generation comes back empty.
   * @param {string} prompt
   * @returns {Promise<string>}
   */
  async thinkWithLocalModel(prompt) {
    if (!this.llm) {
      return "I'm still learning how to think. Please wait a moment...";
    }
    const bellaPrompt = this.enhancePromptForMode(prompt, true);
    // Optimized LLM parameters for better responses
    const result = await this.llm(bellaPrompt, {
      max_new_tokens: 180, // Increased token count for more complete responses
      temperature: 0.7, // Slightly lowered temperature for better consistency
      top_k: 50, // Increased top_k for more diverse vocabulary
      top_p: 0.92, // Added top_p parameter to optimize sampling
      do_sample: true, // Maintained sampling for creativity
      repetition_penalty: 1.2, // Added repetition penalty to avoid repetitive content
    });
    // Enhanced text cleaning and processing
    let response = result[0].generated_text;
    // Remove prompt part (string-arg replace is literal, first match only)
    if (response.includes(bellaPrompt)) {
      response = response.replace(bellaPrompt, '').trim();
    }
    // Remove possible "Bella's response:" prefixes
    response = response.replace(/^(Bella's response:|Bella's professional response:|Bella's creative response:|Bella:)/i, '').trim();
    // If response is empty, provide backup responses
    if (!response || response.length < 2) {
      const backupResponses = [
        "That's an interesting question. Let me think about it for a moment...",
        "Good question! I need to organize my thoughts...",
        "I have some ideas, but let me put them together more coherently...",
        "This topic is fascinating. Let me consider how to respond...",
        "I'm thinking about different angles to this question. Just a moment..."
      ];
      return backupResponses[Math.floor(Math.random() * backupResponses.length)];
    }
    return response;
  }

  /**
   * Wraps the raw user prompt in a persona/system prompt for the current chat
   * mode. Local-model prompts are more prescriptive (numbered style rules)
   * than cloud prompts. Unknown modes fall back to 'casual'.
   * @param {string} prompt - The raw user input.
   * @param {boolean} [isLocal=false] - true for the local-LLM prompt variant.
   * @returns {string}
   */
  enhancePromptForMode(prompt, isLocal = false) {
    const modePrompts = {
      casual: isLocal ?
        `As Bella, a friendly AI assistant similar to Siri, respond to the user in a warm, conversational tone. Your response should:
1. Be concise and helpful, like Siri's responses
2. Use natural, flowing language with a touch of personality
3. Be friendly but not overly emotional
4. Maintain a helpful, slightly witty tone
5. Sound intelligent and knowledgeable while remaining accessible
User message: ${prompt}
Bella's response:` :
        `You are Bella, an AI assistant similar to Siri. Respond in a helpful, concise manner with a touch of personality. Keep your responses clear and direct, while maintaining a friendly tone. Avoid overly technical language unless necessary, and focus on providing value to the user.
User message: ${prompt}
Bella's response:`,
      assistant: isLocal ?
        `As Bella, an intelligent AI assistant like Siri, provide accurate and helpful information. Your response should:
1. Deliver clear, factual information and useful advice
2. Organize content for easy understanding and application
3. Maintain a professional yet approachable tone
4. Use simple language when possible, technical terms only when necessary
5. Demonstrate expertise while remaining accessible
User question: ${prompt}
Bella's professional response:` :
        `You are Bella, a Siri-like AI assistant. Provide accurate, useful information and advice with a professional yet approachable tone. Organize your response clearly, avoid unnecessary technical language, and focus on being helpful and informative.
User question: ${prompt}
Bella's professional response:`,
      creative: isLocal ?
        `As Bella, a creative AI assistant with Siri-like qualities, use your imagination to respond. Your response should:
1. Present unique perspectives and creative thinking
2. Use vivid, descriptive language
3. Offer unexpected but interesting ideas
4. Inspire the user's imagination
5. Maintain a light, engaging tone
User prompt: ${prompt}
Bella's creative response:` :
        `You are Bella, a creative AI assistant with Siri-like qualities. Provide interesting, unique responses using vivid language and creative thinking. Offer unexpected perspectives that inspire imagination while maintaining an engaging, helpful tone.
User prompt: ${prompt}
Bella's creative response:`
    };
    return modePrompts[this.currentMode] || modePrompts.casual;
  }

  /**
   * Returns a random canned "I had trouble with that" response.
   * @returns {string}
   */
  getErrorResponse() {
    const errorResponses = [
      "I'm sorry, I'm having trouble processing that right now. Let me try to reorganize my thoughts...",
      "Hmm... I need to think about this a bit more. Please wait a moment.",
      "I seem to be having a bit of trouble with that. Give me a second to sort things out.",
      "Let me rephrase my thoughts. Just a moment please.",
      "I didn't quite catch that. Could you try asking in a different way?"
    ];
    return errorResponses[Math.floor(Math.random() * errorResponses.length)];
  }

  /**
   * Sets the chat mode.
   * @param {string} mode - One of 'casual', 'assistant', 'creative'.
   * @returns {boolean} true if the mode was valid and applied.
   */
  setChatMode(mode) {
    if (['casual', 'assistant', 'creative'].includes(mode)) {
      this.currentMode = mode;
      return true;
    }
    return false;
  }

  /**
   * Switches the AI backend. 'local' disables the cloud API; any other value
   * is delegated to the cloud service, and cloud mode is enabled on success.
   * @param {string} provider
   * @returns {boolean} true if the switch succeeded.
   */
  switchProvider(provider) {
    if (provider === 'local') {
      this.useCloudAPI = false;
      return true;
    } else {
      const success = this.cloudAPI.switchProvider(provider);
      if (success) {
        this.useCloudAPI = true;
      }
      return success;
    }
  }

  /**
   * Stores an API key for a cloud provider (delegated to CloudAPIService).
   * @param {string} provider
   * @param {string} apiKey
   */
  setAPIKey(provider, apiKey) {
    return this.cloudAPI.setAPIKey(provider, apiKey);
  }

  /** Clears the cloud conversation history. */
  clearHistory() {
    this.cloudAPI.clearHistory();
  }

  /**
   * Returns the current configuration snapshot.
   * @returns {{useCloudAPI: boolean, provider: object, mode: string, isConfigured: boolean}}
   */
  getCurrentConfig() {
    return {
      useCloudAPI: this.useCloudAPI,
      provider: this.useCloudAPI ? this.cloudAPI.getCurrentProvider() : { name: 'local', model: 'LaMini-Flan-T5-77M' },
      mode: this.currentMode,
      // The local model needs no credentials, so it always counts as configured.
      isConfigured: this.useCloudAPI ? this.cloudAPI.isConfigured() : true
    };
  }

  /**
   * Transcribes audio input with the local ASR pipeline.
   * @param {*} audioData - Audio accepted by the transformers.js ASR pipeline.
   * @returns {Promise<string>} The recognized text.
   * @throws {Error} If the ASR model failed to load in init().
   */
  async listen(audioData) {
    if (!this.asr) {
      throw new Error('Speech recognition model not initialized');
    }
    const result = await this.asr(audioData);
    return result.text;
  }

  /**
   * Synthesizes speech from text. Currently always throws, because TTS
   * loading is disabled in init() and this.tts is never assigned.
   * @param {string} text
   * @returns {Promise<*>} Audio produced by the TTS pipeline.
   * @throws {Error} If the TTS model is not initialized.
   */
  async speak(text) {
    if (!this.tts) {
      throw new Error('Speech synthesis model not initialized');
    }
    // We need speaker embeddings for SpeechT5
    const speaker_embeddings = 'models/Xenova/speecht5_tts/speaker_embeddings.bin';
    const result = await this.tts(text, {
      speaker_embeddings,
    });
    return result.audio;
  }

  /** Returns the CloudAPIService instance (for external access). */
  getCloudAPIService() {
    return this.cloudAPI;
  }
}
// ES6 module export
export { BellaAI };