-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathqueryOpenRouter.js
More file actions
74 lines (65 loc) · 2.72 KB
/
queryOpenRouter.js
File metadata and controls
74 lines (65 loc) · 2.72 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
import OpenAI from 'openai';
import wrap from 'word-wrap';
import chalk from 'chalk';
import { log } from './util.js';
/**
 * Queries an AI model through an OpenRouter-compatible chat completions endpoint
 * and prints the response to the console (streamed or word-wrapped).
 * @param {string} aiModelName - Name of the AI model to use
 * @param {string} systemMessage - System context message (prepended to the conversation when provided)
 * @param {string} userPrompt - User query prompt
 * @param {boolean} outputStream - Whether to stream the output
 * @param {boolean} compressPrompt - Whether to compress the prompt to fit model's maximum context size
 * @param {string} baseURL - API base URL (e.g. https://openrouter.ai/api/v1)
 * @param {string} apiKey - OpenRouter API Key
 * @param {object} aiParameters - AI fine-tuning parameters
 * @returns {Promise<void>}
 * @throws {Error} When AI query fails
 */
async function queryAI(aiModelName, systemMessage, userPrompt, outputStream, compressPrompt, baseURL, apiKey, aiParameters) {
    const client = new OpenAI({
        baseURL,
        apiKey,
        defaultHeaders: {
            "HTTP-Referer": "https://github.com/EAddario/askhal",
            "X-Title": "Ask HAL"
        }
    });

    // Build the conversation; the optional system message must come first.
    const messages = [{ role: "user", content: userPrompt }];
    if (systemMessage)
        messages.unshift({ role: "system", content: systemMessage });

    const openRouterRequest = {
        model: aiModelName,
        messages,
        // OpenRouter expects an array here; ["middle-out"] enables prompt compression.
        // (The previous string assignment was silently ignored by the API.)
        transforms: compressPrompt ? ["middle-out"] : [],
        stream: outputStream,
        // `(x) && x` is an identity expression — pass the values through directly;
        // undefined entries are dropped during JSON serialization.
        temperature: aiParameters.TEMPERATURE,
        top_p: aiParameters.TOP_P,
        top_k: aiParameters.TOP_K,
        frequency_penalty: aiParameters.FREQUENCY_PENALTY,
        presence_penalty: aiParameters.PRESENCE_PENALTY,
        repetition_penalty: aiParameters.REPETITION_PENALTY,
    };

    try {
        const result = await client.chat.completions.create(openRouterRequest);
        log.info('');
        if (outputStream) {
            for await (const chunk of result) {
                // Guard with optional chaining: keep-alive/comment chunks may
                // arrive with an empty `choices` array.
                process.stdout.write(chalk.cyanBright(chunk.choices[0]?.delta?.content ?? ''));
            }
            log.info('');
        } else {
            const aiModelResponse = wrap(result.choices[0].message.content, { width: 160, indent: '' });
            log.message(aiModelResponse);
        }
        log.info('');
    } catch (err) {
        log.error(`could not query model [${aiModelName}] - ${err.message}`);
        if (err.response) {
            log.error(`status: ${err.response.status}`);
            log.error(`data: ${JSON.stringify(err.response.data)}`);
        }
        // Preserve the original error for upstream diagnostics.
        throw new Error(`could not query model [${aiModelName}]`, { cause: err });
    }
}
export { queryAI };