-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathindex.js
More file actions
79 lines (67 loc) · 2.15 KB
/
index.js
File metadata and controls
79 lines (67 loc) · 2.15 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
// const { OpenAIApi } = require("openai"); // Only import OpenAIApi, not Configuration
// const express = require("express");
// const bodyParser = require("body-parser");
// const cors = require("cors");
// require("dotenv").config();
// const port = process.env.PORT || 3000;
// const token = process.env.OPENAI_API_KEY;
// const OpenAI = new OpenAIApi({
// apiKey: token, // Directly pass the API key here
// });
// const app = express();
// app.use(bodyParser.json());
// app.use(cors());
// app.post("/message", async (req, res, next) => {
// try {
// const response = await OpenAI.createChatCompletion({
// model: "gpt-4.1",
// prompt: req.body.prompt,
// temperature: 0.7,
// top_p: 1,
// frequency_penalty: 0,
// presence_penalty: 0,
// max_tokens: 1024,
// }).then((data) => {
// res.send(data.choices[0].message.content);
// });
// return response;
// } catch (error) {
// console.error(error);
// res.status(500).send("Internal Server Error");
// }
// });
// app.listen(port, () => {
// console.log(`Server is running on port ${port}`);
// });
const { OpenAI } = require("openai"); // v4+ SDK: import the OpenAI class directly
const express = require("express");
const bodyParser = require("body-parser");
const cors = require("cors");
require("dotenv").config();

const port = process.env.PORT || 3000;
const token = process.env.OPENAI_API_KEY;

// OpenAI client; the key comes from OPENAI_API_KEY loaded via dotenv.
const openai = new OpenAI({
  apiKey: token,
});

const app = express();
app.use(bodyParser.json());
app.use(cors());

// POST /message  body: { prompt: string }
// Responds with the assistant's reply as plain text.
app.post("/message", async (req, res) => {
  try {
    const prompt = req.body?.prompt;
    // Reject missing/empty prompts up front instead of surfacing a 500 later.
    if (typeof prompt !== "string" || prompt.trim() === "") {
      return res.status(400).send("Request body must include a non-empty 'prompt' string");
    }
    // The Chat Completions endpoint takes a `messages` array, not a `prompt`
    // string, and requires a chat model — `text-davinci-003` is a retired
    // legacy completions model and would be rejected.
    const response = await openai.chat.completions.create({
      model: "gpt-4.1",
      messages: [{ role: "user", content: prompt }],
      temperature: 0.7,
      max_tokens: 1024,
    });
    // Send back the assistant's response text.
    res.send(response.choices[0].message.content);
  } catch (error) {
    console.error(error);
    res.status(500).send("Internal Server Error");
  }
});

app.listen(port, () => {
  console.log(`Server is running on port ${port}`);
});