-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path llm_api_server.py
More file actions
36 lines (30 loc) · 1.08 KB
/
llm_api_server.py
File metadata and controls
36 lines (30 loc) · 1.08 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
import os
from openai import OpenAI
from fastapi import FastAPI, Query
# FastAPI application instance; the /llm/ route below is registered on it.
app = FastAPI()
# OpenAI-compatible client pointed at Alibaba DashScope's compatibility endpoint.
client = OpenAI(
# If the environment variable is not configured, replace the next line with
# your DashScope API key: api_key="sk-xxx"
api_key=os.getenv("DASHSCOPE_API_KEY"),
base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)
@app.get("/llm/")  # Example: http://127.0.0.1:8088/llm/?p=You are a helpful assistant&q=Hello
async def get_result(p: str = Query(...), q: str = Query(...)):
    """Proxy a chat request to the DashScope-compatible endpoint.

    Parameters:
        p: system-prompt text.
        q: user-message text.

    Returns:
        {"answer": <concatenated streamed completion text>}. If the stream
        fails midway, returns whatever text was accumulated before the error.
    """
    # NOTE(review): this is a blocking (sync) call inside an async handler;
    # consider the async OpenAI client or run_in_executor if throughput matters.
    completion = client.chat.completions.create(
        model="qwen-plus",
        messages=[{'role': 'system', 'content': p},
                  {'role': 'user', 'content': q}],
        stream=True,
        stream_options={"include_usage": True}
    )
    response_text = ""
    try:
        for chunk in completion:
            # With include_usage=True the final chunk carries only usage stats
            # and an EMPTY choices list. The original `chunk.choices is []`
            # compared identity against a fresh list literal — always False —
            # so that chunk crashed the loop via chunk.choices[0].
            if not chunk.choices:
                continue
            # delta.content may be None (e.g. a role-only first delta);
            # `or ""` avoids the TypeError that previously aborted accumulation.
            response_text += chunk.choices[0].delta.content or ""
    except Exception as e:
        # Best-effort: log and return the text accumulated so far.
        print("Error:", e)
    return {"answer": response_text}
if __name__ == "__main__":
    # Run the development server, listening on every interface at port 8088.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8088)