-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathui.py
More file actions
97 lines (81 loc) · 3.15 KB
/
ui.py
File metadata and controls
97 lines (81 loc) · 3.15 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
# ui.py
import streamlit as st
from typing import Dict, Any
from agent import agent
from client import Qwen3Client, DeepAgentClient
class ChatUI:
    """Streamlit chat UI for a RAG chatbot.

    Owns page setup, the sidebar model-configuration controls, and
    rendering of chat history, user messages, and (streamed or final)
    assistant messages. Conversation state is kept in ``st.session_state``.
    """

    def __init__(self, session_state):
        """Configure the page and ensure the chat history exists.

        Args:
            session_state: caller-supplied session-state handle. It is
                stored on the instance, but the methods below read and
                write the global ``st.session_state`` directly.
        """
        self._init_page()
        self.session_state = session_state
        # Create the history list once; Streamlit reruns preserve it.
        if "messages" not in st.session_state:
            st.session_state.messages = []

    def _init_page(self) -> None:
        """Apply page-wide Streamlit settings and draw the header."""
        st.set_page_config(
            layout='wide',
            page_title='RAG',
            page_icon='./docs/imgs/avic.png')
        st.header('RAG聊天机器人', divider='rainbow')

    def initialize_chatbot(self, cfg: Dict[str, Any], mode: str = "llm"):
        """Create the backend chat client for the selected mode.

        Args:
            cfg: sidebar configuration dict; only ``cfg["server_url"]``
                is read here.
            mode: ``"agent"`` selects the agent wrapper; any other value
                selects the plain vLLM client.

        Returns:
            A ``DeepAgentClient`` or ``Qwen3Client`` instance.
        """
        if mode == "agent":
            return DeepAgentClient(agent)
        return Qwen3Client(cfg["server_url"])

    def sidebar(self) -> tuple[Dict[str, Any], Any]:
        """Render the sidebar controls and collect their values.

        The original annotation claimed ``Dict[str, Any]``, but the
        method has always returned a 2-tuple; the annotation is
        corrected here without changing the returned value.

        Returns:
            Tuple of (generation-config dict, uploaded file object or
            ``None`` when nothing was uploaded).
        """
        st.sidebar.title("模型设置")
        cfg = {
            "server_url": st.sidebar.text_input(
                "vLLM Server URL",
                value="http://localhost:54257"
            ),
            "model": st.sidebar.text_input(
                "模型名称",
                value="/root/share/new_models/qwen3/Qwen3-0.6B"
            ),
            "temperature": st.sidebar.slider("temperature", 0.0, 2.0, 0.7, 0.05),
            "top_p": st.sidebar.slider("top_p", 0.1, 1.0, 0.8, 0.05),
            "top_k": st.sidebar.slider("top_k", 1, 100, 20),
            "max_tokens": st.sidebar.number_input(
                "max_tokens", 128, 16384, 8192
            ),
            "presence_penalty": st.sidebar.slider(
                "presence_penalty", 0.0, 2.0, 1.5, 0.1
            ),
            "frequency_penalty": st.sidebar.slider(
                "frequency_penalty", 0.0, 2.0, 0.0, 0.1
            ),
            "enable_thinking": st.sidebar.checkbox(
                "启用 thinking", value=False
            ),
            "stream": st.sidebar.checkbox(
                "流式输出", value=True
            )
        }
        uploaded_file = st.sidebar.file_uploader('上传文件')
        # Clearing only empties the stored history; the page re-renders
        # on the next Streamlit rerun cycle.
        if st.sidebar.button("清空对话"):
            st.session_state.messages = []
        return cfg, uploaded_file

    def render_history(self) -> None:
        """Replay every stored message as a chat bubble."""
        for msg in st.session_state.messages:
            with st.chat_message(msg["role"]):
                st.markdown(msg["content"])

    def get_user_input(self) -> str | None:
        """Return the user's chat-box text, or ``None`` if not submitted."""
        return st.chat_input("请输入内容")

    def render_user(self, text: str) -> None:
        """Render *text* as a user chat bubble."""
        with st.chat_message("user"):
            st.markdown(text)

    def render_assistant_stream(self, delta_text: str) -> None:
        """Append a streamed chunk to the in-progress assistant message.

        NOTE(review): the placeholder and accumulated text are created
        once and never reset, so a second assistant turn would append to
        the first's text — confirm the caller clears
        ``assistant_placeholder`` / ``assistant_text`` between turns.
        """
        if "assistant_placeholder" not in st.session_state:
            st.session_state.assistant_placeholder = st.empty()
            st.session_state.assistant_text = ""
        st.session_state.assistant_text += delta_text
        st.session_state.assistant_placeholder.markdown(
            st.session_state.assistant_text
        )

    def render_assistant_final(self, text: str) -> None:
        """Render the completed assistant reply as a chat bubble."""
        with st.chat_message("assistant"):
            st.markdown(text)