-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathchat_app.py
More file actions
147 lines (120 loc) Β· 4.65 KB
/
chat_app.py
File metadata and controls
147 lines (120 loc) Β· 4.65 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
"""
RAG Study Chat App - Simple Chat UI following Streamlit best practices
"""
import streamlit as st
from langchain_openai import ChatOpenAI
from langchain_core.messages import (
AIMessage,
HumanMessage
)
from dotenv import load_dotenv
from module.simple_rag_chat import SimpleRAGChat
from module.vector_db import VectorDB
load_dotenv()
# Page config — must be the first Streamlit call in the script.
st.set_page_config(
    page_title="RAG Chat Demo",
    page_icon="💬",  # restored: the scraped source held a mojibake byte here
    layout="wide",
)
def initialize_session():
    """Ensure the session-state keys this app reads exist before use."""
    state = st.session_state
    # Only seed the key on the very first run; later reruns keep the value.
    if "session_input" not in state:
        state.session_input = ""
@st.cache_resource
def get_rag_instance():
    """Build the RAG chat pipeline once and cache it across Streamlit reruns."""
    # Streaming chat model that produces the user-facing answers.
    answer_llm = ChatOpenAI(
        model="gpt-4o-mini",
        temperature=0.1,
        streaming=True,
        max_tokens=1000,
        request_timeout=30,
    )
    # Smaller, non-streaming model used only to summarize chat history.
    summary_llm = ChatOpenAI(
        model="gpt-3.5-turbo",
        temperature=0.1,
        max_tokens=200,
        request_timeout=15,
    )
    store = VectorDB(storage_path="./db/faiss")
    return SimpleRAGChat(
        llm=answer_llm,
        vector_store=store,
        summarizer_llm=summary_llm,
    )
def main():
    """Render the chat page: replay history, stream the answer, draw the sidebar.

    NOTE(review): the scraped original's Korean UI strings were mojibake-corrupted
    (some split mid-literal); they are reconstructed here as clean English text.
    """
    st.title("💬 RAG Chat Demo")
    st.caption("📄 Document-grounded question answering system")
    initialize_session()
    simple_rag = get_rag_instance()  # fixed: original had a stray trailing ';'

    # Replay the persisted chat history on every Streamlit rerun.
    for message in simple_rag.get_history_list():
        if isinstance(message, HumanMessage):
            role = "user"
        elif isinstance(message, AIMessage):
            role = "assistant"
        else:
            # Unsupported message types (system/tool) are skipped.
            continue
        with st.chat_message(role):
            st.markdown(message.content)

    # React to user input; the box is disabled while a prompt is being processed.
    prompt = st.chat_input(
        "Type a message...",
        disabled=st.session_state.session_input != "",
    )
    if prompt:
        st.session_state.session_input = prompt
        # No st.rerun() here — the streaming branch below handles it directly.

    if st.session_state.session_input:
        with st.chat_message("user"):
            st.markdown(st.session_state.session_input)

        # Stream the assistant's answer chunk by chunk into a placeholder.
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""
            try:
                for chunk in simple_rag.send_stream(st.session_state.session_input):
                    if chunk:  # only act on non-empty chunks
                        full_response += chunk
                        message_placeholder.markdown(full_response + "▌")
                # Final render without the streaming cursor.
                message_placeholder.markdown(full_response)
            except Exception as e:
                message_placeholder.error(f"An error occurred while streaming: {e}")

        # Clear the pending input, then refresh the page.
        st.session_state.session_input = ""
        st.rerun()

    # Sidebar: status, reset button, and example prompts.
    with st.sidebar:
        st.header("ℹ️ Info")

        # Clear chat button
        if st.button("🗑️ Reset conversation", type="secondary", use_container_width=True):
            simple_rag.new_history_session()
            st.rerun()

        st.markdown("**Current status:**")
        if simple_rag.get_history_length():
            st.success(f"💬 {simple_rag.get_history_length()} messages")
        else:
            st.info("Start a conversation!")

        st.markdown("---")
        st.markdown("**Example questions:**")
        example_questions = [
            "What is the AI adoption strategy?",
            "Explain the digital transformation of the public sector",
            "What are the main directions of AI policy?",
            "What is the digital government innovation plan?",
        ]
        # enumerate() gives stable widget keys; the original used hash(question),
        # which is salted per process and therefore fragile across restarts.
        for idx, question in enumerate(example_questions):
            if st.button(
                f"💡 {question[:20]}...",
                key=f"example_{idx}",
                use_container_width=True,
            ):
                # Feed the example straight into the chat-input pipeline.
                st.session_state.session_input = question
                st.rerun()

        st.markdown("---")
        st.markdown("**💡 Tips:**")
        st.markdown(
            """
            - Specific questions get more accurate answers
            - Ask about one topic at a time
            - Responses are streamed in real time
            - Document-grounded RAG keeps answers accurate
            """
        )
# Script entry point: run the Streamlit app when executed directly.
if __name__ == "__main__":
    main()