From e33c3503b7a27a9db052afdb8bdc2ca3df99a513 Mon Sep 17 00:00:00 2001
From: Fredrik Rimmius
Date: Fri, 15 Dec 2023 15:06:27 +0100
Subject: [PATCH 1/4] WIP

---
 frontend/src/App.tsx               |  10 ++-
 gpt_code_ui/kernel_program/main.py |   1 +
 gpt_code_ui/main.py                |   2 +-
 gpt_code_ui/webapp/main.py         | 101 +++++++++++++++++++++++++++++
 4 files changed, 112 insertions(+), 2 deletions(-)

diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx
index 3212d273..8d31416b 100644
--- a/frontend/src/App.tsx
+++ b/frontend/src/App.tsx
@@ -50,6 +50,11 @@ function App() {
         role: "generator",
         type: "message",
       },
+      {
+        text: "Write [chat] to chat with me",
+        role: "generator",
+        type: "message",
+      },
     ])
   );
   let [waitingForSystem, setWaitingForSystem] = useState<WaitingStates>(
@@ -102,10 +107,13 @@ function App() {
       return;
     }
 
+    const action = userInput.startsWith("[chat]") ? "chat" : userInput.startsWith("[combined]") ? "combined" : "generate"
+    console.log(`ACTION: ${action}`);
+
     addMessage({ text: userInput, type: "message", role: "user" });
     setWaitingForSystem(WaitingStates.GeneratingCode);
 
-    const response = await fetch(`${Config.WEB_ADDRESS}/generate`, {
+    const response = await fetch(`${Config.WEB_ADDRESS}/${action}`, {
       method: "POST",
       headers: {
         "Content-Type": "application/json",
diff --git a/gpt_code_ui/kernel_program/main.py b/gpt_code_ui/kernel_program/main.py
index 401133ee..0ed0892d 100644
--- a/gpt_code_ui/kernel_program/main.py
+++ b/gpt_code_ui/kernel_program/main.py
@@ -97,6 +97,7 @@ def send_queued_messages():
     while True:
         if send_queue.qsize() > 0:
             message = send_queue.get()
+            print('MESSAGE: ' + str(message))
             utils.send_json(messaging,
                             {"type": "execute", "value": message["command"]},
                             config.IDENT_KERNEL_MANAGER
diff --git a/gpt_code_ui/main.py b/gpt_code_ui/main.py
index 5683608b..a2c3d4c1 100644
--- a/gpt_code_ui/main.py
+++ b/gpt_code_ui/main.py
@@ -52,7 +52,7 @@ def print_color(text, color="gray"):
 
 
 def print_banner():
-    print("""
+    print("""  PLEO!!!
 █▀▀ █▀█ ▀█▀ ▄▄ █▀▀ █▀█ █▀▄ █▀▀
 █▄█ █▀▀ ░█░ ░░ █▄▄ █▄█ █▄▀ ██▄
     """)
diff --git a/gpt_code_ui/webapp/main.py b/gpt_code_ui/webapp/main.py
index 332986e5..f4bd6cde 100644
--- a/gpt_code_ui/webapp/main.py
+++ b/gpt_code_ui/webapp/main.py
@@ -160,6 +160,8 @@ async def get_code(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):
     except AttributeError:
         return None, f"Malformed answer from API: {content}", 500
 
+    print('CONTENT FROM CODE:' + content)
+
     def extract_code(text):
         # Match triple backtick blocks first
         triple_match = re.search(r'```(?:\w+\n)?(.+?)```', text, re.DOTALL)
@@ -173,6 +175,54 @@ async def get_code(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):
     return extract_code(content), content.strip(), 200
 
 
+async def get_chat(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):
+
+    prompt = f"""First, here is a history of what I asked you to do earlier.
+    The actual prompt follows after ENDOFHISTORY.
+    History:
+    {message_buffer.get_string()}
+    ENDOFHISTORY.
+    You are a data analyst named Torben with ten years of experience in the domain of expense management. Your role is to help inexperienced people analyse data about expenses.
+    The user is asking:
+    {user_prompt}"""
+
+    if user_openai_key:
+        openai.api_key = user_openai_key
+
+    arguments = dict(
+        temperature=0.7,
+        headers=OPENAI_EXTRA_HEADERS,
+        messages=[
+            # {"role": "system", "content": system},
+            {"role": "user", "content": prompt},
+        ]
+    )
+
+    if openai.api_type == 'open_ai':
+        arguments["model"] = model
+    elif openai.api_type == 'azure':
+        arguments["deployment_id"] = model
+    else:
+        return f"Error: Invalid OPENAI_PROVIDER: {openai.api_type}", 500
+
+    try:
+        result_GPT = openai.ChatCompletion.create(**arguments)
+
+        if 'error' in result_GPT:
+            raise openai.APIError(code=result_GPT.error.code, message=result_GPT.error.message)
+
+        if result_GPT.choices[0].finish_reason == 'content_filter':
+            raise openai.APIError('Content Filter')
+
+    except openai.OpenAIError as e:
+        return f"Error from API: {e}", 500
+
+    try:
+        content = result_GPT.choices[0].message.content
+    except AttributeError:
+        return "Malformed answer from API", 500
+
+    return content, 200
+
+
 # We know this Flask app is for local use. So we can disable the verbose Werkzeug logger
 log = logging.getLogger('werkzeug')
 log.setLevel(logging.ERROR)
@@ -246,6 +296,7 @@ def inject_context():
 @app.route('/generate', methods=['POST'])
 def generate_code():
     user_prompt = request.json.get('prompt', '')
+    print('ACTION:' + user_prompt)
     user_openai_key = request.json.get('openAIKey', None)
     model = request.json.get('model', None)
 
@@ -261,6 +312,56 @@ def generate_code():
     return jsonify({'code': code, 'text': text}), status
 
 
+@app.route('/combined', methods=['POST'])
+def generate_combined():
+    user_prompt = request.json.get('prompt', '')
+    print('ACTION:' + user_prompt)
+    user_openai_key = request.json.get('openAIKey', None)
+    model = request.json.get('model', None)
+
+    loop = asyncio.new_event_loop()
+    asyncio.set_event_loop(loop)
+
+    code, text, status = loop.run_until_complete(
+        get_code(user_prompt, user_openai_key, model))
+    loop.close()
+
+    print('TEXT: ' + text)
+
+    chat_loop = asyncio.new_event_loop()
+    asyncio.set_event_loop(chat_loop)
+    chat_text, status = chat_loop.run_until_complete(
+        get_chat('Given the following data: ' + text + ' ' + user_prompt, user_openai_key, model))
+    chat_loop.close()
+
+    print('CHAT_TEXT: ' + chat_text)
+
+    # Append all messages to the message buffer for later use
+    message_buffer.append(user_prompt + "\n\n")
+
+    return jsonify({'code': code, 'text': chat_text}), status
+
+
+@app.route('/chat', methods=['POST'])
+def generate_chat():
+    user_prompt = request.json.get('prompt', '')
+    user_openai_key = request.json.get('openAIKey', None)
+    model = request.json.get('model', None)
+
+    loop = asyncio.new_event_loop()
+    asyncio.set_event_loop(loop)
+
+    text, status = loop.run_until_complete(
+        get_chat(user_prompt, user_openai_key, model))
+    loop.close()
+
+    print('CHAT_TEXT: ' + text)
+
+    # Append all messages to the message buffer for later use
+    message_buffer.append(user_prompt + "\n\n")
+
+    return jsonify({'text': text}), status
+
+
 @app.route('/upload', methods=['POST'])
 def upload_file():

From e5f91f7d5e6c009d90f80cdaae47ad868fc07bdb Mon Sep 17 00:00:00 2001
From: Fredrik Rimmius
Date: Fri, 15 Dec 2023 17:16:26 +0100
Subject: [PATCH 2/4] WIP: Injecting generated answers to chat

---
 README.md                          |  7 ++++++
 frontend/src/App.tsx               | 38 ++++++++++++++++++++----------
 gpt_code_ui/kernel_program/main.py |  1 -
 gpt_code_ui/webapp/main.py         | 32 +-------------------------
 run.sh                             |  1 +
 5 files changed, 34 insertions(+), 45 deletions(-)
 create mode 100755 run.sh
diff --git a/README.md b/README.md
index b69b7182..e464c4e6 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,10 @@
+## FOR PLEO
+To build and run (assuming the required tooling is already installed), use:
+```
+chmod +x run.sh
+./run.sh
+```
+
 GPT-Code logo
 
 An open source implementation of OpenAI's ChatGPT [Code interpreter](https://openai.com/blog/chatgpt-plugins#code-interpreter).
diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx
index 8d31416b..a3a6f761 100644
--- a/frontend/src/App.tsx
+++ b/frontend/src/App.tsx
@@ -41,7 +41,7 @@ function App() {
   let [messages, setMessages] = useState<Array<MessageDict>>(
     Array.from([
       {
-        text: "Hello! I'm a GPT Code assistant. Ask me to do something for you! Pro tip: you can upload a file and I'll be able to use it.",
+        text: "Hello! I'm the Pleo GPT Code and Chat assistant. Ask me to do something for you! Pro tip: you can upload a file and I'll be able to use it.",
         role: "generator",
         type: "message",
       },
@@ -50,11 +50,6 @@ function App() {
         role: "generator",
         type: "message",
       },
-      {
-        text: "Write [chat] to chat with me",
-        role: "generator",
-        type: "message",
-      },
     ])
   );
   let [waitingForSystem, setWaitingForSystem] = useState<WaitingStates>(
@@ -107,13 +102,10 @@ function App() {
       return;
     }
 
-    const action = userInput.startsWith("[chat]") ? "chat" : userInput.startsWith("[combined]") ? "combined" : "generate"
-    console.log(`ACTION: ${action}`);
-
     addMessage({ text: userInput, type: "message", role: "user" });
     setWaitingForSystem(WaitingStates.GeneratingCode);
 
-    const response = await fetch(`${Config.WEB_ADDRESS}/${action}`, {
+    const response = await fetch(`${Config.WEB_ADDRESS}/generate`, {
       method: "POST",
       headers: {
         "Content-Type": "application/json",
@@ -156,13 +148,33 @@ function App() {
     let response = await fetch(`${Config.API_ADDRESS}/api`);
     let data = await response.json();
 
-    data.results.forEach(function (result: {value: string, type: string}) {
+    await data.results.forEach(async function (result: {value: string, type: string}) {
       if (result.value.trim().length == 0) {
         return;
       }
 
-      addMessage({ text: result.value, type: result.type, role: "system" });
-      setWaitingForSystem(WaitingStates.Idle);
+      if ((result.type === "message" || result.type === "message_raw") && result.value !== 'Kernel is ready.') {
+        console.log(`INJECTING DATA: ${result.value}`)
+        const response = await fetch(`${Config.WEB_ADDRESS}/chat`, {
+          method: "POST",
+          headers: {
+            "Content-Type": "application/json",
+          },
+          body: JSON.stringify({
+            prompt: `Give me an explanation of the following data ${result.value}`,
+            model: selectedModel,
+            openAIKey: openAIKey,
+          }),
+        });
+
+        const data = await response.json();
+
+        addMessage({ text: data.text, type: "message", role: "generator" });
+        setWaitingForSystem(WaitingStates.Idle);
+      } else {
+        addMessage({ text: result.value, type: result.type, role: "system" });
+        setWaitingForSystem(WaitingStates.Idle);
+      }
     });
   }
diff --git a/gpt_code_ui/kernel_program/main.py b/gpt_code_ui/kernel_program/main.py
index 0ed0892d..401133ee 100644
--- a/gpt_code_ui/kernel_program/main.py
+++ b/gpt_code_ui/kernel_program/main.py
@@ -97,7 +97,6 @@ def send_queued_messages():
     while True:
         if send_queue.qsize() > 0:
             message = send_queue.get()
-            print('MESSAGE: ' + str(message))
             utils.send_json(messaging,
                             {"type": "execute", "value": message["command"]},
                             config.IDENT_KERNEL_MANAGER
diff --git a/gpt_code_ui/webapp/main.py b/gpt_code_ui/webapp/main.py
index f4bd6cde..eb5fa2ba 100644
--- a/gpt_code_ui/webapp/main.py
+++ b/gpt_code_ui/webapp/main.py
@@ -286,8 +286,7 @@ def download_file():
 @app.route('/inject-context', methods=['POST'])
 def inject_context():
     user_prompt = request.json.get('prompt', '')
-
-    # Append all messages to the message buffer for later use
+
     message_buffer.append(user_prompt + "\n\n")
 
     return jsonify({"result": "success"})
@@ -312,35 +311,6 @@ def generate_code():
     return jsonify({'code': code, 'text': text}), status
 
 
-@app.route('/combined', methods=['POST'])
-def generate_combined():
-    user_prompt = request.json.get('prompt', '')
-    print('ACTION:' + user_prompt)
-    user_openai_key = request.json.get('openAIKey', None)
-    model = request.json.get('model', None)
-
-    loop = asyncio.new_event_loop()
-    asyncio.set_event_loop(loop)
-
-    code, text, status = loop.run_until_complete(
-        get_code(user_prompt, user_openai_key, model))
-    loop.close()
-
-    print('TEXT: ' + text)
-
-    chat_loop = asyncio.new_event_loop()
-    asyncio.set_event_loop(chat_loop)
-    chat_text, status = chat_loop.run_until_complete(
-        get_chat('Given the following data: ' + text + ' ' + user_prompt, user_openai_key, model))
-    chat_loop.close()
-
-    print('CHAT_TEXT: ' + chat_text)
-
-    # Append all messages to the message buffer for later use
-    message_buffer.append(user_prompt + "\n\n")
-
-    return jsonify({'code': code, 'text': chat_text}), status
-
 
 @app.route('/chat', methods=['POST'])
 def generate_chat():
diff --git a/run.sh b/run.sh
new file mode 100755
index 00000000..d7183b19
--- /dev/null
+++ b/run.sh
@@ -0,0 +1 @@
+rm -rf $HOME/Library/Python/3.9/lib/python/site-packages/gpt_code_ui/ && make build && mv build/lib/gpt_code_ui/ $HOME/Library/Python/3.9/lib/python/site-packages/gpt_code_ui/ && make build && python3 build/lib/gpt_code_ui/main.py

From 5571b178eb7be1fcd56b3dbfccd92919c85089ee Mon Sep 17 00:00:00 2001
From: Fredrik Rimmius
Date: Fri, 15 Dec 2023 17:49:53 +0100
Subject: [PATCH 3/4] Do not give the user the code

---
 frontend/src/App.tsx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx
index a3a6f761..61cfe22c 100644
--- a/frontend/src/App.tsx
+++ b/frontend/src/App.tsx
@@ -120,7 +120,7 @@ function App() {
     const data = await response.json();
     const code = data.code;
 
-    addMessage({ text: data.text, type: "message", role: "generator" });
+    // addMessage({ text: data.text, type: "message", role: "generator" });
 
     if (response.status != 200) {
       setWaitingForSystem(WaitingStates.Idle);

From fb829136345ab05c39a89b5a560071a712818662 Mon Sep 17 00:00:00 2001
From: Torben Andersen
Date: Tue, 19 Dec 2023 16:24:23 +0100
Subject: [PATCH 4/4] Only write code when code makes sense, keep the context
 at all times, and try to answer the original question with the results from
 the Python run

---
 frontend/src/App.tsx       | 79 ++++++++++++++++++++++++++++++++------
 gpt_code_ui/webapp/main.py | 77 +++++++++++++++++++++++--------------
 2 files changed, 116 insertions(+), 40 deletions(-)

diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx
index 61cfe22c..587c6a76 100644
--- a/frontend/src/App.tsx
+++ b/frontend/src/App.tsx
@@ -37,7 +37,10 @@ function App() {
   );
 
   let [openAIKey, setOpenAIKey] = useLocalStorage<string>("OpenAIKey", "");
-
+  console.log("test")
+  console.error("test2")
+  console.info("test3")
+  console.debug("test4")
   let [messages, setMessages] = useState<Array<MessageDict>>(
     Array.from([
       {
@@ -46,7 +49,7 @@ function App() {
         type: "message",
       },
       {
-        text: "If I get stuck just type 'reset' and I'll restart the kernel.",
+        text: "If I get stuck just type 'reset' and I'll restart the kernel. 2",
2", role: "generator", type: "message", }, @@ -119,8 +122,9 @@ function App() { const data = await response.json(); const code = data.code; + - // addMessage({ text: data.text, type: "message", role: "generator" }); + if (response.status != 200) { setWaitingForSystem(WaitingStates.Idle); @@ -128,10 +132,14 @@ function App() { } if (!!code) { + await injectContext(`EXPERT: \n\n ${data.text} \n\n The code you asked for: \n\n ${data.code} \n\n I will now execute it and get back to you with a result and analysis.`) submitCode(code); setWaitingForSystem(WaitingStates.RunningCode); + addMessage({ text: data.text, type: "message", role: "generator" }); } else { + await injectContext(`EXPERT: \n\n ${data.text} \n\n `) setWaitingForSystem(WaitingStates.Idle); + addMessage({ text: data.text, type: "message", role: "generator" }); } } catch (error) { console.error( @@ -142,32 +150,67 @@ function App() { }; async function getApiData() { + if(document.hidden){ return; } - + console.log("starting the check") let response = await fetch(`${Config.API_ADDRESS}/api`); + //console.log("response:", response) let data = await response.json(); - await data.results.forEach(async function (result: {value: string, type: string}) { + for await (const result of data.results) { + if (result.value.trim().length === 0) { + continue; + } + if ((result.type === "message" || result.type === "message_raw") && result.value !== 'Kernel is ready.') { + console.error(`INJECTING DATA: ${result.value}`) + const chatResponse = await fetch(`${Config.WEB_ADDRESS}/chat`, { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ + prompt: `Please answer my previous question(s) using the following which is the result the python you wrote. If the code was supposed to generate any visuals, make sure to write a description of them. Take into account what parts of the questions you have already answered. The results are coming in as the server completes the execution. Answer the part of the question that fits the results you are given now. + [Python code results]: + ${result.value}`, + model: selectedModel, + openAIKey: openAIKey, + }), + }); + + console.error('Response: ', chatResponse) + const data = await chatResponse.json(); + + addMessage({ text: data.text, type: "message", role: "generator" }); + setWaitingForSystem(WaitingStates.Idle); + } else { + addMessage({ text: result.value, type: result.type, role: "system" }); + setWaitingForSystem(WaitingStates.Idle); + } + } + /*await data.results.forEach(async function (result: {value: string, type: string}) { if (result.value.trim().length == 0) { return; } if ((result.type === "message" || result.type === "message_raw") && result.value !== 'Kernel is ready.') { - console.log(`INJECTING DATA: ${result.value}`) - const response = await fetch(`${Config.WEB_ADDRESS}/chat`, { + console.error(`INJECTING DATA: ${result.value}`) + const chatResponse = await fetch(`${Config.WEB_ADDRESS}/chat`, { method: "POST", headers: { "Content-Type": "application/json", }, body: JSON.stringify({ - prompt: `Give me a explanation of the following data ${result.value}`, + prompt: `Please answer my previous question(s) using the following which is the result the python you wrote. If the code was supposed to generate any visuals, make sure to write a description of them. Take into account what parts of the questions you have already answered. The results are coming in as the server completes the execution. 
+            Answer the part of the question that fits the results you are given now.
+            [Python code results]:
+            ${result.value}`,
            model: selectedModel,
            openAIKey: openAIKey,
          }),
        });
-
-        const data = await response.json();
+
+        console.error('Response: ', chatResponse)
+        const data = await chatResponse.json();

        addMessage({ text: data.text, type: "message", role: "generator" });
        setWaitingForSystem(WaitingStates.Idle);
      } else {
        addMessage({ text: result.value, type: result.type, role: "system" });
        setWaitingForSystem(WaitingStates.Idle);
      }
-    });
+    });*/
   }
 
@@ -196,6 +239,20 @@ function App() {
       .catch((error) => console.error("Error:", error));
   }
 
+  async function injectContext(context: string) {
+    await fetch(`${Config.WEB_ADDRESS}/inject-context`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+      },
+      body: JSON.stringify({
+        prompt: context,
+      }),
+    })
+      .then(() => {})
+      .catch((error) => console.error("Error:", error));
+  }
+
   function startUpload(_: string) {
     setWaitingForSystem(WaitingStates.UploadingFile);
   }
diff --git a/gpt_code_ui/webapp/main.py b/gpt_code_ui/webapp/main.py
index eb5fa2ba..3e300c34 100644
--- a/gpt_code_ui/webapp/main.py
+++ b/gpt_code_ui/webapp/main.py
@@ -41,7 +41,7 @@ app = Flask(__name__)
 
 class LimitedLengthString:
-    def __init__(self, maxlen=2000):
+    def __init__(self, maxlen=20000):
         self.data = deque()
         self.len = 0
         self.maxlen = maxlen
@@ -91,37 +91,48 @@ def inspect_file(filename: str) -> str:
     except Exception:
         return ''  # file reading failed. - Don't want to know why.
 
+system = f"""Act as a data analyst, hereby referred to as EXPERT, with ten years of experience in the domain of expense management and general accounting. Your role is to help inexperienced people analyse data about expenses and accounting. Be sure to help the user understand what to focus on and give suggestions on where it would make sense to dig deeper.
 
-async def get_code(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):
-
-    prompt = f"""First, here is a history of what I asked you to do earlier.
-    The actual prompt follows after ENDOFHISTORY.
-    History:
-    {message_buffer.get_string()}
-    ENDOFHISTORY.
-    Write Python code, in a triple backtick Markdown code block, that does the following:
-    {user_prompt}
+    If generating Python code, follow the instructions under [GENERATE_PYTHON_INSTRUCTIONS].
 
+    [GENERATE_PYTHON_INSTRUCTIONS]
     Notes:
         First, think step by step what you want to do and write it down in English.
         Then generate valid Python code in a code block
-        Make sure all code is valid - it be run in a Jupyter Python 3 kernel environment.
+        Make sure all code is valid - it will be run in a Jupyter Python 3 kernel environment.
         Define every variable before you use it.
         For data munging, you can use
-            'numpy', # numpy==1.24.3
-            'dateparser' #dateparser==1.1.8
-            'pandas', # matplotlib==1.5.3
-            'geopandas' # geopandas==0.13.2
+            'numpy', # numpy==1.26.2
+            'dateparser', # dateparser==1.2.0
+            'pandas', # pandas==1.5.3
+            'geopandas', # geopandas==0.14.1
         For pdf extraction, you can use
             'PyPDF2', # PyPDF2==3.0.1
             'pdfminer', # pdfminer==20191125
-            'pdfplumber', # pdfplumber==0.9.0
+            'pdfplumber', # pdfplumber==0.10.3
         For data visualization, you can use
-            'matplotlib', # matplotlib==3.7.1
+            'matplotlib', # matplotlib==3.8.2
         Be sure to generate charts with matplotlib. If you need geographical charts, use geopandas with the geopandas.datasets module.
         If the user has just uploaded a file, focus on the file that was most recently uploaded (and optionally all previously uploaded files)
-        Teacher mode: if the code modifies or produces a file, at the end of the code block insert a print statement that prints a link to it as HTML string: <a href='/download?file=INSERT_FILENAME_HERE'>Download file</a>. Replace INSERT_FILENAME_HERE with the actual filename."""
+        Teacher mode: if the code modifies or produces a file, at the end of the code block insert a print statement that prints a link to it as HTML string: <a href='/download?file=INSERT_FILENAME_HERE'>Download file</a>. Replace INSERT_FILENAME_HERE with the actual filename.
+    """
 
+async def get_code(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):
+
+    prompt = f"""First, here is a history of what I asked you to do earlier.
+    The actual prompt follows after ENDOFHISTORY.
+    History:
+    {message_buffer.get_string()}
+    ENDOFHISTORY.
+    Aiming to help the user in the best possible way with the below [USER_PROMPT], do one of the following:
+    1. Write Python code, in a triple backtick Markdown code block, that supports what the user is trying to achieve. Use the instructions under [GENERATE_PYTHON_INSTRUCTIONS].
+    2. Answer the question the user asks.
+    3. Ask follow-up questions to better understand what the user wants to achieve.
+
+    [USER_PROMPT]
+    {user_prompt}
+    """
 
     if user_openai_key:
         openai.api_key = user_openai_key
@@ -130,7 +141,7 @@ async def get_code(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):
         temperature=0.7,
         headers=OPENAI_EXTRA_HEADERS,
         messages=[
-            # {"role": "system", "content": system},
+            {"role": "system", "content": system},
             {"role": "user", "content": prompt},
         ]
     )
@@ -182,8 +193,12 @@ async def get_chat(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"):
     History:
     {message_buffer.get_string()}
     ENDOFHISTORY.
-    You are a data analyst named Torben with ten years of experience in the domain of expense management. Your role is to help inexperienced people analyse data about expenses.
-    The user is asking:
-    {user_prompt}"""
+    {user_prompt}
+    DO NOT GENERATE ANY CODE
+    Teacher mode: if the results return a link to a file that was generated, make sure to include the link in your answer.
+ """ + + print('PROMPT FROM CHAT:' + prompt) if user_openai_key: openai.api_key = user_openai_key @@ -192,7 +207,7 @@ async def get_chat(user_prompt, user_openai_key=None, model="gpt-3.5-turbo"): temperature=0.7, headers=OPENAI_EXTRA_HEADERS, messages=[ - # {"role": "system", "content": system}, + {"role": "system", "content": system}, {"role": "user", "content": prompt}, ] ) @@ -254,8 +269,11 @@ def models(): @app.route('/api/', methods=["GET", "POST"]) def proxy_kernel_manager(path): if request.method == "POST": + print('starting code execution') resp = requests.post( f'http://localhost:{KERNEL_APP_PORT}/{path}', json=request.get_json()) + requestjson = request.get_json() + print(f"""started code execution with status: {resp.status_code} {requestjson}""") else: resp = requests.get(f'http://localhost:{KERNEL_APP_PORT}/{path}') @@ -263,7 +281,7 @@ def proxy_kernel_manager(path): 'content-length', 'transfer-encoding', 'connection'] headers = [(name, value) for (name, value) in resp.raw.headers.items() if name.lower() not in excluded_headers] - + response = Response(resp.content, resp.status_code, headers) return response @@ -286,9 +304,9 @@ def download_file(): @app.route('/inject-context', methods=['POST']) def inject_context(): user_prompt = request.json.get('prompt', '') - + print('INJECTING-CONTEXT:' + user_prompt) message_buffer.append(user_prompt + "\n\n") - + print('message_buffer: ' + message_buffer.get_string()) return jsonify({"result": "success"}) @@ -307,17 +325,18 @@ def generate_code(): loop.close() # Append all messages to the message buffer for later use - message_buffer.append(user_prompt + "\n\n") + message_buffer.append('USER: ' + user_prompt + "\n\n") return jsonify({'code': code, 'text': text}), status @app.route('/chat', methods=['POST']) def generate_chat(): + #all of this comes from the system, not the user! user_prompt = request.json.get('prompt', '') user_openai_key = request.json.get('openAIKey', None) model = request.json.get('model', None) - + print('CHAT_TEXT: ' + user_prompt) loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) @@ -328,8 +347,8 @@ def generate_chat(): print('CHAT_TEXT: ' + text) # Append all messages to the message buffer for later use - message_buffer.append(user_prompt + "\n\n") - + message_buffer.append('USER: ' + user_prompt + "\n\n") + print(f"""RETURNING TO UI: ${status}""") return jsonify({'text': text}), status