forked from pasquini-dario/LLMmap
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain_interactive.py
More file actions
67 lines (50 loc) · 1.93 KB
/
main_interactive.py
File metadata and controls
67 lines (50 loc) · 1.93 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
import os
import argparse
import time
from prompt_toolkit import PromptSession
from prompt_toolkit.key_binding import KeyBindings
from LLMmap.inference import load_LLMmap
# Key bindings: make a plain ENTER submit the buffer even though the
# session is multi-line (otherwise ENTER would just insert a newline).
key_bindings = KeyBindings()


@key_bindings.add('enter')
def accept_input(event):
    """Submit the current buffer contents when ENTER is pressed."""
    event.current_buffer.validate_and_handle()


# Shared multi-line prompt session used to read pasted LLM-app responses.
session = PromptSession(multiline=True, key_bindings=key_bindings)
def int_loop(inf):
    """Run the interactive fingerprinting loop.

    For each probe query exposed by the loaded inference model, display
    the query, read the target LLM app's response pasted by the user,
    then run inference over all collected answers and print the result.

    Args:
        inf: Loaded LLMmap inference object. Must expose a ``queries``
            sequence, be callable on a list of answer strings, and
            provide ``print_result``. (Presumably returned by
            ``load_LLMmap`` — confirm against caller.)
    """
    # ANSI color codes for terminal output.
    INSTRUCTION_COLOR = '\033[93m'  # Yellow
    QUERY_COLOR = '\033[94m'        # Blue
    RESET_COLOR = '\033[0m'         # Reset color

    # Print the instruction in yellow.
    print("\n\n" + INSTRUCTION_COLOR + "[Instruction] Submit the given query to the LLM app and copy/paste the output produced and then ENTER. Let's start:")
    input("[Press any key to continue]: " + RESET_COLOR)
    print("-" * 50)

    n = len(inf.queries)
    answers = []
    # enumerate instead of range(len(...)) + indexing.
    for i, query in enumerate(inf.queries):
        print('\n\n')
        # Show the query to submit in blue.
        print(INSTRUCTION_COLOR + f"[Query to submit ({i+1}/{n})]:\n" + QUERY_COLOR + f"{query}\n" + RESET_COLOR)
        print(INSTRUCTION_COLOR + "[LLM app response]:" + RESET_COLOR, end=' ')
        answer = session.prompt()
        answers.append(answer)
        time.sleep(1)  # brief pause between queries

    print(INSTRUCTION_COLOR + "\n\n### RESULTS ###")
    p = inf(answers)
    inf.print_result(p)
    print(RESET_COLOR)
if __name__ == "__main__":
    # CLI entry point: parse arguments, load the LLMmap inference model,
    # list the supported LLMs, and start the interactive loop.
    parser = argparse.ArgumentParser(description='Interactive session for LLM fingerprinting')  # fixed typo: "fingeprinting"
    parser.add_argument(
        '--inference_model_path',
        type=str,
        default='./data/pretrained_models/default',
        help='Path to the inference model to use',
    )
    args = parser.parse_args()

    conf, inf = load_LLMmap(args.inference_model_path)

    print("\n##### LLMs supported #####")
    print('', *inf.llms_supported, sep="\n\t")
    print("#" * 50)

    int_loop(inf)