-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathutils.py
More file actions
137 lines (115 loc) · 4.46 KB
/
utils.py
File metadata and controls
137 lines (115 loc) · 4.46 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
import os
import sys
import openai
from tenacity import (
retry,
stop_after_attempt, # type: ignore
wait_random_exponential, # type: ignore
)
from typing import Optional, List
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
# Added after the API stopped working (2025-02): route all HTTP(S) traffic
# through a local proxy.
# NOTE(review): module-level side effect — importing this file clobbers any
# proxy settings of the host process, and assumes a proxy is listening on
# localhost:7890. Confirm before running in another environment.
os.environ["http_proxy"] = "http://localhost:7890"
os.environ["https_proxy"] = "http://localhost:7890"
# Model identifiers accepted by get_chat(): OpenRouter-style OpenAI names
# plus Qwen model names.
Model = Literal["openai/gpt-4", "openai/gpt-3.5-turbo", "openai/gpt-3.5-turbo-instruct", 'qwen-turbo', 'qwen-plus', 'qwen-max']
# Retry decorator disabled by the original author; left here for reference.
# @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def get_completion_before2502(prompt: str, temperature: float = 0.0, max_tokens: int = 256, stop_strs: Optional[List[str]] = None) -> str:
    """Legacy helper: query the (non-chat) completions endpoint.

    Kept for reference; ``get_completion`` below is the current entry point.

    Args:
        prompt: Raw prompt string sent to the model.
        temperature: Sampling temperature (0.0 = deterministic).
        max_tokens: Upper bound on the number of generated tokens.
        stop_strs: Optional list of stop sequences.

    Returns:
        The generated completion text of the first choice.
    """
    # API key is read from the OPENAI_API_KEY environment variable.
    client = openai.OpenAI(
        # Fix: the OpenRouter API lives under /api/v1 — the bare site root
        # ("https://openrouter.ai/") is not an API endpoint. This now matches
        # the base_url used by get_completion() and get_chat().
        base_url="https://openrouter.ai/api/v1",
        api_key=os.getenv('OPENAI_API_KEY'),
    )
    response = client.completions.create(
        model='openai/gpt-3.5-turbo-instruct',
        prompt=prompt,
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=1,
        frequency_penalty=0.0,
        presence_penalty=0.0,
        stop=stop_strs,
    )
    return response.choices[0].text
# Rewritten (2025-02) from the legacy completions API to the chat API after
# the original call path stopped working.
# Retry decorator disabled by the original author; left here for reference.
# @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def get_completion(prompt: str, temperature: float = 0.0, max_tokens: int = 256, stop_strs: Optional[List[str]] = None) -> str:
    """Send *prompt* as a single user message to the chat-completions endpoint.

    Args:
        prompt: User prompt; wrapped in a one-message chat conversation.
        temperature: Sampling temperature (0.0 = deterministic).
        max_tokens: Upper bound on the number of generated tokens.
        stop_strs: Optional list of stop sequences.

    Returns:
        The assistant message content of the first choice.
    """
    # Route traffic through the local proxy. These are also set at module
    # import time; repeated here so the call works even if something reset
    # them between import and use.
    os.environ["http_proxy"] = "http://localhost:7890"
    os.environ["https_proxy"] = "http://localhost:7890"
    # API key is read from the OPENAI_API_KEY environment variable.
    client = openai.OpenAI(
        base_url="https://openrouter.ai/api/v1",
        api_key=os.getenv('OPENAI_API_KEY'),
    )
    messages = [
        {
            "role": "user",
            "content": prompt,
        }
    ]
    # NOTE(review): 'gpt-3.5-turbo-instruct' is a completions-style model
    # being called through the chat endpoint — confirm OpenRouter accepts
    # this routing, or switch to a chat model.
    response = client.chat.completions.create(
        model='openai/gpt-3.5-turbo-instruct',
        messages=messages,
        max_tokens=max_tokens,
        stop=stop_strs,
        temperature=temperature,
    )
    return response.choices[0].message.content
# Retry decorator disabled by the original author; left here for reference.
# @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def get_chat(prompt: str, model: Model, temperature: float = 0.0, max_tokens: int = 256, stop_strs: Optional[List[str]] = None, is_batched: bool = False) -> str:
    """Send *prompt* to a chat model via OpenRouter and return the reply text.

    Args:
        prompt: User prompt; wrapped in a one-message chat conversation.
        model: Model identifier (see the ``Model`` Literal above).
        temperature: Sampling temperature (0.0 = deterministic).
        max_tokens: Upper bound on the number of generated tokens.
        stop_strs: Optional list of stop sequences.
        is_batched: Accepted for interface compatibility; not used by the
            body of this function.

    Returns:
        The assistant message content of the first choice.

    Raises:
        AssertionError: If *model* is the retired "text-davinci-003".
    """
    # Kept as an assert for backward compatibility (callers may expect
    # AssertionError); note asserts are stripped under ``python -O``.
    assert model != "text-davinci-003"
    # Route traffic through the local proxy (see module-level note).
    os.environ["http_proxy"] = "http://localhost:7890"
    os.environ["https_proxy"] = "http://localhost:7890"
    messages = [
        {
            "role": "user",
            "content": prompt,
        }
    ]
    # API key is read from the OPENAI_API_KEY environment variable.
    # SECURITY: a hard-coded API key was previously leaked in a comment here;
    # it has been removed — rotate that key if it is still active.
    client = openai.OpenAI(
        base_url="https://openrouter.ai/api/v1",
        api_key=os.getenv('OPENAI_API_KEY'),
    )
    response = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        stop=stop_strs,
        temperature=temperature,
    )
    return response.choices[0].message.content
if __name__ == '__main__':
    # Manual smoke test — needs network access, a proxy on localhost:7890,
    # and OPENAI_API_KEY in the environment.
    reply = get_chat("say hello world", 'openai/gpt-4')
    print(reply)