Skip to content

Commit 050bc3c

Browse files
authored
init
0 parents  commit 050bc3c

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

49 files changed

+40957
-0
lines changed

MANIFEST.in

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
include whl/*
2+
include XEdu/hub/none.jpg
3+
include XEdu/hub/*
4+
include XEdu/hub/font/*
5+
include XEdu/examples/demo.py
6+
prune XEdu/examples/*
7+
include install_requires.txt
8+
include XEdu/hub/tokenizer/*

PKG-INFO

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
Metadata-Version: 2.1
2+
Name: XEdu-python
3+
Version: 0.2.3
4+
Summary: XEdu
5+
Home-page: https://github.com/OpenXLab-Edu/OpenMMLab-Edu
6+
Author: OpenXLab
7+
Author-email: wangbolun@pjlab.org.cn
8+
License: MIT License
9+
Platform: UNKNOWN
10+
Requires-Python: >=3.6
11+
12+
UNKNOWN
13+

XEdu/LLM/__init__.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
"""XEdu.LLM: public package entry point re-exporting the chat `Client`."""
from .client import Client

__all__ = ['Client']

XEdu/LLM/client.py

Lines changed: 218 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,218 @@
1+
from .llms import Moonshot,Deepseek,Openrouter,GLM,ERNIE,BaseURL,Qwen
import os
import sys,subprocess
import requests
import random

# Maps an English provider id to its concrete LLM client class.
provider_id_map = {
    "openrouter": Openrouter,
    "moonshot": Moonshot,
    "deepseek": Deepseek,
    "glm": GLM,
    "ernie": ERNIE,
    "qwen":Qwen,
}
# Maps a Chinese display name (vendor-product) to the English provider id;
# "openrouter" maps to itself so both spellings resolve the same way.
provider_id_map_zh = {
    "openrouter":"openrouter",
    "月之暗面-Kimi":"moonshot",
    "幻方-深度求索":"deepseek",
    "智谱-智谱清言":"glm",
    "百度-文心一言":"ernie",
    "阿里-通义千问":"qwen",
}
# Model id used when the caller does not pass `model` explicitly.
default_model_map = {
    "openrouter": "mistralai/mistral-7b-instruct:free",
    "moonshot": "moonshot-v1-8k",
    "deepseek": "deepseek-chat",
    "glm":"glm-4",
    "ernie":"ERNIE_speed_8k",
    "qwen":"qwen2-1.5b-instruct",
}
30+
class Client:
    """Unified chat-LLM client.

    Wraps exactly one backend, chosen by the constructor arguments:
    a named provider (``provider``), an arbitrary OpenAI-compatible
    endpoint (``base_url``), or a gradio-served XEdu endpoint
    (``xedu_url``). Also provides a gradio web chat UI via :meth:`run`.
    """

    def __init__(self, provider=None, base_url=None, api_key=None, model=None, secret_key=None, xedu_url=None):
        """Create a client for one backend.

        provider:   English provider id (see ``provider_id_map``) or the
                    Chinese display name / a substring of it
                    (see ``provider_id_map_zh``).
        base_url:   root URL of any OpenAI-compatible API; used when no
                    ``provider`` is given.
        api_key:    bearer token for the chosen service.
        model:      model id; for providers, defaults via ``default_model_map``.
        secret_key: extra credential, only used by the 'ernie' provider.
        xedu_url:   gradio endpoint URL; used when neither ``provider``
                    nor ``base_url`` is given.
        """
        self.provider = provider
        self.base_url = base_url
        self.api_key = api_key
        self.model = model
        if provider:
            # Normalize: accept the Chinese display name, or any substring
            # of one, in place of the English provider id.
            if provider in provider_id_map_zh.keys():
                provider = provider_id_map_zh[provider]
            index = [i for i in provider_id_map_zh.keys() if provider in i]
            if len(index) != 0:
                provider = provider_id_map_zh[index[0]]
            if not self.model:
                self.model = default_model_map[provider]
            if provider in ['ernie']:
                # ERNIE authenticates with an api_key/secret_key pair.
                self.client = provider_id_map[provider](api_key=self.api_key, secret_key=secret_key, model=self.model)
            else:
                self.client = provider_id_map[provider](api_key=self.api_key, model=self.model)
            print(f"|| Selected provider: {self.provider} || Current model: {self.model} ||")
        elif base_url:
            self.client = BaseURL(base_url=base_url, api_key=self.api_key, model=self.model)
        elif xedu_url:
            self.base_url = xedu_url
            from .llms.gradioapi import GradioClient as gc
            self.client = gc(xedu_url)
            # Pick a suitable model, preferring ids containing 'free';
            # best-effort only, failures are ignored.
            # NOTE(review): indentation was lost in the source this was
            # recovered from -- confirm this selection block belongs to the
            # xedu_url branch and not to the whole constructor.
            try:
                model_list = self.try_list_models()
                if len(model_list) == 0:
                    raise Exception("No model available")
                free_model_list = [i for i in model_list if 'free' in i]
                if len(free_model_list) > 0:
                    self.model = random.choice(free_model_list)
                else:
                    self.model = random.choice(model_list)
            except Exception:
                pass
            if self.model is not None:
                print(f"|| Selected base URL: {self.base_url} || Current model: {self.model} ||")

    def inference(self, message, stream=False, **kwargs):
        """Run one chat completion via the active backend.

        message: a plain string or a list of OpenAI-style
        ``{"role": ..., "content": ...}`` dicts.
        Returns the reply text, or a generator of chunks when ``stream``.
        """
        return self.client.inference(message, stream=stream, **kwargs)

    def support_model(self):
        """Return the model ids advertised by the active backend."""
        return self.client.list_models()

    def try_list_models(self):
        """GET ``{base_url}/models`` and return the listed model ids.

        Raises ``Exception`` carrying the raw JSON payload when the response
        is not in the expected OpenAI ``{"data": [{"id": ...}]}`` shape.
        """
        url = self.base_url + "/models"
        headers = {
            'Accept': 'application/json',
            'Authorization': 'Bearer {}'.format(self.api_key),
        }
        response = requests.request("GET", url, headers=headers)
        try:
            return [entry['id'] for entry in response.json()['data']]
        except Exception:
            raise Exception(response.json())

    @staticmethod
    def support_provider(lang='en'):
        """Return supported provider names; ``lang='zh'`` gives Chinese labels.

        Declared static to fix the original missing ``self`` parameter:
        instance calls such as ``client.support_provider('zh')`` used to
        bind ``self`` to ``lang`` and raise TypeError.
        """
        if lang == "zh":
            return list(provider_id_map_zh.keys())
        return list(provider_id_map.keys())

    def _restart_script(self):
        """Restarts the current script (used after upgrading gradio)."""
        current_script = sys.argv[0]
        subprocess.run([sys.executable, current_script])

    def upgrade_env(self):
        """Ensure gradio >= 4.0 is importable, installing/upgrading if not."""
        try:
            import gradio as gr
            # Compare the major version numerically: the original string
            # comparison would have treated "10.x" as older than "4.0.0".
            if int(gr.__version__.split(".")[0]) < 4:
                raise ImportError
            block = gr.Blocks()
        except Exception:
            subprocess.run([sys.executable, "-m", "pip", "install", "gradio", "-U"], check=True)
            self._restart_script()
            import importlib
            importlib.invalidate_caches()  # clear import caches
            import gradio as gr
            importlib.reload(gr)  # reload the freshly-installed module

    def set_system(self, system_info):
        """Set the system prompt used by :meth:`run`'s web chat UI."""
        self.system_info = system_info

    def run(self, host='0.0.0.0', port=7860, share=False):
        """Launch a gradio chat web UI backed by this client."""
        if host == "0.0.0.0":
            from .utils import find_available_port, get_local_ip
            port = find_available_port(port)
            ip = get_local_ip()
            print(f"Running on local URL: http://{ip}:{port}")
            print(f"Running on local URL: http://127.0.0.1:{port}")

        self.upgrade_env()
        import gradio as gr

        def predict(message, history):
            # Convert gradio's (user, assistant) pair history into the
            # OpenAI message format, prepending the system prompt if set.
            history_openai_format = []
            if hasattr(self, "system_info"):
                history_openai_format.append({"role": "system", "content": self.system_info})
            for human, assistant in history:
                history_openai_format.append({"role": "user", "content": human})
                history_openai_format.append({"role": "assistant", "content": assistant})
            history_openai_format.append({"role": "user", "content": message})
            response = self.client.inference(
                message=history_openai_format,
                stream=True,
            )
            # Accumulate chunks so the UI shows the growing reply.
            partial_message = ""
            for i in response:
                partial_message += i
                yield partial_message

        block = gr.Blocks(theme=gr.themes.Monochrome())
        text = gr.Textbox(
            container=False,
            show_label=False,
            label="Message",
            placeholder="请输入消息...",
            scale=7,
            autofocus=True,
        )
        if hasattr(self, "system_info"):
            ph = """<center><b>系统设定:</b> {}</center>""".format(self.system_info)
        else:
            ph = """<h1><center>做有用的AI教育</center></h1>
<p>官方文档: <a href="https://xedu.readthedocs.io/zh-cn/master/xedu_llm.html">https://xedu.readthedocs.io/zh-cn/master/xedu_llm.html</a></p>
"""
        bot = gr.Chatbot(placeholder=ph, label='对话记录', render_markdown=True, bubble_full_width=False)
        with block:
            gr.Markdown(f"""<h1><center>🤖️ LLM Chatbot by XEdu🤖️</center></h1>""")
            provider = self.provider if self.provider else self.base_url
            # Two read-only fields showing the active backend and model.
            with gr.Row():
                gr.Textbox(label='服务提供商', value=provider, interactive=False)
                gr.Textbox(label='模型', value=self.model, interactive=False)
            demo = gr.ChatInterface(
                predict,
                retry_btn="🔄重试", undo_btn="↩️撤回", clear_btn="🗑️清空", submit_btn="🤗提交",
                textbox=text,
                chatbot=bot,
                fill_height=True,
            ).queue()
        if port:
            block.launch(server_name=host, share=share, server_port=port)
        else:
            block.launch(server_name=host, share=share)

XEdu/LLM/llms/__init__.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
"""Concrete LLM provider clients re-exported for the XEdu.LLM package."""
from .openrouter import Openrouter
from .moonshot import Moonshot
from .deepseek import Deepseek
from .glm import GLM
from .ernie import ERNIE
from .qwen import Qwen
from .baseurl import BaseURL

__all__ = ['Openrouter', 'Moonshot', 'Deepseek', 'GLM', 'ERNIE', 'BaseURL','Qwen']

XEdu/LLM/llms/base.py

Lines changed: 115 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,115 @@
1+
import requests
2+
import json
3+
4+
5+
class BaseLLM:
    """Base client for OpenAI-compatible chat-completions APIs.

    Subclasses set ``base_url`` (and usually a default ``model``); this
    class implements blocking and streaming inference against
    ``/chat/completions`` and model listing against ``/models``.
    """

    def __init__(self, api_key, model=None):
        # Bearer token sent in the Authorization header.
        self.api_key = api_key
        # Model id included in each request body; subclasses usually override.
        self.model = model
        # Subclasses must set the API root, e.g. "https://api.deepseek.com".
        self.base_url = None

    def inference(self, message, stream=False, **kwargs):
        """Run one chat completion.

        message: a plain string (wrapped as a single user turn) or a list
        of OpenAI-style ``{"role": ..., "content": ...}`` dicts.
        Returns the reply text, or a generator of text chunks when
        ``stream`` is true. Extra kwargs are merged into the request body.
        """
        if stream:
            return self._stream_infer(message, **kwargs)
        else:
            return self._infer(message, **kwargs)

    def _set_message(self, messages, stream=True):
        """Build the JSON request body for /chat/completions.

        The "stream" key is only present (and True) for streaming requests,
        matching the original two-branch construction.
        """
        send_messages = {
            "model": self.model,  # Optional
            "messages": messages,
        }
        if stream:
            send_messages["stream"] = True
        return send_messages

    def _set_url(self):
        """Chat-completions endpoint; subclasses may override."""
        return self.base_url + "/chat/completions"

    def _stream_infer(self, message, **kwargs):
        """Streaming inference: return a generator of reply text chunks."""
        if isinstance(message, str):
            messages = [{"role": "user", "content": message}]
        else:
            messages = message
        # (fixed 'senf_messages' typo from the original)
        send_messages = self._set_message(messages, stream=True)
        response = requests.post(
            url=self._set_url(),
            headers=self._set_headers(stream=True),
            data=json.dumps({**send_messages, **kwargs}),
            stream=True,
        )
        return self._analyse_stream_response(response)

    def _analyse_stream_response(self, response):
        """Parse the server-sent-event stream, yielding each delta's text."""
        for raw_line in response.iter_lines():
            text = raw_line.decode("UTF-8")
            # SSE data lines look like 'data: {...}'; the stream ends with
            # 'data: [DONE]', which carries no JSON payload.
            if text.startswith("data: ") and not text.endswith("[DONE]"):
                chunk = json.loads(text[6:])
                if chunk["choices"][0]["finish_reason"] != "stop":
                    content = chunk["choices"][0]["delta"]["content"]
                    if content is not None:
                        yield content

    def _set_content(self, response):
        """Extract the reply text from a non-streaming response."""
        return response.json()["choices"][0]["message"]["content"]

    def _set_headers(self, stream=False):
        """HTTP headers including the bearer token."""
        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
            "Authorization": f"Bearer {self.api_key}",
        }
        return headers

    def _infer(self, message, **kwargs):
        """Blocking inference: return the reply text.

        Falls back to returning the raw JSON payload when the response is
        not in the expected shape (typically an API error object).
        """
        if isinstance(message, str):
            messages = [{"role": "user", "content": message}]
        else:
            messages = message
        send_messages = self._set_message(messages, stream=False)
        response = requests.post(
            url=self._set_url(),
            headers=self._set_headers(),
            data=json.dumps({**send_messages, **kwargs}),
        )
        try:
            content = self._set_content(response)
        except Exception:
            # Surface the raw payload so API errors reach the caller.
            content = response.json()
        return content

    def list_models(self):
        """Return the model ids advertised by the provider's /models endpoint.

        Raises ``Exception`` carrying the raw JSON payload when the response
        is not in the expected ``{"data": [{"id": ...}]}`` shape.
        """
        url = self.base_url + "/models"
        headers = {
            'Accept': 'application/json',
            'Authorization': 'Bearer {}'.format(self.api_key),
        }
        response = requests.request("GET", url, headers=headers)
        try:
            return [entry['id'] for entry in response.json()['data']]
        except Exception:
            raise Exception(response.json())

XEdu/LLM/llms/baseurl.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
from .base import BaseLLM
2+
3+
class BaseURL(BaseLLM):
    """Client for an arbitrary OpenAI-compatible endpoint.

    Unlike the named-provider subclasses, both the API root and the model
    are supplied by the caller.
    """

    def __init__(self, api_key, base_url, model):
        # Let the base class record the credential, then pin the
        # caller-chosen endpoint and model.
        super().__init__(api_key)
        self.base_url = base_url
        self.model = model

XEdu/LLM/llms/deepseek.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
from .base import BaseLLM
2+
3+
class Deepseek(BaseLLM):
    """Client for the DeepSeek chat-completions API."""

    def __init__(self, api_key, model="deepseek-chat"):
        super().__init__(api_key)
        # DeepSeek's OpenAI-compatible API root.
        self.base_url = "https://api.deepseek.com"
        self.model = model

0 commit comments

Comments
 (0)