25 changes: 24 additions & 1 deletion fastchat/conversation.py
@@ -2075,7 +2075,7 @@ def get_conv_template(name: str) -> Conversation:
    Conversation(
        name="metharme",
        system_template="<|system|>{system_message}",
-        system_message="""Enter RP mode. You shall reply to the user while staying 
+        system_message="""Enter RP mode. You shall reply to the user while staying
in character. Your responses must be detailed, creative, immersive, and drive the scenario
forward.""",
        roles=("<|user|>", "<|model|>"),
@@ -2298,6 +2298,17 @@ def get_conv_template(name: str) -> Conversation:
    )
)

register_conv_template(
    Conversation(
        name="bailing",
        system_message="",
        roles=("user", "assistant"),
        messages=[],
        sep_style=None,
        sep=None,
    )
)


if __name__ == "__main__":
    from fastchat.conversation import get_conv_template
@@ -2340,3 +2351,15 @@ def get_conv_template(name: str) -> Conversation:
    conv.append_message(conv.roles[0], "How are you?")
    conv.append_message(conv.roles[1], None)
    print(conv.get_prompt())

print("\n")

print("-- Bailing template --")
conv = get_conv_template("bailing")
conv.append_message(conv.roles[0], "Hello!")
conv.append_message(conv.roles[1], "Hi!")
conv.append_message(conv.roles[0], "How are you?")
conv.append_message(conv.roles[1], None)
print(conv.to_openai_api_messages())

print("\n")
94 changes: 94 additions & 0 deletions fastchat/serve/api_provider.py
@@ -7,6 +7,7 @@
from typing import Optional
import time

import httpx
import requests

from fastchat.utils import build_logger
@@ -246,6 +247,18 @@ def get_api_provider_stream_iter(
            api_key=model_api_dict["api_key"],
            conversation_id=state.conv_id,
        )
    elif model_api_dict["api_type"] == "bailing":
        messages = conv.to_openai_api_messages()
        stream_iter = bailing_api_stream_iter(
            model_name=model_api_dict["model_name"],
            messages=messages,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_new_tokens,
            api_base=model_api_dict.get("api_base"),
            api_key=model_api_dict.get("api_key"),
            generation_args=model_api_dict.get("recommended_config"),
        )
    else:
        raise NotImplementedError()
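
For context, this branch is reached through the JSON file passed to --register-api-endpoint-file. A sketch of a plausible entry, written as a Python dict (the file itself is JSON); only the keys the code above actually reads are shown, and every value is a placeholder:

    # Hypothetical endpoint registration; the display name, model id, URL,
    # and recommended_config values are illustrative, not official.
    {
        "bailing-lite-demo": {
            "model_name": "Bailing-Lite",
            "api_type": "bailing",
            "api_base": "https://bailingchat.alipay.com/chat/completions",
            "api_key": "YOUR_BAILING_API_KEY",
            "recommended_config": {"temperature": 0.7, "top_p": 1.0},
        }
    }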

@@ -1264,3 +1277,84 @@ def metagen_api_stream_iter(
"text": f"**API REQUEST ERROR** Reason: Unknown.",
"error_code": 1,
}


def bailing_api_stream_iter(
    model_name,
    messages,
    temperature,
    top_p,
    max_tokens,
    api_base=None,
    api_key=None,
    generation_args=None,
):
    url = api_base if api_base else "https://bailingchat.alipay.com/chat/completions"
    token = api_key if api_key else os.environ.get("BAILING_API_KEY")
    if not token:
        raise ValueError("No valid Bailing API token was provided.")
    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
    }

    request = {"model": model_name, "messages": messages, "stream": True}
    # Default generation parameters. Note that the temperature / top_p /
    # max_tokens arguments are accepted for interface compatibility but not
    # applied here; only generation_args overrides these defaults.
    request["temperature"] = 0.01
    request["top_p"] = 1.0
    request["top_k"] = -1
    request["n"] = 1
    request["logprobs"] = 1
    request["use_beam_search"] = False
    request["max_tokens"] = 16384

    if generation_args:
        request.update(generation_args)

    # Overall timeout is 300s; the read timeout alone is 200s.
    timeout = httpx.Timeout(300.0, read=200.0)
    client = httpx.Client(timeout=timeout, http2=True)
    retry_num = 0
    while retry_num < 3:
        try:
            total_text, curr_buf = "", ""
            with client.stream("POST", url, json=request, headers=headers) as resp:
                if resp.status_code == 200:
                    for new_buf in resp.iter_text():
                        curr_buf += new_buf
                        while True:
                            x_part = curr_buf.split("\n\n", 1)
                            if len(x_part) <= 1:
                                break  # no complete SSE event buffered yet; keep reading
                            y_part = x_part[0][len("data:") :].strip()
                            curr_buf = x_part[1]  # consume one event, keep the leftover
                            try:
                                y = json.loads(y_part)
                                output_text = y["choices"][0]["delta"]["content"]
                            except Exception as e:
                                logger.error(
                                    f"Skipping unparsable chunk. Content: {y_part}, Info: {e}"
                                )
                                continue
                            total_text += output_text
                            yield {"text": total_text, "error_code": 0}
                    return  # the stream ended normally
                else:
                    logger.error(
                        f"Request failed; retrying if possible. status_code={resp.status_code}"
                    )
        except Exception as exc:
            if total_text:
                # Some output was already streamed; stop instead of retrying.
                logger.error(f"Reading interrupted. Info: {exc}")
                break
            logger.error(f"Request failed; retrying if possible. Info: {exc}")
        retry_num += 1
    else:
        raise ValueError("Exceeded the maximum number of retries.")
3 changes: 2 additions & 1 deletion fastchat/serve/gradio_web_server.py
@@ -260,7 +260,8 @@ def load_demo(url_params, request: gr.Request):
        controller_url, args.register_api_endpoint_file, vision_arena=False
    )

-    return load_demo_single(models, url_params)
+    current_context = Context(text_models=models, models=models)
+    return load_demo_single(current_context, url_params)


def vote_last_response(state, vote_type, model_selector, request: gr.Request):
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -13,7 +13,7 @@ classifiers = [
"License :: OSI Approved :: Apache Software License",
]
dependencies = [
"aiohttp", "fastapi", "httpx", "markdown2[all]", "nh3", "numpy",
"aiohttp", "fastapi", "httpx[http2]", "markdown2[all]", "nh3", "numpy",
"prompt_toolkit>=3.0.0", "pydantic<3,>=2.0.0", "pydantic-settings", "psutil", "requests", "rich>=10.0.0",
"shortuuid", "tiktoken", "uvicorn",
]
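
The http2 extra matters because bailing_api_stream_iter constructs its client with http2=True; httpx only negotiates HTTP/2 when the h2 package (pulled in by this extra) is importable, and otherwise fails at client construction. A quick sanity check:

    import httpx

    # Raises ImportError if the "http2" extra (the h2 package) is missing.
    client = httpx.Client(http2=True)
    client.close()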