
Commit d60ab78

Merge pull request #16 from Serverless-Devs/fix-agentscope-adapter-stream
fix(agentrun/integration/agentscope): enable streaming responses in the model adapter
2 parents 8486571 + 0e79c14

File tree

2 files changed: +87 -1 lines

agentrun/integration/agentscope/model_adapter.py

Lines changed: 1 addition & 1 deletion
@@ -51,7 +51,7 @@ def wrap_model(self, common_model: CommonModel) -> Any:
         return OpenAIChatModel(
             model_name=info.model or "",
             api_key=info.api_key,
-            stream=False,
+            stream=True,
             client_args={
                 "base_url": info.base_url,
                 "http_client": AsyncClient(headers=info.headers),

tests/unittests/integration/test_integration.py

Lines changed: 86 additions & 0 deletions
@@ -85,9 +85,17 @@ def extract_payload(request):
 
         def build_response(request, route):
             payload = extract_payload(request)
+            is_stream = payload.get("stream", False)
             response_json = self._build_response(
                 payload.get("messages") or [], payload.get("tools")
             )
+            if is_stream:
+                # Return SSE streaming response
+                return respx.MockResponse(
+                    status_code=200,
+                    content=self._build_sse_stream(response_json),
+                    headers={"content-type": "text/event-stream"},
+                )
             return respx.MockResponse(status_code=200, json=response_json)
 
         # Route all requests to the mock base URL (already within respx.mock context)
@@ -172,6 +180,84 @@ def _build_model_response(self, messages: list, tools_payload):
         response_dict = self._build_response(messages, tools_payload)
         return ModelResponse(**response_dict)
 
+    def _build_sse_stream(self, response_json: dict) -> bytes:
+        """Build SSE stream from response JSON for streaming requests"""
+        chunks = []
+        choice = response_json.get("choices", [{}])[0]
+        message = choice.get("message", {})
+        tool_calls = message.get("tool_calls")
+
+        # First chunk with role
+        first_chunk = {
+            "id": response_json.get("id", "chatcmpl-mock"),
+            "object": "chat.completion.chunk",
+            "created": response_json.get("created", 1234567890),
+            "model": response_json.get("model", "mock-model"),
+            "choices": [{
+                "index": 0,
+                "delta": {"role": "assistant", "content": ""},
+                "finish_reason": None,
+            }],
+        }
+        chunks.append(f"data: {json.dumps(first_chunk)}\n\n")
+
+        if tool_calls:
+            # Stream tool calls
+            for i, tool_call in enumerate(tool_calls):
+                tc_chunk = {
+                    "id": response_json.get("id", "chatcmpl-mock"),
+                    "object": "chat.completion.chunk",
+                    "created": response_json.get("created", 1234567890),
+                    "model": response_json.get("model", "mock-model"),
+                    "choices": [{
+                        "index": 0,
+                        "delta": {
+                            "tool_calls": [{
+                                "index": i,
+                                "id": tool_call.get("id"),
+                                "type": "function",
+                                "function": tool_call.get("function"),
+                            }],
+                        },
+                        "finish_reason": None,
+                    }],
+                }
+                chunks.append(f"data: {json.dumps(tc_chunk)}\n\n")
+        else:
+            # Stream content
+            content = message.get("content", "")
+            if content:
+                content_chunk = {
+                    "id": response_json.get("id", "chatcmpl-mock"),
+                    "object": "chat.completion.chunk",
+                    "created": response_json.get("created", 1234567890),
+                    "model": response_json.get("model", "mock-model"),
+                    "choices": [{
+                        "index": 0,
+                        "delta": {"content": content},
+                        "finish_reason": None,
+                    }],
+                }
+                chunks.append(f"data: {json.dumps(content_chunk)}\n\n")
+
+        # Final chunk with finish_reason
+        finish_reason = "tool_calls" if tool_calls else "stop"
+        final_chunk = {
+            "id": response_json.get("id", "chatcmpl-mock"),
+            "object": "chat.completion.chunk",
+            "created": response_json.get("created", 1234567890),
+            "model": response_json.get("model", "mock-model"),
+            "choices": [{
+                "index": 0,
+                "delta": {},
+                "finish_reason": finish_reason,
+            }],
+        }
+        chunks.append(f"data: {json.dumps(final_chunk)}\n\n")
+        chunks.append("data: [DONE]\n\n")
+
+        return "".join(chunks).encode("utf-8")
+
     def _assert_tools(self, tools_payload):
         assert isinstance(tools_payload, list)
         assert (
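To make the SSE framing above concrete, here is a small sketch (not from the commit) that decodes the bytes returned by _build_sse_stream back into chunk dicts; decode_sse is a hypothetical helper, shown only to illustrate the "data: ..." / "[DONE]" framing the mock emits.

import json


def decode_sse(raw: bytes) -> list[dict]:
    """Split an SSE byte stream into the JSON chunks it carries, dropping the [DONE] sentinel."""
    chunks = []
    for event in raw.decode("utf-8").split("\n\n"):
        if not event.startswith("data: "):
            continue  # ignore empty trailing segments
        data = event[len("data: "):]
        if data == "[DONE]":
            continue  # terminator, carries no JSON
        chunks.append(json.loads(data))
    return chunks


# Hypothetical usage against the mock above: the first chunk carries the assistant
# role and the last one the finish_reason.
# chunks = decode_sse(self._build_sse_stream(response_json))
# assert chunks[0]["choices"][0]["delta"]["role"] == "assistant"
# assert chunks[-1]["choices"][0]["finish_reason"] in ("stop", "tool_calls")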
