From 87fb47a04ba39cb00916d6e41d21e2d360a0acf6 Mon Sep 17 00:00:00 2001
From: amabito <192487536+amabito@users.noreply.github.com>
Date: Mon, 16 Feb 2026 22:34:33 +0900
Subject: [PATCH] fix: preserve partial LLM completion on stream abort

When a user aborts an LLM streaming request, the partial completion was
lost (errorPromptLog.completion was empty ""). This adds an
accumulatedCompletion variable that tracks streamed content and
preserves it on abort, supporting both string and MessagePart[] content.
---
 core/llm/streamChat.ts        | 19 +++++++
 core/llm/streamChat.vitest.ts | 97 +++++++++++++++++++++++++++++++++++
 2 files changed, 116 insertions(+)
 create mode 100644 core/llm/streamChat.vitest.ts

diff --git a/core/llm/streamChat.ts b/core/llm/streamChat.ts
index ec1f046c5ec..4c70ff990d5 100644
--- a/core/llm/streamChat.ts
+++ b/core/llm/streamChat.ts
@@ -94,13 +94,17 @@ export async function* llmStreamChat(
       completionOptions,
       abortController,
     });
+    let accumulatedCompletion = "";
     let next = await gen.next();
     while (!next.done) {
       if (abortController.signal.aborted) {
+        errorPromptLog.completion = accumulatedCompletion;
         next = await gen.return(errorPromptLog);
         break;
       }
       if (next.value) {
+        const content = typeof next.value === "string" ? next.value : "";
+        accumulatedCompletion += content;
         yield {
           role: "assistant",
           content: next.value,
@@ -120,15 +124,30 @@
       completionOptions,
       messageOptions,
     );
+    let accumulatedCompletion = "";
     let next = await gen.next();
     while (!next.done) {
       if (abortController.signal.aborted) {
+        errorPromptLog.completion = accumulatedCompletion;
         next = await gen.return(errorPromptLog);
         break;
       }
       const chunk = next.value;
+      // Accumulate the content from chunks
+      if (chunk.content) {
+        const content =
+          typeof chunk.content === "string"
+            ? chunk.content
+            : Array.isArray(chunk.content)
+              ? chunk.content
+                  .map((part) => (part.type === "text" ? part.text : ""))
+                  .join("")
+              : "";
+        accumulatedCompletion += content;
+      }
+
       yield chunk;
       next = await gen.next();
     }
diff --git a/core/llm/streamChat.vitest.ts b/core/llm/streamChat.vitest.ts
new file mode 100644
index 00000000000..63c0e460824
--- /dev/null
+++ b/core/llm/streamChat.vitest.ts
@@ -0,0 +1,97 @@
+import { describe, test, expect } from "vitest";
+
+/**
+ * Tests for the content accumulation logic used in streamChat.ts.
+ *
+ * The core change in streamChat.ts adds an `accumulatedCompletion` variable
+ * that tracks partial output from streaming chunks. This logic must handle
+ * both string content and MessagePart[] content correctly.
+ *
+ * These are unit tests for the extraction/accumulation behavior.
+ * Integration tests for the full llmStreamChat flow are covered by
+ * existing e2e tests.
+ */
+describe("streamChat content accumulation logic", () => {
+  // Mirror the extraction logic from streamChat.ts lines 140-147
+  function extractContent(content: unknown): string {
+    if (typeof content === "string") {
+      return content;
+    }
+    if (Array.isArray(content)) {
+      return content
+        .map((part: any) => (part.type === "text" ? part.text : ""))
+        .join("");
+    }
+    return "";
+  }
+
+  test("should extract string content from chunks", () => {
+    expect(extractContent("Hello world")).toBe("Hello world");
+  });
+
+  test("should extract text from MessagePart[] content", () => {
+    const parts = [
+      { type: "text", text: "Part 1 " },
+      { type: "text", text: "Part 2" },
+    ];
+    expect(extractContent(parts)).toBe("Part 1 Part 2");
+  });
+
+  test("should skip non-text MessageParts (e.g. imageUrl)", () => {
+    const parts = [
+      { type: "text", text: "Hello " },
+      { type: "imageUrl", imageUrl: { url: "http://example.com/img.png" } },
+      { type: "text", text: "world" },
+    ];
+    expect(extractContent(parts)).toBe("Hello world");
+  });
+
+  test("should return empty string for undefined/null content", () => {
+    expect(extractContent(undefined)).toBe("");
+    expect(extractContent(null)).toBe("");
+  });
+
+  test("should accumulate content across multiple streaming chunks", () => {
+    const chunks = [
+      { content: "Hello " },
+      { content: "world" },
+      { content: "!" },
+    ];
+    let accumulated = "";
+    for (const chunk of chunks) {
+      accumulated += extractContent(chunk.content);
+    }
+    expect(accumulated).toBe("Hello world!");
+  });
+
+  test("should accumulate mixed string and MessagePart[] chunks", () => {
+    const chunks = [
+      { content: "Start " },
+      {
+        content: [
+          { type: "text", text: "middle " },
+          { type: "text", text: "part" },
+        ],
+      },
+      { content: " end" },
+    ];
+    let accumulated = "";
+    for (const chunk of chunks) {
+      accumulated += extractContent(chunk.content);
+    }
+    expect(accumulated).toBe("Start middle part end");
+  });
+
+  test("should handle empty chunks without error", () => {
+    const chunks = [
+      { content: "Hello" },
+      { content: "" },
+      { content: " world" },
+    ];
+    let accumulated = "";
+    for (const chunk of chunks) {
+      accumulated += extractContent(chunk.content);
+    }
+    expect(accumulated).toBe("Hello world");
+  });
+});
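
Note (not part of the patch): the accumulate-on-abort pattern above can be read
as a small, self-contained generator. The sketch below is illustrative only;
streamWithAbort, Chunk, and the promptLog parameter are hypothetical stand-ins
for llmStreamChat's internals, not code from this repository.

// Hypothetical sketch: models how the patched llmStreamChat preserves
// partial output when the abort signal fires mid-stream.
type MessagePart =
  | { type: "text"; text: string }
  | { type: "imageUrl"; imageUrl: { url: string } };

interface Chunk {
  content: string | MessagePart[];
}

// Same extraction rule the patch adds: strings pass through, MessagePart[]
// keeps only text parts, anything else contributes nothing.
function extractContent(content: Chunk["content"]): string {
  if (typeof content === "string") {
    return content;
  }
  return content.map((p) => (p.type === "text" ? p.text : "")).join("");
}

async function* streamWithAbort(
  source: AsyncIterable<Chunk>,
  signal: AbortSignal,
  promptLog: { completion: string },
): AsyncGenerator<Chunk> {
  let accumulatedCompletion = "";
  for await (const chunk of source) {
    if (signal.aborted) {
      // The fix: hand the partial completion to the log instead of losing it.
      promptLog.completion = accumulatedCompletion;
      return;
    }
    accumulatedCompletion += extractContent(chunk.content);
    yield chunk;
  }
  promptLog.completion = accumulatedCompletion;
}

Under these assumptions, a caller that aborts after two chunks would find the
text of those two chunks in promptLog.completion rather than an empty string,
which is exactly the behavior the vitest file verifies for extractContent.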