7 changes: 5 additions & 2 deletions wren-ai-service/src/pipelines/generation/sql_diagnosis.py
@@ -91,8 +91,11 @@ async def generate_sql_diagnosis(
 @observe(capture_input=False)
 async def post_process(
     generate_sql_diagnosis: dict,
-) -> str:
-    return orjson.loads(generate_sql_diagnosis.get("replies")[0])
+) -> dict:
+    reply = generate_sql_diagnosis.get("replies", [""])[0]
+    if not reply or not reply.strip():
+        return {"reasoning": "LLM did not return any response."}
+    return orjson.loads(reply)
 
 
 ## End of Pipeline
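For context, a minimal standalone sketch of the new fallback behavior (the @observe decorator is omitted, and the payloads below are illustrative; only orjson is required):

import asyncio
import orjson

async def post_process(generate_sql_diagnosis: dict) -> dict:
    # Default to [""] so a missing "replies" key no longer raises.
    reply = generate_sql_diagnosis.get("replies", [""])[0]
    if not reply or not reply.strip():
        # Empty or whitespace-only replies previously crashed orjson.loads;
        # they now degrade to a diagnostic dict.
        return {"reasoning": "LLM did not return any response."}
    return orjson.loads(reply)

print(asyncio.run(post_process({"replies": [""]})))
# {'reasoning': 'LLM did not return any response.'}
print(asyncio.run(post_process({"replies": ['{"reasoning": "ok"}']})))
# {'reasoning': 'ok'}

The return annotation changes from str to dict accordingly: both branches now return a parsed dict rather than raw JSON text.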
10 changes: 9 additions & 1 deletion wren-ai-service/src/providers/llm/litellm.py
@@ -101,8 +101,16 @@ async def _run(
 
         generation_kwargs = {
             **combined_generation_kwargs,
-            **(generation_kwargs or {}),
+            **(generation_kwargs or {}),
         }
+        # Strip response_format with type=json_schema — only supported by OpenAI
+        # native models. Custom api_base models (e.g. Ollama, LiteLLM proxy with
+        # non-OpenAI backends) return empty responses when this is forwarded.
+        # System prompts already include explicit JSON format instructions.
+        if self._api_base and isinstance(
+            generation_kwargs.get("response_format"), dict
+        ) and generation_kwargs["response_format"].get("type") == "json_schema":
+            generation_kwargs.pop("response_format")
 
         allowed_openai_params = generation_kwargs.get(
             "allowed_openai_params", []
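For context, a minimal sketch of the stripping rule pulled out of the method (the helper name and sample payload are hypothetical; in the diff the check runs inline against self._api_base):

def strip_json_schema_response_format(api_base, generation_kwargs: dict) -> dict:
    # Drop response_format only when a custom api_base is set and the
    # format is the OpenAI-native json_schema variant.
    rf = generation_kwargs.get("response_format")
    if api_base and isinstance(rf, dict) and rf.get("type") == "json_schema":
        generation_kwargs.pop("response_format")
    return generation_kwargs

kwargs = {"response_format": {"type": "json_schema", "json_schema": {"name": "diagnosis"}}}
print(strip_json_schema_response_format("http://localhost:11434/v1", dict(kwargs)))
# {}  (stripped for a custom backend such as Ollama)
print(strip_json_schema_response_format(None, dict(kwargs)))
# response_format is preserved for native OpenAI models

Together the two changes cover both ends of the failure: the provider stops forwarding an unsupported response_format to custom backends, and the pipeline no longer crashes if a backend still returns an empty reply.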