Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions .history/conf_20250626170352.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# [!NOTE]
# Read the `docs/configuration_guide.md` carefully, and update the
# configurations to match your specific settings and requirements.
# - Replace `api_key` with your own credentials.
# - Replace `base_url` and `model` name if you want to use a custom model.
# - A restart is required every time you change the `config.yaml` file.

BASIC_MODEL:
base_url: https://api.deepseek.com/v1
model: "deepseek-chat"
api_key: YOUR_DEEPSEEK_API_KEY  # SECURITY: a live key was committed here — revoke it and purge it from git history; also stop committing the .history/ autosave directory (add it to .gitignore)


# Reasoning model is optional.
# Uncomment the following settings if you want to use reasoning model
# for planning.

# REASONING_MODEL:
# base_url: https://ark-cn-beijing.bytedance.net/api/v3
# model: "doubao-1-5-thinking-pro-m-250428"
# api_key: xxxx
21 changes: 21 additions & 0 deletions .history/conf_20250630134255.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# [!NOTE]
# Read the `docs/configuration_guide.md` carefully, and update the
# configurations to match your specific settings and requirements.
# - Replace `api_key` with your own credentials.
# - Replace `base_url` and `model` name if you want to use a custom model.
# - A restart is required every time you change the `config.yaml` file.

BASIC_MODEL:
base_url: https://www.sophnet.com/api/open-apis/v1/chat/completions
model: "deepseek-chat"
api_key: YOUR_DEEPSEEK_API_KEY  # SECURITY: a live key was committed here — revoke it and purge it from git history


# Reasoning model is optional.
# Uncomment the following settings if you want to use reasoning model
# for planning.

# REASONING_MODEL:
# base_url: https://api.deepseek.com/v1
# model: "deepseek-chat"
# api_key: YOUR_DEEPSEEK_API_KEY  # SECURITY: live key redacted — revoke it
21 changes: 21 additions & 0 deletions .history/conf_20250630134307.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# [!NOTE]
# Read the `docs/configuration_guide.md` carefully, and update the
# configurations to match your specific settings and requirements.
# - Replace `api_key` with your own credentials.
# - Replace `base_url` and `model` name if you want to use a custom model.
# - A restart is required every time you change the `config.yaml` file.

BASIC_MODEL:
base_url: https://www.sophnet.com/api/open-apis/v1/chat/completions
model: "deepseek-chat"
api_key: YOUR_SOPHNET_API_KEY  # SECURITY: a live key was committed here — revoke it and purge it from git history


# Reasoning model is optional.
# Uncomment the following settings if you want to use reasoning model
# for planning.

# REASONING_MODEL:
# base_url: https://api.deepseek.com/v1
# model: "deepseek-chat"
# api_key: YOUR_DEEPSEEK_API_KEY  # SECURITY: live key redacted — revoke it
21 changes: 21 additions & 0 deletions .history/conf_20250630134322.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# [!NOTE]
# Read the `docs/configuration_guide.md` carefully, and update the
# configurations to match your specific settings and requirements.
# - Replace `api_key` with your own credentials.
# - Replace `base_url` and `model` name if you want to use a custom model.
# - A restart is required every time you change the `config.yaml` file.

BASIC_MODEL:
base_url: https://www.sophnet.com/api/open-apis/v1/chat/completions
model: "DeepSeek-V3-Fast"
api_key: YOUR_SOPHNET_API_KEY  # SECURITY: a live key was committed here — revoke it and purge it from git history


# Reasoning model is optional.
# Uncomment the following settings if you want to use reasoning model
# for planning.

# REASONING_MODEL:
# base_url: https://api.deepseek.com/v1
# model: "deepseek-chat"
# api_key: YOUR_DEEPSEEK_API_KEY  # SECURITY: live key redacted — revoke it
21 changes: 21 additions & 0 deletions .history/conf_20250630135307.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# [!NOTE]
# Read the `docs/configuration_guide.md` carefully, and update the
# configurations to match your specific settings and requirements.
# - Replace `api_key` with your own credentials.
# - Replace `base_url` and `model` name if you want to use a custom model.
# - A restart is required every time you change the `config.yaml` file.

BASIC_MODEL:
base_url: https://api.deepseek.com/v1
model: "deepseek-chat"
api_key: YOUR_DEEPSEEK_API_KEY  # SECURITY: a live key was committed here — revoke it and purge it from git history


# Reasoning model is optional.
# Uncomment the following settings if you want to use reasoning model
# for planning.

# REASONING_MODEL:
# base_url: https://ark-cn-beijing.bytedance.net/api/v3
# model: "doubao-1-5-thinking-pro-m-250428"
# api_key: xxxx
21 changes: 21 additions & 0 deletions .history/conf_20250630135340.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# [!NOTE]
# Read the `docs/configuration_guide.md` carefully, and update the
# configurations to match your specific settings and requirements.
# - Replace `api_key` with your own credentials.
# - Replace `base_url` and `model` name if you want to use a custom model.
# - A restart is required every time you change the `config.yaml` file.

BASIC_MODEL:
base_url: https://api.deepseek.com/v1
model: "deepseek-chat"
api_key: YOUR_DEEPSEEK_API_KEY  # SECURITY: a live key was committed here — revoke it and purge it from git history


# Reasoning model is optional.
# Uncomment the following settings if you want to use reasoning model
# for planning.

# REASONING_MODEL:
# base_url: https://ark-cn-beijing.bytedance.net/api/v3
# model: "doubao-1-5-thinking-pro-m-250428"
# api_key: xxxx
26 changes: 26 additions & 0 deletions .history/conf_20250630135426.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# [!NOTE]
# Read the `docs/configuration_guide.md` carefully, and update the
# configurations to match your specific settings and requirements.
# - Replace `api_key` with your own credentials.
# - Replace `base_url` and `model` name if you want to use a custom model.
# - A restart is required every time you change the `config.yaml` file.

BASIC_MODEL:
base_url: https://www.sophnet.com/api/open-apis/v1
model: "deepseek-chat"
api_key: YOUR_DEEPSEEK_API_KEY  # SECURITY: a live key was committed here — revoke it and purge it from git history

# BASIC_MODEL:
# base_url: https://api.deepseek.com/v1
# model: "deepseek-chat"
# api_key: YOUR_DEEPSEEK_API_KEY  # SECURITY: live key redacted — revoke it


# Reasoning model is optional.
# Uncomment the following settings if you want to use reasoning model
# for planning.

# REASONING_MODEL:
# base_url: https://ark-cn-beijing.bytedance.net/api/v3
# model: "doubao-1-5-thinking-pro-m-250428"
# api_key: xxxx
26 changes: 26 additions & 0 deletions .history/conf_20250630135453.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# [!NOTE]
# Read the `docs/configuration_guide.md` carefully, and update the
# configurations to match your specific settings and requirements.
# - Replace `api_key` with your own credentials.
# - Replace `base_url` and `model` name if you want to use a custom model.
# - A restart is required every time you change the `config.yaml` file.

BASIC_MODEL:
base_url: https://www.sophnet.com/api/open-apis/v1
model: "DeepSeek-R1-0528"
api_key: YOUR_SOPHNET_API_KEY  # SECURITY: a live key was committed here — revoke it and purge it from git history

# BASIC_MODEL:
# base_url: https://api.deepseek.com/v1
# model: "deepseek-chat"
# api_key: YOUR_DEEPSEEK_API_KEY  # SECURITY: live key redacted — revoke it


# Reasoning model is optional.
# Uncomment the following settings if you want to use reasoning model
# for planning.

# REASONING_MODEL:
# base_url: https://ark-cn-beijing.bytedance.net/api/v3
# model: "doubao-1-5-thinking-pro-m-250428"
# api_key: xxxx
26 changes: 26 additions & 0 deletions .history/conf_20250630140652.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# [!NOTE]
# Read the `docs/configuration_guide.md` carefully, and update the
# configurations to match your specific settings and requirements.
# - Replace `api_key` with your own credentials.
# - Replace `base_url` and `model` name if you want to use a custom model.
# - A restart is required every time you change the `config.yaml` file.

BASIC_MODEL:
base_url: https://www.sophnet.com/api/open-apis/v1
model: "DeepSeek-V3-Fast"
api_key: YOUR_SOPHNET_API_KEY  # SECURITY: a live key was committed here — revoke it and purge it from git history

# BASIC_MODEL:
# base_url: https://api.deepseek.com/v1
# model: "deepseek-chat"
# api_key: YOUR_DEEPSEEK_API_KEY  # SECURITY: live key redacted — revoke it


# Reasoning model is optional.
# Uncomment the following settings if you want to use reasoning model
# for planning.

# REASONING_MODEL:
# base_url: https://ark-cn-beijing.bytedance.net/api/v3
# model: "doubao-1-5-thinking-pro-m-250428"
# api_key: xxxx
15 changes: 4 additions & 11 deletions src/graph/nodes.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,10 +104,7 @@ def planner_node(
if configurable.enable_deep_thinking:
llm = get_llm_by_type("reasoning")
elif AGENT_LLM_MAP["planner"] == "basic":
llm = get_llm_by_type("basic").with_structured_output(
Plan,
method="json_mode",
)
llm = get_llm_by_type("basic")
else:
llm = get_llm_by_type(AGENT_LLM_MAP["planner"])

Expand All @@ -116,13 +113,9 @@ def planner_node(
return Command(goto="reporter")

full_response = ""
if AGENT_LLM_MAP["planner"] == "basic" and not configurable.enable_deep_thinking:
response = llm.invoke(messages)
full_response = response.model_dump_json(indent=4, exclude_none=True)
else:
response = llm.stream(messages)
for chunk in response:
full_response += chunk.content
response = llm.stream(messages)
for chunk in response:
full_response += chunk.content
logger.debug(f"Current state messages: {state['messages']}")
logger.info(f"Planner response: {full_response}")

Expand Down
7 changes: 4 additions & 3 deletions src/llms/llm.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
# SPDX-License-Identifier: MIT

from pathlib import Path
from typing import Any, Dict
Expand Down Expand Up @@ -70,6 +68,9 @@ def _create_llm_use_conf(

if llm_type == "reasoning":
merged_conf["api_base"] = merged_conf.pop("base_url", None)
if "response_format" in merged_conf:
# 确保 response_format 被正确传递
pass

return (
ChatOpenAI(**merged_conf)
Expand Down Expand Up @@ -132,4 +133,4 @@ def get_configured_llm_models() -> dict[str, list[str]]:

# In the future, we will use reasoning_llm and vl_llm for different purposes
# reasoning_llm = get_llm_by_type("reasoning")
# vl_llm = get_llm_by_type("vision")
# vl_llm = get_llm_by_type("vision")
26 changes: 22 additions & 4 deletions src/podcast/graph/script_writer_node.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,14 +17,32 @@

def script_writer_node(state: PodcastState):
    """Generate a podcast script from the raw input text in *state*.

    Invokes the configured podcast-script-writer LLM with the
    ``podcast/podcast_script_writer`` prompt, parses the (possibly
    malformed) JSON reply into a ``Script`` object, and returns it
    together with an empty ``audio_chunks`` list for downstream nodes.

    Falls back to an empty English ``Script`` when the response cannot
    be parsed, so the graph keeps running with a valid object.
    """
    logger.info("Generating script for podcast...")
    model = get_llm_by_type(AGENT_LLM_MAP["podcast_script_writer"])
    response = model.invoke(
        [
            SystemMessage(content=get_prompt_template("podcast/podcast_script_writer")),
            HumanMessage(content=state["input"]),
        ],
    )

    # Parse the JSON manually: the plain chat model (no structured-output
    # wrapper) may return slightly malformed JSON, so repair it first.
    try:
        import json

        from src.utils.json_utils import repair_json_output

        content = repair_json_output(response.content)
        script_data = json.loads(content)
        script = Script(**script_data)
    except Exception:
        # Log the full traceback instead of swallowing the error, then
        # degrade gracefully to an empty script rather than crashing.
        logger.exception("Failed to parse script JSON")
        script = Script(locale="en", lines=[])

    # Was a stray debug print(); route diagnostics through the logger.
    logger.debug(f"Generated script: {script}")
    return {"script": script, "audio_chunks": []}
2 changes: 1 addition & 1 deletion web/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ RUN \

##### RUNNER

FROM gcr.io/distroless/nodejs20-debian12 AS runner
FROM node:20-alpine AS runner
WORKDIR /app

ENV NODE_ENV=production
Expand Down
Loading