+from typing import AsyncGenerator
+
+from google.adk.models.base_llm import BaseLlm
+from google.adk.models.llm_request import LlmRequest
+from google.adk.models.llm_response import LlmResponse
+from google.genai import types as genai_types
+
+class MockPlannerLlm(BaseLlm):
+    """A mock planner LLM that routes to sub-agents based on keywords."""
+
+    async def generate_content_async(
+        self, llm_request: LlmRequest, stream: bool = False
+    ) -> AsyncGenerator[LlmResponse, None]:
+        # Collect the text of every user message and route on the latest one.
+        user_queries: list[str] = []
+        for content in llm_request.contents:
+            if content.role == "user":
+                for part in content.parts or []:
+                    if getattr(part, "text", None):
+                        user_queries.append(part.text)
+        query = user_queries[-1] if user_queries else ""
+
+        # Map keyword triggers (Chinese and English) to sub-agent tool names,
+        # preserving the original dispatch order.
+        routes = [
+            (("网络", "network"), "network_agent"),
+            (("服务器", "主机", "host"), "host_agent"),
+            (("应用", "app"), "app_agent"),
+            (("安全", "sec"), "sec_agent"),
+        ]
+        function_calls = [
+            genai_types.FunctionCall(name=agent_name, args={"prompt": query})
+            for keywords, agent_name in routes
+            if any(keyword in query for keyword in keywords)
+        ]
+
+        if function_calls:
+            parts = [genai_types.Part(function_call=call) for call in function_calls]
+            response_content = genai_types.Content(
+                role="model",
+                parts=parts,
+            )
+        else:
+            text = (
+                f"Sorry, I could not understand your question '{query}'. "
+                "Please indicate whether it concerns the network, hosts, "
+                "applications, or security."
+            )
+            response_content = genai_types.Content(
+                role="model",
+                parts=[genai_types.Part.from_text(text=text)],
+            )
+
+        yield LlmResponse(content=response_content)
+
+class MockSubAgentLlm(BaseLlm):
+    """A mock sub-agent LLM that returns a canned acknowledgement."""
+
+    async def generate_content_async(
+        self, llm_request: LlmRequest, stream: bool = False
+    ) -> AsyncGenerator[LlmResponse, None]:
+        # Find the most recent function response in the request. genai
+        # contents have no "function" role; function responses arrive as
+        # parts on a content, so scan the parts rather than the roles.
+        function_response = None
+        for content in llm_request.contents:
+            for part in content.parts or []:
+                if getattr(part, "function_response", None):
+                    function_response = part.function_response
+
+        if function_response:
+            response_text = f"Received the request from {function_response.name}."
+        else:
+            response_text = "Received the request."
+
+        response_content = genai_types.Content(
+            role="model",
+            parts=[genai_types.Part.from_text(text=response_text)],
+        )
+
+        yield LlmResponse(content=response_content)
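
For reference, here is a minimal sketch of how these mocks might be wired into an ADK agent tree. The `LlmAgent`/`AgentTool` wiring and the agent names below are illustrative assumptions, not part of this commit; note that `BaseLlm` subclasses take a `model` id string when instantiated.

```python
# Hypothetical wiring sketch: the planner exposes each sub-agent as a tool,
# so the FunctionCalls emitted by MockPlannerLlm dispatch by tool name.
from google.adk.agents import LlmAgent
from google.adk.tools.agent_tool import AgentTool

network_agent = LlmAgent(
    name="network_agent",  # must match the FunctionCall name emitted above
    model=MockSubAgentLlm(model="mock-sub-agent"),
)

planner_agent = LlmAgent(
    name="planner_agent",
    model=MockPlannerLlm(model="mock-planner"),
    tools=[AgentTool(agent=network_agent)],  # host/app/sec wired the same way
)
```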