Skip to content

Commit be8e7fb

Browse files
committed
style: apply black and isort formatting, remove unused imports
Signed-off-by: Mihai Criveti <crivetimihai@gmail.com>
1 parent ebec343 commit be8e7fb

File tree

2 files changed

+57
-105
lines changed

2 files changed

+57
-105
lines changed

agent_runtimes/langchain_agent/agent_langchain.py

Lines changed: 20 additions & 67 deletions
Original file line number | Diff line number | Diff line change
@@ -75,11 +75,8 @@ def create_llm(config: AgentConfig) -> BaseChatModel:
7575
}
7676

7777
if provider not in providers:
78-
raise ValueError(
79-
f"Unsupported LLM provider: {provider}. "
80-
f"Supported providers: {', '.join(providers.keys())}"
81-
)
82-
78+
raise ValueError(f"Unsupported LLM provider: {provider}. " f"Supported providers: {', '.join(providers.keys())}")
79+
8380
return providers[provider](config, common_args)
8481

8582

@@ -89,11 +86,7 @@ def _create_openai_llm(config: AgentConfig, common_args: Dict[str, Any]) -> Base
8986
if not config.openai_api_key:
9087
raise ValueError("OPENAI_API_KEY is required for OpenAI provider")
9188

92-
openai_args = {
93-
"model": config.default_model,
94-
"api_key": config.openai_api_key,
95-
**common_args
96-
}
89+
openai_args = {"model": config.default_model, "api_key": config.openai_api_key, **common_args}
9790

9891
if config.openai_base_url:
9992
openai_args["base_url"] = config.openai_base_url
@@ -106,45 +99,26 @@ def _create_openai_llm(config: AgentConfig, common_args: Dict[str, Any]) -> Base
10699
def _create_azure_llm(config: AgentConfig, common_args: Dict[str, Any]) -> BaseChatModel:
107100
"""Create Azure OpenAI LLM instance."""
108101

109-
required_fields = [
110-
config.azure_openai_api_key,
111-
config.azure_openai_endpoint,
112-
config.azure_deployment_name
113-
]
102+
required_fields = [config.azure_openai_api_key, config.azure_openai_endpoint, config.azure_deployment_name]
114103

115104
if not all(required_fields):
116-
raise ValueError(
117-
"Azure OpenAI requires AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, and AZURE_DEPLOYMENT_NAME"
118-
)
119-
105+
raise ValueError("Azure OpenAI requires AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, and AZURE_DEPLOYMENT_NAME")
106+
120107
return AzureChatOpenAI(
121-
api_key=config.azure_openai_api_key,
122-
azure_endpoint=config.azure_openai_endpoint,
123-
api_version=config.azure_openai_api_version,
124-
azure_deployment=config.azure_deployment_name,
125-
**common_args
108+
api_key=config.azure_openai_api_key, azure_endpoint=config.azure_openai_endpoint, api_version=config.azure_openai_api_version, azure_deployment=config.azure_deployment_name, **common_args
126109
)
127110

128111

129112
def _create_bedrock_llm(config: AgentConfig, common_args: Dict[str, Any]) -> BaseChatModel:
130113
"""Create AWS Bedrock LLM instance."""
131114

132115
if BedrockChat is None:
133-
raise ImportError(
134-
"langchain-aws is required for Bedrock support. "
135-
"Install with: pip install langchain-aws"
136-
)
137-
138-
required_fields = [
139-
config.aws_access_key_id,
140-
config.aws_secret_access_key,
141-
config.bedrock_model_id
142-
]
116+
raise ImportError("langchain-aws is required for Bedrock support. " "Install with: pip install langchain-aws")
117+
118+
required_fields = [config.aws_access_key_id, config.aws_secret_access_key, config.bedrock_model_id]
143119

144120
if not all(required_fields):
145-
raise ValueError(
146-
"AWS Bedrock requires AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and BEDROCK_MODEL_ID"
147-
)
121+
raise ValueError("AWS Bedrock requires AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and BEDROCK_MODEL_ID")
148122

149123
return BedrockChat(
150124
model_id=config.bedrock_model_id,
@@ -157,37 +131,23 @@ def _create_bedrock_llm(config: AgentConfig, common_args: Dict[str, Any]) -> Bas
157131
def _create_ollama_llm(config: AgentConfig, common_args: Dict[str, Any]) -> BaseChatModel:
158132
"""Create OLLAMA LLM instance."""
159133
if ChatOllama is None:
160-
raise ImportError(
161-
"langchain-community is required for OLLAMA support. "
162-
"Install with: pip install langchain-community"
163-
)
164-
134+
raise ImportError("langchain-community is required for OLLAMA support. " "Install with: pip install langchain-community")
135+
165136
if not config.ollama_model:
166137
raise ValueError("OLLAMA_MODEL is required for OLLAMA provider")
167-
168-
return ChatOllama(
169-
model=config.ollama_model,
170-
base_url=config.ollama_base_url,
171-
**common_args
172-
)
138+
139+
return ChatOllama(model=config.ollama_model, base_url=config.ollama_base_url, **common_args)
173140

174141

175142
def _create_anthropic_llm(config: AgentConfig, common_args: Dict[str, Any]) -> BaseChatModel:
176143
"""Create Anthropic LLM instance."""
177144
if ChatAnthropic is None:
178-
raise ImportError(
179-
"langchain-anthropic is required for Anthropic support. "
180-
"Install with: pip install langchain-anthropic"
181-
)
182-
145+
raise ImportError("langchain-anthropic is required for Anthropic support. " "Install with: pip install langchain-anthropic")
146+
183147
if not config.anthropic_api_key:
184148
raise ValueError("ANTHROPIC_API_KEY is required for Anthropic provider")
185149

186-
return ChatAnthropic(
187-
model=config.default_model,
188-
api_key=config.anthropic_api_key,
189-
**common_args
190-
)
150+
return ChatAnthropic(model=config.default_model, api_key=config.anthropic_api_key, **common_args)
191151

192152

193153
class MCPTool(BaseTool):
@@ -371,12 +331,7 @@ def is_initialized(self) -> bool:
371331
async def check_readiness(self) -> bool:
372332
"""Check if agent is ready to handle requests"""
373333
try:
374-
return (
375-
self._initialized
376-
and self.agent_executor is not None
377-
and len(self.tools) >= 0 # Allow 0 tools for testing
378-
and await self.test_gateway_connection()
379-
)
334+
return self._initialized and self.agent_executor is not None and len(self.tools) >= 0 and await self.test_gateway_connection() # Allow 0 tools for testing
380335
except Exception:
381336
return False
382337

@@ -428,9 +383,7 @@ async def run_async(
428383
chat_history.append(SystemMessage(content=msg["content"]))
429384

430385
# Run the agent
431-
result = await self.agent_executor.ainvoke(
432-
{"input": input_text, "chat_history": chat_history, "tool_names": [tool.name for tool in self.tools]}
433-
)
386+
result = await self.agent_executor.ainvoke({"input": input_text, "chat_history": chat_history, "tool_names": [tool.name for tool in self.tools]})
434387

435388
return result["output"]
436389

0 commit comments

Comments (0)