From cfb17f26bf8deb60dbac10fd32b7f87fad5a71a6 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Thu, 13 Nov 2025 13:38:41 -0800 Subject: [PATCH 1/6] add langgraph multi agent sample --- .../langgraph-multi-agents/Dockerfile | 16 ++ .../langgraph-multi-agents/agent.yaml | 25 +++ .../langgraph-multi-agents/main.py | 157 ++++++++++++++++++ .../langgraph-multi-agents/requirements.txt | 8 + 4 files changed, 206 insertions(+) create mode 100644 samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/Dockerfile create mode 100644 samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/agent.yaml create mode 100644 samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/main.py create mode 100644 samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/requirements.txt diff --git a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/Dockerfile b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/Dockerfile new file mode 100644 index 000000000..33027fa60 --- /dev/null +++ b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.11-slim + +WORKDIR /app + +COPY . 
user_agent/ +WORKDIR /app/user_agent + +RUN if [ -f requirements.txt ]; then \ + pip install -r requirements.txt; \ + else \ + echo "No requirements.txt found"; \ + fi + +EXPOSE 8088 + +CMD ["python", "main.py"] diff --git a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/agent.yaml b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/agent.yaml new file mode 100644 index 000000000..8b93f3c8c --- /dev/null +++ b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/agent.yaml @@ -0,0 +1,25 @@ +name: LangGraphMultiAgent +description: This LangGraph agent can perform arithmetic calculations such as addition, subtraction, multiplication, and division. +metadata: + example: + - role: user + content: |- + What's the latest news in AI? And return the word count of your answer. + tags: + - example + - learning + authors: + - migu +template: + name: LangGraphMultiAgentLG + kind: hosted + environment_variables: + - name: AZURE_OPENAI_ENDPOINT + value: ${AZURE_OPENAI_ENDPOINT} + - name: OPENAI_API_VERSION + value: 2025-03-01-preview + - name: APPLICATIONINSIGHTS_CONNECTION_STRING + value: ${APPLICATIONINSIGHTS_CONNECTION_STRING} +resources: + - kind: model + id: gpt-4o-mini diff --git a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/main.py b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/main.py new file mode 100644 index 000000000..996166c7a --- /dev/null +++ b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/main.py @@ -0,0 +1,157 @@ +import os +import logging + +from dotenv import load_dotenv +from langchain.chat_models import init_chat_model +from langchain_core.tools import tool +from langgraph.graph import ( + END, + START, + MessagesState, + StateGraph, +) +from typing_extensions import Literal +from azure.identity import DefaultAzureCredential, 
get_bearer_token_provider + +from azure.ai.agentserver.langgraph import from_langgraph +from azure.monitor.opentelemetry import configure_azure_monitor +from langchain.agents import create_agent + + + +from typing import Literal + +from langchain_core.messages import BaseMessage, HumanMessage +from langgraph.graph import MessagesState, END +from langgraph.types import Command + + +logger = logging.getLogger(__name__) + +load_dotenv() + +if os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING"): + configure_azure_monitor(enable_live_metrics=True, logger_name="__main__") + +deployment_name = os.getenv("AZURE_AI_MODEL_DEPLOYMENT_NAME", "gpt-4o-mini") + +try: + credential = DefaultAzureCredential() + token_provider = get_bearer_token_provider( + credential, "https://cognitiveservices.azure.com/.default" + ) + llm = init_chat_model( + f"azure_openai:{deployment_name}", + azure_ad_token_provider=token_provider, + ) +except Exception: + logger.exception("Calculator Agent failed to start") + raise + + +# Define tools +def make_system_prompt(suffix: str) -> str: + return ( + "You are a helpful AI assistant, collaborating with other assistants." + " Use the provided tools to progress towards answering the question." + " If you are unable to fully answer, that's OK, another assistant with different tools " + " will help where you left off. Execute what you can to make progress." + " If you or any of the other assistants have the final answer or deliverable," + " prefix your response with FINAL ANSWER so the team knows to stop." + f"\n{suffix}" + ) + +def research_tool(query: str) -> str: + """A mock research tool that simulates looking up information.""" + # In a real implementation, this would call an external API or database. 
+ return f"Research results for '{query}'" + + +def word_count_tool(text: str) -> int: + """A tool that counts the number of words in a given text.""" + return len(text.split()) + + +def get_next_node(last_message: BaseMessage, goto: str): + if "FINAL ANSWER" in last_message.content: + # Any agent decided the work is done + return END + return goto + + +# Research agent and node +research_agent = create_agent( + llm, + tools=[research_tool], # TODO: use a tool + system_prompt=make_system_prompt( + "You can only do research. You are working with a word count agent colleague." + ), +) + + +def research_node( + state: MessagesState, +) -> Command[Literal["word_counter", END]]: + result = research_agent.invoke(state) + goto = get_next_node(result["messages"][-1], "word_counter") + # wrap in a human message, as not all providers allow + # AI message at the last position of the input messages list + result["messages"][-1] = HumanMessage( + content=result["messages"][-1].content, name="researcher" + ) + return Command( + update={ + # share internal message history of research agent with other agents + "messages": result["messages"], + }, + goto=goto, + ) + +# word count agent and node +word_count_agent = create_agent( + llm, + [word_count_tool], # TODO: use a tool + system_prompt=make_system_prompt( + "You can only count words in a string. You are working with a researcher colleague." 
+ ), +) + + +def word_count_node(state: MessagesState) -> Command[Literal["researcher", END]]: + result = word_count_agent.invoke(state) + goto = get_next_node(result["messages"][-1], "researcher") + # wrap in a human message, as not all providers allow + # AI message at the last position of the input messages list + result["messages"][-1] = HumanMessage( + content=result["messages"][-1].content, name="word_counter" + ) + return Command( + update={ + # share internal message history of chart agent with other agents + "messages": result["messages"], + }, + goto=goto, + ) + + +# Build workflow +def build_agent() -> "StateGraph": + workflow = StateGraph(MessagesState) + workflow.add_node("researcher", research_node) + workflow.add_node("word_counter", word_count_node) + + workflow.add_edge(START, "researcher") + + # Compile the agent + return workflow.compile() + + +# Build workflow and run agent +if __name__ == "__main__": + try: + agent = build_agent() + adapter = from_langgraph(agent) + adapter.run() + except Exception: + logger.exception("Multi-Agent encountered an error while running") + raise diff --git a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/requirements.txt b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/requirements.txt new file mode 100644 index 000000000..a761a1375 --- /dev/null +++ b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/requirements.txt @@ -0,0 +1,8 @@ +langgraph==1.0.2 + +azure-ai-agentserver-langgraph==1.0.0b4 + +pytest==8.4.2 +azure-identity==1.25.0 +python-dotenv==1.1.1 +azure-monitor-opentelemetry==1.8.1 From 83e4f4c15177431456122d47a4910b37fe0d320d Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Thu, 13 Nov 2025 15:19:07 -0800 Subject: [PATCH 2/6] add langgraph multi agent with mcp sample --- .../Dockerfile | 16 ++ .../agent.yaml | 25 +++ .../langgraph-multi-agents-with-mcp/main.py | 197 ++++++++++++++++++ 
.../requirements.txt | 12 ++ .../langgraph-multi-agents/agent.yaml | 4 +- 5 files changed, 251 insertions(+), 3 deletions(-) create mode 100644 samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/Dockerfile create mode 100644 samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/agent.yaml create mode 100644 samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/main.py create mode 100644 samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/requirements.txt diff --git a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/Dockerfile b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/Dockerfile new file mode 100644 index 000000000..33027fa60 --- /dev/null +++ b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.11-slim + +WORKDIR /app + +COPY . user_agent/ +WORKDIR /app/user_agent + +RUN if [ -f requirements.txt ]; then \ + pip install -r requirements.txt; \ + else \ + echo "No requirements.txt found"; \ + fi + +EXPOSE 8088 + +CMD ["python", "main.py"] diff --git a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/agent.yaml b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/agent.yaml new file mode 100644 index 000000000..d5235c409 --- /dev/null +++ b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/agent.yaml @@ -0,0 +1,25 @@ +name: LangGraphMultiAgent-MCP +description: This LangGraph multi-agent sample pairs a researcher agent (using MCP tools) with a word count agent to answer questions and report the word count of the answer. +metadata: + example: + - role: user + content: |- + What's the latest news in AI?
And return the word count of your answer. + tags: + - example + - learning +template: + name: LangGraphMultiAgentMCP + kind: hosted + environment_variables: + - name: AZURE_OPENAI_ENDPOINT + value: ${AZURE_OPENAI_ENDPOINT} + - name: OPENAI_API_VERSION + value: 2025-03-01-preview + - name: APPLICATIONINSIGHTS_CONNECTION_STRING + value: ${APPLICATIONINSIGHTS_CONNECTION_STRING} + - name: AZURE_AI_PROJECT_TOOL_CONNECTION_ID + value: ${AZURE_AI_PROJECT_TOOL_CONNECTION_ID} +resources: + - kind: model + id: gpt-4o-mini diff --git a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/main.py b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/main.py new file mode 100644 index 000000000..89fccf32d --- /dev/null +++ b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/main.py @@ -0,0 +1,197 @@ +import asyncio +import os +import logging + +from dotenv import load_dotenv +from langchain.chat_models import init_chat_model +from langchain_core.tools import tool +from langgraph.graph import ( + END, + START, + MessagesState, + StateGraph, +) +from typing_extensions import Literal +from azure.identity import DefaultAzureCredential, get_bearer_token_provider + +from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient +from azure.ai.agentserver.langgraph import ToolClient, from_langgraph + +from azure.monitor.opentelemetry import configure_azure_monitor +from langchain.agents import create_agent + + + +from typing import Literal + +from langchain_core.messages import BaseMessage, HumanMessage +from langgraph.graph import MessagesState, END +from langgraph.types import Command + + +logger = logging.getLogger(__name__) + +load_dotenv() + +if os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING"): + configure_azure_monitor(enable_live_metrics=True, logger_name="__main__") + +deployment_name = os.getenv("AZURE_AI_MODEL_DEPLOYMENT_NAME", 
"gpt-4o-mini") + +try: + credential = DefaultAzureCredential() + token_provider = get_bearer_token_provider( + credential, "https://cognitiveservices.azure.com/.default" + ) + llm = init_chat_model( + f"azure_openai:{deployment_name}", + azure_ad_token_provider=token_provider, + ) +except Exception: + logger.exception("Calculator Agent failed to start") + raise + + +async def get_tools_from_mcp(): + # Get configuration from environment + project_endpoint = os.getenv("AZURE_AI_PROJECT_ENDPOINT") + tool_connection_id = os.getenv("AZURE_AI_PROJECT_TOOL_CONNECTION_ID") + + if not project_endpoint: + raise ValueError( + "AZURE_AI_PROJECT_ENDPOINT environment variable is required. " + "Set it to your Azure AI project endpoint, e.g., " + "https://.services.ai.azure.com/api/projects/" + ) + + print(f"Using project endpoint: {project_endpoint}, tool connection ID: {tool_connection_id}") + # Create Azure credentials + credential = DefaultAzureCredential() + tool_definitions = [ + { + "type": "mcp", + "project_connection_id": tool_connection_id, + }, + ] + # Create the AzureAIToolClient + # This client supports both MCP tools and Azure AI Tools API + tool_client = AzureAIToolClient( + endpoint=project_endpoint, + credential=credential, + tools=tool_definitions + ) + + # Create the ToolClient + client = ToolClient(tool_client) + + # List all available tools and convert to LangChain format + print("Fetching tools from Azure AI Tool Client...") + tools = await client.list_tools() + print(f"Found {len(tools)} tools:") + for tool in tools: + print(f" - {tool.name}: {tool.description}") + return tools + + +# Define tools +def make_system_prompt(suffix: str) -> str: + return ( + "You are a helpful AI assistant, collaborating with other assistants." + " Use the provided tools to progress towards answering the question." + " If you are unable to fully answer, that's OK, another assistant with different tools " + " will help where you left off. Execute what you can to make progress." 
+ " If you or any of the other assistants have the final answer or deliverable," + " prefix your response with FINAL ANSWER so the team knows to stop." + f"\n{suffix}" + ) + + +def word_count_tool(text: str) -> int: + """A tool that counts the number of words in a given text.""" + return len(text.split()) + + +def get_next_node(last_message: BaseMessage, goto: str): + if "FINAL ANSWER" in last_message.content: + # Any agent decided the work is done + return END + return goto + + +tools = asyncio.run(get_tools_from_mcp()) +# Research agent and node +research_agent = create_agent( + llm, + tools=tools, # TODO: use a tool + system_prompt=make_system_prompt( + "You can only do research. You are working with a word count agent colleague." + ), +) + + +def research_node( + state: MessagesState, +) -> Command[Literal["word_counter", END]]: + result = research_agent.invoke(state) + goto = get_next_node(result["messages"][-1], "word_counter") + # wrap in a human message, as not all providers allow + # AI message at the last position of the input messages list + result["messages"][-1] = HumanMessage( + content=result["messages"][-1].content, name="researcher" + ) + return Command( + update={ + # share internal message history of research agent with other agents + "messages": result["messages"], + }, + goto=goto, + ) + +# word count agent and node +word_count_agent = create_agent( + llm, + [word_count_tool], # TODO: use a tool + system_prompt=make_system_prompt( + "You can only count words in a string. You are working with a researcher colleague." 
+ ), +) + + +def word_count_node(state: MessagesState) -> Command[Literal["researcher", END]]: + result = word_count_agent.invoke(state) + goto = get_next_node(result["messages"][-1], "researcher") + # wrap in a human message, as not all providers allow + # AI message at the last position of the input messages list + result["messages"][-1] = HumanMessage( + content=result["messages"][-1].content, name="word_counter" + ) + return Command( + update={ + # share internal message history of chart agent with other agents + "messages": result["messages"], + }, + goto=goto, + ) + + +# Build workflow +def build_agent() -> "StateGraph": + workflow = StateGraph(MessagesState) + workflow.add_node("researcher", research_node) + workflow.add_node("word_counter", word_count_node) + + workflow.add_edge(START, "researcher") + + # Compile the agent + return workflow.compile() + + +# Build workflow and run agent +if __name__ == "__main__": + try: + agent = build_agent() + adapter = from_langgraph(agent) + adapter.run() + except Exception: + logger.exception("Multi-Agent encountered an error while running") + raise diff --git a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/requirements.txt b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/requirements.txt new file mode 100644 index 000000000..b5d6e4247 --- /dev/null +++ b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/requirements.txt @@ -0,0 +1,12 @@ +langgraph==1.0.2 + +azure-ai-agentserver-langgraph==1.0.0b4 + +pytest==8.4.2 +azure-identity==1.25.0 +python-dotenv==1.1.1 +azure-monitor-opentelemetry==1.8.1 + +# MCP client for fetching tools from MCP servers +mcp>=1.0.0 +httpx>=0.27.0 diff --git a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/agent.yaml b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/agent.yaml index 
8b93f3c8c..eeb3dd1d5 100644 --- a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/agent.yaml +++ b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/agent.yaml @@ -8,10 +8,8 @@ metadata: tags: - example - learning - authors: - - migu template: - name: LangGraphMultiAgentLG + name: LangGraphMultiAgent kind: hosted environment_variables: - name: AZURE_OPENAI_ENDPOINT From b8bf916bceae6983316e7e1e1f7153ee7835f6c7 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Fri, 14 Nov 2025 13:28:24 -0800 Subject: [PATCH 3/6] refined sample --- .../langgraph-multi-agents-with-mcp/main.py | 185 ++++++++++-------- 1 file changed, 98 insertions(+), 87 deletions(-) diff --git a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/main.py b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/main.py index 89fccf32d..b66fd6934 100644 --- a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/main.py +++ b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/main.py @@ -1,6 +1,6 @@ -import asyncio import os import logging +import asyncio from dotenv import load_dotenv from langchain.chat_models import init_chat_model @@ -11,18 +11,16 @@ MessagesState, StateGraph, ) -from typing_extensions import Literal from azure.identity import DefaultAzureCredential, get_bearer_token_provider -from azure.ai.agentserver.core.client.tools.aio import AzureAIToolClient -from azure.ai.agentserver.langgraph import ToolClient, from_langgraph - +from azure.ai.agentserver.langgraph import from_langgraph from azure.monitor.opentelemetry import configure_azure_monitor from langchain.agents import create_agent +from langgraph.graph.state import CompiledStateGraph +from langchain_core.tools import StructuredTool - -from typing import Literal +from typing import Literal, List from 
langchain_core.messages import BaseMessage, HumanMessage from langgraph.graph import MessagesState, END @@ -52,47 +50,6 @@ raise -async def get_tools_from_mcp(): - # Get configuration from environment - project_endpoint = os.getenv("AZURE_AI_PROJECT_ENDPOINT") - tool_connection_id = os.getenv("AZURE_AI_PROJECT_TOOL_CONNECTION_ID") - - if not project_endpoint: - raise ValueError( - "AZURE_AI_PROJECT_ENDPOINT environment variable is required. " - "Set it to your Azure AI project endpoint, e.g., " - "https://.services.ai.azure.com/api/projects/" - ) - - print(f"Using project endpoint: {project_endpoint}, tool connection ID: {tool_connection_id}") - # Create Azure credentials - credential = DefaultAzureCredential() - tool_definitions = [ - { - "type": "mcp", - "project_connection_id": tool_connection_id, - }, - ] - # Create the AzureAIToolClient - # This client supports both MCP tools and Azure AI Tools API - tool_client = AzureAIToolClient( - endpoint=project_endpoint, - credential=credential, - tools=tool_definitions - ) - - # Create the ToolClient - client = ToolClient(tool_client) - - # List all available tools and convert to LangChain format - print("Fetching tools from Azure AI Tool Client...") - tools = await client.list_tools() - print(f"Found {len(tools)} tools:") - for tool in tools: - print(f" - {tool.name}: {tool.description}") - return tools - - # Define tools def make_system_prompt(suffix: str) -> str: return ( @@ -117,40 +74,10 @@ def get_next_node(last_message: BaseMessage, goto: str): return END return goto - -tools = asyncio.run(get_tools_from_mcp()) -# Research agent and node -research_agent = create_agent( - llm, - tools=tools, # TODO: use a tool - system_prompt=make_system_prompt( - "You can only do research. You are working with a word count agent colleague." 
- ), -) - - -def research_node( - state: MessagesState, -) -> Command[Literal["word_counter", END]]: - result = research_agent.invoke(state) - goto = get_next_node(result["messages"][-1], "word_counter") - # wrap in a human message, as not all providers allow - # AI message at the last position of the input messages list - result["messages"][-1] = HumanMessage( - content=result["messages"][-1].content, name="researcher" - ) - return Command( - update={ - # share internal message history of research agent with other agents - "messages": result["messages"], - }, - goto=goto, - ) - # word count agent and node word_count_agent = create_agent( llm, - [word_count_tool], # TODO: use a tool + [word_count_tool], system_prompt=make_system_prompt( "You can only count words in a string. You are working with a researcher colleague." ), @@ -173,9 +100,41 @@ def word_count_node(state: MessagesState) -> Command[Literal["researcher", END]] goto=goto, ) +def build_researcher_node(tools): + async def research_node( + state: MessagesState, + ) -> Command[Literal["word_counter", END]]: + # Research agent and node + research_agent = create_agent( + llm, + tools=tools, + system_prompt=make_system_prompt( + "You can only do research. You are working with a word count agent colleague." 
+ ), + ) + + result = await research_agent.ainvoke(state) + goto = get_next_node(result["messages"][-1], "word_counter") + # wrap in a human message, as not all providers allow + # AI message at the last position of the input messages list + result["messages"][-1] = HumanMessage( + content=result["messages"][-1].content, name="researcher" + ) + return Command( + update={ + # share internal message history of research agent with other agents + "messages": result["messages"], + }, + goto=goto, + ) + + return research_node + # Build workflow -def build_agent() -> "StateGraph": +def build_agent(tools) -> "StateGraph": + research_node = build_researcher_node(tools) + workflow = StateGraph(MessagesState) workflow.add_node("researcher", research_node) workflow.add_node("word_counter", word_count_node) @@ -186,12 +145,64 @@ def build_agent() -> "StateGraph": return workflow.compile() +def create_graph_factory(): + """Create a factory function that builds a graph with ToolClient. + + This function returns a factory that takes a ToolClient and returns + a CompiledStateGraph. The graph is created at runtime for every request, + allowing it to access the latest tool configuration dynamically. + """ + + async def graph_factory(tools: List[StructuredTool]) -> CompiledStateGraph: + print("\nCreating LangGraph agent with tools from factory...") + agent = build_agent(tools) + print("Agent created successfully!") + return agent + + return graph_factory + + +async def quickstart(): + """Build and return a LangGraphAdapter using a graph factory function.""" + + # Get configuration from environment + project_endpoint = os.getenv("AZURE_AI_PROJECT_ENDPOINT") + + if not project_endpoint: + raise ValueError( + "AZURE_AI_PROJECT_ENDPOINT environment variable is required. 
" + "Set it to your Azure AI project endpoint, e.g., " + "https://.services.ai.azure.com/api/projects/" + ) + + # Create Azure credentials + credential = DefaultAzureCredential() + + # Create a factory function that will build the graph at runtime + # The factory will receive a ToolClient when the agent first runs + graph_factory = create_graph_factory() + + # tools defined in the project + tool_connection_id = os.getenv("AZURE_AI_PROJECT_TOOL_CONNECTION_ID") + tools = [{"type": "mcp", "project_connection_id": tool_connection_id}] + + adapter = from_langgraph(graph_factory, credentials=credential, tools=tools) + + print("Adapter created! Graph will be built on every request.") + return adapter + + +async def main(): # pragma: no cover - sample entrypoint + """Main function to run the agent.""" + adapter = await quickstart() + + if adapter: + print("\nStarting agent server...") + print("The graph factory will be called for every request that arrives.") + await adapter.run_async() + + + # Build workflow and run agent if __name__ == "__main__": - try: - agent = build_agent() - adapter = from_langgraph(agent) - adapter.run() - except Exception: - logger.exception("Multi-Agent encountered an error while running") - raise + asyncio.run(main()) From 25f8f996f48a61518f7862c3040d8774b254bb80 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Fri, 14 Nov 2025 13:32:17 -0800 Subject: [PATCH 4/6] refined import --- .../agent.yaml | 2 +- .../langgraph-multi-agents-with-mcp/main.py | 23 ++++++++----------- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/agent.yaml b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/agent.yaml index d5235c409..06d7baace 100644 --- a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/agent.yaml +++ 
b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/agent.yaml @@ -4,7 +4,7 @@ metadata: example: - role: user content: |- - What's the latest news in AI? And return the word count of your answer. + Give me a short answer for how to create a resource group using azure cli? And return the word count of your answer. tags: - example - learning diff --git a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/main.py b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/main.py index b66fd6934..892493b0f 100644 --- a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/main.py +++ b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/main.py @@ -3,28 +3,25 @@ import asyncio from dotenv import load_dotenv +from typing import Literal, List + +from langchain.agents import create_agent from langchain.chat_models import init_chat_model -from langchain_core.tools import tool +from langchain_core.messages import BaseMessage, HumanMessage +from langchain_core.tools import StructuredTool from langgraph.graph import ( END, START, MessagesState, StateGraph, ) -from azure.identity import DefaultAzureCredential, get_bearer_token_provider - -from azure.ai.agentserver.langgraph import from_langgraph -from azure.monitor.opentelemetry import configure_azure_monitor -from langchain.agents import create_agent from langgraph.graph.state import CompiledStateGraph -from langchain_core.tools import StructuredTool - +from langgraph.types import Command -from typing import Literal, List +from azure.identity import DefaultAzureCredential, get_bearer_token_provider +from azure.monitor.opentelemetry import configure_azure_monitor -from langchain_core.messages import BaseMessage, HumanMessage -from langgraph.graph import MessagesState, END -from langgraph.types import Command +from 
azure.ai.agentserver.langgraph import from_langgraph logger = logging.getLogger(__name__) @@ -112,7 +109,7 @@ async def research_node( "You can only do research. You are working with a word count agent colleague." ), ) - + result = await research_agent.ainvoke(state) goto = get_next_node(result["messages"][-1], "word_counter") # wrap in a human message, as not all providers allow From 06b00a3171672bc75128d739d465cbedee9a20ec Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Fri, 14 Nov 2025 13:59:32 -0800 Subject: [PATCH 5/6] remove unused sample --- .../langgraph-multi-agents/Dockerfile | 16 -- .../langgraph-multi-agents/agent.yaml | 23 --- .../langgraph-multi-agents/main.py | 157 ------------------ .../langgraph-multi-agents/requirements.txt | 8 - 4 files changed, 204 deletions(-) delete mode 100644 samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/Dockerfile delete mode 100644 samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/agent.yaml delete mode 100644 samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/main.py delete mode 100644 samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/requirements.txt diff --git a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/Dockerfile b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/Dockerfile deleted file mode 100644 index 33027fa60..000000000 --- a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM python:3.11-slim - -WORKDIR /app - -COPY . 
user_agent/ -WORKDIR /app/user_agent - -RUN if [ -f requirements.txt ]; then \ - pip install -r requirements.txt; \ - else \ - echo "No requirements.txt found"; \ - fi - -EXPOSE 8088 - -CMD ["python", "main.py"] diff --git a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/agent.yaml b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/agent.yaml deleted file mode 100644 index eeb3dd1d5..000000000 --- a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/agent.yaml +++ /dev/null @@ -1,23 +0,0 @@ -name: LangGraphMultiAgent -description: This LangGraph agent can perform arithmetic calculations such as addition, subtraction, multiplication, and division. -metadata: - example: - - role: user - content: |- - What's the latest news in AI? And return the word count of your answer. - tags: - - example - - learning -template: - name: LangGraphMultiAgent - kind: hosted - environment_variables: - - name: AZURE_OPENAI_ENDPOINT - value: ${AZURE_OPENAI_ENDPOINT} - - name: OPENAI_API_VERSION - value: 2025-03-01-preview - - name: APPLICATIONINSIGHTS_CONNECTION_STRING - value: ${APPLICATIONINSIGHTS_CONNECTION_STRING} -resources: - - kind: model - id: gpt-4o-mini diff --git a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/main.py b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/main.py deleted file mode 100644 index 996166c7a..000000000 --- a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/main.py +++ /dev/null @@ -1,157 +0,0 @@ -import os -import logging - -from dotenv import load_dotenv -from langchain.chat_models import init_chat_model -from langchain_core.tools import tool -from langgraph.graph import ( - END, - START, - MessagesState, - StateGraph, -) -from typing_extensions import Literal -from azure.identity import DefaultAzureCredential, get_bearer_token_provider 
- -from azure.ai.agentserver.langgraph import from_langgraph -from azure.monitor.opentelemetry import configure_azure_monitor -from langchain.agents import create_agent - - - -from typing import Literal - -from langchain_core.messages import BaseMessage, HumanMessage -from langgraph.graph import MessagesState, END -from langgraph.types import Command - - -logger = logging.getLogger(__name__) - -load_dotenv() - -if os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING"): - configure_azure_monitor(enable_live_metrics=True, logger_name="__main__") - -deployment_name = os.getenv("AZURE_AI_MODEL_DEPLOYMENT_NAME", "gpt-4o-mini") - -try: - credential = DefaultAzureCredential() - token_provider = get_bearer_token_provider( - credential, "https://cognitiveservices.azure.com/.default" - ) - llm = init_chat_model( - f"azure_openai:{deployment_name}", - azure_ad_token_provider=token_provider, - ) -except Exception: - logger.exception("Calculator Agent failed to start") - raise - - -# Define tools -def make_system_prompt(suffix: str) -> str: - return ( - "You are a helpful AI assistant, collaborating with other assistants." - " Use the provided tools to progress towards answering the question." - " If you are unable to fully answer, that's OK, another assistant with different tools " - " will help where you left off. Execute what you can to make progress." - " If you or any of the other assistants have the final answer or deliverable," - " prefix your response with FINAL ANSWER so the team knows to stop." - f"\n{suffix}" - ) - -def research_tool(query: str) -> str: - """A mock research tool that simulates looking up information.""" - # In a real implementation, this would call an external API or database. 
- return f"Research results for '{query}'" - - -def word_count_tool(text: str) -> int: - """A tool that counts the number of words in a given text.""" - return len(text.split()) - - -def get_next_node(last_message: BaseMessage, goto: str): - if "FINAL ANSWER" in last_message.content: - # Any agent decided the work is done - return END - return goto - - -# Research agent and node -research_agent = create_agent( - llm, - tools=[research_tool], # TODO: use a tool - system_prompt=make_system_prompt( - "You can only do research. You are working with a word count agent colleague." - ), -) - - -def research_node( - state: MessagesState, -) -> Command[Literal["word_counter", END]]: - result = research_agent.invoke(state) - goto = get_next_node(result["messages"][-1], "word_counter") - # wrap in a human message, as not all providers allow - # AI message at the last position of the input messages list - result["messages"][-1] = HumanMessage( - content=result["messages"][-1].content, name="researcher" - ) - return Command( - update={ - # share internal message history of research agent with other agents - "messages": result["messages"], - }, - goto=goto, - ) - -# word count agent and node -word_count_agent = create_agent( - llm, - [word_count_tool], # TODO: use a tool - system_prompt=make_system_prompt( - "You can only count words in a string. You are working with a researcher colleague." 
- ), -) - - -def word_count_node(state: MessagesState) -> Command[Literal["researcher", END]]: - result = word_count_agent.invoke(state) - goto = get_next_node(result["messages"][-1], "researcher") - # wrap in a human message, as not all providers allow - # AI message at the last position of the input messages list - result["messages"][-1] = HumanMessage( - content=result["messages"][-1].content, name="word_counter" - ) - return Command( - update={ - # share internal message history of chart agent with other agents - "messages": result["messages"], - }, - goto=goto, - ) - - -# Build workflow -def build_agent() -> "StateGraph": - workflow = StateGraph(MessagesState) - workflow.add_node("researcher", research_node) - workflow.add_node("word_counter", word_count_node) - - workflow.add_edge(START, "researcher") - - # Compile the agent - return workflow.compile() - - -# Build workflow and run agent -if __name__ == "__main__": - try: - agent = build_agent() - adapter = from_langgraph(agent) - adapter.run() - except Exception: - logger.exception("Multi-Agent encountered an error while running") - raise diff --git a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/requirements.txt b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/requirements.txt deleted file mode 100644 index a761a1375..000000000 --- a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -langgraph==1.0.2 - -azure-ai-agentserver-langgraph==1.0.0b4 - -pytest==8.4.2 -azure-identity==1.25.0 -python-dotenv==1.1.1 -azure-monitor-opentelemetry==1.8.1 From f52a4ddbc91c00c1df5949e701d9a09d61550fa8 Mon Sep 17 00:00:00 2001 From: Lu Sun Date: Fri, 14 Nov 2025 15:58:18 -0800 Subject: [PATCH 6/6] fix agent yaml --- .../hosted-agents/langgraph-multi-agents-with-mcp/agent.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/agent.yaml b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/agent.yaml index 06d7baace..cabc11bff 100644 --- a/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/agent.yaml +++ b/samples/microsoft/python/getting-started-agents/hosted-agents/langgraph-multi-agents-with-mcp/agent.yaml @@ -1,5 +1,5 @@ name: LangGraphMultiAgent-MCP -description: This LangGraph agent can perform arithmetic calculations such as addition, subtraction, multiplication, and division. +description: This LangGraph agents interacts with MCP to collection information and perform tasks. metadata: example: - role: user @@ -19,7 +19,7 @@ template: - name: APPLICATIONINSIGHTS_CONNECTION_STRING value: ${APPLICATIONINSIGHTS_CONNECTION_STRING} - name: AZURE_AI_PROJECT_TOOL_CONNECTION_ID - value: ${AZURE_AI_PROJECT_TOOL_CONNECTION_ID} + value: learnmcp resources: - kind: model id: gpt-4o-mini