langchainv1_quickstart.py
"""LangChain v1 style weather agent example.
https://docs.langchain.com/oss/python/langchain-quickstart
This example mirrors the pattern from the LangChain v1 Quickstart docs,
adapted to this repo's multiple-provider model configuration.
"""

from __future__ import annotations

import os
from dataclasses import dataclass

import azure.identity
from dotenv import load_dotenv
from langchain.agents import create_agent
from langchain_core.runnables import RunnableConfig
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.runtime import get_runtime
from pydantic import BaseModel
from rich import print

load_dotenv(override=True)
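
# API_HOST selects the backend: "azure" (default), "ollama", or any other value
# falls through to openai.com via the standard OPENAI_* environment variables.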
API_HOST = os.getenv("API_HOST", "azure")

if API_HOST == "azure":
    # Keyless auth: exchange an Entra ID credential for an Azure OpenAI token.
    token_provider = azure.identity.get_bearer_token_provider(
        azure.identity.DefaultAzureCredential(),
        "https://cognitiveservices.azure.com/.default",
    )
    model = ChatOpenAI(
        model=os.environ.get("AZURE_OPENAI_CHAT_DEPLOYMENT"),
        base_url=os.environ["AZURE_OPENAI_ENDPOINT"] + "/openai/v1",
        api_key=token_provider,
        use_responses_api=True,
    )
elif API_HOST == "ollama":
    # Local Ollama server exposing an OpenAI-compatible endpoint.
    model = ChatOpenAI(
        model=os.environ.get("OLLAMA_MODEL", "gemma4:e4b"),
        base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"),
        api_key="none",
        use_responses_api=True,
    )
else:
    # Default: openai.com, authenticated via OPENAI_API_KEY.
    model = ChatOpenAI(
        model=os.getenv("OPENAI_MODEL", "gpt-4o-mini"),
        use_responses_api=True,
    )
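
# All three branches produce the same ChatOpenAI interface, so the agent code
# below is provider-agnostic.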

system_prompt = """You are an expert weather forecaster, who speaks in puns.

You have access to two tools:

- get_weather: use this to get the weather for a specific location
- get_user_info: use this to get the user's location

If a user asks you for the weather, make sure you know the location.
If you can tell from the question that they mean wherever they are,
use the get_user_info tool to find their location."""

# Mock user locations keyed by user id (string)
USER_LOCATION = {
    "1": "Florida",
    "2": "SF",
}


@dataclass
class UserContext:
    user_id: str


@tool
def get_weather(city: str) -> str:
    """Get weather for a given city."""
    return f"It's always sunny in {city}!"


@tool
def get_user_info(config: RunnableConfig) -> str:
    """Retrieve user information based on user ID."""
    # Read the user id from the runtime context passed to agent.invoke(...).
    runtime = get_runtime(UserContext)
    user_id = runtime.context.user_id
    return USER_LOCATION[user_id]


class WeatherResponse(BaseModel):
    conditions: str
    punny_response: str


checkpointer = InMemorySaver()

agent = create_agent(
    model=model,
    system_prompt=system_prompt,
    tools=[get_user_info, get_weather],
    response_format=WeatherResponse,
    checkpointer=checkpointer,
)
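
# The agent pairs the selected model with both tools, coerces its final answer
# into the WeatherResponse schema, and checkpoints conversation state per
# thread_id so follow-up messages share history.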


def main():
    # Reuse the same thread_id so the second turn sees the first turn's history.
    config = {"configurable": {"thread_id": "1"}}
    context = UserContext(user_id="1")

    r1 = agent.invoke(
        {"messages": [{"role": "user", "content": "what is the weather outside?"}]},
        config=config,
        context=context,
    )
    print(r1.get("structured_response"))

    r2 = agent.invoke(
        {"messages": [{"role": "user", "content": "Thanks"}]},
        config=config,
        context=context,
    )
    print(r2.get("structured_response"))


if __name__ == "__main__":
    main()