# HTTP MCP Client Demo with LLM Integration
# This demonstrates connecting to an HTTP MCP server and letting an LLM use its tools

import asyncio
import json

import aiohttp
from openai import OpenAI
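
# Prerequisites (assumed from the imports and the notes at the bottom of this
# file): the aiohttp and openai packages must be installed, OPENAI_API_KEY
# must be set in the environment, and the HTTP MCP server from
# 04_https_streamable.py must already be running on localhost:8000.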


async def main():
    """
    Demo showing how to connect to an HTTP MCP server via Python
    and interact with its tools and resources with LLM integration.
    """
    server_url = "http://localhost:8000/mcp"

    print("🚀 Connecting to HTTP MCP Server...")
    print(f"📡 Server URL: {server_url}")
    print("=" * 50)

    async with aiohttp.ClientSession() as session:
        # 1. Initialize MCP connection
        print("🤝 Initializing MCP connection...")
        init_request = {
            "jsonrpc": "2.0",
            "id": 1,
            "method": "initialize",
            "params": {
                "protocolVersion": "2024-11-05",
                "capabilities": {"roots": {"listChanged": True}},
                "clientInfo": {"name": "HTTP Demo Client", "version": "1.0.0"},
            },
        }

        async with session.post(server_url, json=init_request) as response:
            if response.status != 200:
                print(f"❌ Failed to connect: HTTP {response.status}")
                return
            result = await response.json()
            print(f"✅ Connected! Server: {result['result']['serverInfo']['name']}")

        print()
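
        # For reference, a successful initialize response is shaped roughly
        # like this (field names per the MCP spec; values are placeholders):
        # {"jsonrpc": "2.0", "id": 1,
        #  "result": {"protocolVersion": "2024-11-05",
        #             "capabilities": {...},
        #             "serverInfo": {"name": "...", "version": "..."}}}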

        # 2. List available tools
        print("🔧 Discovering available tools...")
        tools_request = {
            "jsonrpc": "2.0",
            "id": 2,
            "method": "tools/list",
            "params": {},
        }

        async with session.post(server_url, json=tools_request) as response:
            tools_result = await response.json()
            tools = tools_result["result"]["tools"]
            print(f"📋 Available tools ({len(tools)}):")
            for tool in tools:
                print(f"  • {tool['name']}: {tool['description']}")

        print()
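
        # Each entry in result.tools carries "name", "description", and a JSON
        # Schema "inputSchema" (per the MCP spec); step 4 below maps these
        # fields directly onto OpenAI's function-calling tool format.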

        # 3. List available resources
        print("📚 Discovering available resources...")
        resources_request = {
            "jsonrpc": "2.0",
            "id": 3,
            "method": "resources/list",
            "params": {},
        }

        try:
            async with session.post(server_url, json=resources_request) as response:
                resources_result = await response.json()
                if "result" in resources_result:
                    resources = resources_result["result"]["resources"]
                    print(f"📚 Available resources ({len(resources)}):")
                    for resource in resources:
                        print(f"  • {resource['uri']}: {resource['name']}")
                else:
                    print("📚 No resources available")
        except Exception as e:
            print(f"📚 No resources available: {e}")

        print()

        # 4. Demo: Let LLM use HTTP MCP tools
        if tools:
            print("🤖 Letting LLM use HTTP MCP tools...")

            # Convert MCP tools to OpenAI tool format
            openai_tools = []
            for tool in tools:
                openai_tools.append(
                    {
                        "type": "function",
                        "function": {
                            "name": tool["name"],
                            "description": tool["description"],
                            "parameters": tool.get(
                                "inputSchema", {"type": "object", "properties": {}}
                            ),
                        },
                    }
                )
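
            # A converted entry looks like this (the tool name is one the demo
            # server is assumed to expose; see the direct call in the fallback
            # below):
            # {"type": "function",
            #  "function": {"name": "get_server_time",
            #               "description": "...",
            #               "parameters": {"type": "object", "properties": {}}}}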

            # Ask LLM to use the HTTP MCP server tools
            client = OpenAI()  # Make sure OPENAI_API_KEY is set

            try:
                response = client.chat.completions.create(
                    model="gpt-4o",
                    messages=[
                        {
                            "role": "user",
                            "content": "Get the current server time and start monitoring with a 3 second interval. Use session ID 'llm-demo-session'.",
                        }
                    ],
                    tools=openai_tools,
                    tool_choice="auto",
                )

                # Execute any tool calls the LLM wants to make
                message = response.choices[0].message
                if message.tool_calls:
                    print(f"🔧 LLM wants to use {len(message.tool_calls)} tool(s):")
                    for i, tool_call in enumerate(message.tool_calls):
                        print(f"  • {tool_call.function.name}")

                        # Execute the tool call via HTTP MCP
                        arguments = json.loads(tool_call.function.arguments)
                        mcp_request = {
                            "jsonrpc": "2.0",
                            "id": 10 + i,  # Unique ID for each tool call
                            "method": "tools/call",
                            "params": {
                                "name": tool_call.function.name,
                                "arguments": arguments,
                            },
                        }
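
                        # tools/call responses wrap tool output in
                        # result.content, a list of content blocks; this demo
                        # assumes the first block is a text block and reads
                        # its "text" field.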
                        async with session.post(
                            server_url, json=mcp_request
                        ) as response:
                            result = await response.json()
                            if "result" in result:
                                content = result["result"]["content"][0]["text"]
                                print(f"    ✅ Result: {content}")
                            else:
                                error_msg = result.get("error", {}).get(
                                    "message", "Unknown error"
                                )
                                print(f"    ❌ Error: {error_msg}")
                else:
                    print(f"💬 LLM response: {message.content}")

            except Exception as e:
                print(f"❌ LLM integration failed: {e}")
                print("   (Make sure OPENAI_API_KEY is set)")

                # Fallback to direct HTTP MCP usage
                print("\n🔄 Falling back to direct HTTP MCP usage...")

                # Test get_server_time tool directly
                direct_request = {
                    "jsonrpc": "2.0",
                    "id": 20,
                    "method": "tools/call",
                    "params": {"name": "get_server_time", "arguments": {}},
                }

                async with session.post(server_url, json=direct_request) as response:
                    result = await response.json()
                    if "result" in result:
                        content = result["result"]["content"][0]["text"]
                        print(f"  ⏰ Server time: {content}")

        print()

        # 5. Demo: Read a resource via HTTP
        if "resources" in locals() and resources:
            print("📖 Reading a resource via HTTP...")

            # Read the first available resource
            resource_uri = resources[0]["uri"]
            read_request = {
                "jsonrpc": "2.0",
                "id": 30,
                "method": "resources/read",
                "params": {"uri": resource_uri},
            }
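
            # resources/read returns result.contents, a list of resource
            # contents (per the MCP spec); this demo assumes the first entry
            # is a text resource and previews its "text" field.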
            async with session.post(server_url, json=read_request) as response:
                result = await response.json()
                if "result" in result:
                    content = result["result"]["contents"][0]["text"]
                    print(f"  📄 Resource content: {content[:200]}...")
                else:
                    error_msg = result.get("error", {}).get("message", "Unknown error")
                    print(f"  ❌ Error reading resource: {error_msg}")

        print()
print("β
HTTP MCP Demo completed!")
print()
print("π Key differences from stdio MCP:")
print(" β’ Uses HTTP POST requests instead of process communication")
print(" β’ Server runs independently (can be remote)")
print(" β’ Multiple clients can connect simultaneously")
print(" β’ Better for distributed systems and web deployments")


if __name__ == "__main__":
    print("🌐 HTTP MCP Client Demo")
    print("=" * 40)
    print("This demo connects to a running HTTP MCP server")
    print("Make sure your server is running:")
    print("  uvicorn 04_https_streamable:app --reload")
    print()

    asyncio.run(main())
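
# Note: for clarity this demo speaks raw JSON-RPC over HTTP POST. A real
# client would typically use an MCP client library instead, which also
# handles transport details this demo skips, such as session headers and
# streamed responses in the Streamable HTTP transport.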