-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathllm_code_generator.py
More file actions
307 lines (249 loc) · 10.7 KB
/
llm_code_generator.py
File metadata and controls
307 lines (249 loc) · 10.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
#!/usr/bin/env python3
"""
LLM Code Generator for API Research Study
Generates Python code using multiple LLM providers
"""
import asyncio
import aiohttp
import json
import os
import time
from typing import Dict, Optional, Tuple
import logging
logger = logging.getLogger(__name__)
class LLMCodeGenerator:
    """Generates API integration code using multiple LLM providers.

    Providers are tried in a fixed order (Claude, then GPT, then Gemini)
    until one succeeds.  API keys are loaded from a local ``.env`` file.
    """

    # .env key names this generator recognizes, one per provider.
    _KNOWN_KEYS = ('ANTHROPIC_API_KEY', 'OPENAI_API_KEY', 'GOOGLE_API_KEY')

    # Per-request timeout (seconds) applied to every provider call.
    _REQUEST_TIMEOUT_S = 30

    def __init__(self):
        # Session is normally created by initialize(); as a safety net it is
        # also created lazily on first use (see _ensure_session).
        self.session: Optional[aiohttp.ClientSession] = None
        self.api_keys: Dict[str, str] = self.load_api_keys()

    def load_api_keys(self) -> Dict[str, str]:
        """Load LLM API keys from a local ``.env`` file.

        Returns:
            Mapping of recognized key names to their values.  A missing or
            unreadable ``.env`` is logged and yields an empty mapping.
        """
        keys: Dict[str, str] = {}
        try:
            with open('.env', 'r') as f:
                for raw in f:
                    line = raw.strip()
                    # Strip before testing so indented comment lines are also
                    # skipped (a raw startswith('#') check misses them).
                    if not line or line.startswith('#') or '=' not in line:
                        continue
                    key, value = line.split('=', 1)
                    # Strip the key so "KEY = value" entries still match.
                    key = key.strip()
                    if key in self._KNOWN_KEYS:
                        # Tolerate KEY="value" / KEY='value' quoting styles.
                        keys[key] = value.strip().strip('\'"')
        except Exception as e:
            logger.error(f"❌ Failed to load API keys: {e}")
        logger.info(f"🔑 Loaded {len(keys)} LLM API keys")
        return keys

    async def initialize(self):
        """Initialize the shared HTTP session."""
        self.session = aiohttp.ClientSession()
        logger.info("✅ LLM code generator initialized")

    async def _ensure_session(self) -> "aiohttp.ClientSession":
        """Return the shared session, creating it if initialize() was skipped.

        Lazy creation prevents an AttributeError when a request is issued
        before initialize() has been awaited.
        """
        if self.session is None:
            self.session = aiohttp.ClientSession()
        return self.session

    async def generate_code(self, api_name: str, documentation: str, auth_type: str, env_key: str) -> Tuple[bool, str, int, str]:
        """Generate Python code for an API integration.

        Each provider is tried in turn; the first success wins.

        Returns:
            Tuple of (success, generated_code, generation_time_ms, model_used).
            On total failure: (False, "", 0, "none").
        """
        for model in ('claude', 'gpt', 'gemini'):
            try:
                success, code, time_ms = await self.generate_with_model(
                    model, api_name, documentation, auth_type, env_key
                )
                if success:
                    return True, code, time_ms, model
            except Exception as e:
                # A crash in one provider must not abort the fallback chain.
                logger.warning(f"⚠️ {model} failed: {e}")
                continue
        return False, "", 0, "none"

    async def generate_with_model(self, model: str, api_name: str, documentation: str, auth_type: str, env_key: str) -> Tuple[bool, str, int]:
        """Generate code using one specific LLM model.

        Returns:
            Tuple of (success, generated_code_or_error, generation_time_ms).
        """
        start_time = time.time()
        prompt = self.create_standardized_prompt(api_name, documentation, auth_type, env_key)
        if model == 'claude':
            success, code = await self.call_claude(prompt)
        elif model == 'gpt':
            success, code = await self.call_openai(prompt)
        elif model == 'gemini':
            success, code = await self.call_gemini(prompt)
        else:
            # Unknown model name: report failure without spending any time.
            return False, "", 0
        generation_time = int((time.time() - start_time) * 1000)
        if success:
            logger.info(f"✅ {model} generated {len(code)} characters for {api_name} ({generation_time}ms)")
        else:
            logger.warning(f"⚠️ {model} failed to generate code for {api_name}")
        return success, code, generation_time

    def create_standardized_prompt(self, api_name: str, documentation: str, auth_type: str, env_key: str) -> str:
        """Create a standardized prompt for fair comparison across LLMs.

        Unknown auth types fall back to a generic authentication instruction.
        """
        auth_instructions = {
            'query': f"Use the API key from environment variable {env_key} as a query parameter",
            'header': f"Use the API key from environment variable {env_key} in the request headers",
            'path': f"Use the API key from environment variable {env_key} in the URL path",
            'bearer': f"Use the API key from environment variable {env_key} as a Bearer token in Authorization header",
            'basic': f"Use the credentials from environment variables for HTTP Basic authentication"
        }
        # Kept flush-left: this text is sent verbatim to the models, so it
        # must not pick up source-code indentation.
        prompt = f"""
You are tasked with creating Python code to integrate with the {api_name} API based on the following documentation.
DOCUMENTATION:
{documentation}
REQUIREMENTS:
1. Create a complete Python script that makes a simple API call to {api_name}
2. {auth_instructions.get(auth_type, 'Use appropriate authentication method')}
3. Include proper error handling and response parsing
4. Use the requests library for HTTP calls
5. Include necessary imports
6. Add comments explaining the code
7. Make the code production-ready with proper exception handling
8. Test with a simple, safe API endpoint (like getting current data or basic info)
AUTHENTICATION TYPE: {auth_type}
ENVIRONMENT VARIABLE: {env_key}
Please provide ONLY the Python code without any explanations or markdown formatting.
The code should be ready to run as-is.
"""
        return prompt.strip()

    async def _post_json(self, label: str, url: str, payload: dict, extract,
                         headers: Optional[dict] = None) -> Tuple[bool, str]:
        """POST a JSON payload to an LLM endpoint and extract the reply text.

        Args:
            label: Provider name used in log messages ("Claude", "OpenAI", "Gemini").
            url: Endpoint URL.
            payload: JSON-serializable request body.
            extract: Callable mapping the decoded JSON response to the
                generated text (provider response schemas differ).
            headers: Optional request headers.

        Returns:
            (True, generated_code) on HTTP 200, else (False, error message).
        """
        session = await self._ensure_session()
        try:
            async with session.post(
                url,
                headers=headers,
                json=payload,
                # aiohttp expects a ClientTimeout object; a bare int is deprecated.
                timeout=aiohttp.ClientTimeout(total=self._REQUEST_TIMEOUT_S),
            ) as response:
                if response.status == 200:
                    result = await response.json()
                    return True, extract(result).strip()
                error_text = await response.text()
                logger.error(f"{label} API error {response.status}: {error_text}")
                return False, f"API Error: {response.status}"
        except Exception as e:
            logger.error(f"{label} request failed: {e}")
            return False, f"Request failed: {str(e)}"

    async def call_claude(self, prompt: str) -> Tuple[bool, str]:
        """Call the Anthropic Claude messages API."""
        if 'ANTHROPIC_API_KEY' not in self.api_keys:
            return False, "No Anthropic API key"
        headers = {
            'Content-Type': 'application/json',
            'x-api-key': self.api_keys['ANTHROPIC_API_KEY'],
            'anthropic-version': '2023-06-01'
        }
        data = {
            'model': 'claude-3-5-sonnet-20241022',
            'max_tokens': 1000,
            'messages': [{'role': 'user', 'content': prompt}]
        }
        return await self._post_json(
            'Claude',
            'https://api.anthropic.com/v1/messages',
            data,
            # Anthropic returns text under content[0].text.
            lambda r: r['content'][0]['text'],
            headers=headers,
        )

    async def call_openai(self, prompt: str) -> Tuple[bool, str]:
        """Call the OpenAI chat completions API."""
        if 'OPENAI_API_KEY' not in self.api_keys:
            return False, "No OpenAI API key"
        headers = {
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {self.api_keys["OPENAI_API_KEY"]}'
        }
        data = {
            'model': 'gpt-4',
            'messages': [{'role': 'user', 'content': prompt}],
            'max_tokens': 1000,
            'temperature': 0.1
        }
        return await self._post_json(
            'OpenAI',
            'https://api.openai.com/v1/chat/completions',
            data,
            # OpenAI returns text under choices[0].message.content.
            lambda r: r['choices'][0]['message']['content'],
            headers=headers,
        )

    async def call_gemini(self, prompt: str) -> Tuple[bool, str]:
        """Call the Google Gemini generateContent API."""
        if 'GOOGLE_API_KEY' not in self.api_keys:
            return False, "No Google API key"
        # Gemini authenticates via a key query parameter, not headers.
        url = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key={self.api_keys['GOOGLE_API_KEY']}"
        data = {
            'contents': [{'parts': [{'text': prompt}]}],
            'generationConfig': {
                'temperature': 0.1,
                'maxOutputTokens': 1000
            }
        }
        return await self._post_json(
            'Gemini',
            url,
            data,
            # Gemini returns text under candidates[0].content.parts[0].text.
            lambda r: r['candidates'][0]['content']['parts'][0]['text'],
        )

    async def cleanup(self):
        """Close the shared HTTP session (safe to call more than once)."""
        if self.session:
            await self.session.close()
            # Clear the reference so a later request lazily reopens a session
            # instead of using a closed one.
            self.session = None
        logger.info("🧹 LLM code generator cleaned up")
# Test function
async def test_code_generation():
    """Run one end-to-end generation against sample weather-API docs."""
    gen = LLMCodeGenerator()
    try:
        await gen.initialize()
        # Documentation fixture fed to the generator (sent verbatim).
        weather_doc = """
# Weather API Documentation
Get current weather data for any location.
## Authentication
Use your API key as a query parameter: ?appid=YOUR_API_KEY
## Endpoint
GET https://api.openweathermap.org/data/2.5/weather?q={city}&appid={API_KEY}
## Example Response
{
"weather": [{"main": "Clear", "description": "clear sky"}],
"main": {"temp": 280.32, "pressure": 1012},
"name": "London"
}
"""
        outcome = await gen.generate_code(
            "OpenWeatherMap", weather_doc, "query", "OPENWEATHER_API_KEY"
        )
        success, code, time_ms, model = outcome
        print(f"Success: {success}")
        print(f"Model: {model}")
        print(f"Time: {time_ms}ms")
        print(f"Generated code:\n{code}")
    finally:
        # Always release the HTTP session, even when generation fails.
        await gen.cleanup()
if __name__ == "__main__":
    asyncio.run(test_code_generation())