-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest_ollama_models.py
More file actions
122 lines (105 loc) · 3.65 KB
/
test_ollama_models.py
File metadata and controls
122 lines (105 loc) · 3.65 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
#!/usr/bin/env python3
"""
Test different Ollama models for SQL generation performance
"""
import json
import subprocess
import time
from pathlib import Path
from typing import Dict, List
# Models to test (adjust based on what you have installed).
# Each entry is an Ollama model reference ("name:tag"); missing models are
# pulled on demand by test_model() before the benchmark runs.
MODELS_TO_TEST: List[str] = [
"qwen2.5-coder:7b", # Current baseline
"deepseek-coder-v2:16b", # Recommended
"llama3.1:8b", # Efficient alternative
"qwen2.5:14b", # Larger Qwen
# Add more models as needed
]
def check_model_available(model_name: str) -> bool:
    """Return True if *model_name* is already installed in Ollama.

    Args:
        model_name: Model reference, optionally tagged (e.g. "llama3.1:8b").
            Only the base name (before ":") is matched, so any installed
            tag of the model counts as available.

    Returns:
        True when ``ollama list`` runs successfully and its output mentions
        the model's base name; False when the command fails or the
        ``ollama`` binary is not on PATH.
    """
    try:
        result = subprocess.run(
            ["ollama", "list"],
            capture_output=True,
            text=True,
            check=True,
        )
    # Narrow handling (was a bare `except:` that also swallowed
    # KeyboardInterrupt/SystemExit): CalledProcessError covers a non-zero
    # exit, OSError covers a missing/unrunnable binary (FileNotFoundError).
    except (subprocess.CalledProcessError, OSError):
        return False
    return model_name.split(":")[0] in result.stdout
def test_model(model_name: str, limit: int = 10) -> Dict:
    """Run the Spider benchmark against one Ollama model and summarize it.

    Pulls the model first if it is not installed locally, then shells out to
    ``benchmarks/sql_benchmark.py`` and scrapes accuracy/latency from its
    stdout.

    Args:
        model_name: Ollama model reference (e.g. "qwen2.5-coder:7b").
        limit: Number of benchmark samples to run.

    Returns:
        Dict with "model", "success", and either "time" (wall-clock seconds)
        on success or "error" (stringified exception) on failure.
    """
    print(f"\n{'='*60}")
    print(f"Testing model: {model_name}")
    print(f"{'='*60}")
    if not check_model_available(model_name):
        print(f"⚠️ Model {model_name} not found. Installing...")
        subprocess.run(["ollama", "pull", model_name], check=True)
    # The benchmark writes its JSON report under results/; create the
    # directory up front so a fresh checkout doesn't fail on the write.
    Path("results").mkdir(parents=True, exist_ok=True)
    # Run benchmark (":" is invalid in filenames on some platforms, hence
    # the replace() in the output path).
    cmd = [
        "python", "benchmarks/sql_benchmark.py",
        "--benchmark", "spider",
        "--use-agent",
        "--model-name", model_name,
        "--limit", str(limit),
        "--max-attempts", "3",
        "--temperature", "0",
        "--output", f"results/test_{model_name.replace(':', '_')}.json",
    ]
    start_time = time.time()
    try:
        result = subprocess.run(cmd, capture_output=True, text=True, check=True)
        elapsed = time.time() - start_time
        # Parse the headline metrics from the benchmark's stdout.
        for line in result.stdout.split('\n'):
            if "Execution Accuracy:" in line:
                accuracy = line.split("(")[1].split("%")[0]
                print(f"✅ Execution Accuracy: {accuracy}%")
            if "Avg Latency:" in line:
                latency = line.split(":")[1].strip().split("s")[0]
                print(f"⏱️ Avg Latency: {latency}s")
        print(f"Total time: {elapsed:.2f}s")
        return {
            "model": model_name,
            "success": True,
            "time": elapsed,
        }
    except subprocess.CalledProcessError as e:
        # Surface the benchmark's own error output — previously discarded,
        # which made failures impossible to diagnose.
        print(f"❌ Error testing {model_name}: {str(e)}")
        if e.stderr:
            print(e.stderr.strip())
        return {
            "model": model_name,
            "success": False,
            "error": str(e),
        }
    except Exception as e:
        # Catch-all for unexpected issues (e.g. malformed benchmark output).
        print(f"❌ Error testing {model_name}: {str(e)}")
        return {
            "model": model_name,
            "success": False,
            "error": str(e),
        }
def main() -> None:
    """Benchmark every model in MODELS_TO_TEST and print a comparison.

    Prints available/total RAM when psutil is installed, runs each model on
    a small 10-sample slice for a quick comparison, then summarizes which
    runs completed and offers upgrade recommendations.
    """
    print("🚀 LocalSQLAgent Model Comparison")
    print("=" * 60)
    # Check hardware (optional — psutil is a third-party dependency).
    print("\n📊 System Info:")
    try:
        import psutil
        print(f"Available RAM: {psutil.virtual_memory().available / (1024**3):.1f} GB")
        print(f"Total RAM: {psutil.virtual_memory().total / (1024**3):.1f} GB")
    # Only a missing package is expected here; the previous bare `except:`
    # also hid genuine psutil runtime errors.
    except ImportError:
        print("Install psutil for system info: pip install psutil")
    # Test each model on a small sample for a quick comparison.
    results = []
    for model in MODELS_TO_TEST:
        result = test_model(model, limit=10)
        results.append(result)
    # Summary
    print("\n" + "="*60)
    print("📊 SUMMARY")
    print("="*60)
    for result in results:
        if result["success"]:
            print(f"✅ {result['model']}: Completed in {result['time']:.2f}s")
        else:
            print(f"❌ {result['model']}: Failed")
    print("\n💡 Recommendations:")
    print("1. DeepSeek Coder V2 16B - Best for SQL if you have 16GB+ RAM")
    print("2. Llama 3.1 8B - Good balance of performance and speed")
    print("3. Qwen2.5 14B - Upgrade path from current 7B model")
if __name__ == "__main__":
    main()