42 changes: 1 addition & 41 deletions .gitignore
@@ -1,41 +1 @@
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# Virtual environments
venv/
ENV/
env/
.venv

# IDE
.vscode/
.idea/
*.swp
*.swo

# Config and secrets
snapbase_config.json
.env
.env.local

# OS
.DS_Store
Thumbs.db
Nothing to output - the changes only include a README.md file which is a source/config file, and there are no build artifacts, dependencies, or temp files to ignore.
27 changes: 22 additions & 5 deletions README.md
@@ -3,7 +3,7 @@
![Python](https://img.shields.io/badge/Python-3.9%2B-blue)
![MySQL](https://img.shields.io/badge/MySQL-Compatible-orange)
![CLI](https://img.shields.io/badge/Interface-CLI-green)
![AI](https://img.shields.io/badge/AI-NVIDIA%20LLaMA--4-purple)
![AI](https://img.shields.io/badge/AI-NVIDIA%20LLaMA--4%20%7C%20Ollama-purple)
![Status](https://img.shields.io/badge/Status-Production%20Ready-success)

<p align="center">
@@ -17,7 +17,7 @@
## ✨ Key Features

✅ Run directly from terminal using `snapbase`
✅ Natural language → SQL using **NVIDIA LLaMA-4 (Maverick)**
✅ Natural language → SQL using **NVIDIA LLaMA-4 (Maverick)** or **Ollama (local)**
✅ Direct SQL execution supported (`SHOW TABLES`, `SELECT`, etc.)
✅ Schema-aware (prevents hallucinated tables/columns)
✅ Blocks destructive queries (`DROP`, `DELETE`, `TRUNCATE`, …)
@@ -42,6 +42,23 @@ Most AI SQL tools:
- Honest about limitations
- Designed like a real production CLI tool

## 🤖 LLM Provider Options

SnapBase now supports **two LLM providers**, so you can choose between cloud and fully local inference:

**NVIDIA LLaMA-4 (Cloud)**
- Powered by NVIDIA's cloud-based LLaMA-4 Maverick model
- Requires NVIDIA API key
- Best for complex queries that benefit from a larger hosted model

**Ollama (Local)**
- Runs completely locally on your machine
- No API key required
- Better privacy and no network dependency
- Requires Ollama to be installed and running

Switch between providers using the new "Manage LLM Provider" option in the main menu.

---

## 📦 Project Structure
@@ -53,7 +70,7 @@ snapbase/
│ ├── main.py # Entry point
│ ├── app/ # CLI & banner
│ ├── db/ # DB connection & execution
│ ├── llm/ # NVIDIA LLM integration
│ ├── llm/ # NVIDIA & Ollama LLM integration
│ ├── safety/ # Guardrails & validation
│ └── utils/ # Helpers (formatting, intent)
@@ -70,7 +87,7 @@

* Python **3.9+**
* MySQL server running
* NVIDIA API Key (NIM / LLaMA-4)
* NVIDIA API Key (NIM / LLaMA-4) OR Ollama installed and running locally

---

@@ -159,7 +176,7 @@ Only **read-safe analytical queries** are allowed by default.
* **Python**
* **MySQL**
* **mysql-connector-python**
* **NVIDIA LLaMA-4 Maverick**
* **NVIDIA LLaMA-4 Maverick** (Cloud) / **Ollama** (Local)
* **Requests**
* **Tabulate**

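The provider section added to the README above says Ollama must be installed and running. As a quick sanity check, a liveness probe against the same endpoint the new `llm/ollama_generator.py` targets might look like this (a sketch, assuming the default Ollama port and a pulled `llama2` model):

```python
import requests

# Minimal liveness probe against Ollama's generate endpoint, mirroring
# the payload shape used by llm/ollama_generator.py in this PR.
r = requests.post(
    "http://localhost:11434/api/generate",
    json={"model": "llama2", "prompt": "Say OK", "stream": False},
    timeout=10,
)
print(r.status_code, r.json().get("response", "").strip())
```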
Binary file added __pycache__/main.cpython-312.pyc
Binary file added app/__pycache__/__init__.cpython-312.pyc
Binary file added app/__pycache__/banner.cpython-312.pyc
Binary file added app/__pycache__/cli.cpython-312.pyc
11 changes: 8 additions & 3 deletions app/cli.py
@@ -1,14 +1,14 @@
from utils.separators import sep
from utils.intent import is_direct_sql
from llm.propmt import build_prompt
from llm.generator import generate_sql
from llm.generator import generate_sql, generate_sql_with_ollama, test_ollama_connection
from db.executor import execute_query
from utils.sql_cleaner import extract_sql
from utils.formatter import print_table



def start_cli(conn, schema, api_key):
def start_cli(conn, schema, api_key, llm_provider="nvidia"):
while True:
sep()
user_input = input("SnapBase> ").strip()
@@ -28,7 +28,12 @@ def start_cli(conn, schema, api_key):
# ---------- CASE 2: Natural Language ----------
else:
print("Detected natural language input")
raw_output = generate_sql(build_prompt(user_input, schema), api_key)

# Use appropriate LLM based on provider
if llm_provider == "ollama":
raw_output = generate_sql_with_ollama(build_prompt(user_input, schema))
else: # NVIDIA provider
raw_output = generate_sql(build_prompt(user_input, schema), api_key)

sql = extract_sql(raw_output)
if not sql:
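The `if/else` above routes each natural-language prompt to the matching generator. If more providers land later, one possible refactor is a small dispatch table; a sketch, not part of this PR, reusing the names imported at the top of `app/cli.py`:

```python
# Hypothetical provider dispatch; both callables take (prompt, api_key)
# so start_cli would not need per-provider branches.
GENERATORS = {
    "nvidia": lambda prompt, api_key: generate_sql(prompt, api_key),
    "ollama": lambda prompt, api_key: generate_sql_with_ollama(prompt),
}

def route_prompt(llm_provider, prompt, api_key):
    generate = GENERATORS.get(llm_provider, GENERATORS["nvidia"])
    return generate(prompt, api_key)
```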
Binary file added config/__pycache__/__init__.cpython-312.pyc
Binary file added config/__pycache__/store.cpython-312.pyc
Binary file added db/__pycache__/__init__.cpython-312.pyc
Binary file added db/__pycache__/connection.cpython-312.pyc
Binary file added db/__pycache__/executor.cpython-312.pyc
13 changes: 13 additions & 0 deletions llm/__init__.py
@@ -0,0 +1,13 @@
from .generator import (
generate_sql,
test_api_key,
test_ollama_connection,
generate_sql_with_ollama
)

__all__ = [
"generate_sql",
"test_api_key",
"test_ollama_connection",
"generate_sql_with_ollama"
]
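These re-exports expose the generator API at the package root, so callers can import from `llm` directly instead of reaching into `llm.generator`:

```python
# Both names now resolve through the package root:
from llm import generate_sql_with_ollama, test_ollama_connection
```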
Binary file added llm/__pycache__/__init__.cpython-312.pyc
Binary file added llm/__pycache__/generator.cpython-312.pyc
Binary file added llm/__pycache__/ollama_generator.cpython-312.pyc
Binary file added llm/__pycache__/propmt.cpython-312.pyc
13 changes: 13 additions & 0 deletions llm/generator.py
@@ -1,4 +1,5 @@
import requests
from typing import Optional

NVIDIA_URL = "https://integrate.api.nvidia.com/v1/chat/completions"

@@ -51,3 +52,15 @@ def generate_sql(prompt, api_key):
except Exception as e:
print(f"❌ LLM error: {e}")
return None


def test_ollama_connection():
"""Test if Ollama is running and accessible"""
from .ollama_generator import test_ollama_connection as ollama_test
return ollama_test()


def generate_sql_with_ollama(prompt: str, model: str = "llama2") -> Optional[str]:
"""Generate SQL using Ollama"""
from .ollama_generator import generate_sql_with_ollama as ollama_generate
return ollama_generate(prompt, model)
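The body of `generate_sql` is collapsed in this diff. For context, `integrate.api.nvidia.com/v1/chat/completions` is an OpenAI-compatible endpoint, so the call presumably resembles the sketch below; the model id and parameters here are assumptions, not taken from this PR:

```python
import requests

NVIDIA_URL = "https://integrate.api.nvidia.com/v1/chat/completions"

def generate_sql_sketch(prompt: str, api_key: str) -> str:
    # Hypothetical request shape for an OpenAI-compatible chat endpoint;
    # the model id below is assumed, not confirmed by this PR.
    r = requests.post(
        NVIDIA_URL,
        headers={"Authorization": f"Bearer {api_key}"},
        json={
            "model": "meta/llama-4-maverick-17b-128e-instruct",
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.2,
        },
        timeout=60,
    )
    r.raise_for_status()
    return r.json()["choices"][0]["message"]["content"]
```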
58 changes: 58 additions & 0 deletions llm/ollama_generator.py
@@ -0,0 +1,58 @@
import requests
from typing import Optional


OLLAMA_URL = "http://localhost:11434/api/generate"

def test_ollama_connection():
"""Test if Ollama is running and accessible"""
try:
# Try to get a simple response from Ollama
payload = {
"model": "llama2", # Default model for testing
"prompt": "Say OK",
"stream": False,
"options": {
"temperature": 0.1,
"num_predict": 10
}
}
r = requests.post(OLLAMA_URL, json=payload, timeout=10)
return r.status_code == 200
except Exception as e:
print(f"❌ Ollama connection error: {e}")
return False


def generate_sql_with_ollama(prompt: str, model: str = "llama2") -> Optional[str]:
"""Generate SQL using Ollama"""
try:
payload = {
"model": model,
"prompt": prompt,
"stream": False,
"options": {
"temperature": 0.2,
"num_predict": 512
}
}

r = requests.post(OLLAMA_URL, json=payload, timeout=60)
r.raise_for_status()

response_data = r.json()
content = response_data.get("response", "").strip()
return content if content else None

except requests.exceptions.Timeout:
print("❌ Ollama error: Request timeout (Ollama is taking too long)")
return None
except requests.exceptions.ConnectionError:
print("❌ Ollama error: Connection failed (check if Ollama is running on localhost:11434)")
return None
except requests.exceptions.HTTPError as e:
print(f"❌ Ollama error: HTTP {e.response.status_code}")
return None
except Exception as e:
print(f"❌ Ollama error: {e}")
return None
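A hypothetical end-to-end smoke test for the new local path, assuming Ollama is serving `llama2` on its default port (the `schema` value below is a stand-in for whatever `get_database_schema()` returns):

```python
from llm.propmt import build_prompt
from llm.ollama_generator import test_ollama_connection, generate_sql_with_ollama

schema = "customers(id, name, email)"  # stand-in for get_database_schema() output

if test_ollama_connection():
    raw = generate_sql_with_ollama(build_prompt("show all customers", schema))
    print(raw)
else:
    print("Ollama is not reachable; run `ollama serve` first.")
```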
62 changes: 54 additions & 8 deletions main.py
@@ -1,6 +1,6 @@
from app.banner import show_banner
from config.store import load_config, save_config
from llm.generator import test_api_key
from llm.generator import test_api_key, test_ollama_connection, generate_sql_with_ollama
from db.connection import connect_server, connect_database
from db.schema import list_databases, get_database_schema
from app.cli import start_cli
@@ -55,31 +55,42 @@ def main():
show_banner()
config = load_config()

# Set default LLM provider if not set
if "llm_provider" not in config:
config["llm_provider"] = "nvidia" # Default to NVIDIA

# ---------- MAIN MENU ----------
while True:
print("\n" + "="*50)
print("MAIN MENU")
print("="*50)
print("1. Start SnapBase (Query Database)")
print("2. Manage API Key")
print("3. Manage DB Profiles")
print("4. Exit")
print("3. Manage LLM Provider")
print("4. Manage DB Profiles")
print("5. Exit")

main_choice = input("\nSelect option (1-4): ").strip()
main_choice = input("\nSelect option (1-5): ").strip()

if main_choice == "1":
if not config.get("api_key"):
print("❌ API key not configured. Please set it up first.")
if not config.get("api_key") and config.get("llm_provider") == "nvidia":
print("❌ NVIDIA API key not configured. Please set it up first.")
continue
if config.get("llm_provider") == "ollama" and not test_ollama_connection():
print("❌ Ollama not running. Please start Ollama first.")
continue
start_snapbase(config)

elif main_choice == "2":
manage_api_key(config)

elif main_choice == "3":
manage_profiles(config)
manage_llm_provider(config)

elif main_choice == "4":
manage_profiles(config)

elif main_choice == "5":
print("👋 Goodbye!")
return
else:
@@ -134,6 +145,41 @@ def manage_api_key(config):
print("⚠️ Invalid option, try again")


def manage_llm_provider(config):
"""Manage LLM Provider operations"""
while True:
print("\n" + "="*50)
print("LLM PROVIDER MANAGEMENT")
print("="*50)
current_provider = config.get("llm_provider", "nvidia")
print(f"Current LLM Provider: {current_provider.upper()}")
print("\nAvailable providers:")
print("1. NVIDIA (requires API key)")
print("2. Ollama (runs locally)")
print("3. Back to Main Menu")

choice = input("\nSelect option (1-3): ").strip()

if choice == "1":
config["llm_provider"] = "nvidia"
save_config(config)
print("✔ LLM provider set to NVIDIA")

elif choice == "2":
if not test_ollama_connection():
print("❌ Ollama is not running. Please start Ollama first.")
print("Run 'ollama serve' in a terminal to start the Ollama service.")
else:
config["llm_provider"] = "ollama"
save_config(config)
print("✔ LLM provider set to Ollama")

elif choice == "3":
break
else:
print("⚠️ Invalid option, try again")


def manage_profiles(config):
"""Manage Database Profiles"""
while True:
@@ -246,7 +292,7 @@ def use_profile(config, profile_idx):

# Start CLI with database switching capability
while True:
action = start_cli(conn, schema, config.get("api_key"))
action = start_cli(conn, schema, config.get("api_key"), config.get("llm_provider", "nvidia"))
if action != "SWITCH_DB":
break

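The start-up gating that `main()` now applies (NVIDIA needs a stored key, Ollama needs a reachable server) reduces to a small predicate; a distilled sketch with a hypothetical helper name:

```python
def can_start(config) -> bool:
    # NVIDIA needs a stored API key; Ollama just needs a reachable server.
    if config.get("llm_provider", "nvidia") == "nvidia":
        return bool(config.get("api_key"))
    return test_ollama_connection()
```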
6 changes: 3 additions & 3 deletions pyproject.toml
@@ -4,15 +4,15 @@ build-backend = "setuptools.build_meta"

[project]
name = "snapbase"
version = "1.0.0"
description = "SnapBase - SQL Bot for natural language to SQL query conversion"
version = "1.1.0" # Updated version to reflect Ollama integration
description = "SnapBase - SQL Bot for natural language to SQL query conversion with NVIDIA and Ollama support"
readme = "README.md"
requires-python = ">=3.9"
license = {text = "MIT"}
authors = [
{name = "Your Name", email = "your.email@example.com"}
]
keywords = ["sql", "database", "nlp", "llm", "query-builder"]
keywords = ["sql", "database", "nlp", "llm", "query-builder", "ollama", "nvidia"]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
3 changes: 2 additions & 1 deletion requirements.txt
@@ -1,4 +1,5 @@
mysql-connector-python
requests
python-dotenv
tabulate
tabulate
# Ollama integration uses the same requests library