diff --git a/README.md b/README.md
index f199598..985086e 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
# PrivatePilot
IDE extension for private coding
-**Private Pilot** is a powerful VSCode extension designed to enhance developer productivity by integrating AI-driven assistance directly within the Visual Studio Code IDE. Leveraging the Ollama AI backend, Private Pilot provides real-time code suggestions, automated bug fixes, intelligent code commenting, and an interactive chat interface to streamline your coding workflow.
+**Private Pilot** is a powerful VSCode extension designed to enhance developer productivity by integrating AI-driven assistance directly within the Visual Studio Code IDE. It can connect to multiple language models—including Ollama, OpenAI, Grok, and Claude—to provide real-time code suggestions, automated bug fixes, intelligent code commenting, and an interactive chat interface to streamline your workflow.
diff --git a/private-pilot-extension/README.md b/private-pilot-extension/README.md
index 46a4937..caa9fae 100644
--- a/private-pilot-extension/README.md
+++ b/private-pilot-extension/README.md
@@ -1,7 +1,7 @@
# Private Pilot VSCode Extension
## Overview
-**Private Pilot** is a powerful VSCode extension designed to enhance developer productivity by integrating AI-driven assistance directly within the Visual Studio Code IDE. Leveraging the Ollama AI backend, Private Pilot provides real-time code suggestions, automated bug fixes, intelligent code commenting, and an interactive chat interface to streamline your coding workflow.
+**Private Pilot** is a powerful VSCode extension designed to enhance developer productivity by integrating AI-driven assistance directly within the Visual Studio Code IDE. It can communicate with multiple large language model providers—including Ollama, OpenAI, Grok, and Claude—to deliver real-time code suggestions, automated bug fixes, intelligent code commenting, and an interactive chat interface to streamline your coding workflow.
**Version:** 0.3.0
@@ -35,7 +35,7 @@ Alternatively, install via the [VSCode Marketplace](https://marketplace.visualst
- Generate comments or documentation by selecting code and using context-aware prompts.
## Configuration
-- **Ollama Backend**: Ensure the Ollama backend is running and accessible. Configure the endpoint in the extension settings if needed.
+- **Model Provider**: Choose between `ollama`, `openai`, `grok`, or `claude` in the extension settings. Each provider has its own endpoint and API key fields.
- **Verbosity Levels**: Customize the level of detail for AI-generated comments in the extension settings.
- **Project Management Tools**: Link your Jira, GitHub, or Trello accounts in the settings for seamless integration.
@@ -43,7 +43,7 @@ Alternatively, install via the [VSCode Marketplace](https://marketplace.visualst
- **VSCode**: Version 1.60.0 or later
- **Node.js**: Required for development and building the extension
- **TypeScript**: Used for development
-- **Ollama Backend**: Required for AI functionality
+- **LLM Backend**: Ensure your chosen provider (Ollama, OpenAI, Grok, or Claude) is reachable. Some providers require an API key.
## Development
To contribute or customize the extension:
diff --git a/private-pilot-extension/package.json b/private-pilot-extension/package.json
index 29dccff..8d5d8d4 100644
--- a/private-pilot-extension/package.json
+++ b/private-pilot-extension/package.json
@@ -153,6 +153,12 @@
"configuration": {
"title": "Code Rewriter",
"properties": {
+ "codeRewriter.provider": {
+ "type": "string",
+ "enum": ["ollama", "openai", "grok", "claude"],
+ "default": "ollama",
+ "description": "LLM provider to use"
+ },
"codeRewriter.ollamaEndpoint": {
"type": "string",
"default": "http://localhost:11434/api/generate",
@@ -162,6 +168,51 @@
"type": "string",
"default": "llama2",
"description": "Ollama model to use for code improvement"
+ },
+ "codeRewriter.openaiApiKey": {
+ "type": "string",
+ "default": "",
+ "description": "API key for OpenAI"
+ },
+ "codeRewriter.openaiModel": {
+ "type": "string",
+ "default": "gpt-3.5-turbo",
+ "description": "OpenAI model"
+ },
+ "codeRewriter.openaiEndpoint": {
+ "type": "string",
+ "default": "https://api.openai.com/v1/chat/completions",
+ "description": "OpenAI endpoint"
+ },
+ "codeRewriter.grokEndpoint": {
+ "type": "string",
+ "default": "",
+ "description": "Endpoint for Grok API"
+ },
+ "codeRewriter.grokApiKey": {
+ "type": "string",
+ "default": "",
+ "description": "API key for Grok"
+ },
+ "codeRewriter.grokModel": {
+ "type": "string",
+ "default": "latest",
+ "description": "Grok model"
+ },
+ "codeRewriter.claudeEndpoint": {
+ "type": "string",
+ "default": "",
+ "description": "Endpoint for Claude API"
+ },
+ "codeRewriter.claudeApiKey": {
+ "type": "string",
+ "default": "",
+ "description": "API key for Claude"
+ },
+ "codeRewriter.claudeModel": {
+ "type": "string",
+ "default": "claude-3-opus-20240229",
+ "description": "Claude model"
}
}
}
diff --git a/private-pilot-extension/src/extension.ts b/private-pilot-extension/src/extension.ts
index e0c5f1e..e68cce7 100644
--- a/private-pilot-extension/src/extension.ts
+++ b/private-pilot-extension/src/extension.ts
@@ -1,7 +1,8 @@
// src/extension.ts
import * as vscode from 'vscode';
-import axios from 'axios';
-import { improveCode } from './prompts';
+import axios from 'axios';
+import { improveCode } from './prompts';
+import { getModelProvider } from './modelProviders';
import {
typingDelay,
getSelectedText,
@@ -31,7 +32,7 @@ export function activate(context: vscode.ExtensionContext) {
'private-pilot.autoComment': handleAutoComment,
'private-pilot.createCode': handleCreateCode,
'private-pilot.askQuestion': handleAskQuestion,
- 'private-pilot.rewriteCode': handleOllamaRequest,
+ 'private-pilot.rewriteCode': handleModelRequest,
};
// Register all commands and push to subscriptions
@@ -62,7 +63,7 @@ async function handleImproveCode() {
const preprompt = improveCode + selectedText;
// Simulate deleting and then typing
- const textToType = await getOllamaText(preprompt);
+ const textToType = await getLLMText(preprompt);
if (textToType === null) {
vscode.window.showErrorMessage('Failed to get improved code from Ollama');
@@ -118,7 +119,7 @@ async function handleContextualImprove() {
const preprompt = `${improveCode}${selectedText}\n\nthe full context of the file that contains the codeblock is below\n\n${fulltext}`;
// Simulate deleting and then typing
- const textToType = await getOllamaText(preprompt);
+ const textToType = await getLLMText(preprompt);
if (textToType === null) {
vscode.window.showErrorMessage('Failed to get improved code from Ollama');
@@ -260,59 +261,33 @@ async function handleAskQuestion() {
});
}
-async function getOllamaText(prompt: string): Promise<string | null> {
- vscode.window.showInformationMessage('Ollama request triggered');
- console.log('getOllamaText request triggered...');
-
- try {
- const config = vscode.workspace.getConfiguration('codeRewriter');
-    const ollamaEndpoint = config.get<string>('ollamaEndpoint') || getFallbackURL('api/generate');
-    const ollamaModel = config.get<string>('ollamaModel') || FALLBACK_MODEL;
-
- const response = await axios.post(
- ollamaEndpoint,
- {
- model: ollamaModel,
- prompt: prompt,
- stream: false,
- },
- {
- headers: {
- 'Content-Type': 'application/json',
- },
- },
- );
-
- // Extract the improved code from the response
- if (response.data && response.data.response) {
- const improvedCode = extractCodeFromResponse(response.data.response);
- console.log('Improved code:', improvedCode);
-
- vscode.window.showInformationMessage('Code successfully returned from Ollama');
- return improvedCode;
- } else {
- vscode.window.showErrorMessage('Invalid response from Ollama');
- return '';
- }
- } catch (error) {
- let errorMessage = 'Failed to rewrite code';
-
- if (axios.isAxiosError(error)) {
- if (error.response) {
- errorMessage = `Ollama API error: ${error.response.status} ${error.response.statusText}`;
- } else if (error.request) {
- errorMessage = 'Could not connect to Ollama. Make sure Ollama is running.';
- }
- }
-
- vscode.window.showErrorMessage(errorMessage);
- console.error('Code rewriting error:', error);
- return null;
- }
-}
-async function handleOllamaRequest(prompt?: string) {
- vscode.window.showInformationMessage('Ollama request triggered');
- console.log('Ollama request triggered...');
+async function getLLMText(prompt: string): Promise<string | null> {
+ console.log('getLLMText request triggered...');
+
+ try {
+ const provider = getModelProvider();
+ const result = await provider.generate(prompt);
+ vscode.window.showInformationMessage('Code successfully returned from model');
+ return result;
+ } catch (error) {
+ let errorMessage = 'Failed to rewrite code';
+
+ if (axios.isAxiosError(error)) {
+ if (error.response) {
+ errorMessage = `Model API error: ${error.response.status} ${error.response.statusText}`;
+ } else if (error.request) {
+ errorMessage = 'Could not connect to model backend.';
+ }
+ }
+
+ vscode.window.showErrorMessage(errorMessage);
+ console.error('Code rewriting error:', error);
+ return null;
+ }
+}
+async function handleModelRequest(prompt?: string) {
+ vscode.window.showInformationMessage('LLM request triggered');
+ console.log('LLM request triggered...');
try {
const editor = vscode.window.activeTextEditor;
if (!editor) {
@@ -342,58 +317,36 @@ async function handleOllamaRequest(prompt?: string) {
async (progress: vscode.Progress<{ message?: string; increment?: number }>) => {
progress.report({ increment: 0 });
- try {
- const config = vscode.workspace.getConfiguration('codeRewriter');
-        const ollamaEndpoint = config.get<string>('ollamaEndpoint') || getFallbackURL('api/generate');
-        const ollamaModel = config.get<string>('ollamaModel') || FALLBACK_MODEL;
-
- const response = await axios.post(
- ollamaEndpoint,
- {
- model: ollamaModel,
- prompt: prompt || `Improve this code:\n\n${selectedText}\n\nImproved code:`,
- stream: false,
- },
- {
- headers: {
- 'Content-Type': 'application/json',
- },
- },
- );
-
- progress.report({ increment: 100 });
-
- console.log('API response:', response);
-
- // Extract the improved code from the response
- if (response.data && response.data.response) {
- const improvedCode = extractCodeFromResponse(response.data.response);
- console.log('Selected text:', selectedText);
- console.log('Improved code:', improvedCode);
-
- // Replace the selected text with the improved code
- await editor.edit((editBuilder: vscode.TextEditorEdit) => {
- editBuilder.replace(selection, improvedCode);
- });
-
- vscode.window.showInformationMessage('Code successfully rewritten');
- } else {
- vscode.window.showErrorMessage('Invalid response from Ollama');
- }
- } catch (error) {
- let errorMessage = 'Failed to rewrite code';
-
- if (axios.isAxiosError(error)) {
- if (error.response) {
- errorMessage = `Ollama API error: ${error.response.status} ${error.response.statusText}`;
- } else if (error.request) {
- errorMessage = 'Could not connect to Ollama. Make sure Ollama is running.';
- }
- }
-
- vscode.window.showErrorMessage(errorMessage);
- console.error('Code rewriting error:', error);
- }
+ try {
+ const provider = getModelProvider();
+ const improvedCode = await provider.generate(
+ prompt || `Improve this code:\n\n${selectedText}\n\nImproved code:`,
+ );
+
+ progress.report({ increment: 100 });
+
+ console.log('Improved code:', improvedCode);
+
+ // Replace the selected text with the improved code
+ await editor.edit((editBuilder: vscode.TextEditorEdit) => {
+ editBuilder.replace(selection, improvedCode);
+ });
+
+ vscode.window.showInformationMessage('Code successfully rewritten');
+ } catch (error) {
+ let errorMessage = 'Failed to rewrite code';
+
+ if (axios.isAxiosError(error)) {
+ if (error.response) {
+ errorMessage = `Model API error: ${error.response.status} ${error.response.statusText}`;
+ } else if (error.request) {
+ errorMessage = 'Could not connect to model backend.';
+ }
+ }
+
+ vscode.window.showErrorMessage(errorMessage);
+ console.error('Code rewriting error:', error);
+ }
},
);
} catch (error) {
diff --git a/private-pilot-extension/src/modelProviders.ts b/private-pilot-extension/src/modelProviders.ts
new file mode 100644
index 0000000..739548a
--- /dev/null
+++ b/private-pilot-extension/src/modelProviders.ts
@@ -0,0 +1,139 @@
+import axios from 'axios';
+import * as vscode from 'vscode';
+import { extractCodeFromResponse, getFallbackURL, FALLBACK_MODEL } from './common';
+
+export interface ModelProvider {
+  generate(prompt: string): Promise<string>;
+}
+
+export class OllamaProvider implements ModelProvider {
+ constructor(private endpoint: string, private model: string) {}
+
+  async generate(prompt: string): Promise<string> {
+ const response = await axios.post(
+ this.endpoint,
+ { model: this.model, prompt, stream: false },
+ { headers: { 'Content-Type': 'application/json' } },
+ );
+
+ if (response.data && response.data.response) {
+ return extractCodeFromResponse(response.data.response);
+ }
+ throw new Error('Invalid response from Ollama');
+ }
+}
+
+export class OpenAIProvider implements ModelProvider {
+ constructor(
+ private apiKey: string,
+ private model: string,
+ private endpoint: string = 'https://api.openai.com/v1/chat/completions',
+ ) {}
+
+  async generate(prompt: string): Promise<string> {
+ const response = await axios.post(
+ this.endpoint,
+ {
+ model: this.model,
+ messages: [{ role: 'user', content: prompt }],
+ },
+ {
+ headers: {
+ 'Content-Type': 'application/json',
+ Authorization: `Bearer ${this.apiKey}`,
+ },
+ },
+ );
+
+ if (response.data?.choices?.length) {
+ const content = response.data.choices[0].message.content;
+ return extractCodeFromResponse(content);
+ }
+ throw new Error('Invalid response from OpenAI');
+ }
+}
+
+export class GrokProvider implements ModelProvider {
+ constructor(
+ private endpoint: string,
+ private apiKey: string,
+ private model: string,
+ ) {}
+
+  async generate(prompt: string): Promise<string> {
+ const response = await axios.post(
+ this.endpoint,
+ { model: this.model, prompt },
+ {
+ headers: {
+ 'Content-Type': 'application/json',
+ Authorization: `Bearer ${this.apiKey}`,
+ },
+ },
+ );
+
+ if (response.data && response.data.response) {
+ return extractCodeFromResponse(response.data.response);
+ }
+ throw new Error('Invalid response from Grok');
+ }
+}
+
+export class ClaudeProvider implements ModelProvider {
+ constructor(
+ private endpoint: string,
+ private apiKey: string,
+ private model: string,
+ ) {}
+
+  async generate(prompt: string): Promise<string> {
+ const response = await axios.post(
+ this.endpoint,
+ { model: this.model, prompt },
+ {
+ headers: {
+ 'Content-Type': 'application/json',
+ Authorization: `Bearer ${this.apiKey}`,
+ },
+ },
+ );
+
+ if (response.data && (response.data.completion || response.data.response)) {
+ const content = response.data.completion ?? response.data.response;
+ return extractCodeFromResponse(content);
+ }
+ throw new Error('Invalid response from Claude');
+ }
+}
+
+export function getModelProvider(): ModelProvider {
+ const config = vscode.workspace.getConfiguration('codeRewriter');
+  const provider = (config.get<string>('provider') || 'ollama').toLowerCase();
+
+ switch (provider) {
+ case 'openai':
+ return new OpenAIProvider(
+        config.get<string>('openaiApiKey') || '',
+        config.get<string>('openaiModel') || 'gpt-3.5-turbo',
+        config.get<string>('openaiEndpoint') || 'https://api.openai.com/v1/chat/completions',
+ );
+ case 'grok':
+ return new GrokProvider(
+        config.get<string>('grokEndpoint') || '',
+        config.get<string>('grokApiKey') || '',
+        config.get<string>('grokModel') || 'latest',
+ );
+ case 'claude':
+ return new ClaudeProvider(
+        config.get<string>('claudeEndpoint') || '',
+        config.get<string>('claudeApiKey') || '',
+        config.get<string>('claudeModel') || 'claude-3-opus-20240229',
+ );
+ case 'ollama':
+ default:
+ return new OllamaProvider(
+        config.get<string>('ollamaEndpoint') || getFallbackURL('api/generate'),
+        config.get<string>('ollamaModel') || FALLBACK_MODEL,
+ );
+ }
+}