Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 17 additions & 6 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,9 @@
```
3. Set up your environment variables:
- Add your `GROQ_API_KEY` to the `.env` file
- Add your `OPENAI_API_KEY` to the `.env` file
- Add your `AZURE_OPENAI_API_KEY` to the `.env` file
- Add your `AZURE_OPENAI_ENDPOINT` to the `.env` file

4. Run the development server:
```bash
Expand All @@ -23,19 +26,27 @@

5. Open [http://localhost:3000](http://localhost:3000) in your browser to see the result.

## Selecting AI Model Provider and Model

To select an AI model provider and model, follow these steps:

1. In the chat interface, you will see two dropdown menus.
2. The first dropdown menu allows you to select the AI model provider. The available options are:
- Groq
- OpenAI
- Google Cloud AI
- Azure AI
3. The second dropdown menu allows you to select the AI model. The available options are:
- Llama 3 8B 8192
- GPT-3.5 Turbo
- PaLM 2
- Davinci
4. Select the desired provider and model from the dropdown menus before starting the conversation.

## Project Screenshots


![Chat Interface](./public/images/screenshot1.png)

![Chat Interface](./public/images/screenshot2.png)

![Chat Interface](./public/images/screenshot3.png)





173 changes: 153 additions & 20 deletions src/app/actions.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -6,23 +6,115 @@ import { createOpenAI } from '@ai-sdk/openai';
import { createStreamableUI } from 'ai/rsc';
import { ReactNode } from 'react';
import { z } from 'zod';
import { Configuration, OpenAIApi } from "openai"; // Import OpenAI API
import axios from 'axios';
import dotenv from 'dotenv';

// Load environment variables from .env file
dotenv.config();

// Retrieve API key and endpoint from environment variables
// NOTE(review): these are read at module load time. In a Next.js app the
// .env file is already loaded by the framework, so the dotenv.config() call
// above is likely redundant — confirm before removing.
const apiKey = process.env.AZURE_OPENAI_API_KEY;
const endpoint = process.env.AZURE_OPENAI_ENDPOINT;

// NOTE(review): this throw runs at import time, so the module (and any route
// that imports it) crashes whenever the Azure variables are missing — even
// for deployments that only configured Groq or OpenAI. Consider validating
// lazily inside the Azure code path instead.
if (!apiKey || !endpoint) {
throw new Error('API key or endpoint is not defined in the environment variables');
}

// Add Groq provider (Groq exposes an OpenAI-compatible API at this baseURL).
const groq = createOpenAI({
baseURL: 'https://api.groq.com/openai/v1',
apiKey: process.env.GROQ_API_KEY,
});

// Add OpenAI provider (uses the client's default OpenAI base URL).
const openai = createOpenAI({
apiKey: process.env.OPENAI_API_KEY,
});

// Add Google Cloud AI provider
// NOTE(review): createOpenAI targets the OpenAI base URL by default, and
// Google's API is not OpenAI-compatible at that address — this provider will
// presumably fail at runtime unless a compatible baseURL/proxy is supplied.
// Verify before exposing "Google Cloud AI" in the UI.
const googleCloudAI = createOpenAI({
apiKey: process.env.GOOGLE_CLOUD_AI_API_KEY,
});

// Azure OpenAI provider. Unlike the createOpenAI() providers above, Azure is
// called directly over HTTP because its auth scheme (an `api-key` header on a
// deployment-specific endpoint) differs from the standard OpenAI client.
const azureAI = {
  /**
   * Send a chat completion request to the Azure OpenAI endpoint.
   *
   * @param messages conversation history; a generic system prompt is prepended.
   * @returns the assistant's reply text from the first choice.
   * @throws rethrows any axios/network error after logging it.
   */
  async getChatCompletion(messages: CoreMessage[]) {
    try {
      const response = await axios.post(
        endpoint,
        {
          messages: [
            { role: 'system', content: 'You are a helpful assistant.' },
            ...messages,
          ],
        },
        {
          headers: {
            'Content-Type': 'application/json',
            'api-key': apiKey,
          },
        }
      );

      return response.data.choices[0].message.content;
    } catch (error) {
      // Catch variables are `unknown` under strict TS; narrow before touching
      // axios-specific fields instead of assuming `.response` exists.
      if (axios.isAxiosError(error)) {
        console.error('Error:', error.response ? error.response.data : error.message);
      } else {
        console.error('Error:', error);
      }
      throw error;
    }
  },
};

// A single chat turn. `display` optionally carries a rendered React node for
// generative-UI responses; plain-text exchanges leave it unset.
export interface Message {
// Who produced this turn of the conversation.
role: 'user' | 'assistant';
// Plain-text body of the message.
content: string;
// Optional rendered UI for this turn (client-side only; excluded when the
// history is sent back to the server).
display?: ReactNode;
}

/**
 * Resolve a provider id to its client instance.
 *
 * 'compare' is a UI mode, not a real provider: the lookup table has no such
 * key, so the previous version returned `undefined` for it and callers later
 * crashed with "modelProvider is not a function". It is now rejected with an
 * explicit error (callers handle comparison mode before resolving a provider).
 */
function getModelProvider(provider: 'groq' | 'openai' | 'googleCloudAI' | 'azureAI' | 'compare') {
  if (provider === 'compare') {
    throw new Error("'compare' is not a provider; handle comparison mode before resolving one.");
  }
  const modelProviders = {
    groq,
    openai,
    googleCloudAI,
    azureAI,
  };
  return modelProviders[provider];
}

// Function to compare responses from different AI models
export async function compareAIModels(messages: CoreMessage[], models: string[]) {
const results = await Promise.all(models.map(async (model) => {
const result = await streamText({
model: openai(model), // Assuming openai for comparison, adjust as needed
messages,
});
return {
model,
response: result.textStream,
};
}));

return results;
}

// Streaming Chat
export async function continueTextConversation(messages: CoreMessage[]) {
export async function continueTextConversation(messages: CoreMessage[], provider: 'groq' | 'openai' | 'googleCloudAI' | 'azureAI' | 'compare' = 'groq', model: string = 'llama3-8b-8192') {
if (provider === 'compare') {
const models = ['llama3-8b-8192', 'gpt-3.5-turbo', 'palm-2', 'davinci']; // Example models to compare
const results = await compareAIModels(messages, models);
return results;
}

const modelProvider = getModelProvider(provider);

if (provider === 'azureAI') {
const response = await azureAI.getChatCompletion(messages);
return createStreamableValue(response).value;
}

const result = await streamText({
model: groq('llama3-8b-8192'), // Use Groq model
model: modelProvider(model), // Use selected model
messages,
});

Expand All @@ -31,27 +123,43 @@ export async function continueTextConversation(messages: CoreMessage[]) {
}

// Gen UIs
export async function continueConversation(history: Message[]) {
export async function continueConversation(history: Message[], provider: 'groq' | 'openai' | 'googleCloudAI' | 'azureAI' | 'compare' = 'groq', model: string = 'llama3-8b-8192') {
if (provider === 'compare') {
const models = ['llama3-8b-8192', 'gpt-3.5-turbo', 'palm-2', 'davinci']; // Example models to compare
const results = await compareAIModels(history, models);
return {
messages: [
...history,
...results.map(result => ({
role: 'assistant' as const,
content: result.response,
})),
],
};
}

const stream = createStreamableUI();

const modelProvider = getModelProvider(provider);

if (provider === 'azureAI') {
const response = await azureAI.getChatCompletion(history);
return {
messages: [
...history,
{
role: 'assistant' as const,
content: response,
},
],
};
}

const { text, toolResults } = await generateText({
model: groq('llama3-8b-8192'), // Use Groq model
model: modelProvider(model), // Use selected model
system: 'You are a friendly weather assistant!',
messages: history,
tools: {
// showWeather: {
// description: 'Show the weather for a given location.',
// parameters: z.object({
// city: z.string().describe('The city to show the weather for.'),
// unit: z
// .enum(['F'])
// .describe('The unit to display the temperature in'),
// }),
// execute: async ({ city, unit }) => {
// return `Here's the weather for ${city}!`;
// },
// },
},
tools: {},
});

return {
Expand All @@ -68,6 +176,31 @@ export async function continueConversation(history: Message[]) {

// Utils
export async function checkAIAvailability() {
const envVarExists = !!process.env.GROQ_API_KEY;
const envVarExists = !!process.env.GROQ_API_KEY || !!process.env.OPENAI_API_KEY || !!process.env.AZURE_OPENAI_API_KEY || !!process.env.AZURE_OPENAI_ENDPOINT;
return envVarExists;
}
}

/**
 * Ask the Azure OpenAI deployment for a chat completion and log it.
 *
 * @param prompt user message to send. Defaults to the original demo prompt,
 *   so existing zero-argument callers behave exactly as before.
 * @returns the assistant's reply text, or undefined when the request failed
 *   (errors are logged, not rethrown — matching the original behavior).
 */
export async function getChatCompletion(prompt: string = 'Tell me a joke.') {
  try {
    const response = await axios.post(
      endpoint,
      {
        messages: [
          { role: 'system', content: 'You are a helpful assistant.' },
          { role: 'user', content: prompt },
        ],
      },
      {
        headers: {
          'Content-Type': 'application/json',
          'api-key': apiKey,
        },
      }
    );

    const content = response.data.choices[0].message.content;
    console.log(content);
    return content;
  } catch (error) {
    // Catch variables are `unknown` under strict TS; narrow before touching
    // axios-specific fields instead of assuming `.response` exists.
    if (axios.isAxiosError(error)) {
      console.error('Error:', error.response ? error.response.data : error.message);
    } else {
      console.error('Error:', error);
    }
  }
}
37 changes: 35 additions & 2 deletions src/app/genui/page.tsx
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
// import Chat from "@/components/chat";
'use client';

import { useState } from 'react';
Expand All @@ -13,12 +12,16 @@ export const maxDuration = 30;
export default function GenUI() {
const [conversation, setConversation] = useState<Message[]>([]);
const [input, setInput] = useState<string>('');
const [selectedProvider, setSelectedProvider] = useState<string>('groq'); // Add state for selected provider
const [selectedModel, setSelectedModel] = useState<string>('llama3-8b-8192'); // Add state for selected model
const [compareMode, setCompareMode] = useState<boolean>(false); // Add state for comparison mode

const handleSubmit = async () => {
const { messages } = await continueConversation([
// exclude React components from being sent back to the server:
...conversation.map(({ role, content }) => ({ role, content })),
{ role: 'user', content: input },
]);
], compareMode ? 'compare' : selectedProvider, selectedModel); // Pass both provider and model, handle comparison mode
setInput("")
setConversation(messages);
}
Expand Down Expand Up @@ -51,6 +54,28 @@ export default function GenUI() {
<div className="w-full max-w-xl mx-auto">
<Card className="p-2">
<div className="flex">
<select
value={selectedProvider}
onChange={(e) => setSelectedProvider(e.target.value)}
className="mr-2 p-2 border rounded"
disabled={compareMode} // Disable provider selection in comparison mode
>
<option value="groq">Groq</option>
<option value="openai">OpenAI</option>
<option value="googleCloudAI">Google Cloud AI</option>
<option value="azureAI">Azure AI</option>
</select>
<select
value={selectedModel}
onChange={(e) => setSelectedModel(e.target.value)}
className="mr-2 p-2 border rounded"
disabled={compareMode} // Disable model selection in comparison mode
>
<option value="llama3-8b-8192">Llama 3 8B 8192</option>
<option value="gpt-3.5-turbo">GPT-3.5 Turbo</option>
<option value="palm-2">PaLM 2</option>
<option value="davinci">Davinci</option>
</select>
<Input
type="text"
value={input}
Expand All @@ -68,6 +93,14 @@ export default function GenUI() {
<IconArrowUp />
</Button>
</div>
<div className="flex justify-end mt-2">
<Button
variant={compareMode ? 'default' : 'outline'}
onClick={() => setCompareMode(!compareMode)}
>
{compareMode ? 'Disable Comparison Mode' : 'Enable Comparison Mode'}
</Button>
</div>
</Card>
</div>

Expand Down
3 changes: 0 additions & 3 deletions src/components/cards/aboutcard.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,6 @@ export default function AboutCard() {
</CardHeader>
<CardContent className="text-sm text-muted-foreground/90 leading-normal prose">
<p className="mb-3">Start a conversation by entering a message below:</p>



</CardContent>
</Card>
</div>
Expand Down
4 changes: 1 addition & 3 deletions src/components/cards/genuicard.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,7 @@ export default function GenUICard() {
<CardDescription>Start Chatting</CardDescription>
</CardHeader>
<CardContent className="text-sm text-muted-foreground/90 leading-normal prose">
<p className="mb-3">A simple prompt based way to enter into conversation./</p>


<p className="mb-3">A simple prompt based way to enter into conversation.</p>
</CardContent>
</Card>
</div>
Expand Down
Loading