diff --git a/README.md b/README.md index 522a267..cdc51f9 100644 --- a/README.md +++ b/README.md @@ -15,6 +15,10 @@ ``` 3. Set up your environment variables: - Add your `GROQ_API_KEY` to the `.env` file + - Add your `OPENAI_API_KEY` to the `.env` file + - Add your `AZURE_OPENAI_API_KEY` to the `.env` file + - Add your `AZURE_OPENAI_ENDPOINT` to the `.env` file + - Add your `GOOGLE_CLOUD_AI_API_KEY` to the `.env` file 4. Run the development server: ```bash @@ -23,19 +27,27 @@ 5. Open [http://localhost:3000](http://localhost:3000) in your browser to see the result. +## Selecting AI Model Provider and Model +To select an AI model provider and model, follow these steps: +1. In the chat interface, you will see two dropdown menus. +2. The first dropdown menu allows you to select the AI model provider. The available options are: + - Groq + - OpenAI + - Google Cloud AI + - Azure AI +3. The second dropdown menu allows you to select the AI model. The available options are: + - Llama 3 8B 8192 + - GPT-3.5 Turbo + - PaLM 2 + - Davinci +4. Select the desired provider and model from the dropdown menus before starting the conversation. 
## Project Screenshots - ![Chat Interface](./public/images/screenshot1.png) ![Chat Interface](./public/images/screenshot2.png) ![Chat Interface](./public/images/screenshot3.png) - - - - - diff --git a/src/app/actions.tsx b/src/app/actions.tsx index f295564..dfa65ab 100644 --- a/src/app/actions.tsx +++ b/src/app/actions.tsx @@ -6,6 +6,20 @@ import { createOpenAI } from '@ai-sdk/openai'; import { createStreamableUI } from 'ai/rsc'; import { ReactNode } from 'react'; import { z } from 'zod'; +import { Configuration, OpenAIApi } from "openai"; // Import OpenAI API +import axios from 'axios'; +import dotenv from 'dotenv'; + +// Load environment variables from .env file +dotenv.config(); + +// Retrieve API key and endpoint from environment variables +const apiKey = process.env.AZURE_OPENAI_API_KEY; +const endpoint = process.env.AZURE_OPENAI_ENDPOINT; + +if (!apiKey || !endpoint) { + throw new Error('API key or endpoint is not defined in the environment variables'); +} // Add Groq provider const groq = createOpenAI({ @@ -13,16 +27,94 @@ const groq = createOpenAI({ apiKey: process.env.GROQ_API_KEY, }); +// Add OpenAI provider +const openai = createOpenAI({ + apiKey: process.env.OPENAI_API_KEY, +}); + +// Add Google Cloud AI provider +const googleCloudAI = createOpenAI({ + apiKey: process.env.GOOGLE_CLOUD_AI_API_KEY, +}); + +// Add Azure AI provider +const azureAI = { + async getChatCompletion(messages: CoreMessage[]) { + try { + const response = await axios.post( + endpoint, + { + messages: [ + { role: 'system', content: 'You are a helpful assistant.' }, + ...messages, + ], + }, + { + headers: { + 'Content-Type': 'application/json', + 'api-key': apiKey, + }, + } + ); + + return response.data.choices[0].message.content; + } catch (error) { + console.error('Error:', error.response ? 
error.response.data : error.message); + throw error; + } + } +}; + export interface Message { role: 'user' | 'assistant'; content: string; display?: ReactNode; } +// Function to get model provider +function getModelProvider(provider: 'groq' | 'openai' | 'googleCloudAI' | 'azureAI' | 'compare') { + const modelProviders = { + groq, + openai, + googleCloudAI, + azureAI, + }; + return modelProviders[provider]; +} + +// Function to compare responses from different AI models +export async function compareAIModels(messages: CoreMessage[], models: string[]) { + const results = await Promise.all(models.map(async (model) => { + const result = await streamText({ + model: openai(model), // Assuming openai for comparison, adjust as needed + messages, + }); + return { + model, + response: result.textStream, + }; + })); + + return results; +} + // Streaming Chat -export async function continueTextConversation(messages: CoreMessage[]) { +export async function continueTextConversation(messages: CoreMessage[], provider: 'groq' | 'openai' | 'googleCloudAI' | 'azureAI' | 'compare' = 'groq', model: string = 'llama3-8b-8192') { + if (provider === 'compare') { + const models = ['llama3-8b-8192', 'gpt-3.5-turbo', 'palm-2', 'davinci']; // Example models to compare + const results = await compareAIModels(messages, models); + return results; + } + + const modelProvider = getModelProvider(provider); + + if (provider === 'azureAI') { + const response = await azureAI.getChatCompletion(messages); + return createStreamableValue(response).value; + } + const result = await streamText({ - model: groq('llama3-8b-8192'), // Use Groq model + model: modelProvider(model), // Use selected model messages, }); @@ -31,27 +123,43 @@ export async function continueTextConversation(messages: CoreMessage[]) { } // Gen UIs -export async function continueConversation(history: Message[]) { +export async function continueConversation(history: Message[], provider: 'groq' | 'openai' | 'googleCloudAI' | 'azureAI' | 
'compare' = 'groq', model: string = 'llama3-8b-8192') { + if (provider === 'compare') { + const models = ['llama3-8b-8192', 'gpt-3.5-turbo', 'palm-2', 'davinci']; // Example models to compare + const results = await compareAIModels(history, models); + return { + messages: [ + ...history, + ...results.map(result => ({ + role: 'assistant' as const, + content: result.response, + })), + ], + }; + } + const stream = createStreamableUI(); + const modelProvider = getModelProvider(provider); + + if (provider === 'azureAI') { + const response = await azureAI.getChatCompletion(history); + return { + messages: [ + ...history, + { + role: 'assistant' as const, + content: response, + }, + ], + }; + } + const { text, toolResults } = await generateText({ - model: groq('llama3-8b-8192'), // Use Groq model + model: modelProvider(model), // Use selected model system: 'You are a friendly weather assistant!', messages: history, - tools: { - // showWeather: { - // description: 'Show the weather for a given location.', - // parameters: z.object({ - // city: z.string().describe('The city to show the weather for.'), - // unit: z - // .enum(['F']) - // .describe('The unit to display the temperature in'), - // }), - // execute: async ({ city, unit }) => { - // return `Here's the weather for ${city}!`; - // }, - // }, - }, + tools: {}, }); return { @@ -68,6 +176,31 @@ export async function continueConversation(history: Message[]) { // Utils export async function checkAIAvailability() { - const envVarExists = !!process.env.GROQ_API_KEY; + const envVarExists = !!process.env.GROQ_API_KEY || !!process.env.OPENAI_API_KEY || !!process.env.AZURE_OPENAI_API_KEY || !!process.env.AZURE_OPENAI_ENDPOINT; return envVarExists; -} \ No newline at end of file +} + +// Function to get chat completion using OpenAI API +export async function getChatCompletion() { + try { + const response = await axios.post( + endpoint, + { + messages: [ + { role: 'system', content: 'You are a helpful assistant.' 
}, + { role: 'user', content: 'Tell me a joke.' }, + ], + }, + { + headers: { + 'Content-Type': 'application/json', + 'api-key': apiKey, + }, + } + ); + + console.log(response.data.choices[0].message.content); + } catch (error) { + console.error('Error:', error.response ? error.response.data : error.message); + } +} diff --git a/src/app/genui/page.tsx b/src/app/genui/page.tsx index a3a4728..1bb501e 100644 --- a/src/app/genui/page.tsx +++ b/src/app/genui/page.tsx @@ -1,4 +1,3 @@ -// import Chat from "@/components/chat"; 'use client'; import { useState } from 'react'; @@ -13,12 +12,16 @@ export const maxDuration = 30; export default function GenUI() { const [conversation, setConversation] = useState([]); const [input, setInput] = useState(''); + const [selectedProvider, setSelectedProvider] = useState('groq'); // Add state for selected provider + const [selectedModel, setSelectedModel] = useState('llama3-8b-8192'); // Add state for selected model + const [compareMode, setCompareMode] = useState(false); // Add state for comparison mode + const handleSubmit = async () => { const { messages } = await continueConversation([ // exclude React components from being sent back to the server: ...conversation.map(({ role, content }) => ({ role, content })), { role: 'user', content: input }, - ]); + ], compareMode ? 'compare' : selectedProvider, selectedModel); // Pass both provider and model, handle comparison mode setInput("") setConversation(messages); } @@ -51,6 +54,28 @@ export default function GenUI() {
+ +
+
+ +
diff --git a/src/components/cards/aboutcard.tsx b/src/components/cards/aboutcard.tsx index 3bf5b98..127654c 100644 --- a/src/components/cards/aboutcard.tsx +++ b/src/components/cards/aboutcard.tsx @@ -17,9 +17,6 @@ export default function AboutCard() {

Start a conversation by entering a message below:

- - -
diff --git a/src/components/cards/genuicard.tsx b/src/components/cards/genuicard.tsx index 9b7dd07..34ab2a0 100644 --- a/src/components/cards/genuicard.tsx +++ b/src/components/cards/genuicard.tsx @@ -16,9 +16,7 @@ export default function GenUICard() { Start Chatting -

A simple prompt based way to enter into conversation./

- - +

A simple prompt based way to enter into conversation.

diff --git a/src/components/chat.tsx b/src/components/chat.tsx index b57df4b..73cccb7 100644 --- a/src/components/chat.tsx +++ b/src/components/chat.tsx @@ -15,6 +15,9 @@ export const maxDuration = 30; export default function Chat() { const [messages, setMessages] = useState([]); const [input, setInput] = useState(''); + const [selectedProvider, setSelectedProvider] = useState('groq'); // Add state for selected provider + const [selectedModel, setSelectedModel] = useState('llama3-8b-8192'); // Add state for selected model + const [compareMode, setCompareMode] = useState(false); // Add state for comparison mode const handleSubmit = async (e: React.FormEvent) => { e.preventDefault() @@ -24,7 +27,7 @@ export default function Chat() { ]; setMessages(newMessages); setInput(''); - const result = await continueTextConversation(newMessages); + const result = await continueTextConversation(newMessages, compareMode ? 'compare' : selectedProvider, selectedModel); // Pass both provider and model, handle comparison mode for await (const content of readStreamableValue(result)) { setMessages([ ...newMessages, @@ -57,6 +60,28 @@ export default function Chat() {
+ + )} +
+ +
diff --git a/src/components/header.tsx b/src/components/header.tsx index 47879fd..fb5c159 100644 --- a/src/components/header.tsx +++ b/src/components/header.tsx @@ -12,7 +12,6 @@ export async function Header() { Chat Interface - ) } diff --git a/tsconfig.json b/tsconfig.json index fccf2dc..7232f4a 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -1,27 +1,11 @@ { "compilerOptions": { - "lib": ["dom", "dom.iterable", "esnext"], - "allowJs": true, - "skipLibCheck": true, - "types": ["node"], + "target": "ES6", + "module": "commonjs", "strict": true, - "noEmit": true, "esModuleInterop": true, - "module": "esnext", - "moduleResolution": "bundler", - "resolveJsonModule": true, - "isolatedModules": true, - "jsx": "preserve", - "incremental": true, - "plugins": [ - { - "name": "next" - } - ], - "paths": { - "@/*": ["./src/*"] - } + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true }, - "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"], - "exclude": ["node_modules"] + "include": ["src"] }