-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmodelhelper.py
More file actions
56 lines (41 loc) · 1.69 KB
/
modelhelper.py
File metadata and controls
56 lines (41 loc) · 1.69 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
import os
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_community.chat_models import ChatAnthropic
from langchain_community.chat_models import ChatCohere
# Load API keys from a local .env file into the process environment.
load_dotenv()
# OpenAI key; None if "openai_apikey" is not set in the environment/.env.
my_key_openai = os.getenv("openai_apikey")
def ask_gpt(prompt, temperature, max_tokens):
    """Send *prompt* to OpenAI's GPT model and return the reply text.

    Args:
        prompt: The user prompt to send to the model.
        temperature: Sampling temperature for the completion.
        max_tokens: Maximum number of tokens in the generated reply.

    Returns:
        The model's reply as a plain string.
    """
    llm = ChatOpenAI(api_key=my_key_openai,
                     temperature=temperature,
                     max_tokens=max_tokens,
                     model="gpt-4.1")
    ai_response = llm.invoke(prompt)
    # invoke() returns an AIMessage; the string payload lives in .content.
    # (.text was a method on BaseMessage, so the bare attribute returned a
    # bound method object instead of the reply text.)
    return ai_response.content
#################################################
# Google key; None if "google_apikey" is not set in the environment/.env.
my_key_google = os.getenv("google_apikey")
def ask_gemini(prompt, temperature, max_tokens):
    """Send *prompt* to Google's Gemini model and return the reply text.

    Args:
        prompt: The user prompt to send to the model.
        temperature: Sampling temperature for the completion.
        max_tokens: Maximum number of output tokens (forwarded as
            ``max_output_tokens``, the parameter name this wrapper uses).

    Returns:
        The model's reply as a plain string.
    """
    llm = ChatGoogleGenerativeAI(api_key=my_key_google,
                                 temperature=temperature,
                                 # was accepted but never used; forward it so
                                 # the parameter actually limits the reply
                                 max_output_tokens=max_tokens,
                                 model="gemini-pro")
    ai_response = llm.invoke(prompt)
    # invoke() returns an AIMessage; the string payload lives in .content,
    # not .text (which was a method, not the reply string).
    return ai_response.content
################################################
# Anthropic key; None if "anthropic_apikey" is not set in the environment/.env.
my_key_anthropic = os.getenv("anthropic_apikey")
def ask_claude(prompt, temperature, max_tokens):
    """Send *prompt* to Anthropic's Claude model and return the reply text.

    Args:
        prompt: The user prompt to send to the model.
        temperature: Sampling temperature for the completion.
        max_tokens: Maximum number of tokens in the generated reply.

    Returns:
        The model's reply as a plain string.
    """
    llm = ChatAnthropic(api_key=my_key_anthropic,
                        temperature=temperature,
                        max_tokens=max_tokens,
                        model="claude-2.1")
    ai_response = llm.invoke(prompt)
    # invoke() returns an AIMessage; the string payload lives in .content,
    # not .text (which was a method, not the reply string).
    return ai_response.content
################################################
# Cohere key; None if "cohere_apikey" is not set in the environment/.env.
my_key_cohere = os.getenv("cohere_apikey")
def ask_command(prompt, temperature, max_tokens):
    """Send *prompt* to Cohere's Command model and return the reply text.

    Args:
        prompt: The user prompt to send to the model.
        temperature: Sampling temperature for the completion.
        max_tokens: Maximum number of tokens in the generated reply.

    Returns:
        The model's reply as a plain string.
    """
    llm = ChatCohere(api_key=my_key_cohere,
                     temperature=temperature,
                     max_tokens=max_tokens,
                     model="command")
    ai_response = llm.invoke(prompt)
    # invoke() returns an AIMessage; the string payload lives in .content,
    # not .text (which was a method, not the reply string).
    return ai_response.content