-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtempCodeRunnerFile.py
More file actions
79 lines (62 loc) · 2.57 KB
/
tempCodeRunnerFile.py
File metadata and controls
79 lines (62 loc) · 2.57 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
# RAG answer generation: retrieve context with the project's retriever and
# answer with a Groq-hosted chat model via langchain.
from src.search import rag_retriever
from langchain_groq import ChatGroq
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage
import os
from dotenv import load_dotenv
# Load variables from a local .env file into the process environment.
load_dotenv()
# NOTE(review): reads the lowercase env var "api_key", while the class
# docstring/error text below refer to GROQ_API_KEY — confirm which name
# the .env file actually defines.
api_key = os.getenv("api_key")
class GroqLLM:
    """Thin wrapper around langchain's ChatGroq chat model for RAG answering."""

    def __init__(self, model_name: str = "llama-3.1-70b-versatile", api_key: str = None):
        """
        Initialize Groq LLM.

        Args:
            model_name: Groq model name (qwen2-72b-instruct, llama3-70b-8192, etc.)
            api_key: Groq API key (or set GROQ_API_KEY environment variable)

        Raises:
            ValueError: if no key is passed and GROQ_API_KEY is unset.
        """
        self.model_name = model_name
        # Fall back to the GROQ_API_KEY environment variable, as the
        # docstring and the error message below promise; previously only
        # the explicit argument was consulted.
        self.api_key = api_key or os.getenv("GROQ_API_KEY")
        if not self.api_key:
            raise ValueError(
                "Groq API key is required. Set GROQ_API_KEY environment variable "
                "or pass api_key parameter."
            )
        self.llm = ChatGroq(
            groq_api_key=self.api_key,
            model_name=self.model_name,
            temperature=0.1,  # low temperature: near-deterministic, grounded answers
            max_tokens=1024,
        )
        print(f"Initialized Groq LLM with model: {self.model_name}")

    def generate_response(self, query: str, context: str, max_length: int = 500) -> str:
        """
        Generate a response to *query* grounded in the retrieved *context*.

        Args:
            query: User question.
            context: Retrieved document context.
            max_length: Maximum response length. NOTE(review): currently
                unused — the max_tokens=1024 set at init applies; kept for
                backward compatibility with existing callers.

        Returns:
            Generated response string, or an "Error generating response: ..."
            message if the model call fails.
        """
        # Prompt template that instructs the model to stay within the context.
        prompt_template = PromptTemplate(
            input_variables=["context", "question"],
            template="""You are a helpful AI assistant. Use the following context to answer the question accurately and concisely.
Context:
{context}
Question: {question}
Answer: Provide a clear and informative answer based on the context above. If the context doesn't contain enough information to answer the question, say no.""",
        )
        formatted_prompt = prompt_template.format(context=context, question=query)
        try:
            response = self.llm.invoke([HumanMessage(content=formatted_prompt)])
            return response.content
        except Exception as e:
            # Degrade to an error string rather than crashing the caller.
            return f"Error generating response: {str(e)}"
if __name__ == "__main__":
    # Smoke test: retrieve context for a sample question and answer it.
    groq_llm = GroqLLM(model_name="llama-3.1-8b-instant", api_key=api_key)
    question = "Brief description of Dandelion "
    docs = rag_retriever.retrieve(question)
    joined_context = "\n\n".join(doc["content"] for doc in docs)
    print(groq_llm.generate_response(question, joined_context))