Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 9 additions & 2 deletions bun.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions packages/server/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
"typescript": "^5"
},
"dependencies": {
"@huggingface/inference": "^4.13.0",
"@prisma/client": "^6.16.2",
"dayjs": "^1.11.18",
"dotenv": "^17.2.2",
Expand Down
2 changes: 2 additions & 0 deletions packages/server/prompts/summarizeReviewLlama.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
Summarize the following customer reviews into a short paragraph
highlighting key themes, both positive and negative.
42 changes: 42 additions & 0 deletions packages/server/providers/llm.provider.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
import OpenAI from 'openai';
import { InferenceClient } from '@huggingface/inference';
import summarizeReviewPrompt from '../prompts/summarizeReviewLlama.txt';

type GenerateResponseQuery = {
prompt: string;
Expand All @@ -12,6 +14,14 @@ type GenerateResponseResult = {
id: string;
};

/** Input for the summarization methods: the raw text to condense. */
type SummarizeTextQuery = {
text: string;
};

/** Result of a summarization call: the model-generated summary. */
type SummarizeTextResult = {
summary: string;
};

export class LlmProvider {
constructor(private readonly openAiClient: OpenAI) {}
async generateResponse({
Expand All @@ -31,10 +41,42 @@ export class LlmProvider {

return { message: response.output_text, id: response.id };
}

/**
 * Produces an abstractive summary of `text` using the
 * facebook/bart-large-cnn model on Hugging Face inference.
 *
 * Note: uses the module-level `inferenceClient` rather than an
 * injected dependency (unlike the OpenAI client on the constructor).
 */
async summarize({ text }: SummarizeTextQuery): Promise<SummarizeTextResult> {
  const { summary_text } = await inferenceClient.summarization({
    provider: 'hf-inference',
    model: 'facebook/bart-large-cnn',
    inputs: text,
  });

  return { summary: summary_text };
}

/**
 * Summarizes a batch of customer reviews with Llama 3.1 8B Instruct
 * (served via the Novita provider), steered by the system prompt
 * loaded from summarizeReviewLlama.txt.
 *
 * Returns an empty summary string when the model yields no content.
 */
async summarizeReviews({
  text: reviews,
}: SummarizeTextQuery): Promise<SummarizeTextResult> {
  const completion = await inferenceClient.chatCompletion({
    provider: 'novita',
    model: 'meta-llama/Llama-3.1-8B-Instruct',
    messages: [
      { role: 'system', content: summarizeReviewPrompt },
      { role: 'user', content: reviews },
    ],
  });

  const content = completion?.choices[0]?.message.content;
  return { summary: content ?? '' };
}
}

// Module-level OpenAI client; reads OPENAI_API_KEY from the environment.
const openAiClient = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
});

// Hugging Face inference client; presumably HF_TOKEN is loaded via dotenv — TODO confirm.
const inferenceClient = new InferenceClient(process.env.HF_TOKEN);

// Shared singleton for the app. Only the OpenAI client is injected; the
// summarize methods capture `inferenceClient` from module scope directly.
export const llmProvider = new LlmProvider(openAiClient);
5 changes: 5 additions & 0 deletions packages/server/services/review.service.ts
Original file line number Diff line number Diff line change
Expand Up @@ -34,10 +34,15 @@ export class ReviewService {
}

const joinedReviews = reviews.map((r) => r.content).join('\n\n');
/*
Replace if you want to use OpenAI instead of an open-source model
const prompt = template.replace('{{ reviews }}', joinedReviews);
const { message: summary } = await this.llmProvider.generateResponse({
prompt,
maxOutputTokens: 500,
}); */
const { summary } = await this.llmProvider.summarizeReviews({
text: joinedReviews,
});
await this.reviewsRepository.upsertReviewSummary(productId, summary);
return summary;
Expand Down
Loading