-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmodels.py
More file actions
97 lines (72 loc) · 3.71 KB
/
models.py
File metadata and controls
97 lines (72 loc) · 3.71 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
"""Pydantic models for flashcard data structures."""
from typing import Optional
from pydantic import BaseModel
class Flashcard(BaseModel):
    """Single flashcard with question and answer."""
    question: str  # Prompt side of the card shown to the student
    answer: str  # Answer side revealed after the student attempts recall
class FlashcardSet(BaseModel):
    """Collection of flashcards (a deck)."""
    flashcards: list[Flashcard]  # Ordered list; other models index into it by 0-based position
class Critique(BaseModel):
    """AI critique of flashcard quality."""
    is_acceptable: bool  # True when the reviewed card/deck passes quality review
    feedback: str  # Free-text explanation of the verdict
    issues: list[str]  # Specific problems found; presumably empty when acceptable — verify against producer
class StudyRating(BaseModel):
    """Individual flashcard rating from user."""
    flashcard_index: int  # Which card (0-based index into the session's flashcards)
    difficulty: int  # 1-5 scale: 1=know well, 2=easy, 3=moderate, 4=difficult, 5=very difficult
class StudySession(BaseModel):
    """Complete study session results."""
    flashcards: list[Flashcard]  # All cards shown during the session
    ratings: list[StudyRating]  # All ratings collected; indices refer to `flashcards` above
    timestamp: str  # When the session happened; format not enforced here — TODO confirm (ISO 8601?)
class KnowledgeGaps(BaseModel):
    """AI analysis of student performance."""
    strong_areas: list[str]  # Topics the student knows well
    weak_areas: list[str]  # Topics that need more work
    critical_gaps: list[str]  # Major knowledge gaps requiring new material
    recommended_additions: list[Flashcard]  # New cards to add to the deck
    recommended_removals: list[int]  # 0-based card indices to remove from the current deck
    gap_report: str  # Human-readable summary of the analysis
class AdaptiveUpdate(BaseModel):
    """Final result of adaptation."""
    original_count: int  # Number of cards in the deck before adaptation
    cards_removed: list[Flashcard]  # Cards dropped from the deck
    cards_added: list[Flashcard]  # New cards introduced to fill gaps
    final_flashcards: FlashcardSet  # The deck after removals and additions
    gap_report: str  # Human-readable summary carried over from the gap analysis
class FlashcardEvaluation(BaseModel):
    """Evaluation of a single flashcard."""
    atomicity: int  # 1-10: One clear concept per card
    clarity: int  # 1-10: Unambiguous, precise, and complete questions and answers
    learning_value: int  # 1-10: Promotes active recall and deep understanding
    accuracy: int  # 1-10: Factually correct and free from errors
    feedback: str  # Detailed feedback per criterion
class DeckEvaluation(BaseModel):
    """Evaluation of a flashcard deck.

    The per-card evaluations come from the LLM; the aggregate score fields
    default to None and are computed afterwards, which is why they are
    Optional rather than required model output.
    """
    flashcard_evaluations: list[FlashcardEvaluation]  # One per card - LLM provides this
    average_scores: Optional[dict[str, float]] = None  # Computed from flashcard_evaluations - not from LLM
    overall_deck_score: Optional[float] = None  # Computed from flashcard_evaluations - not from LLM
class GapCardEvaluation(BaseModel):
    """Evaluation of how well new cards address a specific gap."""
    gap_description: str  # The identified gap being evaluated
    addressing_cards: list[int]  # Indices of new cards that address this gap (presumably 0-based — verify against producer)
    personalization_score: int  # 1-10: How well new cards address this specific gap (personalization effectiveness)
    relevance_feedback: str  # Detailed explanation of the score
class RemovalEvaluation(BaseModel):
    """Evaluation of whether a card removal was appropriate."""
    removed_card_index: int  # Index of the removed card in the original deck
    removed_card_question: str  # Question text of the removed card (for human-readable reports)
    user_rating: int  # User's difficulty rating (1-5, per StudyRating.difficulty scale)
    personalization_score: int  # 1-10: Whether removal was appropriate (personalization effectiveness)
    removal_feedback: str  # Explanation of the judgment
class AdaptationEvaluation(BaseModel):
    """Evaluation of the adaptation stage."""
    gap_evaluations: list[GapCardEvaluation]  # One per identified gap
    removal_evaluations: list[RemovalEvaluation]  # One per removed card
    average_gap_personalization: float  # Average personalization score across gap-filling cards
    average_removal_personalization: float  # Average personalization score across removals
    overall_personalization: float  # Overall personalization effectiveness (weighted average)
    overall_adaptation_effectiveness: int  # 1-10: Overall score (deprecated, use overall_personalization)