diff --git a/docs/chat-mode-spec.md b/docs/chat-mode-spec.md new file mode 100644 index 0000000..79f6161 --- /dev/null +++ b/docs/chat-mode-spec.md @@ -0,0 +1,801 @@ +# Chat Mode Feature — Technical Specification + +## 1. Overview + +### 1.1 Feature Summary +Add an AI-guided conversational interface (Chat Mode) alongside the existing 4-tab workflow UI. Users toggle between modes via a button in the Header. Chat Mode follows the same workflow (Materials → Learning Objectives → Questions → Export) but with an AI assistant guiding each step through natural language conversation and embedded interactive components. + +### 1.2 Design Decisions +| Decision | Choice | Rationale | +|----------|--------|-----------| +| State sync direction | One-way (Chat → Workflow) | Simpler; chat dispatches Redux actions so workflow reflects changes | +| File upload in chat | Drag-and-drop / paste URL in input area | Natural chat UX, no modal interruption | +| Embedded UI granularity | Hybrid (Option C) | Inline UI for simple choices, modal for complex edits, suggest workflow switch for bulk edits | +| LLM provider for chat | OpenAI only | Mature function calling support required | +| Conversation persistence | MongoDB | Users can resume conversations across sessions | + +### 1.3 Constraints +- New files must not exceed 300 lines +- Follow existing codebase patterns (Redux Toolkit, Express Router, Mongoose, Shadcn/ui) +- Reuse existing services and API client where possible +- No changes to existing workflow behavior + +--- + +## 2. 
Architecture + +### 2.1 High-Level Flow + +``` +User message → Frontend ChatInput + → POST /api/create/chat/message (SSE stream) + → Backend ChatOrchestrator + → OpenAI API (with tool definitions) + → If tool_call: ChatToolExecutor runs internal service + → Return tool result to OpenAI for next response + → Stream assistant response + tool results to frontend + → Frontend renders message + dispatches Redux actions + → Workflow UI reflects changes via shared Redux store +``` + +### 2.2 Design Patterns + +| Pattern | Where | Purpose | +|---------|-------|---------| +| **Strategy** | `ChatToolExecutor` | Each tool maps to a strategy function that calls the appropriate service | +| **Observer** | Redux + PubSub (existing) | Chat actions dispatch Redux actions; workflow observes store changes | +| **Facade** | `ChatOrchestrationService` | Single entry point orchestrating LLM calls, tool execution, and streaming | +| **Adapter** | `useChatActions` hook | Adapts tool call results into Redux dispatch calls | +| **Factory** | `ChatMessageRenderer` | Renders different message types (text, tool result, inline UI, error) | + +### 2.3 Sequence Diagram + +``` +Frontend Backend OpenAI + │ │ │ + │ POST /chat/message │ │ + │ { conversationId, msg } │ │ + │──────────────────────────>│ │ + │ │ chat.completions.create │ + │ │ { messages, tools } │ + │ │──────────────────────────>│ + │ │ │ + │ │ tool_call: create_folder │ + │ │<──────────────────────────│ + │ │ │ + │ SSE: tool_call_start │ Execute: folderService │ + │<──────────────────────────│ │ + │ │ │ + │ SSE: tool_call_result │ Return result to OpenAI │ + │<──────────────────────────│──────────────────────────>│ + │ │ │ + │ │ Stream text response │ + │ SSE: text_chunk │<──────────────────────────│ + │<──────────────────────────│ │ + │ │ │ + │ SSE: message_complete │ │ + │<──────────────────────────│ │ + │ │ │ + │ Dispatch Redux action │ │ + │ (addQuizLocally, etc.) │ │ +``` + +--- + +## 3. 
Data Model + +### 3.1 Conversation (MongoDB) + +```javascript +// Model: Conversation +{ + _id: ObjectId, + user: ObjectId (ref: User), // Owner + title: String, // Auto-generated or user-set + folder: ObjectId (ref: Folder), // Associated folder (set during chat) + quiz: ObjectId (ref: Quiz), // Associated quiz (set during chat) + messages: [{ + role: 'user' | 'assistant' | 'system' | 'tool', + content: String, // Text content + toolCalls: [{ // For assistant messages with tool calls + id: String, // OpenAI tool_call_id + name: String, // Function name + arguments: Object, // Parsed arguments + result: Object, // Execution result + status: 'pending' | 'success' | 'error' + }], + metadata: { // For frontend rendering hints + inlineUI: { // Optional: embedded UI component spec + type: String, // 'checkbox-list' | 'button-group' | 'file-upload' | ... + props: Object, // Component-specific props + userResponse: Object // User's selection (filled after interaction) + } + }, + timestamp: Date + }], + context: { // Current workflow progress + step: 'init' | 'folder' | 'materials' | 'objectives' | 'plan' | 'questions' | 'export', + folderId: String, + quizId: String, + materialIds: [String], + objectiveIds: [String], + planId: String + }, + status: 'active' | 'completed' | 'archived', + createdAt: Date, + updatedAt: Date +} +``` + +### 3.2 Redux Chat State + +```typescript +// New slice: chatSlice.ts +interface ChatState { + conversations: ConversationSummary[]; // List of past conversations + activeConversationId: string | null; + messages: ChatMessage[]; // Messages for active conversation + isStreaming: boolean; // Currently receiving SSE response + streamingMessage: string; // Accumulated text during streaming + pendingToolCalls: ToolCallInfo[]; // Tool calls awaiting results + context: ConversationContext; // Current step, folderId, quizId, etc. + loading: boolean; + error: string | null; +} +``` + +--- + +## 4. 
Backend API Design + +### 4.1 Endpoints + +``` +POST /api/create/chat/conversations Create new conversation +GET /api/create/chat/conversations List user's conversations +GET /api/create/chat/conversations/:id Get conversation with messages +DELETE /api/create/chat/conversations/:id Delete conversation + +POST /api/create/chat/conversations/:id/messages Send message (SSE response) +``` + +### 4.2 SSE Event Types + +| Event | Payload | Description | +|-------|---------|-------------| +| `text_chunk` | `{ content: string }` | Streaming text fragment | +| `tool_call_start` | `{ toolCallId, name, arguments }` | LLM is calling a tool | +| `tool_call_result` | `{ toolCallId, name, result, status }` | Tool execution completed | +| `inline_ui` | `{ type, props }` | Render embedded UI component | +| `context_update` | `{ step, folderId?, quizId?, ... }` | Workflow context changed | +| `message_complete` | `{ messageId }` | Assistant message finished | +| `error` | `{ code, message }` | Error occurred | + +### 4.3 Request/Response Examples + +**Send Message:** +``` +POST /api/create/chat/conversations/:id/messages +Content-Type: application/json + +{ + "content": "I want to create a quiz about machine learning", + "inlineUIResponse": { // Optional: user's response to inline UI + "messageId": "msg_abc", + "selection": { "selectedIds": ["lo_1", "lo_3"] } + } +} + +Response: SSE stream (see event types above) +``` + +--- + +## 5. Function Calling (Tool Definitions) + +### 5.1 Tool Schema + +All tools follow OpenAI's function calling format. The system prompt instructs the LLM about the workflow steps and when to use each tool. 
+ +```javascript +// 11 tools organized by workflow step +const CHAT_TOOLS = [ + // Step 1: Setup + { name: 'create_folder', description: 'Create a new course folder' }, + { name: 'list_folders', description: 'List user\'s existing folders' }, + { name: 'create_quiz', description: 'Create a new quiz in a folder' }, + + // Step 2: Materials + { name: 'add_material_url', description: 'Add learning material from a URL' }, + { name: 'list_materials', description: 'List materials in a folder' }, + { name: 'assign_materials', description: 'Assign materials to a quiz' }, + + // Step 3: Learning Objectives + { name: 'generate_objectives', description: 'Generate learning objectives from materials' }, + { name: 'save_objectives', description: 'Save selected learning objectives' }, + + // Step 4: Questions + { name: 'generate_plan', description: 'Generate a question distribution plan' }, + { name: 'generate_questions', description: 'Generate questions based on plan (streaming)' }, + + // Step 5: Export + { name: 'export_h5p', description: 'Export quiz as H5P package' }, +]; +``` + +See [Section 9: Tool Definitions Detail](#9-tool-definitions-detail) for full parameter schemas. + +### 5.2 System Prompt + +``` +You are a quiz creation assistant for TLEF-CREATE. Guide users through creating +educational quizzes step by step: + +1. SETUP: Help create or select a folder and quiz +2. MATERIALS: Help upload/select learning materials +3. OBJECTIVES: Generate and refine learning objectives +4. QUESTIONS: Generate questions from objectives +5. 
EXPORT: Export the quiz as H5P + +Rules: +- Always confirm before executing actions that create or modify data +- When presenting choices (objectives, question types), use the inline_ui + metadata to render interactive components in the chat +- For file uploads, instruct the user to drag files into the chat or paste URLs +- If the user wants to make detailed edits to multiple questions, suggest + switching to the Review tab in workflow mode +- Keep responses concise and educational +- Track progress in the context object +``` + +--- + +## 6. Frontend Component Architecture + +### 6.1 Component Tree + +``` +Header.tsx (modified — add toggle button) +│ +├── [Workflow Mode] QuizView.tsx (existing, unchanged) +│ +└── [Chat Mode] ChatMode.tsx + ├── ChatSidebar.tsx // Conversation history list + ├── ChatMessageList.tsx // Scrollable message area + │ └── ChatMessage.tsx (×N) // Individual message + │ ├── ChatToolResult.tsx // Tool call result display + │ └── ChatInlineAction.tsx // Embedded UI (checkboxes, buttons) + └── ChatInput.tsx // Text input + file drop zone +``` + +### 6.2 New Files + +| File | Lines (est.) | Responsibility | +|------|:---:|---| +| **Components** | | | +| `src/components/chat/ChatMode.tsx` | ~180 | Main chat container, layout, conversation management | +| `src/components/chat/ChatSidebar.tsx` | ~120 | Conversation history list, new/delete conversation | +| `src/components/chat/ChatMessageList.tsx` | ~100 | Auto-scrolling message list, loading states | +| `src/components/chat/ChatMessage.tsx` | ~150 | Single message bubble, renders text + tool results + inline UI | +| `src/components/chat/ChatToolResult.tsx` | ~200 | Renders tool call results as cards (folder created, LOs generated, etc.) 
| +| `src/components/chat/ChatInlineAction.tsx` | ~200 | Interactive components: checkbox lists, button groups, confirm buttons | +| `src/components/chat/ChatInput.tsx` | ~200 | Text input, file drop zone, URL paste detection, send button | +| **Hooks** | | | +| `src/hooks/useChatStream.ts` | ~180 | SSE connection for chat responses, event parsing, reconnection | +| `src/hooks/useChatActions.ts` | ~150 | Maps tool call results to Redux dispatches for workflow sync | +| **Redux** | | | +| `src/store/slices/chatSlice.ts` | ~250 | Chat state, async thunks for conversations, message management | +| **Services** | | | +| `src/services/chatApi.ts` | ~80 | Chat-specific API calls (extends api.ts pattern) | +| **Types** | | | +| `src/types/chat.ts` | ~100 | TypeScript interfaces for chat messages, tools, inline UI | +| **Styles** | | | +| `src/styles/components/chat/ChatMode.css` | ~150 | Chat layout, message bubbles, animations | + +### 6.3 Modified Files + +| File | Change | +|------|--------| +| `src/components/Header.tsx` | Add mode toggle button (Chat/Workflow) | +| `src/store/index.ts` | Add `chat: chatReducer` to store | +| `src/App.tsx` | Add mode state, conditional rendering of ChatMode vs QuizView | +| `src/services/api.ts` | Add `chatApi` section (~30 lines) OR use separate `chatApi.ts` | + +--- + +## 7. Backend File Architecture + +### 7.1 New Files + +| File | Lines (est.) 
| Responsibility | +|------|:---:|---| +| **Controller** | | | +| `routes/create/controllers/chatController.js` | ~200 | Express router: CRUD conversations, SSE message endpoint | +| **Services** | | | +| `routes/create/services/chatOrchestrationService.js` | ~280 | LLM conversation loop: send messages, handle tool calls, stream response | +| `routes/create/services/chatToolDefinitions.js` | ~200 | OpenAI tool schemas (all 11 tools with parameter definitions) | +| `routes/create/services/chatToolExecutor.js` | ~250 | Strategy pattern: maps tool names to service calls, executes, returns results | +| **Model** | | | +| `routes/create/models/Conversation.js` | ~120 | Mongoose schema for conversation persistence | +| **Config** | | | +| `routes/create/config/chatSystemPrompt.js` | ~80 | System prompt template with workflow instructions | + +### 7.2 Modified Files + +| File | Change | +|------|--------| +| `routes/create/createRoutes.js` | Mount `chatController` at `/chat` (~3 lines) | + +--- + +## 8. State Synchronization (Chat → Workflow) + +### 8.1 Sync Strategy + +When the chat backend executes a tool (e.g., `create_folder`), the SSE `tool_call_result` event contains the created resource data. 
The frontend `useChatActions` hook intercepts these results and dispatches the corresponding Redux actions: + +```typescript +// useChatActions.ts — mapping table +const TOOL_DISPATCH_MAP: Record<string, (r: any, d: AppDispatch) => void> = { + create_folder: (r, d) => d(addQuizLocally(r.folder)), // or navigate + create_quiz: (r, d) => d(addQuizLocally(r.quiz)), + add_material_url: (r, d) => d(addMaterialLocally(r.material)), + assign_materials: (r, d) => d(assignMaterials.fulfilled(r)), + generate_objectives: (r, d) => d(setObjectivesFromChat(r.objectives)), + save_objectives: (r, d) => d(saveObjectives.fulfilled(r)), + generate_plan: (r, d) => d(setCurrentPlan(r.plan)), + generate_questions: (r, d) => d(setQuestionsForQuiz(r)), + export_h5p: (r, d) => { /* trigger download */ }, +}; +``` + +### 8.2 Context Tracking + +The backend tracks workflow progress in `conversation.context`: + +```javascript +// After each tool call, update context +context: { + step: 'objectives', // Current workflow stage + folderId: '507f1f77...', // Created/selected folder + quizId: '507f1f77...', // Created/selected quiz + materialIds: ['...'], // Uploaded/selected materials + objectiveIds: ['...'], // Generated/saved objectives + planId: '507f1f77...' // Generated plan +} +``` + +This context is injected into each LLM call so the AI knows what's been done and what comes next. + +### 8.3 Navigation Sync + +When user switches from Chat Mode back to Workflow Mode: +1. Read `chatSlice.context` (folderId, quizId) +2. Navigate to `/course/:folderId/quiz/:quizId` +3. Redux store already has the data from chat actions +4. Workflow tabs reflect all work done in chat + +--- + +## 9. Tool Definitions Detail + +### 9.1 Setup Tools + +```javascript +{ + name: 'create_folder', + description: 'Create a new course folder for organizing quizzes', + parameters: { + type: 'object', + properties: { + name: { type: 'string', description: 'Folder name (e.g. 
course name)' }, + description: { type: 'string', description: 'Optional folder description' } + }, + required: ['name'] + } +} + +{ + name: 'list_folders', + description: 'List all folders owned by the current user', + parameters: { type: 'object', properties: {} } +} + +{ + name: 'create_quiz', + description: 'Create a new quiz within a folder', + parameters: { + type: 'object', + properties: { + folderId: { type: 'string', description: 'Folder ID to create quiz in' }, + title: { type: 'string', description: 'Quiz title' } + }, + required: ['folderId', 'title'] + } +} +``` + +### 9.2 Material Tools + +```javascript +{ + name: 'add_material_url', + description: 'Add learning material from a URL. The system will fetch and process the content.', + parameters: { + type: 'object', + properties: { + folderId: { type: 'string', description: 'Folder to add material to' }, + url: { type: 'string', description: 'URL of the learning material' }, + title: { type: 'string', description: 'Optional title for the material' } + }, + required: ['folderId', 'url'] + } +} + +{ + name: 'list_materials', + description: 'List all materials in a folder', + parameters: { + type: 'object', + properties: { + folderId: { type: 'string', description: 'Folder ID' } + }, + required: ['folderId'] + } +} + +{ + name: 'assign_materials', + description: 'Assign selected materials to a quiz for question generation', + parameters: { + type: 'object', + properties: { + quizId: { type: 'string', description: 'Quiz ID' }, + materialIds: { type: 'array', items: { type: 'string' }, description: 'Material IDs to assign' } + }, + required: ['quizId', 'materialIds'] + } +} +``` + +### 9.3 Objective Tools + +```javascript +{ + name: 'generate_objectives', + description: 'Generate learning objectives from assigned materials using AI. 
Returns a list for user to review and select.', + parameters: { + type: 'object', + properties: { + quizId: { type: 'string', description: 'Quiz ID with assigned materials' }, + count: { type: 'number', description: 'Number of objectives to generate (default 5)' }, + approach: { type: 'string', enum: ['support', 'challenge', 'balanced'], description: 'Pedagogical approach' } + }, + required: ['quizId'] + } +} + +{ + name: 'save_objectives', + description: 'Save the selected learning objectives for question generation', + parameters: { + type: 'object', + properties: { + quizId: { type: 'string', description: 'Quiz ID' }, + objectiveIds: { type: 'array', items: { type: 'string' }, description: 'Selected objective IDs to keep' } + }, + required: ['quizId', 'objectiveIds'] + } +} +``` + +### 9.4 Question Tools + +```javascript +{ + name: 'generate_plan', + description: 'Generate a question distribution plan specifying question types and counts per objective', + parameters: { + type: 'object', + properties: { + quizId: { type: 'string', description: 'Quiz ID' }, + totalQuestions: { type: 'number', description: 'Total questions to generate (default 10)' }, + questionTypes: { type: 'array', items: { type: 'string', enum: ['mc', 'tf', 'matching', 'ordering', 'cloze', 'sa', 'essay', 'flashcard'] }, description: 'Preferred question types' } + }, + required: ['quizId'] + } +} + +{ + name: 'generate_questions', + description: 'Generate questions based on the approved plan. 
This is a long-running operation that streams progress.', + parameters: { + type: 'object', + properties: { + quizId: { type: 'string', description: 'Quiz ID' }, + planId: { type: 'string', description: 'Approved plan ID' } + }, + required: ['quizId', 'planId'] + } +} +``` + +### 9.5 Export Tools + +```javascript +{ + name: 'export_h5p', + description: 'Export the quiz as an H5P interactive content package for download', + parameters: { + type: 'object', + properties: { + quizId: { type: 'string', description: 'Quiz ID to export' }, + format: { type: 'string', enum: ['h5p', 'pdf'], description: 'Export format (default h5p)' } + }, + required: ['quizId'] + } +} +``` + +--- + +## 10. Inline UI Components + +### 10.1 Component Types + +The `ChatInlineAction` component renders different UI types based on `metadata.inlineUI.type`: + +| Type | When Used | Rendered As | +|------|-----------|-------------| +| `checkbox-list` | Select LOs, select materials, select question types | Checkboxes with labels + Confirm button | +| `button-group` | Quick choices (yes/no, number of questions, approach) | Row of buttons | +| `file-upload` | Material upload step | Drop zone + URL paste field | +| `question-preview` | After question generation | Collapsed question cards with expand | +| `plan-summary` | After plan generation | Table showing type × objective distribution | +| `confirm` | Before executing destructive/important actions | Confirm / Cancel buttons | + +### 10.2 Inline UI Flow + +1. Backend LLM decides to present choices → includes `inline_ui` in SSE metadata +2. Frontend `ChatMessage` renders `ChatInlineAction` with the spec +3. User interacts (checks boxes, clicks button) +4. User's selection is sent in the next `POST /chat/conversations/:id/messages` as `inlineUIResponse` +5. 
Backend receives selection, may execute tool call, continues conversation + +### 10.3 Modal Escalation + +When the AI determines the operation is too complex for inline UI (e.g., editing question content), it sends a `inline_ui` with type `modal-trigger`: + +```json +{ + "type": "modal-trigger", + "props": { + "label": "Edit Questions in Detail", + "modalComponent": "QuestionEditModal", + "data": { "quizId": "...", "questionIds": ["..."] } + } +} +``` + +The frontend renders a button that, when clicked, opens the appropriate modal (reusing existing modal components). + +### 10.4 Workflow Switch Suggestion + +For bulk editing, the AI suggests switching: + +```json +{ + "type": "workflow-switch", + "props": { + "label": "Switch to Review Tab", + "targetTab": "review", + "folderId": "...", + "quizId": "..." + } +} +``` + +Frontend renders a styled button that navigates to the workflow view at the correct tab. + +--- + +## 11. File Upload in Chat + +### 11.1 User Experience + +The `ChatInput` component supports: + +1. **Drag & Drop**: User drags files onto the chat input area + - Shows a visual drop zone overlay + - Accepts: PDF, DOCX, TXT, PPTX + - File is uploaded immediately via existing `materialsApi.uploadFile()` + +2. **URL Paste**: User pastes a URL in the text input + - Auto-detected via regex pattern + - Shown as a chip/pill above the input + - Sent as part of the message; backend calls `add_material_url` tool + +3. **Text Paste**: Long text is treated as text material + - If message exceeds a threshold (e.g., 500 chars), offer to save as text material + +### 11.2 Upload Flow + +``` +User drops file → ChatInput shows file preview chip +User sends message → Frontend uploads file via materialsApi.uploadFile() + → After upload success, sends chat message with materialId reference + → Backend LLM acknowledges upload, continues workflow + → useChatActions dispatches addMaterialLocally() +``` + +--- + +## 12. 
Error Handling + +### 12.1 LLM Errors +- Timeout: After 60s with no response, show retry button in chat +- Rate limit: Show "Please wait" with cooldown timer +- Invalid tool call: Backend catches, returns error result to LLM, LLM self-corrects + +### 12.2 Tool Execution Errors +- Backend wraps each tool execution in try/catch +- Error result returned to LLM as tool result with `status: 'error'` +- LLM acknowledges the error and suggests alternatives +- Frontend shows error as a distinct message style (red border) + +### 12.3 SSE Connection Errors +- Reuse `useSSE` reconnection pattern (exponential backoff, max 5 attempts) +- On permanent failure, show "Connection lost" with manual reconnect button +- Messages already received are preserved in Redux state + +--- + +## 13. Implementation Phases + +### Phase 1: Foundation (Core Chat Loop) +**Goal**: User can chat with AI, AI can call tools, basic text conversation works. + +**Backend:** +- [ ] `Conversation` model +- [ ] `chatController.js` — CRUD + SSE message endpoint +- [ ] `chatOrchestrationService.js` — OpenAI integration with tool calling loop +- [ ] `chatToolDefinitions.js` — All tool schemas +- [ ] `chatToolExecutor.js` — Execute `create_folder`, `list_folders`, `create_quiz` only +- [ ] `chatSystemPrompt.js` — System prompt +- [ ] Mount in `createRoutes.js` + +**Frontend:** +- [ ] `chat.ts` types +- [ ] `chatSlice.ts` — State management +- [ ] `chatApi.ts` — API calls +- [ ] `useChatStream.ts` — SSE hook +- [ ] `ChatMode.tsx` — Main container +- [ ] `ChatMessageList.tsx` + `ChatMessage.tsx` — Message rendering +- [ ] `ChatInput.tsx` — Text input (no file upload yet) +- [ ] `ChatToolResult.tsx` — Basic tool result cards +- [ ] Toggle button in `Header.tsx` +- [ ] Wire up in `App.tsx` + +**Sync:** +- [ ] `useChatActions.ts` — Dispatch for create_folder, create_quiz + +### Phase 2: Full Workflow (Materials + Objectives + Questions) +**Goal**: Complete workflow through chat with inline UI and state sync. 
+ +**Backend:** +- [ ] Implement remaining tools in `chatToolExecutor.js`: materials, objectives, plan, questions +- [ ] Handle `generate_questions` streaming within chat SSE (nested streaming) +- [ ] Inline UI metadata generation in orchestration service + +**Frontend:** +- [ ] `ChatInlineAction.tsx` — Checkbox lists, button groups, confirm +- [ ] `ChatInput.tsx` — Add file drag & drop, URL paste detection +- [ ] `useChatActions.ts` — All remaining Redux sync mappings +- [ ] `ChatSidebar.tsx` — Conversation history +- [ ] Chat → Workflow navigation sync (switch back shows correct state) + +### Phase 3: Polish & Export +**Goal**: Export support, modal escalation, edge cases. + +**Backend:** +- [ ] Implement export tools in executor +- [ ] Conversation title auto-generation (LLM summarize) +- [ ] Conversation archiving / cleanup + +**Frontend:** +- [ ] Modal escalation (question editing modal triggered from chat) +- [ ] Workflow switch suggestion button +- [ ] `ChatMode.css` — Animations, responsive design +- [ ] Question preview inline component +- [ ] Plan summary inline component +- [ ] Error states and retry UI +- [ ] Loading skeletons during streaming +- [ ] Mobile responsive chat layout + +--- + +## 14. 
Testing Strategy + +### 14.1 Backend Tests + +| Test File | Type | Coverage | +|-----------|------|----------| +| `__tests__/unit/chatToolExecutor.test.js` | Unit | Tool execution strategies, error handling | +| `__tests__/unit/chatToolDefinitions.test.js` | Unit | Schema validation | +| `__tests__/integration/chat.test.js` | Integration | Full conversation flow, SSE events, persistence | + +### 14.2 Frontend Tests + +| Test File | Type | Coverage | +|-----------|------|----------| +| `src/components/chat/__tests__/ChatMessage.test.tsx` | Component | Renders text, tool results, inline UI | +| `src/components/chat/__tests__/ChatInput.test.tsx` | Component | Send message, file drop, URL detection | +| `src/components/chat/__tests__/ChatInlineAction.test.tsx` | Component | User interactions, selection callbacks | +| `src/hooks/__tests__/useChatStream.test.ts` | Hook | SSE parsing, reconnection, event mapping | +| `src/hooks/__tests__/useChatActions.test.ts` | Hook | Redux dispatch mapping correctness | + +--- + +## 15. Security Considerations + +- **Authentication**: Chat endpoints use existing `authenticateToken` middleware +- **Authorization**: Conversations are scoped to `user` field; all tool executions verify resource ownership +- **Input Sanitization**: User messages sanitized before LLM prompt injection + - Strip system-prompt-like patterns + - Limit message length (e.g., 4000 chars) +- **Rate Limiting**: Chat message endpoint rate-limited (e.g., 20 messages/minute) +- **Tool Execution**: All tools execute through existing service layer which already validates permissions +- **File Upload**: Reuses existing Multer validation (file type, size limits) + +--- + +## 16. Performance Considerations + +- **Conversation History Truncation**: Only send last N messages (e.g., 20) + system prompt to LLM to stay within token limits. Older messages summarized. 
+- **SSE Keep-Alive**: Heartbeat every 15s to prevent proxy timeouts +- **Lazy Loading**: Chat components code-split with `React.lazy()` — only loaded when Chat Mode activated +- **Message Pagination**: Load older messages on scroll-up (not all at once) +- **Debounced Input**: URL detection regex runs on debounced input (300ms) + +--- + +## Appendix A: File Inventory + +### New Files (19 files) + +``` +# Frontend (13 files) +src/types/chat.ts ~100 lines +src/store/slices/chatSlice.ts ~250 lines +src/services/chatApi.ts ~80 lines +src/hooks/useChatStream.ts ~180 lines +src/hooks/useChatActions.ts ~150 lines +src/components/chat/ChatMode.tsx ~180 lines +src/components/chat/ChatSidebar.tsx ~120 lines +src/components/chat/ChatMessageList.tsx ~100 lines +src/components/chat/ChatMessage.tsx ~150 lines +src/components/chat/ChatToolResult.tsx ~200 lines +src/components/chat/ChatInlineAction.tsx ~200 lines +src/components/chat/ChatInput.tsx ~200 lines +src/styles/components/chat/ChatMode.css ~150 lines + +# Backend (6 files) +routes/create/models/Conversation.js ~120 lines +routes/create/controllers/chatController.js ~200 lines +routes/create/services/chatOrchestrationService.js ~280 lines +routes/create/services/chatToolDefinitions.js ~200 lines +routes/create/services/chatToolExecutor.js ~250 lines +routes/create/config/chatSystemPrompt.js ~80 lines +``` + +### Modified Files (4 files) + +``` +src/components/Header.tsx +20 lines (toggle button) +src/store/index.ts +3 lines (add chat reducer) +src/App.tsx +15 lines (mode routing) +routes/create/createRoutes.js +3 lines (mount chat controller) +``` + +**Total estimated new code**: ~3,190 lines across 19 new files (avg ~168 lines/file) +**Max file size**: 280 lines (chatOrchestrationService.js) diff --git a/docs/feature-ideas.md b/docs/feature-ideas.md new file mode 100644 index 0000000..2dd3b47 --- /dev/null +++ b/docs/feature-ideas.md @@ -0,0 +1,348 @@ +# TLEF-CREATE Feature Ideas & Product Roadmap + +> Brainstorm date: 
2026-02-26 +> Status: Ideation — prioritize and spec out individually before implementation + +--- + +## Current Product Snapshot + +| Capability | Status | +|---|---| +| 8 question types (MC, TF, flashcard, matching, ordering, cloze, summary, discussion) | Done | +| AI generation from materials (RAG + LLM) | Done | +| H5P export + real H5P preview | Done | +| PDF export (questions / answers / combined) | Done | +| Chat Mode (AI-guided conversational workflow) | Spec'd | +| LMS integration | None | +| Quiz import | None | +| Collaboration | None | +| Analytics | None | +| Question bank | None | + +--- + +## Tier 1 — High Impact, Core Differentiators + +### 1.1 Quiz Import & Convert + +**Pain point**: Educators have years of existing quizzes in Word, PDF, Google Forms, Canvas, Quizlet. Asking them to recreate from scratch is a non-starter. + +**Feature**: +- Drag in a Word/PDF exam → AI extracts question structure (type, stem, options, answer) +- Structured extraction via LLM (question text → JSON schema) → batch `Question.create()` +- Support formats: `.docx`, `.pdf`, `.txt`, plain paste +- Future: Quizlet CSV import, Canvas QTI XML import + +**Why it matters**: Turns the adoption pitch from "create new content" to "bring your existing content and make it better." Drastically lowers the barrier to first value. + +**Technical path**: Reuse material upload pipeline + new LLM prompt strategy for structured extraction. Output maps to existing Question model. + +**Effort estimate**: Medium (new prompt engineering + parsing layer, reuse existing upload/question infra) + +--- + +### 1.2 AI Quality Dashboard + +**Pain point**: Educators generate 20 questions but can't tell if they're pedagogically sound — Bloom's coverage? Difficulty balance? Ambiguous wording? 
+ +**Feature**: +``` +Quiz Quality Report +├── Bloom's Taxonomy Coverage (bar chart) +│ Remember ████████░░ (3) +│ Understand ██████████ (4) +│ Apply ██████░░░░ (2) +│ Analyze ████░░░░░░ (1) +│ Evaluate ░░░░░░░░░░ (0) ⚠️ +│ Create ░░░░░░░░░░ (0) ⚠️ +├── Difficulty Distribution (easy/medium/hard pie chart) +├── Potential Issues +│ ⚠️ Q3: "All of the above" — weak distractor +│ ⚠️ Q7: Negative phrasing — may confuse +│ ⚠️ Q12: Only 2 distractors — add more +│ ✅ No duplicate concepts detected +├── Content Coverage Map (material sections vs questions) +└── [Fix Issues Automatically] button +``` + +**Why it matters**: Elevates the tool from "question generator" to "teaching design consultant." No competitor does this well. Makes instructors feel the tool understands pedagogy, not just content. + +**Technical path**: Bloom's keyword matching (client-side) + LLM deep review for ambiguity/distractor quality. New tab in Review & Edit or standalone page. + +**Effort estimate**: Medium (Bloom's analysis is lightweight; LLM review prompt + UI) + +--- + +### 1.3 Quiz Variants — Anti-Cheat Multi-Version + +**Pain point**: 3 sections of the same course need 3 different exams. Manually creating variants is tedious and error-prone. + +**Feature**: +- **Variant A**: Same questions, shuffled option order +- **Variant B**: AI generates equivalent questions (same LO, different wording/scenario/numbers) +- **Variant C**: Same LO, different question type (MC → TF → Cloze) +- Export as separate H5P packages or one randomized Question Set +- Side-by-side comparison view of variants + +**Why it matters**: Solves a universal pain point that AI is uniquely good at. "30 seconds to generate 3 versions of a midterm" is a genuine wow moment. + +**Technical path**: Variant A = shuffle (trivial). Variant B/C = new LLM prompt: "Given this question, generate an equivalent that tests the same concept with different context." Reuse existing `convertQuestionToH5P`. 
+ +**Effort estimate**: Low–Medium (prompt engineering + new "Variants" UI panel) + +--- + +## Tier 2 — Strong Value Add + +### 2.1 Cross-Quiz Question Bank + +**Pain point**: After 3 semesters, an instructor has generated 200+ questions across 6 quizzes. Finding and reusing good ones is impossible. + +**Feature**: +- All generated questions auto-indexed into a personal Question Bank +- Semantic search by topic, concept, LO text +- Filter by type, difficulty, course, date, usage count +- "Pull from Bank" when creating a new quiz — search and add existing questions +- Usage tracking: how many times each question has been used, in which quizzes + +**Why it matters**: Transforms the product from a "one-shot tool" into an "appreciating asset." The more you use it, the more valuable your library becomes. This is the strongest retention mechanism possible. + +**Technical path**: Already have Qdrant for RAG. Generate embeddings for each Question, store in a new `question-bank` collection. Frontend: new Question Bank page + "Add to Quiz" flow. + +**Effort estimate**: Medium (embedding pipeline + new search UI + "add to quiz" integration) + +--- + +### 2.2 LMS Direct Publish (Canvas / Moodle) + +**Pain point**: Export H5P → download → open Canvas → navigate to assignment → upload → configure → repeat for every edit. 5 minutes of tedium per quiz update. + +**Feature**: +- "Publish to Canvas" button alongside Export +- OAuth2 flow to connect Canvas account (one-time setup) +- Auto-create Canvas Assignment + upload H5P +- "Re-publish" after edits — one-click update +- Future: Moodle support via similar REST API + +**Why it matters**: Eliminates the last-mile friction that makes instructors dread updating quizzes. Turns a 5-minute manual process into 1 second. + +**Technical path**: Canvas REST API supports file upload + assignment creation. New `canvasIntegrationService.js` backend + OAuth flow + "Publish" button in export section. 
+ +**Effort estimate**: High (OAuth integration, Canvas API, error handling, token management) + +--- + +### 2.3 Multimedia Source Support + +**Pain point**: A lot of teaching happens via lecture recordings and YouTube videos, not just PDFs and docs. + +**Feature**: +- Paste a YouTube URL → auto-transcribe via Whisper API +- Upload lecture recording (mp3/mp4) → transcribe → use as material +- Timestamps preserved: each generated question links back to the specific moment in the video +- "Watch the relevant clip" link on each question for student study + +**Why it matters**: Unlocks an entirely new content source that text-only tools can't touch. Particularly valuable for flipped classrooms. + +**Technical path**: Whisper API / yt-dlp for transcription. Transcripts feed into existing RAG pipeline. Store timestamp metadata alongside chunks. + +**Effort estimate**: Medium (transcription service + timestamp tracking) + +--- + +### 2.4 Study Guide Auto-Generation + +**Pain point**: Students want study materials, not just quizzes. Instructors don't have time to create both. + +**Feature**: +- "Generate Study Guide" button alongside Export +- AI creates a structured study document from the same materials: + - Key concepts per learning objective + - Summary paragraphs + - "Try this" practice prompts + - Links back to source material sections +- Export as PDF or Markdown +- Complements the quiz: "Here's what to study, and here's how to test yourself" + +**Why it matters**: Doubles the value of uploaded materials. Instructors get two outputs (quiz + study guide) from one input (materials). Students love it. + +**Technical path**: New LLM prompt chain using existing RAG context. New export format alongside H5P/PDF. 
+ +**Effort estimate**: Low–Medium (mostly prompt engineering + PDF template) + +--- + +## Tier 3 — Innovative / Exploratory + +### 3.1 Smart Distractor Generation + +**Feature**: When generating MC questions, AI specifically crafts distractors based on **common student misconceptions** rather than random wrong answers. + +- Analyzes the topic to identify typical mistakes students make +- Generates distractors that test real understanding vs surface recall +- Tags each distractor with the misconception it targets +- "This student chose B — they likely confused X with Y" + +**Why it matters**: The quality of distractors is what separates a good MC question from a trivial one. This makes auto-generated questions rival hand-crafted ones. + +--- + +### 3.2 Content Coverage Map + +**Feature**: Visual heatmap showing which parts of the uploaded material are covered by questions and which are gaps. + +``` +Chapter 1: Introduction ████████████ (8 questions) +Chapter 2: Core Concepts █████████░░ (6 questions) +Chapter 3: Applications ███░░░░░░░░ (2 questions) ⚠️ Under-covered +Chapter 4: Case Studies ░░░░░░░░░░░ (0 questions) ⚠️ No coverage +``` + +- Click on a gap → "Generate 3 questions for Chapter 4" +- Ensures comprehensive assessment + +**Why it matters**: Addresses a real concern: "Did I cover everything?" Currently requires manual cross-referencing. + +--- + +### 3.3 Collaborative Quiz Authoring + +**Feature**: Invite TAs or co-instructors to collaboratively build a quiz. + +- Share a quiz with edit permissions via email/link +- Real-time presence indicators (who's editing what) +- Comment/suggest mode: TA suggests a question change, instructor approves +- Activity log: who added/edited/deleted what + +**Why it matters**: Large courses have 5-10 TAs who all contribute to assessments. Currently no tool bridges the gap between "solo authoring" and "LMS quiz editor." 
+ +**Effort**: High (auth model changes, real-time sync, permission system) + +--- + +### 3.4 Spaced Repetition Export (Anki) + +**Feature**: Export flashcards and Q&A pairs as Anki decks (.apkg format). + +- Auto-convert flashcard and MC questions into Anki cards +- Include difficulty tags for Anki's algorithm +- Students import into Anki for long-term retention study + +**Why it matters**: Anki has millions of active users. Being the bridge between "instructor creates content" and "student studies with spaced repetition" is a unique value chain. + +--- + +### 3.5 Multi-Language Quiz Generation + +**Feature**: Generate quizzes in multiple languages from the same source material. + +- "Translate this quiz to French" → AI translates all questions, options, feedback +- Maintains correct answers and question structure +- Side-by-side bilingual preview +- Useful for multilingual programs + +--- + +### 3.6 Rubric Auto-Generation + +**Feature**: For discussion and essay questions, auto-generate grading rubrics. + +- AI creates criteria, performance levels, and point allocations +- Aligned to the learning objective +- Export as PDF rubric alongside the quiz +- Rubric preview in the Review tab + +--- + +### 3.7 Quiz Difficulty Simulation + +**Feature**: Before deploying, simulate how a class of N students might perform. + +- AI estimates expected score distribution based on question characteristics +- Identifies questions that are "too easy" (>95% predicted correct) or "too hard" (<20%) +- Suggests rebalancing: "Replace Q7 (predicted 98% correct) with a harder variant" + +--- + +### 3.8 Source Citation on Every Question + +**Feature**: Each generated question automatically links back to the exact passage/page in the source material. 
+ +- "This question was generated from Material X, page 3, paragraph 2" +- Click to highlight the source passage +- Useful for: verifying accuracy, student study references, academic integrity + +**Technical path**: RAG already retrieves relevant chunks with metadata. Store the chunk reference on each Question document. + +--- + +### 3.9 Voice-Based Quiz Creation + +**Feature**: "Create 5 multiple choice questions about photosynthesis, medium difficulty" + +- Voice input in Chat Mode +- Natural language commands for quick quiz creation +- Useful for instructors who think out loud + +--- + +### 3.10 Template Marketplace + +**Feature**: Community library of quiz templates. + +- "Intro to Psychology — Midterm Template (30 MC + 5 Essay)" +- Instructors can publish anonymized quiz structures +- Others can fork and customize with their own materials +- Rating system for templates + +--- + +## Priority Matrix + +| Feature | Impact | Effort | Differentiation | Priority | +|---|:---:|:---:|:---:|:---:| +| Quiz Import & Convert | High | Medium | Medium | **1** | +| AI Quality Dashboard | High | Medium | Very High | **2** | +| Quiz Variants | High | Low | High | **3** | +| Question Bank | High | Medium | Medium | **4** | +| Study Guide Generation | Medium | Low | High | **5** | +| Content Coverage Map | Medium | Low | High | **6** | +| Smart Distractors | Medium | Low | Very High | **7** | +| Source Citation | Medium | Low | Medium | **8** | +| Multimedia Sources | High | Medium | High | **9** | +| LMS Direct Publish | Very High | High | Medium | **10** | +| Rubric Generation | Medium | Low | Medium | **11** | +| Spaced Repetition / Anki | Medium | Medium | High | **12** | +| Multi-Language | Medium | Medium | Medium | **13** | +| Difficulty Simulation | Medium | Medium | Very High | **14** | +| Collaborative Authoring | High | Very High | Medium | **15** | +| Voice Creation | Low | Medium | Low | **16** | +| Template Marketplace | Medium | High | Medium | **17** | + +--- + 
+## Recommended Implementation Sequence + +### Phase A — Quick Wins (1-2 weeks each) +1. Quiz Variants (low effort, high wow factor) +2. Study Guide Generation (reuse existing RAG, new prompt) +3. Smart Distractors (enhance existing MC generation prompt) +4. Source Citation (store RAG chunk refs on Question model) + +### Phase B — Core Differentiators (2-4 weeks each) +5. Quiz Import & Convert (new parsing pipeline) +6. AI Quality Dashboard (Bloom's analysis + LLM review) +7. Content Coverage Map (visualization of RAG coverage) +8. Question Bank (Qdrant embedding + search UI) + +### Phase C — Platform Features (4-8 weeks each) +9. Multimedia Sources (Whisper integration) +10. LMS Direct Publish (Canvas OAuth + API) +11. Rubric Generation + Anki Export +12. Multi-Language Support + +### Phase D — Advanced (future) +13. Difficulty Simulation +14. Collaborative Authoring +15. Template Marketplace diff --git a/docs/h5p-market-analysis.md b/docs/h5p-market-analysis.md new file mode 100644 index 0000000..d3dd14a --- /dev/null +++ b/docs/h5p-market-analysis.md @@ -0,0 +1,354 @@ +# H5P Ecosystem — Market Analysis & Opportunity Map + +> Date: 2026-02-26 +> Purpose: Identify gaps in the H5P ecosystem that TLEF-CREATE can fill + +--- + +## 1. 
Current Market Players + +### 1.1 H5P.com (Official SaaS) — $690+/year + +| Feature | Details | +|---|---| +| Smart Import (AI) | Video/PDF/URL → auto-generate interactive content | +| Content types | 50+ (all official types) | +| Analytics | Drill-down reports (scores, answers, time) — **paid only** | +| LTI integration | SSO + gradebook passback — **paid only** | +| Language | English only fully supported | +| Limitations | Closed SaaS; no API for third-party tools; no pedagogical structure (no LO alignment, no Bloom's); Smart Import produces generic questions without curriculum mapping | + +> Source: [H5P Pricing](https://h5p.com/pricing), [Smart Import](https://campaigns.h5p.com/h5p-smart-import/) + +### 1.2 H5P.org (Open Source) — Free + +| Feature | Details | +|---|---| +| Hosting | Requires WordPress, Moodle, or Drupal plugin | +| Content types | Same 50+ types | +| AI features | None | +| Analytics | None (basic LMS gradebook only) | +| Limitations | No standalone editor; no Smart Import; depends on CMS/LMS admin access | + +> Source: [H5P.org](https://h5p.org/), [Self-Hosting H5P](https://isu.pressbooks.pub/openpedagogy/chapter/self-hosting-h5p/) + +### 1.3 Lumi Education — Free Desktop Editor + +| Feature | Details | +|---|---| +| Platform | Electron desktop app (Windows/Mac/Linux) | +| Offline | Full offline H5P authoring | +| Export | HTML, SCORM | +| AI | None | +| Limitations | H5P library versions outdated and hard to update; no analytics; no collaboration; project appears semi-maintained | + +> Source: [Lumi Education](https://lumi.education/en/), [GitHub Issues](https://github.com/Lumieducation/Lumi/issues/2647) + +### 1.4 AI H5P Generator (aih5pgenerator.online) — SaaS + +| Feature | Details | +|---|---| +| AI generation | Describe content → generate H5P | +| Content types | Crosswords, quizzes, dialog cards, drag words | +| Limitations | Limited content types; no material upload; no LO alignment; no pedagogical framework; basic quality | + +> 
Source: [AI H5P Generator](https://aih5pgenerator.online/) + +### 1.5 H5P-AI-Generator (GitHub, pascalkienast) — Open Source + +| Feature | Details | +|---|---| +| Platform | Next.js web app | +| AI | Conversational UI → generate H5P | +| Content types | MC, TF, Course Presentation, Interactive Book, Branching Scenario | +| Limitations | Experimental; no material-based generation; no RAG; no curriculum mapping | + +> Source: [GitHub](https://github.com/pascalkienast/H5P-AI-Generator) + +### 1.6 H5P Interactive Video Generator v2 (GitHub) — Niche Tool + +| Feature | Details | +|---|---| +| Platform | Streamlit app | +| AI | Groq AI, YouTube summary → MCQ at timestamps | +| Limitations | Requires manual summary input with timestamps; only MCQ; very basic | + +> Source: [GitHub](https://github.com/dgcruzing/H5P-Interactive-Video-Generator-v2) + +--- + +## 2. H5P Content Type Landscape + +H5P has ~50 content types. The most complex and high-value ones: + +| Content Type | Complexity | Authoring Pain | AI Generation Exists? | +|---|:---:|:---:|:---:| +| **Interactive Video** | Very High | Extreme (timestamp + question placement) | Barely (Streamlit hack) | +| **Branching Scenario** | Very High | Extreme (tree structure design) | No | +| **Course Presentation** | High | High (slide-by-slide + embedded quiz) | No | +| **Interactive Book** | High | High (multi-chapter + activities) | No | +| **Question Set** | Medium | Medium | Yes (TLEF-CREATE, Smart Import) | +| **Column** | Medium | Low | Yes (TLEF-CREATE, Smart Import) | +| Multiple Choice / TF / etc. | Low | Low | Yes (many tools) | +| Dialog Cards (Flashcards) | Low | Low | Yes (many tools) | + +**Key insight**: Everyone fights over the easy content types (MC, TF, flashcards). Nobody solves the hard ones (Interactive Video, Branching Scenario, Course Presentation). + +--- + +## 3. 
Identified Gaps — What Nobody Does Well + +### Gap 1: Interactive Video from Materials 🎯🎯🎯 + +**What exists**: H5P Interactive Video lets you embed MC, TF, fill-in-blank, drag-text questions at specific timestamps in a video. It's one of H5P's most popular content types. + +**What's missing**: No tool can take a lecture video + course materials → automatically identify key concepts at each timestamp → generate appropriate questions → output a complete H5P Interactive Video package. + +**The pain today**: An instructor manually: +1. Watches the entire lecture video +2. Notes timestamps where key concepts appear +3. Writes questions for each timestamp +4. Manually places them in H5P editor +5. This takes 2-4 hours for a 50-minute lecture + +**The opportunity**: "Paste a YouTube URL, upload your slides → get a complete Interactive Video H5P in 2 minutes." + +**Technical feasibility**: +- Whisper API for transcription with timestamps +- RAG: align transcript chunks with uploaded materials +- LLM: generate questions per timestamp region +- H5P Interactive Video JSON structure is well-documented +- TLEF-CREATE already has the question generation pipeline + +--- + +### Gap 2: Branching Scenario Generator 🎯🎯🎯 + +**What exists**: H5P Branching Scenario — a "choose your own adventure" content type where student choices lead to different paths. Extremely powerful for case-based learning (medical diagnosis, legal reasoning, ethical dilemmas). + +**What's missing**: Creating branching scenarios is the most tedious H5P authoring task. No tool generates them automatically. The tree structure requires: +- Defining nodes (content screens) +- Defining branches (choices at each node) +- Defining outcomes (scoring per path) +- Ensuring all paths are pedagogically valid + +**The pain today**: A nursing instructor wants to create a patient diagnosis scenario. They spend 6+ hours designing the tree in H5P editor, which has no preview-while-editing. 
+ +**The opportunity**: "Describe a case study scenario → AI generates a complete branching tree with 3-4 decision points, correct/incorrect paths, and feedback at each node." + +**Technical feasibility**: +- LLM is excellent at generating narrative trees +- H5P Branching Scenario JSON schema is structured (nodes + edges) +- Can constrain tree depth/breadth via prompt +- Output as .h5p package using existing packaging code + +--- + +### Gap 3: Course Presentation / Interactive Book Generator 🎯🎯 + +**What exists**: Course Presentation (like PowerPoint with embedded quizzes) and Interactive Book (multi-chapter textbook with activities). These are H5P's "premium" content types. + +**What's missing**: No tool generates these from materials. Smart Import can create basic content but without curriculum structure. + +**The opportunity**: "Upload lecture slides (PDF/PPTX) → auto-generate an H5P Course Presentation where each slide has its original content PLUS 1-2 embedded quiz questions." + +**Technical feasibility**: +- Parse PPTX slides (existing libraries: python-pptx / pptx2json) +- Each slide → Course Presentation slide + AI-generated check-question +- Course Presentation H5P JSON is well-structured + +--- + +### Gap 4: xAPI Analytics Dashboard (Free Alternative to H5P.com) 🎯🎯 + +**What exists**: H5P emits xAPI statements (scores, answers, time, completion). H5P.com offers drill-down analytics for $690/year. Self-hosted users get nothing. + +**What's missing**: A free/open-source analytics dashboard for H5P xAPI data. Learning Locker is a general LRS — not H5P-specific, and complex to set up. + +**The opportunity**: "Deploy our lightweight xAPI receiver → get a beautiful H5P analytics dashboard showing per-question performance, student struggle points, time-on-task, and difficulty calibration — for free." + +**Why this matters**: This directly undercuts H5P.com's paid analytics. Every Moodle/WordPress admin running self-hosted H5P would want this. 
+ +**Technical feasibility**: +- xAPI receiver = simple Express endpoint that stores statements +- Dashboard = React charts (already have the frontend stack) +- H5P xAPI schema is standardized and well-documented + +> Source: [H5P xAPI docs](https://h5p.org/documentation/x-api), [xAPI + H5P](https://xapi.com.au/how-to-capture-xapi-statements-from-h5p-in-moodle/) + +--- + +### Gap 5: H5P Content Transformation Engine 🎯 + +**What's missing**: No tool converts between H5P content types. Examples: +- Quiz (Question Set) → Course Presentation (one question per slide) +- Flashcards (Dialog Cards) → Branching Scenario (card → node) +- Question Set → Interactive Book (one chapter per LO) +- Any quiz → Interactive Video overlay (if video URL provided) + +**The opportunity**: "You already have 20 quiz questions. Click 'Transform' → get them as a Course Presentation, Interactive Book, or Interactive Video." + +**Technical feasibility**: JSON-to-JSON transformation. Both source and target H5P schemas are known. This is pure mapping logic. + +--- + +### Gap 6: Pedagogically-Aware H5P Generation 🎯 + +**What every competitor lacks**: All existing AI-to-H5P tools generate content **without pedagogical structure**: +- No learning objective alignment +- No Bloom's taxonomy awareness +- No difficulty calibration +- No content coverage mapping +- No question quality analysis + +**TLEF-CREATE already has this** (LO pipeline, difficulty settings, pedagogical approach selection). This is the core differentiator — but it's invisible to users outside UBC. + +**The opportunity**: Position as "the only H5P tool that understands curriculum design, not just content generation." + +--- + +## 4. 
Competitive Positioning Map + +``` + Pedagogical Intelligence + ↑ + │ + │ ★ TLEF-CREATE + │ (LOs, Bloom's, difficulty, + │ RAG from materials) + │ + │ H5P.com Smart Import + │ (AI but no pedagogy) + │ + Simple ────────────┼──────────────── Complex + Content Types │ Content Types + (MC, TF, Flash) │ (Interactive Video, + │ Branching, Course Pres) + │ + Lumi │ + (manual only) │ + │ + AI H5P Gen │ + (basic AI) │ + │ + ↓ + No Pedagogy +``` + +**TLEF-CREATE sits in the top-left quadrant**: strong pedagogy, but limited to simpler content types (quiz/column). The biggest opportunity is to **move right** — support complex H5P content types while keeping the pedagogical advantage. + +--- + +## 5. Strategic Recommendations + +### Priority 1: Interactive Video Generator + +**Why #1**: +- Highest pain point (hours of manual work per video) +- Video-based learning is the dominant modality +- YouTube is the #1 educational resource +- No real competitor +- Technical path is clear (Whisper + existing RAG + existing question gen) + +**MVP scope**: +1. User pastes YouTube URL +2. Backend transcribes with Whisper (with timestamps) +3. Transcript feeds into existing RAG pipeline +4. LLM generates questions per ~5-minute segment +5. Output as H5P Interactive Video .h5p package +6. User can preview and adjust question placement + +**Positioning**: "Turn any lecture video into an interactive H5P lesson in 2 minutes." + +--- + +### Priority 2: Branching Scenario Generator + +**Why #2**: +- Zero competition (nobody does this) +- Extremely high-value for professional education (nursing, medicine, law, business) +- H5P Branching Scenario is under-used precisely because it's too hard to author +- Universities would pay for this specifically + +**MVP scope**: +1. User describes a scenario or uploads a case study +2. LLM generates a branching tree (3-4 decision points, 2-3 options each) +3. Each node: narrative text + optional embedded question +4. Each ending: score + feedback +5. 
Output as H5P Branching Scenario .h5p package +6. Visual tree editor for adjustment + +**Positioning**: "AI-powered case-based learning. Describe a scenario, get a Branching Scenario." + +--- + +### Priority 3: Content Type Transformation + +**Why #3**: +- Low effort (JSON → JSON mapping, no LLM needed for basic transforms) +- Multiplies the value of every question already generated +- Users create quiz once → get 4 different H5P formats +- Unique feature nobody else offers + +**MVP scope**: +- Question Set → Course Presentation (1 question per slide) +- Dialog Cards → standalone Flashcard H5P +- Question Set + Video URL → Interactive Video scaffold +- Any content → Interactive Book (1 chapter per LO) + +**Positioning**: "One quiz, every H5P format." + +--- + +### Priority 4: xAPI Analytics Dashboard + +**Why #4**: +- Undercuts H5P.com's $690/year analytics offering +- Attracts the entire self-hosted H5P community +- Positions TLEF-CREATE as infrastructure, not just a generator +- Creates ongoing engagement (users return to check analytics) + +**MVP scope**: +- xAPI statement receiver endpoint +- Per-question analytics: % correct, avg time, score distribution +- Per-student view: progress, struggle points +- Export analytics as CSV/PDF + +**Positioning**: "Free H5P analytics. No H5P.com subscription needed." + +--- + +## 6. What TLEF-CREATE Already Has That Others Don't + +| Advantage | Detail | +|---|---| +| Learning Objective pipeline | Material → LO → Questions (curriculum-aligned) | +| Pedagogical approach selection | Support / Challenge / Balanced | +| RAG-based generation | Questions grounded in actual course materials | +| 8 question types | Broader than most AI H5P tools | +| Real H5P preview | In-browser rendering matching LMS output | +| Question quality metadata | Difficulty, type, LO tagging | + +**This is the moat.** Competitors generate "quiz questions from text." TLEF-CREATE generates "curriculum-aligned assessments from course materials." 
The positioning should make this distinction loud and clear. + +--- + +## 7. Summary: Where to Go Next + +``` +Today: Materials → LOs → Quiz Questions → H5P Quiz/Column export + (8 question types, strong pedagogy, limited H5P output types) + +Next: Materials → LOs → Quiz Questions ─┬→ H5P Quiz (existing) + ├→ H5P Interactive Video (new) + + Video URL ───────────────────────┤ + ├→ H5P Branching Scenario (new) + + Case Study ──────────────────────┤ + ├→ H5P Course Presentation (new) + ├→ H5P Interactive Book (new) + └→ Transform between any ↑ (new) + + + xAPI Analytics ← Student performance data (new) +``` + +The strategic move is: **expand H5P output types while keeping the pedagogical intelligence that no competitor has.** diff --git a/docs/image-hotspot-architecture.md b/docs/image-hotspot-architecture.md new file mode 100644 index 0000000..1bdfc1a --- /dev/null +++ b/docs/image-hotspot-architecture.md @@ -0,0 +1,398 @@ +# Image Hotspot Question Generator — Technical Architecture + +> Date: 2026-02-26 +> Goal: Upload an image → AI identifies key objects → auto-generates H5P hotspot-based questions + +--- + +## 1. The Problem + +Instructor has an image (anatomy diagram, map, circuit, artwork, lab photo). +They want to create interactive questions like: +- "Click on the mitochondria" (Find the Hotspot) +- "What is this organ?" with a hotspot revealing the answer (Image Hotspots) +- "Drag the labels to the correct positions" (Drag and Drop) + +**Manual process**: Open H5P editor → eyeball coordinates → draw hotspot rectangles → type questions → repeat 10x. Takes 30-60 minutes per image. + +**Goal**: Upload image → AI does it in 30 seconds. + +--- + +## 2. 
Technical Challenge: Precise Object Localization + +| Approach | Identifies Objects | Coordinate Precision | Understands Context | +|---|:---:|:---:|:---:| +| GPT-4o Vision | ✅ Excellent | ❌ ~10-20% off | ✅ Excellent | +| Gemini 2.5 | ✅ Excellent | ⚠️ Normalized 0-1000, occasional misses | ✅ Excellent | +| Grounding DINO | ✅ (from text prompt) | ✅ Precise bounding boxes | ❌ No context | +| Florence-2 | ✅ Caption + grounding | ✅ Good bounding boxes | ⚠️ Limited | +| SAM (Segment Anything) | ❌ (needs prompt) | ✅✅ Pixel-level masks | ❌ None | + +**Key insight**: No single model does both well. The solution is a **two-stage pipeline**. + +--- + +## 3. Recommended Architecture: Two-Stage Pipeline + +``` + Stage 1: UNDERSTAND Stage 2: LOCATE + (Vision LLM) (Grounding Model) + + Image ──────────► GPT-4o / Gemini / Claude ─────────► Grounding DINO + │ │ + ├─ "What objects are in ├─ "Find: mitochondria" + │ this image?" │ → bbox [0.23, 0.45, 0.38, 0.62] + │ │ + ├─ "Which are educationally ├─ "Find: cell membrane" + │ important?" │ → bbox [0.01, 0.02, 0.99, 0.98] + │ │ + ├─ Generate questions: ├─ "Find: nucleus" + │ Q1: "Click on the │ → bbox [0.40, 0.35, 0.60, 0.55] + │ mitochondria" │ + │ Q2: "What organelle │ + │ produces ATP?" │ + │ │ + ▼ ▼ + Object List + Precise Coordinates + Questions (bounding boxes) + │ │ + └──────────── MERGE ─────────────────┘ + │ + ▼ + H5P Hotspot Content + (questions + coordinates) +``` + +### Stage 1: Vision LLM — "What's important in this image?" + +**Input**: Image + (optional) topic context from uploaded materials + +**Prompt**: +``` +You are an educational content analyzer. Given this image from a +[Biology / Geography / Art History] course: + +1. Identify all notable objects, regions, or elements in the image +2. 
For each object, provide: + - name: the object name (e.g., "mitochondria") + - description: brief educational description + - question: a quiz question targeting this object + - question_type: "find_hotspot" | "label" | "info" + - difficulty: "easy" | "medium" | "hard" +3. Rank by educational importance + +Return as JSON array. +``` + +**Output**: +```json +[ + { + "name": "mitochondria", + "description": "Powerhouse of the cell, produces ATP through cellular respiration", + "question": "Click on the organelle responsible for ATP production", + "question_type": "find_hotspot", + "difficulty": "medium" + }, + { + "name": "nucleus", + "description": "Contains genetic material (DNA) and controls cell activities", + "question": "Identify the structure that contains the cell's DNA", + "question_type": "find_hotspot", + "difficulty": "easy" + } +] +``` + +**Why Vision LLM here**: It understands educational context, can prioritize what's worth quizzing, and generates pedagogically sound questions. Coordinate precision doesn't matter at this stage. + +### Stage 2: Grounding Model — "Where exactly is each object?" 
+ +For each object identified in Stage 1, query a grounding model: + +**Option A: Grounding DINO via Roboflow API** (recommended) +```javascript +// For each identified object: +const response = await fetch('https://detect.roboflow.com/grounding-dino/1', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + api_key: ROBOFLOW_API_KEY, + image: base64Image, + text: 'mitochondria', // from Stage 1 + confidence: 0.3 + }) +}); + +// Returns: +{ + "predictions": [{ + "x": 245, "y": 380, // center point + "width": 120, "height": 140, // bbox dimensions + "confidence": 0.87, + "class": "mitochondria" + }] +} +``` + +**Option B: Gemini 2.5 native bounding boxes** +```javascript +const result = await gemini.generateContent({ + contents: [{ + parts: [ + { inlineData: { mimeType: 'image/png', data: base64Image } }, + { text: 'Return a bounding box for: mitochondria. Format: [y_min, x_min, y_max, x_max] normalized 0-1000.' } + ] + }] +}); +// Returns: [450, 230, 620, 380] +// Convert: divide by 1000 → [0.45, 0.23, 0.62, 0.38] +``` + +**Option C: Replicate hosted Grounding DINO** +```javascript +const output = await replicate.run('adirik/grounding-dino', { + input: { + image: imageUrl, + text_prompt: 'mitochondria', + box_threshold: 0.3 + } +}); +``` + +### Stage 3: Merge & Generate H5P + +Combine Stage 1 (questions + context) with Stage 2 (coordinates) → H5P content JSON. + +--- + +## 4. Coordinate Conversion to H5P + +H5P hotspot coordinates are **percentages relative to image dimensions** (0-100%). + +```javascript +function bboxToH5PHotspot(bbox, imageWidth, imageHeight) { + // bbox from Grounding DINO: { x, y, width, height } in pixels + // H5P wants: { x: %, y: %, width: %, height: % } as percentages + return { + x: ((bbox.x - bbox.width / 2) / imageWidth) * 100, + y: ((bbox.y - bbox.height / 2) / imageHeight) * 100, + width: (bbox.width / imageWidth) * 100, + height: (bbox.height / imageHeight) * 100 + }; +} +``` + +--- + +## 5. 
Target H5P Content Types + +### 5.1 Find the Hotspot (H5P.ImageHotspotQuestion) + +**Use case**: "Click on the correct area" + +```json +{ + "library": "H5P.ImageHotspotQuestion 1.8", + "params": { + "imageHotspotQuestion": { + "backgroundImageSettings": { + "backgroundImage": { + "path": "images/cell-diagram.png", + "width": 800, + "height": 600 + } + }, + "hotspotSettings": { + "hotspot": [ + { + "userDefined": true, + "computedSettings": { + "x": 23.5, + "y": 45.2, + "width": 15.0, + "height": 17.0, + "figure": "rectangle" + }, + "feedbackText": "Correct! This is the mitochondria." + } + ], + "showFeedback": true, + "taskDescription": "Click on the organelle that produces ATP." + } + } + } +} +``` + +### 5.2 Image Hotspots (H5P.ImageHotspots) — Informational + +**Use case**: Click hotspots to learn about each part + +```json +{ + "library": "H5P.ImageHotspots 1.10", + "params": { + "image": { "path": "images/cell-diagram.png" }, + "hotspots": [ + { + "position": { "x": 31.0, "y": 53.6 }, + "header": "Mitochondria", + "content": [{ + "library": "H5P.Text 1.1", + "params": { "text": "
<p>The powerhouse of the cell...</p>
" } + }] + } + ] + } +} +``` + +### 5.3 Drag and Drop (H5P.DragQuestion) — Label Placement + +**Use case**: Drag labels to correct positions on the image + +--- + +## 6. Practical Recommendations + +### Which grounding approach to use? + +| Scenario | Recommended Approach | Why | +|---|---|---| +| **Diagrams with text labels** (anatomy charts, circuit diagrams) | Gemini 2.5 only (single-stage) | Text labels give strong grounding signals; Gemini handles well | +| **Photos of real objects** (lab equipment, specimens) | Grounding DINO via Roboflow | Real-world objects need specialized detection | +| **Maps / Geography** | Gemini 2.5 + manual adjustment | Named regions are conceptual, not visual objects | +| **Artwork / Art History** | GPT-4o Stage 1 + Gemini Stage 2 | Needs both artistic understanding and spatial grounding | +| **Any image + high precision needed** | Grounding DINO (best precision) | Production-grade bounding boxes | + +### Cost per image + +| Service | Cost | Latency | +|---|---|---| +| GPT-4o (Stage 1) | ~$0.01-0.03 per image | ~3-5s | +| Gemini 2.5 (combined) | ~$0.005-0.02 per image | ~2-4s | +| Grounding DINO via Roboflow | Free tier: 10k/month | ~1-2s | +| Grounding DINO via Replicate | ~$0.002 per prediction | ~2-3s | + +### Fallback: Manual Adjustment UI + +Even with the best AI, some hotspots will need tweaking. Provide a simple adjustment UI: + +``` +┌──────────────────────────────────────────────────┐ +│ 🖼️ [Cell Diagram Image] │ +│ │ +│ ┌──────┐ │ +│ │ mito │ ← AI-placed hotspot (draggable) │ +│ └──────┘ │ +│ ┌────────┐ │ +│ │nucleus │ ← drag to adjust │ +│ └────────┘ │ +│ │ +│ Detected Objects: │ +│ ✅ mitochondria (87% confidence) [Adjust] │ +│ ✅ nucleus (94% confidence) [Adjust] │ +│ ⚠️ ribosome (42% confidence) [Adjust][Remove]│ +│ ➕ [Add Manual Hotspot] │ +│ │ +│ Questions: │ +│ Q1: "Click on the organelle that produces ATP" │ +│ Q2: "Identify the structure containing DNA" │ +│ Q3: "Where are proteins synthesized?" 
│ +│ │ +│ [Generate H5P] [Preview] │ +└──────────────────────────────────────────────────┘ +``` + +--- + +## 7. Implementation Plan + +### Phase 1: Single-stage (Gemini only) — Fastest to ship + +1. User uploads image + provides topic/context +2. Send to Gemini 2.5 with combined prompt: + - Identify objects + return bounding boxes (0-1000 normalized) + - Generate questions for each object +3. Convert coordinates to H5P percentage format +4. Generate H5P.ImageHotspotQuestion content +5. Preview + manual adjustment UI +6. Export as .h5p + +**Effort**: ~1-2 weeks +**Precision**: Good enough for labeled diagrams, ~70-80% accuracy on photos + +### Phase 2: Two-stage pipeline — Production quality + +1. Add Grounding DINO via Roboflow API as Stage 2 +2. Vision LLM identifies objects → Grounding DINO locates them precisely +3. Confidence-based filtering (hide low-confidence detections) +4. Support all 3 H5P types (Hotspot Question, Image Hotspots, Drag and Drop) + +**Effort**: +1 week on top of Phase 1 +**Precision**: ~90%+ accuracy + +### Phase 3: Interactive editing — Polish + +1. Drag-to-adjust hotspot positions on canvas +2. Add/remove hotspots manually +3. Resize hotspot regions +4. Re-generate questions for adjusted hotspots + +--- + +## 8. Example: End-to-End Flow + +``` +Instructor uploads: human-heart-diagram.png +Provides context: "Cardiovascular system, Biology 101" + + ┌─ Stage 1 (GPT-4o) ──────────────────────────────────────┐ + │ Identified 6 objects: │ + │ 1. Left ventricle — pumps oxygenated blood │ + │ 2. Right atrium — receives deoxygenated blood │ + │ 3. Aorta — largest artery │ + │ 4. Pulmonary artery — carries blood to lungs │ + │ 5. Superior vena cava — returns blood from upper body │ + │ 6. 
Mitral valve — between left atrium and ventricle │ + │ │ + │ Generated questions: │ + │ Q1: "Click on the chamber that pumps blood to the body" │ + │ Q2: "Find the vessel that carries blood to the lungs" │ + │ Q3: "Identify the valve between the left chambers" │ + └───────────────────────────────────────────────────────────┘ + │ + ▼ + ┌─ Stage 2 (Grounding DINO) ──────────────────────────────┐ + │ "left ventricle" → bbox [320, 280, 420, 380] (94%) │ + │ "right atrium" → bbox [180, 120, 260, 200] (91%) │ + │ "aorta" → bbox [280, 60, 340, 160] (88%) │ + │ "pulmonary artery" → bbox [220, 80, 300, 140] (85%) │ + │ "superior vena cava" → bbox [160, 40, 200, 150] (82%) │ + │ "mitral valve" → bbox [300, 240, 340, 270] (79%) │ + └──────────────────────────────────────────────────────────┘ + │ + ▼ + ┌─ Output ─────────────────────────────────────────────────┐ + │ H5P.ImageHotspotQuestion with: │ + │ - Background: human-heart-diagram.png │ + │ - 3 questions, each with correct hotspot zone │ + │ - Feedback text per hotspot │ + │ - Additional info hotspots for learning │ + └──────────────────────────────────────────────────────────┘ +``` + +--- + +## References + +- [Grounding DINO — Roboflow](https://roboflow.com/model/grounding-dino) +- [Grounding DINO — Replicate API](https://replicate.com/adirik/grounding-dino) +- [Florence-2 Overview](https://medium.com/data-science/florence-2-mastering-multiple-vision-tasks-with-a-single-vlm-model-435d251976d0) +- [Gemini Bounding Boxes](https://ai.google.dev/gemini-api/docs/image-understanding) +- [GPT-4o Localization Limitations](https://community.openai.com/t/gpt-4o-model-image-coordinate-recognition/907625) +- [H5P Image Hotspot Question](https://h5p.org/image-hotspot-question) +- [H5P Image Hotspots](https://h5p.org/image-hotspots) +- [H5P Image Hotspots semantics.json](https://github.com/h5p/h5p-image-hotspots/blob/master/semantics.json) diff --git a/docs/product-brainstorm.md b/docs/product-brainstorm.md new file mode 100644 index 
0000000..23c9d22 --- /dev/null +++ b/docs/product-brainstorm.md @@ -0,0 +1,496 @@ +# Product Brainstorm — PM Perspective + +> Date: 2026-02-26 +> Lens: User journeys, habit loops, moats, expansion, positioning + +--- + +## Part 1: Re-thinking the Core Product Loop + +Current loop: +``` +Upload Materials → Generate LOs → Generate Questions → Export H5P +``` + +This is a **one-shot linear pipeline**. User comes in, gets output, leaves. No reason to return until next semester. + +The question isn't "what features to add" — it's **"how do we make this a tool people live in, not visit once?"** + +--- + +## Part 2: Product Experience Ideas + +### 2.1 "Generate from Syllabus" — The Ultimate Onboarding + +**Insight**: The first 5 minutes determine if a user stays. Currently: create folder → upload materials → wait → set LOs → wait → generate. Too many steps before value. + +**Experience**: +``` +Instructor uploads their course syllabus (1 PDF) + → AI parses it: 13 weeks, topics per week, textbook references + → Auto-generates a semester plan: + + ┌─────────────────────────────────────────────────┐ + │ CPSC 110 — Fall 2026 Assessment Plan │ + │ │ + │ Week 1: Intro to Programming │ + │ 📄 Suggested materials: Ch. 1-2 │ + │ 🎯 3 Learning Objectives (auto-generated) │ + │ 📝 Quiz: 5 MC + 2 TF (draft ready) │ + │ │ + │ Week 2: Data Types & Variables │ + │ 📄 Suggested materials: Ch. 3 │ + │ 🎯 4 Learning Objectives │ + │ 📝 Quiz: 4 MC + 2 Cloze + 1 Ordering │ + │ │ + │ ... (13 weeks) │ + │ │ + │ [Upload Materials & Generate All] [Customize] │ + └─────────────────────────────────────────────────┘ +``` + +**Why this matters**: +- Time-to-value: 2 minutes to see an entire semester's assessment structure +- Instructor thinks "this tool gets me" before writing a single question +- Even without uploading materials, they see the *potential* +- Converts "I'll try this later" into "let me fill in week 1 right now" + +**The hook**: "Upload your syllabus. Get a semester of assessments." 
+ +--- + +### 2.2 Micro-Assessment Mode — Daily 3-Question Pulses + +**Insight**: Research shows spaced retrieval practice (frequent low-stakes quizzes) is far more effective than one big exam. But creating daily mini-quizzes is impractical. + +**Experience**: +``` +Instructor uploads this week's readings + → Instead of one 20-question quiz: + → AI generates 5 daily micro-quizzes (Mon-Fri), 3 questions each + → Each day's quiz targets different LOs with spacing built in + → Auto-exports as 5 separate H5P packages + + Mon: Q1 (LO1, easy), Q2 (LO2, easy), Q3 (LO3, easy) + Tue: Q1 (LO1, medium), Q2 (LO2, medium), Q3 (new LO4, easy) + Wed: Q1 (LO3, medium), Q2 (LO4, medium), Q3 (LO1, hard) ← spaced + Thu: Q1 (LO2, hard), Q2 (LO4, medium), Q3 (LO3, hard) + Fri: Q1 (LO1-4 mixed), Q2 (cumulative), Q3 (challenge) +``` + +**Why this matters**: +- Backed by learning science (Ebbinghaus, Bjork's desirable difficulties) +- Instructors want to do this but can't justify the time +- Creates a weekly recurring use case (not one-shot) +- Differentiator: no tool thinks about *temporal distribution* of assessment + +--- + +### 2.3 Student-Created Questions — Flip the Model + +**Insight**: Research shows creating questions is a more effective learning strategy than answering them (generative learning theory). But there's no structured way for students to do this. + +**Experience**: +``` +Instructor enables "Student Question Mode" for a quiz + → Shares a link with students + → Students submit questions using a simplified creation form + → AI auto-evaluates each submission: + - Is it a valid question? (grammar, clarity) + - Does it align with the LOs? + - Is it a duplicate of existing questions? + - Bloom's level classification + → Instructor sees a curated dashboard: + + ┌─────────────────────────────────────────────────┐ + │ Student-Submitted Questions (47 received) │ + │ │ + │ ⭐ Top-Rated by AI │ + │ 1. "Which sorting algorithm..." — Sarah L. 
│ + │ Quality: 92/100 | Bloom's: Apply | LO: #3 │ + │ [Add to Quiz] [Edit] [Reject] │ + │ │ + │ 2. "In the context of..." — James K. │ + │ Quality: 88/100 | Bloom's: Analyze | LO: #1 │ + │ [Add to Quiz] [Edit] [Reject] │ + │ │ + │ ⚠️ Needs Review │ + │ 3. "What is the definition of..." — Amy T. │ + │ Quality: 45/100 | Issue: Too basic (Remember)│ + │ [Suggest Improvement to Student] │ + └─────────────────────────────────────────────────┘ +``` + +**Why this matters**: +- Turns students from consumers into contributors +- Instructor gets free question content + sees what students think is important +- Students learn more by creating than by answering +- Creates a viral loop: students tell other students about the tool +- Academic research paper potential (publish on this novel approach) + +--- + +### 2.4 Assessment Health Score — Gamify Instructor Behavior + +**Insight**: Instructors don't know if their assessments are "good." They have no benchmark, no feedback loop. They just hope. + +**Experience**: +``` +┌─────────────────────────────────────────────────┐ +│ Assessment Health Score: 72 / 100 │ +│ ████████████████████████░░░░░░░░ │ +│ │ +│ 📊 Breakdown: │ +│ Bloom's Coverage ████████░░ 16/20 │ +│ Difficulty Balance ██████░░░░ 12/20 │ +│ Question Variety ████████████ 20/20 │ +│ Content Coverage ██████████░ 14/20 │ +│ Freshness (not reused)██████░░░░ 10/20 │ +│ │ +│ 💡 Quick Wins to Improve: │ +│ +8 pts: Add 2 questions at Evaluate/Create level│ +│ +5 pts: Redistribute: too many Easy, need Hard │ +│ +3 pts: Chapter 7 has zero questions │ +│ │ +│ [Auto-Fix All] [Fix One by One] │ +└─────────────────────────────────────────────────┘ +``` + +**Why this matters**: +- Turns abstract quality into a tangible number +- "Quick wins" make improvement feel achievable +- Gamification: instructors want to hit 90+ +- "Auto-Fix All" is the magic button — AI fills gaps automatically +- Creates a reason to return and iterate (not one-and-done) + +--- + +### 2.5 Progressive Quiz — 
Adaptive Difficulty via Branching + +**Insight**: Fixed-difficulty quizzes are either too easy for strong students or too hard for weak ones. Adaptive testing exists in standardized tests (GRE, GMAT) but not in classroom quizzes. + +**Experience**: +``` +Instructor clicks "Generate Adaptive Quiz" + → AI generates each question at 3 difficulty levels (Easy/Med/Hard) + → Auto-creates an H5P Branching Scenario: + + Start → Q1 (Medium) + ├── Correct → Q2 (Hard) + │ ├── Correct → Q3 (Hard) → "Advanced" ending + │ └── Wrong → Q3 (Medium) → "Proficient" ending + └── Wrong → Q2 (Easy) + ├── Correct → Q3 (Medium) → "Developing" ending + └── Wrong → Q3 (Easy) → "Needs Review" ending + + Each ending: personalized feedback + suggested resources +``` + +**Why this matters**: +- Combines two powerful H5P types (Questions + Branching Scenario) +- Adaptive testing is a known best practice that's too hard to implement manually +- Every student gets an appropriately challenging experience +- The instructor doesn't need to understand branching — AI handles the tree +- This is genuinely novel — no tool does this + +--- + +### 2.6 "What Would Students Ask?" — Anticipate Confusion + +**Insight**: The best instructors anticipate where students get confused. But this takes years of experience. + +**Experience**: +``` +After uploading materials, before generating quiz questions: + +┌─────────────────────────────────────────────────┐ +│ 🤔 Predicted Student Confusion Points │ +│ │ +│ 1. "Students often confuse polymorphism with │ +│ overloading. Consider adding a question │ +│ that explicitly distinguishes them." │ +│ [Generate Distinguishing Question] │ +│ │ +│ 2. "The relationship between abstract classes │ +│ and interfaces is a common source of │ +│ misconceptions." │ +│ [Generate Misconception-Targeting Question] │ +│ │ +│ 3. "Students may struggle with why recursion │ +│ needs a base case — the materials explain │ +│ HOW but not WHY." 
│ +│ [Generate Conceptual Question] │ +│ │ +│ Based on: analysis of 10,000+ student │ +│ interactions across similar courses │ +└─────────────────────────────────────────────────┘ +``` + +**Why this matters**: +- Goes beyond "generate questions" to "help you teach better" +- Positions the tool as a teaching assistant, not just a quiz maker +- Generated questions target actual misunderstandings, not surface recall +- Instructors feel the tool has expertise they don't have + +--- + +### 2.7 Universal Export — Be the Interchange Format + +**Insight**: H5P is one format. But instructors use Canvas, Moodle, Google Forms, Kahoot, Quizlet, printed exams. Being locked to H5P limits adoption. + +**Experience**: +``` +Export Menu: + ┌─────────────────────────────────────────┐ + │ Export Your Quiz │ + │ │ + │ 📦 H5P Package (.h5p) [Download] │ + │ 📄 PDF (Print-ready) [Download] │ + │ 📋 QTI 2.1 (Canvas native) [Download] │ + │ 📋 Moodle XML [Download] │ + │ 📊 Google Forms [Create] │ + │ 🎮 Kahoot [Create] │ + │ 📚 Anki Deck (.apkg) [Download] │ + │ 📝 Plain Text / Markdown [Download] │ + │ 🔗 Shareable Web Link [Generate] │ + └─────────────────────────────────────────┘ +``` + +**Why this matters**: +- Removes "but we don't use H5P" as an objection +- Canvas QTI export alone would unlock every Canvas-using university +- Shareable web link = no LMS needed, just send a URL to students +- Positions TLEF-CREATE as THE quiz creation tool, not "an H5P tool" +- Each export format opens a new market segment + +**Priority exports**: +1. QTI 2.1 (Canvas) — biggest LMS market +2. Moodle XML — second biggest LMS +3. Shareable link (self-hosted H5P preview page) +4. Kahoot — viral growth potential + +--- + +### 2.8 Clone & Adapt — Cross-Course Reuse + +**Insight**: Many courses cover overlapping concepts (e.g., statistics in psychology, biology, economics). A professor teaching 2 courses wants to reuse question structures but with different domain context. 
+ +**Experience**: +``` +"Clone Quiz to Another Course" + → Select source: PSYC 100 - Statistics Basics Quiz + → Select target: BIOL 200 - Research Methods + → AI adapts every question: + + Original (PSYC 100): + "A psychologist surveys 100 patients. The mean anxiety + score is 7.2 with SD 1.5. What is the 95% CI?" + + Adapted (BIOL 200): + "A biologist samples 100 organisms. The mean body mass + is 7.2g with SD 1.5g. What is the 95% CI?" + + → Same statistical concept, domain-appropriate context + → Instructor reviews and publishes +``` + +**Why this matters**: +- Professors teaching multiple courses save massive time +- Cross-department collaboration potential +- Each adaptation = new quiz in the Question Bank (compounding value) + +--- + +### 2.9 Assessment Calendar — Semester-Level View + +**Insight**: Individual quizzes exist in isolation. Nobody has a bird's-eye view of "what am I assessing, when, and how does it all fit together?" + +**Experience**: +``` +┌──────────────────────────────────────────────────────────────┐ +│ CPSC 110 — Fall 2026 Assessment Calendar │ +│ │ +│ Sep ──── Oct ──── Nov ──── Dec │ +│ W1 W2 W3 W4 W5 W6 W7 W8 W9 W10 W11 W12 W13 │ +│ ▪ ▪ ▪ ■ ▪ ▪ ▪ ■ ▪ ▪ ▪ ■ ★ │ +│ │ +│ ▪ = Weekly micro-quiz (3 Qs) │ +│ ■ = Midterm assessment (20 Qs) │ +│ ★ = Final comprehensive (40 Qs) │ +│ │ +│ LO Coverage Over Time: │ +│ LO1: ████░░░░░░░░░ (covered weeks 1-4, not revisited) ⚠️ │ +│ LO2: ██████████████ (well-distributed) ✅ │ +│ LO3: ░░░░░░████████ (only in second half) ⚠️ │ +│ LO4: ████████████░░ (drops off before final) ⚠️ │ +│ │ +│ 💡 Suggestion: Add LO1 review questions to Week 8 midterm │ +│ 💡 Suggestion: LO3 needs early introduction in Week 3 │ +│ │ +│ [Auto-Rebalance] [Add Quiz] [Export Semester Plan] │ +└──────────────────────────────────────────────────────────────┘ +``` + +**Why this matters**: +- No tool thinks at the semester level — everyone thinks quiz by quiz +- Visual LO coverage over time reveals gaps instructors never notice +- 
"Auto-Rebalance" adjusts the entire semester's assessment plan +- This is the view a curriculum coordinator or accreditation reviewer wants + +--- + +### 2.10 Anti-Pattern Detection — Quiz Design Linting + +**Insight**: There are well-known quiz design anti-patterns that even experienced instructors fall into. Like code linting, but for assessments. + +**Experience**: +``` +Quiz Design Lint Report — 7 issues found + +❌ Critical: + • Q4, Q8, Q15: Correct answer is always option (C) + → Students will notice the pattern + → [Shuffle Automatically] + + • Q3: "Which of the following is NOT..." + → Negative stems increase cognitive load unfairly + → [Rewrite as Positive Stem] + +⚠️ Warning: + • 12 of 15 questions are Multiple Choice + → Low variety reduces assessment validity + → [Convert 3 to Different Types] + + • Q7: Stem is 180 words, options are 5 words each + → Reading burden is in the wrong place + → [Simplify Stem] + + • Q11, Q12: Test the same concept (both about linked lists) + → Redundant coverage, missing other topics + → [Replace Q12 with New Topic] + +ℹ️ Info: + • Average question reading time: 45 seconds + → 15 questions × 45s = ~11 min reading time + → Recommended for a 50-min exam: 15-20 min reading + → ✅ Within range + + • No "All of the above" or "None of the above" detected ✅ +``` + +**Why this matters**: +- Makes invisible quality problems visible +- Each issue has a one-click fix (not just complaints) +- Educates instructors about assessment design (they learn from the linting) +- Builds trust: "this tool catches things I wouldn't have noticed" + +--- + +## Part 3: Product Strategy Themes + +### Theme A: "AI Teaching Assistant, Not Just Quiz Generator" + +Features: Confusion prediction (2.6), Health Score (2.4), Anti-Pattern Detection (2.10), Assessment Calendar (2.9) + +Positioning: "An AI that understands pedagogy. It doesn't just make questions — it makes your teaching better." 
+ +Target: Instructors who care about teaching quality (the ones who go to teaching workshops). + +### Theme B: "One Input, Every Output" + +Features: Universal Export (2.7), Content Type Transformation, Interactive Video / Branching Scenario generation + +Positioning: "Create once. Export everywhere. H5P, Canvas, Moodle, Kahoot, PDF, Anki." + +Target: Pragmatic instructors who need flexibility across platforms. + +### Theme C: "Assessment Intelligence Platform" + +Features: Syllabus-to-Semester (2.1), Micro-Assessment (2.2), Calendar (2.9), xAPI Analytics, Health Score (2.4) + +Positioning: "The operating system for educational assessment. Plan, create, deploy, analyze, improve." + +Target: Institutional buyers (department chairs, instructional design teams, CTLs). + +### Theme D: "Community-Powered Learning" + +Features: Student Questions (2.3), Question Bank sharing, Template Marketplace, Clone & Adapt (2.8) + +Positioning: "Every question makes the platform smarter. Every instructor benefits from the community." + +Target: Long-term retention and network effects. The "GitHub for educational content" vision. + +--- + +## Part 4: What to Build vs. What to Position + +Some of these are **features** (need engineering). Some are **positioning** (need marketing). + +| Idea | Is it a feature or positioning? 
| +|---|---| +| Syllabus-to-Semester | Feature (high impact onboarding) | +| Assessment Health Score | Feature (medium effort, high differentiation) | +| Anti-Pattern Detection | Feature (mostly prompt engineering) | +| Universal Export (QTI/Moodle XML) | Feature (format mapping) | +| "AI Teaching Assistant" narrative | Positioning (reframe existing capabilities) | +| Student-Created Questions | Feature (new user role + flow) | +| Micro-Assessment Mode | Feature (scheduling logic + spaced repetition algorithm) | +| Progressive/Adaptive Quiz | Feature (branching generation + difficulty levels) | +| Confusion Point Prediction | Feature (LLM prompt + UI) | +| Assessment Calendar | Feature (semester-level data model + visualization) | +| Clone & Adapt | Feature (LLM context transformation) | + +--- + +## Part 5: The "10x Moment" Test + +For each feature, ask: **"What's the moment the user says 'holy shit'?"** + +| Feature | 10x Moment | +|---|---| +| Syllabus → Semester Plan | "I uploaded my syllabus and it planned 13 weeks of assessments in 30 seconds" | +| Interactive Video | "I pasted a YouTube link and got an interactive quiz video in 2 minutes" | +| Branching Scenario | "It created a patient diagnosis simulation from my case study notes" | +| Anti-Pattern Detection | "It caught that all my correct answers were option C — I never would have noticed" | +| Health Score + Auto-Fix | "My quiz went from 65 to 91 quality score with one click" | +| Student Questions | "My students submitted 47 questions and the AI ranked them — the top ones are better than mine" | +| Adaptive Quiz | "Each student got a different difficulty path and the weak ones got extra practice" | +| Universal Export | "I created one quiz and deployed it to Canvas, Kahoot, and printed PDF in 10 seconds" | + +**If you can't articulate the 10x moment, the feature isn't worth building.** + +--- + +## Part 6: Moat Analysis + +What makes this defensible over time? 
+ +| Moat Type | How TLEF-CREATE Builds It | +|---|---| +| **Data moat** | Every question generated improves the system. Question Bank accumulates. Student performance data (xAPI) trains better difficulty calibration. | +| **Switching cost** | Semester of quizzes + question bank + assessment calendar = too much to recreate elsewhere | +| **Network effects** | Student-created questions, template sharing, cross-instructor question bank | +| **Pedagogical IP** | Bloom's classification, anti-pattern detection, confusion prediction — competitors can copy features but not the pedagogical knowledge baked in | +| **Ecosystem lock-in** | Universal export = easy to adopt. But Assessment Calendar + Health Score + Analytics = reasons to stay | + +--- + +## Appendix: Quick Reference — All Ideas Ranked + +| # | Idea | Impact | Effort | 10x Moment? | +|:---:|---|:---:|:---:|:---:| +| 1 | Syllabus → Semester Plan | 🔴 | 🟡 | ✅ | +| 2 | Interactive Video Generator | 🔴 | 🟡 | ✅ | +| 3 | Assessment Health Score | 🔴 | 🟢 | ✅ | +| 4 | Anti-Pattern Detection | 🟡 | 🟢 | ✅ | +| 5 | Universal Export (QTI, Moodle XML) | 🔴 | 🟡 | ✅ | +| 6 | Branching Scenario Generator | 🔴 | 🟡 | ✅ | +| 7 | Progressive Adaptive Quiz | 🟡 | 🟡 | ✅ | +| 8 | Micro-Assessment Mode | 🟡 | 🟢 | ✅ | +| 9 | Student-Created Questions | 🟡 | 🟡 | ✅ | +| 10 | Confusion Point Prediction | 🟡 | 🟢 | ✅ | +| 11 | Assessment Calendar | 🟡 | 🟡 | ⚠️ | +| 12 | Clone & Adapt | 🟡 | 🟢 | ⚠️ | +| 13 | Content Type Transformation | 🟡 | 🟢 | ⚠️ | + +🔴 = High | 🟡 = Medium | 🟢 = Low diff --git a/package-lock.json b/package-lock.json index 2fed45d..271850a 100644 --- a/package-lock.json +++ b/package-lock.json @@ -12,6 +12,7 @@ "@hookform/resolvers": "^3.9.0", "@reduxjs/toolkit": "^2.8.2", "@tanstack/react-query": "^5.56.2", + "adm-zip": "^0.5.16", "agenda": "^5.0.0", "archiver": "^7.0.1", "bcryptjs": "^3.0.2", @@ -6585,6 +6586,15 @@ "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, + "node_modules/adm-zip": { + "version": "0.5.16", + "resolved": 
"https://registry.npmjs.org/adm-zip/-/adm-zip-0.5.16.tgz", + "integrity": "sha512-TGw5yVi4saajsSEgz25grObGHEUaDrniwvA2qwSC060KfqGPdglhvPMA2lPIoxs3PQIItj2iag35fONcQqgUaQ==", + "license": "MIT", + "engines": { + "node": ">=12.0" + } + }, "node_modules/agenda": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/agenda/-/agenda-5.0.0.tgz", diff --git a/package.json b/package.json index 9975574..7510f8d 100644 --- a/package.json +++ b/package.json @@ -37,6 +37,7 @@ "@hookform/resolvers": "^3.9.0", "@reduxjs/toolkit": "^2.8.2", "@tanstack/react-query": "^5.56.2", + "adm-zip": "^0.5.16", "agenda": "^5.0.0", "archiver": "^7.0.1", "bcryptjs": "^3.0.2", diff --git a/routes/create/controllers/h5pPreviewController.js b/routes/create/controllers/h5pPreviewController.js new file mode 100644 index 0000000..c37e1cd --- /dev/null +++ b/routes/create/controllers/h5pPreviewController.js @@ -0,0 +1,653 @@ +import express from 'express'; +import multer from 'multer'; +import AdmZip from 'adm-zip'; +import path from 'path'; +import fs from 'fs/promises'; +import { fileURLToPath } from 'url'; +import { v4 as uuidv4 } from 'uuid'; +import { successResponse, errorResponse } from '../utils/responseFormatter.js'; +import { asyncHandler } from '../utils/asyncHandler.js'; +import { HTTP_STATUS } from '../config/constants.js'; +import { authenticateToken } from '../middleware/auth.js'; +import Quiz from '../models/Quiz.js'; +import { convertQuestionToH5P } from '../services/h5pExportService.js'; +import LIBRARY_REGISTRY, { getNeededLibraries } from '../config/h5pLibraryRegistry.js'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); + +const router = express.Router(); + +// Upload directory for extracted H5P previews +const UPLOAD_BASE = path.join(__dirname, '..', 'uploads', 'h5p-preview'); +const H5P_LIBS_DIR = path.join(__dirname, '..', 'h5p-libs'); +const MAX_AGE_MS = 60 * 60 * 1000; // 1 hour TTL for extracted files + +// Configure 
multer for .h5p file uploads (in-memory, max 50MB) +const upload = multer({ + storage: multer.memoryStorage(), + limits: { fileSize: 50 * 1024 * 1024 }, + fileFilter: (_req, file, cb) => { + if (file.originalname.endsWith('.h5p') || file.mimetype === 'application/zip') { + cb(null, true); + } else { + cb(new Error('Only .h5p files are allowed')); + } + } +}); + +/** + * POST /upload — Accept .h5p file, extract, return metadata + */ +router.post('/upload', upload.single('h5pFile'), asyncHandler(async (req, res) => { + if (!req.file) { + return errorResponse(res, 'No .h5p file provided', 'NO_FILE', HTTP_STATUS.BAD_REQUEST); + } + + const id = uuidv4(); + const extractDir = path.join(UPLOAD_BASE, id); + + // Ensure upload directory exists + await fs.mkdir(extractDir, { recursive: true }); + + // Extract the .h5p ZIP + const zip = new AdmZip(req.file.buffer); + zip.extractAllTo(extractDir, true); + + // Parse h5p.json + const h5pJsonPath = path.join(extractDir, 'h5p.json'); + let h5pJson; + try { + const raw = await fs.readFile(h5pJsonPath, 'utf-8'); + h5pJson = JSON.parse(raw); + } catch (e) { + // Clean up on failure + await fs.rm(extractDir, { recursive: true, force: true }); + return errorResponse(res, 'Invalid .h5p file: missing or malformed h5p.json', 'INVALID_H5P', HTTP_STATUS.BAD_REQUEST); + } + + // Run cleanup of old extracted dirs (fire-and-forget) + cleanupOldPreviews().catch(() => {}); + + return successResponse(res, { + id, + title: h5pJson.title || 'Untitled', + mainLibrary: h5pJson.mainLibrary, + preloadedDependencies: h5pJson.preloadedDependencies || [] + }, 'H5P file uploaded and extracted'); +})); + +/** + * GET /core/h5p-core.js — Serve the minimal H5P runtime + */ +router.get('/core/h5p-core.js', asyncHandler(async (req, res) => { + const corePath = path.join(__dirname, '..', 'h5p-core', 'h5p-core.js'); + res.type('application/javascript').sendFile(corePath); +})); + +/** + * GET /quiz/:quizId/render — Render quiz questions as real H5P content 
in-browser. + * Each question gets its own numbered header + H5P.newRunnable() instance. + * Supports ?lo= to filter by a specific learning objective. + */ +router.get('/quiz/:quizId/render', authenticateToken, asyncHandler(async (req, res) => { + const { quizId } = req.params; + const loFilter = req.query.lo || null; + + // Fetch quiz with populated questions and learning objectives + const quiz = await Quiz.findOne({ _id: quizId, createdBy: req.user.id }) + .populate({ + path: 'questions', + populate: { path: 'learningObjective', select: 'text order' }, + options: { sort: { order: 1 } } + }) + .populate('learningObjectives', 'text order'); + + if (!quiz) { + return errorResponse(res, 'Quiz not found', 'NOT_FOUND', HTTP_STATUS.NOT_FOUND); + } + + let questions = quiz.questions || []; + + // Filter by learning objective if specified + if (loFilter && loFilter !== 'null') { + const loIndex = parseInt(loFilter, 10); + if (!isNaN(loIndex) && quiz.learningObjectives && quiz.learningObjectives[loIndex]) { + const targetLOText = quiz.learningObjectives[loIndex].text; + questions = questions.filter(q => { + const loText = q.learningObjective?.text; + return loText === targetLOText; + }); + } + } + + if (questions.length === 0) { + res.removeHeader('Content-Security-Policy'); + res.setHeader('X-Frame-Options', 'SAMEORIGIN'); + return res.type('text/html').send(` +

No questions to display.

`); + } + + // Convert each question to H5P format + const h5pQuestions = []; + for (const question of questions) { + const h5pContent = convertQuestionToH5P(question, quiz); + if (h5pContent) { + h5pQuestions.push({ question, h5pContent }); + } + } + + // Determine needed libraries from all question types + const questionTypes = new Set(questions.map(q => q.type)); + const neededLibNames = getNeededLibraries(questionTypes, { + hasMixedContent: false, + isFlashcardOnly: questionTypes.size === 1 && questionTypes.has('flashcard') + }); + + // Build a synthetic h5p.json with those dependencies for resolveDependencies + const preloadedDependencies = []; + for (const libName of neededLibNames) { + const lib = LIBRARY_REGISTRY[libName]; + if (lib) { + preloadedDependencies.push({ + machineName: libName, + majorVersion: lib.majorVersion, + minorVersion: lib.minorVersion + }); + } + } + + const syntheticH5pJson = { + title: quiz.name || 'Quiz Preview', + mainLibrary: 'H5P.Column', + preloadedDependencies + }; + + // Create a temp directory for this preview, copy needed library files + const previewId = uuidv4(); + const extractDir = path.join(UPLOAD_BASE, previewId); + await fs.mkdir(extractDir, { recursive: true }); + + // Symlink ALL library dirs from h5p-libs into the temp directory. + // This is instant (vs mergeDir copying thousands of files) and ensures + // resolveDependencies finds transitive deps without triggering mergeDir. 
+ try { + const libEntries = await fs.readdir(H5P_LIBS_DIR, { withFileTypes: true }); + for (const entry of libEntries) { + if (entry.isDirectory()) { + const src = path.join(H5P_LIBS_DIR, entry.name); + const dest = path.join(extractDir, entry.name); + try { + await fs.symlink(src, dest, 'dir'); + } catch { + // Already exists or other issue, skip + } + } + } + } catch { + // h5p-libs dir not accessible + } + + // Resolve CSS/JS dependencies in correct load order + const { cssFiles, jsFiles } = await resolveDependencies(syntheticH5pJson, extractDir); + + const basePath = `/h5p-preview-files/${previewId}`; + const cssTags = cssFiles.map(f => ` `).join('\n'); + const jsTags = jsFiles.map(f => ` `).join('\n'); + + // Build per-question HTML blocks and H5P.newRunnable() calls + const questionBlocks = []; + const runnableCalls = []; + + h5pQuestions.forEach(({ question, h5pContent }, idx) => { + const num = idx + 1; + const typeLabel = formatQuestionType(question.type); + const difficulty = question.difficulty ? ` \u00b7 ${capitalize(question.difficulty)}` : ''; + const containerId = `h5p-question-${idx}`; + + questionBlocks.push(` +
+
+ Q${num} + ${escapeHtml(typeLabel)}${escapeHtml(difficulty)} +
+
+
`); + + runnableCalls.push(` + (function() { + var library = ${JSON.stringify(h5pContent)}; + var $container = jQuery('#${containerId}'); + $container.addClass('h5p-content'); + H5P.newRunnable(library, 'preview-${previewId}-${idx}', $container, false, { + metadata: library.metadata || {} + }); + })();`); + }); + + const html = ` + + + + + ${escapeHtml(quiz.name || 'Quiz Preview')} + +${cssTags} + + +
+${questionBlocks.join('\n')} +
+ + + +${jsTags} + + + +`; + + // Fire-and-forget cleanup of old previews + cleanupOldPreviews().catch(() => {}); + + res.removeHeader('Content-Security-Policy'); + res.setHeader('X-Frame-Options', 'SAMEORIGIN'); + res.type('text/html').send(html); +})); + +/** + * GET /:id/render — Generate and serve the full HTML page for rendering H5P content + */ +router.get('/:id/render', asyncHandler(async (req, res) => { + const { id } = req.params; + const extractDir = path.join(UPLOAD_BASE, id); + + // Verify the extracted directory exists + try { + await fs.access(extractDir); + } catch { + return errorResponse(res, 'Preview not found. It may have expired.', 'NOT_FOUND', HTTP_STATUS.NOT_FOUND); + } + + // Read h5p.json + const h5pJson = JSON.parse(await fs.readFile(path.join(extractDir, 'h5p.json'), 'utf-8')); + + // Read content.json + let contentJson; + try { + contentJson = JSON.parse(await fs.readFile(path.join(extractDir, 'content', 'content.json'), 'utf-8')); + } catch { + return errorResponse(res, 'Missing content/content.json in H5P package', 'INVALID_H5P', HTTP_STATUS.BAD_REQUEST); + } + + // Resolve all dependencies (topological sort) + const { cssFiles, jsFiles } = await resolveDependencies(h5pJson, extractDir); + + // Build the base path for static files + const basePath = `/h5p-preview-files/${id}`; + + // Build the main library string "H5P.MultiChoice 1.16" + const mainLib = h5pJson.mainLibrary; + const mainDep = (h5pJson.preloadedDependencies || []).find(d => d.machineName === mainLib); + const mainLibString = mainDep + ? `${mainLib} ${mainDep.majorVersion}.${mainDep.minorVersion}` + : mainLib; + + // Generate CSS link tags + const cssTags = cssFiles.map(f => ` `).join('\n'); + + // Generate JS script tags + const jsTags = jsFiles.map(f => ` `).join('\n'); + + const html = ` + + + + + ${escapeHtml(h5pJson.title || 'H5P Preview')} + +${cssTags} + + +
+ + + +${jsTags} + + + +`; + + // Override Helmet's CSP to allow framing and inline scripts/CDN resources + res.removeHeader('Content-Security-Policy'); + res.setHeader('X-Frame-Options', 'SAMEORIGIN'); + res.type('text/html').send(html); +})); + +/** + * Resolve the full dependency tree from h5p.json into ordered CSS and JS file lists. + * Uses topological sort (Kahn's algorithm) to ensure correct load order. + */ +async function resolveDependencies(h5pJson, extractDir) { + const deps = h5pJson.preloadedDependencies || []; + + // Map: "machineName-major.minor" → { dirName, css[], js[], deps[] } + const libMap = new Map(); + const adjacency = new Map(); // key → [dependency keys] + const inDegree = new Map(); + + // BFS to discover all libraries and their transitive dependencies + const queue = [...deps]; + const visited = new Set(); + + while (queue.length > 0) { + const dep = queue.shift(); + const key = `${dep.machineName}-${dep.majorVersion}.${dep.minorVersion}`; + if (visited.has(key)) continue; + visited.add(key); + + // Find the library directory — could be in extracted H5P or in h5p-libs + const dirName = `${dep.machineName}-${dep.majorVersion}.${dep.minorVersion}`; + let libJsonPath = path.join(extractDir, dirName, 'library.json'); + let libBasePath = dirName; // relative path for URL generation + let libDirExists = false; + + try { + await fs.access(libJsonPath); + libDirExists = true; + } catch { + // Try the shared h5p-libs directory + libJsonPath = path.join(H5P_LIBS_DIR, dirName, 'library.json'); + try { + await fs.access(libJsonPath); + libDirExists = true; + } catch { + // Library not found — skip + } + } + + if (!libDirExists) { + libMap.set(key, { dirName, css: [], js: [], deps: [] }); + adjacency.set(key, []); + inDegree.set(key, inDegree.get(key) || 0); + continue; + } + + const libJson = JSON.parse(await fs.readFile(libJsonPath, 'utf-8')); + + // Merge from shared h5p-libs into extracted dir so static serving works. 
 + // Always merge (not just when missing) because the .h5p archive may contain + // incomplete library dirs (e.g. metadata only, no dist/ build artifacts). + const extractedLibDir = path.join(extractDir, dirName); + const sharedLibDir = path.join(H5P_LIBS_DIR, dirName); + try { + await fs.access(sharedLibDir); + await mergeDir(sharedLibDir, extractedLibDir); + } catch { + // Shared lib not available, rely on whatever's in the archive + } + + const css = (libJson.preloadedCss || []).map(f => `${dirName}/${f.path}`); + const js = (libJson.preloadedJs || []).map(f => `${dirName}/${f.path}`); + const subDeps = libJson.preloadedDependencies || []; + const subDepKeys = subDeps.map(d => `${d.machineName}-${d.majorVersion}.${d.minorVersion}`); + + libMap.set(key, { dirName, css, js, deps: subDepKeys }); + adjacency.set(key, subDepKeys); + + if (!inDegree.has(key)) { + inDegree.set(key, 0); + } + + // Enqueue sub-dependencies + for (const subDep of subDeps) { + queue.push(subDep); + } + } + + // Seed an inDegree entry for every referenced dependency key (counts are computed below) + for (const [key, depKeys] of adjacency) { + for (const depKey of depKeys) { + inDegree.set(depKey, (inDegree.get(depKey) || 0)); + } + } + // Edge direction for Kahn's algorithm: if A depends on B, then B must load before A, + // so the edge runs B → A and A's in-degree is the number of libraries A depends on. + // The adjacency map above stores raw "A → its dependencies" lists; reverseAdj and + // realInDegree below reorient those lists into the B → A edges Kahn's algorithm needs. 
+ const reverseAdj = new Map(); + const realInDegree = new Map(); + for (const key of adjacency.keys()) { + reverseAdj.set(key, []); + realInDegree.set(key, 0); + } + for (const [key, depKeys] of adjacency) { + for (const depKey of depKeys) { + if (!reverseAdj.has(depKey)) reverseAdj.set(depKey, []); + reverseAdj.get(depKey).push(key); + realInDegree.set(key, (realInDegree.get(key) || 0) + 1); + } + } + + // Kahn's algorithm + const sorted = []; + const q = []; + for (const [key, deg] of realInDegree) { + if (deg === 0) q.push(key); + } + + while (q.length > 0) { + const current = q.shift(); + sorted.push(current); + for (const neighbor of (reverseAdj.get(current) || [])) { + realInDegree.set(neighbor, realInDegree.get(neighbor) - 1); + if (realInDegree.get(neighbor) === 0) { + q.push(neighbor); + } + } + } + + // If there are nodes not in sorted (cycle), add them at the end + for (const key of adjacency.keys()) { + if (!sorted.includes(key)) { + sorted.push(key); + } + } + + // Collect CSS and JS in dependency order, filtering out files that don't exist on disk + const cssFiles = []; + const jsFiles = []; + for (const key of sorted) { + const lib = libMap.get(key); + if (lib) { + for (const f of lib.css) { + const fullPath = path.join(extractDir, f); + try { + await fs.access(fullPath); + cssFiles.push(f); + } catch { + // File doesn't exist (e.g. missing dist/ build), skip it + } + } + for (const f of lib.js) { + const fullPath = path.join(extractDir, f); + try { + await fs.access(fullPath); + jsFiles.push(f); + } catch { + // File doesn't exist (e.g. missing dist/ build), skip it + } + } + } + } + + return { cssFiles, jsFiles }; +} + +/** + * Recursively merge src into dest — copies files that don't already exist in dest. + * This fills in missing build artifacts (dist/) without overwriting archive contents. 
+ */ +async function mergeDir(src, dest) { + await fs.mkdir(dest, { recursive: true }); + const entries = await fs.readdir(src, { withFileTypes: true }); + for (const entry of entries) { + const srcPath = path.join(src, entry.name); + const destPath = path.join(dest, entry.name); + if (entry.isDirectory()) { + await mergeDir(srcPath, destPath); + } else { + try { + await fs.access(destPath); + // File already exists in archive, skip + } catch { + await fs.copyFile(srcPath, destPath); + } + } + } +} + +/** + * Clean up extracted preview directories older than MAX_AGE_MS + */ +async function cleanupOldPreviews() { + try { + await fs.access(UPLOAD_BASE); + } catch { + return; // Directory doesn't exist yet + } + + const entries = await fs.readdir(UPLOAD_BASE, { withFileTypes: true }); + const now = Date.now(); + + for (const entry of entries) { + if (!entry.isDirectory()) continue; + const dirPath = path.join(UPLOAD_BASE, entry.name); + try { + const stat = await fs.stat(dirPath); + if (now - stat.mtimeMs > MAX_AGE_MS) { + await fs.rm(dirPath, { recursive: true, force: true }); + } + } catch { + // Ignore errors during cleanup + } + } +} + +/** + * Simple HTML escape + */ +function escapeHtml(str) { + return str + .replace(/&/g, '&') + .replace(//g, '>') + .replace(/"/g, '"'); +} + +/** + * Format a question type for display + */ +function formatQuestionType(type) { + const labels = { + 'multiple-choice': 'Multiple Choice', + 'true-false': 'True / False', + 'flashcard': 'Flashcard', + 'matching': 'Matching', + 'ordering': 'Ordering', + 'cloze': 'Fill in the Blanks', + 'summary': 'Summary', + 'discussion': 'Discussion' + }; + return labels[type] || type; +} + +/** + * Capitalize the first letter + */ +function capitalize(str) { + if (!str) return ''; + return str.charAt(0).toUpperCase() + str.slice(1); +} + +export default router; diff --git a/routes/create/createRoutes.js b/routes/create/createRoutes.js index 504566f..653d491 100644 --- a/routes/create/createRoutes.js 
+++ b/routes/create/createRoutes.js @@ -15,6 +15,7 @@ import questionController from './controllers/questionController.js'; import exportController from './controllers/exportController.js'; import streamingController from './controllers/streamingController.js'; import searchController from './controllers/searchController.js'; +import h5pPreviewController from './controllers/h5pPreviewController.js'; const router = express.Router(); @@ -71,8 +72,8 @@ router.use('/auth/saml/login', authLimiter); router.use('/materials/upload', uploadLimiter); // Apply API rate limiting to all routes except config and streaming endpoints router.use((req, res, next) => { - if (req.path === '/auth/config' || req.path.startsWith('/streaming/')) { - return next(); // Skip rate limiting for config and streaming endpoints + if (req.path === '/auth/config' || req.path.startsWith('/streaming/') || req.path.startsWith('/h5p-preview/')) { + return next(); // Skip rate limiting for config, streaming, and h5p-preview endpoints } return apiLimiter(req, res, next); }); @@ -98,6 +99,7 @@ router.use('/questions', questionController); router.use('/export', exportController); router.use('/streaming', streamingController); router.use('/search', searchController); +router.use('/h5p-preview', h5pPreviewController); // DEBUG: Route to log all chunks in Qdrant (for debugging purposes) router.get('/debug/qdrant-chunks', async (req, res) => { diff --git a/routes/create/h5p-core/h5p-core.js b/routes/create/h5p-core/h5p-core.js new file mode 100644 index 0000000..9fc29b5 --- /dev/null +++ b/routes/create/h5p-core/h5p-core.js @@ -0,0 +1,738 @@ +/** + * Minimal H5P Core Runtime + * + * Provides the global H5P object that all H5P content type libraries depend on. + * This is a stripped-down version of the official h5p.js runtime, containing only + * the pieces needed to render content (no editor, no server communication). 
+ */ + +var H5P = H5P || {}; + +/** + * EventDispatcher — base class that all H5P content types extend. + * Provides on/off/once/trigger event system. + */ +H5P.EventDispatcher = (function () { + /** + * @class + */ + function EventDispatcher() { + this.listeners = {}; + } + + EventDispatcher.prototype.on = function (type, listener, thisArg) { + if (typeof listener === 'function') { + if (!this.listeners[type]) { + this.listeners[type] = []; + } + this.listeners[type].push({ fn: listener, thisArg: thisArg }); + } + return this; + }; + + EventDispatcher.prototype.once = function (type, listener, thisArg) { + if (typeof listener === 'function') { + var self = this; + var wrapper = function () { + self.off(type, wrapper); + listener.apply(this, arguments); + }; + wrapper._original = listener; + this.on(type, wrapper, thisArg); + } + return this; + }; + + EventDispatcher.prototype.off = function (type, listener) { + if (this.listeners[type]) { + if (listener) { + this.listeners[type] = this.listeners[type].filter(function (l) { + return l.fn !== listener && l.fn._original !== listener; + }); + } else { + this.listeners[type] = []; + } + } + }; + + EventDispatcher.prototype.trigger = function (event, extra, eventData) { + if (typeof event === 'string') { + event = new H5P.Event(event, extra, eventData); + } + event.type = event.type || 'unknown'; + if (this.listeners[event.type]) { + var listeners = this.listeners[event.type].slice(); + for (var i = 0; i < listeners.length; i++) { + listeners[i].fn.call(listeners[i].thisArg || this, event); + } + } + + // Bubble xAPI events up through parent chain + if (event.type === 'xAPI' && !event.preventBubbling && this.parent) { + if (this.parent.trigger) { + this.parent.trigger(event); + } + } + + // Propagate xAPI events to external dispatcher (but not from the dispatcher itself) + if (event.type === 'xAPI' && H5P.externalDispatcher && this !== H5P.externalDispatcher) { + H5P.externalDispatcher.trigger(event); + } + }; + + // 
Content type API methods — the official H5P runtime provides these on all instances. + // H5P libraries (Column, QuestionSet, etc.) call these in their constructors. + EventDispatcher.prototype.setActivityStarted = function () {}; + EventDispatcher.prototype.getScore = function () { return 0; }; + EventDispatcher.prototype.getMaxScore = function () { return 0; }; + EventDispatcher.prototype.getTitle = function () { return ''; }; + EventDispatcher.prototype.getAnswerGiven = function () { return false; }; + EventDispatcher.prototype.showSolutions = function () {}; + EventDispatcher.prototype.resetTask = function () {}; + EventDispatcher.prototype.getXAPIData = function () { return { statement: {} }; }; + EventDispatcher.prototype.getCurrentState = function () { return {}; }; + EventDispatcher.prototype.isRoot = function () { return false; }; + + // xAPI instance methods — libraries call these on `this` (not the static H5P.createXAPIEventTemplate) + EventDispatcher.prototype.createXAPIEventTemplate = function (verb, extra) { + var event = H5P.createXAPIEventTemplate(verb, extra); + event.setObject(this); + if (this.parent) { + event.setContext(this); + } + return event; + }; + + EventDispatcher.prototype.triggerXAPI = function (verb, extra) { + var event = this.createXAPIEventTemplate(verb, extra); + this.trigger(event); + return event; + }; + + EventDispatcher.prototype.triggerXAPIScored = function (score, maxScore, verb, completion, success) { + var event = this.createXAPIEventTemplate(verb || 'answered'); + event.setScoredResult(score, maxScore, this, completion, success); + this.trigger(event); + return event; + }; + + EventDispatcher.prototype.triggerXAPICompleted = function (score, maxScore, success) { + var event = this.createXAPIEventTemplate('completed'); + event.setScoredResult(score, maxScore, this, true, success); + this.trigger(event); + return event; + }; + + return EventDispatcher; +})(); + +/** + * H5P.Event + */ +H5P.Event = function (type, data, 
extras) { + this.type = type; + this.data = data || {}; + this.extras = extras || {}; + this.preventBubbling = false; + this.scheduledForLater = false; + + this.setBubbling = function (val) { + this.preventBubbling = !val; + }; + + this.getBubbling = function () { + return !this.preventBubbling; + }; + + this.preventDefault = function () { + this.defaultPrevented = true; + }; + + this.getScore = function () { + return this.data.statement && this.data.statement.result + ? this.data.statement.result.score && this.data.statement.result.score.raw + : null; + }; + + this.getMaxScore = function () { + return this.data.statement && this.data.statement.result + ? this.data.statement.result.score && this.data.statement.result.score.max + : null; + }; + + this.getVerifiedStatementValue = function (keys) { + var val = this.data.statement; + for (var i = 0; i < keys.length; i++) { + if (val === undefined || val === null) return null; + val = val[keys[i]]; + } + return val; + }; +}; + +/** + * XAPIEvent — wrapper for xAPI statements + */ +H5P.XAPIEvent = function () { + H5P.Event.call(this, 'xAPI', { statement: {} }, { bubbles: true, external: true }); +}; + +H5P.XAPIEvent.prototype = Object.create(H5P.Event.prototype); +H5P.XAPIEvent.prototype.constructor = H5P.XAPIEvent; + +H5P.XAPIEvent.prototype.setScoredResult = function (score, maxScore, instance, completion, success) { + this.data.statement.result = this.data.statement.result || {}; + this.data.statement.result.score = { + min: 0, + raw: score, + max: maxScore, + scaled: maxScore > 0 ? 
score / maxScore : 0 + }; + if (typeof completion === 'boolean') { + this.data.statement.result.completion = completion; + } + if (typeof success === 'boolean') { + this.data.statement.result.success = success; + } +}; + +H5P.XAPIEvent.prototype.setVerb = function (verb) { + if (typeof verb === 'string') { + if (verb.indexOf('http') !== 0) { + verb = 'http://adlnet.gov/expapi/verbs/' + verb; + } + this.data.statement.verb = { + id: verb, + display: { 'en-US': verb.split('/').pop() } + }; + } else if (typeof verb === 'object') { + this.data.statement.verb = verb; + } +}; + +H5P.XAPIEvent.prototype.getVerb = function (full) { + var statement = this.data.statement; + if (statement && statement.verb) { + if (full) return statement.verb; + return statement.verb.id ? statement.verb.id.split('/').pop() : ''; + } + return null; +}; + +H5P.XAPIEvent.prototype.setObject = function (instance) { + if (instance && instance.contentId) { + this.data.statement.object = { + id: 'h5p-content-' + instance.contentId, + objectType: 'Activity' + }; + } +}; + +H5P.XAPIEvent.prototype.setContext = function (instance) { + if (instance && instance.parent) { + this.data.statement.context = { + contextActivities: { + parent: [{ id: 'h5p-content-' + instance.parent.contentId, objectType: 'Activity' }] + } + }; + } +}; + +H5P.XAPIEvent.prototype.setActor = function () { + this.data.statement.actor = { + account: { name: 'preview-user', homePage: window.location.origin }, + objectType: 'Agent' + }; +}; + +H5P.XAPIEvent.prototype.getScore = function () { + return this.getVerifiedStatementValue(['result', 'score', 'raw']); +}; + +H5P.XAPIEvent.prototype.getMaxScore = function () { + return this.getVerifiedStatementValue(['result', 'score', 'max']); +}; + +H5P.XAPIEvent.prototype.getContentXAPIId = function (instance) { + if (instance && instance.contentId) { + return 'h5p-content-' + instance.contentId; + } + return null; +}; + +/** + * Create an xAPI event template + */ 
+H5P.createXAPIEventTemplate = function (verb, extra) { + var event = new H5P.XAPIEvent(); + event.setActor(); + event.setVerb(verb); + if (extra) { + for (var key in extra) { + if (extra.hasOwnProperty(key)) { + event.data.statement[key] = extra[key]; + } + } + } + return event; +}; + +/** + * External event dispatcher — singleton for bubbled xAPI events + */ +H5P.externalDispatcher = new H5P.EventDispatcher(); + +/** + * jQuery reference — set after jQuery loads + */ +H5P.jQuery = (typeof jQuery !== 'undefined') ? jQuery : (typeof $ !== 'undefined' ? $ : null); + +/** + * Global state + */ +H5P.isFramed = (window.self !== window.top); +H5P.instances = []; +H5P.contentDatas = {}; + +/** + * Resolve content file paths (images, audio, etc.) + */ +H5P.getPath = function (path, contentId) { + if (path.substr(0, 7) === 'http://' || path.substr(0, 8) === 'https://') { + return path; + } + if (H5P.contentBasePath) { + return H5P.contentBasePath + '/' + path; + } + return path; +}; + +/** + * HTML-escape a string for safe rendering as title/text + */ +H5P.createTitle = function (rawTitle) { + if (!rawTitle) return ''; + var div = document.createElement('div'); + div.textContent = rawTitle; + return div.innerHTML; +}; + +/** + * Generate a random UUID + */ +H5P.createUUID = function () { + return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function (c) { + var r = Math.random() * 16 | 0; + var v = c === 'x' ? r : (r & 0x3 | 0x8); + return v.toString(16); + }); +}; + +/** + * Fisher-Yates shuffle + */ +H5P.shuffleArray = function (arr) { + if (!Array.isArray(arr)) return arr; + for (var i = arr.length - 1; i > 0; i--) { + var j = Math.floor(Math.random() * (i + 1)); + var temp = arr[i]; + arr[i] = arr[j]; + arr[j] = temp; + } + return arr; +}; + +/** + * String trim wrapper + */ +H5P.trim = function (value) { + return (typeof value === 'string') ? 
value.trim() : value; +}; + +/** + * JSON deep clone + */ +H5P.cloneObject = function (object, recursive) { + var clone = object instanceof Array ? [] : {}; + for (var i in object) { + if (object.hasOwnProperty(i)) { + if (recursive !== undefined && recursive && typeof object[i] === 'object' && object[i] !== null) { + clone[i] = H5P.cloneObject(object[i], recursive); + } else { + clone[i] = object[i]; + } + } + } + return clone; +}; + +/** + * Resolve "H5P.MultiChoice" → H5P.MultiChoice constructor + */ +H5P.classFromName = function (name) { + var parts = name.split('.'); + var current = window; + for (var i = 0; i < parts.length; i++) { + current = current[parts[i]]; + if (!current) return undefined; + } + return current; +}; + +/** + * Attach a library instance to a container + */ +H5P.newRunnable = function (library, contentId, $attachTo, skipResize, extras) { + var nameSplit, versionSplit; + + try { + if (typeof library === 'string') { + // Parse "H5P.MultiChoice 1.16" + var parts = library.split(' '); + nameSplit = parts[0]; + versionSplit = parts[1] ? parts[1].split('.') : [1, 0]; + } else if (library.library) { + var lparts = library.library.split(' '); + nameSplit = lparts[0]; + versionSplit = lparts[1] ? lparts[1].split('.') : [1, 0]; + } else if (library.machineName) { + nameSplit = library.machineName; + versionSplit = [library.majorVersion || 1, library.minorVersion || 0]; + } else { + return undefined; + } + } catch (e) { + return undefined; + } + + var constructor = H5P.classFromName(nameSplit); + if (typeof constructor !== 'function') { + console.warn('H5P: Library not loaded:', nameSplit, '- rendering placeholder'); + // Return a stub instance so parent content types (e.g. Column) don't crash + var stub = new H5P.EventDispatcher(); + stub.libraryInfo = { machineName: nameSplit, majorVersion: parseInt(versionSplit[0]), minorVersion: parseInt(versionSplit[1]) }; + stub.contentId = contentId; + stub.attach = function ($container) { + $container.html('
' + + '' + nameSplit + ' — Library not available for preview.' + + '
'); + }; + if ($attachTo) { + stub.attach(H5P.jQuery($attachTo)); + } + return stub; + } + + var params = (library && library.params) ? library.params : {}; + var subContentId = (library && library.subContentId) ? library.subContentId : undefined; + var metadata = (library && library.metadata) ? library.metadata : {}; + + extras = extras || {}; + extras.metadata = metadata; + extras.subContentId = subContentId; + + var instance; + try { + instance = new constructor(params, contentId, extras); + } catch (e) { + console.warn('H5P: Failed to create instance of', nameSplit, e, '- rendering placeholder'); + var fallback = new H5P.EventDispatcher(); + fallback.libraryInfo = { machineName: nameSplit, majorVersion: parseInt(versionSplit[0]), minorVersion: parseInt(versionSplit[1]) }; + fallback.contentId = contentId; + fallback.attach = function ($container) { + $container.html('
' + + '' + nameSplit + ' — Failed to initialize.' + + '
'); + }; + if ($attachTo) { + fallback.attach(H5P.jQuery($attachTo)); + } + return fallback; + } + + if (instance) { + instance.libraryInfo = { + machineName: nameSplit, + majorVersion: parseInt(versionSplit[0]), + minorVersion: parseInt(versionSplit[1]) + }; + instance.contentId = contentId; + instance.subContentId = subContentId; + + if ($attachTo) { + instance.attach(H5P.jQuery($attachTo)); + } + + H5P.instances.push(instance); + } + + return instance; +}; + +/** + * Translate function — returns the text as-is in preview mode. + * H5P libraries call H5P.t() for i18n strings. + */ +H5P.t = function (key, vars, ns) { + // In preview mode, just return the key or l10n default + return key; +}; + +/** + * Get the user's locale/language + */ +H5P.getLanguage = function () { + return 'en'; +}; + +/** + * Communicate with host (no-op in preview) + */ +H5P.communicator = { + on: function () {}, + send: function () {} +}; + +/** + * Clipboard — stub for copy/paste support + */ +H5P.clipboardify = function () {}; +H5P.getClipboard = function () { return null; }; +H5P.setClipboard = function () {}; + +/** + * Confirmation dialog stub + */ +H5P.ConfirmationDialog = function (options) { + var self = this; + H5P.EventDispatcher.call(self); + self.options = options || {}; + + self.show = function () { return self; }; + self.hide = function () { return self; }; + self.getElement = function () { + return H5P.jQuery('
')[0]; + }; + self.appendTo = function () { return self; }; + self.setOffset = function () { return self; }; +}; +H5P.ConfirmationDialog.prototype = Object.create(H5P.EventDispatcher.prototype); +H5P.ConfirmationDialog.prototype.constructor = H5P.ConfirmationDialog; + +/** + * Content user data — stub for save/load state + */ +H5P.getUserData = function (contentId, dataType, done) { + if (typeof done === 'function') { + done(undefined, null); + } +}; + +H5P.setUserData = function () {}; +H5P.deleteUserData = function () {}; + +/** + * Fullscreen — stub + */ +H5P.fullScreen = function ($element, instance) {}; +H5P.isFullscreen = false; +H5P.fullScreenBrowserPrefix = undefined; +H5P.semiFullScreen = function () {}; +H5P.exitFullScreen = function () {}; + +/** + * Content copyrights — stub + */ +H5P.ContentCopyrights = function () { + this.media = []; + this.content = []; + this.addMedia = function (media) { this.media.push(media); }; + this.addContent = function (content) { this.content.push(content); }; + this.toString = function () { return ''; }; +}; + +H5P.MediaCopyright = function (copyright, labels, order) { + this.copyright = copyright || {}; + this.toString = function () { return ''; }; +}; + +H5P.Thumbnail = function (source, width, height) { + this.source = source; + this.width = width; + this.height = height; + this.toString = function () { return ''; }; +}; + +H5P.getCopyrights = function () { return ''; }; + +/** + * Tooltip stub + */ +H5P.Tooltip = H5P.Tooltip || function (element, options) { + // Simple tooltip — no-op for preview +}; + +/** + * H5P.Transition helper + */ +H5P.Transition = H5P.Transition || { + onTransitionEnd: function ($element, callback, timeout) { + if (typeof callback === 'function') { + setTimeout(callback, timeout || 0); + } + } +}; + +/** + * Resize observer/trigger + */ +H5P.trigger = function (instance, eventName, data) { + if (instance && instance.trigger) { + instance.trigger(eventName, data); + } +}; + +H5P.on = function 
(instance, eventName, callback) { + if (instance && instance.on) { + instance.on(eventName, callback); + } +}; + +/** + * $body — set during init + */ +H5P.$body = null; +H5P.$window = null; + +/** + * Dialog class — used by some content types + */ +H5P.Dialog = function (name, title, content, $element) { + var self = this; + H5P.EventDispatcher.call(self); + + var $dialog = H5P.jQuery(''); + + self.open = function () { + $dialog.addClass('h5p-open'); + $dialog.find('.h5p-close').on('click', function () { self.close(); }); + if ($element) $element.append($dialog); + return self; + }; + + self.close = function () { + $dialog.removeClass('h5p-open'); + self.trigger('close'); + return self; + }; + + self.getElement = function () { return $dialog; }; +}; +H5P.Dialog.prototype = Object.create(H5P.EventDispatcher.prototype); +H5P.Dialog.prototype.constructor = H5P.Dialog; + +/** + * JoubelScoreBar integration — used by Question types + */ +H5P.JoubelScoreBar = H5P.JoubelScoreBar || function (maxScore, label, helpText, scoreExplanationButtonLabel) { + var self = this; + H5P.EventDispatcher.call(self); + + self.setScore = function (score) {}; + self.setMaxScore = function (maxScore) {}; + self.getElement = function () { return H5P.jQuery('
'); }; + self.appendTo = function ($container) {}; +}; + +/** + * Main init function — called to bootstrap H5P content + */ +H5P.init = function (container, integration) { + if (!container || !integration) { + console.error('H5P.init: container and integration required'); + return; + } + + var $ = H5P.jQuery; + if (!$) { + console.error('H5P.init: jQuery is required'); + return; + } + + H5P.$body = $('body'); + H5P.$window = $(window); + + // Set up content path resolution + H5P.contentBasePath = integration.contentPath || ''; + + var contentData; + try { + contentData = typeof integration.contentData === 'string' + ? JSON.parse(integration.contentData) + : integration.contentData; + } catch (e) { + console.error('H5P.init: Failed to parse content data', e); + return; + } + + if (!contentData) { + console.error('H5P.init: No content data'); + return; + } + + // Build the library string "H5P.MultiChoice 1.16" + var libraryString = integration.mainLibrary; + if (!libraryString) { + console.error('H5P.init: mainLibrary not specified'); + return; + } + + var contentId = integration.contentId || 'preview-' + H5P.createUUID(); + + // Store content data for potential sub-content access + H5P.contentDatas[contentId] = contentData; + + // Create the wrapper + var $container = $(container); + $container.addClass('h5p-content h5p-initialized'); + $container.attr('data-content-id', contentId); + + // Create the runnable + var library = { + library: libraryString, + params: contentData, + metadata: integration.metadata || { title: integration.title || 'H5P Preview' } + }; + + var instance = H5P.newRunnable(library, contentId, $container, false, { + metadata: library.metadata + }); + + if (!instance) { + $container.html('

Failed to initialize H5P content. The main library "' + + libraryString + '" could not be loaded.

'); + return; + } + + // Trigger initial resize + if (instance.$ && instance.$.trigger) { + instance.$.trigger('resize'); + } + if (instance.trigger) { + instance.trigger('resize'); + } + + // Listen for window resize + $(window).on('resize', function () { + if (instance.trigger) { + instance.trigger('resize'); + } + }); + + return instance; +}; diff --git a/server.js b/server.js index 933c4da..5636fb7 100644 --- a/server.js +++ b/server.js @@ -164,6 +164,9 @@ app.post('/Shibboleth.sso/SAML2/POST', (req, res) => { app.handle(req, res); }); +// Static serving for extracted H5P preview files (before API routes to avoid rate limiting) +app.use('/h5p-preview-files', express.static(path.join(__dirname, 'routes', 'create', 'uploads', 'h5p-preview'))); + // Mount the API router FIRST (before static files) app.use('/api/create', createRoutes); diff --git a/src/App.tsx b/src/App.tsx index 277e351..cf07da1 100644 --- a/src/App.tsx +++ b/src/App.tsx @@ -8,6 +8,7 @@ import QuizView from './components/QuizView'; import UserAccount from './components/UserAccount'; import Login from './components/Login'; import NotFound from "./pages/NotFound"; +import H5PPreview from "./pages/H5PPreview"; import { useState, useEffect } from 'react'; import { API_URL } from './config/api'; @@ -66,6 +67,9 @@ const App = () => { : } /> : } /> + {/* H5P Preview — no auth required (dev tool) */} + } /> + {/* SAML callback route */} } /> diff --git a/src/components/review/ReviewEdit.tsx b/src/components/review/ReviewEdit.tsx index 3ead00f..9031193 100644 --- a/src/components/review/ReviewEdit.tsx +++ b/src/components/review/ReviewEdit.tsx @@ -10,13 +10,11 @@ import { fetchQuestions, deleteQuestion, updateQuestion as updateQuestionThunk } import { selectQuestionsByQuiz } from '../../store/selectors'; import RegeneratePromptModal from '../RegeneratePromptModal'; import PdfExportModal from '../PdfExportModal'; -import InteractiveQuestionView from './InteractiveQuestionView'; import ManualQuestionForm 
from './ManualQuestionForm'; import QuestionCard from './QuestionCard'; import { useQuestionEditHandlers } from './useQuestionEditHandlers'; import { ReviewEditProps, ExtendedQuestion } from './reviewTypes'; import '../../styles/components/ReviewEdit.css'; -import '../../styles/components/InteractiveQuestions.css'; const ReviewEdit = ({ quizId, learningObjectives }: ReviewEditProps) => { const [searchParams] = useSearchParams(); @@ -35,18 +33,6 @@ const ReviewEdit = ({ quizId, learningObjectives }: ReviewEditProps) => { const [showManualAdd, setShowManualAdd] = useState(false); const [filterByLO, setFilterByLO] = useState(null); - const [expandedBulletPoints, setExpandedBulletPoints] = useState<{[questionId: string]: {[bulletIndex: number]: boolean}}>({}); - - const toggleBulletPoint = (questionId: string, bulletIndex: number) => { - setExpandedBulletPoints(prev => ({ - ...prev, - [questionId]: { - ...prev[questionId], - [bulletIndex]: !prev[questionId]?.[bulletIndex] - } - })); - }; - const handlers = useQuestionEditHandlers(questions, setQuestions); // Load questions from Redux on mount @@ -343,17 +329,36 @@ const ReviewEdit = ({ quizId, learningObjectives }: ReviewEditProps) => { ) : viewMode === 'interact' ? (
- {filteredQuestions.map((question, index) => ( -
- -
- ))} +