From 3e26bbd6c7602fcd5274f3e1cf94b8b7f88531ad Mon Sep 17 00:00:00 2001 From: marco Date: Sat, 22 Nov 2025 23:25:37 +0100 Subject: [PATCH 1/2] Add modular plugin system with 6 plugins and management commands - Implemented dynamic plugin loading system with provider awareness - Added content processing plugins: summarize_youtube_video, web_reader - Added multimodal validation plugins: watch_video, watch_picture, listen_audio - Added content generation plugin: generate_picture - Created config_plugins.py for plugin activation/deactivation - Added Telegram commands: /plugins, /enable_plugin, /disable_plugin, etc. - Updated main.py to support plugin system and multimodal inputs - Comprehensive documentation in plugins/README.md and individual plugin READMEs --- main.py | 409 +++++++++++++++++- plugins/README.md | 192 ++++++++ plugins/config_plugins.py | 56 +++ plugins/generate_picture/README.md | 21 + plugins/generate_picture/license.md | 24 + plugins/generate_picture/main.py | 107 +++++ plugins/generate_picture/requirements.txt | 0 plugins/listen_audio/README.md | 21 + plugins/listen_audio/license.md | 24 + plugins/listen_audio/main.py | 93 ++++ plugins/listen_audio/requirements.txt | 0 plugins/summarize_youtube_video/README.md | 16 + plugins/summarize_youtube_video/__init__.py | 0 plugins/summarize_youtube_video/license.md | 24 + plugins/summarize_youtube_video/main.py | 150 +++++++ .../summarize_youtube_video/requirements.txt | 1 + plugins/watch_picture/README.md | 21 + plugins/watch_picture/license.md | 24 + plugins/watch_picture/main.py | 92 ++++ plugins/watch_picture/requirements.txt | 0 plugins/watch_video/README.md | 21 + plugins/watch_video/license.md | 24 + plugins/watch_video/main.py | 94 ++++ plugins/watch_video/requirements.txt | 0 plugins/web_reader/README.md | 17 + plugins/web_reader/license.md | 24 + plugins/web_reader/main.py | 120 +++++ plugins/web_reader/requirements.txt | 2 + requirements.txt | 4 +- 29 files changed, 1576 insertions(+), 5 
deletions(-) create mode 100644 plugins/README.md create mode 100644 plugins/config_plugins.py create mode 100644 plugins/generate_picture/README.md create mode 100644 plugins/generate_picture/license.md create mode 100644 plugins/generate_picture/main.py create mode 100644 plugins/generate_picture/requirements.txt create mode 100644 plugins/listen_audio/README.md create mode 100644 plugins/listen_audio/license.md create mode 100644 plugins/listen_audio/main.py create mode 100644 plugins/listen_audio/requirements.txt create mode 100644 plugins/summarize_youtube_video/README.md create mode 100644 plugins/summarize_youtube_video/__init__.py create mode 100644 plugins/summarize_youtube_video/license.md create mode 100644 plugins/summarize_youtube_video/main.py create mode 100644 plugins/summarize_youtube_video/requirements.txt create mode 100644 plugins/watch_picture/README.md create mode 100644 plugins/watch_picture/license.md create mode 100644 plugins/watch_picture/main.py create mode 100644 plugins/watch_picture/requirements.txt create mode 100644 plugins/watch_video/README.md create mode 100644 plugins/watch_video/license.md create mode 100644 plugins/watch_video/main.py create mode 100644 plugins/watch_video/requirements.txt create mode 100644 plugins/web_reader/README.md create mode 100644 plugins/web_reader/license.md create mode 100644 plugins/web_reader/main.py create mode 100644 plugins/web_reader/requirements.txt diff --git a/main.py b/main.py index ed7b66a..089c3ff 100644 --- a/main.py +++ b/main.py @@ -3,6 +3,7 @@ PROVIDER_FROM_ENV, ask_gpt_multi_message, ) +from plugins import config_plugins from telegram import Update, InlineKeyboardButton, InlineKeyboardMarkup from telegram.ext import ( Application, @@ -16,6 +17,47 @@ import io from utils.images import openai_requirements_image_resize, encode_image_to_data_url from config import MAX_IMAGES_PER_MESSAGE +from utils.images import openai_requirements_image_resize, encode_image_to_data_url +from config 
import MAX_IMAGES_PER_MESSAGE +import importlib.util +import sys + +# Dynamic Plugin Loading +PLUGINS = [] +PLUGINS_DIR = os.path.join(os.path.dirname(__file__), "plugins") + +def load_plugins(): + global PLUGINS + PLUGINS = [] + if not os.path.exists(PLUGINS_DIR): + print(f"Plugins directory not found: {PLUGINS_DIR}") + return + + for plugin_name in os.listdir(PLUGINS_DIR): + plugin_path = os.path.join(PLUGINS_DIR, plugin_name) + if os.path.isdir(plugin_path): + # Check if plugin is enabled in config + if not config_plugins.is_plugin_enabled(plugin_name): + print(f"Plugin {plugin_name} is disabled in config.") + continue + + main_py = os.path.join(plugin_path, "main.py") + if os.path.exists(main_py): + try: + spec = importlib.util.spec_from_file_location(f"plugins.{plugin_name}", main_py) + module = importlib.util.module_from_spec(spec) + sys.modules[f"plugins.{plugin_name}"] = module + spec.loader.exec_module(module) + + if hasattr(module, "is_plugin_applicable") and hasattr(module, "process_messages"): + PLUGINS.append(module) + print(f"Loaded plugin: {plugin_name}") + else: + print(f"Plugin {plugin_name} missing required functions.") + except Exception as e: + print(f"Error loading plugin {plugin_name}: {e}") + +load_plugins() """ Note: @@ -104,6 +146,84 @@ async def start_game_callback( await start_new_game(update, context) +async def plugins_status(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + """Show the status of all plugins.""" + user_id = update.effective_user.id + if user_id not in ALLOWED_USER_IDS: + await update.message.reply_text(f"No permission. 
Your user_id is {user_id}.") + return + + status = config_plugins.get_plugin_status() + message = "**Plugin Status:**\n\n" + for plugin_name, enabled in status.items(): + status_emoji = "✅" if enabled else "❌" + message += f"{status_emoji} `{plugin_name}`: {'Enabled' if enabled else 'Disabled'}\n" + + await update.message.reply_text(message, parse_mode="Markdown") + + +async def enable_plugin_cmd(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + """Enable a specific plugin.""" + user_id = update.effective_user.id + if user_id not in ALLOWED_USER_IDS: + await update.message.reply_text(f"No permission. Your user_id is {user_id}.") + return + + if not context.args: + await update.message.reply_text("Usage: /enable_plugin <plugin_name>") + return + + plugin_name = context.args[0] + if config_plugins.enable_plugin(plugin_name): + load_plugins() # Reload plugins + await update.message.reply_text(f"✅ Plugin `{plugin_name}` enabled.", parse_mode="Markdown") + else: + await update.message.reply_text(f"❌ Plugin `{plugin_name}` not found.", parse_mode="Markdown") + + +async def disable_plugin_cmd(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + """Disable a specific plugin.""" + user_id = update.effective_user.id + if user_id not in ALLOWED_USER_IDS: + await update.message.reply_text(f"No permission. 
Your user_id is {user_id}.") + return + + if not context.args: + await update.message.reply_text("Usage: /disable_plugin <plugin_name>") + return + + plugin_name = context.args[0] + if config_plugins.disable_plugin(plugin_name): + load_plugins() # Reload plugins + await update.message.reply_text(f"❌ Plugin `{plugin_name}` disabled.", parse_mode="Markdown") + else: + await update.message.reply_text(f"❌ Plugin `{plugin_name}` not found.", parse_mode="Markdown") + + +async def enable_all_plugins_cmd(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + """Enable all plugins.""" + user_id = update.effective_user.id + if user_id not in ALLOWED_USER_IDS: + await update.message.reply_text(f"No permission. Your user_id is {user_id}.") + return + + config_plugins.enable_all_plugins() + load_plugins() # Reload plugins + await update.message.reply_text("✅ All plugins enabled.") + + +async def disable_all_plugins_cmd(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + """Disable all plugins.""" + user_id = update.effective_user.id + if user_id not in ALLOWED_USER_IDS: + await update.message.reply_text(f"No permission. Your user_id is {user_id}.") + return + + config_plugins.disable_all_plugins() + load_plugins() # Reload plugins + await update.message.reply_text("❌ All plugins disabled.") + + def update_provider_from_user_input(user_input): switch7 = False report = "" @@ -142,15 +262,65 @@ async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE) -> if user_input.lower().startswith(indicator): user_input = user_input[len(indicator) :].strip() + # Plugin processing + # Create a temporary message list to pass to plugins + # We need to construct it as if it was in the history, but it's just the current message for now + # Actually, plugins might need history? The spec says `is_plugin_applicable(messages)`. + # So we should pass the current history + the new message? + # But we haven't appended the new message to history yet. 
+ # Let's construct a temporary list. + + temp_messages = [] + if user_id in MESSAGES_BY_USER: + temp_messages = MESSAGES_BY_USER[user_id].copy() + + # Append current message + temp_messages.append({"role": "user", "content": user_input}) + + plugin_processed = False + user_input_to_process = user_input + + for plugin in PLUGINS: + try: + # Pass the selected provider to the plugin + # The provider might be None if not set yet, defaulting to env + current_provider = SELECTED_PROVIDER if SELECTED_PROVIDER else PROVIDER_FROM_ENV + + if plugin.is_plugin_applicable(temp_messages, current_provider): + print(f"Plugin {plugin.__name__} triggered.") + # process_messages modifies the messages list in place or returns it? + # The spec says "Modifies the messages accordingly". + # Let's assume it modifies the last message content if needed. + # We should pass a copy or the actual list? + # If we pass temp_messages, we can check if the last message content changed. + + # We need to be careful. If we pass temp_messages and it modifies it, + # we extract the content from the last message. + + # we extract the content from the last message. + + updated_messages = plugin.process_messages(temp_messages, current_provider) + if updated_messages: + last_msg = updated_messages[-1] + if last_msg["content"] != user_input: + user_input_to_process = last_msg["content"] + plugin_processed = True + await update.message.reply_text(f"Processed by plugin: {plugin.__name__.split('.')[-1]}") + break # Stop after first plugin triggers? 
+ except Exception as e: + print(f"Error executing plugin {plugin.__name__}: {e}") + + # If no plugin processed it, user_input_to_process remains user_input + if user_id in MESSAGES_BY_USER: MESSAGES_BY_USER[user_id].append( - {"role": "user", "content": user_input}, + {"role": "user", "content": user_input_to_process}, ) else: # the user posted his first message MESSAGES_BY_USER[user_id] = [ {"role": "system", "content": SYSTEM_MSG}, - {"role": "user", "content": user_input}, + {"role": "user", "content": user_input_to_process}, ] # answer = ask_gpt_single_message(user_input, SYSTEM_MSG, max_length=500) @@ -245,12 +415,49 @@ async def handle_photo_message(update: Update, context: ContextTypes.DEFAULT_TYP content_parts.append({"type": "text", "text": update.message.caption.strip()}) content_parts.append(image_content) + user_input = update.message.caption or "" + + # Provider update logic + switch7, report = update_provider_from_user_input(user_input) + if switch7: + await update.message.reply_text(report) + + # Strip indicator + for provider, indicators in PROVIDER_INDICATORS.items(): + for indicator in indicators: + if user_input.lower().startswith(indicator): + user_input = user_input[len(indicator) :].strip() + + # Temp messages for plugins + temp_messages = [] if user_id in MESSAGES_BY_USER: - MESSAGES_BY_USER[user_id].append({"role": "user", "content": content_parts}) + temp_messages = MESSAGES_BY_USER[user_id].copy() + + temp_messages.append({"role": "user", "content": content_parts}) + + plugin_processed = False + current_provider = SELECTED_PROVIDER if SELECTED_PROVIDER else PROVIDER_FROM_ENV + final_messages = temp_messages # Default + + for plugin in PLUGINS: + try: + if plugin.is_plugin_applicable(temp_messages, current_provider): + print(f"Plugin {plugin.__name__} triggered.") + updated_messages = plugin.process_messages(temp_messages, current_provider) + if updated_messages: + final_messages = updated_messages + plugin_processed = True + await 
update.message.reply_text(f"Processed by plugin: {plugin.__name__.split('.')[-1]}") + break + except Exception as e: + print(f"Error executing plugin {plugin.__name__}: {e}") + + if user_id in MESSAGES_BY_USER: + MESSAGES_BY_USER[user_id].append(final_messages[-1]) else: MESSAGES_BY_USER[user_id] = [ {"role": "system", "content": SYSTEM_MSG}, - {"role": "user", "content": content_parts}, + final_messages[-1], ] answer = ask_gpt_multi_message( @@ -363,6 +570,193 @@ async def handle_image_document_message(update: Update, context: ContextTypes.DE await update.message.reply_text(answer) +async def handle_video_message(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + print("In the handle_video_message function...") + user_id = update.effective_user.id + + if user_id in ALLOWED_USER_IDS: + try: + video = update.message.video + if not video: + return + + print(f"DEBUG(video): file_id={video.file_id}, mime={video.mime_type}, size={video.file_size}") + + video_content = { + "type": "video", + "file_id": video.file_id, + "mime_type": video.mime_type, + "file_size": video.file_size, + "file_name": getattr(video, "file_name", "video.mp4") + } + + content_parts = [] + if isinstance(update.message.caption, str) and len(update.message.caption.strip()) > 0: + content_parts.append({"type": "text", "text": update.message.caption.strip()}) + content_parts.append(video_content) + + user_input = update.message.caption or "" + + # Provider update logic + switch7, report = update_provider_from_user_input(user_input) + if switch7: + await update.message.reply_text(report) + + # Strip indicator + for provider, indicators in PROVIDER_INDICATORS.items(): + for indicator in indicators: + if user_input.lower().startswith(indicator): + user_input = user_input[len(indicator) :].strip() + + # Temp messages for plugins + temp_messages = [] + if user_id in MESSAGES_BY_USER: + temp_messages = MESSAGES_BY_USER[user_id].copy() + + temp_messages.append({"role": "user", "content": 
content_parts}) + + plugin_processed = False + current_provider = SELECTED_PROVIDER if SELECTED_PROVIDER else PROVIDER_FROM_ENV + final_messages = temp_messages # Default + + for plugin in PLUGINS: + try: + if plugin.is_plugin_applicable(temp_messages, current_provider): + print(f"Plugin {plugin.__name__} triggered.") + updated_messages = plugin.process_messages(temp_messages, current_provider) + if updated_messages: + final_messages = updated_messages + plugin_processed = True + await update.message.reply_text(f"Processed by plugin: {plugin.__name__.split('.')[-1]}") + break + except Exception as e: + print(f"Error executing plugin {plugin.__name__}: {e}") + + if user_id in MESSAGES_BY_USER: + MESSAGES_BY_USER[user_id].append(final_messages[-1]) + else: + MESSAGES_BY_USER[user_id] = [ + {"role": "system", "content": SYSTEM_MSG}, + final_messages[-1], + ] + + answer = ask_gpt_multi_message( + MESSAGES_BY_USER[user_id], + max_length=500, + user_defined_provider=SELECTED_PROVIDER, + ) + + MESSAGES_BY_USER[user_id].append({"role": "assistant", "content": answer}) + if len(MESSAGES_BY_USER[user_id]) > MAX_MESSAGES_NUM: + MESSAGES_BY_USER[user_id] = MESSAGES_BY_USER[user_id][-MAX_MESSAGES_NUM:] + MESSAGES_BY_USER[user_id].insert(0, {"role": "system", "content": SYSTEM_MSG}) + + await update.message.reply_text(answer) + + except Exception as e: + print(f"Error handling video: {e}") + await update.message.reply_text("Sorry, failed to process the video.") + else: + answer = f"Eh? Du hast doch keine Berechtigung. Deine user_id ist {user_id}." 
+ print(answer) + await update.message.reply_text(answer) + + +async def handle_audio_message(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: + print("In the handle_audio_message function...") + user_id = update.effective_user.id + + if user_id in ALLOWED_USER_IDS: + try: + audio = update.message.audio or update.message.voice + if not audio: + return + + # Determine type + msg_type = "audio" if update.message.audio else "voice" + + print(f"DEBUG({msg_type}): file_id={audio.file_id}, mime={audio.mime_type}, size={audio.file_size}") + + audio_content = { + "type": msg_type, + "file_id": audio.file_id, + "mime_type": audio.mime_type, + "file_size": audio.file_size, + "file_name": getattr(audio, "file_name", "audio.mp3") + } + + content_parts = [] + if isinstance(update.message.caption, str) and len(update.message.caption.strip()) > 0: + content_parts.append({"type": "text", "text": update.message.caption.strip()}) + content_parts.append(audio_content) + + user_input = update.message.caption or "" + + # Provider update logic + switch7, report = update_provider_from_user_input(user_input) + if switch7: + await update.message.reply_text(report) + + # Strip indicator + for provider, indicators in PROVIDER_INDICATORS.items(): + for indicator in indicators: + if user_input.lower().startswith(indicator): + user_input = user_input[len(indicator) :].strip() + + # Temp messages for plugins + temp_messages = [] + if user_id in MESSAGES_BY_USER: + temp_messages = MESSAGES_BY_USER[user_id].copy() + + temp_messages.append({"role": "user", "content": content_parts}) + + plugin_processed = False + current_provider = SELECTED_PROVIDER if SELECTED_PROVIDER else PROVIDER_FROM_ENV + final_messages = temp_messages # Default + + for plugin in PLUGINS: + try: + if plugin.is_plugin_applicable(temp_messages, current_provider): + print(f"Plugin {plugin.__name__} triggered.") + updated_messages = plugin.process_messages(temp_messages, current_provider) + if updated_messages: + 
final_messages = updated_messages + plugin_processed = True + await update.message.reply_text(f"Processed by plugin: {plugin.__name__.split('.')[-1]}") + break + except Exception as e: + print(f"Error executing plugin {plugin.__name__}: {e}") + + if user_id in MESSAGES_BY_USER: + MESSAGES_BY_USER[user_id].append(final_messages[-1]) + else: + MESSAGES_BY_USER[user_id] = [ + {"role": "system", "content": SYSTEM_MSG}, + final_messages[-1], + ] + + answer = ask_gpt_multi_message( + MESSAGES_BY_USER[user_id], + max_length=500, + user_defined_provider=SELECTED_PROVIDER, + ) + + MESSAGES_BY_USER[user_id].append({"role": "assistant", "content": answer}) + if len(MESSAGES_BY_USER[user_id]) > MAX_MESSAGES_NUM: + MESSAGES_BY_USER[user_id] = MESSAGES_BY_USER[user_id][-MAX_MESSAGES_NUM:] + MESSAGES_BY_USER[user_id].insert(0, {"role": "system", "content": SYSTEM_MSG}) + + await update.message.reply_text(answer) + + except Exception as e: + print(f"Error handling audio: {e}") + await update.message.reply_text("Sorry, failed to process the audio.") + else: + answer = f"Eh? Du hast doch keine Berechtigung. Deine user_id ist {user_id}." + print(answer) + await update.message.reply_text(answer) + + async def restrict(update: Update, context: ContextTypes.DEFAULT_TYPE): user_id = update.effective_user.id text = f"Keine Berechtigung für user_id {user_id}." 
@@ -384,8 +778,15 @@ def main(): app.add_handler(restrict_handler) app.add_handler(CommandHandler("start", start)) + app.add_handler(CommandHandler("plugins", plugins_status)) + app.add_handler(CommandHandler("enable_plugin", enable_plugin_cmd)) + app.add_handler(CommandHandler("disable_plugin", disable_plugin_cmd)) + app.add_handler(CommandHandler("enable_all_plugins", enable_all_plugins_cmd)) + app.add_handler(CommandHandler("disable_all_plugins", disable_all_plugins_cmd)) app.add_handler(CallbackQueryHandler(start_game_callback, pattern="^start_game$")) app.add_handler(MessageHandler(filters.PHOTO, handle_photo_message)) + app.add_handler(MessageHandler(filters.VIDEO, handle_video_message)) + app.add_handler(MessageHandler(filters.AUDIO | filters.VOICE, handle_audio_message)) app.add_handler(MessageHandler(filters.Document.ALL, handle_image_document_message)) app.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_message)) app.run_polling() diff --git a/plugins/README.md b/plugins/README.md new file mode 100644 index 0000000..d985eb2 --- /dev/null +++ b/plugins/README.md @@ -0,0 +1,192 @@ +# Telegram LLM Bot - Plugin System + +**Author:** Marco Baturan + +## Overview + +This directory contains the plugin system for the Telegram LLM Bot. Plugins are modular extensions that enhance the bot's functionality by adding specialized capabilities such as content summarization, multimodal input validation, and web scraping. + +## Architecture + +The plugin system is designed with the following principles: + +### Dynamic Loading +- Plugins are automatically discovered and loaded from subdirectories at startup +- Each plugin must be in its own directory with a `main.py` file +- Plugins can be enabled/disabled via configuration without code changes + +### Plugin Interface +Each plugin must implement two functions in `main.py`: + +1. 
**`is_plugin_applicable(messages, provider)`** + - Determines if the plugin should process the current message + - Receives the message history and the active AI provider + - Returns `True` if the plugin should handle this message, `False` otherwise + +2. **`process_messages(messages, provider)`** + - Modifies the message content before sending to the LLM + - Can replace user input with processed content (e.g., transcripts, summaries) + - Returns the modified messages list + +### Provider Awareness +Plugins receive the active AI provider (e.g., "openai", "anthropic", "gemini") and can: +- Check if the provider supports required capabilities (vision, audio, video) +- Block unsupported content with helpful error messages +- Adapt behavior based on provider features + +## Available Plugins + +### Content Processing + +#### 📺 `summarize_youtube_video` +- **Purpose:** Automatically summarizes YouTube videos +- **Trigger:** Detects YouTube URLs in messages +- **Process:** Fetches transcript and generates executive summary +- **Requirements:** `youtube-transcript-api` +- **Compatibility:** All text-based providers + +#### 🌐 `web_reader` +- **Purpose:** Summarizes web page content +- **Trigger:** Detects HTTP/HTTPS URLs (excluding YouTube) +- **Process:** Scrapes page text and generates summary +- **Requirements:** `requests`, `beautifulsoup4` +- **Compatibility:** All text-based providers + +### Multimodal Input Validation + +#### 🎬 `watch_video` +- **Purpose:** Validates video upload compatibility +- **Trigger:** User uploads a video file +- **Process:** Checks if provider supports native video analysis +- **Supported Providers:** Gemini, OpenAI (GPT-4o) +- **Action:** Blocks video if provider doesn't support it + +#### 🖼️ `watch_picture` +- **Purpose:** Validates image upload compatibility +- **Trigger:** User uploads an image +- **Process:** Checks if provider supports vision +- **Supported Providers:** Gemini, OpenAI, Anthropic +- **Action:** Blocks image if provider 
doesn't support it + +#### 🎵 `listen_audio` +- **Purpose:** Validates audio upload compatibility +- **Trigger:** User uploads audio or voice note +- **Process:** Checks if provider supports native audio +- **Supported Providers:** Gemini, OpenAI (GPT-4o) +- **Action:** Blocks audio if provider doesn't support it + +### Content Generation + +#### 🎨 `generate_picture` +- **Purpose:** Validates image generation requests +- **Trigger:** Keywords like "generate image", "draw", "dibuja" +- **Process:** Checks if provider supports image generation +- **Supported Providers:** Gemini, OpenAI +- **Action:** Blocks request if provider doesn't support generation + +## Plugin Management + +### Configuration File +`config_plugins.py` controls which plugins are active: + +```python +PLUGIN_STATUS = { + "summarize_youtube_video": True, + "web_reader": True, + "watch_video": True, + "watch_picture": True, + "listen_audio": True, + "generate_picture": True, +} +``` + +### Telegram Commands + +Manage plugins directly from Telegram: + +- **`/plugins`** - Show status of all plugins +- **`/enable_plugin `** - Enable a specific plugin + - Example: `/enable_plugin summarize_youtube_video` +- **`/disable_plugin `** - Disable a specific plugin + - Example: `/disable_plugin web_reader` +- **`/enable_all_plugins`** - Enable all plugins +- **`/disable_all_plugins`** - Disable all plugins (useful for cost savings) + +### Cost Management +Disable expensive plugins during low-budget periods: +``` +/disable_plugin summarize_youtube_video +/disable_plugin web_reader +``` + +## Creating a New Plugin + +### Directory Structure +``` +plugins/ +├── your_plugin_name/ +│ ├── main.py # Required: Plugin logic +│ ├── requirements.txt # Required: Dependencies +│ ├── license.md # Required: License (Public Domain) +│ └── README.md # Required: Documentation +``` + +### Template (`main.py`) +```python +def is_plugin_applicable(messages, provider): + """ + Check if this plugin should process the message. 
+ + Args: + messages: List of message dicts with 'role' and 'content' + provider: Active AI provider (e.g., 'openai', 'anthropic') + + Returns: + bool: True if plugin should handle this message + """ + if not messages: + return False + + last_message = messages[-1] + if last_message.get("role") != "user": + return False + + # Your detection logic here + return False + +def process_messages(messages, provider): + """ + Process and modify the messages. + + Args: + messages: List of message dicts + provider: Active AI provider + + Returns: + list: Modified messages + """ + # Your processing logic here + return messages +``` + +### Registration +1. Add plugin name to `config_plugins.py` `PLUGIN_STATUS` +2. Set to `True` to enable by default +3. Plugin will be auto-loaded on next bot restart + +## Plugin Execution Order + +Plugins are processed in directory listing order. The **first** plugin that returns `True` from `is_plugin_applicable()` will process the message. Subsequent plugins are skipped. + +## Licensing + +All plugins in this directory are released into the **Public Domain** under the Unlicense. See individual `license.md` files for details. + +## Dependencies + +Install all plugin dependencies: +```bash +pip install -r requirements.txt +``` + +Individual plugin requirements are listed in their respective `requirements.txt` files. diff --git a/plugins/config_plugins.py b/plugins/config_plugins.py new file mode 100644 index 0000000..f7f3877 --- /dev/null +++ b/plugins/config_plugins.py @@ -0,0 +1,56 @@ +""" +Plugin Configuration Manager + +This file controls which plugins are active. +Set a plugin to False to deactivate it and save on API costs. +""" + +# Plugin activation status +PLUGIN_STATUS = { + "summarize_youtube_video": True, + "web_reader": True, + "watch_video": True, + "watch_picture": True, + "listen_audio": True, + "generate_picture": True, +} + +def is_plugin_enabled(plugin_name): + """ + Check if a plugin is enabled. 
+ + Args: + plugin_name: Name of the plugin (e.g., 'summarize_youtube_video') + + Returns: + bool: True if enabled, False otherwise + """ + return PLUGIN_STATUS.get(plugin_name, False) + +def enable_plugin(plugin_name): + """Enable a specific plugin.""" + if plugin_name in PLUGIN_STATUS: + PLUGIN_STATUS[plugin_name] = True + return True + return False + +def disable_plugin(plugin_name): + """Disable a specific plugin.""" + if plugin_name in PLUGIN_STATUS: + PLUGIN_STATUS[plugin_name] = False + return True + return False + +def enable_all_plugins(): + """Enable all plugins.""" + for plugin_name in PLUGIN_STATUS: + PLUGIN_STATUS[plugin_name] = True + +def disable_all_plugins(): + """Disable all plugins.""" + for plugin_name in PLUGIN_STATUS: + PLUGIN_STATUS[plugin_name] = False + +def get_plugin_status(): + """Get the current status of all plugins.""" + return PLUGIN_STATUS.copy() diff --git a/plugins/generate_picture/README.md b/plugins/generate_picture/README.md new file mode 100644 index 0000000..99ccb3e --- /dev/null +++ b/plugins/generate_picture/README.md @@ -0,0 +1,21 @@ +# Generate Picture Plugin + +## Description +This plugin detects user requests to generate images (e.g., "create an image of...", "draw a cat"). It ensures that such requests are only processed by AI providers that support image generation. + +## How it Works +1. **Detection:** The plugin scans user messages for keywords indicating an intent to generate images (e.g., "generate image", "create picture", "draw", "dibuja", "crea una imagen"). +2. **Provider Check:** It checks the currently active AI provider. + - **Supported:** Gemini (via Imagen), OpenAI (via DALL-E). + - **Unsupported:** Anthropic (Claude 3.5), Grok, Llama 3.2 (Text-only or Vision-input only). +3. **Action:** + - If supported, the request is passed to the model. + - If unsupported, the request is blocked with a warning. + +## Requirements +- None. + +## Compatibility +- **Gemini:** Supported. +- **OpenAI:** Supported. 
+- **Others:** Not supported (Request will be blocked). diff --git a/plugins/generate_picture/license.md b/plugins/generate_picture/license.md new file mode 100644 index 0000000..fdddb29 --- /dev/null +++ b/plugins/generate_picture/license.md @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to diff --git a/plugins/generate_picture/main.py b/plugins/generate_picture/main.py new file mode 100644 index 0000000..4ca4e88 --- /dev/null +++ b/plugins/generate_picture/main.py @@ -0,0 +1,107 @@ +""" +Generate Picture Plugin + +This plugin validates that image generation requests are only sent to AI providers +that support image generation capabilities. It detects user intent through keywords +in multiple languages and blocks requests to providers without generation support. 
+ +Supported Providers: +- Gemini (Google): Image generation via Imagen +- OpenAI: Image generation via DALL-E + +Unsupported Providers: +- Anthropic (Claude): Vision input only, no generation +- Others: Typically no generation support +""" + +import re + +# List of providers that support image generation +SUPPORTED_PROVIDERS = ["gemini", "openai"] + +# Keywords in English and Spanish indicating image generation intent +KEYWORDS = [ + "generate image", "create image", "draw", "paint", "picture of", + "generar imagen", "crear imagen", "dibuja", "pinta", "foto de", + "generate a picture", "create a picture", "haz un dibujo", "haz una imagen" +] + +def is_plugin_applicable(messages, provider): + """ + Determines if this plugin should process the current message. + + Returns True if the user's message contains keywords indicating + an intent to generate an image. Uses a multilingual keyword list + to detect requests in English and Spanish. + + Args: + messages: List of message dicts with 'role' and 'content' + provider: Active AI provider (e.g., 'openai', 'anthropic') + + Returns: + bool: True if message contains image generation keywords + """ + if not messages: + return False + + last_message = messages[-1] + if last_message.get("role") != "user": + return False + + content = last_message.get("content", "") + + # Extract text content (handle both string and multimodal list formats) + text_content = "" + if isinstance(content, list): + for part in content: + if part.get("type") == "text": + text_content += part.get("text", "") + " " + elif isinstance(content, str): + text_content = content + + # Case-insensitive keyword matching + text_content = text_content.lower() + + for keyword in KEYWORDS: + if keyword in text_content: + return True + + return False + +def process_messages(messages, provider): + """ + Validates provider compatibility with image generation. + + If the provider supports image generation, the request passes through unchanged. 
+ If not, the request is replaced with an error message explaining that the + user needs to switch to a compatible provider. + + Args: + messages: List of message dicts + provider: Active AI provider + + Returns: + list: Modified messages (either unchanged or with error message) + """ + if not messages: + return messages + + # Check if provider is supported (partial string matching) + is_supported = False + if provider: + provider_lower = provider.lower() + for supported in SUPPORTED_PROVIDERS: + if supported in provider_lower: + is_supported = True + break + + if is_supported: + print(f"Plugin generate_picture: Provider {provider} supports image generation. Proceeding.") + # Request passes through unchanged - provider will handle generation + return messages + else: + print(f"Plugin generate_picture: Provider {provider} does NOT support image generation. Blocking.") + # Replace request with helpful error message + messages[-1]["content"] = f"Sorry, the current AI provider ({provider}) does not support image generation. Please switch to Gemini or OpenAI." + return messages + diff --git a/plugins/generate_picture/requirements.txt b/plugins/generate_picture/requirements.txt new file mode 100644 index 0000000..e69de29 diff --git a/plugins/listen_audio/README.md b/plugins/listen_audio/README.md new file mode 100644 index 0000000..bb47050 --- /dev/null +++ b/plugins/listen_audio/README.md @@ -0,0 +1,21 @@ +# Listen Audio Plugin + +## Description +This plugin handles audio and voice message uploads. It ensures that audio content is only sent to multimodal LLMs that natively support audio analysis. + +## How it Works +1. **Detection:** The plugin detects if a message contains an audio file or a voice note. +2. **Provider Check:** It checks the currently active AI provider. + - **Supported:** Gemini, OpenAI (GPT-4o). + - **Unsupported:** Anthropic (Claude 3.5), Grok, Llama 3.2. +3. **Action:** + - If supported, the audio is passed to the model. 
+ - If unsupported, the audio is blocked with a warning. + +## Requirements +- None. + +## Compatibility +- **Gemini:** Supported (Native audio). +- **OpenAI (GPT-4o):** Supported (Native audio). +- **Others:** Not supported (Audio will be blocked). diff --git a/plugins/listen_audio/license.md b/plugins/listen_audio/license.md new file mode 100644 index 0000000..fdddb29 --- /dev/null +++ b/plugins/listen_audio/license.md @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to diff --git a/plugins/listen_audio/main.py b/plugins/listen_audio/main.py new file mode 100644 index 0000000..88dde35 --- /dev/null +++ b/plugins/listen_audio/main.py @@ -0,0 +1,93 @@ +""" +Listen Audio Plugin + +This plugin validates that audio/voice uploads are only sent to AI providers +that support native audio analysis. 
This prevents API errors for providers +that only support text or vision. + +Supported Providers: +- Gemini (Google): Native audio understanding +- OpenAI (GPT-4o): Native audio understanding + +Unsupported Providers: +- Anthropic (Claude): Vision only, no audio +- Others: Typically text/vision only +""" + +# List of providers that support native audio input +SUPPORTED_PROVIDERS = ["gemini", "openai"] + +def is_plugin_applicable(messages, provider): + """ + Determines if this plugin should process the current message. + + Returns True if the last user message contains audio or voice content. + This allows the plugin to intercept audio uploads and validate + provider compatibility before sending to the API. + + Args: + messages: List of message dicts with 'role' and 'content' + provider: Active AI provider (e.g., 'openai', 'anthropic') + + Returns: + bool: True if message contains audio/voice content + """ + if not messages: + return False + + last_message = messages[-1] + if last_message.get("role") != "user": + return False + + content = last_message.get("content", "") + + # Check if content contains audio or voice (content can be a list of parts for multimodal) + has_audio = False + if isinstance(content, list): + for part in content: + if part.get("type") == "audio" or part.get("type") == "voice": + has_audio = True + break + + if has_audio: + return True + + return False + +def process_messages(messages, provider): + """ + Validates provider compatibility with audio content. + + If the provider supports audio, the message passes through unchanged. + If not, the audio content is replaced with a user-friendly error message + explaining that they need to switch to a compatible provider. 
+ + Args: + messages: List of message dicts + provider: Active AI provider + + Returns: + list: Modified messages (either unchanged or with error message) + """ + if not messages: + return messages + + # Check if provider is supported (partial string matching) + is_supported = False + if provider: + provider_lower = provider.lower() + for supported in SUPPORTED_PROVIDERS: + if supported in provider_lower: + is_supported = True + break + + if is_supported: + print(f"Plugin listen_audio: Provider {provider} supports audio. Proceeding.") + # Audio content passes through unchanged - the provider wrapper will handle it + return messages + else: + print(f"Plugin listen_audio: Provider {provider} does NOT support audio. Blocking.") + # Replace audio content with helpful error message + messages[-1]["content"] = f"Sorry, the current AI provider ({provider}) does not support audio analysis." + return messages + diff --git a/plugins/listen_audio/requirements.txt b/plugins/listen_audio/requirements.txt new file mode 100644 index 0000000..e69de29 diff --git a/plugins/summarize_youtube_video/README.md b/plugins/summarize_youtube_video/README.md new file mode 100644 index 0000000..b9c52e8 --- /dev/null +++ b/plugins/summarize_youtube_video/README.md @@ -0,0 +1,16 @@ +# Summarize YouTube Video Plugin + +## Description +This plugin automatically detects YouTube links in the chat, fetches the video transcript using `youtube-transcript-api`, and generates a concise executive summary using the active LLM. + +## How it Works +1. **Detection:** The plugin scans user messages for "youtube.com" or "youtu.be" links. +2. **Transcript Fetching:** It attempts to download the transcript (preferring Spanish or English). +3. **Prompt Generation:** It replaces the original message with a structured prompt containing the transcript and specific instructions for an objective summary. +4. **Processing:** The LLM processes the prompt and returns the summary. 
+ +## Requirements +- `youtube-transcript-api` + +## Compatibility +- Works with all text-based LLM providers (OpenAI, Anthropic, Gemini, etc.). diff --git a/plugins/summarize_youtube_video/__init__.py b/plugins/summarize_youtube_video/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/plugins/summarize_youtube_video/license.md b/plugins/summarize_youtube_video/license.md new file mode 100644 index 0000000..fdddb29 --- /dev/null +++ b/plugins/summarize_youtube_video/license.md @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
+ +For more information, please refer to diff --git a/plugins/summarize_youtube_video/main.py b/plugins/summarize_youtube_video/main.py new file mode 100644 index 0000000..dbf5072 --- /dev/null +++ b/plugins/summarize_youtube_video/main.py @@ -0,0 +1,150 @@ +import re +from youtube_transcript_api import YouTubeTranscriptApi + +def get_youtube_video_id(url): + """ + Extracts the video ID from a YouTube URL. + """ + patron = r'(?:v=|\/)([0-9A-Za-z_-]{11})' + resultado = re.search(patron, url) + return resultado.group(1) if resultado else None + +def extract_phrases_and_concatenate(json_data): + """ + Extracts sentences from a JSON file and concatenates them into a single text variable. + """ + sentence = [] + if isinstance(json_data, list): + for item in json_data: + if 'text' in item: + sentence.append(item['text']) + else: + print("Unexpected JSON format") + + full_text = " ".join(sentence) + return full_text + +def get_transcript_from_url(url): + """ + Tries to get the transcript for a YouTube URL. + Returns the transcript text or None if failed. + """ + video_id = get_youtube_video_id(url) + if not video_id: + return None + + try: + # Try fetching transcript in Spanish first, then English, then auto-generated + transcript_list = YouTubeTranscriptApi.list_transcripts(video_id) + + try: + transcript = transcript_list.find_manually_created_transcript(['es', 'en']) + except: + try: + transcript = transcript_list.find_generated_transcript(['es', 'en']) + except: + transcript = next(iter(transcript_list)) + + json_data = transcript.fetch() + return extract_phrases_and_concatenate(json_data) + except Exception as e: + print(f"Error fetching transcript: {e}") + return None + +def get_summarization_prompt(transcript_text): + """ + Returns the prompt to be sent to the LLM for summarization. + """ + return f"""STRICT INFORMATION PROCESSING INSTRUCTIONS: +1. 
OUTPUT FORMAT: +- Executive summary in maximum 5 points +- Neutral and direct language +- No subjective assessments +- Style: informative and objective + +2. MANDATORY ANALYSIS: +- Identify MAIN FACTS +- Extract CONCRETE DATA +- Contextualize without personal opinion +- Prioritize verifiable information + +3. RESTRICTIONS: +- Prohibited use of emotional adjectives +- Avoid personal interpretations +- Maximum linguistic neutrality +- Mathematical precision in description + +4. STRUCTURE: +[Objective headline] +- Point 1: What happened +- Point 2: Who was involved +- Point 3: When and where +- Point 4: Immediate consequences +- Point 5: Relevant context + +CONTENT TO SUMMARIZE: +{transcript_text}""" + +def is_plugin_applicable(messages, provider): + """ + Returns True if the last message from the user contains a YouTube link. + The provider argument is used to check if the model supports native video processing. + """ + if not messages: + return False + + last_message = messages[-1] + if last_message.get("role") != "user": + return False + + content = last_message.get("content", "") + # Handle case where content might be a list (for images) + if isinstance(content, list): + # Check text parts + for part in content: + if part.get("type") == "text": + text = part.get("text", "") + if "youtube.com" in text or "youtu.be" in text: + return True + return False + + if isinstance(content, str): + if "youtube.com" in content or "youtu.be" in content: + return True + + return False + +def process_messages(messages, provider): + """ + Modifies the messages by replacing the YouTube link with the transcript and summarization prompt. 
+ """ + if not messages: + return messages + + last_message = messages[-1] + content = last_message.get("content", "") + + url = None + if isinstance(content, str): + url = content + elif isinstance(content, list): + for part in content: + if part.get("type") == "text": + text = part.get("text", "") + if "youtube.com" in text or "youtu.be" in text: + url = text + break + + if url: + print(f"Plugin processing YouTube URL: {url}") + transcript = get_transcript_from_url(url) + if transcript: + prompt = get_summarization_prompt(transcript) + # Replace the content of the last message with the prompt + # We keep the role as 'user' so the LLM thinks the user asked for this summary + messages[-1]["content"] = prompt + print("Plugin: Transcript attached to messages.") + else: + print("Plugin: Failed to fetch transcript.") + + return messages diff --git a/plugins/summarize_youtube_video/requirements.txt b/plugins/summarize_youtube_video/requirements.txt new file mode 100644 index 0000000..d90f031 --- /dev/null +++ b/plugins/summarize_youtube_video/requirements.txt @@ -0,0 +1 @@ +youtube-transcript-api==0.6.3 diff --git a/plugins/watch_picture/README.md b/plugins/watch_picture/README.md new file mode 100644 index 0000000..0c322a1 --- /dev/null +++ b/plugins/watch_picture/README.md @@ -0,0 +1,21 @@ +# Watch Picture Plugin + +## Description +This plugin handles image uploads. It ensures that image content is sent to multimodal LLMs that support vision capabilities. + +## How it Works +1. **Detection:** The plugin detects if a message contains an image (photo or image document). +2. **Provider Check:** It checks the currently active AI provider. + - **Supported:** Gemini, OpenAI (GPT-4o), Anthropic (Claude 3.5), Grok, Llama 3.2. + - **Unsupported:** Text-only models (if any). +3. **Action:** + - If supported, the image is passed to the model. + - If unsupported, the image is blocked with a warning. + +## Requirements +- None. + +## Compatibility +- **Gemini:** Supported. 
+- **OpenAI:** Supported. +- **Anthropic:** Supported. diff --git a/plugins/watch_picture/license.md b/plugins/watch_picture/license.md new file mode 100644 index 0000000..fdddb29 --- /dev/null +++ b/plugins/watch_picture/license.md @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to diff --git a/plugins/watch_picture/main.py b/plugins/watch_picture/main.py new file mode 100644 index 0000000..3ea7d17 --- /dev/null +++ b/plugins/watch_picture/main.py @@ -0,0 +1,92 @@ +""" +Watch Picture Plugin + +This plugin validates that image uploads are only sent to AI providers +that support vision capabilities. Most modern LLMs support images, but +this plugin provides a safety check and helpful error messages. 
+ +Supported Providers: +- Gemini (Google): Vision support +- OpenAI (GPT-4o, GPT-4 Vision): Vision support +- Anthropic (Claude 3.5): Vision support + +Unsupported Providers: +- Text-only models (if any) +""" + +# List of providers that support image/vision input +SUPPORTED_PROVIDERS = ["gemini", "openai", "anthropic"] + +def is_plugin_applicable(messages, provider): + """ + Determines if this plugin should process the current message. + + Returns True if the last user message contains image content. + This allows the plugin to intercept image uploads and validate + provider compatibility. + + Args: + messages: List of message dicts with 'role' and 'content' + provider: Active AI provider (e.g., 'openai', 'anthropic') + + Returns: + bool: True if message contains image content + """ + if not messages: + return False + + last_message = messages[-1] + if last_message.get("role") != "user": + return False + + content = last_message.get("content", "") + + # Check if content contains images (content can be a list of parts for multimodal) + has_image = False + if isinstance(content, list): + for part in content: + if part.get("type") == "image_url" or part.get("type") == "image": + has_image = True + break + + if has_image: + return True + + return False + +def process_messages(messages, provider): + """ + Validates provider compatibility with image content. + + If the provider supports images, the message passes through unchanged. + If not, the image content is replaced with an error message. 
+ + Args: + messages: List of message dicts + provider: Active AI provider + + Returns: + list: Modified messages (either unchanged or with error message) + """ + if not messages: + return messages + + # Check if provider is supported (partial string matching) + is_supported = False + if provider: + provider_lower = provider.lower() + for supported in SUPPORTED_PROVIDERS: + if supported in provider_lower: + is_supported = True + break + + if is_supported: + print(f"Plugin watch_picture: Provider {provider} supports images. Proceeding.") + # Image content passes through unchanged + return messages + else: + print(f"Plugin watch_picture: Provider {provider} does NOT support images. Blocking.") + # Replace image content with error message + messages[-1]["content"] = f"Sorry, the current AI provider ({provider}) does not support image analysis." + return messages + diff --git a/plugins/watch_picture/requirements.txt b/plugins/watch_picture/requirements.txt new file mode 100644 index 0000000..e69de29 diff --git a/plugins/watch_video/README.md b/plugins/watch_video/README.md new file mode 100644 index 0000000..6d2df3e --- /dev/null +++ b/plugins/watch_video/README.md @@ -0,0 +1,21 @@ +# Watch Video Plugin + +## Description +This plugin handles video file uploads. It ensures that video content is only sent to multimodal LLMs that natively support video analysis. + +## How it Works +1. **Detection:** The plugin detects if a message contains a video file. +2. **Provider Check:** It checks the currently active AI provider. + - **Supported:** Gemini, OpenAI (GPT-4o). + - **Unsupported:** Anthropic, Grok, Llama, etc. +3. **Action:** + - If supported, the video is passed to the model for analysis. + - If unsupported, the video is blocked, and a warning message is returned to the user, advising them to switch to a supported provider. + +## Requirements +- None (uses Telegram Bot API for file handling). + +## Compatibility +- **Gemini:** Supported (Native video analysis). 
+- **OpenAI (GPT-4o):** Supported. +- **Others:** Not supported (Video will be blocked). diff --git a/plugins/watch_video/license.md b/plugins/watch_video/license.md new file mode 100644 index 0000000..fdddb29 --- /dev/null +++ b/plugins/watch_video/license.md @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to diff --git a/plugins/watch_video/main.py b/plugins/watch_video/main.py new file mode 100644 index 0000000..a6ee70e --- /dev/null +++ b/plugins/watch_video/main.py @@ -0,0 +1,94 @@ +""" +Watch Video Plugin + +This plugin validates that video uploads are only sent to AI providers +that support native video analysis. This prevents API errors and provides +helpful feedback to users when they try to send videos to text-only models. 
+ +Supported Providers: +- Gemini (Google): Native video understanding +- OpenAI (GPT-4o): Native video understanding + +Unsupported Providers: +- Anthropic (Claude): Vision only, no video +- Others: Typically text-only +""" + +# List of providers that support native video input +SUPPORTED_PROVIDERS = ["gemini", "openai"] + +def is_plugin_applicable(messages, provider): + """ + Determines if this plugin should process the current message. + + Returns True if the last user message contains video content. + This allows the plugin to intercept video uploads and validate + provider compatibility before sending to the API. + + Args: + messages: List of message dicts with 'role' and 'content' + provider: Active AI provider (e.g., 'openai', 'anthropic') + + Returns: + bool: True if message contains video content + """ + if not messages: + return False + + last_message = messages[-1] + if last_message.get("role") != "user": + return False + + content = last_message.get("content", "") + + # Check if content contains video (content can be a list of parts for multimodal) + has_video = False + if isinstance(content, list): + for part in content: + if part.get("type") == "video" or part.get("type") == "video_url": + has_video = True + break + + if has_video: + return True + + return False + +def process_messages(messages, provider): + """ + Validates provider compatibility with video content. + + If the provider supports video, the message passes through unchanged. + If not, the video content is replaced with a user-friendly error message + explaining that they need to switch to a compatible provider. + + Args: + messages: List of message dicts + provider: Active AI provider + + Returns: + list: Modified messages (either unchanged or with error message) + """ + if not messages: + return messages + + # Check if provider is supported + # We use partial matching because provider string might be "openai" or "openai:gpt-4o" etc. 
+ is_supported = False + if provider: + provider_lower = provider.lower() + for supported in SUPPORTED_PROVIDERS: + if supported in provider_lower: + is_supported = True + break + + if is_supported: + print(f"Plugin watch_video: Provider {provider} supports video. Proceeding.") + # Video content passes through unchanged - the provider wrapper will handle it + return messages + else: + print(f"Plugin watch_video: Provider {provider} does NOT support video. Blocking.") + # Replace video content with helpful error message + messages[-1]["content"] = f"Sorry, the current AI provider ({provider}) does not support video analysis. Please switch to Gemini or GPT-4o." + return messages + diff --git a/plugins/watch_video/requirements.txt b/plugins/watch_video/requirements.txt new file mode 100644 index 0000000..e69de29 diff --git a/plugins/web_reader/README.md b/plugins/web_reader/README.md new file mode 100644 index 0000000..6b7576b --- /dev/null +++ b/plugins/web_reader/README.md @@ -0,0 +1,17 @@ +# Web Reader Plugin + +## Description +This plugin detects general web URLs (excluding YouTube), fetches the page content, extracts the visible text, and asks the LLM to provide a comprehensive summary. + +## How it Works +1. **Detection:** The plugin scans for `http` or `https` URLs. It explicitly ignores YouTube links (handled by another plugin). +2. **Scraping:** It uses `requests` and `BeautifulSoup` to fetch the HTML and extract text, removing scripts and styles. +3. **Prompt Generation:** It replaces the URL with a prompt containing the page text and a request for summary. +4. **Processing:** The LLM summarizes the content. + +## Requirements +- `requests` +- `beautifulsoup4` + +## Compatibility +- Works with all text-based LLM providers. 
diff --git a/plugins/web_reader/license.md b/plugins/web_reader/license.md new file mode 100644 index 0000000..fdddb29 --- /dev/null +++ b/plugins/web_reader/license.md @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to diff --git a/plugins/web_reader/main.py b/plugins/web_reader/main.py new file mode 100644 index 0000000..6588e4b --- /dev/null +++ b/plugins/web_reader/main.py @@ -0,0 +1,120 @@ +import requests +from bs4 import BeautifulSoup +import re + +def extract_text_from_url(url): + """ + Fetches the content of a URL and extracts the visible text. 
+ """ + try: + # Set a user agent to avoid being blocked by some sites + headers = { + 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36' + } + response = requests.get(url, headers=headers, timeout=10) + response.raise_for_status() + + soup = BeautifulSoup(response.content, 'html.parser') + + # Remove script and style elements + for script in soup(["script", "style"]): + script.extract() + + # Get text + text = soup.get_text() + + # Break into lines and remove leading/trailing space on each + lines = (line.strip() for line in text.splitlines()) + # Break multi-headlines into a line each + chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) + # Drop blank lines + text = '\n'.join(chunk for chunk in chunks if chunk) + + # Limit text length to avoid context window issues (e.g., 10000 chars) + return text[:10000] + + except Exception as e: + print(f"Error fetching URL {url}: {e}") + return None + +def get_summarization_prompt(text): + """ + Returns the prompt for summarizing the web page content. + """ + return f"""Please provide a brief and comprehensive summary of the following web page content. +Focus on the main points and key information. + +CONTENT: +{text}""" + +def is_plugin_applicable(messages, provider): + """ + Returns True if the last message contains a URL but is NOT a YouTube link. 
+ """ + if not messages: + return False + + last_message = messages[-1] + if last_message.get("role") != "user": + return False + + content = last_message.get("content", "") + + url = None + if isinstance(content, str): + # Simple regex for URL + url_match = re.search(r'https?://\S+', content) + if url_match: + url = url_match.group(0) + elif isinstance(content, list): + for part in content: + if part.get("type") == "text": + text = part.get("text", "") + url_match = re.search(r'https?://\S+', text) + if url_match: + url = url_match.group(0) + break + + if url: + # Exclude YouTube links as they are handled by another plugin + if "youtube.com" in url or "youtu.be" in url: + return False + return True + + return False + +def process_messages(messages, provider): + """ + Modifies the messages by replacing the URL with the web page content and summarization prompt. + """ + if not messages: + return messages + + last_message = messages[-1] + content = last_message.get("content", "") + + url = None + if isinstance(content, str): + url_match = re.search(r'https?://\S+', content) + if url_match: + url = url_match.group(0) + elif isinstance(content, list): + for part in content: + if part.get("type") == "text": + text = part.get("text", "") + url_match = re.search(r'https?://\S+', text) + if url_match: + url = url_match.group(0) + break + + if url: + print(f"Plugin web_reader processing URL: {url}") + text = extract_text_from_url(url) + if text: + prompt = get_summarization_prompt(text) + messages[-1]["content"] = prompt + print("Plugin web_reader: Content attached to messages.") + else: + print("Plugin web_reader: Failed to fetch content.") + + return messages diff --git a/plugins/web_reader/requirements.txt b/plugins/web_reader/requirements.txt new file mode 100644 index 0000000..74c270f --- /dev/null +++ b/plugins/web_reader/requirements.txt @@ -0,0 +1,2 @@ +requests==2.32.3 +beautifulsoup4==4.12.3 diff --git a/requirements.txt b/requirements.txt index 9f4cf3d..d7ab9b9 
100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,4 +3,7 @@ openai==2.7.1 anthropic==0.31.2 httpx==0.27.2 httpcore==1.0.6 -Pillow==10.4.0 \ No newline at end of file +Pillow==10.4.0 +youtube-transcript-api==0.6.3 +beautifulsoup4==4.12.3 +requests==2.32.3 From 28bfeedf30551a7906b2431a26899301448b991c Mon Sep 17 00:00:00 2001 From: marco Date: Fri, 28 Nov 2025 01:01:29 +0100 Subject: [PATCH 2/2] Add reaction_tracker plugin for learning from user feedback - Track message reactions (likes, emojis) on bot responses - Store reactions in SQLite database with analytics - Passive plugin that doesn't interfere with message pipeline - Includes engagement statistics and sentiment analysis - Supports learning from user votes to improve bot quality - Complete documentation and integration guide --- plugins/config_plugins.py | 1 + plugins/reaction_tracker/INTEGRATION.md | 114 ++++++++ plugins/reaction_tracker/README.md | 228 +++++++++++++++ plugins/reaction_tracker/analytics.py | 314 ++++++++++++++++++++ plugins/reaction_tracker/config.py | 75 +++++ plugins/reaction_tracker/database.py | 338 ++++++++++++++++++++ plugins/reaction_tracker/license.md | 24 ++ plugins/reaction_tracker/main.py | 243 ++++++++++++++++ plugins/reaction_tracker/requirements.txt | 1 + 9 files changed, 1338 insertions(+) create mode 100644 plugins/reaction_tracker/INTEGRATION.md create mode 100644 plugins/reaction_tracker/README.md create mode 100644 plugins/reaction_tracker/analytics.py create mode 100644 plugins/reaction_tracker/config.py create mode 100644 plugins/reaction_tracker/database.py create mode 100644 plugins/reaction_tracker/license.md create mode 100644 plugins/reaction_tracker/main.py create mode 100644 plugins/reaction_tracker/requirements.txt diff --git a/plugins/config_plugins.py b/plugins/config_plugins.py index f7f3877..d8be116 100644 --- a/plugins/config_plugins.py +++ b/plugins/config_plugins.py @@ -13,6 +13,7 @@ "watch_picture": True, "listen_audio": True, "generate_picture":
True, + "reaction_tracker": True, # Tracks message reactions for learning from user feedback } def is_plugin_enabled(plugin_name): diff --git a/plugins/reaction_tracker/INTEGRATION.md b/plugins/reaction_tracker/INTEGRATION.md new file mode 100644 index 0000000..e86c3d3 --- /dev/null +++ b/plugins/reaction_tracker/INTEGRATION.md @@ -0,0 +1,114 @@ +# Reaction Tracker Integration Instructions + +## For telegram-llm-bot/main.py + +Add this code after the plugin loading section (around line 200-300): + +```python +# ============================================================================ +# REACTION TRACKER INTEGRATION +# ============================================================================ +# Register reaction tracker handlers (must be done separately from plugin pipeline) +try: + from plugins.reaction_tracker.main import get_handlers + reaction_handlers = get_handlers() + for handler in reaction_handlers: + if handler: + app.add_handler(handler) + print("✅ Reaction tracker handlers registered") +except Exception as e: + print(f"⚠️ Could not register reaction tracker: {e}") +``` + +## Update allowed_updates + +Find the `run_polling()` call at the end of main.py and update it: + +```python +# OLD: +app.run_polling() + +# NEW: +app.run_polling( + allowed_updates=[ + "message", + "edited_message", + "channel_post", + "edited_channel_post", + "message_reaction", # ADD THIS + "message_reaction_count", # AND THIS + "inline_query", + "chosen_inline_result", + "callback_query", + ] +) +``` + +## Optional: Add Statistics Commands + +Add these command handlers: + +```python +async def reaction_stats_command(update: Update, context: ContextTypes.DEFAULT_TYPE): + """Show reaction statistics.""" + from plugins.reaction_tracker.main import get_tracker + from plugins.reaction_tracker.analytics import ReactionAnalytics + + tracker = get_tracker() + analytics = ReactionAnalytics(tracker.db) + + chat_id = update.effective_chat.id + stats = 
analytics.get_engagement_stats(chat_id=chat_id, days_back=7) + + await update.message.reply_text( + f"📊 **Reaction Statistics (Last 7 Days)**\n\n" + f"Total Reactions: {stats['total_reactions']}\n" + f"Unique Users: {stats['unique_users']}\n" + f"Unique Messages: {stats['unique_messages']}\n" + f"Avg Reactions/Message: {stats['avg_reactions_per_message']:.2f}\n" + f"Avg Reactions/User: {stats['avg_reactions_per_user']:.2f}" + ) + +async def top_reactions_command(update: Update, context: ContextTypes.DEFAULT_TYPE): + """Show most reacted messages.""" + from plugins.reaction_tracker.main import get_tracker + + tracker = get_tracker() + chat_id = update.effective_chat.id + top_messages = tracker.get_top_messages(chat_id=chat_id, limit=5) + + if not top_messages: + await update.message.reply_text("No reactions yet!") + return + + response = "🏆 **Top Reacted Messages**\n\n" + for i, msg in enumerate(top_messages, 1): + breakdown = msg['reaction_breakdown'] + emoji_str = " ".join([f"{emoji}×{count}" for emoji, count in breakdown.items()]) + response += f"{i}. Message #{msg['message_id']}\n" + response += f" Total: {msg['total_count']} | {emoji_str}\n\n" + + await update.message.reply_text(response) + +# Register commands +app.add_handler(CommandHandler("reactionstats", reaction_stats_command)) +app.add_handler(CommandHandler("topreactions", top_reactions_command)) +``` + +## Verification + +After integration, test with: + +1. Send a message from the bot +2. React to it with an emoji +3. Run `/reactionstats` to see if it's tracked +4. Check `plugins/reaction_tracker/reactions.db` exists + +## Troubleshooting + +If reactions aren't being tracked: + +1. Check bot is admin in the chat +2. Verify `allowed_updates` includes reaction types +3. Check console for "✅ Reaction tracker handlers registered" +4. 
Ensure `python-telegram-bot>=20.8` is installed diff --git a/plugins/reaction_tracker/README.md b/plugins/reaction_tracker/README.md new file mode 100644 index 0000000..2a73c1d --- /dev/null +++ b/plugins/reaction_tracker/README.md @@ -0,0 +1,228 @@ +# Reaction Tracker Plugin + +**Author:** Marco +**License:** Public Domain (Unlicense) + +## Overview + +This plugin tracks message reactions (👍, ❤️, 🔥, etc.) on bot messages to learn from user feedback. It stores reactions in a SQLite database and provides analytics to identify which bot responses users prefer. + +## Purpose + +Enable the bot to learn from user reactions: +- Track which responses get positive reactions (👍, ❤️, 🔥) +- Identify poorly received responses (👎, 💔) +- Analyze engagement patterns over time +- Use reaction data to improve bot responses + +## How It Works + +This is a **passive plugin** that doesn't modify messages in the processing pipeline. Instead, it: + +1. Registers handlers for `MessageReactionUpdated` and `MessageReactionCountUpdated` events +2. Stores reaction data in a SQLite database (`reactions.db`) +3. Provides analytics methods to query reaction statistics + +## Features + +- ✅ Track individual user reactions +- ✅ Track aggregate reaction counts +- ✅ Support for anonymous reactions (from channels) +- ✅ SQLite database with thread-safe operations +- ✅ Analytics for identifying top-rated messages +- ✅ Query interface for statistics + +## Database Schema + +### message_reactions +Stores individual reaction events: +- `chat_id`, `message_id`, `user_id`, `actor_chat_id` +- `reaction_emoji`, `action` (added/removed), `timestamp` + +### reaction_counts +Stores aggregate statistics: +- `chat_id`, `message_id`, `total_count` +- `reaction_breakdown` (JSON: emoji → count) + +## Integration + +### 1. Enable in config_plugins.py + +```python +PLUGIN_STATUS = { + # ... other plugins ... + "reaction_tracker": True, +} +``` + +### 2. 
Register Handlers in main.py + +Add this code in `main.py` after loading plugins: + +```python +# Register reaction tracker handlers +try: + from plugins.reaction_tracker.main import get_handlers + for handler in get_handlers(): + if handler: + app.add_handler(handler) + print("✅ Reaction tracker handlers registered") +except Exception as e: + print(f"⚠️ Could not register reaction tracker: {e}") +``` + +### 3. Update allowed_updates + +In `main.py`, when calling `run_polling()`, add reaction updates: + +```python +app.run_polling( + allowed_updates=[ + "message", + "message_reaction", # ADD THIS + "message_reaction_count" # AND THIS + ] +) +``` + +## Usage + +### Query Statistics + +```python +from plugins.reaction_tracker.main import get_tracker +from plugins.reaction_tracker.analytics import ReactionAnalytics + +tracker = get_tracker() +analytics = ReactionAnalytics(tracker.db) + +# Get most reacted messages +top_messages = tracker.get_top_messages(chat_id=123, limit=10) + +# Get engagement stats +stats = analytics.get_engagement_stats(chat_id=123, days_back=7) +print(f"Total reactions: {stats['total_reactions']}") + +# Get most popular emojis +popular = analytics.get_most_popular_emoji(chat_id=123, days_back=7) +``` + +### Add Commands (Optional) + +You can add commands to query statistics: + +```python +async def stats_command(update: Update, context: ContextTypes.DEFAULT_TYPE): + """Show reaction statistics.""" + from plugins.reaction_tracker.main import get_tracker + from plugins.reaction_tracker.analytics import ReactionAnalytics + + tracker = get_tracker() + analytics = ReactionAnalytics(tracker.db) + + chat_id = update.effective_chat.id + stats = analytics.get_engagement_stats(chat_id=chat_id, days_back=7) + + await update.message.reply_text( + f"📊 Reactions (7 days):\n" + f"Total: {stats['total_reactions']}\n" + f"Users: {stats['unique_users']}\n" + f"Messages: {stats['unique_messages']}" + ) + +# Register command 
+app.add_handler(CommandHandler("reactionstats", stats_command)) +``` + +## Learning from Reactions + +### Identify Best Responses + +```python +# Get top-rated bot messages +top_messages = tracker.get_top_messages(chat_id=123, limit=10) + +# Analyze sentiment +positive_emojis = {"👍", "❤️", "🔥", "💯", "⭐"} +negative_emojis = {"👎", "💔", "😢"} + +for msg in top_messages: + breakdown = msg['reaction_breakdown'] + positive = sum(count for emoji, count in breakdown.items() if emoji in positive_emojis) + negative = sum(count for emoji, count in breakdown.items() if emoji in negative_emojis) + + if positive > negative: + print(f"✅ Message {msg['message_id']}: Well received") + else: + print(f"❌ Message {msg['message_id']}: Poorly received") +``` + +## Requirements + +- `python-telegram-bot>=20.8` +- Telegram Bot API 6.0+ +- Bot must be admin in groups/channels to receive reaction updates + +## Important Notes + +1. **Bot Permissions**: Bot must be an administrator in groups/channels +2. **allowed_updates**: Must include `"message_reaction"` and `"message_reaction_count"` +3. **Passive Plugin**: Doesn't modify messages, only tracks reactions +4. **Database**: Stored in plugin directory as `reactions.db` + +## Troubleshooting + +### Reactions Not Being Tracked + +1. Check bot is admin in the chat +2. Verify `allowed_updates` includes reaction types +3. Check `python-telegram-bot` version is >= 20.8 +4. Ensure handlers are registered in `main.py` + +### Database Issues + +Database is stored in the plugin directory. 
To reset: +```bash +rm plugins/reaction_tracker/reactions.db +``` + +## Analytics Methods + +Available in `analytics.py`: + +- `get_most_popular_emoji()` - Most used reactions +- `get_most_active_users()` - Top reactors +- `get_engagement_stats()` - Overall metrics +- `get_trending_messages()` - High-velocity reactions +- `export_to_json()` - Export data + +## Example: Sentiment Analysis + +```python +def analyze_bot_sentiment(chat_id): + """Analyze user sentiment towards bot responses.""" + analytics = ReactionAnalytics(tracker.db) + + popular = analytics.get_most_popular_emoji(chat_id=chat_id, days_back=7) + + positive = {"👍", "❤️", "🔥", "💯", "⭐"} + negative = {"👎", "💔", "😢"} + + pos_count = sum(c for e, c in popular if e in positive) + neg_count = sum(c for e, c in popular if e in negative) + + sentiment = (pos_count - neg_count) / (pos_count + neg_count) + return sentiment # -1 (negative) to +1 (positive) +``` + +## Future Enhancements + +- Automatic response quality scoring +- Integration with AI training data +- Reaction-based response ranking +- User preference learning +- A/B testing support + +--- + +**Note**: This plugin requires manual handler registration in `main.py`. See integration instructions above. diff --git a/plugins/reaction_tracker/analytics.py b/plugins/reaction_tracker/analytics.py new file mode 100644 index 0000000..cfe3003 --- /dev/null +++ b/plugins/reaction_tracker/analytics.py @@ -0,0 +1,314 @@ +""" +Analytics utilities for Telegram Reaction Tracker + +Provides analytics and reporting features for reaction data. +""" + +import logging +from typing import Dict, List, Optional, Tuple +from datetime import datetime, timedelta +from collections import Counter +import json + +from .database import ReactionDatabase + + +logger = logging.getLogger(__name__) + + +class ReactionAnalytics: + """ + Analytics engine for reaction data. + + Provides methods to analyze and report on reaction patterns. 
+ """ + + def __init__(self, database: ReactionDatabase): + """ + Initialize analytics engine. + + Args: + database: ReactionDatabase instance + """ + self.db = database + + def get_most_popular_emoji( + self, + chat_id: Optional[int] = None, + days_back: Optional[int] = None + ) -> List[Tuple[str, int]]: + """ + Get most popular reaction emojis. + + Args: + chat_id: Filter by chat ID (None = all chats) + days_back: Only include reactions from last N days + + Returns: + List of (emoji, count) tuples ordered by popularity + """ + with self.db._get_connection() as conn: + cursor = conn.cursor() + + query = """ + SELECT reaction_emoji, COUNT(*) as count + FROM message_reactions + WHERE action = 'added' + """ + params = [] + + if chat_id is not None: + query += " AND chat_id = ?" + params.append(chat_id) + + if days_back is not None: + cutoff = int((datetime.now() - timedelta(days=days_back)).timestamp()) + query += " AND timestamp >= ?" + params.append(cutoff) + + query += " GROUP BY reaction_emoji ORDER BY count DESC" + + cursor.execute(query, params) + return [(row[0], row[1]) for row in cursor.fetchall()] + + def get_most_active_users( + self, + chat_id: Optional[int] = None, + days_back: Optional[int] = None, + limit: int = 10 + ) -> List[Tuple[int, int]]: + """ + Get most active users (by reaction count). + + Args: + chat_id: Filter by chat ID (None = all chats) + days_back: Only include reactions from last N days + limit: Maximum number of results + + Returns: + List of (user_id, reaction_count) tuples + """ + with self.db._get_connection() as conn: + cursor = conn.cursor() + + query = """ + SELECT user_id, COUNT(*) as count + FROM message_reactions + WHERE user_id IS NOT NULL AND action = 'added' + """ + params = [] + + if chat_id is not None: + query += " AND chat_id = ?" + params.append(chat_id) + + if days_back is not None: + cutoff = int((datetime.now() - timedelta(days=days_back)).timestamp()) + query += " AND timestamp >= ?" 
+ params.append(cutoff) + + query += " GROUP BY user_id ORDER BY count DESC LIMIT ?" + params.append(limit) + + cursor.execute(query, params) + return [(row[0], row[1]) for row in cursor.fetchall()] + + def get_reaction_timeline( + self, + chat_id: int, + message_id: int + ) -> List[Dict]: + """ + Get timeline of reactions for a specific message. + + Args: + chat_id: Chat ID + message_id: Message ID + + Returns: + List of reaction events ordered by time + """ + reactions = self.db.get_message_reactions(chat_id, message_id) + + # Sort by timestamp + reactions.sort(key=lambda x: x['timestamp']) + + return reactions + + def get_user_favorite_emoji(self, user_id: int) -> Optional[str]: + """ + Get user's most frequently used reaction emoji. + + Args: + user_id: User ID + + Returns: + Most used emoji or None + """ + with self.db._get_connection() as conn: + cursor = conn.cursor() + + cursor.execute(""" + SELECT reaction_emoji, COUNT(*) as count + FROM message_reactions + WHERE user_id = ? AND action = 'added' + GROUP BY reaction_emoji + ORDER BY count DESC + LIMIT 1 + """, (user_id,)) + + row = cursor.fetchone() + return row[0] if row else None + + def get_engagement_stats( + self, + chat_id: Optional[int] = None, + days_back: int = 7 + ) -> Dict: + """ + Get overall engagement statistics. + + Args: + chat_id: Filter by chat ID (None = all chats) + days_back: Number of days to analyze + + Returns: + Dictionary with engagement metrics + """ + cutoff = int((datetime.now() - timedelta(days=days_back)).timestamp()) + + with self.db._get_connection() as conn: + cursor = conn.cursor() + + # Total reactions + query = "SELECT COUNT(*) FROM message_reactions WHERE action = 'added' AND timestamp >= ?" + params = [cutoff] + + if chat_id is not None: + query += " AND chat_id = ?" 
+ params.append(chat_id) + + cursor.execute(query, params) + total_reactions = cursor.fetchone()[0] + + # Unique users + query = "SELECT COUNT(DISTINCT user_id) FROM message_reactions WHERE action = 'added' AND timestamp >= ? AND user_id IS NOT NULL" + params = [cutoff] + + if chat_id is not None: + query += " AND chat_id = ?" + params.append(chat_id) + + cursor.execute(query, params) + unique_users = cursor.fetchone()[0] + + # Unique messages + query = "SELECT COUNT(DISTINCT message_id) FROM message_reactions WHERE action = 'added' AND timestamp >= ?" + params = [cutoff] + + if chat_id is not None: + query += " AND chat_id = ?" + params.append(chat_id) + + cursor.execute(query, params) + unique_messages = cursor.fetchone()[0] + + return { + 'total_reactions': total_reactions, + 'unique_users': unique_users, + 'unique_messages': unique_messages, + 'avg_reactions_per_message': total_reactions / unique_messages if unique_messages > 0 else 0, + 'avg_reactions_per_user': total_reactions / unique_users if unique_users > 0 else 0, + 'period_days': days_back + } + + def export_to_json( + self, + chat_id: Optional[int] = None, + days_back: Optional[int] = None + ) -> str: + """ + Export reaction data to JSON format. + + Args: + chat_id: Filter by chat ID (None = all chats) + days_back: Only include reactions from last N days + + Returns: + JSON string with reaction data + """ + with self.db._get_connection() as conn: + cursor = conn.cursor() + + query = "SELECT * FROM message_reactions WHERE 1=1" + params = [] + + if chat_id is not None: + query += " AND chat_id = ?" + params.append(chat_id) + + if days_back is not None: + cutoff = int((datetime.now() - timedelta(days=days_back)).timestamp()) + query += " AND timestamp >= ?" 
+ params.append(cutoff) + + query += " ORDER BY timestamp DESC" + + cursor.execute(query, params) + + reactions = [dict(row) for row in cursor.fetchall()] + + return json.dumps({ + 'export_date': datetime.now().isoformat(), + 'total_records': len(reactions), + 'reactions': reactions + }, indent=2) + + def get_trending_messages( + self, + chat_id: Optional[int] = None, + hours_back: int = 24, + limit: int = 5 + ) -> List[Dict]: + """ + Get trending messages (high reaction velocity). + + Args: + chat_id: Filter by chat ID (None = all chats) + hours_back: Time window to analyze + limit: Maximum number of results + + Returns: + List of trending message stats + """ + cutoff = int((datetime.now() - timedelta(hours=hours_back)).timestamp()) + + with self.db._get_connection() as conn: + cursor = conn.cursor() + + query = """ + SELECT chat_id, message_id, COUNT(*) as reaction_count + FROM message_reactions + WHERE action = 'added' AND timestamp >= ? + """ + params = [cutoff] + + if chat_id is not None: + query += " AND chat_id = ?" + params.append(chat_id) + + query += " GROUP BY chat_id, message_id ORDER BY reaction_count DESC LIMIT ?" + params.append(limit) + + cursor.execute(query, params) + + trending = [] + for row in cursor.fetchall(): + trending.append({ + 'chat_id': row[0], + 'message_id': row[1], + 'reaction_count': row[2], + 'reactions_per_hour': row[2] / hours_back + }) + + return trending diff --git a/plugins/reaction_tracker/config.py b/plugins/reaction_tracker/config.py new file mode 100644 index 0000000..1293d32 --- /dev/null +++ b/plugins/reaction_tracker/config.py @@ -0,0 +1,75 @@ +""" +Configuration module for Telegram Reaction Tracker + +Manages plugin settings and configuration options. +""" + +from dataclasses import dataclass +from typing import Optional, List +import os + + +@dataclass +class Config: + """ + Configuration settings for the Reaction Tracker plugin. 
+ + Attributes: + database_path: Path to SQLite database file + track_anonymous: Whether to track anonymous reactions + store_message_text: Whether to store associated message text + allowed_reactions: List of reaction emojis to track (None = all) + max_history_days: How long to keep reaction history (0 = forever) + enable_analytics: Whether to enable analytics features + verbose_logging: Enable detailed logging + """ + + database_path: str = "reactions.db" + track_anonymous: bool = True + store_message_text: bool = True + allowed_reactions: Optional[List[str]] = None + max_history_days: int = 0 # 0 = keep forever + enable_analytics: bool = True + verbose_logging: bool = False + + @classmethod + def from_env(cls) -> "Config": + """ + Create configuration from environment variables. + + Environment variables: + REACTION_DB_PATH: Database path + REACTION_TRACK_ANONYMOUS: Track anonymous (true/false) + REACTION_STORE_TEXT: Store message text (true/false) + REACTION_MAX_HISTORY_DAYS: Max history days + REACTION_VERBOSE: Verbose logging (true/false) + + Returns: + Config instance + """ + return cls( + database_path=os.getenv("REACTION_DB_PATH", "reactions.db"), + track_anonymous=os.getenv("REACTION_TRACK_ANONYMOUS", "true").lower() == "true", + store_message_text=os.getenv("REACTION_STORE_TEXT", "true").lower() == "true", + max_history_days=int(os.getenv("REACTION_MAX_HISTORY_DAYS", "0")), + verbose_logging=os.getenv("REACTION_VERBOSE", "false").lower() == "true" + ) + + def validate(self) -> None: + """ + Validate configuration settings. 
+ + Raises: + ValueError: If configuration is invalid + """ + if not self.database_path: + raise ValueError("database_path cannot be empty") + + if self.max_history_days < 0: + raise ValueError("max_history_days must be >= 0") + + if self.allowed_reactions is not None: + if not isinstance(self.allowed_reactions, list): + raise ValueError("allowed_reactions must be a list or None") + if not all(isinstance(r, str) for r in self.allowed_reactions): + raise ValueError("All allowed_reactions must be strings") diff --git a/plugins/reaction_tracker/database.py b/plugins/reaction_tracker/database.py new file mode 100644 index 0000000..744ca54 --- /dev/null +++ b/plugins/reaction_tracker/database.py @@ -0,0 +1,338 @@ +""" +Database layer for Telegram Reaction Tracker + +Handles all database operations for storing and querying reaction data. +""" + +import sqlite3 +import json +import logging +from typing import List, Dict, Optional, Tuple +from datetime import datetime, timedelta +from contextlib import contextmanager +import threading + + +logger = logging.getLogger(__name__) + + +class ReactionDatabase: + """ + Database abstraction layer for reaction tracking. + + Manages SQLite database operations with thread-safe access. + """ + + def __init__(self, database_path: str): + """ + Initialize database connection. + + Args: + database_path: Path to SQLite database file + """ + self.database_path = database_path + self._local = threading.local() + self._init_database() + + @contextmanager + def _get_connection(self): + """ + Get thread-local database connection. 
+ + Yields: + sqlite3.Connection: Database connection + """ + if not hasattr(self._local, 'connection'): + self._local.connection = sqlite3.connect( + self.database_path, + check_same_thread=False + ) + self._local.connection.row_factory = sqlite3.Row + + try: + yield self._local.connection + except Exception as e: + self._local.connection.rollback() + logger.error(f"Database error: {e}") + raise + else: + self._local.connection.commit() + + def _init_database(self) -> None: + """Initialize database schema.""" + with self._get_connection() as conn: + cursor = conn.cursor() + + # Create message_reactions table + cursor.execute(""" + CREATE TABLE IF NOT EXISTS message_reactions ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + chat_id INTEGER NOT NULL, + message_id INTEGER NOT NULL, + user_id INTEGER, + actor_chat_id INTEGER, + reaction_emoji TEXT NOT NULL, + action TEXT NOT NULL, + timestamp INTEGER NOT NULL, + message_text TEXT + ) + """) + + # Create reaction_counts table + cursor.execute(""" + CREATE TABLE IF NOT EXISTS reaction_counts ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + chat_id INTEGER NOT NULL, + message_id INTEGER NOT NULL, + total_count INTEGER NOT NULL, + reaction_breakdown TEXT, + last_updated INTEGER NOT NULL, + UNIQUE(chat_id, message_id) + ) + """) + + # Create indexes + cursor.execute(""" + CREATE INDEX IF NOT EXISTS idx_message_lookup + ON message_reactions(chat_id, message_id) + """) + + cursor.execute(""" + CREATE INDEX IF NOT EXISTS idx_user_reactions + ON message_reactions(user_id, timestamp) + """) + + cursor.execute(""" + CREATE INDEX IF NOT EXISTS idx_timestamp + ON message_reactions(timestamp) + """) + + logger.info(f"Database initialized at {self.database_path}") + + def store_reaction( + self, + chat_id: int, + message_id: int, + reaction_emoji: str, + action: str, + timestamp: int, + user_id: Optional[int] = None, + actor_chat_id: Optional[int] = None, + message_text: Optional[str] = None + ) -> int: + """ + Store a single reaction event. 
+ + Args: + chat_id: Chat ID + message_id: Message ID + reaction_emoji: Emoji used for reaction + action: 'added' or 'removed' + timestamp: Unix timestamp + user_id: User ID (None for anonymous) + actor_chat_id: Actor chat ID (for anonymous) + message_text: Associated message text + + Returns: + int: Row ID of inserted record + """ + with self._get_connection() as conn: + cursor = conn.cursor() + cursor.execute(""" + INSERT INTO message_reactions + (chat_id, message_id, user_id, actor_chat_id, reaction_emoji, action, timestamp, message_text) + VALUES (?, ?, ?, ?, ?, ?, ?, ?) + """, (chat_id, message_id, user_id, actor_chat_id, reaction_emoji, action, timestamp, message_text)) + + row_id = cursor.lastrowid + logger.debug(f"Stored reaction: {action} {reaction_emoji} on message {message_id}") + return row_id + + def update_reaction_count( + self, + chat_id: int, + message_id: int, + total_count: int, + reaction_breakdown: Dict[str, int], + timestamp: int + ) -> None: + """ + Update aggregate reaction counts for a message. + + Args: + chat_id: Chat ID + message_id: Message ID + total_count: Total number of reactions + reaction_breakdown: Dictionary mapping emoji to count + timestamp: Unix timestamp + """ + breakdown_json = json.dumps(reaction_breakdown) + + with self._get_connection() as conn: + cursor = conn.cursor() + cursor.execute(""" + INSERT INTO reaction_counts (chat_id, message_id, total_count, reaction_breakdown, last_updated) + VALUES (?, ?, ?, ?, ?) + ON CONFLICT(chat_id, message_id) DO UPDATE SET + total_count = excluded.total_count, + reaction_breakdown = excluded.reaction_breakdown, + last_updated = excluded.last_updated + """, (chat_id, message_id, total_count, breakdown_json, timestamp)) + + logger.debug(f"Updated reaction count for message {message_id}: {total_count} total") + + def get_message_reactions(self, chat_id: int, message_id: int) -> List[Dict]: + """ + Get all reactions for a specific message. 
+ + Args: + chat_id: Chat ID + message_id: Message ID + + Returns: + List of reaction records + """ + with self._get_connection() as conn: + cursor = conn.cursor() + cursor.execute(""" + SELECT * FROM message_reactions + WHERE chat_id = ? AND message_id = ? + ORDER BY timestamp DESC + """, (chat_id, message_id)) + + return [dict(row) for row in cursor.fetchall()] + + def get_message_stats(self, chat_id: int, message_id: int) -> Optional[Dict]: + """ + Get reaction statistics for a message. + + Args: + chat_id: Chat ID + message_id: Message ID + + Returns: + Dictionary with stats or None if not found + """ + with self._get_connection() as conn: + cursor = conn.cursor() + cursor.execute(""" + SELECT * FROM reaction_counts + WHERE chat_id = ? AND message_id = ? + """, (chat_id, message_id)) + + row = cursor.fetchone() + if row: + result = dict(row) + result['reaction_breakdown'] = json.loads(result['reaction_breakdown']) + return result + return None + + def get_top_reacted_messages( + self, + chat_id: Optional[int] = None, + limit: int = 10, + min_reactions: int = 1 + ) -> List[Dict]: + """ + Get most reacted messages. + + Args: + chat_id: Filter by chat ID (None = all chats) + limit: Maximum number of results + min_reactions: Minimum reaction count + + Returns: + List of message stats ordered by reaction count + """ + with self._get_connection() as conn: + cursor = conn.cursor() + + if chat_id is not None: + cursor.execute(""" + SELECT * FROM reaction_counts + WHERE chat_id = ? AND total_count >= ? + ORDER BY total_count DESC + LIMIT ? + """, (chat_id, min_reactions, limit)) + else: + cursor.execute(""" + SELECT * FROM reaction_counts + WHERE total_count >= ? + ORDER BY total_count DESC + LIMIT ? 
+ """, (min_reactions, limit)) + + results = [] + for row in cursor.fetchall(): + result = dict(row) + result['reaction_breakdown'] = json.loads(result['reaction_breakdown']) + results.append(result) + + return results + + def get_user_reaction_history( + self, + user_id: int, + limit: int = 50, + days_back: Optional[int] = None + ) -> List[Dict]: + """ + Get reaction history for a specific user. + + Args: + user_id: User ID + limit: Maximum number of results + days_back: Only include reactions from last N days (None = all) + + Returns: + List of user's reactions + """ + with self._get_connection() as conn: + cursor = conn.cursor() + + if days_back is not None: + cutoff = int((datetime.now() - timedelta(days=days_back)).timestamp()) + cursor.execute(""" + SELECT * FROM message_reactions + WHERE user_id = ? AND timestamp >= ? + ORDER BY timestamp DESC + LIMIT ? + """, (user_id, cutoff, limit)) + else: + cursor.execute(""" + SELECT * FROM message_reactions + WHERE user_id = ? + ORDER BY timestamp DESC + LIMIT ? + """, (user_id, limit)) + + return [dict(row) for row in cursor.fetchall()] + + def cleanup_old_reactions(self, days_to_keep: int) -> int: + """ + Remove reactions older than specified days. + + Args: + days_to_keep: Number of days to keep + + Returns: + Number of deleted records + """ + cutoff = int((datetime.now() - timedelta(days=days_to_keep)).timestamp()) + + with self._get_connection() as conn: + cursor = conn.cursor() + cursor.execute(""" + DELETE FROM message_reactions + WHERE timestamp < ? 
+ """, (cutoff,)) + + deleted = cursor.rowcount + logger.info(f"Cleaned up {deleted} old reactions") + return deleted + + def close(self) -> None: + """Close database connection.""" + if hasattr(self._local, 'connection'): + self._local.connection.close() + logger.info("Database connection closed") diff --git a/plugins/reaction_tracker/license.md b/plugins/reaction_tracker/license.md new file mode 100644 index 0000000..fdddb29 --- /dev/null +++ b/plugins/reaction_tracker/license.md @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
+ +For more information, please refer to <https://unlicense.org> diff --git a/plugins/reaction_tracker/main.py b/plugins/reaction_tracker/main.py new file mode 100644 index 0000000..5352e6b --- /dev/null +++ b/plugins/reaction_tracker/main.py @@ -0,0 +1,243 @@ +""" +Reaction Tracker Plugin + +This plugin tracks message reactions (likes, emojis) on bot messages to learn +from user feedback. It stores reactions in a database and provides analytics +to identify which bot responses users prefer. + +This is a passive plugin that doesn't modify messages - it only tracks reactions +via handlers that need to be registered separately in the main bot. + +Author: Marco +License: Public Domain (Unlicense) +""" + +import os +import sys +import logging +from typing import Optional, Dict, List +from datetime import datetime + +# Add parent directory to path for imports +sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) + +try: + from telegram import Update + from telegram.ext import ContextTypes, MessageReactionHandler + TELEGRAM_AVAILABLE = True +except ImportError: + TELEGRAM_AVAILABLE = False + logging.warning("python-telegram-bot not installed. Reaction tracking will be disabled.") + +from database import ReactionDatabase +from config import Config + + +logger = logging.getLogger(__name__) + + +class ReactionTracker: + """ + Tracks message reactions for learning from user feedback. + + This plugin is passive - it doesn't modify messages in the plugin pipeline. + Instead, it provides handlers that must be registered in main.py.
+ """ + + def __init__(self, database_path: Optional[str] = None): + """Initialize reaction tracker.""" + if database_path is None: + # Store in plugin directory + plugin_dir = os.path.dirname(os.path.abspath(__file__)) + database_path = os.path.join(plugin_dir, "reactions.db") + + self.config = Config( + database_path=database_path, + track_anonymous=True, + store_message_text=False, # Bot handles message storage separately + verbose_logging=False + ) + + self.db = ReactionDatabase(database_path) + logger.info(f"ReactionTracker initialized with database: {database_path}") + + async def handle_reaction_update( + self, + update: Update, + context: ContextTypes.DEFAULT_TYPE + ) -> None: + """Handle MessageReactionUpdated events.""" + if not TELEGRAM_AVAILABLE: + return + + reaction = update.message_reaction + if reaction is None: + return + + chat_id = reaction.chat.id + message_id = reaction.message_id + timestamp = int(reaction.date.timestamp() if hasattr(reaction.date, 'timestamp') else reaction.date) + + user_id = reaction.user.id if reaction.user else None + actor_chat_id = reaction.actor_chat.id if reaction.actor_chat else None + + # Process old and new reactions + old_emojis = set() + new_emojis = set() + + for reaction_type in reaction.old_reaction: + if hasattr(reaction_type, 'emoji'): + old_emojis.add(reaction_type.emoji) + + for reaction_type in reaction.new_reaction: + if hasattr(reaction_type, 'emoji'): + new_emojis.add(reaction_type.emoji) + + # Determine what was added and removed + added = new_emojis - old_emojis + removed = old_emojis - new_emojis + + # Store added reactions + for emoji in added: + self.db.store_reaction( + chat_id=chat_id, + message_id=message_id, + reaction_emoji=emoji, + action='added', + timestamp=timestamp, + user_id=user_id, + actor_chat_id=actor_chat_id + ) + logger.info(f"Reaction added: {emoji} on message {message_id}") + + # Store removed reactions + for emoji in removed: + self.db.store_reaction( + chat_id=chat_id, + 
message_id=message_id, + reaction_emoji=emoji, + action='removed', + timestamp=timestamp, + user_id=user_id, + actor_chat_id=actor_chat_id + ) + + async def handle_reaction_count_update( + self, + update: Update, + context: ContextTypes.DEFAULT_TYPE + ) -> None: + """Handle MessageReactionCountUpdated events.""" + if not TELEGRAM_AVAILABLE: + return + + reaction_count = update.message_reaction_count + if reaction_count is None: + return + + chat_id = reaction_count.chat.id + message_id = reaction_count.message_id + timestamp = int(reaction_count.date.timestamp() if hasattr(reaction_count.date, 'timestamp') else reaction_count.date) + + # Build reaction breakdown + reaction_breakdown = {} + total_count = 0 + + for reaction in reaction_count.reactions: + if hasattr(reaction.type, 'emoji'): + emoji = reaction.type.emoji + count = reaction.total_count + reaction_breakdown[emoji] = count + total_count += count + + # Update database + self.db.update_reaction_count( + chat_id=chat_id, + message_id=message_id, + total_count=total_count, + reaction_breakdown=reaction_breakdown, + timestamp=timestamp + ) + + def get_reaction_handler(self): + """Get MessageReactionHandler for individual reactions.""" + if not TELEGRAM_AVAILABLE: + return None + return MessageReactionHandler(self.handle_reaction_update) + + def get_reaction_count_handler(self): + """Get MessageReactionHandler for reaction counts.""" + if not TELEGRAM_AVAILABLE: + return None + return MessageReactionHandler( + self.handle_reaction_count_update, + message_reaction_count=True + ) + + def get_message_stats(self, chat_id: int, message_id: int) -> Optional[Dict]: + """Get reaction statistics for a specific message.""" + return self.db.get_message_stats(chat_id, message_id) + + def get_top_messages( + self, + chat_id: Optional[int] = None, + limit: int = 10 + ) -> List[Dict]: + """Get most reacted messages.""" + return self.db.get_top_reacted_messages(chat_id, limit, min_reactions=1) + + def close(self): + """Close 
database connection.""" + self.db.close() + + +# Global tracker instance +_tracker_instance = None + + +def get_tracker() -> ReactionTracker: + """Get or create global tracker instance.""" + global _tracker_instance + if _tracker_instance is None: + _tracker_instance = ReactionTracker() + return _tracker_instance + + +# Plugin interface functions (required by plugin system) +def is_plugin_applicable(messages, provider): + """ + This plugin doesn't modify messages in the pipeline. + It only tracks reactions via handlers. + + Returns False so it doesn't interfere with message processing. + """ + return False + + +def process_messages(messages, provider): + """ + This plugin doesn't modify messages. + Reaction tracking happens via handlers registered in main.py. + """ + return messages + + +# Helper function to get handlers for registration in main.py +def get_handlers(): + """ + Get reaction handlers for registration in main bot. + + Usage in main.py: + from plugins.reaction_tracker.main import get_handlers + for handler in get_handlers(): + if handler: + app.add_handler(handler) + """ + if not TELEGRAM_AVAILABLE: + return [] + + tracker = get_tracker() + return [ + tracker.get_reaction_handler(), + tracker.get_reaction_count_handler() + ] diff --git a/plugins/reaction_tracker/requirements.txt b/plugins/reaction_tracker/requirements.txt new file mode 100644 index 0000000..d94afc0 --- /dev/null +++ b/plugins/reaction_tracker/requirements.txt @@ -0,0 +1 @@ +python-telegram-bot>=20.8