diff --git a/1-sysadmin-toolkit/README.md b/1-sysadmin-toolkit/README.md new file mode 100644 index 0000000..3527b9a --- /dev/null +++ b/1-sysadmin-toolkit/README.md @@ -0,0 +1,195 @@ +# SysAdmin Toolkit + +## Áttekintés / Overview + +Python és Bash alapú rendszergazda eszköztár log elemzéshez, rendszer monitorozáshoz, lemezterület analízishez és szolgáltatás kezeléshez. + +Python and Bash based system administration toolkit for log analysis, system monitoring, disk space analysis, and service management. + +## Funkciók / Features + +### Python Toolkit + +- **Log Analyzer** - Syslog és auth.log elemzés + - Syslog formátum parsing (RFC 3164) + - Bejelentkezés statisztikák (sikeres/sikertelen) + - Hibaüzenet aggregáció + - Programonkénti bontás + +- **System Health** - Rendszer állapot monitorozás + - CPU használat és load average + - Memória és swap monitorozás + - Lemez partíciók állapota + - Top folyamatok listázása + +- **Disk Analyzer** - Lemezterület elemzés + - Könyvtár méret analízis + - Nagy fájlok keresése + - Fájlrendszer használat + +- **Service Manager** - Szolgáltatás kezelés + - Systemd szolgáltatások státusza + - Kritikus szolgáltatások ellenőrzése + - Szolgáltatás naplók lekérdezése + +### Bash Scripts + +- **system-report.sh** - Átfogó rendszer riport generálása +- **log-cleanup.sh** - Log fájlok tisztítása retention policy alapján + +## Telepítés / Installation + +```bash +# Virtuális környezet aktiválása / Activate virtual environment +source venv/bin/activate + +# Függőségek már telepítve vannak / Dependencies are already installed +# psutil, typer, rich, pydantic +``` + +## Használat / Usage + +### CLI Interface + +```bash +# A projekt gyökérből / From project root +cd 1-sysadmin-toolkit + +# Rendszer állapot / System health +python -m toolkit health + +# Top folyamatok / Top processes +python -m toolkit processes --count 10 --sort cpu + +# Log elemzés / Log analysis +python -m toolkit logs /var/log/syslog + +# Lemez használat / Disk usage 
+python -m toolkit disk / + +# Nagy fájlok keresése / Find large files +python -m toolkit large-files /var/log --min-size 100 --count 20 + +# Könyvtár méretek / Directory sizes +python -m toolkit dir-sizes /home + +# Szolgáltatások listázása / List services +python -m toolkit services --state running + +# Szolgáltatás státusz / Service status +python -m toolkit service nginx + +# Kritikus szolgáltatások ellenőrzése / Check critical services +python -m toolkit check-services sshd nginx mysql +``` + +### Bash Scripts + +```bash +# Rendszer riport generálása / Generate system report +./scripts/system-report.sh /tmp/report.txt + +# Log tisztítás előnézet / Log cleanup preview +./scripts/log-cleanup.sh --dry-run --days 30 + +# Log tisztítás végrehajtás / Execute log cleanup +./scripts/log-cleanup.sh --days 7 --size 500 +``` + +### Python API + +```python +from toolkit import ( + get_system_health, + analyze_logs, + find_large_files, + check_critical_services, +) + +# Rendszer egészség / System health +health = get_system_health() +print(f"CPU: {health.cpu_percent}%") +print(f"Memory: {health.memory_percent}%") + +# Log elemzés / Log analysis +result = analyze_logs("/var/log/syslog") +print(f"Errors: {result.error_count}") +print(f"Failed logins: {result.failed_logins}") + +# Nagy fájlok / Large files +files = find_large_files("/var/log", min_size_bytes=100*1024*1024) +for f in files: + print(f"{f.path}: {f.size_bytes} bytes") + +# Szolgáltatások / Services +services = check_critical_services(["nginx", "sshd"]) +for name, status in services.items(): + print(f"{name}: {status.state.value}") +``` + +## Könyvtárstruktúra / Directory Structure + +``` +1-sysadmin-toolkit/ +├── README.md # Ez a dokumentum / This document +├── toolkit/ +│ ├── __init__.py # Package exports +│ ├── __main__.py # CLI entry point +│ ├── cli.py # Typer CLI commands +│ ├── models.py # Pydantic models +│ ├── log_analyzer.py # Log analysis tools +│ ├── system_health.py # System monitoring +│ ├── 
disk_analyzer.py # Disk space analysis +│ └── service_manager.py # Systemd service management +└── scripts/ + ├── system-report.sh # System report generator + └── log-cleanup.sh # Log cleanup script +``` + +## CLI Parancsok / CLI Commands + +| Parancs / Command | Leírás / Description | +|-------------------|----------------------| +| `health` | Rendszer egészségi állapot / System health status | +| `processes` | Top folyamatok / Top processes | +| `logs` | Log fájl elemzés / Log file analysis | +| `disk` | Lemez használat / Disk usage | +| `large-files` | Nagy fájlok keresése / Find large files | +| `dir-sizes` | Könyvtár méretek / Directory sizes | +| `services` | Szolgáltatások listázása / List services | +| `service` | Szolgáltatás részletek / Service details | +| `check-services` | Kritikus szolgáltatások / Critical services check | + +## Modellek / Models + +### LogEntry +Log bejegyzés reprezentációja timestamp-el, host névvel, programmal és üzenettel. + +### LogAnalysisResult +Elemzési eredmény hibaszámmal, warning számmal, bejelentkezési statisztikákkal. + +### SystemHealth +Rendszer állapot CPU, memória, lemez és load információkkal. + +### DiskUsage +Lemez partíció használati adatok. + +### ProcessInfo +Folyamat információ CPU és memória használattal. + +### ServiceStatus +Systemd szolgáltatás státusz. 
+ +## Tesztek / Tests + +```bash +# Tesztek futtatása / Run tests +pytest tests/test_sysadmin_toolkit/ -v + +# Coverage riport / Coverage report +pytest tests/test_sysadmin_toolkit/ --cov=toolkit +``` + +## Licenc / License + +MIT License diff --git a/1-sysadmin-toolkit/scripts/log-cleanup.sh b/1-sysadmin-toolkit/scripts/log-cleanup.sh new file mode 100644 index 0000000..408b784 --- /dev/null +++ b/1-sysadmin-toolkit/scripts/log-cleanup.sh @@ -0,0 +1,237 @@ +#!/bin/bash +# ============================================================================= +# Log Cleanup Script / Log Tisztító Script +# ============================================================================= +# Cleans up old log files based on retention policy. +# +# Régi log fájlok tisztítása megőrzési szabályok alapján. +# +# Usage / Használat: +# ./log-cleanup.sh [--dry-run] [--days N] [--size M] +# +# Options / Opciók: +# --dry-run Preview changes without deleting / Előnézet törlés nélkül +# --days N Delete files older than N days / N napnál régebbi fájlok törlése +# --size M Delete files larger than M MB / M MB-nál nagyobb fájlok törlése +# +# Example / Példa: +# ./log-cleanup.sh --dry-run --days 30 +# ./log-cleanup.sh --days 7 --size 100 +# ============================================================================= + +set -euo pipefail + +# Configuration / Konfiguráció +LOG_DIRS=("/var/log" "/var/log/journal" "/var/log/nginx" "/var/log/apache2") +DEFAULT_DAYS=30 +DEFAULT_SIZE_MB=500 +DRY_RUN=false +DAYS=$DEFAULT_DAYS +SIZE_MB=$DEFAULT_SIZE_MB + +# Colors / Színek +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +# Parse arguments / Argumentumok feldolgozása +while [[ $# -gt 0 ]]; do + case $1 in + --dry-run) + DRY_RUN=true + shift + ;; + --days) + DAYS="$2" + shift 2 + ;; + --size) + SIZE_MB="$2" + shift 2 + ;; + --help|-h) + echo "Usage: $0 [--dry-run] [--days N] [--size M]" + echo "" + echo "Options:" + echo " --dry-run Preview changes without 
deleting"
+      echo "  --days N     Delete files older than N days (default: $DEFAULT_DAYS)"
+      echo "  --size M     Delete files larger than M MB (default: $DEFAULT_SIZE_MB)"
+      exit 0
+      ;;
+    *)
+      echo -e "${RED}Unknown option: $1${NC}"
+      exit 1
+      ;;
+  esac
+done
+
+# Counters / Számlálók
+TOTAL_FILES=0
+TOTAL_SIZE=0
+DELETED_FILES=0
+DELETED_SIZE=0
+
+# Print header / Fejléc nyomtatása
+echo "=============================================="
+echo "  LOG CLEANUP SCRIPT / LOG TISZTÍTÓ SCRIPT"
+echo "=============================================="
+echo ""
+echo "Settings / Beállítások:"
+echo "  - Days threshold: $DAYS days"
+echo "  - Size threshold: $SIZE_MB MB"
+echo "  - Dry run mode: $DRY_RUN"
+echo ""
+
+# format_size BYTES -- print a human-readable size (GB/MB/KB/B).
+# NOTE(review): relies on `bc` for the two-decimal output; confirm bc is
+# installed on target hosts.
+format_size() {
+    local size=$1
+    if [ "$size" -ge 1073741824 ]; then
+        echo "$(echo "scale=2; $size / 1073741824" | bc) GB"
+    elif [ "$size" -ge 1048576 ]; then
+        echo "$(echo "scale=2; $size / 1048576" | bc) MB"
+    elif [ "$size" -ge 1024 ]; then
+        echo "$(echo "scale=2; $size / 1024" | bc) KB"
+    else
+        echo "$size B"
+    fi
+}
+
+# _remove_file FILE SIZE -- delete (or preview) one file and update the
+# global counters. The rm is guarded with if/elif so a single failure
+# (e.g. permission denied) does not abort the whole run under `set -e`.
+_remove_file() {
+    local file=$1
+    local size=$2
+    TOTAL_FILES=$((TOTAL_FILES + 1))
+    TOTAL_SIZE=$((TOTAL_SIZE + size))
+    if [ "$DRY_RUN" = true ]; then
+        echo -e "  ${YELLOW}[DRY RUN]${NC} Would delete: $file ($(format_size "$size"))"
+    elif rm -f "$file"; then
+        echo -e "  ${GREEN}Deleted:${NC} $file ($(format_size "$size"))"
+    else
+        echo -e "  ${RED}Failed to delete:${NC} $file"
+        return 0
+    fi
+    DELETED_FILES=$((DELETED_FILES + 1))
+    DELETED_SIZE=$((DELETED_SIZE + size))
+}
+
+# clean_old_files DIR DAYS -- remove *.log* and *.gz files older than DAYS.
+# BUGFIX: previously this ran two independent find passes (one for
+# "*.log*", one for "*.gz"), so a file matching both patterns (e.g.
+# app.log.1.gz) was counted and processed twice. A single find with a
+# grouped \( -o \) expression visits each file exactly once.
+clean_old_files() {
+    local dir=$1
+    local days=$2
+
+    echo -e "${BLUE}Scanning for files older than $days days in: $dir${NC}"
+
+    if [ ! -d "$dir" ]; then
+        echo -e "${YELLOW}  Directory does not exist, skipping...${NC}"
+        return
+    fi
+
+    # Find old files / Régi fájlok keresése (single pass, both patterns)
+    local size
+    while IFS= read -r -d '' file; do
+        size=$(stat -c%s "$file" 2>/dev/null || echo 0)
+        _remove_file "$file" "$size"
+    done < <(find "$dir" -type f \( -name "*.log*" -o -name "*.gz" \) -mtime +"$days" -print0 2>/dev/null)
+}
+
+# clean_large_files DIR SIZE_MB -- truncate active *.log files and delete
+# rotated/compressed log files that exceed SIZE_MB.
+clean_large_files() {
+    local dir=$1
+    local size_mb=$2
+    local size_bytes=$((size_mb * 1024 * 1024))
+
+    echo -e "${BLUE}Scanning for files larger than ${size_mb}MB in: $dir${NC}"
+
+    if [ ! -d "$dir" ]; then
+        echo -e "${YELLOW}  Directory does not exist, skipping...${NC}"
+        return
+    fi
+
+    local size
+    while IFS= read -r -d '' file; do
+        size=$(stat -c%s "$file" 2>/dev/null || echo 0)
+        # Skip files below the threshold without touching the counters.
+        [ "$size" -ge "$size_bytes" ] || continue
+
+        TOTAL_FILES=$((TOTAL_FILES + 1))
+        TOTAL_SIZE=$((TOTAL_SIZE + size))
+
+        if [ "$DRY_RUN" = true ]; then
+            echo -e "  ${YELLOW}[DRY RUN]${NC} Would truncate: $file ($(format_size "$size"))"
+        elif [[ "$file" == *.log ]]; then
+            # Truncate instead of delete for active logs
+            # Csonkítás törlés helyett aktív logokhoz
+            if truncate -s 0 "$file"; then
+                echo -e "  ${GREEN}Truncated:${NC} $file ($(format_size "$size"))"
+            fi
+        else
+            if rm -f "$file"; then
+                echo -e "  ${GREEN}Deleted:${NC} $file ($(format_size "$size"))"
+            fi
+        fi
+
+        DELETED_FILES=$((DELETED_FILES + 1))
+        DELETED_SIZE=$((DELETED_SIZE + size))
+    done < <(find "$dir" -type f \( -name "*.log*" -o -name "*.gz" \) -print0 2>/dev/null)
+}
+
+# clean_journal_logs -- vacuum the systemd journal down to $DAYS days.
+clean_journal_logs() {
+    echo -e "${BLUE}Cleaning systemd journal logs...${NC}"
+
+    if command -v journalctl &> /dev/null; then
+        local journal_size
+        journal_size=$(journalctl --disk-usage 2>/dev/null | grep -oP '\d+\.?\d*[KMG]' || echo "unknown")
+        echo "  Current journal size: $journal_size"
+
+        if [ "$DRY_RUN" = true ]; then
+            echo -e "  ${YELLOW}[DRY RUN]${NC} Would vacuum journal to keep only $DAYS days"
+        elif journalctl --vacuum-time="${DAYS}d" 2>/dev/null; then
+            echo -e "  ${GREEN}Journal cleaned${NC}"
+        else
+            # Vacuum typically needs root; do not abort the run (set -e).
+            echo -e "  ${YELLOW}Could not vacuum journal (insufficient permissions?)${NC}"
+        fi
+    else
+        echo -e "  ${YELLOW}journalctl not available, skipping...${NC}"
+    fi
+}
+
+# Main execution / Fő végrehajtás
+echo "Starting cleanup / Tisztítás indítása..."
+echo "" + +# Clean old files in each directory / Régi fájlok tisztítása minden könyvtárban +for dir in "${LOG_DIRS[@]}"; do + clean_old_files "$dir" "$DAYS" + clean_large_files "$dir" "$SIZE_MB" + echo "" +done + +# Clean journal logs / Journal logok tisztítása +clean_journal_logs + +# Summary / Összefoglaló +echo "" +echo "==============================================" +echo " SUMMARY / ÖSSZEFOGLALÓ" +echo "==============================================" +echo " Files processed: $TOTAL_FILES" +echo " Total size found: $(format_size $TOTAL_SIZE)" +echo " Files cleaned: $DELETED_FILES" +echo " Space freed: $(format_size $DELETED_SIZE)" + +if [ "$DRY_RUN" = true ]; then + echo "" + echo -e "${YELLOW}This was a dry run. No files were actually deleted.${NC}" + echo -e "${YELLOW}Run without --dry-run to perform actual cleanup.${NC}" +fi + +echo "" +echo "Done! / Kész!" diff --git a/1-sysadmin-toolkit/scripts/system-report.sh b/1-sysadmin-toolkit/scripts/system-report.sh new file mode 100644 index 0000000..9c7b633 --- /dev/null +++ b/1-sysadmin-toolkit/scripts/system-report.sh @@ -0,0 +1,210 @@ +#!/bin/bash +# ============================================================================= +# System Report Generator / Rendszer Riport Generátor +# ============================================================================= +# Generates a comprehensive system report with hardware, software, and +# performance information. +# +# Átfogó rendszer riportot generál hardver, szoftver és teljesítmény +# információkkal. 
+# +# Usage / Használat: +# ./system-report.sh [output_file] +# +# Example / Példa: +# ./system-report.sh /tmp/system-report.txt +# ============================================================================= + +set -euo pipefail + +# Colors / Színek +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Output file / Kimeneti fájl +OUTPUT_FILE="${1:-/dev/stdout}" + +# Print header / Fejléc nyomtatása +print_header() { + echo "==============================================" + echo " SYSTEM REPORT / RENDSZER RIPORT" + echo " Generated: $(date '+%Y-%m-%d %H:%M:%S')" + echo " Hostname: $(hostname)" + echo "==============================================" + echo "" +} + +# Print section / Szekció nyomtatása +print_section() { + echo "" + echo "----------------------------------------------" + echo " $1" + echo "----------------------------------------------" +} + +# System information / Rendszer információ +system_info() { + print_section "SYSTEM INFORMATION / RENDSZER INFORMÁCIÓ" + + echo "Hostname: $(hostname)" + echo "Kernel: $(uname -r)" + echo "OS: $(cat /etc/os-release 2>/dev/null | grep "PRETTY_NAME" | cut -d'"' -f2 || echo "Unknown")" + echo "Architecture: $(uname -m)" + echo "Uptime: $(uptime -p 2>/dev/null || uptime)" + echo "Boot Time: $(who -b 2>/dev/null | awk '{print $3, $4}' || echo "Unknown")" +} + +# CPU information / CPU információ +cpu_info() { + print_section "CPU INFORMATION / CPU INFORMÁCIÓ" + + echo "CPU Model: $(grep "model name" /proc/cpuinfo 2>/dev/null | head -1 | cut -d':' -f2 | xargs || echo "Unknown")" + echo "CPU Cores: $(nproc 2>/dev/null || echo "Unknown")" + echo "CPU MHz: $(grep "cpu MHz" /proc/cpuinfo 2>/dev/null | head -1 | cut -d':' -f2 | xargs || echo "Unknown")" + + if command -v lscpu &> /dev/null; then + echo "" + echo "CPU Details:" + lscpu | grep -E "^(Thread|Core|Socket|CPU\(s\)|Vendor|Model name)" | sed 's/^/ /' + fi + + echo "" + echo "Load Average: $(cat /proc/loadavg 
2>/dev/null | awk '{print $1, $2, $3}' || echo "Unknown")" +} + +# Memory information / Memória információ +memory_info() { + print_section "MEMORY INFORMATION / MEMÓRIA INFORMÁCIÓ" + + if command -v free &> /dev/null; then + free -h + else + cat /proc/meminfo | head -10 + fi +} + +# Disk information / Lemez információ +disk_info() { + print_section "DISK INFORMATION / LEMEZ INFORMÁCIÓ" + + echo "Filesystem Usage / Fájlrendszer használat:" + df -h --output=source,size,used,avail,pcent,target 2>/dev/null | grep -v "tmpfs\|loop" || df -h + + echo "" + echo "Block Devices / Blokk eszközök:" + lsblk -o NAME,SIZE,TYPE,MOUNTPOINT 2>/dev/null || echo "lsblk not available" +} + +# Network information / Hálózati információ +network_info() { + print_section "NETWORK INFORMATION / HÁLÓZATI INFORMÁCIÓ" + + echo "IP Addresses / IP címek:" + ip -4 addr show 2>/dev/null | grep inet | awk '{print " " $2, $NF}' || hostname -I + + echo "" + echo "Default Gateway / Alapértelmezett átjáró:" + ip route show default 2>/dev/null | awk '{print " " $3}' || echo " Unknown" + + echo "" + echo "DNS Servers / DNS szerverek:" + grep "nameserver" /etc/resolv.conf 2>/dev/null | awk '{print " " $2}' || echo " Unknown" + + echo "" + echo "Network Interfaces / Hálózati interfészek:" + ip link show 2>/dev/null | grep -E "^[0-9]" | awk '{print " " $2}' | sed 's/:$//' || ls /sys/class/net +} + +# Services status / Szolgáltatások állapota +services_status() { + print_section "KEY SERVICES STATUS / KULCS SZOLGÁLTATÁSOK ÁLLAPOTA" + + if command -v systemctl &> /dev/null; then + echo "Running Services / Futó szolgáltatások: $(systemctl list-units --type=service --state=running --no-legend 2>/dev/null | wc -l)" + echo "Failed Services / Hibás szolgáltatások: $(systemctl list-units --type=service --state=failed --no-legend 2>/dev/null | wc -l)" + + echo "" + echo "Failed Services List / Hibás szolgáltatások listája:" + systemctl list-units --type=service --state=failed --no-legend 2>/dev/null | awk '{print " 
" $1, $4}' || echo " None" + else + echo "systemctl not available" + fi +} + +# Top processes / Top folyamatok +top_processes() { + print_section "TOP PROCESSES / TOP FOLYAMATOK" + + echo "Top 10 by CPU / Top 10 CPU szerint:" + ps aux --sort=-%cpu 2>/dev/null | head -11 | awk '{print $1, $2, $3 "%", $11}' | column -t || echo "ps not available" + + echo "" + echo "Top 10 by Memory / Top 10 memória szerint:" + ps aux --sort=-%mem 2>/dev/null | head -11 | awk '{print $1, $2, $4 "%", $11}' | column -t || echo "ps not available" +} + +# Security information / Biztonsági információ +security_info() { + print_section "SECURITY INFORMATION / BIZTONSÁGI INFORMÁCIÓ" + + echo "Logged in Users / Bejelentkezett felhasználók:" + who 2>/dev/null || echo " None" + + echo "" + echo "Last 5 Logins / Utolsó 5 bejelentkezés:" + last -5 2>/dev/null || echo " Unknown" + + echo "" + echo "Failed Login Attempts (last 10) / Sikertelen bejelentkezések (utolsó 10):" + lastb -10 2>/dev/null || echo " Requires root access" +} + +# Docker status / Docker állapot +docker_status() { + print_section "DOCKER STATUS / DOCKER ÁLLAPOT" + + if command -v docker &> /dev/null; then + echo "Docker Version / Docker verzió:" + docker --version 2>/dev/null || echo " Unknown" + + echo "" + echo "Running Containers / Futó konténerek:" + docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Image}}" 2>/dev/null || echo " Cannot access docker" + + echo "" + echo "Docker Disk Usage / Docker lemezhasználat:" + docker system df 2>/dev/null || echo " Cannot access docker" + else + echo "Docker not installed" + fi +} + +# Main function / Fő függvény +main() { + { + print_header + system_info + cpu_info + memory_info + disk_info + network_info + services_status + top_processes + docker_status + security_info + + echo "" + echo "==============================================" + echo " END OF REPORT / RIPORT VÉGE" + echo "==============================================" + } > "$OUTPUT_FILE" + + if [ "$OUTPUT_FILE" != 
"/dev/stdout" ]; then + echo -e "${GREEN}Report saved to: $OUTPUT_FILE${NC}" + fi +} + +main diff --git a/1-sysadmin-toolkit/toolkit/__init__.py b/1-sysadmin-toolkit/toolkit/__init__.py new file mode 100644 index 0000000..947299b --- /dev/null +++ b/1-sysadmin-toolkit/toolkit/__init__.py @@ -0,0 +1,69 @@ +""" +SysAdmin Toolkit - Rendszergazda eszköztár. + +System administration utilities for log analysis, system health monitoring, +disk usage analysis, and service management. + +Rendszergazda segédprogramok log elemzéshez, rendszer állapot monitorozáshoz, +lemezterület analízishez és szolgáltatás menedzsmenthez. +""" + +from .models import ( + LogEntry, + LogAnalysisResult, + SystemHealth, + DiskUsage, + ProcessInfo, + ServiceStatus, +) +from .log_analyzer import LogAnalyzer, parse_syslog, parse_auth_log, analyze_logs +from .system_health import ( + get_system_health, + get_cpu_info, + get_memory_info, + get_disk_info, + get_top_processes, +) +from .disk_analyzer import ( + analyze_directory, + find_large_files, + get_directory_sizes, + get_filesystem_usage, +) +from .service_manager import ( + get_service_status, + list_services, + check_critical_services, +) + +__all__ = [ + # Models + "LogEntry", + "LogAnalysisResult", + "SystemHealth", + "DiskUsage", + "ProcessInfo", + "ServiceStatus", + # Log analyzer + "LogAnalyzer", + "parse_syslog", + "parse_auth_log", + "analyze_logs", + # System health + "get_system_health", + "get_cpu_info", + "get_memory_info", + "get_disk_info", + "get_top_processes", + # Disk analyzer + "analyze_directory", + "find_large_files", + "get_directory_sizes", + "get_filesystem_usage", + # Service manager + "get_service_status", + "list_services", + "check_critical_services", +] + +__version__ = "1.0.0" diff --git a/1-sysadmin-toolkit/toolkit/__main__.py b/1-sysadmin-toolkit/toolkit/__main__.py new file mode 100644 index 0000000..1513c26 --- /dev/null +++ b/1-sysadmin-toolkit/toolkit/__main__.py @@ -0,0 +1,14 @@ +""" +SysAdmin Toolkit - 
Package entry point. + +Allows running the toolkit as a module: + python -m toolkit + +Lehetővé teszi a toolkit futtatását modulként: + python -m toolkit +""" + +from .cli import main + +if __name__ == "__main__": + main() diff --git a/1-sysadmin-toolkit/toolkit/cli.py b/1-sysadmin-toolkit/toolkit/cli.py new file mode 100644 index 0000000..a2429a1 --- /dev/null +++ b/1-sysadmin-toolkit/toolkit/cli.py @@ -0,0 +1,542 @@ +""" +SysAdmin Toolkit CLI - Parancssori felület. + +Command-line interface for system administration tasks. + +Parancssori felület rendszergazda feladatokhoz. +""" + +from datetime import datetime +from pathlib import Path +from typing import Optional + +import typer +from rich.console import Console +from rich.panel import Panel +from rich.progress import Progress, SpinnerColumn, TextColumn +from rich.table import Table + +from . import disk_analyzer, log_analyzer, service_manager, system_health +from .models import ServiceState + +# Typer alkalmazás +app = typer.Typer( + name="sysadmin-toolkit", + help="SysAdmin Toolkit - Rendszergazda eszköztár / System Administration Toolkit", + add_completion=False, +) + +console = Console() + + +# ============================================ +# System Health Commands +# ============================================ + + +@app.command("health") +def show_system_health() -> None: + """ + Rendszer egészségi állapot megjelenítése. + + Show system health status including CPU, memory, disk, and load. 
+    """
+    # Spinner while the (potentially slow) psutil-backed probe runs;
+    # the collected snapshot is rendered after the Progress context exits.
+    with Progress(
+        SpinnerColumn(),
+        TextColumn("[progress.description]{task.description}"),
+        console=console,
+    ) as progress:
+        progress.add_task("Rendszer állapot lekérdezése...", total=None)
+        health = system_health.get_system_health()
+
+    # Header panel: hostname plus report generation timestamp.
+    console.print(
+        Panel(
+            f"[bold cyan]{health.hostname}[/bold cyan] - System Health Report",
+            subtitle=f"Generated at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
+        )
+    )
+
+    # CPU & load table.
+    cpu_table = Table(title="CPU & Load", show_header=True, header_style="bold magenta")
+    cpu_table.add_column("Metric", style="cyan")
+    cpu_table.add_column("Value", justify="right")
+
+    # Traffic-light colouring: green < 70%, yellow < 90%, red otherwise.
+    cpu_color = "green" if health.cpu_percent < 70 else "yellow" if health.cpu_percent < 90 else "red"
+    cpu_table.add_row("CPU Usage", f"[{cpu_color}]{health.cpu_percent:.1f}%[/{cpu_color}]")
+    cpu_table.add_row("CPU Cores", str(health.cpu_count))
+    # Frequency may be unavailable (e.g. in containers/VMs) -> row omitted.
+    if health.cpu_freq_mhz:
+        cpu_table.add_row("CPU Frequency", f"{health.cpu_freq_mhz:.0f} MHz")
+    cpu_table.add_row("Load Average (1m)", f"{health.load_avg_1m:.2f}")
+    cpu_table.add_row("Load Average (5m)", f"{health.load_avg_5m:.2f}")
+    cpu_table.add_row("Load Average (15m)", f"{health.load_avg_15m:.2f}")
+
+    # Memory table.
+    mem_table = Table(title="Memory", show_header=True, header_style="bold magenta")
+    mem_table.add_column("Metric", style="cyan")
+    mem_table.add_column("Value", justify="right")
+
+    # Same 70/90 thresholds as the CPU colouring above.
+    mem_color = "green" if health.memory_percent < 70 else "yellow" if health.memory_percent < 90 else "red"
+    mem_table.add_row("Memory Usage", f"[{mem_color}]{health.memory_percent:.1f}%[/{mem_color}]")
+    mem_table.add_row("Total", _format_bytes(health.memory_total_bytes))
+    mem_table.add_row("Used", _format_bytes(health.memory_used_bytes))
+    mem_table.add_row("Available", _format_bytes(health.memory_available_bytes))
+    mem_table.add_row("Swap Usage", f"{health.swap_percent:.1f}%")
+    mem_table.add_row("Swap Used", _format_bytes(health.swap_used_bytes))
+
+    console.print(cpu_table)
+ console.print(mem_table) + + # Disk táblázat + if health.disk_partitions: + disk_table = Table(title="Disk Partitions", show_header=True, header_style="bold magenta") + disk_table.add_column("Mount Point", style="cyan") + disk_table.add_column("Device") + disk_table.add_column("Type") + disk_table.add_column("Total", justify="right") + disk_table.add_column("Used", justify="right") + disk_table.add_column("Free", justify="right") + disk_table.add_column("Usage", justify="right") + + for disk in health.disk_partitions: + usage_color = "green" if disk.percent_used < 70 else "yellow" if disk.percent_used < 90 else "red" + disk_table.add_row( + disk.mountpoint, + disk.device, + disk.fstype, + _format_bytes(disk.total_bytes), + _format_bytes(disk.used_bytes), + _format_bytes(disk.free_bytes), + f"[{usage_color}]{disk.percent_used:.1f}%[/{usage_color}]", + ) + + console.print(disk_table) + + # Egyéb információk + info_table = Table(title="System Info", show_header=True, header_style="bold magenta") + info_table.add_column("Metric", style="cyan") + info_table.add_column("Value", justify="right") + + info_table.add_row("Uptime", _format_uptime(health.uptime_seconds)) + info_table.add_row("Boot Time", health.boot_time.strftime("%Y-%m-%d %H:%M:%S")) + info_table.add_row("Running Processes", str(health.process_count)) + info_table.add_row("Logged In Users", str(health.users_logged_in)) + + console.print(info_table) + + +@app.command("processes") +def show_top_processes( + count: int = typer.Option(10, "--count", "-n", help="Megjelenítendő folyamatok száma"), + sort_by: str = typer.Option("cpu", "--sort", "-s", help="Rendezés: cpu vagy memory"), +) -> None: + """ + Top folyamatok megjelenítése. + + Show top processes by CPU or memory usage. 
+ """ + processes = system_health.get_top_processes(count=count, sort_by=sort_by) + + table = Table(title=f"Top {count} Processes (sorted by {sort_by})", show_header=True) + table.add_column("PID", style="cyan", justify="right") + table.add_column("Name") + table.add_column("User") + table.add_column("Status") + table.add_column("CPU %", justify="right") + table.add_column("Memory %", justify="right") + table.add_column("RSS", justify="right") + + for proc in processes: + table.add_row( + str(proc.pid), + proc.name[:20], + proc.username[:10], + proc.status, + f"{proc.cpu_percent:.1f}", + f"{proc.memory_percent:.1f}", + _format_bytes(proc.memory_rss_bytes), + ) + + console.print(table) + + +# ============================================ +# Log Analysis Commands +# ============================================ + + +@app.command("logs") +def analyze_log_file( + path: Path = typer.Argument(..., help="Log fájl útvonala"), + year: Optional[int] = typer.Option(None, "--year", "-y", help="Év a timestamp-ekhez"), +) -> None: + """ + Log fájl elemzése. + + Analyze a log file and show statistics. 
+ """ + if not path.exists(): + console.print(f"[red]Error:[/red] File not found: {path}") + raise typer.Exit(1) + + with Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + console=console, + ) as progress: + progress.add_task("Log fájl elemzése...", total=None) + result = log_analyzer.analyze_logs(path, year=year) + + console.print(Panel(f"[bold]Log Analysis Report: {path}[/bold]")) + + # Összefoglaló + summary_table = Table(title="Summary", show_header=True, header_style="bold magenta") + summary_table.add_column("Metric", style="cyan") + summary_table.add_column("Value", justify="right") + + summary_table.add_row("Total Entries", str(result.total_entries)) + summary_table.add_row("[red]Errors[/red]", str(result.error_count)) + summary_table.add_row("[yellow]Warnings[/yellow]", str(result.warning_count)) + summary_table.add_row("[green]Successful Logins[/green]", str(result.successful_logins)) + summary_table.add_row("[red]Failed Logins[/red]", str(result.failed_logins)) + + if result.time_range_start: + summary_table.add_row("Time Range Start", result.time_range_start.strftime("%Y-%m-%d %H:%M:%S")) + if result.time_range_end: + summary_table.add_row("Time Range End", result.time_range_end.strftime("%Y-%m-%d %H:%M:%S")) + + console.print(summary_table) + + # Bejegyzések programonként + if result.entries_by_program: + program_table = Table(title="Entries by Program (Top 10)", show_header=True) + program_table.add_column("Program", style="cyan") + program_table.add_column("Count", justify="right") + + sorted_programs = sorted( + result.entries_by_program.items(), + key=lambda x: x[1], + reverse=True + )[:10] + + for program, count in sorted_programs: + program_table.add_row(program, str(count)) + + console.print(program_table) + + # Bejegyzések szintenként + if result.entries_by_level: + level_table = Table(title="Entries by Level", show_header=True) + level_table.add_column("Level", style="cyan") + 
level_table.add_column("Count", justify="right") + + for level, count in result.entries_by_level.items(): + color = "red" if level in ("error", "critical", "emergency") else "yellow" if level == "warning" else "white" + level_table.add_row(f"[{color}]{level}[/{color}]", str(count)) + + console.print(level_table) + + # Top hibaüzenetek + if result.top_error_messages: + console.print("\n[bold]Top Error Messages:[/bold]") + for i, msg in enumerate(result.top_error_messages[:5], 1): + console.print(f" {i}. [red]{msg[:80]}[/red]") + + +# ============================================ +# Disk Analysis Commands +# ============================================ + + +@app.command("disk") +def show_disk_usage( + path: Path = typer.Argument("/", help="Elemzendő útvonal"), +) -> None: + """ + Lemezterület használat megjelenítése. + + Show disk usage for a path. + """ + usage = disk_analyzer.get_filesystem_usage(str(path)) + + console.print(Panel(f"[bold]Disk Usage: {usage.mountpoint}[/bold]")) + + table = Table(show_header=True, header_style="bold magenta") + table.add_column("Metric", style="cyan") + table.add_column("Value", justify="right") + + usage_color = "green" if usage.percent_used < 70 else "yellow" if usage.percent_used < 90 else "red" + table.add_row("Device", usage.device) + table.add_row("Filesystem Type", usage.fstype) + table.add_row("Total", _format_bytes(usage.total_bytes)) + table.add_row("Used", _format_bytes(usage.used_bytes)) + table.add_row("Free", _format_bytes(usage.free_bytes)) + table.add_row("Usage", f"[{usage_color}]{usage.percent_used:.1f}%[/{usage_color}]") + + console.print(table) + + +@app.command("large-files") +def find_large_files_cmd( + path: Path = typer.Argument(".", help="Keresési könyvtár"), + min_size: int = typer.Option(100, "--min-size", "-s", help="Minimum méret MB-ban"), + count: int = typer.Option(20, "--count", "-n", help="Maximum eredmények száma"), +) -> None: + """ + Nagy fájlok keresése. + + Find large files in a directory. 
@app.command("dir-sizes")
def show_directory_sizes(
    path: Path = typer.Argument(".", help="Elemzendő könyvtár"),
) -> None:
    """
    Könyvtár méretek megjelenítése.

    Show directory sizes for immediate subdirectories.
    """
    # Walk the tree under a spinner; this can take a while on big trees.
    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        console=console,
    ) as progress:
        progress.add_task("Könyvtár méretek számítása...", total=None)
        results = disk_analyzer.analyze_directory(path)

    if not results:
        console.print("[yellow]No directories found[/yellow]")
        return

    table = Table(title=f"Directory Sizes: {path}", show_header=True)
    for header, options in (
        ("Size", {"justify": "right", "style": "cyan"}),
        ("Files", {"justify": "right"}),
        ("Dirs", {"justify": "right"}),
        ("Path", {}),
    ):
        table.add_column(header, **options)

    # Show at most the 20 largest entries (analyze_directory sorts descending).
    for entry in results[:20]:
        table.add_row(
            _format_bytes(entry.size_bytes),
            str(entry.file_count),
            str(entry.dir_count),
            str(entry.path)[:50],
        )

    console.print(table)
@app.command("service")
def show_service_status(
    name: str = typer.Argument(..., help="Szolgáltatás neve"),
) -> None:
    """
    Szolgáltatás részletes státusza.

    Show detailed status of a specific service.
    """
    status = service_manager.get_service_status(name)

    colors = {
        ServiceState.RUNNING: "green",
        ServiceState.STOPPED: "white",
        ServiceState.FAILED: "red",
        ServiceState.INACTIVE: "yellow",
    }
    state_color = colors.get(status.state, "white")

    console.print(Panel(f"[bold]Service: {status.name}[/bold]"))

    table = Table(show_header=True, header_style="bold magenta")
    table.add_column("Property", style="cyan")
    table.add_column("Value")

    def yes_no(flag: bool) -> str:
        # Green "yes" / red "no" rich markup for boolean properties.
        return "[green]yes[/green]" if flag else "[red]no[/red]"

    table.add_row("State", f"[{state_color}]{status.state.value}[/{state_color}]")
    table.add_row("Active", yes_no(status.is_active))
    table.add_row("Enabled", yes_no(status.is_enabled))

    # Optional properties are rendered only when systemd reported them.
    optional_rows = (
        ("Description", status.description),
        ("PID", str(status.pid) if status.pid else None),
        ("Memory", _format_bytes(status.memory_bytes) if status.memory_bytes else None),
        ("Load State", status.load_state),
        ("Sub State", status.sub_state),
    )
    for label, value in optional_rows:
        if value:
            table.add_row(label, value)

    console.print(table)
+ """ + results = service_manager.check_critical_services(services) + + table = Table(title="Critical Services Check", show_header=True) + table.add_column("Service", style="cyan") + table.add_column("Status") + table.add_column("Enabled") + + all_ok = True + for name, status in results.items(): + if status.is_active: + status_str = "[green]RUNNING[/green]" + elif status.state == ServiceState.FAILED: + status_str = "[red]FAILED[/red]" + all_ok = False + else: + status_str = f"[yellow]{status.state.value.upper()}[/yellow]" + all_ok = False + + table.add_row( + name, + status_str, + "[green]yes[/green]" if status.is_enabled else "[yellow]no[/yellow]", + ) + + console.print(table) + + if all_ok: + console.print("\n[green]All critical services are running![/green]") + else: + console.print("\n[red]Warning: Some services are not running![/red]") + + +# ============================================ +# Helper Functions +# ============================================ + + +def _format_bytes(size_bytes: int) -> str: + """Bájtok formázása ember-olvasható formátumba.""" + for unit in ["B", "KB", "MB", "GB", "TB"]: + if abs(size_bytes) < 1024.0: + return f"{size_bytes:.1f} {unit}" + size_bytes /= 1024.0 + return f"{size_bytes:.1f} PB" + + +def _format_uptime(seconds: float) -> str: + """Uptime formázása ember-olvasható formátumba.""" + days = int(seconds // 86400) + hours = int((seconds % 86400) // 3600) + minutes = int((seconds % 3600) // 60) + + parts = [] + if days: + parts.append(f"{days}d") + if hours: + parts.append(f"{hours}h") + if minutes or not parts: + parts.append(f"{minutes}m") + + return " ".join(parts) + + +def main() -> None: + """CLI belépési pont.""" + app() + + +if __name__ == "__main__": + main() diff --git a/1-sysadmin-toolkit/toolkit/disk_analyzer.py b/1-sysadmin-toolkit/toolkit/disk_analyzer.py new file mode 100644 index 0000000..2b25f83 --- /dev/null +++ b/1-sysadmin-toolkit/toolkit/disk_analyzer.py @@ -0,0 +1,280 @@ +""" +Disk Analyzer - Lemezterület 
def get_filesystem_usage(path: str = "/") -> DiskUsage:
    """
    Query filesystem usage for the given path.

    Args:
        path: Path to inspect; may be any path, not only a mount point.

    Returns:
        DiskUsage with totals, free space and the owning device/fstype.
    """
    usage = psutil.disk_usage(path)

    # Resolve the partition that actually contains `path` by picking the
    # mounted filesystem with the longest mountpoint prefix.
    # Fix: the previous exact `mountpoint == path` comparison only worked
    # when `path` itself was a mount point; any other path reported
    # device/fstype as "unknown".
    device = "unknown"
    fstype = "unknown"
    resolved = os.path.abspath(path)
    best_len = -1
    for partition in psutil.disk_partitions(all=False):
        mp = partition.mountpoint
        is_match = (
            resolved == mp
            or mp == os.sep
            or resolved.startswith(mp.rstrip(os.sep) + os.sep)
        )
        if is_match and len(mp) > best_len:
            best_len = len(mp)
            device = partition.device
            fstype = partition.fstype

    return DiskUsage(
        device=device,
        mountpoint=path,
        fstype=fstype,
        total_bytes=usage.total,
        used_bytes=usage.used,
        free_bytes=usage.free,
        percent_used=usage.percent,
    )
def _calculate_dir_size(path: Path) -> DirectorySize:
    """
    Recursively compute the total size of a directory tree.

    Args:
        path: Directory to measure.

    Returns:
        DirectorySize with the accumulated byte size and entry counts.
    """
    total_bytes = 0
    files = 0
    dirs = 0

    for node in path.rglob("*"):
        try:
            if node.is_file():
                files += 1
                total_bytes += node.stat().st_size
            elif node.is_dir():
                dirs += 1
        except (PermissionError, OSError):
            # Unreadable entries are skipped rather than aborting the scan.
            continue

    return DirectorySize(
        path=str(path),
        size_bytes=total_bytes,
        file_count=files,
        dir_count=dirs,
    )
+ exclude_patterns: Kizárandó mintázatok listája (pl. ["*.log", "*.tmp"]). + + Returns: + LargeFile objektumok listája méret szerint rendezve. + """ + path = Path(path) + if not path.exists(): + raise FileNotFoundError(f"Könyvtár nem található: {path}") + + exclude_patterns = exclude_patterns or [] + large_files = [] + + for entry in path.rglob("*"): + try: + if not entry.is_file(): + continue + + # Kizárási minták ellenőrzése + if any(entry.match(pattern) for pattern in exclude_patterns): + continue + + stat = entry.stat() + if stat.st_size >= min_size_bytes: + # Tulajdonos lekérdezése + try: + import pwd + owner = pwd.getpwuid(stat.st_uid).pw_name + except (ImportError, KeyError): + owner = str(stat.st_uid) + + large_files.append( + LargeFile( + path=str(entry), + size_bytes=stat.st_size, + modified_time=datetime.fromtimestamp(stat.st_mtime), + owner=owner, + ) + ) + except (PermissionError, OSError): + continue + + # Rendezés méret szerint (csökkenő) + large_files.sort(key=lambda x: x.size_bytes, reverse=True) + return large_files[:max_results] + + +def get_directory_sizes( + path: Path | str, + depth: int = 1, +) -> dict[str, int]: + """ + Könyvtár méretek lekérdezése adott mélységig. + + Args: + path: A kiindulási könyvtár útvonala. + depth: Maximum mélység. + + Returns: + Dict ahol a kulcs a könyvtár útvonala, az érték a méret bájtban. 
def format_size(size_bytes: int) -> str:
    """
    Format a byte count as a human-readable string.

    Args:
        size_bytes: Size in bytes.

    Returns:
        Formatted size string (e.g. "1.5 GB").
    """
    value = float(size_bytes)
    for unit in ("B", "KB", "MB", "GB", "TB", "PB"):
        if abs(value) < 1024.0:
            return f"{value:.1f} {unit}"
        value /= 1024.0
    return f"{value:.1f} EB"
# Syslog line format (RFC 3164), e.g.:
#   "Dec  4 10:30:15 hostname program[1234]: message"
# Fix: the named groups (?P<...>) had been stripped from the pattern
# (leaving the invalid construct `(?P\w` — a regex syntax error), while
# parse_syslog_line() reads groups "timestamp", "hostname", "program",
# "pid" and "message" from the match. The group names are restored here.
SYSLOG_PATTERN = re.compile(
    r"^(?P<timestamp>\w{3}\s+\d{1,2}\s+\d{2}:\d{2}:\d{2})\s+"
    r"(?P<hostname>\S+)\s+"
    r"(?P<program>[\w\-\.\/]+)"
    r"(?:\[(?P<pid>\d+)\])?"
    r":\s*"
    r"(?P<message>.*)$"
)

# Auth-log patterns marking a successful login / session open.
AUTH_SUCCESS_PATTERNS = [
    re.compile(r"Accepted\s+(?:password|publickey)\s+for\s+(\S+)\s+from\s+(\S+)"),
    re.compile(r"session opened for user\s+(\S+)"),
    re.compile(r"pam_unix\(.*:session\):\s+session opened for user\s+(\S+)"),
]

# Auth-log patterns marking a failed or rejected login attempt.
AUTH_FAILURE_PATTERNS = [
    re.compile(r"Failed\s+password\s+for\s+(?:invalid user\s+)?(\S+)\s+from\s+(\S+)"),
    re.compile(r"authentication failure.*user=(\S+)"),
    re.compile(r"pam_unix\(.*:auth\):\s+authentication failure"),
    re.compile(r"Invalid user\s+(\S+)\s+from\s+(\S+)"),
]
+ """ + self.year = year or datetime.now().year + self._entries: list[LogEntry] = [] + + def parse_syslog_line(self, line: str) -> Optional[LogEntry]: + """ + Egy syslog sor elemzése. + + Args: + line: A log sor. + + Returns: + LogEntry vagy None ha nem sikerült elemezni. + """ + line = line.strip() + if not line: + return None + + match = SYSLOG_PATTERN.match(line) + if not match: + return None + + groups = match.groupdict() + + # Timestamp konvertálása + try: + timestamp_str = f"{self.year} {groups['timestamp']}" + timestamp = datetime.strptime(timestamp_str, "%Y %b %d %H:%M:%S") + except ValueError: + return None + + # PID konvertálása + pid = int(groups["pid"]) if groups.get("pid") else None + + # Log szint detektálása + level = self._detect_level(groups["message"]) + + return LogEntry( + timestamp=timestamp, + hostname=groups["hostname"], + program=groups["program"], + pid=pid, + message=groups["message"], + level=level, + raw_line=line, + ) + + def _detect_level(self, message: str) -> LogLevel: + """ + Log szint detektálása üzenet alapján. + + Args: + message: A log üzenet. + + Returns: + Detektált LogLevel. + """ + for level, pattern in LEVEL_PATTERNS.items(): + if pattern.search(message): + return level + return LogLevel.INFO + + def parse_file(self, file_path: Path | str) -> Generator[LogEntry, None, None]: + """ + Log fájl elemzése. + + Args: + file_path: Log fájl útvonala. + + Yields: + LogEntry objektumok. + """ + file_path = Path(file_path) + if not file_path.exists(): + raise FileNotFoundError(f"Log fájl nem található: {file_path}") + + with open(file_path, "r", encoding="utf-8", errors="replace") as f: + for line in f: + entry = self.parse_syslog_line(line) + if entry: + yield entry + + def analyze(self, entries: list[LogEntry]) -> LogAnalysisResult: + """ + Log bejegyzések elemzése és statisztika készítése. + + Args: + entries: LogEntry lista. + + Returns: + LogAnalysisResult a statisztikákkal. 
+ """ + if not entries: + return LogAnalysisResult(total_entries=0) + + # Számlálók + program_counter: Counter[str] = Counter() + level_counter: Counter[str] = Counter() + error_messages: Counter[str] = Counter() + + # Auth log elemzés + failed_logins = 0 + successful_logins = 0 + + timestamps = [] + + for entry in entries: + program_counter[entry.program] += 1 + level_counter[entry.level.value] += 1 + timestamps.append(entry.timestamp) + + # Hiba üzenetek gyűjtése + if entry.level in (LogLevel.ERROR, LogLevel.CRITICAL, LogLevel.EMERGENCY): + # Üzenet normalizálása (számok eltávolítása) + normalized = re.sub(r"\d+", "N", entry.message[:100]) + error_messages[normalized] += 1 + + # Auth log elemzés + for pattern in AUTH_SUCCESS_PATTERNS: + if pattern.search(entry.message): + successful_logins += 1 + break + + for pattern in AUTH_FAILURE_PATTERNS: + if pattern.search(entry.message): + failed_logins += 1 + break + + # Top error üzenetek + top_errors = [msg for msg, _ in error_messages.most_common(10)] + + return LogAnalysisResult( + total_entries=len(entries), + error_count=sum( + level_counter.get(level.value, 0) + for level in (LogLevel.ERROR, LogLevel.CRITICAL, LogLevel.EMERGENCY) + ), + warning_count=level_counter.get(LogLevel.WARNING.value, 0), + entries_by_program=dict(program_counter), + entries_by_level=dict(level_counter), + time_range_start=min(timestamps) if timestamps else None, + time_range_end=max(timestamps) if timestamps else None, + top_error_messages=top_errors, + failed_logins=failed_logins, + successful_logins=successful_logins, + ) + + +def parse_syslog( + file_path: Path | str, year: Optional[int] = None +) -> list[LogEntry]: + """ + Syslog fájl elemzése. + + Args: + file_path: A syslog fájl útvonala. + year: Év a timestamp-ekhez. + + Returns: + LogEntry lista. 
class LogLevel(str, Enum):
    """Log severity levels (named after the syslog severities)."""

    DEBUG = "debug"
    INFO = "info"
    NOTICE = "notice"
    WARNING = "warning"
    ERROR = "error"
    CRITICAL = "critical"
    ALERT = "alert"
    EMERGENCY = "emergency"
class LogAnalysisResult(BaseModel):
    """
    Aggregated statistics produced from a batch of parsed log entries.

    Contains statistics and insights from log analysis.
    """

    model_config = ConfigDict(str_strip_whitespace=True)

    # Overall counts across all parsed entries.
    total_entries: int = Field(description="Összes bejegyzés száma")
    error_count: int = Field(default=0, description="Hibák száma")
    warning_count: int = Field(default=0, description="Figyelmeztetések száma")
    # Per-program / per-level breakdowns (keys are program names / level values).
    entries_by_program: dict[str, int] = Field(
        default_factory=dict, description="Bejegyzések programonként"
    )
    entries_by_level: dict[str, int] = Field(
        default_factory=dict, description="Bejegyzések szintenként"
    )
    # Time span covered by the analyzed entries (None when there were none).
    time_range_start: Optional[datetime] = Field(
        default=None, description="Legkorábbi bejegyzés"
    )
    time_range_end: Optional[datetime] = Field(
        default=None, description="Legutolsó bejegyzés"
    )
    # Most frequent (normalized) error messages.
    top_error_messages: list[str] = Field(
        default_factory=list, description="Leggyakoribb hibaüzenetek"
    )
    # Auth-log login counters.
    failed_logins: int = Field(default=0, description="Sikertelen bejelentkezések száma")
    successful_logins: int = Field(
        default=0, description="Sikeres bejelentkezések száma"
    )
+ """ + + model_config = ConfigDict(str_strip_whitespace=True) + + hostname: str = Field(description="Rendszer neve") + uptime_seconds: float = Field(description="Üzemidő másodpercben") + boot_time: datetime = Field(description="Boot időpont") + cpu_percent: float = Field(description="CPU használat százalékban") + cpu_count: int = Field(description="CPU magok száma") + cpu_freq_mhz: Optional[float] = Field( + default=None, description="CPU frekvencia MHz-ben" + ) + load_avg_1m: float = Field(description="Load average 1 perc") + load_avg_5m: float = Field(description="Load average 5 perc") + load_avg_15m: float = Field(description="Load average 15 perc") + memory_total_bytes: int = Field(description="Összes memória") + memory_used_bytes: int = Field(description="Használt memória") + memory_available_bytes: int = Field(description="Elérhető memória") + memory_percent: float = Field(description="Memória használat százalékban") + swap_total_bytes: int = Field(description="Összes swap") + swap_used_bytes: int = Field(description="Használt swap") + swap_percent: float = Field(description="Swap használat százalékban") + disk_partitions: list["DiskUsage"] = Field( + default_factory=list, description="Lemez partíciók" + ) + process_count: int = Field(description="Futó folyamatok száma") + users_logged_in: int = Field(description="Bejelentkezett felhasználók") + + +class DiskUsage(BaseModel): + """ + Lemez használat információ. + + Disk usage information for a partition or mount point. 
class DirectorySize(BaseModel):
    """
    Size information for a single directory (or file) entry.

    Directory size information.
    """

    model_config = ConfigDict(str_strip_whitespace=True)

    # Path of the measured entry.
    path: str = Field(description="Könyvtár útvonala")
    # Total size of all contained files, in bytes.
    size_bytes: int = Field(description="Méret bájtokban")
    # Number of files counted below this entry.
    file_count: int = Field(default=0, description="Fájlok száma")
    # Number of subdirectories counted below this entry.
    dir_count: int = Field(default=0, description="Alkönyvtárak száma")
class ServiceStatus(BaseModel):
    """
    Status snapshot of a single service.

    Service status information from systemd or init.
    """

    model_config = ConfigDict(str_strip_whitespace=True)

    name: str = Field(description="Szolgáltatás neve")
    # Coarse state (running/stopped/failed/inactive/unknown).
    state: ServiceState = Field(description="Aktuális állapot")
    # Whether the unit is configured to start at boot.
    is_enabled: bool = Field(description="Indításkor elindul-e")
    is_active: bool = Field(description="Aktív-e")
    pid: Optional[int] = Field(default=None, description="Main PID")
    memory_bytes: Optional[int] = Field(
        default=None, description="Memória használat bájtban"
    )
    cpu_usage_seconds: Optional[float] = Field(
        default=None, description="CPU használat másodpercben"
    )
    uptime_seconds: Optional[float] = Field(
        default=None, description="Szolgáltatás uptime"
    )
    description: Optional[str] = Field(default=None, description="Szolgáltatás leírása")
    # Raw systemd unit states (e.g. LoadState/SubState from `systemctl show`).
    load_state: Optional[str] = Field(default=None, description="Load state")
    sub_state: Optional[str] = Field(default=None, description="Sub state")
b/1-sysadmin-toolkit/toolkit/service_manager.py @@ -0,0 +1,255 @@ +""" +Service Manager - Szolgáltatás kezelő. + +Tools for querying and monitoring systemd services. + +Eszközök systemd szolgáltatások lekérdezéséhez és monitorozásához. +""" + +import subprocess +from typing import Optional + +from .models import ServiceState, ServiceStatus + + +def _run_systemctl(args: list[str]) -> tuple[str, int]: + """ + Systemctl parancs futtatása. + + Args: + args: Parancs argumentumok. + + Returns: + Tuple (stdout, return_code). + """ + try: + result = subprocess.run( + ["systemctl"] + args, + capture_output=True, + text=True, + timeout=30, + ) + return result.stdout, result.returncode + except FileNotFoundError: + return "", -1 + except subprocess.TimeoutExpired: + return "", -2 + + +def get_service_status(service_name: str) -> ServiceStatus: + """ + Szolgáltatás státusz lekérdezése. + + Args: + service_name: A szolgáltatás neve (pl. "nginx", "sshd"). + + Returns: + ServiceStatus objektum a szolgáltatás információival. 
+ + Example: + >>> status = get_service_status("nginx") + >>> print(f"State: {status.state}") + >>> print(f"Active: {status.is_active}") + """ + # Alapértelmezett értékek + state = ServiceState.UNKNOWN + is_enabled = False + is_active = False + pid = None + memory_bytes = None + description = None + load_state = None + sub_state = None + + # is-enabled ellenőrzés + stdout, rc = _run_systemctl(["is-enabled", service_name]) + if rc == 0: + is_enabled = True + + # is-active ellenőrzés + stdout, rc = _run_systemctl(["is-active", service_name]) + active_status = stdout.strip().lower() + is_active = active_status == "active" + + # Állapot meghatározása + if active_status == "active": + state = ServiceState.RUNNING + elif active_status == "inactive": + state = ServiceState.INACTIVE + elif active_status == "failed": + state = ServiceState.FAILED + else: + state = ServiceState.STOPPED + + # Részletes információ lekérdezése + stdout, rc = _run_systemctl([ + "show", service_name, + "--property=MainPID,MemoryCurrent,Description,LoadState,SubState" + ]) + + if rc == 0: + for line in stdout.strip().split("\n"): + if "=" not in line: + continue + key, value = line.split("=", 1) + + if key == "MainPID" and value.isdigit(): + pid_val = int(value) + if pid_val > 0: + pid = pid_val + elif key == "MemoryCurrent" and value.isdigit(): + mem_val = int(value) + # Reason: [not set] értéknél 18446744073709551615 jön vissza + if mem_val < 2**62: + memory_bytes = mem_val + elif key == "Description": + description = value + elif key == "LoadState": + load_state = value + elif key == "SubState": + sub_state = value + + return ServiceStatus( + name=service_name, + state=state, + is_enabled=is_enabled, + is_active=is_active, + pid=pid, + memory_bytes=memory_bytes, + description=description, + load_state=load_state, + sub_state=sub_state, + ) + + +def list_services( + filter_state: Optional[ServiceState] = None, + filter_enabled: Optional[bool] = None, +) -> list[ServiceStatus]: + """ + 
def check_critical_services(
    service_names: Optional[list[str]] = None,
) -> dict[str, ServiceStatus]:
    """
    Check the state of a set of critical services.

    Args:
        service_names: Services to check; when None, a default set of
            common Linux services is used.

    Returns:
        Mapping of service name to its ServiceStatus.

    Example:
        >>> results = check_critical_services(["sshd", "nginx"])
        >>> for name, status in results.items():
        ...     if not status.is_active:
        ...         print(f"WARNING: {name} is not running!")
    """
    # Default critical services on a typical Linux host.
    names = service_names if service_names is not None else [
        "sshd",
        "systemd-journald",
        "systemd-networkd",
        "systemd-resolved",
        "cron",
        "rsyslog",
    ]
    return {name: get_service_status(name) for name in names}
def get_service_logs(
    service_name: str,
    lines: int = 50,
    since: Optional[str] = None,
) -> str:
    """
    Fetch a service's journal entries via journalctl.

    Args:
        service_name: Unit name to query.
        lines: Number of log lines to return.
        since: Optional start time (e.g. "1 hour ago", "today").

    Returns:
        The journal output, or an empty string when journalctl is
        unavailable or the query timed out.
    """
    command = ["journalctl", "-u", service_name, "-n", str(lines), "--no-pager"]
    if since:
        command += ["--since", since]

    try:
        completed = subprocess.run(
            command,
            capture_output=True,
            text=True,
            timeout=30,
        )
    except (FileNotFoundError, subprocess.TimeoutExpired):
        return ""
    return completed.stdout
+ + Returns: + Memória információkat tartalmazó dict. + """ + mem = psutil.virtual_memory() + swap = psutil.swap_memory() + + return { + "memory_total_bytes": mem.total, + "memory_used_bytes": mem.used, + "memory_available_bytes": mem.available, + "memory_percent": mem.percent, + "memory_cached_bytes": getattr(mem, "cached", 0), + "memory_buffers_bytes": getattr(mem, "buffers", 0), + "swap_total_bytes": swap.total, + "swap_used_bytes": swap.used, + "swap_free_bytes": swap.free, + "swap_percent": swap.percent, + } + + +def get_disk_info(exclude_types: Optional[list[str]] = None) -> list[DiskUsage]: + """ + Lemez partíciók információinak lekérdezése. + + Args: + exclude_types: Kizárandó fájlrendszer típusok listája. + List of filesystem types to exclude. + + Returns: + DiskUsage objektumok listája. + """ + if exclude_types is None: + exclude_types = ["tmpfs", "devtmpfs", "squashfs", "overlay"] + + disks = [] + for partition in psutil.disk_partitions(all=False): + if partition.fstype in exclude_types: + continue + + try: + usage = psutil.disk_usage(partition.mountpoint) + disks.append( + DiskUsage( + device=partition.device, + mountpoint=partition.mountpoint, + fstype=partition.fstype, + total_bytes=usage.total, + used_bytes=usage.used, + free_bytes=usage.free, + percent_used=usage.percent, + ) + ) + except (PermissionError, OSError): + # Reason: Néhány mountpoint nem elérhető (pl. snap) + continue + + return disks + + +def get_top_processes( + count: int = 10, sort_by: str = "cpu" +) -> list[ProcessInfo]: + """ + Top folyamatok lekérdezése erőforrás használat alapján. + + Args: + count: Visszaadandó folyamatok száma. + sort_by: Rendezési szempont ("cpu" vagy "memory"). + + Returns: + ProcessInfo objektumok listája. 
+ """ + processes = [] + + for proc in psutil.process_iter( + ["pid", "name", "username", "status", "cpu_percent", "memory_percent", + "memory_info", "create_time", "cmdline"] + ): + try: + pinfo = proc.info + if pinfo.get("pid") is None: + continue + + # cmdline összeállítása + cmdline = pinfo.get("cmdline") + cmdline_str = " ".join(cmdline) if cmdline else None + + # memory_info kezelése + mem_info = pinfo.get("memory_info") + rss = mem_info.rss if mem_info else 0 + + # create_time konvertálása + create_time = pinfo.get("create_time", 0) + create_dt = datetime.fromtimestamp(create_time) if create_time else datetime.now() + + processes.append( + ProcessInfo( + pid=pinfo["pid"], + name=pinfo.get("name", "unknown"), + username=pinfo.get("username", "unknown"), + status=pinfo.get("status", "unknown"), + cpu_percent=pinfo.get("cpu_percent", 0.0) or 0.0, + memory_percent=pinfo.get("memory_percent", 0.0) or 0.0, + memory_rss_bytes=rss, + create_time=create_dt, + cmdline=cmdline_str, + ) + ) + except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess): + continue + + # Rendezés + if sort_by == "memory": + processes.sort(key=lambda p: p.memory_percent, reverse=True) + else: + processes.sort(key=lambda p: p.cpu_percent, reverse=True) + + return processes[:count] + + +def get_system_health() -> SystemHealth: + """ + Teljes rendszer egészségi állapot lekérdezése. + + Returns: + SystemHealth objektum minden releváns információval. 
+ + Example: + >>> health = get_system_health() + >>> print(f"CPU: {health.cpu_percent}%") + >>> print(f"Memory: {health.memory_percent}%") + """ + # Boot idő és uptime + boot_time = datetime.fromtimestamp(psutil.boot_time()) + uptime = (datetime.now() - boot_time).total_seconds() + + # Load average + try: + load_avg = psutil.getloadavg() + except (AttributeError, OSError): + # Reason: Windows-on nincs load average + load_avg = (0.0, 0.0, 0.0) + + # CPU info + cpu_info = get_cpu_info() + + # Memory info + mem_info = get_memory_info() + + # Disk info + disk_info = get_disk_info() + + # Bejelentkezett felhasználók + try: + users = len(psutil.users()) + except (AttributeError, RuntimeError): + users = 0 + + # Futó folyamatok száma + process_count = len(psutil.pids()) + + return SystemHealth( + hostname=socket.gethostname(), + uptime_seconds=uptime, + boot_time=boot_time, + cpu_percent=cpu_info["cpu_percent"], + cpu_count=cpu_info["cpu_count"], + cpu_freq_mhz=cpu_info.get("cpu_freq_mhz"), + load_avg_1m=load_avg[0], + load_avg_5m=load_avg[1], + load_avg_15m=load_avg[2], + memory_total_bytes=mem_info["memory_total_bytes"], + memory_used_bytes=mem_info["memory_used_bytes"], + memory_available_bytes=mem_info["memory_available_bytes"], + memory_percent=mem_info["memory_percent"], + swap_total_bytes=mem_info["swap_total_bytes"], + swap_used_bytes=mem_info["swap_used_bytes"], + swap_percent=mem_info["swap_percent"], + disk_partitions=disk_info, + process_count=process_count, + users_logged_in=users, + ) diff --git a/tests/test_sysadmin_toolkit/__init__.py b/tests/test_sysadmin_toolkit/__init__.py new file mode 100644 index 0000000..c6cc600 --- /dev/null +++ b/tests/test_sysadmin_toolkit/__init__.py @@ -0,0 +1 @@ +"""Tests for SysAdmin Toolkit.""" diff --git a/tests/test_sysadmin_toolkit/test_disk_analyzer.py b/tests/test_sysadmin_toolkit/test_disk_analyzer.py new file mode 100644 index 0000000..86fe3c9 --- /dev/null +++ b/tests/test_sysadmin_toolkit/test_disk_analyzer.py @@ 
-0,0 +1,211 @@ +""" +Tests for disk analyzer module. + +Tesztek a lemez elemző modulhoz. +""" + +import os +import tempfile +from datetime import datetime +from pathlib import Path + +import pytest + +import sys +sys.path.insert(0, str(__file__).rsplit("/tests/", 1)[0] + "/1-sysadmin-toolkit") + +from toolkit.disk_analyzer import ( + analyze_directory, + find_large_files, + format_size, + get_directory_sizes, + get_filesystem_usage, +) +from toolkit.models import DirectorySize, DiskUsage, LargeFile + + +class TestGetFilesystemUsage: + """Tests for get_filesystem_usage function.""" + + def test_filesystem_usage_returns_disk_usage(self): + """Test that function returns DiskUsage object.""" + usage = get_filesystem_usage("/") + assert isinstance(usage, DiskUsage) + + def test_filesystem_usage_has_valid_percent(self): + """Test that percent used is valid.""" + usage = get_filesystem_usage("/") + assert 0 <= usage.percent_used <= 100 + + def test_filesystem_usage_sizes_make_sense(self): + """Test that size values are logical.""" + usage = get_filesystem_usage("/") + assert usage.total_bytes > 0 + assert usage.used_bytes >= 0 + assert usage.free_bytes >= 0 + assert usage.used_bytes + usage.free_bytes <= usage.total_bytes * 1.1 # Allow small variance + + +class TestAnalyzeDirectory: + """Tests for analyze_directory function.""" + + def test_analyze_directory_returns_list(self): + """Test that function returns a list.""" + with tempfile.TemporaryDirectory() as tmpdir: + # Create some test files and directories + Path(tmpdir, "subdir1").mkdir() + Path(tmpdir, "subdir2").mkdir() + Path(tmpdir, "file1.txt").write_text("hello") + + results = analyze_directory(tmpdir) + assert isinstance(results, list) + + def test_analyze_directory_finds_subdirs(self): + """Test that subdirectories are found.""" + with tempfile.TemporaryDirectory() as tmpdir: + Path(tmpdir, "subdir1").mkdir() + Path(tmpdir, "subdir2").mkdir() + + results = analyze_directory(tmpdir) + paths = [r.path for r in 
results] + assert any("subdir1" in p for p in paths) + assert any("subdir2" in p for p in paths) + + def test_analyze_directory_excludes_hidden(self): + """Test that hidden directories are excluded by default.""" + with tempfile.TemporaryDirectory() as tmpdir: + Path(tmpdir, ".hidden").mkdir() + Path(tmpdir, "visible").mkdir() + + results = analyze_directory(tmpdir, exclude_hidden=True) + paths = [r.path for r in results] + assert not any(".hidden" in p for p in paths) + assert any("visible" in p for p in paths) + + def test_analyze_directory_includes_hidden(self): + """Test that hidden directories can be included.""" + with tempfile.TemporaryDirectory() as tmpdir: + Path(tmpdir, ".hidden").mkdir() + Path(tmpdir, "visible").mkdir() + + results = analyze_directory(tmpdir, exclude_hidden=False) + paths = [r.path for r in results] + assert any(".hidden" in p for p in paths) + + def test_analyze_directory_not_found(self): + """Test FileNotFoundError for non-existent directory.""" + with pytest.raises(FileNotFoundError): + analyze_directory("/nonexistent/directory") + + def test_analyze_directory_not_a_directory(self): + """Test ValueError when path is not a directory.""" + with tempfile.NamedTemporaryFile() as f: + with pytest.raises(ValueError): + analyze_directory(f.name) + + +class TestFindLargeFiles: + """Tests for find_large_files function.""" + + def test_find_large_files_returns_list(self): + """Test that function returns a list.""" + with tempfile.TemporaryDirectory() as tmpdir: + results = find_large_files(tmpdir, min_size_bytes=1) + assert isinstance(results, list) + + def test_find_large_files_finds_files(self): + """Test that large files are found.""" + with tempfile.TemporaryDirectory() as tmpdir: + # Create a "large" file (larger than 10 bytes for test) + large_file = Path(tmpdir, "large.txt") + large_file.write_text("x" * 100) + + results = find_large_files(tmpdir, min_size_bytes=10) + assert len(results) > 0 + assert any("large.txt" in r.path for r in 
results) + + def test_find_large_files_filters_by_size(self): + """Test that size filter works.""" + with tempfile.TemporaryDirectory() as tmpdir: + # Create small and large files + Path(tmpdir, "small.txt").write_text("x") + Path(tmpdir, "large.txt").write_text("x" * 1000) + + results = find_large_files(tmpdir, min_size_bytes=500) + paths = [r.path for r in results] + assert any("large.txt" in p for p in paths) + assert not any("small.txt" in p for p in paths) + + def test_find_large_files_respects_max_results(self): + """Test that max_results is respected.""" + with tempfile.TemporaryDirectory() as tmpdir: + # Create multiple files + for i in range(10): + Path(tmpdir, f"file{i}.txt").write_text("x" * 100) + + results = find_large_files(tmpdir, min_size_bytes=10, max_results=5) + assert len(results) <= 5 + + def test_find_large_files_returns_large_file_objects(self): + """Test that results are LargeFile objects.""" + with tempfile.TemporaryDirectory() as tmpdir: + Path(tmpdir, "test.txt").write_text("x" * 100) + + results = find_large_files(tmpdir, min_size_bytes=10) + if results: + assert isinstance(results[0], LargeFile) + + def test_find_large_files_sorted_by_size(self): + """Test that results are sorted by size descending.""" + with tempfile.TemporaryDirectory() as tmpdir: + Path(tmpdir, "small.txt").write_text("x" * 100) + Path(tmpdir, "medium.txt").write_text("x" * 500) + Path(tmpdir, "large.txt").write_text("x" * 1000) + + results = find_large_files(tmpdir, min_size_bytes=10) + if len(results) > 1: + sizes = [r.size_bytes for r in results] + assert sizes == sorted(sizes, reverse=True) + + +class TestGetDirectorySizes: + """Tests for get_directory_sizes function.""" + + def test_directory_sizes_returns_dict(self): + """Test that function returns a dictionary.""" + with tempfile.TemporaryDirectory() as tmpdir: + sizes = get_directory_sizes(tmpdir) + assert isinstance(sizes, dict) + + def test_directory_sizes_includes_root(self): + """Test that root directory 
is included.""" + with tempfile.TemporaryDirectory() as tmpdir: + sizes = get_directory_sizes(tmpdir) + assert tmpdir in sizes + + +class TestFormatSize: + """Tests for format_size function.""" + + def test_format_bytes(self): + """Test formatting bytes.""" + assert format_size(100) == "100.0 B" + assert format_size(0) == "0.0 B" + + def test_format_kilobytes(self): + """Test formatting kilobytes.""" + assert format_size(1024) == "1.0 KB" + assert format_size(2048) == "2.0 KB" + + def test_format_megabytes(self): + """Test formatting megabytes.""" + assert format_size(1048576) == "1.0 MB" + assert format_size(5242880) == "5.0 MB" + + def test_format_gigabytes(self): + """Test formatting gigabytes.""" + assert format_size(1073741824) == "1.0 GB" + + def test_format_terabytes(self): + """Test formatting terabytes.""" + assert format_size(1099511627776) == "1.0 TB" diff --git a/tests/test_sysadmin_toolkit/test_log_analyzer.py b/tests/test_sysadmin_toolkit/test_log_analyzer.py new file mode 100644 index 0000000..5d548aa --- /dev/null +++ b/tests/test_sysadmin_toolkit/test_log_analyzer.py @@ -0,0 +1,192 @@ +""" +Tests for log analyzer module. + +Tesztek a log elemző modulhoz. 
+""" + +import tempfile +from datetime import datetime +from pathlib import Path + +import pytest + +import sys +sys.path.insert(0, str(__file__).rsplit("/tests/", 1)[0] + "/1-sysadmin-toolkit") + +from toolkit.log_analyzer import ( + LogAnalyzer, + analyze_logs, + parse_auth_log, + parse_syslog, +) +from toolkit.models import LogEntry, LogLevel + + +# Sample log lines for testing / Teszt log sorok +SAMPLE_SYSLOG_LINES = [ + "Dec 4 10:30:15 server01 sshd[1234]: Accepted password for user1 from 192.168.1.100", + "Dec 4 10:30:16 server01 nginx[5678]: worker process started", + "Dec 4 10:30:17 server01 kernel: Out of memory: Kill process 9999", + "Dec 4 10:30:18 server01 cron[100]: (root) CMD (/usr/bin/backup)", + "Dec 4 10:30:19 server01 sshd[1235]: Failed password for invalid user admin from 10.0.0.1", +] + +SAMPLE_AUTH_LOG_LINES = [ + "Dec 4 10:30:15 server01 sshd[1234]: Accepted password for user1 from 192.168.1.100", + "Dec 4 10:30:16 server01 sshd[1235]: Failed password for root from 10.0.0.1", + "Dec 4 10:30:17 server01 sshd[1236]: Failed password for invalid user admin from 10.0.0.2", + "Dec 4 10:30:18 server01 sudo: pam_unix(sudo:session): session opened for user root", + "Dec 4 10:30:19 server01 sshd[1237]: Invalid user test from 10.0.0.3", +] + + +class TestLogAnalyzer: + """Tests for LogAnalyzer class.""" + + def test_parse_syslog_line(self): + """Test parsing a single syslog line.""" + analyzer = LogAnalyzer(year=2024) + entry = analyzer.parse_syslog_line(SAMPLE_SYSLOG_LINES[0]) + + assert entry is not None + assert entry.hostname == "server01" + assert entry.program == "sshd" + assert entry.pid == 1234 + assert "Accepted password" in entry.message + + def test_parse_syslog_line_without_pid(self): + """Test parsing syslog line without PID.""" + analyzer = LogAnalyzer(year=2024) + entry = analyzer.parse_syslog_line(SAMPLE_SYSLOG_LINES[2]) + + assert entry is not None + assert entry.program == "kernel" + assert entry.pid is None + + def 
test_parse_empty_line(self): + """Test parsing empty line returns None.""" + analyzer = LogAnalyzer(year=2024) + entry = analyzer.parse_syslog_line("") + assert entry is None + + def test_parse_invalid_line(self): + """Test parsing invalid line returns None.""" + analyzer = LogAnalyzer(year=2024) + entry = analyzer.parse_syslog_line("This is not a valid syslog line") + assert entry is None + + def test_detect_error_level(self): + """Test error level detection.""" + analyzer = LogAnalyzer(year=2024) + + # Error keyword detection + assert analyzer._detect_level("Connection failed") == LogLevel.ERROR + assert analyzer._detect_level("Error occurred") == LogLevel.ERROR + assert analyzer._detect_level("Warning: disk full") == LogLevel.WARNING + assert analyzer._detect_level("Normal message") == LogLevel.INFO + + +class TestParseSyslog: + """Tests for parse_syslog function.""" + + def test_parse_syslog_file(self): + """Test parsing entire syslog file.""" + with tempfile.NamedTemporaryFile(mode="w", suffix=".log", delete=False) as f: + f.write("\n".join(SAMPLE_SYSLOG_LINES)) + f.flush() + + entries = parse_syslog(f.name, year=2024) + + assert len(entries) == 5 + assert entries[0].program == "sshd" + assert entries[1].program == "nginx" + + def test_parse_syslog_file_not_found(self): + """Test FileNotFoundError for non-existent file.""" + with pytest.raises(FileNotFoundError): + parse_syslog("/nonexistent/file.log") + + +class TestParseAuthLog: + """Tests for parse_auth_log function.""" + + def test_parse_auth_log_file(self): + """Test parsing auth.log file.""" + with tempfile.NamedTemporaryFile(mode="w", suffix=".log", delete=False) as f: + f.write("\n".join(SAMPLE_AUTH_LOG_LINES)) + f.flush() + + entries = parse_auth_log(f.name, year=2024) + + assert len(entries) == 5 + + +class TestAnalyzeLogs: + """Tests for analyze_logs function.""" + + def test_analyze_logs(self): + """Test log file analysis.""" + with tempfile.NamedTemporaryFile(mode="w", suffix=".log", delete=False) 
as f: + f.write("\n".join(SAMPLE_SYSLOG_LINES)) + f.flush() + + result = analyze_logs(f.name, year=2024) + + assert result.total_entries == 5 + assert "sshd" in result.entries_by_program + assert result.entries_by_program["sshd"] >= 1 + + def test_analyze_auth_logs(self): + """Test auth log analysis for login tracking.""" + with tempfile.NamedTemporaryFile(mode="w", suffix=".log", delete=False) as f: + f.write("\n".join(SAMPLE_AUTH_LOG_LINES)) + f.flush() + + result = analyze_logs(f.name, year=2024) + + assert result.successful_logins >= 1 + assert result.failed_logins >= 1 + + def test_analyze_empty_file(self): + """Test analyzing empty file.""" + with tempfile.NamedTemporaryFile(mode="w", suffix=".log", delete=False) as f: + f.write("") + f.flush() + + result = analyze_logs(f.name, year=2024) + + assert result.total_entries == 0 + + +class TestLogAnalyzerIntegration: + """Integration tests for log analyzer.""" + + def test_full_analysis_workflow(self): + """Test complete analysis workflow.""" + # Create sample log content + log_content = """Dec 4 10:30:15 server01 sshd[1234]: Accepted password for user1 from 192.168.1.100 +Dec 4 10:30:16 server01 nginx[5678]: Error: connection refused +Dec 4 10:30:17 server01 kernel: Warning: disk usage high +Dec 4 10:30:18 server01 sshd[1235]: Failed password for root from 10.0.0.1 +Dec 4 10:30:19 server01 cron[100]: INFO: job completed""" + + with tempfile.NamedTemporaryFile(mode="w", suffix=".log", delete=False) as f: + f.write(log_content) + f.flush() + + analyzer = LogAnalyzer(year=2024) + entries = list(analyzer.parse_file(f.name)) + result = analyzer.analyze(entries) + + # Verify counts + assert result.total_entries == 5 + assert result.error_count >= 1 # "Error: connection refused" + assert result.warning_count >= 1 # "Warning: disk usage" + + # Verify program breakdown + assert "sshd" in result.entries_by_program + assert result.entries_by_program["sshd"] == 2 + + # Verify time range + assert result.time_range_start is 
not None + assert result.time_range_end is not None diff --git a/tests/test_sysadmin_toolkit/test_models.py b/tests/test_sysadmin_toolkit/test_models.py new file mode 100644 index 0000000..af8e91f --- /dev/null +++ b/tests/test_sysadmin_toolkit/test_models.py @@ -0,0 +1,226 @@ +""" +Tests for SysAdmin Toolkit models. + +Tesztek a toolkit modellekhez. +""" + +from datetime import datetime + +import pytest + +import sys +sys.path.insert(0, str(__file__).rsplit("/tests/", 1)[0] + "/1-sysadmin-toolkit") + +from toolkit.models import ( + DirectorySize, + DiskUsage, + LargeFile, + LogAnalysisResult, + LogEntry, + LogLevel, + ProcessInfo, + ServiceState, + ServiceStatus, + SystemHealth, +) + + +class TestLogEntry: + """Tests for LogEntry model.""" + + def test_create_log_entry(self): + """Test creating a basic log entry.""" + entry = LogEntry( + timestamp=datetime.now(), + hostname="server01", + program="sshd", + message="Accepted password for user", + level=LogLevel.INFO, + ) + assert entry.hostname == "server01" + assert entry.program == "sshd" + assert entry.level == LogLevel.INFO + + def test_log_entry_with_pid(self): + """Test log entry with PID.""" + entry = LogEntry( + timestamp=datetime.now(), + hostname="server01", + program="nginx", + pid=1234, + message="worker process started", + ) + assert entry.pid == 1234 + + def test_log_entry_without_pid(self): + """Test log entry without PID defaults to None.""" + entry = LogEntry( + timestamp=datetime.now(), + hostname="server01", + program="kernel", + message="kernel message", + ) + assert entry.pid is None + + +class TestLogLevel: + """Tests for LogLevel enum.""" + + def test_all_log_levels_exist(self): + """Test that all expected log levels are defined.""" + assert LogLevel.DEBUG.value == "debug" + assert LogLevel.INFO.value == "info" + assert LogLevel.WARNING.value == "warning" + assert LogLevel.ERROR.value == "error" + assert LogLevel.CRITICAL.value == "critical" + + def test_log_level_from_string(self): + """Test 
creating log level from string.""" + assert LogLevel("error") == LogLevel.ERROR + + +class TestLogAnalysisResult: + """Tests for LogAnalysisResult model.""" + + def test_empty_analysis_result(self): + """Test creating empty analysis result.""" + result = LogAnalysisResult(total_entries=0) + assert result.total_entries == 0 + assert result.error_count == 0 + assert result.warning_count == 0 + + def test_analysis_result_with_data(self): + """Test analysis result with data.""" + result = LogAnalysisResult( + total_entries=100, + error_count=5, + warning_count=10, + entries_by_program={"sshd": 50, "nginx": 50}, + entries_by_level={"info": 85, "error": 5, "warning": 10}, + failed_logins=3, + successful_logins=10, + ) + assert result.total_entries == 100 + assert result.error_count == 5 + assert result.entries_by_program["sshd"] == 50 + + +class TestSystemHealth: + """Tests for SystemHealth model.""" + + def test_create_system_health(self): + """Test creating system health object.""" + health = SystemHealth( + hostname="server01", + uptime_seconds=86400.0, + boot_time=datetime.now(), + cpu_percent=25.5, + cpu_count=4, + load_avg_1m=0.5, + load_avg_5m=0.6, + load_avg_15m=0.7, + memory_total_bytes=8589934592, + memory_used_bytes=4294967296, + memory_available_bytes=4294967296, + memory_percent=50.0, + swap_total_bytes=2147483648, + swap_used_bytes=0, + swap_percent=0.0, + process_count=100, + users_logged_in=2, + ) + assert health.hostname == "server01" + assert health.cpu_percent == 25.5 + assert health.memory_percent == 50.0 + + +class TestDiskUsage: + """Tests for DiskUsage model.""" + + def test_create_disk_usage(self): + """Test creating disk usage object.""" + disk = DiskUsage( + device="/dev/sda1", + mountpoint="/", + fstype="ext4", + total_bytes=107374182400, + used_bytes=53687091200, + free_bytes=53687091200, + percent_used=50.0, + ) + assert disk.device == "/dev/sda1" + assert disk.percent_used == 50.0 + + +class TestDirectorySize: + """Tests for DirectorySize 
model.""" + + def test_create_directory_size(self): + """Test creating directory size object.""" + dir_size = DirectorySize( + path="/var/log", + size_bytes=1073741824, + file_count=100, + dir_count=10, + ) + assert dir_size.path == "/var/log" + assert dir_size.size_bytes == 1073741824 + + +class TestLargeFile: + """Tests for LargeFile model.""" + + def test_create_large_file(self): + """Test creating large file object.""" + large_file = LargeFile( + path="/var/log/syslog", + size_bytes=524288000, + modified_time=datetime.now(), + owner="root", + ) + assert large_file.size_bytes == 524288000 + assert large_file.owner == "root" + + +class TestProcessInfo: + """Tests for ProcessInfo model.""" + + def test_create_process_info(self): + """Test creating process info object.""" + proc = ProcessInfo( + pid=1, + name="systemd", + username="root", + status="running", + cpu_percent=0.5, + memory_percent=1.2, + memory_rss_bytes=12582912, + create_time=datetime.now(), + ) + assert proc.pid == 1 + assert proc.name == "systemd" + + +class TestServiceStatus: + """Tests for ServiceStatus model.""" + + def test_create_service_status(self): + """Test creating service status object.""" + status = ServiceStatus( + name="nginx", + state=ServiceState.RUNNING, + is_enabled=True, + is_active=True, + pid=1234, + description="A high performance web server", + ) + assert status.name == "nginx" + assert status.state == ServiceState.RUNNING + assert status.is_active is True + + def test_service_states(self): + """Test all service states exist.""" + assert ServiceState.RUNNING.value == "running" + assert ServiceState.STOPPED.value == "stopped" + assert ServiceState.FAILED.value == "failed" + assert ServiceState.INACTIVE.value == "inactive" diff --git a/tests/test_sysadmin_toolkit/test_service_manager.py b/tests/test_sysadmin_toolkit/test_service_manager.py new file mode 100644 index 0000000..73533a6 --- /dev/null +++ b/tests/test_sysadmin_toolkit/test_service_manager.py @@ -0,0 +1,269 @@ +""" 
+Tests for service manager module. + +Tesztek a szolgáltatás kezelő modulhoz. +""" + +from unittest.mock import MagicMock, patch + +import pytest + +import sys +sys.path.insert(0, str(__file__).rsplit("/tests/", 1)[0] + "/1-sysadmin-toolkit") + +from toolkit.service_manager import ( + check_critical_services, + get_failed_services, + get_service_logs, + get_service_status, + list_services, +) +from toolkit.models import ServiceState, ServiceStatus + + +class TestGetServiceStatus: + """Tests for get_service_status function.""" + + @patch("toolkit.service_manager._run_systemctl") + def test_service_status_running(self, mock_run): + """Test getting status of a running service.""" + # Setup mock responses + mock_run.side_effect = [ + ("enabled", 0), # is-enabled + ("active", 0), # is-active + ("MainPID=1234\nDescription=Test Service\nLoadState=loaded\nSubState=running\n", 0), # show + ] + + status = get_service_status("test-service") + + assert isinstance(status, ServiceStatus) + assert status.name == "test-service" + assert status.state == ServiceState.RUNNING + assert status.is_enabled is True + assert status.is_active is True + + @patch("toolkit.service_manager._run_systemctl") + def test_service_status_stopped(self, mock_run): + """Test getting status of a stopped service.""" + mock_run.side_effect = [ + ("disabled", 1), # is-enabled + ("inactive", 3), # is-active + ("MainPID=0\nDescription=Test Service\n", 0), # show + ] + + status = get_service_status("test-service") + + assert status.state == ServiceState.INACTIVE + assert status.is_enabled is False + assert status.is_active is False + + @patch("toolkit.service_manager._run_systemctl") + def test_service_status_failed(self, mock_run): + """Test getting status of a failed service.""" + mock_run.side_effect = [ + ("enabled", 0), + ("failed", 1), + ("MainPID=0\n", 0), + ] + + status = get_service_status("test-service") + + assert status.state == ServiceState.FAILED + + 
@patch("toolkit.service_manager._run_systemctl") + def test_service_status_with_memory(self, mock_run): + """Test getting service status with memory info.""" + mock_run.side_effect = [ + ("enabled", 0), + ("active", 0), + ("MainPID=1234\nMemoryCurrent=52428800\n", 0), + ] + + status = get_service_status("test-service") + + assert status.pid == 1234 + assert status.memory_bytes == 52428800 + + +class TestListServices: + """Tests for list_services function.""" + + @patch("toolkit.service_manager.get_service_status") + @patch("toolkit.service_manager._run_systemctl") + def test_list_services_returns_list(self, mock_run, mock_status): + """Test that list_services returns a list.""" + mock_run.return_value = ( + "nginx.service loaded active running nginx server\n" + "sshd.service loaded active running SSH daemon\n", + 0, + ) + mock_status.return_value = ServiceStatus( + name="test", + state=ServiceState.RUNNING, + is_enabled=True, + is_active=True, + ) + + services = list_services() + assert isinstance(services, list) + + @patch("toolkit.service_manager.get_service_status") + @patch("toolkit.service_manager._run_systemctl") + def test_list_services_filter_by_state(self, mock_run, mock_status): + """Test filtering by state.""" + mock_run.return_value = ( + "nginx.service loaded active running nginx\n", + 0, + ) + mock_status.return_value = ServiceStatus( + name="nginx", + state=ServiceState.RUNNING, + is_enabled=True, + is_active=True, + ) + + services = list_services(filter_state=ServiceState.RUNNING) + assert all(s.state == ServiceState.RUNNING for s in services) + + @patch("toolkit.service_manager._run_systemctl") + def test_list_services_empty_on_error(self, mock_run): + """Test empty list on systemctl error.""" + mock_run.return_value = ("", 1) + + services = list_services() + assert services == [] + + +class TestCheckCriticalServices: + """Tests for check_critical_services function.""" + + @patch("toolkit.service_manager.get_service_status") + def 
test_check_critical_services_returns_dict(self, mock_status): + """Test that function returns a dictionary.""" + mock_status.return_value = ServiceStatus( + name="test", + state=ServiceState.RUNNING, + is_enabled=True, + is_active=True, + ) + + results = check_critical_services(["sshd"]) + assert isinstance(results, dict) + assert "sshd" in results + + @patch("toolkit.service_manager.get_service_status") + def test_check_critical_services_custom_list(self, mock_status): + """Test with custom service list.""" + mock_status.return_value = ServiceStatus( + name="test", + state=ServiceState.RUNNING, + is_enabled=True, + is_active=True, + ) + + custom_services = ["nginx", "mysql", "redis"] + results = check_critical_services(custom_services) + + assert len(results) == 3 + assert all(name in results for name in custom_services) + + @patch("toolkit.service_manager.get_service_status") + def test_check_critical_services_default_list(self, mock_status): + """Test with default service list.""" + mock_status.return_value = ServiceStatus( + name="test", + state=ServiceState.RUNNING, + is_enabled=True, + is_active=True, + ) + + results = check_critical_services(None) + # Should contain some default services + assert len(results) > 0 + + +class TestGetFailedServices: + """Tests for get_failed_services function.""" + + @patch("toolkit.service_manager.list_services") + def test_get_failed_services(self, mock_list): + """Test getting failed services.""" + mock_list.return_value = [ + ServiceStatus( + name="failed1", + state=ServiceState.FAILED, + is_enabled=True, + is_active=False, + ), + ] + + failed = get_failed_services() + mock_list.assert_called_once_with(filter_state=ServiceState.FAILED) + + +class TestGetServiceLogs: + """Tests for get_service_logs function.""" + + @patch("toolkit.service_manager.subprocess.run") + def test_get_service_logs_success(self, mock_run): + """Test successful log retrieval.""" + mock_run.return_value = MagicMock(stdout="Log line 1\nLog line 2\n") + 
+ logs = get_service_logs("nginx", lines=10) + + assert "Log line 1" in logs + mock_run.assert_called_once() + + @patch("toolkit.service_manager.subprocess.run") + def test_get_service_logs_with_since(self, mock_run): + """Test log retrieval with since parameter.""" + mock_run.return_value = MagicMock(stdout="Recent logs") + + logs = get_service_logs("nginx", lines=50, since="1 hour ago") + + call_args = mock_run.call_args[0][0] + assert "--since" in call_args + assert "1 hour ago" in call_args + + @patch("toolkit.service_manager.subprocess.run") + def test_get_service_logs_not_found(self, mock_run): + """Test when journalctl is not found.""" + mock_run.side_effect = FileNotFoundError + + logs = get_service_logs("nginx") + assert logs == "" + + +class TestServiceStatusModel: + """Additional tests for ServiceStatus model integration.""" + + def test_service_status_creation(self): + """Test creating ServiceStatus with all fields.""" + status = ServiceStatus( + name="nginx", + state=ServiceState.RUNNING, + is_enabled=True, + is_active=True, + pid=1234, + memory_bytes=52428800, + description="A high performance web server", + load_state="loaded", + sub_state="running", + ) + + assert status.name == "nginx" + assert status.pid == 1234 + assert status.memory_bytes == 52428800 + + def test_service_status_optional_fields(self): + """Test ServiceStatus with optional fields as None.""" + status = ServiceStatus( + name="test", + state=ServiceState.STOPPED, + is_enabled=False, + is_active=False, + ) + + assert status.pid is None + assert status.memory_bytes is None + assert status.description is None diff --git a/tests/test_sysadmin_toolkit/test_system_health.py b/tests/test_sysadmin_toolkit/test_system_health.py new file mode 100644 index 0000000..8ce275c --- /dev/null +++ b/tests/test_sysadmin_toolkit/test_system_health.py @@ -0,0 +1,224 @@ +""" +Tests for system health module. + +Tesztek a rendszer egészség modulhoz. 
+""" + +from datetime import datetime +from unittest.mock import MagicMock, patch + +import pytest + +import sys +sys.path.insert(0, str(__file__).rsplit("/tests/", 1)[0] + "/1-sysadmin-toolkit") + +from toolkit.system_health import ( + get_cpu_info, + get_disk_info, + get_memory_info, + get_system_health, + get_top_processes, +) +from toolkit.models import DiskUsage, ProcessInfo, SystemHealth + + +class TestGetCPUInfo: + """Tests for get_cpu_info function.""" + + def test_cpu_info_returns_dict(self): + """Test that cpu_info returns a dictionary.""" + info = get_cpu_info() + assert isinstance(info, dict) + + def test_cpu_info_has_required_keys(self): + """Test that cpu_info has required keys.""" + info = get_cpu_info() + assert "cpu_percent" in info + assert "cpu_count" in info + + def test_cpu_percent_is_valid(self): + """Test that CPU percent is between 0 and 100.""" + info = get_cpu_info() + assert 0 <= info["cpu_percent"] <= 100 + + def test_cpu_count_is_positive(self): + """Test that CPU count is positive.""" + info = get_cpu_info() + assert info["cpu_count"] > 0 + + +class TestGetMemoryInfo: + """Tests for get_memory_info function.""" + + def test_memory_info_returns_dict(self): + """Test that memory_info returns a dictionary.""" + info = get_memory_info() + assert isinstance(info, dict) + + def test_memory_info_has_required_keys(self): + """Test that memory_info has required keys.""" + info = get_memory_info() + assert "memory_total_bytes" in info + assert "memory_used_bytes" in info + assert "memory_available_bytes" in info + assert "memory_percent" in info + assert "swap_total_bytes" in info + + def test_memory_total_is_positive(self): + """Test that memory total is positive.""" + info = get_memory_info() + assert info["memory_total_bytes"] > 0 + + def test_memory_percent_is_valid(self): + """Test that memory percent is between 0 and 100.""" + info = get_memory_info() + assert 0 <= info["memory_percent"] <= 100 + + +class TestGetDiskInfo: + """Tests for 
get_disk_info function.""" + + def test_disk_info_returns_list(self): + """Test that disk_info returns a list.""" + disks = get_disk_info() + assert isinstance(disks, list) + + def test_disk_info_items_are_disk_usage(self): + """Test that disk_info items are DiskUsage objects.""" + disks = get_disk_info() + if disks: # May be empty in some environments + assert isinstance(disks[0], DiskUsage) + + def test_disk_usage_has_valid_percent(self): + """Test that disk usage percent is valid.""" + disks = get_disk_info() + for disk in disks: + assert 0 <= disk.percent_used <= 100 + + def test_exclude_types_filter(self): + """Test that exclude_types filters correctly.""" + disks_all = get_disk_info(exclude_types=[]) + disks_filtered = get_disk_info(exclude_types=["tmpfs", "devtmpfs"]) + # Filtered list should be same or smaller + assert len(disks_filtered) <= len(disks_all) + + +class TestGetTopProcesses: + """Tests for get_top_processes function.""" + + def test_top_processes_returns_list(self): + """Test that top_processes returns a list.""" + procs = get_top_processes() + assert isinstance(procs, list) + + def test_top_processes_respects_count(self): + """Test that top_processes respects count parameter.""" + procs = get_top_processes(count=5) + assert len(procs) <= 5 + + def test_top_processes_items_are_process_info(self): + """Test that items are ProcessInfo objects.""" + procs = get_top_processes(count=1) + if procs: + assert isinstance(procs[0], ProcessInfo) + + def test_top_processes_sort_by_cpu(self): + """Test sorting by CPU.""" + procs = get_top_processes(count=10, sort_by="cpu") + if len(procs) > 1: + # First should have >= CPU than second + assert procs[0].cpu_percent >= procs[1].cpu_percent + + def test_top_processes_sort_by_memory(self): + """Test sorting by memory.""" + procs = get_top_processes(count=10, sort_by="memory") + if len(procs) > 1: + # First should have >= memory than second + assert procs[0].memory_percent >= procs[1].memory_percent + + +class 
TestGetSystemHealth: + """Tests for get_system_health function.""" + + def test_system_health_returns_object(self): + """Test that system_health returns SystemHealth object.""" + health = get_system_health() + assert isinstance(health, SystemHealth) + + def test_system_health_has_hostname(self): + """Test that system health has hostname.""" + health = get_system_health() + assert health.hostname is not None + assert len(health.hostname) > 0 + + def test_system_health_has_valid_uptime(self): + """Test that uptime is positive.""" + health = get_system_health() + assert health.uptime_seconds > 0 + + def test_system_health_has_boot_time(self): + """Test that boot_time is a valid datetime.""" + health = get_system_health() + assert isinstance(health.boot_time, datetime) + assert health.boot_time < datetime.now() + + def test_system_health_has_cpu_info(self): + """Test that CPU info is valid.""" + health = get_system_health() + assert 0 <= health.cpu_percent <= 100 + assert health.cpu_count > 0 + + def test_system_health_has_memory_info(self): + """Test that memory info is valid.""" + health = get_system_health() + assert health.memory_total_bytes > 0 + assert 0 <= health.memory_percent <= 100 + + def test_system_health_has_disk_info(self): + """Test that disk info is present.""" + health = get_system_health() + assert isinstance(health.disk_partitions, list) + + def test_system_health_has_process_count(self): + """Test that process count is positive.""" + health = get_system_health() + assert health.process_count > 0 + + +class TestSystemHealthMocked: + """Tests for system health with mocked psutil.""" + + @patch("toolkit.system_health.psutil") + @patch("toolkit.system_health.socket") + def test_system_health_with_mocked_psutil(self, mock_socket, mock_psutil): + """Test system health with mocked values.""" + # Setup mocks + mock_socket.gethostname.return_value = "test-host" + mock_psutil.boot_time.return_value = datetime.now().timestamp() - 86400 + 
mock_psutil.cpu_percent.return_value = 50.0 + mock_psutil.cpu_count.return_value = 4 + mock_psutil.cpu_freq.return_value = MagicMock(current=2400.0) + mock_psutil.getloadavg.return_value = (1.0, 1.5, 2.0) + + vm = MagicMock() + vm.total = 8589934592 + vm.used = 4294967296 + vm.available = 4294967296 + vm.percent = 50.0 + mock_psutil.virtual_memory.return_value = vm + + swap = MagicMock() + swap.total = 2147483648 + swap.used = 0 + swap.percent = 0.0 + mock_psutil.swap_memory.return_value = swap + + mock_psutil.disk_partitions.return_value = [] + mock_psutil.users.return_value = [] + mock_psutil.pids.return_value = [1, 2, 3] + + health = get_system_health() + + assert health.hostname == "test-host" + assert health.cpu_percent == 50.0 + assert health.memory_percent == 50.0