diff --git a/frontend/static/css/new-style.css b/frontend/static/css/new-style.css
index 42abbb1b..8c021a6b 100644
--- a/frontend/static/css/new-style.css
+++ b/frontend/static/css/new-style.css
@@ -14,7 +14,7 @@
--error-color: #e74c3c;
--info-color: #2980b9;
--debug-color: #7f8c8d;
-
+
/* Component Colors */
--sidebar-bg: #2c3e50;
--sidebar-text: #ecf0f1;
@@ -24,7 +24,7 @@
--card-bg: var(--bg-secondary);
--switch-bg: #cbd2d9;
--switch-active: #3498db;
-
+
/* Button Colors */
--button-primary-bg: #3498db;
--button-primary-text: #ffffff;
@@ -33,11 +33,11 @@
--button-danger-hover: #c0392b;
--button-success-bg: #27ae60;
--button-success-hover: #219955;
-
+
/* Status Colors */
--status-connected: #27ae60;
--status-not-connected: #e74c3c;
-
+
/* Logs Colors */
--log-bg: var(--bg-secondary);
--log-border: var(--border-color);
@@ -53,7 +53,7 @@
--border-color: #4a5568;
--accent-color: #3498db;
--accent-hover: #2980b9;
-
+
/* Component Colors */
--sidebar-bg: #121212;
--sidebar-text: #ecf0f1;
@@ -62,7 +62,7 @@
--topbar-bg: #252a34;
--card-bg: #252a34;
--switch-bg: #4a5568;
-
+
/* Logs Colors */
--log-bg: #252a34;
--log-border: #4a5568;
@@ -424,9 +424,9 @@ div[id*="scheduling"] p:not(.no-schedules-message p),
}
/* Keep old class references for backward compatibility */
-#addScheduleButton,
-#saveSchedulesButton,
-button.save-button,
+#addScheduleButton,
+#saveSchedulesButton,
+button.save-button,
button.action-button {
background-color: #3498db !important;
border: none !important;
@@ -1425,18 +1425,18 @@ input:checked + .toggle-slider:before {
.dashboard-grid {
grid-template-columns: 1fr;
}
-
+
.section-header {
flex-direction: column;
align-items: flex-start;
}
-
+
.app-tabs, .settings-actions {
width: 100%;
overflow-x: auto;
padding-bottom: 5px; /* Prevent cut-off of button shadows */
}
-
+
.app-tab, .settings-tab {
flex: 1;
white-space: nowrap;
@@ -1459,21 +1459,21 @@ input:checked + .toggle-slider:before {
margin-right: 0;
margin-bottom: 10px;
}
-
+
/* Styles moved from 992px breakpoint */
.settings-group {
padding: 15px;
}
-
+
.setting-item label {
width: 100%;
margin-bottom: 8px;
}
-
+
.setting-help {
margin-left: 0;
}
-
+
.stats-grid {
grid-template-columns: 1fr;
}
@@ -1491,53 +1491,53 @@ input:checked + .toggle-slider:before {
width: 80px; /* Adjust width as needed */
max-width: 80px;
}
-
+
/* History styles moved from 992px breakpoint */
.history-table {
font-size: 13px;
}
-
+
.history-table th, .history-table td {
padding: 8px 10px;
}
-
+
.history-search input {
width: 150px;
}
-
+
/* Original mobile history styles */
.history-controls {
flex-wrap: wrap;
gap: 10px;
}
-
+
.history-search {
width: 100%;
margin-bottom: 10px;
}
-
+
.history-search input {
width: calc(100% - 40px);
}
-
+
/* Sidebar styles moved from 950px breakpoint */
.sidebar {
width: 60px;
}
-
+
.sidebar h1, .nav-item span, .switch-label {
display: none;
}
-
+
.nav-item i {
margin-right: 0;
font-size: 22px;
}
-
+
.logo-container {
justify-content: center;
}
-
+
.logo {
margin-right: 0;
}
@@ -2144,12 +2144,12 @@ input:checked + .toggle-slider:before {
flex-wrap: wrap;
gap: 10px;
}
-
+
.history-search {
width: 100%;
margin-bottom: 10px;
}
-
+
.history-search input {
width: calc(100% - 40px);
}
@@ -2233,14 +2233,14 @@ input:checked + .toggle-slider:before {
}
/* Any container elements that might have overflow: hidden */
-.app-container,
-.content-section,
-.scheduler-container,
+.app-container,
+.content-section,
+.scheduler-container,
.scheduler-panel {
overflow: auto !important;
}
-#schedulingPage,
+#schedulingPage,
#schedulingSection {
overflow-y: auto !important;
}
@@ -2258,118 +2258,6 @@ input:checked + .toggle-slider:before {
padding: 12px 10px !important;
}
-/* Make number inputs for intervals shorter */
-input[type="number"].interval-input,
-#generalSettings .setting-item input#stateful_management_hours {
- width: 80px !important; /* Use !important to override potential broader input styles */
- padding: 12px 8px !important;
-}
-
-/* Stateful management header row with reset button */
-.stateful-header-row {
- display: flex;
- justify-content: space-between;
- align-items: center;
- padding-bottom: 10px;
- margin-bottom: 15px;
- width: 100%;
-}
-
-.stateful-header-row h3 {
- margin: 0;
- font-size: 1.1rem;
- font-weight: 600;
- color: var(--text-primary);
- text-shadow: 0 1px 2px rgba(0, 0, 0, 0.2);
-}
-
-/* Reset button styling */
-#reset_stateful_btn {
- background: linear-gradient(145deg, rgba(231, 76, 60, 0.2), rgba(192, 57, 43, 0.15));
- color: rgba(231, 76, 60, 0.9);
- border: 1px solid rgba(231, 76, 60, 0.3);
- padding: 6px 14px;
- border-radius: 6px;
- font-size: 13px;
- font-weight: 500;
- cursor: pointer;
- display: inline-flex;
- align-items: center;
- gap: 6px;
- transition: all 0.2s ease;
- box-shadow: 0 2px 5px rgba(0, 0, 0, 0.1);
-}
-
-#reset_stateful_btn:hover {
- background: linear-gradient(145deg, rgba(231, 76, 60, 0.3), rgba(192, 57, 43, 0.25));
- box-shadow: 0 4px 8px rgba(0, 0, 0, 0.15);
- color: rgba(231, 76, 60, 1);
-}
-
-#reset_stateful_btn i {
- font-size: 13px;
-}
-
-/* Stateful Management Section Styling */
-#generalSettings .setting-info-block {
- background: linear-gradient(145deg, rgba(30, 39, 56, 0.4), rgba(22, 28, 40, 0.5));
- border: 1px solid rgba(90, 109, 137, 0.1);
- border-radius: 8px;
- padding: 15px;
- margin-top: 0;
- margin-bottom: 20px;
- box-shadow: 0 5px 15px rgba(0, 0, 0, 0.1);
-}
-
-#generalSettings .info-container {
- display: grid;
- grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
- gap: 15px;
-}
-
-#generalSettings .date-info-block {
- background: linear-gradient(145deg, rgba(20, 25, 35, 0.4), rgba(15, 19, 26, 0.3));
- padding: 12px 15px;
- border-radius: 6px;
- border-left: 3px solid var(--accent-color);
- box-shadow: 0 2px 6px rgba(0, 0, 0, 0.1);
-}
-
-#generalSettings .date-label {
- font-size: 0.85em;
- color: var(--text-secondary);
- margin-bottom: 5px;
- opacity: 0.85;
-}
-
-#generalSettings .date-value {
- font-size: 0.95em;
- font-weight: 600;
- color: var(--text-primary);
- font-family: inherit;
- background-color: transparent;
- padding: 0;
-}
-
-#generalSettings .setting-item .reset-help {
- color: var(--warning-color);
- font-style: normal;
- font-size: 0.85em;
- opacity: 0.9;
-}
-
-/* Fix specific input field widths - State Reset Interval */
-#generalSettings .setting-item input#stateful_management_hours,
-input#stateful_management_hours {
- width: 55px !important; /* Make it significantly narrower to match the red line in the image */
- padding: 8px 6px !important;
- text-align: center;
- margin-right: 10px;
- min-width: 55px !important;
- max-width: 55px !important;
- font-size: 0.9rem;
-}
-
/* Pagination controls above table - left aligned and compact */
.pagination-controls.pagination-above-table {
display: flex;
diff --git a/frontend/static/css/style.css b/frontend/static/css/style.css
index b64ac659..6849428a 100644
--- a/frontend/static/css/style.css
+++ b/frontend/static/css/style.css
@@ -14,7 +14,7 @@
--error-color: #e74c3c;
--info-color: #2980b9;
--debug-color: #7f8c8d;
-
+
/* Component Colors */
--sidebar-bg: #2c3e50;
--sidebar-text: #ecf0f1;
@@ -24,7 +24,7 @@
--card-bg: var(--bg-secondary);
--switch-bg: #cbd2d9;
--switch-active: #3498db;
-
+
/* Button Colors */
--button-primary-bg: #3498db;
--button-primary-text: #ffffff;
@@ -33,11 +33,11 @@
--button-danger-hover: #c0392b;
--button-success-bg: #27ae60;
--button-success-hover: #219955;
-
+
/* Status Colors */
--status-connected: #27ae60;
--status-not-connected: #e74c3c;
-
+
/* Logs Colors */
--log-bg: var(--bg-secondary);
--log-border: var(--border-color);
@@ -53,7 +53,7 @@
--border-color: #4a5568;
--accent-color: #3498db;
--accent-hover: #2980b9;
-
+
/* Component Colors */
--sidebar-bg: #121212;
--sidebar-text: #ecf0f1;
@@ -62,7 +62,7 @@
--topbar-bg: #252a34;
--card-bg: #252a34;
--switch-bg: #4a5568;
-
+
/* Logs Colors */
--log-bg: #252a34;
--log-border: #4a5568;
@@ -335,11 +335,11 @@ input:checked + .slider:before {
.content-section {
height: calc(100vh - 50px); /* Account for taller topbar in mobile */
}
-
+
.logs {
height: calc(100vh - 150px); /* Adjust logs container for mobile topbar */
}
-
+
/* Title alignment with version info */
.page-title {
font-size: 1.3rem;
@@ -350,7 +350,7 @@ input:checked + .slider:before {
margin: 0;
padding: 0;
}
-
+
.top-bar {
display: flex;
align-items: center;
@@ -363,11 +363,11 @@ input:checked + .slider:before {
.content-section {
height: calc(100vh - 80px); /* Account for the taller stacked header on phones */
}
-
+
.logs {
height: calc(100vh - 180px); /* Adjust logs container for taller phone header */
}
-
+
.page-title {
font-size: 1.2rem; /* Slightly smaller font for phone */
}
@@ -813,121 +813,6 @@ input:checked + .toggle-slider:before {
transform: translateX(20px); /* Changed to match login page toggle (20px) */
}
-/* Stateful Management Styling */
-.stateful-header-wrapper {
- position: relative;
- width: 100%;
- margin-bottom: 25px;
- padding-bottom: 15px;
- border-bottom: 1px solid #2d3748; /* Darker border matching the screenshot */
-}
-
-.stateful-header-wrapper h3 {
- margin: 0;
- font-size: 18px;
- font-weight: 600;
- color: var(--text-primary);
- padding-bottom: 15px;
-}
-
-.header-line {
- display: none;
-}
-
-.stateful-header {
- display: flex;
- justify-content: space-between;
- align-items: center;
- margin-bottom: 15px;
- padding-bottom: 10px;
- border-bottom: 1px solid var(--border-color);
-}
-
-.stateful-title {
- font-size: 16px;
- font-weight: 600;
- color: var(--text-primary);
-}
-
-.stateful-reset-btn {
- padding: 8px 16px;
- font-size: 13px;
- font-weight: 500;
- background-color: var(--button-danger-bg);
- color: white;
- border: none;
- border-radius: 4px;
- cursor: pointer;
- transition: background-color 0.2s ease;
- white-space: nowrap;
-}
-
-.stateful-reset-btn:hover {
- background-color: var(--button-danger-hover);
-}
-
-.info-container {
- display: flex;
- flex-direction: column;
- gap: 12px;
- padding: 12px;
- background-color: var(--bg-tertiary);
- border-radius: 6px;
- margin-top: 10px;
-}
-
-.date-info-block {
- display: flex;
- justify-content: space-between;
- align-items: center;
- padding: 8px 10px;
- background-color: var(--bg-secondary);
- border-radius: 4px;
- border-left: 4px solid var(--accent-color);
-}
-
-.date-label {
- font-weight: 500;
- color: var(--text-secondary);
-}
-
-.date-value {
- font-family: monospace;
- font-size: 14px;
- font-weight: 600;
- color: var(--accent-color);
- padding: 4px 8px;
- background-color: rgba(52, 152, 219, 0.1);
- border-radius: 4px;
-}
-
-.reset-help {
- margin-top: 8px;
- font-style: italic;
- color: var(--error-color);
- font-size: 12px;
-}
-
-/* Settings Stateful Management */
-.setting-info-block {
- background-color: var(--bg-tertiary);
- border: 1px solid var(--border-color);
- border-radius: 8px;
- padding: 15px;
- margin: 10px 0;
-}
-
-.setting-info-block .info-row {
- display: flex;
- justify-content: space-between;
- padding: 5px 0;
- border-bottom: 1px solid var(--border-color);
-}
-
-.setting-info-block .info-row:last-child {
- border-bottom: none;
-}
-
.danger-button {
background-color: var(--button-danger-bg);
color: #fff;
@@ -942,35 +827,6 @@ input:checked + .toggle-slider:before {
background-color: var(--button-danger-hover);
}
-/* Custom reset button that matches the screenshot exactly */
-.danger-reset-button {
- background-color: #e74c3c; /* Solid red to match the image */
- color: white;
- border: none;
- padding: 6px 12px;
- border-radius: 4px;
- font-size: 13px;
- font-weight: 500;
- cursor: pointer;
- display: inline-flex;
- align-items: center;
- gap: 4px;
- transition: background-color 0.2s ease;
- width: fit-content;
- white-space: nowrap;
- position: absolute;
- top: 0;
- right: 0;
-}
-
-.danger-reset-button:hover {
- background-color: #c0392b;
-}
-
-.danger-reset-button i {
- font-size: 13px;
-}
-
/* Make sure settings-group has the right positioning for absolute elements */
.settings-group {
position: relative;
@@ -983,41 +839,41 @@ input:checked + .toggle-slider:before {
min-width: 60px !important;
max-width: 60px !important;
}
-
+
.main-content {
margin-left: 0 !important;
width: calc(100% - 60px) !important;
}
-
+
/* Navbar item adjustments */
.nav-item {
padding: 10px 0;
justify-content: center;
}
-
+
.nav-item span {
display: none !important;
}
-
+
.nav-icon-wrapper {
margin-right: 0 !important;
}
-
+
/* Logo container */
.logo-container {
justify-content: center !important;
padding: 15px 0 !important;
}
-
+
.logo-container h1 {
display: none !important;
}
-
+
.logo {
width: 40px !important;
height: 40px !important;
}
-
+
/* Fix active/hover state */
.nav-item:hover,
.nav-item.active {
@@ -1026,7 +882,7 @@ input:checked + .toggle-slider:before {
margin: 0 auto !important;
border-radius: 8px !important;
}
-
+
/* Topbar adjustments */
.topbar-section.center {
position: relative !important;
@@ -1034,13 +890,13 @@ input:checked + .toggle-slider:before {
transform: none !important;
justify-content: center !important;
}
-
+
/* Version bar */
.version-bar {
flex-wrap: wrap !important;
gap: 8px !important;
}
-
+
.version-item, .developer-credit {
font-size: 12px !important;
}
@@ -1050,23 +906,23 @@ input:checked + .toggle-slider:before {
.community-links {
flex-direction: column;
}
-
+
.community-link-card {
width: 100%;
}
-
+
.app-stats-grid {
grid-template-columns: 1fr;
}
-
+
.sponsors-list {
grid-template-columns: repeat(2, 1fr);
}
-
+
.version-bar {
gap: 8px;
}
-
+
.version-divider {
display: none;
}
@@ -1363,63 +1219,6 @@ label .info-icon ~ text {
margin-right: 2px;
}
-/* Reset button in top right corner */
-.top-right-button {
- position: absolute !important;
- top: 0;
- right: 0;
- margin: 0 !important;
- padding: 6px 10px !important;
- font-size: 12px !important;
- border-radius: 3px !important;
-}
-
-.top-right-button i {
- margin-right: 3px;
-}
-
-/* Stateful management header row with reset button */
-.stateful-header-row {
- display: flex;
- justify-content: space-between;
- align-items: center;
- padding-bottom: 12px;
- margin-bottom: 20px;
- border-bottom: 1px solid #2d3748; /* Dark border line matching the screenshot */
- width: 100%;
-}
-
-.stateful-header-row h3 {
- margin: 0;
- font-size: 18px;
- font-weight: 600;
- color: var(--text-primary);
-}
-
-/* Reset button styling exactly matching the screenshot */
-#reset_stateful_btn {
- background-color: #e74c3c;
- color: white;
- border: none;
- padding: 5px 12px;
- border-radius: 4px;
- font-size: 13px;
- font-weight: 500;
- cursor: pointer;
- display: inline-flex;
- align-items: center;
- gap: 5px;
- transition: background-color 0.2s ease;
-}
-
-#reset_stateful_btn:hover {
- background-color: #c0392b;
-}
-
-#reset_stateful_btn i {
- font-size: 13px;
-}
-
/* Apps Section */
/* Use the existing log dropdown styles for app section. No custom CSS needed for the dropdown itself. */
@@ -1841,16 +1640,16 @@ label .info-icon ~ text {
gap: 8px;
padding: 10px;
}
-
+
.stat-card {
padding: 8px;
}
-
+
.stat-label {
font-size: 10px;
margin-bottom: 4px;
}
-
+
.stat-value {
font-size: 16px;
}
diff --git a/frontend/static/js/new-main.js b/frontend/static/js/new-main.js
index aad835ec..99981f48 100644
--- a/frontend/static/js/new-main.js
+++ b/frontend/static/js/new-main.js
@@ -27,32 +27,32 @@ let huntarrUI = {
},
originalSettings: {}, // Store the full original settings object
settingsChanged: false, // Legacy flag (auto-save enabled)
-
+
// Logo URL
logoUrl: './static/logo/256.png',
-
+
// Element references
elements: {},
-
+
// Initialize the application
init: function() {
console.log('[huntarrUI] Initializing UI...');
-
+
// Skip initialization on login page
const isLoginPage = document.querySelector('.login-container, #loginForm, .login-form');
if (isLoginPage) {
console.log('[huntarrUI] Login page detected, skipping full initialization');
return;
}
-
+
// Cache frequently used DOM elements
this.cacheElements();
-
+
// Register event handlers
this.setupEventListeners();
this.setupLogoHandling();
// Auto-save enabled - no unsaved changes handler needed
-
+
// Check if Low Usage Mode is enabled BEFORE loading stats to avoid race condition
this.checkLowUsageMode().then(() => {
// Initialize media stats after low usage mode is determined
@@ -65,7 +65,7 @@ let huntarrUI = {
this.loadMediaStats();
}
});
-
+
// Check if we need to navigate to a specific section after refresh
const targetSection = localStorage.getItem('huntarr-target-section');
if (targetSection) {
@@ -77,13 +77,13 @@ let huntarrUI = {
// Initial navigation based on hash
this.handleHashNavigation(window.location.hash);
}
-
+
// Remove initial sidebar hiding style
const initialSidebarStyle = document.getElementById('initial-sidebar-state');
if (initialSidebarStyle) {
initialSidebarStyle.remove();
}
-
+
// Check which sidebar should be shown based on current section
console.log(`[huntarrUI] Initialization - current section: ${this.currentSection}`);
if (this.currentSection === 'settings' || this.currentSection === 'scheduling' || this.currentSection === 'notifications' || this.currentSection === 'backup-restore' || this.currentSection === 'user') {
@@ -102,12 +102,12 @@ let huntarrUI = {
localStorage.removeItem('huntarr-apps-sidebar');
this.showMainSidebar();
}
-
+
// Auto-save enabled - no unsaved changes handler needed
-
+
// Load username
this.loadUsername();
-
+
// Apply any preloaded theme immediately to avoid flashing
const prefersDarkMode = localStorage.getItem('huntarr-dark-mode') === 'true';
if (prefersDarkMode) {
@@ -123,46 +123,43 @@ let huntarrUI = {
}
// Ensure logo is visible immediately
this.logoUrl = localStorage.getItem('huntarr-logo-url') || this.logoUrl;
-
+
// Load current version
this.loadCurrentVersion(); // Load current version
-
+
// Load latest version from GitHub
this.loadLatestVersion(); // Load latest version from GitHub
-
+
// Load latest beta version from GitHub
this.loadBetaVersion(); // Load latest beta version from GitHub
-
+
// Load GitHub star count
this.loadGitHubStarCount(); // Load GitHub star count
-
- // Preload stateful management info so it's ready when needed
- this.loadStatefulInfo();
-
+
// Ensure logo is applied
if (typeof window.applyLogoToAllElements === 'function') {
window.applyLogoToAllElements();
}
-
+
// Initialize instance event handlers
this.setupInstanceEventHandlers();
-
+
// Setup navigation for sidebars
this.setupRequestarrNavigation();
this.setupAppsNavigation();
this.setupSettingsNavigation();
-
+
// Auto-save enabled - no unsaved changes handler needed
-
+
// Setup Swaparr components
this.setupSwaparrResetCycle();
-
+
// Setup Swaparr status polling (refresh every 30 seconds)
this.setupSwaparrStatusPolling();
-
+
// Setup Prowlarr status polling (refresh every 30 seconds)
this.setupProwlarrStatusPolling();
-
+
// Make dashboard visible after initialization to prevent FOUC
setTimeout(() => {
this.showDashboard();
@@ -171,7 +168,7 @@ let huntarrUI = {
console.log('[huntarrUI] Initialization complete - refresh on section change enabled');
}, 50); // Reduced from implicit longer delay
},
-
+
// Cache DOM elements for better performance
cacheElements: function() {
// Navigation
@@ -181,7 +178,7 @@ let huntarrUI = {
this.elements.huntManagerNav = document.getElementById('huntManagerNav');
this.elements.settingsNav = document.getElementById('settingsNav');
this.elements.userNav = document.getElementById('userNav');
-
+
// Sections
this.elements.sections = document.querySelectorAll('.content-section');
this.elements.homeSection = document.getElementById('homeSection');
@@ -189,25 +186,25 @@ let huntarrUI = {
this.elements.huntManagerSection = document.getElementById('huntManagerSection');
this.elements.settingsSection = document.getElementById('settingsSection');
this.elements.schedulingSection = document.getElementById('schedulingSection');
-
+
// History dropdown elements
this.elements.historyOptions = document.querySelectorAll('.history-option'); // History dropdown options
this.elements.currentHistoryApp = document.getElementById('current-history-app'); // Current history app text
this.elements.historyDropdownBtn = document.querySelector('.history-dropdown-btn'); // History dropdown button
this.elements.historyDropdownContent = document.querySelector('.history-dropdown-content'); // History dropdown content
this.elements.historyPlaceholderText = document.getElementById('history-placeholder-text'); // Placeholder text for history
-
+
// Settings dropdown elements
this.elements.settingsOptions = document.querySelectorAll('.settings-option'); // New: settings dropdown options
this.elements.currentSettingsApp = document.getElementById('current-settings-app'); // New: current settings app text
this.elements.settingsDropdownBtn = document.querySelector('.settings-dropdown-btn'); // New: settings dropdown button
this.elements.settingsDropdownContent = document.querySelector('.settings-dropdown-content'); // New: dropdown content
-
+
this.elements.appSettingsPanels = document.querySelectorAll('.app-settings-panel');
-
+
// Settings
// Save button removed for auto-save
-
+
// Status elements
this.elements.sonarrHomeStatus = document.getElementById('sonarrHomeStatus');
this.elements.radarrHomeStatus = document.getElementById('radarrHomeStatus');
@@ -215,15 +212,15 @@ let huntarrUI = {
this.elements.readarrHomeStatus = document.getElementById('readarrHomeStatus'); // Added readarr
this.elements.whisparrHomeStatus = document.getElementById('whisparrHomeStatus'); // Added whisparr
this.elements.erosHomeStatus = document.getElementById('erosHomeStatus'); // Added eros
-
+
// Actions
this.elements.startHuntButton = document.getElementById('startHuntButton');
this.elements.stopHuntButton = document.getElementById('stopHuntButton');
-
+
// Logout
this.elements.logoutLink = document.getElementById('logoutLink'); // Added logout link
},
-
+
// Set up event listeners
setupEventListeners: function() {
// Navigation
@@ -234,7 +231,7 @@ let huntarrUI = {
e.preventDefault();
this.handleNavigation(e);
}
-
+
// Handle cycle reset button clicks
if (e.target.matches('.cycle-reset-button') || e.target.closest('.cycle-reset-button')) {
const button = e.target.matches('.cycle-reset-button') ? e.target : e.target.closest('.cycle-reset-button');
@@ -244,17 +241,17 @@ let huntarrUI = {
}
}
});
-
+
// History dropdown toggle
if (this.elements.historyDropdownBtn) {
this.elements.historyDropdownBtn.addEventListener('click', (e) => {
e.preventDefault();
e.stopPropagation(); // Prevent event bubbling
-
+
// Toggle this dropdown
this.elements.historyDropdownContent.classList.toggle('show');
});
-
+
// Close dropdown when clicking outside
document.addEventListener('click', (e) => {
if (!e.target.closest('.history-dropdown') && this.elements.historyDropdownContent.classList.contains('show')) {
@@ -262,22 +259,22 @@ let huntarrUI = {
}
});
}
-
+
// History options
this.elements.historyOptions.forEach(option => {
option.addEventListener('click', (e) => this.handleHistoryOptionChange(e));
});
-
+
// Settings dropdown toggle
if (this.elements.settingsDropdownBtn) {
this.elements.settingsDropdownBtn.addEventListener('click', (e) => {
e.preventDefault();
e.stopPropagation(); // Prevent event bubbling
-
+
// Toggle this dropdown
this.elements.settingsDropdownContent.classList.toggle('show');
});
-
+
// Close dropdown when clicking outside
document.addEventListener('click', (e) => {
if (!e.target.closest('.settings-dropdown') && this.elements.settingsDropdownContent.classList.contains('show')) {
@@ -285,72 +282,56 @@ let huntarrUI = {
}
});
}
-
+
// Settings options
this.elements.settingsOptions.forEach(option => {
option.addEventListener('click', (e) => this.handleSettingsOptionChange(e));
});
-
+
// Save settings button
// Save button removed for auto-save
-
+
// Test notification button (delegated event listener for dynamic content)
document.addEventListener('click', (e) => {
if (e.target.id === 'testNotificationBtn' || e.target.closest('#testNotificationBtn')) {
this.testNotification();
}
});
-
+
// Start hunt button
if (this.elements.startHuntButton) {
this.elements.startHuntButton.addEventListener('click', () => this.startHunt());
}
-
+
// Stop hunt button
if (this.elements.stopHuntButton) {
this.elements.stopHuntButton.addEventListener('click', () => this.stopHunt());
}
-
+
// Logout button
if (this.elements.logoutLink) {
this.elements.logoutLink.addEventListener('click', (e) => this.logout(e));
}
-
+
// Requestarr navigation
this.setupRequestarrNavigation();
-
+
// Dark mode toggle
const darkModeToggle = document.getElementById('darkModeToggle');
if (darkModeToggle) {
const prefersDarkMode = localStorage.getItem('huntarr-dark-mode') === 'true';
darkModeToggle.checked = prefersDarkMode;
-
+
darkModeToggle.addEventListener('change', function() {
const isDarkMode = this.checked;
document.body.classList.toggle('dark-theme', isDarkMode);
localStorage.setItem('huntarr-dark-mode', isDarkMode);
});
}
-
+
// Settings now use manual save - no auto-save setup
console.log('[huntarrUI] Settings using manual save - skipping auto-save setup');
-
- // Auto-save enabled - no need to warn about unsaved changes
-
- // Stateful management reset button
- const resetStatefulBtn = document.getElementById('reset_stateful_btn');
- if (resetStatefulBtn) {
- resetStatefulBtn.addEventListener('click', () => this.handleStatefulReset());
- }
-
- // Stateful management hours input
- const statefulHoursInput = document.getElementById('stateful_management_hours');
- if (statefulHoursInput) {
- statefulHoursInput.addEventListener('change', () => {
- this.updateStatefulExpirationOnUI();
- });
- }
-
+
// Handle window hash change
window.addEventListener('hashchange', () => this.handleHashNavigation(window.location.hash)); // Ensure hash is passed
@@ -361,8 +342,6 @@ let huntarrUI = {
console.log('[huntarrUI] Settings section using manual save - no auto-save listeners');
}
- // Auto-save enabled - no need for beforeunload warnings
-
// Initial setup based on hash or default to home
const initialHash = window.location.hash || '#home';
this.handleHashNavigation(initialHash);
@@ -376,7 +355,7 @@ let huntarrUI = {
});
}
},
-
+
// Setup logo handling to prevent flashing during navigation
setupLogoHandling: function() {
// Get the logo image
@@ -384,7 +363,7 @@ let huntarrUI = {
if (logoImg) {
// Cache the source
this.logoSrc = logoImg.src;
-
+
// Ensure it's fully loaded
if (!logoImg.complete) {
logoImg.onload = () => {
@@ -393,7 +372,7 @@ let huntarrUI = {
};
}
}
-
+
// Also add event listener to ensure logo is preserved during navigation
window.addEventListener('beforeunload', () => {
// Store logo src in session storage to persist across page loads
@@ -402,18 +381,18 @@ let huntarrUI = {
}
});
},
-
+
// Navigation handling
handleNavigation: function(e) {
const targetElement = e.currentTarget; // Get the clicked nav item
const href = targetElement.getAttribute('href');
const target = targetElement.getAttribute('target');
-
+
// Allow links with target="_blank" to open in a new window (return early)
if (target === '_blank') {
return; // Let the default click behavior happen
}
-
+
// For all other links, prevent default behavior and handle internally
e.preventDefault();
@@ -423,14 +402,14 @@ let huntarrUI = {
let isInternalLink = href.startsWith('#');
if (isInternalLink) {
- targetSection = href.substring(1) || 'home'; // Get section from hash, default to 'home' if only '#'
+ targetSection = href.substring(1) || 'home'; // Get section from hash, default to 'home' if only '#'
} else {
// Handle external links (like /user) or non-hash links if needed
// For now, assume non-hash links navigate away
}
// Auto-save enabled - no need to check for unsaved changes when navigating
-
+
// Add special handling for apps section - clear global app module flags
if (this.currentSection === 'apps' && targetSection !== 'apps') {
// Reset the app module flags when navigating away
@@ -454,15 +433,15 @@ let huntarrUI = {
window.location.href = href;
}
},
-
+
handleHashNavigation: function(hash) {
const section = hash.substring(1) || 'home';
this.switchSection(section);
},
-
+
switchSection: function(section) {
console.log(`[huntarrUI] *** SWITCH SECTION CALLED *** section: ${section}, current: ${this.currentSection}`);
-
+
// Check for unsaved changes before allowing navigation
if (this.isInitialized && this.currentSection && this.currentSection !== section) {
// Check for unsaved Swaparr changes if leaving Swaparr section
@@ -472,7 +451,7 @@ let huntarrUI = {
return; // User chose to stay and save changes
}
}
-
+
// Check for unsaved Settings changes if leaving Settings section
if (this.currentSection === 'settings' && window.SettingsForms && typeof window.SettingsForms.checkUnsavedChanges === 'function') {
if (!window.SettingsForms.checkUnsavedChanges()) {
@@ -480,7 +459,7 @@ let huntarrUI = {
return; // User chose to stay and save changes
}
}
-
+
// Check for unsaved Notifications changes if leaving Notifications section
if (this.currentSection === 'notifications' && window.SettingsForms && typeof window.SettingsForms.checkUnsavedChanges === 'function') {
if (!window.SettingsForms.checkUnsavedChanges()) {
@@ -488,7 +467,7 @@ let huntarrUI = {
return; // User chose to stay and save changes
}
}
-
+
// Check for unsaved App instance changes if leaving Apps section
const appSections = ['apps'];
if (appSections.includes(this.currentSection) && window.SettingsForms && typeof window.SettingsForms.checkUnsavedChanges === 'function') {
@@ -497,7 +476,7 @@ let huntarrUI = {
return; // User chose to stay and save changes
}
}
-
+
// Check for unsaved Prowlarr changes if leaving Prowlarr section
if (this.currentSection === 'prowlarr' && window.SettingsForms && typeof window.SettingsForms.checkUnsavedChanges === 'function') {
if (!window.SettingsForms.checkUnsavedChanges()) {
@@ -505,30 +484,30 @@ let huntarrUI = {
return; // User chose to stay and save changes
}
}
-
+
console.log(`[huntarrUI] User switching from ${this.currentSection} to ${section}, refreshing page...`);
// Store the target section in localStorage so we can navigate to it after refresh
localStorage.setItem('huntarr-target-section', section);
location.reload();
return;
}
-
+
// Update active section
this.elements.sections.forEach(s => {
s.classList.remove('active');
s.style.display = 'none';
});
-
+
// Additionally, make sure scheduling section is completely hidden
if (section !== 'scheduling' && this.elements.schedulingSection) {
this.elements.schedulingSection.style.display = 'none';
}
-
+
// Update navigation
this.elements.navItems.forEach(item => {
item.classList.remove('active');
});
-
+
// Show selected section
let newTitle = 'Home'; // Default title
const sponsorsSection = document.getElementById('sponsorsSection'); // Get sponsors section element
@@ -540,13 +519,13 @@ let huntarrUI = {
if (this.elements.homeNav) this.elements.homeNav.classList.add('active');
newTitle = 'Home';
this.currentSection = 'home';
-
+
// Show main sidebar when returning to home and clear settings sidebar preference
localStorage.removeItem('huntarr-settings-sidebar');
this.showMainSidebar();
-
+
// Disconnect logs if switching away from logs
- this.disconnectAllEventSources();
+ this.disconnectAllEventSources();
// Check app connections when returning to home page to update status
this.checkAppConnections();
// Load Swaparr status
@@ -559,27 +538,27 @@ let huntarrUI = {
if (this.elements.logsNav) this.elements.logsNav.classList.add('active');
newTitle = 'Logs';
this.currentSection = 'logs';
-
+
// Show main sidebar for main sections and clear settings sidebar preference
localStorage.removeItem('huntarr-settings-sidebar');
this.showMainSidebar();
-
+
// Comprehensive LogsModule debugging
console.log('[huntarrUI] === LOGS SECTION DEBUG START ===');
console.log('[huntarrUI] window object keys:', Object.keys(window).filter(k => k.includes('Log')));
console.log('[huntarrUI] window.LogsModule exists:', !!window.LogsModule);
console.log('[huntarrUI] window.LogsModule type:', typeof window.LogsModule);
-
+
if (window.LogsModule) {
console.log('[huntarrUI] LogsModule methods:', Object.keys(window.LogsModule));
console.log('[huntarrUI] LogsModule.init type:', typeof window.LogsModule.init);
console.log('[huntarrUI] LogsModule.connectToLogs type:', typeof window.LogsModule.connectToLogs);
-
+
try {
console.log('[huntarrUI] Calling LogsModule.init()...');
window.LogsModule.init();
console.log('[huntarrUI] LogsModule.init() completed successfully');
-
+
// LogsModule will handle its own connection - don't interfere with pagination
console.log('[huntarrUI] LogsModule initialized - letting it handle its own connections');
} catch (error) {
@@ -596,11 +575,11 @@ let huntarrUI = {
if (document.getElementById('huntManagerNav')) document.getElementById('huntManagerNav').classList.add('active');
newTitle = 'Hunt Manager';
this.currentSection = 'hunt-manager';
-
+
// Show main sidebar for main sections and clear settings sidebar preference
localStorage.removeItem('huntarr-settings-sidebar');
this.showMainSidebar();
-
+
// Load hunt manager data if the module exists
if (typeof huntManagerModule !== 'undefined') {
huntManagerModule.refresh();
@@ -611,13 +590,13 @@ let huntarrUI = {
if (document.getElementById('requestarrNav')) document.getElementById('requestarrNav').classList.add('active');
newTitle = 'Requestarr';
this.currentSection = 'requestarr';
-
+
// Switch to Requestarr sidebar
this.showRequestarrSidebar();
-
+
// Show home view by default
this.showRequestarrView('home');
-
+
// Initialize requestarr module if it exists
if (typeof window.requestarrModule !== 'undefined') {
window.requestarrModule.loadInstances();
@@ -627,10 +606,10 @@ let huntarrUI = {
document.getElementById('requestarr-section').style.display = 'block';
newTitle = 'Requestarr - History';
this.currentSection = 'requestarr-history';
-
+
// Switch to Requestarr sidebar
this.showRequestarrSidebar();
-
+
// Show history view
this.showRequestarrView('history');
} else if (section === 'apps') {
@@ -645,10 +624,10 @@ let huntarrUI = {
if (document.getElementById('appsSonarrNav')) document.getElementById('appsSonarrNav').classList.add('active');
newTitle = 'Sonarr';
this.currentSection = 'sonarr';
-
+
// Switch to Apps sidebar
this.showAppsSidebar();
-
+
// Initialize app module for sonarr
if (typeof appsModule !== 'undefined') {
appsModule.init('sonarr');
@@ -659,10 +638,10 @@ let huntarrUI = {
if (document.getElementById('appsRadarrNav')) document.getElementById('appsRadarrNav').classList.add('active');
newTitle = 'Radarr';
this.currentSection = 'radarr';
-
+
// Switch to Apps sidebar
this.showAppsSidebar();
-
+
// Initialize app module for radarr
if (typeof appsModule !== 'undefined') {
appsModule.init('radarr');
@@ -673,10 +652,10 @@ let huntarrUI = {
if (document.getElementById('appsLidarrNav')) document.getElementById('appsLidarrNav').classList.add('active');
newTitle = 'Lidarr';
this.currentSection = 'lidarr';
-
+
// Switch to Apps sidebar
this.showAppsSidebar();
-
+
// Initialize app module for lidarr
if (typeof appsModule !== 'undefined') {
appsModule.init('lidarr');
@@ -687,10 +666,10 @@ let huntarrUI = {
if (document.getElementById('appsReadarrNav')) document.getElementById('appsReadarrNav').classList.add('active');
newTitle = 'Readarr';
this.currentSection = 'readarr';
-
+
// Switch to Apps sidebar
this.showAppsSidebar();
-
+
// Initialize app module for readarr
if (typeof appsModule !== 'undefined') {
appsModule.init('readarr');
@@ -701,10 +680,10 @@ let huntarrUI = {
if (document.getElementById('appsWhisparrNav')) document.getElementById('appsWhisparrNav').classList.add('active');
newTitle = 'Whisparr V2';
this.currentSection = 'whisparr';
-
+
// Switch to Apps sidebar
this.showAppsSidebar();
-
+
// Initialize app module for whisparr
if (typeof appsModule !== 'undefined') {
appsModule.init('whisparr');
@@ -715,10 +694,10 @@ let huntarrUI = {
if (document.getElementById('appsErosNav')) document.getElementById('appsErosNav').classList.add('active');
newTitle = 'Whisparr V3';
this.currentSection = 'eros';
-
+
// Switch to Apps sidebar
this.showAppsSidebar();
-
+
// Initialize app module for eros
if (typeof appsModule !== 'undefined') {
appsModule.init('eros');
@@ -729,11 +708,11 @@ let huntarrUI = {
if (document.getElementById('swaparrNav')) document.getElementById('swaparrNav').classList.add('active');
newTitle = 'Swaparr';
this.currentSection = 'swaparr';
-
+
// Show main sidebar for main sections and clear settings sidebar preference
localStorage.removeItem('huntarr-settings-sidebar');
this.showMainSidebar();
-
+
// Initialize Swaparr section
this.initializeSwaparr();
} else if (section === 'settings' && document.getElementById('settingsSection')) {
@@ -743,15 +722,15 @@ let huntarrUI = {
if (document.getElementById('settingsNav')) document.getElementById('settingsNav').classList.add('active');
newTitle = 'Settings';
this.currentSection = 'settings';
-
+
// Switch to Settings sidebar
console.log('[huntarrUI] About to call showSettingsSidebar()');
this.showSettingsSidebar();
console.log('[huntarrUI] Called showSettingsSidebar()');
-
+
// Set localStorage to maintain Settings sidebar preference
localStorage.setItem('huntarr-settings-sidebar', 'true');
-
+
// Initialize settings if not already done
this.initializeSettings();
} else if (section === 'scheduling' && document.getElementById('schedulingSection')) {
@@ -760,10 +739,10 @@ let huntarrUI = {
if (document.getElementById('schedulingNav')) document.getElementById('schedulingNav').classList.add('active');
newTitle = 'Scheduling';
this.currentSection = 'scheduling';
-
+
// Switch to Settings sidebar for scheduling
this.showSettingsSidebar();
-
+
// Set localStorage to maintain Settings sidebar preference
localStorage.setItem('huntarr-settings-sidebar', 'true');
} else if (section === 'notifications' && document.getElementById('notificationsSection')) {
@@ -772,13 +751,13 @@ let huntarrUI = {
if (document.getElementById('settingsNotificationsNav')) document.getElementById('settingsNotificationsNav').classList.add('active');
newTitle = 'Notifications';
this.currentSection = 'notifications';
-
+
// Switch to Settings sidebar for notifications
this.showSettingsSidebar();
-
+
// Set localStorage to maintain Settings sidebar preference
localStorage.setItem('huntarr-settings-sidebar', 'true');
-
+
// Initialize notifications settings if not already done
this.initializeNotifications();
} else if (section === 'backup-restore' && document.getElementById('backupRestoreSection')) {
@@ -787,13 +766,13 @@ let huntarrUI = {
if (document.getElementById('settingsBackupRestoreNav')) document.getElementById('settingsBackupRestoreNav').classList.add('active');
newTitle = 'Backup / Restore';
this.currentSection = 'backup-restore';
-
+
// Switch to Settings sidebar for backup/restore
this.showSettingsSidebar();
-
+
// Set localStorage to maintain Settings sidebar preference
localStorage.setItem('huntarr-settings-sidebar', 'true');
-
+
// Initialize backup/restore functionality if not already done
this.initializeBackupRestore();
} else if (section === 'prowlarr' && document.getElementById('prowlarrSection')) {
@@ -802,10 +781,10 @@ let huntarrUI = {
if (document.getElementById('appsProwlarrNav')) document.getElementById('appsProwlarrNav').classList.add('active');
newTitle = 'Prowlarr';
this.currentSection = 'prowlarr';
-
+
// Switch to Apps sidebar for prowlarr
this.showAppsSidebar();
-
+
// Initialize prowlarr settings if not already done
this.initializeProwlarr();
} else if (section === 'user' && document.getElementById('userSection')) {
@@ -814,13 +793,13 @@ let huntarrUI = {
if (document.getElementById('userNav')) document.getElementById('userNav').classList.add('active');
newTitle = 'User';
this.currentSection = 'user';
-
+
// Switch to Settings sidebar for user
this.showSettingsSidebar();
-
+
// Set localStorage to maintain Settings sidebar preference
localStorage.setItem('huntarr-settings-sidebar', 'true');
-
+
// Initialize user module if not already done
this.initializeUser();
} else {
@@ -832,7 +811,7 @@ let huntarrUI = {
if (this.elements.homeNav) this.elements.homeNav.classList.add('active');
newTitle = 'Home';
this.currentSection = 'home';
-
+
// Show main sidebar and clear settings sidebar preference
localStorage.removeItem('huntarr-settings-sidebar');
this.showMainSidebar();
@@ -851,7 +830,7 @@ let huntarrUI = {
console.warn("[huntarrUI] currentPageTitle element not found during section switch.");
}
},
-
+
// Sidebar switching functions
showMainSidebar: function() {
document.getElementById('sidebar').style.display = 'flex';
@@ -859,28 +838,28 @@ let huntarrUI = {
document.getElementById('settings-sidebar').style.display = 'none';
document.getElementById('requestarr-sidebar').style.display = 'none';
},
-
+
showAppsSidebar: function() {
document.getElementById('sidebar').style.display = 'none';
document.getElementById('apps-sidebar').style.display = 'flex';
document.getElementById('settings-sidebar').style.display = 'none';
document.getElementById('requestarr-sidebar').style.display = 'none';
},
-
+
showSettingsSidebar: function() {
document.getElementById('sidebar').style.display = 'none';
document.getElementById('apps-sidebar').style.display = 'none';
document.getElementById('settings-sidebar').style.display = 'flex';
document.getElementById('requestarr-sidebar').style.display = 'none';
},
-
+
showRequestarrSidebar: function() {
document.getElementById('sidebar').style.display = 'none';
document.getElementById('apps-sidebar').style.display = 'none';
document.getElementById('settings-sidebar').style.display = 'none';
document.getElementById('requestarr-sidebar').style.display = 'flex';
},
-
+
// Simple event source disconnection for compatibility
disconnectAllEventSources: function() {
// Delegate to LogsModule if it exists
@@ -890,28 +869,28 @@ let huntarrUI = {
// Clear local references
this.eventSources = {};
},
-
+
// App tab switching
handleAppTabChange: function(e) {
const app = e.target.getAttribute('data-app');
if (!app) return;
-
+
// Update active tab
this.elements.appTabs.forEach(tab => {
tab.classList.remove('active');
});
e.target.classList.add('active');
-
+
// Let LogsModule handle app switching to preserve pagination
this.currentApp = app;
if (window.LogsModule && typeof window.LogsModule.handleAppChange === 'function') {
window.LogsModule.handleAppChange(app);
}
},
-
+
// Log option dropdown handling - Delegated to LogsModule
// (Removed to prevent conflicts with LogsModule.handleLogOptionChange)
-
+
// History option dropdown handling
handleHistoryOptionChange: function(app) {
if (app && app.target && typeof app.target.value === 'string') {
@@ -933,11 +912,11 @@ let huntarrUI = {
// Switch to the selected app history
this.currentHistoryApp = app;
},
-
+
// Update the history placeholder text based on the selected app
updateHistoryPlaceholder: function(app) {
if (!this.elements.historyPlaceholderText) return;
-
+
let message = "";
if (app === 'all') {
message = "The History feature will be available in a future update. Stay tuned for enhancements that will allow you to view your media processing history.";
@@ -945,99 +924,99 @@ let huntarrUI = {
let displayName = this.capitalizeFirst(app);
message = `The ${displayName} History feature is under development and will be available in a future update. You'll be able to track your ${displayName} media processing history here.`;
}
-
+
this.elements.historyPlaceholderText.textContent = message;
},
-
+
// Settings option handling
handleSettingsOptionChange: function(e) {
e.preventDefault(); // Prevent default anchor behavior
-
+
const app = e.target.getAttribute('data-app');
if (!app || app === this.currentSettingsApp) return; // Do nothing if same tab clicked
-
+
// Update active option
this.elements.settingsOptions.forEach(option => {
option.classList.remove('active');
});
e.target.classList.add('active');
-
+
// Update the current settings app text with proper capitalization
let displayName = app.charAt(0).toUpperCase() + app.slice(1);
this.elements.currentSettingsApp.textContent = displayName;
-
+
// Close the dropdown
this.elements.settingsDropdownContent.classList.remove('show');
-
+
// Hide all settings panels
this.elements.appSettingsPanels.forEach(panel => {
panel.classList.remove('active');
panel.style.display = 'none';
});
-
+
// Show the selected app's settings panel
const selectedPanel = document.getElementById(app + 'Settings');
if (selectedPanel) {
selectedPanel.classList.add('active');
selectedPanel.style.display = 'block';
}
-
+
this.currentSettingsTab = app;
console.log(`[huntarrUI] Switched settings tab to: ${this.currentSettingsTab}`); // Added logging
},
-
+
// Compatibility methods that delegate to LogsModule
connectToLogs: function() {
if (window.LogsModule && typeof window.LogsModule.connectToLogs === 'function') {
window.LogsModule.connectToLogs();
}
},
-
+
clearLogs: function() {
if (window.LogsModule && typeof window.LogsModule.clearLogs === 'function') {
window.LogsModule.clearLogs();
}
},
-
+
// Insert log entry in chronological order to maintain proper reverse time sorting
insertLogInChronologicalOrder: function(newLogEntry) {
if (!this.elements.logsContainer || !newLogEntry) return;
-
+
// Parse timestamp from the new log entry
const newTimestamp = this.parseLogTimestamp(newLogEntry);
-
+
// If we can't parse the timestamp, just append to the end
if (!newTimestamp) {
this.elements.logsContainer.appendChild(newLogEntry);
return;
}
-
+
// Get all existing log entries
const existingEntries = Array.from(this.elements.logsContainer.children);
-
+
// If no existing entries, just add the new one
if (existingEntries.length === 0) {
this.elements.logsContainer.appendChild(newLogEntry);
return;
}
-
+
// Find the correct position to insert (maintaining chronological order)
// Since CSS will reverse the order, we want older entries first in DOM
let insertPosition = null;
-
+
for (let i = 0; i < existingEntries.length; i++) {
const existingTimestamp = this.parseLogTimestamp(existingEntries[i]);
-
+
// If we can't parse existing timestamp, skip it
if (!existingTimestamp) continue;
-
+
// If new log is newer than existing log, insert before it
if (newTimestamp > existingTimestamp) {
insertPosition = existingEntries[i];
break;
}
}
-
+
// Insert in the correct position
if (insertPosition) {
this.elements.logsContainer.insertBefore(newLogEntry, insertPosition);
@@ -1046,30 +1025,30 @@ let huntarrUI = {
this.elements.logsContainer.appendChild(newLogEntry);
}
},
-
+
// Parse timestamp from log entry DOM element
parseLogTimestamp: function(logEntry) {
if (!logEntry) return null;
-
+
try {
// Look for timestamp elements
const dateSpan = logEntry.querySelector('.log-timestamp .date');
const timeSpan = logEntry.querySelector('.log-timestamp .time');
-
+
if (!dateSpan || !timeSpan) return null;
-
+
const dateText = dateSpan.textContent.trim();
const timeText = timeSpan.textContent.trim();
-
+
// Skip invalid timestamps
if (!dateText || !timeText || dateText === '--' || timeText === '--:--:--') {
return null;
}
-
+
// Combine date and time into a proper timestamp
const timestampString = `${dateText} ${timeText}`;
const timestamp = new Date(timestampString);
-
+
// Return timestamp if valid, null otherwise
return isNaN(timestamp.getTime()) ? null : timestamp;
} catch (error) {
@@ -1077,49 +1056,49 @@ let huntarrUI = {
return null;
}
},
-
+
// Search logs functionality with performance optimization
searchLogs: function() {
if (!this.elements.logsContainer || !this.elements.logSearchInput) return;
-
+
const searchText = this.elements.logSearchInput.value.trim().toLowerCase();
-
+
// If empty search, reset everything
if (!searchText) {
this.clearLogSearch();
return;
}
-
+
// Show clear search button when searching
if (this.elements.clearSearchButton) {
this.elements.clearSearchButton.style.display = 'block';
}
-
+
// Filter log entries based on search text - with performance optimization
const logEntries = Array.from(this.elements.logsContainer.querySelectorAll('.log-entry'));
let matchCount = 0;
-
+
// Set a limit for highlighting to prevent browser lockup
const MAX_ENTRIES_TO_PROCESS = 300;
const processedLogEntries = logEntries.slice(0, MAX_ENTRIES_TO_PROCESS);
const remainingCount = Math.max(0, logEntries.length - MAX_ENTRIES_TO_PROCESS);
-
+
// Process in batches to prevent UI lockup
processedLogEntries.forEach((entry, index) => {
const entryText = entry.textContent.toLowerCase();
-
+
// Show/hide based on search match
if (entryText.includes(searchText)) {
entry.style.display = '';
matchCount++;
-
+
// Simple highlight by replacing HTML - much more performant
this.simpleHighlightMatch(entry, searchText);
} else {
entry.style.display = 'none';
}
});
-
+
// Handle any remaining entries - only for visibility, don't highlight
if (remainingCount > 0) {
logEntries.slice(MAX_ENTRIES_TO_PROCESS).forEach(entry => {
@@ -1132,14 +1111,14 @@ let huntarrUI = {
}
});
}
-
+
// Update search results info
if (this.elements.logSearchResults) {
let resultsText = `Found ${matchCount} matching log entries`;
this.elements.logSearchResults.textContent = resultsText;
this.elements.logSearchResults.style.display = 'block';
}
-
+
// Disable auto-scroll when searching
if (this.elements.autoScrollCheckbox && this.elements.autoScrollCheckbox.checked) {
// Save auto-scroll state to restore later if needed
@@ -1147,89 +1126,89 @@ let huntarrUI = {
this.elements.autoScrollCheckbox.checked = false;
}
},
-
+
// New simplified highlighting method that's much more performant
simpleHighlightMatch: function(logEntry, searchText) {
// Only proceed if the search text is meaningful
if (searchText.length < 2) return;
-
+
// Store original HTML if not already stored
if (!logEntry.hasAttribute('data-original-html')) {
logEntry.setAttribute('data-original-html', logEntry.innerHTML);
}
-
+
const html = logEntry.getAttribute('data-original-html');
const escapedSearchText = searchText.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // Escape regex special chars
-
+
// Simple case-insensitive replace with highlight span (using a more efficient regex approach)
const regex = new RegExp(`(${escapedSearchText})`, 'gi');
        const newHtml = html.replace(regex, '<span class="search-highlight">$1</span>');
-
+
logEntry.innerHTML = newHtml;
},
-
+
// Clear log search and reset to default view
clearLogSearch: function() {
if (!this.elements.logsContainer) return;
-
+
// Clear search input
if (this.elements.logSearchInput) {
this.elements.logSearchInput.value = '';
}
-
+
// Hide clear search button
if (this.elements.clearSearchButton) {
this.elements.clearSearchButton.style.display = 'none';
}
-
+
// Hide search results info
if (this.elements.logSearchResults) {
this.elements.logSearchResults.style.display = 'none';
}
-
+
// Show all log entries - use a more efficient approach
const allLogEntries = this.elements.logsContainer.querySelectorAll('.log-entry');
-
+
// Process in batches for better performance
Array.from(allLogEntries).forEach(entry => {
// Display all entries
entry.style.display = '';
-
+
// Restore original HTML if it exists
if (entry.hasAttribute('data-original-html')) {
entry.innerHTML = entry.getAttribute('data-original-html');
}
});
-
+
// Restore auto-scroll if it was enabled
if (this.autoScrollWasEnabled && this.elements.autoScrollCheckbox) {
this.elements.autoScrollCheckbox.checked = true;
this.autoScrollWasEnabled = false;
}
},
-
+
// Settings handling
loadAllSettings: function() {
// Disable save button until changes are made
this.updateSaveResetButtonState(false);
this.settingsChanged = false;
-
+
// Get all settings to populate forms
HuntarrUtils.fetchWithTimeout('./api/settings')
.then(response => response.json())
.then(data => {
console.log('Loaded settings:', data);
-
+
// Store original settings for comparison
this.originalSettings = data;
-
+
// Cache settings in localStorage for timezone access
try {
localStorage.setItem('huntarr-settings-cache', JSON.stringify(data));
} catch (e) {
console.warn('[huntarrUI] Failed to cache settings in localStorage:', e);
}
-
+
// Populate each app's settings form
if (data.sonarr) this.populateSettingsForm('sonarr', data.sonarr);
if (data.radarr) this.populateSettingsForm('radarr', data.radarr);
@@ -1244,39 +1223,36 @@ let huntarrUI = {
}
if (data.prowlarr) this.populateSettingsForm('prowlarr', data.prowlarr);
if (data.general) this.populateSettingsForm('general', data.general);
-
+
// Update duration displays (like sleep durations)
- if (typeof SettingsForms !== 'undefined' &&
+ if (typeof SettingsForms !== 'undefined' &&
typeof SettingsForms.updateDurationDisplay === 'function') {
SettingsForms.updateDurationDisplay();
}
-
+
// Update Swaparr instance visibility based on global setting
- if (typeof SettingsForms !== 'undefined' &&
+ if (typeof SettingsForms !== 'undefined' &&
typeof SettingsForms.updateAllSwaparrInstanceVisibility === 'function') {
SettingsForms.updateAllSwaparrInstanceVisibility();
}
-
- // Load stateful info immediately, don't wait for loadAllSettings to complete
- this.loadStatefulInfo();
})
.catch(error => {
console.error('Error loading settings:', error);
this.showNotification('Error loading settings. Please try again.', 'error');
});
},
-
+
populateSettingsForm: function(app, appSettings) {
// Cache the form for this app
const form = document.getElementById(`${app}Settings`);
if (!form) return;
-
+
// Check if SettingsForms is loaded to generate the form
if (typeof SettingsForms !== 'undefined') {
const formFunction = SettingsForms[`generate${app.charAt(0).toUpperCase()}${app.slice(1)}Form`];
if (typeof formFunction === 'function') {
formFunction(form, appSettings); // This function already calls setupInstanceManagement internally
-
+
// Update duration displays for this app
if (typeof SettingsForms.updateDurationDisplay === 'function') {
try {
@@ -1285,7 +1261,7 @@ let huntarrUI = {
console.error(`[huntarrUI] Error updating duration display:`, e);
}
}
-
+
// Update Swaparr instance visibility based on global setting
if (typeof SettingsForms.updateAllSwaparrInstanceVisibility === 'function') {
try {
@@ -1302,7 +1278,7 @@ let huntarrUI = {
return;
}
},
-
+
// Called when any setting input changes in the active tab
markSettingsAsChanged() {
if (!this.settingsChanged) {
@@ -1315,12 +1291,12 @@ let huntarrUI = {
saveSettings: function() {
const app = this.currentSettingsTab;
console.log(`[huntarrUI] saveSettings called for app: ${app}`);
-
+
// Clear the unsaved changes flag BEFORE sending the request
// This prevents the "unsaved changes" dialog from appearing
this.settingsChanged = false;
this.updateSaveResetButtonState(false);
-
+
// Use getFormSettings for all apps, as it handles different structures
let settings = this.getFormSettings(app);
@@ -1331,13 +1307,13 @@ let huntarrUI = {
}
console.log(`[huntarrUI] Collected settings for ${app}:`, settings);
-
+
// Check if this is general settings and if the authentication mode has changed
- const isAuthModeChanged = app === 'general' &&
- this.originalSettings &&
- this.originalSettings.general &&
+ const isAuthModeChanged = app === 'general' &&
+ this.originalSettings &&
+ this.originalSettings.general &&
this.originalSettings.general.auth_mode !== settings.auth_mode;
-
+
// Log changes to authentication settings
console.log(`[huntarrUI] Authentication mode changed: ${isAuthModeChanged}`);
@@ -1345,7 +1321,7 @@ let huntarrUI = {
// Use the correct endpoint based on app type
const endpoint = app === 'general' ? './api/settings/general' : `./api/settings/${app}`;
-
+
HuntarrUtils.fetchWithTimeout(endpoint, {
method: 'POST',
headers: {
@@ -1367,7 +1343,7 @@ let huntarrUI = {
})
.then(savedConfig => {
console.log('[huntarrUI] Settings saved successfully. Full config received:', savedConfig);
-
+
// Only reload the page if Authentication Mode was changed
if (isAuthModeChanged) {
this.showNotification('Settings saved successfully. Reloading page to apply authentication changes...', 'success');
@@ -1376,13 +1352,13 @@ let huntarrUI = {
}, 1500);
return;
}
-
+
// Settings auto-save notification removed per user request
// Update original settings state with the full config returned from backend
if (typeof savedConfig === 'object' && savedConfig !== null) {
this.originalSettings = JSON.parse(JSON.stringify(savedConfig));
-
+
// Cache Swaparr settings globally if they were updated
if (app === 'swaparr') {
// Handle both nested (savedConfig.swaparr) and direct (savedConfig) formats
@@ -1392,7 +1368,7 @@ let huntarrUI = {
console.log('[huntarrUI] Updated Swaparr settings cache:', window.swaparrSettings);
}
}
-
+
// Check if low usage mode setting has changed and apply it immediately
if (app === 'general' && 'low_usage_mode' in settings) {
this.applyLowUsageMode(settings.low_usage_mode);
@@ -1405,46 +1381,20 @@ let huntarrUI = {
// Re-populate the form with the saved data
const currentAppSettings = this.originalSettings[app] || {};
-
+
            // Preserve instances data if it is missing from the response but was present in the data we sent
if (app === 'sonarr' && !currentAppSettings.instances && settings.instances) {
currentAppSettings.instances = settings.instances;
}
-
+
this.populateSettingsForm(app, currentAppSettings);
// Update connection status and UI
this.checkAppConnection(app);
this.updateHomeConnectionStatus();
-
+
// If general settings were saved, refresh the stateful info display
if (app === 'general') {
- // Update the displayed interval hours if it's available in the settings
- if (settings.stateful_management_hours && document.getElementById('stateful_management_hours')) {
- const intervalInput = document.getElementById('stateful_management_hours');
- const intervalDaysSpan = document.getElementById('stateful_management_days');
- const expiresDateEl = document.getElementById('stateful_expires_date');
-
- // Update the input value
- intervalInput.value = settings.stateful_management_hours;
-
- // Update the days display
- if (intervalDaysSpan) {
- const days = (settings.stateful_management_hours / 24).toFixed(1);
- intervalDaysSpan.textContent = `${days} days`;
- }
-
- // Show updating indicator
- if (expiresDateEl) {
- expiresDateEl.textContent = 'Updating...';
- }
-
- // Also directly update the stateful expiration on the server and update UI
- this.updateStatefulExpirationOnUI();
- } else {
- this.loadStatefulInfo();
- }
-
// Dispatch a custom event that community-resources.js can listen for
window.dispatchEvent(new CustomEvent('settings-saved', {
detail: { appType: app, settings: settings }
@@ -1468,7 +1418,7 @@ let huntarrUI = {
// Setup auto-save for settings
setupSettingsAutoSave: function() {
console.log('[huntarrUI] Setting up immediate settings auto-save');
-
+
// Add event listeners to the settings container
const settingsContainer = document.getElementById('settingsSection');
if (settingsContainer) {
@@ -1478,7 +1428,7 @@ let huntarrUI = {
this.triggerSettingsAutoSave();
}
});
-
+
// Listen for change events (for checkboxes, selects, radio buttons)
settingsContainer.addEventListener('change', (event) => {
if (event.target.matches('input, select, textarea')) {
@@ -1496,11 +1446,11 @@ let huntarrUI = {
console.log('[huntarrUI] Update checking toggled, applying immediately');
this.applyUpdateCheckingChange(event.target.checked);
}
-
+
this.triggerSettingsAutoSave();
}
});
-
+
console.log('[huntarrUI] Settings auto-save listeners added');
}
},
@@ -1511,16 +1461,16 @@ let huntarrUI = {
console.log('[huntarrUI] Settings auto-save skipped - already saving');
return;
}
-
+
// Determine what type of settings we're saving
const app = this.currentSettingsTab;
const isGeneralSettings = this.currentSection === 'settings' && !app;
-
+
if (!app && !isGeneralSettings) {
console.log('[huntarrUI] No current settings tab for auto-save');
return;
}
-
+
if (isGeneralSettings) {
console.log('[huntarrUI] Triggering immediate general settings auto-save');
this.autoSaveGeneralSettings(true).catch(error => {
@@ -1538,13 +1488,13 @@ let huntarrUI = {
console.log(`[huntarrUI] Auto-save for ${app} skipped - already saving`);
return;
}
-
+
console.log(`[huntarrUI] Auto-saving settings for: ${app}`);
window._settingsCurrentlySaving = true;
-
+
// Use the existing saveSettings logic but make it silent
const originalShowNotification = this.showNotification;
-
+
// Temporarily override showNotification to suppress success messages
this.showNotification = (message, type) => {
if (type === 'error') {
@@ -1553,10 +1503,10 @@ let huntarrUI = {
}
// Suppress success notifications for auto-save
};
-
+
// Call the existing saveSettings function
this.saveSettings();
-
+
// Schedule restoration of showNotification after save completes
setTimeout(() => {
this.showNotification = originalShowNotification;
@@ -1567,30 +1517,30 @@ let huntarrUI = {
// Clean URL by removing special characters from the end
cleanUrlString: function(url) {
if (!url) return "";
-
+
// Trim whitespace first
let cleanUrl = url.trim();
-
+
// First remove any trailing slashes
cleanUrl = cleanUrl.replace(/[\/\\]+$/g, '');
-
+
// Then remove any other trailing special characters
        // This regex removes a single trailing character that is not alphanumeric, a hyphen, a period, or an underscore
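        // Illustrative: cleanUrlString('http://host:8989/') -> 'http://host:8989'; cleanUrlString('http://host:8989?') -> 'http://host:8989'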
return cleanUrl.replace(/[^a-zA-Z0-9\-\._]$/g, '');
},
-
+
// Get settings from the form, updated to handle instances consistently
getFormSettings: function(app) {
const settings = {};
let form = document.getElementById(`${app}Settings`);
-
+
// Special handling for Swaparr since it has its own section structure
if (app === 'swaparr') {
- form = document.getElementById('swaparrContainer') ||
+ form = document.getElementById('swaparrContainer') ||
document.querySelector('.swaparr-container') ||
document.querySelector('[data-app-type="swaparr"]');
}
-
+
if (!form) {
console.error(`[huntarrUI] Settings form for ${app} not found.`);
return null;
@@ -1600,19 +1550,19 @@ let huntarrUI = {
if (app === 'swaparr') {
console.log('[huntarrUI] Processing Swaparr settings');
console.log('[huntarrUI] Form:', form);
-
+
// Get all inputs and select elements in the Swaparr form
const swaparrInputs = form.querySelectorAll('input, select, textarea');
-
+
swaparrInputs.forEach(input => {
let key = input.id;
let value;
-
+
// Remove 'swaparr_' prefix to get clean key name
if (key.startsWith('swaparr_')) {
key = key.substring(8); // Remove 'swaparr_' prefix
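                    // e.g. an element id of 'swaparr_max_strikes' (illustrative id) becomes the 'max_strikes' setting key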
}
-
+
if (input.type === 'checkbox') {
value = input.checked;
} else if (input.type === 'number') {
@@ -1620,15 +1570,15 @@ let huntarrUI = {
} else {
value = input.value.trim();
}
-
+
console.log(`[huntarrUI] Processing Swaparr input: ${key} = ${value}`);
-
+
// Handle field name mappings for settings that have different names
if (key === 'malicious_detection') {
key = 'malicious_file_detection';
console.log(`[huntarrUI] Mapped malicious_detection -> malicious_file_detection`);
}
-
+
// Only include non-tag-system fields
if (key && !key.includes('_tags') && !key.includes('_input')) {
// Only include non-tag-system fields
@@ -1641,14 +1591,14 @@ let huntarrUI = {
}
}
});
-
+
// Handle tag containers separately
const tagContainers = [
{ containerId: 'swaparr_malicious_extensions_tags', settingKey: 'malicious_extensions' },
{ containerId: 'swaparr_suspicious_patterns_tags', settingKey: 'suspicious_patterns' },
{ containerId: 'swaparr_quality_patterns_tags', settingKey: 'blocked_quality_patterns' }
];
-
+
tagContainers.forEach(({ containerId, settingKey }) => {
const container = document.getElementById(containerId);
if (container) {
@@ -1660,7 +1610,7 @@ let huntarrUI = {
settings[settingKey] = [];
}
});
-
+
console.log('[huntarrUI] Final Swaparr settings:', settings);
return settings;
}
@@ -1670,23 +1620,23 @@ let huntarrUI = {
console.log('[huntarrUI] Processing general settings');
console.log('[huntarrUI] Form:', form);
console.log('[huntarrUI] Form HTML (first 500 chars):', form.innerHTML.substring(0, 500));
-
+
// Debug: Check if apprise_urls exists anywhere
const globalAppriseElement = document.querySelector('#apprise_urls');
console.log('[huntarrUI] Global apprise_urls element:', globalAppriseElement);
-
+
// Get all inputs and select elements in the general form AND notifications container
const generalInputs = form.querySelectorAll('input, select, textarea');
const notificationsContainer = document.querySelector('#notificationsContainer');
const notificationInputs = notificationsContainer ? notificationsContainer.querySelectorAll('input, select, textarea') : [];
-
+
// Combine inputs from both containers
const allInputs = [...generalInputs, ...notificationInputs];
-
+
allInputs.forEach(input => {
let key = input.id;
let value;
-
+
if (input.type === 'checkbox') {
value = input.checked;
} else if (input.type === 'number') {
@@ -1694,35 +1644,35 @@ let huntarrUI = {
} else {
value = input.value.trim();
}
-
+
console.log(`[huntarrUI] Processing input: ${key} = ${value}`);
-
+
// Handle special cases
if (key === 'apprise_urls') {
console.log('[huntarrUI] Processing Apprise URLs');
console.log('[huntarrUI] Raw apprise_urls value:', input.value);
-
+
// Split by newline and filter empty lines
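                // Illustrative (example Apprise-style URLs, not taken from this codebase):
                // "discord://id/token\n\nmailto://user@example.com" -> ['discord://id/token', 'mailto://user@example.com']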
settings.apprise_urls = input.value.split('\n')
.map(url => url.trim())
.filter(url => url.length > 0);
-
+
console.log('[huntarrUI] Processed apprise_urls:', settings.apprise_urls);
} else if (key && !key.includes('_instance_')) {
// Only include non-instance fields
settings[key] = value;
}
});
-
+
console.log('[huntarrUI] Final general settings:', settings);
return settings;
}
-
+
// Handle apps that use instances (Sonarr, Radarr, etc.)
// Get all instance items in the form
const instanceItems = form.querySelectorAll('.instance-item');
settings.instances = [];
-
+
// Check if multi-instance UI elements exist (like Sonarr)
if (instanceItems.length > 0) {
console.log(`[huntarrUI] Found ${instanceItems.length} instance items for ${app}. Processing multi-instance mode.`);
@@ -1809,11 +1759,11 @@ let huntarrUI = {
// Get the field key (remove app prefix)
let key = input.id;
-
+
if (key.startsWith(`${app}_`)) {
key = key.substring(app.length + 1);
}
-
+
// Skip empty keys or keys that are just numbers (unlikely but possible)
if (!key || /^\d+$/.test(key)) return;
@@ -1835,27 +1785,27 @@ let huntarrUI = {
// Test notification functionality
testNotification: function() {
console.log('[huntarrUI] Testing notification...');
-
+
const statusElement = document.getElementById('testNotificationStatus');
const buttonElement = document.getElementById('testNotificationBtn');
-
+
if (!statusElement || !buttonElement) {
console.error('[huntarrUI] Test notification elements not found');
return;
}
-
+
// Disable button and show loading
buttonElement.disabled = true;
buttonElement.innerHTML = ' Auto-saving...';
statusElement.innerHTML = 'Auto-saving settings before testing...';
-
+
// Auto-save general settings before testing
this.autoSaveGeneralSettings()
.then(() => {
// Update button text to show we're now testing
buttonElement.innerHTML = ' Sending...';
statusElement.innerHTML = 'Sending test notification...';
-
+
// Now test with the saved settings
return HuntarrUtils.fetchWithTimeout('./api/test-notification', {
method: 'POST',
@@ -1867,7 +1817,7 @@ let huntarrUI = {
.then(response => response.json())
.then(data => {
console.log('[huntarrUI] Test notification response:', data);
-
+
if (data.success) {
                statusElement.innerHTML = '✓ Test notification sent successfully!';
this.showNotification('Test notification sent! Check your notification service.', 'success');
@@ -1885,7 +1835,7 @@ let huntarrUI = {
// Re-enable button
buttonElement.disabled = false;
buttonElement.innerHTML = ' Test Notification';
-
+
// Clear status after 5 seconds
setTimeout(() => {
if (statusElement) {
@@ -1898,23 +1848,23 @@ let huntarrUI = {
// Auto-save general settings (used by test notification and auto-save)
autoSaveGeneralSettings: function(silent = false) {
console.log('[huntarrUI] Auto-saving general settings...');
-
+
return new Promise((resolve, reject) => {
// Find the general settings form using the correct selectors
const generalForm = document.querySelector('#generalSettings') ||
document.querySelector('.app-settings-panel[data-app-type="general"]') ||
document.querySelector('#settingsSection[data-app-type="general"]') ||
document.querySelector('#general');
-
+
if (!generalForm) {
console.error('[huntarrUI] Could not find general settings form for auto-save');
console.log('[huntarrUI] Available forms:', document.querySelectorAll('.app-settings-panel, #settingsSection, [id*="general"], [id*="General"]'));
reject(new Error('Could not find general settings form'));
return;
}
-
+
console.log('[huntarrUI] Found general form:', generalForm);
-
+
// Get settings from the form using the correct app parameter
let settings = {};
try {
@@ -1925,7 +1875,7 @@ let huntarrUI = {
reject(error);
return;
}
-
+
// Save the settings
HuntarrUtils.fetchWithTimeout('./api/settings/general', {
method: 'POST',
@@ -1954,21 +1904,21 @@ let huntarrUI = {
// Auto-save Swaparr settings
autoSaveSwaparrSettings: function(silent = false) {
console.log('[huntarrUI] Auto-saving Swaparr settings...');
-
+
return new Promise((resolve, reject) => {
// Find the Swaparr settings form
const swaparrForm = document.querySelector('#swaparrContainer') ||
document.querySelector('.swaparr-container') ||
document.querySelector('[data-app-type="swaparr"]');
-
+
if (!swaparrForm) {
console.error('[huntarrUI] Could not find Swaparr settings form for auto-save');
reject(new Error('Could not find Swaparr settings form'));
return;
}
-
+
console.log('[huntarrUI] Found Swaparr form:', swaparrForm);
-
+
// Get settings from the form using the correct app parameter
let settings = {};
try {
@@ -1979,7 +1929,7 @@ let huntarrUI = {
reject(error);
return;
}
-
+
// Save the settings
HuntarrUtils.fetchWithTimeout('./api/swaparr/settings', {
method: 'POST',
@@ -1992,13 +1942,13 @@ let huntarrUI = {
.then(data => {
if (data.success !== false) { // API returns all settings on success, not just success:true
console.log('[huntarrUI] Swaparr auto-save successful');
-
+
// Update Swaparr field visibility in all loaded app forms
if (window.SettingsForms && typeof window.SettingsForms.updateSwaparrFieldsDisabledState === 'function') {
console.log('[huntarrUI] Broadcasting Swaparr state change to all app forms...');
window.SettingsForms.updateSwaparrFieldsDisabledState();
}
-
+
resolve();
} else {
console.error('[huntarrUI] Swaparr auto-save failed:', data);
@@ -2011,48 +1961,48 @@ let huntarrUI = {
});
});
},
-
+
// Handle instance management events
setupInstanceEventHandlers: function() {
console.log("DEBUG: setupInstanceEventHandlers called"); // Added logging
const settingsPanels = document.querySelectorAll('.app-settings-panel');
-
+
settingsPanels.forEach(panel => {
console.log(`DEBUG: Adding listeners to panel '${panel.id}'`); // Added logging
panel.addEventListener('addInstance', (e) => {
console.log(`DEBUG: addInstance event listener fired for panel '${panel.id}'. Event detail:`, e.detail);
this.addAppInstance(e.detail.appName);
});
-
+
panel.addEventListener('removeInstance', (e) => {
this.removeAppInstance(e.detail.appName, e.detail.instanceId);
});
-
+
panel.addEventListener('testConnection', (e) => {
this.testInstanceConnection(e.detail.appName, e.detail.instanceId, e.detail.url, e.detail.apiKey);
});
});
},
-
+
// Add a new instance to the app
addAppInstance: function(appName) {
console.log(`DEBUG: addAppInstance called for app '${appName}'`);
const container = document.getElementById(`${appName}Settings`);
if (!container) return;
-
+
// Get current settings
const currentSettings = this.getFormSettings(appName);
if (!currentSettings.instances) {
currentSettings.instances = [];
}
-
+
// Limit to 9 instances
if (currentSettings.instances.length >= 9) {
this.showNotification('Maximum of 9 instances allowed', 'error');
return;
}
-
+
// Add new instance with a default name
currentSettings.instances.push({
name: `Instance ${currentSettings.instances.length + 1}`,
@@ -2060,79 +2010,79 @@ let huntarrUI = {
api_key: '',
enabled: true
});
-
+
// Regenerate form with new instance
SettingsForms[`generate${appName.charAt(0).toUpperCase()}${appName.slice(1)}Form`](container, currentSettings);
-
+
// Update controls like duration displays
SettingsForms.updateDurationDisplay();
-
+
this.showNotification('New instance added', 'success');
},
-
+
// Remove an instance
removeAppInstance: function(appName, instanceId) {
const container = document.getElementById(`${appName}Settings`);
if (!container) return;
-
+
// Get current settings
const currentSettings = this.getFormSettings(appName);
-
+
// Remove the instance
if (currentSettings.instances && instanceId >= 0 && instanceId < currentSettings.instances.length) {
// Keep at least one instance
if (currentSettings.instances.length > 1) {
const removedName = currentSettings.instances[instanceId].name;
currentSettings.instances.splice(instanceId, 1);
-
+
// Regenerate form
SettingsForms[`generate${appName.charAt(0).toUpperCase()}${appName.slice(1)}Form`](container, currentSettings);
-
+
// Update controls like duration displays
SettingsForms.updateDurationDisplay();
-
+
this.showNotification(`Instance "${removedName}" removed`, 'info');
} else {
this.showNotification('Cannot remove the last instance', 'error');
}
}
},
-
+
// Test connection for a specific instance
testInstanceConnection: function(appName, instanceId, url, apiKey) {
console.log(`Testing connection for ${appName} instance ${instanceId} with URL: ${url}`);
-
+
// Make sure instanceId is treated as a number
instanceId = parseInt(instanceId, 10);
-
+
// Find the status span where we'll display the result
const statusSpan = document.getElementById(`${appName}_instance_${instanceId}_status`);
if (!statusSpan) {
console.error(`Status span not found for ${appName} instance ${instanceId}`);
return;
}
-
+
// Show testing status
statusSpan.textContent = 'Testing...';
statusSpan.className = 'connection-status testing';
-
+
// Validate URL and API key
if (!url || !apiKey) {
statusSpan.textContent = 'Missing URL or API key';
statusSpan.className = 'connection-status error';
return;
}
-
+
// Check if URL is properly formatted
if (!url.startsWith('http://') && !url.startsWith('https://')) {
statusSpan.textContent = 'URL must start with http:// or https://';
statusSpan.className = 'connection-status error';
return;
}
-
+
// Clean the URL (remove special characters from the end)
url = this.cleanUrlString(url);
-
+
// Make the API request to test the connection
HuntarrUtils.fetchWithTimeout(`./api/${appName}/test-connection`, {
method: 'POST',
@@ -2160,7 +2110,7 @@ let huntarrUI = {
if (data.success) {
statusSpan.textContent = data.message || 'Connected';
statusSpan.className = 'connection-status success';
-
+
// If a version was returned, display it
if (data.version) {
statusSpan.textContent += ` (v${data.version})`;
@@ -2172,7 +2122,7 @@ let huntarrUI = {
})
.catch(error => {
console.error(`Error testing connection for ${appName} instance ${instanceId}:`, error);
-
+
// Extract the most relevant part of the error message
let errorMessage = error.message || 'Unknown error';
if (errorMessage.includes('Name or service not known')) {
@@ -2188,12 +2138,12 @@ let huntarrUI = {
} else if (errorMessage.startsWith('HTTP error!')) {
errorMessage = 'Connection failed. Check URL and port.';
}
-
+
statusSpan.textContent = errorMessage;
statusSpan.className = 'connection-status error';
});
},
-
+
// Helper function to translate HTTP error codes to user-friendly messages
getConnectionErrorMessage: function(status) {
switch(status) {
@@ -2217,7 +2167,7 @@ let huntarrUI = {
return `Connection error. Check URL and port.`;
}
},
-
+
// App connections
checkAppConnections: function() {
this.checkAppConnection('sonarr');
@@ -2227,13 +2177,13 @@ let huntarrUI = {
this.checkAppConnection('whisparr'); // Added whisparr
this.checkAppConnection('eros'); // Enable actual Eros API check
},
-
+
checkAppConnection: function(app) {
HuntarrUtils.fetchWithTimeout(`./api/status/${app}`)
.then(response => response.json())
.then(data => {
// Pass the whole data object for all apps
- this.updateConnectionStatus(app, data);
+ this.updateConnectionStatus(app, data);
// Still update the configuredApps flag for potential other uses, but after updating status
this.configuredApps[app] = data.configured === true; // Ensure it's a boolean
@@ -2241,10 +2191,10 @@ let huntarrUI = {
.catch(error => {
console.error(`Error checking ${app} connection:`, error);
// Pass a default 'not configured' status object on error
- this.updateConnectionStatus(app, { configured: false, connected: false });
+ this.updateConnectionStatus(app, { configured: false, connected: false });
});
},
-
+
updateConnectionStatus: function(app, statusData) {
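        // statusData is expected to carry: configured, connected, connected_count and total_configured
        // (matching the default object passed on fetch errors above)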
const statusElement = this.elements[`${app}HomeStatus`];
if (!statusElement) return;
@@ -2260,19 +2210,19 @@ let huntarrUI = {
// Special handling for *arr apps' multi-instance connected count
let connectedCount = statusData?.connected_count ?? 0;
let totalConfigured = statusData?.total_configured ?? 0;
-
+
// For all *arr apps, 'isConfigured' means at least one instance is configured
if (['sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros', 'swaparr'].includes(app)) {
isConfigured = totalConfigured > 0;
// For *arr apps, 'isConnected' means at least one instance is connected
- isConnected = isConfigured && connectedCount > 0;
+ isConnected = isConfigured && connectedCount > 0;
}
- // --- Visibility Logic ---
+ // --- Visibility Logic ---
if (isConfigured) {
// Ensure the box is visible
if (this.elements[`${app}HomeStatus`].closest('.app-stats-card')) {
- this.elements[`${app}HomeStatus`].closest('.app-stats-card').style.display = '';
+ this.elements[`${app}HomeStatus`].closest('.app-stats-card').style.display = '';
}
} else {
// Not configured - HIDE the box
@@ -2313,19 +2263,19 @@ let huntarrUI = {
// Show/hide card based on whether Swaparr is enabled
if (data.enabled && data.configured) {
swaparrCard.style.display = 'block';
-
+
// Update persistent statistics with large number formatting (like other apps)
const persistentStats = data.persistent_statistics || {};
document.getElementById('swaparr-processed').textContent = this.formatLargeNumber(persistentStats.processed || 0);
document.getElementById('swaparr-strikes').textContent = this.formatLargeNumber(persistentStats.strikes || 0);
document.getElementById('swaparr-removals').textContent = this.formatLargeNumber(persistentStats.removals || 0);
document.getElementById('swaparr-ignored').textContent = this.formatLargeNumber(persistentStats.ignored || 0);
-
+
// Setup button event handlers after content is loaded
setTimeout(() => {
this.setupSwaparrResetCycle();
}, 100);
-
+
} else {
swaparrCard.style.display = 'none';
}
@@ -2359,22 +2309,22 @@ let huntarrUI = {
if (this.swaparrResetInProgress) {
return;
}
-
+
// Show confirmation
if (!confirm('Are you sure you want to reset all Swaparr data? This will clear all strike counts and removed items data.')) {
return;
}
-
+
this.swaparrResetInProgress = true;
-
+
        // Update the UI first to provide immediate feedback (like Live Hunts)
this.updateSwaparrStatsDisplay({
processed: 0,
- strikes: 0,
+ strikes: 0,
removals: 0,
ignored: 0
});
-
+
// Show success notification immediately
this.showNotification('Swaparr statistics reset successfully', 'success');
@@ -2419,7 +2369,7 @@ let huntarrUI = {
if (element && stats.hasOwnProperty(key)) {
const currentValue = this.parseFormattedNumber(element.textContent);
const targetValue = stats[key];
-
+
if (currentValue !== targetValue) {
// Animate the number change
this.animateNumber(element, currentValue, targetValue, 500);
@@ -2432,7 +2382,7 @@ let huntarrUI = {
setupSwaparrStatusPolling: function() {
// Load initial status
this.loadSwaparrStatus();
-
+
// Set up polling to refresh Swaparr status every 30 seconds
// Only poll when home section is active to reduce unnecessary requests
setInterval(() => {
@@ -2442,11 +2392,11 @@ let huntarrUI = {
}, 30000);
},
- // Setup Prowlarr status polling
+ // Setup Prowlarr status polling
setupProwlarrStatusPolling: function() {
// Load initial status
this.loadProwlarrStatus();
-
+
// Set up polling to refresh Prowlarr status every 30 seconds
// Only poll when home section is active to reduce unnecessary requests
setInterval(() => {
@@ -2468,7 +2418,7 @@ let huntarrUI = {
// Only show card if Prowlarr is configured and enabled
if (statusData.configured && statusData.enabled) {
prowlarrCard.style.display = 'block';
-
+
// Update connection status
const statusElement = document.getElementById('prowlarrConnectionStatus');
if (statusElement) {
@@ -2480,14 +2430,14 @@ let huntarrUI = {
statusElement.className = 'status-badge error';
}
}
-
+
// Load data if connected
if (statusData.connected) {
// Load indexers quickly first
this.loadProwlarrIndexers();
// Load statistics separately (cached)
this.loadProwlarrStats();
-
+
// Set up periodic refresh for statistics (every 5 minutes)
if (!this.prowlarrStatsInterval) {
this.prowlarrStatsInterval = setInterval(() => {
@@ -2498,14 +2448,14 @@ let huntarrUI = {
// Show disconnected state
this.updateIndexersList(null, 'Prowlarr is disconnected');
this.updateProwlarrStatistics(null, 'Prowlarr is disconnected');
-
+
// Clear interval if disconnected
if (this.prowlarrStatsInterval) {
clearInterval(this.prowlarrStatsInterval);
this.prowlarrStatsInterval = null;
}
}
-
+
} else {
// Hide card if not configured or disabled
prowlarrCard.style.display = 'none';
@@ -2554,21 +2504,21 @@ let huntarrUI = {
// Update stat numbers
const activeElement = document.getElementById('prowlarr-active-indexers');
if (activeElement) activeElement.textContent = stats.active_indexers;
-
+
const callsElement = document.getElementById('prowlarr-total-calls');
if (callsElement) callsElement.textContent = this.formatLargeNumber(stats.total_api_calls);
-
+
const throttledElement = document.getElementById('prowlarr-throttled');
if (throttledElement) throttledElement.textContent = stats.throttled_indexers;
-
+
const failedElement = document.getElementById('prowlarr-failed');
if (failedElement) failedElement.textContent = stats.failed_indexers;
-
+
// Update health status
const healthElement = document.getElementById('prowlarr-health-status');
if (healthElement) {
healthElement.textContent = stats.health_status || 'Unknown';
-
+
// Add color coding based on health
if (stats.health_status && stats.health_status.includes('throttled')) {
healthElement.style.color = '#f59e0b'; // amber
@@ -2622,57 +2572,57 @@ let huntarrUI = {
updateIndexersList: function(indexerDetails, errorMessage = null) {
const indexersList = document.getElementById('prowlarr-indexers-list');
if (!indexersList) return;
-
+
if (errorMessage) {
// Show error state
indexersList.innerHTML = `
${errorMessage}
`;
return;
}
-
+
if (!indexerDetails || (!indexerDetails.active && !indexerDetails.throttled && !indexerDetails.failed)) {
// No indexers found
                indexersList.innerHTML = 'No indexers configured';
return;
}
-
+
// Combine all indexers and sort alphabetically
let allIndexers = [];
-
+
// Add active indexers
if (indexerDetails.active) {
allIndexers = allIndexers.concat(
indexerDetails.active.map(idx => ({ ...idx, status: 'active' }))
);
}
-
+
// Add throttled indexers
if (indexerDetails.throttled) {
allIndexers = allIndexers.concat(
indexerDetails.throttled.map(idx => ({ ...idx, status: 'throttled' }))
);
}
-
+
// Add failed indexers
if (indexerDetails.failed) {
allIndexers = allIndexers.concat(
indexerDetails.failed.map(idx => ({ ...idx, status: 'failed' }))
);
}
-
+
// Sort alphabetically by name
allIndexers.sort((a, b) => a.name.localeCompare(b.name));
-
+
if (allIndexers.length === 0) {
            indexersList.innerHTML = 'No indexers found';
return;
}
-
+
// Build the HTML for indexers list with hover interactions
const indexersHtml = allIndexers.map(indexer => {
const statusText = indexer.status === 'active' ? 'Active' :
indexer.status === 'throttled' ? 'Throttled' :
'Failed';
-
+
return `
${indexer.name}
@@ -2680,20 +2630,20 @@ let huntarrUI = {
`;
}).join('');
-
+
indexersList.innerHTML = indexersHtml;
-
+
// Add hover event listeners to indexer names
const indexerItems = indexersList.querySelectorAll('.indexer-item');
indexerItems.forEach(item => {
const indexerName = item.dataset.indexerName;
const nameElement = item.querySelector('.indexer-name');
-
+
nameElement.addEventListener('mouseenter', () => {
this.showIndexerStats(indexerName);
nameElement.classList.add('hovered');
});
-
+
nameElement.addEventListener('mouseleave', () => {
this.showOverallStats();
nameElement.classList.remove('hovered');
@@ -2705,23 +2655,23 @@ let huntarrUI = {
updateProwlarrStatistics: function(stats, errorMessage = null) {
const statisticsContent = document.getElementById('prowlarr-statistics-content');
if (!statisticsContent) return;
-
+
if (errorMessage) {
statisticsContent.innerHTML = `${errorMessage}
`;
return;
}
-
+
if (!stats) {
            statisticsContent.innerHTML = 'No statistics available';
return;
}
-
+
// Debug: Log the stats data
console.log('Statistics data:', stats);
-
+
// Build statistics cards HTML
let statisticsCards = [];
-
+
// Search activity
if (stats.searches_today !== undefined) {
const todayClass = stats.searches_today > 0 ? 'success' : '';
@@ -2732,13 +2682,13 @@ let huntarrUI = {
`);
}
-
+
// Success rate (always show, even if 0 or undefined)
let successRate = 0;
if (stats.recent_success_rate !== undefined && stats.recent_success_rate !== null) {
successRate = stats.recent_success_rate;
}
- const successClass = successRate >= 80 ? 'success' :
+ const successClass = successRate >= 80 ? 'success' :
successRate >= 60 ? 'warning' : 'error';
statisticsCards.push(`
@@ -2747,13 +2697,13 @@ let huntarrUI = {
`);
console.log('Added success rate card:', successRate, successClass);
-
+
// Average response time
if (stats.avg_response_time !== undefined) {
- const responseClass = stats.avg_response_time <= 1000 ? 'success' :
+ const responseClass = stats.avg_response_time <= 1000 ? 'success' :
stats.avg_response_time <= 3000 ? 'warning' : 'error';
- const responseTime = stats.avg_response_time >= 1000 ?
- `${(stats.avg_response_time / 1000).toFixed(1)}s` :
+ const responseTime = stats.avg_response_time >= 1000 ?
+ `${(stats.avg_response_time / 1000).toFixed(1)}s` :
`${stats.avg_response_time}ms`;
statisticsCards.push(`
@@ -2762,7 +2712,7 @@ let huntarrUI = {
`);
}
-
+
// Total API calls
if (stats.total_api_calls !== undefined) {
statisticsCards.push(`
@@ -2772,7 +2722,7 @@ let huntarrUI = {
`);
}
-
+
// Failed searches (only show if > 0)
if (stats.recent_failed_searches !== undefined && stats.recent_failed_searches > 0) {
statisticsCards.push(`
@@ -2782,7 +2732,7 @@ let huntarrUI = {
`);
}
-
+
if (statisticsCards.length === 0) {
            statisticsContent.innerHTML = 'No recent activity';
} else {
@@ -2800,7 +2750,7 @@ let huntarrUI = {
if (statisticsHeader) {
statisticsHeader.innerHTML = ` ${indexerName} Stats`;
}
-
+
// Fetch and display indexer-specific stats
HuntarrUtils.fetchWithTimeout(`./api/prowlarr/indexer-stats/${encodeURIComponent(indexerName)}`)
.then(response => response.json())
@@ -2824,37 +2774,37 @@ let huntarrUI = {
if (statisticsHeader) {
statisticsHeader.innerHTML = ' Statistics';
}
-
+
// Show cached overall stats
this.loadProwlarrStats();
},
-
+
// User
loadUsername: function() {
const usernameElement = document.getElementById('username');
if (!usernameElement) return;
-
+
HuntarrUtils.fetchWithTimeout('./api/user/info')
.then(response => response.json())
.then(data => {
if (data.username) {
usernameElement.textContent = data.username;
}
-
+
// Check if local access bypass is enabled and update UI visibility
this.checkLocalAccessBypassStatus();
})
.catch(error => {
console.error('Error loading username:', error);
-
+
// Still check local access bypass status even if username loading failed
this.checkLocalAccessBypassStatus();
});
},
-
+
// Check if local access bypass is enabled and update UI accordingly
checkLocalAccessBypassStatus: function() {
console.log("Checking local access bypass status...");
@@ -2866,7 +2816,7 @@ let huntarrUI = {
// Attempt to read response body for more details, if available
response.text().then(text => console.error('Response body:', text));
// Throw an error to trigger the catch block with a clearer message
- throw new Error(`HTTP error ${response.status}`);
+ throw new Error(`HTTP error ${response.status}`);
}
return response.json(); // Only parse JSON if response is OK
})
@@ -2887,21 +2837,21 @@ let huntarrUI = {
this.updateUIForLocalAccessBypass(false);
});
},
-
+
// Update UI elements visibility based on local access bypass status
updateUIForLocalAccessBypass: function(isEnabled) {
console.log("Updating UI for local access bypass:", isEnabled);
-
+
// Get the user info container in topbar (username and logout button)
const userInfoContainer = document.getElementById('userInfoContainer');
-
+
// Get the user nav item in sidebar
const userNav = document.getElementById('userNav');
-
+
// Set display style explicitly based on local access bypass setting
if (isEnabled === true) {
console.log("Local access bypass is ENABLED - hiding user elements");
-
+
// Hide user info in topbar
if (userInfoContainer) {
userInfoContainer.style.display = 'none';
@@ -2909,7 +2859,7 @@ let huntarrUI = {
} else {
console.warn(" โ userInfoContainer not found");
}
-
+
// Always show user nav in sidebar regardless of authentication mode
if (userNav) {
userNav.style.display = '';
@@ -2920,7 +2870,7 @@ let huntarrUI = {
}
} else {
console.log("Local access bypass is DISABLED - showing user elements");
-
+
// Show user info in topbar
if (userInfoContainer) {
userInfoContainer.style.display = '';
@@ -2928,7 +2878,7 @@ let huntarrUI = {
} else {
console.warn(" โ userInfoContainer not found");
}
-
+
// Show user nav in sidebar
if (userNav) {
userNav.style.display = '';
@@ -2938,7 +2888,7 @@ let huntarrUI = {
}
}
},
-
+
logout: function(e) { // Added logout function
e.preventDefault(); // Prevent default link behavior
console.log('[huntarrUI] Logging out...');
@@ -2963,7 +2913,7 @@ let huntarrUI = {
this.showNotification('An error occurred during logout.', 'error');
});
},
-
+
// Media statistics handling
loadMediaStats: function() {
// Prevent multiple simultaneous stats loading
@@ -2971,9 +2921,9 @@ let huntarrUI = {
console.debug('Stats already loading, skipping duplicate request');
return;
}
-
+
this.isLoadingStats = true;
-
+
// Try to load cached stats first for immediate display
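        // Cache entry shape, as written later in this function: { stats: {...}, timestamp: <ms since epoch> }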
const cachedStats = localStorage.getItem('huntarr-stats-cache');
if (cachedStats) {
@@ -2989,13 +2939,13 @@ let huntarrUI = {
console.log('[huntarrUI] Failed to parse cached stats');
}
}
-
+
// Add loading class to stats container to hide raw JSON
const statsContainer = document.querySelector('.media-stats-container');
if (statsContainer) {
statsContainer.classList.add('stats-loading');
}
-
+
HuntarrUtils.fetchWithTimeout('./api/stats')
.then(response => {
if (!response.ok) {
@@ -3007,16 +2957,16 @@ let huntarrUI = {
if (data.success && data.stats) {
// Store raw stats data globally for tooltips to access
window.mediaStats = data.stats;
-
+
// Cache the fresh stats with timestamp
localStorage.setItem('huntarr-stats-cache', JSON.stringify({
stats: data.stats,
timestamp: Date.now()
}));
-
+
// Update display
this.updateStatsDisplay(data.stats);
-
+
// Remove loading class after stats are loaded
if (statsContainer) {
statsContainer.classList.remove('stats-loading');
@@ -3037,18 +2987,18 @@ let huntarrUI = {
this.isLoadingStats = false;
});
},
-
+
updateStatsDisplay: function(stats, isFromCache = false) {
// Update each app's statistics
const apps = ['sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros', 'swaparr'];
const statTypes = ['hunted', 'upgraded'];
-
+
// More robust low usage mode detection - check multiple sources
const isLowUsageMode = this.isLowUsageModeEnabled();
-
-
+
+
console.log(`[huntarrUI] updateStatsDisplay - Low usage mode: ${isLowUsageMode}, from cache: ${isFromCache}`);
-
+
apps.forEach(app => {
if (stats[app]) {
statTypes.forEach(type => {
@@ -3058,7 +3008,7 @@ let huntarrUI = {
const currentText = element.textContent || '0';
const currentValue = this.parseFormattedNumber(currentText);
const targetValue = Math.max(0, parseInt(stats[app][type]) || 0); // Ensure non-negative
-
+
// If low usage mode is enabled or loading from cache, skip animations and set values directly
if (isLowUsageMode || isFromCache) {
element.textContent = this.formatLargeNumber(targetValue);
@@ -3069,7 +3019,7 @@ let huntarrUI = {
if (element.animationFrame) {
cancelAnimationFrame(element.animationFrame);
}
-
+
// Animate the number change
this.animateNumber(element, currentValue, targetValue);
} else if (isNaN(currentValue) || currentValue < 0) {
@@ -3086,18 +3036,18 @@ let huntarrUI = {
// Helper function to parse formatted numbers back to integers
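    // Illustrative: parseFormattedNumber('12K') -> 12000, parseFormattedNumber('1,234') -> 1234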
parseFormattedNumber: function(formattedStr) {
if (!formattedStr || typeof formattedStr !== 'string') return 0;
-
+
// Remove any formatting (K, M, commas, etc.)
const cleanStr = formattedStr.replace(/[^\d.-]/g, '');
const parsed = parseInt(cleanStr);
-
+
// Handle K and M suffixes
if (formattedStr.includes('K')) {
return Math.floor(parsed * 1000);
} else if (formattedStr.includes('M')) {
return Math.floor(parsed * 1000000);
}
-
+
return isNaN(parsed) ? 0 : Math.max(0, parsed);
},
@@ -3105,28 +3055,28 @@ let huntarrUI = {
// Ensure start and end are valid numbers
start = Math.max(0, parseInt(start) || 0);
end = Math.max(0, parseInt(end) || 0);
-
+
// If start equals end, just set the value
if (start === end) {
element.textContent = this.formatLargeNumber(end);
return;
}
-
+
const duration = 600; // Animation duration in milliseconds - reduced for faster loading feel
const startTime = performance.now();
-
+
const updateNumber = (currentTime) => {
const elapsedTime = currentTime - startTime;
const progress = Math.min(elapsedTime / duration, 1);
-
+
// Easing function for smooth animation
const easeOutQuad = progress * (2 - progress);
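            // easeOutQuad maps progress 0 -> 0, 0.5 -> 0.75, 1 -> 1, so the count decelerates as it nears the target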
-
+
const currentValue = Math.max(0, Math.floor(start + (end - start) * easeOutQuad));
-
+
// Format number for display
element.textContent = this.formatLargeNumber(currentValue);
-
+
if (progress < 1) {
// Store the animation frame ID to allow cancellation
element.animationFrame = requestAnimationFrame(updateNumber);
@@ -3137,12 +3087,12 @@ let huntarrUI = {
element.animationFrame = null;
}
};
-
+
// Store the animation frame ID to allow cancellation
element.animationFrame = requestAnimationFrame(updateNumber);
},
-
- // Format large numbers with appropriate suffixes (K, M, B, T)
+
+ // Format large numbers with appropriate suffixes (K, M, B, T)
formatLargeNumber: function(num) {
if (num < 1000) {
// 0-999: Display as is
@@ -3184,7 +3134,7 @@ let huntarrUI = {
'whisparr': {'hunted': 0, 'upgraded': 0},
'eros': {'hunted': 0, 'upgraded': 0}
};
-
+
// Immediately update UI before even showing the confirmation
if (appType) {
// Only reset the specific app's stats
@@ -3195,14 +3145,14 @@ let huntarrUI = {
// Reset all stats
this.updateStatsDisplay(stats);
}
-
+
// Show a success notification
this.showNotification('Statistics reset successfully', 'success');
// Try to send the reset to the server, but don't depend on it
try {
const requestBody = appType ? { app_type: appType } : {};
-
+
HuntarrUtils.fetchWithTimeout('./api/stats/reset_public', {
method: 'POST',
headers: {
@@ -3227,17 +3177,17 @@ let huntarrUI = {
console.warn('Error in stats reset:', error);
}
},
-
+
// Utility functions
showNotification: function(message, type) {
// Create a notification element
const notification = document.createElement('div');
notification.className = `notification ${type}`;
notification.textContent = message;
-
+
// Add to the document
document.body.appendChild(notification);
-
+
// Ensure any existing notification is removed first to prevent stacking
const existingNotifications = document.querySelectorAll('.notification');
existingNotifications.forEach(n => {
@@ -3246,12 +3196,12 @@ let huntarrUI = {
setTimeout(() => n.remove(), 300);
}
});
-
+
// Fade in
setTimeout(() => {
notification.classList.add('show');
}, 10);
-
+
// Remove after a delay
setTimeout(() => {
notification.classList.remove('show');
@@ -3260,7 +3210,7 @@ let huntarrUI = {
}, 300);
}, 3000);
},
-
+
capitalizeFirst: function(string) {
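        // e.g. capitalizeFirst('sonarr') -> 'Sonarr'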
return string.charAt(0).toUpperCase() + string.slice(1);
},
@@ -3280,7 +3230,7 @@ let huntarrUI = {
versionElement.textContent = version.trim();
versionElement.style.display = 'inline'; // Show the element
}
-
+
// Store in localStorage for topbar access
try {
const versionInfo = localStorage.getItem('huntarr-version-info') || '{}';
@@ -3321,7 +3271,7 @@ let huntarrUI = {
// Remove potential 'v' prefix for consistency if needed, or keep it
latestVersionElement.textContent = data.tag_name;
latestVersionElement.style.display = 'inline'; // Show the element
-
+
// Store in localStorage for topbar access
try {
const versionInfo = localStorage.getItem('huntarr-version-info') || '{}';
@@ -3345,7 +3295,7 @@ let huntarrUI = {
}
});
},
-
+
// Load latest beta version from GitHub tags
loadBetaVersion: function() {
HuntarrUtils.fetchWithTimeout('https://api.github.com/repos/plexguide/Huntarr.io/tags?per_page=100')
@@ -3362,11 +3312,11 @@ let huntarrUI = {
})
.then(data => {
const betaVersionElement = document.getElementById('beta-version-value');
-
+
if (betaVersionElement && data && Array.isArray(data) && data.length > 0) {
// Find the first tag that starts with B (case insensitive)
const betaTag = data.find(tag => tag.name.toUpperCase().startsWith('B'));
-
+
if (betaTag) {
betaVersionElement.textContent = betaTag.name;
// Store in localStorage for future reference
@@ -3398,7 +3348,7 @@ let huntarrUI = {
loadGitHubStarCount: function() {
const starsElement = document.getElementById('github-stars-value');
if (!starsElement) return;
-
+
// First, try to load from cache immediately for fast display
const cachedData = localStorage.getItem('huntarr-github-stars');
if (cachedData) {
@@ -3417,12 +3367,12 @@ let huntarrUI = {
localStorage.removeItem('huntarr-github-stars');
}
}
-
+
starsElement.textContent = 'Loading...';
-
+
// GitHub API endpoint for repository information
const apiUrl = 'https://api.github.com/repos/plexguide/huntarr';
-
+
HuntarrUtils.fetchWithTimeout(apiUrl)
.then(response => {
if (!response.ok) {
@@ -3435,7 +3385,7 @@ let huntarrUI = {
// Format the number with commas for thousands
const formattedStars = data.stargazers_count.toLocaleString();
starsElement.textContent = formattedStars;
-
+
// Store in localStorage to avoid excessive API requests
const cacheData = {
stars: data.stargazers_count,
@@ -3448,7 +3398,7 @@ let huntarrUI = {
})
.catch(error => {
console.error('Error fetching GitHub stars:', error);
-
+
// Try to load from cache if we have it
const cachedData = localStorage.getItem('huntarr-github-stars');
if (cachedData) {
@@ -3477,227 +3427,30 @@ let huntarrUI = {
// or use the stored configuredApps status if checkAppConnection updates it.
        this.checkAppConnections(); // Re-checking all connections after a save is the simplest approach
},
-
- // Load stateful management info
- loadStatefulInfo: function(attempts = 0, skipCache = false) {
- const initialStateEl = document.getElementById('stateful_initial_state');
- const expiresDateEl = document.getElementById('stateful_expires_date');
- const intervalInput = document.getElementById('stateful_management_hours');
- const intervalDaysSpan = document.getElementById('stateful_management_days');
-
- // Max retry attempts - increased for better reliability
- const maxAttempts = 5;
-
- console.log(`[StatefulInfo] Loading stateful info (attempt ${attempts + 1}, skipCache: ${skipCache})`);
-
- // Update UI to show loading state instead of N/A on first attempt
- if (attempts === 0) {
- if (initialStateEl && initialStateEl.textContent !== 'Loading...') initialStateEl.textContent = 'Loading...';
- if (expiresDateEl && expiresDateEl.textContent !== 'Updating...') expiresDateEl.textContent = 'Loading...';
- }
-
- // First check if we have cached data in localStorage that we can use immediately
- const cachedStatefulData = localStorage.getItem('huntarr-stateful-data');
- if (!skipCache && cachedStatefulData && attempts === 0) {
- try {
- const parsedData = JSON.parse(cachedStatefulData);
- const cacheAge = Date.now() - parsedData.timestamp;
-
- // Use cache if it's less than 5 minutes old while waiting for fresh data
- if (cacheAge < 300000) {
- console.log('[StatefulInfo] Using cached data while fetching fresh data');
-
- // Display cached data
- if (initialStateEl && parsedData.created_at_ts) {
- const createdDate = new Date(parsedData.created_at_ts * 1000);
- initialStateEl.textContent = this.formatDateNicely(createdDate);
- }
-
- if (expiresDateEl && parsedData.expires_at_ts) {
- const expiresDate = new Date(parsedData.expires_at_ts * 1000);
- expiresDateEl.textContent = this.formatDateNicely(expiresDate);
- }
-
- // Update interval input and days display
- if (intervalInput && parsedData.interval_hours) {
- intervalInput.value = parsedData.interval_hours;
- if (intervalDaysSpan) {
- const days = (parsedData.interval_hours / 24).toFixed(1);
- intervalDaysSpan.textContent = `${days} days`;
- }
- }
- }
- } catch (e) {
- console.warn('[StatefulInfo] Error parsing cached data:', e);
- }
- }
-
- // Always fetch fresh data from the server
- HuntarrUtils.fetchWithTimeout('./api/stateful/info', {
- cache: 'no-cache',
- headers: {
- 'Cache-Control': 'no-cache, no-store, must-revalidate',
- 'Pragma': 'no-cache',
- 'Expires': '0'
- }
- })
- .then(response => {
- if (!response.ok) {
- throw new Error(`HTTP error! Status: ${response.status} ${response.statusText}`);
- }
- return response.json();
- })
- .then(data => {
- if (data.success) {
- // Cache the response with a timestamp for future use
- localStorage.setItem('huntarr-stateful-data', JSON.stringify({
- ...data,
- timestamp: Date.now()
- }));
-
- // Handle initial state date
- if (initialStateEl) {
- if (data.created_at_ts) {
- const createdDate = new Date(data.created_at_ts * 1000);
- initialStateEl.textContent = this.formatDateNicely(createdDate);
- } else {
- initialStateEl.textContent = 'Not yet created';
-
- // If this is the first state load attempt and no timestamp exists,
- // it might be because the state file hasn't been created yet
- if (attempts < maxAttempts) {
- console.log(`[StatefulInfo] No initial state timestamp, will retry (${attempts + 1}/${maxAttempts})`);
- setTimeout(() => {
- this.loadStatefulInfo(attempts + 1);
- }, 500); // Longer delay for better chance of success
- return;
- }
- }
- }
-
- // Handle expiration date
- if (expiresDateEl) {
- if (data.expires_at_ts) {
- const expiresDate = new Date(data.expires_at_ts * 1000);
- expiresDateEl.textContent = this.formatDateNicely(expiresDate);
- } else {
- expiresDateEl.textContent = 'Not set';
- }
- }
-
- // Update interval input and days display
- if (intervalInput && data.interval_hours) {
- intervalInput.value = data.interval_hours;
- if (intervalDaysSpan) {
- const days = (data.interval_hours / 24).toFixed(1);
- intervalDaysSpan.textContent = `${days} days`;
- }
- }
-
- // Hide error notification if it was visible
- const notification = document.getElementById('stateful-notification');
- if (notification) {
- notification.style.display = 'none';
- }
-
- // Store the data for future reference
- this._cachedStatefulData = data;
-
- console.log('[StatefulInfo] Successfully loaded and displayed stateful data');
- } else {
- throw new Error(data.message || 'Failed to load stateful info');
- }
- })
- .catch(error => {
- console.error(`Error loading stateful info (attempt ${attempts + 1}/${maxAttempts + 1}):`, error);
-
- // Retry if we haven't reached max attempts with exponential backoff
- if (attempts < maxAttempts) {
- const delay = Math.min(2000, 300 * Math.pow(2, attempts)); // Exponential backoff with max 2000ms
- console.log(`[StatefulInfo] Retrying in ${delay}ms (attempt ${attempts + 1}/${maxAttempts})`);
- setTimeout(() => {
- // Double-check if still on the same page before retrying
- if (document.getElementById('stateful_management_hours')) {
- this.loadStatefulInfo(attempts + 1);
- } else {
- console.log(`[StatefulInfo] Stateful info retry cancelled; user navigated away.`);
- }
- }, delay);
- return;
- }
-
- // Use cached data as fallback if available
- const cachedStatefulData = localStorage.getItem('huntarr-stateful-data');
- if (cachedStatefulData) {
- try {
- console.log('[StatefulInfo] Using cached data as fallback after failed fetch');
- const parsedData = JSON.parse(cachedStatefulData);
-
- if (initialStateEl && parsedData.created_at_ts) {
- const createdDate = new Date(parsedData.created_at_ts * 1000);
- initialStateEl.textContent = this.formatDateNicely(createdDate) + ' (cached)';
- } else if (initialStateEl) {
- initialStateEl.textContent = 'Not available';
- }
-
- if (expiresDateEl && parsedData.expires_at_ts) {
- const expiresDate = new Date(parsedData.expires_at_ts * 1000);
- expiresDateEl.textContent = this.formatDateNicely(expiresDate) + ' (cached)';
- } else if (expiresDateEl) {
- expiresDateEl.textContent = 'Not available';
- }
-
- // Update interval input and days display from cache
- if (intervalInput && parsedData.interval_hours) {
- intervalInput.value = parsedData.interval_hours;
- if (intervalDaysSpan) {
- const days = (parsedData.interval_hours / 24).toFixed(1);
- intervalDaysSpan.textContent = `${days} days`;
- }
- }
-
- return;
- } catch (e) {
- console.warn('[StatefulInfo] Error parsing cached data as fallback:', e);
- }
- }
-
- // Final fallback if no cached data
- if (initialStateEl) initialStateEl.textContent = 'Not available';
- if (expiresDateEl) expiresDateEl.textContent = 'Not available';
-
- // Show error notification
- const notification = document.getElementById('stateful-notification');
- if (notification) {
- notification.style.display = 'block';
- notification.textContent = 'Could not load stateful management info. This may affect media tracking.';
- }
- });
- },
-
+
// Format date nicely with time, day, and relative time indication
formatDateNicely: function(date) {
if (!(date instanceof Date) || isNaN(date)) {
console.warn('[formatDateNicely] Invalid date provided:', date);
return 'Invalid date';
}
-
+
// Get the user's configured timezone from settings or default to UTC
const userTimezone = this.getUserTimezone();
-
+
console.log(`[formatDateNicely] Formatting date ${date.toISOString()} for timezone: ${userTimezone}`);
-
- const options = {
+
+ const options = {
weekday: 'short',
- year: 'numeric',
- month: 'short',
+ year: 'numeric',
+ month: 'short',
day: 'numeric',
hour: '2-digit',
minute: '2-digit',
            hour12: false, // Use 24-hour format
timeZone: userTimezone
};
-
+
let formattedDate;
try {
formattedDate = date.toLocaleDateString(undefined, options);
@@ -3708,12 +3461,12 @@ let huntarrUI = {
const fallbackOptions = { ...options, timeZone: 'UTC' };
formattedDate = date.toLocaleDateString(undefined, fallbackOptions) + ' (UTC fallback)';
}
-
+
// Add relative time indicator (e.g., "in 6 days" or "7 days ago")
const now = new Date();
const diffTime = date.getTime() - now.getTime();
const diffDays = Math.ceil(diffTime / (1000 * 60 * 60 * 24));
-
+
let relativeTime = '';
if (diffDays > 0) {
relativeTime = ` (in ${diffDays} day${diffDays !== 1 ? 's' : ''})`;
@@ -3722,23 +3475,23 @@ let huntarrUI = {
} else {
relativeTime = ' (today)';
}
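        // Illustrative result: "Sat, Jun 14, 2025, 14:30 (in 6 days)" (exact wording depends on the browser locale)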
-
+
return `${formattedDate}${relativeTime}`;
},
-
+
// Helper function to get the user's configured timezone from settings
getUserTimezone: function() {
// Assume UTC as default if no timezone is set
const defaultTimezone = 'UTC';
-
+
// Try multiple sources for the timezone setting
let timezone = null;
-
+
// 1. Try to get from originalSettings.general
if (this.originalSettings && this.originalSettings.general && this.originalSettings.general.timezone) {
timezone = this.originalSettings.general.timezone;
}
-
+
// 2. Try to get from the timezone dropdown if it exists (for immediate updates)
if (!timezone) {
const timezoneSelect = document.getElementById('timezone');
@@ -3746,7 +3499,7 @@ let huntarrUI = {
timezone = timezoneSelect.value;
}
}
-
+
// 3. Try to get from localStorage cache
if (!timezone) {
const cachedSettings = localStorage.getItem('huntarr-settings-cache');
@@ -3761,197 +3514,36 @@ let huntarrUI = {
}
}
}
-
+
// 4. Fallback to default
if (!timezone) {
timezone = defaultTimezone;
}
-
+
console.log(`[getUserTimezone] Using timezone: ${timezone}`);
return timezone;
},
-
- // Reset stateful management - clear all processed IDs
- resetStatefulManagement: function() {
- console.log("Reset stateful management function called");
-
- // Show a loading indicator or disable the button
- const resetBtn = document.getElementById('reset_stateful_btn');
- if (resetBtn) {
- resetBtn.disabled = true;
- const originalText = resetBtn.innerHTML;
- resetBtn.innerHTML = ' Resetting...';
- console.log("Reset button found and disabled:", resetBtn);
- } else {
- console.error("Reset button not found in the DOM!");
- }
-
- // Add debug logging
- console.log("Sending reset request to /api/stateful/reset");
-
- HuntarrUtils.fetchWithTimeout('./api/stateful/reset', {
- method: 'POST',
- headers: {
- 'Accept': 'application/json',
- 'Content-Type': 'application/json',
- 'Cache-Control': 'no-cache, no-store, must-revalidate',
- 'Pragma': 'no-cache'
- },
- cache: 'no-cache' // Add cache control to prevent caching
- })
- .then(response => {
- console.log("Reset response received:", response.status, response.statusText);
- if (!response.ok) {
- throw new Error(`HTTP error! Status: ${response.status}`);
- }
- return response.json();
- })
- .then(data => {
- console.log("Reset response data:", data);
-
- if (data.success) {
- this.showNotification('Stateful management reset successfully', 'success');
- // Wait a moment before reloading the info to ensure it's refreshed
- setTimeout(() => {
- this.loadStatefulInfo(0); // Reload stateful info with fresh attempt
-
- // Re-enable the button
- if (resetBtn) {
- resetBtn.disabled = false;
- resetBtn.innerHTML = ' Reset';
- }
- }, 1000);
- } else {
- throw new Error(data.message || 'Unknown error resetting stateful management');
- }
- })
- .catch(error => {
- console.error("Error resetting stateful management:", error);
- this.showNotification(`Error resetting stateful management: ${error.message}`, 'error');
-
- // Re-enable the button
- if (resetBtn) {
- resetBtn.disabled = false;
- resetBtn.innerHTML = ' Reset';
- }
- });
- },
-
- // Update stateful management expiration based on hours input
- updateStatefulExpirationOnUI: function() {
- const hoursInput = document.getElementById('stateful_management_hours');
- if (!hoursInput) return;
-
- const hours = parseInt(hoursInput.value) || 168;
-
- // Show updating indicator
- const expiresDateEl = document.getElementById('stateful_expires_date');
- const initialStateEl = document.getElementById('stateful_initial_state');
-
- if (expiresDateEl) {
- expiresDateEl.textContent = 'Updating...';
- }
-
- const url = './api/stateful/update-expiration';
- const cleanedUrl = this.cleanUrlString(url);
-
- HuntarrUtils.fetchWithTimeout(cleanedUrl, {
- method: 'POST',
- headers: {
- 'Accept': 'application/json',
- 'Content-Type': 'application/json'
- },
- body: JSON.stringify({ hours: hours }),
- cache: 'no-cache'
- })
- .then(response => {
- if (!response.ok) {
- throw new Error(`HTTP error! Status: ${response.status} ${response.statusText}`);
- }
- return response.json();
- })
- .then(data => {
- if (data.success) {
- console.log('[huntarrUI] Stateful expiration updated successfully:', data);
-
- // Get updated info to show proper dates
- this.loadStatefulInfo();
-
- // Show a notification
- this.showNotification(`Updated expiration to ${hours} hours (${(hours/24).toFixed(1)} days)`, 'success');
- } else {
- throw new Error(data.message || 'Unknown error updating expiration');
- }
- })
- .catch(error => {
- console.error('Error updating stateful expiration:', error);
- this.showNotification(`Failed to update expiration: ${error.message}`, 'error');
- // Reset the UI
- if (expiresDateEl) {
- expiresDateEl.textContent = 'Error updating';
- }
-
- // Try to reload original data
- setTimeout(() => this.loadStatefulInfo(), 1000);
- });
- },
- // Add the updateStatefulExpiration method
- updateStatefulExpiration: function(hours) {
- if (!hours || typeof hours !== 'number' || hours <= 0) {
- console.error('[huntarrUI] Invalid hours value for updateStatefulExpiration:', hours);
- return;
- }
-
- console.log(`[huntarrUI] Directly updating stateful expiration to ${hours} hours`);
-
- // Make a direct API call to update the stateful expiration
- HuntarrUtils.fetchWithTimeout('./api/stateful/update-expiration', {
- method: 'POST',
- headers: {
- 'Content-Type': 'application/json'
- },
- body: JSON.stringify({ hours: hours })
- })
- .then(response => {
- if (!response.ok) {
- throw new Error(`HTTP error! Status: ${response.status}`);
- }
- return response.json();
- })
- .then(data => {
- console.log('[huntarrUI] Stateful expiration updated successfully:', data);
- // Update the expiration date display
- const expiresDateEl = document.getElementById('stateful_expires_date');
- if (expiresDateEl && data.expires_date) {
- expiresDateEl.textContent = data.expires_date;
- }
- })
- .catch(error => {
- console.error('[huntarrUI] Error updating stateful expiration:', error);
- });
- },
-
// Add global event handler and method to track saved settings across all apps
// Auto-save enabled - unsaved changes handlers removed
-
+
// Add a proper hasFormChanges function to compare form values with original values
hasFormChanges: function(app) {
// If we don't have original settings or current app settings, we can't compare
if (!this.originalSettings || !this.originalSettings[app]) {
return false;
}
-
+
// Get current settings from the form
const currentSettings = this.getFormSettings(app);
-
+
// For complex objects like instances, we need to stringify them for comparison
const originalJSON = JSON.stringify(this.originalSettings[app]);
const currentJSON = JSON.stringify(currentSettings);
-
+
return originalJSON !== currentJSON;
},
-
+
// Check if Low Usage Mode is enabled in settings and apply it
checkLowUsageMode: function() {
return HuntarrUtils.fetchWithTimeout('./api/settings/general', {
@@ -3973,24 +3565,24 @@ let huntarrUI = {
throw error;
});
},
-
+
// Apply Low Usage Mode effects based on setting
applyLowUsageMode: function(enabled) {
console.log(`[huntarrUI] Setting Low Usage Mode: ${enabled ? 'Enabled' : 'Disabled'}`);
-
+
// Store the previous state to detect changes
const wasEnabled = document.body.classList.contains('low-usage-mode');
-
+
if (enabled) {
// Add CSS class to body to disable animations
document.body.classList.add('low-usage-mode');
-
+
// Low Usage Mode now runs without any visual indicator for a cleaner interface
} else {
// Remove CSS class from body to enable animations
document.body.classList.remove('low-usage-mode');
}
-
+
// If low usage mode state changed and we have stats data, update the display
if (wasEnabled !== enabled && window.mediaStats) {
console.log(`[huntarrUI] Low usage mode changed from ${wasEnabled} to ${enabled}, updating stats display`);
@@ -4001,7 +3593,7 @@ let huntarrUI = {
// Apply timezone change immediately
applyTimezoneChange: function(timezone) {
console.log(`[huntarrUI] Applying timezone change to: ${timezone}`);
-
+
// Call the backend to apply timezone immediately
fetch('./api/settings/apply-timezone', {
method: 'POST',
@@ -4015,7 +3607,7 @@ let huntarrUI = {
if (data.success) {
console.log('[huntarrUI] Timezone applied successfully');
// Settings auto-save notification removed per user request
-
+
// Refresh any time displays that might be affected
this.refreshTimeDisplays();
} else {
@@ -4032,17 +3624,17 @@ let huntarrUI = {
// Apply authentication mode change immediately
applyAuthModeChange: function(authMode) {
console.log(`[huntarrUI] Authentication mode changed to: ${authMode}`);
-
+
// Show notification about the change
const modeNames = {
'login': 'Login Mode',
- 'local_bypass': 'Local Bypass Mode',
+ 'local_bypass': 'Local Bypass Mode',
'no_login': 'No Login Mode'
};
-
+
const modeName = modeNames[authMode] || authMode;
// Settings auto-save notification removed per user request
-
+
// Settings auto-save warning notification removed per user request
},
@@ -4055,27 +3647,27 @@ let huntarrUI = {
// Refresh time displays after timezone change
refreshTimeDisplays: function() {
console.log('[huntarrUI] Refreshing all time displays for timezone change');
-
+
// 1. Refresh logs module timezone cache and reload logs
if (window.LogsModule) {
console.log('[huntarrUI] Refreshing LogsModule timezone');
window.LogsModule.userTimezone = null; // Clear cache
window.LogsModule.loadUserTimezone(); // Reload timezone
-
+
// Force reload current logs to show new timezone
if (window.LogsModule.currentLogApp) {
console.log('[huntarrUI] Reloading logs with new timezone');
window.LogsModule.loadLogsFromAPI(window.LogsModule.currentLogApp);
}
}
-
+
// 2. Refresh cycle countdown timers
if (window.CycleCountdown) {
console.log('[huntarrUI] Refreshing CycleCountdown timers');
// Force refresh cycle data to get updated timestamps
window.CycleCountdown.refreshAllData();
}
-
+
// 3. Refresh scheduler timezone display if on scheduling page
if (this.currentSection === 'scheduling' || this.currentSection === 'schedules') {
console.log('[huntarrUI] Refreshing scheduling timezone display');
@@ -4083,7 +3675,7 @@ let huntarrUI = {
loadServerTimezone(); // Reload server timezone display
}
}
-
+
// 4. Refresh hunt manager if it's currently visible (timestamps in hunt entries)
if (this.currentSection === 'hunt-manager' && window.huntManagerModule) {
console.log('[huntarrUI] Refreshing hunt manager data');
@@ -4091,7 +3683,7 @@ let huntarrUI = {
window.huntManagerModule.refresh();
}
}
-
+
// 5. Update any cached timezone settings in huntarrUI
if (this.originalSettings && this.originalSettings.general) {
// Update cached timezone to match the new setting
@@ -4101,7 +3693,7 @@ let huntarrUI = {
console.log('[huntarrUI] Updated cached timezone setting to:', timezoneSelect.value);
}
}
-
+
// 6. Refresh any time elements with custom refresh methods
const timeElements = document.querySelectorAll('[data-time], .time-display, .timestamp');
timeElements.forEach(element => {
@@ -4110,10 +3702,10 @@ let huntarrUI = {
// Custom refresh logic could go here
}
});
-
+
console.log('[huntarrUI] Time display refresh completed');
},
-
+
// Reset the app cycle for a specific app
resetAppCycle: function(app, button) {
// Make sure we have the app and button elements
@@ -4121,14 +3713,14 @@ let huntarrUI = {
console.error('[huntarrUI] Missing app or button for resetAppCycle');
return;
}
-
+
// First, disable the button to prevent multiple clicks
button.disabled = true;
button.innerHTML = ' Resetting...';
-
+
// API endpoint
const endpoint = `./api/cycle/reset/${app}`;
-
+
HuntarrUtils.fetchWithTimeout(endpoint, {
method: 'POST'
})
@@ -4141,7 +3733,7 @@ let huntarrUI = {
.then(data => {
this.showNotification(`Successfully reset ${this.capitalizeFirst(app)} cycle`, 'success');
console.log(`[huntarrUI] Reset ${app} cycle response:`, data);
-
+
// Re-enable the button with original text
button.disabled = false;
button.innerHTML = ` Reset`;
@@ -4149,7 +3741,7 @@ let huntarrUI = {
.catch(error => {
console.error(`[huntarrUI] Error resetting ${app} cycle:`, error);
this.showNotification(`Error resetting ${this.capitalizeFirst(app)} cycle: ${error.message}`, 'error');
-
+
// Re-enable the button with original text
button.disabled = false;
button.innerHTML = ` Reset`;
@@ -4159,18 +3751,18 @@ let huntarrUI = {
// More robust low usage mode detection
isLowUsageModeEnabled: function() {
// Check multiple sources to determine if low usage mode is enabled
-
+
// 1. Check CSS class on body (primary method)
const hasLowUsageClass = document.body.classList.contains('low-usage-mode');
-
+
// 2. Check if the standalone low-usage-mode.js module is enabled
const standaloneModuleEnabled = window.LowUsageMode && window.LowUsageMode.isEnabled && window.LowUsageMode.isEnabled();
-
+
// 3. Final determination based on reliable sources (no indicator checking needed)
const isEnabled = hasLowUsageClass || standaloneModuleEnabled;
-
+
console.log(`[huntarrUI] Low usage mode detection - CSS class: ${hasLowUsageClass}, Module: ${standaloneModuleEnabled}, Final: ${isEnabled}`);
-
+
return isEnabled;
},
@@ -4188,17 +3780,17 @@ let huntarrUI = {
applyFilterToSingleEntry: function(logEntry, selectedLevel) {
// Apply the same filtering logic used in filterLogsByLevel to a single entry
const levelBadge = logEntry.querySelector('.log-level-badge, .log-level, .log-level-error, .log-level-warning, .log-level-info, .log-level-debug');
-
+
// Clear any existing filter attribute first
logEntry.removeAttribute('data-hidden-by-filter');
-
+
if (levelBadge) {
// Get the level from the badge text
let entryLevel = '';
-
+
// Get badge text and normalize it
const badgeText = levelBadge.textContent.toLowerCase().trim();
-
+
// Fixed mapping - match the actual badge text created in log entries
switch(badgeText) {
case 'information':
@@ -4234,7 +3826,7 @@ let huntarrUI = {
entryLevel = null;
}
}
-
+
// Show or hide based on filter match, using data attributes for pagination cooperation
if (entryLevel && entryLevel === selectedLevel) {
logEntry.style.display = '';
@@ -4251,13 +3843,13 @@ let huntarrUI = {
filterLogsByLevel: function(selectedLevel) {
if (!this.elements.logsContainer) return;
-
+
const allLogEntries = this.elements.logsContainer.querySelectorAll('.log-entry');
let visibleCount = 0;
let totalCount = allLogEntries.length;
-
+
console.log(`[huntarrUI] Filtering logs by level: ${selectedLevel}, total entries: ${totalCount}`);
-
+
// Debug: Log first few badge texts to see what we're working with
allLogEntries.forEach((entry, index) => {
if (index < 5) { // Log first 5 entries for debugging
@@ -4267,12 +3859,12 @@ let huntarrUI = {
}
}
});
-
+
// Clear any existing filter attributes first
allLogEntries.forEach(entry => {
entry.removeAttribute('data-hidden-by-filter');
});
-
+
allLogEntries.forEach(entry => {
if (selectedLevel === 'all') {
// Show all entries - remove any filter hiding
@@ -4281,14 +3873,14 @@ let huntarrUI = {
} else {
// Check if this entry matches the selected level
const levelBadge = entry.querySelector('.log-level-badge, .log-level, .log-level-error, .log-level-warning, .log-level-info, .log-level-debug');
-
+
if (levelBadge) {
// Get the level from the badge text
let entryLevel = '';
-
+
// Get badge text and normalize it
const badgeText = levelBadge.textContent.toLowerCase().trim();
-
+
// Fixed mapping - match the actual badge text created in log entries
switch(badgeText) {
case 'information':
@@ -4325,7 +3917,7 @@ let huntarrUI = {
entryLevel = null; // Set to null to indicate unmapped
}
}
-
+
// Show or hide based on filter match, using data attributes for pagination cooperation
if (entryLevel && entryLevel === selectedLevel) {
entry.style.display = '';
@@ -4341,9 +3933,9 @@ let huntarrUI = {
}
}
});
-
+
// Pagination controls remain visible at all times - removed hiding logic
-
+
// Auto-scroll to top to show newest entries (logs are in reverse order)
if (this.autoScroll && this.elements.autoScrollCheckbox && this.elements.autoScrollCheckbox.checked && visibleCount > 0) {
setTimeout(() => {
@@ -4353,16 +3945,16 @@ let huntarrUI = {
});
}, 100);
}
-
+
console.log(`[huntarrUI] Filtered logs by level '${selectedLevel}': showing ${visibleCount}/${totalCount} entries`);
},
-
+
// Helper method to detect JSON fragments that shouldn't be displayed as log entries
isJsonFragment: function(logString) {
if (!logString || typeof logString !== 'string') return false;
-
+
const trimmed = logString.trim();
-
+
// Check for common JSON fragment patterns
const jsonPatterns = [
/^"[^"]*":\s*"[^"]*",?$/, // "key": "value",
@@ -4391,51 +3983,51 @@ let huntarrUI = {
/^[a-zA-Z_]+\s*Setting:\s*.*$/i, // Setting fragments
/^[a-zA-Z_]+\s*Config:\s*.*$/i // Config fragments
];
-
+
return jsonPatterns.some(pattern => pattern.test(trimmed));
},
-
+
// Helper method to detect other invalid log lines
isInvalidLogLine: function(logString) {
if (!logString || typeof logString !== 'string') return true;
-
+
const trimmed = logString.trim();
-
+
// Skip empty lines or lines with only whitespace
if (trimmed.length === 0) return true;
-
+
// Skip lines that are clearly not log entries
if (trimmed.length < 10) return true; // Too short to be a meaningful log
-
+
// Skip lines that look like HTTP headers or other metadata
if (/^(HTTP\/|Content-|Connection:|Host:|User-Agent:)/i.test(trimmed)) return true;
-
+
// Skip partial words or fragments that don't form complete sentences
if (/^[a-zA-Z]{1,5}\s+(Mode|Setting|Config|Debug|Info|Error|Warning):/i.test(trimmed)) return true;
-
+
// Skip single words that are clearly fragments
if (/^[a-zA-Z]{1,8}$/i.test(trimmed)) return true;
-
+
// Skip lines that start with partial words and contain colons (config fragments)
if (/^[a-z]{1,8}\s*[A-Z]/i.test(trimmed) && trimmed.includes(':')) return true;
-
+
return false;
},
-
+
// Load instance-specific state management information
loadInstanceStateInfo: function(appType, instanceIndex) {
const supportedApps = ['sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros'];
if (!supportedApps.includes(appType)) return;
-
+
// Try multiple methods to get the correct instance name
let instanceName = null;
-
+
// Method 1: Try the name input field
const instanceNameElement = document.getElementById(`${appType}-name-${instanceIndex}`);
if (instanceNameElement && instanceNameElement.value && instanceNameElement.value.trim()) {
instanceName = instanceNameElement.value.trim();
}
-
+
// Method 2: Try to get from the instance header/title
if (!instanceName) {
const instanceHeader = document.querySelector(`#${appType}-instance-${instanceIndex} h3, #${appType}-instance-${instanceIndex} .instance-title`);
@@ -4448,17 +4040,17 @@ let huntarrUI = {
}
}
}
-
+
// Method 3: Fallback to Default for first instance, descriptive name for others
if (!instanceName) {
instanceName = instanceIndex === 0 ? 'Default' : `Instance ${instanceIndex + 1}`;
}
-
+
const hoursInput = document.getElementById(`${appType}-state-management-hours-${instanceIndex}`);
const customHours = parseInt(hoursInput?.value) || 168;
-
+
console.log(`[huntarrUI] Loading state info for ${appType}/${instanceName} (index ${instanceIndex})`);
-
+
// Load state information for this specific instance using per-instance API
HuntarrUtils.fetchWithTimeout(`./api/stateful/summary?app_type=${appType}&instance_name=${encodeURIComponent(instanceName)}`, {
method: 'GET'
@@ -4474,12 +4066,12 @@ let huntarrUI = {
this.updateInstanceStateDisplay(appType, instanceIndex, null, instanceName, customHours);
});
},
-
+
// Update the instance state management display
updateInstanceStateDisplay: function(appType, instanceIndex, summaryData, instanceName, customHours) {
const resetTimeElement = document.getElementById(`${appType}-state-reset-time-${instanceIndex}`);
const itemsCountElement = document.getElementById(`${appType}-state-items-count-${instanceIndex}`);
-
+
// Update reset time from server data ONLY - no fallback calculations
if (resetTimeElement) {
if (summaryData && summaryData.next_reset_time) {
@@ -4490,7 +4082,7 @@ let huntarrUI = {
resetTimeElement.textContent = 'Error loading time';
}
}
-
+
// Update processed items count
if (itemsCountElement) {
const count = summaryData ? (summaryData.processed_count || 0) : 0;
@@ -4501,11 +4093,11 @@ let huntarrUI = {
// Refresh state management timezone displays when timezone changes
refreshStateManagementTimezone: function() {
console.log('[huntarrUI] Refreshing state management timezone displays due to settings change');
-
+
try {
// Simply reload the displays - the backend will use the new timezone automatically
this.reloadStateManagementDisplays();
-
+
} catch (error) {
console.error('[huntarrUI] Error refreshing state management timezone:', error);
}
@@ -4514,30 +4106,30 @@ let huntarrUI = {
// Reload state management displays after timezone change
reloadStateManagementDisplays: function() {
console.log('[huntarrUI] Reloading state management displays after timezone change');
-
+
// Refresh all visible state management displays
const supportedApps = ['sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros'];
-
+
supportedApps.forEach(appType => {
// Find all instance containers for this app
const appPanel = document.getElementById(`${appType}-panel`);
if (appPanel && appPanel.style.display !== 'none') {
// Look for state reset time elements
const stateElements = appPanel.querySelectorAll(`[id*="${appType}-state-reset-time-"]`);
-
+
stateElements.forEach(element => {
// Extract instance index from element ID
const match = element.id.match(/(\w+)-state-reset-time-(\d+)/);
if (match) {
const instanceIndex = parseInt(match[2]);
-
+
// Get instance name from the form
const instanceNameElement = document.querySelector(`#${appType}-instance-name-${instanceIndex}`);
if (instanceNameElement) {
const instanceName = instanceNameElement.value || 'Default';
-
+
console.log(`[huntarrUI] Reloading state management for ${appType} instance ${instanceIndex} (${instanceName})`);
-
+
// Fetch fresh state management data
this.loadStateManagementForInstance(appType, instanceIndex, instanceName);
}
@@ -4545,14 +4137,14 @@ let huntarrUI = {
});
}
});
-
+
console.log('[huntarrUI] State management timezone refresh completed');
},
// Load state management data for a specific instance
loadStateManagementForInstance: function(appType, instanceIndex, instanceName) {
const url = `./api/stateful/summary?app_type=${encodeURIComponent(appType)}&instance_name=${encodeURIComponent(instanceName)}`;
-
+
HuntarrUtils.fetchWithTimeout(url, {
method: 'GET'
})
@@ -4560,7 +4152,7 @@ let huntarrUI = {
.then(data => {
if (data.success) {
console.log(`[huntarrUI] Received updated state management data for ${appType}/${instanceName}:`, data);
-
+
// Update the display with the new timezone-converted data
this.updateInstanceStateDisplay(appType, instanceIndex, data, instanceName, data.expiration_hours);
} else {
@@ -4577,11 +4169,11 @@ let huntarrUI = {
const mainSidebar = document.getElementById('sidebar');
const requestarrSidebar = document.getElementById('requestarr-sidebar');
const settingsSidebar = document.getElementById('settings-sidebar');
-
+
if (mainSidebar) mainSidebar.style.display = 'none';
if (settingsSidebar) settingsSidebar.style.display = 'none';
if (requestarrSidebar) requestarrSidebar.style.display = 'block';
-
+
// Update active states in Requestarr sidebar
this.updateRequestarrSidebarActive();
},
@@ -4590,35 +4182,35 @@ let huntarrUI = {
// Hide all Requestarr views
const homeView = document.getElementById('requestarr-home-view');
const historyView = document.getElementById('requestarr-history-view');
-
+
if (homeView) homeView.style.display = 'none';
if (historyView) historyView.style.display = 'none';
-
+
// Show selected view
if (view === 'home' && homeView) {
homeView.style.display = 'block';
} else if (view === 'history' && historyView) {
historyView.style.display = 'block';
}
-
+
// Update navigation states
this.updateRequestarrNavigation(view);
},
showMainSidebar: function() {
console.log('[huntarrUI] showMainSidebar called');
-
+
// Show main sidebar
const mainSidebar = document.getElementById('sidebar');
const requestarrSidebar = document.getElementById('requestarr-sidebar');
const settingsSidebar = document.getElementById('settings-sidebar');
-
+
console.log('[huntarrUI] Sidebar elements found:', {
main: !!mainSidebar,
requestarr: !!requestarrSidebar,
settings: !!settingsSidebar
});
-
+
if (mainSidebar) {
mainSidebar.style.display = 'block';
mainSidebar.style.setProperty('display', 'block', 'important');
@@ -4631,9 +4223,9 @@ let huntarrUI = {
settingsSidebar.style.display = 'none';
settingsSidebar.style.setProperty('display', 'none', 'important');
}
-
+
console.log('[huntarrUI] Sidebar styles applied');
-
+
// Clear Settings sidebar preference when showing main sidebar
localStorage.removeItem('huntarr-settings-sidebar');
},
@@ -4644,20 +4236,20 @@ let huntarrUI = {
if (flashPreventionStyle) {
flashPreventionStyle.remove();
}
-
+
// Show main sidebar and hide others
const mainSidebar = document.getElementById('sidebar');
const requestarrSidebar = document.getElementById('requestarr-sidebar');
const settingsSidebar = document.getElementById('settings-sidebar');
const appsSidebar = document.getElementById('apps-sidebar');
-
+
if (mainSidebar) mainSidebar.style.display = 'block';
if (requestarrSidebar) requestarrSidebar.style.display = 'none';
if (settingsSidebar) settingsSidebar.style.display = 'none';
if (appsSidebar) appsSidebar.style.display = 'none';
-
+
console.log('[huntarrUI] Main sidebar shown');
-
+
// Clear Settings sidebar preference when showing main sidebar
localStorage.removeItem('huntarr-settings-sidebar');
},
@@ -4668,18 +4260,18 @@ let huntarrUI = {
if (flashPreventionStyle) {
flashPreventionStyle.remove();
}
-
+
// Hide main sidebar and show requestarr sidebar
const mainSidebar = document.getElementById('sidebar');
const requestarrSidebar = document.getElementById('requestarr-sidebar');
const settingsSidebar = document.getElementById('settings-sidebar');
const appsSidebar = document.getElementById('apps-sidebar');
-
+
if (mainSidebar) mainSidebar.style.display = 'none';
if (requestarrSidebar) requestarrSidebar.style.display = 'block';
if (settingsSidebar) settingsSidebar.style.display = 'none';
if (appsSidebar) appsSidebar.style.display = 'none';
-
+
// Update active states in Requestarr sidebar
this.updateRequestarrSidebarActive();
},
@@ -4690,18 +4282,18 @@ let huntarrUI = {
if (flashPreventionStyle) {
flashPreventionStyle.remove();
}
-
+
// Hide main sidebar and show apps sidebar
const mainSidebar = document.getElementById('sidebar');
const requestarrSidebar = document.getElementById('requestarr-sidebar');
const settingsSidebar = document.getElementById('settings-sidebar');
const appsSidebar = document.getElementById('apps-sidebar');
-
+
if (mainSidebar) mainSidebar.style.display = 'none';
if (requestarrSidebar) requestarrSidebar.style.display = 'none';
if (settingsSidebar) settingsSidebar.style.display = 'none';
if (appsSidebar) appsSidebar.style.display = 'block';
-
+
// Update active states in Apps sidebar
this.updateAppsSidebarActive();
},
@@ -4712,18 +4304,18 @@ let huntarrUI = {
if (flashPreventionStyle) {
flashPreventionStyle.remove();
}
-
+
// Hide main sidebar and show settings sidebar
const mainSidebar = document.getElementById('sidebar');
const requestarrSidebar = document.getElementById('requestarr-sidebar');
const settingsSidebar = document.getElementById('settings-sidebar');
const appsSidebar = document.getElementById('apps-sidebar');
-
+
if (mainSidebar) mainSidebar.style.display = 'none';
if (requestarrSidebar) requestarrSidebar.style.display = 'none';
if (appsSidebar) appsSidebar.style.display = 'none';
if (settingsSidebar) settingsSidebar.style.display = 'block';
-
+
// Update active states in Settings sidebar
this.updateSettingsSidebarActive();
},
@@ -4732,7 +4324,7 @@ let huntarrUI = {
// Remove active from all Requestarr nav items
const requestarrNavItems = document.querySelectorAll('#requestarr-sidebar .nav-item');
requestarrNavItems.forEach(item => item.classList.remove('active'));
-
+
// Set appropriate active state based on current section
if (this.currentSection === 'requestarr') {
const homeNav = document.getElementById('requestarrHomeNav');
@@ -4747,7 +4339,7 @@ let huntarrUI = {
// Remove active from all Requestarr nav items
const requestarrNavItems = document.querySelectorAll('#requestarr-sidebar .nav-item');
requestarrNavItems.forEach(item => item.classList.remove('active'));
-
+
// Set active state based on view
if (view === 'home') {
const homeNav = document.getElementById('requestarrHomeNav');
@@ -4767,7 +4359,7 @@ let huntarrUI = {
window.location.hash = '#home';
});
}
-
+
// Home button - shows Requestarr home
const homeNav = document.getElementById('requestarrHomeNav');
if (homeNav) {
@@ -4776,7 +4368,7 @@ let huntarrUI = {
window.location.hash = '#requestarr';
});
}
-
+
// History button - shows Requestarr history
const historyNav = document.getElementById('requestarrHistoryNav');
if (historyNav) {
@@ -4791,7 +4383,7 @@ let huntarrUI = {
// Remove active from all Apps nav items
const appsNavItems = document.querySelectorAll('#apps-sidebar .nav-item');
appsNavItems.forEach(item => item.classList.remove('active'));
-
+
// Set appropriate active state based on current section
if (this.currentSection === 'sonarr') {
const sonarrNav = document.getElementById('appsSonarrNav');
@@ -4818,7 +4410,7 @@ let huntarrUI = {
// Remove active from all Settings nav items
const settingsNavItems = document.querySelectorAll('#settings-sidebar .nav-item');
settingsNavItems.forEach(item => item.classList.remove('active'));
-
+
// Set appropriate active state based on current section
if (this.currentSection === 'settings') {
const mainNav = document.getElementById('settingsMainNav');
@@ -4844,7 +4436,7 @@ let huntarrUI = {
window.location.hash = '#home';
});
}
-
+
// Sonarr button
const sonarrNav = document.getElementById('appsSonarrNav');
if (sonarrNav) {
@@ -4853,7 +4445,7 @@ let huntarrUI = {
window.location.hash = '#sonarr';
});
}
-
+
// Radarr button
const radarrNav = document.getElementById('appsRadarrNav');
if (radarrNav) {
@@ -4862,7 +4454,7 @@ let huntarrUI = {
window.location.hash = '#radarr';
});
}
-
+
// Lidarr button
const lidarrNav = document.getElementById('appsLidarrNav');
if (lidarrNav) {
@@ -4871,7 +4463,7 @@ let huntarrUI = {
window.location.hash = '#lidarr';
});
}
-
+
// Readarr button
const readarrNav = document.getElementById('appsReadarrNav');
if (readarrNav) {
@@ -4880,7 +4472,7 @@ let huntarrUI = {
window.location.hash = '#readarr';
});
}
-
+
// Whisparr button
const whisparrNav = document.getElementById('appsWhisparrNav');
if (whisparrNav) {
@@ -4889,7 +4481,7 @@ let huntarrUI = {
window.location.hash = '#whisparr';
});
}
-
+
// Eros button
const erosNav = document.getElementById('appsErosNav');
if (erosNav) {
@@ -4911,7 +4503,7 @@ let huntarrUI = {
window.location.hash = '#home';
});
}
-
+
// Main button - shows Settings main page
const mainNav = document.getElementById('settingsMainNav');
if (mainNav) {
@@ -4920,7 +4512,7 @@ let huntarrUI = {
window.location.hash = '#settings';
});
}
-
+
// Scheduling button - shows Scheduling page
const schedulingNav = document.getElementById('settingsSchedulingNav');
if (schedulingNav) {
@@ -4929,7 +4521,7 @@ let huntarrUI = {
window.location.hash = '#scheduling';
});
}
-
+
// Notifications button - shows Notifications page
const notificationsNav = document.getElementById('settingsNotificationsNav');
if (notificationsNav) {
@@ -4938,7 +4530,7 @@ let huntarrUI = {
window.location.hash = '#notifications';
});
}
-
+
// User button - shows User page
const userNav = document.getElementById('settingsUserNav');
if (userNav) {
@@ -4962,7 +4554,7 @@ let huntarrUI = {
.then(settings => {
console.log('[huntarrUI] Loaded settings:', settings);
console.log('[huntarrUI] General settings:', settings.general);
-
+
// Generate the general settings form - pass only the general settings
if (typeof SettingsForms !== 'undefined' && SettingsForms.generateGeneralForm) {
SettingsForms.generateGeneralForm(generalSettings, settings.general || {});
@@ -4979,17 +4571,17 @@ let huntarrUI = {
initializeNotifications: function() {
console.log('[huntarrUI] initializeNotifications called');
-
+
// Check if notifications are already initialized
const notificationsContainer = document.getElementById('notificationsContainer');
if (!notificationsContainer) {
console.error('[huntarrUI] notificationsContainer element not found!');
return;
}
-
+
console.log('[huntarrUI] notificationsContainer found:', notificationsContainer);
console.log('[huntarrUI] Current container content:', notificationsContainer.innerHTML.trim());
-
+
// Check if notifications are actually initialized (ignore HTML comments)
const currentContent = notificationsContainer.innerHTML.trim();
        if (currentContent !== '' && !currentContent.includes('<!--')) {
@@ -4998,7 +4590,7 @@ let huntarrUI = {
}
console.log('[huntarrUI] Loading notifications settings from API...');
-
+
// Load settings from API and generate the notifications form
fetch('./api/settings')
.then(response => response.json())
@@ -5007,7 +4599,7 @@ let huntarrUI = {
console.log('[huntarrUI] General settings:', settings.general);
console.log('[huntarrUI] SettingsForms available:', typeof SettingsForms !== 'undefined');
console.log('[huntarrUI] generateNotificationsForm available:', typeof SettingsForms !== 'undefined' && SettingsForms.generateNotificationsForm);
-
+
// Generate the notifications form - pass the general settings which contain notification settings
if (typeof SettingsForms !== 'undefined' && SettingsForms.generateNotificationsForm) {
console.log('[huntarrUI] Calling SettingsForms.generateNotificationsForm...');
@@ -5026,7 +4618,7 @@ let huntarrUI = {
initializeBackupRestore: function() {
console.log('[huntarrUI] initializeBackupRestore called');
-
+
// Initialize backup/restore functionality
if (typeof BackupRestore !== 'undefined') {
BackupRestore.initialize();
@@ -5037,17 +4629,17 @@ let huntarrUI = {
initializeProwlarr: function() {
console.log('[huntarrUI] initializeProwlarr called');
-
+
// Check if prowlarr is already initialized
const prowlarrContainer = document.getElementById('prowlarrContainer');
if (!prowlarrContainer) {
console.error('[huntarrUI] prowlarrContainer element not found!');
return;
}
-
+
console.log('[huntarrUI] prowlarrContainer found:', prowlarrContainer);
console.log('[huntarrUI] Current container content:', prowlarrContainer.innerHTML.trim());
-
+
// Check if prowlarr is actually initialized (ignore HTML comments)
const currentContent = prowlarrContainer.innerHTML.trim();
        if (currentContent !== '' && !currentContent.includes('<!--')) {
@@ -5056,7 +4648,7 @@ let huntarrUI = {
}
console.log('[huntarrUI] Loading prowlarr settings from API...');
-
+
// Load settings from API and generate the prowlarr form
fetch('./api/settings')
.then(response => response.json())
@@ -5065,7 +4657,7 @@ let huntarrUI = {
console.log('[huntarrUI] Prowlarr settings:', settings.prowlarr);
console.log('[huntarrUI] SettingsForms available:', typeof SettingsForms !== 'undefined');
console.log('[huntarrUI] generateProwlarrForm available:', typeof SettingsForms !== 'undefined' && SettingsForms.generateProwlarrForm);
-
+
// Generate the prowlarr form
if (typeof SettingsForms !== 'undefined' && SettingsForms.generateProwlarrForm) {
console.log('[huntarrUI] Calling SettingsForms.generateProwlarrForm...');
@@ -5084,7 +4676,7 @@ let huntarrUI = {
initializeUser: function() {
console.log('[huntarrUI] initializeUser called');
-
+
// Check if UserModule is available and initialize it
if (typeof UserModule !== 'undefined') {
if (!window.userModule) {
@@ -5101,17 +4693,17 @@ let huntarrUI = {
initializeSwaparr: function() {
console.log('[huntarrUI] initializeSwaparr called');
-
+
// Check if Swaparr is already initialized
const swaparrContainer = document.getElementById('swaparrContainer');
if (!swaparrContainer) {
console.error('[huntarrUI] swaparrContainer element not found!');
return;
}
-
+
console.log('[huntarrUI] swaparrContainer found:', swaparrContainer);
console.log('[huntarrUI] Current container content:', swaparrContainer.innerHTML.trim());
-
+
// Check if Swaparr is actually initialized (ignore HTML comments)
const currentContent = swaparrContainer.innerHTML.trim();
        if (currentContent !== '' && !currentContent.includes('<!--')) {
@@ -5120,7 +4712,7 @@ let huntarrUI = {
}
console.log('[huntarrUI] Loading Swaparr settings from API...');
-
+
// Load settings from API and generate the Swaparr form
fetch('./api/swaparr/settings')
.then(response => response.json())
@@ -5128,13 +4720,13 @@ let huntarrUI = {
console.log('[huntarrUI] Loaded Swaparr settings:', settings);
console.log('[huntarrUI] SettingsForms available:', typeof SettingsForms !== 'undefined');
console.log('[huntarrUI] generateSwaparrForm available:', typeof SettingsForms !== 'undefined' && SettingsForms.generateSwaparrForm);
-
+
// Generate the Swaparr form
if (typeof SettingsForms !== 'undefined' && SettingsForms.generateSwaparrForm) {
console.log('[huntarrUI] Calling SettingsForms.generateSwaparrForm...');
SettingsForms.generateSwaparrForm(swaparrContainer, settings || {});
console.log('[huntarrUI] Swaparr form generated successfully');
-
+
// Load Swaparr apps table/status
this.loadSwaparrApps();
} else {
@@ -5150,7 +4742,7 @@ let huntarrUI = {
loadSwaparrApps: function() {
console.log('[huntarrUI] loadSwaparrApps called');
-
+
// Get the Swaparr apps panel
const swaparrAppsPanel = document.getElementById('swaparrApps');
if (!swaparrAppsPanel) {
@@ -5175,10 +4767,10 @@ let huntarrUI = {
// Setup Prowlarr status polling
setupProwlarrStatusPolling: function() {
console.log('[huntarrUI] Setting up Prowlarr status polling');
-
+
// Load initial status
this.loadProwlarrStatus();
-
+
// Set up polling to refresh Prowlarr status every 30 seconds
// Only poll when home section is active to reduce unnecessary requests
this.prowlarrPollingInterval = setInterval(() => {
@@ -5186,7 +4778,7 @@ let huntarrUI = {
this.loadProwlarrStatus();
}
}, 30000);
-
+
// Set up refresh button handler
const refreshButton = document.getElementById('refresh-prowlarr-data');
if (refreshButton) {
@@ -5205,29 +4797,29 @@ let huntarrUI = {
// Initialize when document is ready
document.addEventListener('DOMContentLoaded', function() {
huntarrUI.init();
-
+
// Initialize our enhanced UI features
if (typeof StatsTooltips !== 'undefined') {
StatsTooltips.init();
}
-
+
if (typeof CardHoverEffects !== 'undefined') {
CardHoverEffects.init();
}
-
+
if (typeof CircularProgress !== 'undefined') {
CircularProgress.init();
}
-
+
if (typeof BackgroundPattern !== 'undefined') {
BackgroundPattern.init();
}
-
+
// Initialize per-instance reset button listeners
if (typeof SettingsForms !== 'undefined' && typeof SettingsForms.setupInstanceResetListeners === 'function') {
SettingsForms.setupInstanceResetListeners();
}
-
+
// Initialize UserModule when available
if (typeof UserModule !== 'undefined') {
console.log('[huntarrUI] UserModule available, initializing...');
diff --git a/main.py b/main.py
index c6d486ea..22214a1b 100644
--- a/main.py
+++ b/main.py
@@ -113,10 +113,10 @@ def __init__(self, *args, **kwargs):
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))
from primary.utils.logger import setup_main_logger, get_logger
from primary.utils.clean_logger import setup_clean_logging
-
+
# Initialize main logger
huntarr_logger = setup_main_logger()
-
+
# Initialize timezone from TZ environment variable
try:
from primary.settings_manager import initialize_timezone_from_env
@@ -124,13 +124,13 @@ def __init__(self, *args, **kwargs):
huntarr_logger.info("Timezone initialization completed.")
except Exception as e:
huntarr_logger.warning(f"Failed to initialize timezone from environment: {e}")
-
-
+
+
# Initialize clean logging for frontend consumption
setup_clean_logging()
huntarr_logger.info("Clean logging system initialized for frontend consumption.")
-
+
huntarr_logger.info("Successfully imported application components.")
# Main function startup message removed to reduce log spam
except ImportError as e:
@@ -157,19 +157,19 @@ def refresh_sponsors_on_startup():
"""Refresh sponsors database from manifest.json on startup"""
import os
import json
-
+
try:
# Get database instance
from src.primary.utils.database import get_database
db = get_database()
-
+
# Path to manifest.json
manifest_path = os.path.join(os.path.dirname(__file__), 'manifest.json')
-
+
if os.path.exists(manifest_path):
with open(manifest_path, 'r') as f:
manifest_data = json.load(f)
-
+
sponsors_list = manifest_data.get('sponsors', [])
if sponsors_list:
# Clear existing sponsors and save new ones
@@ -179,7 +179,7 @@ def refresh_sponsors_on_startup():
huntarr_logger.warning("No sponsors found in manifest.json")
else:
huntarr_logger.warning(f"manifest.json not found at {manifest_path}")
-
+
except Exception as e:
huntarr_logger.error(f"Error refreshing sponsors on startup: {e}")
raise
@@ -187,19 +187,19 @@ def refresh_sponsors_on_startup():
def load_version_to_database():
"""Load current version from version.txt into database on startup"""
import os
-
+
try:
# Get database instance
from src.primary.utils.database import get_database
db = get_database()
-
+
# Path to version.txt
version_path = os.path.join(os.path.dirname(__file__), 'version.txt')
-
+
if os.path.exists(version_path):
with open(version_path, 'r') as f:
version = f.read().strip()
-
+
if version:
# Store version in database
db.set_version(version)
@@ -208,7 +208,7 @@ def load_version_to_database():
huntarr_logger.warning("version.txt is empty")
else:
huntarr_logger.warning(f"version.txt not found at {version_path}")
-
+
except Exception as e:
huntarr_logger.error(f"Error loading version to database: {e}")
# Don't raise - this is not critical enough to stop startup
@@ -239,11 +239,11 @@ def run_web_server():
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))
from primary.settings_manager import load_settings
-
+
settings = load_settings("general")
local_access_bypass = settings.get("local_access_bypass", False)
proxy_auth_bypass = settings.get("proxy_auth_bypass", False)
-
+
if proxy_auth_bypass:
            web_logger.info("Authentication Mode: NO LOGIN MODE (Proxy authentication bypass enabled)")
elif local_access_bypass:
@@ -271,16 +271,16 @@ def run_web_server():
from waitress.server import create_server
import time
web_logger.info("Running with Waitress production server.")
-
+
# Create the server instance so we can shut it down gracefully
waitress_server = create_server(app, host=host, port=port, threads=8)
-
+
web_logger.info("Waitress server starting...")
-
+
# Start the server in a separate thread
server_thread = threading.Thread(target=waitress_server.run, daemon=True)
server_thread.start()
-
+
# Monitor for shutdown signal in the main thread
while not shutdown_requested.is_set() and not stop_event.is_set():
try:
@@ -289,25 +289,25 @@ def run_web_server():
break
except KeyboardInterrupt:
break
-
+
# Shutdown sequence
web_logger.info("Shutdown signal received. Stopping Waitress server...")
try:
waitress_server.close()
web_logger.info("Waitress server close() called.")
-
+
# Wait for server thread to finish
server_thread.join(timeout=3.0)
if server_thread.is_alive():
web_logger.warning("Server thread did not stop within timeout.")
else:
web_logger.info("Server thread stopped successfully.")
-
+
except Exception as e:
web_logger.exception(f"Error during Waitress server shutdown: {e}")
-
+
web_logger.info("Waitress server has stopped.")
-
+
except ImportError:
web_logger.error("Waitress not found. Falling back to Flask development server (NOT recommended for production).")
web_logger.error("Install waitress ('pip install waitress') for production use.")
@@ -328,20 +328,20 @@ def main_shutdown_handler(signum, frame):
"""Gracefully shut down the application."""
global _global_shutdown_flag
_global_shutdown_flag = True # Set global shutdown flag immediately
-
+
signal_name = "SIGINT" if signum == signal.SIGINT else "SIGTERM" if signum == signal.SIGTERM else f"Signal {signum}"
huntarr_logger.info(f"Received {signal_name}. Initiating graceful shutdown...")
-
+
# Set a reasonable timeout for shutdown operations
shutdown_start_time = time.time()
shutdown_timeout = 30 # 30 seconds total shutdown timeout
-
+
# Immediate database checkpoint to prevent corruption
try:
from primary.utils.database import get_database, get_logs_database
-
+
huntarr_logger.info("Performing emergency database checkpoint...")
-
+
# Emergency checkpoint for main database
try:
main_db = get_database()
@@ -350,7 +350,7 @@ def main_shutdown_handler(signum, frame):
huntarr_logger.info("Main database emergency checkpoint completed")
except Exception as e:
huntarr_logger.warning(f"Main database emergency checkpoint failed: {e}")
-
+
# Emergency checkpoint for logs database
try:
logs_db = get_logs_database()
@@ -359,16 +359,16 @@ def main_shutdown_handler(signum, frame):
huntarr_logger.info("Logs database emergency checkpoint completed")
except Exception as e:
huntarr_logger.warning(f"Logs database emergency checkpoint failed: {e}")
-
+
except Exception as e:
huntarr_logger.warning(f"Emergency database checkpoint failed: {e}")
-
+
# Set both shutdown events
if not stop_event.is_set():
stop_event.set()
if not shutdown_requested.is_set():
shutdown_requested.set()
-
+
# Also shutdown the Waitress server directly if it exists
global waitress_server
if waitress_server:
@@ -377,7 +377,7 @@ def main_shutdown_handler(signum, frame):
waitress_server.close()
except Exception as e:
huntarr_logger.warning(f"Error closing Waitress server: {e}")
-
+
# Force exit if shutdown takes too long (Docker container update scenario)
elapsed_time = time.time() - shutdown_start_time
if elapsed_time > shutdown_timeout:
@@ -388,11 +388,11 @@ def cleanup_handler():
"""Cleanup function called at exit"""
cleanup_start_time = time.time()
huntarr_logger.info("Exit cleanup handler called")
-
+
# Shutdown databases gracefully with timeout
try:
from primary.utils.database import get_database, get_logs_database
-
+
# Close main database connections
main_db = get_database()
if hasattr(main_db, '_database_instance') and main_db._database_instance is not None:
@@ -405,7 +405,7 @@ def cleanup_handler():
huntarr_logger.debug("Main database WAL checkpoint completed")
except Exception as db_error:
huntarr_logger.warning(f"Error during main database cleanup: {db_error}")
-
+
# Close logs database connections
logs_db = get_logs_database()
if hasattr(logs_db, '_logs_database_instance') and logs_db._logs_database_instance is not None:
@@ -417,18 +417,18 @@ def cleanup_handler():
huntarr_logger.debug("Logs database WAL checkpoint completed")
except Exception as logs_error:
huntarr_logger.warning(f"Error during logs database cleanup: {logs_error}")
-
+
huntarr_logger.info("Database shutdown completed")
-
+
except Exception as e:
huntarr_logger.warning(f"Error during database shutdown: {e}")
-
+
# Ensure stop events are set
if not stop_event.is_set():
stop_event.set()
if not shutdown_requested.is_set():
shutdown_requested.set()
-
+
# Log cleanup timing for Docker update diagnostics
cleanup_duration = time.time() - cleanup_start_time
huntarr_logger.info(f"Cleanup completed in {cleanup_duration:.2f} seconds")
@@ -440,30 +440,30 @@ def main():
# Register signal handlers for graceful shutdown in the main process
signal.signal(signal.SIGINT, main_shutdown_handler)
signal.signal(signal.SIGTERM, main_shutdown_handler)
-
+
# Register cleanup handler
atexit.register(cleanup_handler)
-
+
# Initialize databases with default configurations
try:
from primary.settings_manager import initialize_database
initialize_database()
huntarr_logger.info("Main database initialization completed successfully")
-
+
# Initialize base URL from BASE_URL environment variable early
# This needs to happen before web server initialization
try:
from primary.settings_manager import initialize_base_url_from_env
initialize_base_url_from_env()
huntarr_logger.info("Base URL initialization completed.")
-
+
# Reconfigure the web server with the updated base URL
from primary.web_server import reconfigure_base_url
reconfigure_base_url()
huntarr_logger.info("Web server reconfigured with updated base URL.")
except Exception as e:
huntarr_logger.warning(f"Failed to initialize base URL from environment: {e}")
-
+
# Initialize database logging system (now uses main huntarr.db)
try:
from primary.utils.database import get_logs_database, schedule_log_cleanup
@@ -472,22 +472,29 @@ def main():
huntarr_logger.info("Database logging system initialized with scheduled cleanup.")
except Exception as e:
huntarr_logger.warning(f"Failed to initialize database logging: {e}")
-
+
# Load version from version.txt into database on startup
try:
load_version_to_database()
except Exception as version_error:
huntarr_logger.warning(f"Failed to load version to database: {version_error}")
-
+
# Refresh sponsors from manifest.json on startup
try:
refresh_sponsors_on_startup()
huntarr_logger.info("Sponsors database refreshed from manifest.json")
except Exception as sponsor_error:
huntarr_logger.warning(f"Failed to refresh sponsors on startup: {sponsor_error}")
-
+
+ # Initialize state management system
+ try:
+ from src.primary.stateful_manager import initialize_state_management
+ initialize_state_management()
+ except Exception as e:
+ huntarr_logger.warning("Failed to initialize state management system: %s", e)
+
except Exception as e:
- huntarr_logger.error(f"Failed to initialize databases: {e}")
+ huntarr_logger.error("Failed to initialize databases: %s", e)
huntarr_logger.error("Application may not function correctly without database")
# Continue anyway - the app might still work with defaults
@@ -542,7 +549,7 @@ def main():
# shutdown_threads() # Uncomment if primary.main.shutdown_threads() does more cleanup
huntarr_logger.info("--- Huntarr Main Process Exiting ---")
-
+
# Return appropriate exit code based on shutdown reason
if shutdown_requested.is_set() or stop_event.is_set():
huntarr_logger.info("Clean shutdown completed - Exit code 0")
diff --git a/src/primary/apps/eros/missing.py b/src/primary/apps/eros/missing.py
index 03974bbb..f4f5260f 100644
--- a/src/primary/apps/eros/missing.py
+++ b/src/primary/apps/eros/missing.py
@@ -6,73 +6,58 @@
Exclusively supports the v3 API.
"""
-import time
import random
import datetime
-from typing import List, Dict, Any, Set, Callable
+from typing import Dict, Any, Callable
from src.primary.utils.logger import get_logger
from src.primary.apps.eros import api as eros_api
from src.primary.settings_manager import load_settings, get_advanced_setting
from src.primary.stateful_manager import is_processed, add_processed_id
from src.primary.stats_manager import increment_stat, check_hourly_cap_exceeded
from src.primary.utils.history_utils import log_processed_media
-from src.primary.state import check_state_reset
# Get logger for the app
eros_logger = get_logger("eros")
+
def process_missing_items(
app_settings: Dict[str, Any],
stop_check: Callable[[], bool] # Function to check if stop is requested
) -> bool:
"""
Process missing items in Eros based on provided settings.
-
+
Args:
app_settings: Dictionary containing all settings for Eros
stop_check: A function that returns True if the process should stop
-
+
Returns:
True if any items were processed, False otherwise.
"""
eros_logger.info("Starting missing items processing cycle for Eros.")
- processed_any = False
-
- # Reset state files if enough time has passed
- check_state_reset("eros")
-
+
# Load settings to check if tagging is enabled
eros_settings = load_settings("eros")
tag_processed_items = eros_settings.get("tag_processed_items", True)
-
+
# Extract necessary settings
api_url = app_settings.get("api_url", "").strip()
api_key = app_settings.get("api_key", "").strip()
api_timeout = get_advanced_setting("api_timeout", 120) # Use database value
instance_name = app_settings.get("instance_name", "Eros Default")
-
- # Load general settings to get centralized timeout
- general_settings = load_settings('general')
-
+
monitored_only = app_settings.get("monitored_only", True)
skip_future_releases = app_settings.get("skip_future_releases", True)
# skip_item_refresh setting removed as it was a performance bottleneck
search_mode = app_settings.get("search_mode", "movie") # Default to movie mode if not specified
-
- eros_logger.info(f"Using search mode: {search_mode} for missing items")
-
+
+ eros_logger.info("Using search mode: %s for missing items", search_mode)
+
# Use the new hunt_missing_items parameter name, falling back to hunt_missing_scenes for backwards compatibility
hunt_missing_items = app_settings.get("hunt_missing_items", app_settings.get("hunt_missing_scenes", 0))
-
- # Use advanced settings from database for command operations
- command_wait_delay = get_advanced_setting("command_wait_delay", 1)
- command_wait_attempts = get_advanced_setting("command_wait_attempts", 600)
-
- # Use the centralized advanced setting for stateful management hours
- stateful_management_hours = get_advanced_setting("stateful_management_hours", 168)
-
+
# Log that we're using Eros v3 API
- eros_logger.debug(f"Using Eros API v3 for instance: {instance_name}")
+ eros_logger.debug("Using Eros API v3 for instance: %s", instance_name)
# Skip if hunt_missing_items is set to a negative value or 0
if hunt_missing_items <= 0:
@@ -83,26 +68,26 @@ def process_missing_items(
if stop_check():
eros_logger.info("Stop requested before starting missing items. Aborting...")
return False
-
+
# Get missing items
- eros_logger.info(f"Retrieving items with missing files...")
- missing_items = eros_api.get_items_with_missing(api_url, api_key, api_timeout, monitored_only, search_mode)
-
+ eros_logger.info("Retrieving items with missing files...")
+ missing_items = eros_api.get_items_with_missing(api_url, api_key, api_timeout, monitored_only, search_mode)
+
if missing_items is None: # API call failed
eros_logger.error("Failed to retrieve missing items from Eros API.")
return False
-
+
if not missing_items:
eros_logger.info("No missing items found.")
return False
-
+
# Check for stop signal after retrieving items
if stop_check():
eros_logger.info("Stop requested after retrieving missing items. Aborting...")
return False
-
- eros_logger.info(f"Found {len(missing_items)} items with missing files.")
-
+
+ eros_logger.info("Found %d items with missing files.", len(missing_items))
+
# Filter out future releases if configured
if skip_future_releases:
now = datetime.datetime.now().replace(tzinfo=datetime.timezone.utc)
@@ -111,18 +96,18 @@ def process_missing_items(
missing_items = [
item for item in missing_items
if not item.get('airDateUtc') or (
- item.get('airDateUtc') and
+ item.get('airDateUtc') and
datetime.datetime.fromisoformat(item['airDateUtc'].replace('Z', '+00:00')) < now
)
]
skipped_count = original_count - len(missing_items)
if skipped_count > 0:
- eros_logger.info(f"Skipped {skipped_count} future item releases based on air date.")
+ eros_logger.info("Skipped %d future item releases based on air date.", skipped_count)
if not missing_items:
eros_logger.info("No missing items left to process after filtering future releases.")
return False
-
+
# Filter out already processed items using stateful management
unprocessed_items = []
for item in missing_items:
@@ -130,22 +115,22 @@ def process_missing_items(
if not is_processed("eros", instance_name, item_id):
unprocessed_items.append(item)
else:
- eros_logger.debug(f"Skipping already processed item ID: {item_id}")
-
- eros_logger.info(f"Found {len(unprocessed_items)} unprocessed items out of {len(missing_items)} total items with missing files.")
-
+ eros_logger.debug("Skipping already processed item ID: %s", item_id)
+
+ eros_logger.info("Found %d unprocessed items out of %d total items with missing files.", len(unprocessed_items), len(missing_items))
+
if not unprocessed_items:
- eros_logger.info(f"No unprocessed items found for {instance_name}. All available items have been processed.")
+ eros_logger.info("No unprocessed items found for %s. All available items have been processed.", instance_name)
return False
-
+
items_processed = 0
processing_done = False
-
+
# Select items to search based on configuration
- eros_logger.info(f"Randomly selecting up to {hunt_missing_items} missing items.")
+ eros_logger.info("Randomly selecting up to %d missing items.", hunt_missing_items)
items_to_search = random.sample(unprocessed_items, min(len(unprocessed_items), hunt_missing_items))
-
- eros_logger.info(f"Selected {len(items_to_search)} missing items to search.")
+
+ eros_logger.info("Selected %d missing items to search.", len(items_to_search))
# Process selected items
for item in items_to_search:
@@ -153,25 +138,25 @@ def process_missing_items(
if stop_check():
eros_logger.info("Stop requested during item processing. Aborting...")
break
-
+
# Check API limit before processing each item
try:
if check_hourly_cap_exceeded("eros"):
-                eros_logger.warning(f"Eros API hourly limit reached - stopping missing items processing after {items_processed} items")
+                eros_logger.warning("Eros API hourly limit reached - stopping missing items processing after %d items", items_processed)
break
except Exception as e:
- eros_logger.error(f"Error checking hourly API cap: {e}")
+ eros_logger.error("Error checking hourly API cap: %s", e)
# Continue processing if cap check fails - safer than stopping
-
+
# Re-check limit in case it changed
current_limit = app_settings.get("hunt_missing_items", app_settings.get("hunt_missing_scenes", 1))
if items_processed >= current_limit:
- eros_logger.info(f"Reached HUNT_MISSING_ITEMS limit ({current_limit}) for this cycle.")
- break
+ eros_logger.info("Reached HUNT_MISSING_ITEMS limit (%d) for this cycle.", current_limit)
+ break
item_id = item.get("id")
title = item.get("title", "Unknown Title")
-
+
# For movies, we don't use season/episode format
if search_mode == "movie":
item_info = title
@@ -184,61 +169,61 @@ def process_missing_items(
item_info = f"{title} - {season_episode}"
else:
item_info = title
-
- eros_logger.info(f"Processing missing item: \"{item_info}\" (Item ID: {item_id})")
-
+
+ eros_logger.info('Processing missing item: "%s" (Item ID: %s)', item_info, item_id)
+
# Mark the item as processed BEFORE triggering any searches
add_processed_id("eros", instance_name, str(item_id))
- eros_logger.debug(f"Added item ID {item_id} to processed list for {instance_name}")
-
+ eros_logger.debug("Added item ID %s to processed list for %s", item_id, instance_name)
+
# Refresh functionality has been removed as it was identified as a performance bottleneck
-
+
# Check for stop signal before searching
if stop_check():
- eros_logger.info(f"Stop requested before searching for {title}. Aborting...")
+ eros_logger.info("Stop requested before searching for %s. Aborting...", title)
break
-
+
# Search for the item
eros_logger.info(" - Searching for missing item...")
search_command_id = eros_api.item_search(api_url, api_key, api_timeout, [item_id])
if search_command_id:
- eros_logger.info(f"Triggered search command {search_command_id}. Assuming success for now.")
-
+ eros_logger.info("Triggered search command %s. Assuming success for now.", search_command_id)
+
# Tag the movie if enabled
if tag_processed_items:
from src.primary.settings_manager import get_custom_tag
custom_tag = get_custom_tag("eros", "missing", "huntarr-missing")
try:
eros_api.tag_processed_movie(api_url, api_key, api_timeout, item_id, custom_tag)
- eros_logger.debug(f"Tagged movie {item_id} with '{custom_tag}'")
+ eros_logger.debug("Tagged movie %s with '%s'", item_id, custom_tag)
except Exception as e:
- eros_logger.warning(f"Failed to tag movie {item_id} with '{custom_tag}': {e}")
-
+ eros_logger.warning("Failed to tag movie %s with '%s': %s", item_id, custom_tag, e)
+
# Log to history system
log_processed_media("eros", item_info, item_id, instance_name, "missing")
- eros_logger.debug(f"Logged history entry for item: {item_info}")
-
+ eros_logger.debug("Logged history entry for item: %s", item_info)
+
items_processed += 1
processing_done = True
-
+
# Increment the hunted statistics for Eros
increment_stat("eros", "hunted", 1)
- eros_logger.debug(f"Incremented eros hunted statistics by 1")
+ eros_logger.debug("Incremented eros hunted statistics by 1")
# Log progress
current_limit = app_settings.get("hunt_missing_items", app_settings.get("hunt_missing_scenes", 1))
- eros_logger.info(f"Processed {items_processed}/{current_limit} missing items this cycle.")
+ eros_logger.info("Processed %d/%d missing items this cycle.", items_processed, current_limit)
else:
- eros_logger.warning(f"Failed to trigger search command for item ID {item_id}.")
+ eros_logger.warning("Failed to trigger search command for item ID %s.", item_id)
# Do not mark as processed if search couldn't be triggered
continue
-
+
# Log final status
if items_processed > 0:
- eros_logger.info(f"Completed processing {items_processed} missing items for this cycle.")
+ eros_logger.info("Completed processing %d missing items for this cycle.", items_processed)
else:
eros_logger.info("No new missing items were processed in this run.")
-
+
return processing_done
diff --git a/src/primary/apps/eros/upgrade.py b/src/primary/apps/eros/upgrade.py
index 7e5ca1bc..f8395147 100644
--- a/src/primary/apps/eros/upgrade.py
+++ b/src/primary/apps/eros/upgrade.py
@@ -6,70 +6,56 @@
Exclusively supports the v3 API.
"""
-import time
import random
-import datetime
-from typing import List, Dict, Any, Set, Callable
-from src.primary.utils.logger import get_logger
+from typing import Any, Callable
+
from src.primary.apps.eros import api as eros_api
from src.primary.settings_manager import load_settings, get_advanced_setting
from src.primary.stateful_manager import is_processed, add_processed_id
from src.primary.stats_manager import increment_stat, check_hourly_cap_exceeded
from src.primary.utils.history_utils import log_processed_media
-from src.primary.state import check_state_reset
+from src.primary.utils.logger import get_logger
-# Get logger for the app
eros_logger = get_logger("eros")
+
def process_cutoff_upgrades(
- app_settings: Dict[str, Any],
+ app_settings: dict[str, Any],
stop_check: Callable[[], bool] # Function to check if stop is requested
) -> bool:
"""
Process quality cutoff upgrades for Eros based on settings.
-
+
Args:
app_settings: Dictionary containing all settings for Eros
stop_check: A function that returns True if the process should stop
-
+
Returns:
True if any items were processed for upgrades, False otherwise.
"""
eros_logger.info("Starting quality cutoff upgrades processing cycle for Eros.")
- processed_any = False
-
- # Reset state files if enough time has passed
- check_state_reset("eros")
-
+
# Load settings to check if tagging is enabled
eros_settings = load_settings("eros")
tag_processed_items = eros_settings.get("tag_processed_items", True)
-
+
# Extract necessary settings
api_url = app_settings.get("api_url", "").strip()
api_key = app_settings.get("api_key", "").strip()
api_timeout = get_advanced_setting("api_timeout", 120) # Use database value
instance_name = app_settings.get("instance_name", "Eros Default")
-
- # Load general settings to get centralized timeout
- general_settings = load_settings('general')
-
+
monitored_only = app_settings.get("monitored_only", True)
# skip_item_refresh setting removed as it was a performance bottleneck
search_mode = app_settings.get("search_mode", "movie") # Default to movie mode if not specified
-
- eros_logger.info(f"Using search mode: {search_mode} for quality upgrades")
-
+
+ eros_logger.info("Using search mode: %s for quality upgrades", search_mode)
+
# Use the new hunt_upgrade_items parameter name, falling back to hunt_upgrade_scenes for backwards compatibility
hunt_upgrade_items = app_settings.get("hunt_upgrade_items", app_settings.get("hunt_upgrade_scenes", 0))
-
- # Use advanced settings from database for command operations
- command_wait_delay = get_advanced_setting("command_wait_delay", 1)
- command_wait_attempts = get_advanced_setting("command_wait_attempts", 600)
- state_reset_interval_hours = get_advanced_setting("stateful_management_hours", 168)
-
+
# Log that we're using Eros API v3
- eros_logger.debug(f"Using Eros API v3 for instance: {instance_name}")
+ eros_logger.debug("Using Eros API v3 for instance: %s", instance_name)
# Skip if hunt_upgrade_items is set to 0
if hunt_upgrade_items <= 0:
@@ -82,20 +68,20 @@ def process_cutoff_upgrades(
return False
# Get items eligible for upgrade
- eros_logger.info(f"Retrieving items eligible for cutoff upgrade...")
+ eros_logger.info("Retrieving items eligible for cutoff upgrade...")
upgrade_eligible_data = eros_api.get_quality_upgrades(api_url, api_key, api_timeout, monitored_only, search_mode)
-
+
if not upgrade_eligible_data:
eros_logger.info("No items found eligible for upgrade or error retrieving them.")
return False
-
+
# Check for stop signal after retrieving eligible items
if stop_check():
eros_logger.info("Stop requested after retrieving upgrade eligible items. Aborting...")
return False
-
- eros_logger.info(f"Found {len(upgrade_eligible_data)} items eligible for quality upgrade.")
-
+
+ eros_logger.info("Found %s items eligible for quality upgrade.", len(upgrade_eligible_data))
+
# Filter out already processed items using stateful management
unprocessed_items = []
for item in upgrade_eligible_data:
@@ -103,48 +89,48 @@ def process_cutoff_upgrades(
if not is_processed("eros", instance_name, item_id):
unprocessed_items.append(item)
else:
- eros_logger.debug(f"Skipping already processed item ID: {item_id}")
-
- eros_logger.info(f"Found {len(unprocessed_items)} unprocessed items out of {len(upgrade_eligible_data)} total items eligible for quality upgrade.")
-
+ eros_logger.debug("Skipping already processed item ID: %s", item_id)
+
+ eros_logger.info("Found %s unprocessed items out of %s total items eligible for quality upgrade.", len(unprocessed_items), len(upgrade_eligible_data))
+
if not unprocessed_items:
- eros_logger.info(f"No unprocessed items found for {instance_name}. All available items have been processed.")
+ eros_logger.info("No unprocessed items found for %s. All available items have been processed.", instance_name)
return False
-
+
items_processed = 0
processing_done = False
-
+
# Always use random selection for upgrades
- eros_logger.info(f"Randomly selecting up to {hunt_upgrade_items} items for quality upgrade.")
+ eros_logger.info("Randomly selecting up to %s items for quality upgrade.", hunt_upgrade_items)
items_to_upgrade = random.sample(unprocessed_items, min(len(unprocessed_items), hunt_upgrade_items))
-
- eros_logger.info(f"Selected {len(items_to_upgrade)} items for quality upgrade.")
-
+
+ eros_logger.info("Selected %s items for quality upgrade.", len(items_to_upgrade))
+
# Process selected items
for item in items_to_upgrade:
# Check for stop signal before each item
if stop_check():
eros_logger.info("Stop requested during item processing. Aborting...")
break
-
+
# Check API limit before processing each item
try:
if check_hourly_cap_exceeded("eros"):
-                    eros_logger.warning(f"🛑 Eros API hourly limit reached - stopping upgrade processing after {items_processed} items")
+                    eros_logger.warning("🛑 Eros API hourly limit reached - stopping upgrade processing after %s items", items_processed)
break
except Exception as e:
- eros_logger.error(f"Error checking hourly API cap: {e}")
+ eros_logger.error("Error checking hourly API cap: %s", e)
# Continue processing if cap check fails - safer than stopping
-
+
# Re-check limit in case it changed
current_limit = app_settings.get("hunt_upgrade_items", app_settings.get("hunt_upgrade_scenes", 1))
if items_processed >= current_limit:
- eros_logger.info(f"Reached HUNT_UPGRADE_ITEMS limit ({current_limit}) for this cycle.")
+ eros_logger.info("Reached HUNT_UPGRADE_ITEMS limit (%s) for this cycle.", current_limit)
break
-
+
item_id = item.get("id")
title = item.get("title", "Unknown Title")
-
+
# For movies, we don't use season/episode format
if search_mode == "movie":
item_info = title
@@ -161,62 +147,58 @@ def process_cutoff_upgrades(
item_info = title
# Legacy episode quality path
current_quality = item.get("episodeFile", {}).get("quality", {}).get("quality", {}).get("name", "Unknown")
-
- eros_logger.info(f"Processing item for quality upgrade: \"{item_info}\" (Item ID: {item_id})")
- eros_logger.info(f" - Current quality: {current_quality}")
-
+
+ eros_logger.info("Processing item for quality upgrade: \"%s\" (Item ID: %s)", item_info, item_id)
+ eros_logger.info(" - Current quality: %s", current_quality)
+
# Mark the item as processed BEFORE triggering any searches
add_processed_id("eros", instance_name, str(item_id))
- eros_logger.debug(f"Added item ID {item_id} to processed list for {instance_name}")
-
- # Refresh the item information if not skipped
- refresh_command_id = None
- # Refresh functionality has been removed as it was identified as a performance bottleneck
-
+ eros_logger.debug("Added item ID %s to processed list for %s", item_id, instance_name)
+
# Check for stop signal before searching
if stop_check():
- eros_logger.info(f"Stop requested before searching for {title}. Aborting...")
+ eros_logger.info("Stop requested before searching for %s. Aborting...", title)
break
-
+
# Search for the item
eros_logger.info(" - Searching for quality upgrade...")
search_command_id = eros_api.item_search(api_url, api_key, api_timeout, [item_id])
if search_command_id:
- eros_logger.info(f"Triggered search command {search_command_id}. Assuming success for now.")
-
+ eros_logger.info("Triggered search command %s. Assuming success for now.", search_command_id)
+
# Tag the movie if enabled
if tag_processed_items:
from src.primary.settings_manager import get_custom_tag
custom_tag = get_custom_tag("eros", "upgrade", "huntarr-upgraded")
try:
eros_api.tag_processed_movie(api_url, api_key, api_timeout, item_id, custom_tag)
- eros_logger.debug(f"Tagged movie {item_id} with '{custom_tag}'")
+ eros_logger.debug("Tagged movie %s with '%s'", item_id, custom_tag)
except Exception as e:
- eros_logger.warning(f"Failed to tag movie {item_id} with '{custom_tag}': {e}")
-
+ eros_logger.warning("Failed to tag movie %s with '%s': %s", item_id, custom_tag, e)
+
# Log to history so the upgrade appears in the history UI
log_processed_media("eros", item_info, item_id, instance_name, "upgrade")
- eros_logger.debug(f"Logged quality upgrade to history for item ID {item_id}")
-
+ eros_logger.debug("Logged quality upgrade to history for item ID %s", item_id)
+
items_processed += 1
processing_done = True
-
+
# Increment the upgraded statistics for Eros
increment_stat("eros", "upgraded", 1)
- eros_logger.debug(f"Incremented eros upgraded statistics by 1")
-
+ eros_logger.debug("Incremented eros upgraded statistics by 1")
+
# Log progress
current_limit = app_settings.get("hunt_upgrade_items", app_settings.get("hunt_upgrade_scenes", 1))
- eros_logger.info(f"Processed {items_processed}/{current_limit} items for quality upgrade this cycle.")
+ eros_logger.info("Processed %s/%s items for quality upgrade this cycle.", items_processed, current_limit)
else:
- eros_logger.warning(f"Failed to trigger search command for item ID {item_id}.")
+ eros_logger.warning("Failed to trigger search command for item ID %s.", item_id)
# Do not mark as processed if search couldn't be triggered
continue
-
+
# Log final status
if items_processed > 0:
- eros_logger.info(f"Completed processing {items_processed} items for quality upgrade for this cycle.")
+ eros_logger.info("Completed processing %s items for quality upgrade for this cycle.", items_processed)
else:
eros_logger.info("No new items were processed for quality upgrade in this run.")
-
+
return processing_done
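
Both files above convert eager f-string log calls to the standard library's lazy %-style form. A minimal sketch of the difference, with an illustrative logger name and values, assuming only the stdlib logging module:

import logging

eros_logger = logging.getLogger("eros")  # illustrative logger name

# f-string form: the message is formatted even if DEBUG is disabled
eros_logger.debug(f"Tagged movie {42} with 'huntarr-missing'")

# %-style form: logging defers interpolation until the record is emitted,
# so suppressed levels skip the string formatting entirely
eros_logger.debug("Tagged movie %s with '%s'", 42, "huntarr-missing")

The deferred form also keeps the message template constant, which makes it easier for log tooling to group identical events.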
diff --git a/src/primary/apps/eros_routes.py b/src/primary/apps/eros_routes.py
index 7e58b703..f335fa56 100644
--- a/src/primary/apps/eros_routes.py
+++ b/src/primary/apps/eros_routes.py
@@ -1,43 +1,39 @@
#!/usr/bin/env python3
-from flask import Blueprint, request, jsonify
-import datetime, os, requests
-
-from src.primary.state import reset_state_file
-from src.primary.utils.logger import get_logger, APP_LOG_FILES
-from src.primary.settings_manager import load_settings, get_ssl_verify_setting
import traceback
import socket
from urllib.parse import urlparse
-from src.primary.apps.eros import api as eros_api
+
+from flask import Blueprint, request, jsonify
+import requests
+
from src.primary.apps.eros import get_configured_instances
-# Import centralized path configuration
-from src.primary.utils.config_paths import CONFIG_PATH
+from src.primary.utils.logger import get_logger
+from src.primary.settings_manager import load_settings, get_ssl_verify_setting
eros_bp = Blueprint('eros', __name__)
eros_logger = get_logger("eros")
-# State management now handled directly through database calls
def test_connection(url, api_key):
# Validate URL format
if not (url.startswith('http://') or url.startswith('https://')):
- eros_logger.warning(f"API URL missing http(s) scheme: {url}")
+ eros_logger.warning("API URL missing http(s) scheme: %s", url)
url = f"http://{url}"
- eros_logger.debug(f"Auto-correcting URL to: {url}")
-
+ eros_logger.debug("Auto-correcting URL to: %s", url)
+
# Try to establish a socket connection first to check basic connectivity
parsed_url = urlparse(url)
hostname = parsed_url.hostname
port = parsed_url.port or (443 if parsed_url.scheme == 'https' else 80)
-
+
try:
# Try socket connection for quick feedback on connectivity issues
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3) # Short timeout for quick feedback
result = sock.connect_ex((hostname, port))
sock.close()
-
+
if result != 0:
error_msg = f"Connection refused - Unable to connect to {hostname}:{port}. Please check if the server is running and the port is correct."
eros_logger.error(error_msg)
@@ -48,42 +44,40 @@ def test_connection(url, api_key):
return {"success": False, "message": error_msg}
except Exception as e:
# Log the socket testing error but continue with the full request
- eros_logger.debug(f"Socket test error, continuing with full request: {str(e)}")
-
+ eros_logger.debug("Socket test error, continuing with full request: %s", str(e))
+
# For Eros, we only use v3 API path
api_url = f"{url.rstrip('/')}/api/v3/system/status"
headers = {'X-Api-Key': api_key}
-
+
# Get SSL verification setting
verify_ssl = get_ssl_verify_setting()
-
+
if not verify_ssl:
eros_logger.debug("SSL verification disabled by user setting for connection test")
-
+
try:
# Make the request with appropriate timeouts
- eros_logger.debug(f"Trying API path: {api_url}")
+ eros_logger.debug("Trying API path: %s", api_url)
response = requests.get(api_url, headers=headers, timeout=(5, 30), verify=verify_ssl)
-
+
try:
response.raise_for_status()
-
+
# Check if we got a valid JSON response
try:
response_data = response.json()
-
+
# Verify this is actually an Eros server by checking for version
version = response_data.get('version')
if not version:
error_msg = "API response doesn't contain version information. This doesn't appear to be a valid Eros server."
eros_logger.error(error_msg)
return {"success": False, "message": error_msg}
-
+
# Version check - should be v3.x for Eros
if version.startswith('3'):
detected_version = "v3"
-
-
# Success!
return {"success": True, "message": "Successfully connected to Eros API", "version": version, "api_version": detected_version}
elif version.startswith('2'):
@@ -96,9 +90,9 @@ def test_connection(url, api_key):
return {"success": False, "message": error_msg}
except ValueError:
error_msg = "Invalid JSON response from Eros API - This doesn't appear to be a valid Eros server"
- eros_logger.error(f"{error_msg}. Response content: {response.text[:200]}")
+ eros_logger.error("%s. Response content: %s", error_msg, response.text[:200])
return {"success": False, "message": error_msg}
-
+
except requests.exceptions.HTTPError:
# Handle specific HTTP errors
if response.status_code == 401:
@@ -113,35 +107,36 @@ def test_connection(url, api_key):
error_msg = f"Eros server error (HTTP {response.status_code}): The Eros server is experiencing issues"
eros_logger.error(error_msg)
return {"success": False, "message": error_msg}
-
+
except requests.exceptions.ConnectionError as e:
# Connection error - server might be down or unreachable
error_details = str(e)
-
+
if "Connection refused" in error_details:
error_msg = f"Connection refused - Eros is not running on {url} or the port is incorrect"
else:
error_msg = f"Connection error - Check if Eros is running: {error_details}"
-
+
eros_logger.error(error_msg)
return {"success": False, "message": error_msg}
-
+
except requests.exceptions.Timeout:
- error_msg = f"Connection timed out - Eros took too long to respond"
+ error_msg = "Connection timed out - Eros took too long to respond"
eros_logger.error(error_msg)
return {"success": False, "message": error_msg}
-
+
except Exception as e:
error_msg = f"Unexpected error: {str(e)}"
- eros_logger.error(f"{error_msg}\n{traceback.format_exc()}")
+ eros_logger.error("%s\n%s", error_msg, traceback.format_exc())
return {"success": False, "message": error_msg}
+
@eros_bp.route('/status', methods=['GET'])
def get_status():
"""Get the status of all configured Eros instances"""
try:
instances = get_configured_instances()
- eros_logger.debug(f"Eros configured instances: {instances}")
+ eros_logger.debug("Eros configured instances: %s", instances)
if instances:
connected_count = 0
for instance in instances:
@@ -157,64 +152,45 @@ def get_status():
eros_logger.debug("No Eros instances configured")
return jsonify({"configured": False, "connected": False})
except Exception as e:
- eros_logger.error(f"Error getting Eros status: {str(e)}")
+ eros_logger.error("Error getting Eros status: %s", str(e))
return jsonify({"configured": False, "connected": False, "error": str(e)})
+
@eros_bp.route('/test-connection', methods=['POST'])
def test_connection_endpoint():
"""Test connection to an Eros API instance"""
data = request.json
api_url = data.get('api_url')
api_key = data.get('api_key')
- api_timeout = data.get('api_timeout', 30) # Use longer timeout for connection test
-
+
if not api_url or not api_key:
return jsonify({"success": False, "message": "API URL and API Key are required"}), 400
-
-
# Auto-correct URL if missing http(s) scheme
if not (api_url.startswith('http://') or api_url.startswith('https://')):
- eros_logger.warning(f"API URL missing http(s) scheme: {api_url}")
+ eros_logger.warning("API URL missing http(s) scheme: %s", api_url)
api_url = f"http://{api_url}"
- eros_logger.debug(f"Auto-correcting URL to: {api_url}")
-
+ eros_logger.debug("Auto-correcting URL to: %s", api_url)
+
return test_connection(api_url, api_key)
+
@eros_bp.route('/test-settings', methods=['GET'])
def test_eros_settings():
"""Debug endpoint to test Eros settings loading from database"""
try:
results = {}
-
+
# Load settings from database via settings_manager
try:
- from src.primary.settings_manager import load_settings
settings = load_settings("eros")
results["database_settings"] = settings
results["configured"] = bool(settings.get("url") and settings.get("api_key"))
except Exception as e:
results["database_error"] = str(e)
-
+
results["note"] = "Settings are now stored in database. Legacy JSON files are no longer used."
-
+
return jsonify(results)
except Exception as e:
return jsonify({"error": str(e)})
-
-@eros_bp.route('/reset-processed', methods=['POST'])
-def reset_processed_state():
- """Reset the processed state files for Eros"""
- try:
- # Reset the state files for missing and upgrades
- reset_state_file("eros", "processed_missing")
- reset_state_file("eros", "processed_upgrades")
-
- eros_logger.info("Successfully reset Eros processed state files")
- return jsonify({"success": True, "message": "Successfully reset processed state"})
- except Exception as e:
- error_msg = f"Error resetting Eros state: {str(e)}"
- eros_logger.error(error_msg)
- return jsonify({"success": False, "message": error_msg}), 500
-
-
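
test_connection above probes the host with a short-lived socket before making the HTTP request, so unreachable servers fail fast with a clearer message. A rough sketch of that pre-check using only the stdlib; the helper name can_reach is made up for illustration:

import socket
from urllib.parse import urlparse

def can_reach(url: str, timeout: float = 3.0) -> bool:
    """Return True if a TCP connection to the URL's host and port succeeds."""
    parsed = urlparse(url)
    port = parsed.port or (443 if parsed.scheme == 'https' else 80)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(timeout)
    try:
        # connect_ex returns 0 on success instead of raising
        return sock.connect_ex((parsed.hostname, port)) == 0
    finally:
        sock.close()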
diff --git a/src/primary/apps/lidarr/missing.py b/src/primary/apps/lidarr/missing.py
index a2a4dc3b..09bbca20 100644
--- a/src/primary/apps/lidarr/missing.py
+++ b/src/primary/apps/lidarr/missing.py
@@ -4,19 +4,16 @@
Handles missing albums or artists based on configuration.
"""
-import time
import random
-import datetime
-import os
-import json
+import time
from typing import Dict, Any, Callable
-from src.primary.utils.logger import get_logger
+
from src.primary.apps.lidarr import api as lidarr_api
from src.primary.stats_manager import increment_stat, check_hourly_cap_exceeded
from src.primary.stateful_manager import is_processed, add_processed_id
from src.primary.utils.history_utils import log_processed_media
-from src.primary.settings_manager import load_settings, get_advanced_setting
-from src.primary.state import check_state_reset
+from src.primary.settings_manager import get_custom_tag, load_settings, get_advanced_setting
+from src.primary.utils.logger import get_logger
# Get the logger for the Lidarr module
lidarr_logger = get_logger(__name__) # Use __name__ for correct logger hierarchy
@@ -36,43 +33,37 @@ def process_missing_albums(
Returns:
bool: True if any items were processed, False otherwise.
"""
-
+
# Copy instance-specific information
instance_name = app_settings.get("instance_name", "Default")
api_url = app_settings.get("api_url", "").strip()
api_key = app_settings.get("api_key", "").strip()
api_timeout = get_advanced_setting("api_timeout", 120) # Use database value
monitored_only = app_settings.get("monitored_only", True)
- skip_future_releases = app_settings.get("skip_future_releases", False)
hunt_missing_items = app_settings.get("hunt_missing_items", 0)
hunt_missing_mode = app_settings.get("hunt_missing_mode", "album")
command_wait_delay = get_advanced_setting("command_wait_delay", 1)
- command_wait_attempts = get_advanced_setting("command_wait_attempts", 600)
-
+
# Early exit for disabled features
if not api_url or not api_key:
- lidarr_logger.warning(f"Missing API URL or API key, skipping missing processing for {instance_name}")
+ lidarr_logger.warning("Missing API URL or API key, skipping missing processing for %s", instance_name)
return False
-
+
if hunt_missing_items <= 0:
- lidarr_logger.debug(f"Hunting for missing items is disabled (hunt_missing_items={hunt_missing_items}) for {instance_name}")
+ lidarr_logger.debug("Hunting for missing items is disabled (hunt_missing_items=%s) for %s", hunt_missing_items, instance_name)
return False
-
+
# Make sure any requested stop function is executable
stop_check = stop_check if callable(stop_check) else lambda: False
-
- lidarr_logger.info(f"Looking for missing albums for {instance_name}")
- lidarr_logger.debug(f"Processing up to {hunt_missing_items} missing items in {hunt_missing_mode} mode")
-
- # Reset state files if enough time has passed
- check_state_reset("lidarr")
-
+
+ lidarr_logger.info("Looking for missing albums for %s", instance_name)
+ lidarr_logger.debug("Processing up to %s missing items in %s mode", hunt_missing_items, hunt_missing_mode)
+
# Initialize processed counter and tracking containers
processed_count = 0
- processed_any = False
processed_artists_or_albums = set()
total_items_to_process = hunt_missing_items
-
+
# Load settings to check if tagging is enabled
lidarr_settings = load_settings("lidarr")
tag_processed_items = lidarr_settings.get("tag_processed_items", True)
@@ -85,85 +76,85 @@ def process_missing_albums(
missing_albums_data = lidarr_api.get_missing_albums_random_page(
api_url, api_key, api_timeout, monitored_only, total_items_to_process * 2
)
-
+
if missing_albums_data is None:
lidarr_logger.error("Failed to retrieve missing albums from Lidarr API.")
return False
-
+
if not missing_albums_data:
lidarr_logger.info("No missing albums found.")
return False
-
- lidarr_logger.info(f"Retrieved {len(missing_albums_data)} missing albums from random page selection.")
-
+
+ lidarr_logger.info("Retrieved %s missing albums from random page selection.", len(missing_albums_data))
+
# Convert to the expected format for album processing - keep IDs as integers
unprocessed_entities = []
for album in missing_albums_data:
album_id = album.get("id") # Keep as integer, don't convert to string
if album_id and not is_processed("lidarr", instance_name, str(album_id)): # Convert to string only for processed check
unprocessed_entities.append(album_id)
-
+
search_entity_type = "album"
-
+
elif hunt_missing_mode == "artist":
# For artist mode, we still need to get all missing albums to group by artist
lidarr_logger.info("Retrieving missing albums for artist-based processing...")
missing_albums_data = lidarr_api.get_missing_albums(api_url, api_key, api_timeout, monitored_only)
-
+
if missing_albums_data is None:
lidarr_logger.error("Failed to retrieve missing albums from Lidarr API.")
return False
-
+
if not missing_albums_data:
lidarr_logger.info("No missing albums found.")
return False
-
- lidarr_logger.info(f"Retrieved {len(missing_albums_data)} missing albums.")
+
+ lidarr_logger.info("Retrieved %s missing albums.", len(missing_albums_data))
# Group by artist ID
items_by_artist = {}
for item in missing_albums_data: # Use the potentially filtered missing_items list
artist_id = item.get('artistId')
- lidarr_logger.debug(f"Missing album item: {item.get('title')} by artistId: {artist_id}")
+ lidarr_logger.debug("Missing album item: %s by artistId: %s", item.get('title'), artist_id)
if artist_id:
if artist_id not in items_by_artist:
items_by_artist[artist_id] = []
items_by_artist[artist_id].append(item)
-
+
# In artist mode, map from artists to their albums
# First, get all artist IDs
target_entities = list(items_by_artist.keys())
-
+
# Filter out already processed artists
- lidarr_logger.info(f"Found {len(target_entities)} artists with missing albums before filtering")
- unprocessed_entities = [eid for eid in target_entities
+ lidarr_logger.info("Found %s artists with missing albums before filtering", len(target_entities))
+ unprocessed_entities = [eid for eid in target_entities
if not is_processed("lidarr", instance_name, str(eid))]
-
- lidarr_logger.info(f"Found {len(unprocessed_entities)} unprocessed artists out of {len(target_entities)} total")
+
+ lidarr_logger.info("Found %s unprocessed artists out of %s total", len(unprocessed_entities), len(target_entities))
search_entity_type = "artist"
else:
# Fallback case - this should not normally be reached
- lidarr_logger.error(f"Invalid hunt_missing_mode: {hunt_missing_mode}. Expected 'album' or 'artist'.")
+ lidarr_logger.error("Invalid hunt_missing_mode: %s. Expected 'album' or 'artist'.", hunt_missing_mode)
return False
-
+
if not unprocessed_entities:
- lidarr_logger.info(f"No unprocessed {search_entity_type}s found for {instance_name}. All available {search_entity_type}s have been processed.")
+ lidarr_logger.info("No unprocessed %ss found for %s. All available %ss have been processed.", search_entity_type, instance_name, search_entity_type)
return False
-
+
# Select entities to search
entities_to_search_ids = random.sample(unprocessed_entities, min(len(unprocessed_entities), total_items_to_process))
- lidarr_logger.info(f"Randomly selected {len(entities_to_search_ids)} {search_entity_type}s to search.")
- lidarr_logger.debug(f"Unprocessed entities: {unprocessed_entities}")
- lidarr_logger.debug(f"Entities to search: {entities_to_search_ids}")
+ lidarr_logger.info("Randomly selected %s %ss to search.", len(entities_to_search_ids), search_entity_type)
+ lidarr_logger.debug("Unprocessed entities: %s", unprocessed_entities)
+ lidarr_logger.debug("Entities to search: %s", entities_to_search_ids)
# --- Trigger Search (Artist or Album) ---
if hunt_missing_mode == "artist":
- lidarr_logger.info(f"Artist-based missing mode selected")
- lidarr_logger.info(f"Found {len(entities_to_search_ids)} unprocessed artists to search.")
-
+ lidarr_logger.info("Artist-based missing mode selected")
+ lidarr_logger.info("Found %s unprocessed artists to search.", len(entities_to_search_ids))
+
# Prepare a list for artist details log
artist_details_log = []
-
+
# First, fetch detailed artist info for each artist ID to enhance logs
artist_details = {}
for artist_id in entities_to_search_ids:
@@ -171,13 +162,13 @@ def process_missing_albums(
artist_data = lidarr_api.get_artist_by_id(api_url, api_key, api_timeout, artist_id)
if artist_data:
artist_details[artist_id] = artist_data
-
- lidarr_logger.info(f"Artists selected for processing in this cycle:")
+
+ lidarr_logger.info("Artists selected for processing in this cycle:")
for i, artist_id in enumerate(entities_to_search_ids):
# Get artist name and any additional details
artist_name = f"Artist ID {artist_id}" # Default if name not found
artist_metadata = ""
-
+
if artist_id in artist_details:
artist_data = artist_details[artist_id]
artist_name = artist_data.get('artistName', artist_name)
@@ -192,24 +183,24 @@ def process_missing_albums(
artist_metadata = f"{artist_metadata} - {genres}"
else:
artist_metadata = f"({genres})"
-
+
detail_line = f"{i+1}. {artist_name} {artist_metadata} - ID: {artist_id}"
artist_details_log.append(detail_line)
- lidarr_logger.info(f" {detail_line}")
-
- lidarr_logger.info(f"Triggering Artist Search for {len(entities_to_search_ids)} artists on {instance_name}...")
+ lidarr_logger.info(" %s", detail_line)
+
+ lidarr_logger.info("Triggering Artist Search for %s artists on %s...", len(entities_to_search_ids), instance_name)
for i, artist_id in enumerate(entities_to_search_ids):
if stop_check(): # Use the new stop_check function
lidarr_logger.warning("Shutdown requested during artist search trigger.")
break
-
+
# Check API limit before processing each artist
try:
if check_hourly_cap_exceeded("lidarr"):
-                    lidarr_logger.warning(f"🛑 Lidarr API hourly limit reached - stopping artist processing after {processed_count} artists")
+                    lidarr_logger.warning("🛑 Lidarr API hourly limit reached - stopping artist processing after %s artists", processed_count)
break
except Exception as e:
- lidarr_logger.error(f"Error checking hourly API cap: {e}")
+ lidarr_logger.error("Error checking hourly API cap: %s", e)
# Continue processing if cap check fails - safer than stopping
# Get artist name from cached details or first album
@@ -222,45 +213,44 @@ def process_missing_albums(
first_album = items_by_artist[artist_id][0]
artist_info = first_album.get('artist')
if artist_info and isinstance(artist_info, dict):
- artist_name = artist_info.get('artistName', artist_name)
-
+ artist_name = artist_info.get('artistName', artist_name)
+
# Mark the artist as processed right away - BEFORE triggering the search
success = add_processed_id("lidarr", instance_name, str(artist_id))
- lidarr_logger.debug(f"Added artist ID {artist_id} to processed list for {instance_name}, success: {success}")
-
+ lidarr_logger.debug("Added artist ID %s to processed list for %s, success: %s", artist_id, instance_name, success)
+
# Trigger the search AFTER marking as processed
command_result = lidarr_api.search_artist(api_url, api_key, api_timeout, artist_id)
command_id = command_result.get('id', 'unknown') if command_result else 'failed'
- lidarr_logger.info(f"Triggered Lidarr ArtistSearch for artist ID: {artist_id}, Command ID: {command_id}")
-
+ lidarr_logger.info("Triggered Lidarr ArtistSearch for artist ID: %s, Command ID: %s", artist_id, command_id)
+
# Increment stats for UI tracking
if command_result:
increment_stat("lidarr", "hunted")
processed_count += 1 # Count successful searches
processed_artists_or_albums.add(artist_id)
-
+
# Tag the artist if enabled
if tag_processed_items:
- from src.primary.settings_manager import get_custom_tag
custom_tag = get_custom_tag("lidarr", "missing", "huntarr-missing")
try:
lidarr_api.tag_processed_artist(api_url, api_key, api_timeout, artist_id, custom_tag)
- lidarr_logger.debug(f"Tagged artist {artist_id} with '{custom_tag}'")
+ lidarr_logger.debug("Tagged artist %s with '%s'", artist_id, custom_tag)
except Exception as e:
- lidarr_logger.warning(f"Failed to tag artist {artist_id} with '{custom_tag}': {e}")
-
+ lidarr_logger.warning("Failed to tag artist %s with '%s': %s", artist_id, custom_tag, e)
+
# Also mark all albums from this artist as processed
if artist_id in items_by_artist:
for album in items_by_artist[artist_id]:
album_id = album.get('id')
if album_id:
album_success = add_processed_id("lidarr", instance_name, str(album_id))
- lidarr_logger.debug(f"Added album ID {album_id} to processed list for {instance_name}, success: {album_success}")
-
+ lidarr_logger.debug("Added album ID %s to processed list for %s, success: %s", album_id, instance_name, album_success)
+
# Log to history system
log_processed_media("lidarr", f"{artist_name}", artist_id, instance_name, "missing")
- lidarr_logger.debug(f"Logged history entry for artist: {artist_name}")
-
+ lidarr_logger.debug("Logged history entry for artist: %s", artist_name)
+
time.sleep(0.1) # Small delay between triggers
else: # Album mode
album_ids_to_search = list(entities_to_search_ids)
@@ -272,20 +262,20 @@ def process_missing_albums(
album_details_log = []
# Create a dict for quick lookup based on album ID
missing_items_dict = {item['id']: item for item in missing_albums_data if 'id' in item}
-
+
# First, fetch additional album details for better logging if needed
album_details = {}
for album_id in album_ids_to_search:
album_details[album_id] = lidarr_api.get_albums(api_url, api_key, api_timeout, album_id)
-
- lidarr_logger.info(f"Albums selected for processing in this cycle:")
+
+ lidarr_logger.info("Albums selected for processing in this cycle:")
for idx, album_id in enumerate(album_ids_to_search):
album_info = missing_items_dict.get(album_id)
if album_info:
# Safely get title and artist name, provide defaults
title = album_info.get('title', f'Album ID {album_id}')
artist_name = album_info.get('artist', {}).get('artistName', 'Unknown Artist')
-
+
# Get additional metadata if available
release_year = ""
if 'releaseDate' in album_info and album_info['releaseDate']:
@@ -294,40 +284,39 @@ def process_missing_albums(
release_year = f"({release_date[:4]})"
except (IndexError, ValueError):
pass
-
+
# Get quality if available
quality_info = ""
if album_details.get(album_id) and 'quality' in album_details[album_id]:
quality = album_details[album_id]['quality'].get('quality', {}).get('name', '')
if quality:
quality_info = f"[{quality}]"
-
+
detail_line = f"{idx+1}. {artist_name} - {title} {release_year} {quality_info} - ID: {album_id}"
album_details_log.append(detail_line)
- lidarr_logger.info(f" {detail_line}")
+ lidarr_logger.info(" %s", detail_line)
else:
# Fallback if album ID wasn't found in the fetched missing items (should be rare)
detail_line = f"{idx+1}. Album ID {album_id} (Details not found)"
album_details_log.append(detail_line)
- lidarr_logger.info(f" {detail_line}")
+ lidarr_logger.info(" %s", detail_line)
# Mark the albums as processed BEFORE triggering the search
for album_id in album_ids_to_search:
success = add_processed_id("lidarr", instance_name, str(album_id))
- lidarr_logger.debug(f"Added album ID {album_id} to processed list for {instance_name}, success: {success}")
-
+ lidarr_logger.debug("Added album ID %s to processed list for %s, success: %s", album_id, instance_name, success)
+
# Now trigger the search
command_id = lidarr_api.search_albums(api_url, api_key, api_timeout, album_ids_to_search)
if command_id:
# Log after successful search
- lidarr_logger.debug(f"Album search command triggered with ID: {command_id} for albums: [{', '.join(album_details_log)}]")
+ lidarr_logger.debug("Album search command triggered with ID: %s for albums: [%s]", command_id, ', '.join(album_details_log))
increment_stat("lidarr", "hunted") # Changed from "missing" to "hunted"
processed_count += len(album_ids_to_search) # Count albums searched
processed_artists_or_albums.update(album_ids_to_search)
-
+
# Tag artists if enabled (from albums)
if tag_processed_items:
- from src.primary.settings_manager import get_custom_tag
custom_tag = get_custom_tag("lidarr", "missing", "huntarr-missing")
tagged_artists = set() # Track which artists we've already tagged
for album_id in album_ids_to_search:
@@ -337,11 +326,11 @@ def process_missing_albums(
if artist_id and artist_id not in tagged_artists:
try:
lidarr_api.tag_processed_artist(api_url, api_key, api_timeout, artist_id, custom_tag)
- lidarr_logger.debug(f"Tagged artist {artist_id} with '{custom_tag}'")
+ lidarr_logger.debug("Tagged artist %s with '%s'", artist_id, custom_tag)
tagged_artists.add(artist_id)
except Exception as e:
- lidarr_logger.warning(f"Failed to tag artist {artist_id} with '{custom_tag}': {e}")
-
+ lidarr_logger.warning("Failed to tag artist %s with '%s': %s", artist_id, custom_tag, e)
+
# Log to history system
for album_id in album_ids_to_search:
album_info = missing_items_dict.get(album_id)
@@ -351,15 +340,15 @@ def process_missing_albums(
artist_name = album_info.get('artist', {}).get('artistName', 'Unknown Artist')
media_name = f"{artist_name} - {title}"
log_processed_media("lidarr", media_name, album_id, instance_name, "missing")
- lidarr_logger.debug(f"Logged history entry for album: {media_name}")
-
+ lidarr_logger.debug("Logged history entry for album: %s", media_name)
+
time.sleep(command_wait_delay) # Basic delay after the single command
else:
- lidarr_logger.warning(f"Failed to trigger album search for IDs {album_ids_to_search} on {instance_name}.")
+ lidarr_logger.warning("Failed to trigger album search for IDs %s on %s.", album_ids_to_search, instance_name)
except Exception as e:
- lidarr_logger.error(f"An error occurred during missing album processing for {instance_name}: {e}", exc_info=True)
+ lidarr_logger.error("An error occurred during missing album processing for %s: %s", instance_name, e, exc_info=True)
return False
- lidarr_logger.info(f"Missing album processing finished for {instance_name}. Processed {processed_count} items/searches ({len(processed_artists_or_albums)} unique {search_entity_type}s).")
- return processed_count > 0
\ No newline at end of file
+ lidarr_logger.info("Missing album processing finished for %s. Processed %s items/searches (%s unique %ss).", instance_name, processed_count, len(processed_artists_or_albums), search_entity_type)
+ return processed_count > 0
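
The missing-album flow marks every selected ID as processed before the search command is sent, so an interrupted cycle cannot hunt the same item twice. A sketch of that guard, reusing the signatures shown above and assuming it runs inside the Huntarr source tree; the IDs and instance name are illustrative:

from src.primary.stateful_manager import is_processed, add_processed_id

instance_name = "Lidarr Default"     # illustrative
candidate_ids = [101, 202, 303]      # illustrative album IDs

# Drop anything already handled in an earlier cycle
unprocessed = [i for i in candidate_ids
               if not is_processed("lidarr", instance_name, str(i))]

for album_id in unprocessed:
    # Record the ID first; the search command is only triggered afterwards
    add_processed_id("lidarr", instance_name, str(album_id))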
diff --git a/src/primary/apps/lidarr/upgrade.py b/src/primary/apps/lidarr/upgrade.py
index a0d4358b..ab68a9dc 100644
--- a/src/primary/apps/lidarr/upgrade.py
+++ b/src/primary/apps/lidarr/upgrade.py
@@ -6,20 +6,20 @@
import time
import random
-from typing import Dict, Any, Optional, Callable, List, Union, Set # Added List, Union and Set
+from typing import Any, Callable
+
from src.primary.utils.logger import get_logger
from src.primary.apps.lidarr import api as lidarr_api
from src.primary.utils.history_utils import log_processed_media
from src.primary.stateful_manager import is_processed, add_processed_id
from src.primary.stats_manager import increment_stat, check_hourly_cap_exceeded
-from src.primary.settings_manager import load_settings, get_advanced_setting
-from src.primary.state import check_state_reset # Add the missing import
+from src.primary.settings_manager import get_custom_tag, load_settings, get_advanced_setting
+
+lidarr_logger = get_logger(__name__)
-# Get logger for the app
-lidarr_logger = get_logger(__name__) # Use __name__ for correct logger hierarchy
def process_cutoff_upgrades(
- app_settings: Dict[str, Any], # Changed signature: Use app_settings
+ app_settings: dict[str, Any], # Changed signature: Use app_settings
stop_check: Callable[[], bool] # Changed signature: Use stop_check
) -> bool:
"""
@@ -38,42 +38,38 @@ def process_cutoff_upgrades(
# --- Extract Settings --- #
# Instance details are now part of app_settings passed from background loop
instance_name = app_settings.get("instance_name", "Lidarr Default")
-
+
# Extract necessary settings
api_url = app_settings.get("api_url", "").strip()
api_key = app_settings.get("api_key", "").strip()
api_timeout = get_advanced_setting("api_timeout", 120) # Use database value
-
+
# Get command wait settings from database
command_wait_delay = get_advanced_setting("command_wait_delay", 1)
- command_wait_attempts = get_advanced_setting("command_wait_attempts", 600)
# General Lidarr settings (also from app_settings)
hunt_upgrade_items = app_settings.get("hunt_upgrade_items", 0)
monitored_only = app_settings.get("monitored_only", True)
- lidarr_logger.info(f"Using API timeout of {api_timeout} seconds for Lidarr upgrades")
+ lidarr_logger.info("Using API timeout of %s seconds for Lidarr upgrades", api_timeout)
- lidarr_logger.debug(f"Processing upgrades for instance: {instance_name}")
+ lidarr_logger.debug("Processing upgrades for instance: %s", instance_name)
# lidarr_logger.debug(f"Instance Config (extracted): {{ 'api_url': '{api_url}', 'api_key': '***' }}")
# lidarr_logger.debug(f"General Settings (from app_settings): {app_settings}") # Avoid logging full settings potentially containing sensitive info
# Check if API URL or Key are missing
if not api_url or not api_key:
- lidarr_logger.error(f"Missing API URL or Key for instance '{instance_name}'. Cannot process upgrades.")
+ lidarr_logger.error("Missing API URL or Key for instance '%s'. Cannot process upgrades.", instance_name)
return False
# Check if upgrade hunting is enabled
if hunt_upgrade_items <= 0:
- lidarr_logger.info(f"'hunt_upgrade_items' is {hunt_upgrade_items} or less. Skipping upgrade processing for {instance_name}.")
+ lidarr_logger.info("'hunt_upgrade_items' is %s or less. Skipping upgrade processing for %s.", hunt_upgrade_items, instance_name)
return False
- lidarr_logger.info(f"Looking for quality upgrades for {instance_name}")
- lidarr_logger.debug(f"Processing up to {hunt_upgrade_items} items for quality upgrade")
-
- # Reset state files if enough time has passed
- check_state_reset("lidarr")
-
+ lidarr_logger.info("Looking for quality upgrades for %s", instance_name)
+ lidarr_logger.debug("Processing up to %s items for quality upgrade", hunt_upgrade_items)
+
processed_count = 0
processed_any = False
@@ -82,21 +78,21 @@ def process_cutoff_upgrades(
tag_processed_items = lidarr_settings.get("tag_processed_items", True)
try:
- lidarr_logger.info(f"Retrieving cutoff unmet albums...")
+ lidarr_logger.info("Retrieving cutoff unmet albums...")
# Use efficient random page selection instead of fetching all albums
cutoff_unmet_data = lidarr_api.get_cutoff_unmet_albums_random_page(
api_url, api_key, api_timeout, monitored_only, hunt_upgrade_items * 2
)
-
+
if cutoff_unmet_data is None: # API call failed
lidarr_logger.error("Failed to retrieve cutoff unmet albums from Lidarr API.")
return False
-
+
if not cutoff_unmet_data:
lidarr_logger.info("No cutoff unmet albums found.")
return False
-
- lidarr_logger.info(f"Retrieved {len(cutoff_unmet_data)} cutoff unmet albums from random page selection.")
+
+ lidarr_logger.info("Retrieved %s cutoff unmet albums from random page selection.", len(cutoff_unmet_data))
# Filter out already processed items
unprocessed_albums = []
@@ -105,17 +101,17 @@ def process_cutoff_upgrades(
if album_id and not is_processed("lidarr", instance_name, str(album_id)): # Convert to string only for processed check
unprocessed_albums.append(album)
else:
- lidarr_logger.debug(f"Skipping already processed album ID: {album_id}")
-
- lidarr_logger.info(f"Found {len(unprocessed_albums)} unprocessed albums out of {len(cutoff_unmet_data)} total albums eligible for quality upgrade.")
-
+ lidarr_logger.debug("Skipping already processed album ID: %s", album_id)
+
+ lidarr_logger.info("Found %s unprocessed albums out of %s total albums eligible for quality upgrade.", len(unprocessed_albums), len(cutoff_unmet_data))
+
if not unprocessed_albums:
lidarr_logger.info("No unprocessed albums found for quality upgrade. Skipping cycle.")
return False
# Always select albums randomly
albums_to_search = random.sample(unprocessed_albums, min(len(unprocessed_albums), hunt_upgrade_items))
- lidarr_logger.info(f"Randomly selected {len(albums_to_search)} albums for upgrade search.")
+ lidarr_logger.info("Randomly selected %s albums for upgrade search.", len(albums_to_search))
album_ids_to_search = [album['id'] for album in albums_to_search]
@@ -134,30 +130,30 @@ def process_cutoff_upgrades(
# Log each album on a separate line for better readability
if album_details_log:
- lidarr_logger.info(f"Albums selected for quality upgrade in this cycle:")
+ lidarr_logger.info("Albums selected for quality upgrade in this cycle:")
for album_detail in album_details_log:
- lidarr_logger.info(f" {album_detail}")
+ lidarr_logger.info(" %s", album_detail)
# Check stop event before triggering search
if stop_check(): # Use the new stop_check function
lidarr_logger.warning("Shutdown requested before album upgrade search trigger.")
return False
-
+
# Check API limit before processing albums
try:
if check_hourly_cap_exceeded("lidarr"):
-                lidarr_logger.warning(f"🛑 Lidarr API hourly limit reached - stopping upgrade processing")
+                lidarr_logger.warning("🛑 Lidarr API hourly limit reached - stopping upgrade processing")
return False
except Exception as e:
- lidarr_logger.error(f"Error checking hourly API cap: {e}")
+ lidarr_logger.error("Error checking hourly API cap: %s", e)
# Continue processing if cap check fails - safer than stopping
# Mark the albums as processed BEFORE triggering the search
for album_id in album_ids_to_search:
success = add_processed_id("lidarr", instance_name, str(album_id))
- lidarr_logger.debug(f"Added album ID {album_id} to processed list for {instance_name}, success: {success}")
+ lidarr_logger.debug("Added album ID %s to processed list for %s, success: %s", album_id, instance_name, success)
- lidarr_logger.info(f"Triggering Album Search for {len(album_ids_to_search)} albums for upgrade on instance {instance_name}: {album_ids_to_search}")
+ lidarr_logger.info("Triggering Album Search for %s albums for upgrade on instance %s: %s", len(album_ids_to_search), instance_name, album_ids_to_search)
# Pass necessary details extracted above to the API function
command_id = lidarr_api.search_albums(
api_url,
@@ -166,12 +162,11 @@ def process_cutoff_upgrades(
album_ids_to_search
)
if command_id:
- lidarr_logger.debug(f"Upgrade album search command triggered with ID: {command_id} for albums: {album_ids_to_search}")
+ lidarr_logger.debug("Upgrade album search command triggered with ID: %s for albums: %s", command_id, album_ids_to_search)
increment_stat("lidarr", "upgraded") # Use appropriate stat key
-
+
# Tag artists if enabled (from albums)
if tag_processed_items:
- from src.primary.settings_manager import get_custom_tag
custom_tag = get_custom_tag("lidarr", "upgrade", "huntarr-upgraded")
tagged_artists = set() # Track which artists we've already tagged
for album in albums_to_search:
@@ -179,11 +174,11 @@ def process_cutoff_upgrades(
if artist_id and artist_id not in tagged_artists:
try:
lidarr_api.tag_processed_artist(api_url, api_key, api_timeout, artist_id, custom_tag)
- lidarr_logger.debug(f"Tagged artist {artist_id} with '{custom_tag}'")
+ lidarr_logger.debug("Tagged artist %s with '%s'", artist_id, custom_tag)
tagged_artists.add(artist_id)
except Exception as e:
- lidarr_logger.warning(f"Failed to tag artist {artist_id} with '{custom_tag}': {e}")
-
+ lidarr_logger.warning("Failed to tag artist %s with '%s': %s", artist_id, custom_tag, e)
+
# Log to history
for album_id in album_ids_to_search:
# Find the album info for this ID to log to history
@@ -193,20 +188,20 @@ def process_cutoff_upgrades(
artist_name = album.get('artist', {}).get('artistName', 'Unknown Artist')
media_name = f"{artist_name} - {album_title}"
log_processed_media("lidarr", media_name, album_id, instance_name, "upgrade")
- lidarr_logger.debug(f"Logged quality upgrade to history for album ID {album_id}")
+ lidarr_logger.debug("Logged quality upgrade to history for album ID %s", album_id)
break
-
+
time.sleep(command_wait_delay) # Basic delay
processed_count += len(album_ids_to_search)
processed_any = True # Mark that we processed something
# Consider adding wait_for_command logic if needed
# wait_for_command(api_url, api_key, command_id, command_wait_delay, command_wait_attempts)
else:
- lidarr_logger.warning(f"Failed to trigger upgrade album search for IDs {album_ids_to_search} on {instance_name}.")
+ lidarr_logger.warning("Failed to trigger upgrade album search for IDs %s on %s.", album_ids_to_search, instance_name)
except Exception as e:
- lidarr_logger.error(f"An error occurred during upgrade album processing for {instance_name}: {e}", exc_info=True)
+ lidarr_logger.error("An error occurred during upgrade album processing for %s: %s", instance_name, e, exc_info=True)
return False # Indicate failure
- lidarr_logger.info(f"Upgrade album processing finished for {instance_name}. Triggered searches for {processed_count} items.")
- return processed_any # Return True if anything was processed
\ No newline at end of file
+ lidarr_logger.info("Upgrade album processing finished for %s. Triggered searches for %s items.", instance_name, processed_count)
+ return processed_any # Return True if anything was processed
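
Both hunts cap each cycle with random.sample, which never requests more items than the pool holds. A small stdlib-only illustration with made-up values:

import random

unprocessed_albums = list(range(25))   # illustrative pool of album IDs
hunt_upgrade_items = 5                 # illustrative per-cycle limit

# min() keeps the sample size within the available pool
albums_to_search = random.sample(
    unprocessed_albums,
    min(len(unprocessed_albums), hunt_upgrade_items),
)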
diff --git a/src/primary/apps/lidarr_routes.py b/src/primary/apps/lidarr_routes.py
index e22899cb..ca10cf9a 100644
--- a/src/primary/apps/lidarr_routes.py
+++ b/src/primary/apps/lidarr_routes.py
@@ -1,19 +1,17 @@
#!/usr/bin/env python3
+import socket
+from urllib.parse import urlparse
+
+import requests
from flask import Blueprint, request, jsonify
-import datetime, os, requests
-from src.primary.state import reset_state_file
from src.primary.utils.logger import get_logger
from src.primary.settings_manager import get_ssl_verify_setting
-import traceback
-import socket
-from urllib.parse import urlparse
lidarr_bp = Blueprint('lidarr', __name__)
lidarr_logger = get_logger("lidarr")
-# State management now handled directly through database calls
@lidarr_bp.route('/test-connection', methods=['POST'])
def test_connection():
@@ -22,28 +20,28 @@ def test_connection():
api_url = data.get('api_url')
api_key = data.get('api_key')
api_timeout = data.get('api_timeout', 30) # Use longer timeout for connection test
-
+
if not api_url or not api_key:
return jsonify({"success": False, "message": "API URL and API Key are required"}), 400
-
+
# Auto-correct URL if missing http(s) scheme
if not (api_url.startswith('http://') or api_url.startswith('https://')):
lidarr_logger.warning(f"API URL missing http(s) scheme: {api_url}")
api_url = f"http://{api_url}"
lidarr_logger.debug(f"Auto-correcting URL to: {api_url}")
-
+
# Try to establish a socket connection first to check basic connectivity
parsed_url = urlparse(api_url)
hostname = parsed_url.hostname
port = parsed_url.port or (443 if parsed_url.scheme == 'https' else 80)
-
+
try:
# Try socket connection for quick feedback on connectivity issues
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3) # Short timeout for quick feedback
result = sock.connect_ex((hostname, port))
sock.close()
-
+
if result != 0:
error_msg = f"Connection refused - Unable to connect to {hostname}:{port}. Please check if the server is running and the port is correct."
lidarr_logger.error(error_msg)
@@ -55,23 +53,23 @@ def test_connection():
except Exception as e:
# Log the socket testing error but continue with the full request
lidarr_logger.debug(f"Socket test error, continuing with full request: {str(e)}")
-
+
# For Lidarr, use api/v1
url = f"{api_url.rstrip('/')}/api/v1/system/status"
headers = {
"X-Api-Key": api_key,
"Content-Type": "application/json"
}
-
+
# Get SSL verification setting
verify_ssl = get_ssl_verify_setting()
-
+
if not verify_ssl:
lidarr_logger.debug("SSL verification disabled by user setting for connection test")
-
+
try:
response = requests.get(url, headers=headers, timeout=(10, api_timeout), verify=verify_ssl)
-
+
# For HTTP errors, provide more specific feedback
if response.status_code == 401:
error_msg = "Authentication failed: Invalid API key"
@@ -89,14 +87,14 @@ def test_connection():
error_msg = f"Lidarr server error (HTTP {response.status_code}): The Lidarr server is experiencing issues"
lidarr_logger.error(error_msg)
return jsonify({"success": False, "message": error_msg}), response.status_code
-
+
# Raise for other HTTP errors
response.raise_for_status()
-
+
try:
response_data = response.json()
version = response_data.get('version', 'unknown')
-
+
return jsonify({
"success": True,
"message": "Successfully connected to Lidarr API",
@@ -106,7 +104,7 @@ def test_connection():
error_msg = "Invalid JSON response from Lidarr API - This doesn't appear to be a valid Lidarr server"
lidarr_logger.error(f"{error_msg}. Response content: {response.text[:200]}")
return jsonify({"success": False, "message": error_msg}), 500
-
+
except requests.exceptions.ConnectionError as e:
# Handle different types of connection errors
error_details = str(e)
@@ -116,7 +114,7 @@ def test_connection():
error_msg = f"DNS resolution failed - Cannot find host '{urlparse(api_url).hostname}'. Check your URL."
else:
error_msg = f"Connection error - Check if Lidarr is running: {error_details}"
-
+
lidarr_logger.error(error_msg)
return jsonify({"success": False, "message": error_msg}), 404
except requests.exceptions.Timeout:
diff --git a/src/primary/apps/radarr/__init__.py b/src/primary/apps/radarr/__init__.py
index c89d41d7..8b5c38cb 100644
--- a/src/primary/apps/radarr/__init__.py
+++ b/src/primary/apps/radarr/__init__.py
@@ -6,22 +6,21 @@
# Module exports
from src.primary.apps.radarr.missing import process_missing_movies
from src.primary.apps.radarr.upgrade import process_cutoff_upgrades
-
-# Add necessary imports for get_configured_instances
from src.primary.settings_manager import load_settings
from src.primary.utils.logger import get_logger
-radarr_logger = get_logger("radarr") # Get the logger instance
+radarr_logger = get_logger("radarr")
+
def get_configured_instances():
"""Get all configured and enabled Radarr instances"""
settings = load_settings("radarr")
instances = []
-
+
if not settings:
radarr_logger.debug("No settings found for Radarr")
return instances
-
+
# Check if instances are configured
if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]:
for instance in settings["instances"]:
@@ -41,30 +40,30 @@ def get_configured_instances():
# Remove instances list to avoid confusion
if "instances" in instance_settings:
del instance_settings["instances"]
-
+
# Override with instance-specific connection settings (using corrected URL)
instance_settings["api_url"] = api_url
instance_settings["api_key"] = api_key
instance_settings["instance_name"] = instance.get("name", "Default")
instance_settings["swaparr_enabled"] = instance.get("swaparr_enabled", False)
-
+
# Add per-instance hunt values for missing/upgrade processing
instance_settings["hunt_missing_movies"] = instance.get("hunt_missing_movies", 1)
instance_settings["hunt_upgrade_movies"] = instance.get("hunt_upgrade_movies", 0)
instance_settings["release_date_delay_days"] = instance.get("release_date_delay_days", 0)
-
+
instances.append(instance_settings)
else:
# Fallback to legacy single-instance config
api_url = settings.get("api_url", "").strip()
api_key = settings.get("api_key", "").strip()
-
+
# Ensure URL has proper scheme for legacy config too
if api_url and not (api_url.startswith('http://') or api_url.startswith('https://')):
radarr_logger.warning(f"API URL missing http(s) scheme: {api_url}")
api_url = f"http://{api_url}"
radarr_logger.warning(f"Auto-correcting URL to: {api_url}")
-
+
if api_url and api_key:
settings_copy = settings.copy()
settings_copy["api_url"] = api_url # Use corrected URL
@@ -75,8 +74,8 @@ def get_configured_instances():
settings_copy["hunt_upgrade_movies"] = settings.get("hunt_upgrade_movies", 0)
settings_copy["release_date_delay_days"] = settings.get("release_date_delay_days", 0)
instances.append(settings_copy)
-
+
# Use debug level to avoid spamming logs, especially with 0 instances
return instances
-__all__ = ["process_missing_movies", "process_cutoff_upgrades", "get_configured_instances"]
\ No newline at end of file
+__all__ = ["process_missing_movies", "process_cutoff_upgrades", "get_configured_instances"]
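
get_configured_instances builds one flat settings dict per instance by copying the app-level settings and overlaying each instance's connection details. A rough stdlib-only sketch of that merge; merge_instance and the keys shown illustrate the pattern rather than the exact helper:

def merge_instance(settings: dict, instance: dict) -> dict:
    """Overlay one instance's connection details onto a copy of the shared settings."""
    merged = settings.copy()
    merged.pop("instances", None)  # avoid carrying the full instance list along
    merged["api_url"] = instance.get("api_url", "").strip()
    merged["api_key"] = instance.get("api_key", "").strip()
    merged["instance_name"] = instance.get("name", "Default")
    return merged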
diff --git a/src/primary/apps/radarr_routes.py b/src/primary/apps/radarr_routes.py
index 8573b661..92b770b3 100644
--- a/src/primary/apps/radarr_routes.py
+++ b/src/primary/apps/radarr_routes.py
@@ -1,19 +1,17 @@
#!/usr/bin/env python3
+import socket
+from urllib.parse import urlparse
+
+import requests
from flask import Blueprint, request, jsonify
-import datetime, os, requests
-from src.primary.state import reset_state_file
from src.primary.utils.logger import get_logger
from src.primary.settings_manager import get_ssl_verify_setting
-import traceback
-import socket
-from urllib.parse import urlparse
radarr_bp = Blueprint('radarr', __name__)
radarr_logger = get_logger("radarr")
-# State management now handled directly through database calls
@radarr_bp.route('/test-connection', methods=['POST'])
def test_connection():
@@ -22,28 +20,28 @@ def test_connection():
api_url = data.get('api_url')
api_key = data.get('api_key')
api_timeout = data.get('api_timeout', 30) # Use longer timeout for connection test
-
+
if not api_url or not api_key:
return jsonify({"success": False, "message": "API URL and API Key are required"}), 400
-
+
# Auto-correct URL if missing http(s) scheme
if not (api_url.startswith('http://') or api_url.startswith('https://')):
- radarr_logger.warning(f"API URL missing http(s) scheme: {api_url}")
+ radarr_logger.warning("API URL missing http(s) scheme: %s", api_url)
api_url = f"http://{api_url}"
- radarr_logger.debug(f"Auto-correcting URL to: {api_url}")
-
+ radarr_logger.debug("Auto-correcting URL to: %s", api_url)
+
# Try to establish a socket connection first to check basic connectivity
parsed_url = urlparse(api_url)
hostname = parsed_url.hostname
port = parsed_url.port or (443 if parsed_url.scheme == 'https' else 80)
-
+
try:
# Try socket connection for quick feedback on connectivity issues
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3) # Short timeout for quick feedback
result = sock.connect_ex((hostname, port))
sock.close()
-
+
if result != 0:
error_msg = f"Connection refused - Unable to connect to {hostname}:{port}. Please check if the server is running and the port is correct."
radarr_logger.error(error_msg)
@@ -54,24 +52,24 @@ def test_connection():
return jsonify({"success": False, "message": error_msg}), 404
except Exception as e:
# Log the socket testing error but continue with the full request
- radarr_logger.debug(f"Socket test error, continuing with full request: {str(e)}")
-
+ radarr_logger.debug("Socket test error, continuing with full request: %s", str(e))
+
# For Radarr, use api/v3
url = f"{api_url.rstrip('/')}/api/v3/system/status"
headers = {
"X-Api-Key": api_key,
"Content-Type": "application/json"
}
-
+
# Get SSL verification setting
verify_ssl = get_ssl_verify_setting()
-
+
if not verify_ssl:
radarr_logger.debug("SSL verification disabled by user setting for connection test")
-
+
try:
response = requests.get(url, headers=headers, timeout=(10, api_timeout), verify=verify_ssl)
-
+
# For HTTP errors, provide more specific feedback
if response.status_code == 401:
error_msg = "Authentication failed: Invalid API key"
@@ -89,14 +87,14 @@ def test_connection():
error_msg = f"Radarr server error (HTTP {response.status_code}): The Radarr server is experiencing issues"
radarr_logger.error(error_msg)
return jsonify({"success": False, "message": error_msg}), response.status_code
-
+
# Raise for other HTTP errors
response.raise_for_status()
-
+
try:
response_data = response.json()
version = response_data.get('version', 'unknown')
-
+
return jsonify({
"success": True,
"message": "Successfully connected to Radarr API",
@@ -104,9 +102,9 @@ def test_connection():
})
except ValueError:
error_msg = "Invalid JSON response from Radarr API - This doesn't appear to be a valid Radarr server"
- radarr_logger.error(f"{error_msg}. Response content: {response.text[:200]}")
+ radarr_logger.error("%s. Response content: %s", error_msg, response.text[:200])
return jsonify({"success": False, "message": error_msg}), 500
-
+
except requests.exceptions.ConnectionError as e:
# Handle different types of connection errors
error_details = str(e)
@@ -116,7 +114,7 @@ def test_connection():
error_msg = f"DNS resolution failed - Cannot find host '{urlparse(api_url).hostname}'. Check your URL."
else:
error_msg = f"Connection error - Check if Radarr is running: {error_details}"
-
+
radarr_logger.error(error_msg)
return jsonify({"success": False, "message": error_msg}), 404
except requests.exceptions.Timeout:
@@ -127,5 +125,3 @@ def test_connection():
error_msg = f"Connection test failed: {str(e)}"
radarr_logger.error(error_msg)
return jsonify({"success": False, "message": error_msg}), 500
-
-
diff --git a/src/primary/apps/readarr/missing.py b/src/primary/apps/readarr/missing.py
index 6e209009..37030595 100644
--- a/src/primary/apps/readarr/missing.py
+++ b/src/primary/apps/readarr/missing.py
@@ -4,64 +4,50 @@
Handles searching for missing books in Readarr
"""
-import time
import random
-from typing import List, Dict, Any, Set, Callable
+from typing import Any, Callable
+
from src.primary.utils.logger import get_logger
from src.primary.apps.readarr import api as readarr_api
from src.primary.stats_manager import increment_stat, check_hourly_cap_exceeded
from src.primary.stateful_manager import is_processed, add_processed_id
from src.primary.utils.history_utils import log_processed_media
-from src.primary.settings_manager import load_settings, get_advanced_setting
-from src.primary.state import check_state_reset
+from src.primary.settings_manager import get_custom_tag, load_settings, get_advanced_setting
-# Get logger for the app
readarr_logger = get_logger("readarr")
+
def process_missing_books(
- app_settings: Dict[str, Any],
+ app_settings: dict[str, Any],
stop_check: Callable[[], bool] # Function to check if stop is requested
) -> bool:
"""
Process missing books in Readarr based on provided settings.
-
+
Args:
app_settings: Dictionary containing all settings for Readarr
stop_check: A function that returns True if the process should stop
-
+
Returns:
True if any books were processed, False otherwise.
"""
readarr_logger.info("Starting missing books processing cycle for Readarr.")
processed_any = False
-
- # Reset state files if enough time has passed
- check_state_reset("readarr")
-
+
# Load settings to check if tagging is enabled
readarr_settings = load_settings("readarr")
tag_processed_items = readarr_settings.get("tag_processed_items", True)
-
- # Get the settings for the instance
- general_settings = readarr_api.load_settings('general')
-
+
# Extract necessary settings
api_url = app_settings.get("api_url", "").strip()
api_key = app_settings.get("api_key", "").strip()
api_timeout = get_advanced_setting("api_timeout", 120) # Use database value
instance_name = app_settings.get("instance_name", "Readarr Default")
-
- readarr_logger.info(f"Using API timeout of {api_timeout} seconds for Readarr")
-
+
+ readarr_logger.info("Using API timeout of %s seconds for Readarr", api_timeout)
+
monitored_only = app_settings.get("monitored_only", True)
- skip_future_releases = app_settings.get("skip_future_releases", True)
hunt_missing_books = app_settings.get("hunt_missing_books", 0)
-
- # Use advanced settings from database for command operations
- command_wait_delay = get_advanced_setting("command_wait_delay", 1)
- command_wait_attempts = get_advanced_setting("command_wait_attempts", 600)
-
- # Configuration logging removed to reduce log spam
if not api_url or not api_key:
readarr_logger.error("API URL or Key not configured in settings. Cannot process missing books.")
@@ -78,20 +64,20 @@ def process_missing_books(
return False
# Get missing books
- readarr_logger.info(f"Retrieving books with missing files...")
+ readarr_logger.info("Retrieving books with missing files...")
# Use efficient random page selection instead of fetching all books
missing_books_data = readarr_api.get_wanted_missing_books_random_page(
api_url, api_key, api_timeout, monitored_only, hunt_missing_books * 2
)
-
+
if missing_books_data is None or not missing_books_data: # API call failed or no books
if missing_books_data is None:
readarr_logger.error("Failed to retrieve missing books from Readarr API.")
else:
readarr_logger.info("No missing books found.")
return False
-
- readarr_logger.info(f"Retrieved {len(missing_books_data)} missing books from random page selection.")
+
+ readarr_logger.info("Retrieved %s missing books from random page selection.", len(missing_books_data))
# Check for stop signal after retrieving books
if stop_check():
@@ -105,28 +91,28 @@ def process_missing_books(
if not is_processed("readarr", instance_name, book_id):
unprocessed_books.append(book)
else:
- readarr_logger.debug(f"Skipping already processed book ID: {book_id}")
+ readarr_logger.debug("Skipping already processed book ID: %s", book_id)
+
+ readarr_logger.info("Found %s unprocessed missing books out of %s total.", len(unprocessed_books), len(missing_books_data))
- readarr_logger.info(f"Found {len(unprocessed_books)} unprocessed missing books out of {len(missing_books_data)} total.")
-
if not unprocessed_books:
readarr_logger.info("No unprocessed missing books found. All available books have been processed.")
return False
# Select individual books to process (fixed: was selecting authors, now selects books)
- readarr_logger.info(f"Randomly selecting up to {hunt_missing_books} individual books to search.")
+ readarr_logger.info("Randomly selecting up to %s individual books to search.", hunt_missing_books)
books_to_process = random.sample(unprocessed_books, min(hunt_missing_books, len(unprocessed_books)))
- readarr_logger.info(f"Selected {len(books_to_process)} individual books to search for missing items.")
-
+ readarr_logger.info("Selected %s individual books to search for missing items.", len(books_to_process))
+
# Add detailed logging for selected books
if books_to_process:
- readarr_logger.info(f"Books selected for processing in this cycle:")
+ readarr_logger.info("Books selected for processing in this cycle:")
for idx, book in enumerate(books_to_process):
book_id = book.get("id")
book_title = book.get("title", "Unknown Title")
author_id = book.get("authorId", "Unknown")
- readarr_logger.info(f" {idx+1}. '{book_title}' (ID: {book_id}, Author ID: {author_id})")
+ readarr_logger.info(" %s. '%s' (ID: %s, Author ID: %s)", idx+1, book_title, book_id, author_id)
processed_count = 0
processed_books = [] # Track book titles processed
@@ -136,77 +122,76 @@ def process_missing_books(
if stop_check():
readarr_logger.info("Stop signal received, aborting Readarr missing cycle.")
break
-
+
# Check API limit before processing each book
try:
if check_hourly_cap_exceeded("readarr"):
-                readarr_logger.warning(f"Readarr API hourly limit reached - stopping missing books processing after {processed_count} books")
+                readarr_logger.warning("Readarr API hourly limit reached - stopping missing books processing after %s books", processed_count)
break
except Exception as e:
- readarr_logger.error(f"Error checking hourly API cap: {e}")
+ readarr_logger.error("Error checking hourly API cap: %s", e)
# Continue processing if cap check fails - safer than stopping
book_id = book.get("id")
book_title = book.get("title", f"Unknown Book ID {book_id}")
author_id = book.get("authorId")
-
+
# Get author name for logging
author_info = readarr_api.get_author_details(api_url, api_key, author_id, api_timeout) if author_id else None
author_name = author_info.get("authorName", f"Author ID {author_id}") if author_info else "Unknown Author"
- readarr_logger.info(f"Processing missing book: '{book_title}' by {author_name} (Book ID: {book_id})")
+ readarr_logger.info("Processing missing book: '%s' by %s (Book ID: %s)", book_title, author_name, book_id)
# Search for this individual book (fixed: was searching all books by author)
- readarr_logger.info(f" - Searching for individual book: '{book_title}'...")
-
+ readarr_logger.info(" - Searching for individual book: '%s'...", book_title)
+
# Mark book as processed BEFORE triggering search to prevent duplicates
add_processed_id("readarr", instance_name, str(book_id))
- readarr_logger.debug(f"Added book ID {book_id} to processed list for {instance_name}")
-
+ readarr_logger.debug("Added book ID %s to processed list for %s", book_id, instance_name)
+
# Search for the specific book (using book search instead of author search)
search_command_result = readarr_api.search_books(api_url, api_key, [book_id], api_timeout)
if search_command_result:
# Extract command ID if the result is a dictionary, otherwise use the result directly
command_id = search_command_result.get('id') if isinstance(search_command_result, dict) else search_command_result
- readarr_logger.info(f"Triggered book search command {command_id} for '{book_title}' by {author_name}.")
+ readarr_logger.info("Triggered book search command %s for '%s' by %s.", command_id, book_title, author_name)
increment_stat("readarr", "hunted")
-
+
# Tag the book's author if enabled (keep author tagging as it's still useful)
if tag_processed_items and author_id:
- from src.primary.settings_manager import get_custom_tag
custom_tag = get_custom_tag("readarr", "missing", "huntarr-missing")
try:
readarr_api.tag_processed_author(api_url, api_key, api_timeout, author_id, custom_tag)
- readarr_logger.debug(f"Tagged author {author_id} with '{custom_tag}'")
+ readarr_logger.debug("Tagged author %s with '%s'", author_id, custom_tag)
except Exception as e:
- readarr_logger.warning(f"Failed to tag author {author_id} with '{custom_tag}': {e}")
-
+ readarr_logger.warning("Failed to tag author %s with '%s': %s", author_id, custom_tag, e)
+
# Log history entry for this specific book
media_name = f"{author_name} - {book_title}"
log_processed_media("readarr", media_name, book_id, instance_name, "missing")
- readarr_logger.debug(f"Logged missing book history entry: {media_name} (ID: {book_id})")
-
+ readarr_logger.debug("Logged missing book history entry: %s (ID: %s)", media_name, book_id)
+
processed_count += 1
processed_books.append(f"'{book_title}' by {author_name}")
processed_any = True
- readarr_logger.info(f"Processed {processed_count}/{len(books_to_process)} books for missing search this cycle.")
+ readarr_logger.info("Processed %s/%s books for missing search this cycle.", processed_count, len(books_to_process))
else:
- readarr_logger.error(f"Failed to trigger search for book '{book_title}' by {author_name}.")
+ readarr_logger.error("Failed to trigger search for book '%s' by %s.", book_title, author_name)
if processed_count >= hunt_missing_books:
- readarr_logger.info(f"Reached target of {hunt_missing_books} books processed for this cycle.")
+ readarr_logger.info("Reached target of %s books processed for this cycle.", hunt_missing_books)
break
if processed_books:
# Log first few books, then summarize if there are many
if len(processed_books) <= 3:
books_list = ', '.join(processed_books)
- readarr_logger.info(f'Completed processing {processed_count} books for missing search this cycle: {books_list}')
+ readarr_logger.info('Completed processing %s books for missing search this cycle: %s', processed_count, books_list)
else:
first_books = ', '.join(processed_books[:3])
- readarr_logger.info(f'Completed processing {processed_count} books for missing search this cycle: {first_books} and {len(processed_books)-3} others')
+ readarr_logger.info('Completed processing %s books for missing search this cycle: %s and %s others', processed_count, first_books, len(processed_books)-3)
else:
- readarr_logger.info(f"Completed processing {processed_count} books for missing search this cycle.")
+ readarr_logger.info("Completed processing %s books for missing search this cycle.", processed_count)
- return processed_any
\ No newline at end of file
+ return processed_any
diff --git a/src/primary/apps/readarr/upgrade.py b/src/primary/apps/readarr/upgrade.py
index a44b0650..7400ac4c 100644
--- a/src/primary/apps/readarr/upgrade.py
+++ b/src/primary/apps/readarr/upgrade.py
@@ -4,79 +4,70 @@
Handles searching for books that need quality upgrades in Readarr
"""
-import time
import random
import datetime
-from typing import List, Dict, Any, Set, Callable, Union, Optional
+from typing import Any, Callable
+
from src.primary.utils.logger import get_logger
from src.primary.apps.readarr import api as readarr_api
+from src.primary.settings_manager import get_custom_tag, load_settings
from src.primary.stats_manager import increment_stat, check_hourly_cap_exceeded
from src.primary.stateful_manager import is_processed, add_processed_id
from src.primary.utils.history_utils import log_processed_media
-from src.primary.state import check_state_reset
-from src.primary.settings_manager import load_settings # Import load_settings function
-# Get logger for the app
readarr_logger = get_logger("readarr")
+
def process_cutoff_upgrades(
- app_settings: Dict[str, Any],
+ app_settings: dict[str, Any],
stop_check: Callable[[], bool] # Function to check if stop is requested
) -> bool:
"""
Process quality cutoff upgrades for Readarr based on settings.
-
+
Args:
app_settings: Dictionary containing all settings for Readarr
stop_check: A function that returns True if the process should stop
-
+
Returns:
True if any books were processed for upgrades, False otherwise.
"""
readarr_logger.info("Starting quality cutoff upgrades processing cycle for Readarr.")
-
- # Reset state files if enough time has passed
- check_state_reset("readarr")
-
- processed_any = False
-
+
# Load general settings to get centralized timeout
general_settings = load_settings('general')
-
+
# Load settings to check if tagging is enabled
readarr_settings = load_settings("readarr")
tag_processed_items = readarr_settings.get("tag_processed_items", True)
-
+
# Get the API credentials for this instance
api_url = app_settings.get('api_url', '')
api_key = app_settings.get('api_key', '')
-
+
# Use the centralized timeout from general settings with app-specific as fallback
api_timeout = general_settings.get("api_timeout", app_settings.get("api_timeout", 90)) # Use centralized timeout
-
- readarr_logger.info(f"Using API timeout of {api_timeout} seconds for Readarr")
-
+
+ readarr_logger.info("Using API timeout of %s seconds for Readarr", api_timeout)
+
# Extract necessary settings
instance_name = app_settings.get("instance_name", "Readarr Default")
- monitored_only = app_settings.get("monitored_only", True)
# skip_author_refresh setting removed as it was a performance bottleneck
hunt_upgrade_books = app_settings.get("hunt_upgrade_books", 0)
- command_wait_delay = app_settings.get("command_wait_delay", 5)
- command_wait_attempts = app_settings.get("command_wait_attempts", 12)
-
+
# Get books eligible for upgrade
readarr_logger.info("Retrieving books eligible for quality upgrade...")
# Pass API credentials explicitly
upgrade_eligible_data = readarr_api.get_cutoff_unmet_books(api_url=api_url, api_key=api_key, api_timeout=api_timeout)
-
+
if upgrade_eligible_data is None: # Check if the API call failed (assuming it returns None on error)
readarr_logger.error("Error retrieving books eligible for upgrade from Readarr API.")
return False
elif not upgrade_eligible_data: # Check if the list is empty
readarr_logger.info("No books found eligible for upgrade.")
return False
-
- readarr_logger.info(f"Found {len(upgrade_eligible_data)} books eligible for quality upgrade.")
+
+ readarr_logger.info("Found %s books eligible for quality upgrade.", len(upgrade_eligible_data))
# Filter out future releases if configured
skip_future_releases = app_settings.get("skip_future_releases", True)
@@ -99,13 +90,13 @@ def process_cutoff_upgrades(
release_date = datetime.datetime.strptime(release_date_str, '%Y-%m-%d')
# Add UTC timezone for consistent comparison
release_date = release_date.replace(tzinfo=datetime.timezone.utc)
-
+
if release_date <= now:
filtered_books.append(book)
else:
- readarr_logger.debug(f"Skipping future book ID {book.get('id')} with release date {release_date_str}")
+ readarr_logger.debug("Skipping future book ID %s with release date %s", book.get('id'), release_date_str)
except ValueError:
- readarr_logger.warning(f"Could not parse release date '{release_date_str}' for book ID {book.get('id')}. Including anyway.")
+ readarr_logger.warning("Could not parse release date '%s' for book ID %s. Including anyway.", release_date_str, book.get('id'))
filtered_books.append(book)
else:
filtered_books.append(book) # Include books without a release date
@@ -113,12 +104,12 @@ def process_cutoff_upgrades(
upgrade_eligible_data = filtered_books
skipped_count = original_count - len(upgrade_eligible_data)
if skipped_count > 0:
- readarr_logger.info(f"Skipped {skipped_count} future books based on release date for upgrades.")
+ readarr_logger.info("Skipped %s future books based on release date for upgrades.", skipped_count)
if not upgrade_eligible_data:
readarr_logger.info("No upgradeable books found to process (after potential filtering). Skipping.")
return False
-
+
# Filter out already processed books using stateful management
unprocessed_books = []
for book in upgrade_eligible_data:
@@ -126,19 +117,19 @@ def process_cutoff_upgrades(
if not is_processed("readarr", instance_name, book_id):
unprocessed_books.append(book)
else:
- readarr_logger.debug(f"Skipping already processed book ID: {book_id}")
-
- readarr_logger.info(f"Found {len(unprocessed_books)} unprocessed books out of {len(upgrade_eligible_data)} total books eligible for upgrade.")
-
+ readarr_logger.debug("Skipping already processed book ID: %s", book_id)
+
+ readarr_logger.info("Found %s unprocessed books out of %s total books eligible for upgrade.", len(unprocessed_books), len(upgrade_eligible_data))
+
if not unprocessed_books:
- readarr_logger.info(f"No unprocessed books found for {instance_name}. All available books have been processed.")
+ readarr_logger.info("No unprocessed books found for %s. All available books have been processed.", instance_name)
return False
# Always randomly select books to process
- readarr_logger.info(f"Randomly selecting up to {hunt_upgrade_books} books for upgrade search.")
+ readarr_logger.info("Randomly selecting up to %s books for upgrade search.", hunt_upgrade_books)
books_to_process = random.sample(unprocessed_books, min(hunt_upgrade_books, len(unprocessed_books)))
- readarr_logger.info(f"Selected {len(books_to_process)} books to search for upgrades.")
+ readarr_logger.info("Selected %s books to search for upgrades.", len(books_to_process))
processed_count = 0
processed_something = False
@@ -147,28 +138,27 @@ def process_cutoff_upgrades(
# Check API limit before processing books
try:
if check_hourly_cap_exceeded("readarr"):
-            readarr_logger.warning(f"Readarr API hourly limit reached - stopping upgrade processing")
+            readarr_logger.warning("Readarr API hourly limit reached - stopping upgrade processing")
return False
except Exception as e:
- readarr_logger.error(f"Error checking hourly API cap: {e}")
+ readarr_logger.error("Error checking hourly API cap: %s", e)
# Continue processing if cap check fails - safer than stopping
# Mark books as processed BEFORE triggering any searches
for book_id in book_ids_to_search:
add_processed_id("readarr", instance_name, str(book_id))
- readarr_logger.debug(f"Added book ID {book_id} to processed list for {instance_name}")
-
+ readarr_logger.debug("Added book ID %s to processed list for %s", book_id, instance_name)
+
# Now trigger the search
search_command_result = readarr_api.search_books(api_url, api_key, book_ids_to_search, api_timeout)
-
+
if search_command_result:
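+        # search_books may return either a command dict or a plain command id; the result is logged as-is below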
command_id = search_command_result
- readarr_logger.info(f"Triggered upgrade search command {command_id} for {len(book_ids_to_search)} books.")
+ readarr_logger.info("Triggered upgrade search command %s for %s books.", command_id, len(book_ids_to_search))
increment_stat("readarr", "upgraded")
-
+
# Tag authors if enabled (from books)
if tag_processed_items:
- from src.primary.settings_manager import get_custom_tag
custom_tag = get_custom_tag("readarr", "upgrade", "huntarr-upgraded")
tagged_authors = set() # Track which authors we've already tagged
for book in books_to_process:
@@ -176,11 +166,11 @@ def process_cutoff_upgrades(
if author_id and author_id not in tagged_authors:
try:
readarr_api.tag_processed_author(api_url, api_key, api_timeout, author_id, custom_tag)
- readarr_logger.debug(f"Tagged author {author_id} with '{custom_tag}'")
+ readarr_logger.debug("Tagged author %s with '%s'", author_id, custom_tag)
tagged_authors.add(author_id)
except Exception as e:
- readarr_logger.warning(f"Failed to tag author {author_id} with '{custom_tag}': {e}")
-
+ readarr_logger.warning("Failed to tag author %s with '%s': %s", author_id, custom_tag, e)
+
# Log to history system for each book
for book in books_to_process:
# Ensure we have a valid author name - if missing, fetch it
@@ -195,25 +185,25 @@ def process_cutoff_upgrades(
else:
author_name = f"Author ID {author_id}"
except Exception as e:
- readarr_logger.debug(f"Error fetching author details: {e}")
+ readarr_logger.debug("Error fetching author details: %s", e)
author_name = f"Author ID {author_id}"
elif not author_name:
author_name = "Unknown Author"
-
+
book_title = book.get("title", f"Book ID {book.get('id')}")
media_name = f"{author_name} - {book_title}"
-
+
# Include full details in history entry
log_processed_media("readarr", media_name, book.get("id"), instance_name, "upgrade")
- readarr_logger.debug(f"Logged quality upgrade to history for '{media_name}' (Book ID: {book.get('id')})")
+ readarr_logger.debug("Logged quality upgrade to history for '%s' (Book ID: %s)", media_name, book.get('id'))
+
-
processed_count += len(book_ids_to_search)
processed_something = True
- readarr_logger.info(f"Processed {processed_count} book upgrades this cycle.")
+ readarr_logger.info("Processed %s book upgrades this cycle.", processed_count)
else:
- readarr_logger.error(f"Failed to trigger search for book upgrades.")
+ readarr_logger.error("Failed to trigger search for book upgrades.")
+
+ readarr_logger.info("Completed processing %s books for upgrade this cycle.", processed_count)
- readarr_logger.info(f"Completed processing {processed_count} books for upgrade this cycle.")
-
- return processed_something
\ No newline at end of file
+ return processed_something
diff --git a/src/primary/apps/readarr_routes.py b/src/primary/apps/readarr_routes.py
index 222c4cfe..42616934 100644
--- a/src/primary/apps/readarr_routes.py
+++ b/src/primary/apps/readarr_routes.py
@@ -1,19 +1,17 @@
#!/usr/bin/env python3
+import socket
+from urllib.parse import urlparse
+
+import requests
from flask import Blueprint, request, jsonify
-import datetime, os, requests
-from src.primary.state import reset_state_file
from src.primary.utils.logger import get_logger
from src.primary.settings_manager import get_ssl_verify_setting
-import traceback
-import socket
-from urllib.parse import urlparse
readarr_bp = Blueprint('readarr', __name__)
readarr_logger = get_logger("readarr")
-# State management now handled directly through database calls
@readarr_bp.route('/test-connection', methods=['POST'])
def test_connection():
@@ -21,44 +19,42 @@ def test_connection():
data = request.json
api_url = data.get('api_url')
api_key = data.get('api_key')
-
+
if not api_url or not api_key:
return jsonify({"success": False, "message": "API URL and API Key are required"}), 400
-
-
# Auto-correct URL if missing http(s) scheme
if not (api_url.startswith('http://') or api_url.startswith('https://')):
readarr_logger.warning(f"API URL missing http(s) scheme: {api_url}")
api_url = f"http://{api_url}"
readarr_logger.debug(f"Auto-correcting URL to: {api_url}")
-
+
# For Readarr, use api/v1
url = f"{api_url}/api/v1/system/status"
headers = {
"X-Api-Key": api_key,
"Content-Type": "application/json"
}
-
+
# Get SSL verification setting
verify_ssl = get_ssl_verify_setting()
-
+
if not verify_ssl:
readarr_logger.debug("SSL verification disabled by user setting for connection test")
-
+
try:
# First check if the host is reachable at all
parsed_url = urlparse(api_url)
hostname = parsed_url.hostname
port = parsed_url.port or (443 if parsed_url.scheme == 'https' else 80)
-
+
# Try to establish a socket connection first to provide a better error message for connection issues
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3) # Short timeout for quick feedback
result = sock.connect_ex((hostname, port))
sock.close()
-
+
if result != 0:
error_msg = f"Connection refused - Unable to connect to {hostname}:{port}. Please check if the server is running and the port is correct."
readarr_logger.error(error_msg)
@@ -70,10 +66,10 @@ def test_connection():
except Exception as e:
# Log the socket testing error but continue with the full request
readarr_logger.debug(f"Socket test error, continuing with full request: {str(e)}")
-
+
# Now proceed with the actual API request
response = requests.get(url, headers=headers, timeout=10, verify=verify_ssl)
-
+
# For HTTP errors, provide more specific feedback
if response.status_code == 401:
error_msg = "Authentication failed: Invalid API key"
@@ -91,15 +87,14 @@ def test_connection():
error_msg = f"Readarr server error (HTTP {response.status_code}): The Readarr server is experiencing issues"
readarr_logger.error(error_msg)
return jsonify({"success": False, "message": error_msg}), response.status_code
-
+
# Raise for other HTTP errors
response.raise_for_status()
-
+
try:
response_data = response.json()
version = response_data.get('version', 'unknown')
-
return jsonify({
"success": True,
"message": "Successfully connected to Readarr API",
@@ -109,7 +104,7 @@ def test_connection():
error_msg = "Invalid JSON response from Readarr API - This doesn't appear to be a valid Readarr server"
readarr_logger.error(f"{error_msg}. Response content: {response.text[:200]}")
return jsonify({"success": False, "message": error_msg}), 500
-
+
except requests.exceptions.ConnectionError as e:
# Handle different types of connection errors
error_details = str(e)
@@ -119,7 +114,7 @@ def test_connection():
error_msg = f"DNS resolution failed - Cannot find host '{urlparse(api_url).hostname}'. Check your URL."
else:
error_msg = f"Connection error - Check if Readarr is running: {error_details}"
-
+
readarr_logger.error(error_msg)
return jsonify({"success": False, "message": error_msg}), 404
except requests.exceptions.Timeout:
@@ -130,5 +125,3 @@ def test_connection():
error_msg = f"Connection test failed: {str(e)}"
readarr_logger.error(error_msg)
return jsonify({"success": False, "message": error_msg}), 500
-
-
diff --git a/src/primary/apps/sonarr_routes.py b/src/primary/apps/sonarr_routes.py
index becb41c7..4ec891be 100644
--- a/src/primary/apps/sonarr_routes.py
+++ b/src/primary/apps/sonarr_routes.py
@@ -1,103 +1,20 @@
#!/usr/bin/env python3
-from flask import Blueprint, request, jsonify, render_template
-import datetime, os, requests
-
-from src.primary.state import reset_state_file, check_state_reset
-from src.primary.utils.logger import get_logger
-from src.primary.settings_manager import get_ssl_verify_setting
-import traceback
+import logging
import socket
from urllib.parse import urlparse
-from src.primary.apps.sonarr import missing, upgrade
-from src.primary.auth import get_app_url_and_key
-from src.primary.utils.database import get_database
-from src.primary import settings_manager
-import logging
+
+import requests
+from flask import Blueprint, request, jsonify
+
+from src.primary.settings_manager import get_ssl_verify_setting
+from src.primary.utils.logger import get_logger
logger = logging.getLogger(__name__)
sonarr_bp = Blueprint('sonarr', __name__)
sonarr_logger = get_logger("sonarr")
-# State management now handled directly through database calls
-
-@sonarr_bp.route('/sonarr')
-def sonarr_page():
- """Render the Sonarr page"""
- return render_template('sonarr.html')
-
-@sonarr_bp.route('/api/sonarr/missing', methods=['POST'])
-def sonarr_missing():
- """Handle Sonarr missing episodes search"""
- try:
- # Check if state needs to be reset
- check_state_reset("sonarr")
-
- # Get app configuration from database
- app_url, api_key = get_app_url_and_key("sonarr")
- if not app_url or not api_key:
- return jsonify({"error": "Sonarr not configured"}), 400
-
- # Get settings from database
- missing_search_enabled = settings_manager.get_app_setting("sonarr", "missing_search", True)
- if not missing_search_enabled:
- return jsonify({"message": "Missing search is disabled for Sonarr"}), 200
-
- # Run missing search
- result = missing.run_missing_search(app_url, api_key, "sonarr")
- return jsonify(result)
-
- except Exception as e:
- logger.error(f"Error in Sonarr missing search: {e}")
- return jsonify({"error": str(e)}), 500
-
-@sonarr_bp.route('/api/sonarr/upgrade', methods=['POST'])
-def sonarr_upgrade():
- """Handle Sonarr upgrade search"""
- try:
- # Check if state needs to be reset
- check_state_reset("sonarr")
-
- # Get app configuration from database
- app_url, api_key = get_app_url_and_key("sonarr")
- if not app_url or not api_key:
- return jsonify({"error": "Sonarr not configured"}), 400
-
- # Get settings from database
- upgrade_search_enabled = settings_manager.get_app_setting("sonarr", "upgrade_search", True)
- if not upgrade_search_enabled:
- return jsonify({"message": "Upgrade search is disabled for Sonarr"}), 200
-
- # Run upgrade search
- result = upgrade.run_upgrade_search(app_url, api_key, "sonarr")
- return jsonify(result)
-
- except Exception as e:
- logger.error(f"Error in Sonarr upgrade search: {e}")
- return jsonify({"error": str(e)}), 500
-
-@sonarr_bp.route('/api/sonarr/reset', methods=['POST'])
-def sonarr_reset():
- """Reset Sonarr state files"""
- try:
- data = request.get_json() or {}
- reset_type = data.get('type', 'all')
-
- success = True
- if reset_type == 'missing' or reset_type == 'all':
- success &= reset_state_file("sonarr", "processed_missing")
- if reset_type == 'upgrade' or reset_type == 'all':
- success &= reset_state_file("sonarr", "processed_upgrades")
-
- if success:
- return jsonify({"message": f"Sonarr {reset_type} state reset successfully"})
- else:
- return jsonify({"error": f"Failed to reset Sonarr {reset_type} state"}), 500
-
- except Exception as e:
- logger.error(f"Error resetting Sonarr state: {e}")
- return jsonify({"error": str(e)}), 500
@sonarr_bp.route('/test-connection', methods=['POST'])
def test_connection():
@@ -109,25 +26,25 @@ def test_connection():
if not api_url or not api_key:
return jsonify({"success": False, "message": "API URL and API Key are required"}), 400
-
+
# Auto-correct URL if missing http(s) scheme
if not (api_url.startswith('http://') or api_url.startswith('https://')):
- sonarr_logger.debug(f"Auto-correcting URL to: {api_url}")
+        sonarr_logger.warning("API URL missing http(s) scheme: %s", api_url)
api_url = f"http://{api_url}"
- sonarr_logger.debug(f"Auto-correcting URL to: {api_url}")
-
+ sonarr_logger.debug("Auto-correcting URL to: %s", api_url)
+
# Try to establish a socket connection first to check basic connectivity
parsed_url = urlparse(api_url)
hostname = parsed_url.hostname
port = parsed_url.port or (443 if parsed_url.scheme == 'https' else 80)
-
+
try:
# Try socket connection for quick feedback on connectivity issues
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3) # Short timeout for quick feedback
result = sock.connect_ex((hostname, port))
sock.close()
-
+
if result != 0:
error_msg = f"Connection refused - Unable to connect to {hostname}:{port}. Please check if the server is running and the port is correct."
sonarr_logger.error(error_msg)
@@ -138,22 +55,22 @@ def test_connection():
return jsonify({"success": False, "message": error_msg}), 404
except Exception as e:
# Log the socket testing error but continue with the full request
- sonarr_logger.debug(f"Socket test error, continuing with full request: {str(e)}")
-
+ sonarr_logger.debug("Socket test error, continuing with full request: %s", str(e))
+
# Create the test URL and set headers
test_url = f"{api_url.rstrip('/')}/api/v3/system/status"
headers = {'X-Api-Key': api_key}
-
+
# Get SSL verification setting
verify_ssl = get_ssl_verify_setting()
-
+
if not verify_ssl:
sonarr_logger.debug("SSL verification disabled by user setting for connection test")
try:
# Now proceed with the actual API request
response = requests.get(test_url, headers=headers, timeout=(10, api_timeout), verify=verify_ssl)
-
+
# For HTTP errors, provide more specific feedback
if response.status_code == 401:
error_msg = "Authentication failed: Invalid API key"
@@ -171,20 +88,17 @@ def test_connection():
error_msg = f"Sonarr server error (HTTP {response.status_code}): The Sonarr server is experiencing issues"
sonarr_logger.error(error_msg)
return jsonify({"success": False, "message": error_msg}), response.status_code
-
+
# Raise for other HTTP errors
response.raise_for_status()
-
+
# Log HTTP status code for diagnostic purposes
- sonarr_logger.debug(f"Sonarr API status code: {response.status_code}")
+ sonarr_logger.debug("Sonarr API status code: %s", response.status_code)
# Ensure the response is valid JSON
try:
response_data = response.json()
-
- # We no longer save keys here since we use instances
-
-
+
# Return success with some useful information
return jsonify({
"success": True,
@@ -193,30 +107,29 @@ def test_connection():
})
except ValueError:
error_msg = "Invalid JSON response from Sonarr API - This doesn't appear to be a valid Sonarr server"
- sonarr_logger.error(f"{error_msg}. Response content: {response.text[:200]}")
+ sonarr_logger.error("%s. Response content: %s", error_msg, response.text[:200])
return jsonify({"success": False, "message": error_msg}), 500
except requests.exceptions.Timeout as e:
error_msg = f"Connection timed out after {api_timeout} seconds"
- sonarr_logger.error(f"{error_msg}: {str(e)}")
+ sonarr_logger.error("%s: %s", error_msg, str(e))
return jsonify({"success": False, "message": error_msg}), 504
-
+
except requests.exceptions.ConnectionError as e:
# Handle different types of connection errors
error_details = str(e)
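+        # Build the fully formatted message here: it is both logged and returned in the JSON response body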
if "Connection refused" in error_details:
- error_msg = f"Connection refused - Sonarr is not running on {api_url} or the port is incorrect"
+            error_msg = "Connection refused - Sonarr is not running on %s or the port is incorrect" % api_url
+            sonarr_logger.error(error_msg)
elif "Name or service not known" in error_details or "getaddrinfo failed" in error_details:
- error_msg = f"DNS resolution failed - Cannot find host '{urlparse(api_url).hostname}'. Check your URL."
+            error_msg = "DNS resolution failed - Cannot find host '%s'. Check your URL." % urlparse(api_url).hostname
+            sonarr_logger.error(error_msg)
else:
- error_msg = f"Connection error - Check if Sonarr is running: {error_details}"
-
- sonarr_logger.error(error_msg)
+            error_msg = "Connection error - Check if Sonarr is running: %s" % error_details
+            sonarr_logger.error(error_msg)
return jsonify({"success": False, "message": error_msg}), 404
-
- except requests.exceptions.RequestException as e:
- error_msg = f"Connection test failed: {str(e)}"
- sonarr_logger.error(error_msg)
- return jsonify({"success": False, "message": error_msg}), 500
-
+ except requests.exceptions.RequestException as e:
+ error_msg = "Connection test failed: %s"
+ sonarr_logger.error(error_msg, str(e))
+ return jsonify({"success": False, "message": error_msg % str(e)}), 500
diff --git a/src/primary/apps/whisparr/missing.py b/src/primary/apps/whisparr/missing.py
index a6a821d5..b4cde8e8 100644
--- a/src/primary/apps/whisparr/missing.py
+++ b/src/primary/apps/whisparr/missing.py
@@ -16,55 +16,43 @@
from src.primary.stateful_manager import is_processed, add_processed_id
from src.primary.stats_manager import increment_stat, check_hourly_cap_exceeded
from src.primary.utils.history_utils import log_processed_media
-from src.primary.state import check_state_reset
-# Get logger for the app
whisparr_logger = get_logger("whisparr")
+
def process_missing_items(
app_settings: Dict[str, Any],
stop_check: Callable[[], bool] # Function to check if stop is requested
) -> bool:
"""
Process missing items in Whisparr based on provided settings.
-
+
Args:
app_settings: Dictionary containing all settings for Whisparr
stop_check: A function that returns True if the process should stop
-
+
Returns:
True if any items were processed, False otherwise.
"""
whisparr_logger.info("Starting missing items processing cycle for Whisparr.")
- processed_any = False
-
- # Reset state files if enough time has passed
- check_state_reset("whisparr")
-
+
# Load settings to check if tagging is enabled
whisparr_settings = load_settings("whisparr")
tag_processed_items = whisparr_settings.get("tag_processed_items", True)
-
+
# Extract necessary settings
api_url = app_settings.get("api_url", "").strip()
api_key = app_settings.get("api_key", "").strip()
api_timeout = get_advanced_setting("api_timeout", 120) # Use database value
instance_name = app_settings.get("instance_name", "Whisparr Default")
-
- # Use the centralized advanced setting for stateful management hours
- stateful_management_hours = get_advanced_setting("stateful_management_hours", 168)
-
+
monitored_only = app_settings.get("monitored_only", True)
skip_future_releases = app_settings.get("skip_future_releases", True)
# skip_item_refresh setting removed as it was a performance bottleneck
-
+
# Use the new hunt_missing_items parameter name, falling back to hunt_missing_scenes for backwards compatibility
hunt_missing_items = app_settings.get("hunt_missing_items", app_settings.get("hunt_missing_scenes", 0))
-
- # Use advanced settings from database for command operations
- command_wait_delay = get_advanced_setting("command_wait_delay", 1)
- command_wait_attempts = get_advanced_setting("command_wait_attempts", 600)
-
+
# Log that we're using Whisparr V2 API
whisparr_logger.debug(f"Using Whisparr V2 API for instance: {instance_name}")
@@ -77,26 +65,26 @@ def process_missing_items(
if stop_check():
whisparr_logger.info("Stop requested before starting missing items. Aborting...")
return False
-
+
# Get missing items
whisparr_logger.info(f"Retrieving items with missing files...")
- missing_items = whisparr_api.get_items_with_missing(api_url, api_key, api_timeout, monitored_only)
-
+ missing_items = whisparr_api.get_items_with_missing(api_url, api_key, api_timeout, monitored_only)
+
if missing_items is None: # API call failed
whisparr_logger.error("Failed to retrieve missing items from Whisparr API.")
return False
-
+
if not missing_items:
whisparr_logger.info("No missing items found.")
return False
-
+
# Check for stop signal after retrieving items
if stop_check():
whisparr_logger.info("Stop requested after retrieving missing items. Aborting...")
return False
-
+
whisparr_logger.info(f"Found {len(missing_items)} items with missing files.")
-
+
# Filter out future releases if configured
if skip_future_releases:
now = datetime.datetime.now().replace(tzinfo=datetime.timezone.utc)
@@ -105,7 +93,7 @@ def process_missing_items(
missing_items = [
item for item in missing_items
if not item.get('airDateUtc') or (
- item.get('airDateUtc') and
+ item.get('airDateUtc') and
datetime.datetime.fromisoformat(item['airDateUtc'].replace('Z', '+00:00')) < now
)
]
@@ -116,7 +104,7 @@ def process_missing_items(
if not missing_items:
whisparr_logger.info("No missing items left to process after filtering future releases.")
return False
-
+
# Filter out already processed items using stateful management
unprocessed_items = []
for item in missing_items:
@@ -125,20 +113,20 @@ def process_missing_items(
unprocessed_items.append(item)
else:
whisparr_logger.debug(f"Skipping already processed item ID: {item_id}")
-
+
whisparr_logger.info(f"Found {len(unprocessed_items)} unprocessed items out of {len(missing_items)} total items with missing files.")
-
+
if not unprocessed_items:
whisparr_logger.info(f"No unprocessed items found for {instance_name}. All available items have been processed.")
return False
-
+
items_processed = 0
processing_done = False
-
+
# Select items to search based on configuration
whisparr_logger.info(f"Randomly selecting up to {hunt_missing_items} missing items.")
items_to_search = random.sample(unprocessed_items, min(len(unprocessed_items), hunt_missing_items))
-
+
whisparr_logger.info(f"Selected {len(items_to_search)} missing items to search.")
# Process selected items
@@ -147,7 +135,7 @@ def process_missing_items(
if stop_check():
whisparr_logger.info("Stop requested during item processing. Aborting...")
break
-
+
# Check API limit before processing each item
try:
if check_hourly_cap_exceeded("whisparr"):
@@ -156,7 +144,7 @@ def process_missing_items(
except Exception as e:
whisparr_logger.error(f"Error checking hourly API cap: {e}")
# Continue processing if cap check fails - safer than stopping
-
+
# Re-check limit in case it changed
current_limit = app_settings.get("hunt_missing_items", app_settings.get("hunt_missing_scenes", 1))
if items_processed >= current_limit:
@@ -166,26 +154,26 @@ def process_missing_items(
item_id = item.get("id")
title = item.get("title", "Unknown Title")
season_episode = f"S{item.get('seasonNumber', 0):02d}E{item.get('episodeNumber', 0):02d}"
-
+
whisparr_logger.info(f"Processing missing item: \"{title}\" - {season_episode} (Item ID: {item_id})")
-
+
# Refresh functionality has been removed as it was identified as a performance bottleneck
-
+
# Mark the item as processed BEFORE triggering any searches
add_processed_id("whisparr", instance_name, str(item_id))
whisparr_logger.debug(f"Added item ID {item_id} to processed list for {instance_name}")
-
+
# Check for stop signal before searching
if stop_check():
whisparr_logger.info(f"Stop requested before searching for {title}. Aborting...")
break
-
+
# Search for the item
whisparr_logger.info(" - Searching for missing item...")
search_command_id = whisparr_api.item_search(api_url, api_key, api_timeout, [item_id])
if search_command_id:
whisparr_logger.info(f"Triggered search command {search_command_id}. Assuming success for now.")
-
+
# Tag the series if enabled
if tag_processed_items:
from src.primary.settings_manager import get_custom_tag
@@ -197,15 +185,15 @@ def process_missing_items(
whisparr_logger.debug(f"Tagged series {series_id} with '{custom_tag}'")
except Exception as e:
whisparr_logger.warning(f"Failed to tag series {series_id} with '{custom_tag}': {e}")
-
+
# Log to history system
media_name = f"{title} - {season_episode}"
log_processed_media("whisparr", media_name, item_id, instance_name, "missing")
whisparr_logger.debug(f"Logged history entry for item: {media_name}")
-
+
items_processed += 1
processing_done = True
-
+
# Increment the hunted statistics for Whisparr
increment_stat("whisparr", "hunted", 1)
whisparr_logger.debug(f"Incremented whisparr hunted statistics by 1")
@@ -217,12 +205,12 @@ def process_missing_items(
whisparr_logger.warning(f"Failed to trigger search command for item ID {item_id}.")
# Do not mark as processed if search couldn't be triggered
continue
-
+
# Log final status
if items_processed > 0:
whisparr_logger.info(f"Completed processing {items_processed} missing items for this cycle.")
else:
whisparr_logger.info("No new missing items were processed in this run.")
-
+
return processing_done
diff --git a/src/primary/apps/whisparr/upgrade.py b/src/primary/apps/whisparr/upgrade.py
index 5e6e471e..ccc4ae5c 100644
--- a/src/primary/apps/whisparr/upgrade.py
+++ b/src/primary/apps/whisparr/upgrade.py
@@ -6,65 +6,53 @@
Supports both v2 (legacy) and v3 (Eros) API versions
"""
-import time
import random
-from typing import Dict, Any, List, Callable
-from datetime import datetime, timedelta
+from typing import Any, Callable
+
from src.primary.utils.logger import get_logger
from src.primary.apps.whisparr import api as whisparr_api
-from src.primary.settings_manager import load_settings, get_advanced_setting
+from src.primary.settings_manager import get_custom_tag, load_settings, get_advanced_setting
from src.primary.stateful_manager import is_processed, add_processed_id
from src.primary.stats_manager import increment_stat, check_hourly_cap_exceeded
from src.primary.utils.history_utils import log_processed_media
-from src.primary.state import check_state_reset
# Get logger for the app
whisparr_logger = get_logger("whisparr")
def process_cutoff_upgrades(
- app_settings: Dict[str, Any],
+ app_settings: dict[str, Any],
stop_check: Callable[[], bool] # Function to check if stop is requested
) -> bool:
"""
Process quality cutoff upgrades for Whisparr based on settings.
-
+
Args:
app_settings: Dictionary containing all settings for Whisparr
stop_check: A function that returns True if the process should stop
-
+
Returns:
True if any items were processed for upgrades, False otherwise.
"""
whisparr_logger.info("Starting quality cutoff upgrades processing cycle for Whisparr.")
- processed_any = False
-
- # Reset state files if enough time has passed
- check_state_reset("whisparr")
-
+
# Load settings to check if tagging is enabled
whisparr_settings = load_settings("whisparr")
tag_processed_items = whisparr_settings.get("tag_processed_items", True)
-
+
# Extract necessary settings
api_url = app_settings.get("api_url", "").strip()
api_key = app_settings.get("api_key", "").strip()
api_timeout = get_advanced_setting("api_timeout", 120) # Use database value
instance_name = app_settings.get("instance_name", "Whisparr Default")
-
- # Use advanced settings from database for command operations
- command_wait_delay = get_advanced_setting("command_wait_delay", 1)
- command_wait_attempts = get_advanced_setting("command_wait_attempts", 600)
-
+
monitored_only = app_settings.get("monitored_only", True)
# skip_item_refresh setting removed as it was a performance bottleneck
-
+
# Use the new hunt_upgrade_items parameter name, falling back to hunt_upgrade_scenes for backwards compatibility
hunt_upgrade_items = app_settings.get("hunt_upgrade_items", app_settings.get("hunt_upgrade_scenes", 0))
-
- state_reset_interval_hours = get_advanced_setting("stateful_management_hours", 168)
-
+
# Log that we're using Whisparr V2 API
- whisparr_logger.debug(f"Using Whisparr V2 API for instance: {instance_name}")
+ whisparr_logger.debug("Using Whisparr V2 API for instance: %s", instance_name)
# Skip if hunt_upgrade_items is set to 0
if hunt_upgrade_items <= 0:
@@ -77,20 +65,20 @@ def process_cutoff_upgrades(
return False
# Get items eligible for upgrade
- whisparr_logger.info(f"Retrieving items eligible for cutoff upgrade...")
+ whisparr_logger.info("Retrieving items eligible for cutoff upgrade...")
upgrade_eligible_data = whisparr_api.get_cutoff_unmet_items(api_url, api_key, api_timeout, monitored_only)
-
+
if not upgrade_eligible_data:
whisparr_logger.info("No items found eligible for upgrade or error retrieving them.")
return False
-
+
# Check for stop signal after retrieving eligible items
if stop_check():
whisparr_logger.info("Stop requested after retrieving upgrade eligible items. Aborting...")
return False
-
- whisparr_logger.info(f"Found {len(upgrade_eligible_data)} items eligible for quality upgrade.")
-
+
+ whisparr_logger.info("Found %s items eligible for quality upgrade.", len(upgrade_eligible_data))
+
# Filter out already processed items using stateful management
unprocessed_items = []
for item in upgrade_eligible_data:
@@ -98,108 +86,107 @@ def process_cutoff_upgrades(
if not is_processed("whisparr", instance_name, item_id):
unprocessed_items.append(item)
else:
- whisparr_logger.debug(f"Skipping already processed item ID: {item_id}")
-
- whisparr_logger.info(f"Found {len(unprocessed_items)} unprocessed items out of {len(upgrade_eligible_data)} total items eligible for quality upgrade.")
-
+ whisparr_logger.debug("Skipping already processed item ID: %s", item_id)
+
+ whisparr_logger.info("Found %s unprocessed items out of %s total items eligible for quality upgrade.", len(unprocessed_items), len(upgrade_eligible_data))
+
if not unprocessed_items:
- whisparr_logger.info(f"No unprocessed items found for {instance_name}. All available items have been processed.")
+ whisparr_logger.info("No unprocessed items found for %s. All available items have been processed.", instance_name)
return False
-
+
items_processed = 0
processing_done = False
-
+
# Always use random selection for upgrades
- whisparr_logger.info(f"Randomly selecting up to {hunt_upgrade_items} items for quality upgrade.")
+ whisparr_logger.info("Randomly selecting up to %s items for quality upgrade.", hunt_upgrade_items)
items_to_upgrade = random.sample(unprocessed_items, min(len(unprocessed_items), hunt_upgrade_items))
-
- whisparr_logger.info(f"Selected {len(items_to_upgrade)} items for quality upgrade.")
-
+
+ whisparr_logger.info("Selected %s items for quality upgrade.", len(items_to_upgrade))
+
# Process selected items
for item in items_to_upgrade:
# Check for stop signal before each item
if stop_check():
whisparr_logger.info("Stop requested during item processing. Aborting...")
break
-
+
# Check API limit before processing each item
try:
if check_hourly_cap_exceeded("whisparr"):
-                whisparr_logger.warning(f"Whisparr API hourly limit reached - stopping upgrade processing after {items_processed} items")
+                whisparr_logger.warning("Whisparr API hourly limit reached - stopping upgrade processing after %s items", items_processed)
break
except Exception as e:
- whisparr_logger.error(f"Error checking hourly API cap: {e}")
+ whisparr_logger.error("Error checking hourly API cap: %s", e)
# Continue processing if cap check fails - safer than stopping
-
+
# Re-check limit in case it changed
current_limit = app_settings.get("hunt_upgrade_items", app_settings.get("hunt_upgrade_scenes", 1))
if items_processed >= current_limit:
- whisparr_logger.info(f"Reached HUNT_UPGRADE_ITEMS limit ({current_limit}) for this cycle.")
+ whisparr_logger.info("Reached HUNT_UPGRADE_ITEMS limit (%s) for this cycle.", current_limit)
break
-
+
item_id = item.get("id")
title = item.get("title", "Unknown Title")
season_episode = f"S{item.get('seasonNumber', 0):02d}E{item.get('episodeNumber', 0):02d}"
-
+
current_quality = item.get("episodeFile", {}).get("quality", {}).get("quality", {}).get("name", "Unknown")
-
- whisparr_logger.info(f"Processing item for quality upgrade: \"{title}\" - {season_episode} (Item ID: {item_id})")
- whisparr_logger.info(f" - Current quality: {current_quality}")
-
+
+ whisparr_logger.info("Processing item for quality upgrade: \"%s\" - %s (Item ID: %s)", title, season_episode, item_id)
+ whisparr_logger.info(" - Current quality: %s", current_quality)
+
# Refresh functionality has been removed as it was identified as a performance bottleneck
-
+
# Check for stop signal before searching
if stop_check():
- whisparr_logger.info(f"Stop requested before searching for {title}. Aborting...")
+ whisparr_logger.info("Stop requested before searching for %s. Aborting...", title)
break
-
+
# Mark the item as processed BEFORE triggering any searches
add_processed_id("whisparr", instance_name, str(item_id))
- whisparr_logger.debug(f"Added item ID {item_id} to processed list for {instance_name}")
-
+ whisparr_logger.debug("Added item ID %s to processed list for %s", item_id, instance_name)
+
# Search for the item
whisparr_logger.info(" - Searching for quality upgrade...")
search_command_id = whisparr_api.item_search(api_url, api_key, api_timeout, [item_id])
if search_command_id:
- whisparr_logger.info(f"Triggered search command {search_command_id}. Assuming success for now.")
-
+ whisparr_logger.info("Triggered search command %s. Assuming success for now.", search_command_id)
+
# Tag the series if enabled
if tag_processed_items:
- from src.primary.settings_manager import get_custom_tag
custom_tag = get_custom_tag("whisparr", "upgrade", "huntarr-upgraded")
series_id = item.get('seriesId')
if series_id:
try:
whisparr_api.tag_processed_series(api_url, api_key, api_timeout, series_id, custom_tag)
- whisparr_logger.debug(f"Tagged series {series_id} with '{custom_tag}'")
+ whisparr_logger.debug("Tagged series %s with '%s'", series_id, custom_tag)
except Exception as e:
- whisparr_logger.warning(f"Failed to tag series {series_id} with '{custom_tag}': {e}")
-
+ whisparr_logger.warning("Failed to tag series %s with '%s': %s", series_id, custom_tag, e)
+
# Log to history so the upgrade appears in the history UI
series_title = item.get("series", {}).get("title", "Unknown Series")
media_name = f"{series_title} - {season_episode} - {title}"
log_processed_media("whisparr", media_name, item_id, instance_name, "upgrade")
- whisparr_logger.debug(f"Logged quality upgrade to history for item ID {item_id}")
-
+ whisparr_logger.debug("Logged quality upgrade to history for item ID %s", item_id)
+
items_processed += 1
processing_done = True
-
+
# Increment the upgraded statistics for Whisparr
increment_stat("whisparr", "upgraded", 1)
- whisparr_logger.debug(f"Incremented whisparr upgraded statistics by 1")
-
+ whisparr_logger.debug("Incremented whisparr upgraded statistics by 1")
+
# Log progress
current_limit = app_settings.get("hunt_upgrade_items", app_settings.get("hunt_upgrade_scenes", 1))
- whisparr_logger.info(f"Processed {items_processed}/{current_limit} items for quality upgrade this cycle.")
+ whisparr_logger.info("Processed %s/%s items for quality upgrade this cycle.", items_processed, current_limit)
else:
- whisparr_logger.warning(f"Failed to trigger search command for item ID {item_id}.")
+ whisparr_logger.warning("Failed to trigger search command for item ID %s.", item_id)
# Do not mark as processed if search couldn't be triggered
continue
-
+
# Log final status
if items_processed > 0:
- whisparr_logger.info(f"Completed processing {items_processed} items for quality upgrade for this cycle.")
+ whisparr_logger.info("Completed processing %s items for quality upgrade for this cycle.", items_processed)
else:
whisparr_logger.info("No new items were processed for quality upgrade in this run.")
-
- return processing_done
\ No newline at end of file
+
+ return processing_done
diff --git a/src/primary/apps/whisparr_routes.py b/src/primary/apps/whisparr_routes.py
index 48de5033..55f521f3 100644
--- a/src/primary/apps/whisparr_routes.py
+++ b/src/primary/apps/whisparr_routes.py
@@ -1,41 +1,40 @@
#!/usr/bin/env python3
-from flask import Blueprint, request, jsonify
-import datetime, os, requests
-
-from src.primary.state import reset_state_file
-from src.primary.utils.logger import get_logger, APP_LOG_FILES
-from src.primary.settings_manager import get_ssl_verify_setting
-import traceback
+import os
import socket
+import traceback
from urllib.parse import urlparse
+
+import requests
+from flask import Blueprint, request, jsonify
+
from src.primary.apps.whisparr import api as whisparr_api
+from src.primary.utils.logger import get_logger, APP_LOG_FILES
+from src.primary.settings_manager import get_ssl_verify_setting, load_settings
whisparr_bp = Blueprint('whisparr', __name__)
whisparr_logger = get_logger("whisparr")
-# State management now handled directly through database calls
@whisparr_bp.route('/status', methods=['GET'])
def get_status():
"""Get the status of configured Whisparr instance"""
try:
# Get configured instance
- from src.primary.settings_manager import load_settings
settings = load_settings("whisparr")
-
+
api_url = settings.get("url", "")
api_key = settings.get("api_key", "")
enabled = settings.get("enabled", True)
-
+
connected_count = 0
total_configured = 1 if api_url and api_key else 0
-
+
if api_url and api_key and enabled:
# Use a short timeout for status checks
if whisparr_api.check_connection(api_url, api_key, 5):
connected_count = 1
-
+
return jsonify({
"configured": total_configured > 0,
"connected": connected_count > 0,
@@ -43,13 +42,14 @@ def get_status():
"total_configured": total_configured
})
except Exception as e:
- whisparr_logger.error(f"Error getting Whisparr status: {str(e)}")
+ whisparr_logger.error("Error getting Whisparr status: %s", str(e))
return jsonify({
"configured": False,
"connected": False,
"error": str(e)
}), 500
+
@whisparr_bp.route('/test-connection', methods=['POST'])
def test_connection():
"""Test connection to a Whisparr API instance"""
@@ -57,30 +57,29 @@ def test_connection():
api_url = data.get('api_url')
api_key = data.get('api_key')
api_timeout = data.get('api_timeout', 30) # Use longer timeout for connection test
-
+
if not api_url or not api_key:
return jsonify({"success": False, "message": "API URL and API Key are required"}), 400
-
-
+
# Auto-correct URL if missing http(s) scheme
if not (api_url.startswith('http://') or api_url.startswith('https://')):
- whisparr_logger.warning(f"API URL missing http(s) scheme: {api_url}")
+ whisparr_logger.warning("API URL missing http(s) scheme: %s", api_url)
api_url = f"http://{api_url}"
- whisparr_logger.debug(f"Auto-correcting URL to: {api_url}")
-
+ whisparr_logger.debug("Auto-correcting URL to: %s", api_url)
+
# Try to establish a socket connection first to check basic connectivity
parsed_url = urlparse(api_url)
hostname = parsed_url.hostname
port = parsed_url.port or (443 if parsed_url.scheme == 'https' else 80)
-
+
try:
# Try socket connection for quick feedback on connectivity issues
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3) # Short timeout for quick feedback
result = sock.connect_ex((hostname, port))
sock.close()
-
+
if result != 0:
error_msg = f"Connection refused - Unable to connect to {hostname}:{port}. Please check if the server is running and the port is correct."
whisparr_logger.error(error_msg)
@@ -91,42 +90,40 @@ def test_connection():
return jsonify({"success": False, "message": error_msg}), 404
except Exception as e:
# Log the socket testing error but continue with the full request
- whisparr_logger.debug(f"Socket test error, continuing with full request: {str(e)}")
-
+ whisparr_logger.debug("Socket test error, continuing with full request: %s", str(e))
+
# First try standard API endpoint (Whisparr v2)
api_paths = [
{"url": f"{api_url.rstrip('/')}/api/system/status", "version": "v2"},
{"url": f"{api_url.rstrip('/')}/api/v3/system/status", "version": "v3"}
]
-
+
headers = {
"X-Api-Key": api_key,
"Content-Type": "application/json"
}
-
+
# Get SSL verification setting
verify_ssl = get_ssl_verify_setting()
-
+
if not verify_ssl:
whisparr_logger.debug("SSL verification disabled by user setting for connection test")
-
+
response = None
- detected_version = None
-
+
# Try each API path in order
for api_path in api_paths:
try:
url = api_path["url"]
- whisparr_logger.debug(f"Trying API path: {url}")
+ whisparr_logger.debug("Trying API path: %s", url)
response = requests.get(url, headers=headers, timeout=(10, api_timeout), verify=verify_ssl)
-
+
if response.status_code == 200:
- detected_version = api_path["version"]
break
-
+
except requests.exceptions.RequestException:
continue
-
+
# If no successful response was obtained
if not response or response.status_code != 200:
if response:
@@ -155,13 +152,12 @@ def test_connection():
error_msg = "Could not connect to any Whisparr API endpoint"
whisparr_logger.error(error_msg)
return jsonify({"success": False, "message": error_msg}), 404
-
+
# Successfully connected, now validate version
try:
response_data = response.json()
version = response_data.get('version', 'unknown')
-
-
+
# Check if this is a v2 version
if version and version.startswith('2'):
# Detected v2
@@ -182,7 +178,7 @@ def test_connection():
return jsonify({"success": False, "message": error_msg}), 400
except ValueError:
error_msg = "Invalid JSON response from Whisparr API - This doesn't appear to be a valid Whisparr server"
- whisparr_logger.error(f"{error_msg}. Response content: {response.text[:200]}")
+ whisparr_logger.error("%s. Response content: %s", error_msg, response.text[:200])
return jsonify({"success": False, "message": error_msg}), 500
except requests.exceptions.ConnectionError as e:
# Handle different types of connection errors
@@ -193,11 +189,11 @@ def test_connection():
error_msg = f"DNS resolution failed - Cannot find host '{urlparse(api_url).hostname}'. Check your URL."
else:
error_msg = f"Connection error - Check if Whisparr is running: {error_details}"
-
+
whisparr_logger.error(error_msg)
return jsonify({"success": False, "message": error_msg}), 404
except requests.exceptions.Timeout:
- error_msg = f"Connection timed out - Whisparr took too long to respond"
+ error_msg = "Connection timed out - Whisparr took too long to respond"
whisparr_logger.error(error_msg)
return jsonify({"success": False, "message": error_msg}), 504
except requests.exceptions.RequestException as e:
@@ -205,49 +201,42 @@ def test_connection():
whisparr_logger.error(error_msg)
return jsonify({"success": False, "message": error_msg}), 500
-# Function to check if Whisparr is configured
-def is_configured():
- """Check if Whisparr API credentials are configured"""
- from src.primary.settings_manager import load_settings
- settings = load_settings("whisparr")
- return settings.get("url") and settings.get("api_key")
@whisparr_bp.route('/versions', methods=['GET'])
def get_versions():
"""Get the version information from the Whisparr API"""
try:
# Get configured instance
- from src.primary.settings_manager import load_settings
settings = load_settings("whisparr")
-
+
api_url = settings.get("url", "")
api_key = settings.get("api_key", "")
enabled = settings.get("enabled", True)
instance_name = settings.get("name", "Default")
-
+
if not api_url or not api_key:
return jsonify({"success": False, "message": "No Whisparr instance configured"}), 404
-
+
if not enabled:
return jsonify({"success": False, "message": "Whisparr instance is disabled"}), 404
-
+
# First try standard API endpoint
version_url = f"{api_url.rstrip('/')}/api/system/status"
headers = {"X-Api-Key": api_key}
-
+
try:
response = requests.get(version_url, headers=headers, timeout=10)
-
+
# If we get a 404, try with the v3 path
if response.status_code == 404:
- whisparr_logger.debug(f"Standard API path failed for {instance_name}, trying v3 path")
+ whisparr_logger.debug("Standard API path failed for %s, trying v3 path", instance_name)
v3_url = f"{api_url.rstrip('/')}/api/v3/system/status"
response = requests.get(v3_url, headers=headers, timeout=10)
-
+
if response.status_code == 200:
version_data = response.json()
version = version_data.get("version", "Unknown")
-
+
# Validate that it's a V2 version
if version and version.startswith('2'):
result = {
@@ -285,52 +274,31 @@ def get_versions():
"success": False,
"message": f"Connection error: {str(e)}"
}
-
+
return jsonify({"success": True, "results": [result]})
except Exception as e:
- whisparr_logger.error(f"Error getting Whisparr versions: {str(e)}")
+ whisparr_logger.error("Error getting Whisparr versions: %s", str(e))
return jsonify({"success": False, "message": str(e)}), 500
+
@whisparr_bp.route('/logs', methods=['GET'])
def get_logs():
"""Get the log file for Whisparr"""
try:
# Get the log file path
log_file = APP_LOG_FILES.get("whisparr")
-
+
if not log_file or not os.path.exists(log_file):
return jsonify({"success": False, "message": "Log file not found"}), 404
-
+
# Read the log file (last 200 lines)
with open(log_file, 'r') as f:
lines = f.readlines()
log_content = ''.join(lines[-200:])
-
+
return jsonify({"success": True, "logs": log_content})
except Exception as e:
error_message = f"Error fetching Whisparr logs: {str(e)}"
whisparr_logger.error(error_message)
traceback.print_exc()
return jsonify({"success": False, "message": error_message}), 500
-
-@whisparr_bp.route('/clear-processed', methods=['POST'])
-def clear_processed():
- """Clear the processed missing and upgrade files for Whisparr"""
- try:
- # Reset missing items state file
- whisparr_logger.info("Clearing processed missing items state")
- reset_state_file("whisparr", "processed_missing")
-
- # Reset upgrade state file
- whisparr_logger.info("Clearing processed quality upgrade state")
- reset_state_file("whisparr", "processed_upgrades")
-
- return jsonify({
- "success": True,
- "message": "Successfully cleared Whisparr processed state"
- })
- except Exception as e:
- error_message = f"Error clearing Whisparr processed state: {str(e)}"
- whisparr_logger.error(error_message)
- return jsonify({"success": False, "message": error_message}), 500
-
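
`test_connection()` above probes the target host with a raw TCP socket before issuing the slower HTTP request, so outright connectivity failures are reported quickly. A condensed sketch of that pre-check, with a hypothetical URL in the usage comment:

```python
import socket
from urllib.parse import urlparse

def quick_port_check(api_url: str, timeout: float = 3.0) -> bool:
    """Return True if a TCP connection to the host/port in api_url succeeds."""
    parsed = urlparse(api_url)
    port = parsed.port or (443 if parsed.scheme == "https" else 80)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(timeout)
    try:
        # connect_ex returns 0 on success instead of raising on failure.
        return sock.connect_ex((parsed.hostname, port)) == 0
    finally:
        sock.close()

# Example (hypothetical URL): quick_port_check("http://whisparr.local:6969")
```
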
diff --git a/src/primary/background.py b/src/primary/background.py
index 748c7566..3fedc698 100644
--- a/src/primary/background.py
+++ b/src/primary/background.py
@@ -4,35 +4,33 @@
Supports multiple Arr applications running concurrently
"""
+__version__ = "1.0.0" # Consider updating this based on changes
+
import time
-import sys
-import os
-# import socket # No longer used directly
-import signal
import importlib
import logging
import threading
-from typing import Dict, List, Optional, Callable, Union, Tuple
import datetime
import traceback
-import pytz
-
-# Define the version number
-__version__ = "1.0.0" # Consider updating this based on changes
+from typing import Dict
-# Set up logging first
-from src.primary.utils.logger import setup_main_logger, get_logger # Import get_logger
-logger = setup_main_logger()
-
-# Import necessary modules
-from src.primary import config, settings_manager
-# Removed keys_manager import as settings_manager handles API details
-from src.primary.state import check_state_reset, calculate_reset_time
-from src.primary.stats_manager import check_hourly_cap_exceeded
-# Instance list generator has been removed
+from src.primary import settings_manager
+from src.primary.apps import prowlarr_routes
+from src.primary.apps.swaparr.handler import run_swaparr
+from src.primary.cycle_tracker import end_cycle, start_cycle, update_next_cycle
from src.primary.scheduler_engine import start_scheduler, stop_scheduler
+from src.primary.settings_manager import load_settings
+from src.primary.stateful_manager import (
+ get_instance_state_management_summary,
+ should_state_management_reset,
+ reset_state_management,
+)
+from src.primary.stats_manager import check_hourly_cap_exceeded
+from src.primary.utils.database import get_database
+from src.primary.utils.logger import setup_main_logger, get_logger
+from src.primary.utils.timezone_utils import get_user_timezone
-# from src.primary.utils.app_utils import get_ip_address # No longer used here
+logger = setup_main_logger()
# Global state for managing app threads and their status
app_threads: Dict[str, threading.Thread] = {}
@@ -50,15 +48,6 @@
# Define which apps have background processing cycles
CYCLICAL_APP_TYPES = ["sonarr", "radarr", "lidarr", "readarr", "whisparr", "eros"]
-# Instance list generator has been removed
-
-def _get_user_timezone():
- """Get the user's selected timezone from general settings"""
- try:
- from src.primary.utils.timezone_utils import get_user_timezone
- return get_user_timezone()
- except Exception:
- return pytz.UTC
def app_specific_loop(app_type: str) -> None:
"""
@@ -67,14 +56,12 @@ def app_specific_loop(app_type: str) -> None:
Args:
app_type: The type of Arr application (sonarr, radarr, lidarr, readarr)
"""
- from src.primary.cycle_tracker import update_next_cycle
-
app_logger = get_logger(app_type)
- app_logger.info(f"=== [{app_type.upper()}] Thread starting ===")
+ app_logger.info("=== [%s] Thread starting ===", app_type.upper())
# Immediately exit for non-cyclical apps (e.g., prowlarr, swaparr)
if app_type not in CYCLICAL_APP_TYPES:
- app_logger.info(f"Skipping background loop for non-cyclical app: {app_type}")
+ app_logger.info("Skipping background loop for non-cyclical app: %s", app_type)
return
# Dynamically import app-specific modules
@@ -120,11 +107,11 @@ def app_specific_loop(app_type: str) -> None:
missing_module = importlib.import_module('src.primary.apps.lidarr.missing')
upgrade_module = importlib.import_module('src.primary.apps.lidarr.upgrade')
# Use process_missing_albums as the function name
- process_missing = getattr(missing_module, 'process_missing_albums')
+ process_missing = getattr(missing_module, 'process_missing_albums')
process_upgrades = getattr(upgrade_module, 'process_cutoff_upgrades')
hunt_missing_setting = "hunt_missing_items"
# Use hunt_upgrade_items
- hunt_upgrade_setting = "hunt_upgrade_items"
+ hunt_upgrade_setting = "hunt_upgrade_items"
elif app_type == "readarr":
missing_module = importlib.import_module('src.primary.apps.readarr.missing')
upgrade_module = importlib.import_module('src.primary.apps.readarr.upgrade')
@@ -148,16 +135,16 @@ def app_specific_loop(app_type: str) -> None:
hunt_upgrade_setting = "hunt_upgrade_items"
else:
- app_logger.error(f"Unsupported app_type: {app_type}")
+ app_logger.error("Unsupported app_type: %s", app_type)
return # Exit thread if app type is invalid
except (ImportError, AttributeError) as e:
- app_logger.error(f"Failed to import modules or functions for {app_type}: {e}", exc_info=True)
+ app_logger.error("Failed to import modules or functions for %s: %s", app_type, e, exc_info=True)
return # Exit thread if essential modules fail to load
# Create app-specific logger using provided function
app_logger = logging.getLogger(f"huntarr.{app_type}")
-
+
while not stop_event.is_set():
# --- Load Settings for this Cycle --- #
try:
@@ -173,26 +160,23 @@ def app_specific_loop(app_type: str) -> None:
api_timeout = app_settings.get("api_timeout", 120) # Default to 120 seconds
except Exception as e:
- app_logger.error(f"Error loading settings for cycle: {e}", exc_info=True)
+ app_logger.error("Error loading settings for cycle: %s", e, exc_info=True)
stop_event.wait(60) # Wait before retrying
continue
- # --- State Reset Check --- #
- check_state_reset(app_type)
-
- app_logger.info(f"=== Starting {app_type.upper()} cycle ===")
+ app_logger.info("=== Starting %s cycle ===", app_type.upper())
# Mark cycle as started (set cyclelock to True)
try:
from src.primary.cycle_tracker import start_cycle
start_cycle(app_type)
except Exception as e:
- app_logger.warning(f"Failed to mark cycle start for {app_type}: {e}")
+ app_logger.warning("Failed to mark cycle start for %s: %s", app_type, e)
# Non-critical, continue execution
# Check if we need to use multi-instance mode
instances_to_process = []
-
+
# Use the dynamically loaded function (if found)
if get_instances_func:
# Multi-instance mode supported
@@ -203,7 +187,7 @@ def app_specific_loop(app_type: str) -> None:
pass
else:
# No instances found via get_configured_instances
- app_logger.debug(f"No configured {app_type} instances found. Skipping cycle.")
+ app_logger.debug("No configured %s instances found. Skipping cycle.", app_type)
stop_event.wait(sleep_duration)
continue
except Exception as e:
@@ -216,60 +200,62 @@ def app_specific_loop(app_type: str) -> None:
api_url = app_settings.get("api_url")
api_key = app_settings.get("api_key")
instance_name = app_settings.get("name", f"{app_type.capitalize()} Default") # Use 'name' or default
-
+
if api_url and api_key:
- app_logger.info(f"Processing {app_type} as single instance: {instance_name}")
+ app_logger.info("Processing %s as single instance: %s", app_type, instance_name)
# Create a list with a single dict matching the multi-instance structure
instances_to_process = [{
- "instance_name": instance_name,
- "api_url": api_url,
+ "instance_name": instance_name,
+ "api_url": api_url,
"api_key": api_key
}]
else:
- app_logger.warning(f"No 'get_configured_instances' function found and no valid single instance config (URL/Key) for {app_type}. Skipping cycle.")
+ app_logger.warning("No 'get_configured_instances' function found and no valid single instance config (URL/Key) for %s. Skipping cycle.", app_type)
stop_event.wait(sleep_duration)
continue
-
+
# If after all checks, instances_to_process is still empty
if not instances_to_process:
- app_logger.warning(f"No valid {app_type} instances to process this cycle (unexpected state). Skipping.")
+ app_logger.warning("No valid %s instances to process this cycle (unexpected state). Skipping.", app_type)
stop_event.wait(sleep_duration)
continue
-
+
# Process each instance dictionary returned by get_configured_instances
processed_any_items = False
enabled_instances = []
-
+
for instance_details in instances_to_process:
if stop_event.is_set():
break
-
+
instance_name = instance_details.get("instance_name", "Default") # Use the dict from get_configured_instances
- app_logger.info(f"Processing {app_type} instance: {instance_name}")
-
+ app_logger.info("Processing %s instance: %s", app_type, instance_name)
+
# Get instance-specific settings from the instance_details dict
api_url = instance_details.get("api_url", "")
api_key = instance_details.get("api_key", "")
- # Get global/shared settings from app_settings loaded at the start of the loop
- # Example: monitored_only = app_settings.get("monitored_only", True)
+ # --- State Reset Check --- #
+ if should_state_management_reset(app_type, instance_name):
+ app_logger.info("State has expired for %s instance '%s'. Resetting state.", app_type, instance_name)
+ reset_state_management(app_type, instance_name)
# --- Connection Check --- #
if not api_url or not api_key:
- app_logger.warning(f"Missing API URL or Key for instance '{instance_name}'. Skipping.")
+ app_logger.warning("Missing API URL or Key for instance '%s'. Skipping.", instance_name)
continue
try:
# Use instance details for connection check
- app_logger.debug(f"Checking connection to {app_type} instance '{instance_name}' at {api_url} with timeout {api_timeout}s")
+ app_logger.debug("Checking connection to %s instance '%s' at %s with timeout %ss", app_type, instance_name, api_url, api_timeout)
connected = check_connection(api_url, api_key, api_timeout=api_timeout)
if not connected:
- app_logger.warning(f"Failed to connect to {app_type} instance '{instance_name}' at {api_url}. Skipping.")
+ app_logger.warning("Failed to connect to %s instance '%s' at %s. Skipping.", app_type, instance_name, api_url)
continue
- app_logger.debug(f"Successfully connected to {app_type} instance: {instance_name}")
+ app_logger.debug("Successfully connected to %s instance: %s", app_type, instance_name)
except Exception as e:
- app_logger.error(f"Error connecting to {app_type} instance '{instance_name}': {e}", exc_info=True)
+ app_logger.error("Error connecting to %s instance '%s': %s", app_type, instance_name, e, exc_info=True)
continue # Skip this instance if connection fails
-
+
# --- API Cap Check --- #
try:
# Check if hourly API cap is exceeded
@@ -277,10 +263,10 @@ def app_specific_loop(app_type: str) -> None:
# Get the current cap status for logging
from src.primary.stats_manager import get_hourly_cap_status
cap_status = get_hourly_cap_status(app_type)
- app_logger.info(f"{app_type.upper()} hourly cap reached {cap_status['current_usage']} of {cap_status['limit']} (app-specific limit). Skipping cycle!")
+ app_logger.info("%s hourly cap reached %s of %s (app-specific limit). Skipping cycle!", app_type.upper(), cap_status['current_usage'], cap_status['limit'])
continue # Skip this instance if API cap is exceeded
except Exception as e:
- app_logger.error(f"Error checking hourly API cap for {app_type}: {e}", exc_info=True)
+ app_logger.error("Error checking hourly API cap for %s: %s", app_type, e, exc_info=True)
# Continue with the cycle even if cap check fails - safer than skipping
# --- Check if Hunt Modes are Enabled --- #
@@ -311,39 +297,38 @@ def app_specific_loop(app_type: str) -> None:
hunt_missing_enabled = hunt_missing_value > 0
hunt_upgrade_enabled = hunt_upgrade_value > 0
-
+
# Debug logging for per-instance hunt values
- app_logger.info(f"Instance '{instance_name}' - Missing: {hunt_missing_value} (enabled: {hunt_missing_enabled}), Upgrade: {hunt_upgrade_value} (enabled: {hunt_upgrade_enabled})")
+ app_logger.info("Instance '%s' - Missing: %s (enabled: %s), Upgrade: %s (enabled: %s)", instance_name, hunt_missing_value, hunt_missing_enabled, hunt_upgrade_value, hunt_upgrade_enabled)
# --- Queue Size Check --- # Moved inside loop
# Get maximum_download_queue_size from general settings (still using minimum_download_queue_size key for backward compatibility)
general_settings = settings_manager.load_settings('general')
max_queue_size = general_settings.get("minimum_download_queue_size", -1)
-
-
+
if max_queue_size >= 0:
try:
# Use instance details for queue check
current_queue_size = get_queue_size(api_url, api_key, api_timeout)
if current_queue_size >= max_queue_size:
- app_logger.info(f"Download queue size ({current_queue_size}) meets or exceeds maximum ({max_queue_size}) for {instance_name}. Skipping cycle for this instance.")
+ app_logger.info("Download queue size (%s) meets or exceeds maximum (%s) for %s. Skipping cycle for this instance.", current_queue_size, max_queue_size, instance_name)
continue # Skip processing for this instance
else:
- app_logger.info(f"Queue size ({current_queue_size}) is below maximum ({max_queue_size}). Proceeding.")
+ app_logger.info("Queue size (%s) is below maximum (%s). Proceeding.", current_queue_size, max_queue_size)
except Exception as e:
- app_logger.warning(f"Could not get download queue size for {instance_name}. Proceeding anyway. Error: {e}", exc_info=False) # Log less verbosely
-
+ app_logger.warning("Could not get download queue size for %s. Proceeding anyway. Error: %s", instance_name, e, exc_info=False) # Log less verbosely
+
# Prepare args dictionary for processing functions
# Combine instance details with general app settings for the processing functions
# Assuming app_settings already contains most general settings, add instance specifics
combined_settings = app_settings.copy() # Start with general settings
combined_settings.update(instance_details) # Add/overwrite with instance specifics (name, url, key)
-
+
# Ensure settings from database are consistently used for all apps
combined_settings["api_timeout"] = settings_manager.get_advanced_setting("api_timeout", 120)
combined_settings["command_wait_delay"] = settings_manager.get_advanced_setting("command_wait_delay", 1)
combined_settings["command_wait_attempts"] = settings_manager.get_advanced_setting("command_wait_attempts", 600)
-
+
# Define the stop check function
stop_check_func = stop_event.is_set
@@ -360,7 +345,7 @@ def app_specific_loop(app_type: str) -> None:
hunt_missing_mode = instance_details.get("hunt_missing_mode", "seasons_packs")
command_wait_delay = combined_settings.get("command_wait_delay", 1)
command_wait_attempts = combined_settings.get("command_wait_attempts", 600)
-
+
if app_type == "sonarr":
air_date_delay_days = instance_details.get("air_date_delay_days", 0)
processed_missing = process_missing(
@@ -380,11 +365,11 @@ def app_specific_loop(app_type: str) -> None:
else:
# For other apps that still use the old signature
processed_missing = process_missing(app_settings=combined_settings, stop_check=stop_check_func)
-
+
if processed_missing:
processed_any_items = True
except Exception as e:
- app_logger.error(f"Error during missing processing for {instance_name}: {e}", exc_info=True)
+ app_logger.error("Error during missing processing for %s: %s", instance_name, e, exc_info=True)
# --- Process Upgrades --- #
if hunt_upgrade_enabled and process_upgrades:
@@ -399,7 +384,7 @@ def app_specific_loop(app_type: str) -> None:
upgrade_mode = instance_details.get("upgrade_mode", "seasons_packs")
command_wait_delay = combined_settings.get("command_wait_delay", 1)
command_wait_attempts = combined_settings.get("command_wait_attempts", 600)
-
+
processed_upgrades = process_upgrades(
api_url=api_url,
api_key=api_key,
@@ -415,146 +400,121 @@ def app_specific_loop(app_type: str) -> None:
else:
# For other apps that still use the old signature
processed_upgrades = process_upgrades(app_settings=combined_settings, stop_check=stop_check_func)
-
+
if processed_upgrades:
processed_any_items = True
except Exception as e:
- app_logger.error(f"Error during upgrade processing for {instance_name}: {e}", exc_info=True)
-
-
+ app_logger.error("Error during upgrade processing for %s: %s", instance_name, e, exc_info=True)
# Small delay between instances if needed (optional)
if not stop_event.is_set():
time.sleep(1) # Short pause
enabled_instances.append(instance_name)
- # --- Cycle End & Sleep --- #
- calculate_reset_time(app_type) # Pass app_type here if needed by the function
-
# Log cycle completion
if processed_any_items:
- app_logger.info(f"=== {app_type.upper()} cycle finished. Processed items across instances. ===")
+ app_logger.info("=== %s cycle finished. Processed items across instances. ===", app_type.upper())
else:
- app_logger.info(f"=== {app_type.upper()} cycle finished. No items processed in any instance. ===")
-
+ app_logger.info("=== %s cycle finished. No items processed in any instance. ===", app_type.upper())
+
# Add state management summary logging for user clarity (only for hunting apps, not Swaparr)
if app_type != "swaparr":
try:
- from src.primary.stateful_manager import get_state_management_summary
-
# Get summary for each enabled instance with per-instance settings
instance_summaries = []
total_processed = 0
has_any_processed = False
-
+
for instance_name in enabled_instances:
- # Get per-instance settings
- instance_hours = None
- instance_enabled = True
- instance_mode = "custom"
-
- try:
- # Look up the instance in the configured instances
- if configured_instances and app_type in configured_instances:
- for instance_details in configured_instances[app_type]:
- if instance_details.get("instance_name") == instance_name:
- instance_hours = instance_details.get("state_management_hours", 168)
- instance_mode = instance_details.get("state_management_mode", "custom")
- instance_enabled = (instance_mode != "disabled")
- break
- except Exception as e:
- app_logger.warning(f"Could not load instance settings for {instance_name}: {e}")
- instance_hours = 168 # Default fallback
-
+
# Get summary for this instance
- summary = get_state_management_summary(app_type, instance_name, instance_hours)
-
+ summary = get_instance_state_management_summary(app_type, instance_name)
+
# Store instance-specific information
instance_summaries.append({
"name": instance_name,
- "enabled": instance_enabled,
- "mode": instance_mode,
- "hours": instance_hours,
+ "enabled": summary["state_management_enabled"],
+ "mode": summary["state_management_mode"],
+ "hours": summary["state_management_hours"],
"processed_count": summary["processed_count"],
"next_reset_time": summary["next_reset_time"],
"has_processed_items": summary["has_processed_items"]
})
-
+
# Only count if state management is enabled for this instance
- if instance_enabled and summary["has_processed_items"]:
+ if summary["state_management_enabled"] and summary["has_processed_items"]:
total_processed += summary["processed_count"]
has_any_processed = True
-
+
# Log per-instance state management info
if instance_summaries:
- app_logger.info(f"=== STATE MANAGEMENT SUMMARY FOR {app_type.upper()} ===")
-
+ app_logger.info("=== STATE MANAGEMENT SUMMARY FOR %s ===", app_type.upper())
+
for inst in instance_summaries:
if inst["enabled"]:
if inst["processed_count"] > 0:
- app_logger.info(f" {inst['name']}: {inst['processed_count']} items tracked, next reset: {inst['next_reset_time']} ({inst['hours']}h interval)")
+ app_logger.info(" %s: %s items tracked, next reset: %s (%sh interval)", inst['name'], inst['processed_count'], inst['next_reset_time'], inst['hours'])
else:
- app_logger.info(f" {inst['name']}: No items tracked yet, next reset: {inst['next_reset_time']} ({inst['hours']}h interval)")
+ app_logger.info(" %s: No items tracked yet, next reset: %s (%sh interval)", inst['name'], inst['next_reset_time'], inst['hours'])
else:
- app_logger.info(f" {inst['name']}: State management disabled")
-
+ app_logger.info(" %s: State management disabled", inst['name'])
+
# Overall summary
if not processed_any_items and has_any_processed:
# Items were skipped due to state management
- app_logger.info(f"RESULT: {total_processed} items skipped due to state management (already processed)")
+ app_logger.info("RESULT: %s items skipped due to state management (already processed)", total_processed)
elif processed_any_items:
# Items were processed, show summary
- app_logger.info(f"RESULT: Items processed successfully. Total tracked across instances: {total_processed}")
+ app_logger.info("RESULT: Items processed successfully. Total tracked across instances: %s", total_processed)
else:
# No items processed and no state management blocking
if total_processed > 0:
- app_logger.info(f"RESULT: No new items found. Total tracked across instances: {total_processed}")
+ app_logger.info("RESULT: No new items found. Total tracked across instances: %s", total_processed)
else:
- app_logger.info(f"RESULT: No items to process and no items tracked yet")
-
+ app_logger.info("RESULT: No items to process and no items tracked yet")
+
except Exception as e:
- app_logger.warning(f"Could not generate state management summary: {e}")
+ app_logger.warning("Could not generate state management summary: %s", e)
else:
# Swaparr uses its own state management for strikes and removed downloads
- app_logger.debug(f"Swaparr uses its own strike/removal tracking, not the hunting state manager")
-
+ app_logger.debug("Swaparr uses its own strike/removal tracking, not the hunting state manager")
+
# Calculate sleep duration (use configured or default value)
sleep_seconds = app_settings.get("sleep_duration", 900) # Default to 15 minutes
-
+
# Sleep with periodic checks for reset file
# Calculate and format the time when the next cycle will begin
# Use user's selected timezone for all time operations
-
+
# Get user's selected timezone
- user_tz = _get_user_timezone()
-
+ user_tz = get_user_timezone()
+
# Get current time in user's timezone - remove microseconds for clean timestamps
now_user_tz = datetime.datetime.now(user_tz).replace(microsecond=0)
-
+
# Calculate next cycle time in user's timezone without microseconds
next_cycle_time = now_user_tz + datetime.timedelta(seconds=sleep_seconds)
-
- app_logger.debug(f"Current time ({user_tz}): {now_user_tz.strftime('%Y-%m-%d %H:%M:%S')}")
- app_logger.info(f"Next cycle will begin at {next_cycle_time.strftime('%Y-%m-%d %H:%M:%S')} ({user_tz})")
- app_logger.info(f"Sleep duration: {sleep_seconds} seconds")
-
+
+ app_logger.debug("Current time (%s): %s", user_tz, now_user_tz.strftime('%Y-%m-%d %H:%M:%S'))
+ app_logger.info("Next cycle will begin at %s (%s)", next_cycle_time.strftime('%Y-%m-%d %H:%M:%S'), user_tz)
+ app_logger.info("Sleep duration: %s seconds", sleep_seconds)
+
# Update cycle tracking with user timezone time
next_cycle_naive = next_cycle_time.replace(tzinfo=None) if next_cycle_time.tzinfo else next_cycle_time
update_next_cycle(app_type, next_cycle_naive)
-
+
# Mark cycle as ended (set cyclelock to False) and update next cycle time
# Use user's timezone for internal storage consistency
try:
- from src.primary.cycle_tracker import end_cycle
# Convert timezone-aware datetime to naive for clean timestamp generation
next_cycle_naive = next_cycle_time.replace(tzinfo=None) if next_cycle_time.tzinfo else next_cycle_time
end_cycle(app_type, next_cycle_naive)
except Exception as e:
- app_logger.warning(f"Failed to mark cycle end for {app_type}: {e}")
+ app_logger.warning("Failed to mark cycle end for %s: %s", app_type, e)
# Non-critical, continue execution
-
- app_logger.debug(f"Sleeping for {sleep_seconds} seconds before next cycle...")
-
+
+ app_logger.debug("Sleeping for %s seconds before next cycle...", sleep_seconds)
+
# Use shorter sleep intervals and check for reset file
wait_interval = 1 # Check every second to be more responsive
elapsed = 0
@@ -563,56 +523,58 @@ def app_specific_loop(app_type: str) -> None:
if stop_event.is_set():
app_logger.info("Stop event detected during sleep. Breaking out of sleep cycle.")
break
-
+
# Check for database reset request
try:
from src.primary.utils.database import get_database
db = get_database()
reset_timestamp = db.get_pending_reset_request(app_type)
if reset_timestamp:
- app_logger.info(f"!!! RESET REQUEST DETECTED !!! Manual cycle reset triggered for {app_type} (timestamp: {reset_timestamp}). Starting new cycle immediately.")
-
+ app_logger.info("!!! RESET REQUEST DETECTED !!! Manual cycle reset triggered for %s (timestamp: %s). Starting new cycle immediately.", app_type, reset_timestamp)
+
# Mark the reset request as processed
db.mark_reset_request_processed(app_type)
- app_logger.info(f"Reset request processed for {app_type}. Starting new cycle now.")
+ app_logger.info("Reset request processed for %s. Starting new cycle now.", app_type)
break
except Exception as e:
- app_logger.error(f"Error checking reset request for {app_type}: {e}", exc_info=True)
-
+ app_logger.error("Error checking reset request for %s: %s", app_type, e, exc_info=True)
+
# Sleep for a short interval
stop_event.wait(wait_interval)
elapsed += wait_interval
-
+
# If we've slept for at least 30 seconds, update the logger message every 30 seconds
if elapsed > 0 and elapsed % 30 == 0:
- app_logger.debug(f"Still sleeping, {sleep_seconds - elapsed} seconds remaining before next cycle...")
-
- app_logger.info(f"=== [{app_type.upper()}] Thread stopped ====")
+ app_logger.debug("Still sleeping, %s seconds remaining before next cycle...", sleep_seconds - elapsed)
+
+    app_logger.info("=== [%s] Thread stopped ===", app_type.upper())
+
def reset_app_cycle(app_type: str) -> bool:
"""
Trigger a manual reset of an app's cycle.
-
+
Args:
app_type: The type of Arr application (sonarr, radarr, lidarr, readarr, etc.)
-
+
Returns:
bool: True if the reset was triggered, False if the app is not running
"""
- logger.info(f"Manual cycle reset requested for {app_type} - Creating reset request")
-
+ logger.info("Manual cycle reset requested for %s - Creating reset request", app_type)
+
# Create a reset request in the database
try:
from src.primary.utils.database import get_database
db = get_database()
success = db.create_reset_request(app_type)
if success:
- logger.info(f"Reset request created for {app_type}. Cycle will reset on next check.")
+ logger.info("Reset request created for %s. Cycle will reset on next check.", app_type)
return success
except Exception as e:
- logger.error(f"Error creating reset request for {app_type}: {e}", exc_info=True)
+ logger.error("Error creating reset request for %s: %s", app_type, e, exc_info=True)
return False
+
def start_app_threads():
"""Start threads for all configured and enabled apps."""
configured_apps_list = settings_manager.get_configured_apps() # Corrected function name
@@ -622,69 +584,31 @@ def start_app_threads():
if is_configured:
# Skip non-cyclical apps (e.g., prowlarr handled via routes, swaparr has its own thread)
if app_type not in CYCLICAL_APP_TYPES:
- logger.debug(f"Configured non-cyclical app detected; not starting background thread: {app_type}")
+ logger.debug("Configured non-cyclical app detected; not starting background thread: %s", app_type)
continue
- # Optional: Add an explicit 'enabled' setting check if desired
- # enabled = settings_manager.get_setting(app_type, "enabled", True)
- # if not enabled:
- # logger.info(f"Skipping {app_type} thread as it is disabled in settings.")
- # continue
-
if app_type not in app_threads or not app_threads[app_type].is_alive():
if app_type in app_threads: # If it existed but died
- logger.warning(f"{app_type} thread died, restarting...")
+ logger.warning("%s thread died, restarting...", app_type)
del app_threads[app_type]
else: # Starting for the first time
- logger.info(f"Starting thread for {app_type}...")
+ logger.info("Starting thread for %s...", app_type)
thread = threading.Thread(target=app_specific_loop, args=(app_type,), name=f"{app_type}-Loop", daemon=True)
app_threads[app_type] = thread
thread.start()
elif app_type in app_threads and app_threads[app_type].is_alive():
- # If app becomes un-configured, stop its thread? Or let it fail connection check?
- # For now, let it run and fail connection check.
- logger.warning(f"{app_type} is no longer configured. Thread will likely stop after failing connection checks.")
- # else: # App not configured and no thread running - do nothing
- # logger.debug(f"{app_type} is not configured. No thread started.")
- pass # Corrected indentation
-
-def check_and_restart_threads():
- """Check if any threads have died and restart them if the app is still configured."""
- configured_apps_list = settings_manager.get_configured_apps() # Corrected function name
- configured_apps = {app: True for app in configured_apps_list} # Convert list to dict format expected below
-
- for app_type, thread in list(app_threads.items()):
- # Only monitor cyclical apps for restarts
- if app_type not in CYCLICAL_APP_TYPES:
- continue
- if not thread.is_alive():
- logger.warning(f"{app_type} thread died unexpectedly.")
- del app_threads[app_type] # Remove dead thread
- # Only restart if it's still configured
- if configured_apps.get(app_type, False):
- logger.info(f"Restarting thread for {app_type}...")
- new_thread = threading.Thread(target=app_specific_loop, args=(app_type,), name=f"{app_type}-Loop", daemon=True)
- app_threads[app_type] = new_thread
- new_thread.start()
- else:
- logger.info(f"Not restarting {app_type} thread as it is no longer configured.")
+ # If app becomes un-configured, stop its thread? Or let it fail connection check?
+ # For now, let it run and fail connection check.
+ logger.warning("%s is no longer configured. Thread will likely stop after failing connection checks.", app_type)
-def shutdown_handler(signum, frame):
- """Handle termination signals (SIGINT, SIGTERM)."""
- signal_name = "SIGINT" if signum == signal.SIGINT else "SIGTERM" if signum == signal.SIGTERM else f"Signal {signum}"
- logger.info(f"Received {signal_name}. Initiating background tasks shutdown...")
- stop_event.set() # Signal all threads to stop
-
- # Log shutdown progress for Docker diagnostics
- logger.info("Background shutdown initiated - threads will stop gracefully")
def shutdown_threads():
"""Wait for all threads to finish."""
import time
shutdown_start = time.time()
logger.info("Waiting for all app threads to stop...")
-
+
# Stop the hourly API cap scheduler
global hourly_cap_scheduler_thread
if hourly_cap_scheduler_thread and hourly_cap_scheduler_thread.is_alive():
@@ -695,7 +619,7 @@ def shutdown_threads():
logger.warning("Hourly API cap scheduler did not stop gracefully")
else:
logger.info("Hourly API cap scheduler stopped")
-
+
# Stop the Prowlarr stats refresher
global prowlarr_stats_thread
if prowlarr_stats_thread and prowlarr_stats_thread.is_alive():
@@ -716,79 +640,81 @@ def shutdown_threads():
logger.warning("Swaparr thread did not stop gracefully")
else:
logger.info("Swaparr thread stopped")
-
+
# Stop the scheduler engine
try:
logger.info("Stopping schedule action engine...")
stop_scheduler()
logger.info("Schedule action engine stopped successfully")
except Exception as e:
- logger.error(f"Error stopping schedule action engine: {e}")
-
+ logger.error("Error stopping schedule action engine: %s", e)
+
# Wait for all app threads to terminate
active_threads = [name for name, thread in app_threads.items() if thread.is_alive()]
if active_threads:
- logger.info(f"Waiting for {len(active_threads)} app threads to stop: {', '.join(active_threads)}")
-
+ logger.info("Waiting for %s app threads to stop: %s", len(active_threads), ', '.join(active_threads))
+
for name, thread in app_threads.items():
if thread.is_alive():
- logger.debug(f"Waiting for {name} thread to stop...")
+ logger.debug("Waiting for %s thread to stop...", name)
thread.join(timeout=10.0)
if thread.is_alive():
- logger.warning(f"{name} thread did not stop gracefully within 10 seconds")
+ logger.warning("%s thread did not stop gracefully within 10 seconds", name)
else:
- logger.debug(f"{name} thread stopped successfully")
-
+ logger.debug("%s thread stopped successfully", name)
+
shutdown_duration = time.time() - shutdown_start
- logger.info(f"All app threads stopped. Shutdown completed in {shutdown_duration:.2f} seconds")
+ logger.info("All app threads stopped. Shutdown completed in %.2f seconds", shutdown_duration)
+
def hourly_cap_scheduler_loop():
"""Main loop for the hourly API cap scheduler thread
Checks time every 30 seconds and resets caps if needed at the top of the hour
"""
logger.info("Starting hourly API cap scheduler loop")
-
+
try:
from src.primary.stats_manager import reset_hourly_caps
-
+
# Initial check in case we're starting right at the top of an hour
current_time = datetime.datetime.now()
if current_time.minute == 0:
- logger.debug(f"Initial hourly reset triggered at {current_time.hour}:00")
+ logger.debug("Initial hourly reset triggered at %s:00", current_time.hour)
reset_hourly_caps()
-
+
# Main monitoring loop
while not stop_event.is_set():
try:
# Sleep for 30 seconds between checks
# This ensures we won't miss the top of the hour
stop_event.wait(30)
-
+
if stop_event.is_set():
break
-
+
# Check if it's the top of the hour (00 minute mark)
current_time = datetime.datetime.now()
if current_time.minute == 0:
- logger.debug(f"Hourly reset triggered at {current_time.hour}:00")
+ logger.debug("Hourly reset triggered at %s:00", current_time.hour)
success = reset_hourly_caps()
if success:
- logger.debug(f"Successfully reset hourly API caps at {current_time.hour}:00")
+ logger.debug("Successfully reset hourly API caps at %s:00", current_time.hour)
else:
- logger.error(f"Failed to reset hourly API caps at {current_time.hour}:00")
-
+ logger.error("Failed to reset hourly API caps at %s:00", current_time.hour)
+
except Exception as e:
- logger.error(f"Error in hourly cap scheduler: {e}")
+ logger.error("Error in hourly cap scheduler: %s", e)
logger.error(traceback.format_exc())
# Sleep briefly to avoid spinning in case of repeated errors
time.sleep(5)
-
+
except Exception as e:
- logger.error(f"Fatal error in hourly cap scheduler: {e}")
+ logger.error("Fatal error in hourly cap scheduler: %s", e)
logger.error(traceback.format_exc())
-
+
logger.info("Hourly API cap scheduler stopped")
+
def prowlarr_stats_loop():
"""Background loop to refresh Prowlarr statistics cache every 5 minutes.
Runs independently of the frontend and does nothing if Prowlarr is not configured or disabled.
@@ -796,10 +722,6 @@ def prowlarr_stats_loop():
refresher_logger = get_logger("prowlarr")
refresher_logger.info("Prowlarr stats refresher thread started")
try:
- from src.primary.settings_manager import load_settings
- # Import inside loop target to avoid circular issues at module import time
- from src.primary.apps import prowlarr_routes as prow
-
refresh_interval_seconds = 300 # 5 minutes
# Do an immediate pass on start
@@ -818,16 +740,16 @@ def prowlarr_stats_loop():
# Trigger cache update (safe even if cache is warm)
try:
- prow._update_stats_cache()
+ prowlarr_routes._update_stats_cache()
except Exception as e:
- refresher_logger.error(f"Prowlarr stats refresh error: {e}", exc_info=True)
+ refresher_logger.error("Prowlarr stats refresh error: %s", e, exc_info=True)
# Sleep until next refresh or until stop requested
if stop_event.wait(refresh_interval_seconds):
break
except Exception as loop_error:
- refresher_logger.error(f"Unexpected error in Prowlarr stats refresher: {loop_error}", exc_info=True)
+ refresher_logger.error("Unexpected error in Prowlarr stats refresher: %s", loop_error, exc_info=True)
# Back off briefly to avoid tight error loops
if stop_event.wait(60):
break
@@ -839,17 +761,13 @@ def swaparr_app_loop():
"""Dedicated Swaparr processing loop that follows same patterns as other apps"""
swaparr_logger = get_logger("swaparr")
swaparr_logger.info("Swaparr thread started")
-
+
try:
- from src.primary.apps.swaparr.handler import run_swaparr
- from src.primary.settings_manager import load_settings
- from src.primary.cycle_tracker import start_cycle, end_cycle, update_next_cycle
-
while not stop_event.is_set():
try:
# Load Swaparr settings
swaparr_settings = load_settings("swaparr")
-
+
if not swaparr_settings or not swaparr_settings.get("enabled", False):
# Swaparr is disabled - no need to log this repeatedly
# Sleep for 30 seconds when disabled, then check again
@@ -857,100 +775,101 @@ def swaparr_app_loop():
continue
else:
break
-
+
# Get sleep duration from settings
sleep_duration = swaparr_settings.get("sleep_duration", 900)
-
+
# Get user's timezone
- user_tz = _get_user_timezone()
-
+ user_tz = get_user_timezone()
+
# Calculate next cycle time in user's timezone
now_user_tz = datetime.datetime.now(user_tz).replace(microsecond=0)
next_cycle_time = now_user_tz + datetime.timedelta(seconds=sleep_duration)
-
+
# Start cycle tracking
start_cycle("swaparr")
-
+
# Start cycle
swaparr_logger.info("=== SWAPARR cycle started. Processing stalled downloads across all instances. ===")
-
+
try:
# Run Swaparr processing
run_swaparr()
swaparr_logger.info("=== SWAPARR cycle finished. Processed stalled downloads across instances. ===")
except Exception as e:
- swaparr_logger.error(f"Error during Swaparr processing: {e}", exc_info=True)
+ swaparr_logger.error("Error during Swaparr processing: %s", e, exc_info=True)
swaparr_logger.info("=== SWAPARR cycle finished with errors. ===")
-
+
# End cycle tracking
next_cycle_naive = next_cycle_time.replace(tzinfo=None) if next_cycle_time.tzinfo else next_cycle_time
end_cycle("swaparr", next_cycle_naive)
update_next_cycle("swaparr", next_cycle_naive)
-
+
# Sleep duration and next cycle info (like other apps)
- swaparr_logger.debug(f"Current time ({user_tz}): {now_user_tz.strftime('%Y-%m-%d %H:%M:%S')}")
- swaparr_logger.info(f"Next cycle will begin at {next_cycle_time.strftime('%Y-%m-%d %H:%M:%S')} ({user_tz})")
- swaparr_logger.info(f"Sleep duration: {sleep_duration} seconds")
-
+ swaparr_logger.debug("Current time (%s): %s", user_tz, now_user_tz.strftime('%Y-%m-%d %H:%M:%S'))
+ swaparr_logger.info("Next cycle will begin at %s (%s)", next_cycle_time.strftime('%Y-%m-%d %H:%M:%S'), user_tz)
+ swaparr_logger.info("Sleep duration: %s seconds", sleep_duration)
+
# Sleep with responsiveness to stop events and reset requests (like other apps)
elapsed = 0
wait_interval = 5 # Check every 5 seconds for responsiveness
while elapsed < sleep_duration and not stop_event.is_set():
# Check for database reset request (same logic as other apps)
try:
- from src.primary.utils.database import get_database
db = get_database()
reset_timestamp = db.get_pending_reset_request("swaparr")
if reset_timestamp:
- swaparr_logger.info(f"!!! RESET REQUEST DETECTED !!! Manual cycle reset triggered for swaparr (timestamp: {reset_timestamp}). Starting new cycle immediately.")
-
+ swaparr_logger.info("!!! RESET REQUEST DETECTED !!! Manual cycle reset triggered for swaparr (timestamp: %s). Starting new cycle immediately.", reset_timestamp)
+
# Mark the reset request as processed
db.mark_reset_request_processed("swaparr")
- swaparr_logger.info(f"Reset request processed for swaparr. Starting new cycle now.")
+ swaparr_logger.info("Reset request processed for swaparr. Starting new cycle now.")
break
except Exception as e:
- swaparr_logger.error(f"Error checking reset request for swaparr: {e}", exc_info=True)
-
+ swaparr_logger.error("Error checking reset request for swaparr: %s", e, exc_info=True)
+
# Check for stop event
if stop_event.is_set():
swaparr_logger.info("Stop event detected during sleep. Breaking out of sleep cycle.")
break
-
+
# Sleep for a short interval
stop_event.wait(wait_interval)
elapsed += wait_interval
-
+
# Log progress every 30 seconds (like other apps)
if elapsed > 0 and elapsed % 30 == 0:
- swaparr_logger.debug(f"Still sleeping, {sleep_duration - elapsed} seconds remaining before next cycle...")
-
+ swaparr_logger.debug("Still sleeping, %s seconds remaining before next cycle...", sleep_duration - elapsed)
+
except Exception as e:
- swaparr_logger.error(f"Unexpected error in Swaparr loop: {e}", exc_info=True)
+ swaparr_logger.error("Unexpected error in Swaparr loop: %s", e, exc_info=True)
# Sleep briefly to avoid spinning in case of repeated errors
time.sleep(60)
-
+
except Exception as e:
- swaparr_logger.error(f"Fatal error in Swaparr thread: {e}", exc_info=True)
-
+ swaparr_logger.error("Fatal error in Swaparr thread: %s", e, exc_info=True)
+
swaparr_logger.info("Swaparr thread stopped")
+
def start_hourly_cap_scheduler():
"""Start the hourly API cap scheduler thread"""
global hourly_cap_scheduler_thread
-
+
if hourly_cap_scheduler_thread and hourly_cap_scheduler_thread.is_alive():
logger.info("Hourly API cap scheduler already running")
return
-
+
# Create and start the scheduler thread
hourly_cap_scheduler_thread = threading.Thread(
- target=hourly_cap_scheduler_loop,
- name="HourlyCapScheduler",
+ target=hourly_cap_scheduler_loop,
+ name="HourlyCapScheduler",
daemon=True
)
hourly_cap_scheduler_thread.start()
-
- logger.info(f"Hourly API cap scheduler started. Thread is alive: {hourly_cap_scheduler_thread.is_alive()}")
+
+ logger.info("Hourly API cap scheduler started. Thread is alive: %s", hourly_cap_scheduler_thread.is_alive())
+
def start_prowlarr_stats_thread():
"""Start the Prowlarr statistics refresher thread (5-minute cadence)."""
@@ -964,61 +883,64 @@ def start_prowlarr_stats_thread():
daemon=True,
)
prowlarr_stats_thread.start()
- logger.info(f"Prowlarr stats refresher started. Thread is alive: {prowlarr_stats_thread.is_alive()}")
+ logger.info("Prowlarr stats refresher started. Thread is alive: %s", prowlarr_stats_thread.is_alive())
+
def start_swaparr_thread():
"""Start the dedicated Swaparr processing thread"""
global swaparr_thread
-
+
if swaparr_thread and swaparr_thread.is_alive():
logger.info("Swaparr thread already running")
return
-
+
# Create and start the Swaparr thread
swaparr_thread = threading.Thread(
- target=swaparr_app_loop,
- name="SwaparrApp",
+ target=swaparr_app_loop,
+ name="SwaparrApp",
daemon=True
)
swaparr_thread.start()
-
- logger.info(f"Swaparr thread started. Thread is alive: {swaparr_thread.is_alive()}")
+
+ logger.info("Swaparr thread started. Thread is alive: %s", swaparr_thread.is_alive())
+
def start_huntarr():
"""Main entry point for Huntarr background tasks."""
- logger.info(f"--- Starting Huntarr Background Tasks v{__version__} --- ")
-
+ logger.info("--- Starting Huntarr Background Tasks v%s --- ", __version__)
+
# Migration environment variable no longer used
-
+
# Start the hourly API cap scheduler
try:
start_hourly_cap_scheduler()
logger.info("Hourly API cap scheduler started successfully")
except Exception as e:
- logger.error(f"Failed to start hourly API cap scheduler: {e}")
-
+ logger.error("Failed to start hourly API cap scheduler: %s", e)
+
# Start the Swaparr processing thread
try:
start_swaparr_thread()
logger.info("Swaparr thread started successfully")
except Exception as e:
- logger.error(f"Failed to start Swaparr thread: {e}")
-
+ logger.error("Failed to start Swaparr thread: %s", e)
+
# Start the Prowlarr stats refresher
try:
start_prowlarr_stats_thread()
logger.info("Prowlarr stats refresher started successfully")
except Exception as e:
- logger.error(f"Failed to start Prowlarr stats refresher: {e}")
-
+ logger.error("Failed to start Prowlarr stats refresher: %s", e)
+
# Start the scheduler engine
try:
start_scheduler()
logger.info("Schedule action engine started successfully")
except Exception as e:
- logger.error(f"Failed to start schedule action engine: {e}")
-
+ logger.error("Failed to start schedule action engine: %s", e)
+
# Configuration logging has been disabled to reduce log spam
# Settings are loaded and used internally without verbose logging
@@ -1026,14 +948,13 @@ def start_huntarr():
# Main loop: Start and monitor app threads
while not stop_event.is_set():
start_app_threads() # Start/Restart threads for configured apps
- # check_and_restart_threads() # This is implicitly handled by start_app_threads checking is_alive
stop_event.wait(15) # Check for stop signal every 15 seconds
except Exception as e:
- logger.exception(f"Unexpected error in main monitoring loop: {e}")
+ logger.exception("Unexpected error in main monitoring loop: %s", e)
finally:
logger.info("Background task main loop exited. Shutting down threads...")
if not stop_event.is_set():
- stop_event.set() # Ensure stop is signaled if loop exited unexpectedly
+ stop_event.set() # Ensure stop is signaled if loop exited unexpectedly
shutdown_threads()
- logger.info("--- Huntarr Background Tasks stopped --- ")
\ No newline at end of file
+ logger.info("--- Huntarr Background Tasks stopped --- ")
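
Both `app_specific_loop()` and `swaparr_app_loop()` sleep in short slices so that a stop event or a queued reset request interrupts the wait almost immediately instead of after the full sleep duration. A stripped-down sketch of that pattern, with the reset check abstracted behind a callable (a hypothetical stand-in for the `db.get_pending_reset_request()` call used above):

```python
import threading
from typing import Callable

def responsive_sleep(stop_event: threading.Event,
                     total_seconds: int,
                     reset_requested: Callable[[], bool],
                     interval: int = 1) -> None:
    """Wait up to total_seconds, waking early on stop or manual reset."""
    elapsed = 0
    while elapsed < total_seconds and not stop_event.is_set():
        if reset_requested():
            break  # start the next cycle immediately
        stop_event.wait(interval)
        elapsed += interval

# Example: wait 5 seconds with a reset check that never fires.
responsive_sleep(threading.Event(), 5, lambda: False)
```
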
diff --git a/src/primary/cycle_tracker.py b/src/primary/cycle_tracker.py
index 02a6a577..a9bf2a68 100644
--- a/src/primary/cycle_tracker.py
+++ b/src/primary/cycle_tracker.py
@@ -10,30 +10,18 @@
from typing import Dict, Any, Optional
from src.primary.utils.logger import get_logger
from src.primary.utils.database import get_database
+from src.primary.utils.timezone_utils import get_user_timezone
logger = get_logger("cycle_tracker")
# Lock for thread-safe operations
_lock = threading.Lock()
-def _get_user_timezone():
- """Get the user's configured timezone"""
- try:
- from src.primary.settings_manager import load_settings
- general_settings = load_settings("general")
- timezone_str = general_settings.get("timezone", "UTC")
-
- import pytz
- return pytz.timezone(timezone_str)
- except Exception as e:
- logger.warning(f"Error getting user timezone, defaulting to UTC: {e}")
- import pytz
- return pytz.UTC
def update_sleep_json(app_type: str, next_cycle_time: datetime.datetime, cyclelock: bool = None) -> None:
"""
Update the sleep/cycle data in the database
-
+
Args:
app_type: The type of app (sonarr, radarr, etc.)
next_cycle_time: When the next cycle will begin
@@ -41,34 +29,34 @@ def update_sleep_json(app_type: str, next_cycle_time: datetime.datetime, cyclelo
"""
try:
logger.debug(f"Updating sleep data for {app_type}, cyclelock: {cyclelock}")
-
+
# Ensure next_cycle_time is timezone-aware and in user's selected timezone
- user_tz = _get_user_timezone()
-
+ user_tz = get_user_timezone()
+
if next_cycle_time.tzinfo is None:
# If naive datetime, assume it's in user's timezone
next_cycle_time = user_tz.localize(next_cycle_time)
elif next_cycle_time.tzinfo != user_tz:
# Convert to user's timezone if it's in a different timezone
next_cycle_time = next_cycle_time.astimezone(user_tz)
-
+
# Remove microseconds for clean timestamps
next_cycle_time = next_cycle_time.replace(microsecond=0)
-
+
# Calculate current time in user's timezone for consistency
now_user_tz = datetime.datetime.now(user_tz).replace(microsecond=0)
-
+
# Store in database
db = get_database()
-
+
# Get current data to preserve existing values
current_data = db.get_sleep_data(app_type)
-
+
# Determine cyclelock value
if cyclelock is None:
# If not explicitly set, preserve existing value or default to True (cycle starting)
cyclelock = current_data.get('cycle_lock', True)
-
+
# Update the database
db.set_sleep_data(
app_type=app_type,
@@ -77,24 +65,24 @@ def update_sleep_json(app_type: str, next_cycle_time: datetime.datetime, cyclelo
last_cycle_start=current_data.get('last_cycle_start'),
last_cycle_end=current_data.get('last_cycle_end')
)
-
+
logger.info(f"Updated sleep data for {app_type}: next_cycle={next_cycle_time.isoformat()}, cyclelock={cyclelock}")
-
+
except Exception as e:
logger.error(f"Error updating sleep data for {app_type}: {e}")
def update_next_cycle(app_type: str, next_cycle_time: datetime.datetime) -> None:
"""
Update the next cycle time for an app
-
+
Args:
app_type: The type of app (sonarr, radarr, etc.)
next_cycle_time: When the next cycle will begin
"""
with _lock:
# Get user's timezone for consistent timestamp formatting
- user_tz = _get_user_timezone()
-
+ user_tz = get_user_timezone()
+
# Ensure next_cycle_time is timezone-aware and in user's timezone
if next_cycle_time.tzinfo is None:
# If naive datetime, assume it's in user's timezone
@@ -102,27 +90,27 @@ def update_next_cycle(app_type: str, next_cycle_time: datetime.datetime) -> None
elif next_cycle_time.tzinfo != user_tz:
# Convert to user's timezone if it's in a different timezone
next_cycle_time = next_cycle_time.astimezone(user_tz)
-
+
# Remove microseconds for clean timestamps
next_cycle_time = next_cycle_time.replace(microsecond=0)
-
+
# Update database
update_sleep_json(app_type, next_cycle_time)
def get_cycle_status(app_type: Optional[str] = None) -> Dict[str, Any]:
"""
Get the cycle status for all apps or a specific app
-
+
Args:
app_type: Optional app type to filter for
-
+
Returns:
Dict with cycle status information including cyclelock status
"""
with _lock:
try:
db = get_database()
-
+
if app_type:
# Return data for a specific app
data = db.get_sleep_data(app_type)
@@ -156,18 +144,18 @@ def get_cycle_status(app_type: Optional[str] = None) -> Dict[str, Any]:
def start_cycle(app_type: str) -> None:
"""
Mark that a cycle has started for an app (set cyclelock to True)
-
+
Args:
app_type: The app that is starting a cycle
"""
try:
db = get_database()
current_data = db.get_sleep_data(app_type)
-
+
# Get current time for last_cycle_start
- user_tz = _get_user_timezone()
+ user_tz = get_user_timezone()
now_user_tz = datetime.datetime.now(user_tz).replace(microsecond=0)
-
+
# Update with cycle started
db.set_sleep_data(
app_type=app_type,
@@ -176,7 +164,7 @@ def start_cycle(app_type: str) -> None:
last_cycle_start=now_user_tz.isoformat(),
last_cycle_end=current_data.get('last_cycle_end')
)
-
+
logger.info(f"Started cycle for {app_type} (cyclelock = True)")
except Exception as e:
logger.error(f"Error starting cycle for {app_type}: {e}")
@@ -184,29 +172,29 @@ def start_cycle(app_type: str) -> None:
def end_cycle(app_type: str, next_cycle_time: datetime.datetime) -> None:
"""
Mark that a cycle has ended for an app (set cyclelock to False) and update next cycle time
-
+
Args:
app_type: The app that finished its cycle
next_cycle_time: When the next cycle will begin
"""
try:
logger.info(f"Ending cycle for {app_type}, next cycle at {next_cycle_time.isoformat()}")
-
+
db = get_database()
current_data = db.get_sleep_data(app_type)
-
+
# Get current time for last_cycle_end
- user_tz = _get_user_timezone()
+ user_tz = get_user_timezone()
now_user_tz = datetime.datetime.now(user_tz).replace(microsecond=0)
-
+
# Ensure next_cycle_time is timezone-aware
if next_cycle_time.tzinfo is None:
next_cycle_time = user_tz.localize(next_cycle_time)
elif next_cycle_time.tzinfo != user_tz:
next_cycle_time = next_cycle_time.astimezone(user_tz)
-
+
next_cycle_time = next_cycle_time.replace(microsecond=0)
-
+
# Update with cycle ended
db.set_sleep_data(
app_type=app_type,
@@ -215,7 +203,7 @@ def end_cycle(app_type: str, next_cycle_time: datetime.datetime) -> None:
last_cycle_start=current_data.get('last_cycle_start'),
last_cycle_end=now_user_tz.isoformat()
)
-
+
logger.info(f"Ended cycle for {app_type} (cyclelock = False)")
except Exception as e:
logger.error(f"Error ending cycle for {app_type}: {e}")
@@ -223,22 +211,22 @@ def end_cycle(app_type: str, next_cycle_time: datetime.datetime) -> None:
def reset_cycle(app_type: str) -> bool:
"""
Reset the cycle for a specific app (delete its cycle data and set cyclelock to True)
-
+
Args:
app_type: The app to reset
-
+
Returns:
True if successful, False otherwise
"""
with _lock:
try:
db = get_database()
-
+
# Get current time
- user_tz = _get_user_timezone()
+ user_tz = get_user_timezone()
now = datetime.datetime.now(user_tz).replace(microsecond=0)
future_time = now + datetime.timedelta(minutes=15) # Default 15 minutes
-
+
# Reset the app's data - set cyclelock to True (cycle should start)
db.set_sleep_data(
app_type=app_type,
@@ -247,7 +235,7 @@ def reset_cycle(app_type: str) -> bool:
last_cycle_start=None,
last_cycle_end=None
)
-
+
logger.info(f"Reset cycle for {app_type} - set cyclelock to True")
return True
except Exception as e:
diff --git a/src/primary/history_manager.py b/src/primary/history_manager.py
index e3caab81..777f91bf 100644
--- a/src/primary/history_manager.py
+++ b/src/primary/history_manager.py
@@ -4,17 +4,13 @@
Handles storing and retrieving processed media history using manager.db
"""
-import time
-from datetime import datetime
import threading
import logging
-from typing import Dict, Any, Optional
-# Create a logger
-logger = logging.getLogger(__name__)
+from src.primary.notification_manager import send_history_notification
+from src.primary.utils.database import get_database
-# Import manager database
-from src.primary.utils.database import get_manager_database
+logger = logging.getLogger(__name__)
# Lock to prevent race conditions during database operations
history_locks = {
@@ -27,30 +23,30 @@
"swaparr": threading.Lock()
}
+
def add_history_entry(app_type, entry_data):
"""
Add a history entry for processed media
-
+
Parameters:
- app_type: str - The app type (sonarr, radarr, etc)
- entry_data: dict - Entry data containing id, name, operation_type, instance_name
-
+
Returns:
- dict - The created history entry or None if failed
"""
if app_type not in history_locks:
- logger.error(f"Invalid app type: {app_type}")
+ logger.error("Invalid app type: %s", app_type)
return None
-
+
# Extract instance name from entry data
instance_name = entry_data.get("instance_name", "Default")
-
- logger.debug(f"Adding history entry for {app_type} with instance_name: '{instance_name}'")
-
+ logger.debug("Adding history entry for %s with instance_name: '%s'", app_type, instance_name)
+
# Thread-safe database operation
with history_locks[app_type]:
try:
- manager_db = get_manager_database()
+ manager_db = get_database()
entry = manager_db.add_hunt_history_entry(
app_type=app_type,
instance_name=instance_name,
@@ -59,115 +55,81 @@ def add_history_entry(app_type, entry_data):
operation_type=entry_data.get("operation_type", "missing"),
discovered=False # Default to false - will be updated by discovery tracker
)
-
+
# Add additional fields for compatibility
entry["app_type"] = app_type # Include app_type in the entry for display in UI
-
- logger.info(f"Added history entry for {app_type}-{instance_name}: {entry_data['name']}")
-
- # Send notification about this history entry
+
+ logger.info("Added history entry for %s-%s: %s", app_type, instance_name, entry_data['name'])
+
try:
- # Import here to avoid circular imports
- from src.primary.notification_manager import send_history_notification
send_history_notification(entry)
except Exception as e:
- logger.error(f"Failed to send notification for history entry: {e}")
-
+ logger.error("Failed to send notification for history entry: %s", e)
+
return entry
-
+
except Exception as e:
- logger.error(f"Database error adding history entry for {app_type}: {e}")
+ logger.error("Database error adding history entry for %s: %s", app_type, e)
return None
+
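# --- Illustrative sketch (not part of the patch) ---
# This file's switch from f-strings to %-style logger arguments is about deferred
# formatting: an f-string builds the message even when the record is filtered out,
# while %-style arguments are only formatted if the record is actually emitted.
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("example")

entry_name = "Some Show S01E01"
log.debug(f"Added history entry: {entry_name}")   # string built, then discarded
log.debug("Added history entry: %s", entry_name)  # formatting skipped entirely
log.info("Added history entry: %s", entry_name)   # formatted only because it is emitted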
def get_history(app_type, search_query=None, page=1, page_size=20):
"""
Get history entries for an app
-
+
Parameters:
- app_type: str - The app type (sonarr, radarr, etc)
- search_query: str - Optional search query to filter results
- page: int - Page number (1-based)
- page_size: int - Number of entries per page
-
+
Returns:
- dict with entries, total_entries, and total_pages
"""
if app_type not in history_locks and app_type != "all":
- logger.error(f"Invalid app type: {app_type}")
+ logger.error("Invalid app type: %s", app_type)
return {"entries": [], "total_entries": 0, "total_pages": 0, "current_page": 1}
-
+
try:
- manager_db = get_manager_database()
+ manager_db = get_database()
result = manager_db.get_hunt_history(
app_type=app_type,
search_query=search_query,
page=page,
page_size=page_size
)
-
- logger.debug(f"Retrieved {len(result['entries'])} history entries for {app_type} (page {page})")
+
+ logger.debug("Retrieved %d history entries for %s (page %d)", len(result['entries']), app_type, page)
return result
-
+
except Exception as e:
- logger.error(f"Database error getting history for {app_type}: {e}")
+ logger.error("Database error getting history for %s: %s", app_type, e)
return {"entries": [], "total_entries": 0, "total_pages": 0, "current_page": 1}
+
def clear_history(app_type):
"""
Clear history for an app
-
+
Parameters:
- app_type: str - The app type (sonarr, radarr, etc) or "all" to clear all history
-
+
Returns:
- bool - Success or failure
"""
if app_type not in history_locks and app_type != "all":
- logger.error(f"Invalid app type: {app_type}")
+ logger.error("Invalid app type: %s", app_type)
return False
-
+
try:
- manager_db = get_manager_database()
+ manager_db = get_database()
manager_db.clear_hunt_history(app_type)
- logger.info(f"Successfully cleared hunt history for {app_type}")
+ logger.info("Successfully cleared hunt history for %s", app_type)
return True
-
- except Exception as e:
- logger.error(f"Database error clearing history for {app_type}: {e}")
- return False
-def handle_instance_rename(app_type, old_instance_name, new_instance_name):
- """
- Handle renaming of an instance by updating history entries in the database.
-
- Parameters:
- - app_type: str - The app type (sonarr, radarr, etc)
- - old_instance_name: str - Previous instance name
- - new_instance_name: str - New instance name
-
- Returns:
- - bool - Success or failure
- """
- if app_type not in history_locks:
- logger.error(f"Invalid app type: {app_type}")
+ except Exception as e:
+ logger.error("Database error clearing history for %s: %s", app_type, e)
return False
-
- # If names are the same, nothing to do
- if old_instance_name == new_instance_name:
- return True
-
- logger.info(f"Handling instance rename for {app_type}: {old_instance_name} -> {new_instance_name}")
-
- # Thread-safe operation
- with history_locks[app_type]:
- try:
- manager_db = get_manager_database()
- manager_db.handle_instance_rename(app_type, old_instance_name, new_instance_name)
- return True
-
- except Exception as e:
- logger.error(f"Database error renaming instance history: {e}")
- return False
# No longer need to run synchronization on module import since we're using database
logger.info("History manager initialized with database backend")
diff --git a/src/primary/notification_manager.py b/src/primary/notification_manager.py
index 23303b87..e3b6fea3 100644
--- a/src/primary/notification_manager.py
+++ b/src/primary/notification_manager.py
@@ -5,8 +5,6 @@
"""
import logging
-import json
-from typing import Dict, Any, Optional, List
# Lazy import Apprise to avoid startup issues if the package is not installed
apprise_import_error = None
@@ -15,16 +13,16 @@
except ImportError as e:
apprise_import_error = str(e)
+from src.primary.settings_manager import load_settings
+
# Create a logger for the notification manager
logger = logging.getLogger(__name__)
-# Import the settings manager
-from src.primary.settings_manager import get_setting, load_settings
def get_notification_config():
"""
Get the notification configuration from general settings
-
+
Returns:
dict: The notification configuration
"""
@@ -38,29 +36,30 @@ def get_notification_config():
'include_instance_name': general_settings.get('notification_include_instance', True),
'include_app_name': general_settings.get('notification_include_app', True)
}
-
+
return notification_config
+
def create_apprise_object():
"""
Create and configure an Apprise object with the URLs from settings
-
+
Returns:
apprise.Apprise: Configured Apprise object or None if there was an error
"""
if apprise_import_error:
logger.error(f"Apprise is not available: {apprise_import_error}")
return None
-
+
config = get_notification_config()
-
+
if not config['enabled'] or not config['apprise_urls']:
return None
-
+
try:
# Create an Apprise instance
apobj = apprise.Apprise()
-
+
# Add all the URLs to our Apprise object
for url in config['apprise_urls']:
if url and url.strip():
@@ -69,35 +68,36 @@ def create_apprise_object():
logger.debug(f"Added Apprise URL: {url[:15]}...")
else:
logger.warning(f"Failed to add Apprise URL: {url[:15]}...")
-
+
return apobj
except Exception as e:
logger.error(f"Error creating Apprise object: {e}")
return None
+
def send_notification(title, message, level='info', attach=None):
"""
Send a notification via Apprise
-
+
Args:
title (str): The notification title
message (str): The notification message
level (str): The notification level (info, success, warning, error)
attach (str, optional): Path to a file to attach
-
+
Returns:
bool: True if notification was sent successfully, False otherwise
"""
if apprise_import_error:
logger.error(f"Cannot send notification, Apprise is not available: {apprise_import_error}")
return False
-
+
config = get_notification_config()
-
+
if not config['enabled']:
logger.debug("Notifications are disabled in settings")
return False
-
+
# Check if the notification level is high enough to send
levels = {
'debug': 0,
@@ -106,26 +106,26 @@ def send_notification(title, message, level='info', attach=None):
'warning': 2,
'error': 3
}
-
+
if levels.get(level, 0) < levels.get(config['level'], 1):
logger.debug(f"Notification level {level} is below configured level {config['level']}")
return False
-
+
# Create Apprise object
apobj = create_apprise_object()
if not apobj:
return False
-
+
# Set notification type based on level
notify_type = apprise.NotifyType.INFO
-
+
if level == 'success':
notify_type = apprise.NotifyType.SUCCESS
elif level == 'warning':
notify_type = apprise.NotifyType.WARNING
elif level == 'error':
notify_type = apprise.NotifyType.FAILURE
-
+
try:
# Send notification
result = apobj.notify(
@@ -134,57 +134,58 @@ def send_notification(title, message, level='info', attach=None):
notify_type=notify_type,
attach=attach
)
-
+
logger.info(f"Notification sent (level={level}): {title}")
return result
-
+
except Exception as e:
logger.error(f"Failed to send notification: {e}")
return False
+
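# --- Illustrative sketch (not part of the patch) ---
# send_notification() drops messages whose level is below the configured minimum
# using the numeric ranking above. A standalone version of that comparison:
LEVEL_RANKS = {"debug": 0, "info": 1, "warning": 2, "error": 3}

def should_send(message_level: str, configured_level: str) -> bool:
    # Unknown message levels rank as 0; an unknown configured level falls back to "info".
    return LEVEL_RANKS.get(message_level, 0) >= LEVEL_RANKS.get(configured_level, 1)

assert should_send("error", "warning") is True
assert should_send("info", "warning") is False
assert should_send("info", "info") is True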
def send_history_notification(entry_data, operation_type=None):
"""
Send a notification about a history entry
-
+
Args:
entry_data (dict): The history entry data
operation_type (str, optional): Override the operation type
-
+
Returns:
bool: True if notification was sent successfully, False otherwise
"""
config = get_notification_config()
-
+
if not config['enabled']:
return False
-
+
# Skip if we shouldn't notify on this operation type
op_type = operation_type or entry_data.get('operation_type', 'missing')
if op_type == 'missing' and not config.get('notify_on_missing', True):
return False
if op_type == 'upgrade' and not config.get('notify_on_upgrade', True):
return False
-
+
# Determine notification level based on operation type
level = 'info'
if op_type == 'error':
level = 'error'
elif op_type == 'upgrade':
level = 'success'
-
+
# Build notification title
title_parts = ["Huntarr"]
-
+
if config.get('include_app_name', True) and 'app_type' in entry_data:
app_type = entry_data['app_type']
# Capitalize app name
title_parts.append(app_type.capitalize())
-
+
if config.get('include_instance_name', True) and 'instance_name' in entry_data:
title_parts.append(f"({entry_data['instance_name']})")
-
+
title = " ".join(title_parts)
-
+
# Build notification message
if op_type == 'missing':
message = f"Added Missing: {entry_data.get('processed_info', 'Unknown')}"
@@ -194,22 +195,22 @@ def send_history_notification(entry_data, operation_type=None):
message = f"Error Processing: {entry_data.get('processed_info', 'Unknown')}"
else:
message = f"{op_type.capitalize()}: {entry_data.get('processed_info', 'Unknown')}"
-
+
# Send the notification
return send_notification(title, message, level=level)
+
# Example usage (for testing)
if __name__ == "__main__":
- import sys
- logging.basicConfig(level=logging.DEBUG,
+ logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
-
+
# Test notification
result = send_notification(
- title="Huntarr Test Notification",
- message="This is a test notification from Huntarr",
+ title="Huntarr Test Notification",
+ message="This is a test notification from Huntarr",
level="info"
)
-
- logger.info(f"Notification result: {result}")
\ No newline at end of file
+
+ logger.info(f"Notification result: {result}")
diff --git a/src/primary/scheduler_engine.py b/src/primary/scheduler_engine.py
index 24ed4f71..e4764dbc 100644
--- a/src/primary/scheduler_engine.py
+++ b/src/primary/scheduler_engine.py
@@ -4,29 +4,20 @@
Handles execution of scheduled actions from database
"""
-import os
-import json
import threading
import datetime
import time
import traceback
-from typing import Dict, List, Any
import collections
-# Import settings_manager to handle cache refreshing
from src.primary.settings_manager import clear_cache, load_settings, save_settings
-
+from src.primary.utils.database import get_database
from src.primary.utils.logger import get_logger
-# Add import for stateful_manager's check_expiration
-from src.primary.stateful_manager import check_expiration as check_stateful_expiration
+from src.primary.utils.timezone_utils import get_user_timezone
-# Import database
-from src.primary.utils.database import get_database
-# Initialize logger
scheduler_logger = get_logger("scheduler")
-# Scheduler constants
SCHEDULE_CHECK_INTERVAL = 60 # Check schedule every minute
# Track last executed actions to prevent duplicates
@@ -39,14 +30,6 @@
stop_event = threading.Event()
scheduler_thread = None
-def _get_user_timezone():
- """Get the user's selected timezone from general settings"""
- try:
- from src.primary.utils.timezone_utils import get_user_timezone
- return get_user_timezone()
- except Exception:
- import pytz
- return pytz.UTC
def load_schedule():
"""Load the schedule configuration from database"""
@@ -56,21 +39,22 @@ def load_schedule():
# Schedules loaded - debug spam removed
return schedule_data
except Exception as e:
- scheduler_logger.error(f"Error loading schedule from database: {e}")
+ scheduler_logger.error("Error loading schedule from database: %s", e)
scheduler_logger.error(traceback.format_exc())
return {"global": [], "sonarr": [], "radarr": [], "lidarr": [], "readarr": [], "whisparr": [], "eros": []}
+
def add_to_history(action_entry, status, message):
"""Add an action execution to the history log"""
# Use user's selected timezone for display
- user_tz = _get_user_timezone()
+ user_tz = get_user_timezone()
now = datetime.datetime.now(user_tz)
time_str = now.strftime("%Y-%m-%d %H:%M:%S")
-
+
# Add timezone information to the timestamp for clarity
timezone_name = str(user_tz)
time_str_with_tz = f"{time_str} {timezone_name}"
-
+
history_entry = {
"timestamp": time_str,
"timestamp_tz": time_str_with_tz, # Include timezone-aware timestamp
@@ -80,45 +64,46 @@ def add_to_history(action_entry, status, message):
"status": status,
"message": message
}
-
+
execution_history.appendleft(history_entry)
- scheduler_logger.debug(f"Scheduler history: {time_str_with_tz} - {action_entry.get('action')} for {action_entry.get('app')} - {status} - {message}")
+ scheduler_logger.debug("Scheduler history: %s - %s for %s - %s - %s", time_str_with_tz, action_entry.get('action'), action_entry.get('app'), status, message)
+
def execute_action(action_entry):
"""Execute a scheduled action"""
action_type = action_entry.get("action")
app_type = action_entry.get("app")
app_id = action_entry.get("id")
-
+
# Generate a unique key for this action to track execution
- user_tz = _get_user_timezone()
+ user_tz = get_user_timezone()
current_date = datetime.datetime.now(user_tz).strftime("%Y-%m-%d")
execution_key = f"{app_id}_{current_date}"
-
+
# Check if this action was already executed today
if execution_key in last_executed_actions:
message = f"Action {app_id} for {app_type} already executed today, skipping"
scheduler_logger.debug(message)
add_to_history(action_entry, "skipped", message)
return False # Already executed
-
+
# Helper function to extract base app name from app identifiers like "radarr-all"
def get_base_app_name(app_identifier):
"""Extract base app name from identifiers like 'radarr-all', 'sonarr-instance1', etc."""
if not app_identifier or app_identifier == "global":
return app_identifier
-
+
# Split on hyphen and take the first part as the base app name
base_name = app_identifier.split('-')[0]
-
+
# Validate it's a known app
valid_apps = ['sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros']
if base_name in valid_apps:
return base_name
-
+
# If not a known app with suffix, return the original identifier
return app_identifier
-
+
try:
# Handle both old "pause" and new "disable" terminology
if action_type == "pause" or action_type == "disable":
@@ -157,7 +142,7 @@ def get_base_app_name(app_identifier):
try:
# Extract base app name for config access
base_app_name = get_base_app_name(app_type)
-
+
# Load settings from database
config_data = load_settings(base_app_name)
if config_data:
@@ -185,7 +170,7 @@ def get_base_app_name(app_identifier):
scheduler_logger.error(error_message)
add_to_history(action_entry, "error", error_message)
return False
-
+
# Handle both old "resume" and new "enable" terminology
elif action_type == "resume" or action_type == "enable":
# Enable logic for global or specific app
@@ -223,7 +208,7 @@ def get_base_app_name(app_identifier):
try:
# Extract base app name for config access
base_app_name = get_base_app_name(app_type)
-
+
# Load settings from database
config_data = load_settings(base_app_name)
if config_data:
@@ -251,7 +236,7 @@ def get_base_app_name(app_identifier):
scheduler_logger.error(error_message)
add_to_history(action_entry, "error", error_message)
return False
-
+
# Handle the API limit actions based on the predefined values
elif action_type.startswith("api-") or action_type.startswith("API Limits "):
# Extract the API limit value from the action type
@@ -261,7 +246,7 @@ def get_base_app_name(app_identifier):
api_limit = int(action_type.replace("api-", ""))
else:
api_limit = int(action_type.replace("API Limits ", ""))
-
+
if app_type == "global":
message = f"Setting global API cap to {api_limit}"
scheduler_logger.info(message)
@@ -288,7 +273,7 @@ def get_base_app_name(app_identifier):
try:
# Extract base app name for config access
base_app_name = get_base_app_name(app_type)
-
+
# Load settings from database
config_data = load_settings(base_app_name)
if config_data:
@@ -313,70 +298,71 @@ def get_base_app_name(app_identifier):
scheduler_logger.error(error_message)
add_to_history(action_entry, "error", error_message)
return False
-
+
# Mark this action as executed for today
last_executed_actions[execution_key] = datetime.datetime.now(user_tz)
return True
-
+
except Exception as e:
- scheduler_logger.error(f"Error executing action {action_type} for {app_type}: {e}")
+ scheduler_logger.error("Error executing action %s for %s: %s", action_type, app_type, e)
scheduler_logger.error(traceback.format_exc())
return False
+
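# --- Illustrative sketch (not part of the patch) ---
# Two small mechanics from execute_action() shown standalone: the duplicate guard
# keys an action by "<schedule id>_<date>", and identifiers like "radarr-all" or
# "sonarr-instance1" are reduced to their base app name before config lookups.
import datetime

VALID_APPS = ["sonarr", "radarr", "lidarr", "readarr", "whisparr", "eros"]

def base_app_name(app_identifier: str) -> str:
    if not app_identifier or app_identifier == "global":
        return app_identifier
    base = app_identifier.split("-")[0]
    return base if base in VALID_APPS else app_identifier

def daily_execution_key(schedule_id: str, today: datetime.date) -> str:
    return f"{schedule_id}_{today.strftime('%Y-%m-%d')}"

assert base_app_name("radarr-all") == "radarr"
assert base_app_name("global") == "global"
print(daily_execution_key("abc123", datetime.date(2025, 5, 1)))  # abc123_2025-05-01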
def should_execute_schedule(schedule_entry):
"""Check if a schedule entry should be executed now"""
schedule_id = schedule_entry.get("id", "unknown")
-
+
# Debug log the schedule we're checking
- scheduler_logger.debug(f"Checking if schedule {schedule_id} should be executed")
-
+ scheduler_logger.debug("Checking if schedule %s should be executed", schedule_id)
+
# Get user's selected timezone for consistent timing
- user_tz = _get_user_timezone()
-
+ user_tz = get_user_timezone()
+
# Log exact system time for debugging with timezone info
exact_time = datetime.datetime.now(user_tz)
timezone_name = str(user_tz)
time_with_tz = f"{exact_time.strftime('%Y-%m-%d %H:%M:%S.%f')} {timezone_name}"
- scheduler_logger.debug(f"EXACT CURRENT TIME: {time_with_tz}")
-
+ scheduler_logger.debug("EXACT CURRENT TIME: %s", time_with_tz)
+
if not schedule_entry.get("enabled", True):
- scheduler_logger.debug(f"Schedule {schedule_id} is disabled, skipping")
+ scheduler_logger.debug("Schedule %s is disabled, skipping", schedule_id)
return False
-
+
# Check if specific days are configured
days = schedule_entry.get("days", [])
- scheduler_logger.debug(f"Schedule {schedule_id} days: {days}")
-
+ scheduler_logger.debug("Schedule %s days: %s", schedule_id, days)
+
# Get today's day of week in lowercase (respects user timezone)
current_day = datetime.datetime.now(user_tz).strftime("%A").lower() # e.g., 'monday'
-
+
# Debug what's being compared
- scheduler_logger.debug(f"CRITICAL DEBUG - Today: '{current_day}', Schedule days: {days}")
-
+ scheduler_logger.debug("CRITICAL DEBUG - Today: '%s', Schedule days: %s", current_day, days)
+
# If days array is empty, treat as "run every day"
if not days:
- scheduler_logger.debug(f"Schedule {schedule_id} has no days specified, treating as 'run every day'")
+ scheduler_logger.debug("Schedule %s has no days specified, treating as 'run every day'", schedule_id)
else:
# Make sure all day comparisons are done with lowercase strings
lowercase_days = [str(day).lower() for day in days]
-
+
# If today is not in the schedule days, skip this schedule
if current_day not in lowercase_days:
- scheduler_logger.debug(f"FAILURE: Schedule {schedule_id} not configured to run on {current_day}, skipping")
+ scheduler_logger.debug("FAILURE: Schedule %s not configured to run on %s, skipping", schedule_id, current_day)
return False
else:
- scheduler_logger.debug(f"SUCCESS: Schedule {schedule_id} IS configured to run on {current_day}")
+ scheduler_logger.debug("SUCCESS: Schedule %s IS configured to run on %s", schedule_id, current_day)
-    
+
# Get current time with second-level precision for accurate timing (in user's timezone)
current_time = datetime.datetime.now(user_tz)
-
+
# Extract scheduled time from different possible formats
try:
# First try the flat format
schedule_hour = schedule_entry.get("hour")
schedule_minute = schedule_entry.get("minute")
-
+
# If not found, try nested format or string format
if schedule_hour is None or schedule_minute is None:
time_value = schedule_entry.get("time")
@@ -389,84 +375,83 @@ def should_execute_schedule(schedule_entry):
time_parts = time_value.split(":")
schedule_hour = int(time_parts[0])
schedule_minute = int(time_parts[1]) if len(time_parts) > 1 else 0
-
+
# Convert to integers to ensure proper comparison
schedule_hour = int(schedule_hour)
schedule_minute = int(schedule_minute)
except (TypeError, ValueError, IndexError):
- scheduler_logger.warning(f"Invalid schedule time format in entry: {schedule_entry}")
+ scheduler_logger.warning("Invalid schedule time format in entry: %s", schedule_entry)
return False
-
+
# Add detailed logging for time debugging
time_debug_str = f"{current_time.hour:02d}:{current_time.minute:02d}:{current_time.second:02d}"
if timezone_name:
time_debug_str += f" {timezone_name}"
-
- scheduler_logger.debug(f"Schedule {schedule_id} time: {schedule_hour:02d}:{schedule_minute:02d}, "
- f"current time: {time_debug_str}")
-
+
+ scheduler_logger.debug("Schedule %s time: %02d:%02d, current time: %s", schedule_id, schedule_hour, schedule_minute, time_debug_str)
+
# ===== STRICT TIME COMPARISON - PREVENT EARLY EXECUTION =====
-
+
# If current hour is BEFORE scheduled hour, NEVER execute
if current_time.hour < schedule_hour:
- scheduler_logger.debug(f"BLOCKED EXECUTION: Current hour {current_time.hour} is BEFORE scheduled hour {schedule_hour}")
+ scheduler_logger.debug("BLOCKED EXECUTION: Current hour %s is BEFORE scheduled hour %s", current_time.hour, schedule_hour)
return False
-
+
# If same hour but current minute is BEFORE scheduled minute, NEVER execute
if current_time.hour == schedule_hour and current_time.minute < schedule_minute:
- scheduler_logger.debug(f"BLOCKED EXECUTION: Current minute {current_time.minute} is BEFORE scheduled minute {schedule_minute}")
+ scheduler_logger.debug("BLOCKED EXECUTION: Current minute %s is BEFORE scheduled minute %s", current_time.minute, schedule_minute)
return False
-
+
# ===== 4-MINUTE EXECUTION WINDOW =====
-
+
# We're in the scheduled hour and minute, or later - check 4-minute window
if current_time.hour == schedule_hour:
# Execute if we're in the scheduled minute or up to 3 minutes after the scheduled minute
if current_time.minute >= schedule_minute and current_time.minute < schedule_minute + 4:
- scheduler_logger.info(f"EXECUTING: Current time {current_time.hour:02d}:{current_time.minute:02d} is within the 4-minute window after {schedule_hour:02d}:{schedule_minute:02d}")
+ scheduler_logger.info("EXECUTING: Current time %02d:%02d is within the 4-minute window after %02d:%02d", current_time.hour, current_time.minute, schedule_hour, schedule_minute)
return True
-
+
# Handle hour rollover case (e.g., scheduled for 6:59, now it's 7:00, 7:01, or 7:02)
if current_time.hour == schedule_hour + 1:
# Only apply if scheduled minute was in the last 3 minutes of the hour (57-59)
# and current minute is in the first (60 - schedule_minute) minutes of the next hour
if schedule_minute >= 57 and current_time.minute < (60 - schedule_minute):
- scheduler_logger.info(f"EXECUTING: Hour rollover within 4-minute window after {schedule_hour:02d}:{schedule_minute:02d}")
+ scheduler_logger.info("EXECUTING: Hour rollover within 4-minute window after %02d:%02d", schedule_hour, schedule_minute)
return True
-
+
# We've missed the 4-minute window
- scheduler_logger.debug(f"MISSED WINDOW: Current time {current_time.hour:02d}:{current_time.minute:02d} "
- f"is past the 4-minute window for {schedule_hour:02d}:{schedule_minute:02d}")
+ scheduler_logger.debug("MISSED WINDOW: Current time %02d:%02d is past the 4-minute window for %02d:%02d", current_time.hour, current_time.minute, schedule_hour, schedule_minute)
return False
+
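# --- Illustrative sketch (not part of the patch) ---
# The strict "no early execution" checks and the 4-minute window above, collapsed
# into one standalone predicate so the behaviour can be exercised directly.
def in_execution_window(now_hour: int, now_minute: int, sched_hour: int, sched_minute: int) -> bool:
    if now_hour < sched_hour:
        return False                                   # earlier hour: never run
    if now_hour == sched_hour and now_minute < sched_minute:
        return False                                   # same hour but still early
    if now_hour == sched_hour and sched_minute <= now_minute < sched_minute + 4:
        return True                                    # scheduled minute plus the next 3 minutes
    if now_hour == sched_hour + 1 and sched_minute >= 57 and now_minute < (60 - sched_minute):
        return True                                    # window spilling into the next hour
    return False                                       # window missed

assert in_execution_window(7, 2, 7, 0) is True    # 07:02 for a 07:00 schedule
assert in_execution_window(7, 4, 7, 0) is False   # window closes 4 minutes after the scheduled minute
assert in_execution_window(7, 0, 6, 59) is True   # rollover case for 06:59
assert in_execution_window(7, 1, 6, 59) is False  # 60 - 59 = 1, so only 07:00 qualifies here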
def check_and_execute_schedules():
"""Check all schedules and execute those that should run now"""
try:
# Get user timezone for consistent logging
- user_tz = _get_user_timezone()
-
+ user_tz = get_user_timezone()
+
# Format time in user timezone
current_time = datetime.datetime.now(user_tz).strftime("%Y-%m-%d %H:%M:%S")
- scheduler_logger.debug(f"Checking schedules at {current_time} ({user_tz})")
-
+ scheduler_logger.debug("Checking schedules at %s (%s)", current_time, user_tz)
+
# Load schedules from database
# Loading schedules debug removed to reduce log spam
-
+
# Load the schedule
schedule_data = load_schedule()
if not schedule_data:
return
-
+
# Log schedule data summary
schedule_summary = {app: len(schedules) for app, schedules in schedule_data.items()}
- scheduler_logger.debug(f"Loaded schedules: {schedule_summary}")
-
+ scheduler_logger.debug("Loaded schedules: %s", schedule_summary)
+
# Add to history that we've checked schedules
add_to_history({"action": "check"}, "debug", f"Checking schedules at {current_time}")
-
+
# Initialize counter for schedules found
schedules_found = 0
-
+
# Check for schedules to execute
for app_type, schedules in schedule_data.items():
for schedule_entry in schedules:
@@ -478,74 +463,69 @@ def check_and_execute_schedules():
last_time = last_executed_actions[entry_id]
now = datetime.datetime.now(user_tz)
delta = (now - last_time).total_seconds() / 60 # Minutes
-
+
if delta < 5: # Don't re-execute if less than 5 minutes have passed
- scheduler_logger.info(f"Skipping recently executed schedule '{entry_id}' ({delta:.1f} minutes ago)")
+ scheduler_logger.info("Skipping recently executed schedule '%s' (%.1f minutes ago)", entry_id, delta)
add_to_history(
- schedule_entry,
- "skipped",
+ schedule_entry,
+ "skipped",
f"Already executed {delta:.1f} minutes ago"
)
continue
-
+
# Execute the action
schedule_entry["appType"] = app_type
execute_action(schedule_entry)
-
+
# Update last executed time
if entry_id:
last_executed_actions[entry_id] = datetime.datetime.now(user_tz)
-
+
# No need to log anything when no schedules are found, as this is expected
-
+
except Exception as e:
error_msg = f"Error checking schedules: {e}"
scheduler_logger.error(error_msg)
scheduler_logger.error(traceback.format_exc())
add_to_history({"action": "check"}, "error", error_msg)
+
def scheduler_loop():
"""Main scheduler loop - runs in a background thread"""
scheduler_logger.info("Scheduler loop started.")
while not stop_event.is_set():
try:
- # Check for stateful management expiration first
- # Stateful management check debug removed to reduce log spam
- check_stateful_expiration() # Call the imported function
-
- # Schedule execution debug removed to reduce log spam
check_and_execute_schedules()
-
- # Sleep until the next check
stop_event.wait(SCHEDULE_CHECK_INTERVAL)
-
except Exception as e:
- scheduler_logger.error(f"Error in scheduler loop: {e}")
+ scheduler_logger.error("Error in scheduler loop: %s", e)
scheduler_logger.error(traceback.format_exc())
# Sleep briefly to avoid rapidly repeating errors
time.sleep(5)
-
+
scheduler_logger.info("Scheduler loop stopped")
+
def get_execution_history():
"""Get the execution history for the scheduler"""
return list(execution_history)
+
def start_scheduler():
"""Start the scheduler engine"""
global scheduler_thread
-
+
if scheduler_thread and scheduler_thread.is_alive():
scheduler_logger.info("Scheduler already running")
return
-
+
# Reset the stop event
stop_event.clear()
-
+
# Create and start the scheduler thread
scheduler_thread = threading.Thread(target=scheduler_loop, name="SchedulerEngine", daemon=True)
scheduler_thread.start()
-
+
# Add a startup entry to the history
startup_entry = {
"id": "system",
@@ -553,24 +533,25 @@ def start_scheduler():
"app": "scheduler"
}
add_to_history(startup_entry, "info", "Scheduler engine started")
-
- scheduler_logger.info(f"Scheduler engine started. Thread is alive: {scheduler_thread.is_alive()}")
+
+ scheduler_logger.info("Scheduler engine started. Thread is alive: %s", scheduler_thread.is_alive())
return True
+
def stop_scheduler():
"""Stop the scheduler engine"""
global scheduler_thread
-
+
if not scheduler_thread or not scheduler_thread.is_alive():
scheduler_logger.info("Scheduler not running")
return
-
+
# Signal the thread to stop
stop_event.set()
-
+
# Wait for the thread to terminate (with timeout)
scheduler_thread.join(timeout=5.0)
-
+
if scheduler_thread.is_alive():
scheduler_logger.warning("Scheduler did not terminate gracefully")
else:
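# --- Illustrative sketch (not part of the patch) ---
# The start/stop pattern used by the scheduler engine: a daemon thread that wakes
# up on an interval, and a threading.Event that both drives the sleep and signals
# shutdown. The interval is shortened to 1s here purely so the example finishes quickly.
import threading
import time

stop_event = threading.Event()

def loop(interval: float = 1.0) -> None:
    while not stop_event.is_set():
        print("checking schedules...")
        stop_event.wait(interval)   # sleeps, but returns immediately once set()

worker = threading.Thread(target=loop, name="SchedulerEngine", daemon=True)
worker.start()
time.sleep(2.5)

stop_event.set()                    # ask the loop to exit
worker.join(timeout=5.0)            # wait for it, mirroring stop_scheduler()
print("stopped cleanly:", not worker.is_alive())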
diff --git a/src/primary/settings_manager.py b/src/primary/settings_manager.py
index 10fa4691..caddc86a 100644
--- a/src/primary/settings_manager.py
+++ b/src/primary/settings_manager.py
@@ -10,40 +10,42 @@
import pathlib
import logging
import time
+from pathlib import Path
from typing import Dict, Any, Optional, List
+from src.primary.utils.database import get_database
+from src.primary.utils.timezone_utils import clear_timezone_cache, validate_timezone
+
# Create a simple logger for settings_manager
logging.basicConfig(level=logging.INFO)
settings_logger = logging.getLogger("settings_manager")
-# Database integration
-from src.primary.utils.database import get_database
-
-# Default configs location
DEFAULT_CONFIGS_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), 'default_configs'))
-# Known app types
KNOWN_APP_TYPES = ["sonarr", "radarr", "lidarr", "readarr", "whisparr", "eros", "swaparr", "prowlarr", "general"]
# Add a settings cache with timestamps to avoid excessive database reads
settings_cache = {} # Format: {app_name: {'timestamp': timestamp, 'data': settings_dict}}
CACHE_TTL = 5 # Cache time-to-live in seconds
+
def clear_cache(app_name=None):
"""Clear the settings cache for a specific app or all apps."""
global settings_cache
if app_name:
if app_name in settings_cache:
- settings_logger.debug(f"Clearing cache for {app_name}")
+ settings_logger.debug("Clearing cache for %s", app_name)
settings_cache.pop(app_name, None)
else:
settings_logger.debug("Clearing entire settings cache")
settings_cache = {}
+
def get_default_config_path(app_name: str) -> pathlib.Path:
"""Get the path to the default config file for a specific app."""
return pathlib.Path(DEFAULT_CONFIGS_DIR) / f"{app_name}.json"
+
def load_default_app_settings(app_name: str) -> Dict[str, Any]:
"""Load default settings for a specific app from its JSON file."""
default_file = get_default_config_path(app_name)
@@ -52,17 +54,18 @@ def load_default_app_settings(app_name: str) -> Dict[str, Any]:
with open(default_file, 'r') as f:
return json.load(f)
except Exception as e:
- settings_logger.error(f"Error loading default settings for {app_name} from {default_file}: {e}")
+ settings_logger.error("Error loading default settings for %s from %s: %s", app_name, default_file, e)
return {}
else:
- settings_logger.warning(f"Default settings file not found for {app_name}: {default_file}")
+ settings_logger.warning("Default settings file not found for %s: %s", app_name, default_file)
return {}
+
def _ensure_config_exists(app_name: str) -> None:
"""Ensure the config exists for an app in the database."""
try:
db = get_database()
-
+
if app_name == 'general':
# Check if general settings exist
existing_settings = db.get_general_settings()
@@ -71,9 +74,9 @@ def _ensure_config_exists(app_name: str) -> None:
default_settings = load_default_app_settings(app_name)
if default_settings:
db.save_general_settings(default_settings)
- settings_logger.info(f"Created default general settings in database")
+ settings_logger.info("Created default general settings in database")
else:
- settings_logger.warning(f"No default config found for general settings")
+ settings_logger.warning("No default config found for general settings")
else:
# Check if app config exists
config = db.get_app_config(app_name)
@@ -82,49 +85,50 @@ def _ensure_config_exists(app_name: str) -> None:
default_settings = load_default_app_settings(app_name)
if default_settings:
db.save_app_config(app_name, default_settings)
- settings_logger.info(f"Created default settings in database for {app_name}")
+ settings_logger.info("Created default settings in database for %s", app_name)
else:
# Create empty config in database
db.save_app_config(app_name, {})
- settings_logger.warning(f"No default config found for {app_name}. Created empty database entry.")
+ settings_logger.warning("No default config found for %s. Created empty database entry.", app_name)
except Exception as e:
- settings_logger.error(f"Database error for {app_name}: {e}")
+ settings_logger.error("Database error for %s: %s", app_name, e)
raise
+
def load_settings(app_type, use_cache=True):
"""
Load settings for a specific app type from database
-
+
Args:
app_type: The app type to load settings for
use_cache: Whether to use the cached settings if available and recent
-
+
Returns:
Dict containing the app settings
"""
global settings_cache
-
+
# Only log unexpected app types that are not 'general'
if app_type not in KNOWN_APP_TYPES and app_type != "general":
- settings_logger.warning(f"load_settings called with unexpected app_type: {app_type}")
-
+ settings_logger.warning("load_settings called with unexpected app_type: %s", app_type)
+
# Check if we have a valid cache entry
if use_cache and app_type in settings_cache:
cache_entry = settings_cache[app_type]
cache_age = time.time() - cache_entry.get('timestamp', 0)
-
+
if cache_age < CACHE_TTL:
- settings_logger.debug(f"Using cached settings for {app_type} (age: {cache_age:.1f}s)")
+ settings_logger.debug("Using cached settings for %s (age: %.1fs)", app_type, cache_age)
return cache_entry['data']
else:
- settings_logger.debug(f"Cache expired for {app_type} (age: {cache_age:.1f}s)")
-
+ settings_logger.debug("Cache expired for %s (age: %.1fs)", app_type, cache_age)
+
# No valid cache entry, load from database
current_settings = {}
-
+
try:
db = get_database()
-
+
if app_type == 'general':
current_settings = db.get_general_settings()
if not current_settings:
@@ -137,81 +141,102 @@ def load_settings(app_type, use_cache=True):
# Config doesn't exist in database, create it
_ensure_config_exists(app_type)
current_settings = db.get_app_config(app_type) or {}
-
- settings_logger.debug(f"Loaded {app_type} settings from database")
-
+
+ settings_logger.debug("Loaded %s settings from database", app_type)
+
except Exception as e:
- settings_logger.error(f"Database error loading {app_type}: {e}")
+ settings_logger.error("Database error loading %s: %s", app_type, e)
raise
-
+
# Load defaults to check for missing keys
default_settings = load_default_app_settings(app_type)
-
+
# Add missing keys from defaults without overwriting existing values
updated = False
for key, value in default_settings.items():
if key not in current_settings:
current_settings[key] = value
updated = True
-
+
# Apply Lidarr migration (artist -> album) for Huntarr 7.5.0+
if app_type == "lidarr":
if current_settings.get("hunt_missing_mode") == "artist":
settings_logger.info("Migrating Lidarr hunt_missing_mode from 'artist' to 'album' (Huntarr 7.5.0+)")
current_settings["hunt_missing_mode"] = "album"
updated = True
-
+
# If keys were added, save the updated settings
if updated:
- settings_logger.info(f"Added missing default keys to {app_type} settings")
+ settings_logger.info("Added missing default keys to %s settings", app_type)
save_settings(app_type, current_settings)
-
+
# Update cache
settings_cache[app_type] = {
'timestamp': time.time(),
'data': current_settings
}
-
+
return current_settings
+
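# --- Illustrative sketch (not part of the patch) ---
# The read path of the settings cache above: entries carry a timestamp and are
# reused only while younger than CACHE_TTL, otherwise the caller falls through to
# the database. The loader below is a stub standing in for the real database read.
import time

CACHE_TTL = 5  # seconds, as in settings_manager
_cache: dict = {}

def load_with_cache(app_type: str, loader) -> dict:
    entry = _cache.get(app_type)
    if entry and (time.time() - entry["timestamp"]) < CACHE_TTL:
        return entry["data"]                       # fresh enough, skip the database
    data = loader(app_type)                        # expired or missing: reload
    _cache[app_type] = {"timestamp": time.time(), "data": data}
    return data

fake_db_reads = []
def fake_loader(app: str) -> dict:
    fake_db_reads.append(app)
    return {"app": app}

load_with_cache("sonarr", fake_loader)
load_with_cache("sonarr", fake_loader)   # served from cache
print(fake_db_reads)                     # ['sonarr'] -> only one database read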
+def load_instance_settings(app_name: str, instance_name: str) -> dict[str, Any]:
+ """
+ Load settings that apply to a specific instance of an app.
+
+ Args:
+ app_name: The app name (sonarr, radarr, etc.)
+ instance_name: The specific instance name to load settings for
+
+ Returns:
+ dict: Dictionary containing the instance settings
+ """
+ app_settings = load_settings(app_name)
+
+ for instance_settings in app_settings.get("instances", []):
+ if instance_settings.get("name") == instance_name:
+ return instance_settings
+
+ raise ValueError(f"Instance '{instance_name}' not found for app '{app_name}'")
+
+
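# --- Illustrative sketch (not part of the patch) ---
# What the new load_instance_settings() helper does, run against a hard-coded
# settings dict instead of the database; the instance names and URLs are made up.
from typing import Any

sample_settings = {
    "instances": [
        {"name": "Default", "api_url": "http://sonarr:8989", "api_key": "abc"},
        {"name": "4K", "api_url": "http://sonarr-4k:8989", "api_key": "def"},
    ]
}

def find_instance(app_settings: dict, instance_name: str) -> dict[str, Any]:
    for instance in app_settings.get("instances", []):
        if instance.get("name") == instance_name:
            return instance
    raise ValueError(f"Instance '{instance_name}' not found")

print(find_instance(sample_settings, "4K")["api_url"])  # http://sonarr-4k:8989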
def save_settings(app_name: str, settings_data: Dict[str, Any]) -> bool:
"""Save settings for a specific app to database."""
if app_name not in KNOWN_APP_TYPES:
- settings_logger.error(f"Attempted to save settings for unknown app type: {app_name}")
- return False
-
+ settings_logger.error("Attempted to save settings for unknown app type: %s", app_name)
+ return False
+
# Debug: Log the data being saved, especially for general settings
if app_name == 'general':
- settings_logger.debug(f"Saving general settings: {settings_data}")
- settings_logger.debug(f"Apprise URLs being saved: {settings_data.get('apprise_urls', 'NOT_FOUND')}")
-
+ settings_logger.debug("Saving general settings: %s", settings_data)
+ settings_logger.debug("Apprise URLs being saved: %s", settings_data.get('apprise_urls', 'NOT_FOUND'))
+
# Validate and enforce hourly_cap maximum limit of 400
if 'hourly_cap' in settings_data:
original_cap = settings_data['hourly_cap']
if isinstance(original_cap, (int, float)) and original_cap > 400:
settings_data['hourly_cap'] = 400
- settings_logger.warning(f"Hourly cap for {app_name} was {original_cap}, automatically reduced to maximum allowed value of 400")
-
+ settings_logger.warning("Hourly cap for %s was %s, automatically reduced to maximum allowed value of 400", app_name, original_cap)
+
# Validate and enforce minimum values (no negative numbers allowed)
numeric_fields = [
'hourly_cap', 'hunt_missing_items', 'hunt_upgrade_items',
'hunt_missing_movies', 'hunt_upgrade_movies', 'hunt_missing_books', 'hunt_upgrade_books'
]
-
+
# Special validation for sleep_duration (minimum 600 seconds = 10 minutes)
if 'sleep_duration' in settings_data:
original_value = settings_data['sleep_duration']
if isinstance(original_value, (int, float)) and original_value < 600:
settings_data['sleep_duration'] = 600
- settings_logger.warning(f"Sleep duration for {app_name} was {original_value} seconds, automatically set to minimum allowed value of 600 seconds (10 minutes)")
-
+ settings_logger.warning("Sleep duration for %s was %s seconds, automatically set to minimum allowed value of 600 seconds (10 minutes)", app_name, original_value)
+
for field in numeric_fields:
if field in settings_data:
original_value = settings_data[field]
if isinstance(original_value, (int, float)) and original_value < 0:
settings_data[field] = 0
- settings_logger.warning(f"{field} for {app_name} was {original_value}, automatically set to minimum allowed value of 0")
-
+ settings_logger.warning("%s for %s was %s, automatically set to minimum allowed value of 0", field, app_name, original_value)
+
# Also validate numeric fields in instances array
if 'instances' in settings_data and isinstance(settings_data['instances'], list):
for i, instance in enumerate(settings_data['instances']):
@@ -221,62 +246,65 @@ def save_settings(app_name: str, settings_data: Dict[str, Any]) -> bool:
original_value = instance['sleep_duration']
if isinstance(original_value, (int, float)) and original_value < 600:
instance['sleep_duration'] = 600
- settings_logger.warning(f"Sleep duration for {app_name} instance {i+1} was {original_value} seconds, automatically set to minimum allowed value of 600 seconds (10 minutes)")
-
+ settings_logger.warning("Sleep duration for %s instance %s was %s seconds, automatically set to minimum allowed value of 600 seconds (10 minutes)", app_name, i+1, original_value)
+
for field in numeric_fields:
if field in instance:
original_value = instance[field]
if isinstance(original_value, (int, float)) and original_value < 0:
instance[field] = 0
- settings_logger.warning(f"{field} for {app_name} instance {i+1} was {original_value}, automatically set to minimum allowed value of 0")
-
+ settings_logger.warning("%s for %s instance %s was %s, automatically set to minimum allowed value of 0", field, app_name, i+1, original_value)
+
try:
db = get_database()
-
+
if app_name == 'general':
db.save_general_settings(settings_data)
else:
# For app configs, check if instance names have changed and migrate state management data
if 'instances' in settings_data and isinstance(settings_data['instances'], list):
_migrate_instance_state_management_if_needed(app_name, settings_data, db)
-
+
db.save_app_config(app_name, settings_data)
-
+
# Auto-save enabled - no need to log every successful save
success = True
-
+
except Exception as e:
- settings_logger.error(f"Database error saving {app_name}: {e}")
+ settings_logger.error("Database error saving %s: %s", app_name, e)
return False
-
+
if success:
# Clear cache for this app to ensure fresh reads
clear_cache(app_name)
-
+
# If general settings were saved, also clear timezone cache
if app_name == 'general':
try:
- from src.primary.utils.timezone_utils import clear_timezone_cache
clear_timezone_cache()
settings_logger.debug("Timezone cache cleared")
except Exception as e:
- settings_logger.warning(f"Could not clear timezone cache: {e}")
-
+ settings_logger.warning("Could not clear timezone cache: %s", e)
+
return success
+
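# --- Illustrative sketch (not part of the patch) ---
# The validation rules save_settings() enforces, pulled out into one function:
# hourly_cap is capped at 400, sleep_duration has a 600-second floor, and the
# listed hunt counters can never go negative.
NUMERIC_FLOOR_FIELDS = [
    "hourly_cap", "hunt_missing_items", "hunt_upgrade_items",
    "hunt_missing_movies", "hunt_upgrade_movies", "hunt_missing_books", "hunt_upgrade_books",
]

def clamp_settings(data: dict) -> dict:
    if isinstance(data.get("hourly_cap"), (int, float)) and data["hourly_cap"] > 400:
        data["hourly_cap"] = 400                       # hard upper limit
    if isinstance(data.get("sleep_duration"), (int, float)) and data["sleep_duration"] < 600:
        data["sleep_duration"] = 600                   # minimum 10 minutes
    for field in NUMERIC_FLOOR_FIELDS:
        if isinstance(data.get(field), (int, float)) and data[field] < 0:
            data[field] = 0                            # no negative counts
    return data

print(clamp_settings({"hourly_cap": 999, "sleep_duration": 30, "hunt_missing_items": -2}))
# {'hourly_cap': 400, 'sleep_duration': 600, 'hunt_missing_items': 0}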
def get_setting(app_name: str, key: str, default: Optional[Any] = None) -> Any:
"""Get a specific setting value for an app."""
settings = load_settings(app_name)
return settings.get(key, default)
+
def get_api_url(app_name: str) -> Optional[str]:
"""Get the API URL for a specific app."""
return get_setting(app_name, "api_url", "")
+
def get_api_key(app_name: str) -> Optional[str]:
"""Get the API Key for a specific app."""
return get_setting(app_name, "api_key", "")
+
def get_all_settings() -> Dict[str, Dict[str, Any]]:
"""Load settings for all known apps."""
all_settings = {}
@@ -287,15 +315,16 @@ def get_all_settings() -> Dict[str, Dict[str, Any]]:
all_settings[app_name] = settings
return all_settings
+
def get_configured_apps() -> List[str]:
"""Return a list of app names that have basic configuration (API URL and Key)."""
configured = []
for app_name in KNOWN_APP_TYPES:
if app_name == 'general':
continue # Skip general settings
-
+
settings = load_settings(app_name)
-
+
# First check if there are valid instances configured (multi-instance mode)
if "instances" in settings and isinstance(settings["instances"], list) and settings["instances"]:
for instance in settings["instances"]:
@@ -303,27 +332,28 @@ def get_configured_apps() -> List[str]:
configured.append(app_name)
break # One valid instance is enough to consider the app configured
continue # Skip the single-instance check if we already checked instances
-
+
# Fallback to legacy single-instance config
if settings.get("api_url") and settings.get("api_key"):
configured.append(app_name)
-
- settings_logger.debug(f"Configured apps: {configured}")
+
+ settings_logger.debug("Configured apps: %s", configured)
return configured
+
def apply_timezone(timezone: str) -> bool:
"""Apply the specified timezone to the application.
-
+
Args:
timezone: The timezone to set (e.g., 'UTC', 'America/New_York')
-
+
Returns:
bool: True if successful, False otherwise
"""
try:
# Set TZ environment variable (this works in all environments and is sufficient)
os.environ['TZ'] = timezone
-
+
# Force Python to reload time zone information
try:
import time
@@ -331,26 +361,25 @@ def apply_timezone(timezone: str) -> bool:
except AttributeError:
# tzset() is not available on Windows
pass
-
+
# Clear timezone cache to ensure fresh timezone is loaded
try:
- from src.primary.utils.timezone_utils import clear_timezone_cache
clear_timezone_cache()
settings_logger.debug("Timezone cache cleared")
except Exception as e:
- settings_logger.warning(f"Could not clear timezone cache: {e}")
-
+ settings_logger.warning("Could not clear timezone cache: %s", e)
+
# Refresh all logger formatters to use the new timezone immediately
try:
from src.primary.utils.logger import refresh_timezone_formatters
refresh_timezone_formatters()
settings_logger.debug("Logger timezone formatters refreshed")
except Exception as e:
- settings_logger.warning(f"Could not refresh logger formatters: {e}")
-
+ settings_logger.warning("Could not refresh logger formatters: %s", e)
+
# Note: Database logs now store timestamps in UTC and convert timezone on-the-fly,
# eliminating the need for formatter refreshing
-
+
# Try to update system timezone files only in Docker containers where we have permissions
# This is optional - the TZ environment variable is sufficient for Python timezone handling
system_files_updated = False
@@ -362,84 +391,63 @@ def apply_timezone(timezone: str) -> bool:
# Remove existing symlink if it exists
if os.path.exists("/etc/localtime"):
os.remove("/etc/localtime")
-
+
# Create new symlink
os.symlink(zoneinfo_path, "/etc/localtime")
-
+
# Also update /etc/timezone file
with open("/etc/timezone", "w") as f:
f.write(f"{timezone}\n")
-
+
system_files_updated = True
- settings_logger.debug(f"System timezone files updated to {timezone}")
+ settings_logger.debug("System timezone files updated to %s", timezone)
else:
- settings_logger.debug(f"Timezone file not found: {zoneinfo_path}, using TZ environment variable only")
+ settings_logger.debug("Timezone file not found: %s, using TZ environment variable only", zoneinfo_path)
else:
- settings_logger.debug(f"No write access to /etc, using TZ environment variable only for {timezone}")
+ settings_logger.debug("No write access to /etc, using TZ environment variable only for %s", timezone)
except Exception as e:
# Silently handle any errors with system files - TZ env var is sufficient
- settings_logger.debug(f"Could not update system timezone files: {str(e)}, using TZ environment variable only")
-
+ settings_logger.debug("Could not update system timezone files: %s, using TZ environment variable only", str(e))
+
# Always return True - TZ environment variable is sufficient for timezone handling
if system_files_updated:
- settings_logger.info(f"Timezone fully applied to {timezone} (system files + TZ env var)")
+ settings_logger.info("Timezone fully applied to %s (system files + TZ env var)", timezone)
else:
- settings_logger.info(f"Timezone applied to {timezone} (TZ environment variable)")
-
- return True
-
- except Exception as e:
- settings_logger.error(f"Critical error setting timezone: {str(e)}")
- return False
+ settings_logger.info("Timezone applied to %s (TZ environment variable)", timezone)
-def validate_timezone(timezone_str: str) -> bool:
- """
- Validate if a timezone string is valid using pytz.
-
- Args:
- timezone_str: The timezone string to validate (e.g., 'Europe/Bucharest')
-
- Returns:
- bool: True if valid, False otherwise
- """
- if not timezone_str:
- return False
-
- try:
- import pytz
- pytz.timezone(timezone_str)
return True
- except pytz.UnknownTimeZoneError:
- return False
+
except Exception as e:
- settings_logger.warning(f"Error validating timezone {timezone_str}: {e}")
+ settings_logger.error("Critical error setting timezone: %s", str(e))
return False
+
def get_safe_timezone(timezone_str: str, fallback: str = "UTC") -> str:
"""
Get a safe timezone string, falling back to a default if invalid.
-
+
Args:
timezone_str: The timezone string to validate
fallback: The fallback timezone if validation fails (default: UTC)
-
+
Returns:
str: A valid timezone string
"""
if validate_timezone(timezone_str):
return timezone_str
-
+
if timezone_str != fallback:
- settings_logger.warning(f"Invalid timezone '{timezone_str}', falling back to '{fallback}'")
-
+ settings_logger.warning("Invalid timezone '%s', falling back to '%s'", timezone_str, fallback)
+
# Ensure fallback is also valid
if validate_timezone(fallback):
return fallback
-
+
# Ultimate fallback to UTC if even the fallback is invalid
- settings_logger.error(f"Fallback timezone '{fallback}' is also invalid, using UTC")
+ settings_logger.error("Fallback timezone '%s' is also invalid, using UTC", fallback)
return "UTC"
+
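# --- Illustrative sketch (not part of the patch) ---
# get_safe_timezone() above delegates validation to timezone_utils.validate_timezone;
# a pytz-based stand-in (matching the validator this diff removes from this file) is
# used here so the fallback chain can be shown end to end.
import pytz

def is_valid_tz(name: str) -> bool:
    try:
        pytz.timezone(name)
        return True
    except pytz.UnknownTimeZoneError:
        return False

def safe_timezone(name: str, fallback: str = "UTC") -> str:
    if is_valid_tz(name):
        return name
    if is_valid_tz(fallback):
        return fallback
    return "UTC"                      # last resort if even the fallback is bad

print(safe_timezone("Europe/Bucharest"))   # Europe/Bucharest
print(safe_timezone("Mars/Olympus"))       # UTC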
def initialize_timezone_from_env():
"""Initialize timezone setting from TZ environment variable if not already set."""
try:
@@ -448,44 +456,45 @@ def initialize_timezone_from_env():
if not tz_env:
settings_logger.info("No TZ environment variable found, using default UTC")
return
-
+
# Load current general settings
general_settings = load_settings("general")
current_timezone = general_settings.get("timezone")
-
+
# If timezone is not set in settings, initialize it from TZ environment variable
if not current_timezone or current_timezone == "UTC":
- settings_logger.info(f"Initializing timezone from TZ environment variable: {tz_env}")
-
+ settings_logger.info("Initializing timezone from TZ environment variable: %s", tz_env)
+
# Use safe timezone validation
safe_timezone = get_safe_timezone(tz_env)
-
+
if safe_timezone == tz_env:
- settings_logger.info(f"TZ environment variable '{tz_env}' is valid")
+ settings_logger.info("TZ environment variable '%s' is valid", tz_env)
else:
- settings_logger.warning(f"TZ environment variable '{tz_env}' is invalid, using '{safe_timezone}' instead")
-
+ settings_logger.warning("TZ environment variable '%s' is invalid, using '%s' instead", tz_env, safe_timezone)
+
# Update the settings with the safe timezone
general_settings["timezone"] = safe_timezone
save_settings("general", general_settings)
-
+
# Apply the timezone to the system
apply_timezone(safe_timezone)
-
- settings_logger.info(f"Successfully initialized timezone to {safe_timezone}")
+
+ settings_logger.info("Successfully initialized timezone to %s", safe_timezone)
else:
- settings_logger.info(f"Timezone already set in settings: {current_timezone}")
-
+ settings_logger.info("Timezone already set in settings: %s", current_timezone)
+
# Validate the existing timezone setting
safe_timezone = get_safe_timezone(current_timezone)
if safe_timezone != current_timezone:
- settings_logger.warning(f"Existing timezone setting '{current_timezone}' is invalid, updating to '{safe_timezone}'")
+ settings_logger.warning("Existing timezone setting '%s' is invalid, updating to '%s'", current_timezone, safe_timezone)
general_settings["timezone"] = safe_timezone
save_settings("general", general_settings)
apply_timezone(safe_timezone)
-
+
except Exception as e:
- settings_logger.error(f"Error initializing timezone from environment: {e}")
+ settings_logger.error("Error initializing timezone from environment: %s", e)
+
def initialize_base_url_from_env():
"""Initialize base_url setting from BASE_URL environment variable if not already set."""
@@ -498,11 +507,11 @@ def initialize_base_url_from_env():
# Clean up the environment variable value
base_url_env = base_url_env.strip()
-
+
# Ensure it starts with / if not empty
if base_url_env and not base_url_env.startswith('/'):
base_url_env = f'/{base_url_env}'
-
+
# Remove trailing slash if present (except for root)
if base_url_env and base_url_env != '/' and base_url_env.endswith('/'):
base_url_env = base_url_env.rstrip('/')
@@ -510,30 +519,31 @@ def initialize_base_url_from_env():
# Load current general settings
general_settings = load_settings("general")
current_base_url = general_settings.get("base_url", "").strip()
-
+
# If base_url is not set in settings, initialize it from BASE_URL environment variable
if not current_base_url:
- settings_logger.info(f"Initializing base_url from BASE_URL environment variable: {base_url_env}")
-
+ settings_logger.info("Initializing base_url from BASE_URL environment variable: %s", base_url_env)
+
# Update the settings with the base_url
general_settings["base_url"] = base_url_env
save_settings("general", general_settings)
-
+
# Clear cache to ensure new settings are loaded
clear_cache("general")
-
- settings_logger.info(f"Successfully initialized base_url to {base_url_env}")
+
+ settings_logger.info("Successfully initialized base_url to %s", base_url_env)
else:
- settings_logger.debug(f"Base URL already configured in settings: {current_base_url}, not overriding with environment variable")
-
+ settings_logger.debug("Base URL already configured in settings: %s, not overriding with environment variable", current_base_url)
+
except Exception as e:
- settings_logger.error(f"Error initializing base_url from environment: {e}")
+ settings_logger.error("Error initializing base_url from environment: %s", e)
+
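# --- Illustrative sketch (not part of the patch) ---
# The BASE_URL clean-up performed above (trim, force a leading slash, strip any
# trailing slash except for the bare root) as a standalone helper:
def normalize_base_url(value: str) -> str:
    value = value.strip()
    if value and not value.startswith("/"):
        value = f"/{value}"                            # ensure leading slash
    if value and value != "/" and value.endswith("/"):
        value = value.rstrip("/")                      # drop trailing slash(es)
    return value

assert normalize_base_url(" huntarr/ ") == "/huntarr"
assert normalize_base_url("/") == "/"
assert normalize_base_url("") == ""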
# Add a list of known advanced settings for clarity and documentation
ADVANCED_SETTINGS = [
- "api_timeout",
- "command_wait_delay",
- "command_wait_attempts",
+ "api_timeout",
+ "command_wait_delay",
+ "command_wait_attempts",
"minimum_download_queue_size",
"log_refresh_interval_seconds",
"stateful_management_hours",
@@ -542,45 +552,48 @@ def initialize_base_url_from_env():
"base_url" # Add base URL setting
]
+
def get_advanced_setting(setting_name, default_value=None):
"""
Get an advanced setting from general settings.
-
+
Advanced settings are now centralized in general settings and no longer stored
in individual app settings files. This function provides a consistent way to
access these settings from anywhere in the codebase.
-
+
Args:
setting_name: The name of the advanced setting to retrieve
default_value: The default value to return if the setting is not found
-
+
Returns:
The value of the advanced setting, or default_value if not found
"""
if setting_name not in ADVANCED_SETTINGS:
- settings_logger.warning(f"get_advanced_setting called with unknown setting: {setting_name}")
-
+ settings_logger.warning("get_advanced_setting called with unknown setting: %s", setting_name)
+
general_settings = load_settings("general")
return general_settings.get(setting_name, default_value)
+
def get_ssl_verify_setting():
"""
Get the SSL verification setting from general settings.
-
+
Returns:
bool: True if SSL verification is enabled, False otherwise
"""
return get_advanced_setting("ssl_verify", True) # Default to True for security
+
def get_custom_tag(app_name: str, tag_type: str, default: str) -> str:
"""
Get a custom tag for a specific app and tag type.
-
+
Args:
app_name: The name of the app (e.g., 'sonarr', 'radarr')
tag_type: The type of tag (e.g., 'missing', 'upgrade')
default: The default tag to return if not found
-
+
Returns:
str: The custom tag or the default if not found
"""
@@ -588,32 +601,31 @@ def get_custom_tag(app_name: str, tag_type: str, default: str) -> str:
custom_tags = settings.get("custom_tags", {})
return custom_tags.get(tag_type, default)
+
def initialize_database():
"""Initialize database with default configurations if needed"""
- from .utils.database import get_database
- from pathlib import Path
-
# Get database instance and ensure it exists
db = get_database()
db.ensure_database_exists()
-
+
# Initialize database with default configurations
defaults_dir = Path(__file__).parent / "default_configs"
db.initialize_from_defaults(defaults_dir)
-
+
# Start database maintenance scheduler for integrity monitoring
try:
db.schedule_maintenance()
settings_logger.info("Database maintenance scheduler initialized")
except Exception as e:
settings_logger.warning(f"Failed to start database maintenance scheduler: {e}")
-
+
settings_logger.info("Database initialization completed successfully")
+
def _migrate_instance_state_management_if_needed(app_name: str, new_settings_data: Dict[str, Any], db) -> None:
"""
Check if instance names have changed and migrate state management data if needed.
-
+
Args:
app_name: The app type (e.g., 'sonarr', 'radarr')
new_settings_data: The new settings data being saved
@@ -625,48 +637,47 @@ def _migrate_instance_state_management_if_needed(app_name: str, new_settings_dat
if not current_settings or 'instances' not in current_settings:
# No existing instances to migrate from
return
-
+
current_instances = current_settings.get('instances', [])
new_instances = new_settings_data.get('instances', [])
-
+
if not isinstance(current_instances, list) or not isinstance(new_instances, list):
return
-
+
# Create mappings of instances by their position/index and identify name changes
for i, (current_instance, new_instance) in enumerate(zip(current_instances, new_instances)):
if not isinstance(current_instance, dict) or not isinstance(new_instance, dict):
continue
-
+
current_name = current_instance.get('name', f'Instance {i+1}')
new_name = new_instance.get('name', f'Instance {i+1}')
-
+
# If name has changed, migrate the state management data
if current_name != new_name and current_name and new_name:
settings_logger.info(f"Detected instance name change for {app_name} instance {i+1}: '{current_name}' -> '{new_name}'")
-
+
# Attempt to migrate state management data
migration_success = db.migrate_instance_state_management(app_name, current_name, new_name)
-
+
if migration_success:
settings_logger.info(f"Successfully migrated state management data for {app_name} from '{current_name}' to '{new_name}'")
else:
settings_logger.warning(f"Failed to migrate state management data for {app_name} from '{current_name}' to '{new_name}' - user may need to reset state management")
-
+
# Handle case where instances were removed (we don't migrate in this case, just log)
if len(current_instances) > len(new_instances):
removed_count = len(current_instances) - len(new_instances)
settings_logger.info(f"Detected {removed_count} removed instances for {app_name} - state management data for removed instances will remain in database")
-
+
except Exception as e:
settings_logger.error(f"Error checking for instance name changes in {app_name}: {e}")
# Don't fail the save operation if migration checking fails
-
# Example usage (for testing purposes, remove later)
if __name__ == "__main__":
settings_logger.info(f"Known app types: {KNOWN_APP_TYPES}")
-
+
# Ensure defaults are copied if needed
for app in KNOWN_APP_TYPES:
_ensure_config_exists(app)
@@ -693,4 +704,4 @@ def _migrate_instance_state_management_if_needed(app_name: str, new_settings_dat
# Test getting configured apps
configured_list = get_configured_apps()
- settings_logger.debug(f"Configured apps: {configured_list}")
\ No newline at end of file
+ settings_logger.debug(f"Configured apps: {configured_list}")
diff --git a/src/primary/state.py b/src/primary/state.py
deleted file mode 100644
index 8bf31249..00000000
--- a/src/primary/state.py
+++ /dev/null
@@ -1,234 +0,0 @@
-#!/usr/bin/env python3
-"""
-State management module for Huntarr
-Handles all persistence of program state using database
-"""
-
-import os
-import datetime
-import time
-from typing import List, Dict, Any, Optional
-from src.primary import settings_manager
-
-# Import database
-from src.primary.utils.database import get_database
-
-# Get the logger at module level
-from src.primary.utils.logger import get_logger
-logger = get_logger("huntarr")
-
-
-
-def get_last_reset_time(app_type: str) -> datetime.datetime:
- """
- Get the last time the state was reset for a specific app type.
-
- Args:
- app_type: The type of app to get last reset time for.
-
- Returns:
- The datetime of the last reset, or current time if no reset has occurred.
- """
- if not app_type:
- logger.error("get_last_reset_time called without app_type.")
- return datetime.datetime.now()
-
- try:
- db = get_database()
- reset_time_str = db.get_last_reset_time_state(app_type)
- if reset_time_str:
- return datetime.datetime.fromisoformat(reset_time_str)
- except Exception as e:
- logger.error(f"Error reading last reset time for {app_type}: {e}")
-
- # If no reset time exists, initialize it with current time and return current time
- logger.info(f"No reset time found for {app_type}, initializing with current time")
- current_time = datetime.datetime.now()
- set_last_reset_time(current_time, app_type)
- return current_time
-
-def set_last_reset_time(reset_time: datetime.datetime, app_type: str) -> None:
- """
- Set the last time the state was reset for a specific app type.
-
- Args:
- reset_time: The datetime to set
- app_type: The type of app to set last reset time for.
- """
- if not app_type:
- logger.error("set_last_reset_time called without app_type.")
- return
-
- try:
- db = get_database()
- db.set_last_reset_time_state(app_type, reset_time.isoformat())
- except Exception as e:
- logger.error(f"Error writing last reset time for {app_type}: {e}")
-
-def check_state_reset(app_type: str) -> bool:
- """
- Check if the state needs to be reset based on the reset interval.
- If it's time to reset, clears the processed IDs and updates the last reset time.
-
- Args:
- app_type: The type of app to check state reset for.
-
- Returns:
- True if the state was reset, False otherwise.
- """
- if not app_type:
- logger.error("check_state_reset called without app_type.")
- return False
-
- # Use a much longer default interval (1 week = 168 hours) to prevent frequent resets
- reset_interval = settings_manager.get_advanced_setting("stateful_management_hours", 168)
-
- last_reset = get_last_reset_time(app_type)
- now = datetime.datetime.now()
-
- delta = now - last_reset
- hours_passed = delta.total_seconds() / 3600
-
- # Log every cycle to help diagnose state reset issues
- logger.debug(f"State check for {app_type}: {hours_passed:.1f} hours since last reset (interval: {reset_interval}h)")
-
- if hours_passed >= reset_interval:
- logger.warning(f"State files for {app_type} will be reset after {hours_passed:.1f} hours (interval: {reset_interval}h)")
- logger.warning(f"This will cause all previously processed media to be eligible for processing again")
-
- # Add additional safeguard - only reset if more than double the interval has passed
- # This helps prevent accidental resets due to clock issues or other anomalies
- if hours_passed >= (reset_interval * 2):
- logger.info(f"Confirmed state reset for {app_type} after {hours_passed:.1f} hours")
- clear_processed_ids(app_type)
- set_last_reset_time(now, app_type)
- return True
- else:
- logger.info(f"State reset postponed for {app_type} - will proceed when {reset_interval * 2}h have passed")
- # Update last reset time partially to avoid immediate reset next cycle
- half_delta = datetime.timedelta(hours=reset_interval/2)
- set_last_reset_time(now - half_delta, app_type)
-
- return False
-
-def clear_processed_ids(app_type: str) -> None:
- """
- Clear all processed IDs for a specific app type.
-
- Args:
- app_type: The type of app to clear processed IDs for.
- """
- if not app_type:
- logger.error("clear_processed_ids called without app_type.")
- return
-
- try:
- db = get_database()
- db.clear_processed_ids_state(app_type)
- logger.info(f"Cleared processed IDs for {app_type}")
- except Exception as e:
- logger.error(f"Error clearing processed IDs for {app_type}: {e}")
-
-def _get_user_timezone():
- """Get the user's selected timezone from general settings"""
- try:
- from src.primary.utils.timezone_utils import get_user_timezone
- return get_user_timezone()
- except Exception as e:
- logger.warning(f"Could not get user timezone, defaulting to UTC: {e}")
- import pytz
- return pytz.UTC
-
-def calculate_reset_time(app_type: str) -> str:
- """
- Calculate when the next state reset will occur.
-
- Args:
- app_type: The type of app to calculate reset time for.
-
- Returns:
- A string representation of when the next reset will occur.
- """
- if not app_type:
- logger.error("calculate_reset_time called without app_type.")
- return "Next reset: Unknown (app type not provided)"
-
- reset_interval = settings_manager.get_advanced_setting("stateful_management_hours", 168)
-
- last_reset = get_last_reset_time(app_type)
-
- # Get user's timezone for consistent time display
- user_tz = _get_user_timezone()
-
- # Convert last reset to user timezone (assuming it was stored as naive UTC)
- import pytz
- if last_reset.tzinfo is None:
- last_reset_utc = pytz.UTC.localize(last_reset)
- else:
- last_reset_utc = last_reset
-
- next_reset = last_reset_utc + datetime.timedelta(hours=reset_interval)
- now_user_tz = datetime.datetime.now(user_tz)
-
- # Convert next_reset to user timezone for comparison
- next_reset_user_tz = next_reset.astimezone(user_tz)
-
- if next_reset_user_tz < now_user_tz:
- return "Next reset: at the start of the next cycle"
-
- delta = next_reset_user_tz - now_user_tz
- hours = delta.total_seconds() / 3600
-
- if hours < 1:
- minutes = delta.total_seconds() / 60
- return f"Next reset: in {int(minutes)} minutes"
- elif hours < 24:
- return f"Next reset: in {int(hours)} hours"
- else:
- days = hours / 24
- return f"Next reset: in {int(days)} days"
-
-def reset_state_file(app_type: str, state_type: str) -> bool:
- """
- Reset a specific state file for an app type.
-
- Args:
- app_type: The type of app (sonarr, radarr, etc.)
- state_type: The type of state file (processed_missing, processed_upgrades)
-
- Returns:
- True if successful, False otherwise
- """
- if not app_type:
- logger.error("reset_state_file called without app_type.")
- return False
-
- try:
- db = get_database()
- db.set_processed_ids_state(app_type, state_type, [])
- logger.info(f"Reset {state_type} state for {app_type}")
- return True
- except Exception as e:
- logger.error(f"Error resetting {state_type} state for {app_type}: {e}")
- return False
-
-def init_state_files() -> None:
- """Initialize state data for all app types in database"""
- app_types = settings_manager.KNOWN_APP_TYPES
-
- try:
- db = get_database()
- for app_type in app_types:
- # Initialize processed IDs if they don't exist
- if not db.get_processed_ids_state(app_type, "processed_missing"):
- db.set_processed_ids_state(app_type, "processed_missing", [])
- if not db.get_processed_ids_state(app_type, "processed_upgrades"):
- db.set_processed_ids_state(app_type, "processed_upgrades", [])
-
- # Initialize reset time if it doesn't exist
- if not db.get_last_reset_time_state(app_type):
- db.set_last_reset_time_state(app_type, datetime.datetime.fromtimestamp(0).isoformat())
- except Exception as e:
- logger.error(f"Error initializing state data: {e}")
-
-init_state_files()
\ No newline at end of file
diff --git a/src/primary/stateful_manager.py b/src/primary/stateful_manager.py
index b2bb766d..f6b4fb3b 100644
--- a/src/primary/stateful_manager.py
+++ b/src/primary/stateful_manager.py
@@ -2,520 +2,295 @@
"""
Stateful Manager for Huntarr
Handles storing and retrieving processed media IDs to prevent reprocessing
-Now uses SQLite database instead of JSON files for better performance and reliability.
"""
-import time
import datetime
import logging
-from typing import Dict, Any, List, Optional, Set
+import time
+from typing import Any
-# Create logger for stateful_manager
-stateful_logger = logging.getLogger("stateful_manager")
+from src.primary.settings_manager import load_settings, load_instance_settings
+from src.primary.utils.database import get_database
+from src.primary.utils.timezone_utils import get_user_timezone
-# Constants
-DEFAULT_HOURS = 168 # Default 7 days (168 hours)
+logger = logging.getLogger("stateful_manager")
-# App types
+DEFAULT_HOURS = 168 # Default 7 days (168 hours)
APP_TYPES = ["sonarr", "radarr", "lidarr", "readarr", "whisparr", "eros"]
-# Import database
-from src.primary.utils.database import get_database
-from src.primary.settings_manager import get_advanced_setting
-
-def initialize_lock_file() -> None:
- """Initialize the lock file with the current timestamp if it doesn't exist."""
- db = get_database()
- lock_info = db.get_stateful_lock_info()
-
- if not lock_info:
+
+def initialize_state_management():
+ """
+ Initialize reset intervals for all app instances based on their settings.
+ """
+ for app in APP_TYPES:
+
try:
- current_time = int(time.time())
- # Get the expiration hours setting
- expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS)
-
- expires_at = current_time + (expiration_hours * 3600)
-
- db.set_stateful_lock_info(current_time, expires_at)
- stateful_logger.info(f"Initialized stateful lock in database with expiration in {expiration_hours} hours")
+ app_settings = load_settings(app)
except Exception as e:
- stateful_logger.error(f"Error initializing stateful lock: {e}")
-
-def get_lock_info() -> Dict[str, Any]:
- """Get the current lock information."""
- initialize_lock_file()
- db = get_database()
-
- try:
- lock_info = db.get_stateful_lock_info()
-
- # Validate the structure and ensure required fields exist
- if not lock_info or "created_at" not in lock_info:
- current_time = int(time.time())
- expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS)
- expires_at = current_time + (expiration_hours * 3600)
-
- lock_info = {
- "created_at": current_time,
- "expires_at": expires_at
- }
- db.set_stateful_lock_info(current_time, expires_at)
-
- if "expires_at" not in lock_info or lock_info["expires_at"] is None:
- # Recalculate expiration if missing
- expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS)
- expires_at = lock_info["created_at"] + (expiration_hours * 3600)
- lock_info["expires_at"] = expires_at
-
- # Save the updated info
- db.set_stateful_lock_info(lock_info["created_at"], expires_at)
-
- return lock_info
- except Exception as e:
- stateful_logger.error(f"Error reading lock info from database: {e}")
- # Return default values if there's an error
- current_time = int(time.time())
- expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS)
- expires_at = current_time + (expiration_hours * 3600)
-
- return {
- "created_at": current_time,
- "expires_at": expires_at
- }
+ logger.error("Skipping %s. Could not load settings: %s", app, e)
+ continue
-def update_lock_expiration(hours: int = None) -> bool:
- """Update the lock expiration based on the hours setting."""
- if hours is None:
- expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS)
- else:
- expiration_hours = hours
-
- lock_info = get_lock_info()
- created_at = lock_info.get("created_at", int(time.time()))
- expires_at = created_at + (expiration_hours * 3600)
-
- try:
- db = get_database()
- db.set_stateful_lock_info(created_at, expires_at)
- stateful_logger.info(f"Updated lock expiration to {datetime.datetime.fromtimestamp(expires_at)}")
- return True
- except Exception as e:
- stateful_logger.error(f"Error updating lock expiration: {e}")
- return False
+ for i, instance_settings in enumerate(app_settings.get("instances", [])):
+
+ try:
+ instance_enabled = instance_settings["enabled"]
+ instance_name = instance_settings["name"]
+ except KeyError as e:
+ logger.error(
+ "Skipping initialization of instance %d in %s app. Missing setting: %s",
+ i, app, e,
+ )
+ continue
-def reset_stateful_management() -> bool:
+ try:
+ instance_initialized = get_database().get_instance_lock_info(app, instance_name)
+ except Exception as e:
+ logger.error(
+ "Skipping initialization of %s/%s. Could not verify existing lock: %s",
+ app, instance_name, e,
+ )
+ continue
+
+ if not instance_enabled or instance_initialized:
+ continue # Skip disabled or already initialized instances
+
+ initialize_instance_state_management(
+ app,
+ instance_settings["name"],
+ instance_settings["state_management_hours"],
+ )
+
+
+def initialize_instance_state_management(app: str, instance: str, expiration_hours: int) -> bool:
"""
- Reset the stateful management system.
+ Initialize state management for a specific app instance.
- This involves:
- 1. Creating a new lock file with the current timestamp and a calculated expiration time
- based on the 'stateful_management_hours' setting.
- 2. Deleting all stored processed ID data from the database.
+ Args:
+ app: The type of app (sonarr, radarr, etc.)
+ instance: The name of the instance
+ expiration_hours: The duration for state management in hours
Returns:
- bool: True if the reset was successful, False otherwise.
+ bool: True if initialization was successful, False otherwise
"""
- try:
- db = get_database()
-
- # Get the expiration hours setting BEFORE writing the lock info
- expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS)
-
- # Create new lock info with calculated expiration
- current_time = int(time.time())
- expires_at = current_time + (expiration_hours * 3600)
-
- # Clear all stateful data and set new lock info
- db.clear_all_stateful_data()
- db.set_stateful_lock_info(current_time, expires_at)
-
- stateful_logger.info(f"Successfully reset stateful management. New expiration: {datetime.datetime.fromtimestamp(expires_at)}")
- return True
- except Exception as e:
- stateful_logger.error(f"Error resetting stateful management: {e}")
+ if app not in APP_TYPES:
+ logger.error("Unknown app type: %s", app)
return False
-def check_expiration() -> bool:
- """
- Check if the stateful management has expired.
-
- Returns:
- bool: True if expired, False otherwise
- """
- lock_info = get_lock_info()
- expires_at = lock_info.get("expires_at")
-
- # If expires_at is None, update it based on settings
- if expires_at is None:
- update_lock_expiration()
- lock_info = get_lock_info()
- expires_at = lock_info.get("expires_at")
-
current_time = int(time.time())
-
- if current_time >= expires_at:
- stateful_logger.info("Stateful management has expired, resetting...")
- reset_stateful_management()
- return True
-
- return False
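+ # Convert the configured window from hours to seconds and derive an absolute Unix-time expiry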
+ expires_at = current_time + (expiration_hours * 3600)
-def get_processed_ids(app_type: str, instance_name: str) -> Set[str]:
- """
- Get the set of processed media IDs for a specific app instance.
-
- Args:
- app_type: The type of app (sonarr, radarr, etc.)
- instance_name: The name of the instance
-
- Returns:
- Set[str]: Set of processed media IDs
- """
- if app_type not in APP_TYPES:
- stateful_logger.warning(f"Unknown app type: {app_type}")
- return set()
-
try:
db = get_database()
- processed_ids_set = db.get_processed_ids(app_type, instance_name)
- stateful_logger.debug(f"[get_processed_ids] Read {len(processed_ids_set)} IDs from database for {app_type}/{instance_name}: {processed_ids_set}")
- return processed_ids_set
+ db.set_instance_lock_info(app, instance, current_time, expires_at, expiration_hours)
except Exception as e:
- stateful_logger.error(f"Error reading processed IDs for {instance_name} from database: {e}")
- return set()
+ logger.error("Error initializing state management for %s/%s: %s", app, instance, e)
+ return False
+
+ logger.info(
+ "Initialized state management for %s/%s with %dh interval",
+ app, instance, expiration_hours,
+ )
+
+ return True
+
def add_processed_id(app_type: str, instance_name: str, media_id: str) -> bool:
"""
Add a media ID to the processed list for a specific app instance.
-
+
Args:
app_type: The type of app (sonarr, radarr, etc.)
instance_name: The name of the instance
media_id: The ID of the processed media
-
+
Returns:
bool: True if successful, False otherwise (or if state management is disabled)
"""
if app_type not in APP_TYPES:
- stateful_logger.warning(f"Unknown app type: {app_type}")
+ logger.warning("Unknown app type: %s", app_type)
return False
-
+
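+ # Skip the database write entirely if this ID was already recorded for the instance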
+ if is_processed(app_type, instance_name, media_id):
+ logger.info("[add_processed_id] ID %s already in database for %s/%s", media_id, app_type, instance_name)
+ return True
+
try:
- # First check if state management is enabled for this instance
- instance_hours = 168 # Default
- instance_mode = "custom"
-
- try:
- from src.primary.settings_manager import load_settings
- settings = load_settings(app_type)
-
- if settings and 'instances' in settings:
- # Find the matching instance
- for instance in settings['instances']:
- if instance.get('name') == instance_name:
- instance_mode = instance.get('state_management_mode', 'custom')
- instance_hours = instance.get('state_management_hours', 168)
-
- # If state management is disabled for this instance, don't add to processed list
- if instance_mode == 'disabled':
- stateful_logger.debug(f"State management disabled for {app_type}/{instance_name}, not adding item {media_id} to processed list")
- return True # Return True to indicate "success" (no error), but item wasn't actually added
- break
- except Exception as e:
- stateful_logger.warning(f"Could not check state management mode for {app_type}/{instance_name}: {e}")
- # Fall back to adding anyway if we can't determine the mode
-
- db = get_database()
-
- # Initialize per-instance state management if not already done
- db.initialize_instance_state_management(app_type, instance_name, instance_hours)
-
- # Check if this instance's state has expired
- if db.check_instance_expiration(app_type, instance_name):
- stateful_logger.info(f"State management expired for {app_type}/{instance_name}, resetting before adding new ID...")
- db.reset_instance_state_management(app_type, instance_name, instance_hours)
-
- # Check if already processed
- if db.is_processed(app_type, instance_name, media_id):
- stateful_logger.debug(f"[add_processed_id] ID {media_id} already in database for {app_type}/{instance_name}")
- return True
-
- # Add the new ID
- success = db.add_processed_id(app_type, instance_name, media_id)
- if success:
- stateful_logger.debug(f"[add_processed_id] Added ID {media_id} to database for {app_type}/{instance_name}")
-
- return success
+ get_database().add_processed_id(app_type, instance_name, media_id)
except Exception as e:
- stateful_logger.error(f"Error adding media ID {media_id} to database: {e}")
- return False
+ logger.error("Error adding media ID %s to database: %s", media_id, e)
+
+ logger.info("[add_processed_id] Added ID %s to database for %s/%s", media_id, app_type, instance_name)
+
+ return True
+
def is_processed(app_type: str, instance_name: str, media_id: str) -> bool:
"""
Check if a media ID has already been processed.
-
+
Args:
app_type: The type of app (sonarr, radarr, etc.)
instance_name: The name of the instance
media_id: The ID of the media to check
-
+
Returns:
bool: True if already processed, False otherwise (or if state management is disabled)
"""
+ if app_type not in APP_TYPES:
+ logger.warning("Unknown app type: %s", app_type)
+ return False
+
+ media_id = str(media_id) # Ensure media_id is a string for consistent checking
+
try:
- # First check if state management is enabled for this instance
- instance_hours = 168 # Default
- instance_mode = "custom"
-
- try:
- from src.primary.settings_manager import load_settings
- settings = load_settings(app_type)
-
- if settings and 'instances' in settings:
- # Find the matching instance
- for instance in settings['instances']:
- if instance.get('name') == instance_name:
- instance_mode = instance.get('state_management_mode', 'custom')
- instance_hours = instance.get('state_management_hours', 168)
-
- # If state management is disabled for this instance, always return False (not processed)
- if instance_mode == 'disabled':
- stateful_logger.debug(f"State management disabled for {app_type}/{instance_name}, treating item {media_id} as unprocessed")
- return False
- break
- except Exception as e:
- stateful_logger.warning(f"Could not check state management mode for {app_type}/{instance_name}: {e}")
- # Fall back to checking anyway if we can't determine the mode
-
- db = get_database()
-
- # Initialize per-instance state management if not already done
- db.initialize_instance_state_management(app_type, instance_name, instance_hours)
-
- # Check if this instance's state has expired
- if db.check_instance_expiration(app_type, instance_name):
- stateful_logger.info(f"State management expired for {app_type}/{instance_name}, resetting...")
- db.reset_instance_state_management(app_type, instance_name, instance_hours)
- # After reset, item is not processed
- return False
-
- # Converting media_id to string since some callers might pass an integer
- media_id_str = str(media_id)
- is_in_db = db.is_processed(app_type, instance_name, media_id_str)
-
- # Get total count for logging
- processed_ids = db.get_processed_ids(app_type, instance_name)
- total_count = len(processed_ids)
-
- stateful_logger.info(f"is_processed check: {app_type}/{instance_name}, ID:{media_id_str}, Found:{is_in_db}, Total IDs:{total_count}")
-
- return is_in_db
+ processed_ids = get_database().get_processed_ids(app_type, instance_name)
except Exception as e:
- stateful_logger.error(f"Error checking if processed for {app_type}/{instance_name}, ID:{media_id}: {e}")
+ logger.error("Could not load processed IDs for %s/%s: %s", app_type, instance_name, e)
return False
-def get_stateful_management_info() -> Dict[str, Any]:
- """Get information about the stateful management system."""
- lock_info = get_lock_info()
- created_at_ts = lock_info.get("created_at")
- expires_at_ts = lock_info.get("expires_at")
-
- # Get the interval setting
- expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS)
-
- return {
- "created_at_ts": created_at_ts,
- "expires_at_ts": expires_at_ts,
- "interval_hours": expiration_hours
- }
-
-def get_state_management_summary(app_type: str, instance_name: str, instance_hours: int = None) -> Dict[str, Any]:
+ is_item_processed = media_id in processed_ids
+ total_processed_ids = len(processed_ids)
+
+ logger.info(
+ "is_processed check: %s/%s, ID:%s, Found:%s, Total IDs:%d",
+ app_type, instance_name, media_id, is_item_processed, total_processed_ids,
+ )
+
+ return is_item_processed
+
+
+def get_instance_state_management_summary(app_type: str, instance_name: str) -> dict[str, Any]:
"""
Get a summary of stateful management for an app instance.
-
+
Args:
app_type: The type of app (sonarr, radarr, etc.)
instance_name: The name of the instance
- instance_hours: Custom hours for this instance (if provided)
-
+
Returns:
- Dict containing processed count, next reset time, and other useful info
+ dict containing processed count, next reset time, and other useful info
"""
try:
+ settings = load_instance_settings(app_type, instance_name)
+
+ if settings["state_management_mode"] == "disabled":
+ return {
+ "state_management_mode": settings["state_management_mode"],
+ "state_management_enabled": False,
+ "processed_count": 0,
+ "next_reset_time": None,
+ "state_management_hours": settings["state_management_hours"],
+ "has_processed_items": False
+ }
+
db = get_database()
-
- # Use per-instance hours if provided, otherwise fall back to global setting
- if instance_hours is not None:
- expiration_hours = instance_hours
- else:
- expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS)
-
+
# Initialize per-instance state management if not already done
- db.initialize_instance_state_management(app_type, instance_name, expiration_hours)
-
- # Get processed IDs count
- processed_ids = get_processed_ids(app_type, instance_name)
- processed_count = len(processed_ids)
-
+ db.initialize_instance_state_management(app_type, instance_name, settings["state_management_hours"])
+
# Get per-instance lock info for accurate next reset time
lock_info = db.get_instance_lock_info(app_type, instance_name)
if lock_info and lock_info.get("expires_at"):
- import datetime
expires_at = lock_info["expires_at"]
# Convert to user timezone for display
- user_tz = _get_user_timezone()
+ user_tz = get_user_timezone()
utc_time = datetime.datetime.fromtimestamp(expires_at, tz=datetime.timezone.utc)
local_time = utc_time.astimezone(user_tz)
next_reset_time = local_time.strftime('%Y-%m-%d %H:%M:%S')
else:
# This should not happen since initialize_instance_state_management was called above
- stateful_logger.warning(f"No lock info found for {app_type}/{instance_name} after initialization")
+ logger.warning("No lock info found for %s/%s after initialization", app_type, instance_name)
next_reset_time = None
-
+
+ # Get processed IDs count
+ processed_ids = db.get_processed_ids(app_type, instance_name)
+ processed_count = len(processed_ids)
+
return {
+ "state_management_mode": settings["state_management_mode"],
+ "state_management_enabled": settings["state_management_mode"] != "disabled",
"processed_count": processed_count,
"next_reset_time": next_reset_time,
- "expiration_hours": expiration_hours,
+ "state_management_hours": settings["state_management_hours"],
"has_processed_items": processed_count > 0
}
except Exception as e:
- stateful_logger.error(f"Error getting state management summary for {app_type}/{instance_name}: {e}")
+ logger.error("Error getting state management summary for %s/%s: %s", app_type, instance_name, e)
return {
+ "state_management_mode": "custom",
+ "state_management_enabled": True,
"processed_count": 0,
"next_reset_time": None,
- "expiration_hours": instance_hours or DEFAULT_HOURS,
+ "state_management_hours": DEFAULT_HOURS,
"has_processed_items": False
}
-def _get_user_timezone():
- """Get the user's selected timezone from general settings"""
- try:
- from src.primary.utils.timezone_utils import get_user_timezone
- return get_user_timezone()
- except Exception as e:
- stateful_logger.warning(f"Could not get user timezone, defaulting to UTC: {e}")
- import pytz
- return pytz.UTC
+def should_state_management_reset(app: str, instance: str) -> bool:
+ """
+ Check if the instance's state needs to be reset based on the reset interval.
+ Args:
+ app: The type of app (sonarr, radarr, etc.)
+ instance: The name of the instance whose state should be checked
-def get_next_reset_time() -> Optional[str]:
- """
- Get the next state management reset time as a formatted string in user's timezone.
-
Returns:
- Formatted reset time string or None if unable to calculate
+ bool: True if the state has expired, False otherwise.
"""
+ if app not in APP_TYPES:
+ logger.error("Unknown app type: %s", app)
+ return False
+
try:
- # Import here to avoid circular imports
- from src.primary.state import get_last_reset_time
-
- # Get user's timezone
- user_tz = _get_user_timezone()
-
- # Get reset interval in hours
- reset_interval = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS)
-
- # Get last reset time and calculate next reset (use 'sonarr' as default for global state)
- last_reset = get_last_reset_time('sonarr') # Pass app_type parameter
-
- # Check if last_reset is valid (not Unix epoch or too old)
- unix_epoch = datetime.datetime(1970, 1, 1)
- one_year_ago = datetime.datetime.now() - datetime.timedelta(days=365)
-
- if last_reset and last_reset > one_year_ago and last_reset != unix_epoch:
- # Convert last reset to user timezone (assuming it was stored in UTC)
- import pytz
- last_reset_utc = pytz.UTC.localize(last_reset) if last_reset.tzinfo is None else last_reset
- next_reset_user_tz = last_reset_utc.astimezone(user_tz) + datetime.timedelta(hours=reset_interval)
- return next_reset_user_tz.strftime('%Y-%m-%d %H:%M:%S')
- else:
- # If no valid last reset time, calculate from now
- stateful_logger.info("No valid last reset time found, calculating next reset from current time")
- now_user_tz = datetime.datetime.now(user_tz)
- next_reset = now_user_tz + datetime.timedelta(hours=reset_interval)
- return next_reset.strftime('%Y-%m-%d %H:%M:%S')
+ lock_info = get_database().get_instance_lock_info(app, instance)
except Exception as e:
- stateful_logger.error(f"Error calculating next reset time: {e}")
- return None
+ logger.error("Could not load lock info for %s/%s: %s", app, instance, e)
+ return False
+
+ if not lock_info:
+ # No lock has been recorded for this instance yet, so there is nothing to reset against
+ return False
+
+ logger.info(
+ "State check for %s.%s: %.1f hours since last reset (interval: %dh)",
+ app,
+ instance,
+ (time.time() - lock_info.get("created_at")) / 3600, # hours since last reset
+ lock_info.get("expiration_hours"),
+ )
-def get_next_reset_time_for_instance(instance_hours: int, app_type: str = None) -> Optional[str]:
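+ # A missing expires_at defaults to +infinity, so an incomplete lock never triggers a reset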
+ return int(time.time()) >= lock_info.get("expires_at", float('inf'))
+
+
+def reset_state_management(app: str, instance: str) -> bool:
"""
- Get the next state management reset time for a specific instance based on custom hours.
-
+ Reset the state management for a specific app instance.
+
Args:
- instance_hours: Custom reset interval hours for this instance
- app_type: The app type for getting last reset time (optional, defaults to 'sonarr')
-
+ app: The type of app (sonarr, radarr, etc.)
+ instance: The name of the instance for which to reset state
+
Returns:
- Formatted reset time string or None if unable to calculate
+ bool: True if successful, False otherwise.
"""
+ if app not in APP_TYPES:
+ logger.error("Unknown app type: %s", app)
+ return False
+
try:
- # Import here to avoid circular imports
- from src.primary.state import get_last_reset_time
-
- # Get user's timezone
- user_tz = _get_user_timezone()
-
- # Default to 'sonarr' if no app_type provided (for backward compatibility)
- if app_type is None:
- app_type = 'sonarr'
-
- # Get last reset time and calculate next reset
- last_reset = get_last_reset_time(app_type) # Pass app_type parameter
-
- # Check if last_reset is valid (not Unix epoch or too old)
- unix_epoch = datetime.datetime(1970, 1, 1)
- one_year_ago = datetime.datetime.now() - datetime.timedelta(days=365)
-
- if last_reset and last_reset > one_year_ago and last_reset != unix_epoch:
- # Convert last reset to user timezone (assuming it was stored in UTC)
- import pytz
- last_reset_utc = pytz.UTC.localize(last_reset) if last_reset.tzinfo is None else last_reset
- next_reset_user_tz = last_reset_utc.astimezone(user_tz) + datetime.timedelta(hours=instance_hours)
- return next_reset_user_tz.strftime('%Y-%m-%d %H:%M:%S')
- else:
- # If no valid last reset time, calculate from now
- stateful_logger.info(f"No valid last reset time found for {app_type}, calculating next reset from current time using {instance_hours} hours")
- now_user_tz = datetime.datetime.now(user_tz)
- next_reset = now_user_tz + datetime.timedelta(hours=instance_hours)
- return next_reset.strftime('%Y-%m-%d %H:%M:%S')
- except Exception as e:
- stateful_logger.error(f"Error calculating next reset time for instance ({instance_hours} hours): {e}")
- return None
-
-def initialize_stateful_system():
- """Perform a complete initialization of the stateful management system."""
- stateful_logger.info("Initializing stateful management system")
-
- # Initialize the database and lock info
- try:
- initialize_lock_file()
- # Update expiration time
- expiration_hours = get_advanced_setting("stateful_management_hours", DEFAULT_HOURS)
- update_lock_expiration(expiration_hours)
- stateful_logger.info(f"Stateful lock initialized in database with {expiration_hours} hour expiration")
+ settings = load_instance_settings(app, instance)
+ state_management_hours = settings["state_management_hours"]
except Exception as e:
- stateful_logger.error(f"Failed to initialize stateful lock: {e}")
-
- # Check for existing processed IDs in database
+ logger.error("Could not load settings for %s/%s: %s", app, instance, e)
+ state_management_hours = DEFAULT_HOURS
+
+ now = int(time.time())
+ expires_at = now + (state_management_hours * 3600)
+
try:
db = get_database()
- total_ids = 0
- for app_type in APP_TYPES:
- # Get a sample of instance names to count processed IDs
- # This is a rough count since we don't track instance names separately
- processed_ids = db.get_processed_ids(app_type, "Default") # Check default instance
- total_ids += len(processed_ids)
-
- if total_ids > 0:
- stateful_logger.info(f"Found {total_ids} existing processed IDs in database")
- else:
- stateful_logger.info("No existing processed IDs found in database")
+ db.clear_instance_processed_ids(app, instance)
+ db.set_instance_lock_info(app, instance, now, expires_at, state_management_hours)
except Exception as e:
- stateful_logger.error(f"Failed to check for existing processed IDs: {e}")
-
- stateful_logger.info("Stateful management system initialization complete")
+ logger.error("Error resetting state management for %s/%s: %s", app, instance, e)
+ return False
-# Initialize the stateful system on module import
-initialize_stateful_system()
+ return True
diff --git a/src/primary/stateful_routes.py b/src/primary/stateful_routes.py
index 3dcfe99d..ba33520e 100644
--- a/src/primary/stateful_routes.py
+++ b/src/primary/stateful_routes.py
@@ -4,46 +4,38 @@
Handles API endpoints for stateful management
"""
-from flask import Blueprint, jsonify, request, Response
import json
+from typing import Any
+
+from flask import Blueprint, request, Response
+
from src.primary.stateful_manager import (
- get_stateful_management_info,
- reset_stateful_management,
- update_lock_expiration,
- get_state_management_summary
+ get_instance_state_management_summary,
+ reset_state_management,
)
from src.primary.utils.logger import get_logger
-# Create logger
stateful_logger = get_logger("stateful")
-# Create blueprint
stateful_api = Blueprint('stateful_api', __name__)
-@stateful_api.route('/info', methods=['GET'])
-def get_info():
- """Get stateful management information."""
- try:
- info = get_stateful_management_info()
- # Add CORS headers to allow access from frontend
- response_data = {
- "success": True,
- "created_at_ts": info.get("created_at_ts"),
- "expires_at_ts": info.get("expires_at_ts"),
- "interval_hours": info.get("interval_hours")
- }
- response = Response(json.dumps(response_data))
- response.headers['Content-Type'] = 'application/json'
- response.headers['Access-Control-Allow-Origin'] = '*'
- return response
- except Exception as e:
- stateful_logger.error(f"Error getting stateful info: {e}")
- # Return error response with proper headers
- error_data = {"success": False, "message": f"Error getting stateful info: {str(e)}"}
- response = Response(json.dumps(error_data), status=500)
- response.headers['Content-Type'] = 'application/json'
- response.headers['Access-Control-Allow-Origin'] = '*'
- return response
+
+def base_response(status: int, data: dict[str, Any]):
+ """
+ Helper to create a Flask Response with CORS headers.
+
+ Args:
+ status (int): HTTP status code
+ data (dict): Data to include in the response body
+
+ Returns:
+ Response: Flask Response object with CORS headers
+ """
+ response = Response(json.dumps(data), status=status)
+ response.headers['Content-Type'] = 'application/json'
+ response.headers['Access-Control-Allow-Origin'] = '*'
+ return response
+
@stateful_api.route('/reset', methods=['POST'])
def reset_stateful():
@@ -52,101 +44,29 @@ def reset_stateful():
data = request.json or {}
app_type = data.get('app_type')
instance_name = data.get('instance_name')
-
- if app_type and instance_name:
- # Per-instance reset
- from src.primary.utils.database import get_database
- from src.primary.settings_manager import load_settings
-
- # Get instance settings for expiration hours
- instance_hours = 168 # Default
- try:
- settings = load_settings(app_type)
- if settings and 'instances' in settings:
- for instance in settings['instances']:
- if instance.get('name') == instance_name:
- instance_hours = instance.get('state_management_hours', 168)
- break
- except Exception as e:
- stateful_logger.warning(f"Could not load instance settings for {app_type}/{instance_name}: {e}")
-
- # Reset per-instance state management
- db = get_database()
- success = db.reset_instance_state_management(app_type, instance_name, instance_hours)
-
- if success:
- stateful_logger.info(f"Successfully reset state management for {app_type}/{instance_name}")
- response_data = {"success": True, "message": f"State management reset successfully for {app_type}/{instance_name}"}
- else:
- response_data = {"success": False, "message": f"Failed to reset state management for {app_type}/{instance_name}"}
-
- else:
- # Global reset (legacy)
- success = reset_stateful_management()
- if success:
- response_data = {"success": True, "message": "Stateful management reset successfully"}
- else:
- response_data = {"success": False, "message": "Failed to reset stateful management"}
-
- # Add CORS headers to allow access from frontend
- status_code = 200 if response_data["success"] else 500
- response = Response(json.dumps(response_data), status=status_code)
- response.headers['Content-Type'] = 'application/json'
- response.headers['Access-Control-Allow-Origin'] = '*'
- return response
-
- except Exception as e:
- stateful_logger.error(f"Error resetting stateful management: {e}")
- # Return error response with proper headers
- error_data = {"success": False, "error": str(e)}
- response = Response(json.dumps(error_data), status=500)
- response.headers['Content-Type'] = 'application/json'
- response.headers['Access-Control-Allow-Origin'] = '*'
- return response
-
-@stateful_api.route('/update-expiration', methods=['POST'])
-def update_expiration():
- """Update the stateful management expiration time."""
- try:
- hours = request.json.get('hours')
- if hours is None or not isinstance(hours, int) or hours <= 0:
- stateful_logger.error(f"Invalid hours value for update-expiration: {hours}")
- # Return error response with proper headers
- error_data = {"success": False, "message": f"Invalid hours value: {hours}. Must be a positive integer."}
- response = Response(json.dumps(error_data), status=400)
- response.headers['Content-Type'] = 'application/json'
- response.headers['Access-Control-Allow-Origin'] = '*'
- return response
-
- updated = update_lock_expiration(hours)
- if updated:
- # Get updated info
- info = get_stateful_management_info()
- # Add CORS headers to allow access from frontend
- response_data = {
- "success": True,
- "message": f"Expiration updated to {hours} hours",
- "expires_at": info.get("expires_at"),
- "expires_date": info.get("expires_date")
- }
- response = Response(json.dumps(response_data))
- response.headers['Content-Type'] = 'application/json'
- response.headers['Access-Control-Allow-Origin'] = '*'
- return response
+
+ if not app_type or not instance_name:
+ return base_response(400, {
+ "success": False,
+ "message": "app_type and instance_name parameters are required"
+ })
+
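+ # The legacy global reset path was removed; resets always target a specific app/instance pair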
+ success = reset_state_management(app_type, instance_name)
+ if success:
+ stateful_logger.info("Successfully reset state management for %s/%s", app_type, instance_name)
+ response_data = {"success": True, "message": f"State management reset successfully for {app_type}/{instance_name}"}
else:
- # Add CORS headers to allow access from frontend
- response = Response(json.dumps({"success": False, "message": "Failed to update expiration"}), status=500)
- response.headers['Content-Type'] = 'application/json'
- response.headers['Access-Control-Allow-Origin'] = '*'
- return response
+ response_data = {"success": False, "message": f"Failed to reset state management for {app_type}/{instance_name}"}
+
+ return base_response(200 if response_data["success"] else 500, response_data)
+
except Exception as e:
- stateful_logger.error(f"Error updating expiration: {e}", exc_info=True)
- # Return error response with proper headers
- error_data = {"success": False, "message": f"Error updating expiration: {str(e)}"}
- response = Response(json.dumps(error_data), status=500)
- response.headers['Content-Type'] = 'application/json'
- response.headers['Access-Control-Allow-Origin'] = '*'
- return response
+ stateful_logger.error("Error resetting stateful management: %s", e)
+ return base_response(500, {
+ "success": False,
+ "message": f"Error resetting stateful management: {str(e)}",
+ })
+
@stateful_api.route('/summary', methods=['GET'])
def get_summary():
@@ -154,72 +74,27 @@ def get_summary():
try:
app_type = request.args.get('app_type')
instance_name = request.args.get('instance_name')
-
+
if not app_type or not instance_name:
- error_data = {"success": False, "message": "app_type and instance_name parameters are required"}
- response = Response(json.dumps(error_data), status=400)
- response.headers['Content-Type'] = 'application/json'
- response.headers['Access-Control-Allow-Origin'] = '*'
- return response
-
- # Get per-instance settings to retrieve custom hours
- instance_hours = None
- try:
- from src.primary.settings_manager import load_settings
- settings = load_settings(app_type)
-
- if settings and 'instances' in settings:
- # Find the matching instance
- for instance in settings['instances']:
- if instance.get('name') == instance_name:
- # Get per-instance state management hours
- instance_hours = instance.get('state_management_hours', 168)
- instance_mode = instance.get('state_management_mode', 'custom')
-
- # If state management is disabled for this instance, return disabled status
- if instance_mode == 'disabled':
- response_data = {
- "success": True,
- "processed_count": 0,
- "next_reset_time": None,
- "expiration_hours": instance_hours,
- "has_processed_items": False,
- "state_management_enabled": False
- }
- response = Response(json.dumps(response_data))
- response.headers['Content-Type'] = 'application/json'
- response.headers['Access-Control-Allow-Origin'] = '*'
- return response
-
- break
- except Exception as e:
- stateful_logger.warning(f"Could not load instance settings for {app_type}/{instance_name}: {e}")
- # Fall back to default hours if settings can't be loaded
- instance_hours = 168
-
- # Get summary for the specific instance with custom hours
- summary = get_state_management_summary(app_type, instance_name, instance_hours)
-
- response_data = {
+ return base_response(400, {
+ "success": False,
+ "message": "app_type and instance_name parameters are required"
+ })
+
+ summary = get_instance_state_management_summary(app_type, instance_name)
+
+ return base_response(200, {
"success": True,
"processed_count": summary.get("processed_count", 0),
"next_reset_time": summary.get("next_reset_time"),
- "expiration_hours": summary.get("expiration_hours", instance_hours or 168),
+ "expiration_hours": summary.get("state_management_hours"),
"has_processed_items": summary.get("has_processed_items", False),
- "state_management_enabled": True
- }
-
- response = Response(json.dumps(response_data))
- response.headers['Content-Type'] = 'application/json'
- response.headers['Access-Control-Allow-Origin'] = '*'
- return response
-
- except Exception as e:
- stateful_logger.error(f"Error getting stateful summary for {app_type}/{instance_name}: {e}")
- error_data = {"success": False, "message": f"Error getting summary: {str(e)}"}
- response = Response(json.dumps(error_data), status=500)
- response.headers['Content-Type'] = 'application/json'
- response.headers['Access-Control-Allow-Origin'] = '*'
- return response
-
+ "state_management_enabled": summary.get("state_management_enabled")
+ })
+ except Exception as e:
+ stateful_logger.error("Error getting stateful summary for %s/%s: %s", app_type, instance_name, e)
+ return base_response(500, {
+ "success": False,
+ "message": f"Error getting summary: {str(e)}",
+ })
diff --git a/src/primary/utils/app_utils.py b/src/primary/utils/app_utils.py
deleted file mode 100644
index 917ecf5b..00000000
--- a/src/primary/utils/app_utils.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import socket
-from urllib.parse import urlparse
-from src.primary.config import API_URL
-
-def get_ip_address():
- try:
- parsed_url = urlparse(API_URL)
- hostname = parsed_url.netloc
- if ':' in hostname:
- hostname = hostname.split(':')[0]
- return hostname
- except Exception:
- try:
- hostname = socket.gethostname()
- ip = socket.gethostbyname(hostname)
- return ip
- except:
- return "localhost"
-
-def _get_user_timezone():
- """Get the user's selected timezone from general settings"""
- try:
- from src.primary.utils.timezone_utils import get_user_timezone
- return get_user_timezone()
- except Exception:
- import pytz
- return pytz.UTC
-
-def write_log(log_file, message):
- from datetime import datetime
-
- # Use user's selected timezone
- user_tz = _get_user_timezone()
- now = datetime.now(user_tz)
- timestamp = now.strftime("%Y-%m-%d %H:%M:%S")
-
- # Add timezone information
- timezone_name = str(user_tz)
- timestamp_with_tz = f"{timestamp} {timezone_name}"
-
- with open(log_file, 'a') as f:
- f.write(f"{timestamp_with_tz} - {message}\n")
diff --git a/src/primary/utils/database.py b/src/primary/utils/database.py
index e0d6f7e8..85b483f8 100644
--- a/src/primary/utils/database.py
+++ b/src/primary/utils/database.py
@@ -4,25 +4,30 @@
Handles both app configurations, general settings, and stateful management data.
"""
-import os
+import hashlib
import json
+import logging
+import platform
+import os
+import random
+import shutil
import sqlite3
+import threading
+import time
+from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List, Any, Optional, Set
-from datetime import datetime, timedelta
-import logging
-import time
-import shutil
logger = logging.getLogger(__name__)
+
class HuntarrDatabase:
"""Database manager for all Huntarr configurations and settings"""
-
+
def __init__(self):
self.db_path = self._get_database_path()
self.ensure_database_exists()
-
+
def execute_query(self, query: str, params: tuple = None) -> List[tuple]:
"""Execute a raw SQL query and return results"""
with self.get_connection() as conn:
@@ -32,7 +37,7 @@ def execute_query(self, query: str, params: tuple = None) -> List[tuple]:
else:
cursor.execute(query)
return cursor.fetchall()
-
+
def _configure_connection(self, conn):
"""Configure SQLite connection with Synology NAS compatible settings"""
conn.execute('PRAGMA foreign_keys = ON')
@@ -43,7 +48,7 @@ def _configure_connection(self, conn):
conn.execute('PRAGMA mmap_size = 268435456')
conn.execute('PRAGMA wal_autocheckpoint = 1000')
conn.execute('PRAGMA busy_timeout = 30000')
-
+
def get_connection(self):
"""Get a configured SQLite connection with Synology NAS compatibility"""
try:
@@ -54,7 +59,7 @@ def get_connection(self):
return conn
except (sqlite3.DatabaseError, sqlite3.OperationalError) as e:
if "file is not a database" in str(e) or "database disk image is malformed" in str(e):
- logger.error(f"Database corruption detected: {e}")
+ logger.error("Database corruption detected: %s", e)
self._handle_database_corruption()
# Try connecting again after recovery
conn = sqlite3.connect(self.db_path)
@@ -62,7 +67,7 @@ def get_connection(self):
return conn
else:
raise
-
+
def _get_database_path(self) -> Path:
"""Get database path - use /config for Docker, Windows AppData, or local data directory"""
# Check if running in Docker (config directory exists)
@@ -70,57 +75,50 @@ def _get_database_path(self) -> Path:
if config_dir.exists() and config_dir.is_dir():
# Running in Docker - use persistent config directory
return config_dir / "huntarr.db"
-
+
# Check if we have a Windows-specific config directory set
windows_config = os.environ.get("HUNTARR_CONFIG_DIR")
if windows_config:
config_path = Path(windows_config)
config_path.mkdir(parents=True, exist_ok=True)
return config_path / "huntarr.db"
-
+
# Check if we're on Windows and use AppData
- import platform
if platform.system() == "Windows":
appdata = os.environ.get("APPDATA", os.path.expanduser("~"))
windows_config_dir = Path(appdata) / "Huntarr"
windows_config_dir.mkdir(parents=True, exist_ok=True)
return windows_config_dir / "huntarr.db"
-
+
# For local development on non-Windows, use data directory in project root
project_root = Path(__file__).parent.parent.parent.parent
data_dir = project_root / "data"
-
+
# Ensure directory exists
data_dir.mkdir(parents=True, exist_ok=True)
return data_dir / "huntarr.db"
-
-
def _handle_database_corruption(self):
"""Handle database corruption by creating backup and starting fresh"""
- import time
-
- logger.error(f"Handling database corruption for: {self.db_path}")
-
+ logger.error("Handling database corruption for: %s", self.db_path)
try:
# Create backup of corrupted database if it exists
if self.db_path.exists():
backup_path = self.db_path.parent / f"huntarr_corrupted_backup_{int(time.time())}.db"
self.db_path.rename(backup_path)
- logger.warning(f"Corrupted database backed up to: {backup_path}")
+ logger.warning("Corrupted database backed up to: %s", backup_path)
logger.warning("Starting with fresh database - all previous data has been backed up but will be lost")
-
+
# Ensure the corrupted file is completely removed
if self.db_path.exists():
self.db_path.unlink()
-
except Exception as backup_error:
- logger.error(f"Error during database corruption recovery: {backup_error}")
+ logger.error("Error during database corruption recovery: %s", backup_error)
# Force remove the corrupted file
try:
if self.db_path.exists():
self.db_path.unlink()
- except:
+ except Exception:
pass
def _check_database_integrity(self) -> bool:
@@ -132,12 +130,12 @@ def _check_database_integrity(self) -> bool:
if result and result[0] == "ok":
return True
else:
- logger.error(f"Database integrity check failed: {result}")
+ logger.error("Database integrity check failed: %s", result)
return False
except Exception as e:
- logger.error(f"Database integrity check failed with error: {e}")
+ logger.error("Database integrity check failed with error: %s", e)
return False
-
+
def perform_integrity_check(self, repair: bool = False) -> dict:
"""Perform comprehensive integrity check with optional repair"""
results = {
@@ -146,26 +144,26 @@ def perform_integrity_check(self, repair: bool = False) -> dict:
'warnings': [],
'repaired': False
}
-
+
try:
with self.get_connection() as conn:
# Full integrity check
integrity_results = conn.execute("PRAGMA integrity_check").fetchall()
-
+
if len(integrity_results) == 1 and integrity_results[0][0] == 'ok':
logger.info("Database integrity check passed")
else:
results['status'] = 'error'
for result in integrity_results:
results['errors'].append(result[0])
-
+
if repair:
logger.warning("Attempting to repair database corruption")
try:
# Attempt repair by forcing checkpoint and vacuum
conn.execute("PRAGMA wal_checkpoint(TRUNCATE)")
conn.execute("VACUUM")
-
+
# Re-check integrity after repair
post_repair = conn.execute("PRAGMA integrity_check").fetchall()
if len(post_repair) == 1 and post_repair[0][0] == 'ok':
@@ -175,15 +173,15 @@ def perform_integrity_check(self, repair: bool = False) -> dict:
else:
logger.error("Database repair failed, corruption persists")
except Exception as repair_error:
- logger.error(f"Database repair attempt failed: {repair_error}")
-
+ logger.error("Database repair attempt failed: %s", repair_error)
+
# Check foreign key constraints
fk_violations = conn.execute("PRAGMA foreign_key_check").fetchall()
if fk_violations:
results['warnings'].append(f"Foreign key violations found: {len(fk_violations)}")
for violation in fk_violations[:5]: # Limit to first 5
results['warnings'].append(f"FK violation: {violation}")
-
+
# Check index consistency
for table_info in conn.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall():
table_name = table_info[0]
@@ -193,134 +191,128 @@ def perform_integrity_check(self, repair: bool = False) -> dict:
results['warnings'].append(f"Index issues in table {table_name}")
except Exception:
pass # Skip if table doesn't exist or other issues
-
+
except Exception as e:
results['status'] = 'error'
results['errors'].append(f"Integrity check failed: {e}")
- logger.error(f"Failed to perform integrity check: {e}")
-
+ logger.error("Failed to perform integrity check: %s", e)
+
return results
-
+
def create_backup(self, backup_path: str = None) -> str:
"""Create a backup of the database using SQLite backup API"""
- import time
- import shutil
- from pathlib import Path
-
+
if not backup_path:
timestamp = int(time.time())
backup_filename = f"huntarr_backup_{timestamp}.db"
backup_path = self.db_path.parent / backup_filename
else:
backup_path = Path(backup_path)
-
+
try:
# Ensure backup directory exists
backup_path.parent.mkdir(parents=True, exist_ok=True)
-
+
# Force WAL checkpoint before backup
with self.get_connection() as conn:
conn.execute("PRAGMA wal_checkpoint(TRUNCATE)")
-
+
# Create backup using file copy (simple but effective)
shutil.copy2(self.db_path, backup_path)
-
+
# Verify backup integrity
backup_db = HuntarrDatabase()
backup_db.db_path = backup_path
-
+
if backup_db._check_database_integrity():
- logger.info(f"Database backup created successfully: {backup_path}")
+ logger.info("Database backup created successfully: %s", backup_path)
return str(backup_path)
else:
logger.error("Backup verification failed, removing corrupt backup")
backup_path.unlink(missing_ok=True)
raise Exception("Backup verification failed")
-
+
except Exception as e:
- logger.error(f"Failed to create database backup: {e}")
+ logger.error("Failed to create database backup: %s", e)
raise
-
+
def schedule_maintenance(self):
"""Schedule regular maintenance tasks"""
- import threading
- import time
-
+
def maintenance_worker():
while True:
try:
# Wait 6 hours between maintenance cycles
time.sleep(6 * 60 * 60)
-
+
logger.info("Starting scheduled database maintenance")
-
+
# Perform integrity check
integrity_results = self.perform_integrity_check(repair=True)
if integrity_results['status'] == 'error':
logger.error("Database integrity issues detected during maintenance")
-
+
# Clean up expired rate limit entries
self.cleanup_expired_rate_limits()
-
+
# Optimize database
with self.get_connection() as conn:
conn.execute("PRAGMA optimize")
conn.execute("PRAGMA wal_checkpoint(PASSIVE)")
-
+
logger.info("Scheduled database maintenance completed")
-
+
except Exception as e:
- logger.error(f"Database maintenance failed: {e}")
-
+ logger.error("Database maintenance failed: %s", e)
+
# Start maintenance thread
maintenance_thread = threading.Thread(target=maintenance_worker, daemon=True)
maintenance_thread.start()
logger.info("Database maintenance scheduler started")
-
+
def ensure_database_exists(self):
"""Create database and all tables if they don't exist"""
try:
# Ensure the database directory exists and is writable
db_dir = self.db_path.parent
db_dir.mkdir(parents=True, exist_ok=True)
-
+
# Test write permissions
test_file = db_dir / f"db_test_{int(time.time())}.tmp"
try:
test_file.write_text("test")
test_file.unlink()
except Exception as perm_error:
- logger.error(f"Database directory not writable: {db_dir} - {perm_error}")
+ logger.error("Database directory not writable: %s - %s", db_dir, perm_error)
# On Windows, try an alternative location
- import platform
if platform.system() == "Windows":
alt_dir = Path(os.path.expanduser("~")) / "Documents" / "Huntarr"
alt_dir.mkdir(parents=True, exist_ok=True)
self.db_path = alt_dir / "huntarr.db"
- logger.info(f"Using alternative database location: {self.db_path}")
+ logger.info("Using alternative database location: %s", self.db_path)
else:
raise perm_error
-
+
except Exception as e:
- logger.error(f"Error setting up database directory: {e}")
+ logger.error("Error setting up database directory: %s", e)
raise
-
+
# Create all tables with corruption recovery
try:
self._create_all_tables()
except (sqlite3.DatabaseError, sqlite3.OperationalError) as e:
if "file is not a database" in str(e) or "database disk image is malformed" in str(e):
- logger.error(f"Database corruption detected during table creation: {e}")
+ logger.error("Database corruption detected during table creation: %s", e)
self._handle_database_corruption()
# Try creating tables again after recovery
self._create_all_tables()
else:
raise
-
+
def _create_all_tables(self):
"""Create all database tables"""
with self.get_connection() as conn:
-
+
# Create app_configs table for all app settings
conn.execute('''
CREATE TABLE IF NOT EXISTS app_configs (
@@ -331,7 +323,7 @@ def _create_all_tables(self):
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
-
+
# Create general_settings table for general/global settings
conn.execute('''
CREATE TABLE IF NOT EXISTS general_settings (
@@ -343,17 +335,10 @@ def _create_all_tables(self):
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
-
- # Create stateful_lock table for stateful management lock info
- conn.execute('''
- CREATE TABLE IF NOT EXISTS stateful_lock (
- id INTEGER PRIMARY KEY CHECK (id = 1),
- created_at INTEGER NOT NULL,
- expires_at INTEGER NOT NULL,
- updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
- )
- ''')
-
+
+ # Legacy global stateful_lock table removed - drop if it exists
+ conn.execute('DROP TABLE IF EXISTS stateful_lock')
+
# Create stateful_processed_ids table for processed media IDs
conn.execute('''
CREATE TABLE IF NOT EXISTS stateful_processed_ids (
@@ -365,7 +350,7 @@ def _create_all_tables(self):
UNIQUE(app_type, instance_name, media_id)
)
''')
-
+
# Create stateful_instance_locks table for per-instance state management
conn.execute('''
CREATE TABLE IF NOT EXISTS stateful_instance_locks (
@@ -379,7 +364,7 @@ def _create_all_tables(self):
UNIQUE(app_type, instance_name)
)
''')
-
+
# Create media_stats table for tracking hunted/upgraded media statistics
conn.execute('''
CREATE TABLE IF NOT EXISTS media_stats (
@@ -391,7 +376,7 @@ def _create_all_tables(self):
UNIQUE(app_type, stat_type)
)
''')
-
+
# Create hourly_caps table for API usage tracking
conn.execute('''
CREATE TABLE IF NOT EXISTS hourly_caps (
@@ -402,7 +387,7 @@ def _create_all_tables(self):
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
-
+
# Create sleep_data table for cycle tracking
conn.execute('''
CREATE TABLE IF NOT EXISTS sleep_data (
@@ -415,7 +400,7 @@ def _create_all_tables(self):
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
-
+
# Create swaparr_stats table for Swaparr-specific statistics
conn.execute('''
CREATE TABLE IF NOT EXISTS swaparr_stats (
@@ -425,10 +410,10 @@ def _create_all_tables(self):
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
-
+
# History table moved to manager.db - remove this table if it exists
conn.execute('DROP TABLE IF EXISTS history')
-
+
# Create schedules table for storing scheduled actions
conn.execute('''
CREATE TABLE IF NOT EXISTS schedules (
@@ -444,20 +429,10 @@ def _create_all_tables(self):
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
-
- # Create state_data table for state management (processed IDs and reset times)
- conn.execute('''
- CREATE TABLE IF NOT EXISTS state_data (
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- app_type TEXT NOT NULL,
- state_type TEXT NOT NULL,
- state_data TEXT NOT NULL,
- created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
- updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
- UNIQUE(app_type, state_type)
- )
- ''')
-
+
+ # Legacy state_data table removed - drop if it exists
+ conn.execute('DROP TABLE IF EXISTS state_data')
+
# Create swaparr_state table for Swaparr-specific state management
conn.execute('''
CREATE TABLE IF NOT EXISTS swaparr_state (
@@ -470,7 +445,7 @@ def _create_all_tables(self):
UNIQUE(app_name, state_type)
)
''')
-
+
# Create users table for authentication and user management
conn.execute('''
CREATE TABLE IF NOT EXISTS users (
@@ -487,7 +462,7 @@ def _create_all_tables(self):
recovery_key TEXT
)
''')
-
+
# Create sponsors table for GitHub sponsors data
conn.execute('''
CREATE TABLE IF NOT EXISTS sponsors (
@@ -503,10 +478,10 @@ def _create_all_tables(self):
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
-
+
# Logs table moved to separate logs.db - remove if it exists
conn.execute('DROP TABLE IF EXISTS logs')
-
+
# Create recovery_key_rate_limit table for tracking failed recovery key attempts
conn.execute('''
CREATE TABLE IF NOT EXISTS recovery_key_rate_limit (
@@ -556,7 +531,7 @@ def _create_all_tables(self):
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
-
+
# Add temp_2fa_secret column if it doesn't exist (for existing databases)
try:
conn.execute('ALTER TABLE users ADD COLUMN temp_2fa_secret TEXT')
@@ -564,7 +539,7 @@ def _create_all_tables(self):
except sqlite3.OperationalError:
# Column already exists
pass
-
+
# Add recovery_key column if it doesn't exist (for existing databases)
try:
conn.execute('ALTER TABLE users ADD COLUMN recovery_key TEXT')
@@ -572,7 +547,7 @@ def _create_all_tables(self):
except sqlite3.OperationalError:
# Column already exists
pass
-
+
# Add plex_linked_at column if it doesn't exist (for existing databases)
try:
conn.execute('ALTER TABLE users ADD COLUMN plex_linked_at INTEGER')
@@ -580,7 +555,7 @@ def _create_all_tables(self):
except sqlite3.OperationalError:
# Column already exists
pass
-
+
# Create reset_requests table for reset request management
conn.execute('''
CREATE TABLE IF NOT EXISTS reset_requests (
@@ -591,7 +566,7 @@ def _create_all_tables(self):
processed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
''')
-
+
# Create indexes for better performance
conn.execute('CREATE INDEX IF NOT EXISTS idx_app_configs_type ON app_configs(app_type)')
conn.execute('CREATE INDEX IF NOT EXISTS idx_general_settings_key ON general_settings(setting_key)')
@@ -605,7 +580,6 @@ def _create_all_tables(self):
conn.execute('CREATE INDEX IF NOT EXISTS idx_schedules_app_type ON schedules(app_type)')
conn.execute('CREATE INDEX IF NOT EXISTS idx_schedules_enabled ON schedules(enabled)')
conn.execute('CREATE INDEX IF NOT EXISTS idx_schedules_time ON schedules(time_hour, time_minute)')
- conn.execute('CREATE INDEX IF NOT EXISTS idx_state_data_app_type ON state_data(app_type, state_type)')
conn.execute('CREATE INDEX IF NOT EXISTS idx_swaparr_state_app_name ON swaparr_state(app_name, state_type)')
conn.execute('CREATE INDEX IF NOT EXISTS idx_users_username ON users(username)')
conn.execute('CREATE INDEX IF NOT EXISTS idx_sponsors_login ON sponsors(login)')
@@ -614,10 +588,10 @@ def _create_all_tables(self):
conn.execute('CREATE INDEX IF NOT EXISTS idx_hunt_history_date_time ON hunt_history(date_time)')
conn.execute('CREATE INDEX IF NOT EXISTS idx_hunt_history_media_id ON hunt_history(media_id)')
conn.execute('CREATE INDEX IF NOT EXISTS idx_hunt_history_operation_type ON hunt_history(operation_type)')
-
+
conn.commit()
- logger.info(f"Database initialized at: {self.db_path}")
-
+ logger.info("Database initialized at: %s", self.db_path)
+
def get_app_config(self, app_type: str) -> Optional[Dict[str, Any]]:
"""Get app configuration from database"""
with self.get_connection() as conn:
@@ -626,19 +600,19 @@ def get_app_config(self, app_type: str) -> Optional[Dict[str, Any]]:
(app_type,)
)
row = cursor.fetchone()
-
+
if row:
try:
return json.loads(row[0])
except json.JSONDecodeError as e:
- logger.error(f"Failed to parse JSON for {app_type}: {e}")
+ logger.error("Failed to parse JSON for %s: %s", app_type, e)
return None
return None
-
+
def save_app_config(self, app_type: str, config_data: Dict[str, Any]):
"""Save app configuration to database"""
config_json = json.dumps(config_data, indent=2)
-
+
with self.get_connection() as conn:
conn.execute('''
INSERT OR REPLACE INTO app_configs (app_type, config_data, updated_at)
@@ -646,7 +620,7 @@ def save_app_config(self, app_type: str, config_data: Dict[str, Any]):
''', (app_type, config_json))
conn.commit()
# Auto-save enabled - no need to log every successful save
-
+
def get_general_settings(self) -> Dict[str, Any]:
"""Get all general settings as a dictionary"""
with self.get_connection() as conn:
@@ -654,13 +628,13 @@ def get_general_settings(self) -> Dict[str, Any]:
cursor = conn.execute(
'SELECT setting_key, setting_value, setting_type FROM general_settings'
)
-
+
settings = {}
for row in cursor.fetchall():
key = row['setting_key']
value = row['setting_value']
setting_type = row['setting_type']
-
+
# Convert value based on type
if setting_type == 'boolean':
settings[key] = value.lower() == 'true'
@@ -675,9 +649,9 @@ def get_general_settings(self) -> Dict[str, Any]:
settings[key] = value
else: # string
settings[key] = value
-
+
return settings
-
+
def save_general_settings(self, settings: Dict[str, Any]):
"""Save general settings to database"""
with self.get_connection() as conn:
@@ -698,16 +672,16 @@ def save_general_settings(self, settings: Dict[str, Any]):
else:
setting_type = 'string'
setting_value = str(value)
-
+
conn.execute('''
- INSERT OR REPLACE INTO general_settings
+ INSERT OR REPLACE INTO general_settings
(setting_key, setting_value, setting_type, updated_at)
VALUES (?, ?, ?, CURRENT_TIMESTAMP)
''', (key, setting_value, setting_type))
-
+
conn.commit()
# Auto-save enabled - no need to log every successful save
-
+
def get_general_setting(self, key: str, default: Any = None) -> Any:
"""Get a specific general setting"""
with self.get_connection() as conn:
@@ -716,10 +690,10 @@ def get_general_setting(self, key: str, default: Any = None) -> Any:
(key,)
)
row = cursor.fetchone()
-
+
if row:
value, setting_type = row
-
+
# Convert value based on type
if setting_type == 'boolean':
return value.lower() == 'true'
@@ -734,9 +708,9 @@ def get_general_setting(self, key: str, default: Any = None) -> Any:
return value
else: # string
return value
-
+
return default
-
+
def set_general_setting(self, key: str, value: Any):
"""Set a specific general setting"""
# Determine type and convert value
@@ -755,39 +729,39 @@ def set_general_setting(self, key: str, value: Any):
else:
setting_type = 'string'
setting_value = str(value)
-
+
with self.get_connection() as conn:
conn.execute('''
- INSERT OR REPLACE INTO general_settings
+ INSERT OR REPLACE INTO general_settings
(setting_key, setting_value, setting_type, updated_at)
VALUES (?, ?, ?, CURRENT_TIMESTAMP)
''', (key, setting_value, setting_type))
conn.commit()
- logger.debug(f"Set general setting {key} = {value}")
-
+ logger.debug("Set general setting %s = %r", key, value)
+
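The settings methods above form a small typed key-value store: each value is written as TEXT next to a `setting_type` tag and converted back on read. Only the 'boolean' and 'string' branches are visible in these hunks, so the sketch below covers just those two tags; the remaining branches are elided by the diff.

def encode_setting(value):
    """Tagging for the two branches visible above (booleans may be serialized
    with any casing - the read side lowercases before comparing)."""
    if isinstance(value, bool):
        return "boolean", str(value)
    return "string", str(value)

def decode_setting(setting_type: str, raw: str):
    if setting_type == "boolean":
        return raw.lower() == "true"
    return raw  # 'string' and any unrecognized tag fall through unchanged

assert decode_setting(*encode_setting(True)) is True
assert decode_setting(*encode_setting("0.0.0.0")) == "0.0.0.0"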
def get_version(self) -> str:
"""Get the current version from database"""
return self.get_general_setting('current_version', 'N/A')
-
+
def set_version(self, version: str):
"""Set the current version in database"""
self.set_general_setting('current_version', version.strip())
- logger.debug(f"Version stored in database: {version.strip()}")
-
+ logger.debug("Version stored in database: %s", version.strip())
+
def get_all_app_types(self) -> List[str]:
"""Get list of all app types in database"""
with self.get_connection() as conn:
cursor = conn.execute('SELECT app_type FROM app_configs ORDER BY app_type')
return [row[0] for row in cursor.fetchall()]
-
+
def initialize_from_defaults(self, defaults_dir: Path):
"""Initialize database with default configurations if empty"""
app_types = ['sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros', 'swaparr', 'general']
-
+
for app_type in app_types:
# Check if config already exists
existing_config = self.get_app_config(app_type) if app_type != 'general' else self.get_general_settings()
-
+
if not existing_config:
# Load default config
default_file = defaults_dir / f"{app_type}.json"
@@ -795,111 +769,60 @@ def initialize_from_defaults(self, defaults_dir: Path):
try:
with open(default_file, 'r') as f:
default_config = json.load(f)
-
+
if app_type == 'general':
self.save_general_settings(default_config)
else:
self.save_app_config(app_type, default_config)
-
- logger.info(f"Initialized {app_type} with default configuration")
+
+ logger.info("Initialized %s with default configuration", app_type)
except Exception as e:
- logger.error(f"Failed to initialize {app_type} from defaults: {e}")
-
+ logger.error("Failed to initialize %s from defaults: %s", app_type, e)
+
# Stateful Management Methods
-
- def get_stateful_lock_info(self) -> Dict[str, Any]:
- """Get stateful management lock information"""
- with self.get_connection() as conn:
- cursor = conn.execute('SELECT created_at, expires_at FROM stateful_lock WHERE id = 1')
- row = cursor.fetchone()
-
- if row:
- return {
- "created_at": row[0],
- "expires_at": row[1]
- }
- return {}
-
- def set_stateful_lock_info(self, created_at: int, expires_at: int):
- """Set stateful management lock information"""
- with self.get_connection() as conn:
- conn.execute('''
- INSERT OR REPLACE INTO stateful_lock (id, created_at, expires_at, updated_at)
- VALUES (1, ?, ?, CURRENT_TIMESTAMP)
- ''', (created_at, expires_at))
- conn.commit()
- logger.debug(f"Set stateful lock: created_at={created_at}, expires_at={expires_at}")
-
+
def get_processed_ids(self, app_type: str, instance_name: str) -> Set[str]:
"""Get processed media IDs for a specific app instance"""
with self.get_connection() as conn:
cursor = conn.execute('''
- SELECT media_id FROM stateful_processed_ids
+ SELECT media_id FROM stateful_processed_ids
WHERE app_type = ? AND instance_name = ?
''', (app_type, instance_name))
-
+
return {row[0] for row in cursor.fetchall()}
-
- def add_processed_id(self, app_type: str, instance_name: str, media_id: str) -> bool:
+
+ def add_processed_id(self, app_type: str, instance_name: str, media_id: str):
"""Add a processed media ID for a specific app instance"""
- try:
- with self.get_connection() as conn:
- conn.execute('''
- INSERT OR IGNORE INTO stateful_processed_ids
- (app_type, instance_name, media_id)
- VALUES (?, ?, ?)
- ''', (app_type, instance_name, str(media_id)))
- conn.commit()
- logger.debug(f"Added processed ID {media_id} for {app_type}/{instance_name}")
- return True
- except Exception as e:
- logger.error(f"Error adding processed ID {media_id} for {app_type}/{instance_name}: {e}")
- return False
-
+ with self.get_connection() as conn:
+ conn.execute('''
+ INSERT OR IGNORE INTO stateful_processed_ids
+ (app_type, instance_name, media_id)
+ VALUES (?, ?, ?)
+ ''', (app_type, instance_name, str(media_id)))
+ conn.commit()
+
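`add_processed_id` above leans on the `UNIQUE(app_type, instance_name, media_id)` constraint declared in the table plus `INSERT OR IGNORE`, so recording the same ID twice is a silent no-op rather than an error. Typical usage, where `db` is an assumed, already-initialized `HuntarrDatabase` instance:

db.add_processed_id("sonarr", "Default", "12345")
db.add_processed_id("sonarr", "Default", "12345")  # duplicate: ignored, no error
assert db.is_processed("sonarr", "Default", "12345")
assert "12345" in db.get_processed_ids("sonarr", "Default")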
def is_processed(self, app_type: str, instance_name: str, media_id: str) -> bool:
"""Check if a media ID has been processed for a specific app instance"""
with self.get_connection() as conn:
cursor = conn.execute('''
- SELECT 1 FROM stateful_processed_ids
+ SELECT 1 FROM stateful_processed_ids
WHERE app_type = ? AND instance_name = ? AND media_id = ?
''', (app_type, instance_name, str(media_id)))
-
+
return cursor.fetchone() is not None
-
- def clear_all_stateful_data(self):
- """Clear all stateful management data (for reset)"""
- with self.get_connection() as conn:
- # Clear processed IDs
- conn.execute('DELETE FROM stateful_processed_ids')
- # Clear lock info
- conn.execute('DELETE FROM stateful_lock')
- # Clear per-instance locks
- conn.execute('DELETE FROM stateful_instance_locks')
- conn.commit()
- logger.info("Cleared all stateful management data from database")
-
- def get_stateful_summary(self, app_type: str, instance_name: str) -> Dict[str, Any]:
- """Get summary of stateful data for an app instance"""
- processed_ids = self.get_processed_ids(app_type, instance_name)
- return {
- "processed_count": len(processed_ids),
- "has_processed_items": len(processed_ids) > 0
- }
-
- # Per-Instance State Management Methods
-
+
def get_instance_lock_info(self, app_type: str, instance_name: str) -> Dict[str, Any]:
"""Get state management lock information for a specific instance"""
with self.get_connection() as conn:
cursor = conn.execute('''
- SELECT created_at, expires_at, expiration_hours
- FROM stateful_instance_locks
+ SELECT created_at, expires_at, expiration_hours
+ FROM stateful_instance_locks
WHERE app_type = ? AND instance_name = ?
''', (app_type, instance_name))
row = cursor.fetchone()
-
+
if row:
return {
"created_at": row[0],
@@ -907,67 +830,35 @@ def get_instance_lock_info(self, app_type: str, instance_name: str) -> Dict[str,
"expiration_hours": row[2]
}
return {}
-
+
def set_instance_lock_info(self, app_type: str, instance_name: str, created_at: int, expires_at: int, expiration_hours: int):
"""Set state management lock information for a specific instance"""
with self.get_connection() as conn:
conn.execute('''
- INSERT OR REPLACE INTO stateful_instance_locks
+ INSERT OR REPLACE INTO stateful_instance_locks
(app_type, instance_name, created_at, expires_at, expiration_hours, updated_at)
VALUES (?, ?, ?, ?, ?, CURRENT_TIMESTAMP)
''', (app_type, instance_name, created_at, expires_at, expiration_hours))
conn.commit()
-
- def check_instance_expiration(self, app_type: str, instance_name: str) -> bool:
- """Check if state management has expired for a specific instance"""
- import time
- current_time = int(time.time())
-
- lock_info = self.get_instance_lock_info(app_type, instance_name)
- if not lock_info:
- return False # No lock info means not expired, just not initialized
-
- expires_at = lock_info.get("expires_at", 0)
- return current_time >= expires_at
-
+
def clear_instance_processed_ids(self, app_type: str, instance_name: str):
"""Clear processed IDs for a specific instance"""
with self.get_connection() as conn:
conn.execute('''
- DELETE FROM stateful_processed_ids
+ DELETE FROM stateful_processed_ids
WHERE app_type = ? AND instance_name = ?
''', (app_type, instance_name))
conn.commit()
- logger.info(f"Cleared processed IDs for {app_type}/{instance_name}")
-
- def reset_instance_state_management(self, app_type: str, instance_name: str, expiration_hours: int) -> bool:
- """Reset state management for a specific instance"""
- import time
- try:
- current_time = int(time.time())
- expires_at = current_time + (expiration_hours * 3600)
-
- # Clear processed IDs for this instance
- self.clear_instance_processed_ids(app_type, instance_name)
-
- # Set new lock info for this instance
- self.set_instance_lock_info(app_type, instance_name, current_time, expires_at, expiration_hours)
-
- logger.info(f"Reset state management for {app_type}/{instance_name} with {expiration_hours}h expiration")
- return True
- except Exception as e:
- logger.error(f"Error resetting state management for {app_type}/{instance_name}: {e}")
- return False
-
+ logger.info("Cleared processed IDs for %s/%s", app_type, instance_name)
+
def initialize_instance_state_management(self, app_type: str, instance_name: str, expiration_hours: int):
"""Initialize state management for a specific instance if not already initialized"""
lock_info = self.get_instance_lock_info(app_type, instance_name)
if not lock_info:
- import time
current_time = int(time.time())
expires_at = current_time + (expiration_hours * 3600)
self.set_instance_lock_info(app_type, instance_name, current_time, expires_at, expiration_hours)
- logger.info(f"Initialized state management for {app_type}/{instance_name} with {expiration_hours}h expiration")
+ logger.info("Initialized state management for %s/%s with %sh expiration", app_type, instance_name, expiration_hours)
def migrate_instance_state_management(self, app_type: str, old_instance_name: str, new_instance_name: str) -> bool:
"""Migrate state management data from old instance name to new instance name"""
@@ -975,96 +866,96 @@ def migrate_instance_state_management(self, app_type: str, old_instance_name: st
with self.get_connection() as conn:
# Check if old instance has any state management data
cursor = conn.execute('''
- SELECT COUNT(*) FROM stateful_instance_locks
+ SELECT COUNT(*) FROM stateful_instance_locks
WHERE app_type = ? AND instance_name = ?
''', (app_type, old_instance_name))
has_lock_data = cursor.fetchone()[0] > 0
-
+
cursor = conn.execute('''
- SELECT COUNT(*) FROM stateful_processed_ids
+ SELECT COUNT(*) FROM stateful_processed_ids
WHERE app_type = ? AND instance_name = ?
''', (app_type, old_instance_name))
has_processed_data = cursor.fetchone()[0] > 0
-
+
if not has_lock_data and not has_processed_data:
- logger.debug(f"No state management data found for {app_type}/{old_instance_name}, skipping migration")
+ logger.debug("No state management data found for %s/%s, skipping migration", app_type, old_instance_name)
return True
-
+
# Check if new instance name already has data (avoid overwriting)
cursor = conn.execute('''
- SELECT COUNT(*) FROM stateful_instance_locks
+ SELECT COUNT(*) FROM stateful_instance_locks
WHERE app_type = ? AND instance_name = ?
''', (app_type, new_instance_name))
new_has_lock_data = cursor.fetchone()[0] > 0
-
+
cursor = conn.execute('''
- SELECT COUNT(*) FROM stateful_processed_ids
+ SELECT COUNT(*) FROM stateful_processed_ids
WHERE app_type = ? AND instance_name = ?
''', (app_type, new_instance_name))
new_has_processed_data = cursor.fetchone()[0] > 0
-
+
if new_has_lock_data or new_has_processed_data:
- logger.warning(f"New instance name {app_type}/{new_instance_name} already has state management data, skipping migration to avoid conflicts")
+ logger.warning("New instance name %s/%s already has state management data, skipping migration to avoid conflicts", app_type, new_instance_name)
return False
-
+
# Migrate lock data
if has_lock_data:
conn.execute('''
- UPDATE stateful_instance_locks
+ UPDATE stateful_instance_locks
SET instance_name = ?, updated_at = CURRENT_TIMESTAMP
WHERE app_type = ? AND instance_name = ?
''', (new_instance_name, app_type, old_instance_name))
- logger.info(f"Migrated state management lock data from {app_type}/{old_instance_name} to {app_type}/{new_instance_name}")
-
+ logger.info("Migrated state management lock data from %s/%s to %s/%s", app_type, old_instance_name, app_type, new_instance_name)
+
# Migrate processed IDs
if has_processed_data:
conn.execute('''
- UPDATE stateful_processed_ids
+ UPDATE stateful_processed_ids
SET instance_name = ?, updated_at = CURRENT_TIMESTAMP
WHERE app_type = ? AND instance_name = ?
''', (new_instance_name, app_type, old_instance_name))
-
+
# Get count of migrated IDs for logging
cursor = conn.execute('''
- SELECT COUNT(*) FROM stateful_processed_ids
+ SELECT COUNT(*) FROM stateful_processed_ids
WHERE app_type = ? AND instance_name = ?
''', (app_type, new_instance_name))
migrated_count = cursor.fetchone()[0]
-
- logger.info(f"Migrated {migrated_count} processed IDs from {app_type}/{old_instance_name} to {app_type}/{new_instance_name}")
-
+
+ logger.info("Migrated %s processed IDs from %s/%s to %s/%s", migrated_count, app_type, old_instance_name, app_type, new_instance_name)
+
# Also migrate hunt history data if it exists
cursor = conn.execute('''
- SELECT COUNT(*) FROM hunt_history
+ SELECT COUNT(*) FROM hunt_history
WHERE app_type = ? AND instance_name = ?
''', (app_type, old_instance_name))
has_history_data = cursor.fetchone()[0] > 0
-
+
if has_history_data:
conn.execute('''
- UPDATE hunt_history
+ UPDATE hunt_history
SET instance_name = ?
WHERE app_type = ? AND instance_name = ?
''', (new_instance_name, app_type, old_instance_name))
-
+
cursor = conn.execute('''
- SELECT COUNT(*) FROM hunt_history
+ SELECT COUNT(*) FROM hunt_history
WHERE app_type = ? AND instance_name = ?
''', (app_type, new_instance_name))
migrated_history_count = cursor.fetchone()[0]
-
- logger.info(f"Migrated {migrated_history_count} hunt history entries from {app_type}/{old_instance_name} to {app_type}/{new_instance_name}")
-
+
+ logger.info("Migrated %s hunt history entries from %s/%s to %s/%s", migrated_history_count, app_type, old_instance_name, app_type, new_instance_name)
+
conn.commit()
- logger.info(f"Successfully completed state management migration from {app_type}/{old_instance_name} to {app_type}/{new_instance_name}")
+ logger.info("Successfully completed state management migration from %s/%s to %s/%s", app_type, old_instance_name, app_type, new_instance_name)
return True
-
+
except Exception as e:
- logger.error(f"Error migrating state management data from {app_type}/{old_instance_name} to {app_type}/{new_instance_name}: {e}")
+ logger.error("Error migrating state management data from %s/%s to %s/%s: %s", app_type, old_instance_name, app_type, new_instance_name, e)
return False
# Tally Data Management Methods
-
+
def get_media_stats(self, app_type: str = None) -> Dict[str, Any]:
"""Get media statistics for an app or all apps"""
with self.get_connection() as conn:
@@ -1082,7 +973,7 @@ def get_media_stats(self, app_type: str = None) -> Dict[str, Any]:
stats[app] = {}
stats[app][stat_type] = value
return stats
-
+
def set_media_stat(self, app_type: str, stat_type: str, value: int):
"""Set a media statistic value"""
with self.get_connection() as conn:
@@ -1091,7 +982,7 @@ def set_media_stat(self, app_type: str, stat_type: str, value: int):
VALUES (?, ?, ?, CURRENT_TIMESTAMP)
''', (app_type, stat_type, value))
conn.commit()
-
+
def increment_media_stat(self, app_type: str, stat_type: str, increment: int = 1):
"""Increment a media statistic"""
with self.get_connection() as conn:
@@ -1100,7 +991,7 @@ def increment_media_stat(self, app_type: str, stat_type: str, increment: int = 1
VALUES (?, ?, COALESCE((SELECT stat_value FROM media_stats WHERE app_type = ? AND stat_type = ?), 0) + ?, CURRENT_TIMESTAMP)
''', (app_type, stat_type, app_type, stat_type, increment))
conn.commit()
-
+
def get_hourly_caps(self) -> Dict[str, Dict[str, int]]:
"""Get hourly API caps for all apps"""
with self.get_connection() as conn:
@@ -1109,48 +1000,45 @@ def get_hourly_caps(self) -> Dict[str, Dict[str, int]]:
row[0]: {"api_hits": row[1], "last_reset_hour": row[2]}
for row in cursor.fetchall()
}
-
+
def set_hourly_cap(self, app_type: str, api_hits: int, last_reset_hour: int = None):
"""Set hourly API cap data for an app"""
if last_reset_hour is None:
- import datetime
- last_reset_hour = datetime.datetime.now().hour
-
+ last_reset_hour = datetime.now().hour
+
with self.get_connection() as conn:
conn.execute('''
INSERT OR REPLACE INTO hourly_caps (app_type, api_hits, last_reset_hour, updated_at)
VALUES (?, ?, ?, CURRENT_TIMESTAMP)
''', (app_type, api_hits, last_reset_hour))
conn.commit()
-
+
def increment_hourly_cap(self, app_type: str, increment: int = 1):
"""Increment hourly API usage for an app"""
- import datetime
with self.get_connection() as conn:
conn.execute('''
INSERT OR REPLACE INTO hourly_caps (app_type, api_hits, last_reset_hour, updated_at)
- VALUES (?, COALESCE((SELECT api_hits FROM hourly_caps WHERE app_type = ?), 0) + ?,
+ VALUES (?, COALESCE((SELECT api_hits FROM hourly_caps WHERE app_type = ?), 0) + ?,
COALESCE((SELECT last_reset_hour FROM hourly_caps WHERE app_type = ?), ?), CURRENT_TIMESTAMP)
- ''', (app_type, app_type, increment, app_type, datetime.datetime.now().hour))
+ ''', (app_type, app_type, increment, app_type, datetime.now().hour))
conn.commit()
-
+
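`increment_hourly_cap` above does its read-modify-write in one statement: the correlated `COALESCE((SELECT api_hits ...), 0) + ?` falls back to 0 when no row exists, and because `INSERT OR REPLACE` rewrites the whole row, `last_reset_hour` has to be copied forward through a second subquery. On SQLite 3.24+ an upsert clause updates only the listed columns; a standalone sketch of that alternative, with the column definitions inferred from usage since the `hourly_caps` CREATE TABLE is elided in this diff (the patch itself keeps the OR REPLACE form):

import sqlite3
from datetime import datetime

conn = sqlite3.connect(":memory:")
conn.execute('''CREATE TABLE hourly_caps (
    app_type TEXT PRIMARY KEY,
    api_hits INTEGER DEFAULT 0,
    last_reset_hour INTEGER,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP)''')  # columns assumed, not from the diff

def bump(app_type: str, increment: int = 1):
    conn.execute('''
        INSERT INTO hourly_caps (app_type, api_hits, last_reset_hour)
        VALUES (?, ?, ?)
        ON CONFLICT(app_type) DO UPDATE SET
            api_hits = api_hits + excluded.api_hits,
            updated_at = CURRENT_TIMESTAMP
    ''', (app_type, increment, datetime.now().hour))
    conn.commit()

bump("sonarr")
bump("sonarr", 2)
print(conn.execute("SELECT api_hits FROM hourly_caps WHERE app_type='sonarr'").fetchone())  # (3,)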
def reset_hourly_caps(self):
"""Reset all hourly API caps"""
- import datetime
- current_hour = datetime.datetime.now().hour
-
+ current_hour = datetime.now().hour
+
with self.get_connection() as conn:
conn.execute('''
UPDATE hourly_caps SET api_hits = 0, last_reset_hour = ?, updated_at = CURRENT_TIMESTAMP
''', (current_hour,))
conn.commit()
-
+
def get_sleep_data(self, app_type: str = None) -> Dict[str, Any]:
"""Get sleep/cycle data for an app or all apps"""
with self.get_connection() as conn:
if app_type:
cursor = conn.execute('''
- SELECT next_cycle_time, cycle_lock, last_cycle_start, last_cycle_end
+ SELECT next_cycle_time, cycle_lock, last_cycle_start, last_cycle_end
FROM sleep_data WHERE app_type = ?
''', (app_type,))
row = cursor.fetchone()
@@ -1164,7 +1052,7 @@ def get_sleep_data(self, app_type: str = None) -> Dict[str, Any]:
return {}
else:
cursor = conn.execute('''
- SELECT app_type, next_cycle_time, cycle_lock, last_cycle_start, last_cycle_end
+ SELECT app_type, next_cycle_time, cycle_lock, last_cycle_start, last_cycle_end
FROM sleep_data
''')
return {
@@ -1176,27 +1064,27 @@ def get_sleep_data(self, app_type: str = None) -> Dict[str, Any]:
}
for row in cursor.fetchall()
}
-
- def set_sleep_data(self, app_type: str, next_cycle_time: str = None, cycle_lock: bool = None,
+
+ def set_sleep_data(self, app_type: str, next_cycle_time: str = None, cycle_lock: bool = None,
last_cycle_start: str = None, last_cycle_end: str = None):
"""Set sleep/cycle data for an app"""
with self.get_connection() as conn:
# Get current data
cursor = conn.execute('''
- SELECT next_cycle_time, cycle_lock, last_cycle_start, last_cycle_end
+ SELECT next_cycle_time, cycle_lock, last_cycle_start, last_cycle_end
FROM sleep_data WHERE app_type = ?
''', (app_type,))
row = cursor.fetchone()
-
+
if row:
# Update existing record with only provided values
current_next = row[0] if next_cycle_time is None else next_cycle_time
current_lock = row[1] if cycle_lock is None else cycle_lock
current_start = row[2] if last_cycle_start is None else last_cycle_start
current_end = row[3] if last_cycle_end is None else last_cycle_end
-
+
conn.execute('''
- UPDATE sleep_data
+ UPDATE sleep_data
SET next_cycle_time = ?, cycle_lock = ?, last_cycle_start = ?, last_cycle_end = ?, updated_at = CURRENT_TIMESTAMP
WHERE app_type = ?
''', (current_next, current_lock, current_start, current_end, app_type))
@@ -1206,15 +1094,15 @@ def set_sleep_data(self, app_type: str, next_cycle_time: str = None, cycle_lock:
INSERT INTO sleep_data (app_type, next_cycle_time, cycle_lock, last_cycle_start, last_cycle_end, updated_at)
VALUES (?, ?, ?, ?, ?, CURRENT_TIMESTAMP)
''', (app_type, next_cycle_time, cycle_lock, last_cycle_start, last_cycle_end))
-
+
conn.commit()
-
+
def get_swaparr_stats(self) -> Dict[str, int]:
"""Get Swaparr statistics"""
with self.get_connection() as conn:
cursor = conn.execute('SELECT stat_key, stat_value FROM swaparr_stats')
return {row[0]: row[1] for row in cursor.fetchall()}
-
+
def set_swaparr_stat(self, stat_key: str, value: int):
"""Set a Swaparr statistic value"""
with self.get_connection() as conn:
@@ -1223,7 +1111,7 @@ def set_swaparr_stat(self, stat_key: str, value: int):
VALUES (?, ?, CURRENT_TIMESTAMP)
''', (stat_key, value))
conn.commit()
-
+
def increment_swaparr_stat(self, stat_key: str, increment: int = 1):
"""Increment a Swaparr statistic"""
with self.get_connection() as conn:
@@ -1240,19 +1128,19 @@ def get_schedules(self, app_type: str = None) -> Dict[str, List[Dict[str, Any]]]
"""Get all schedules, optionally filtered by app type"""
with self.get_connection() as conn:
conn.row_factory = sqlite3.Row
-
+
if app_type:
cursor = conn.execute('''
- SELECT * FROM schedules
- WHERE app_type = ?
+ SELECT * FROM schedules
+ WHERE app_type = ?
ORDER BY time_hour, time_minute
''', (app_type,))
else:
cursor = conn.execute('''
- SELECT * FROM schedules
+ SELECT * FROM schedules
ORDER BY app_type, time_hour, time_minute
''')
-
+
schedules = {}
for row in cursor.fetchall():
schedule_data = {
@@ -1264,24 +1152,24 @@ def get_schedules(self, app_type: str = None) -> Dict[str, List[Dict[str, Any]]]
'appType': row['app_type'],
'enabled': bool(row['enabled'])
}
-
+
if row['app_type'] not in schedules:
schedules[row['app_type']] = []
schedules[row['app_type']].append(schedule_data)
-
+
# Ensure all app types are present even if empty
for app in ['global', 'sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros']:
if app not in schedules:
schedules[app] = []
-
+
return schedules
-
+
def save_schedules(self, schedules_data: Dict[str, List[Dict[str, Any]]]):
"""Save all schedules to database (replaces existing schedules)"""
with self.get_connection() as conn:
# Clear existing schedules
conn.execute('DELETE FROM schedules')
-
+
# Insert new schedules
for app_type, schedules_list in schedules_data.items():
for schedule in schedules_list:
@@ -1298,12 +1186,12 @@ def save_schedules(self, schedules_data: Dict[str, List[Dict[str, Any]]]):
except (ValueError, IndexError):
time_hour = 0
time_minute = 0
-
+
# Convert days to JSON string
days_json = json.dumps(schedule.get('days', []))
-
+
conn.execute('''
- INSERT INTO schedules
+ INSERT INTO schedules
(id, app_type, action, time_hour, time_minute, days, app_instance, enabled, updated_at)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, CURRENT_TIMESTAMP)
''', (
@@ -1316,14 +1204,14 @@ def save_schedules(self, schedules_data: Dict[str, List[Dict[str, Any]]]):
schedule.get('app', 'global'),
schedule.get('enabled', True)
))
-
+
conn.commit()
# Schedules saved - no need to log every successful save
-
+
def add_schedule(self, schedule_data: Dict[str, Any]) -> str:
"""Add a single schedule to database"""
schedule_id = schedule_data.get('id', f"{schedule_data.get('appType', 'global')}_{int(datetime.now().timestamp())}")
-
+
# Parse time
time_str = schedule_data.get('time', '00:00')
if isinstance(time_str, dict):
@@ -1337,13 +1225,13 @@ def add_schedule(self, schedule_data: Dict[str, Any]) -> str:
except (ValueError, IndexError):
time_hour = 0
time_minute = 0
-
+
# Convert days to JSON string
days_json = json.dumps(schedule_data.get('days', []))
-
+
with self.get_connection() as conn:
conn.execute('''
- INSERT OR REPLACE INTO schedules
+ INSERT OR REPLACE INTO schedules
(id, app_type, action, time_hour, time_minute, days, app_instance, enabled, updated_at)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, CURRENT_TIMESTAMP)
''', (
@@ -1357,100 +1245,35 @@ def add_schedule(self, schedule_data: Dict[str, Any]) -> str:
schedule_data.get('enabled', True)
))
conn.commit()
-
- logger.info(f"Added/updated schedule {schedule_id}")
+
+ logger.info("Added/updated schedule %s", schedule_id)
return schedule_id
-
+
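`save_schedules` and `add_schedule` above accept schedules in the shape the frontend sends: `time` either as an 'HH:MM' string or as an hour/minute dict, `days` as a list serialized to JSON, an instance name under `app` defaulting to 'global', and an id generated from the app type plus a timestamp when none is supplied. A minimal example of the expected input; the action name and day strings are illustrative, since their exact values are not shown in this diff:

schedule = {
    "id": "sonarr_1700000000",         # optional; generated as "<appType>_<epoch>" if missing
    "appType": "sonarr",
    "action": "pause",                  # illustrative action name
    "time": "03:30",                    # or {"hour": 3, "minute": 30}
    "days": ["monday", "wednesday"],    # stored as a JSON string in the days column
    "app": "Default",                   # instance name; defaults to "global"
    "enabled": True,
}
# db.add_schedule(schedule) inserts or replaces the row and returns its id.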
def delete_schedule(self, schedule_id: str):
"""Delete a schedule from database"""
with self.get_connection() as conn:
cursor = conn.execute('DELETE FROM schedules WHERE id = ?', (schedule_id,))
conn.commit()
-
+
if cursor.rowcount > 0:
- logger.info(f"Deleted schedule {schedule_id}")
+ logger.info("Deleted schedule %s", schedule_id)
else:
- logger.warning(f"Schedule {schedule_id} not found for deletion")
-
+ logger.warning("Schedule %s not found for deletion", schedule_id)
+
def update_schedule_enabled(self, schedule_id: str, enabled: bool):
"""Update the enabled status of a schedule"""
with self.get_connection() as conn:
cursor = conn.execute('''
- UPDATE schedules
- SET enabled = ?, updated_at = CURRENT_TIMESTAMP
+ UPDATE schedules
+ SET enabled = ?, updated_at = CURRENT_TIMESTAMP
WHERE id = ?
''', (enabled, schedule_id))
conn.commit()
-
+
if cursor.rowcount > 0:
- logger.info(f"Updated schedule {schedule_id} enabled status to {enabled}")
+ logger.info("Updated schedule %s enabled status to %s", schedule_id, enabled)
else:
- logger.warning(f"Schedule {schedule_id} not found for update")
-
- # State Management Methods
- def get_state_data(self, app_type: str, state_type: str) -> Any:
- """Get state data for a specific app type and state type"""
- with self.get_connection() as conn:
- cursor = conn.execute(
- 'SELECT state_data FROM state_data WHERE app_type = ? AND state_type = ?',
- (app_type, state_type)
- )
- row = cursor.fetchone()
-
- if row:
- try:
- return json.loads(row[0])
- except json.JSONDecodeError as e:
- logger.error(f"Failed to parse state data for {app_type}/{state_type}: {e}")
- return None
- return None
-
- def set_state_data(self, app_type: str, state_type: str, data: Any):
- """Set state data for a specific app type and state type"""
- data_json = json.dumps(data)
- with self.get_connection() as conn:
- conn.execute(
- '''INSERT OR REPLACE INTO state_data
- (app_type, state_type, state_data, updated_at)
- VALUES (?, ?, ?, CURRENT_TIMESTAMP)''',
- (app_type, state_type, data_json)
- )
- conn.commit()
- logger.debug(f"Set state data for {app_type}/{state_type}")
-
- def get_processed_ids_state(self, app_type: str, state_type: str) -> List[int]:
- """Get processed IDs for a specific app type and state type (missing/upgrades)"""
- data = self.get_state_data(app_type, state_type)
- if data is None:
- return []
- if isinstance(data, list):
- return data
- logger.error(f"Invalid processed IDs data type for {app_type}/{state_type}: {type(data)}")
- return []
-
- def set_processed_ids_state(self, app_type: str, state_type: str, ids: List[int]):
- """Set processed IDs for a specific app type and state type (missing/upgrades)"""
- self.set_state_data(app_type, state_type, ids)
-
- def add_processed_id_state(self, app_type: str, state_type: str, item_id: int):
- """Add a single processed ID to a specific app type and state type"""
- processed_ids = self.get_processed_ids_state(app_type, state_type)
- if item_id not in processed_ids:
- processed_ids.append(item_id)
- self.set_processed_ids_state(app_type, state_type, processed_ids)
-
- def clear_processed_ids_state(self, app_type: str):
- """Clear all processed IDs for a specific app type"""
- self.set_processed_ids_state(app_type, "processed_missing", [])
- self.set_processed_ids_state(app_type, "processed_upgrades", [])
-
- def get_last_reset_time_state(self, app_type: str) -> Optional[str]:
- """Get the last reset time for a specific app type"""
- return self.get_state_data(app_type, "last_reset")
-
- def set_last_reset_time_state(self, app_type: str, reset_time: str):
- """Set the last reset time for a specific app type"""
- self.set_state_data(app_type, "last_reset", reset_time)
+ logger.warning("Schedule %s not found for update", schedule_id)
# Swaparr State Management Methods
def get_swaparr_state_data(self, app_name: str, state_type: str) -> Any:
@@ -1461,12 +1284,12 @@ def get_swaparr_state_data(self, app_name: str, state_type: str) -> Any:
(app_name, state_type)
)
row = cursor.fetchone()
-
+
if row:
try:
return json.loads(row[0])
except json.JSONDecodeError as e:
- logger.error(f"Failed to parse Swaparr state data for {app_name}/{state_type}: {e}")
+ logger.error("Failed to parse Swaparr state data for %s/%s: %s", app_name, state_type, e)
return None
return None
@@ -1475,13 +1298,13 @@ def set_swaparr_state_data(self, app_name: str, state_type: str, data: Any):
data_json = json.dumps(data)
with self.get_connection() as conn:
conn.execute(
- '''INSERT OR REPLACE INTO swaparr_state
- (app_name, state_type, state_data, updated_at)
+ '''INSERT OR REPLACE INTO swaparr_state
+ (app_name, state_type, state_data, updated_at)
VALUES (?, ?, ?, CURRENT_TIMESTAMP)''',
(app_name, state_type, data_json)
)
conn.commit()
- logger.debug(f"Set Swaparr state data for {app_name}/{state_type}")
+ logger.debug("Set Swaparr state data for %s/%s", app_name, state_type)
def get_swaparr_strike_data(self, app_name: str) -> Dict[str, Any]:
"""Get strike data for a specific Swaparr app"""
@@ -1496,13 +1319,13 @@ def get_swaparr_removed_items(self, app_name: str) -> Dict[str, Any]:
"""Get removed items data for a specific Swaparr app"""
data = self.get_swaparr_state_data(app_name, "removed_items")
return data if data is not None else {}
-
+
def set_swaparr_removed_items(self, app_name: str, removed_items: Dict[str, Any]):
"""Set removed items data for a specific Swaparr app"""
self.set_swaparr_state_data(app_name, "removed_items", removed_items)
# Reset Request Management Methods (replaces file-based reset system)
-
+
def create_reset_request(self, app_type: str) -> bool:
"""Create a reset request for an app (replaces creating .reset files)"""
try:
@@ -1512,37 +1335,37 @@ def create_reset_request(self, app_type: str) -> bool:
VALUES (?, ?, 0)
''', (app_type, int(time.time())))
conn.commit()
- logger.info(f"Created reset request for {app_type}")
+ logger.info("Created reset request for %s", app_type)
return True
except Exception as e:
- logger.error(f"Error creating reset request for {app_type}: {e}")
+ logger.error("Error creating reset request for %s: %s", app_type, e)
return False
-
+
def get_pending_reset_request(self, app_type: str) -> Optional[int]:
"""Check if there's a pending reset request for an app (replaces checking .reset files)"""
with self.get_connection() as conn:
cursor = conn.execute('''
- SELECT timestamp FROM reset_requests
+ SELECT timestamp FROM reset_requests
WHERE app_type = ? AND processed = 0
ORDER BY timestamp DESC LIMIT 1
''', (app_type,))
row = cursor.fetchone()
return row[0] if row else None
-
+
def mark_reset_request_processed(self, app_type: str) -> bool:
"""Mark a reset request as processed (replaces deleting .reset files)"""
try:
with self.get_connection() as conn:
conn.execute('''
- UPDATE reset_requests
+ UPDATE reset_requests
SET processed = 1, processed_at = CURRENT_TIMESTAMP
WHERE app_type = ? AND processed = 0
''', (app_type,))
conn.commit()
- logger.info(f"Marked reset request as processed for {app_type}")
+ logger.info("Marked reset request as processed for %s", app_type)
return True
except Exception as e:
- logger.error(f"Error marking reset request as processed for {app_type}: {e}")
+ logger.error("Error marking reset request as processed for %s: %s", app_type, e)
return False
# User Management Methods
@@ -1552,7 +1375,7 @@ def user_exists(self) -> bool:
cursor = conn.execute('SELECT COUNT(*) FROM users')
count = cursor.fetchone()[0]
return count > 0
-
+
def get_user_by_username(self, username: str) -> Optional[Dict[str, Any]]:
"""Get user data by username"""
with self.get_connection() as conn:
@@ -1562,7 +1385,7 @@ def get_user_by_username(self, username: str) -> Optional[Dict[str, Any]]:
(username,)
)
row = cursor.fetchone()
-
+
if row:
user_data = dict(row)
# Parse JSON fields
@@ -1573,7 +1396,7 @@ def get_user_by_username(self, username: str) -> Optional[Dict[str, Any]]:
user_data['plex_user_data'] = None
return user_data
return None
-
+
def get_first_user(self) -> Optional[Dict[str, Any]]:
"""Get the first user from the database (for bypass modes)"""
with self.get_connection() as conn:
@@ -1582,7 +1405,7 @@ def get_first_user(self) -> Optional[Dict[str, Any]]:
'SELECT * FROM users ORDER BY created_at ASC LIMIT 1'
)
row = cursor.fetchone()
-
+
if row:
user_data = dict(row)
# Parse JSON fields
@@ -1593,111 +1416,110 @@ def get_first_user(self) -> Optional[Dict[str, Any]]:
user_data['plex_user_data'] = None
return user_data
return None
-
- def create_user(self, username: str, password: str, two_fa_enabled: bool = False,
- two_fa_secret: str = None, plex_token: str = None,
+
+ def create_user(self, username: str, password: str, two_fa_enabled: bool = False,
+ two_fa_secret: str = None, plex_token: str = None,
plex_user_data: Dict[str, Any] = None) -> bool:
"""Create a new user"""
try:
plex_data_json = json.dumps(plex_user_data) if plex_user_data else None
-
+
with self.get_connection() as conn:
conn.execute('''
- INSERT INTO users (username, password, two_fa_enabled, two_fa_secret,
+ INSERT INTO users (username, password, two_fa_enabled, two_fa_secret,
plex_token, plex_user_data, created_at, updated_at)
VALUES (?, ?, ?, ?, ?, ?, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP)
''', (username, password, two_fa_enabled, two_fa_secret, plex_token, plex_data_json))
conn.commit()
- logger.info(f"Created user: {username}")
+ logger.info("Created user: %s", username)
return True
except Exception as e:
- logger.error(f"Error creating user {username}: {e}")
+ logger.error("Error creating user %s: %s", username, e)
return False
-
+
def update_user_password(self, username: str, new_password: str) -> bool:
"""Update user password"""
try:
with self.get_connection() as conn:
conn.execute('''
- UPDATE users SET password = ?, updated_at = CURRENT_TIMESTAMP
+ UPDATE users SET password = ?, updated_at = CURRENT_TIMESTAMP
WHERE username = ?
''', (new_password, username))
conn.commit()
- logger.info(f"Updated password for user: {username}")
+ logger.info("Updated password for user: %s", username)
return True
except Exception as e:
- logger.error(f"Error updating password for user {username}: {e}")
+ logger.error("Error updating password for user %s: %s", username, e)
return False
-
+
def update_user_username(self, old_username: str, new_username: str) -> bool:
"""Update username"""
try:
with self.get_connection() as conn:
conn.execute('''
- UPDATE users SET username = ?, updated_at = CURRENT_TIMESTAMP
+ UPDATE users SET username = ?, updated_at = CURRENT_TIMESTAMP
WHERE username = ?
''', (new_username, old_username))
conn.commit()
- logger.info(f"Updated username from {old_username} to {new_username}")
+ logger.info("Updated username from %s to %s", old_username, new_username)
return True
except Exception as e:
- logger.error(f"Error updating username from {old_username} to {new_username}: {e}")
+ logger.error("Error updating username from %s to %s: %s", old_username, new_username, e)
return False
-
+
def update_user_2fa(self, username: str, two_fa_enabled: bool, two_fa_secret: str = None) -> bool:
"""Update user 2FA settings"""
try:
with self.get_connection() as conn:
conn.execute('''
- UPDATE users SET two_fa_enabled = ?, two_fa_secret = ?,
- updated_at = CURRENT_TIMESTAMP
+ UPDATE users SET two_fa_enabled = ?, two_fa_secret = ?,
+ updated_at = CURRENT_TIMESTAMP
WHERE username = ?
''', (two_fa_enabled, two_fa_secret, username))
conn.commit()
- logger.info(f"Updated 2FA settings for user: {username}")
+ logger.info("Updated 2FA settings for user: %s", username)
return True
except Exception as e:
- logger.error(f"Error updating 2FA for user {username}: {e}")
+ logger.error("Error updating 2FA for user %s: %s", username, e)
return False
-
+
def update_user_temp_2fa_secret(self, username: str, temp_2fa_secret: str = None) -> bool:
"""Update user temporary 2FA secret"""
try:
with self.get_connection() as conn:
conn.execute('''
- UPDATE users SET temp_2fa_secret = ?, updated_at = CURRENT_TIMESTAMP
+ UPDATE users SET temp_2fa_secret = ?, updated_at = CURRENT_TIMESTAMP
WHERE username = ?
''', (temp_2fa_secret, username))
conn.commit()
- logger.info(f"Updated temporary 2FA secret for user: {username}")
+ logger.info("Updated temporary 2FA secret for user: %s", username)
return True
except Exception as e:
- logger.error(f"Error updating temporary 2FA secret for user {username}: {e}")
+ logger.error("Error updating temporary 2FA secret for user %s: %s", username, e)
return False
-
- def update_user_plex(self, username: str, plex_token: str = None,
+
+ def update_user_plex(self, username: str, plex_token: str = None,
plex_user_data: Dict[str, Any] = None) -> bool:
"""Update user Plex settings"""
try:
- import time
plex_data_json = json.dumps(plex_user_data) if plex_user_data else None
-
+
# Set the linked timestamp when plex_token is provided (linking account)
plex_linked_at = int(time.time()) if plex_token else None
-
+
with self.get_connection() as conn:
conn.execute('''
- UPDATE users SET plex_token = ?, plex_user_data = ?,
- plex_linked_at = ?, updated_at = CURRENT_TIMESTAMP
+ UPDATE users SET plex_token = ?, plex_user_data = ?,
+ plex_linked_at = ?, updated_at = CURRENT_TIMESTAMP
WHERE username = ?
''', (plex_token, plex_data_json, plex_linked_at, username))
conn.commit()
- logger.info(f"Updated Plex settings for user: {username}")
+ logger.info("Updated Plex settings for user: %s", username)
return True
except Exception as e:
- logger.error(f"Error updating Plex settings for user {username}: {e}")
+ logger.error("Error updating Plex settings for user %s: %s", username, e)
return False
-
+
def has_users_with_plex(self) -> bool:
"""Check if any users have Plex authentication configured"""
try:
@@ -1708,20 +1530,17 @@ def has_users_with_plex(self) -> bool:
count = cursor.fetchone()[0]
return count > 0
except Exception as e:
- logger.error(f"Error checking for Plex users: {e}")
+ logger.error("Error checking for Plex users: %s", e)
return False
# Recovery Key Methods
def generate_recovery_key(self, username: str) -> Optional[str]:
"""Generate a new recovery key for a user"""
- import hashlib
- import secrets
- import random
-
+
# Word lists for generating human-readable recovery keys
adjectives = ['ocean', 'storm', 'frost', 'light', 'dark', 'swift', 'calm', 'wild', 'bright', 'deep']
nouns = ['tower', 'bridge', 'quest', 'dream', 'flame', 'river', 'mountain', 'crystal', 'shadow', 'star']
-
+
try:
# Generate a human-readable recovery key like "ocean-light-tower-51"
adj = random.choice(adjectives)
@@ -1729,62 +1548,61 @@ def generate_recovery_key(self, username: str) -> Optional[str]:
noun2 = random.choice(nouns)
number = random.randint(10, 99)
recovery_key = f"{adj}-{noun1}-{noun2}-{number}"
-
+
# Hash the recovery key for secure storage
recovery_key_hash = hashlib.sha256(recovery_key.encode()).hexdigest()
-
+
with self.get_connection() as conn:
conn.execute('''
- UPDATE users SET recovery_key = ?, updated_at = CURRENT_TIMESTAMP
+ UPDATE users SET recovery_key = ?, updated_at = CURRENT_TIMESTAMP
WHERE username = ?
''', (recovery_key_hash, username))
conn.commit()
- logger.info(f"Generated new recovery key for user: {username}")
-
+ logger.info("Generated new recovery key for user: %s", username)
+
# Return the plain text recovery key (only time it's shown)
return recovery_key
except Exception as e:
- logger.error(f"Error generating recovery key for user {username}: {e}")
+ logger.error("Error generating recovery key for user %s: %s", username, e)
return None
-
+
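The recovery keys above are drawn with `random` from 10 adjectives, two of 10 nouns, and a two-digit number, roughly 90,000 combinations, and `random` is not a cryptographic generator; the per-IP lockout further below is what keeps brute forcing impractical. If a stronger generator were ever wanted, `secrets` would be a small swap - a sketch only, not something this patch changes:

import secrets

ADJECTIVES = ['ocean', 'storm', 'frost', 'light', 'dark', 'swift', 'calm', 'wild', 'bright', 'deep']
NOUNS = ['tower', 'bridge', 'quest', 'dream', 'flame', 'river', 'mountain', 'crystal', 'shadow', 'star']

def generate_recovery_key_secure() -> str:
    """Same 'adjective-noun-noun-NN' shape as above, using the secrets module."""
    return "-".join([
        secrets.choice(ADJECTIVES),
        secrets.choice(NOUNS),
        secrets.choice(NOUNS),
        str(secrets.randbelow(90) + 10),  # 10..99, mirroring randint(10, 99)
    ])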
def verify_recovery_key(self, recovery_key: str) -> Optional[str]:
"""Verify a recovery key and return the username if valid"""
- import hashlib
-
+
try:
# Hash the provided recovery key
recovery_key_hash = hashlib.sha256(recovery_key.encode()).hexdigest()
-
+
with self.get_connection() as conn:
cursor = conn.execute(
'SELECT username FROM users WHERE recovery_key = ?',
(recovery_key_hash,)
)
row = cursor.fetchone()
-
+
if row:
- logger.info(f"Recovery key verified for user: {row[0]}")
+ logger.info("Recovery key verified for user: %s", row[0])
return row[0]
else:
- logger.warning(f"Invalid recovery key attempted")
+ logger.warning("Invalid recovery key attempted")
return None
except Exception as e:
- logger.error(f"Error verifying recovery key: {e}")
+ logger.error("Error verifying recovery key: %s", e)
return None
-
+
def clear_recovery_key(self, username: str) -> bool:
"""Clear the recovery key for a user (after password reset)"""
try:
with self.get_connection() as conn:
conn.execute('''
- UPDATE users SET recovery_key = NULL, updated_at = CURRENT_TIMESTAMP
+ UPDATE users SET recovery_key = NULL, updated_at = CURRENT_TIMESTAMP
WHERE username = ?
''', (username,))
conn.commit()
- logger.info(f"Cleared recovery key for user: {username}")
+ logger.info("Cleared recovery key for user: %s", username)
return True
except Exception as e:
- logger.error(f"Error clearing recovery key for user {username}: {e}")
+ logger.error("Error clearing recovery key for user %s: %s", username, e)
return False
def check_recovery_key_rate_limit(self, ip_address: str) -> Dict[str, Any]:
@@ -1793,26 +1611,25 @@ def check_recovery_key_rate_limit(self, ip_address: str) -> Dict[str, Any]:
with self.get_connection() as conn:
conn.row_factory = sqlite3.Row
cursor = conn.execute('''
- SELECT failed_attempts, locked_until, last_attempt
- FROM recovery_key_rate_limit
+ SELECT failed_attempts, locked_until, last_attempt
+ FROM recovery_key_rate_limit
WHERE ip_address = ?
''', (ip_address,))
row = cursor.fetchone()
-
+
if not row:
return {"locked": False, "failed_attempts": 0}
-
+
# Convert locked_until to datetime if it exists
locked_until = None
if row['locked_until']:
try:
- from datetime import datetime
locked_until = datetime.fromisoformat(row['locked_until'])
# Check if lockout has expired
if datetime.now() >= locked_until:
# Clear the lockout
conn.execute('''
- UPDATE recovery_key_rate_limit
+ UPDATE recovery_key_rate_limit
SET failed_attempts = 0, locked_until = NULL, updated_at = CURRENT_TIMESTAMP
WHERE ip_address = ?
''', (ip_address,))
@@ -1821,59 +1638,57 @@ def check_recovery_key_rate_limit(self, ip_address: str) -> Dict[str, Any]:
except ValueError:
# Invalid datetime format, treat as expired
conn.execute('''
- UPDATE recovery_key_rate_limit
+ UPDATE recovery_key_rate_limit
SET failed_attempts = 0, locked_until = NULL, updated_at = CURRENT_TIMESTAMP
WHERE ip_address = ?
''', (ip_address,))
conn.commit()
return {"locked": False, "failed_attempts": 0}
-
+
return {
"locked": locked_until is not None and datetime.now() < locked_until,
"failed_attempts": row['failed_attempts'],
"locked_until": locked_until.isoformat() if locked_until else None
}
except Exception as e:
- logger.error(f"Error checking recovery key rate limit for IP {ip_address}: {e}")
+ logger.error("Error checking recovery key rate limit for IP %s: %s", ip_address, e)
return {"locked": False, "failed_attempts": 0}
def record_recovery_key_attempt(self, ip_address: str, username: str = None, success: bool = False) -> Dict[str, Any]:
"""Record a recovery key attempt and apply rate limiting if needed"""
try:
- from datetime import datetime, timedelta
-
with self.get_connection() as conn:
if success:
# Clear rate limiting on successful attempt
conn.execute('''
- UPDATE recovery_key_rate_limit
+ UPDATE recovery_key_rate_limit
SET failed_attempts = 0, locked_until = NULL, updated_at = CURRENT_TIMESTAMP
WHERE ip_address = ?
''', (ip_address,))
conn.commit()
return {"locked": False, "failed_attempts": 0}
-
+
# Handle failed attempt
cursor = conn.execute('''
SELECT failed_attempts FROM recovery_key_rate_limit WHERE ip_address = ?
''', (ip_address,))
row = cursor.fetchone()
-
+
if row:
# Update existing record
new_failed_attempts = row[0] + 1
locked_until = None
-
+
# Lock for 15 minutes after 3 failed attempts
if new_failed_attempts >= 3:
locked_until = datetime.now() + timedelta(minutes=15)
locked_until_str = locked_until.isoformat()
else:
locked_until_str = None
-
+
conn.execute('''
- UPDATE recovery_key_rate_limit
- SET failed_attempts = ?, locked_until = ?, username = ?,
+ UPDATE recovery_key_rate_limit
+ SET failed_attempts = ?, locked_until = ?, username = ?,
last_attempt = CURRENT_TIMESTAMP, updated_at = CURRENT_TIMESTAMP
WHERE ip_address = ?
''', (new_failed_attempts, locked_until_str, username, ip_address))
@@ -1881,43 +1696,42 @@ def record_recovery_key_attempt(self, ip_address: str, username: str = None, suc
# Create new record
new_failed_attempts = 1
conn.execute('''
- INSERT INTO recovery_key_rate_limit
+ INSERT INTO recovery_key_rate_limit
(ip_address, username, failed_attempts, last_attempt)
VALUES (?, ?, ?, CURRENT_TIMESTAMP)
''', (ip_address, username, new_failed_attempts))
-
+
conn.commit()
-
+
locked = new_failed_attempts >= 3
return {
"locked": locked,
"failed_attempts": new_failed_attempts,
"locked_until": locked_until.isoformat() if locked else None
}
-
+
except Exception as e:
- logger.error(f"Error recording recovery key attempt for IP {ip_address}: {e}")
+ logger.error("Error recording recovery key attempt for IP %s: %s", ip_address, e)
return {"locked": False, "failed_attempts": 0}
def cleanup_expired_rate_limits(self):
"""Clean up expired rate limit entries (older than 24 hours)"""
try:
- from datetime import datetime, timedelta
cutoff_time = datetime.now() - timedelta(hours=24)
-
+
with self.get_connection() as conn:
cursor = conn.execute('''
- DELETE FROM recovery_key_rate_limit
+ DELETE FROM recovery_key_rate_limit
WHERE last_attempt < ? AND (locked_until IS NULL OR locked_until < CURRENT_TIMESTAMP)
''', (cutoff_time.isoformat(),))
deleted_count = cursor.rowcount
conn.commit()
-
+
if deleted_count > 0:
- logger.debug(f"Cleaned up {deleted_count} expired recovery key rate limit entries")
-
+ logger.debug("Cleaned up %s expired recovery key rate limit entries", deleted_count)
+
except Exception as e:
- logger.error(f"Error cleaning up expired rate limits: {e}")
+ logger.error("Error cleaning up expired rate limits: %s", e)
def get_sponsors(self) -> List[Dict[str, Any]]:
"""Get all sponsors from database"""
@@ -1925,17 +1739,17 @@ def get_sponsors(self) -> List[Dict[str, Any]]:
conn.row_factory = sqlite3.Row
cursor = conn.execute('''
SELECT login, name, avatar_url, url, tier, monthly_amount, category, updated_at
- FROM sponsors
+ FROM sponsors
ORDER BY monthly_amount DESC, name ASC
''')
return [dict(row) for row in cursor.fetchall()]
-
+
def save_sponsors(self, sponsors_data: List[Dict[str, Any]]):
"""Save sponsors data to database, replacing existing data"""
with self.get_connection() as conn:
# Clear existing sponsors
conn.execute('DELETE FROM sponsors')
-
+
# Insert new sponsors
for sponsor in sponsors_data:
conn.execute('''
@@ -1950,15 +1764,15 @@ def save_sponsors(self, sponsors_data: List[Dict[str, Any]]):
sponsor.get('monthlyAmount', 0),
sponsor.get('category', 'past')
))
-
- logger.info(f"Saved {len(sponsors_data)} sponsors to database")
-
+
+ logger.info("Saved %s sponsors to database", len(sponsors_data))
+
def add_sponsor(self, sponsor_data: Dict[str, Any]):
"""Add a new sponsor to the database"""
try:
with self.get_connection() as conn:
conn.execute('''
- INSERT OR REPLACE INTO sponsors
+ INSERT OR REPLACE INTO sponsors
(login, name, avatar_url, url, tier, monthly_amount, category)
VALUES (?, ?, ?, ?, ?, ?, ?)
''', (
@@ -1972,7 +1786,7 @@ def add_sponsor(self, sponsor_data: Dict[str, Any]):
))
conn.commit()
except Exception as e:
- logger.error(f"Error adding sponsor: {e}")
+ logger.error("Error adding sponsor: %s", e)
# Logs Database Methods
def insert_log(self, timestamp: datetime, level: str, app_type: str, message: str, logger_name: str = None):
@@ -1985,66 +1799,66 @@ def insert_log(self, timestamp: datetime, level: str, app_type: str, message: st
''', (timestamp.isoformat(), level, app_type, message, logger_name))
conn.commit()
except Exception as e:
- logger.error(f"Error inserting log entry: {e}")
-
+ logger.error("Error inserting log entry: %s", e)
+
def get_logs(self, app_type: str = None, level: str = None, limit: int = 100, offset: int = 0, search: str = None) -> List[Dict[str, Any]]:
"""Get logs with optional filtering"""
try:
with self.get_connection() as conn:
conn.row_factory = sqlite3.Row
-
+
# Build query with filters
query = "SELECT * FROM logs WHERE 1=1"
params = []
-
+
if app_type:
query += " AND app_type = ?"
params.append(app_type)
-
+
if level:
query += " AND level = ?"
params.append(level)
-
+
if search:
query += " AND message LIKE ?"
params.append(f"%{search}%")
-
+
query += " ORDER BY timestamp DESC LIMIT ? OFFSET ?"
params.extend([limit, offset])
-
+
cursor = conn.execute(query, params)
rows = cursor.fetchall()
-
+
return [dict(row) for row in rows]
except Exception as e:
- logger.error(f"Error getting logs: {e}")
+ logger.error("Error getting logs: %s", e)
return []
-
+
def get_log_count(self, app_type: str = None, level: str = None, search: str = None) -> int:
"""Get total count of logs matching filters"""
try:
with self.get_connection() as conn:
query = "SELECT COUNT(*) FROM logs WHERE 1=1"
params = []
-
+
if app_type:
query += " AND app_type = ?"
params.append(app_type)
-
+
if level:
query += " AND level = ?"
params.append(level)
-
+
if search:
query += " AND message LIKE ?"
params.append(f"%{search}%")
-
+
cursor = conn.execute(query, params)
return cursor.fetchone()[0]
except Exception as e:
- logger.error(f"Error getting log count: {e}")
+ logger.error("Error getting log count: %s", e)
return 0
-
+
def cleanup_old_logs(self, days_to_keep: int = 30, max_entries_per_app: int = 10000):
"""Clean up old logs based on age and count limits"""
try:
@@ -2056,33 +1870,33 @@ def cleanup_old_logs(self, days_to_keep: int = 30, max_entries_per_app: int = 10
(cutoff_date.isoformat(),)
)
deleted_by_age = cursor.rowcount
-
+
# Count-based cleanup per app type
app_types = ['system', 'sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros', 'swaparr']
total_deleted_by_count = 0
-
+
for app_type in app_types:
cursor = conn.execute('''
- DELETE FROM logs
+ DELETE FROM logs
WHERE app_type = ? AND id NOT IN (
- SELECT id FROM logs
- WHERE app_type = ?
- ORDER BY timestamp DESC
+ SELECT id FROM logs
+ WHERE app_type = ?
+ ORDER BY timestamp DESC
LIMIT ?
)
''', (app_type, app_type, max_entries_per_app))
total_deleted_by_count += cursor.rowcount
-
+
conn.commit()
-
+
if deleted_by_age > 0 or total_deleted_by_count > 0:
- logger.info(f"Cleaned up logs: {deleted_by_age} by age, {total_deleted_by_count} by count")
-
+ logger.info("Cleaned up logs: %s by age, %s by count", deleted_by_age, total_deleted_by_count)
+
return deleted_by_age + total_deleted_by_count
except Exception as e:
- logger.error(f"Error cleaning up logs: {e}")
+ logger.error("Error cleaning up logs: %s", e)
return 0
-
+
def get_app_types_from_logs(self) -> List[str]:
"""Get list of all app types that have logs"""
try:
@@ -2090,9 +1904,9 @@ def get_app_types_from_logs(self) -> List[str]:
cursor = conn.execute("SELECT DISTINCT app_type FROM logs ORDER BY app_type")
return [row[0] for row in cursor.fetchall()]
except Exception as e:
- logger.error(f"Error getting app types from logs: {e}")
+ logger.error("Error getting app types from logs: %s", e)
return []
-
+
def get_log_levels(self) -> List[str]:
"""Get list of all log levels that exist"""
try:
@@ -2100,9 +1914,9 @@ def get_log_levels(self) -> List[str]:
cursor = conn.execute("SELECT DISTINCT level FROM logs ORDER BY level")
return [row[0] for row in cursor.fetchall()]
except Exception as e:
- logger.error(f"Error getting log levels: {e}")
+ logger.error("Error getting log levels: %s", e)
return []
-
+
def clear_logs(self, app_type: str = None):
"""Clear logs for a specific app type or all logs"""
try:
@@ -2111,36 +1925,36 @@ def clear_logs(self, app_type: str = None):
cursor = conn.execute("DELETE FROM logs WHERE app_type = ?", (app_type,))
else:
cursor = conn.execute("DELETE FROM logs")
-
+
deleted_count = cursor.rowcount
conn.commit()
-
- logger.info(f"Cleared {deleted_count} logs" + (f" for {app_type}" if app_type else ""))
+
+ logger.info("Cleared %s logs%s", deleted_count, (" for " + app_type) if app_type else "")
return deleted_count
except Exception as e:
- logger.error(f"Error clearing logs: {e}")
+ logger.error("Error clearing logs: %s", e)
return 0
# Hunt History/Manager Database Methods (logs methods removed - now in LogsDatabase)
- def add_hunt_history_entry(self, app_type: str, instance_name: str, media_id: str,
- processed_info: str, operation_type: str = "missing",
+ def add_hunt_history_entry(self, app_type: str, instance_name: str, media_id: str,
+ processed_info: str, operation_type: str = "missing",
discovered: bool = False, date_time: int = None) -> Dict[str, Any]:
"""Add a new hunt history entry to the database"""
if date_time is None:
date_time = int(time.time())
-
+
date_time_readable = datetime.fromtimestamp(date_time).strftime('%Y-%m-%d %H:%M:%S')
-
+
with self.get_connection() as conn:
cursor = conn.execute('''
- INSERT INTO hunt_history
+ INSERT INTO hunt_history
(app_type, instance_name, media_id, processed_info, operation_type, discovered, date_time, date_time_readable)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
''', (app_type, instance_name, media_id, processed_info, operation_type, discovered, date_time, date_time_readable))
-
+
entry_id = cursor.lastrowid
conn.commit()
-
+
# Return the created entry
entry = {
"id": entry_id,
@@ -2153,39 +1967,39 @@ def add_hunt_history_entry(self, app_type: str, instance_name: str, media_id: st
"date_time": date_time,
"date_time_readable": date_time_readable
}
-
- logger.info(f"Added hunt history entry for {app_type}-{instance_name}: {processed_info}")
+
+ logger.info("Added hunt history entry for %s-%s: %s", app_type, instance_name, processed_info)
return entry
-
- def get_hunt_history(self, app_type: str = None, search_query: str = None,
+
+ def get_hunt_history(self, app_type: str = None, search_query: str = None,
page: int = 1, page_size: int = 20) -> Dict[str, Any]:
"""Get hunt history entries with pagination and filtering"""
with self.get_connection() as conn:
conn.row_factory = sqlite3.Row
-
+
# Build WHERE clause
where_conditions = []
params = []
-
+
if app_type and app_type != "all":
where_conditions.append("app_type = ?")
params.append(app_type)
-
+
if search_query:
where_conditions.append("(processed_info LIKE ? OR media_id LIKE ?)")
params.extend([f"%{search_query}%", f"%{search_query}%"])
-
+
where_clause = "WHERE " + " AND ".join(where_conditions) if where_conditions else ""
-
+
# Get total count
count_query = f"SELECT COUNT(*) FROM hunt_history {where_clause}"
cursor = conn.execute(count_query, params)
total_entries = cursor.fetchone()[0]
-
+
# Calculate pagination
total_pages = max(1, (total_entries + page_size - 1) // page_size)
offset = (page - 1) * page_size
-
+
# Get entries
entries_query = f"""
SELECT * FROM hunt_history {where_clause}
@@ -2193,17 +2007,17 @@ def get_hunt_history(self, app_type: str = None, search_query: str = None,
LIMIT ? OFFSET ?
"""
cursor = conn.execute(entries_query, params + [page_size, offset])
-
+
entries = []
current_time = int(time.time())
-
+
for row in cursor.fetchall():
entry = dict(row)
# Calculate "how long ago"
seconds_ago = current_time - entry["date_time"]
entry["how_long_ago"] = self._format_time_ago(seconds_ago)
entries.append(entry)
-
+
return {
"entries": entries,
"total_entries": total_entries,
@@ -2216,7 +2030,7 @@ def clear_hunt_history(self, app_type: str = None):
with self.get_connection() as conn:
if app_type and app_type != "all":
conn.execute("DELETE FROM hunt_history WHERE app_type = ?", (app_type,))
- logger.info(f"Cleared hunt history for {app_type}")
+ logger.info("Cleared hunt history for %s", app_type)
else:
conn.execute("DELETE FROM hunt_history")
logger.info("Cleared all hunt history")
@@ -2241,14 +2055,14 @@ def save_setup_progress(self, progress_data: dict) -> bool:
try:
with self.get_connection() as conn:
conn.execute("""
- INSERT OR REPLACE INTO general_settings (setting_key, setting_value, setting_type)
+ INSERT OR REPLACE INTO general_settings (setting_key, setting_value, setting_type)
VALUES ('setup_progress', ?, 'json')
""", (json.dumps(progress_data),))
return True
except Exception as e:
- logger.error(f"Failed to save setup progress: {e}")
+ logger.error("Failed to save setup progress: %s", e)
return False
-
+
def get_setup_progress(self) -> dict:
"""Get setup progress data from database"""
try:
@@ -2256,7 +2070,7 @@ def get_setup_progress(self) -> dict:
result = conn.execute(
"SELECT setting_value FROM general_settings WHERE setting_key = 'setup_progress'"
).fetchone()
-
+
if result:
return json.loads(result[0])
else:
@@ -2271,7 +2085,7 @@ def get_setup_progress(self) -> dict:
'timestamp': datetime.now().isoformat()
}
except Exception as e:
- logger.error(f"Failed to get setup progress: {e}")
+ logger.error("Failed to get setup progress: %s", e)
return {
'current_step': 1,
'completed_steps': [],
@@ -2282,7 +2096,7 @@ def get_setup_progress(self) -> dict:
'recovery_key_generated': False,
'timestamp': datetime.now().isoformat()
}
-
+
def clear_setup_progress(self) -> bool:
"""Clear setup progress data from database (called when setup is complete)"""
try:
@@ -2292,9 +2106,9 @@ def clear_setup_progress(self) -> bool:
)
return True
except Exception as e:
- logger.error(f"Failed to clear setup progress: {e}")
+ logger.error("Failed to clear setup progress: %s", e)
return False
-
+
def is_setup_in_progress(self) -> bool:
"""Check if setup is currently in progress"""
try:
@@ -2304,7 +2118,7 @@ def is_setup_in_progress(self) -> bool:
).fetchone()
return result is not None
except Exception as e:
- logger.error(f"Failed to check setup progress: {e}")
+ logger.error("Failed to check setup progress: %s", e)
return False
# Requestarr methods for managing media requests
@@ -2313,28 +2127,28 @@ def is_already_requested(self, tmdb_id: int, media_type: str, app_type: str, ins
try:
with self.get_connection() as conn:
result = conn.execute('''
- SELECT 1 FROM requestarr_requests
+ SELECT 1 FROM requestarr_requests
WHERE tmdb_id = ? AND media_type = ? AND app_type = ? AND instance_name = ?
''', (tmdb_id, media_type, app_type, instance_name)).fetchone()
return result is not None
except Exception as e:
- logger.error(f"Error checking if media already requested: {e}")
+ logger.error("Error checking if media already requested: %s", e)
return False
- def add_request(self, tmdb_id: int, media_type: str, title: str, year: int, overview: str,
+ def add_request(self, tmdb_id: int, media_type: str, title: str, year: int, overview: str,
poster_path: str, backdrop_path: str, app_type: str, instance_name: str) -> bool:
"""Add a new media request to the database"""
try:
with self.get_connection() as conn:
conn.execute('''
- INSERT OR REPLACE INTO requestarr_requests
+ INSERT OR REPLACE INTO requestarr_requests
(tmdb_id, media_type, title, year, overview, poster_path, backdrop_path, app_type, instance_name, updated_at)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, CURRENT_TIMESTAMP)
''', (tmdb_id, media_type, title, year, overview, poster_path, backdrop_path, app_type, instance_name))
conn.commit()
return True
except Exception as e:
- logger.error(f"Error adding media request: {e}")
+ logger.error("Error adding media request: %s", e)
return False
def get_requests(self, page: int = 1, page_size: int = 20) -> Dict[str, Any]:
@@ -2342,23 +2156,23 @@ def get_requests(self, page: int = 1, page_size: int = 20) -> Dict[str, Any]:
try:
with self.get_connection() as conn:
conn.row_factory = sqlite3.Row
-
+
# Get total count
total_count = conn.execute('SELECT COUNT(*) FROM requestarr_requests').fetchone()[0]
-
+
# Calculate pagination
offset = (page - 1) * page_size
total_pages = (total_count + page_size - 1) // page_size
-
+
# Get paginated results
results = conn.execute('''
- SELECT tmdb_id, media_type, title, year, overview, poster_path, backdrop_path,
+ SELECT tmdb_id, media_type, title, year, overview, poster_path, backdrop_path,
app_type, instance_name, created_at, updated_at
- FROM requestarr_requests
- ORDER BY created_at DESC
+ FROM requestarr_requests
+ ORDER BY created_at DESC
LIMIT ? OFFSET ?
''', (page_size, offset)).fetchall()
-
+
# Convert to list of dictionaries
requests_list = []
for row in results:
@@ -2375,7 +2189,7 @@ def get_requests(self, page: int = 1, page_size: int = 20) -> Dict[str, Any]:
'created_at': row['created_at'],
'updated_at': row['updated_at']
})
-
+
return {
'requests': requests_list,
'total': total_count,
@@ -2383,9 +2197,9 @@ def get_requests(self, page: int = 1, page_size: int = 20) -> Dict[str, Any]:
'page_size': page_size,
'total_pages': total_pages
}
-
+
except Exception as e:
- logger.error(f"Error getting media requests: {e}")
+ logger.error("Error getting media requests: %s", e)
return {
'requests': [],
'total': 0,
@@ -2397,68 +2211,66 @@ def get_requests(self, page: int = 1, page_size: int = 20) -> Dict[str, Any]:
# Separate LogsDatabase class for logs.db
class LogsDatabase:
"""Separate database class specifically for logs to keep logs.db separate from huntarr.db"""
-
+
def __init__(self):
self.db_path = self._get_logs_database_path()
self.ensure_logs_database_exists()
-
+
def _get_logs_database_path(self) -> Path:
"""Get logs database path - same directory as main database but separate file"""
# Check if running in Docker
config_dir = Path("/config")
if config_dir.exists() and config_dir.is_dir():
return config_dir / "logs.db"
-
+
# Check for Windows config directory
windows_config = os.environ.get("HUNTARR_CONFIG_DIR")
if windows_config:
config_path = Path(windows_config)
config_path.mkdir(parents=True, exist_ok=True)
return config_path / "logs.db"
-
+
# Check for Windows AppData
- import platform
if platform.system() == "Windows":
appdata = os.environ.get("APPDATA", os.path.expanduser("~"))
windows_config_dir = Path(appdata) / "Huntarr"
windows_config_dir.mkdir(parents=True, exist_ok=True)
return windows_config_dir / "logs.db"
-
+
# Local development
project_root = Path(__file__).parent.parent.parent.parent
data_dir = project_root / "data"
data_dir.mkdir(parents=True, exist_ok=True)
return data_dir / "logs.db"
-
+
def _configure_logs_connection(self, conn):
"""Configure SQLite connection optimized for high-volume log writes"""
try:
conn.execute('PRAGMA foreign_keys = ON')
-
+
# WAL mode is particularly beneficial for logs (write-heavy workload)
try:
conn.execute('PRAGMA journal_mode = WAL')
except Exception as wal_error:
- logger.warning(f"WAL mode failed for logs.db, using DELETE mode: {wal_error}")
+ logger.warning("WAL mode failed for logs.db, using DELETE mode: %s", wal_error)
conn.execute('PRAGMA journal_mode = DELETE')
-
+
# Optimized settings for log writing
conn.execute('PRAGMA synchronous = NORMAL') # Balance between speed and safety for logs
conn.execute('PRAGMA cache_size = -16000') # 16MB cache for log operations
conn.execute('PRAGMA temp_store = MEMORY')
conn.execute('PRAGMA busy_timeout = 30000') # 30 seconds for log operations
conn.execute('PRAGMA auto_vacuum = INCREMENTAL')
-
+
# WAL-specific optimizations for logs
result = conn.execute('PRAGMA journal_mode').fetchone()
if result and result[0] == 'wal':
conn.execute('PRAGMA wal_autocheckpoint = 2000') # Less frequent checkpoints for logs
conn.execute('PRAGMA journal_size_limit = 134217728') # 128MB journal size for logs
-
+
except Exception as e:
- logger.error(f"Error configuring logs database connection: {e}")
- pass
-
+ logger.error("Error configuring logs database connection: %s", e)
+
def get_logs_connection(self):
"""Get a configured SQLite connection for logs database"""
try:
@@ -2469,7 +2281,7 @@ def get_logs_connection(self):
return conn
except (sqlite3.DatabaseError, sqlite3.OperationalError) as e:
if "file is not a database" in str(e) or "database disk image is malformed" in str(e):
- logger.error(f"Logs database corruption detected: {e}")
+ logger.error("Logs database corruption detected: %s", e)
self._handle_logs_database_corruption()
# Try connecting again after recovery
conn = sqlite3.connect(self.db_path)
@@ -2477,31 +2289,30 @@ def get_logs_connection(self):
return conn
else:
raise
-
+
def _handle_logs_database_corruption(self):
"""Handle logs database corruption"""
- import time
-
- logger.error(f"Handling logs database corruption for: {self.db_path}")
-
+
+ logger.error("Handling logs database corruption for: %s", self.db_path)
+
try:
if self.db_path.exists():
backup_path = self.db_path.parent / f"logs_corrupted_backup_{int(time.time())}.db"
self.db_path.rename(backup_path)
- logger.warning(f"Corrupted logs database backed up to: {backup_path}")
+ logger.warning("Corrupted logs database backed up to: %s", backup_path)
logger.warning("Starting with fresh logs database - log history will be lost")
-
+
if self.db_path.exists():
self.db_path.unlink()
-
+
except Exception as backup_error:
- logger.error(f"Error during logs database corruption recovery: {backup_error}")
+ logger.error("Error during logs database corruption recovery: %s", backup_error)
try:
if self.db_path.exists():
self.db_path.unlink()
- except:
+ except Exception:
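+                # Best-effort cleanup; secondary failures during corruption recovery are ignored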
pass
-
+
def ensure_logs_database_exists(self):
"""Create logs database and tables if they don't exist"""
try:
@@ -2518,25 +2329,25 @@ def ensure_logs_database_exists(self):
created_at DATETIME DEFAULT CURRENT_TIMESTAMP
)
''')
-
+
# Create indexes for logs performance
conn.execute('CREATE INDEX IF NOT EXISTS idx_logs_timestamp ON logs(timestamp)')
conn.execute('CREATE INDEX IF NOT EXISTS idx_logs_app_type ON logs(app_type)')
conn.execute('CREATE INDEX IF NOT EXISTS idx_logs_level ON logs(level)')
conn.execute('CREATE INDEX IF NOT EXISTS idx_logs_app_level ON logs(app_type, level)')
-
+
conn.commit()
- logger.info(f"Logs database initialized at: {self.db_path}")
-
+ logger.info("Logs database initialized at: %s", self.db_path)
+
except (sqlite3.DatabaseError, sqlite3.OperationalError) as e:
if "file is not a database" in str(e) or "database disk image is malformed" in str(e):
- logger.error(f"Logs database corruption detected during table creation: {e}")
+ logger.error("Logs database corruption detected during table creation: %s", e)
self._handle_logs_database_corruption()
# Try creating tables again after recovery
self.ensure_logs_database_exists()
else:
raise
-
+
def insert_log(self, timestamp: datetime, level: str, app_type: str, message: str, logger_name: str = None):
"""Insert a log entry into the logs database"""
try:
@@ -2548,73 +2359,73 @@ def insert_log(self, timestamp: datetime, level: str, app_type: str, message: st
conn.commit()
except Exception as e:
# Don't let log insertion failures crash the app
- print(f"Error inserting log: {e}")
-
+ print("Error inserting log: %s", e)
+
def get_logs(self, app_type: str = None, level: str = None, limit: int = 100, offset: int = 0, search: str = None) -> List[Dict[str, Any]]:
"""Get logs with filtering and pagination"""
try:
with self.get_logs_connection() as conn:
conn.row_factory = sqlite3.Row
-
+
where_conditions = []
params = []
-
+
if app_type and app_type != "all":
where_conditions.append("app_type = ?")
params.append(app_type)
-
+
if level and level != "all":
where_conditions.append("level = ?")
params.append(level)
-
+
if search:
where_conditions.append("message LIKE ?")
params.append(f"%{search}%")
-
+
where_clause = "WHERE " + " AND ".join(where_conditions) if where_conditions else ""
-
+
query = f"""
SELECT * FROM logs {where_clause}
ORDER BY timestamp DESC
LIMIT ? OFFSET ?
"""
-
+
cursor = conn.execute(query, params + [limit, offset])
return [dict(row) for row in cursor.fetchall()]
-
+
except Exception as e:
- logger.error(f"Error getting logs: {e}")
+ logger.error("Error getting logs: %s", e)
return []
-
+
def get_log_count(self, app_type: str = None, level: str = None, search: str = None) -> int:
"""Get total count of logs matching filters"""
try:
with self.get_logs_connection() as conn:
where_conditions = []
params = []
-
+
if app_type and app_type != "all":
where_conditions.append("app_type = ?")
params.append(app_type)
-
+
if level and level != "all":
where_conditions.append("level = ?")
params.append(level)
-
+
if search:
where_conditions.append("message LIKE ?")
params.append(f"%{search}%")
-
+
where_clause = "WHERE " + " AND ".join(where_conditions) if where_conditions else ""
-
+
query = f"SELECT COUNT(*) FROM logs {where_clause}"
cursor = conn.execute(query, params)
return cursor.fetchone()[0]
-
+
except Exception as e:
- logger.error(f"Error getting log count: {e}")
+ logger.error("Error getting log count: %s", e)
return 0
-
+
def cleanup_old_logs(self, days_to_keep: int = 30, max_entries_per_app: int = 10000):
"""Clean up old logs to prevent database bloat"""
try:
@@ -2623,38 +2434,38 @@ def cleanup_old_logs(self, days_to_keep: int = 30, max_entries_per_app: int = 10
cutoff_date = datetime.now() - timedelta(days=days_to_keep)
cursor = conn.execute("DELETE FROM logs WHERE timestamp < ?", (cutoff_date,))
deleted_by_age = cursor.rowcount
-
+
# Keep only the most recent entries per app
apps_cursor = conn.execute("SELECT DISTINCT app_type FROM logs")
total_deleted_by_count = 0
-
+
for (app_type,) in apps_cursor.fetchall():
# Get count for this app
count_cursor = conn.execute("SELECT COUNT(*) FROM logs WHERE app_type = ?", (app_type,))
count = count_cursor.fetchone()[0]
-
+
if count > max_entries_per_app:
# Delete oldest entries beyond the limit
excess_count = count - max_entries_per_app
delete_cursor = conn.execute("""
- DELETE FROM logs
- WHERE app_type = ?
+ DELETE FROM logs
+ WHERE app_type = ?
AND id IN (
- SELECT id FROM logs
- WHERE app_type = ?
- ORDER BY timestamp ASC
+ SELECT id FROM logs
+ WHERE app_type = ?
+ ORDER BY timestamp ASC
LIMIT ?
)
""", (app_type, app_type, excess_count))
total_deleted_by_count += delete_cursor.rowcount
-
+
conn.commit()
return deleted_by_age + total_deleted_by_count
-
+
except Exception as e:
- logger.error(f"Error cleaning up logs: {e}")
+ logger.error("Error cleaning up logs: %s", e)
return 0
-
+
def get_app_types_from_logs(self) -> List[str]:
"""Get list of all app types that have logs"""
try:
@@ -2662,9 +2473,9 @@ def get_app_types_from_logs(self) -> List[str]:
cursor = conn.execute("SELECT DISTINCT app_type FROM logs ORDER BY app_type")
return [row[0] for row in cursor.fetchall()]
except Exception as e:
- logger.error(f"Error getting app types from logs: {e}")
+ logger.error("Error getting app types from logs: %s", e)
return []
-
+
def get_log_levels(self) -> List[str]:
"""Get list of all log levels that exist"""
try:
@@ -2672,9 +2483,9 @@ def get_log_levels(self) -> List[str]:
cursor = conn.execute("SELECT DISTINCT level FROM logs ORDER BY level")
return [row[0] for row in cursor.fetchall()]
except Exception as e:
- logger.error(f"Error getting log levels: {e}")
+ logger.error("Error getting log levels: %s", e)
return []
-
+
def clear_logs(self, app_type: str = None):
"""Clear logs for a specific app type or all logs"""
try:
@@ -2683,40 +2494,34 @@ def clear_logs(self, app_type: str = None):
cursor = conn.execute("DELETE FROM logs WHERE app_type = ?", (app_type,))
else:
cursor = conn.execute("DELETE FROM logs")
-
+
deleted_count = cursor.rowcount
conn.commit()
-
- logger.info(f"Cleared {deleted_count} logs" + (f" for {app_type}" if app_type else ""))
+
+ logger.info("Cleared %s logs%s", deleted_count, (" for " + app_type) if app_type else "")
return deleted_count
except Exception as e:
- logger.error(f"Error clearing logs: {e}")
+ logger.error("Error clearing logs: %s", e)
return 0
-# Global database instances
-_database_instance = None
-_logs_database_instance = None
def get_database() -> HuntarrDatabase:
"""Get the global database instance"""
- global _database_instance
- if _database_instance is None:
- _database_instance = HuntarrDatabase()
- return _database_instance
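+    # Lazy singleton: the HuntarrDatabase instance is cached as an attribute on the
+    # function itself, so every call returns the same object.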
+ if not hasattr(get_database, "db_instance"):
+ get_database.db_instance = HuntarrDatabase()
+ return get_database.db_instance
+
-# Logs Database Functions (consolidated from logs_database.py)
def get_logs_database() -> LogsDatabase:
"""Get the logs database instance for logs operations"""
- global _logs_database_instance
- if _logs_database_instance is None:
- _logs_database_instance = LogsDatabase()
- return _logs_database_instance
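+    # Same function-attribute singleton pattern as get_database(), but for the separate logs.db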
+ if not hasattr(get_logs_database, "logs_db_instance"):
+ get_logs_database.logs_db_instance = LogsDatabase()
+ return get_logs_database.logs_db_instance
+
def schedule_log_cleanup():
"""Schedule periodic log cleanup - call this from background tasks"""
- import threading
- import time
-
+
def cleanup_worker():
"""Background worker to clean up logs periodically"""
while True:
@@ -2725,16 +2530,12 @@ def cleanup_worker():
logs_db = get_logs_database()
deleted_count = logs_db.cleanup_old_logs(days_to_keep=30, max_entries_per_app=10000)
if deleted_count > 0:
- logger.info(f"Scheduled cleanup removed {deleted_count} old log entries")
+ logger.info("Scheduled cleanup removed %s old log entries", deleted_count)
except Exception as e:
- logger.error(f"Error in scheduled log cleanup: {e}")
-
+ logger.error("Error in scheduled log cleanup: %s", e)
+
# Start cleanup thread
cleanup_thread = threading.Thread(target=cleanup_worker, daemon=True)
cleanup_thread.start()
logger.info("Scheduled log cleanup thread started")
-# Manager Database Functions (consolidated from manager_database.py)
-def get_manager_database() -> HuntarrDatabase:
- """Get the database instance for manager operations"""
- return get_database()
\ No newline at end of file
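A minimal usage sketch of the refactored singleton accessors above (illustrative only; it assumes the import path shown in this diff, src.primary.utils.database):

    from src.primary.utils.database import get_database, get_logs_database

    db_a = get_database()
    db_b = get_database()
    assert db_a is db_b            # the same HuntarrDatabase instance, cached on the function

    logs_db = get_logs_database()  # separate singleton backed by logs.db
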
diff --git a/src/primary/utils/logger.py b/src/primary/utils/logger.py
index 4ba99016..87d56311 100644
--- a/src/primary/utils/logger.py
+++ b/src/primary/utils/logger.py
@@ -6,16 +6,11 @@
import logging
import sys
-import os
-import pathlib
import time
from typing import Dict, Optional
-# Use the centralized path configuration
from src.primary.utils.config_paths import LOG_DIR
-
-# Log directory is already created by config_paths module
-# LOG_DIR already exists as pathlib.Path object pointing to the correct location
+from src.primary.utils.timezone_utils import get_user_timezone
# Default log file for general messages
MAIN_LOG_FILE = LOG_DIR / "huntarr.log"
@@ -35,40 +30,31 @@
logger: Optional[logging.Logger] = None
app_loggers: Dict[str, logging.Logger] = {}
+
# Custom formatter that uses user's selected timezone instead of UTC
class LocalTimeFormatter(logging.Formatter):
"""Custom formatter that uses user's selected timezone for log timestamps"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.converter = time.localtime # Still use local time as fallback
-
- def _get_user_timezone(self):
- """Get the user's selected timezone from general settings"""
- try:
- from src.primary.utils.timezone_utils import get_user_timezone
- return get_user_timezone()
- except Exception:
- # Final fallback if timezone_utils can't be imported
- import pytz
- return pytz.UTC
-
+
def formatTime(self, record, datefmt=None):
try:
# Try to use user's selected timezone
- user_tz = self._get_user_timezone()
+ user_tz = get_user_timezone()
import datetime
ct = datetime.datetime.fromtimestamp(record.created, tz=user_tz)
-
+
if datefmt:
s = ct.strftime(datefmt)
else:
# Use timezone-aware format
s = ct.strftime("%Y-%m-%d %H:%M:%S")
-
+
# Add timezone information for clarity
timezone_name = str(user_tz)
s += f" {timezone_name}"
-
+
return s
except Exception:
# Fallback to system local time if timezone handling fails
@@ -77,14 +63,15 @@ def formatTime(self, record, datefmt=None):
s = time.strftime(datefmt, ct)
else:
s = time.strftime("%Y-%m-%d %H:%M:%S", ct)
-
+
# Add timezone information to help identify which timezone logs are in
tz_name = time.tzname[time.daylight] if time.daylight else time.tzname[0]
if tz_name:
s += f" {tz_name}"
-
+
return s
+
def setup_main_logger():
"""Set up the main Huntarr logger."""
global logger
@@ -100,7 +87,7 @@ def setup_main_logger():
# Reset handlers to avoid duplicates
current_logger.handlers.clear()
current_logger.setLevel(use_log_level)
-
+
# Prevent propagation to root logger to avoid duplicate messages
current_logger.propagate = False
@@ -127,13 +114,14 @@ def setup_main_logger():
logger = current_logger # Assign to the global variable
return current_logger
+
def get_logger(app_type: str) -> logging.Logger:
"""
Get or create a logger for a specific app type.
-
+
Args:
app_type: The app type (e.g., 'sonarr', 'radarr').
-
+
Returns:
A logger specific to the app type, or the main logger if app_type is invalid.
"""
@@ -151,18 +139,18 @@ def get_logger(app_type: str) -> logging.Logger:
if log_name in app_loggers:
# Return cached logger instance
return app_loggers[log_name]
-
+
# If not cached, set up a new logger for this app type
app_logger = logging.getLogger(log_name)
-
+
# Prevent propagation to the main 'huntarr' logger or root logger
app_logger.propagate = False
-
+
# Always use DEBUG level - let frontend filter what users see
log_level = logging.DEBUG
-
+
app_logger.setLevel(log_level)
-
+
# Reset handlers in case this logger existed before but wasn't cached
# (e.g., across restarts without clearing logging._handlers)
for handler in app_logger.handlers[:]:
@@ -171,30 +159,31 @@ def get_logger(app_type: str) -> logging.Logger:
# Create console handler
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.DEBUG)
-
+
# Create file handler for the specific app log file
log_file = APP_LOG_FILES[app_type]
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(logging.DEBUG)
-
+
# Set a distinct format for this app log
log_format = f"%(asctime)s - huntarr.{app_type} - %(levelname)s - %(message)s"
formatter = LocalTimeFormatter(log_format, datefmt="%Y-%m-%d %H:%M:%S")
-
+
console_handler.setFormatter(formatter)
file_handler.setFormatter(formatter)
-
+
# Add the handlers specific to this app logger
app_logger.addHandler(console_handler)
app_logger.addHandler(file_handler)
-
+
# Cache the configured logger
app_loggers[log_name] = app_logger
app_logger.debug(f"Debug logging enabled for {app_type} logger")
-
+
return app_logger
+
def update_logging_levels():
"""
Update all logger levels to DEBUG level.
@@ -202,55 +191,57 @@ def update_logging_levels():
"""
# Always use DEBUG level - let frontend filter what users see
level = logging.DEBUG
-
+
# Set level for main logger
if logger:
logger.setLevel(level)
-
+
# Set level for all app loggers
for app_logger in app_loggers.values():
app_logger.setLevel(level)
-
+
print(f"[Logger] Updated all logger levels to {logging.getLevelName(level)}")
+
def refresh_timezone_formatters():
"""
Force refresh of all logger formatters to use updated timezone settings.
This should be called when the timezone setting changes.
"""
print("[Logger] Refreshing timezone formatters for all loggers")
-
+
# Create new formatter with updated timezone handling
log_format = "%(asctime)s - huntarr - %(levelname)s - %(message)s"
new_formatter = LocalTimeFormatter(log_format, datefmt="%Y-%m-%d %H:%M:%S")
-
+
# Update main logger handlers
if logger:
for handler in logger.handlers:
handler.setFormatter(new_formatter)
-
+
# Update all app logger handlers
for app_name, app_logger in app_loggers.items():
app_type = app_name.split('.')[-1] if '.' in app_name else app_name
app_format = f"%(asctime)s - huntarr.{app_type} - %(levelname)s - %(message)s"
app_formatter = LocalTimeFormatter(app_format, datefmt="%Y-%m-%d %H:%M:%S")
-
+
for handler in app_logger.handlers:
handler.setFormatter(app_formatter)
-
+
print("[Logger] Timezone formatters refreshed for all loggers")
+
def debug_log(message: str, data: object = None, app_type: Optional[str] = None) -> None:
"""
Log debug messages with optional data.
-
+
Args:
message: The message to log.
data: Optional data to include with the message.
app_type: Optional app type to log to a specific app's log file.
"""
current_logger = get_logger(app_type) if app_type else logger
-
+
if current_logger.level <= logging.DEBUG:
if data is not None:
try:
@@ -270,4 +261,4 @@ def debug_log(message: str, data: object = None, app_type: Optional[str] = None)
current_logger.debug(f"{message}")
# Initialize the main logger instance when the module is imported
-logger = setup_main_logger()
\ No newline at end of file
+logger = setup_main_logger()
diff --git a/src/primary/utils/timezone_utils.py b/src/primary/utils/timezone_utils.py
index 8bbe51d9..c64206e7 100644
--- a/src/primary/utils/timezone_utils.py
+++ b/src/primary/utils/timezone_utils.py
@@ -5,8 +5,11 @@
"""
import os
+import time
+
import pytz
-from typing import Union
+
+from src.primary.utils.database import get_database
# Cache for timezone to avoid repeated settings lookups
_timezone_cache = None
@@ -24,16 +27,16 @@ def clear_timezone_cache():
def validate_timezone(timezone_str: str) -> bool:
"""
Validate if a timezone string is valid using pytz.
-
+
Args:
timezone_str: The timezone string to validate (e.g., 'Europe/Bucharest')
-
+
Returns:
bool: True if valid, False otherwise
"""
if not timezone_str:
return False
-
+
try:
pytz.timezone(timezone_str)
return True
@@ -46,10 +49,10 @@ def validate_timezone(timezone_str: str) -> bool:
def safe_get_timezone(timezone_name: str) -> pytz.BaseTzInfo:
"""
Safely get a timezone object with validation.
-
+
Args:
timezone_name: The timezone name to get
-
+
Returns:
pytz.BaseTzInfo: The timezone object, or None if invalid
"""
@@ -66,33 +69,29 @@ def safe_get_timezone(timezone_name: str) -> pytz.BaseTzInfo:
def get_user_timezone() -> pytz.BaseTzInfo:
"""
Get the user's selected timezone with proper fallback handling.
-
+
This function is robust and will NEVER crash, even with invalid timezones.
It gracefully handles any timezone string and falls back safely.
-
+
Fallback order:
1. User's timezone setting from general settings
2. TZ environment variable
3. UTC as final fallback
-
+
Returns:
pytz.BaseTzInfo: The timezone object to use (always valid)
"""
global _timezone_cache, _cache_timestamp
-
+
# Check cache first
- import time
current_time = time.time()
if _timezone_cache and (current_time - _cache_timestamp) < _cache_ttl:
return _timezone_cache
-
+
try:
# First try to get timezone from user settings
try:
- from src.primary import settings_manager
- general_settings = settings_manager.load_settings("general", use_cache=False) # Force fresh read
- timezone_name = general_settings.get("timezone")
-
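+            # Timezone is now read straight from the general settings stored in the database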
+ timezone_name = get_database().get_general_setting("timezone")
if timezone_name and timezone_name != "UTC":
tz = safe_get_timezone(timezone_name)
if tz:
@@ -102,7 +101,7 @@ def get_user_timezone() -> pytz.BaseTzInfo:
return tz
except Exception:
pass # Fall through to TZ environment variable
-
+
# Second try TZ environment variable
tz_env = os.environ.get('TZ')
if tz_env:
@@ -112,30 +111,16 @@ def get_user_timezone() -> pytz.BaseTzInfo:
_timezone_cache = tz
_cache_timestamp = current_time
return tz
-
+
# Final fallback to UTC
tz = pytz.UTC
_timezone_cache = tz
_cache_timestamp = current_time
return tz
-
+
except Exception:
# Ultimate fallback if everything fails
tz = pytz.UTC
_timezone_cache = tz
_cache_timestamp = current_time
return tz
-
-
-def get_timezone_name() -> str:
- """
- Get the timezone name as a string.
-
- Returns:
- str: The timezone name (e.g., 'Pacific/Honolulu', 'UTC')
- """
- try:
- timezone = get_user_timezone()
- return str(timezone)
- except Exception:
- return "UTC"
\ No newline at end of file
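A brief sketch of the fallback order documented in get_user_timezone above (illustrative only; it assumes pytz is installed and that no timezone has been saved in the general settings, so the TZ environment variable is used):

    import os
    from src.primary.utils.timezone_utils import get_user_timezone, clear_timezone_cache

    os.environ["TZ"] = "Europe/Bucharest"  # no saved setting, so the TZ variable is used
    clear_timezone_cache()                 # drop any cached value so the change is picked up
    print(get_user_timezone())             # Europe/Bucharest; falls back to UTC if TZ is unset or invalid
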
diff --git a/src/primary/web_server.py b/src/primary/web_server.py
index 10b81164..b4cf3d5f 100644
--- a/src/primary/web_server.py
+++ b/src/primary/web_server.py
@@ -4,60 +4,47 @@
Provides a web interface to view logs in real-time, manage settings, and includes authentication
"""
+import importlib
+import json
+import logging
import os
-import datetime
-import time
+import platform
+import sys
from threading import Lock
-from primary.utils.logger import LOG_DIR, APP_LOG_FILES, MAIN_LOG_FILE # Import log constants
-from primary import settings_manager # Import settings_manager
-from src.primary.stateful_manager import update_lock_expiration # Import stateful update function
-# import socket # No longer used
-import json
-# import signal # No longer used for reload
-import sys
-import qrcode
-import pyotp
-import logging
-import threading
-import importlib # Added import
import requests
-from flask import Flask, render_template, request, jsonify, Response, send_from_directory, redirect, url_for, session, stream_with_context, Blueprint, current_app, g, make_response # Added stream_with_context and Blueprint
-# from src.primary.config import API_URL # No longer needed directly
-# Use only settings_manager
-from src.primary import settings_manager
-from src.primary.utils.logger import setup_main_logger, get_logger, LOG_DIR, update_logging_levels # Import get_logger, LOG_DIR, and update_logging_levels
-# Clean logging is now database-only
-from src.primary.auth import (
- authenticate_request, user_exists, create_user, verify_user, create_session,
- logout, SESSION_COOKIE_NAME, is_2fa_enabled, generate_2fa_secret,
- verify_2fa_code, disable_2fa, change_username, change_password,
- create_plex_pin, check_plex_pin, verify_plex_token, create_user_with_plex,
- link_plex_account, verify_plex_user
+from flask import (
+ Flask,
+ render_template,
+ request,
+ jsonify,
+ redirect,
+ current_app,
)
-# Import blueprint for common routes
+
+from src.primary import settings_manager
+from src.primary.stateful_manager import initialize_state_management
+from src.primary.utils.logger import get_logger, LOG_DIR, update_logging_levels
+from src.primary.auth import authenticate_request
from src.primary.routes.common import common_bp
from src.primary.routes.plex_auth_routes import plex_auth_bp
-# Import blueprints for each app from the centralized blueprints module
-from src.primary.apps.blueprints import sonarr_bp, radarr_bp, lidarr_bp, readarr_bp, whisparr_bp, eros_bp, swaparr_bp, requestarr_bp, prowlarr_bp
-
-# Import stateful blueprint
-from src.primary.stateful_routes import stateful_api
-
-# Import history blueprint
+from src.primary.apps.blueprints import (
+ sonarr_bp,
+ radarr_bp,
+ lidarr_bp,
+ readarr_bp,
+ whisparr_bp,
+ eros_bp,
+ swaparr_bp,
+ requestarr_bp,
+ prowlarr_bp,
+)
from src.primary.routes.history_routes import history_blueprint
-
-# Import scheduler blueprint
from src.primary.routes.scheduler_routes import scheduler_api
-
-# Import backup blueprint
-from src.routes.backup_routes import backup_bp
-
-# Import log routes blueprint
from src.primary.routes.log_routes import log_routes_bp
-
-# Import background module to trigger manual cycle resets
-from src.primary import background
+from src.primary.stateful_routes import stateful_api
+from src.primary.utils.database import get_database
+from src.routes.backup_routes import backup_bp
# Disable Flask default logging
log = logging.getLogger('werkzeug')
@@ -74,7 +61,7 @@
os.path.join(base_path, 'frontend', 'templates'), # Alternate structure
os.path.join(os.path.dirname(base_path), 'Resources', 'frontend', 'templates') # Mac app bundle with different path
]
-
+
# Find the first existing templates directory
template_dir = None
for candidate in template_candidates:
@@ -88,7 +75,7 @@
break
else:
print(f"Warning: setup.html not found in {template_dir}")
-
+
# Similar approach for static files
static_candidates = [
os.path.join(base_path, 'static'),
@@ -96,7 +83,7 @@
os.path.join(base_path, 'frontend', 'static'),
os.path.join(os.path.dirname(base_path), 'Resources', 'frontend', 'static')
]
-
+
# Find the first existing static directory
static_dir = None
for candidate in static_candidates:
@@ -105,16 +92,16 @@
static_dir = candidate_path
print(f"Found valid static directory: {static_dir}")
break
-
+
# If no valid directories found, use defaults
if not template_dir:
template_dir = os.path.join(base_path, 'templates')
print(f"Warning: Using default template dir: {template_dir}")
-
+
if not static_dir:
static_dir = os.path.join(base_path, 'static')
print(f"Warning: Using default static dir: {static_dir}")
-
+
print(f"PyInstaller mode - Using template dir: {template_dir}")
print(f"PyInstaller mode - Using static dir: {static_dir}")
print(f"Template dir exists: {os.path.exists(template_dir)}")
@@ -130,12 +117,13 @@
if os.path.exists(template_dir):
print(f"Template dir contents: {os.listdir(template_dir)}")
+
# Get base_url from settings (used for reverse proxy subpath configurations)
def get_base_url():
"""
Get the configured base URL from general settings.
This allows Huntarr to run under a subpath like /huntarr when behind a reverse proxy.
-
+
Returns:
str: The configured base URL (e.g., '/huntarr') or empty string if not configured
"""
@@ -156,7 +144,7 @@ def get_base_url():
base_url = ''
# Check for Windows platform and integrate Windows-specific helpers
-import platform
+
if platform.system() == "Windows":
# Import Windows integration module for startup support
try:
@@ -167,8 +155,8 @@ def get_base_url():
print(f"Error integrating Windows helpers: {e}")
# Create Flask app with additional debug logging
-app = Flask(__name__,
- template_folder=template_dir,
+app = Flask(__name__,
+ template_folder=template_dir,
static_folder=static_dir,
static_url_path='/static')
@@ -215,7 +203,7 @@ def debug_template_rendering():
"""Additional logging for Flask template rendering"""
app.jinja_env.auto_reload = True
orig_get_source = app.jinja_env.loader.get_source
-
+
def get_source_wrapper(environment, template):
try:
result = orig_get_source(environment, template)
@@ -231,14 +219,14 @@ def get_source_wrapper(environment, template):
print(f"Using alternative loader: {type(environment.loader).__name__}")
except Exception as loader_err:
print(f"Could not get loader info: {loader_err}")
-
+
# Print all available templates
try:
all_templates = environment.loader.list_templates()
print(f"Available templates: {all_templates}")
except Exception as templates_err:
print(f"Could not list available templates: {templates_err}")
-
+
# Add debug info for ARM application
if getattr(sys, 'frozen', False):
print("Running as a PyInstaller bundle")
@@ -257,9 +245,9 @@ def get_source_wrapper(environment, template):
except Exception as path_err:
print(f"Error checking paths: {path_err}")
raise
-
+
app.jinja_env.loader.get_source = get_source_wrapper
-
+
debug_template_rendering()
app.secret_key = os.environ.get('SECRET_KEY', 'dev_key_for_sessions')
@@ -291,12 +279,9 @@ def inject_base_url():
"""Add base_url to template context for use in templates"""
return {'base_url': base_url}
-# Removed MAIN_PID and signal-related code
-
# Lock for accessing the log files
log_lock = Lock()
-# Log files are now handled by database-only logging system
# Handle both root path and base URL root path
@app.route('/')
@@ -304,260 +289,12 @@ def home():
"""Render the main index page"""
return render_template('index.html')
+
@app.route('/user')
def user():
"""Redirect to main index with user section"""
return redirect('./#user')
-
-# This section previously contained code for redirecting paths to include the base URL
-# It has been removed as Flask's APPLICATION_ROOT setting provides this functionality
-
-# Removed /settings and /logs routes if handled by index.html and JS routing
-# Keep /logs if it's the actual SSE endpoint
-
-# Old file-based logs route removed - using database-based logs now
- """
- Event stream for logs.
- Filter logs by app type using the 'app' query parameter.
- Supports 'all', 'system', 'sonarr', 'radarr', 'lidarr', 'readarr'.
- Example: /logs?app=sonarr
- """
- app_type = request.args.get('app', 'all') # Default to 'all' if no app specified
- web_logger = get_logger("web_server")
-
- valid_app_types = list(KNOWN_LOG_FILES.keys()) + ['all']
- if app_type not in valid_app_types:
- web_logger.warning(f"Invalid app type '{app_type}' requested for logs. Defaulting to 'all'.")
- app_type = 'all'
-
- # Import needed modules
- import time
- from pathlib import Path
- import threading
- import datetime # Added datetime import
- import time # Add time module import
-
- # Use a client identifier to track connections
- # Use request.remote_addr directly for client_id
- client_id = request.remote_addr
- current_time_str = datetime.datetime.now().strftime("%H:%M:%S") # Renamed variable
-
- web_logger.debug(f"Starting log stream for app type: {app_type} (client: {client_id}, time: {current_time_str})")
-
- # Track active connections to limit resource usage
- if not hasattr(app, 'active_log_streams'):
- app.active_log_streams = {}
- app.log_stream_lock = threading.Lock()
-
- # Clean up stale connections (older than 60 seconds without activity)
- with app.log_stream_lock:
- current_time = time.time()
- stale_clients = [c for c, t in app.active_log_streams.items()
- if current_time - t > 60]
- for client in stale_clients:
- # Check if client exists before popping, avoid KeyError
- if client in app.active_log_streams:
- app.active_log_streams.pop(client)
- web_logger.debug(f"Removed stale log stream connection for client: {client}")
-
- # If too many connections, return an error for new ones
- # Increased limit slightly and check before adding the new client
- MAX_LOG_CONNECTIONS = 10 # Define as constant
- if len(app.active_log_streams) >= MAX_LOG_CONNECTIONS:
- web_logger.warning(f"Too many log stream connections ({len(app.active_log_streams)}). Rejecting new connection from {client_id}")
- # Send SSE formatted error message
- return Response("event: error\ndata: Too many active connections. Please try again later.\n\n",
- mimetype='text/event-stream', status=429) # Use 429 status code
-
- # Add/Update this client's timestamp *after* checking the limit
- app.active_log_streams[client_id] = current_time
- web_logger.debug(f"Active log streams: {len(app.active_log_streams)} clients. Added/Updated: {client_id}")
-
-
- def generate():
- """Generate log events for the SSE stream."""
- client_ip = request.remote_addr
- web_logger.debug(f"Log stream generator started for {app_type} (Client: {client_ip})")
- try:
- # Initialize last activity time
- last_activity = time.time()
-
- # Determine which log files to follow
- log_files_to_follow = []
- if app_type == 'all':
- log_files_to_follow = list(KNOWN_LOG_FILES.items())
- elif app_type == 'system':
- # For system, only follow main log
- system_log = KNOWN_LOG_FILES.get('system')
- if system_log:
- log_files_to_follow = [('system', system_log)]
- web_logger.debug(f"Following system log: {system_log}")
- else:
- # For specific app, follow that app's log
- app_log = KNOWN_LOG_FILES.get(app_type)
- if app_log:
- log_files_to_follow = [(app_type, app_log)]
- web_logger.debug(f"Following {app_type} log: {app_log}")
-
- # Also include system log for related messages
- system_log = KNOWN_LOG_FILES.get('system')
- if system_log:
- log_files_to_follow.append(('system', system_log))
- web_logger.debug(f"Also following system log for {app_type} messages")
-
- if not log_files_to_follow:
- web_logger.warning(f"No log files found for app type: {app_type}")
- yield f"data: No logs available for {app_type}\n\n"
- return
-
- # Send confirmation
- yield f"data: Starting log stream for {app_type}...\n\n"
- web_logger.debug(f"Sent confirmation for {app_type} (Client: {client_ip})")
-
- # Track file positions
- positions = {}
- last_check = {}
- keep_alive_counter = 0
-
- # Convert to Path objects
- log_files_to_follow = [(name, Path(path) if isinstance(path, str) else path)
- for name, path in log_files_to_follow if path]
-
- # Main streaming loop
- while True:
- had_content = False
- current_time = time.time()
-
- # Update client activity
- if current_time - last_activity > 10:
- with app.log_stream_lock:
- if client_id in app.active_log_streams:
- app.active_log_streams[client_id] = current_time
- else:
- web_logger.debug(f"Client {client_id} gone. Stopping generator.")
- break
- last_activity = current_time
- keep_alive_counter += 1
-
- # Check each file
- for name, path in log_files_to_follow:
- try:
- # Limit check frequency
- now = datetime.datetime.now()
- if name in last_check and (now - last_check[name]).total_seconds() < 0.2:
- continue
-
- last_check[name] = now
-
- # Check file exists
- if not path.exists():
- if positions.get(name) != -1:
- web_logger.warning(f"Log file {path} not found. Skipping.")
- positions[name] = -1
- continue
- elif positions.get(name) == -1:
- web_logger.info(f"Log file {path} found again. Resuming.")
- positions.pop(name, None)
-
- # Get size
- try:
- current_size = path.stat().st_size
- except FileNotFoundError:
- web_logger.warning(f"Log file {path} disappeared. Skipping.")
- positions[name] = -1
- continue
-
- # Init position or handle truncation
- if name not in positions or current_size < positions.get(name, 0):
- start_pos = max(0, current_size - 5120)
- web_logger.debug(f"Init position for {name}: {start_pos}")
- positions[name] = start_pos
-
- # Read content
- try:
- with open(path, 'r', encoding='utf-8', errors='ignore') as f:
- f.seek(positions[name])
- new_lines = []
- lines_read = 0
- max_lines = 100
-
- while lines_read < max_lines:
- line = f.readline()
- if not line:
- break
-
- # Only filter when reading system log for specific app tab
- if app_type != 'all' and app_type != 'system' and name == 'system':
- # MODIFIED: Don't include system logs in app tabs at all
- include_line = False
- else:
- include_line = True
-
- if include_line:
- new_lines.append(line)
-
- lines_read += 1
-
- # Process collected lines
- if new_lines:
- had_content = True
- positions[name] = f.tell()
- for line in new_lines:
- stripped = line.strip()
- if stripped:
- prefix = f"[{name.lower()}] " if app_type == 'all' else ""
- yield f"data: {prefix}{stripped}\n\n"
-
- except FileNotFoundError:
- web_logger.warning(f"Log file {path} disappeared during read.")
- positions[name] = -1
- except Exception as e:
- web_logger.error(f"Error reading {path}: {e}")
- yield f"data: ERROR: Problem reading log: {str(e)}\n\n"
-
- except Exception as e:
- web_logger.error(f"Error processing {name}: {e}")
- yield f"data: ERROR: Unexpected issue with log.\n\n"
-
- # Keep-alive or sleep
- if not had_content:
- if keep_alive_counter >= 75:
- yield f": keepalive {time.time()}\n\n"
- keep_alive_counter = 0
- time.sleep(0.2)
- else:
- keep_alive_counter = 0
- time.sleep(0.05)
-
- except GeneratorExit:
- # Clean up when client disconnects
- web_logger.debug(f"Client {client_id} disconnected from log stream for {app_type}. Cleaning up.")
- except Exception as e:
- web_logger.error(f"Unhandled error in log stream generator for {app_type} (Client: {client_ip}): {e}", exc_info=True)
- try:
- # Ensure error message is properly formatted for SSE
- yield f"event: error\ndata: ERROR: Log streaming failed unexpectedly: {str(e)}\n\n"
- except Exception as yield_err:
- web_logger.error(f"Error yielding final error message to client {client_id}: {yield_err}")
- finally:
- # Ensure cleanup happens regardless of how the generator exits
- with app.log_stream_lock:
- removed_client = app.active_log_streams.pop(client_id, None)
- if removed_client:
- web_logger.debug(f"Successfully removed client {client_id} from active log streams.")
- else:
- web_logger.debug(f"Client {client_id} was already removed from active log streams before finally block.")
- web_logger.debug(f"Log stream generator finished for {app_type} (Client: {client_id})")
-
- # Return the SSE response with appropriate headers for better streaming
- response = Response(stream_with_context(generate()), mimetype='text/event-stream') # Use stream_with_context
- response.headers['Cache-Control'] = 'no-cache'
- response.headers['X-Accel-Buffering'] = 'no' # Disable nginx buffering if using nginx
- return response
-
-# Legacy file-based logs route removed - now using database-based log routes in log_routes.py
-# The frontend should use /api/logs endpoints instead
@app.route('/api/settings', methods=['GET'])
def api_settings():
@@ -566,25 +303,26 @@ def api_settings():
all_settings = settings_manager.get_all_settings() # Corrected function name
return jsonify(all_settings)
+
@app.route('/api/settings/general', methods=['POST'])
def save_general_settings():
general_logger = get_logger("web_server")
general_logger.info("Received request to save general settings.")
-
+
# Make sure we have data
if not request.is_json:
return jsonify({"success": False, "error": "Expected JSON data"}), 400
-
+
data = request.json
-
+
# Debug: Log the incoming data to see if timezone is present
general_logger.debug(f"Received general settings data: {data}")
if 'timezone' in data:
general_logger.info(f"Timezone setting found: {data.get('timezone')}")
-
+
# Ensure auth_mode and bypass flags are consistent
auth_mode = data.get('auth_mode')
-
+
# If auth_mode is explicitly set, ensure the bypass flags match it
if auth_mode:
if auth_mode == 'local_bypass':
@@ -596,7 +334,7 @@ def save_general_settings():
elif auth_mode == 'login':
data['local_access_bypass'] = False
data['proxy_auth_bypass'] = False
-
+
# Handle timezone changes automatically with validation
timezone_changed = False
if 'timezone' in data:
@@ -604,21 +342,21 @@ def save_general_settings():
current_settings = settings_manager.load_settings('general')
current_timezone = current_settings.get('timezone', 'UTC')
new_timezone = data.get('timezone', 'UTC')
-
+
# Validate the new timezone
safe_timezone = settings_manager.get_safe_timezone(new_timezone)
if safe_timezone != new_timezone:
general_logger.warning(f"Invalid timezone '{new_timezone}' provided, using '{safe_timezone}' instead")
data['timezone'] = safe_timezone # Update the data to save the safe timezone
new_timezone = safe_timezone
-
+
if current_timezone != new_timezone:
timezone_changed = True
general_logger.info(f"Timezone changed from {current_timezone} to {new_timezone}")
-
+
# Save general settings
success = settings_manager.save_settings('general', data)
-
+
if success:
# Apply timezone change if needed
if timezone_changed:
@@ -639,36 +377,25 @@ def save_general_settings():
except Exception as e:
general_logger.error(f"Error applying timezone: {e}")
# Continue anyway - settings were still saved
-
- # Update expiration timing from general settings if applicable
- try:
- new_hours = int(data.get('stateful_management_hours'))
- if new_hours > 0:
- general_logger.info(f"Updating stateful expiration to {new_hours} hours.")
- update_lock_expiration(hours=new_hours)
- except (ValueError, TypeError, KeyError):
- # Don't update if the value wasn't provided or is invalid
- pass
- except Exception as e:
- general_logger.error(f"Error updating expiration timing: {e}")
-
+
# Update logging levels immediately when general settings are changed
update_logging_levels()
-
+
# Return all settings
return jsonify(settings_manager.get_all_settings())
else:
return jsonify({"success": False, "error": "Failed to save general settings"}), 500
+
@app.route('/api/test-notification', methods=['POST'])
def test_notification():
"""Test notification endpoint with enhanced Windows debugging"""
import platform
web_logger = get_logger("web_server")
-
+
try:
from src.primary.notification_manager import send_notification, get_notification_config, apprise_import_error
-
+
# Enhanced debugging for Windows issues
system_info = {
"platform": platform.system(),
@@ -676,9 +403,9 @@ def test_notification():
"python_version": platform.python_version(),
"apprise_available": apprise_import_error is None
}
-
+
web_logger.info(f"Test notification requested on {system_info}")
-
+
# Check for Apprise import issues first (common Windows problem)
if apprise_import_error:
error_msg = f"Apprise library not available: {apprise_import_error}"
@@ -686,22 +413,22 @@ def test_notification():
error_msg += " (Common on Windows - try: pip install apprise)"
web_logger.error(error_msg)
return jsonify({
- "success": False,
+ "success": False,
"error": error_msg,
"system_info": system_info
}), 500, {'Content-Type': 'application/json'}
-
+
# Get the user's configured notification level
config = get_notification_config()
user_level = config.get('level', 'info')
-
+
# Send a test notification using the user's configured level
success = send_notification(
title="๐งช Huntarr Test Notification",
message="This is a test notification to verify your Apprise configuration is working correctly! If you see this, your notifications are set up properly. ๐",
level=user_level
)
-
+
if success:
web_logger.info(f"Test notification sent successfully on {platform.system()}")
return jsonify({"success": True, "message": "Test notification sent successfully!"}), 200, {'Content-Type': 'application/json'}
@@ -711,16 +438,16 @@ def test_notification():
error_msg += " On Windows, ensure Apprise is properly installed and all dependencies are available."
web_logger.warning(f"Test notification failed: {error_msg}")
return jsonify({
- "success": False,
+ "success": False,
"error": error_msg,
"system_info": system_info
}), 500, {'Content-Type': 'application/json'}
-
+
except Exception as e:
error_msg = f"Error sending test notification: {str(e)}"
web_logger.error(f"{error_msg} | System: {platform.system()}")
return jsonify({
- "success": False,
+ "success": False,
"error": error_msg,
"system_info": {
"platform": platform.system(),
@@ -728,27 +455,28 @@ def test_notification():
}
}), 500, {'Content-Type': 'application/json'}
+
@app.route('/api/settings/<app_name>', methods=['GET', 'POST'])
def handle_app_settings(app_name):
web_logger = get_logger("web_server")
-
+
# Validate app_name
if app_name not in settings_manager.KNOWN_APP_TYPES:
return jsonify({"success": False, "error": f"Unknown application type: {app_name}"}), 400
-
+
if request.method == 'GET':
# Return settings for the specific app
app_settings = settings_manager.load_settings(app_name)
return jsonify(app_settings)
-
+
elif request.method == 'POST':
# Make sure we have data
if not request.is_json:
return jsonify({"success": False, "error": "Expected JSON data"}), 400
-
+
data = request.json
# Auto-save request received - debug spam removed
-
+
# Clean URLs in the data before saving
if 'instances' in data and isinstance(data['instances'], list):
for instance in data['instances']:
@@ -758,12 +486,13 @@ def handle_app_settings(app_name):
elif 'api_url' in data and data['api_url']:
# For apps that don't use instances array
data['api_url'] = data['api_url'].strip().rstrip('/').rstrip('\\')
-
- # Settings cleaned - debug spam removed
-
+
# Save the app settings
success = settings_manager.save_settings(app_name, data)
-
+
+ # Initialize state management for any new instances configured
+ initialize_state_management()
+
if success:
# Auto-save enabled - no need to log every successful save
return jsonify({"success": True})
@@ -771,6 +500,7 @@ def handle_app_settings(app_name):
web_logger.error(f"Failed to save {app_name} settings")
return jsonify({"success": False, "error": f"Failed to save {app_name} settings"}), 500
+
@app.route('/api/settings/theme', methods=['GET', 'POST'])
def api_theme():
# Theme settings are handled separately, stored in database
@@ -783,6 +513,7 @@ def api_theme():
success = settings_manager.update_setting("ui", "dark_mode", dark_mode)
return jsonify({"success": success})
+
@app.route('/api/settings/reset', methods=['POST'])
def api_reset_settings():
data = request.json
@@ -809,6 +540,7 @@ def api_reset_settings():
else:
return jsonify({"success": False, "error": f"Failed to save reset settings for {app_name}"}), 500
+
@app.route('/api/app-settings', methods=['GET'])
def api_app_settings():
app_type = request.args.get('app')
@@ -822,6 +554,7 @@ def api_app_settings():
api_details = {"api_url": api_url, "api_key": api_key}
return jsonify({"success": True, **api_details})
+
@app.route('/api/configured-apps', methods=['GET'])
def api_configured_apps():
# Return the configured status of all apps using the updated settings_manager function
@@ -830,22 +563,22 @@ def api_configured_apps():
configured_status = {app: (app in configured_apps_list) for app in settings_manager.KNOWN_APP_TYPES}
return jsonify(configured_status)
-# --- Add Status Endpoint --- #
+
@app.route('/api/status/<app_name>', methods=['GET'])
def api_app_status(app_name):
"""Check connection status for a specific app."""
web_logger = get_logger("web_server")
response_data = {"configured": False, "connected": False} # Default for non-Sonarr apps
status_code = 200
-
+
# First validate the app name
if app_name not in settings_manager.KNOWN_APP_TYPES:
web_logger.warning(f"Status check requested for invalid app name: {app_name}")
return jsonify({"configured": False, "connected": False, "error": "Invalid app name"}), 400
-
+
try:
if app_name in ['sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros']:
- # --- Multi-Instance Status Check --- #
+ # --- Multi-Instance Status Check --- #
connected_count = 0
total_configured = 0
try:
@@ -853,13 +586,13 @@ def api_app_status(app_name):
module_name = f'src.primary.apps.{app_name}'
instances_module = importlib.import_module(module_name)
api_module = importlib.import_module(f'{module_name}.api')
-
+
if hasattr(instances_module, 'get_configured_instances'):
get_instances_func = getattr(instances_module, 'get_configured_instances')
instances = get_instances_func()
total_configured = len(instances)
api_timeout = settings_manager.get_setting(app_name, "api_timeout", 10) # Get global timeout
-
+
if total_configured > 0:
web_logger.debug(f"Checking connection for {total_configured} {app_name.capitalize()} instances...")
if hasattr(api_module, 'check_connection'):
@@ -879,7 +612,7 @@ def api_app_status(app_name):
web_logger.error(f"Error checking connection for {app_name.capitalize()} instance '{inst_name}': {str(e)}")
else:
web_logger.warning(f"check_connection function not found in {app_name} API module")
-
+
# Prepare multi-instance response
response_data = {"total_configured": total_configured, "connected_count": connected_count}
except Exception as e:
@@ -890,7 +623,7 @@ def api_app_status(app_name):
web_logger.error(f"Error during {app_name} multi-instance status check: {e}", exc_info=True)
response_data = {"total_configured": total_configured, "connected_count": connected_count, "error": "Check Error"}
status_code = 500
-
+
else:
# --- Legacy/Single Instance Status Check (for other apps) --- #
api_url = settings_manager.get_api_url(app_name)
@@ -903,7 +636,7 @@ def api_app_status(app_name):
try:
module_path = f'src.primary.apps.{app_name}.api'
api_module = importlib.import_module(module_path)
-
+
if hasattr(api_module, 'check_connection'):
check_connection_func = getattr(api_module, 'check_connection')
# Use a short timeout to prevent long waits
@@ -916,19 +649,18 @@ def api_app_status(app_name):
except Exception as e:
web_logger.error(f"Error checking connection for {app_name}: {str(e)}")
is_connected = False # Ensure connection is false on check error
-
+
# Prepare legacy response format
response_data = {"configured": is_configured, "connected": is_connected}
-
+
return jsonify(response_data), status_code
-
+
except Exception as e:
web_logger.error(f"Unexpected error in status check for {app_name}: {str(e)}", exc_info=True)
# Return a valid response even on error to prevent UI issues
return jsonify({"configured": False, "connected": False, "error": "Internal error"}), 200
-
@app.route('/api/settings/apply-timezone', methods=['POST'])
def apply_timezone_setting():
"""Apply timezone setting to the container."""
@@ -938,22 +670,23 @@ def apply_timezone_setting():
if not timezone:
return jsonify({"success": False, "error": "No timezone specified"}), 400
-
+
web_logger.info(f"Applying timezone setting: {timezone}")
-
+
# Save the timezone to general settings
general_settings = settings_manager.load_settings("general")
general_settings["timezone"] = timezone
settings_manager.save_settings("general", general_settings)
-
+
# Apply the timezone to the container
success = settings_manager.apply_timezone(timezone)
-
+
if success:
return jsonify({"success": True, "message": f"Timezone set to {timezone}. Container restart may be required for full effect."})
else:
return jsonify({"success": False, "error": f"Failed to apply timezone {timezone}"}), 500
+
@app.route('/api/hourly-caps', methods=['GET'])
def api_get_hourly_caps():
"""Get hourly API usage caps for each app"""
@@ -961,20 +694,20 @@ def api_get_hourly_caps():
# Import necessary functions
from src.primary.stats_manager import load_hourly_caps
from src.primary.settings_manager import load_settings
-
+
# Get the logger
web_logger = get_logger("web_server")
-
+
# Load the current hourly caps
caps = load_hourly_caps()
-
+
# Get app-specific hourly cap limits
app_limits = {}
apps = ['sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros']
for app in apps:
app_settings = load_settings(app)
app_limits[app] = app_settings.get('hourly_cap', 20) # Default to 20 if not set
-
+
return jsonify({
"success": True,
"caps": caps,
@@ -988,36 +721,38 @@ def api_get_hourly_caps():
"message": "Error retrieving hourly API caps."
}), 500
+
@app.route('/api/stats/reset_public', methods=['POST'])
def api_reset_stats_public():
"""Reset the media statistics for all apps or a specific app - public endpoint without auth"""
try:
data = request.json or {}
app_type = data.get('app_type')
-
+
# Get logger for logging the reset action
web_logger = get_logger("web_server")
-
+
# Import the reset_stats function
from src.primary.stats_manager import reset_stats
-
+
if app_type:
web_logger.info(f"Resetting statistics for app (public): {app_type}")
reset_success = reset_stats(app_type)
else:
web_logger.info("Resetting all media statistics (public)")
reset_success = reset_stats(None)
-
+
if reset_success:
return jsonify({"success": True, "message": "Statistics reset successfully"}), 200
else:
return jsonify({"success": False, "error": "Failed to reset statistics"}), 500
-
+
except Exception as e:
web_logger = get_logger("web_server")
web_logger.error(f"Error resetting statistics (public): {str(e)}")
return jsonify({"success": False, "error": str(e)}), 500
+
@app.route('/version.txt')
def version_txt():
"""Serve version from database"""
@@ -1031,6 +766,7 @@ def version_txt():
web_logger.error(f"Error serving version from database: {e}")
return "N/A", 200, {'Content-Type': 'text/plain', 'Cache-Control': 'no-cache'}
+
@app.route('/api/cycle/status', methods=['GET'])
def api_get_all_cycle_status():
"""API endpoint to get cycle status for all apps."""
@@ -1043,6 +779,7 @@ def api_get_all_cycle_status():
web_logger.error(f"Error getting cycle status: {e}")
return jsonify({"error": "Failed to retrieve cycle status information."}), 500
+
@app.route('/api/cycle/status/<app_name>', methods=['GET'])
def api_get_app_cycle_status(app_name):
"""API endpoint to get cycle status for a specific app."""
@@ -1055,28 +792,29 @@ def api_get_app_cycle_status(app_name):
web_logger.error(f"Error getting cycle status for {app_name}: {e}")
return jsonify({"error": f"Failed to retrieve cycle status for {app_name}."}), 500
+
@app.route('/api/cycle/reset/<app_name>', methods=['POST'])
def reset_app_cycle(app_name):
"""
Manually trigger a reset of the cycle for a specific app.
-
+
Args:
app_name: The name of the app (sonarr, radarr, lidarr, readarr, etc.)
-
+
Returns:
JSON response with success/error status
"""
# Make sure to initialize web_logger if it's not available in this scope
web_logger = get_logger("web_server")
web_logger.info(f"Manual cycle reset requested for {app_name} via API")
-
+
# Check if app name is valid
if app_name not in ['sonarr', 'radarr', 'lidarr', 'readarr', 'whisparr', 'eros', 'swaparr']:
return jsonify({
'success': False,
'error': f"Invalid app name: {app_name}"
}), 400
-
+
# Check if the app is configured (special handling for Swaparr)
if app_name == 'swaparr':
# For Swaparr, check if it's enabled in settings
@@ -1095,14 +833,14 @@ def reset_app_cycle(app_name):
'success': False,
'error': f"{app_name} is not configured"
}), 400
-
+
try:
# Trigger cycle reset using database
from src.primary.utils.database import get_database
-
+
db = get_database()
success = db.create_reset_request(app_name)
-
+
if success:
web_logger.info(f"Created reset request for {app_name}")
else:
@@ -1122,6 +860,7 @@ def reset_app_cycle(app_name):
'error': f"Failed to reset cycle for {app_name}. The app may not be running."
}), 500
+
# Docker health check endpoint
@app.route('/ping', methods=['GET'])
def health_check():
@@ -1134,6 +873,7 @@ def health_check():
logger.debug("Health check endpoint accessed")
return jsonify({"status": "OK"})
+
@app.route('/api/health', methods=['GET'])
def api_health_check():
"""
@@ -1145,19 +885,19 @@ def api_health_check():
logger.debug("API health check endpoint accessed")
return jsonify({"status": "OK", "message": "Huntarr is running"})
+
@app.route('/api/github_sponsors', methods=['GET'])
def get_github_sponsors():
"""
Get sponsors from database. If database is empty, try to populate from manifest or GitHub.
"""
- from src.primary.utils.database import get_database
-
+
try:
db = get_database()
-
+
# Try to get sponsors from database first
sponsors = db.get_sponsors()
-
+
if sponsors:
# Format sponsors for frontend (convert avatar_url to avatarUrl for consistency)
formatted_sponsors = []
@@ -1172,16 +912,16 @@ def get_github_sponsors():
'tier': sponsor.get('tier', 'Supporter'),
'monthlyAmount': sponsor.get('monthly_amount', 0)
})
-
+
current_app.logger.debug(f"Returning {len(formatted_sponsors)} sponsors from database")
return jsonify(formatted_sponsors)
-
+
# If no sponsors in database, try to populate from manifest
current_app.logger.debug("No sponsors in database, attempting to populate from manifest")
-
+
# Try to use local manifest.json first, then fallback to GitHub
local_manifest_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'manifest.json')
-
+
manifest_data = None
if os.path.exists(local_manifest_path):
current_app.logger.debug(f"Using local manifest.json from {local_manifest_path}")
@@ -1194,17 +934,17 @@ def get_github_sponsors():
response = requests.get(manifest_url, timeout=10)
response.raise_for_status()
manifest_data = response.json()
-
+
if manifest_data:
sponsors_list = manifest_data.get('sponsors', [])
if sponsors_list:
# Save sponsors to database
db.save_sponsors(sponsors_list)
current_app.logger.debug(f"Populated database with {len(sponsors_list)} sponsors from manifest")
-
+
# Return the sponsors (recursively call this function to get formatted data)
return get_github_sponsors()
-
+
# If all else fails, return empty list
current_app.logger.warning("No sponsors found in database or manifest")
return jsonify([])
@@ -1214,6 +954,7 @@ def get_github_sponsors():
# Return empty list instead of 500 error to prevent UI issues
return jsonify([])
+
# Start the web server in debug or production mode
def start_web_server():
"""Start the web server in debug or production mode"""