diff --git a/Lite/Database/DuckDbInitializer.cs b/Lite/Database/DuckDbInitializer.cs
index 78b6a37..b8d0c16 100644
--- a/Lite/Database/DuckDbInitializer.cs
+++ b/Lite/Database/DuckDbInitializer.cs
@@ -86,7 +86,7 @@ public void Dispose()
///
/// Current schema version. Increment this when schema changes require table rebuilds.
///
- internal const int CurrentSchemaVersion = 15;
+ internal const int CurrentSchemaVersion = 16;
private readonly string _archivePath;
@@ -497,6 +497,14 @@ Must drop/recreate because DuckDB appender writes by position. */
_logger?.LogInformation("Running migration to v15: rebuilding file_io_stats for queued I/O columns");
await ExecuteNonQueryAsync(connection, "DROP TABLE IF EXISTS file_io_stats");
}
+
+ if (fromVersion < 16)
+ {
+ /* v16: Added database_size_stats and server_properties tables for FinOps monitoring.
+ New tables only — no existing table changes needed. Tables created by
+ GetAllTableStatements() during initialization. */
+ _logger?.LogInformation("Running migration to v16: adding FinOps tables (database_size_stats, server_properties)");
+ }
}
///
diff --git a/Lite/Database/Schema.cs b/Lite/Database/Schema.cs
index af4f391..6472cb4 100644
--- a/Lite/Database/Schema.cs
+++ b/Lite/Database/Schema.cs
@@ -591,6 +591,55 @@ percent_of_average DECIMAL(10,1)
public const string CreateRunningJobsIndex = @"
CREATE INDEX IF NOT EXISTS idx_running_jobs_time ON running_jobs(server_id, collection_time)";
+ public const string CreateDatabaseSizeStatsTable = @"
+CREATE TABLE IF NOT EXISTS database_size_stats (
+ collection_id BIGINT PRIMARY KEY,
+ collection_time TIMESTAMP NOT NULL,
+ server_id INTEGER NOT NULL,
+ server_name VARCHAR NOT NULL,
+ database_name VARCHAR NOT NULL,
+ database_id INTEGER NOT NULL,
+ file_id INTEGER NOT NULL,
+ file_type_desc VARCHAR NOT NULL,
+ file_name VARCHAR NOT NULL,
+ physical_name VARCHAR NOT NULL,
+ total_size_mb DECIMAL(19,2) NOT NULL,
+ used_size_mb DECIMAL(19,2),
+ auto_growth_mb DECIMAL(19,2),
+ max_size_mb DECIMAL(19,2),
+ recovery_model_desc VARCHAR,
+ compatibility_level INTEGER,
+ state_desc VARCHAR
+)";
+
+ public const string CreateDatabaseSizeStatsIndex = @"
+CREATE INDEX IF NOT EXISTS idx_database_size_stats_time ON database_size_stats(server_id, collection_time)";
+
+ public const string CreateServerPropertiesTable = @"
+CREATE TABLE IF NOT EXISTS server_properties (
+ collection_id BIGINT PRIMARY KEY,
+ collection_time TIMESTAMP NOT NULL,
+ server_id INTEGER NOT NULL,
+ server_name VARCHAR NOT NULL,
+ edition VARCHAR NOT NULL,
+ product_version VARCHAR NOT NULL,
+ product_level VARCHAR NOT NULL,
+ product_update_level VARCHAR,
+ engine_edition INTEGER NOT NULL,
+ cpu_count INTEGER NOT NULL,
+ hyperthread_ratio INTEGER NOT NULL,
+ physical_memory_mb BIGINT NOT NULL,
+ socket_count INTEGER,
+ cores_per_socket INTEGER,
+ is_hadr_enabled BOOLEAN,
+ is_clustered BOOLEAN,
+ enterprise_features VARCHAR,
+ service_objective VARCHAR
+)";
+
+ public const string CreateServerPropertiesIndex = @"
+CREATE INDEX IF NOT EXISTS idx_server_properties_time ON server_properties(server_id, collection_time)";
+
public const string CreateAlertLogTable = @"
CREATE TABLE IF NOT EXISTS config_alert_log (
alert_time TIMESTAMP NOT NULL,
@@ -633,6 +682,8 @@ public static IEnumerable GetAllTableStatements()
yield return CreateDatabaseScopedConfigTable;
yield return CreateTraceFlagsTable;
yield return CreateRunningJobsTable;
+ yield return CreateDatabaseSizeStatsTable;
+ yield return CreateServerPropertiesTable;
yield return CreateAlertLogTable;
}
@@ -660,5 +711,7 @@ public static IEnumerable GetAllIndexStatements()
yield return CreateDatabaseScopedConfigIndex;
yield return CreateTraceFlagsIndex;
yield return CreateRunningJobsIndex;
+ yield return CreateDatabaseSizeStatsIndex;
+ yield return CreateServerPropertiesIndex;
}
}
diff --git a/Lite/Services/RemoteCollectorService.DatabaseSize.cs b/Lite/Services/RemoteCollectorService.DatabaseSize.cs
new file mode 100644
index 0000000..7f3e9bc
--- /dev/null
+++ b/Lite/Services/RemoteCollectorService.DatabaseSize.cs
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2026 Erik Darling, Darling Data LLC
+ *
+ * This file is part of the SQL Server Performance Monitor Lite.
+ *
+ * Licensed under the MIT License. See LICENSE file in the project root for full license information.
+ */
+
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.Threading;
+using System.Threading.Tasks;
+using DuckDB.NET.Data;
+using Microsoft.Data.SqlClient;
+using Microsoft.Extensions.Logging;
+using PerformanceMonitorLite.Models;
+
+namespace PerformanceMonitorLite.Services;
+
+public partial class RemoteCollectorService
+{
+ ///
+ /// Collects per-file database sizes for growth trending and capacity planning.
+ /// On-prem: queries sys.master_files + sys.databases for all online databases.
+ /// Azure SQL DB: queries sys.database_files for the single database.
+ ///
+ private async Task CollectDatabaseSizeStatsAsync(ServerConnection server, CancellationToken cancellationToken)
+ {
+ var serverStatus = _serverManager.GetConnectionStatus(server.Id);
+ bool isAzureSqlDb = serverStatus?.SqlEngineEdition == 5;
+
+ const string onPremQuery = @"
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+
+SELECT
+ database_name = d.name,
+ database_id = d.database_id,
+ file_id = mf.file_id,
+ file_type_desc = mf.type_desc,
+ file_name = mf.name,
+ physical_name = mf.physical_name,
+ total_size_mb =
+ CONVERT(decimal(19,2), mf.size * 8.0 / 1024.0),
+ used_size_mb =
+ CONVERT(decimal(19,2), NULL),
+ auto_growth_mb =
+ CASE
+ WHEN mf.is_percent_growth = 1
+ THEN CONVERT(decimal(19,2), NULL)
+ ELSE CONVERT(decimal(19,2), mf.growth * 8.0 / 1024.0)
+ END,
+ max_size_mb =
+ CASE
+ WHEN mf.max_size = -1
+ THEN CONVERT(decimal(19,2), -1)
+ WHEN mf.max_size = 268435456
+ THEN CONVERT(decimal(19,2), 2097152)
+ ELSE CONVERT(decimal(19,2), mf.max_size * 8.0 / 1024.0)
+ END,
+ recovery_model_desc =
+ d.recovery_model_desc,
+    compatibility_level =
+        CONVERT(int, d.compatibility_level), /*tinyint in sys.databases; reader calls GetInt32*/
+ state_desc =
+ d.state_desc
+FROM sys.master_files AS mf
+JOIN sys.databases AS d
+ ON d.database_id = mf.database_id
+WHERE d.state_desc = N'ONLINE'
+ORDER BY
+ d.name,
+ mf.file_id
+OPTION(RECOMPILE);";
+
+ const string azureSqlDbQuery = @"
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+
+SELECT
+ database_name = DB_NAME(),
+ database_id = DB_ID(),
+ file_id = df.file_id,
+ file_type_desc = df.type_desc,
+ file_name = df.name,
+ physical_name = df.physical_name,
+ total_size_mb =
+ CONVERT(decimal(19,2), df.size * 8.0 / 1024.0),
+ used_size_mb =
+ CONVERT(decimal(19,2), FILEPROPERTY(df.name, N'SpaceUsed') * 8.0 / 1024.0),
+ auto_growth_mb =
+ CASE
+ WHEN df.is_percent_growth = 1
+ THEN CONVERT(decimal(19,2), NULL)
+ ELSE CONVERT(decimal(19,2), df.growth * 8.0 / 1024.0)
+ END,
+ max_size_mb =
+ CASE
+ WHEN df.max_size = -1
+ THEN CONVERT(decimal(19,2), -1)
+ WHEN df.max_size = 268435456
+ THEN CONVERT(decimal(19,2), 2097152)
+ ELSE CONVERT(decimal(19,2), df.max_size * 8.0 / 1024.0)
+ END,
+ recovery_model_desc =
+ CONVERT(nvarchar(12), DATABASEPROPERTYEX(DB_NAME(), N'Recovery')),
+ compatibility_level =
+ CONVERT(int, NULL),
+ state_desc =
+ N'ONLINE'
+FROM sys.database_files AS df
+ORDER BY
+ df.file_id
+OPTION(RECOMPILE);";
+
+ string query = isAzureSqlDb ? azureSqlDbQuery : onPremQuery;
+
+ var serverId = GetServerId(server);
+ var collectionTime = DateTime.UtcNow;
+ var rowsCollected = 0;
+ _lastSqlMs = 0;
+ _lastDuckDbMs = 0;
+
+ var rows = new List<(string DatabaseName, int DatabaseId, int FileId, string FileTypeDesc,
+ string FileName, string PhysicalName, decimal TotalSizeMb, decimal? UsedSizeMb,
+ decimal? AutoGrowthMb, decimal? MaxSizeMb, string? RecoveryModel,
+ int? CompatibilityLevel, string? StateDesc)>();
+
+ var sqlSw = Stopwatch.StartNew();
+ using var sqlConnection = await CreateConnectionAsync(server, cancellationToken);
+ using var command = new SqlCommand(query, sqlConnection);
+ command.CommandTimeout = CommandTimeoutSeconds;
+
+ using var reader = await command.ExecuteReaderAsync(cancellationToken);
+ while (await reader.ReadAsync(cancellationToken))
+ {
+ rows.Add((
+ reader.GetString(0),
+ reader.GetInt32(1),
+ reader.GetInt32(2),
+ reader.GetString(3),
+ reader.GetString(4),
+ reader.GetString(5),
+ reader.GetDecimal(6),
+ reader.IsDBNull(7) ? null : reader.GetDecimal(7),
+ reader.IsDBNull(8) ? null : reader.GetDecimal(8),
+ reader.IsDBNull(9) ? null : reader.GetDecimal(9),
+ reader.IsDBNull(10) ? null : reader.GetString(10),
+ reader.IsDBNull(11) ? null : reader.GetInt32(11),
+ reader.IsDBNull(12) ? null : reader.GetString(12)));
+ }
+ sqlSw.Stop();
+
+ var duckSw = Stopwatch.StartNew();
+
+ using (var duckConnection = _duckDb.CreateConnection())
+ {
+ await duckConnection.OpenAsync(cancellationToken);
+
+ using (var appender = duckConnection.CreateAppender("database_size_stats"))
+ {
+ foreach (var r in rows)
+ {
+ var row = appender.CreateRow();
+ row.AppendValue(GenerateCollectionId())
+ .AppendValue(collectionTime)
+ .AppendValue(serverId)
+ .AppendValue(server.ServerName)
+ .AppendValue(r.DatabaseName)
+ .AppendValue(r.DatabaseId)
+ .AppendValue(r.FileId)
+ .AppendValue(r.FileTypeDesc)
+ .AppendValue(r.FileName)
+ .AppendValue(r.PhysicalName)
+ .AppendValue(r.TotalSizeMb)
+ .AppendValue(r.UsedSizeMb)
+ .AppendValue(r.AutoGrowthMb)
+ .AppendValue(r.MaxSizeMb)
+ .AppendValue(r.RecoveryModel)
+ .AppendValue(r.CompatibilityLevel)
+ .AppendValue(r.StateDesc)
+ .EndRow();
+ rowsCollected++;
+ }
+ }
+ }
+
+ duckSw.Stop();
+ _lastSqlMs = sqlSw.ElapsedMilliseconds;
+ _lastDuckDbMs = duckSw.ElapsedMilliseconds;
+
+ _logger?.LogDebug("Collected {RowCount} database size rows for server '{Server}'", rowsCollected, server.DisplayName);
+ return rowsCollected;
+ }
+}
diff --git a/Lite/Services/RemoteCollectorService.ServerProperties.cs b/Lite/Services/RemoteCollectorService.ServerProperties.cs
new file mode 100644
index 0000000..519f4de
--- /dev/null
+++ b/Lite/Services/RemoteCollectorService.ServerProperties.cs
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2026 Erik Darling, Darling Data LLC
+ *
+ * This file is part of the SQL Server Performance Monitor Lite.
+ *
+ * Licensed under the MIT License. See LICENSE file in the project root for full license information.
+ */
+
+using System;
+using System.Diagnostics;
+using System.Threading;
+using System.Threading.Tasks;
+using DuckDB.NET.Data;
+using Microsoft.Data.SqlClient;
+using Microsoft.Extensions.Logging;
+using PerformanceMonitorLite.Models;
+
+namespace PerformanceMonitorLite.Services;
+
+public partial class RemoteCollectorService
+{
+ ///
+ /// Collects server edition, version, CPU/memory hardware metadata for
+ /// license audit and FinOps cost attribution. On-load only collector.
+ ///
+ private async Task CollectServerPropertiesAsync(ServerConnection server, CancellationToken cancellationToken)
+ {
+        /* Engine-edition branching is handled server-side by the SERVERPROPERTY CASE
+           in the query below, so no client-side Azure SQL DB check is needed here. */
+
+ const string query = @"
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+
+SELECT
+ server_name =
+ CONVERT(nvarchar(128), SERVERPROPERTY(N'ServerName')),
+ edition =
+ CONVERT(nvarchar(128), SERVERPROPERTY(N'Edition')),
+ product_version =
+ CONVERT(nvarchar(128), SERVERPROPERTY(N'ProductVersion')),
+ product_level =
+ CONVERT(nvarchar(128), SERVERPROPERTY(N'ProductLevel')),
+ product_update_level =
+ CONVERT(nvarchar(128), SERVERPROPERTY(N'ProductUpdateLevel')),
+ engine_edition =
+ CONVERT(int, SERVERPROPERTY(N'EngineEdition')),
+ cpu_count =
+ osi.cpu_count,
+ hyperthread_ratio =
+ osi.hyperthread_ratio,
+ physical_memory_mb =
+ osi.physical_memory_kb / 1024,
+ socket_count =
+ osi.socket_count,
+ cores_per_socket =
+ osi.cores_per_socket,
+ is_hadr_enabled =
+ CONVERT(bit, SERVERPROPERTY(N'IsHadrEnabled')),
+ is_clustered =
+ CONVERT(bit, SERVERPROPERTY(N'IsClustered')),
+ service_objective =
+ CASE
+ WHEN CONVERT(int, SERVERPROPERTY(N'EngineEdition')) = 5
+ THEN CONVERT(nvarchar(128), DATABASEPROPERTYEX(DB_NAME(), N'ServiceObjective'))
+ ELSE NULL
+ END
+FROM sys.dm_os_sys_info AS osi
+OPTION(RECOMPILE);";
+
+ var serverId = GetServerId(server);
+ var collectionTime = DateTime.UtcNow;
+ var rowsCollected = 0;
+ _lastSqlMs = 0;
+ _lastDuckDbMs = 0;
+
+ var sqlSw = Stopwatch.StartNew();
+ using var sqlConnection = await CreateConnectionAsync(server, cancellationToken);
+ using var command = new SqlCommand(query, sqlConnection);
+ command.CommandTimeout = CommandTimeoutSeconds;
+
+ using var reader = await command.ExecuteReaderAsync(cancellationToken);
+ if (await reader.ReadAsync(cancellationToken))
+ {
+ var serverName = reader.GetString(0);
+ var edition = reader.GetString(1);
+ var productVersion = reader.GetString(2);
+ var productLevel = reader.GetString(3);
+ var productUpdateLevel = reader.IsDBNull(4) ? null : reader.GetString(4);
+ var engineEdition = reader.GetInt32(5);
+ var cpuCount = reader.GetInt32(6);
+ var hyperthreadRatio = reader.GetInt32(7);
+ var physicalMemoryMb = reader.GetInt64(8);
+ int? socketCount = reader.IsDBNull(9) ? null : reader.GetInt32(9);
+ int? coresPerSocket = reader.IsDBNull(10) ? null : reader.GetInt32(10);
+ bool? isHadrEnabled = reader.IsDBNull(11) ? null : reader.GetBoolean(11);
+ bool? isClustered = reader.IsDBNull(12) ? null : reader.GetBoolean(12);
+ var serviceObjective = reader.IsDBNull(13) ? null : reader.GetString(13);
+
+ sqlSw.Stop();
+
+ var duckSw = Stopwatch.StartNew();
+
+ using (var duckConnection = _duckDb.CreateConnection())
+ {
+ await duckConnection.OpenAsync(cancellationToken);
+
+ using (var appender = duckConnection.CreateAppender("server_properties"))
+ {
+ var row = appender.CreateRow();
+ row.AppendValue(GenerateCollectionId())
+ .AppendValue(collectionTime)
+ .AppendValue(serverId)
+ .AppendValue(serverName)
+ .AppendValue(edition)
+ .AppendValue(productVersion)
+ .AppendValue(productLevel)
+ .AppendValue(productUpdateLevel)
+ .AppendValue(engineEdition)
+ .AppendValue(cpuCount)
+ .AppendValue(hyperthreadRatio)
+ .AppendValue(physicalMemoryMb)
+ .AppendValue(socketCount)
+ .AppendValue(coresPerSocket)
+ .AppendValue(isHadrEnabled)
+ .AppendValue(isClustered)
+ .AppendValue((string?)null) // enterprise_features — not collected in Lite (requires cross-database cursor)
+ .AppendValue(serviceObjective)
+ .EndRow();
+ rowsCollected++;
+ }
+ }
+
+ duckSw.Stop();
+ _lastDuckDbMs = duckSw.ElapsedMilliseconds;
+ }
+ else
+ {
+ sqlSw.Stop();
+ }
+
+ _lastSqlMs = sqlSw.ElapsedMilliseconds;
+
+ _logger?.LogDebug("Collected {RowCount} server properties row(s) for server '{Server}'", rowsCollected, server.DisplayName);
+ return rowsCollected;
+ }
+}
diff --git a/Lite/Services/RemoteCollectorService.cs b/Lite/Services/RemoteCollectorService.cs
index fc9fe77..954abf7 100644
--- a/Lite/Services/RemoteCollectorService.cs
+++ b/Lite/Services/RemoteCollectorService.cs
@@ -356,6 +356,8 @@ public async Task RunCollectorAsync(ServerConnection server, string collectorNam
"database_scoped_config" => await CollectDatabaseScopedConfigAsync(server, cancellationToken),
"trace_flags" => await CollectTraceFlagsAsync(server, cancellationToken),
"running_jobs" => await CollectRunningJobsAsync(server, cancellationToken),
+ "database_size_stats" => await CollectDatabaseSizeStatsAsync(server, cancellationToken),
+ "server_properties" => await CollectServerPropertiesAsync(server, cancellationToken),
_ => throw new ArgumentException($"Unknown collector: {collectorName}")
};
diff --git a/Lite/Services/ScheduleManager.cs b/Lite/Services/ScheduleManager.cs
index d09b188..726806d 100644
--- a/Lite/Services/ScheduleManager.cs
+++ b/Lite/Services/ScheduleManager.cs
@@ -386,7 +386,9 @@ private static List GetDefaultSchedules()
new() { Name = "blocked_process_report", Enabled = true, FrequencyMinutes = 1, RetentionDays = 30, Description = "Blocked process reports from XE ring buffer session (opt-out)" },
new() { Name = "database_scoped_config", Enabled = true, FrequencyMinutes = 0, RetentionDays = 30, Description = "Database-scoped configurations (on-load only)" },
new() { Name = "trace_flags", Enabled = true, FrequencyMinutes = 0, RetentionDays = 30, Description = "Active trace flags via DBCC TRACESTATUS (on-load only)" },
- new() { Name = "running_jobs", Enabled = true, FrequencyMinutes = 5, RetentionDays = 7, Description = "Currently running SQL Agent jobs with duration comparison" }
+ new() { Name = "running_jobs", Enabled = true, FrequencyMinutes = 5, RetentionDays = 7, Description = "Currently running SQL Agent jobs with duration comparison" },
+ new() { Name = "database_size_stats", Enabled = true, FrequencyMinutes = 60, RetentionDays = 90, Description = "Database file sizes for growth trending and capacity planning" },
+ new() { Name = "server_properties", Enabled = true, FrequencyMinutes = 0, RetentionDays = 365, Description = "Server edition, licensing, CPU/memory hardware metadata (on-load only)" }
};
}
diff --git a/install/02_create_tables.sql b/install/02_create_tables.sql
index 3c91b9c..eb9a17a 100644
--- a/install/02_create_tables.sql
+++ b/install/02_create_tables.sql
@@ -1406,5 +1406,88 @@ BEGIN
PRINT 'Created collect.running_jobs table';
END;
+/*
+Database Size Statistics Table (FinOps)
+*/
+IF OBJECT_ID(N'collect.database_size_stats', N'U') IS NULL
+BEGIN
+ CREATE TABLE
+ collect.database_size_stats
+ (
+ collection_id bigint IDENTITY NOT NULL,
+ collection_time datetime2(7) NOT NULL
+ DEFAULT SYSDATETIME(),
+ database_name sysname NOT NULL,
+ database_id integer NOT NULL,
+ file_id integer NOT NULL,
+ file_type_desc nvarchar(60) NOT NULL,
+ file_name sysname NOT NULL,
+ physical_name nvarchar(260) NOT NULL,
+ total_size_mb decimal(19,2) NOT NULL,
+ used_size_mb decimal(19,2) NULL,
+ auto_growth_mb decimal(19,2) NULL,
+ max_size_mb decimal(19,2) NULL,
+ recovery_model_desc nvarchar(12) NULL,
+ compatibility_level integer NULL,
+ state_desc nvarchar(60) NULL,
+ /*Analysis helpers - computed columns*/
+ free_space_mb AS
+ (
+ total_size_mb - used_size_mb
+ ),
+ used_pct AS
+ (
+ used_size_mb * 100.0 /
+ NULLIF(total_size_mb, 0)
+ ),
+ CONSTRAINT
+ PK_database_size_stats
+ PRIMARY KEY CLUSTERED
+ (collection_time, collection_id)
+ WITH
+ (DATA_COMPRESSION = PAGE)
+ );
+
+ PRINT 'Created collect.database_size_stats table';
+END;
+
+/*
+Server Properties Table (FinOps)
+*/
+IF OBJECT_ID(N'collect.server_properties', N'U') IS NULL
+BEGIN
+ CREATE TABLE
+ collect.server_properties
+ (
+ collection_id bigint IDENTITY NOT NULL,
+ collection_time datetime2(7) NOT NULL
+ DEFAULT SYSDATETIME(),
+ server_name sysname NOT NULL,
+ edition sysname NOT NULL,
+ product_version sysname NOT NULL,
+ product_level sysname NOT NULL,
+ product_update_level sysname NULL,
+ engine_edition integer NOT NULL,
+ cpu_count integer NOT NULL,
+ hyperthread_ratio integer NOT NULL,
+ physical_memory_mb bigint NOT NULL,
+ socket_count integer NULL,
+ cores_per_socket integer NULL,
+ is_hadr_enabled bit NULL,
+ is_clustered bit NULL,
+ enterprise_features nvarchar(max) NULL,
+ service_objective sysname NULL,
+ row_hash binary(32) NULL,
+ CONSTRAINT
+ PK_server_properties
+ PRIMARY KEY CLUSTERED
+ (collection_time, collection_id)
+ WITH
+ (DATA_COMPRESSION = PAGE)
+ );
+
+ PRINT 'Created collect.server_properties table';
+END;
+
PRINT 'All collection tables created successfully';
GO
diff --git a/install/03_create_config_tables.sql b/install/03_create_config_tables.sql
index 6fdb40d..f81960e 100644
--- a/install/03_create_config_tables.sql
+++ b/install/03_create_config_tables.sql
@@ -202,7 +202,9 @@ BEGIN
(N'plan_cache_stats_collector', 1, 60, 5, 30, N'Plan cache composition statistics - single-use plans and plan cache bloat detection'),
(N'session_stats_collector', 1, 5, 2, 30, N'Session and connection statistics - connection leaks and application patterns'),
(N'waiting_tasks_collector', 1, 5, 2, 30, N'Currently waiting tasks - blocking chains and wait analysis'),
- (N'running_jobs_collector', 1, 5, 2, 7, N'Currently running SQL Agent jobs with historical duration comparison');
+ (N'running_jobs_collector', 1, 5, 2, 7, N'Currently running SQL Agent jobs with historical duration comparison'),
+ (N'database_size_stats_collector', 1, 60, 10, 90, N'Database file sizes for growth trending and capacity planning'),
+ (N'server_properties_collector', 1, 1440, 5, 365, N'Server edition, licensing, CPU/memory hardware metadata for license audit');
/*
Stagger initial run times
diff --git a/install/06_ensure_collection_table.sql b/install/06_ensure_collection_table.sql
index bc3b47d..71d6f0c 100644
--- a/install/06_ensure_collection_table.sql
+++ b/install/06_ensure_collection_table.sql
@@ -1088,10 +1088,82 @@ BEGIN
(DATA_COMPRESSION = PAGE)
);
+ END;
+ ELSE IF @table_name = N'database_size_stats'
+ BEGIN
+ CREATE TABLE
+ collect.database_size_stats
+ (
+ collection_id bigint IDENTITY NOT NULL,
+ collection_time datetime2(7) NOT NULL
+ DEFAULT SYSDATETIME(),
+ database_name sysname NOT NULL,
+ database_id integer NOT NULL,
+ file_id integer NOT NULL,
+ file_type_desc nvarchar(60) NOT NULL,
+ file_name sysname NOT NULL,
+ physical_name nvarchar(260) NOT NULL,
+ total_size_mb decimal(19,2) NOT NULL,
+ used_size_mb decimal(19,2) NULL,
+ auto_growth_mb decimal(19,2) NULL,
+ max_size_mb decimal(19,2) NULL,
+ recovery_model_desc nvarchar(12) NULL,
+ compatibility_level integer NULL,
+ state_desc nvarchar(60) NULL,
+ free_space_mb AS
+ (
+ total_size_mb - used_size_mb
+ ),
+ used_pct AS
+ (
+ used_size_mb * 100.0 /
+ NULLIF(total_size_mb, 0)
+ ),
+ CONSTRAINT
+ PK_database_size_stats
+ PRIMARY KEY CLUSTERED
+ (collection_time, collection_id)
+ WITH
+ (DATA_COMPRESSION = PAGE)
+ );
+
+ END;
+ ELSE IF @table_name = N'server_properties'
+ BEGIN
+ CREATE TABLE
+ collect.server_properties
+ (
+ collection_id bigint IDENTITY NOT NULL,
+ collection_time datetime2(7) NOT NULL
+ DEFAULT SYSDATETIME(),
+ server_name sysname NOT NULL,
+ edition sysname NOT NULL,
+ product_version sysname NOT NULL,
+ product_level sysname NOT NULL,
+ product_update_level sysname NULL,
+ engine_edition integer NOT NULL,
+ cpu_count integer NOT NULL,
+ hyperthread_ratio integer NOT NULL,
+ physical_memory_mb bigint NOT NULL,
+ socket_count integer NULL,
+ cores_per_socket integer NULL,
+ is_hadr_enabled bit NULL,
+ is_clustered bit NULL,
+ enterprise_features nvarchar(max) NULL,
+ service_objective sysname NULL,
+ row_hash binary(32) NULL,
+ CONSTRAINT
+ PK_server_properties
+ PRIMARY KEY CLUSTERED
+ (collection_time, collection_id)
+ WITH
+ (DATA_COMPRESSION = PAGE)
+ );
+
END;
ELSE
BEGIN
- SET @error_message = N'Unknown table name: ' + @table_name + N'. Valid table names are: wait_stats, query_stats, memory_stats, memory_pressure_events, deadlock_xml, blocked_process_xml, procedure_stats, query_snapshots, query_store_data, trace_analysis, default_trace_events, file_io_stats, memory_grant_stats, cpu_scheduler_stats, memory_clerks_stats, perfmon_stats, cpu_utilization_stats, blocking_deadlock_stats, latch_stats, spinlock_stats, tempdb_stats, plan_cache_stats, session_stats, waiting_tasks, running_jobs';
+ SET @error_message = N'Unknown table name: ' + @table_name + N'. Valid table names are: wait_stats, query_stats, memory_stats, memory_pressure_events, deadlock_xml, blocked_process_xml, procedure_stats, query_snapshots, query_store_data, trace_analysis, default_trace_events, file_io_stats, memory_grant_stats, cpu_scheduler_stats, memory_clerks_stats, perfmon_stats, cpu_utilization_stats, blocking_deadlock_stats, latch_stats, spinlock_stats, tempdb_stats, plan_cache_stats, session_stats, waiting_tasks, running_jobs, database_size_stats, server_properties';
RAISERROR(@error_message, 16, 1);
RETURN;
END;
diff --git a/install/42_scheduled_master_collector.sql b/install/42_scheduled_master_collector.sql
index a192a90..565b659 100644
--- a/install/42_scheduled_master_collector.sql
+++ b/install/42_scheduled_master_collector.sql
@@ -315,6 +315,14 @@ BEGIN
BEGIN
EXECUTE collect.running_jobs_collector @debug = @debug;
END;
+ ELSE IF @collector_name = N'database_size_stats_collector'
+ BEGIN
+ EXECUTE collect.database_size_stats_collector @debug = @debug;
+ END;
+ ELSE IF @collector_name = N'server_properties_collector'
+ BEGIN
+ EXECUTE collect.server_properties_collector @debug = @debug;
+ END;
ELSE
BEGIN
RAISERROR(N'Unknown collector: %s', 11, 1, @collector_name);
diff --git a/install/52_collect_database_size_stats.sql b/install/52_collect_database_size_stats.sql
new file mode 100644
index 0000000..386bb72
--- /dev/null
+++ b/install/52_collect_database_size_stats.sql
@@ -0,0 +1,351 @@
+/*
+Copyright 2026 Darling Data, LLC
+https://www.erikdarling.com/
+
+*/
+
+SET ANSI_NULLS ON;
+SET ANSI_PADDING ON;
+SET ANSI_WARNINGS ON;
+SET ARITHABORT ON;
+SET CONCAT_NULL_YIELDS_NULL ON;
+SET QUOTED_IDENTIFIER ON;
+SET NUMERIC_ROUNDABORT OFF;
+SET IMPLICIT_TRANSACTIONS OFF;
+SET STATISTICS TIME, IO OFF;
+GO
+
+USE PerformanceMonitor;
+GO
+
+/*******************************************************************************
+Collector: database_size_stats_collector
+Purpose: Captures per-file database sizes for growth trending and capacity
+ planning. Collects total allocated size and used space per file.
+Collection Type: Point-in-time snapshot (no deltas)
+Target Table: collect.database_size_stats
+Frequency: Every 60 minutes
+Dependencies: sys.databases, sys.database_files (per database), FILEPROPERTY
+Notes: Uses cursor with dynamic SQL for cross-database used space collection.
+ Azure SQL DB uses sys.database_files (single database scope).
+*******************************************************************************/
+
+IF OBJECT_ID(N'collect.database_size_stats_collector', N'P') IS NULL
+BEGIN
+ EXECUTE(N'CREATE PROCEDURE collect.database_size_stats_collector AS RETURN 138;');
+END;
+GO
+
+ALTER PROCEDURE
+ collect.database_size_stats_collector
+(
+ @debug bit = 0 /*Print debugging information*/
+)
+WITH RECOMPILE
+AS
+BEGIN
+ SET NOCOUNT ON;
+ SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+
+ DECLARE
+ @rows_collected bigint = 0,
+ @start_time datetime2(7) = SYSDATETIME(),
+ @error_message nvarchar(4000),
+ @engine_edition integer =
+ CONVERT(integer, SERVERPROPERTY(N'EngineEdition'));
+
+ BEGIN TRY
+ /*
+ Ensure target table exists
+ */
+ IF OBJECT_ID(N'collect.database_size_stats', N'U') IS NULL
+ BEGIN
+ INSERT INTO
+ config.collection_log
+ (
+ collection_time,
+ collector_name,
+ collection_status,
+ rows_collected,
+ duration_ms,
+ error_message
+ )
+ VALUES
+ (
+ @start_time,
+ N'database_size_stats_collector',
+ N'TABLE_MISSING',
+ 0,
+ 0,
+ N'Table collect.database_size_stats does not exist, calling ensure procedure'
+ );
+
+ EXECUTE config.ensure_collection_table
+ @table_name = N'database_size_stats',
+ @debug = @debug;
+
+ IF OBJECT_ID(N'collect.database_size_stats', N'U') IS NULL
+ BEGIN
+ RAISERROR(N'Table collect.database_size_stats still missing after ensure procedure', 16, 1);
+ RETURN;
+ END;
+ END;
+
+ /*
+ Azure SQL DB: single database scope
+ */
+ IF @engine_edition = 5
+ BEGIN
+ INSERT INTO
+ collect.database_size_stats
+ (
+ collection_time,
+ database_name,
+ database_id,
+ file_id,
+ file_type_desc,
+ file_name,
+ physical_name,
+ total_size_mb,
+ used_size_mb,
+ auto_growth_mb,
+ max_size_mb,
+ recovery_model_desc,
+ compatibility_level,
+ state_desc
+ )
+ SELECT
+ collection_time = @start_time,
+ database_name = DB_NAME(),
+ database_id = DB_ID(),
+ file_id = df.file_id,
+ file_type_desc = df.type_desc,
+ file_name = df.name,
+ physical_name = df.physical_name,
+ total_size_mb =
+ CONVERT(decimal(19,2), df.size * 8.0 / 1024.0),
+ used_size_mb =
+ CONVERT
+ (
+ decimal(19,2),
+ FILEPROPERTY(df.name, N'SpaceUsed') * 8.0 / 1024.0
+ ),
+ auto_growth_mb =
+ CASE
+ WHEN df.is_percent_growth = 1
+ THEN NULL
+ ELSE CONVERT(decimal(19,2), df.growth * 8.0 / 1024.0)
+ END,
+ max_size_mb =
+ CASE
+ WHEN df.max_size = -1
+ THEN CONVERT(decimal(19,2), -1)
+ WHEN df.max_size = 268435456
+ THEN CONVERT(decimal(19,2), 2097152) /*2 TB*/
+ ELSE CONVERT(decimal(19,2), df.max_size * 8.0 / 1024.0)
+ END,
+ recovery_model_desc =
+ CONVERT(nvarchar(12), DATABASEPROPERTYEX(DB_NAME(), N'Recovery')),
+ compatibility_level = NULL,
+ state_desc = N'ONLINE'
+ FROM sys.database_files AS df
+ OPTION(RECOMPILE);
+
+ SET @rows_collected = ROWCOUNT_BIG();
+ END;
+ ELSE
+ BEGIN
+ /*
+ On-prem / Azure MI / AWS RDS: cursor over all online databases
+            Collect per-file sizes from sys.database_files and used space via
+ dynamic SQL executing FILEPROPERTY in each database context
+ */
+ DECLARE
+ @db_name sysname,
+ @db_id integer,
+ @sql nvarchar(max);
+
+ DECLARE db_cursor CURSOR LOCAL FAST_FORWARD FOR
+ SELECT
+ d.name,
+ d.database_id
+ FROM sys.databases AS d
+ WHERE d.state_desc = N'ONLINE'
+ AND d.database_id > 0
+ ORDER BY
+ d.database_id;
+
+ OPEN db_cursor;
+ FETCH NEXT FROM db_cursor INTO @db_name, @db_id;
+
+ WHILE @@FETCH_STATUS = 0
+ BEGIN
+ BEGIN TRY
+ SET @sql = N'
+ USE ' + QUOTENAME(@db_name) + N';
+
+ INSERT INTO
+ PerformanceMonitor.collect.database_size_stats
+ (
+ collection_time,
+ database_name,
+ database_id,
+ file_id,
+ file_type_desc,
+ file_name,
+ physical_name,
+ total_size_mb,
+ used_size_mb,
+ auto_growth_mb,
+ max_size_mb,
+ recovery_model_desc,
+ compatibility_level,
+ state_desc
+ )
+ SELECT
+ collection_time = @start_time,
+ database_name = DB_NAME(),
+ database_id = DB_ID(),
+ file_id = df.file_id,
+ file_type_desc = df.type_desc,
+ file_name = df.name,
+ physical_name = df.physical_name,
+ total_size_mb =
+ CONVERT(decimal(19,2), df.size * 8.0 / 1024.0),
+ used_size_mb =
+ CONVERT
+ (
+ decimal(19,2),
+ FILEPROPERTY(df.name, N''SpaceUsed'') * 8.0 / 1024.0
+ ),
+ auto_growth_mb =
+ CASE
+ WHEN df.is_percent_growth = 1
+ THEN NULL
+ ELSE CONVERT(decimal(19,2), df.growth * 8.0 / 1024.0)
+ END,
+ max_size_mb =
+ CASE
+ WHEN df.max_size = -1
+ THEN CONVERT(decimal(19,2), -1)
+ WHEN df.max_size = 268435456
+ THEN CONVERT(decimal(19,2), 2097152)
+ ELSE CONVERT(decimal(19,2), df.max_size * 8.0 / 1024.0)
+ END,
+ recovery_model_desc = d.recovery_model_desc,
+ compatibility_level = d.compatibility_level,
+ state_desc = d.state_desc
+ FROM sys.database_files AS df
+ CROSS JOIN sys.databases AS d
+ WHERE d.database_id = DB_ID();';
+
+ EXECUTE sys.sp_executesql
+ @sql,
+ N'@start_time datetime2(7)',
+ @start_time = @start_time;
+
+ SET @rows_collected = @rows_collected + ROWCOUNT_BIG();
+ END TRY
+            BEGIN CATCH
+                /*Log per-database errors but continue with remaining databases*/
+                SET @error_message = ERROR_MESSAGE();
+
+                IF @debug = 1
+                BEGIN
+                    RAISERROR(N'Error collecting size stats for database [%s]: %s', 0, 1, @db_name, @error_message) WITH NOWAIT;
+                END;
+            END CATCH;
+
+ FETCH NEXT FROM db_cursor INTO @db_name, @db_id;
+ END;
+
+ CLOSE db_cursor;
+ DEALLOCATE db_cursor;
+ END;
+
+ /*
+ Debug output
+ */
+ IF @debug = 1
+ BEGIN
+ RAISERROR(N'Collected %d database size rows', 0, 1, @rows_collected) WITH NOWAIT;
+
+ SELECT TOP (20)
+ dss.database_name,
+ dss.file_type_desc,
+ dss.file_name,
+ dss.total_size_mb,
+ dss.used_size_mb,
+ dss.free_space_mb,
+ dss.used_pct
+ FROM collect.database_size_stats AS dss
+ WHERE dss.collection_time = @start_time
+ ORDER BY
+ dss.total_size_mb DESC;
+ END;
+
+ /*
+ Log successful collection
+ */
+ INSERT INTO
+ config.collection_log
+ (
+ collector_name,
+ collection_status,
+ rows_collected,
+ duration_ms
+ )
+ VALUES
+ (
+ N'database_size_stats_collector',
+ N'SUCCESS',
+ @rows_collected,
+ DATEDIFF(MILLISECOND, @start_time, SYSDATETIME())
+ );
+
+ END TRY
+ BEGIN CATCH
+ IF @@TRANCOUNT > 0
+ BEGIN
+ ROLLBACK TRANSACTION;
+ END;
+
+ /*
+ Clean up cursor if open
+ */
+ IF CURSOR_STATUS(N'local', N'db_cursor') >= 0
+ BEGIN
+ CLOSE db_cursor;
+ DEALLOCATE db_cursor;
+ END;
+
+ SET @error_message = ERROR_MESSAGE();
+
+ /*
+ Log the error
+ */
+ INSERT INTO
+ config.collection_log
+ (
+ collector_name,
+ collection_status,
+ duration_ms,
+ error_message
+ )
+ VALUES
+ (
+ N'database_size_stats_collector',
+ N'ERROR',
+ DATEDIFF(MILLISECOND, @start_time, SYSDATETIME()),
+ @error_message
+ );
+
+ RAISERROR(N'Error in database size stats collector: %s', 16, 1, @error_message);
+ END CATCH;
+END;
+GO
+
+PRINT 'Database size stats collector created successfully';
+PRINT 'Captures per-file database sizes for growth trending and capacity planning';
+PRINT 'Use: EXECUTE collect.database_size_stats_collector @debug = 1;';
+GO
diff --git a/install/53_collect_server_properties.sql b/install/53_collect_server_properties.sql
new file mode 100644
index 0000000..9cf046e
--- /dev/null
+++ b/install/53_collect_server_properties.sql
@@ -0,0 +1,405 @@
+/*
+Copyright 2026 Darling Data, LLC
+https://www.erikdarling.com/
+
+*/
+
+SET ANSI_NULLS ON;
+SET ANSI_PADDING ON;
+SET ANSI_WARNINGS ON;
+SET ARITHABORT ON;
+SET CONCAT_NULL_YIELDS_NULL ON;
+SET QUOTED_IDENTIFIER ON;
+SET NUMERIC_ROUNDABORT OFF;
+SET IMPLICIT_TRANSACTIONS OFF;
+SET STATISTICS TIME, IO OFF;
+GO
+
+USE PerformanceMonitor;
+GO
+
+/*******************************************************************************
+Collector: server_properties_collector
+Purpose: Captures server edition, version, CPU/memory hardware metadata, and
+ Enterprise feature usage for license audit and FinOps cost attribution.
+Collection Type: Deduplication snapshot (skip if unchanged)
+Target Table: collect.server_properties
+Frequency: Daily (1440 minutes)
+Dependencies: SERVERPROPERTY, sys.dm_os_sys_info, sys.dm_db_persisted_sku_features
+Notes: Enterprise features enumeration gated by DMV existence.
+         Uses FOR XML PATH for feature aggregation (works on SQL 2016+).
+ Azure SQL DB uses DATABASEPROPERTYEX for service objective.
+*******************************************************************************/
+
+IF OBJECT_ID(N'collect.server_properties_collector', N'P') IS NULL
+BEGIN
+ EXECUTE(N'CREATE PROCEDURE collect.server_properties_collector AS RETURN 138;');
+END;
+GO
+
+ALTER PROCEDURE
+ collect.server_properties_collector
+(
+ @debug bit = 0 /*Print debugging information*/
+)
+WITH RECOMPILE
+AS
+BEGIN
+ SET NOCOUNT ON;
+ SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+
+ DECLARE
+ @rows_collected bigint = 0,
+ @start_time datetime2(7) = SYSDATETIME(),
+ @error_message nvarchar(4000),
+ @engine_edition integer =
+ CONVERT(integer, SERVERPROPERTY(N'EngineEdition')),
+ @major_version integer;
+
+ /*
+ Parse major version for feature gating
+ */
+ SET @major_version =
+ CONVERT
+ (
+ integer,
+ PARSENAME
+ (
+ CONVERT(nvarchar(128), SERVERPROPERTY(N'ProductVersion')),
+ 4
+ )
+ );
+
+ BEGIN TRY
+ /*
+ Ensure target table exists
+ */
+ IF OBJECT_ID(N'collect.server_properties', N'U') IS NULL
+ BEGIN
+ INSERT INTO
+ config.collection_log
+ (
+ collection_time,
+ collector_name,
+ collection_status,
+ rows_collected,
+ duration_ms,
+ error_message
+ )
+ VALUES
+ (
+ @start_time,
+ N'server_properties_collector',
+ N'TABLE_MISSING',
+ 0,
+ 0,
+ N'Table collect.server_properties does not exist, calling ensure procedure'
+ );
+
+ EXECUTE config.ensure_collection_table
+ @table_name = N'server_properties',
+ @debug = @debug;
+
+ IF OBJECT_ID(N'collect.server_properties', N'U') IS NULL
+ BEGIN
+ RAISERROR(N'Table collect.server_properties still missing after ensure procedure', 16, 1);
+ RETURN;
+ END;
+ END;
+
+ /*
+ Collect enterprise features in use across databases
+ sys.dm_db_persisted_sku_features lists Enterprise features per database
+ Not available on Azure SQL DB (engine edition 5)
+ */
+ DECLARE
+ @enterprise_features nvarchar(max) = NULL;
+
+ IF @engine_edition <> 5
+ AND OBJECT_ID(N'sys.dm_db_persisted_sku_features', N'V') IS NOT NULL
+ BEGIN
+ CREATE TABLE
+ #sku_features
+ (
+ database_name sysname NOT NULL,
+ feature_name sysname NOT NULL
+ );
+
+ DECLARE
+ @db_name sysname,
+ @sql nvarchar(max);
+
+ DECLARE sku_cursor CURSOR LOCAL FAST_FORWARD FOR
+ SELECT
+ d.name
+ FROM sys.databases AS d
+ WHERE d.state_desc = N'ONLINE'
+ AND d.database_id > 4 /*Skip system databases*/
+ ORDER BY
+ d.database_id;
+
+ OPEN sku_cursor;
+ FETCH NEXT FROM sku_cursor INTO @db_name;
+
+ WHILE @@FETCH_STATUS = 0
+ BEGIN
+ BEGIN TRY
+ SET @sql = N'
+ SELECT
+ database_name = ' + QUOTENAME(@db_name, N'''') + N',
+ feature_name = f.feature_name
+ FROM ' + QUOTENAME(@db_name) + N'.sys.dm_db_persisted_sku_features AS f;';
+
+ INSERT INTO #sku_features
+ (
+ database_name,
+ feature_name
+ )
+ EXECUTE sys.sp_executesql @sql;
+ END TRY
+ BEGIN CATCH
+ /*Skip databases we cannot query*/
+ IF @debug = 1
+ BEGIN
+ DECLARE @sku_err nvarchar(4000) = ERROR_MESSAGE();
+ RAISERROR(N'SKU features error for [%s]: %s', 0, 1, @db_name, @sku_err) WITH NOWAIT;
+ END;
+ END CATCH;
+
+ FETCH NEXT FROM sku_cursor INTO @db_name;
+ END;
+
+ CLOSE sku_cursor;
+ DEALLOCATE sku_cursor;
+
+ /*
+ Aggregate features into comma-delimited string
+ Format: "DatabaseName: Feature1, Feature2; DatabaseName2: Feature3"
+ Use FOR XML PATH (works on SQL 2016+)
+ */
+ SELECT
+ @enterprise_features =
+ STUFF
+ (
+ (
+ SELECT
+ N'; ' + sf.database_name + N': ' + sf.feature_name
+ FROM #sku_features AS sf
+ ORDER BY
+ sf.database_name,
+ sf.feature_name
+ FOR XML PATH(N''), TYPE
+ ).value(N'.', N'nvarchar(max)'),
+ 1,
+ 2,
+ N''
+ );
+
+ DROP TABLE #sku_features;
+ END;
+
+ /*
+ Deduplication: check if anything changed since last collection
+ */
+ DECLARE
+ @current_hash binary(32),
+ @last_hash binary(32);
+
+ SELECT
+ @current_hash =
+ HASHBYTES
+ (
+ N'SHA2_256',
+ CONCAT
+ (
+ SERVERPROPERTY(N'Edition'), N'|',
+ SERVERPROPERTY(N'ProductVersion'), N'|',
+ SERVERPROPERTY(N'ProductLevel'), N'|',
+ @engine_edition, N'|',
+ (SELECT osi.cpu_count FROM sys.dm_os_sys_info AS osi), N'|',
+ (SELECT osi.physical_memory_kb FROM sys.dm_os_sys_info AS osi), N'|',
+ ISNULL(@enterprise_features, N'')
+ )
+ );
+
+ SELECT TOP (1)
+ @last_hash = sp.row_hash
+ FROM collect.server_properties AS sp
+ ORDER BY
+ sp.collection_time DESC;
+
+ IF @current_hash = @last_hash
+ BEGIN
+ IF @debug = 1
+ BEGIN
+ RAISERROR(N'Server properties unchanged since last collection, skipping', 0, 1) WITH NOWAIT;
+ END;
+
+ INSERT INTO
+ config.collection_log
+ (
+ collector_name,
+ collection_status,
+ rows_collected,
+ duration_ms,
+ error_message
+ )
+ VALUES
+ (
+ N'server_properties_collector',
+ N'SKIPPED',
+ 0,
+ DATEDIFF(MILLISECOND, @start_time, SYSDATETIME()),
+ N'Properties unchanged since last collection'
+ );
+
+ RETURN;
+ END;
+
+ /*
+ Insert new row
+ */
+ INSERT INTO
+ collect.server_properties
+ (
+ collection_time,
+ server_name,
+ edition,
+ product_version,
+ product_level,
+ product_update_level,
+ engine_edition,
+ cpu_count,
+ hyperthread_ratio,
+ physical_memory_mb,
+ socket_count,
+ cores_per_socket,
+ is_hadr_enabled,
+ is_clustered,
+ enterprise_features,
+ service_objective,
+ row_hash
+ )
+ SELECT
+ collection_time = @start_time,
+ server_name =
+ CONVERT(sysname, SERVERPROPERTY(N'ServerName')),
+ edition =
+ CONVERT(sysname, SERVERPROPERTY(N'Edition')),
+ product_version =
+ CONVERT(sysname, SERVERPROPERTY(N'ProductVersion')),
+ product_level =
+ CONVERT(sysname, SERVERPROPERTY(N'ProductLevel')),
+ product_update_level =
+ CONVERT(sysname, SERVERPROPERTY(N'ProductUpdateLevel')),
+ engine_edition = @engine_edition,
+ cpu_count = osi.cpu_count,
+ hyperthread_ratio = osi.hyperthread_ratio,
+ physical_memory_mb =
+ osi.physical_memory_kb / 1024,
+ socket_count = osi.socket_count,
+ cores_per_socket = osi.cores_per_socket,
+ is_hadr_enabled =
+ CONVERT(bit, SERVERPROPERTY(N'IsHadrEnabled')),
+ is_clustered =
+ CONVERT(bit, SERVERPROPERTY(N'IsClustered')),
+ enterprise_features = @enterprise_features,
+ service_objective =
+ CASE
+ WHEN @engine_edition = 5
+ THEN CONVERT(sysname, DATABASEPROPERTYEX(DB_NAME(), N'ServiceObjective'))
+ ELSE NULL
+ END,
+ row_hash = @current_hash
+ FROM sys.dm_os_sys_info AS osi
+ OPTION(RECOMPILE);
+
+ SET @rows_collected = ROWCOUNT_BIG();
+
+    /*
+    Debug output
+    */
+    IF @debug = 1
+    BEGIN
+        RAISERROR(N'Collected %I64d server properties row(s)', 0, 1, @rows_collected) WITH NOWAIT; /* %I64d: @rows_collected is bigint; %d raises error 2786 */
+
+        SELECT TOP (1)
+            sp.server_name,
+            sp.edition,
+            sp.product_version,
+            sp.cpu_count,
+            sp.hyperthread_ratio,
+            sp.physical_memory_mb,
+            sp.socket_count,
+            sp.cores_per_socket,
+            sp.enterprise_features,
+            sp.service_objective
+        FROM collect.server_properties AS sp
+        WHERE sp.collection_time = @start_time;
+    END;
+
+ /*
+ Log successful collection
+ */
+ INSERT INTO
+ config.collection_log
+ (
+ collector_name,
+ collection_status,
+ rows_collected,
+ duration_ms
+ )
+ VALUES
+ (
+ N'server_properties_collector',
+ N'SUCCESS',
+ @rows_collected,
+ DATEDIFF(MILLISECOND, @start_time, SYSDATETIME())
+ );
+
+ END TRY
+ BEGIN CATCH
+ IF @@TRANCOUNT > 0
+ BEGIN
+ ROLLBACK TRANSACTION;
+ END;
+
+ /*
+ Clean up cursor if open
+ */
+ IF CURSOR_STATUS(N'local', N'sku_cursor') >= 0
+ BEGIN
+ CLOSE sku_cursor;
+ DEALLOCATE sku_cursor;
+ END;
+
+ SET @error_message = ERROR_MESSAGE();
+
+ /*
+ Log the error
+ */
+ INSERT INTO
+ config.collection_log
+ (
+ collector_name,
+ collection_status,
+ duration_ms,
+ error_message
+ )
+ VALUES
+ (
+ N'server_properties_collector',
+ N'ERROR',
+ DATEDIFF(MILLISECOND, @start_time, SYSDATETIME()),
+ @error_message
+ );
+
+ RAISERROR(N'Error in server properties collector: %s', 16, 1, @error_message);
+ END CATCH;
+END;
+GO
+
+PRINT 'Server properties collector created successfully';
+PRINT 'Captures edition, version, CPU/memory hardware, and Enterprise feature usage';
+PRINT 'Use: EXECUTE collect.server_properties_collector @debug = 1;';
+GO
diff --git a/install/54_create_finops_views.sql b/install/54_create_finops_views.sql
new file mode 100644
index 0000000..c827dd2
--- /dev/null
+++ b/install/54_create_finops_views.sql
@@ -0,0 +1,409 @@
+/*
+Copyright 2026 Darling Data, LLC
+https://www.erikdarling.com/
+
+FinOps Reporting Views
+Provides cost allocation, utilization scoring, peak analysis,
+and application attribution from existing collected data.
+*/
+
+SET ANSI_NULLS ON;
+SET ANSI_PADDING ON;
+SET ANSI_WARNINGS ON;
+SET ARITHABORT ON;
+SET CONCAT_NULL_YIELDS_NULL ON;
+SET QUOTED_IDENTIFIER ON;
+SET NUMERIC_ROUNDABORT OFF;
+SET IMPLICIT_TRANSACTIONS OFF;
+SET STATISTICS TIME, IO OFF;
+GO
+
+USE PerformanceMonitor;
+GO
+
+/*******************************************************************************
+View 1: Per-Database Resource Usage
+Shows CPU time, logical reads, execution counts, and I/O per database
+for cost allocation and showback reporting.
+Source: collect.query_stats, collect.procedure_stats, collect.file_io_stats
+*******************************************************************************/
+
+CREATE OR ALTER VIEW
+ report.finops_database_resource_usage
+AS
+WITH
+ /*
+ Combine query and procedure stats deltas by database
+ Filter to last 24 hours with valid deltas
+ */
+ workload_stats AS
+ (
+ SELECT
+ database_name = qs.database_name,
+ cpu_time_ms =
+ SUM(qs.total_worker_time_delta) / 1000,
+ logical_reads =
+ SUM(qs.total_logical_reads_delta),
+ physical_reads =
+ SUM(qs.total_physical_reads_delta),
+ logical_writes =
+ SUM(qs.total_logical_writes_delta),
+ execution_count =
+ SUM(qs.execution_count_delta)
+ FROM collect.query_stats AS qs
+ WHERE qs.collection_time >= DATEADD(HOUR, -24, SYSDATETIME())
+ AND qs.total_worker_time_delta IS NOT NULL
+ GROUP BY
+ qs.database_name
+ ),
+ /*
+ File I/O deltas by database
+ */
+ io_stats AS
+ (
+ SELECT
+ database_name = fio.database_name,
+ io_read_bytes =
+ SUM(fio.num_of_bytes_read_delta),
+ io_write_bytes =
+ SUM(fio.num_of_bytes_written_delta),
+ io_stall_ms =
+ SUM(fio.io_stall_ms_delta)
+ FROM collect.file_io_stats AS fio
+ WHERE fio.collection_time >= DATEADD(HOUR, -24, SYSDATETIME())
+ AND fio.num_of_bytes_read_delta IS NOT NULL
+ GROUP BY
+ fio.database_name
+ ),
+ /*
+ Server-wide totals for percentage calculations
+ */
+ totals AS
+ (
+ SELECT
+ total_cpu_ms =
+ NULLIF(SUM(ws.cpu_time_ms), 0),
+ total_io_bytes =
+ NULLIF
+ (
+ SUM(ios.io_read_bytes) +
+ SUM(ios.io_write_bytes),
+ 0
+ )
+ FROM workload_stats AS ws
+ FULL JOIN io_stats AS ios
+ ON ios.database_name = ws.database_name
+ )
+SELECT
+ database_name =
+ COALESCE(ws.database_name, ios.database_name),
+ cpu_time_ms =
+ ISNULL(ws.cpu_time_ms, 0),
+ logical_reads =
+ ISNULL(ws.logical_reads, 0),
+ physical_reads =
+ ISNULL(ws.physical_reads, 0),
+ logical_writes =
+ ISNULL(ws.logical_writes, 0),
+ execution_count =
+ ISNULL(ws.execution_count, 0),
+ io_read_mb =
+ CONVERT
+ (
+ decimal(19,2),
+ ISNULL(ios.io_read_bytes, 0) / 1048576.0
+ ),
+ io_write_mb =
+ CONVERT
+ (
+ decimal(19,2),
+ ISNULL(ios.io_write_bytes, 0) / 1048576.0
+ ),
+ io_stall_ms =
+ ISNULL(ios.io_stall_ms, 0),
+ pct_cpu_share =
+ CONVERT
+ (
+ decimal(5,2),
+ ISNULL(ws.cpu_time_ms, 0) * 100.0 /
+ t.total_cpu_ms
+ ),
+ pct_io_share =
+ CONVERT
+ (
+ decimal(5,2),
+ (ISNULL(ios.io_read_bytes, 0) + ISNULL(ios.io_write_bytes, 0)) * 100.0 /
+ t.total_io_bytes
+ )
+FROM workload_stats AS ws
+FULL JOIN io_stats AS ios
+ ON ios.database_name = ws.database_name
+CROSS JOIN totals AS t;
+GO
+
+PRINT 'Created report.finops_database_resource_usage view';
+GO
+
+/*******************************************************************************
+View 2: Utilization Efficiency Score
+Calculates whether the server is over-provisioned, right-sized, or
+under-provisioned based on CPU, memory, and worker thread utilization.
+Source: collect.cpu_utilization_stats, collect.memory_stats,
+ collect.cpu_scheduler_stats
+*******************************************************************************/
+
+CREATE OR ALTER VIEW
+    report.finops_utilization_efficiency
+AS
+WITH
+    /*
+    Raw CPU samples over the last 24 hours.
+    PERCENTILE_CONT is a window function and cannot share a
+    SELECT with grouped aggregates (error 8120), so compute it
+    per-row here and aggregate in the next CTE.
+    */
+    cpu_raw AS
+    (
+        SELECT
+            cus.sqlserver_cpu_utilization,
+            p95_cpu_pct =
+                PERCENTILE_CONT(0.95) WITHIN GROUP
+                (
+                    ORDER BY
+                        cus.sqlserver_cpu_utilization
+                ) OVER ()
+        FROM collect.cpu_utilization_stats AS cus
+        WHERE cus.collection_time >= DATEADD(HOUR, -24, SYSDATETIME())
+    ),
+    /*
+    Aggregate the raw samples into a single row; the per-row
+    p95 value is identical on every row, so MAX simply picks
+    it up alongside the other aggregates
+    */
+    cpu_dedup AS
+    (
+        SELECT
+            avg_cpu_pct =
+                AVG(CONVERT(decimal(5,2), cr.sqlserver_cpu_utilization)),
+            max_cpu_pct =
+                MAX(cr.sqlserver_cpu_utilization),
+            p95_cpu_pct =
+                CONVERT(decimal(5,2), MAX(cr.p95_cpu_pct)),
+            sample_count =
+                COUNT_BIG(*)
+        FROM cpu_raw AS cr
+    ),
+    /*
+    Latest memory stats
+    */
+    memory_latest AS
+    (
+        SELECT TOP (1)
+            ms.total_memory_mb,
+            ms.committed_target_memory_mb,
+            ms.total_physical_memory_mb,
+            ms.buffer_pool_mb,
+            ms.memory_utilization_percentage,
+            memory_ratio =
+                CONVERT
+                (
+                    decimal(5,2),
+                    ms.total_memory_mb /
+                    NULLIF(ms.committed_target_memory_mb, 0)
+                )
+        FROM collect.memory_stats AS ms
+        ORDER BY
+            ms.collection_time DESC
+    ),
+    /*
+    Latest scheduler stats
+    */
+    scheduler_latest AS
+    (
+        SELECT TOP (1)
+            ss.total_current_workers_count,
+            ss.max_workers_count,
+            ss.cpu_count,
+            worker_ratio =
+                CONVERT
+                (
+                    decimal(5,2),
+                    ss.total_current_workers_count * 1.0 /
+                    NULLIF(ss.max_workers_count, 0)
+                )
+        FROM collect.cpu_scheduler_stats AS ss
+        ORDER BY
+            ss.collection_time DESC
+    )
+SELECT
+    avg_cpu_pct =
+        cd.avg_cpu_pct,
+    max_cpu_pct =
+        cd.max_cpu_pct,
+    p95_cpu_pct =
+        cd.p95_cpu_pct,
+    cpu_samples =
+        cd.sample_count,
+    total_memory_mb =
+        ml.total_memory_mb,
+    target_memory_mb =
+        ml.committed_target_memory_mb,
+    physical_memory_mb =
+        ml.total_physical_memory_mb,
+    memory_ratio =
+        ml.memory_ratio,
+    memory_utilization_pct =
+        ml.memory_utilization_percentage,
+    worker_threads_current =
+        sl.total_current_workers_count,
+    worker_threads_max =
+        sl.max_workers_count,
+    worker_thread_ratio =
+        sl.worker_ratio,
+    cpu_count =
+        sl.cpu_count,
+    provisioning_status =
+        CASE
+            WHEN cd.avg_cpu_pct < 15
+            AND  cd.max_cpu_pct < 40
+            AND  ml.memory_ratio < 0.5
+            THEN N'OVER_PROVISIONED'
+            WHEN cd.p95_cpu_pct > 85
+            OR   ml.memory_ratio > 0.95
+            OR   sl.worker_ratio > 0.8
+            THEN N'UNDER_PROVISIONED'
+            ELSE N'RIGHT_SIZED'
+        END
+FROM cpu_dedup AS cd
+CROSS JOIN memory_latest AS ml
+CROSS JOIN scheduler_latest AS sl;
+GO
+
+PRINT 'Created report.finops_utilization_efficiency view';
+GO
+
+/*******************************************************************************
+View 3: Peak Utilization Windows
+Shows average and maximum CPU/memory utilization per hour of day (0-23)
+to identify peak and idle windows for capacity planning.
+Source: collect.cpu_utilization_stats, collect.memory_stats (last 7 days)
+*******************************************************************************/
+
+CREATE OR ALTER VIEW
+ report.finops_peak_utilization
+AS
+WITH
+ /*
+ CPU utilization bucketed by hour of day
+ */
+ cpu_by_hour AS
+ (
+ SELECT
+ hour_of_day =
+ DATEPART(HOUR, cus.collection_time),
+ avg_cpu_pct =
+ AVG(CONVERT(decimal(5,2), cus.sqlserver_cpu_utilization)),
+ max_cpu_pct =
+ MAX(cus.sqlserver_cpu_utilization),
+ sample_count =
+ COUNT_BIG(*)
+ FROM collect.cpu_utilization_stats AS cus
+ WHERE cus.collection_time >= DATEADD(DAY, -7, SYSDATETIME())
+ GROUP BY
+ DATEPART(HOUR, cus.collection_time)
+ ),
+ /*
+ Memory utilization bucketed by hour of day
+ */
+ memory_by_hour AS
+ (
+ SELECT
+ hour_of_day =
+ DATEPART(HOUR, ms.collection_time),
+ avg_memory_pct =
+ AVG(CONVERT(decimal(5,2), ms.memory_utilization_percentage)),
+ max_memory_pct =
+ MAX(ms.memory_utilization_percentage)
+ FROM collect.memory_stats AS ms
+ WHERE ms.collection_time >= DATEADD(DAY, -7, SYSDATETIME())
+ GROUP BY
+ DATEPART(HOUR, ms.collection_time)
+ ),
+ /*
+ Overall averages for classification
+ */
+ overall AS
+ (
+ SELECT
+ overall_avg_cpu =
+ NULLIF(AVG(cbh.avg_cpu_pct), 0)
+ FROM cpu_by_hour AS cbh
+ )
+SELECT
+ hour_of_day =
+ cbh.hour_of_day,
+ avg_cpu_pct =
+ cbh.avg_cpu_pct,
+ max_cpu_pct =
+ cbh.max_cpu_pct,
+ avg_memory_pct =
+ ISNULL(mbh.avg_memory_pct, 0),
+ max_memory_pct =
+ ISNULL(mbh.max_memory_pct, 0),
+ cpu_samples =
+ cbh.sample_count,
+ hour_classification =
+ CASE
+ WHEN cbh.avg_cpu_pct > (o.overall_avg_cpu * 1.5)
+ THEN N'PEAK'
+ WHEN cbh.avg_cpu_pct < (o.overall_avg_cpu * 0.3)
+ THEN N'IDLE'
+ ELSE N'NORMAL'
+ END
+FROM cpu_by_hour AS cbh
+LEFT JOIN memory_by_hour AS mbh
+ ON mbh.hour_of_day = cbh.hour_of_day
+CROSS JOIN overall AS o;
+GO
+
+PRINT 'Created report.finops_peak_utilization view';
+GO
+
+/*******************************************************************************
+View 4: Application Resource Usage (Connection-Level Attribution)
+Shows per-application connection patterns from session stats.
+Note: Plan cache (query_stats/procedure_stats) does not capture program_name.
+Full CPU/reads per application would require Resource Governor or Query Store.
+Source: collect.session_stats (last 24 hours)
+*******************************************************************************/
+
+CREATE OR ALTER VIEW
+ report.finops_application_resource_usage
+AS
+SELECT
+ application_name =
+ ss.top_application_name,
+ avg_connections =
+ AVG(ss.top_application_connections),
+ max_connections =
+ MAX(ss.top_application_connections),
+ sample_count =
+ COUNT_BIG(*),
+ first_seen =
+ MIN(ss.collection_time),
+ last_seen =
+ MAX(ss.collection_time)
+FROM collect.session_stats AS ss
+WHERE ss.collection_time >= DATEADD(HOUR, -24, SYSDATETIME())
+AND ss.top_application_name IS NOT NULL
+GROUP BY
+ ss.top_application_name;
+GO
+
+PRINT 'Created report.finops_application_resource_usage view';
+GO
+
+PRINT 'FinOps reporting views created successfully';
+PRINT 'Views: report.finops_database_resource_usage, report.finops_utilization_efficiency,';
+PRINT ' report.finops_peak_utilization, report.finops_application_resource_usage';
+GO
diff --git a/upgrades/2.1.0-to-2.2.0/05_add_finops_collectors.sql b/upgrades/2.1.0-to-2.2.0/05_add_finops_collectors.sql
new file mode 100644
index 0000000..b21ef86
--- /dev/null
+++ b/upgrades/2.1.0-to-2.2.0/05_add_finops_collectors.sql
@@ -0,0 +1,87 @@
+/*
+Copyright 2026 Darling Data, LLC
+https://www.erikdarling.com/
+
+Upgrade from 2.1.0 to 2.2.0
+Adds FinOps collector schedule entries for existing installations.
+Tables self-heal via ensure_collection_table; views use CREATE OR ALTER.
+Only the schedule entries need explicit insertion for upgrades.
+*/
+
+SET ANSI_NULLS ON;
+SET ANSI_PADDING ON;
+SET ANSI_WARNINGS ON;
+SET ARITHABORT ON;
+SET CONCAT_NULL_YIELDS_NULL ON;
+SET QUOTED_IDENTIFIER ON;
+SET NUMERIC_ROUNDABORT OFF;
+SET IMPLICIT_TRANSACTIONS OFF;
+SET STATISTICS TIME, IO OFF;
+GO
+
+USE PerformanceMonitor;
+GO
+
+IF NOT EXISTS
+(
+ SELECT
+ 1/0
+ FROM config.collection_schedule
+ WHERE collector_name = N'database_size_stats_collector'
+)
+BEGIN
+ INSERT INTO
+ config.collection_schedule
+ (
+ collector_name,
+ enabled,
+ frequency_minutes,
+ max_duration_minutes,
+ retention_days,
+ description
+ )
+ VALUES
+ (
+ N'database_size_stats_collector',
+ 1,
+ 60,
+ 10,
+ 90,
+ N'Database file sizes for growth trending and capacity planning'
+ );
+
+ PRINT 'Added database_size_stats_collector to collection schedule';
+END;
+GO
+
+IF NOT EXISTS
+(
+ SELECT
+ 1/0
+ FROM config.collection_schedule
+ WHERE collector_name = N'server_properties_collector'
+)
+BEGIN
+ INSERT INTO
+ config.collection_schedule
+ (
+ collector_name,
+ enabled,
+ frequency_minutes,
+ max_duration_minutes,
+ retention_days,
+ description
+ )
+ VALUES
+ (
+ N'server_properties_collector',
+ 1,
+ 1440,
+ 5,
+ 365,
+ N'Server edition, licensing, CPU/memory hardware metadata for license audit'
+ );
+
+ PRINT 'Added server_properties_collector to collection schedule';
+END;
+GO
diff --git a/upgrades/2.1.0-to-2.2.0/upgrade.txt b/upgrades/2.1.0-to-2.2.0/upgrade.txt
index 22e6d63..8f2a8d5 100644
--- a/upgrades/2.1.0-to-2.2.0/upgrade.txt
+++ b/upgrades/2.1.0-to-2.2.0/upgrade.txt
@@ -2,3 +2,4 @@
02_compress_query_store_data.sql
03_compress_procedure_stats.sql
04_create_tracking_tables.sql
+05_add_finops_collectors.sql