From 6da57a7e24fb73cfe5c3ff0ecdb07f246901ac7c Mon Sep 17 00:00:00 2001
From: jhw <835269233@qq.com>
Date: Sat, 28 Mar 2026 23:38:54 +0800
Subject: [PATCH 1/3] feat(kms): align RustFS KMS env and encryption validation
Made-with: Cursor
---
CHANGELOG.md | 8 ++
.../[name]/tenant-detail-client.tsx | 44 +-----
console-web/types/api.ts | 4 -
deploy/rustfs-operator/crds/tenant-crd.yaml | 29 ++--
deploy/rustfs-operator/crds/tenant.yaml | 29 ++--
src/console/handlers/encryption.rs | 4 -
src/console/models/encryption.rs | 4 -
src/context.rs | 79 ++++++++++-
src/reconcile.rs | 5 +-
src/types/v1alpha1/encryption.rs | 45 +++---
src/types/v1alpha1/tenant/workloads.rs | 130 +++++-------------
11 files changed, 161 insertions(+), 220 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 59029f4..39a0b09 100755
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,8 +24,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- **Tenant name length**: [`validate_dns1035_label`](src/types/v1alpha1/tenant.rs) now caps `metadata.name` at **55** characters so derived names like `{name}-console` remain valid Kubernetes DNS labels (≤ 63).
+- **Encryption validation on reconcile**: [`validate_kms_secret`](src/context.rs) now runs whenever `spec.encryption.enabled` is true (previously skipped when `kmsSecret` was unset).
+
### Changed
+- **Tenant `spec.encryption.vault`**: Removed `tlsSkipVerify` and `customCertificates` (they were never wired to `rustfs-kms`). Vault TLS should rely on system-trusted CAs or TLS upstream. The project is still pre-production; if you have old YAML with these keys, remove them before apply.
+
+- **KMS pod environment** ([`tenant/workloads.rs`](src/types/v1alpha1/tenant/workloads.rs)): Align variable names with the RustFS server and `rustfs-kms` (`RUSTFS_KMS_ENABLE`, `RUSTFS_KMS_VAULT_ADDRESS`, KV mount and key prefix, local `RUSTFS_KMS_KEY_DIR` / `RUSTFS_KMS_DEFAULT_KEY_ID`, etc.); remove Vault TLS certificate volume mounts; `ping_seconds` remains documented as reserved (not injected).
+
+- **Local KMS** ([`context.rs`](src/context.rs)): Validate absolute `keyDirectory` and require a single server replica across pools (multi-replica tenants need Vault or shared storage).
+
- **Deploy scripts** ([`scripts/deploy/deploy-rustfs.sh`](scripts/deploy/deploy-rustfs.sh), [`deploy-rustfs-4node.sh`](scripts/deploy/deploy-rustfs-4node.sh)): Docker builds use **layer cache by default** (`docker_build_cached`); set `RUSTFS_DOCKER_NO_CACHE=true` for a full rebuild. Documented in [`scripts/README.md`](scripts/README.md).
- **4-node deploy**: Help text moved to an early heredoc (avoids trailing `case`/parse issues); see script header.
- **4-node cleanup** ([`cleanup-rustfs-4node.sh`](scripts/cleanup/cleanup-rustfs-4node.sh)): Host storage dirs under `/tmp/rustfs-storage-*` may require `sudo rm -rf` after Kind (root-owned bind mounts).
diff --git a/console-web/app/(dashboard)/tenants/[namespace]/[name]/tenant-detail-client.tsx b/console-web/app/(dashboard)/tenants/[namespace]/[name]/tenant-detail-client.tsx
index 3854471..84947e9 100644
--- a/console-web/app/(dashboard)/tenants/[namespace]/[name]/tenant-detail-client.tsx
+++ b/console-web/app/(dashboard)/tenants/[namespace]/[name]/tenant-detail-client.tsx
@@ -108,8 +108,6 @@ export function TenantDetailClient({ namespace, name, initialTab, initialYamlEdi
namespace: "",
prefix: "",
authType: "token",
- tlsSkipVerify: false,
- customCertificates: false,
})
const [encAppRole, setEncAppRole] = useState({
engine: "",
@@ -380,8 +378,6 @@ export function TenantDetailClient({ namespace, name, initialTab, initialYamlEdi
namespace: data.vault.namespace || "",
prefix: data.vault.prefix || "",
authType: data.vault.authType || "token",
- tlsSkipVerify: data.vault.tlsSkipVerify || false,
- customCertificates: data.vault.customCertificates || false,
})
if (data.vault.appRole) {
setEncAppRole({
@@ -428,8 +424,6 @@ export function TenantDetailClient({ namespace, name, initialTab, initialYamlEdi
namespace: encVault.namespace || undefined,
prefix: encVault.prefix || undefined,
authType: encVault.authType || undefined,
- tlsSkipVerify: encVault.tlsSkipVerify || undefined,
- customCertificates: encVault.customCertificates || undefined,
}
if (encVault.authType === "approle") {
body.vault.appRole = {
@@ -951,32 +945,6 @@ export function TenantDetailClient({ namespace, name, initialTab, initialYamlEdi
/>
-
{/* Auth type selector */}
@@ -1093,21 +1061,15 @@ export function TenantDetailClient({ namespace, name, initialTab, initialYamlEdi
diff --git a/console-web/types/api.ts b/console-web/types/api.ts
index e350a25..008b5f6 100644
--- a/console-web/types/api.ts
+++ b/console-web/types/api.ts
@@ -271,8 +271,6 @@ export interface VaultInfo {
prefix: string | null
authType: string | null
appRole: AppRoleInfo | null
- tlsSkipVerify: boolean | null
- customCertificates: boolean | null
}
export interface LocalKmsInfo {
@@ -310,8 +308,6 @@ export interface UpdateEncryptionRequest {
engine?: string
retrySeconds?: number
}
- tlsSkipVerify?: boolean
- customCertificates?: boolean
}
local?: {
keyDirectory?: string
diff --git a/deploy/rustfs-operator/crds/tenant-crd.yaml b/deploy/rustfs-operator/crds/tenant-crd.yaml
index b6db5da..fcd8740 100644
--- a/deploy/rustfs-operator/crds/tenant-crd.yaml
+++ b/deploy/rustfs-operator/crds/tenant-crd.yaml
@@ -66,7 +66,7 @@ spec:
kmsSecret:
description: |-
Reference to a Secret containing sensitive KMS credentials
- (Vault token or AppRole credentials, TLS certificates).
+ (Vault token or AppRole credentials).
nullable: true
properties:
name:
@@ -80,19 +80,20 @@ spec:
nullable: true
properties:
keyDirectory:
- description: 'Directory for key files inside the container (default: `/data/kms-keys`).'
+ description: |-
+ Absolute directory for KMS key files inside the container (default: `/data/kms-keys`).
+ Must be absolute; RustFS validates this for the local backend.
nullable: true
type: string
masterKeyId:
- description: 'Master key identifier (default: `default-master-key`).'
+ description: Default KMS key id for SSE (maps to `RUSTFS_KMS_DEFAULT_KEY_ID` in the RustFS binary).
nullable: true
type: string
type: object
pingSeconds:
description: |-
- Interval in seconds for KMS health-check pings (default: disabled).
- When set, the operator stores the value; the in-process KMS library
- picks it up from `RUSTFS_KMS_PING_SECONDS`.
+ Reserved for future KMS health-check tuning. Not injected into pods: the current RustFS
+ release does not read `RUSTFS_KMS_PING_SECONDS` in the server startup path.
format: int32
nullable: true
type: integer
@@ -125,19 +126,11 @@ spec:
- null
nullable: true
type: string
- customCertificates:
- description: |-
- Enable custom TLS certificates for the Vault connection.
- When `true`, the operator mounts TLS certificate files from the KMS Secret
- and configures the corresponding environment variables.
- The Secret must contain: `vault-ca-cert`, `vault-client-cert`, `vault-client-key`.
- nullable: true
- type: boolean
endpoint:
description: Vault server endpoint (e.g. `https://vault.example.com:8200`).
type: string
engine:
- description: 'Vault KV2 engine mount path (default: `kv`).'
+ description: KV secrets engine mount path (maps to `RUSTFS_KMS_VAULT_KV_MOUNT` in rustfs-kms; e.g. `secret`, `kv`).
nullable: true
type: string
namespace:
@@ -145,13 +138,9 @@ spec:
nullable: true
type: string
prefix:
- description: Key prefix inside the engine.
+ description: Key prefix inside the KV engine (maps to `RUSTFS_KMS_VAULT_KEY_PREFIX`).
nullable: true
type: string
- tlsSkipVerify:
- description: Skip TLS certificate verification for Vault connection.
- nullable: true
- type: boolean
required:
- endpoint
type: object
diff --git a/deploy/rustfs-operator/crds/tenant.yaml b/deploy/rustfs-operator/crds/tenant.yaml
index b6db5da..fcd8740 100755
--- a/deploy/rustfs-operator/crds/tenant.yaml
+++ b/deploy/rustfs-operator/crds/tenant.yaml
@@ -66,7 +66,7 @@ spec:
kmsSecret:
description: |-
Reference to a Secret containing sensitive KMS credentials
- (Vault token or AppRole credentials, TLS certificates).
+ (Vault token or AppRole credentials).
nullable: true
properties:
name:
@@ -80,19 +80,20 @@ spec:
nullable: true
properties:
keyDirectory:
- description: 'Directory for key files inside the container (default: `/data/kms-keys`).'
+ description: |-
+ Absolute directory for KMS key files inside the container (default: `/data/kms-keys`).
+ Must be absolute; RustFS validates this for the local backend.
nullable: true
type: string
masterKeyId:
- description: 'Master key identifier (default: `default-master-key`).'
+ description: Default KMS key id for SSE (maps to `RUSTFS_KMS_DEFAULT_KEY_ID` in the RustFS binary).
nullable: true
type: string
type: object
pingSeconds:
description: |-
- Interval in seconds for KMS health-check pings (default: disabled).
- When set, the operator stores the value; the in-process KMS library
- picks it up from `RUSTFS_KMS_PING_SECONDS`.
+ Reserved for future KMS health-check tuning. Not injected into pods: the current RustFS
+ release does not read `RUSTFS_KMS_PING_SECONDS` in the server startup path.
format: int32
nullable: true
type: integer
@@ -125,19 +126,11 @@ spec:
- null
nullable: true
type: string
- customCertificates:
- description: |-
- Enable custom TLS certificates for the Vault connection.
- When `true`, the operator mounts TLS certificate files from the KMS Secret
- and configures the corresponding environment variables.
- The Secret must contain: `vault-ca-cert`, `vault-client-cert`, `vault-client-key`.
- nullable: true
- type: boolean
endpoint:
description: Vault server endpoint (e.g. `https://vault.example.com:8200`).
type: string
engine:
- description: 'Vault KV2 engine mount path (default: `kv`).'
+ description: KV secrets engine mount path (maps to `RUSTFS_KMS_VAULT_KV_MOUNT` in rustfs-kms; e.g. `secret`, `kv`).
nullable: true
type: string
namespace:
@@ -145,13 +138,9 @@ spec:
nullable: true
type: string
prefix:
- description: Key prefix inside the engine.
+ description: Key prefix inside the KV engine (maps to `RUSTFS_KMS_VAULT_KEY_PREFIX`).
nullable: true
type: string
- tlsSkipVerify:
- description: Skip TLS certificate verification for Vault connection.
- nullable: true
- type: boolean
required:
- endpoint
type: object
diff --git a/src/console/handlers/encryption.rs b/src/console/handlers/encryption.rs
index 52d1d0e..98b9b9e 100644
--- a/src/console/handlers/encryption.rs
+++ b/src/console/handlers/encryption.rs
@@ -55,8 +55,6 @@ pub async fn get_encryption(
engine: ar.engine.clone(),
retry_seconds: ar.retry_seconds,
}),
- tls_skip_verify: v.tls_skip_verify,
- custom_certificates: v.custom_certificates,
}),
local: enc.local.as_ref().map(|l| LocalInfo {
key_directory: l.key_directory.clone(),
@@ -152,8 +150,6 @@ pub async fn update_encryption(
engine: ar.engine,
retry_seconds: ar.retry_seconds,
}),
- tls_skip_verify: v.tls_skip_verify,
- custom_certificates: v.custom_certificates,
})
} else {
None
diff --git a/src/console/models/encryption.rs b/src/console/models/encryption.rs
index 6a5c7a2..1717eaa 100644
--- a/src/console/models/encryption.rs
+++ b/src/console/models/encryption.rs
@@ -38,8 +38,6 @@ pub struct VaultInfo {
pub prefix: Option<String>,
pub auth_type: Option<String>,
pub app_role: Option<AppRoleInfo>,
- pub tls_skip_verify: Option<bool>,
- pub custom_certificates: Option<bool>,
}
/// AppRole non-sensitive fields.
@@ -90,8 +88,6 @@ pub struct UpdateVaultRequest {
pub prefix: Option<String>,
pub auth_type: Option<String>,
pub app_role: Option<UpdateAppRoleRequest>,
- pub tls_skip_verify: Option<bool>,
- pub custom_certificates: Option<bool>,
}
#[derive(Debug, Deserialize, ToSchema)]
diff --git a/src/context.rs b/src/context.rs
index 238c39f..69e0606 100755
--- a/src/context.rs
+++ b/src/context.rs
@@ -75,6 +75,31 @@ pub enum Error {
Serde { source: serde_json::Error },
}
+/// Validates Local KMS: absolute `keyDirectory` and at most one server replica across pools.
+fn validate_local_kms_tenant(
+ local: Option<&types::v1alpha1::encryption::LocalKmsConfig>,
+ pools: &[types::v1alpha1::pool::Pool],
+) -> Result<(), Error> {
+ let key_dir = local
+ .and_then(|l| l.key_directory.as_deref())
+ .unwrap_or("/data/kms-keys");
+ if !key_dir.starts_with('/') {
+ return Err(Error::KmsConfigInvalid {
+ message: format!(
+ "Local KMS keyDirectory must be an absolute path (got \"{}\")",
+ key_dir
+ ),
+ });
+ }
+ let total_servers: i32 = pools.iter().map(|p| p.servers).sum();
+ if total_servers > 1 {
+ return Err(Error::KmsConfigInvalid {
+ message: "Local KMS is only supported when the tenant has a single RustFS server replica (sum of pool servers must be 1). For multiple servers use Vault KMS, or use a single-server pool.".to_string(),
+ });
+ }
+ Ok(())
+}
+
pub struct Context {
pub(crate) client: kube::Client,
pub(crate) recorder: Recorder,
@@ -303,8 +328,9 @@ impl Context {
/// Validates encryption configuration and the KMS Secret.
///
/// Checks:
- /// 1. Vault endpoint is non-empty when backend is Vault.
- /// 2. KMS Secret exists and contains the correct keys for the auth type.
+ /// 1. Local KMS: absolute key directory and single replica (sum of pool servers).
+ /// 2. Vault endpoint is non-empty when backend is Vault.
+ /// 3. KMS Secret exists and contains the correct keys for the auth type.
pub async fn validate_kms_secret(&self, tenant: &Tenant) -> Result<(), Error> {
use crate::types::v1alpha1::encryption::{KmsBackendType, VaultAuthType};
@@ -315,6 +341,12 @@ impl Context {
return Ok(());
}
+ // Local KMS: RustFS requires an absolute key directory; multi-replica tenants need Vault
+ // (or a shared filesystem) because each Pod would otherwise have its own key files.
+ if enc.backend == KmsBackendType::Local {
+ validate_local_kms_tenant(enc.local.as_ref(), &tenant.spec.pools)?;
+ }
+
// Validate Vault endpoint is non-empty and kms_secret is required for Vault
if enc.backend == KmsBackendType::Vault {
let endpoint_empty = enc
@@ -468,3 +500,46 @@ impl Context {
Ok((status.current_revision, status.update_revision))
}
}
+
+#[cfg(test)]
+mod validate_local_kms_tests {
+ use super::Error;
+ use super::validate_local_kms_tenant;
+ use crate::types::v1alpha1::encryption::LocalKmsConfig;
+ use crate::types::v1alpha1::persistence::PersistenceConfig;
+ use crate::types::v1alpha1::pool::Pool;
+
+ fn pool(servers: i32) -> Pool {
+ Pool {
+ name: "p".to_string(),
+ servers,
+ persistence: PersistenceConfig {
+ volumes_per_server: 4,
+ ..Default::default()
+ },
+ scheduling: Default::default(),
+ }
+ }
+
+ #[test]
+ fn local_kms_default_key_dir_ok_single_replica() {
+ validate_local_kms_tenant(None, &[pool(1)]).unwrap();
+ }
+
+ #[test]
+ fn local_kms_rejects_relative_key_dir() {
+ let local = LocalKmsConfig {
+ key_directory: Some("data/kms".to_string()),
+ ..Default::default()
+ };
+ let err = validate_local_kms_tenant(Some(&local), &[pool(1)]).unwrap_err();
+ assert!(matches!(err, Error::KmsConfigInvalid { .. }));
+ }
+
+ #[test]
+ fn local_kms_rejects_multi_pool_multi_replica() {
+ let local = LocalKmsConfig::default();
+ let err = validate_local_kms_tenant(Some(&local), &[pool(2), pool(2)]).unwrap_err();
+ assert!(matches!(err, Error::KmsConfigInvalid { .. }));
+ }
+}
diff --git a/src/reconcile.rs b/src/reconcile.rs
index 2627bac..529e524 100755
--- a/src/reconcile.rs
+++ b/src/reconcile.rs
@@ -79,10 +79,11 @@ pub async fn reconcile_rustfs(tenant: Arc<Tenant>, ctx: Arc<Context>) -> Result<
return Err(e.into());
}
- // Validate KMS Secret if encryption is configured
+ // Validate encryption / KMS: Vault requires endpoint + kmsSecret (and correct keys);
+ // must run whenever encryption is enabled — not only when kmsSecret is set, or Vault
+ // without a Secret reference would skip validation entirely.
if let Some(ref enc) = latest_tenant.spec.encryption
&& enc.enabled
- && enc.kms_secret.is_some()
&& let Err(e) = ctx.validate_kms_secret(&latest_tenant).await
{
let _ = ctx
diff --git a/src/types/v1alpha1/encryption.rs b/src/types/v1alpha1/encryption.rs
index e5e8ba0..3da37b8 100644
--- a/src/types/v1alpha1/encryption.rs
+++ b/src/types/v1alpha1/encryption.rs
@@ -65,7 +65,7 @@ impl std::fmt::Display for VaultAuthType {
/// Vault-specific KMS configuration.
///
/// Maps to `VaultConfig` in the `rustfs-kms` crate.
-/// Sensitive fields (token, TLS keys) are stored in the Secret referenced
+/// Sensitive fields (Vault token or AppRole credentials) are stored in the Secret referenced
/// by `EncryptionConfig::kms_secret`.
#[derive(Deserialize, Serialize, Clone, Debug, KubeSchema, Default)]
#[serde(rename_all = "camelCase")]
@@ -73,7 +73,7 @@ pub struct VaultKmsConfig {
/// Vault server endpoint (e.g. `https://vault.example.com:8200`).
pub endpoint: String,
- /// Vault KV2 engine mount path (default: `kv`).
+ /// KV secrets engine mount path (maps to `RUSTFS_KMS_VAULT_KV_MOUNT` in rustfs-kms; e.g. `secret`, `kv`).
#[serde(default, skip_serializing_if = "Option::is_none")]
pub engine: Option<String>,
@@ -81,7 +81,7 @@ pub struct VaultKmsConfig {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub namespace: Option<String>,
- /// Key prefix inside the engine.
+ /// Key prefix inside the KV engine (maps to `RUSTFS_KMS_VAULT_KEY_PREFIX`).
#[serde(default, skip_serializing_if = "Option::is_none")]
pub prefix: Option<String>,
@@ -94,17 +94,6 @@ pub struct VaultKmsConfig {
/// under keys `vault-approle-id` and `vault-approle-secret`.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub app_role: Option<VaultAppRoleConfig>,
-
- /// Skip TLS certificate verification for Vault connection.
- #[serde(default, skip_serializing_if = "Option::is_none")]
- pub tls_skip_verify: Option,
-
- /// Enable custom TLS certificates for the Vault connection.
- /// When `true`, the operator mounts TLS certificate files from the KMS Secret
- /// and configures the corresponding environment variables.
- /// The Secret must contain: `vault-ca-cert`, `vault-client-cert`, `vault-client-key`.
- #[serde(default, skip_serializing_if = "Option::is_none")]
- pub custom_certificates: Option,
}
/// Vault AppRole authentication settings.
@@ -132,23 +121,28 @@ pub struct VaultAppRoleConfig {
///
/// Maps to `LocalConfig` in the `rustfs-kms` crate.
/// Keys are stored as JSON files in the specified directory.
+///
+/// **RustFS binary alignment**: `key_directory` is injected as `RUSTFS_KMS_KEY_DIR` (required by
+/// `rustfs` server startup). `master_key_id` maps to `RUSTFS_KMS_DEFAULT_KEY_ID` (default SSE key id),
+/// not to a "master encryption passphrase" (`RUSTFS_KMS_LOCAL_MASTER_KEY` is separate in rustfs-kms).
#[derive(Deserialize, Serialize, Clone, Debug, KubeSchema, Default)]
#[serde(rename_all = "camelCase")]
pub struct LocalKmsConfig {
- /// Directory for key files inside the container (default: `/data/kms-keys`).
+ /// Absolute directory for KMS key files inside the container (default: `/data/kms-keys`).
+ /// Must be absolute; RustFS validates this for the local backend.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub key_directory: Option<String>,
- /// Master key identifier (default: `default-master-key`).
+ /// Default KMS key id for SSE (maps to `RUSTFS_KMS_DEFAULT_KEY_ID` in the RustFS binary).
#[serde(default, skip_serializing_if = "Option::is_none")]
pub master_key_id: Option<String>,
}
/// Encryption / KMS configuration for a Tenant.
///
-/// When enabled, the operator injects KMS environment variables and mounts
-/// the referenced Secret into all RustFS pods so that the in-process
-/// `rustfs-kms` library picks them up on startup.
+/// When enabled, the operator injects environment variables matching the **RustFS server**
+/// (`rustfs` CLI / `init_kms_system`) and `rustfs_kms::KmsConfig::from_env()` where applicable.
+/// See `Tenant::configure_kms` in `tenant/workloads.rs` for the exact variable names.
///
/// Example YAML:
/// ```yaml
@@ -161,7 +155,6 @@ pub struct LocalKmsConfig {
/// engine: "kv"
/// namespace: "tenant1"
/// prefix: "rustfs"
-/// customCertificates: true
/// kmsSecret:
/// name: "my-tenant-kms-secret"
/// ```
@@ -175,11 +168,6 @@ pub struct LocalKmsConfig {
/// - `vault-approle-id` (required): AppRole role ID
/// - `vault-approle-secret` (required): AppRole secret ID
///
-/// **Vault TLS (when `customCertificates: true`):**
-/// - `vault-ca-cert`: PEM-encoded CA certificate
-/// - `vault-client-cert`: PEM-encoded client certificate for mTLS
-/// - `vault-client-key`: PEM-encoded client private key for mTLS
-///
/// **Local backend:**
/// No secret keys required (keys are stored on disk).
#[derive(Deserialize, Serialize, Clone, Debug, KubeSchema, Default)]
@@ -202,13 +190,12 @@ pub struct EncryptionConfig {
pub local: Option,
/// Reference to a Secret containing sensitive KMS credentials
- /// (Vault token or AppRole credentials, TLS certificates).
+ /// (Vault token or AppRole credentials).
#[serde(default, skip_serializing_if = "Option::is_none")]
pub kms_secret: Option,
- /// Interval in seconds for KMS health-check pings (default: disabled).
- /// When set, the operator stores the value; the in-process KMS library
- /// picks it up from `RUSTFS_KMS_PING_SECONDS`.
+ /// Reserved for future KMS health-check tuning. Not injected into pods: the current RustFS
+ /// release does not read `RUSTFS_KMS_PING_SECONDS` in the server startup path.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub ping_seconds: Option<i32>,
}
diff --git a/src/types/v1alpha1/tenant/workloads.rs b/src/types/v1alpha1/tenant/workloads.rs
index a5db7dd..a3546c8 100755
--- a/src/types/v1alpha1/tenant/workloads.rs
+++ b/src/types/v1alpha1/tenant/workloads.rs
@@ -25,9 +25,6 @@ const DEFAULT_RUN_AS_USER: i64 = 10001;
const DEFAULT_RUN_AS_GROUP: i64 = 10001;
const DEFAULT_FS_GROUP: i64 = 10001;
-const KMS_CERT_VOLUME_NAME: &str = "kms-tls";
-const KMS_CERT_MOUNT_PATH: &str = "/etc/rustfs/kms/tls";
-
fn volume_claim_template_name(shard: i32) -> String {
format!("{VOLUME_CLAIM_TEMPLATE_PREFIX}-{shard}")
}
@@ -222,6 +219,11 @@ impl Tenant {
/// Build KMS-related environment variables, pod volumes and container volume mounts
/// based on `spec.encryption`.
///
+ /// Names align with the RustFS server (`rustfs/src/init.rs`, `rustfs/src/config/cli.rs`) and
+ /// `rustfs_kms::KmsConfig::from_env()` (`rustfs/crates/kms/src/config.rs`) for forward compatibility.
+ /// The server currently reads `RUSTFS_KMS_ENABLE`, `RUSTFS_KMS_BACKEND`, `RUSTFS_KMS_KEY_DIR`,
+ /// `RUSTFS_KMS_DEFAULT_KEY_ID`, `RUSTFS_KMS_VAULT_ADDRESS`, and `RUSTFS_KMS_VAULT_TOKEN` on startup.
+ ///
/// Returns `(env_vars, pod_volumes, volume_mounts)`.
fn configure_kms(
&self,
@@ -238,17 +240,17 @@ impl Tenant {
}
let mut env = Vec::new();
- let mut volumes = Vec::new();
- let mut mounts = Vec::new();
+ let volumes: Vec<corev1::Volume> = vec![];
+ let mounts: Vec<corev1::VolumeMount> = vec![];
env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_BACKEND".to_owned(),
- value: Some(enc.backend.to_string()),
+ name: "RUSTFS_KMS_ENABLE".to_owned(),
+ value: Some("true".to_owned()),
..Default::default()
});
env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_AUTO_START".to_owned(),
- value: Some("true".to_owned()),
+ name: "RUSTFS_KMS_BACKEND".to_owned(),
+ value: Some(enc.backend.to_string()),
..Default::default()
});
@@ -256,17 +258,10 @@ impl Tenant {
KmsBackendType::Vault => {
if let Some(ref vault) = enc.vault {
env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_VAULT_ENDPOINT".to_owned(),
+ name: "RUSTFS_KMS_VAULT_ADDRESS".to_owned(),
value: Some(vault.endpoint.clone()),
..Default::default()
});
- if let Some(ref engine) = vault.engine {
- env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_VAULT_ENGINE".to_owned(),
- value: Some(engine.clone()),
- ..Default::default()
- });
- }
if let Some(ref ns) = vault.namespace {
env.push(corev1::EnvVar {
name: "RUSTFS_KMS_VAULT_NAMESPACE".to_owned(),
@@ -274,17 +269,23 @@ impl Tenant {
..Default::default()
});
}
- if let Some(ref prefix) = vault.prefix {
+ // Transit secrets engine mount (rustfs-kms default: `transit`).
+ env.push(corev1::EnvVar {
+ name: "RUSTFS_KMS_VAULT_MOUNT_PATH".to_owned(),
+ value: Some("transit".to_owned()),
+ ..Default::default()
+ });
+ if let Some(ref kv_mount) = vault.engine {
env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_VAULT_PREFIX".to_owned(),
- value: Some(prefix.clone()),
+ name: "RUSTFS_KMS_VAULT_KV_MOUNT".to_owned(),
+ value: Some(kv_mount.clone()),
..Default::default()
});
}
- if vault.tls_skip_verify == Some(true) {
+ if let Some(ref prefix) = vault.prefix {
env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_VAULT_TLS_SKIP_VERIFY".to_owned(),
- value: Some("true".to_owned()),
+ name: "RUSTFS_KMS_VAULT_KEY_PREFIX".to_owned(),
+ value: Some(prefix.clone()),
..Default::default()
});
}
@@ -306,10 +307,10 @@ impl Tenant {
});
if let Some(ar) = enc.vault.as_ref().and_then(|v| v.app_role.as_ref()) {
- if let Some(ref engine) = ar.engine {
+ if let Some(ref approle_engine) = ar.engine {
env.push(corev1::EnvVar {
name: "RUSTFS_KMS_VAULT_APPROLE_ENGINE".to_owned(),
- value: Some(engine.clone()),
+ value: Some(approle_engine.clone()),
..Default::default()
});
}
@@ -365,64 +366,6 @@ impl Tenant {
..Default::default()
});
}
-
- // Only mount TLS certificates when explicitly enabled
- let custom_certs = enc
- .vault
- .as_ref()
- .and_then(|v| v.custom_certificates)
- .unwrap_or(false);
-
- if custom_certs {
- volumes.push(corev1::Volume {
- name: KMS_CERT_VOLUME_NAME.to_string(),
- secret: Some(corev1::SecretVolumeSource {
- secret_name: Some(secret_ref.name.clone()),
- items: Some(vec![
- corev1::KeyToPath {
- key: "vault-ca-cert".to_string(),
- path: "ca.crt".to_string(),
- ..Default::default()
- },
- corev1::KeyToPath {
- key: "vault-client-cert".to_string(),
- path: "client.crt".to_string(),
- ..Default::default()
- },
- corev1::KeyToPath {
- key: "vault-client-key".to_string(),
- path: "client.key".to_string(),
- ..Default::default()
- },
- ]),
- optional: Some(true),
- ..Default::default()
- }),
- ..Default::default()
- });
- mounts.push(corev1::VolumeMount {
- name: KMS_CERT_VOLUME_NAME.to_string(),
- mount_path: KMS_CERT_MOUNT_PATH.to_string(),
- read_only: Some(true),
- ..Default::default()
- });
-
- env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_VAULT_TLS_CA".to_owned(),
- value: Some(format!("{KMS_CERT_MOUNT_PATH}/ca.crt")),
- ..Default::default()
- });
- env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_VAULT_TLS_CERT".to_owned(),
- value: Some(format!("{KMS_CERT_MOUNT_PATH}/client.crt")),
- ..Default::default()
- });
- env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_VAULT_TLS_KEY".to_owned(),
- value: Some(format!("{KMS_CERT_MOUNT_PATH}/client.key")),
- ..Default::default()
- });
- }
}
}
KmsBackendType::Local => {
@@ -430,31 +373,30 @@ impl Tenant {
let key_dir = local_cfg
.and_then(|l| l.key_directory.as_deref())
.unwrap_or("/data/kms-keys");
- let master_key_id = local_cfg
+ let default_key_id = local_cfg
.and_then(|l| l.master_key_id.as_deref())
.unwrap_or("default-master-key");
+ // `rustfs` server reads `RUSTFS_KMS_KEY_DIR` (see `build_local_kms_config`).
+ env.push(corev1::EnvVar {
+ name: "RUSTFS_KMS_KEY_DIR".to_owned(),
+ value: Some(key_dir.to_string()),
+ ..Default::default()
+ });
+ // Duplicate for `rustfs_kms::KmsConfig::from_env()` which uses `RUSTFS_KMS_LOCAL_KEY_DIR`.
env.push(corev1::EnvVar {
name: "RUSTFS_KMS_LOCAL_KEY_DIR".to_owned(),
value: Some(key_dir.to_string()),
..Default::default()
});
env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_LOCAL_MASTER_KEY_ID".to_owned(),
- value: Some(master_key_id.to_string()),
+ name: "RUSTFS_KMS_DEFAULT_KEY_ID".to_owned(),
+ value: Some(default_key_id.to_string()),
..Default::default()
});
}
}
- if let Some(ping) = enc.ping_seconds {
- env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_PING_SECONDS".to_owned(),
- value: Some(ping.to_string()),
- ..Default::default()
- });
- }
-
(env, volumes, mounts)
}
From f0efb53c04a6cef6a9013420c43d819af6da1628 Mon Sep 17 00:00:00 2001
From: GatewayJ <835269233@qq.com>
Date: Sun, 29 Mar 2026 23:11:30 +0800
Subject: [PATCH 2/3] feat(console): tenant events SSE stream and scoped
aggregation
Made-with: Cursor
---
CHANGELOG.md | 2 +
Cargo.lock | 13 +
Cargo.toml | 1 +
.../[name]/tenant-detail-client.tsx | 48 +++-
console-web/lib/api.ts | 20 +-
src/console/error.rs | 9 +-
src/console/handlers/events.rs | 159 +++++++++----
src/console/mod.rs | 1 +
src/console/openapi.rs | 8 +-
src/console/routes/mod.rs | 6 +-
src/console/tenant_event_scope.rs | 224 ++++++++++++++++++
11 files changed, 424 insertions(+), 67 deletions(-)
create mode 100644 src/console/tenant_event_scope.rs
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 39a0b09..6da2d0e 100755
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -28,6 +28,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Changed
+- **Console Tenant Events (breaking)**: Removed `GET /api/v1/namespaces/{namespace}/tenants/{tenant}/events`. Events are delivered via **SSE** `GET .../tenants/{tenant}/events/stream` (`text/event-stream`; each `data:` line is JSON `EventListResponse`). The tenant detail **Events** tab uses `EventSource` with the same session cookie as other API calls. Aggregates `core/v1` Events for the Tenant CR, Pods, StatefulSets, and PVCs scoped with `rustfs.tenant` / pool naming (see PRD); Tenant rows require `involvedObject.kind=Tenant` and matching name.
+
- **Tenant `spec.encryption.vault`**: Removed `tlsSkipVerify` and `customCertificates` (they were never wired to `rustfs-kms`). Vault TLS should rely on system-trusted CAs or TLS upstream. The project is still pre-production; if you have old YAML with these keys, remove them before apply.
- **KMS pod environment** ([`tenant/workloads.rs`](src/types/v1alpha1/tenant/workloads.rs)): Align variable names with the RustFS server and `rustfs-kms` (`RUSTFS_KMS_ENABLE`, `RUSTFS_KMS_VAULT_ADDRESS`, KV mount and key prefix, local `RUSTFS_KMS_KEY_DIR` / `RUSTFS_KMS_DEFAULT_KEY_ID`, etc.); remove Vault TLS certificate volume mounts; `ping_seconds` remains documented as reserved (not injected).
diff --git a/Cargo.lock b/Cargo.lock
index 2014f34..257c8b6 100755
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1548,6 +1548,7 @@ dependencies = [
"snafu",
"strum",
"tokio",
+ "tokio-stream",
"tokio-util",
"tower",
"tower-http",
@@ -2406,6 +2407,18 @@ dependencies = [
"tokio",
]
+[[package]]
+name = "tokio-stream"
+version = "0.1.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70"
+dependencies = [
+ "futures-core",
+ "pin-project-lite",
+ "tokio",
+ "tokio-util",
+]
+
[[package]]
name = "tokio-util"
version = "0.7.17"
diff --git a/Cargo.toml b/Cargo.toml
index a7e2bec..6d0606f 100755
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,6 +12,7 @@ chrono = { version = "0.4", features = ["serde"] }
const-str = "1.0.0"
serde = { version = "1.0.228", features = ["derive"] }
tokio = { version = "1.49.0", features = ["rt", "rt-multi-thread", "macros", "fs", "io-std", "io-util"] }
+tokio-stream = { version = "0.1", features = ["sync"] }
tokio-util = { version = "0.7", features = ["io", "compat"] }
futures = "0.3.31"
tracing = "0.1.44"
diff --git a/console-web/app/(dashboard)/tenants/[namespace]/[name]/tenant-detail-client.tsx b/console-web/app/(dashboard)/tenants/[namespace]/[name]/tenant-detail-client.tsx
index 84947e9..67bf2e2 100644
--- a/console-web/app/(dashboard)/tenants/[namespace]/[name]/tenant-detail-client.tsx
+++ b/console-web/app/(dashboard)/tenants/[namespace]/[name]/tenant-detail-client.tsx
@@ -28,6 +28,7 @@ import type {
PoolDetails,
PodListItem,
EventItem,
+ EventListResponse,
AddPoolRequest,
EncryptionInfoResponse,
UpdateEncryptionRequest,
@@ -72,6 +73,7 @@ export function TenantDetailClient({ namespace, name, initialTab, initialYamlEdi
const [pools, setPools] = useState<PoolDetails[]>([])
const [pods, setPods] = useState<PodListItem[]>([])
const [events, setEvents] = useState<EventItem[]>([])
+ const [eventsLoading, setEventsLoading] = useState(false)
const [loading, setLoading] = useState(true)
const [deleting, setDeleting] = useState(false)
const [addPoolOpen, setAddPoolOpen] = useState(false)
@@ -132,26 +134,20 @@ export function TenantDetailClient({ namespace, name, initialTab, initialYamlEdi
})
const loadTenant = async () => {
- const [detailResult, poolResult, podResult, eventResult] = await Promise.allSettled([
+ const [detailResult, poolResult, podResult] = await Promise.allSettled([
api.getTenant(namespace, name),
api.listPools(namespace, name),
api.listPods(namespace, name),
- api.listTenantEvents(namespace, name),
])
const detailOk = detailResult.status === "fulfilled"
const poolOk = poolResult.status === "fulfilled"
const podOk = podResult.status === "fulfilled"
- const eventOk = eventResult.status === "fulfilled"
if (detailOk && poolOk && podOk) {
setTenant(detailResult.value)
setPools(poolResult.value.pools)
setPods(podResult.value.pods)
- setEvents(eventOk ? eventResult.value.events : [])
- if (!eventOk) {
- toast.error(t("Events could not be loaded"))
- }
} else {
const err = !detailOk
? (detailResult as PromiseRejectedResult).reason
@@ -182,6 +178,36 @@ export function TenantDetailClient({ namespace, name, initialTab, initialYamlEdi
loadTenant()
}, [namespace, name]) // eslint-disable-line react-hooks/exhaustive-deps -- reload when route params change
+ useEffect(() => {
+ setEvents([])
+ }, [namespace, name])
+
+ useEffect(() => {
+ if (tab !== "events") return
+ setEventsLoading(true)
+ let cleaned = false
+ const url = api.getTenantEventsStreamUrl(namespace, name)
+ const es = new EventSource(url, { withCredentials: true })
+ es.onmessage = (ev) => {
+ try {
+ const data = JSON.parse(ev.data) as EventListResponse
+ setEvents(data.events ?? [])
+ } catch {
+ /* ignore malformed chunk */
+ }
+ setEventsLoading(false)
+ }
+ es.onerror = () => {
+ if (cleaned) return
+ toast.error(t("Events stream could not be loaded"))
+ setEventsLoading(false)
+ }
+ return () => {
+ cleaned = true
+ es.close()
+ }
+ }, [tab, namespace, name, t])
+
useEffect(() => {
setTenantYaml("")
setTenantYamlSnapshot("")
@@ -1181,7 +1207,13 @@ export function TenantDetailClient({ namespace, name, initialTab, initialYamlEdi
- {events.length === 0 ? (
+ {eventsLoading && events.length === 0 ? (
+
+
+
+
+
+ ) : events.length === 0 ? (
{t("No events")}
diff --git a/console-web/lib/api.ts b/console-web/lib/api.ts
index 2443a03..d1c24dd 100644
--- a/console-web/lib/api.ts
+++ b/console-web/lib/api.ts
@@ -13,7 +13,6 @@ import type {
PodListResponse,
PodDetails,
DeletePodResponse,
- EventListResponse,
NodeListResponse,
NamespaceListResponse,
ClusterResourcesResponse,
@@ -28,6 +27,7 @@ import type {
SecurityContextUpdateResponse,
} from "@/types/api"
import type { TopologyOverviewResponse } from "@/types/topology"
+import { getApiBaseUrl } from "@/lib/config"
const ns = (namespace: string) => `/namespaces/${encodeURIComponent(namespace)}`
const tenant = (namespace: string, name: string) => `${ns(namespace)}/tenants/${encodeURIComponent(name)}`
@@ -37,8 +37,6 @@ const pool = (namespace: string, name: string, poolName: string) =>
const pods = (namespace: string, name: string) => `${tenant(namespace, name)}/pods`
const pod = (namespace: string, name: string, podName: string) =>
`${pods(namespace, name)}/${encodeURIComponent(podName)}`
-const events = (namespace: string, tenantName: string) =>
- `${ns(namespace)}/tenants/${encodeURIComponent(tenantName)}/events`
const tenantYaml = (namespace: string, name: string) => `${tenant(namespace, name)}/yaml`
const tenantStateCounts = "/tenants/state-counts"
const tenantStateCountsByNs = (namespace: string) => `${ns(namespace)}/tenants/state-counts`
@@ -190,9 +188,19 @@ export async function updateSecurityContext(
return apiClient.put(securityContext(namespace, name), body)
}
-// ----- Events -----
-export async function listTenantEvents(namespace: string, tenantName: string): Promise {
- return apiClient.get(events(namespace, tenantName))
+// ----- Events (SSE) -----
+/** Absolute URL for `EventSource` (cookie session + `withCredentials`). */
+export function getTenantEventsStreamUrl(namespace: string, tenantName: string): string {
+ const path = `${ns(namespace)}/tenants/${encodeURIComponent(tenantName)}/events/stream`
+ if (typeof window === "undefined") {
+ return path
+ }
+ const base = getApiBaseUrl()
+ if (base.startsWith("http://") || base.startsWith("https://")) {
+ return `${base.replace(/\/$/, "")}${path}`
+ }
+ const baseNorm = base.startsWith("/") ? base : `/${base}`
+ return `${window.location.origin}${baseNorm}${path}`
}
// ----- Cluster -----
diff --git a/src/console/error.rs b/src/console/error.rs
index 5a33d47..497b3d3 100755
--- a/src/console/error.rs
+++ b/src/console/error.rs
@@ -52,9 +52,16 @@ pub enum Error {
Json { source: serde_json::Error },
}
-/// Map `kube::Error` to a console error (404 -> NotFound, 409 -> Conflict).
+/// Map `kube::Error` to a console error (403 -> Forbidden, 404 -> NotFound, 409 -> Conflict).
pub fn map_kube_error(e: kube::Error, not_found_resource: impl Into) -> Error {
match &e {
+ kube::Error::Api(ae) if ae.code == 403 => Error::Forbidden {
+ message: if ae.message.is_empty() {
+ "Kubernetes API access denied".to_string()
+ } else {
+ ae.message.clone()
+ },
+ },
kube::Error::Api(ae) if ae.code == 404 => Error::NotFound {
resource: not_found_resource.into(),
},
diff --git a/src/console/handlers/events.rs b/src/console/handlers/events.rs
index 5a88eb7..fa96737 100755
--- a/src/console/handlers/events.rs
+++ b/src/console/handlers/events.rs
@@ -12,65 +12,134 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::convert::Infallible;
+use std::result::Result as StdResult;
+use std::time::Duration;
+
use crate::console::{
- error::{Error, Result},
+ error::{self, Error, Result},
models::event::{EventItem, EventListResponse},
state::Claims,
+ tenant_event_scope::{discover_tenant_event_scope, merge_namespace_events},
+};
+use axum::{
+ Extension,
+ extract::Path,
+ response::sse::{Event, KeepAlive, Sse},
};
-use axum::{Extension, Json, extract::Path};
+use futures::StreamExt;
use k8s_openapi::api::core::v1 as corev1;
-use kube::{Api, Client, api::ListParams};
+use kube::{
+ Api, Client,
+ api::ListParams,
+ runtime::{WatchStreamExt, watcher},
+};
+use tokio_stream::wrappers::ReceiverStream;
-/// List Kubernetes events for objects named like the tenant.
+/// SSE stream of merged tenant-scoped Kubernetes events (PRD §5.1).
///
-/// On list failure (RBAC, field selector, etc.) returns an empty list and logs a warning so the
-/// tenant detail page does not 500.
-pub async fn list_tenant_events(
+/// Uses the same `session` cookie JWT as other console routes. Payload each tick is JSON
+/// `EventListResponse` (full snapshot, max [`tenant_event_scope::MAX_EVENTS_SNAPSHOT`]).
+pub async fn stream_tenant_events(
Path((namespace, tenant)): Path<(String, String)>,
Extension(claims): Extension,
-) -> Result> {
- let client = match create_client(&claims).await {
- Ok(c) => c,
- Err(e) => return Err(e),
- };
- let api: Api = Api::namespaced(client, &namespace);
+) -> Result>>> {
+ let client = create_client(&claims).await?;
+ // Preflight: fail the HTTP request if snapshot cannot be built (avoids 200 + empty SSE).
+ let first_json = build_snapshot_json(&client, &namespace, &tenant).await?;
+ let (tx, rx) = tokio::sync::mpsc::channel::>(16);
+ let ns = namespace.clone();
+ let tenant_name = tenant.clone();
- let field_selector = format!("involvedObject.name={}", tenant);
- let events = match api
- .list(&ListParams::default().fields(&field_selector))
+ tokio::spawn(async move {
+ if let Err(e) = run_event_sse_loop(client, ns, tenant_name, tx, first_json).await {
+ tracing::warn!("Tenant events SSE ended with error: {}", e);
+ }
+ });
+
+ let stream = ReceiverStream::new(rx);
+ Ok(Sse::new(stream).keep_alive(
+ KeepAlive::new()
+ .interval(Duration::from_secs(15))
+ .text("ping"),
+ ))
+}
+
+async fn run_event_sse_loop(
+ client: Client,
+ namespace: String,
+ tenant: String,
+ tx: tokio::sync::mpsc::Sender>,
+ first_json: String,
+) -> Result<()> {
+ if tx
+ .send(Ok(Event::default().data(first_json)))
.await
+ .is_err()
{
- Ok(ev) => ev,
- Err(e) => {
- tracing::warn!(
- "List events for tenant {}/{} failed (returning empty): {}",
- namespace,
- tenant,
- e
- );
- return Ok(Json(EventListResponse { events: vec![] }));
- }
- };
+ return Ok(());
+ }
+
+ let event_api: Api = Api::namespaced(client.clone(), &namespace);
+ let mut watch = watcher(event_api, watcher::Config::default())
+ .default_backoff()
+ .boxed();
+ let mut scope_tick = tokio::time::interval(Duration::from_secs(30));
+ scope_tick.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
- let items: Vec = events
- .items
- .into_iter()
- .map(|e| EventItem {
- event_type: e.type_.unwrap_or_default(),
- reason: e.reason.unwrap_or_default(),
- message: e.message.unwrap_or_default(),
- involved_object: format!(
- "{}/{}",
- e.involved_object.kind.unwrap_or_default(),
- e.involved_object.name.unwrap_or_default()
- ),
- first_timestamp: e.first_timestamp.map(|ts| ts.0.to_rfc3339()),
- last_timestamp: e.last_timestamp.map(|ts| ts.0.to_rfc3339()),
- count: e.count.unwrap_or(0),
- })
- .collect();
+ loop {
+ tokio::select! {
+ _ = scope_tick.tick() => {
+ let json = match build_snapshot_json(&client, &namespace, &tenant).await {
+ Ok(j) => j,
+ Err(e) => {
+ tracing::warn!("tenant events snapshot failed: {}", e);
+ continue;
+ }
+ };
+ if tx.send(Ok(Event::default().data(json))).await.is_err() {
+ return Ok(());
+ }
+ }
+ ev = watch.next() => {
+ match ev {
+ Some(Ok(_)) => {
+ let json = match build_snapshot_json(&client, &namespace, &tenant).await {
+ Ok(j) => j,
+ Err(e) => {
+ tracing::warn!("tenant events snapshot failed: {}", e);
+ continue;
+ }
+ };
+ if tx.send(Ok(Event::default().data(json))).await.is_err() {
+ return Ok(());
+ }
+ }
+ Some(Err(e)) => {
+ tracing::warn!("Kubernetes Event watch error: {}", e);
+ }
+ None => return Ok(()),
+ }
+ }
+ }
+ }
+}
- Ok(Json(EventListResponse { events: items }))
+async fn build_snapshot_json(client: &Client, namespace: &str, tenant: &str) -> Result {
+ let scope = discover_tenant_event_scope(client, namespace, tenant).await?;
+ let api: Api = Api::namespaced(client.clone(), namespace);
+ let list = api.list(&ListParams::default()).await.map_err(|e| {
+ tracing::warn!(
+ "List events for tenant {}/{} failed: {}",
+ namespace,
+ tenant,
+ e
+ );
+ error::map_kube_error(e, format!("Events for tenant '{}'", tenant))
+ })?;
+ let items: Vec = merge_namespace_events(&scope, list.items);
+ let body = EventListResponse { events: items };
+ serde_json::to_string(&body).map_err(|e| Error::Json { source: e })
}
/// Build a client impersonating the session token.
diff --git a/src/console/mod.rs b/src/console/mod.rs
index c46315e..f0ee932 100755
--- a/src/console/mod.rs
+++ b/src/console/mod.rs
@@ -25,3 +25,4 @@ pub mod openapi;
pub mod routes;
pub mod server;
pub mod state;
+pub mod tenant_event_scope;
diff --git a/src/console/openapi.rs b/src/console/openapi.rs
index a58944f..c92d070 100644
--- a/src/console/openapi.rs
+++ b/src/console/openapi.rs
@@ -69,7 +69,7 @@ use crate::console::models::topology::{
api_delete_pod,
api_restart_pod,
api_get_pod_logs,
- api_list_events,
+ api_stream_tenant_events,
api_list_nodes,
api_get_cluster_resources,
api_list_namespaces,
@@ -268,9 +268,9 @@ fn api_restart_pod(_body: Json) -> Json {
#[utoipa::path(get, path = "/api/v1/namespaces/{namespace}/tenants/{name}/pods/{pod}/logs", params(("namespace" = String, Path), ("name" = String, Path), ("pod" = String, Path), ("container" = Option, Query), ("tail_lines" = Option, Query), ("timestamps" = Option, Query)), responses((status = 200, description = "Plain text log output", content_type = "text/plain")), tag = "pods")]
fn api_get_pod_logs() {}
-// --- Events ---
-#[utoipa::path(get, path = "/api/v1/namespaces/{namespace}/tenants/{tenant}/events", params(("namespace" = String, Path), ("tenant" = String, Path)), responses((status = 200, body = EventListResponse)), tag = "events")]
-fn api_list_events() -> Json {
+// --- Events (SSE) ---
+#[utoipa::path(get, path = "/api/v1/namespaces/{namespace}/tenants/{tenant}/events/stream", params(("namespace" = String, Path), ("tenant" = String, Path)), responses((status = 200, description = "text/event-stream; each `data:` payload is JSON EventListResponse", body = EventListResponse, content_type = "application/json")), tag = "events")]
+fn api_stream_tenant_events() {
unimplemented!("Documentation only")
}
diff --git a/src/console/routes/mod.rs b/src/console/routes/mod.rs
index 20f8432..38facc2 100755
--- a/src/console/routes/mod.rs
+++ b/src/console/routes/mod.rs
@@ -124,11 +124,11 @@ pub fn pod_routes() -> Router {
)
}
-/// Kubernetes events for a tenant
+/// Kubernetes events for a tenant (SSE)
pub fn event_routes() -> Router {
Router::new().route(
- "/namespaces/:namespace/tenants/:tenant/events",
- get(handlers::events::list_tenant_events),
+ "/namespaces/:namespace/tenants/:tenant/events/stream",
+ get(handlers::events::stream_tenant_events),
)
}
diff --git a/src/console/tenant_event_scope.rs b/src/console/tenant_event_scope.rs
new file mode 100644
index 0000000..6cd5dc7
--- /dev/null
+++ b/src/console/tenant_event_scope.rs
@@ -0,0 +1,224 @@
+// Copyright 2025 RustFS Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Discover which `(involvedObject.kind, involvedObject.name)` pairs belong to a Tenant for event aggregation (PRD §4.1).
+
+use std::cmp::Reverse;
+use std::collections::{HashMap, HashSet};
+
+use k8s_openapi::api::core::v1 as corev1;
+use kube::{Api, Client, ResourceExt, api::ListParams};
+
+use crate::console::{
+ error::{self, Result},
+ models::event::EventItem,
+};
+use crate::types::v1alpha1::tenant::Tenant;
+
+/// `involvedObject.kind` for the Tenant CR (matches CRD `names.kind`).
+pub const TENANT_CR_KIND: &str = "Tenant";
+
+/// Default max events per SSE snapshot (PRD §5.1).
+pub const MAX_EVENTS_SNAPSHOT: usize = 200;
+
+/// Label selector `rustfs.tenant=` — must match [`crate::console::handlers::pods::list_pods`].
+pub fn tenant_label_selector(tenant: &str) -> String {
+ format!("rustfs.tenant={}", tenant)
+}
+
+/// Allowed `(kind, name)` pairs for `Event.involvedObject` in this tenant scope.
+#[derive(Debug, Clone)]
+pub struct TenantEventScope {
+ /// Tenant name (scope metadata; filtering uses [`Self::involved`]).
+ #[allow(dead_code)]
+ pub tenant_name: String,
+ /// `(involvedObject.kind, involvedObject.name)` using API kind strings.
+ pub involved: HashSet<(String, String)>,
+}
+
+impl TenantEventScope {
+ /// Returns true if this event's regarding object is in scope.
+ pub fn matches_involved(&self, ev: &corev1::Event) -> bool {
+ let obj = &ev.involved_object;
+ let kind = obj.kind.clone().unwrap_or_default();
+ let name = obj.name.clone().unwrap_or_default();
+ if kind.is_empty() || name.is_empty() {
+ return false;
+ }
+ self.involved.contains(&(kind, name))
+ }
+}
+
+/// Load Pod / StatefulSet / PVC names and Tenant CR row — same discovery rules as `list_pods` / `list_pools`.
+pub async fn discover_tenant_event_scope(
+ client: &Client,
+ namespace: &str,
+ tenant: &str,
+) -> Result {
+ let tenant_api: Api = Api::namespaced(client.clone(), namespace);
+ let t = tenant_api
+ .get(tenant)
+ .await
+ .map_err(|e| error::map_kube_error(e, format!("Tenant '{}'", tenant)))?;
+
+ let mut involved: HashSet<(String, String)> = HashSet::new();
+ involved.insert((TENANT_CR_KIND.to_string(), tenant.to_string()));
+
+ for pool in &t.spec.pools {
+ let ss_name = format!("{}-{}", tenant, pool.name);
+ involved.insert(("StatefulSet".to_string(), ss_name));
+ }
+
+ let pod_api: Api = Api::namespaced(client.clone(), namespace);
+ let pods = pod_api
+ .list(&ListParams::default().labels(&tenant_label_selector(tenant)))
+ .await
+ .map_err(|e| error::map_kube_error(e, format!("Pods for tenant '{}'", tenant)))?;
+ for p in pods.items {
+ involved.insert(("Pod".to_string(), p.name_any()));
+ }
+
+ let pvc_api: Api = Api::namespaced(client.clone(), namespace);
+ let pvcs = pvc_api
+ .list(&ListParams::default().labels(&tenant_label_selector(tenant)))
+ .await
+ .map_err(|e| {
+ error::map_kube_error(e, format!("PersistentVolumeClaims for tenant '{}'", tenant))
+ })?;
+ for pvc in pvcs.items {
+ involved.insert(("PersistentVolumeClaim".to_string(), pvc.name_any()));
+ }
+
+ Ok(TenantEventScope {
+ tenant_name: tenant.to_string(),
+ involved,
+ })
+}
+
+/// Filter namespace events to scope, dedupe, sort newest first, cap at [`MAX_EVENTS_SNAPSHOT`].
+pub fn merge_namespace_events(scope: &TenantEventScope, raw: Vec) -> Vec {
+ let filtered: Vec = raw
+ .into_iter()
+ .filter(|e| scope.matches_involved(e))
+ .collect();
+
+ // Dedupe by uid
+ let mut by_uid: HashMap = HashMap::new();
+ let mut no_uid: Vec = Vec::new();
+ for e in filtered {
+ if let Some(uid) = e.metadata.uid.clone() {
+ by_uid.insert(uid, e);
+ } else {
+ no_uid.push(e);
+ }
+ }
+ let mut merged: Vec = by_uid.into_values().collect();
+
+ // Weak dedupe for events without uid
+ let mut seen_weak: HashSet<(String, String, String, String, String)> = HashSet::new();
+ for e in no_uid {
+ let weak = weak_dedup_key(&e);
+ if seen_weak.insert(weak) {
+ merged.push(e);
+ }
+ }
+
+ merged.sort_by_key(|b| Reverse(event_sort_key(b)));
+ merged.truncate(MAX_EVENTS_SNAPSHOT);
+ merged.into_iter().map(core_event_to_item).collect()
+}
+
+fn weak_dedup_key(e: &corev1::Event) -> (String, String, String, String, String) {
+ let kind = e.involved_object.kind.clone().unwrap_or_default();
+ let name = e.involved_object.name.clone().unwrap_or_default();
+ let reason = e.reason.clone().unwrap_or_default();
+ let first = e
+ .first_timestamp
+ .as_ref()
+ .map(|t| t.0.to_rfc3339())
+ .unwrap_or_default();
+ let msg = e.message.clone().unwrap_or_default();
+ (kind, name, reason, first, msg)
+}
+
+fn event_sort_key(e: &corev1::Event) -> chrono::DateTime {
+ if let Some(ref et) = e.event_time {
+ return et.0;
+ }
+ if let Some(ref lt) = e.last_timestamp {
+ return lt.0;
+ }
+ if let Some(ref ft) = e.first_timestamp {
+ return ft.0;
+ }
+ chrono::DateTime::from_timestamp(0, 0).unwrap_or_else(chrono::Utc::now)
+}
+
+fn core_event_to_item(e: corev1::Event) -> EventItem {
+ EventItem {
+ event_type: e.type_.unwrap_or_default(),
+ reason: e.reason.unwrap_or_default(),
+ message: e.message.unwrap_or_default(),
+ involved_object: format!(
+ "{}/{}",
+ e.involved_object.kind.unwrap_or_default(),
+ e.involved_object.name.unwrap_or_default()
+ ),
+ first_timestamp: e.first_timestamp.map(|ts| ts.0.to_rfc3339()),
+ last_timestamp: e.last_timestamp.map(|ts| ts.0.to_rfc3339()),
+ count: e.count.unwrap_or(0),
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
+
+ fn mk_event(kind: &str, name: &str, uid: Option<&str>) -> corev1::Event {
+ corev1::Event {
+ involved_object: corev1::ObjectReference {
+ kind: Some(kind.to_string()),
+ name: Some(name.to_string()),
+ ..Default::default()
+ },
+ metadata: ObjectMeta {
+ uid: uid.map(String::from),
+ ..Default::default()
+ },
+ ..Default::default()
+ }
+ }
+
+ #[test]
+ fn tenant_kind_required_for_tenant_name() {
+ let scope = TenantEventScope {
+ tenant_name: "t1".to_string(),
+ involved: HashSet::from([
+ (TENANT_CR_KIND.to_string(), "t1".to_string()),
+ ("Pod".to_string(), "p1".to_string()),
+ ]),
+ };
+ let raw = vec![
+ mk_event("Pod", "other", Some("a")),
+ mk_event("ConfigMap", "t1", Some("b")),
+ mk_event(TENANT_CR_KIND, "t1", Some("c")),
+ ];
+ let items = merge_namespace_events(&scope, raw);
+ let objs: Vec<&str> = items.iter().map(|i| i.involved_object.as_str()).collect();
+ assert!(objs.contains(&"Tenant/t1"));
+ assert!(!objs.iter().any(|s| *s == "Pod/other"));
+ assert!(!objs.iter().any(|s| *s == "ConfigMap/t1"));
+ }
+}
From 68de4d643cd0a760048c9d2522c104501ee24ade Mon Sep 17 00:00:00 2001
From: GatewayJ <835269233@qq.com>
Date: Mon, 30 Mar 2026 12:51:57 +0800
Subject: [PATCH 3/3] feat: tenant events on events.k8s.io, KMS CRD alignment,
RBAC
Made-with: Cursor
---
CHANGELOG.md | 10 +-
.../[name]/tenant-detail-client.tsx | 198 +++------------
console-web/i18n/locales/en-US.json | 31 ++-
console-web/i18n/locales/zh-CN.json | 11 +-
console-web/types/api.ts | 24 +-
deploy/k8s-dev/console-rbac.yaml | 3 +
deploy/k8s-dev/operator-rbac.yaml | 7 +
deploy/rustfs-operator/crds/tenant-crd.yaml | 72 +-----
deploy/rustfs-operator/crds/tenant.yaml | 72 +-----
.../templates/clusterrole.yaml | 10 +-
.../templates/console-clusterrole.yaml | 3 +
docs/prd-tenant-events-sse.md | 127 ++++++++++
docs/tech-design-tenant-events-sse.md | 239 ++++++++++++++++++
src/console/handlers/encryption.rs | 31 +--
src/console/handlers/events.rs | 80 +++---
src/console/models/encryption.rs | 37 +--
src/console/openapi.rs | 2 +-
src/console/tenant_event_scope.rs | 174 ++++++++-----
src/context.rs | 43 +---
src/types/v1alpha1/encryption.rs | 153 ++---------
src/types/v1alpha1/tenant/workloads.rs | 154 +++--------
21 files changed, 735 insertions(+), 746 deletions(-)
create mode 100644 docs/prd-tenant-events-sse.md
create mode 100644 docs/tech-design-tenant-events-sse.md
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6da2d0e..61d60b7 100755
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -18,6 +18,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Fixed
+- **Console RBAC**: `ClusterRole` for the console (Helm [`deploy/rustfs-operator/templates/console-clusterrole.yaml`](deploy/rustfs-operator/templates/console-clusterrole.yaml) and [`deploy/k8s-dev/console-rbac.yaml`](deploy/k8s-dev/console-rbac.yaml)) now includes `get` / `list` / `watch` on **`events.k8s.io` `events`**, required for Tenant Events aggregation (in addition to `events` in the core (`""`) API group).
+
+- **Operator RBAC**: `ClusterRole` for the operator ([`deploy/rustfs-operator/templates/clusterrole.yaml`](deploy/rustfs-operator/templates/clusterrole.yaml) and [`deploy/k8s-dev/operator-rbac.yaml`](deploy/k8s-dev/operator-rbac.yaml)) now includes **`events.k8s.io` `events`** (`get` / `list` / `watch` / `create` / `patch`). Dev scripts (e.g. [`scripts/deploy/deploy-rustfs-4node.sh`](scripts/deploy/deploy-rustfs-4node.sh)) often use `kubectl create token rustfs-operator` for Console login; that identity must be able to list **events.k8s.io** Events for Tenant Events SSE.
+
+- **Operator RBAC (PVC access)**: `ClusterRole` for the operator ServiceAccount now includes `get` / `list` / `watch` on `persistentvolumeclaims` (Helm [`deploy/rustfs-operator/templates/clusterrole.yaml`](deploy/rustfs-operator/templates/clusterrole.yaml) and [`deploy/k8s-dev/operator-rbac.yaml`](deploy/k8s-dev/operator-rbac.yaml)). Tenant event scope discovery lists PVCs labeled for the tenant; without this rule, the API returned `Forbidden` when the request identity was `rustfs-system:rustfs-operator`.
+
- **`console-web` / `make pre-commit`**: `npm run lint` now runs `eslint .` (bare `eslint` only printed CLI help). Added `format` / `format:check` scripts; [`Makefile`](Makefile) `console-fmt` and `console-fmt-check` call them so Prettier resolves from `node_modules` after `npm install` in `console-web/`.
- **Tenant `Pool` CRD validation (CEL)**: Match the operator console API — require `servers × volumesPerServer >= 4` for every pool, and `>= 6` total volumes when `servers == 3` (fixes the previous 3-server rule using `< 4` in CEL). Regenerated [`deploy/rustfs-operator/crds/tenant-crd.yaml`](deploy/rustfs-operator/crds/tenant-crd.yaml) and [`tenant.yaml`](deploy/rustfs-operator/crds/tenant.yaml). Added [`validate_pool_total_volumes`](src/types/v1alpha1/pool.rs) as the shared Rust implementation used by [`src/console/handlers/pools.rs`](src/console/handlers/pools.rs).
@@ -28,11 +34,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Changed
-- **Console Tenant Events (breaking)**: Removed `GET /api/v1/namespaces/{namespace}/tenants/{tenant}/events`. Events are delivered via **SSE** `GET .../tenants/{tenant}/events/stream` (`text/event-stream`; each `data:` line is JSON `EventListResponse`). The tenant detail **Events** tab uses `EventSource` with the same session cookie as other API calls. Aggregates `core/v1` Events for the Tenant CR, Pods, StatefulSets, and PVCs scoped with `rustfs.tenant` / pool naming (see PRD); Tenant rows require `involvedObject.kind=Tenant` and matching name.
+- **Console Tenant Events (breaking)**: Removed `GET /api/v1/namespaces/{namespace}/tenants/{tenant}/events`. Events are delivered via **SSE** `GET .../tenants/{tenant}/events/stream` (`text/event-stream`). Payloads use named events: `snapshot` (JSON `EventListResponse`) and `stream_error` (JSON `{ "message" }` on watch/snapshot failures). Listing uses **`events.k8s.io/v1`** with per-resource field selectors `regarding.kind` + `regarding.name` (bounded concurrency) instead of listing all namespace events. The **Events** tab uses `EventSource` (`withCredentials`) and listens for `snapshot` / `stream_error`; transport `error` toasts are deduplicated until `onopen`. Aggregates events for the Tenant CR, Pods, StatefulSets, and PVCs per PRD scope; legacy **`core/v1` Events** not mirrored to `events.k8s.io` may be absent.
- **Tenant `spec.encryption.vault`**: Removed `tlsSkipVerify` and `customCertificates` (they were never wired to `rustfs-kms`). Vault TLS should rely on system-trusted CAs or TLS upstream. The project is still pre-production; if you have old YAML with these keys, remove them before apply.
-- **KMS pod environment** ([`tenant/workloads.rs`](src/types/v1alpha1/tenant/workloads.rs)): Align variable names with the RustFS server and `rustfs-kms` (`RUSTFS_KMS_ENABLE`, `RUSTFS_KMS_VAULT_ADDRESS`, KV mount and key prefix, local `RUSTFS_KMS_KEY_DIR` / `RUSTFS_KMS_DEFAULT_KEY_ID`, etc.); remove Vault TLS certificate volume mounts; `ping_seconds` remains documented as reserved (not injected).
+- **Tenant `spec.encryption` (breaking)**: CRD and Console API now match **RustFS server startup** (`rustfs/src/init.rs` / `config/cli.rs`) only. `vault` retains **`endpoint`**; `local` retains **`keyDirectory`**; optional **`defaultKeyId`** maps to `RUSTFS_KMS_DEFAULT_KEY_ID`. Removed `pingSeconds`, Vault `engine` / `namespace` / `prefix` / `authType` / `appRole`, and `local.masterKeyId`. Injected pod env vars are only those the RustFS binary reads (no unused `RUSTFS_KMS_VAULT_*` tuning). Regenerated [`deploy/rustfs-operator/crds/tenant-crd.yaml`](deploy/rustfs-operator/crds/tenant-crd.yaml) and [`tenant.yaml`](deploy/rustfs-operator/crds/tenant.yaml).
- **Local KMS** ([`context.rs`](src/context.rs)): Validate absolute `keyDirectory` and require a single server replica across pools (multi-replica tenants need Vault or shared storage).
diff --git a/console-web/app/(dashboard)/tenants/[namespace]/[name]/tenant-detail-client.tsx b/console-web/app/(dashboard)/tenants/[namespace]/[name]/tenant-detail-client.tsx
index 67bf2e2..6b3b183 100644
--- a/console-web/app/(dashboard)/tenants/[namespace]/[name]/tenant-detail-client.tsx
+++ b/console-web/app/(dashboard)/tenants/[namespace]/[name]/tenant-detail-client.tsx
@@ -2,7 +2,7 @@
import { useRouter } from "next/navigation"
import Link from "next/link"
-import { useEffect, useState } from "react"
+import { useEffect, useRef, useState } from "react"
import { useTranslation } from "react-i18next"
import { toast } from "sonner"
import {
@@ -74,6 +74,8 @@ export function TenantDetailClient({ namespace, name, initialTab, initialYamlEdi
const [pods, setPods] = useState([])
const [events, setEvents] = useState([])
const [eventsLoading, setEventsLoading] = useState(false)
+ /** Suppress repeated EventSource `error` toasts until a successful reconnect (`onopen`). */
+ const eventsStreamTransportErrorToastRef = useRef(false)
const [loading, setLoading] = useState(true)
const [deleting, setDeleting] = useState(false)
const [addPoolOpen, setAddPoolOpen] = useState(false)
@@ -106,21 +108,12 @@ export function TenantDetailClient({ namespace, name, initialTab, initialYamlEdi
const [encBackend, setEncBackend] = useState<"local" | "vault">("local")
const [encVault, setEncVault] = useState({
endpoint: "",
- engine: "",
- namespace: "",
- prefix: "",
- authType: "token",
- })
- const [encAppRole, setEncAppRole] = useState({
- engine: "",
- retrySeconds: "",
})
const [encLocal, setEncLocal] = useState({
keyDirectory: "",
- masterKeyId: "",
})
+ const [encDefaultKeyId, setEncDefaultKeyId] = useState("")
const [encKmsSecretName, setEncKmsSecretName] = useState("")
- const [encPingSeconds, setEncPingSeconds] = useState("")
// Security tab state
const [secCtxLoaded, setSecCtxLoaded] = useState(false)
@@ -185,20 +178,35 @@ export function TenantDetailClient({ namespace, name, initialTab, initialYamlEdi
useEffect(() => {
if (tab !== "events") return
setEventsLoading(true)
+ eventsStreamTransportErrorToastRef.current = false
let cleaned = false
const url = api.getTenantEventsStreamUrl(namespace, name)
const es = new EventSource(url, { withCredentials: true })
- es.onmessage = (ev) => {
+ es.onopen = () => {
+ eventsStreamTransportErrorToastRef.current = false
+ }
+ es.addEventListener("snapshot", (ev: MessageEvent) => {
try {
const data = JSON.parse(ev.data) as EventListResponse
setEvents(data.events ?? [])
} catch {
- /* ignore malformed chunk */
+ toast.error(t("Events data could not be parsed"))
}
setEventsLoading(false)
- }
+ })
+ es.addEventListener("stream_error", (ev: MessageEvent) => {
+ try {
+ const j = JSON.parse(ev.data) as { message?: string }
+ toast.error(j.message?.trim() || t("Events stream error"))
+ } catch {
+ toast.error(t("Events stream error"))
+ }
+ setEventsLoading(false)
+ })
es.onerror = () => {
if (cleaned) return
+ if (eventsStreamTransportErrorToastRef.current) return
+ eventsStreamTransportErrorToastRef.current = true
toast.error(t("Events stream could not be loaded"))
setEventsLoading(false)
}
@@ -400,26 +408,15 @@ export function TenantDetailClient({ namespace, name, initialTab, initialYamlEdi
if (data.vault) {
setEncVault({
endpoint: data.vault.endpoint || "",
- engine: data.vault.engine || "",
- namespace: data.vault.namespace || "",
- prefix: data.vault.prefix || "",
- authType: data.vault.authType || "token",
})
- if (data.vault.appRole) {
- setEncAppRole({
- engine: data.vault.appRole.engine || "",
- retrySeconds: data.vault.appRole.retrySeconds?.toString() || "",
- })
- }
}
if (data.local) {
setEncLocal({
keyDirectory: data.local.keyDirectory || "",
- masterKeyId: data.local.masterKeyId || "",
})
}
+ setEncDefaultKeyId(data.defaultKeyId || "")
setEncKmsSecretName(data.kmsSecretName || "")
- setEncPingSeconds(data.pingSeconds?.toString() || "")
} catch (e) {
const err = e as ApiError
toast.error(err.message || t("Failed to load encryption config"))
@@ -441,26 +438,15 @@ export function TenantDetailClient({ namespace, name, initialTab, initialYamlEdi
enabled: encEnabled,
backend: encBackend,
kmsSecretName: encKmsSecretName || undefined,
- pingSeconds: encPingSeconds ? parseInt(encPingSeconds, 10) : undefined,
+ defaultKeyId: encDefaultKeyId.trim() || undefined,
}
if (encBackend === "vault") {
body.vault = {
endpoint: encVault.endpoint,
- engine: encVault.engine || undefined,
- namespace: encVault.namespace || undefined,
- prefix: encVault.prefix || undefined,
- authType: encVault.authType || undefined,
- }
- if (encVault.authType === "approle") {
- body.vault.appRole = {
- engine: encAppRole.engine || undefined,
- retrySeconds: encAppRole.retrySeconds ? parseInt(encAppRole.retrySeconds, 10) : undefined,
- }
}
} else {
body.local = {
keyDirectory: encLocal.keyDirectory || undefined,
- masterKeyId: encLocal.masterKeyId || undefined,
}
}
const res = await api.updateEncryption(namespace, name, body)
@@ -878,7 +864,9 @@ export function TenantDetailClient({ namespace, name, initialTab, initialYamlEdi
{t("Encryption")}
- {t("Configure server-side encryption (SSE) with a KMS backend. RustFS supports Local and Vault.")}
+ {t(
+ "Configure SSE KMS to match the RustFS server: Local (single-server tenant) or Vault (token in Secret). Vault path layout is fixed in RustFS.",
+ )}
@@ -937,7 +925,7 @@ export function TenantDetailClient({ namespace, name, initialTab, initialYamlEdi
{t("Vault Configuration")}
-
-
- {/* Auth type selector */}
-
-
- {/* AppRole section */}
- {encVault.authType === "approle" && (
-
-
-
App Role
-
- {t("Not yet implemented in backend")}
-
-
-
-
- {t(
- "AppRole ID and Secret are stored in the KMS Secret (keys: vault-approle-id, vault-approle-secret).",
- )}
-
-
- )}
+
+ {t("RustFS uses fixed Transit/KV paths; only address and token are configurable.")}
+
)}
@@ -1050,38 +954,18 @@ export function TenantDetailClient({ namespace, name, initialTab, initialYamlEdi
onChange={(e) => setEncLocal((l) => ({ ...l, keyDirectory: e.target.value }))}
/>
-
-
- setEncLocal((l) => ({ ...l, masterKeyId: e.target.value }))}
- />
-
)}
- {/* Status — Ping is mainly useful for remote backends (Vault) */}
- {encBackend === "vault" && (
-
-
{t("Status")}
-
-
-
-
setEncPingSeconds(e.target.value)}
- />
-
- {t("Health check interval for KMS connectivity.")}
-
-
-
-
- )}
+
+
+ setEncDefaultKeyId(e.target.value)}
+ />
+
{/* KMS Secret name */}
@@ -1093,9 +977,7 @@ export function TenantDetailClient({ namespace, name, initialTab, initialYamlEdi
/>
{encBackend === "vault"
- ? encVault.authType === "approle"
- ? t("Secret must contain 'vault-approle-id' and 'vault-approle-secret'.")
- : t("Secret must contain key 'vault-token'.")
+ ? t("Required for Vault: Secret must contain key 'vault-token'.")
: t("Not required for Local backend.")}
diff --git a/console-web/i18n/locales/en-US.json b/console-web/i18n/locales/en-US.json
index c8e7711..5986281 100755
--- a/console-web/i18n/locales/en-US.json
+++ b/console-web/i18n/locales/en-US.json
@@ -205,5 +205,34 @@
"Saving...": "Saving...",
"No changes to save": "No changes to save",
"Tenant updated": "Tenant updated",
- "Update failed": "Update failed"
+ "Update failed": "Update failed",
+ "Events stream could not be loaded": "Events stream could not be loaded",
+ "Events data could not be parsed": "Events data could not be parsed",
+ "Events stream error": "Events stream error",
+ "Encryption": "Encryption",
+ "Configure SSE KMS to match the RustFS server: Local (single-server tenant) or Vault (token in Secret). Vault path layout is fixed in RustFS.": "Configure SSE KMS to match the RustFS server: Local (single-server tenant) or Vault (token in Secret). Vault path layout is fixed in RustFS.",
+ "Enable Encryption": "Enable Encryption",
+ "KMS Backend": "KMS Backend",
+ "Vault Configuration": "Vault Configuration",
+ "Local KMS Configuration": "Local KMS Configuration",
+ "Key Directory": "Key Directory",
+ "RustFS uses fixed Transit/KV paths; only address and token are configurable.": "RustFS uses fixed Transit/KV paths; only address and token are configurable.",
+ "Default Key ID": "Default Key ID",
+ "Optional — maps to RUSTFS_KMS_DEFAULT_KEY_ID": "Optional — maps to RUSTFS_KMS_DEFAULT_KEY_ID",
+ "KMS Secret Name": "KMS Secret Name",
+ "Secret containing vault-token": "Secret containing vault-token",
+ "Required for Vault: Secret must contain key 'vault-token'.": "Required for Vault: Secret must contain key 'vault-token'.",
+ "Not required for Local backend.": "Not required for Local backend.",
+ "Failed to load encryption config": "Failed to load encryption config",
+ "Encryption config updated": "Encryption config updated",
+ "Loading encryption config...": "Loading encryption config...",
+ "Reload": "Reload",
+ "Vault endpoint is required": "Vault endpoint is required",
+ "Security": "Security",
+ "SecurityContext": "SecurityContext",
+ "Override Pod SecurityContext for RustFS pods (runAsUser, runAsGroup, fsGroup). Changes apply after Pods are recreated.": "Override Pod SecurityContext for RustFS pods (runAsUser, runAsGroup, fsGroup). Changes apply after Pods are recreated.",
+ "SecurityContext updated": "SecurityContext updated",
+ "Failed to load security context": "Failed to load security context",
+ "Do not run as Root": "Do not run as Root",
+ "Loading...": "Loading..."
}
diff --git a/console-web/i18n/locales/zh-CN.json b/console-web/i18n/locales/zh-CN.json
index 8ba21be..b1fa96b 100755
--- a/console-web/i18n/locales/zh-CN.json
+++ b/console-web/i18n/locales/zh-CN.json
@@ -201,7 +201,11 @@
"Tenant YAML updated": "租户 YAML 已更新",
"Failed to load tenant YAML": "加载租户 YAML 失败",
"Encryption": "加密",
- "Configure server-side encryption (SSE) with a KMS backend. RustFS supports Local and Vault.": "配置服务端加密 (SSE) 的 KMS 后端。RustFS 支持 Local 和 Vault。",
+ "Configure SSE KMS to match the RustFS server: Local (single-server tenant) or Vault (token in Secret). Vault path layout is fixed in RustFS.": "按 RustFS 服务端配置 SSE KMS:Local(单节点租户)或 Vault(Secret 中的 token)。Vault 路径布局由 RustFS 固定。",
+ "RustFS uses fixed Transit/KV paths; only address and token are configurable.": "RustFS 固定 Transit/KV 路径;仅可配置地址与 token。",
+ "Default Key ID": "默认密钥 ID",
+ "Optional — maps to RUSTFS_KMS_DEFAULT_KEY_ID": "可选 — 对应 RUSTFS_KMS_DEFAULT_KEY_ID",
+ "Required for Vault: Secret must contain key 'vault-token'.": "Vault 必填:Secret 须包含键 `vault-token`。",
"Enable Encryption": "启用加密",
"KMS Backend": "KMS 后端",
"Vault Configuration": "Vault 配置",
@@ -244,5 +248,8 @@
"Saving...": "保存中...",
"No changes to save": "没有可保存的修改",
"Tenant updated": "租户已更新",
- "Update failed": "更新失败"
+ "Update failed": "更新失败",
+ "Events stream could not be loaded": "事件流无法加载",
+ "Events data could not be parsed": "事件数据无法解析",
+ "Events stream error": "事件流错误"
}
diff --git a/console-web/types/api.ts b/console-web/types/api.ts
index 008b5f6..558681c 100644
--- a/console-web/types/api.ts
+++ b/console-web/types/api.ts
@@ -259,23 +259,12 @@ export interface EventListResponse {
}
// ----- Encryption -----
-export interface AppRoleInfo {
- engine: string | null
- retrySeconds: number | null
-}
-
export interface VaultInfo {
endpoint: string
- engine: string | null
- namespace: string | null
- prefix: string | null
- authType: string | null
- appRole: AppRoleInfo | null
}
export interface LocalKmsInfo {
keyDirectory: string | null
- masterKeyId: string | null
}
export interface SecurityContextInfo {
@@ -291,7 +280,7 @@ export interface EncryptionInfoResponse {
vault: VaultInfo | null
local: LocalKmsInfo | null
kmsSecretName: string | null
- pingSeconds: number | null
+ defaultKeyId: string | null
securityContext: SecurityContextInfo | null
}
@@ -300,21 +289,12 @@ export interface UpdateEncryptionRequest {
backend?: string
vault?: {
endpoint: string
- engine?: string
- namespace?: string
- prefix?: string
- authType?: string
- appRole?: {
- engine?: string
- retrySeconds?: number
- }
}
local?: {
keyDirectory?: string
- masterKeyId?: string
}
kmsSecretName?: string
- pingSeconds?: number
+ defaultKeyId?: string
}
export interface EncryptionUpdateResponse {
diff --git a/deploy/k8s-dev/console-rbac.yaml b/deploy/k8s-dev/console-rbac.yaml
index f776b0c..1be1043 100755
--- a/deploy/k8s-dev/console-rbac.yaml
+++ b/deploy/k8s-dev/console-rbac.yaml
@@ -36,6 +36,9 @@ rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch"]
+ - apiGroups: ["events.k8s.io"]
+ resources: ["events"]
+ verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
resources: ["statefulsets"]
verbs: ["get", "list", "watch"]
diff --git a/deploy/k8s-dev/operator-rbac.yaml b/deploy/k8s-dev/operator-rbac.yaml
index efb9574..9f776d4 100755
--- a/deploy/k8s-dev/operator-rbac.yaml
+++ b/deploy/k8s-dev/operator-rbac.yaml
@@ -37,6 +37,13 @@ rules:
resources: ["statefulsets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["get", "list", "watch", "create", "patch"]
+ # Tenant Events SSE / discovery uses events.k8s.io/v1 (demo login often uses rustfs-operator token)
+ - apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "patch"]
---
diff --git a/deploy/rustfs-operator/crds/tenant-crd.yaml b/deploy/rustfs-operator/crds/tenant-crd.yaml
index fcd8740..84120e3 100644
--- a/deploy/rustfs-operator/crds/tenant-crd.yaml
+++ b/deploy/rustfs-operator/crds/tenant-crd.yaml
@@ -54,19 +54,21 @@ spec:
properties:
backend:
default: local
- description: 'KMS backend type: `local` or `vault`.'
+ description: 'KMS backend: `local` or `vault`.'
enum:
- local
- vault
type: string
+ defaultKeyId:
+ description: Optional default SSE key id (`RUSTFS_KMS_DEFAULT_KEY_ID`).
+ nullable: true
+ type: string
enabled:
default: false
description: Enable server-side encryption. When `false`, all other fields are ignored.
type: boolean
kmsSecret:
- description: |-
- Reference to a Secret containing sensitive KMS credentials
- (Vault token or AppRole credentials).
+ description: Secret holding `vault-token` when using Vault.
nullable: true
properties:
name:
@@ -76,70 +78,20 @@ spec:
- name
type: object
local:
- description: 'Local file-based settings (optional when `backend: local`).'
+ description: 'Local: optional key directory override.'
nullable: true
properties:
keyDirectory:
- description: |-
- Absolute directory for KMS key files inside the container (default: `/data/kms-keys`).
- Must be absolute; RustFS validates this for the local backend.
- nullable: true
- type: string
- masterKeyId:
- description: Default KMS key id for SSE (maps to `RUSTFS_KMS_DEFAULT_KEY_ID` in the RustFS binary).
+ description: 'Absolute directory for KMS key files (default: `/data/kms-keys`).'
nullable: true
type: string
type: object
- pingSeconds:
- description: |-
- Reserved for future KMS health-check tuning. Not injected into pods: the current RustFS
- release does not read `RUSTFS_KMS_PING_SECONDS` in the server startup path.
- format: int32
- nullable: true
- type: integer
vault:
- description: 'Vault-specific settings (required when `backend: vault`).'
+ description: 'Vault: HTTP(S) endpoint (required when `backend: vault`).'
nullable: true
properties:
- appRole:
- description: |-
- AppRole authentication settings. Only used when `authType: approle`.
- The actual `role_id` and `secret_id` values live in the KMS Secret
- under keys `vault-approle-id` and `vault-approle-secret`.
- nullable: true
- properties:
- engine:
- description: Engine mount path for AppRole auth (e.g. `approle`).
- nullable: true
- type: string
- retrySeconds:
- description: 'Retry interval in seconds for AppRole login attempts (default: 10).'
- format: int32
- nullable: true
- type: integer
- type: object
- authType:
- description: Authentication method. Defaults to `token` when not set.
- enum:
- - token
- - approle
- - null
- nullable: true
- type: string
endpoint:
- description: Vault server endpoint (e.g. `https://vault.example.com:8200`).
- type: string
- engine:
- description: KV secrets engine mount path (maps to `RUSTFS_KMS_VAULT_KV_MOUNT` in rustfs-kms; e.g. `secret`, `kv`).
- nullable: true
- type: string
- namespace:
- description: Vault namespace (Enterprise feature).
- nullable: true
- type: string
- prefix:
- description: Key prefix inside the KV engine (maps to `RUSTFS_KMS_VAULT_KEY_PREFIX`).
- nullable: true
+ description: Vault server URL (e.g. `https://vault.example.com:8200`).
type: string
required:
- endpoint
@@ -1308,7 +1260,7 @@ spec:
nullable: true
properties:
fsGroup:
- description: GID applied to all volumes mounted in the Pod.
+ description: GID applied to all volumes mounted in the Pod (`fsGroup`).
format: int64
nullable: true
type: integer
@@ -1318,7 +1270,7 @@ spec:
nullable: true
type: integer
runAsNonRoot:
- description: 'Enforce non-root execution (default: true).'
+ description: 'Enforce non-root execution (default in the operator: `true` when set).'
nullable: true
type: boolean
runAsUser:
diff --git a/deploy/rustfs-operator/crds/tenant.yaml b/deploy/rustfs-operator/crds/tenant.yaml
index fcd8740..84120e3 100755
--- a/deploy/rustfs-operator/crds/tenant.yaml
+++ b/deploy/rustfs-operator/crds/tenant.yaml
@@ -54,19 +54,21 @@ spec:
properties:
backend:
default: local
- description: 'KMS backend type: `local` or `vault`.'
+ description: 'KMS backend: `local` or `vault`.'
enum:
- local
- vault
type: string
+ defaultKeyId:
+ description: Optional default SSE key id (`RUSTFS_KMS_DEFAULT_KEY_ID`).
+ nullable: true
+ type: string
enabled:
default: false
description: Enable server-side encryption. When `false`, all other fields are ignored.
type: boolean
kmsSecret:
- description: |-
- Reference to a Secret containing sensitive KMS credentials
- (Vault token or AppRole credentials).
+ description: Secret holding `vault-token` when using Vault.
nullable: true
properties:
name:
@@ -76,70 +78,20 @@ spec:
- name
type: object
local:
- description: 'Local file-based settings (optional when `backend: local`).'
+ description: 'Local: optional key directory override.'
nullable: true
properties:
keyDirectory:
- description: |-
- Absolute directory for KMS key files inside the container (default: `/data/kms-keys`).
- Must be absolute; RustFS validates this for the local backend.
- nullable: true
- type: string
- masterKeyId:
- description: Default KMS key id for SSE (maps to `RUSTFS_KMS_DEFAULT_KEY_ID` in the RustFS binary).
+ description: 'Absolute directory for KMS key files (default: `/data/kms-keys`).'
nullable: true
type: string
type: object
- pingSeconds:
- description: |-
- Reserved for future KMS health-check tuning. Not injected into pods: the current RustFS
- release does not read `RUSTFS_KMS_PING_SECONDS` in the server startup path.
- format: int32
- nullable: true
- type: integer
vault:
- description: 'Vault-specific settings (required when `backend: vault`).'
+ description: 'Vault: HTTP(S) endpoint (required when `backend: vault`).'
nullable: true
properties:
- appRole:
- description: |-
- AppRole authentication settings. Only used when `authType: approle`.
- The actual `role_id` and `secret_id` values live in the KMS Secret
- under keys `vault-approle-id` and `vault-approle-secret`.
- nullable: true
- properties:
- engine:
- description: Engine mount path for AppRole auth (e.g. `approle`).
- nullable: true
- type: string
- retrySeconds:
- description: 'Retry interval in seconds for AppRole login attempts (default: 10).'
- format: int32
- nullable: true
- type: integer
- type: object
- authType:
- description: Authentication method. Defaults to `token` when not set.
- enum:
- - token
- - approle
- - null
- nullable: true
- type: string
endpoint:
- description: Vault server endpoint (e.g. `https://vault.example.com:8200`).
- type: string
- engine:
- description: KV secrets engine mount path (maps to `RUSTFS_KMS_VAULT_KV_MOUNT` in rustfs-kms; e.g. `secret`, `kv`).
- nullable: true
- type: string
- namespace:
- description: Vault namespace (Enterprise feature).
- nullable: true
- type: string
- prefix:
- description: Key prefix inside the KV engine (maps to `RUSTFS_KMS_VAULT_KEY_PREFIX`).
- nullable: true
+ description: Vault server URL (e.g. `https://vault.example.com:8200`).
type: string
required:
- endpoint
@@ -1308,7 +1260,7 @@ spec:
nullable: true
properties:
fsGroup:
- description: GID applied to all volumes mounted in the Pod.
+ description: GID applied to all volumes mounted in the Pod (`fsGroup`).
format: int64
nullable: true
type: integer
@@ -1318,7 +1270,7 @@ spec:
nullable: true
type: integer
runAsNonRoot:
- description: 'Enforce non-root execution (default: true).'
+ description: 'Enforce non-root execution (default in the operator: `true` when set).'
nullable: true
type: boolean
runAsUser:
diff --git a/deploy/rustfs-operator/templates/clusterrole.yaml b/deploy/rustfs-operator/templates/clusterrole.yaml
index f0fd5ad..b0e900a 100755
--- a/deploy/rustfs-operator/templates/clusterrole.yaml
+++ b/deploy/rustfs-operator/templates/clusterrole.yaml
@@ -37,8 +37,16 @@ rules:
resources: ["statefulsets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- # Events for reconciliation notifications and console event list
+ # PersistentVolumeClaims - read (tenant-scoped event discovery lists PVC names)
- apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch"]
+
+ # Events for reconciliation notifications and console event list (core + events.k8s.io)
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["get", "list", "watch", "create", "patch"]
+ - apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "patch"]
{{- end }}
diff --git a/deploy/rustfs-operator/templates/console-clusterrole.yaml b/deploy/rustfs-operator/templates/console-clusterrole.yaml
index 3ac019d..0f93554 100755
--- a/deploy/rustfs-operator/templates/console-clusterrole.yaml
+++ b/deploy/rustfs-operator/templates/console-clusterrole.yaml
@@ -34,6 +34,9 @@ rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch"]
+ - apiGroups: ["events.k8s.io"]
+ resources: ["events"]
+ verbs: ["get", "list", "watch"]
# StatefulSets - read only
- apiGroups: ["apps"]
diff --git a/docs/prd-tenant-events-sse.md b/docs/prd-tenant-events-sse.md
new file mode 100644
index 0000000..8ceb989
--- /dev/null
+++ b/docs/prd-tenant-events-sse.md
@@ -0,0 +1,127 @@
+# PRD:Tenant Events 多资源聚合与 SSE 推送
+
+**状态:** 草案
+**范围:** Console / Operator
+**更新:** 2026-03-29
+
+---
+
+## 1. 背景与问题
+
+Tenant 详情页 **Events** 仅列出 `involvedObject.name` 等于 Tenant 名的 Kubernetes `Event`,**看不到** Pod、StatefulSet、PVC 等子资源上的事件。详情页多为 **客户端路由** + 全量 `loadTenant()`(或等价数据加载),**不一定**触发浏览器整页刷新;但 **Events 子视图内无法单独增量刷新事件列表**,默认需 **重新进入详情** 或依赖 **全量 `loadTenant()`** 才能更新事件相关数据,排障效率低。
+
+**与 `kubectl describe` 的关系:** 此处事件与 `kubectl describe` 输出中 **Events** 小节为 **同一数据源**——均为集群中的 `Event`(Phase 1 以 `core/v1` 为主,见 §3)。对 Tenant / Pod / StatefulSet / PVC 分别执行 `kubectl describe …` 时看到的事件行,与本页按 §4 合并后的条目 **语义一致**(同一 `involvedObject` 上的同一条 Event)。差异仅在于:Console **合并多资源**、**去重**、**统一排序**并可能 **截断条数**(如默认 200),与逐条 describe 的展示顺序、是否全量不一定逐行相同。
+
+## 2. 目标
+
+1. 在同一视图展示 **归属于该 Tenant** 的多资源事件(**Tenant CR、Pod、StatefulSet、PVC**)。
+2. 通过 **SSE(Server-Sent Events)** 将 **合并后的事件快照** 推送到浏览器;**不**提供单独的 `GET .../events` HTTP 聚合接口(实现上可移除该路由及相关处理)。
+
+**仅 SSE、移除 REST 的产品代价(强决策,评审必读):**
+
+- 去掉公开 `GET .../events` JSON 后,**脚本 / curl / 自动化**无法用单请求拉取合并后的列表(除非另加 **内部 / debug / 运维** 只读接口)。
+- **集成测试**更依赖 **SSE 客户端** 或 **浏览器环境**,成本高于纯 REST 断言。
+
+**可选变体:** 若不接受对全部调用方删除 REST:可对 **用户 UI** 关闭 `GET .../events`,**保留只读运维 API**(单独鉴权或网络策略);与「完全删除」二选一,须在实现与评审中明确。代价与变体在 **§7** 展开。
+
+## 3. Phase 1 非目标
+
+- 不替代 K8s 审计日志或 RustFS 应用日志。
+- 首版不强制迁移到 `events.k8s.io`;若集群以 `core/v1` `Event` 为主可继续沿用。
+- 首版不引入 WebSocket(除非后续有强需求)。
+
+## 4. 「归属于 Tenant」的判定
+
+| 资源 | Phase 1 规则 |
+|------|----------------|
+| Tenant | `metadata.name == {tenant}`;事件侧须 **`involvedObject.name={tenant}` 且 `involvedObject.kind` 与 CRD 注册 Kind 一致(通常为 `Tenant`)**(见 §4.1)。 |
+| Pod | 见 **§4.1**,与 Console **`GET .../pods`**(`list_pods`)同源。 |
+| StatefulSet | 见 **§4.1**,与 Console **`GET .../pools`**(`list_pools`)所用 STS 同源。 |
+| PersistentVolumeClaim | 见 **§4.1**;Console 无独立 PVC 列表 API,按与 Operator 一致的 **标签** 发现。 |
+
+### 4.1 与 Pod / StatefulSet / PVC 发现对齐(固定约定)
+
+合并事件所用的 **资源名白名单** 须与当前 Console 实现 **同一套 label 与命名规则**(同 namespace、同路径参数 `{tenant}`),避免 Events 与 Pods / Pools 页「各算各的」。
+
+| 资源 | 与现有行为对齐方式 |
+|------|---------------------|
| **Pod** | `Pod` 使用 **`ListParams` label:`rustfs.tenant={tenant}`**。与 `src/console/handlers/pods.rs` 中 **`list_pods`** 一致。 |
| **StatefulSet** | `StatefulSet` 使用 **同一 label:`rustfs.tenant={tenant}`**;STS 名称 **`{tenant}-{pool}`**(`pool` 来自 Tenant `spec.pools`),与 `src/console/handlers/pools.rs` 中 **`list_pools`** 一致。 |
| **PersistentVolumeClaim** | Operator 在 PVC 模板上注入 **`rustfs.tenant`**、**`rustfs.pool`** 等(见 `Tenant::pool_labels`,`src/types/v1alpha1/tenant/workloads.rs` 中 `volume_claim_templates`)。事件侧对 **`PersistentVolumeClaim`** 使用 **与 Pod 相同的租户标签 `rustfs.tenant={tenant}`** 列出名集合,即与 Operator 创建的 PVC 一致。 |
+
+**实现要求:** Events 合并逻辑应 **复用或抽取**与 `list_pods` / `list_pools` **相同的 label 字符串与 STS 命名公式**,禁止另写一套查询;变更 Pod/Pool 发现时,Events 须同步修改或共用模块。
+
+**Tenant CR 自身:** `involvedObject.name={tenant}` 且 `involvedObject.kind` 与 CRD 注册 Kind 一致(通常为 `Tenant`)。**现状缺口:** `src/console/handlers/events.rs` 仅按 `involvedObject.name` 过滤,**未**约束 kind;本需求实现 **须补齐** kind 条件(field selector 若支持则联合 `involvedObject.kind`;否则 list/watch 后 **等价后滤**),避免同 namespace **同名不同 kind** 资源事件误混入。
+
+**实现原则:** 与 **`list_pods` / `list_pools` 及 PVC 标签约定**(§4.1)一致。
+
+**范围边界(必须):** SSE 路径中的 `{tenant}` 即当前详情页 Tenant;**仅**合并、展示 **该 Tenant** 下按上表判定的资源相关事件。**禁止**混入同 namespace 内其他 Tenant 的 Pod/STS/PVC 等事件;服务端以「当前 tenant 的发现集合」为白名单过滤,前端 **只渲染本页 tenant** 的数据,切换 Tenant 或离开页面须 **丢弃** 旧列表 state,避免串数据。
+
+## 5. 功能需求
+
+### 5.1 SSE:`GET /api/v1/namespaces/{ns}/tenants/{tenant}/events/stream`
+
+**不提供**单独的 `GET .../tenants/{tenant}/events` HTTP 聚合接口;合并后的事件列表 **仅**通过本 SSE 端点以 **JSON 快照** 下发(实现可删除既有 events REST 路由与 handler)。移除 REST 的 **代价** 与 **可选变体** 见 **§7**。
+
+- **租户范围:** 快照中每条事件必须属于 **路径参数 `{tenant}`** 对应之发现集合(见 §4);不得包含其他 Tenant 资源的事件。
+- **合并**来源:**Tenant CR:** `involvedObject.name={tenant}` **且** `involvedObject.kind` 为 Tenant(或 CRD 等价 Kind;field selector 若不支持联合 kind 则见 §4.1 **后滤**);**另**合并 **该 tenant 范围内**每个 **Pod 名、StatefulSet 名、PVC 名** 对应的、**kind 匹配** 的 `involvedObject` 事件(服务端多次 list 再合并,或等价实现)。
+- **去重:** 优先 `metadata.uid`;否则用 `(kind, name, reason, firstTimestamp, message)` 弱去重。
+- **排序:** 按 `lastTimestamp` / `eventTime` 降序;**默认每帧快照最多 200 条**(常量可配置,需写入 API 说明)。
+- **错误:** 建立连接前或 Watch 无法启动等 **关键失败** 时返回 **明确 HTTP 错误**;不得在成功 `200`/建立流后长期以「空快照」掩盖失败(与现有 Console 错误策略一致)。
+- **鉴权:** 与现有 Console(JWT + 用户 K8s token)一致。
+- **Content-Type:** `text/event-stream`。
+- **行为:** 在 namespace 内 Watch `Event`(或等价),服务端仅按 **当前路径 `{tenant}`** 对应的 involvedObject 集合过滤后再推送;**不得**将无关 Tenant 的事件推入该连接。
+- **负载:** 每次事件推送 **完整快照** JSON:`{ "events": [ ... ] }`,字段约定写入 API 说明,同样 200 条上限。
+- **首包:** 连接建立后 **必须**尽快发送至少一条 **snapshot**,作为首屏数据源(无独立 REST 兜底)。
+- **断线:** 客户端 `EventSource` 退避重连;服务端用 `resourceVersion` 等在合理范围内恢复 Watch。
+
+### 5.2 前端(console-web)
+
+- 进入 Events 标签:**建立 SSE**,以 **首包及后续快照** 更新 state(无单独 HTTP 拉取 events)。
+- **鉴权与 `EventSource`:** 当前 Session 为 **Cookie**(与现有 Console 一致)时,须 **同站 / 可携带凭证**(如 `credentials: 'include'` / `withCredentials: true`),并与 **CORS** 策略一致。**若将来**改为 **Authorization 头**:原生 `EventSource` **无法设置自定义 Header**;备选为继续依赖 **Cookie**、或 **query token**(泄露风险须单独评估),在设计与评审中明确。
+- SSE 失败:**非阻塞** toast,保留上次数据,提供 **重试** 或 **手动刷新**。
+- 表格列语义不变:**类型**、**对象** 的展示与筛选枚举见 **§5.3**;**对象** 列展示为 `Kind/Name`(`Name` 为资源名,非枚举)。
+- **仅当前 Tenant:** 列表与筛选结果 **不得**包含其他 Tenant 的事件;`tenant` 路由参数变化或卸载页面前 **清空** events state,避免残留。
+- **筛选(客户端):** 在 **当前 tenant 已加载** 的合并列表上支持按 **类型**、**对象(Kind)** 与 **时间**(基于 `lastTimestamp` / `eventTime` 的范围或相对区间)过滤展示;对象侧可按 **Kind 多选** + **名称关键字**(匹配 `involvedObject.name`)组合;**不**要求 SSE 负载或 URL 增加服务端筛选参数(Phase 1)。
+
+### 5.3 类型与对象枚举(Phase 1)
+
+与 `core/v1` `Event` 及本页合并范围对齐;供 **表格列展示** 与 **筛选器** 使用。
+
+| 维度 | 枚举(固定) | 对应 K8s 字段 | 说明 |
+|------|----------------|-----------------|------|
+| **类型** | `Normal`,`Warning` | `Event.type` | **无标准 `Error` 类型:** Kubernetes `Event.type` 仅约定 `Normal` / `Warning`;失败、不可调度等「错误语义」事件在 API 中一般为 **`Warning`**,而非单独 `Error`。与 [Event v1](https://kubernetes.io/docs/reference/kubernetes-api/cluster-resources/event-v1/) 一致;筛选器仅这两项。若 API 返回空或非上述字符串(含个别组件自定义值),**类型**列 **原样显示**,该项 **不参与**「类型」枚举筛选(或归入「其他」选项,实现二选一并在 UI 文案中写清)。 |
+| **对象(Kind)** | `Tenant`,`Pod`,`StatefulSet`,`PersistentVolumeClaim` | `involvedObject.kind` | Phase 1 与 §4 资源范围一致。筛选为 **Kind 多选**;`involvedObject.name` 用 **字符串** 展示与 **可选关键字** 过滤,不设枚举。 |
+
+**实现提示:** 前端可用 TypeScript 字面量联合或常量数组表达上述枚举,避免魔法字符串分散。
+
+## 6. 非功能需求
+
+| 维度 | 要求 |
+|------|------|
+| RBAC | 用户需能 `list`/`watch` `events`,并能 `list` 用于发现 Pod/STS/PVC 的资源。 |
+| 性能 | 合并列表有上限;连接断开必须释放 Watch;避免每 Tab 无界协程。 |
+| 多副本 | 若无会话粘滞,需文档说明 **SSE 须 sticky** 或 Phase 1 仅单副本;避免 Watch 落在错误实例上长期悬挂。 |
+| 网关 / 代理 | 常见 **Nginx / Ingress** 默认 **读超时(如 60s)** 会切断长时间无响应字节的 SSE,表现为 **静默断流**、客户端 **频繁重连**。**上线 checklist:** 调大 `proxy_read_timeout`(或 Envoy 等 **等价超时**),与 **多副本 sticky** 并列;具体数值由运维与是否采用服务端注释/心跳等策略共同决定。 |
+| 安全 | SSE 快照 DTO 不包含 Secret 内容;**租户隔离**:流与 UI 仅暴露当前 `{tenant}` 范围内事件。 |
+
+## 7. 发布策略
+
+1. **直接交付 SSE** 为事件唯一通道;**删除**(或不实现)`GET .../tenants/{tenant}/events` 聚合 HTTP 接口,避免双路径维护。
+2. **产品代价(与 §2 一致):** 移除公开 JSON 后,**脚本 / curl / 自动化**无法用单请求拉取合并后的 events(除非另加 **内部 / debug / 运维** 接口);**集成测试**更依赖 **SSE 客户端** 或 **浏览器环境**。
+3. **可选变体:** 若团队不接受对全部调用方删除 REST:可对 **用户 UI** 关闭 `GET .../events`,**保留只读运维 API**(单独鉴权或网络策略);与「完全删除」二选一并在文档中写明。
+4. 无需「先 REST、后开 SSE」或 **SSE 默认关闭** 的阶段性开关;以 SSE 首包 snapshot 满足首屏与更新。
+
+## 8. 验收标准
+
+1. 人为制造 Pod 级 **Warning** 事件(如不可调度),**约 15s 内** 表格出现对应行,**Object** 为 `Pod/...`,无需整页刷新。
+2. 无 events REST 时,仅靠 SSE **首包与后续快照** 可得到 **合并、排序、截断** 后的一致列表。
+3. RBAC 不足或连接失败时返回 **明确错误**(或 SSE 合理失败语义),不出现「空表误导」。
+4. 关闭标签页后服务端 **停止** 对应 Watch/SSE(开发环境可通过日志验证)。
+5. 同 namespace 存在 **多个 Tenant** 时,在 Tenant A 详情 Events 中 **不出现** Tenant B 的 Pod/STS/PVC 等事件(服务端与前端均需满足)。
+6. 合并所用 Pod / StatefulSet / PVC 名集合与 **§4.1** 及对应 handler 行为一致(代码审查或单测可对照 `rustfs.tenant` 与 `{tenant}-{pool}` 规则)。
+7. **Tenant CR 事件**仅包含 **`involvedObject.kind=Tenant`(或 CRD 等价 Kind)且 `involvedObject.name={tenant}`**;**不得**因同名不同 kind 混入其他资源事件;可验证 **field selector 含 kind** 或文档化的 **等价后滤**(§4.1)。
+
+---
+
+*一页 PRD 结束。*
diff --git a/docs/tech-design-tenant-events-sse.md b/docs/tech-design-tenant-events-sse.md
new file mode 100644
index 0000000..9089671
--- /dev/null
+++ b/docs/tech-design-tenant-events-sse.md
@@ -0,0 +1,239 @@
+# Technical Design: Tenant Events SSE
+
+**Status:** Draft
+**Scope:** `console` (Rust) + `console-web` (Next.js)
+**Related PRD:** [prd-tenant-events-sse.md](./prd-tenant-events-sse.md)
+**Updated:** 2026-03-29
+
+---
+
+## 1. Goals (from PRD)
+
+- Aggregate `core/v1` `Event` for **Tenant CR, Pod, StatefulSet, PVC** scoped to one tenant (see PRD §4 / §4.1).
+- Deliver updates via **SSE** only; **remove** `GET /api/v1/namespaces/{ns}/tenants/{tenant}/events` (see PRD §5 / §7).
+- Frontend: **no** separate HTTP fetch for events; **client-side** filters for type / kind / time.
+
+### 1.1 PRD context (revision)
+
+- **UX / loading:** The PRD §1 clarifies that the tenant detail view is typically **client-side routing** + full `loadTenant()` (or equivalent)—**not** necessarily a browser full-page reload. The Events sub-tab still could not incrementally refresh events alone before SSE; after SSE, **the Events tab self-updates** via the stream without requiring full `loadTenant()` for event rows (keep this distinction when testing and documenting).
+
+- **Removing REST (product / engineering):** Dropping public `GET .../events` means **curl/scripts** cannot fetch merged JSON in one shot, and **integration tests** lean on **SSE clients** or **browser** (PRD §2 / §7). If the team chooses the **optional variant**, keep a **read-only ops/internal** JSON endpoint behind separate auth/networking—document the chosen path in deploy docs and OpenAPI.
+
+---
+
+## 2. Architecture
+
+```mermaid
+flowchart LR
+ subgraph browser [console-web]
+ UI[Tenant Detail Events tab]
+ ES[EventSource withCredentials]
+ UI --> ES
+ end
+ subgraph console [Operator Console API]
+ H[SSE handler]
+ D[Tenant scope discovery]
+ W[Event watch + filter]
+ M[Merge dedupe sort cap]
+ H --> D --> W --> M
+ end
+ subgraph k8s [Kubernetes API]
+ API[core/v1 Event watch/list]
+ R[Pod STS PVC Tenant lists]
+ end
+ ES <-->|cookie session| H
+ D --> R
+ W --> API
+```
+
+- **Discovery** reuses the same label/name rules as `list_pods` / `list_pools` + PVC list by `rustfs.tenant` (PRD §4.1).
+- **Watch**: namespace-scoped `Event` stream, **in-memory filter** by `involvedObject` ∈ scope (see §3.3).
+- **Transport**: SSE `text/event-stream`, each message body = full snapshot JSON `{ "events": [...] }` (PRD §5.1).
+
+---
+
+## 3. Backend (Rust / `src/console`)
+
+### 3.1 Routes
+
+| Action | Path |
+|--------|------|
+| **Add** | `GET /api/v1/namespaces/:namespace/tenants/:tenant/events/stream` |
+| **Remove** | `GET .../tenants/:tenant/events` → delete route + `list_tenant_events` (per PRD) |
+
+Register in [`src/console/routes/mod.rs`](src/console/routes/mod.rs) (`event_routes` or dedicated stream route). Merge in `api_routes()` in [`server.rs`](src/console/server.rs).
+
+### 3.2 Module layout (suggested)
+
+| Piece | Responsibility |
+|-------|------------------|
+| `handlers/events.rs` or `handlers/events_stream.rs` | Axum handler: auth `Claims`, build K8s client, spawn stream task |
+| `tenant_event_scope.rs` (new) | `async fn discover_tenant_event_scope(client, ns, tenant) -> Result<TenantEventScope>`: pod names, STS names, PVC names, tenant name; **shared helpers** with `list_pods` / `list_pools` label strings (`rustfs.tenant=...`) and `{tenant}-{pool}` |
+| Reuse `EventItem` / `EventListResponse` shape | Snapshot JSON field names stay stable for the UI; optional thin wrapper `EventSnapshot { events: Vec<EventItem> }` for SSE |
+
+**Refactor note:** Extract `format!("rustfs.tenant={}", tenant)` and STS name building into a small module used by `pods`, `pools`, and `tenant_event_scope` to satisfy PRD §4.1 “single source of truth”.
+
+### 3.3 Kubernetes interaction
+
+**API version:** `core/v1` `Event` only in Phase 1 (PRD §3; aligns with existing `list_tenant_events`).
+
+#### 3.3.1 Tenant `involvedObject.kind` (correction vs current code)
+
+**Gap:** [`src/console/handlers/events.rs`](src/console/handlers/events.rs) today uses **only** `involvedObject.name={tenant}` in a field selector; it does **not** filter by `involvedObject.kind`. PRD §4 / §4.1 / §8 require **Tenant** rows to match **`kind` + `name`**, so a different resource kind with the same name could theoretically be mixed in.
+
+**Fix (implementation contract):**
+
+1. **Read the CRD Kind** used at runtime (e.g. constant aligned with `deploy/rustfs-operator/crds/` or `kubectl get crd`—typically **`Tenant`**). Store in `Scope` as `tenant_event_kind: String` (or `&'static str` if fixed).
+2. **Filtering:** For every candidate `Event`, when attributing to the “Tenant CR” row, require:
+ - `involved_object.name == tenant` **and**
+ - `involved_object.kind == tenant_event_kind` (case-sensitive as returned by the API).
+3. **List/watch strategy:** Kubernetes field selectors for `Event` may not support combining `involvedObject.kind` and `involvedObject.name` reliably across versions. **Recommended default:** namespace-scoped **list + watch** of `Event`, then **post-filter** all legs (Tenant / Pod / STS / PVC) in Rust—same code path for snapshot and watch updates. Optionally use field selectors where they reduce list size only after verification on target K8s versions.
+
+**Scope set:** Build `involved: Set<(Kind, Name)>` with **fully qualified kind strings** as returned by `Event.involved_object.kind` (e.g. `Pod`, `StatefulSet`, `PersistentVolumeClaim`, `Tenant`).
+
+#### 3.3.2 Scope discovery (initial + periodic refresh)
+
+1. `Pod`: `Api::list` with `labels: rustfs.tenant={tenant}` (same as [`handlers/pods.rs`](src/console/handlers/pods.rs)).
+2. `StatefulSet`: same label; names must match `{tenant}-{pool}` for each `pool` in `Tenant` spec (same as [`handlers/pools.rs`](src/console/handlers/pools.rs)).
+3. `PersistentVolumeClaim`: `Api::list` with `labels: rustfs.tenant={tenant}`.
+4. `Tenant`: name = path param; **kind** from CRD / constant (see §3.3.1).
+
+**Filtering (watch path):**
+
+- `watcher` / `WatchStream` on `Api<Event>` **in the namespace** (list+watch with `ListParams::default()` or minimal params).
+- For each `Applied`/`Deleted` event, **accept** iff `(involved.kind, involved.name)` ∈ `Scope` (with kind matching rules above).
+- On **reconnect**, use `resource_version` from last bookmark/object when possible (kube-rs patterns).
+
+**Initial snapshot:** Before or right after watch starts, **list** events in namespace and filter the same way, then **dedupe / sort / cap 200** (PRD §5.1). Emit first SSE `data:` line immediately so the UI can render without a separate REST call.
+
+**Periodic scope refresh:** Re-run discovery every **N** seconds (e.g. 30–60s) or when watch errors, so new Pods/PVCs enter the whitelist without requiring reconnect. Document chosen **N** in code comment.
+
+### 3.4 Dedupe, sort, cap
+
+- **Dedupe:** `metadata.uid` first; else weak key `(kind, name, reason, firstTimestamp, message)` (PRD §5.1).
+- **Sort:** `lastTimestamp` / `eventTime` descending.
+- **Cap:** default **200** events per snapshot (constant + comment for ops).
+
+### 3.5 SSE response (Axum)
+
+- `Content-Type: text/event-stream`
+- `Cache-Control: no-cache`, `Connection: keep-alive` as appropriate
+- Body: async **stream** of UTF-8 lines: `data: <snapshot JSON>\n\n`
+- On **fatal** errors **before** stream starts → return **4xx/5xx JSON** (same error envelope as other console handlers), **not** an empty stream.
+- On **watch failure after** stream started → optionally send a final SSE event with error shape **or** close connection; **do not** silently send endless empty snapshots (PRD §5.1).
+
+**Compression:** SSE is long-lived; ensure `CompressionLayer` does not buffer the stream indefinitely (verify `tower-http` behavior or disable compression for this path if needed).
+
+### 3.6 Auth
+
+- **HTTP session:** Middleware uses **`session` cookie JWT** ([`middleware/auth.rs`](src/console/middleware/auth.rs)). **EventSource** sends cookies on same-site / credentialed CORS; frontend must use `{ withCredentials: true }` for cross-origin dev.
+- **K8s API:** `Claims` still carries `k8s_token` for impersonated `kube::Client`—unchanged from other handlers.
+
+**PRD note:** “JWT + user K8s token” in the PRD refers to this combined model; SSE does **not** use `Authorization` headers for browser transport.
+
+### 3.7 OpenAPI
+
+- Remove or mark deprecated old `GET .../events` in [`openapi.rs`](src/console/openapi.rs).
+- Document `GET .../events/stream` (response = `text/event-stream`, example snapshot schema).
+
+---
+
+## 4. Frontend (`console-web`)
+
+### 4.1 Transport
+
+- **Prefer `EventSource`** with `{ withCredentials: true }` so the **session cookie** is sent (matches existing auth).
+- Parse `message` events: `event.data` → JSON `EventListResponse`-compatible `{ events: EventItem[] }`.
+- **URL:** `${apiBase}/api/v1/namespaces/${ns}/tenants/${tenant}/events/stream` (add helper next to removed `listTenantEvents`).
+
+**Limitations (PRD §5.2):**
+
+- Standard `EventSource` does **not** send custom `Authorization` headers; cookie session is the primary fit.
+- If you ever move to **Bearer-only** auth, plan **fetch streaming** or **query token** (security review) instead of native `EventSource` with headers.
+
+### 4.2 Lifecycle (Tenant detail client)
+
+| Moment | Behavior |
+|--------|----------|
+| User opens **Events** tab | `EventSource` connect; show loading until first `data` or error |
+| First `data` | `setEvents(parsed.events)` |
+| Further `data` | Replace list with new snapshot (PRD: full snapshot each time) |
+| SSE error / disconnect | Non-blocking **toast**; keep last good list; offer **Retry** (close + reopen EventSource) |
+| `namespace` / `name` route change | **Close** EventSource, **clear** events state, open new stream |
+| Leave page / unmount | `eventSource.close()` |
+
+Do **not** load events in the initial `Promise.allSettled` batch that currently calls `listTenantEvents`; remove that call.
+
+### 4.3 CORS and cookies
+
+- Align with [`server.rs`](src/console/server.rs) `CORS_ALLOWED_ORIGINS` for dev split-host (e.g. Next on `localhost:3000`, API on another port).
+- `credentials: "include"` for `fetch` is already used; **EventSource** must mirror with **`withCredentials: true`** so preflight + cookie behavior matches PRD §5.2 / §6.
+
+### 4.4 Client-side filters (PRD §5.2 / §5.3)
+
+- **Type:** `Normal` | `Warning` + optional “show raw / Other” for unknown `event_type`.
+- **Kind:** multi-select `Tenant` | `Pod` | `StatefulSet` | `PersistentVolumeClaim`.
+- **Name:** substring on `involved_object` or `involvedObject.name` if exposed separately in DTO.
+- **Time:** filter by parsed `last_timestamp` (and `first_timestamp` if needed) within UI range.
+
+Keep filter state **local** to the Events tab; do not add query params to SSE URL in Phase 1.
+
+### 4.5 Types
+
+- Reuse `EventItem` / `EventListResponse` in `types/api.ts`.
+- Add const arrays / unions for **kind** and **type** enums (PRD §5.3).
+
+### 4.6 i18n
+
+- Reuse existing “No events” / error strings; add short strings for filter labels and retry if missing.
+
+---
+
+## 5. Non-functional
+
+| Topic | Design choice |
+|-------|----------------|
+| **RBAC** | User must `list`/`watch` `events` and `list` pods, statefulsets, persistentvolumeclaims, tenants (same as today + PVC list). Document in deploy notes. |
+| **Multi-replica Console** | SSE is sticky to one pod unless using a shared informer; PRD §6: document **ingress sticky sessions** or single replica for Phase 1. |
+| **Gateway / proxy (PRD §6)** | Default **read timeouts** (e.g. Nginx **60s**) can **silently close** idle SSE connections → client reconnects. **Deploy checklist:** increase `proxy_read_timeout` (or Envoy equivalent) for the console API route; tune together with optional **server heartbeat** (comment lines) if needed. |
+| **Limits** | One watch + periodic discovery per **active** SSE connection; cap snapshots at 200 rows. |
+
+---
+
+## 6. Testing & verification
+
+| Layer | Suggestion |
+|-------|------------|
+| **Rust** | Unit tests for `Scope` building from fake `list` results; **Tenant kind filter** (same name, different kind → excluded); dedupe/sort/cap pure functions. |
+| **E2E / manual** | PRD §8: Pod Warning ~15s; two tenants same NS isolation; tab close drops connection (server log); **§8.7** Tenant events only (`kind` + `name`). |
+| **Integration** | Without REST, prefer **SSE client** (e.g. `curl -N` with cookie, or headless browser) or add **temporary internal** JSON endpoint if product selects PRD §7 variant. |
+| **Frontend** | Component test: mock `EventSource` or stream parser; filter logic unit tests. |
+
+**Project gate:** `make pre-commit` before merge.
+
+---
+
+## 7. Implementation order (suggested)
+
+1. Extract shared **tenant scope** / label helpers; add **PVC** list by label (aligned with PRD §4.1).
+2. Implement **Tenant kind** + `(kind, name)` filtering; remove reliance on name-only field selector for Tenant leg.
+3. Implement **SSE handler** + snapshot pipeline; manual `curl -N` with cookie or browser.
+4. **Remove** `GET .../events` and frontend `listTenantEvents`; wire **EventSource** on Events tab (`withCredentials`).
+5. Add **filters** UI + polish errors / retry.
+6. OpenAPI + CHANGELOG + **deploy notes** (sticky + **proxy read timeout** + optional ops-only REST variant if chosen).
+
+---
+
+## 8. Risks & follow-ups
+
+| Risk | Mitigation |
+|------|------------|
+| High event volume in namespace | Namespace-wide watch + filter; tune refresh; monitor CPU. |
+| `events.k8s.io` only clusters | Out of Phase 1; add later if needed. |
+| EventSource CORS in dev | Align `CORS_ALLOWED_ORIGINS` and `withCredentials`. |
+| Ingress/proxy idle timeout | **proxy_read_timeout** / equivalent; document in runbook (PRD §6). |
+| REST removal | Scripts/tests use SSE or optional internal API; track in PRD §2 / §7 decision. |
+
+---
+
+*End of technical design.*
diff --git a/src/console/handlers/encryption.rs b/src/console/handlers/encryption.rs
index 98b9b9e..0ee3c0e 100644
--- a/src/console/handlers/encryption.rs
+++ b/src/console/handlers/encryption.rs
@@ -18,8 +18,7 @@ use crate::console::{
state::Claims,
};
use crate::types::v1alpha1::encryption::{
- EncryptionConfig, KmsBackendType, LocalKmsConfig, VaultAppRoleConfig, VaultAuthType,
- VaultKmsConfig,
+ EncryptionConfig, KmsBackendType, LocalKmsConfig, VaultKmsConfig,
};
use crate::types::v1alpha1::tenant::Tenant;
use axum::{Extension, Json, extract::Path};
@@ -47,21 +46,12 @@ pub async fn get_encryption(
backend: enc.backend.to_string(),
vault: enc.vault.as_ref().map(|v| VaultInfo {
endpoint: v.endpoint.clone(),
- engine: v.engine.clone(),
- namespace: v.namespace.clone(),
- prefix: v.prefix.clone(),
- auth_type: v.auth_type.as_ref().map(|a| a.to_string()),
- app_role: v.app_role.as_ref().map(|ar| AppRoleInfo {
- engine: ar.engine.clone(),
- retry_seconds: ar.retry_seconds,
- }),
}),
local: enc.local.as_ref().map(|l| LocalInfo {
key_directory: l.key_directory.clone(),
- master_key_id: l.master_key_id.clone(),
}),
kms_secret_name: enc.kms_secret.as_ref().map(|s| s.name.clone()),
- ping_seconds: enc.ping_seconds,
+ default_key_id: enc.default_key_id.clone(),
security_context: tenant.spec.security_context.as_ref().map(|sc| {
SecurityContextInfo {
run_as_user: sc.run_as_user,
@@ -77,7 +67,7 @@ pub async fn get_encryption(
vault: None,
local: None,
kms_secret_name: None,
- ping_seconds: None,
+ default_key_id: None,
security_context: tenant.spec.security_context.as_ref().map(|sc| {
SecurityContextInfo {
run_as_user: sc.run_as_user,
@@ -112,7 +102,6 @@ pub async fn update_encryption(
_ => KmsBackendType::Local,
};
- // Validate Vault config when backend is Vault (fail fast with 400 instead of invalid spec)
if backend == KmsBackendType::Vault {
let vault_ok = body
.vault
@@ -139,17 +128,6 @@ pub async fn update_encryption(
let vault = if backend == KmsBackendType::Vault {
body.vault.map(|v| VaultKmsConfig {
endpoint: v.endpoint,
- engine: v.engine,
- namespace: v.namespace,
- prefix: v.prefix,
- auth_type: v.auth_type.map(|s| match s.as_str() {
- "approle" => VaultAuthType::Approle,
- _ => VaultAuthType::Token,
- }),
- app_role: v.app_role.map(|ar| VaultAppRoleConfig {
- engine: ar.engine,
- retry_seconds: ar.retry_seconds,
- }),
})
} else {
None
@@ -158,7 +136,6 @@ pub async fn update_encryption(
let local = if backend == KmsBackendType::Local {
body.local.map(|l| LocalKmsConfig {
key_directory: l.key_directory,
- master_key_id: l.master_key_id,
})
} else {
None
@@ -175,7 +152,7 @@ pub async fn update_encryption(
vault,
local,
kms_secret,
- ping_seconds: body.ping_seconds,
+ default_key_id: body.default_key_id.filter(|s| !s.is_empty()),
})
} else {
Some(EncryptionConfig {
diff --git a/src/console/handlers/events.rs b/src/console/handlers/events.rs
index fa96737..e38a1a4 100755
--- a/src/console/handlers/events.rs
+++ b/src/console/handlers/events.rs
@@ -17,10 +17,10 @@ use std::result::Result as StdResult;
use std::time::Duration;
use crate::console::{
- error::{self, Error, Result},
- models::event::{EventItem, EventListResponse},
+ error::{Error, Result},
+ models::event::EventListResponse,
state::Claims,
- tenant_event_scope::{discover_tenant_event_scope, merge_namespace_events},
+ tenant_event_scope::{discover_tenant_event_scope, list_scoped_events_v1, merge_events_v1},
};
use axum::{
Extension,
@@ -28,18 +28,18 @@ use axum::{
response::sse::{Event, KeepAlive, Sse},
};
use futures::StreamExt;
-use k8s_openapi::api::core::v1 as corev1;
+use k8s_openapi::api::events::v1 as eventsv1;
use kube::{
Api, Client,
- api::ListParams,
runtime::{WatchStreamExt, watcher},
};
use tokio_stream::wrappers::ReceiverStream;
/// SSE stream of merged tenant-scoped Kubernetes events (PRD §5.1).
///
-/// Uses the same `session` cookie JWT as other console routes. Payload each tick is JSON
-/// `EventListResponse` (full snapshot, max [`tenant_event_scope::MAX_EVENTS_SNAPSHOT`]).
+/// Uses the same `session` cookie JWT as other console routes. Payloads use named SSE events:
+/// - `snapshot`: JSON [`EventListResponse`]
+/// - `stream_error`: JSON `{"message":"..."}` (watch/snapshot failures)
pub async fn stream_tenant_events(
Path((namespace, tenant)): Path<(String, String)>,
Extension(claims): Extension,
@@ -65,6 +65,15 @@ pub async fn stream_tenant_events(
))
}
+fn snapshot_sse_event(json: String) -> Event {
+ Event::default().event("snapshot").data(json)
+}
+
+fn stream_error_sse_event(message: &str) -> Event {
+ let payload = serde_json::json!({ "message": message }).to_string();
+ Event::default().event("stream_error").data(payload)
+}
+
async fn run_event_sse_loop(
client: Client,
namespace: String,
@@ -72,15 +81,11 @@ async fn run_event_sse_loop(
tx: tokio::sync::mpsc::Sender>,
first_json: String,
) -> Result<()> {
- if tx
- .send(Ok(Event::default().data(first_json)))
- .await
- .is_err()
- {
+ if tx.send(Ok(snapshot_sse_event(first_json))).await.is_err() {
return Ok(());
}
- let event_api: Api = Api::namespaced(client.clone(), &namespace);
+ let event_api: Api = Api::namespaced(client.clone(), &namespace);
let mut watch = watcher(event_api, watcher::Config::default())
.default_backoff()
.boxed();
@@ -90,33 +95,45 @@ async fn run_event_sse_loop(
loop {
tokio::select! {
_ = scope_tick.tick() => {
- let json = match build_snapshot_json(&client, &namespace, &tenant).await {
- Ok(j) => j,
+ match build_snapshot_json(&client, &namespace, &tenant).await {
+ Ok(json) => {
+ if tx.send(Ok(snapshot_sse_event(json))).await.is_err() {
+ return Ok(());
+ }
+ }
Err(e) => {
tracing::warn!("tenant events snapshot failed: {}", e);
- continue;
+ let msg = e.to_string();
+ if tx.send(Ok(stream_error_sse_event(&msg))).await.is_err() {
+ return Ok(());
+ }
}
- };
- if tx.send(Ok(Event::default().data(json))).await.is_err() {
- return Ok(());
}
}
ev = watch.next() => {
match ev {
Some(Ok(_)) => {
- let json = match build_snapshot_json(&client, &namespace, &tenant).await {
- Ok(j) => j,
+ match build_snapshot_json(&client, &namespace, &tenant).await {
+ Ok(json) => {
+ if tx.send(Ok(snapshot_sse_event(json))).await.is_err() {
+ return Ok(());
+ }
+ }
Err(e) => {
tracing::warn!("tenant events snapshot failed: {}", e);
- continue;
+ let msg = e.to_string();
+ if tx.send(Ok(stream_error_sse_event(&msg))).await.is_err() {
+ return Ok(());
+ }
}
- };
- if tx.send(Ok(Event::default().data(json))).await.is_err() {
- return Ok(());
}
}
Some(Err(e)) => {
tracing::warn!("Kubernetes Event watch error: {}", e);
+ let msg = format!("Kubernetes Event watch error: {}", e);
+ if tx.send(Ok(stream_error_sse_event(&msg))).await.is_err() {
+ return Ok(());
+ }
}
None => return Ok(()),
}
@@ -127,17 +144,8 @@ async fn run_event_sse_loop(
async fn build_snapshot_json(client: &Client, namespace: &str, tenant: &str) -> Result {
let scope = discover_tenant_event_scope(client, namespace, tenant).await?;
- let api: Api = Api::namespaced(client.clone(), namespace);
- let list = api.list(&ListParams::default()).await.map_err(|e| {
- tracing::warn!(
- "List events for tenant {}/{} failed: {}",
- namespace,
- tenant,
- e
- );
- error::map_kube_error(e, format!("Events for tenant '{}'", tenant))
- })?;
- let items: Vec = merge_namespace_events(&scope, list.items);
+ let raw = list_scoped_events_v1(client, namespace, &scope).await?;
+ let items = merge_events_v1(raw);
let body = EventListResponse { events: items };
serde_json::to_string(&body).map_err(|e| Error::Json { source: e })
}
diff --git a/src/console/models/encryption.rs b/src/console/models/encryption.rs
index 1717eaa..f2865dc 100644
--- a/src/console/models/encryption.rs
+++ b/src/console/models/encryption.rs
@@ -24,39 +24,24 @@ pub struct EncryptionInfoResponse {
pub vault: Option,
pub local: Option,
pub kms_secret_name: Option,
- pub ping_seconds: Option,
+ pub default_key_id: Option,
pub security_context: Option,
}
-/// Vault configuration (non-sensitive fields only).
+/// Vault endpoint only (token lives in `kmsSecret`).
#[derive(Debug, Serialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct VaultInfo {
pub endpoint: String,
- pub engine: Option,
- pub namespace: Option,
- pub prefix: Option,
- pub auth_type: Option,
- pub app_role: Option,
}
-/// AppRole non-sensitive fields.
-#[derive(Debug, Serialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct AppRoleInfo {
- pub engine: Option,
- pub retry_seconds: Option,
-}
-
-/// Local KMS configuration.
#[derive(Debug, Serialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct LocalInfo {
pub key_directory: Option,
- pub master_key_id: Option,
}
-/// SecurityContext information (lives at TenantSpec level, shown alongside encryption).
+/// SecurityContext information (TenantSpec; shown alongside encryption for convenience).
#[derive(Debug, Serialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct SecurityContextInfo {
@@ -67,7 +52,6 @@ pub struct SecurityContextInfo {
}
/// PUT request – update encryption configuration.
-/// SecurityContext is managed separately via the Security tab (PUT .../security-context).
#[derive(Debug, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct UpdateEncryptionRequest {
@@ -76,32 +60,19 @@ pub struct UpdateEncryptionRequest {
pub vault: Option,
pub local: Option,
pub kms_secret_name: Option,
- pub ping_seconds: Option,
+ pub default_key_id: Option,
}
#[derive(Debug, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct UpdateVaultRequest {
pub endpoint: String,
- pub engine: Option,
- pub namespace: Option,
- pub prefix: Option,
- pub auth_type: Option,
- pub app_role: Option,
-}
-
-#[derive(Debug, Deserialize, ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct UpdateAppRoleRequest {
- pub engine: Option,
- pub retry_seconds: Option,
}
#[derive(Debug, Deserialize, ToSchema)]
#[serde(rename_all = "camelCase")]
pub struct UpdateLocalRequest {
pub key_directory: Option,
- pub master_key_id: Option,
}
#[derive(Debug, Deserialize, ToSchema)]
diff --git a/src/console/openapi.rs b/src/console/openapi.rs
index c92d070..3aaf347 100644
--- a/src/console/openapi.rs
+++ b/src/console/openapi.rs
@@ -269,7 +269,7 @@ fn api_restart_pod(_body: Json) -> Json {
fn api_get_pod_logs() {}
// --- Events (SSE) ---
-#[utoipa::path(get, path = "/api/v1/namespaces/{namespace}/tenants/{tenant}/events/stream", params(("namespace" = String, Path), ("tenant" = String, Path)), responses((status = 200, description = "text/event-stream; each `data:` payload is JSON EventListResponse", body = EventListResponse, content_type = "application/json")), tag = "events")]
+#[utoipa::path(get, path = "/api/v1/namespaces/{namespace}/tenants/{tenant}/events/stream", params(("namespace" = String, Path), ("tenant" = String, Path)), responses((status = 200, description = "text/event-stream; `event: snapshot` + JSON EventListResponse; `event: stream_error` + JSON { message }", body = EventListResponse, content_type = "application/json")), tag = "events")]
fn api_stream_tenant_events() {
unimplemented!("Documentation only")
}
diff --git a/src/console/tenant_event_scope.rs b/src/console/tenant_event_scope.rs
index 6cd5dc7..b6d17a8 100644
--- a/src/console/tenant_event_scope.rs
+++ b/src/console/tenant_event_scope.rs
@@ -13,11 +13,14 @@
// limitations under the License.
//! Discover which `(involvedObject.kind, involvedObject.name)` pairs belong to a Tenant for event aggregation (PRD §4.1).
+//! Lists use [`k8s_openapi::api::events::v1::Event`] (`events.k8s.io/v1`) with per-resource field selectors instead of listing all namespace events.
use std::cmp::Reverse;
use std::collections::{HashMap, HashSet};
+use futures::stream::{self, StreamExt};
use k8s_openapi::api::core::v1 as corev1;
+use k8s_openapi::api::events::v1 as eventsv1;
use kube::{Api, Client, ResourceExt, api::ListParams};
use crate::console::{
@@ -32,34 +35,24 @@ pub const TENANT_CR_KIND: &str = "Tenant";
/// Default max events per SSE snapshot (PRD §5.1).
pub const MAX_EVENTS_SNAPSHOT: usize = 200;
+/// Maximum number of concurrent `events.k8s.io` list calls (one per `(regarding.kind, regarding.name)` pair).
+const EVENT_LIST_CONCURRENCY: usize = 16;
+
/// Label selector `rustfs.tenant=` — must match [`crate::console::handlers::pods::list_pods`].
pub fn tenant_label_selector(tenant: &str) -> String {
format!("rustfs.tenant={}", tenant)
}
-/// Allowed `(kind, name)` pairs for `Event.involvedObject` in this tenant scope.
+/// Allowed `(kind, name)` pairs for `Event.regarding` in this tenant scope.
#[derive(Debug, Clone)]
pub struct TenantEventScope {
- /// Tenant name (scope metadata; filtering uses [`Self::involved`]).
+ /// Tenant name (scope metadata).
#[allow(dead_code)]
pub tenant_name: String,
- /// `(involvedObject.kind, involvedObject.name)` using API kind strings.
+ /// `(regarding.kind, regarding.name)` using API kind strings.
pub involved: HashSet<(String, String)>,
}
-impl TenantEventScope {
- /// Returns true if this event's regarding object is in scope.
- pub fn matches_involved(&self, ev: &corev1::Event) -> bool {
- let obj = &ev.involved_object;
- let kind = obj.kind.clone().unwrap_or_default();
- let name = obj.name.clone().unwrap_or_default();
- if kind.is_empty() || name.is_empty() {
- return false;
- }
- self.involved.contains(&(kind, name))
- }
-}
-
/// Load Pod / StatefulSet / PVC names and Tenant CR row — same discovery rules as `list_pods` / `list_pools`.
pub async fn discover_tenant_event_scope(
client: &Client,
@@ -106,78 +99,133 @@ pub async fn discover_tenant_event_scope(
})
}
-/// Filter namespace events to scope, dedupe, sort newest first, cap at [`MAX_EVENTS_SNAPSHOT`].
-pub fn merge_namespace_events(scope: &TenantEventScope, raw: Vec) -> Vec {
- let filtered: Vec = raw
- .into_iter()
- .filter(|e| scope.matches_involved(e))
- .collect();
+/// List [`eventsv1::Event`] for [`TenantEventScope`] using `regarding.kind` + `regarding.name` field selectors (parallel, bounded concurrency).
+pub async fn list_scoped_events_v1(
+ client: &Client,
+ namespace: &str,
+ scope: &TenantEventScope,
+) -> Result> {
+ let api: Api = Api::namespaced(client.clone(), namespace);
+ let pairs: Vec<(String, String)> = scope.involved.iter().cloned().collect();
+
+ let results: Vec<_> = stream::iter(pairs)
+ .map(|(kind, name)| {
+ let api = api.clone();
+ async move {
+ let field_selector = format!("regarding.kind={},regarding.name={}", kind, name);
+ api.list(&ListParams::default().fields(&field_selector).limit(500))
+ .await
+ }
+ })
+ .buffer_unordered(EVENT_LIST_CONCURRENCY)
+ .collect()
+ .await;
+
+ let mut all = Vec::new();
+ for res in results {
+ let list = res.map_err(|e| {
+ error::map_kube_error(e, format!("Events for tenant '{}'", scope.tenant_name))
+ })?;
+ all.extend(list.items);
+ }
+
+ Ok(all)
+}
+/// Dedupe, sort newest first, cap at [`MAX_EVENTS_SNAPSHOT`], map to [`EventItem`].
+pub fn merge_events_v1(raw: Vec) -> Vec {
// Dedupe by uid
- let mut by_uid: HashMap = HashMap::new();
- let mut no_uid: Vec = Vec::new();
- for e in filtered {
+ let mut by_uid: HashMap = HashMap::new();
+ let mut no_uid: Vec = Vec::new();
+ for e in raw {
if let Some(uid) = e.metadata.uid.clone() {
by_uid.insert(uid, e);
} else {
no_uid.push(e);
}
}
- let mut merged: Vec = by_uid.into_values().collect();
+ let mut merged: Vec = by_uid.into_values().collect();
- // Weak dedupe for events without uid
let mut seen_weak: HashSet<(String, String, String, String, String)> = HashSet::new();
for e in no_uid {
- let weak = weak_dedup_key(&e);
+ let weak = weak_dedup_key_v1(&e);
if seen_weak.insert(weak) {
merged.push(e);
}
}
- merged.sort_by_key(|b| Reverse(event_sort_key(b)));
+ merged.sort_by_key(|b| Reverse(event_v1_sort_key(b)));
merged.truncate(MAX_EVENTS_SNAPSHOT);
- merged.into_iter().map(core_event_to_item).collect()
+ merged.into_iter().map(events_v1_to_item).collect()
}
-fn weak_dedup_key(e: &corev1::Event) -> (String, String, String, String, String) {
- let kind = e.involved_object.kind.clone().unwrap_or_default();
- let name = e.involved_object.name.clone().unwrap_or_default();
+fn weak_dedup_key_v1(e: &eventsv1::Event) -> (String, String, String, String, String) {
+ let kind = e
+ .regarding
+ .as_ref()
+ .and_then(|r| r.kind.as_ref())
+ .cloned()
+ .unwrap_or_default();
+ let name = e
+ .regarding
+ .as_ref()
+ .and_then(|r| r.name.as_ref())
+ .cloned()
+ .unwrap_or_default();
let reason = e.reason.clone().unwrap_or_default();
let first = e
- .first_timestamp
+ .deprecated_first_timestamp
.as_ref()
.map(|t| t.0.to_rfc3339())
.unwrap_or_default();
- let msg = e.message.clone().unwrap_or_default();
+ let msg = e.note.clone().unwrap_or_default();
(kind, name, reason, first, msg)
}
-fn event_sort_key(e: &corev1::Event) -> chrono::DateTime {
+fn event_v1_sort_key(e: &eventsv1::Event) -> chrono::DateTime {
if let Some(ref et) = e.event_time {
return et.0;
}
- if let Some(ref lt) = e.last_timestamp {
+ if let Some(ref s) = e.series {
+ return s.last_observed_time.0;
+ }
+ if let Some(ref lt) = e.deprecated_last_timestamp {
return lt.0;
}
- if let Some(ref ft) = e.first_timestamp {
+ if let Some(ref ft) = e.deprecated_first_timestamp {
return ft.0;
}
chrono::DateTime::from_timestamp(0, 0).unwrap_or_else(chrono::Utc::now)
}
-fn core_event_to_item(e: corev1::Event) -> EventItem {
+fn events_v1_to_item(e: eventsv1::Event) -> EventItem {
+ let kind = e
+ .regarding
+ .as_ref()
+ .and_then(|r| r.kind.clone())
+ .unwrap_or_default();
+ let name = e
+ .regarding
+ .as_ref()
+ .and_then(|r| r.name.clone())
+ .unwrap_or_default();
+ let count = e
+ .series
+ .as_ref()
+ .map(|s| s.count)
+ .or(e.deprecated_count)
+ .unwrap_or(0);
EventItem {
event_type: e.type_.unwrap_or_default(),
reason: e.reason.unwrap_or_default(),
- message: e.message.unwrap_or_default(),
- involved_object: format!(
- "{}/{}",
- e.involved_object.kind.unwrap_or_default(),
- e.involved_object.name.unwrap_or_default()
- ),
- first_timestamp: e.first_timestamp.map(|ts| ts.0.to_rfc3339()),
- last_timestamp: e.last_timestamp.map(|ts| ts.0.to_rfc3339()),
- count: e.count.unwrap_or(0),
+ message: e.note.unwrap_or_default(),
+ involved_object: format!("{}/{}", kind, name),
+ first_timestamp: e.deprecated_first_timestamp.map(|ts| ts.0.to_rfc3339()),
+ last_timestamp: e
+ .deprecated_last_timestamp
+ .map(|ts| ts.0.to_rfc3339())
+ .or_else(|| e.event_time.map(|ts| ts.0.to_rfc3339())),
+ count,
}
}
@@ -186,13 +234,13 @@ mod tests {
use super::*;
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
- fn mk_event(kind: &str, name: &str, uid: Option<&str>) -> corev1::Event {
- corev1::Event {
- involved_object: corev1::ObjectReference {
+ fn mk_event_v1(kind: &str, name: &str, uid: Option<&str>) -> eventsv1::Event {
+ eventsv1::Event {
+ regarding: Some(corev1::ObjectReference {
kind: Some(kind.to_string()),
name: Some(name.to_string()),
..Default::default()
- },
+ }),
metadata: ObjectMeta {
uid: uid.map(String::from),
..Default::default()
@@ -202,23 +250,13 @@ mod tests {
}
#[test]
- fn tenant_kind_required_for_tenant_name() {
- let scope = TenantEventScope {
- tenant_name: "t1".to_string(),
- involved: HashSet::from([
- (TENANT_CR_KIND.to_string(), "t1".to_string()),
- ("Pod".to_string(), "p1".to_string()),
- ]),
- };
+ fn merge_dedupes_identical_uid() {
let raw = vec![
- mk_event("Pod", "other", Some("a")),
- mk_event("ConfigMap", "t1", Some("b")),
- mk_event(TENANT_CR_KIND, "t1", Some("c")),
+ mk_event_v1("Pod", "p1", Some("uid-a")),
+ mk_event_v1("Pod", "p1", Some("uid-a")),
];
- let items = merge_namespace_events(&scope, raw);
- let objs: Vec<&str> = items.iter().map(|i| i.involved_object.as_str()).collect();
- assert!(objs.contains(&"Tenant/t1"));
- assert!(!objs.iter().any(|s| *s == "Pod/other"));
- assert!(!objs.iter().any(|s| *s == "ConfigMap/t1"));
+ let items = merge_events_v1(raw);
+ assert_eq!(items.len(), 1);
+ assert_eq!(items[0].involved_object, "Pod/p1");
}
}
diff --git a/src/context.rs b/src/context.rs
index 69e0606..2f475a1 100755
--- a/src/context.rs
+++ b/src/context.rs
@@ -332,7 +332,7 @@ impl Context {
/// 2. Vault endpoint is non-empty when backend is Vault.
/// 3. KMS Secret exists and contains the correct keys for the auth type.
pub async fn validate_kms_secret(&self, tenant: &Tenant) -> Result<(), Error> {
- use crate::types::v1alpha1::encryption::{KmsBackendType, VaultAuthType};
+ use crate::types::v1alpha1::encryption::KmsBackendType;
let Some(ref enc) = tenant.spec.encryption else {
return Ok(());
@@ -347,7 +347,7 @@ impl Context {
validate_local_kms_tenant(enc.local.as_ref(), &tenant.spec.pools)?;
}
- // Validate Vault endpoint is non-empty and kms_secret is required for Vault
+ // Vault: non-empty endpoint and `kmsSecret` with `vault-token` (RustFS `build_vault_kms_config`).
if enc.backend == KmsBackendType::Vault {
let endpoint_empty = enc
.vault
@@ -359,7 +359,6 @@ impl Context {
message: "Vault endpoint must not be empty".to_string(),
});
}
- // Vault backend requires credentials (token or AppRole) from a Secret
let secret_missing = enc
.kms_secret
.as_ref()
@@ -367,7 +366,9 @@ impl Context {
.unwrap_or(true);
if secret_missing {
return Err(Error::KmsConfigInvalid {
- message: "Vault backend requires kmsSecret with vault-token or vault-approle-id/vault-approle-secret".to_string(),
+ message:
+ "Vault backend requires kmsSecret referencing a Secret with key vault-token"
+ .to_string(),
});
}
}
@@ -387,32 +388,16 @@ impl Context {
})?;
if enc.backend == KmsBackendType::Vault {
- let is_approle = enc.vault.as_ref().and_then(|v| v.auth_type.as_ref())
- == Some(&VaultAuthType::Approle);
-
- if is_approle {
- for key in ["vault-approle-id", "vault-approle-secret"] {
- let has_key = secret.data.as_ref().is_some_and(|d| d.contains_key(key));
- if !has_key {
- return KmsSecretMissingKeySnafu {
- secret_name: secret_ref.name.clone(),
- key: key.to_string(),
- }
- .fail();
- }
- }
- } else {
- let has_token = secret
- .data
- .as_ref()
- .is_some_and(|d| d.contains_key("vault-token"));
- if !has_token {
- return KmsSecretMissingKeySnafu {
- secret_name: secret_ref.name.clone(),
- key: "vault-token".to_string(),
- }
- .fail();
+ let has_token = secret
+ .data
+ .as_ref()
+ .is_some_and(|d| d.contains_key("vault-token"));
+ if !has_token {
+ return KmsSecretMissingKeySnafu {
+ secret_name: secret_ref.name.clone(),
+ key: "vault-token".to_string(),
}
+ .fail();
}
}
diff --git a/src/types/v1alpha1/encryption.rs b/src/types/v1alpha1/encryption.rs
index 3da37b8..bd2cbfa 100644
--- a/src/types/v1alpha1/encryption.rs
+++ b/src/types/v1alpha1/encryption.rs
@@ -19,9 +19,7 @@ use serde::{Deserialize, Serialize};
/// KMS backend type for server-side encryption.
///
-/// RustFS supports two backends:
-/// - `Local`: File-based key storage on disk (development / single-node)
-/// - `Vault`: HashiCorp Vault KV2 engine (production)
+/// RustFS `init_kms_system` reads `RUSTFS_KMS_BACKEND` (`local` or `vault`).
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema, Default, PartialEq)]
#[serde(rename_all = "lowercase")]
#[schemars(rename_all = "lowercase")]
@@ -40,136 +38,35 @@ impl std::fmt::Display for KmsBackendType {
}
}
-/// Vault authentication method.
+/// Vault endpoint for KMS. Token is supplied via `kmsSecret` (`vault-token` key).
///
-/// `Token` is the default and fully implemented in rustfs-kms.
-/// `Approle` type exists in rustfs-kms but the backend is not yet functional.
-#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema, Default, PartialEq)]
-#[serde(rename_all = "lowercase")]
-#[schemars(rename_all = "lowercase")]
-pub enum VaultAuthType {
- #[default]
- Token,
- Approle,
-}
-
-impl std::fmt::Display for VaultAuthType {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- match self {
- VaultAuthType::Token => write!(f, "token"),
- VaultAuthType::Approle => write!(f, "approle"),
- }
- }
-}
-
-/// Vault-specific KMS configuration.
-///
-/// Maps to `VaultConfig` in the `rustfs-kms` crate.
-/// Sensitive fields (Vault token or AppRole credentials) are stored in the Secret referenced
-/// by `EncryptionConfig::kms_secret`.
-#[derive(Deserialize, Serialize, Clone, Debug, KubeSchema, Default)]
+/// RustFS currently fixes Transit mount, KV mount, and key prefix inside `build_vault_kms_config`;
+/// only address and token are configurable at startup.
+#[derive(Deserialize, Serialize, Clone, Debug, KubeSchema)]
#[serde(rename_all = "camelCase")]
pub struct VaultKmsConfig {
- /// Vault server endpoint (e.g. `https://vault.example.com:8200`).
+ /// Vault server URL (e.g. `https://vault.example.com:8200`).
pub endpoint: String,
-
- /// KV secrets engine mount path (maps to `RUSTFS_KMS_VAULT_KV_MOUNT` in rustfs-kms; e.g. `secret`, `kv`).
- #[serde(default, skip_serializing_if = "Option::is_none")]
- pub engine: Option<String>,
-
- /// Vault namespace (Enterprise feature).
- #[serde(default, skip_serializing_if = "Option::is_none")]
- pub namespace: Option<String>,
-
- /// Key prefix inside the KV engine (maps to `RUSTFS_KMS_VAULT_KEY_PREFIX`).
- #[serde(default, skip_serializing_if = "Option::is_none")]
- pub prefix: Option<String>,
-
- /// Authentication method. Defaults to `token` when not set.
- #[serde(default, skip_serializing_if = "Option::is_none")]
- pub auth_type: Option<VaultAuthType>,
-
- /// AppRole authentication settings. Only used when `authType: approle`.
- /// The actual `role_id` and `secret_id` values live in the KMS Secret
- /// under keys `vault-approle-id` and `vault-approle-secret`.
- #[serde(default, skip_serializing_if = "Option::is_none")]
- pub app_role: Option<VaultAppRoleConfig>,
-}
-
-/// Vault AppRole authentication settings.
-///
-/// Sensitive credentials (`role_id`, `secret_id`) are NOT stored here.
-/// They must be placed in the KMS Secret referenced by `EncryptionConfig::kms_secret`
-/// under keys `vault-approle-id` and `vault-approle-secret`.
-///
-/// NOTE: The rustfs-kms `VaultAuthMethod::AppRole` type exists, but the
-/// Vault backend does **not** implement it yet. These fields are provided
-/// so the CRD/UI is ready when the backend adds support.
-#[derive(Deserialize, Serialize, Clone, Debug, KubeSchema, Default)]
-#[serde(rename_all = "camelCase")]
-pub struct VaultAppRoleConfig {
- /// Engine mount path for AppRole auth (e.g. `approle`).
- #[serde(default, skip_serializing_if = "Option::is_none")]
- pub engine: Option<String>,
-
- /// Retry interval in seconds for AppRole login attempts (default: 10).
- #[serde(default, skip_serializing_if = "Option::is_none")]
- pub retry_seconds: Option<u64>,
}
-/// Local file-based KMS configuration.
-///
-/// Maps to `LocalConfig` in the `rustfs-kms` crate.
-/// Keys are stored as JSON files in the specified directory.
-///
-/// **RustFS binary alignment**: `key_directory` is injected as `RUSTFS_KMS_KEY_DIR` (required by
-/// `rustfs` server startup). `master_key_id` maps to `RUSTFS_KMS_DEFAULT_KEY_ID` (default SSE key id),
-/// not to a "master encryption passphrase" (`RUSTFS_KMS_LOCAL_MASTER_KEY` is separate in rustfs-kms).
+/// Local file-based KMS: key material directory inside the container.
#[derive(Deserialize, Serialize, Clone, Debug, KubeSchema, Default)]
#[serde(rename_all = "camelCase")]
pub struct LocalKmsConfig {
- /// Absolute directory for KMS key files inside the container (default: `/data/kms-keys`).
- /// Must be absolute; RustFS validates this for the local backend.
+ /// Absolute directory for KMS key files (default: `/data/kms-keys`).
#[serde(default, skip_serializing_if = "Option::is_none")]
pub key_directory: Option<String>,
-
- /// Default KMS key id for SSE (maps to `RUSTFS_KMS_DEFAULT_KEY_ID` in the RustFS binary).
- #[serde(default, skip_serializing_if = "Option::is_none")]
- pub master_key_id: Option<String>,
}
/// Encryption / KMS configuration for a Tenant.
///
-/// When enabled, the operator injects environment variables matching the **RustFS server**
-/// (`rustfs` CLI / `init_kms_system`) and `rustfs_kms::KmsConfig::from_env()` where applicable.
-/// See `Tenant::configure_kms` in `tenant/workloads.rs` for the exact variable names.
-///
-/// Example YAML:
-/// ```yaml
-/// spec:
-/// encryption:
-/// enabled: true
-/// backend: vault
-/// vault:
-/// endpoint: "https://vault.example.com:8200"
-/// engine: "kv"
-/// namespace: "tenant1"
-/// prefix: "rustfs"
-/// kmsSecret:
-/// name: "my-tenant-kms-secret"
-/// ```
-///
-/// The referenced Secret must contain backend-specific keys:
-///
-/// **Vault backend (Token auth):**
-/// - `vault-token` (required): Vault authentication token
+/// Injected env vars match the RustFS server (`rustfs/src/config/cli.rs`, `init_kms_system`):
+/// `RUSTFS_KMS_ENABLE`, `RUSTFS_KMS_BACKEND`, `RUSTFS_KMS_KEY_DIR`, `RUSTFS_KMS_LOCAL_KEY_DIR`,
+/// `RUSTFS_KMS_DEFAULT_KEY_ID`, `RUSTFS_KMS_VAULT_ADDRESS`, `RUSTFS_KMS_VAULT_TOKEN`.
///
-/// **Vault backend (AppRole auth):**
-/// - `vault-approle-id` (required): AppRole role ID
-/// - `vault-approle-secret` (required): AppRole secret ID
+/// **Vault Secret:** key `vault-token` (required).
///
-/// **Local backend:**
-/// No secret keys required (keys are stored on disk).
+/// **Local:** no Secret; use a single-server tenant (operator validates replica count).
#[derive(Deserialize, Serialize, Clone, Debug, KubeSchema, Default)]
#[serde(rename_all = "camelCase")]
pub struct EncryptionConfig {
@@ -177,34 +74,30 @@ pub struct EncryptionConfig {
#[serde(default)]
pub enabled: bool,
- /// KMS backend type: `local` or `vault`.
+ /// KMS backend: `local` or `vault`.
#[serde(default)]
pub backend: KmsBackendType,
- /// Vault-specific settings (required when `backend: vault`).
+ /// Vault: HTTP(S) endpoint (required when `backend: vault`).
#[serde(default, skip_serializing_if = "Option::is_none")]
pub vault: Option<VaultKmsConfig>,
- /// Local file-based settings (optional when `backend: local`).
+ /// Local: optional key directory override.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub local: Option<LocalKmsConfig>,
- /// Reference to a Secret containing sensitive KMS credentials
- /// (Vault token or AppRole credentials).
+ /// Secret holding `vault-token` when using Vault.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub kms_secret: Option,
- /// Reserved for future KMS health-check tuning. Not injected into pods: the current RustFS
- /// release does not read `RUSTFS_KMS_PING_SECONDS` in the server startup path.
+ /// Optional default SSE key id (`RUSTFS_KMS_DEFAULT_KEY_ID`).
#[serde(default, skip_serializing_if = "Option::is_none")]
- pub ping_seconds: Option<u64>,
+ pub default_key_id: Option<String>,
}
-/// Pod SecurityContext overrides.
+/// Pod SecurityContext overrides for all RustFS pods in this Tenant.
///
-/// Since RustFS KMS runs in-process (no separate sidecar like MinIO KES),
-/// these values override the default Pod SecurityContext
-/// (runAsUser/runAsGroup/fsGroup = 10001) for all RustFS pods in the Tenant.
+/// Overrides the default Pod SecurityContext (`runAsUser` / `runAsGroup` / `fsGroup` = 10001).
#[derive(Deserialize, Serialize, Clone, Debug, KubeSchema, Default)]
#[serde(rename_all = "camelCase")]
pub struct PodSecurityContextOverride {
@@ -216,11 +109,11 @@ pub struct PodSecurityContextOverride {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub run_as_group: Option<i64>,
- /// GID applied to all volumes mounted in the Pod.
+ /// GID applied to all volumes mounted in the Pod (`fsGroup`).
#[serde(default, skip_serializing_if = "Option::is_none")]
pub fs_group: Option<i64>,
- /// Enforce non-root execution (default: true).
+ /// Enforce non-root execution (default in the operator: `true` when set).
#[serde(default, skip_serializing_if = "Option::is_none")]
pub run_as_non_root: Option<bool>,
}
diff --git a/src/types/v1alpha1/tenant/workloads.rs b/src/types/v1alpha1/tenant/workloads.rs
index a3546c8..ff9f6b2 100755
--- a/src/types/v1alpha1/tenant/workloads.rs
+++ b/src/types/v1alpha1/tenant/workloads.rs
@@ -14,7 +14,7 @@
use super::Tenant;
use crate::types;
-use crate::types::v1alpha1::encryption::{KmsBackendType, VaultAuthType};
+use crate::types::v1alpha1::encryption::KmsBackendType;
use crate::types::v1alpha1::pool::Pool;
use k8s_openapi::api::apps::v1;
use k8s_openapi::api::core::v1 as corev1;
@@ -216,15 +216,13 @@ impl Tenant {
})
}
- /// Build KMS-related environment variables, pod volumes and container volume mounts
- /// based on `spec.encryption`.
+ /// Build KMS-related environment variables for `spec.encryption`.
///
- /// Names align with the RustFS server (`rustfs/src/init.rs`, `rustfs/src/config/cli.rs`) and
- /// `rustfs_kms::KmsConfig::from_env()` (`rustfs/crates/kms/src/config.rs`) for forward compatibility.
- /// The server currently reads `RUSTFS_KMS_ENABLE`, `RUSTFS_KMS_BACKEND`, `RUSTFS_KMS_KEY_DIR`,
- /// `RUSTFS_KMS_DEFAULT_KEY_ID`, `RUSTFS_KMS_VAULT_ADDRESS`, and `RUSTFS_KMS_VAULT_TOKEN` on startup.
+ /// Matches RustFS server startup (`rustfs/src/init.rs` `build_local_kms_config` /
+ /// `build_vault_kms_config`) and CLI env (`rustfs/src/config/cli.rs`): only the variables
+ /// parsed into `Config` are set here.
///
- /// Returns `(env_vars, pod_volumes, volume_mounts)`.
+ /// Returns `(env_vars, pod_volumes, volume_mounts)` — volumes are unused (no TLS cert mounts).
fn configure_kms(
&self,
) -> (
@@ -262,138 +260,62 @@ impl Tenant {
value: Some(vault.endpoint.clone()),
..Default::default()
});
- if let Some(ref ns) = vault.namespace {
- env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_VAULT_NAMESPACE".to_owned(),
- value: Some(ns.clone()),
- ..Default::default()
- });
- }
- // Transit secrets engine mount (rustfs-kms default: `transit`).
- env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_VAULT_MOUNT_PATH".to_owned(),
- value: Some("transit".to_owned()),
- ..Default::default()
- });
- if let Some(ref kv_mount) = vault.engine {
- env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_VAULT_KV_MOUNT".to_owned(),
- value: Some(kv_mount.clone()),
- ..Default::default()
- });
- }
- if let Some(ref prefix) = vault.prefix {
- env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_VAULT_KEY_PREFIX".to_owned(),
- value: Some(prefix.clone()),
- ..Default::default()
- });
- }
}
- let auth_type = enc
- .vault
- .as_ref()
- .and_then(|v| v.auth_type.as_ref())
- .cloned()
- .unwrap_or_default();
- let is_approle = auth_type == VaultAuthType::Approle;
-
- if is_approle {
+ if let Some(ref secret_ref) = enc.kms_secret
+ && !secret_ref.name.is_empty()
+ {
env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_VAULT_AUTH_TYPE".to_owned(),
- value: Some("approle".to_owned()),
+ name: "RUSTFS_KMS_VAULT_TOKEN".to_owned(),
+ value_from: Some(corev1::EnvVarSource {
+ secret_key_ref: Some(corev1::SecretKeySelector {
+ name: secret_ref.name.clone(),
+ key: "vault-token".to_string(),
+ optional: Some(false),
+ }),
+ ..Default::default()
+ }),
..Default::default()
});
-
- if let Some(ar) = enc.vault.as_ref().and_then(|v| v.app_role.as_ref()) {
- if let Some(ref approle_engine) = ar.engine {
- env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_VAULT_APPROLE_ENGINE".to_owned(),
- value: Some(approle_engine.clone()),
- ..Default::default()
- });
- }
- if let Some(retry) = ar.retry_seconds {
- env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_VAULT_APPROLE_RETRY_SECONDS".to_owned(),
- value: Some(retry.to_string()),
- ..Default::default()
- });
- }
- }
}
- if let Some(ref secret_ref) = enc.kms_secret
- && !secret_ref.name.is_empty()
+ if let Some(ref id) = enc.default_key_id
+ && !id.is_empty()
{
- if is_approle {
- env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_VAULT_APPROLE_ID".to_owned(),
- value_from: Some(corev1::EnvVarSource {
- secret_key_ref: Some(corev1::SecretKeySelector {
- name: secret_ref.name.clone(),
- key: "vault-approle-id".to_string(),
- optional: Some(false),
- }),
- ..Default::default()
- }),
- ..Default::default()
- });
- env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_VAULT_APPROLE_SECRET".to_owned(),
- value_from: Some(corev1::EnvVarSource {
- secret_key_ref: Some(corev1::SecretKeySelector {
- name: secret_ref.name.clone(),
- key: "vault-approle-secret".to_string(),
- optional: Some(false),
- }),
- ..Default::default()
- }),
- ..Default::default()
- });
- } else {
- env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_VAULT_TOKEN".to_owned(),
- value_from: Some(corev1::EnvVarSource {
- secret_key_ref: Some(corev1::SecretKeySelector {
- name: secret_ref.name.clone(),
- key: "vault-token".to_string(),
- optional: Some(false),
- }),
- ..Default::default()
- }),
- ..Default::default()
- });
- }
+ env.push(corev1::EnvVar {
+ name: "RUSTFS_KMS_DEFAULT_KEY_ID".to_owned(),
+ value: Some(id.clone()),
+ ..Default::default()
+ });
}
}
KmsBackendType::Local => {
- let local_cfg = enc.local.as_ref();
- let key_dir = local_cfg
+ let key_dir = enc
+ .local
+ .as_ref()
.and_then(|l| l.key_directory.as_deref())
.unwrap_or("/data/kms-keys");
- let default_key_id = local_cfg
- .and_then(|l| l.master_key_id.as_deref())
- .unwrap_or("default-master-key");
- // `rustfs` server reads `RUSTFS_KMS_KEY_DIR` (see `build_local_kms_config`).
env.push(corev1::EnvVar {
name: "RUSTFS_KMS_KEY_DIR".to_owned(),
value: Some(key_dir.to_string()),
..Default::default()
});
- // Duplicate for `rustfs_kms::KmsConfig::from_env()` which uses `RUSTFS_KMS_LOCAL_KEY_DIR`.
env.push(corev1::EnvVar {
name: "RUSTFS_KMS_LOCAL_KEY_DIR".to_owned(),
value: Some(key_dir.to_string()),
..Default::default()
});
- env.push(corev1::EnvVar {
- name: "RUSTFS_KMS_DEFAULT_KEY_ID".to_owned(),
- value: Some(default_key_id.to_string()),
- ..Default::default()
- });
+
+ if let Some(ref id) = enc.default_key_id
+ && !id.is_empty()
+ {
+ env.push(corev1::EnvVar {
+ name: "RUSTFS_KMS_DEFAULT_KEY_ID".to_owned(),
+ value: Some(id.clone()),
+ ..Default::default()
+ });
+ }
}
}