From 286febe7c51915ff7e859d9e866ef836efd88bcf Mon Sep 17 00:00:00 2001 From: Cole Gentry Date: Sat, 24 Jan 2026 18:56:26 -0500 Subject: [PATCH 1/2] Adds new sourcing method for adapters and protocols Signed-off-by: Cole Gentry --- src/adapters/api.rs | 245 +++++++++++++++++++++++++++ src/adapters/cache.rs | 354 +++++++++++++++++++++++++++++++++++++++ src/adapters/mod.rs | 7 +- src/adapters/registry.rs | 305 ++++++++++++++++++++++++++++----- src/adapters/types.rs | 34 ++-- src/app.rs | 42 +++++ src/normalize.rs | 2 +- src/parsers/types.rs | 2 +- src/ui/histogram.rs | 2 +- 9 files changed, 931 insertions(+), 62 deletions(-) create mode 100644 src/adapters/api.rs create mode 100644 src/adapters/cache.rs diff --git a/src/adapters/api.rs b/src/adapters/api.rs new file mode 100644 index 0000000..1536751 --- /dev/null +++ b/src/adapters/api.rs @@ -0,0 +1,245 @@ +//! API client for fetching adapter and protocol specs from OpenECUAlliance. +//! +//! This module provides functions to fetch specs from the OpenECUAlliance API +//! at runtime, enabling dynamic spec updates without recompiling. + +use serde::Deserialize; +use thiserror::Error; + +use super::types::{AdapterSpec, ProtocolSpec}; + +/// Base URL for the OpenECUAlliance API +const OECUA_API_BASE: &str = "https://openecualliance.org"; + +/// User agent for API requests +const USER_AGENT: &str = concat!("UltraLog/", env!("CARGO_PKG_VERSION")); + +// ============================================================================ +// Error Types +// ============================================================================ + +/// Errors that can occur when fetching specs from the API +#[derive(Debug, Error)] +pub enum ApiError { + /// Network error during request + #[error("Network error: {0}")] + NetworkError(String), + + /// API returned an error response + #[error("API error (status {status}): {message}")] + ApiResponseError { status: u16, message: String }, + + /// Failed to parse API response + #[error("Parse error: {0}")] + ParseError(String), +} + +// ============================================================================ +// API Response Types +// ============================================================================ + +/// Summary of an adapter from the list endpoint +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct AdapterSummary { + pub id: String, + pub name: String, + pub version: String, + pub vendor: String, + pub channel_count: u32, + #[serde(default)] + pub description: Option, +} + +/// Summary of a protocol from the list endpoint +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ProtocolSummary { + pub id: String, + pub name: String, + pub version: String, + pub vendor: String, + pub message_count: u32, + #[serde(default)] + pub description: Option, +} + +/// Response wrapper for list endpoints +#[derive(Debug, Clone, Deserialize)] +pub struct ListResponse { + pub data: Vec, + #[serde(default)] + pub count: Option, +} + +// ============================================================================ +// API Client Functions +// ============================================================================ + +/// Fetch list of all adapters (summary only) +pub fn fetch_adapter_list() -> Result, ApiError> { + let url = format!("{}/api/adapters", OECUA_API_BASE); + + let mut response = ureq::get(&url) + .header("User-Agent", USER_AGENT) + .call() + .map_err(|e| match e { + ureq::Error::StatusCode(status) => ApiError::ApiResponseError { + status, + 
message: format!("HTTP {}", status), + }, + _ => ApiError::NetworkError(e.to_string()), + })?; + + let list: ListResponse = response + .body_mut() + .read_json() + .map_err(|e| ApiError::ParseError(e.to_string()))?; + + Ok(list.data) +} + +/// Fetch a single adapter with full details +pub fn fetch_adapter(vendor: &str, id: &str) -> Result { + let url = format!("{}/api/adapters/{}/{}", OECUA_API_BASE, vendor, id); + + let mut response = ureq::get(&url) + .header("User-Agent", USER_AGENT) + .call() + .map_err(|e| match e { + ureq::Error::StatusCode(status) => ApiError::ApiResponseError { + status, + message: format!("HTTP {}", status), + }, + _ => ApiError::NetworkError(e.to_string()), + })?; + + response + .body_mut() + .read_json() + .map_err(|e| ApiError::ParseError(e.to_string())) +} + +/// Fetch all adapters with full details +/// This fetches the list first, then fetches each adapter individually +pub fn fetch_all_adapters() -> Result, ApiError> { + let summaries = fetch_adapter_list()?; + + let mut adapters = Vec::with_capacity(summaries.len()); + for summary in summaries { + match fetch_adapter(&summary.vendor, &summary.id) { + Ok(adapter) => adapters.push(adapter), + Err(e) => { + tracing::warn!( + "Failed to fetch adapter {}/{}: {}", + summary.vendor, + summary.id, + e + ); + // Continue with other adapters + } + } + } + + Ok(adapters) +} + +/// Fetch list of all protocols (summary only) +pub fn fetch_protocol_list() -> Result, ApiError> { + let url = format!("{}/api/protocols", OECUA_API_BASE); + + let mut response = ureq::get(&url) + .header("User-Agent", USER_AGENT) + .call() + .map_err(|e| match e { + ureq::Error::StatusCode(status) => ApiError::ApiResponseError { + status, + message: format!("HTTP {}", status), + }, + _ => ApiError::NetworkError(e.to_string()), + })?; + + let list: ListResponse = response + .body_mut() + .read_json() + .map_err(|e| ApiError::ParseError(e.to_string()))?; + + Ok(list.data) +} + +/// Fetch a single protocol with full details +pub fn fetch_protocol(vendor: &str, id: &str) -> Result { + let url = format!("{}/api/protocols/{}/{}", OECUA_API_BASE, vendor, id); + + let mut response = ureq::get(&url) + .header("User-Agent", USER_AGENT) + .call() + .map_err(|e| match e { + ureq::Error::StatusCode(status) => ApiError::ApiResponseError { + status, + message: format!("HTTP {}", status), + }, + _ => ApiError::NetworkError(e.to_string()), + })?; + + response + .body_mut() + .read_json() + .map_err(|e| ApiError::ParseError(e.to_string())) +} + +/// Fetch all protocols with full details +/// This fetches the list first, then fetches each protocol individually +pub fn fetch_all_protocols() -> Result, ApiError> { + let summaries = fetch_protocol_list()?; + + let mut protocols = Vec::with_capacity(summaries.len()); + for summary in summaries { + match fetch_protocol(&summary.vendor, &summary.id) { + Ok(protocol) => protocols.push(protocol), + Err(e) => { + tracing::warn!( + "Failed to fetch protocol {}/{}: {}", + summary.vendor, + summary.id, + e + ); + // Continue with other protocols + } + } + } + + Ok(protocols) +} + +#[cfg(test)] +mod tests { + use super::*; + + // Note: These tests require network access and a running API server + // They are marked as ignored by default for CI/CD pipelines + + #[test] + #[ignore] + fn test_fetch_adapter_list() { + let result = fetch_adapter_list(); + assert!(result.is_ok(), "Failed to fetch adapter list: {:?}", result); + + let adapters = result.unwrap(); + assert!(!adapters.is_empty(), "Adapter list should not be empty"); 
+ } + + #[test] + #[ignore] + fn test_fetch_protocol_list() { + let result = fetch_protocol_list(); + assert!( + result.is_ok(), + "Failed to fetch protocol list: {:?}", + result + ); + + let protocols = result.unwrap(); + assert!(!protocols.is_empty(), "Protocol list should not be empty"); + } +} diff --git a/src/adapters/cache.rs b/src/adapters/cache.rs new file mode 100644 index 0000000..33ccf63 --- /dev/null +++ b/src/adapters/cache.rs @@ -0,0 +1,354 @@ +//! Disk cache for OpenECUAlliance adapter and protocol specs. +//! +//! This module provides local caching of specs fetched from the API, +//! reducing network requests and providing offline access. + +use serde::{Deserialize, Serialize}; +use std::fs; +use std::path::PathBuf; +use std::time::{Duration, SystemTime}; +use thiserror::Error; + +use super::types::{AdapterSpec, ProtocolSpec}; + +/// Cache directory name within app data +const CACHE_DIR_NAME: &str = "oecua_specs"; + +/// Adapters subdirectory +const ADAPTERS_DIR: &str = "adapters"; + +/// Protocols subdirectory +const PROTOCOLS_DIR: &str = "protocols"; + +/// Metadata file name +const METADATA_FILE: &str = "metadata.json"; + +/// Default cache staleness threshold (24 hours) +const DEFAULT_CACHE_MAX_AGE_SECS: u64 = 24 * 60 * 60; + +// ============================================================================ +// Error Types +// ============================================================================ + +/// Errors that can occur during cache operations +#[derive(Debug, Error)] +pub enum CacheError { + /// Failed to create cache directory + #[error("Failed to create cache directory: {0}")] + CreateDirError(String), + + /// Failed to read cache file + #[error("Failed to read cache file: {0}")] + ReadError(String), + + /// Failed to write cache file + #[error("Failed to write cache file: {0}")] + WriteError(String), + + /// Failed to parse cached data + #[error("Failed to parse cached data: {0}")] + ParseError(String), + + /// Cache directory not found + #[error("Cache directory not available")] + NoCacheDir, +} + +// ============================================================================ +// Cache Metadata +// ============================================================================ + +/// Metadata about the cache state +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CacheMetadata { + /// Timestamp of last successful fetch (Unix epoch seconds) + pub last_fetch_timestamp: u64, + + /// Version of the cache format + pub cache_version: u32, + + /// Number of cached adapters + pub adapter_count: u32, + + /// Number of cached protocols + pub protocol_count: u32, +} + +impl Default for CacheMetadata { + fn default() -> Self { + Self { + last_fetch_timestamp: 0, + cache_version: 1, + adapter_count: 0, + protocol_count: 0, + } + } +} + +// ============================================================================ +// Cache Directory Functions +// ============================================================================ + +/// Get the cache directory path +/// Returns None if the app data directory cannot be determined +pub fn get_cache_dir() -> Option { + dirs::data_dir().map(|base| base.join("UltraLog").join(CACHE_DIR_NAME)) +} + +/// Ensure the cache directory structure exists +fn ensure_cache_dirs() -> Result { + let cache_dir = get_cache_dir().ok_or(CacheError::NoCacheDir)?; + + let adapters_dir = cache_dir.join(ADAPTERS_DIR); + let protocols_dir = cache_dir.join(PROTOCOLS_DIR); + + fs::create_dir_all(&adapters_dir) + .map_err(|e| 
CacheError::CreateDirError(format!("adapters: {}", e)))?; + + fs::create_dir_all(&protocols_dir) + .map_err(|e| CacheError::CreateDirError(format!("protocols: {}", e)))?; + + Ok(cache_dir) +} + +// ============================================================================ +// Metadata Operations +// ============================================================================ + +/// Load cache metadata +pub fn load_metadata() -> Option { + let cache_dir = get_cache_dir()?; + let metadata_path = cache_dir.join(METADATA_FILE); + + let content = fs::read_to_string(&metadata_path).ok()?; + serde_json::from_str(&content).ok() +} + +/// Save cache metadata +fn save_metadata(metadata: &CacheMetadata) -> Result<(), CacheError> { + let cache_dir = ensure_cache_dirs()?; + let metadata_path = cache_dir.join(METADATA_FILE); + + let content = serde_json::to_string_pretty(metadata) + .map_err(|e| CacheError::WriteError(e.to_string()))?; + + fs::write(&metadata_path, content).map_err(|e| CacheError::WriteError(e.to_string())) +} + +/// Check if the cache is stale (older than max age) +pub fn is_cache_stale() -> bool { + is_cache_stale_with_max_age(DEFAULT_CACHE_MAX_AGE_SECS) +} + +/// Check if the cache is stale with a custom max age +pub fn is_cache_stale_with_max_age(max_age_secs: u64) -> bool { + let Some(metadata) = load_metadata() else { + return true; // No metadata means cache is stale + }; + + let now = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .map(|d| d.as_secs()) + .unwrap_or(0); + + let age = now.saturating_sub(metadata.last_fetch_timestamp); + age > max_age_secs +} + +/// Get the age of the cache in seconds, or None if no cache +pub fn get_cache_age() -> Option { + let metadata = load_metadata()?; + + let now = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .ok()?; + + let fetch_time = Duration::from_secs(metadata.last_fetch_timestamp); + now.checked_sub(fetch_time) +} + +// ============================================================================ +// Adapter Cache Operations +// ============================================================================ + +/// Load all cached adapters +pub fn load_cached_adapters() -> Option> { + let cache_dir = get_cache_dir()?; + let adapters_dir = cache_dir.join(ADAPTERS_DIR); + + if !adapters_dir.exists() { + return None; + } + + let mut adapters = Vec::new(); + + let entries = fs::read_dir(&adapters_dir).ok()?; + for entry in entries.flatten() { + let path = entry.path(); + if path.extension().is_some_and(|ext| ext == "json") { + if let Ok(content) = fs::read_to_string(&path) { + if let Ok(adapter) = serde_json::from_str::(&content) { + adapters.push(adapter); + } else { + tracing::warn!("Failed to parse cached adapter: {:?}", path); + } + } + } + } + + if adapters.is_empty() { + None + } else { + Some(adapters) + } +} + +/// Save adapters to cache +pub fn save_adapters_to_cache(adapters: &[AdapterSpec]) -> Result<(), CacheError> { + let cache_dir = ensure_cache_dirs()?; + let adapters_dir = cache_dir.join(ADAPTERS_DIR); + + // Clear existing cached adapters + if let Ok(entries) = fs::read_dir(&adapters_dir) { + for entry in entries.flatten() { + let _ = fs::remove_file(entry.path()); + } + } + + // Save each adapter + for adapter in adapters { + let filename = format!("{}-{}.json", adapter.vendor, adapter.id); + let path = adapters_dir.join(&filename); + + let content = serde_json::to_string_pretty(adapter) + .map_err(|e| CacheError::WriteError(format!("{}: {}", filename, e)))?; + + fs::write(&path, content) + 
.map_err(|e| CacheError::WriteError(format!("{}: {}", filename, e)))?; + } + + // Update metadata + let mut metadata = load_metadata().unwrap_or_default(); + metadata.adapter_count = adapters.len() as u32; + metadata.last_fetch_timestamp = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .map(|d| d.as_secs()) + .unwrap_or(0); + save_metadata(&metadata)?; + + Ok(()) +} + +// ============================================================================ +// Protocol Cache Operations +// ============================================================================ + +/// Load all cached protocols +pub fn load_cached_protocols() -> Option> { + let cache_dir = get_cache_dir()?; + let protocols_dir = cache_dir.join(PROTOCOLS_DIR); + + if !protocols_dir.exists() { + return None; + } + + let mut protocols = Vec::new(); + + let entries = fs::read_dir(&protocols_dir).ok()?; + for entry in entries.flatten() { + let path = entry.path(); + if path.extension().is_some_and(|ext| ext == "json") { + if let Ok(content) = fs::read_to_string(&path) { + if let Ok(protocol) = serde_json::from_str::(&content) { + protocols.push(protocol); + } else { + tracing::warn!("Failed to parse cached protocol: {:?}", path); + } + } + } + } + + if protocols.is_empty() { + None + } else { + Some(protocols) + } +} + +/// Save protocols to cache +pub fn save_protocols_to_cache(protocols: &[ProtocolSpec]) -> Result<(), CacheError> { + let cache_dir = ensure_cache_dirs()?; + let protocols_dir = cache_dir.join(PROTOCOLS_DIR); + + // Clear existing cached protocols + if let Ok(entries) = fs::read_dir(&protocols_dir) { + for entry in entries.flatten() { + let _ = fs::remove_file(entry.path()); + } + } + + // Save each protocol + for protocol in protocols { + let filename = format!("{}-{}.json", protocol.vendor, protocol.id); + let path = protocols_dir.join(&filename); + + let content = serde_json::to_string_pretty(protocol) + .map_err(|e| CacheError::WriteError(format!("{}: {}", filename, e)))?; + + fs::write(&path, content) + .map_err(|e| CacheError::WriteError(format!("{}: {}", filename, e)))?; + } + + // Update metadata + let mut metadata = load_metadata().unwrap_or_default(); + metadata.protocol_count = protocols.len() as u32; + metadata.last_fetch_timestamp = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .map(|d| d.as_secs()) + .unwrap_or(0); + save_metadata(&metadata)?; + + Ok(()) +} + +/// Clear the entire cache +pub fn clear_cache() -> Result<(), CacheError> { + let cache_dir = get_cache_dir().ok_or(CacheError::NoCacheDir)?; + + if cache_dir.exists() { + fs::remove_dir_all(&cache_dir) + .map_err(|e| CacheError::WriteError(format!("Failed to clear cache: {}", e)))?; + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_get_cache_dir() { + let cache_dir = get_cache_dir(); + assert!(cache_dir.is_some(), "Cache directory should be available"); + + let path = cache_dir.unwrap(); + assert!( + path.to_string_lossy().contains("UltraLog"), + "Cache path should contain UltraLog" + ); + assert!( + path.to_string_lossy().contains("oecua_specs"), + "Cache path should contain oecua_specs" + ); + } + + #[test] + fn test_cache_staleness_with_no_metadata() { + // With no metadata file, cache should be considered stale + // This test doesn't create any files, so it checks default behavior + // Note: This may pass or fail depending on whether metadata exists from other runs + let stale = is_cache_stale_with_max_age(0); + assert!(stale, "Cache with no metadata should be stale"); + } +} 
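The registry later in this patch wires the fallback up itself (`load_adapters_with_fallback` plus `refresh_specs_from_api`), so the following is only an illustrative sketch of how the cache and API surfaces defined above compose; the helper name `load_or_refresh_adapters` is hypothetical and not part of the change.

```rust
// Illustrative sketch only: return cached adapters when the cache is fresh,
// otherwise try the API and fall back to whatever cache exists.
use crate::adapters::{api, cache, AdapterSpec};

fn load_or_refresh_adapters() -> Option<Vec<AdapterSpec>> {
    // Fresh cache: use it without touching the network.
    if !cache::is_cache_stale() {
        if let Some(adapters) = cache::load_cached_adapters() {
            return Some(adapters);
        }
    }

    // Stale or missing cache: try the API and persist on success.
    match api::fetch_all_adapters() {
        Ok(adapters) => {
            if let Err(e) = cache::save_adapters_to_cache(&adapters) {
                tracing::warn!("Could not update spec cache: {}", e);
            }
            Some(adapters)
        }
        // Offline or API error: a stale cache is still better than nothing.
        Err(_) => cache::load_cached_adapters(),
    }
}
```

The patch itself keeps this work off the startup path by running `refresh_specs_from_api` on a background thread instead.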
diff --git a/src/adapters/mod.rs b/src/adapters/mod.rs index 1b4a15b..eb8f01c 100644 --- a/src/adapters/mod.rs +++ b/src/adapters/mod.rs @@ -20,6 +20,8 @@ //! } //! ``` +pub mod api; +pub mod cache; pub mod registry; pub mod types; @@ -27,8 +29,9 @@ pub mod types; pub use registry::{ find_adapters_by_extension, find_protocols_by_vendor, get_adapter_by_id, get_adapters, get_adapters_by_vendor, get_all_categories, get_channel_metadata, get_channels_by_category, - get_protocol_by_id, get_protocols, get_spec_normalizations, has_spec_normalization, - normalize_from_spec, ChannelMetadata, + get_protocol_by_id, get_protocols, get_spec_normalizations, get_spec_source, + has_spec_normalization, normalize_from_spec, refresh_specs_from_api, specs_refreshed, + ChannelMetadata, RefreshResult, }; pub use types::{ AdapterSpec, ByteOrder, ChannelCategory, ChannelSpec, DataType, EnumSpec, FileFormatSpec, diff --git a/src/adapters/registry.rs b/src/adapters/registry.rs index 0c35430..f8ca0de 100644 --- a/src/adapters/registry.rs +++ b/src/adapters/registry.rs @@ -1,13 +1,17 @@ //! Adapter registry for loading and managing OpenECU Alliance adapter specifications. //! //! This module provides functionality to: -//! - Load embedded adapter YAML files at compile time +//! - Load adapter YAML files with fallback chain: API -> cache -> embedded //! - Build normalization maps from channel source_names //! - Look up channel metadata by source name +//! - Support background refresh of specs from the API use std::collections::HashMap; -use std::sync::LazyLock; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::{LazyLock, RwLock}; +use super::api; +use super::cache; use super::types::{AdapterSpec, ChannelCategory, ChannelSpec, ProtocolSpec}; // Embed adapter YAML files at compile time @@ -75,33 +79,78 @@ static EMBEDDED_PROTOCOLS: &[&str] = &[ EMTRON_PROTOCOL_YAML, ]; -/// Parsed adapter specifications (loaded lazily) -static ADAPTER_SPECS: LazyLock> = LazyLock::new(|| { +/// Parse embedded adapter YAML strings +fn parse_embedded_adapters() -> Vec { EMBEDDED_ADAPTERS .iter() .filter_map(|yaml| match serde_yaml::from_str(yaml) { Ok(spec) => Some(spec), Err(e) => { - tracing::warn!("Failed to parse adapter YAML: {}", e); + tracing::warn!("Failed to parse embedded adapter YAML: {}", e); None } }) .collect() -}); +} -/// Parsed protocol specifications (loaded lazily) -static PROTOCOL_SPECS: LazyLock> = LazyLock::new(|| { +/// Parse embedded protocol YAML strings +fn parse_embedded_protocols() -> Vec { EMBEDDED_PROTOCOLS .iter() .filter_map(|yaml| match serde_yaml::from_str(yaml) { Ok(spec) => Some(spec), Err(e) => { - tracing::warn!("Failed to parse protocol YAML: {}", e); + tracing::warn!("Failed to parse embedded protocol YAML: {}", e); None } }) .collect() -}); +} + +/// Load adapters with fallback chain: cache -> embedded +/// API fetch is done in background to avoid blocking startup +fn load_adapters_with_fallback() -> Vec { + // 1. Try loading from cache first (fast, non-blocking) + if !cache::is_cache_stale() { + if let Some(cached) = cache::load_cached_adapters() { + tracing::info!("Loaded {} adapters from cache", cached.len()); + return cached; + } + } + + // 2. Fall back to embedded specs (always available) + tracing::info!("Using embedded adapter specs"); + parse_embedded_adapters() +} + +/// Load protocols with fallback chain: cache -> embedded +/// API fetch is done in background to avoid blocking startup +fn load_protocols_with_fallback() -> Vec { + // 1. 
Try loading from cache first (fast, non-blocking) + if !cache::is_cache_stale() { + if let Some(cached) = cache::load_cached_protocols() { + tracing::info!("Loaded {} protocols from cache", cached.len()); + return cached; + } + } + + // 2. Fall back to embedded specs (always available) + tracing::info!("Using embedded protocol specs"); + parse_embedded_protocols() +} + +/// Tracks whether specs have been refreshed from API +static SPECS_REFRESHED: AtomicBool = AtomicBool::new(false); + +/// Dynamically updatable adapter specifications +/// Initial load uses cache/embedded, background refresh updates from API +static ADAPTER_SPECS: LazyLock>> = + LazyLock::new(|| RwLock::new(load_adapters_with_fallback())); + +/// Dynamically updatable protocol specifications +/// Initial load uses cache/embedded, background refresh updates from API +static PROTOCOL_SPECS: LazyLock>> = + LazyLock::new(|| RwLock::new(load_protocols_with_fallback())); /// Channel metadata lookup by source name (lowercase) #[derive(Debug, Clone)] @@ -125,9 +174,26 @@ pub struct ChannelMetadata { } /// Normalization map: source name (lowercase) -> canonical display name -static SPEC_NORMALIZATION_MAP: LazyLock> = LazyLock::new(|| { +/// Uses RwLock to allow dynamic updates from API refresh +static SPEC_NORMALIZATION_MAP: LazyLock>> = LazyLock::new(|| { + RwLock::new(build_normalization_map( + &ADAPTER_SPECS.read().expect("Failed to read adapter specs"), + )) +}); + +/// Channel metadata lookup: source name (lowercase) -> full metadata +/// Uses RwLock to allow dynamic updates from API refresh +static CHANNEL_METADATA_MAP: LazyLock>> = + LazyLock::new(|| { + RwLock::new(build_metadata_map( + &ADAPTER_SPECS.read().expect("Failed to read adapter specs"), + )) + }); + +/// Build normalization map from adapter specs +fn build_normalization_map(adapters: &[AdapterSpec]) -> HashMap { let mut map = HashMap::new(); - for adapter in ADAPTER_SPECS.iter() { + for adapter in adapters { for channel in &adapter.channels { for source_name in &channel.source_names { // Use display name as the normalized name @@ -136,12 +202,12 @@ static SPEC_NORMALIZATION_MAP: LazyLock> = LazyLock::new } } map -}); +} -/// Channel metadata lookup: source name (lowercase) -> full metadata -static CHANNEL_METADATA_MAP: LazyLock> = LazyLock::new(|| { +/// Build metadata map from adapter specs +fn build_metadata_map(adapters: &[AdapterSpec]) -> HashMap { let mut map = HashMap::new(); - for adapter in ADAPTER_SPECS.iter() { + for adapter in adapters { for channel in &adapter.channels { let metadata = ChannelMetadata { canonical_id: channel.id.clone(), @@ -159,51 +225,78 @@ static CHANNEL_METADATA_MAP: LazyLock> = LazyLo } } map -}); +} /// Get all loaded adapter specifications -pub fn get_adapters() -> &'static Vec { - &ADAPTER_SPECS +pub fn get_adapters() -> Vec { + ADAPTER_SPECS + .read() + .expect("Failed to read adapter specs") + .clone() } /// Get adapter by ID -pub fn get_adapter_by_id(id: &str) -> Option<&'static AdapterSpec> { - ADAPTER_SPECS.iter().find(|a| a.id == id) +pub fn get_adapter_by_id(id: &str) -> Option { + ADAPTER_SPECS + .read() + .expect("Failed to read adapter specs") + .iter() + .find(|a| a.id == id) + .cloned() } /// Get adapter by vendor name -pub fn get_adapters_by_vendor(vendor: &str) -> Vec<&'static AdapterSpec> { +pub fn get_adapters_by_vendor(vendor: &str) -> Vec { let vendor_lower = vendor.to_lowercase(); ADAPTER_SPECS + .read() + .expect("Failed to read adapter specs") .iter() .filter(|a| a.vendor.to_lowercase() == vendor_lower) + 
.cloned() .collect() } /// Normalize a channel name using the spec-driven normalization map. /// Returns the canonical display name if found, otherwise returns the original name. pub fn normalize_from_spec(name: &str) -> Option { - SPEC_NORMALIZATION_MAP.get(&name.to_lowercase()).cloned() + SPEC_NORMALIZATION_MAP + .read() + .expect("Failed to read normalization map") + .get(&name.to_lowercase()) + .cloned() } /// Get channel metadata by source name -pub fn get_channel_metadata(name: &str) -> Option<&'static ChannelMetadata> { - CHANNEL_METADATA_MAP.get(&name.to_lowercase()) +pub fn get_channel_metadata(name: &str) -> Option { + CHANNEL_METADATA_MAP + .read() + .expect("Failed to read metadata map") + .get(&name.to_lowercase()) + .cloned() } /// Get all spec-based normalization mappings as (source_name, display_name) pairs. /// This can be used to merge with or enhance the existing normalize.rs mappings. -pub fn get_spec_normalizations() -> impl Iterator { - SPEC_NORMALIZATION_MAP.iter() +pub fn get_spec_normalizations() -> Vec<(String, String)> { + SPEC_NORMALIZATION_MAP + .read() + .expect("Failed to read normalization map") + .iter() + .map(|(k, v)| (k.clone(), v.clone())) + .collect() } /// Check if a channel name has spec-based normalization pub fn has_spec_normalization(name: &str) -> bool { - SPEC_NORMALIZATION_MAP.contains_key(&name.to_lowercase()) + SPEC_NORMALIZATION_MAP + .read() + .expect("Failed to read normalization map") + .contains_key(&name.to_lowercase()) } /// Find matching adapters for a file based on extension -pub fn find_adapters_by_extension(extension: &str) -> Vec<&'static AdapterSpec> { +pub fn find_adapters_by_extension(extension: &str) -> Vec { let ext = if extension.starts_with('.') { extension.to_lowercase() } else { @@ -211,6 +304,8 @@ pub fn find_adapters_by_extension(extension: &str) -> Vec<&'static AdapterSpec> }; ADAPTER_SPECS + .read() + .expect("Failed to read adapter specs") .iter() .filter(|a| { a.file_format @@ -218,12 +313,15 @@ pub fn find_adapters_by_extension(extension: &str) -> Vec<&'static AdapterSpec> .iter() .any(|e| e.to_lowercase() == ext) }) + .cloned() .collect() } /// Get all unique channel categories from loaded adapters pub fn get_all_categories() -> Vec { let mut categories: Vec = ADAPTER_SPECS + .read() + .expect("Failed to read adapter specs") .iter() .flat_map(|a| a.channels.iter().map(|c| c.category)) .collect(); @@ -233,10 +331,12 @@ pub fn get_all_categories() -> Vec { } /// Get all channels for a specific category across all adapters -pub fn get_channels_by_category(category: ChannelCategory) -> Vec<&'static ChannelSpec> { +pub fn get_channels_by_category(category: ChannelCategory) -> Vec { ADAPTER_SPECS + .read() + .expect("Failed to read adapter specs") .iter() - .flat_map(|a| a.channels.iter()) + .flat_map(|a| a.channels.iter().cloned()) .filter(|c| c.category == category) .collect() } @@ -246,24 +346,137 @@ pub fn get_channels_by_category(category: ChannelCategory) -> Vec<&'static Chann // ============================================================================ /// Get all loaded protocol specifications -pub fn get_protocols() -> &'static Vec { - &PROTOCOL_SPECS +pub fn get_protocols() -> Vec { + PROTOCOL_SPECS + .read() + .expect("Failed to read protocol specs") + .clone() } /// Get protocol by ID -pub fn get_protocol_by_id(id: &str) -> Option<&'static ProtocolSpec> { - PROTOCOL_SPECS.iter().find(|p| p.id == id) +pub fn get_protocol_by_id(id: &str) -> Option { + PROTOCOL_SPECS + .read() + .expect("Failed to read 
protocol specs") + .iter() + .find(|p| p.id == id) + .cloned() } /// Get protocols by vendor name -pub fn find_protocols_by_vendor(vendor: &str) -> Vec<&'static ProtocolSpec> { +pub fn find_protocols_by_vendor(vendor: &str) -> Vec { let vendor_lower = vendor.to_lowercase(); PROTOCOL_SPECS + .read() + .expect("Failed to read protocol specs") .iter() .filter(|p| p.vendor.to_lowercase() == vendor_lower) + .cloned() .collect() } +// ============================================================================ +// Background Refresh Functions +// ============================================================================ + +/// Result of a spec refresh operation +#[derive(Debug, Clone)] +pub enum RefreshResult { + /// Successfully refreshed from API + Success { + adapters_count: usize, + protocols_count: usize, + }, + /// Failed to refresh (using cached/embedded data) + Failed(String), + /// Already refreshed, skipped + AlreadyRefreshed, +} + +/// Refresh specs from the API and update the registry +/// This function is designed to be called from a background thread +pub fn refresh_specs_from_api() -> RefreshResult { + // Check if already refreshed to avoid redundant API calls + if SPECS_REFRESHED.load(Ordering::SeqCst) { + return RefreshResult::AlreadyRefreshed; + } + + tracing::info!("Refreshing specs from OpenECUAlliance API..."); + + // Fetch adapters from API + let adapters_result = api::fetch_all_adapters(); + let protocols_result = api::fetch_all_protocols(); + + match (adapters_result, protocols_result) { + (Ok(adapters), Ok(protocols)) => { + let adapters_count = adapters.len(); + let protocols_count = protocols.len(); + + // Save to cache + if let Err(e) = cache::save_adapters_to_cache(&adapters) { + tracing::warn!("Failed to cache adapters: {}", e); + } + if let Err(e) = cache::save_protocols_to_cache(&protocols) { + tracing::warn!("Failed to cache protocols: {}", e); + } + + // Update the registry + if let Ok(mut adapter_lock) = ADAPTER_SPECS.write() { + *adapter_lock = adapters; + } + if let Ok(mut protocol_lock) = PROTOCOL_SPECS.write() { + *protocol_lock = protocols; + } + + // Rebuild derived maps + if let Ok(specs) = ADAPTER_SPECS.read() { + if let Ok(mut norm_lock) = SPEC_NORMALIZATION_MAP.write() { + *norm_lock = build_normalization_map(&specs); + } + if let Ok(mut meta_lock) = CHANNEL_METADATA_MAP.write() { + *meta_lock = build_metadata_map(&specs); + } + } + + SPECS_REFRESHED.store(true, Ordering::SeqCst); + tracing::info!( + "Successfully refreshed {} adapters and {} protocols from API", + adapters_count, + protocols_count + ); + + RefreshResult::Success { + adapters_count, + protocols_count, + } + } + (Err(e), _) => { + tracing::warn!("Failed to fetch adapters from API: {}", e); + RefreshResult::Failed(format!("Adapter fetch failed: {}", e)) + } + (_, Err(e)) => { + tracing::warn!("Failed to fetch protocols from API: {}", e); + RefreshResult::Failed(format!("Protocol fetch failed: {}", e)) + } + } +} + +/// Check if specs have been refreshed from API +pub fn specs_refreshed() -> bool { + SPECS_REFRESHED.load(Ordering::SeqCst) +} + +/// Get the current spec source (for display purposes) +pub fn get_spec_source() -> &'static str { + if SPECS_REFRESHED.load(Ordering::SeqCst) { + "API (refreshed)" + } else if !cache::is_cache_stale() && cache::load_cached_adapters().is_some() { + "Cache" + } else { + "Embedded" + } +} + #[cfg(test)] mod tests { use super::*; @@ -274,9 +487,9 @@ mod tests { assert!(!adapters.is_empty(), "Should load at least one adapter"); // Check that we have 
the expected adapters - let adapter_ids: Vec<&str> = adapters.iter().map(|a| a.id.as_str()).collect(); + let adapter_ids: Vec = adapters.iter().map(|a| a.id.clone()).collect(); assert!( - adapter_ids.contains(&"haltech-nsp"), + adapter_ids.iter().any(|id| id == "haltech-nsp"), "Should have haltech-nsp adapter" ); } @@ -322,9 +535,11 @@ mod tests { assert!(!protocols.is_empty(), "Should load at least one protocol"); // Check that we have the expected protocols - let protocol_ids: Vec<&str> = protocols.iter().map(|p| p.id.as_str()).collect(); + let protocol_ids: Vec = protocols.iter().map(|p| p.id.clone()).collect(); assert!( - protocol_ids.contains(&"haltech-elite-broadcast"), + protocol_ids + .iter() + .any(|id| id == "haltech-elite-broadcast"), "Should have haltech-elite-broadcast protocol" ); } @@ -352,4 +567,14 @@ mod tests { "Should find Speeduino protocols" ); } + + #[test] + fn test_get_spec_source() { + // Initially should be "Embedded" or "Cache" depending on environment + let source = get_spec_source(); + assert!( + source == "Embedded" || source == "Cache" || source == "API (refreshed)", + "Spec source should be valid" + ); + } } diff --git a/src/adapters/types.rs b/src/adapters/types.rs index 9e12320..d97b8f1 100644 --- a/src/adapters/types.rs +++ b/src/adapters/types.rs @@ -3,10 +3,10 @@ //! These types mirror the OpenECU Alliance adapter schema for parsing //! adapter YAML files that define ECU log format specifications. -use serde::Deserialize; +use serde::{Deserialize, Serialize}; /// OpenECU Alliance adapter specification -#[derive(Debug, Clone, Deserialize)] +#[derive(Debug, Clone, Deserialize, Serialize)] pub struct AdapterSpec { /// Specification version (e.g., "1.0") pub openecualliance: String, @@ -37,7 +37,7 @@ pub struct AdapterSpec { } /// Branding assets for the vendor -#[derive(Debug, Clone, Deserialize, Default)] +#[derive(Debug, Clone, Deserialize, Serialize, Default)] pub struct BrandingSpec { /// Logo file path (relative to assets/logos/) #[serde(default)] @@ -57,7 +57,7 @@ pub struct BrandingSpec { } /// File format specification -#[derive(Debug, Clone, Deserialize)] +#[derive(Debug, Clone, Deserialize, Serialize)] pub struct FileFormatSpec { /// Format type: "csv" or "binary" #[serde(rename = "type")] @@ -100,7 +100,7 @@ pub struct FileFormatSpec { } /// Channel specification -#[derive(Debug, Clone, Deserialize)] +#[derive(Debug, Clone, Deserialize, Serialize)] pub struct ChannelSpec { /// Canonical channel identifier (e.g., "rpm", "coolant_temp") pub id: String, @@ -138,7 +138,7 @@ pub struct ChannelSpec { } /// Channel categories -#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum ChannelCategory { Engine, @@ -188,7 +188,7 @@ impl ChannelCategory { } /// Data types for channel values -#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum DataType { Float, @@ -199,7 +199,7 @@ pub enum DataType { } /// Adapter metadata -#[derive(Debug, Clone, Deserialize, Default)] +#[derive(Debug, Clone, Deserialize, Serialize, Default)] pub struct MetadataSpec { /// Adapter author #[serde(default)] @@ -222,7 +222,7 @@ pub struct MetadataSpec { } /// Changelog entry -#[derive(Debug, Clone, Deserialize)] +#[derive(Debug, Clone, Deserialize, Serialize)] pub struct ChangelogEntry { /// Version pub version: String, @@ -237,7 +237,7 @@ pub 
struct ChangelogEntry { // ============================================================================ /// OpenECU Alliance CAN protocol specification -#[derive(Debug, Clone, Deserialize)] +#[derive(Debug, Clone, Deserialize, Serialize)] pub struct ProtocolSpec { /// Specification version (e.g., "1.0") pub openecualliance: String, @@ -274,7 +274,7 @@ pub struct ProtocolSpec { } /// Protocol configuration -#[derive(Debug, Clone, Deserialize)] +#[derive(Debug, Clone, Deserialize, Serialize)] pub struct ProtocolInfo { /// Protocol type: "can", "canfd", "lin", "k-line" #[serde(rename = "type")] @@ -299,7 +299,7 @@ pub struct ProtocolInfo { } /// Protocol types -#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum ProtocolType { Can, @@ -310,7 +310,7 @@ pub enum ProtocolType { } /// CAN message definition -#[derive(Debug, Clone, Deserialize)] +#[derive(Debug, Clone, Deserialize, Serialize)] pub struct MessageSpec { /// CAN message ID (supports hex strings like "0x360" or decimal integers) #[serde(deserialize_with = "deserialize_message_id")] @@ -333,7 +333,7 @@ pub struct MessageSpec { } /// Signal within a CAN message -#[derive(Debug, Clone, Deserialize)] +#[derive(Debug, Clone, Deserialize, Serialize)] pub struct SignalSpec { /// Signal name pub name: String, @@ -376,7 +376,7 @@ fn default_scale() -> f64 { } /// Byte order for multi-byte signals -#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum ByteOrder { LittleEndian, @@ -384,7 +384,7 @@ pub enum ByteOrder { } /// Signal data types -#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum SignalDataType { Unsigned, @@ -394,7 +394,7 @@ pub enum SignalDataType { } /// Enumeration definition for discrete signal values -#[derive(Debug, Clone, Deserialize)] +#[derive(Debug, Clone, Deserialize, Serialize)] pub struct EnumSpec { /// Enumeration name pub name: String, diff --git a/src/app.rs b/src/app.rs index d877a7e..edcc0e3 100644 --- a/src/app.rs +++ b/src/app.rs @@ -12,6 +12,7 @@ use std::path::PathBuf; use std::sync::mpsc::{channel, Receiver, Sender}; use std::thread; +use crate::adapters; use crate::analysis::{AnalysisResult, AnalyzerRegistry}; use crate::analytics; use crate::computed::{ComputedChannel, ComputedChannelLibrary, FormulaEditorState}; @@ -147,6 +148,9 @@ pub struct UltraLogApp { pub(crate) user_settings: UserSettings, /// Current language selection pub(crate) language: Language, + // === Spec Refresh === + /// Whether spec refresh from API has been started + spec_refresh_started: bool, } impl Default for UltraLogApp { @@ -202,6 +206,7 @@ impl Default for UltraLogApp { analysis_selected_category: None, user_settings: UserSettings::default(), language: Language::default(), + spec_refresh_started: false, } } } @@ -1219,6 +1224,40 @@ impl UltraLogApp { } } + // ======================================================================== + // Spec Refresh (Background) + // ======================================================================== + + /// Start refreshing adapter/protocol specs from OpenECUAlliance API in background + /// This runs once on startup to ensure specs are up to date + fn start_spec_refresh(&mut self) { + if self.spec_refresh_started || 
adapters::specs_refreshed() { + return; + } + + self.spec_refresh_started = true; + + // Spawn background thread to refresh specs from API + thread::spawn(|| match adapters::refresh_specs_from_api() { + adapters::RefreshResult::Success { + adapters_count, + protocols_count, + } => { + tracing::info!( + "Spec refresh complete: {} adapters, {} protocols", + adapters_count, + protocols_count + ); + } + adapters::RefreshResult::Failed(e) => { + tracing::debug!("Spec refresh from API failed (using cache/embedded): {}", e); + } + adapters::RefreshResult::AlreadyRefreshed => { + tracing::debug!("Specs already refreshed, skipping"); + } + }); + } + // ======================================================================== // Toast Notifications // ======================================================================== @@ -1396,6 +1435,9 @@ impl eframe::App for UltraLogApp { // Check for updates on startup (runs once) self.check_startup_update(); + // Start spec refresh from API in background (runs once) + self.start_spec_refresh(); + // Check for completed update operations self.check_update_complete(); diff --git a/src/normalize.rs b/src/normalize.rs index bebf377..441f2cd 100644 --- a/src/normalize.rs +++ b/src/normalize.rs @@ -426,7 +426,7 @@ pub fn get_display_name(name: &str, show_original: bool) -> String { /// Get channel metadata from OpenECU Alliance specs if available. /// This provides additional information like category, unit, min/max values. -pub fn get_spec_metadata(name: &str) -> Option<&'static adapters::ChannelMetadata> { +pub fn get_spec_metadata(name: &str) -> Option { adapters::get_channel_metadata(name) } diff --git a/src/parsers/types.rs b/src/parsers/types.rs index 5bf5bbb..c11cc05 100644 --- a/src/parsers/types.rs +++ b/src/parsers/types.rs @@ -166,7 +166,7 @@ impl Channel { } /// Get OpenECU Alliance spec metadata for this channel (if available). - pub fn spec_metadata(&self) -> Option<&'static ChannelMetadata> { + pub fn spec_metadata(&self) -> Option { get_channel_metadata(&self.name()) } diff --git a/src/ui/histogram.rs b/src/ui/histogram.rs index ce1893d..29b9e9a 100644 --- a/src/ui/histogram.rs +++ b/src/ui/histogram.rs @@ -1993,7 +1993,7 @@ impl UltraLogApp { if grid_rows == 0 { return; } - let grid_cols = values.get(0).map(|row| row.len()).unwrap_or(0); + let grid_cols = values.first().map(|row| row.len()).unwrap_or(0); if grid_cols == 0 { return; } From 959148775e6c5cb96b741d0e118fe112abc55119 Mon Sep 17 00:00:00 2001 From: Cole Gentry Date: Sat, 24 Jan 2026 19:18:53 -0500 Subject: [PATCH 2/2] Documentation updates Signed-off-by: Cole Gentry --- CLAUDE.md | 65 ++++++++++- ULTRALOG_INTEGRATION.md | 248 +++++++++++++++++++++++++++++++++++++--- 2 files changed, 293 insertions(+), 20 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index 938abf2..42a6236 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -60,6 +60,12 @@ src/ ├── expression.rs # Formula parsing and evaluation engine ├── updater.rs # Auto-update functionality ├── analytics.rs # Privacy-respecting analytics +├── adapters/ +│ ├── mod.rs # Adapter module exports +│ ├── types.rs # OpenECU Alliance spec types (AdapterSpec, ProtocolSpec, etc.) +│ ├── api.rs # API client for fetching specs from openecualliance.org +│ ├── cache.rs # Local disk cache at {app_data_dir}/UltraLog/oecua_specs/ +│ └── registry.rs # Spec registry with fallback chain (cache -> embedded -> API) ├── parsers/ │ ├── mod.rs # Parser module exports │ ├── types.rs # Core parser types (Log, Channel, Value, etc.) 
@@ -130,6 +136,38 @@ src/ - Handles installation on Windows, macOS, and Linux - Supports seamless background updates +### Adapter System (src/adapters/) + +The adapter system integrates with the **OpenECU Alliance** specification ecosystem, providing runtime access to adapter and protocol definitions with automatic updates. + +- **`types.rs`** - OpenECU Alliance spec types: + - `AdapterSpec` - Log file format adapter definitions with channel specifications + - `ProtocolSpec` - CAN/network protocol definitions for real-time streaming + - `ChannelSpec` - Channel metadata (name, category, unit, min/max, precision) + - `MessageSpec`, `SignalSpec` - CAN message and signal definitions + - `ChannelCategory` - Categorization enum (Engine, Fuel, Ignition, etc.) + +- **`api.rs`** - OpenECU Alliance API client: + - Fetches specs from `https://openecualliance.org/api/` + - Endpoints: `/api/adapters`, `/api/adapters/{vendor}/{id}`, `/api/protocols`, `/api/protocols/{vendor}/{id}` + - HTTP client using `ureq` with custom user agent + - Error handling for network and parsing failures + +- **`cache.rs`** - Local disk cache: + - Cache location: `{app_data_dir}/UltraLog/oecua_specs/adapters/` and `.../protocols/` + - Cache metadata tracks last fetch timestamp, version, and counts + - 24-hour staleness threshold (configurable) + - Individual JSON files per adapter/protocol (e.g., `haltech-haltech-nsp.json`) + - Cache clearing and age inspection utilities + +- **`registry.rs`** - Spec registry with multi-tier fallback: + - **Embedded specs** - Compile-time YAML files from `spec/OECUASpecs/` git submodule + - **Cache layer** - Loads from disk cache if fresh (< 24 hours old) + - **API refresh** - Background thread fetches updates on app startup (non-blocking) + - **Normalization maps** - Builds source_name → display_name mappings for field normalization + - **Metadata lookup** - Provides channel metadata (category, unit, min/max, precision) + - Thread-safe `RwLock` for dynamic spec updates without restart + ### UI Modules (src/ui/) UI rendering is split into focused modules that implement methods on `UltraLogApp`: @@ -182,14 +220,35 @@ To add a new ECU format: ### Data Flow +**Startup and Spec Loading:** + +1. **App initialization** - `registry.rs` loads adapter/protocol specs via fallback chain: + - If cache is fresh (< 24 hours old) → load from disk cache + - Else → load embedded YAML specs from `spec/OECUASpecs/` + - Background thread spawned to fetch latest specs from OpenECU Alliance API +2. **Background refresh** - Non-blocking API fetch updates cache and registry for next startup +3. **Normalization maps built** - Extract `source_names` from adapter specs to build field name mappings + +**File Loading and Visualization:** + 1. Files are loaded asynchronously via `start_loading_file()` → background thread 2. Parser converts file (CSV or binary) to `Log` struct with channels, times, and data vectors -3. Field normalization optionally applied to standardize channel names +3. Field normalization optionally applied using spec-driven or custom mappings to standardize channel names 4. User selects channels (raw or computed) → added to `selected_channels` with unique color assignment 5. Computed channels evaluate formulas across all records with time-shifting support 6. Chart renders downsampled data from cache, limited to 2000 points per channel using LTTB algorithm 7. Unit conversions applied at display time based on `unit_preferences` +**Adapter Spec Fallback Chain:** + +```text +1. 
Startup (fast, non-blocking): + Cache (< 24h old) → Embedded YAML → Background API fetch + +2. Background API fetch (async): + OpenECU Alliance API → Update cache → Rebuild normalization maps → Ready for next startup +``` + ## Key Features - **Multi-ECU Support** - Supports Haltech, ECUMaster, RomRaider, Speeduino, rusEFI, AiM, and Link ECU formats @@ -215,12 +274,14 @@ To add a new ECU format: - **strum** (0.27) - Enum string conversion for channel types - **regex** (1.12) - Log file parsing - **meval** (0.2) - Mathematical expression evaluation for computed channels -- **ureq** (3.0) - HTTP client for auto-updates +- **ureq** (3.0) - HTTP client for auto-updates and OpenECU Alliance API - **semver** (1.0) - Version comparison +- **serde_yaml** (0.9) - YAML parsing for adapter/protocol specs - **printpdf** (0.7) - PDF generation - **image** (0.25) - PNG export - **memmap2** (0.9) - Memory-mapped file loading for large files - **rayon** (1.11) - Parallel iteration for parsing +- **dirs** (5.0) - Cross-platform app data directory detection for cache ## Test Data diff --git a/ULTRALOG_INTEGRATION.md b/ULTRALOG_INTEGRATION.md index 409eac9..d0857c3 100644 --- a/ULTRALOG_INTEGRATION.md +++ b/ULTRALOG_INTEGRATION.md @@ -11,6 +11,10 @@ This document describes how UltraLog integrates with OpenECU Alliance adapter sp - [x] Integration with existing `normalize.rs` (spec as fallback) - [x] Channel metadata lookup (min/max/precision/category) - [x] 8 adapter specs integrated (Haltech, ECUMaster, Link, AiM, RomRaider, Speeduino, rusEFI, Emerald) +- [x] **API client for fetching specs from openecualliance.org** +- [x] **Local disk cache at `{app_data_dir}/UltraLog/oecua_specs/`** +- [x] **Background API refresh with fallback chain (cache → embedded → API)** +- [x] **24-hour cache staleness threshold** - [ ] Runtime adapter loading from user directory - [ ] Generic CSV/binary parser driven by specs - [ ] Adapter marketplace integration @@ -21,6 +25,9 @@ This document describes how UltraLog integrates with OpenECU Alliance adapter sp - [x] Protocol type definitions (ProtocolSpec, MessageSpec, SignalSpec) - [x] 9 protocol specs integrated (Haltech, ECUMaster, Speeduino, rusEFI, AEM, Megasquirt, MaxxECU, Syvecs, Emtron) - [x] Protocol registry API (get_protocols, get_protocol_by_id, find_protocols_by_vendor) +- [x] **API client for fetching protocol specs** +- [x] **Local disk cache for protocols** +- [x] **Background API refresh for protocols** - [ ] CAN bus message encoder/decoder - [ ] Real-time CAN streaming support - [ ] DBC file export from protocol specs @@ -72,7 +79,9 @@ src/ ├── adapters/ │ ├── mod.rs # Module exports and re-exports │ ├── types.rs # AdapterSpec, ProtocolSpec, ChannelSpec, MessageSpec, etc. 
-│ └── registry.rs # Spec loading, normalization maps, metadata lookup, protocol registry +│ ├── registry.rs # Spec loading, normalization maps, metadata lookup, protocol registry +│ ├── api.rs # API client for fetching specs from openecualliance.org +│ └── cache.rs # Local disk cache at {app_data_dir}/UltraLog/oecua_specs/ ├── normalize.rs # Field normalization (uses adapters for fallback) └── parsers/ └── types.rs # Channel type enhanced with spec metadata methods @@ -81,10 +90,51 @@ spec/OECUASpecs/ # Git submodule: github.com/ClassicMiniDIY/OECUASpecs ├── adapters/ # Log file format adapters (8 specs) └── protocols/ # CAN bus protocol definitions (9 specs) build.rs # Auto-downloads specs if submodule missing + +Cache locations: +- Linux: ~/.local/share/UltraLog/oecua_specs/ +- macOS: ~/Library/Application Support/UltraLog/oecua_specs/ +- Windows: %APPDATA%\UltraLog\oecua_specs\ ``` ## How It Works +### Spec Loading Architecture: Multi-Tier Fallback + +UltraLog uses a **three-tier fallback system** for loading adapter and protocol specifications, optimizing for fast startup while keeping specs up-to-date: + +``` +┌─────────────────────────────────────────────────────────┐ +│ Application Startup │ +├─────────────────────────────────────────────────────────┤ +│ 1. Load specs via registry.rs (FAST, non-blocking) │ +│ ┌───────────────────────────────────────────────┐ │ +│ │ Check cache freshness (< 24 hours?) │ │ +│ │ ├── YES → Load from disk cache (fastest) │ │ +│ │ └── NO → Load embedded YAML specs │ │ +│ └───────────────────────────────────────────────┘ │ +│ │ +│ 2. Spawn background thread (non-blocking) │ +│ ┌───────────────────────────────────────────────┐ │ +│ │ Fetch from openecualliance.org API │ │ +│ │ ├── Success → Update cache & registry │ │ +│ │ └── Failure → Continue with cache/embedded │ │ +│ └───────────────────────────────────────────────┘ │ +│ │ +│ 3. User experience │ +│ - App starts immediately with cached/embedded specs │ +│ - Background refresh happens silently │ +│ - Next startup gets latest specs from cache │ +└─────────────────────────────────────────────────────────┘ +``` + +**Key Design Principles:** + +- **Zero blocking** - App never waits for API calls +- **Always available** - Embedded specs ensure offline functionality +- **Auto-updating** - Latest specs fetched in background +- **Cache efficiency** - 24-hour staleness threshold reduces API load + ### 1. Spec Source: GitHub Submodule Adapter specs come from the [OECUASpecs](https://github.com/ClassicMiniDIY/OECUASpecs) repository as a git submodule: @@ -103,32 +153,76 @@ The `build.rs` script ensures specs are available: 2. If not, attempts `git submodule update --init` 3. If git fails, downloads specs directly from GitHub raw content -### 2. Compile-Time Spec Embedding +### 2. Runtime Spec Loading with Fallback -Adapter YAML files are embedded at compile time using `include_str!`: +Adapter specs are loaded at runtime via a fallback chain: ```rust // src/adapters/registry.rs + +// Embedded YAML as compile-time fallback const HALTECH_NSP_YAML: &str = include_str!("../../spec/OECUASpecs/adapters/haltech/haltech-nsp.adapter.yaml"); // ... 7 more adapters -static ADAPTER_SPECS: LazyLock> = LazyLock::new(|| { - EMBEDDED_ADAPTERS.iter() - .filter_map(|yaml| serde_yaml::from_str(yaml).ok()) - .collect() +// Load with cache -> embedded fallback +fn load_adapters_with_fallback() -> Vec { + // 1. 
Try cache first (if fresh) + if !cache::is_cache_stale() { + if let Some(cached) = cache::load_cached_adapters() { + return cached; // Fast path: use cache + } + } + + // 2. Fall back to embedded specs + parse_embedded_adapters() +} + +// Dynamic registry with RwLock for API updates +static ADAPTER_SPECS: LazyLock>> = + LazyLock::new(|| RwLock::new(load_adapters_with_fallback())); +``` + +### 3. Background API Refresh + +On app startup, a background thread fetches latest specs from the API: + +```rust +// src/app.rs +thread::spawn(|| match adapters::refresh_specs_from_api() { + RefreshResult::Success { adapters_count, protocols_count } => { + tracing::info!("Refreshed {} adapters, {} protocols", + adapters_count, protocols_count); + } + RefreshResult::Failed(e) => { + tracing::debug!("API refresh failed (using cache/embedded): {}", e); + } + RefreshResult::AlreadyRefreshed => {} }); ``` -### 2. Normalization Map Building +The refresh process: + +1. Fetches from `https://openecualliance.org/api/adapters` and `/api/protocols` +2. Saves to local disk cache (`{app_data_dir}/UltraLog/oecua_specs/`) +3. Updates in-memory registry (`ADAPTER_SPECS` and `PROTOCOL_SPECS`) +4. Rebuilds normalization and metadata maps +5. Next startup loads fresh specs from cache + +### 4. Normalization Map Building -A reverse lookup map is built from all adapter `source_names`: +A reverse lookup map is built from all adapter `source_names` (dynamically updated on API refresh): ```rust // src/adapters/registry.rs -static SPEC_NORMALIZATION_MAP: LazyLock> = LazyLock::new(|| { + +// RwLock allows dynamic updates when API refresh completes +static SPEC_NORMALIZATION_MAP: LazyLock>> = + LazyLock::new(|| RwLock::new(build_normalization_map(&ADAPTER_SPECS.read().unwrap()))); + +fn build_normalization_map(adapters: &[AdapterSpec]) -> HashMap { let mut map = HashMap::new(); - for adapter in ADAPTER_SPECS.iter() { + for adapter in adapters { for channel in &adapter.channels { for source_name in &channel.source_names { map.insert(source_name.to_lowercase(), channel.name.clone()); @@ -136,10 +230,19 @@ static SPEC_NORMALIZATION_MAP: LazyLock> = LazyLock::new } } map -}); +} ``` -### 3. Normalization Integration +When API refresh completes, the map is rebuilt: + +```rust +// After successful API fetch +if let Ok(mut norm_lock) = SPEC_NORMALIZATION_MAP.write() { + *norm_lock = build_normalization_map(&new_adapters); +} +``` + +### 5. Normalization Integration The `normalize.rs` module uses spec normalization as a fallback: @@ -160,7 +263,7 @@ pub fn normalize_channel_name_with_custom( } ``` -### 4. Channel Metadata Access +### 6. 
Channel Metadata Access The `Channel` type provides methods that fall back to spec metadata: @@ -185,6 +288,104 @@ impl Channel { } ``` +## API Client and Caching + +### OpenECU Alliance API + +The API client (`src/adapters/api.rs`) fetches specs from the OpenECU Alliance website: + +**Base URL:** `https://openecualliance.org` + +**Endpoints:** + +| Endpoint | Method | Description | Response | +|----------|--------|-------------|----------| +| `/api/adapters` | GET | List all adapters (summary) | `{ data: [AdapterSummary] }` | +| `/api/adapters/{vendor}/{id}` | GET | Get full adapter spec | `AdapterSpec` | +| `/api/protocols` | GET | List all protocols (summary) | `{ data: [ProtocolSummary] }` | +| `/api/protocols/{vendor}/{id}` | GET | Get full protocol spec | `ProtocolSpec` | + +**Fetch Strategy:** + +```rust +// Fetch all adapters (list + individual fetches) +pub fn fetch_all_adapters() -> Result, ApiError> { + let summaries = fetch_adapter_list()?; // GET /api/adapters + + let mut adapters = Vec::new(); + for summary in summaries { + // GET /api/adapters/{vendor}/{id} for each + if let Ok(adapter) = fetch_adapter(&summary.vendor, &summary.id) { + adapters.push(adapter); + } + } + Ok(adapters) +} +``` + +**Error Handling:** + +- Network failures → Fall back to cache or embedded specs +- API errors (4xx/5xx) → Log warning, continue with fallback +- Parse errors → Log warning, skip that spec +- Non-blocking: API failures don't prevent app startup + +### Local Disk Cache + +The cache module (`src/adapters/cache.rs`) manages persistent storage of fetched specs: + +**Cache Structure:** + +``` +{app_data_dir}/UltraLog/oecua_specs/ +├── metadata.json # Cache metadata (timestamp, counts) +├── adapters/ +│ ├── haltech-haltech-nsp.json +│ ├── ecumaster-ecumaster-emu-csv.json +│ └── ... (8 adapter files) +└── protocols/ + ├── haltech-haltech-elite-broadcast.json + ├── ecumaster-ecumaster-emu-broadcast.json + └── ... (9 protocol files) +``` + +**Cache Metadata:** + +```json +{ + "last_fetch_timestamp": 1706123456, + "cache_version": 1, + "adapter_count": 8, + "protocol_count": 9 +} +``` + +**Cache Functions:** + +```rust +// Check if cache is stale (> 24 hours old) +pub fn is_cache_stale() -> bool + +// Load cached adapters/protocols +pub fn load_cached_adapters() -> Option> +pub fn load_cached_protocols() -> Option> + +// Save fetched specs to cache +pub fn save_adapters_to_cache(adapters: &[AdapterSpec]) -> Result<()> +pub fn save_protocols_to_cache(protocols: &[ProtocolSpec]) -> Result<()> + +// Utility functions +pub fn get_cache_age() -> Option +pub fn clear_cache() -> Result<()> +``` + +**Staleness Threshold:** + +- Default: 24 hours +- Configurable via `is_cache_stale_with_max_age(secs)` +- Prevents excessive API requests +- Balance between freshness and performance + ## CAN Bus Protocol Support In addition to adapter specs (for parsing log files), UltraLog also integrates with OpenECU Alliance protocol specifications. These define CAN bus message structures for real-time ECU data streaming. 
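Decoding CAN frames from `MessageSpec`/`SignalSpec` is still listed as future work above; for now, protocol specs can at least be queried through the registry. A minimal sketch, using only the `find_protocols_by_vendor` and `get_protocol_by_id` functions this patch exports and the `id`/`vendor` fields the registry code already reads (the function name `list_haltech_protocols` is hypothetical):

```rust
use ultralog::adapters::{find_protocols_by_vendor, get_protocol_by_id};

fn list_haltech_protocols() {
    // Vendor matching is case-insensitive; owned values are returned now
    // that the registry is RwLock-backed.
    for protocol in find_protocols_by_vendor("Haltech") {
        println!("{} / {}", protocol.vendor, protocol.id);
    }

    if let Some(p) = get_protocol_by_id("haltech-elite-broadcast") {
        println!("Found protocol spec: {}", p.id);
    }
}
```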
@@ -311,6 +512,11 @@ use ultralog::adapters::{ get_all_categories, // Get all unique ChannelCategory values get_channels_by_category, // Get all channels for a category + // API and cache + refresh_specs_from_api, // Trigger background refresh (returns RefreshResult) + specs_refreshed, // Check if specs have been refreshed from API + get_spec_source, // Get current spec source ("API", "Cache", "Embedded") + // Adapter types AdapterSpec, ChannelSpec, @@ -327,6 +533,9 @@ use ultralog::adapters::{ ByteOrder, SignalDataType, EnumSpec, + + // Result types + RefreshResult, // Success/Failed/AlreadyRefreshed }; ``` @@ -378,19 +587,22 @@ static EMBEDDED_ADAPTERS: &[&str] = &[ ## Future Enhancements -### Runtime Adapter Loading +### Runtime Adapter Loading (User Directory) -Load user adapters from `~/.ultralog/adapters/`: +Load user-created adapters from local directory (not yet implemented): ```rust impl AdapterRegistry { pub fn load_user_adapters(path: &Path) -> Result> { - // Read YAML files from user directory - // Merge with embedded adapters + // Read YAML files from ~/.ultralog/adapters/ + // Merge with embedded/cached adapters + // Allow users to test custom adapters without rebuilding } } ``` +**Note:** The OpenECU Alliance API integration (implemented) provides runtime spec updates from the official repository. User directory loading would be for custom/experimental adapters not yet in the official specs. + ### Generic CSV Parser Use adapter specs to drive parsing without format-specific code:
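A rough illustration (not the planned implementation) of the header-mapping half of such a parser, using only fields the registry code already reads (`channels`, `source_names`, `name`); the helper name `map_csv_headers` is hypothetical:

```rust
use ultralog::adapters::AdapterSpec;

/// For each CSV header column, return the canonical channel name if the
/// adapter spec knows it, otherwise keep the original header.
fn map_csv_headers(spec: &AdapterSpec, headers: &[&str]) -> Vec<String> {
    headers
        .iter()
        .map(|header| {
            let header_lower = header.to_lowercase();
            spec.channels
                .iter()
                .find(|ch| {
                    ch.source_names
                        .iter()
                        .any(|s| s.to_lowercase() == header_lower)
                })
                .map(|ch| ch.name.clone())
                .unwrap_or_else(|| header.to_string())
        })
        .collect()
}
```

Binary formats would additionally need the `FileFormatSpec` layout information, which this sketch does not cover.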