This guide provides best practices for implementing and using the structured logging system in ADIC Core. Following these guidelines ensures consistent, useful, and performant logging across the codebase.
❌ Bad: Logging that you're about to do something
info!("Starting balance transfer");
balance.transfer(from, to, amount)?;
info!("Completed balance transfer");β Good: Log the actual state change with before/after values
let balance_before = balance.get(from)?;
balance.transfer(from, to, amount)?;
let balance_after = balance.get(from)?;
info!(
from = %from,
to = %to,
amount = amount.to_adic(),
balance_before = balance_before.to_adic(),
balance_after = balance_after.to_adic(),
"πΈ Balance transferred"
);

❌ Bad: Embedding data in the message string
info!("Peer {} connected with reputation {}", peer_id, reputation);β Good: Using structured fields
info!(
peer_id = %peer_id,
reputation = reputation,
"π€ Peer connected"
);

The message should describe WHAT happened in 2-5 words. Details go in fields.
❌ Bad: Long, detailed messages
info!("Successfully validated message {} from peer {} with score {} after checking {} parents",
message_id, peer_id, score, parent_count);

✅ Good: Concise message with structured fields
info!(
message_id = %message_id,
peer_id = %peer_id,
score = score,
parent_count = parent_count,
"β
Message validated"
);

Use ERROR for unrecoverable failures that require immediate attention:
error!(
error = %e,
peer_id = %peer_id,
retry_count = retries,
"Connection failed permanently"
);

Use WARN for recoverable issues or suspicious behavior:
warn!(
peer_id = %peer_id,
invalid_messages = count,
threshold = max_invalid,
"β οΈ Peer misbehavior detected"
);

Use INFO for significant state changes and major operations:
info!(
height_before = old_height,
height_after = new_height,
"π¦ Blockchain height updated"
);

Use DEBUG for detailed operation flow and intermediate states:
debug!(
message_id = %msg_id,
validation_steps = steps.len(),
"Processing validation pipeline"
);

Use TRACE for very detailed debugging information:
trace!(
raw_bytes = ?bytes,
decoded_value = ?value,
"Decoding network packet"
);

Use consistent field names across the codebase:
| Field Pattern | Example | Usage |
|---|---|---|
| `*_before` | `balance_before` | State before change |
| `*_after` | `balance_after` | State after change |
| `*_count` | `peer_count` | Quantities |
| `*_id` | `message_id` | Identifiers |
| `*_ms` | `duration_ms` | Time in milliseconds |
| `*_bytes` | `size_bytes` | Size in bytes |
| `is_*` | `is_valid` | Boolean flags |
| `has_*` | `has_parent` | Boolean existence |
Fields are only evaluated when the log level is active:
// This expensive computation only runs at DEBUG level
debug!(
expensive_metric = calculate_complex_metric(),
"Metric calculated"
);

Use references and display implementations:
// Good: Uses Display trait, no allocation
info!(peer_id = %peer_id, "Peer added");
// Avoid: Creates String unnecessarily
info!(peer_id = format!("{}", peer_id), "Peer added");

For high-frequency events, use sampling or rate limiting:
use std::sync::atomic::{AtomicUsize, Ordering};
static MESSAGE_COUNT: AtomicUsize = AtomicUsize::new(0);
// Only log every 1000th message
let count = MESSAGE_COUNT.fetch_add(1, Ordering::Relaxed);
if count % 1000 == 0 {
info!(
total_messages = count,
"Message processing milestone"
);
}

let start = Instant::now();
let result = expensive_operation()?;
let elapsed = start.elapsed();
info!(
duration_ms = elapsed.as_millis(),
result_size = result.len(),
"π― Operation completed"
);

let old_state = entity.state.clone();
entity.transition_to(new_state)?;
info!(
entity_id = %entity.id,
state_before = ?old_state,
state_after = ?entity.state,
"π State transitioned"
);

let total = items.len();
let mut processed = 0;
let mut failed = 0;
for item in items {
match process(item) {
Ok(_) => processed += 1,
Err(e) => {
failed += 1;
debug!(error = %e, item = ?item, "Item failed");
}
}
}
info!(
total = total,
processed = processed,
failed = failed,
success_rate = (processed as f64 / total as f64),
"π¦ Batch processed"
);

let cache_hit = cache.get(key).is_some();
let value = if cache_hit {
cache.get(key).unwrap()
} else {
let value = compute_value(key)?;
cache.insert(key, value.clone());
value
};
debug!(
key = %key,
cache_hit = cache_hit,
cache_size = cache.len(),
"Cache accessed"
);

Always include amounts in ADIC units:
info!(
amount_adic = amount.to_adic(), // Not raw units
"π° Amount processed"
);

Include peer context for network operations:
info!(
peer_id = %peer_id,
remote_addr = %addr,
direction = if outgoing { "outgoing" } else { "incoming" },
"π Connection established"
);

Include validation details:
info!(
message_id = %msg_id,
score = score,
threshold = required_score,
passed = score >= required_score,
"π³οΈ Consensus check"
);

Include I/O metrics:
info!(
key = %key,
size_bytes = value.len(),
operation = "write",
backend = "rocksdb",
"πΎ Storage operation"
);

# See all structured fields
RUST_LOG=debug cargo test
# Focus on specific module
RUST_LOG=adic_consensus=trace cargo test consensus_tests
# Disable test harness output
RUST_LOG=debug cargo test -- --nocapture

# JSON output for parsing
RUST_LOG=info cargo run -- --log-format json
# File output with rotation
cargo run -- --log-file /var/log/adic/test.log --log-rotation 100

Look for before/after patterns:
grep "balance_before\|balance_after" adic.logUse structured field filtering:
grep "peer_id=12D3KooW" adic.logExtract timing fields:
grep -o "duration_ms=[0-9]*" adic.log | cut -d= -f2 | statsFind error context:
grep -B5 -A5 "ERROR\|WARN" adic.log

Never log private keys, passwords, or PII:
// NEVER DO THIS
error!(private_key = %key, "Key error");
// Safe alternative
error!(key_id = %key.public_id(), "Key error");

These bypass the logging system:
// Bad
println!("Debug: {}", value);
dbg!(value);
// Good
debug!(value = ?value, "Debug output");

Avoid logging in performance-critical loops:
// Bad
for i in 0..1_000_000 {
debug!(iteration = i, "Processing");
process(i);
}
// Good
for i in 0..1_000_000 {
process(i);
}
debug!(total_processed = 1_000_000, "Batch complete");

Avoid logging the same information multiple times:
// Bad
info!(peer_id = %peer, "Starting handshake");
info!(peer_id = %peer, "Handshake step 1");
info!(peer_id = %peer, "Handshake step 2");
// Good - log once with all context
info!(
peer_id = %peer,
steps_completed = 2,
"Handshake completed"
);

Extract metrics using log parsing:
// Log with metric-friendly fields
info!(
metric_type = "counter",
metric_name = "messages_processed",
value = 1,
labels = { peer_id: peer.to_string() },
"Metric recorded"
);

Define alerts based on log patterns:
alert: HighErrorRate
expr: rate(log_lines{level="ERROR"}[5m]) > 10
annotations:
summary: "High error rate detected"Following these best practices ensures that ADIC Core's logging is:
- Consistent: Same patterns across all modules
- Useful: Contains actionable information
- Performant: Minimal overhead in production
- Debuggable: Rich context when needed
Remember: Good logging is an investment in operational excellence and developer productivity.