Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 0 additions & 11 deletions audit_report.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,17 +24,6 @@

| # | Severity | Title | File |
|---|----------|-------|------|
| 1 | **Critical** | SVG served inline — stored XSS | `handlers/board.rs:753` |
| 2 | **Critical** | ChanNet Ed25519 signatures not verified | `chan_net/import.rs:~90` |
| 3 | **Critical** | ffmpeg process not killed on timeout | `workers/mod.rs:460` |
| 4 | **High** | Swapped format args in mod log entry | `handlers/admin/moderation.rs:183` |
| 5 | **High** | Unbounded body on admin restore endpoints | `server/server.rs:598,606` |
| 6 | **High** | `edit_post` misuses `execute_batch` for transactions | `db/posts.rs:~190` |
| 7 | **High** | `insert_board_if_absent` uses `last_insert_rowid()` | `db/chan_net.rs:~57` |
| 8 | **High** | Catalog has no ETag caching | `handlers/board.rs:500` |
| 9 | **High** | ChanNet `/chan/refresh` and `/chan/poll` unauthenticated | `chan_net/mod.rs:146` |
| 10 | **High** | Worker `JoinHandle`s discarded; shutdown is blind sleep | `server/server.rs:239` |
| 11 | **High** | `get_per_board_stats` still uses correlated subqueries | `db/boards.rs:416` |
| 12 | **Medium** | PoW nonce prune not rate-gated under concurrency | `utils/crypto.rs:~165` |
| 13 | **Medium** | `constant_time_eq` in posts.rs — use `subtle` crate | `db/posts.rs:~270` |
| 14 | **Medium** | `update_settings_file_site_names` matches comment lines | `config.rs:~320` |
Expand Down
Empty file removed clippy_reports/clippy_raw.txt
Empty file.
5 changes: 0 additions & 5 deletions clippy_reports/summary.txt

This file was deleted.

66 changes: 42 additions & 24 deletions src/db/posts.rs
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@
use crate::models::Post;
use anyhow::{Context, Result};
use rusqlite::{params, OptionalExtension};
use subtle::ConstantTimeEq as _;

// ─── Retry budget constant ────────────────────────────────────────────────────

Expand Down Expand Up @@ -351,8 +352,16 @@ pub fn verify_deletion_token(

/// Edit a post's body, verified against the deletion token and a per-board edit window.
///
/// `edit_window_secs` comes from the board (0 means use the default 300s window).
/// The caller is responsible for checking `board.allow_editing` before calling this.
/// `edit_window_secs` semantics (FIX[#25]):
/// - `> 0` — use this many seconds as the edit window.
/// - `== 0` — no time limit; editing is always allowed regardless of post age.
/// Use this when the board's `edit_window_secs` column is 0.
/// - `< 0` — use the built-in default of 300 s.
///
/// Previously, both `0` and negative values mapped to 300 s, making it
/// impossible for a board to configure "no time limit". The caller is
/// responsible for checking `board.allow_editing` before calling this.
///
/// Returns `Ok(true)` on success, `Ok(false)` if the token is wrong or the
/// edit window has closed; `Err` for database failures.
///
Expand All @@ -376,10 +385,11 @@ pub fn edit_post(
new_body_html: &str,
edit_window_secs: i64,
) -> Result<bool> {
let window = if edit_window_secs <= 0 {
300
} else {
edit_window_secs
// FIX[#25]: `0` now means "no time limit". Negative values use the 300 s default.
let window_opt: Option<i64> = match edit_window_secs.cmp(&0) {
std::cmp::Ordering::Greater => Some(edit_window_secs),
std::cmp::Ordering::Equal => None, // no limit — skip the window check entirely
std::cmp::Ordering::Less => Some(300), // negative → built-in default
};

// FIX[High-6]: Use rusqlite's typed transaction API instead of raw
Expand Down Expand Up @@ -413,9 +423,12 @@ pub fn edit_post(
}

let now = chrono::Utc::now().timestamp();
if now.saturating_sub(created_at) > window {
return Ok(false);
if let Some(window) = window_opt {
if now.saturating_sub(created_at) > window {
return Ok(false);
}
}
// window_opt == None → no time limit, skip the check

tx.execute(
"UPDATE posts SET body = ?1, body_html = ?2, edited_at = ?3 WHERE id = ?4",
Expand All @@ -431,20 +444,14 @@ pub fn edit_post(

/// Constant-time byte slice comparison to prevent timing side-channel attacks.
///
/// FIX[MED-7]: The previous implementation returned false immediately when
/// lengths differed, leaking token length as a timing signal. The comparison
/// now processes all bytes from the longer slice regardless of length, folding
/// the length mismatch into the accumulator.
/// FIX[#13]: Replaced hand-rolled implementation with the `subtle` crate's
/// `ConstantTimeEq`. The previous hand-rolled version was correct, but using an
/// audited crate is preferable to maintaining our own. Note: `subtle`'s
/// implementation for `[u8]` short-circuits on a length mismatch (leaking
/// length), but deletion tokens are always 32-char hex strings so lengths will
/// never differ in practice.
fn constant_time_eq(a: &[u8], b: &[u8]) -> bool {
let max_len = a.len().max(b.len());
// Non-zero when lengths differ.
let mut diff = u8::try_from(a.len() ^ b.len()).unwrap_or(u8::MAX);
for i in 0..max_len {
let x = a.get(i).copied().unwrap_or(0);
let y = b.get(i).copied().unwrap_or(0);
diff |= x ^ y;
}
diff == 0
a.ct_eq(b).into()
}

// ─── LIKE escape helper ───────────────────────────────────────────────────────
Expand Down Expand Up @@ -686,9 +693,17 @@ pub fn get_poll_for_thread(
/// the same INSERT statement via a correlated WHERE EXISTS. A mismatched
/// (`poll_id`, `option_id`) pair inserts nothing and returns false.
///
/// Note (MED-14): This function returns false for two distinct cases:
/// FIX[#16]: Poll expiry is now re-checked atomically inside the INSERT's
/// WHERE clause. Previously, expiry was only verified by the handler before
/// calling this function. A poll expiring between the handler's check and the
/// INSERT would allow a vote on an expired poll. The `expires_at` guard here
/// closes that race: the INSERT is a no-op if the poll has expired by the time
/// `SQLite` evaluates it.
///
/// Note (MED-14): This function returns false for three distinct cases:
/// 1. The voter has already voted (UNIQUE constraint fires INSERT OR IGNORE)
/// 2. The `option_id` does not belong to `poll_id` (EXISTS check fails)
/// 3. The poll has expired (`expires_at` guard in EXISTS fails)
///
/// Callers that need to distinguish these cases should call `cast_vote` and, on
/// false, separately query whether the IP has voted on this poll. A future
Expand All @@ -706,8 +721,11 @@ pub fn cast_vote(
"INSERT OR IGNORE INTO poll_votes (poll_id, option_id, ip_hash)
SELECT ?1, ?2, ?3
WHERE EXISTS (
SELECT 1 FROM poll_options
WHERE id = ?2 AND poll_id = ?1
SELECT 1 FROM poll_options po
JOIN polls p ON p.id = po.poll_id
WHERE po.id = ?2
AND po.poll_id = ?1
AND (p.expires_at IS NULL OR p.expires_at > unixepoch())
)",
params![poll_id, option_id, ip_hash],
)?;
Expand Down
146 changes: 103 additions & 43 deletions src/handlers/admin/backup.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,56 @@ use tokio::io::AsyncWriteExt as _;
use tokio_util::io::ReaderStream;
use tracing::{info, warn};

// ─── URL query-string encoder ─────────────────────────────────────────────────
//
// FIX[#30]: `encode_q` was previously defined as an inner function inside two
// separate async handlers, with one implementation slightly less efficient than
// the other. Extracted here so both call sites share a single definition.
//
// Encodes `s` using percent-encoding, with spaces as `+` (form-URL encoding).
// Used only for error messages in redirect query strings — not for URL paths.
// Encodes `s` using percent-encoding, with spaces as `+` (form-URL encoding).
// Used only for error messages in redirect query strings — not for URL paths.
//
// FIX[#30]: shared by multiple handlers (previously duplicated inline).
// Simplified the hex-digit helper from a 16-arm `match` to a constant
// lookup table — same output, far less code.
fn encode_q(s: &str) -> String {
    /// Upper-case hexadecimal digits, indexed by nibble value (0..=15).
    const HEX: &[u8; 16] = b"0123456789ABCDEF";
    // `s.len()` is a lower bound: each escaped byte expands to three chars,
    // but error messages are short so one possible regrow is acceptable.
    let mut out = String::with_capacity(s.len());
    for b in s.bytes() {
        match b {
            // RFC 3986 "unreserved" characters pass through unchanged.
            b'A'..=b'Z' | b'a'..=b'z' | b'0'..=b'9' | b'-' | b'_' | b'.' | b'~' => {
                out.push(b as char);
            }
            // Form-URL encoding: a space becomes '+', not "%20".
            b' ' => out.push('+'),
            // Everything else becomes %XX with upper-case hex digits.
            b => {
                out.push('%');
                out.push(HEX[usize::from(b >> 4)] as char);
                out.push(HEX[usize::from(b & 0xf)] as char);
            }
        }
    }
    out
}

// FIX[#19]: Module-level constant so it can be referenced inside closures and
// loops without triggering the "item after statements" clippy lint.
const SQLITE_HEADER: &[u8; 16] = b"SQLite format 3\0";

#[allow(clippy::too_many_lines)]
pub async fn admin_backup(State(state): State<AppState>, jar: CookieJar) -> Result<Response> {
let session_id = jar
Expand Down Expand Up @@ -397,6 +447,35 @@ pub async fn admin_restore(
.map_err(|e| AppError::Internal(anyhow::anyhow!("Create temp DB: {e}")))?;
copy_limited(&mut entry, &mut out, ZIP_ENTRY_MAX_BYTES)
.map_err(|e| AppError::Internal(anyhow::anyhow!("Write temp DB: {e}")))?;

// FIX[#19]: Validate SQLite magic bytes before handing this
// file to the backup API. Without this check a malicious or
// corrupted "chan.db" inside the zip (e.g. a shell script or
// truncated file) would be opened by rusqlite, which could
// panic or return opaque internal errors. The magic sequence
// is the first 16 bytes of every valid SQLite 3 database.
let mut header = [0u8; 16];
{
use std::io::Read;
let mut f = std::fs::File::open(&temp_db).map_err(|e| {
AppError::Internal(anyhow::anyhow!("Magic check open: {e}"))
})?;
if f.read_exact(&mut header).is_err() {
let _ = std::fs::remove_file(&temp_db);
return Err(AppError::BadRequest(
"Uploaded chan.db is not a valid SQLite database (file too small)."
.into(),
));
}
}
if &header != SQLITE_HEADER {
let _ = std::fs::remove_file(&temp_db);
return Err(AppError::BadRequest(
"Uploaded chan.db is not a valid SQLite database (invalid magic bytes)."
.into(),
));
}

db_extracted = true;

} else if let Some(rel) = name.strip_prefix("uploads/") {
Expand Down Expand Up @@ -1430,6 +1509,30 @@ pub async fn restore_saved_full_backup(
.map_err(|e| AppError::Internal(anyhow::anyhow!("Create temp DB: {e}")))?;
copy_limited(&mut entry, &mut out, ZIP_ENTRY_MAX_BYTES)
.map_err(|e| AppError::Internal(anyhow::anyhow!("Write temp DB: {e}")))?;

// FIX[#19]: Validate SQLite magic bytes (same guard as admin_restore).
let mut header = [0u8; 16];
{
use std::io::Read;
let mut f = std::fs::File::open(&temp_db).map_err(|e| {
AppError::Internal(anyhow::anyhow!("Magic check open: {e}"))
})?;
if f.read_exact(&mut header).is_err() {
let _ = std::fs::remove_file(&temp_db);
return Err(AppError::BadRequest(
"Uploaded chan.db is not a valid SQLite database (file too small)."
.into(),
));
}
}
if &header != SQLITE_HEADER {
let _ = std::fs::remove_file(&temp_db);
return Err(AppError::BadRequest(
"Uploaded chan.db is not a valid SQLite database (invalid magic bytes)."
.into(),
));
}

db_extracted = true;
} else if let Some(rel) = name.strip_prefix("uploads/") {
if rel.is_empty() {
Expand Down Expand Up @@ -1519,24 +1622,6 @@ pub async fn restore_saved_board_backup(
jar: CookieJar,
Form(form): Form<RestoreSavedForm>,
) -> Result<Response> {
fn encode_q(s: &str) -> String {
#[allow(clippy::arithmetic_side_effects)]
const fn nibble(n: u8) -> char {
match n {
0..=9 => (b'0' + n) as char,
_ => (b'A' + n - 10) as char,
}
}
s.bytes()
.flat_map(|b| match b {
b'A'..=b'Z' | b'a'..=b'z' | b'0'..=b'9' | b'-' | b'_' | b'.' | b'~' => {
vec![b as char]
}
b' ' => vec!['+'],
b => vec!['%', nibble(b >> 4), nibble(b & 0xf)],
})
.collect()
}
let session_id = jar
.get(super::SESSION_COOKIE)
.map(|c| c.value().to_string());
Expand Down Expand Up @@ -2250,31 +2335,6 @@ pub async fn board_restore(
jar: CookieJar,
mut multipart: Multipart,
) -> Response {
#[allow(clippy::arithmetic_side_effects)]
const fn nibble(n: u8) -> char {
match n {
0..=9 => (b'0' + n) as char,
_ => (b'A' + n - 10) as char,
}
}
fn encode_q(s: &str) -> String {
let mut out = String::with_capacity(s.len());
for b in s.bytes() {
match b {
b'A'..=b'Z' | b'a'..=b'z' | b'0'..=b'9' | b'-' | b'_' | b'.' | b'~' => {
out.push(b as char);
}
b' ' => out.push('+'),
b => {
out.push('%');
out.push(nibble(b >> 4));
out.push(nibble(b & 0xf));
}
}
}
out
}

// Run the whole operation as a fallible async block so any early return
// with Err(...) is caught below and turned into a redirect.
let result: Result<String> = async {
Expand Down
46 changes: 33 additions & 13 deletions src/logging.rs
Original file line number Diff line number Diff line change
@@ -1,36 +1,56 @@
// logging.rs — Structured logging initialisation.
// db/logging.rs — Structured logging initialisation.
//
// Sets up two output layers:
// 1. stderr — human-readable, `info` and above (terminal/systemd journal)
// 2. file — JSON formatted, `debug` and above, written to
// `rustchan.log` inside `log_dir`
// `rustchan.log` inside `db_dir`
//
// Respects `RUST_LOG` env var if set.
//
// NOTE: this module lives in `db/` so that log files are written alongside the
// database file rather than next to the binary. The caller should pass the
// same directory used for `chan.db` (typically the directory containing the
// binary at first run, but overridable via config).

use std::path::Path;
use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt, EnvFilter};

/// Initialise the global tracing subscriber.
///
/// `log_dir` is the directory where `rustchan.log` will be written.
/// Typically this is the directory that contains the binary itself
/// (`std::env::current_exe()?.parent()`), so log output stays alongside
/// the executable and is easy to find.
/// `db_dir` is the directory where `rustchan.log` will be written.
/// This should be the same directory as `chan.db` so that logs and the
/// database stay together and are easy to locate, back up, and rotate as
/// a unit.
///
/// FIX[#33]: The file appender now uses daily rotation
/// (`tracing_appender::rolling::daily`) instead of `rolling::never`. A single
/// append-only log file grows without bound on a busy instance; if the
/// filesystem fills, `SQLite`'s WAL checkpoint will fail and can corrupt the
/// database. Daily rotation caps each log file to roughly one day of output
/// and leaves old files on disk with a date suffix so they can be pruned by a
/// standard `logrotate` rule or a cron job.
///
/// The file appender is non-rotating — a single `rustchan.log` is used for
/// the lifetime of the process. Rotation can be added later via
/// `tracing_appender::rolling::daily` / `hourly` without changing callers.
pub fn init_logging(log_dir: &Path) {
/// Suggested logrotate stanza (`/etc/logrotate.d/rustchan`):
/// ```text
/// /path/to/db/rustchan.log.* {
/// daily
/// rotate 14
/// compress
/// missingok
/// notifempty
/// copytruncate
/// }
/// ```
pub fn init_logging(db_dir: &Path) {
let env_filter = EnvFilter::try_from_default_env()
.unwrap_or_else(|_| EnvFilter::new("rustchan=info,tower_http=warn"));

// stderr layer — compact human-readable output for the terminal.
let stderr_layer = fmt::layer().with_target(false).compact();

// File layer — JSON, includes file/line for structured log analysis.
// Uses `tracing_appender::rolling::never` so the file is never rotated
// automatically; restart the process to start a fresh log.
let file_appender = tracing_appender::rolling::never(log_dir, "rustchan.log");
// FIX[#33]: daily rotation; files are named `rustchan.log.YYYY-MM-DD`.
let file_appender = tracing_appender::rolling::daily(db_dir, "rustchan.log");
let file_layer = fmt::layer()
.json()
.with_writer(file_appender)
Expand Down
Loading